1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
4 * Author: Marc Zyngier <marc.zyngier@arm.com>
7 #include <linux/acpi.h>
8 #include <linux/acpi_iort.h>
9 #include <linux/bitfield.h>
10 #include <linux/bitmap.h>
11 #include <linux/cpu.h>
12 #include <linux/crash_dump.h>
13 #include <linux/delay.h>
14 #include <linux/dma-iommu.h>
15 #include <linux/efi.h>
16 #include <linux/interrupt.h>
17 #include <linux/irqdomain.h>
18 #include <linux/list.h>
19 #include <linux/log2.h>
20 #include <linux/memblock.h>
22 #include <linux/msi.h>
24 #include <linux/of_address.h>
25 #include <linux/of_irq.h>
26 #include <linux/of_pci.h>
27 #include <linux/of_platform.h>
28 #include <linux/percpu.h>
29 #include <linux/slab.h>
30 #include <linux/syscore_ops.h>
32 #include <linux/irqchip.h>
33 #include <linux/irqchip/arm-gic-v3.h>
34 #include <linux/irqchip/arm-gic-v4.h>
36 #include <asm/cputype.h>
37 #include <asm/exception.h>
39 #include "irq-gic-common.h"
41 #define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1ULL << 0)
42 #define ITS_FLAGS_WORKAROUND_CAVIUM_22375 (1ULL << 1)
43 #define ITS_FLAGS_WORKAROUND_CAVIUM_23144 (1ULL << 2)
44 #define ITS_FLAGS_SAVE_SUSPEND_STATE (1ULL << 3)
46 #define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0)
47 #define RDIST_FLAGS_RD_TABLES_PREALLOCATED (1 << 1)
49 static u32 lpi_id_bits;
52 * We allocate memory for PROPBASE to cover 2 ^ lpi_id_bits LPIs to
53 * deal with (one configuration byte per interrupt). PENDBASE has to
54 * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI).
56 #define LPI_NRBITS lpi_id_bits
57 #define LPI_PROPBASE_SZ ALIGN(BIT(LPI_NRBITS), SZ_64K)
58 #define LPI_PENDBASE_SZ ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K)
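/*
 * For example, with lpi_id_bits = 16: LPI_PROPBASE_SZ is
 * ALIGN(65536, 64K) = 64kB (one configuration byte per LPI), and
 * LPI_PENDBASE_SZ is ALIGN(8192, 64K) = 64kB (one pending bit per
 * INTID, rounded up to the 64kB alignment requirement).
 */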
60 #define LPI_PROP_DEFAULT_PRIO GICD_INT_DEF_PRI
63 * Collection structure - just an ID, and a redistributor address to
64 * ping. We use one per CPU as a bag of interrupts assigned to this CPU.
67 struct its_collection {
73 * The ITS_BASER structure - contains memory information, cached
74 * value of BASER register configuration and ITS page size.
86 * The ITS structure - contains most of the infrastructure, with the
87 * top-level MSI domain, the command queue, the collections, and the
88 * list of devices writing to it.
90 * dev_alloc_lock has to be taken for device allocations, while the
91 * spinlock must be taken to parse data structures such as the device list.
96 struct mutex dev_alloc_lock;
97 struct list_head entry;
99 phys_addr_t phys_base;
100 struct its_cmd_block *cmd_base;
101 struct its_cmd_block *cmd_write;
102 struct its_baser tables[GITS_BASER_NR_REGS];
103 struct its_collection *collections;
104 struct fwnode_handle *fwnode_handle;
105 u64 (*get_msi_base)(struct its_device *its_dev);
109 struct list_head its_device_list;
111 unsigned long list_nr;
113 unsigned int msi_domain_flags;
114 u32 pre_its_base; /* for Socionext Synquacer */
115 int vlpi_redist_offset;
118 #define is_v4(its) (!!((its)->typer & GITS_TYPER_VLPIS))
119 #define device_ids(its) (FIELD_GET(GITS_TYPER_DEVBITS, (its)->typer) + 1)
121 #define ITS_ITT_ALIGN SZ_256
123 /* The maximum number of VPEID bits supported by VLPI commands */
124 #define ITS_MAX_VPEID_BITS (16)
125 #define ITS_MAX_VPEID (1 << (ITS_MAX_VPEID_BITS))
127 /* Convert page order to size in bytes */
128 #define PAGE_ORDER_TO_SIZE(o) (PAGE_SIZE << (o))
130 struct event_lpi_map {
131 unsigned long *lpi_map;
133 irq_hw_number_t lpi_base;
135 struct mutex vlpi_lock;
137 struct its_vlpi_map *vlpi_maps;
142 * The ITS view of a device - belongs to an ITS, owns an interrupt
143 * translation table, and a list of interrupts. If some of its
144 * LPIs are injected into a guest (GICv4), the event_map.vm field
145 * indicates which one.
148 struct list_head entry;
149 struct its_node *its;
150 struct event_lpi_map event_map;
159 struct its_device *dev;
160 struct its_vpe **vpes;
164 static LIST_HEAD(its_nodes);
165 static DEFINE_RAW_SPINLOCK(its_lock);
166 static struct rdists *gic_rdists;
167 static struct irq_domain *its_parent;
169 static unsigned long its_list_map;
170 static u16 vmovp_seq_num;
171 static DEFINE_RAW_SPINLOCK(vmovp_lock);
173 static DEFINE_IDA(its_vpeid_ida);
175 #define gic_data_rdist() (raw_cpu_ptr(gic_rdists->rdist))
176 #define gic_data_rdist_cpu(cpu) (per_cpu_ptr(gic_rdists->rdist, cpu))
177 #define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
178 #define gic_data_rdist_vlpi_base() (gic_data_rdist_rd_base() + SZ_128K)
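/*
 * GICv4 redistributors expose two extra 64kB register frames after the
 * usual RD_base and SGI_base ones; the VLPI page holding
 * GICR_VPROPBASER/GICR_VPENDBASER therefore lives 128kB above RD_base.
 */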
180 static u16 get_its_list(struct its_vm *vm)
182 struct its_node *its;
183 unsigned long its_list = 0;
185 list_for_each_entry(its, &its_nodes, entry) {
189 if (vm->vlpi_count[its->list_nr])
190 __set_bit(its->list_nr, &its_list);
193 return (u16)its_list;
196 static inline u32 its_get_event_id(struct irq_data *d)
198 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
199 return d->hwirq - its_dev->event_map.lpi_base;
202 static struct its_collection *dev_event_to_col(struct its_device *its_dev,
205 struct its_node *its = its_dev->its;
207 return its->collections + its_dev->event_map.col_map[event];
210 static struct its_vlpi_map *dev_event_to_vlpi_map(struct its_device *its_dev,
213 if (WARN_ON_ONCE(event >= its_dev->event_map.nr_lpis))
216 return &its_dev->event_map.vlpi_maps[event];
219 static struct its_collection *irq_to_col(struct irq_data *d)
221 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
223 return dev_event_to_col(its_dev, its_get_event_id(d));
226 static struct its_collection *valid_col(struct its_collection *col)
228 if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(15, 0)))
234 static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe)
236 if (valid_col(its->collections + vpe->col_idx))
243 * ITS command descriptors - parameters to be encoded in a command block.
246 struct its_cmd_desc {
249 struct its_device *dev;
254 struct its_device *dev;
259 struct its_device *dev;
264 struct its_device *dev;
269 struct its_collection *col;
274 struct its_device *dev;
280 struct its_device *dev;
281 struct its_collection *col;
286 struct its_device *dev;
291 struct its_collection *col;
300 struct its_collection *col;
306 struct its_device *dev;
314 struct its_device *dev;
321 struct its_collection *col;
329 * The ITS command block, which is what the ITS actually parses.
331 struct its_cmd_block {
334 __le64 raw_cmd_le[4];
338 #define ITS_CMD_QUEUE_SZ SZ_64K
339 #define ITS_CMD_QUEUE_NR_ENTRIES (ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block))
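/*
 * With a 64kB command queue and 32-byte commands (four 64-bit words
 * each), the queue holds 2048 entries.
 */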
341 typedef struct its_collection *(*its_cmd_builder_t)(struct its_node *,
342 struct its_cmd_block *,
343 struct its_cmd_desc *);
345 typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_node *,
346 struct its_cmd_block *,
347 struct its_cmd_desc *);
349 static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l)
351 u64 mask = GENMASK_ULL(h, l);
352 *raw_cmd &= ~mask;
353 *raw_cmd |= (val << l) & mask;
356 static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr)
358 its_mask_encode(&cmd->raw_cmd[0], cmd_nr, 7, 0);
361 static void its_encode_devid(struct its_cmd_block *cmd, u32 devid)
363 its_mask_encode(&cmd->raw_cmd[0], devid, 63, 32);
366 static void its_encode_event_id(struct its_cmd_block *cmd, u32 id)
368 its_mask_encode(&cmd->raw_cmd[1], id, 31, 0);
371 static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id)
373 its_mask_encode(&cmd->raw_cmd[1], phys_id, 63, 32);
376 static void its_encode_size(struct its_cmd_block *cmd, u8 size)
378 its_mask_encode(&cmd->raw_cmd[1], size, 4, 0);
381 static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr)
383 its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 51, 8);
386 static void its_encode_valid(struct its_cmd_block *cmd, int valid)
388 its_mask_encode(&cmd->raw_cmd[2], !!valid, 63, 63);
391 static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr)
393 its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 51, 16);
396 static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
398 its_mask_encode(&cmd->raw_cmd[2], col, 15, 0);
401 static void its_encode_vpeid(struct its_cmd_block *cmd, u16 vpeid)
403 its_mask_encode(&cmd->raw_cmd[1], vpeid, 47, 32);
406 static void its_encode_virt_id(struct its_cmd_block *cmd, u32 virt_id)
408 its_mask_encode(&cmd->raw_cmd[2], virt_id, 31, 0);
411 static void its_encode_db_phys_id(struct its_cmd_block *cmd, u32 db_phys_id)
413 its_mask_encode(&cmd->raw_cmd[2], db_phys_id, 63, 32);
416 static void its_encode_db_valid(struct its_cmd_block *cmd, bool db_valid)
418 its_mask_encode(&cmd->raw_cmd[2], db_valid, 0, 0);
421 static void its_encode_seq_num(struct its_cmd_block *cmd, u16 seq_num)
423 its_mask_encode(&cmd->raw_cmd[0], seq_num, 47, 32);
426 static void its_encode_its_list(struct its_cmd_block *cmd, u16 its_list)
428 its_mask_encode(&cmd->raw_cmd[1], its_list, 15, 0);
431 static void its_encode_vpt_addr(struct its_cmd_block *cmd, u64 vpt_pa)
433 its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 51, 16);
436 static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size)
438 its_mask_encode(&cmd->raw_cmd[3], vpt_size, 4, 0);
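/*
 * Example: a MAPTI command built with the helpers above fills one
 * 32-byte command block as follows:
 *   raw_cmd[0]: DeviceID in [63:32], GITS_CMD_MAPTI in [7:0]
 *   raw_cmd[1]: physical INTID in [63:32], EventID in [31:0]
 *   raw_cmd[2]: collection ID in [15:0]
 *   raw_cmd[3]: unused
 */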
441 static inline void its_fixup_cmd(struct its_cmd_block *cmd)
443 /* Let's fixup BE commands */
444 cmd->raw_cmd_le[0] = cpu_to_le64(cmd->raw_cmd[0]);
445 cmd->raw_cmd_le[1] = cpu_to_le64(cmd->raw_cmd[1]);
446 cmd->raw_cmd_le[2] = cpu_to_le64(cmd->raw_cmd[2]);
447 cmd->raw_cmd_le[3] = cpu_to_le64(cmd->raw_cmd[3]);
450 static struct its_collection *its_build_mapd_cmd(struct its_node *its,
451 struct its_cmd_block *cmd,
452 struct its_cmd_desc *desc)
454 unsigned long itt_addr;
455 u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites);
457 itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt);
458 itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN);
460 its_encode_cmd(cmd, GITS_CMD_MAPD);
461 its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id);
462 its_encode_size(cmd, size - 1);
463 its_encode_itt(cmd, itt_addr);
464 its_encode_valid(cmd, desc->its_mapd_cmd.valid);
471 static struct its_collection *its_build_mapc_cmd(struct its_node *its,
472 struct its_cmd_block *cmd,
473 struct its_cmd_desc *desc)
475 its_encode_cmd(cmd, GITS_CMD_MAPC);
476 its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);
477 its_encode_target(cmd, desc->its_mapc_cmd.col->target_address);
478 its_encode_valid(cmd, desc->its_mapc_cmd.valid);
482 return desc->its_mapc_cmd.col;
485 static struct its_collection *its_build_mapti_cmd(struct its_node *its,
486 struct its_cmd_block *cmd,
487 struct its_cmd_desc *desc)
489 struct its_collection *col;
491 col = dev_event_to_col(desc->its_mapti_cmd.dev,
492 desc->its_mapti_cmd.event_id);
494 its_encode_cmd(cmd, GITS_CMD_MAPTI);
495 its_encode_devid(cmd, desc->its_mapti_cmd.dev->device_id);
496 its_encode_event_id(cmd, desc->its_mapti_cmd.event_id);
497 its_encode_phys_id(cmd, desc->its_mapti_cmd.phys_id);
498 its_encode_collection(cmd, col->col_id);
502 return valid_col(col);
505 static struct its_collection *its_build_movi_cmd(struct its_node *its,
506 struct its_cmd_block *cmd,
507 struct its_cmd_desc *desc)
509 struct its_collection *col;
511 col = dev_event_to_col(desc->its_movi_cmd.dev,
512 desc->its_movi_cmd.event_id);
514 its_encode_cmd(cmd, GITS_CMD_MOVI);
515 its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id);
516 its_encode_event_id(cmd, desc->its_movi_cmd.event_id);
517 its_encode_collection(cmd, desc->its_movi_cmd.col->col_id);
521 return valid_col(col);
524 static struct its_collection *its_build_discard_cmd(struct its_node *its,
525 struct its_cmd_block *cmd,
526 struct its_cmd_desc *desc)
528 struct its_collection *col;
530 col = dev_event_to_col(desc->its_discard_cmd.dev,
531 desc->its_discard_cmd.event_id);
533 its_encode_cmd(cmd, GITS_CMD_DISCARD);
534 its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id);
535 its_encode_event_id(cmd, desc->its_discard_cmd.event_id);
539 return valid_col(col);
542 static struct its_collection *its_build_inv_cmd(struct its_node *its,
543 struct its_cmd_block *cmd,
544 struct its_cmd_desc *desc)
546 struct its_collection *col;
548 col = dev_event_to_col(desc->its_inv_cmd.dev,
549 desc->its_inv_cmd.event_id);
551 its_encode_cmd(cmd, GITS_CMD_INV);
552 its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
553 its_encode_event_id(cmd, desc->its_inv_cmd.event_id);
557 return valid_col(col);
560 static struct its_collection *its_build_int_cmd(struct its_node *its,
561 struct its_cmd_block *cmd,
562 struct its_cmd_desc *desc)
564 struct its_collection *col;
566 col = dev_event_to_col(desc->its_int_cmd.dev,
567 desc->its_int_cmd.event_id);
569 its_encode_cmd(cmd, GITS_CMD_INT);
570 its_encode_devid(cmd, desc->its_int_cmd.dev->device_id);
571 its_encode_event_id(cmd, desc->its_int_cmd.event_id);
575 return valid_col(col);
578 static struct its_collection *its_build_clear_cmd(struct its_node *its,
579 struct its_cmd_block *cmd,
580 struct its_cmd_desc *desc)
582 struct its_collection *col;
584 col = dev_event_to_col(desc->its_clear_cmd.dev,
585 desc->its_clear_cmd.event_id);
587 its_encode_cmd(cmd, GITS_CMD_CLEAR);
588 its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id);
589 its_encode_event_id(cmd, desc->its_clear_cmd.event_id);
593 return valid_col(col);
596 static struct its_collection *its_build_invall_cmd(struct its_node *its,
597 struct its_cmd_block *cmd,
598 struct its_cmd_desc *desc)
600 its_encode_cmd(cmd, GITS_CMD_INVALL);
601 its_encode_collection(cmd, desc->its_invall_cmd.col->col_id);
608 static struct its_vpe *its_build_vinvall_cmd(struct its_node *its,
609 struct its_cmd_block *cmd,
610 struct its_cmd_desc *desc)
612 its_encode_cmd(cmd, GITS_CMD_VINVALL);
613 its_encode_vpeid(cmd, desc->its_vinvall_cmd.vpe->vpe_id);
617 return valid_vpe(its, desc->its_vinvall_cmd.vpe);
620 static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
621 struct its_cmd_block *cmd,
622 struct its_cmd_desc *desc)
624 unsigned long vpt_addr;
627 vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page));
628 target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset;
630 its_encode_cmd(cmd, GITS_CMD_VMAPP);
631 its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id);
632 its_encode_valid(cmd, desc->its_vmapp_cmd.valid);
633 its_encode_target(cmd, target);
634 its_encode_vpt_addr(cmd, vpt_addr);
635 its_encode_vpt_size(cmd, LPI_NRBITS - 1);
639 return valid_vpe(its, desc->its_vmapp_cmd.vpe);
642 static struct its_vpe *its_build_vmapti_cmd(struct its_node *its,
643 struct its_cmd_block *cmd,
644 struct its_cmd_desc *desc)
648 if (desc->its_vmapti_cmd.db_enabled)
649 db = desc->its_vmapti_cmd.vpe->vpe_db_lpi;
653 its_encode_cmd(cmd, GITS_CMD_VMAPTI);
654 its_encode_devid(cmd, desc->its_vmapti_cmd.dev->device_id);
655 its_encode_vpeid(cmd, desc->its_vmapti_cmd.vpe->vpe_id);
656 its_encode_event_id(cmd, desc->its_vmapti_cmd.event_id);
657 its_encode_db_phys_id(cmd, db);
658 its_encode_virt_id(cmd, desc->its_vmapti_cmd.virt_id);
662 return valid_vpe(its, desc->its_vmapti_cmd.vpe);
665 static struct its_vpe *its_build_vmovi_cmd(struct its_node *its,
666 struct its_cmd_block *cmd,
667 struct its_cmd_desc *desc)
671 if (desc->its_vmovi_cmd.db_enabled)
672 db = desc->its_vmovi_cmd.vpe->vpe_db_lpi;
676 its_encode_cmd(cmd, GITS_CMD_VMOVI);
677 its_encode_devid(cmd, desc->its_vmovi_cmd.dev->device_id);
678 its_encode_vpeid(cmd, desc->its_vmovi_cmd.vpe->vpe_id);
679 its_encode_event_id(cmd, desc->its_vmovi_cmd.event_id);
680 its_encode_db_phys_id(cmd, db);
681 its_encode_db_valid(cmd, true);
685 return valid_vpe(its, desc->its_vmovi_cmd.vpe);
688 static struct its_vpe *its_build_vmovp_cmd(struct its_node *its,
689 struct its_cmd_block *cmd,
690 struct its_cmd_desc *desc)
694 target = desc->its_vmovp_cmd.col->target_address + its->vlpi_redist_offset;
695 its_encode_cmd(cmd, GITS_CMD_VMOVP);
696 its_encode_seq_num(cmd, desc->its_vmovp_cmd.seq_num);
697 its_encode_its_list(cmd, desc->its_vmovp_cmd.its_list);
698 its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id);
699 its_encode_target(cmd, target);
703 return valid_vpe(its, desc->its_vmovp_cmd.vpe);
706 static struct its_vpe *its_build_vinv_cmd(struct its_node *its,
707 struct its_cmd_block *cmd,
708 struct its_cmd_desc *desc)
710 struct its_vlpi_map *map;
712 map = dev_event_to_vlpi_map(desc->its_inv_cmd.dev,
713 desc->its_inv_cmd.event_id);
715 its_encode_cmd(cmd, GITS_CMD_INV);
716 its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
717 its_encode_event_id(cmd, desc->its_inv_cmd.event_id);
721 return valid_vpe(its, map->vpe);
724 static struct its_vpe *its_build_vint_cmd(struct its_node *its,
725 struct its_cmd_block *cmd,
726 struct its_cmd_desc *desc)
728 struct its_vlpi_map *map;
730 map = dev_event_to_vlpi_map(desc->its_int_cmd.dev,
731 desc->its_int_cmd.event_id);
733 its_encode_cmd(cmd, GITS_CMD_INT);
734 its_encode_devid(cmd, desc->its_int_cmd.dev->device_id);
735 its_encode_event_id(cmd, desc->its_int_cmd.event_id);
739 return valid_vpe(its, map->vpe);
742 static struct its_vpe *its_build_vclear_cmd(struct its_node *its,
743 struct its_cmd_block *cmd,
744 struct its_cmd_desc *desc)
746 struct its_vlpi_map *map;
748 map = dev_event_to_vlpi_map(desc->its_clear_cmd.dev,
749 desc->its_clear_cmd.event_id);
751 its_encode_cmd(cmd, GITS_CMD_CLEAR);
752 its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id);
753 its_encode_event_id(cmd, desc->its_clear_cmd.event_id);
757 return valid_vpe(its, map->vpe);
760 static u64 its_cmd_ptr_to_offset(struct its_node *its,
761 struct its_cmd_block *ptr)
763 return (ptr - its->cmd_base) * sizeof(*ptr);
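/*
 * The command queue is a ring buffer: it is considered full when
 * advancing the write pointer would make it catch up with the read
 * pointer, so one slot is always left unused.
 */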
766 static int its_queue_full(struct its_node *its)
771 widx = its->cmd_write - its->cmd_base;
772 ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block);
774 /* This is incredibly unlikely to happen, unless the ITS locks up. */
775 if (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx)
781 static struct its_cmd_block *its_allocate_entry(struct its_node *its)
783 struct its_cmd_block *cmd;
784 u32 count = 1000000; /* 1s! */
786 while (its_queue_full(its)) {
789 pr_err_ratelimited("ITS queue not draining\n");
796 cmd = its->cmd_write++;
798 /* Handle queue wrapping */
799 if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES))
800 its->cmd_write = its->cmd_base;
811 static struct its_cmd_block *its_post_commands(struct its_node *its)
813 u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write);
815 writel_relaxed(wr, its->base + GITS_CWRITER);
817 return its->cmd_write;
820 static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
823 * Make sure the commands written to memory are observable by the ITS.
826 if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING)
827 gic_flush_dcache_to_poc(cmd, sizeof(*cmd));
832 static int its_wait_for_range_completion(struct its_node *its,
834 struct its_cmd_block *to)
836 u64 rd_idx, to_idx, linear_idx;
837 u32 count = 1000000; /* 1s! */
839 /* Linearize to_idx if the command set has wrapped around */
840 to_idx = its_cmd_ptr_to_offset(its, to);
841 if (to_idx < prev_idx)
842 to_idx += ITS_CMD_QUEUE_SZ;
844 linear_idx = prev_idx;
849 rd_idx = readl_relaxed(its->base + GITS_CREADR);
852 * Compute the read pointer progress, taking the
853 * potential wrap-around into account.
855 delta = rd_idx - prev_idx;
856 if (rd_idx < prev_idx)
857 delta += ITS_CMD_QUEUE_SZ;
860 if (linear_idx >= to_idx)
865 pr_err_ratelimited("ITS queue timeout (%llu %llu)\n",
877 /* Warning, macro hell follows */
878 #define BUILD_SINGLE_CMD_FUNC(name, buildtype, synctype, buildfn) \
879 void name(struct its_node *its, \
881 struct its_cmd_desc *desc) \
883 struct its_cmd_block *cmd, *sync_cmd, *next_cmd; \
884 synctype *sync_obj; \
885 unsigned long flags; \
888 raw_spin_lock_irqsave(&its->lock, flags); \
890 cmd = its_allocate_entry(its); \
891 if (!cmd) { /* We're soooooo screwed... */ \
892 raw_spin_unlock_irqrestore(&its->lock, flags); \
895 sync_obj = builder(its, cmd, desc); \
896 its_flush_cmd(its, cmd); \
899 sync_cmd = its_allocate_entry(its); \
903 buildfn(its, sync_cmd, sync_obj); \
904 its_flush_cmd(its, sync_cmd); \
908 rd_idx = readl_relaxed(its->base + GITS_CREADR); \
909 next_cmd = its_post_commands(its); \
910 raw_spin_unlock_irqrestore(&its->lock, flags); \
912 if (its_wait_for_range_completion(its, rd_idx, next_cmd)) \
913 pr_err_ratelimited("ITS cmd %ps failed\n", builder); \
916 static void its_build_sync_cmd(struct its_node *its,
917 struct its_cmd_block *sync_cmd,
918 struct its_collection *sync_col)
920 its_encode_cmd(sync_cmd, GITS_CMD_SYNC);
921 its_encode_target(sync_cmd, sync_col->target_address);
923 its_fixup_cmd(sync_cmd);
926 static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t,
927 struct its_collection, its_build_sync_cmd)
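/*
 * The instantiation above expands to its_send_single_command(its,
 * builder, desc): it takes the command queue lock, allocates a slot,
 * lets 'builder' encode the command, appends a SYNC targeting the
 * returned collection (when there is one), posts everything by
 * updating GITS_CWRITER, and waits for GITS_CREADR to move past it.
 */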
929 static void its_build_vsync_cmd(struct its_node *its,
930 struct its_cmd_block *sync_cmd,
931 struct its_vpe *sync_vpe)
933 its_encode_cmd(sync_cmd, GITS_CMD_VSYNC);
934 its_encode_vpeid(sync_cmd, sync_vpe->vpe_id);
936 its_fixup_cmd(sync_cmd);
939 static BUILD_SINGLE_CMD_FUNC(its_send_single_vcommand, its_cmd_vbuilder_t,
940 struct its_vpe, its_build_vsync_cmd)
942 static void its_send_int(struct its_device *dev, u32 event_id)
944 struct its_cmd_desc desc;
946 desc.its_int_cmd.dev = dev;
947 desc.its_int_cmd.event_id = event_id;
949 its_send_single_command(dev->its, its_build_int_cmd, &desc);
952 static void its_send_clear(struct its_device *dev, u32 event_id)
954 struct its_cmd_desc desc;
956 desc.its_clear_cmd.dev = dev;
957 desc.its_clear_cmd.event_id = event_id;
959 its_send_single_command(dev->its, its_build_clear_cmd, &desc);
962 static void its_send_inv(struct its_device *dev, u32 event_id)
964 struct its_cmd_desc desc;
966 desc.its_inv_cmd.dev = dev;
967 desc.its_inv_cmd.event_id = event_id;
969 its_send_single_command(dev->its, its_build_inv_cmd, &desc);
972 static void its_send_mapd(struct its_device *dev, int valid)
974 struct its_cmd_desc desc;
976 desc.its_mapd_cmd.dev = dev;
977 desc.its_mapd_cmd.valid = !!valid;
979 its_send_single_command(dev->its, its_build_mapd_cmd, &desc);
982 static void its_send_mapc(struct its_node *its, struct its_collection *col,
985 struct its_cmd_desc desc;
987 desc.its_mapc_cmd.col = col;
988 desc.its_mapc_cmd.valid = !!valid;
990 its_send_single_command(its, its_build_mapc_cmd, &desc);
993 static void its_send_mapti(struct its_device *dev, u32 irq_id, u32 id)
995 struct its_cmd_desc desc;
997 desc.its_mapti_cmd.dev = dev;
998 desc.its_mapti_cmd.phys_id = irq_id;
999 desc.its_mapti_cmd.event_id = id;
1001 its_send_single_command(dev->its, its_build_mapti_cmd, &desc);
1004 static void its_send_movi(struct its_device *dev,
1005 struct its_collection *col, u32 id)
1007 struct its_cmd_desc desc;
1009 desc.its_movi_cmd.dev = dev;
1010 desc.its_movi_cmd.col = col;
1011 desc.its_movi_cmd.event_id = id;
1013 its_send_single_command(dev->its, its_build_movi_cmd, &desc);
1016 static void its_send_discard(struct its_device *dev, u32 id)
1018 struct its_cmd_desc desc;
1020 desc.its_discard_cmd.dev = dev;
1021 desc.its_discard_cmd.event_id = id;
1023 its_send_single_command(dev->its, its_build_discard_cmd, &desc);
1026 static void its_send_invall(struct its_node *its, struct its_collection *col)
1028 struct its_cmd_desc desc;
1030 desc.its_invall_cmd.col = col;
1032 its_send_single_command(its, its_build_invall_cmd, &desc);
1035 static void its_send_vmapti(struct its_device *dev, u32 id)
1037 struct its_vlpi_map *map = dev_event_to_vlpi_map(dev, id);
1038 struct its_cmd_desc desc;
1040 desc.its_vmapti_cmd.vpe = map->vpe;
1041 desc.its_vmapti_cmd.dev = dev;
1042 desc.its_vmapti_cmd.virt_id = map->vintid;
1043 desc.its_vmapti_cmd.event_id = id;
1044 desc.its_vmapti_cmd.db_enabled = map->db_enabled;
1046 its_send_single_vcommand(dev->its, its_build_vmapti_cmd, &desc);
1049 static void its_send_vmovi(struct its_device *dev, u32 id)
1051 struct its_vlpi_map *map = dev_event_to_vlpi_map(dev, id);
1052 struct its_cmd_desc desc;
1054 desc.its_vmovi_cmd.vpe = map->vpe;
1055 desc.its_vmovi_cmd.dev = dev;
1056 desc.its_vmovi_cmd.event_id = id;
1057 desc.its_vmovi_cmd.db_enabled = map->db_enabled;
1059 its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc);
1062 static void its_send_vmapp(struct its_node *its,
1063 struct its_vpe *vpe, bool valid)
1065 struct its_cmd_desc desc;
1067 desc.its_vmapp_cmd.vpe = vpe;
1068 desc.its_vmapp_cmd.valid = valid;
1069 desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx];
1071 its_send_single_vcommand(its, its_build_vmapp_cmd, &desc);
1074 static void its_send_vmovp(struct its_vpe *vpe)
1076 struct its_cmd_desc desc = {};
1077 struct its_node *its;
1078 unsigned long flags;
1079 int col_id = vpe->col_idx;
1081 desc.its_vmovp_cmd.vpe = vpe;
1083 if (!its_list_map) {
1084 its = list_first_entry(&its_nodes, struct its_node, entry);
1085 desc.its_vmovp_cmd.col = &its->collections[col_id];
1086 its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
1091 * Yet another marvel of the architecture. If using the
1092 * its_list "feature", we need to make sure that all ITSs
1093 * receive all VMOVP commands in the same order. The only way
1094 * to guarantee this is to make vmovp a serialization point.
1098 raw_spin_lock_irqsave(&vmovp_lock, flags);
1100 desc.its_vmovp_cmd.seq_num = vmovp_seq_num++;
1101 desc.its_vmovp_cmd.its_list = get_its_list(vpe->its_vm);
1104 list_for_each_entry(its, &its_nodes, entry) {
1108 if (!vpe->its_vm->vlpi_count[its->list_nr])
1111 desc.its_vmovp_cmd.col = &its->collections[col_id];
1112 its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
1115 raw_spin_unlock_irqrestore(&vmovp_lock, flags);
1118 static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe)
1120 struct its_cmd_desc desc;
1122 desc.its_vinvall_cmd.vpe = vpe;
1123 its_send_single_vcommand(its, its_build_vinvall_cmd, &desc);
1126 static void its_send_vinv(struct its_device *dev, u32 event_id)
1128 struct its_cmd_desc desc;
1131 * There is no real VINV command. This is just a normal INV,
1132 * with a VSYNC instead of a SYNC.
1134 desc.its_inv_cmd.dev = dev;
1135 desc.its_inv_cmd.event_id = event_id;
1137 its_send_single_vcommand(dev->its, its_build_vinv_cmd, &desc);
1140 static void its_send_vint(struct its_device *dev, u32 event_id)
1142 struct its_cmd_desc desc;
1145 * There is no real VINT command. This is just a normal INT,
1146 * with a VSYNC instead of a SYNC.
1148 desc.its_int_cmd.dev = dev;
1149 desc.its_int_cmd.event_id = event_id;
1151 its_send_single_vcommand(dev->its, its_build_vint_cmd, &desc);
1154 static void its_send_vclear(struct its_device *dev, u32 event_id)
1156 struct its_cmd_desc desc;
1159 * There is no real VCLEAR command. This is just a normal CLEAR,
1160 * with a VSYNC instead of a SYNC.
1162 desc.its_clear_cmd.dev = dev;
1163 desc.its_clear_cmd.event_id = event_id;
1165 its_send_single_vcommand(dev->its, its_build_vclear_cmd, &desc);
1169 * irqchip functions - assumes MSI, mostly.
1171 static struct its_vlpi_map *get_vlpi_map(struct irq_data *d)
1173 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1174 u32 event = its_get_event_id(d);
1176 if (!irqd_is_forwarded_to_vcpu(d))
1179 return dev_event_to_vlpi_map(its_dev, event);
1182 static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
1184 struct its_vlpi_map *map = get_vlpi_map(d);
1185 irq_hw_number_t hwirq;
1190 va = page_address(map->vm->vprop_page);
1191 hwirq = map->vintid;
1193 /* Remember the updated property */
1194 map->properties &= ~clr;
1195 map->properties |= set | LPI_PROP_GROUP1;
1197 va = gic_rdists->prop_table_va;
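/*
 * LPI (and vLPI) INTIDs start at 8192, and each interrupt owns a
 * single configuration byte in the property table, hence the
 * "hwirq - 8192" offset below.
 */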
1201 cfg = va + hwirq - 8192;
1202 *cfg &= ~clr;
1203 *cfg |= set | LPI_PROP_GROUP1;
1206 * Make the above write visible to the redistributors.
1207 * And yes, we're flushing exactly: One. Single. Byte.
1210 if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING)
1211 gic_flush_dcache_to_poc(cfg, sizeof(*cfg));
1216 static void wait_for_syncr(void __iomem *rdbase)
1218 while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
1222 static void direct_lpi_inv(struct irq_data *d)
1224 struct its_collection *col;
1225 void __iomem *rdbase;
1227 /* Target the redistributor this LPI is currently routed to */
1228 col = irq_to_col(d);
1229 rdbase = per_cpu_ptr(gic_rdists->rdist, col->col_id)->rd_base;
1230 gic_write_lpir(d->hwirq, rdbase + GICR_INVLPIR);
1232 wait_for_syncr(rdbase);
1235 static void lpi_update_config(struct irq_data *d, u8 clr, u8 set)
1237 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1239 lpi_write_config(d, clr, set);
1240 if (gic_rdists->has_direct_lpi && !irqd_is_forwarded_to_vcpu(d))
1242 else if (!irqd_is_forwarded_to_vcpu(d))
1243 its_send_inv(its_dev, its_get_event_id(d));
1245 its_send_vinv(its_dev, its_get_event_id(d));
1248 static void its_vlpi_set_doorbell(struct irq_data *d, bool enable)
1250 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1251 u32 event = its_get_event_id(d);
1252 struct its_vlpi_map *map;
1254 map = dev_event_to_vlpi_map(its_dev, event);
1256 if (map->db_enabled == enable)
1259 map->db_enabled = enable;
1262 * More fun with the architecture:
1264 * Ideally, we'd issue a VMAPTI to set the doorbell to its LPI
1265 * value or to 1023, depending on the enable bit. But that
1266 * would be issuing a mapping for an /existing/ DevID+EventID
1267 * pair, which is UNPREDICTABLE. Instead, let's issue a VMOVI
1268 * to the /same/ vPE, using this opportunity to adjust the
1269 * doorbell. Mouahahahaha. We loves it, Precious.
1271 its_send_vmovi(its_dev, event);
1274 static void its_mask_irq(struct irq_data *d)
1276 if (irqd_is_forwarded_to_vcpu(d))
1277 its_vlpi_set_doorbell(d, false);
1279 lpi_update_config(d, LPI_PROP_ENABLED, 0);
1282 static void its_unmask_irq(struct irq_data *d)
1284 if (irqd_is_forwarded_to_vcpu(d))
1285 its_vlpi_set_doorbell(d, true);
1287 lpi_update_config(d, 0, LPI_PROP_ENABLED);
1290 static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
1294 const struct cpumask *cpu_mask = cpu_online_mask;
1295 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1296 struct its_collection *target_col;
1297 u32 id = its_get_event_id(d);
1299 /* A forwarded interrupt should use irq_set_vcpu_affinity */
1300 if (irqd_is_forwarded_to_vcpu(d))
1303 /* lpi cannot be routed to a redistributor that is on a foreign node */
1304 if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
1305 if (its_dev->its->numa_node >= 0) {
1306 cpu_mask = cpumask_of_node(its_dev->its->numa_node);
1307 if (!cpumask_intersects(mask_val, cpu_mask))
1312 cpu = cpumask_any_and(mask_val, cpu_mask);
1314 if (cpu >= nr_cpu_ids)
1317 /* don't set the affinity when the target cpu is the same as the current one */
1318 if (cpu != its_dev->event_map.col_map[id]) {
1319 target_col = &its_dev->its->collections[cpu];
1320 its_send_movi(its_dev, target_col, id);
1321 its_dev->event_map.col_map[id] = cpu;
1322 irq_data_update_effective_affinity(d, cpumask_of(cpu));
1325 return IRQ_SET_MASK_OK_DONE;
1328 static u64 its_irq_get_msi_base(struct its_device *its_dev)
1330 struct its_node *its = its_dev->its;
1332 return its->phys_base + GITS_TRANSLATER;
1335 static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
1337 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1338 struct its_node *its;
1342 addr = its->get_msi_base(its_dev);
1344 msg->address_lo = lower_32_bits(addr);
1345 msg->address_hi = upper_32_bits(addr);
1346 msg->data = its_get_event_id(d);
1348 iommu_dma_compose_msi_msg(irq_data_get_msi_desc(d), msg);
1351 static int its_irq_set_irqchip_state(struct irq_data *d,
1352 enum irqchip_irq_state which,
1355 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1356 u32 event = its_get_event_id(d);
1358 if (which != IRQCHIP_STATE_PENDING)
1361 if (irqd_is_forwarded_to_vcpu(d)) {
1363 its_send_vint(its_dev, event);
1365 its_send_vclear(its_dev, event);
1368 its_send_int(its_dev, event);
1370 its_send_clear(its_dev, event);
1376 static void its_map_vm(struct its_node *its, struct its_vm *vm)
1378 unsigned long flags;
1380 /* Not using the ITS list? Everything is always mapped. */
1384 raw_spin_lock_irqsave(&vmovp_lock, flags);
1387 * If the VM wasn't mapped yet, iterate over the vpes and get them mapped now.
1390 vm->vlpi_count[its->list_nr]++;
1392 if (vm->vlpi_count[its->list_nr] == 1) {
1395 for (i = 0; i < vm->nr_vpes; i++) {
1396 struct its_vpe *vpe = vm->vpes[i];
1397 struct irq_data *d = irq_get_irq_data(vpe->irq);
1399 /* Map the VPE to the first possible CPU */
1400 vpe->col_idx = cpumask_first(cpu_online_mask);
1401 its_send_vmapp(its, vpe, true);
1402 its_send_vinvall(its, vpe);
1403 irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
1407 raw_spin_unlock_irqrestore(&vmovp_lock, flags);
1410 static void its_unmap_vm(struct its_node *its, struct its_vm *vm)
1412 unsigned long flags;
1414 /* Not using the ITS list? Everything is always mapped. */
1418 raw_spin_lock_irqsave(&vmovp_lock, flags);
1420 if (!--vm->vlpi_count[its->list_nr]) {
1423 for (i = 0; i < vm->nr_vpes; i++)
1424 its_send_vmapp(its, vm->vpes[i], false);
1427 raw_spin_unlock_irqrestore(&vmovp_lock, flags);
1430 static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
1432 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1433 u32 event = its_get_event_id(d);
1439 mutex_lock(&its_dev->event_map.vlpi_lock);
1441 if (!its_dev->event_map.vm) {
1442 struct its_vlpi_map *maps;
1444 maps = kcalloc(its_dev->event_map.nr_lpis, sizeof(*maps),
1451 its_dev->event_map.vm = info->map->vm;
1452 its_dev->event_map.vlpi_maps = maps;
1453 } else if (its_dev->event_map.vm != info->map->vm) {
1458 /* Get our private copy of the mapping information */
1459 its_dev->event_map.vlpi_maps[event] = *info->map;
1461 if (irqd_is_forwarded_to_vcpu(d)) {
1462 /* Already mapped, move it around */
1463 its_send_vmovi(its_dev, event);
1465 /* Ensure all the VPEs are mapped on this ITS */
1466 its_map_vm(its_dev->its, info->map->vm);
1469 * Flag the interrupt as forwarded so that we can
1470 * start poking the virtual property table.
1472 irqd_set_forwarded_to_vcpu(d);
1474 /* Write out the property to the prop table */
1475 lpi_write_config(d, 0xff, info->map->properties);
1477 /* Drop the physical mapping */
1478 its_send_discard(its_dev, event);
1480 /* and install the virtual one */
1481 its_send_vmapti(its_dev, event);
1483 /* Increment the number of VLPIs */
1484 its_dev->event_map.nr_vlpis++;
1488 mutex_unlock(&its_dev->event_map.vlpi_lock);
1492 static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info)
1494 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1495 struct its_vlpi_map *map = get_vlpi_map(d);
1498 mutex_lock(&its_dev->event_map.vlpi_lock);
1500 if (!its_dev->event_map.vm || !map->vm) {
1505 /* Copy our mapping information to the incoming request */
1509 mutex_unlock(&its_dev->event_map.vlpi_lock);
1513 static int its_vlpi_unmap(struct irq_data *d)
1515 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1516 u32 event = its_get_event_id(d);
1519 mutex_lock(&its_dev->event_map.vlpi_lock);
1521 if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) {
1526 /* Drop the virtual mapping */
1527 its_send_discard(its_dev, event);
1529 /* and restore the physical one */
1530 irqd_clr_forwarded_to_vcpu(d);
1531 its_send_mapti(its_dev, d->hwirq, event);
1532 lpi_update_config(d, 0xff, (LPI_PROP_DEFAULT_PRIO |
1536 /* Potentially unmap the VM from this ITS */
1537 its_unmap_vm(its_dev->its, its_dev->event_map.vm);
1540 * Drop the refcount and make the device available again if
1541 * this was the last VLPI.
1543 if (!--its_dev->event_map.nr_vlpis) {
1544 its_dev->event_map.vm = NULL;
1545 kfree(its_dev->event_map.vlpi_maps);
1549 mutex_unlock(&its_dev->event_map.vlpi_lock);
1553 static int its_vlpi_prop_update(struct irq_data *d, struct its_cmd_info *info)
1555 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1557 if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d))
1560 if (info->cmd_type == PROP_UPDATE_AND_INV_VLPI)
1561 lpi_update_config(d, 0xff, info->config);
1563 lpi_write_config(d, 0xff, info->config);
1564 its_vlpi_set_doorbell(d, !!(info->config & LPI_PROP_ENABLED));
1569 static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
1571 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1572 struct its_cmd_info *info = vcpu_info;
1575 if (!is_v4(its_dev->its))
1578 /* Unmap request? */
1580 return its_vlpi_unmap(d);
1582 switch (info->cmd_type) {
1584 return its_vlpi_map(d, info);
1587 return its_vlpi_get(d, info);
1589 case PROP_UPDATE_VLPI:
1590 case PROP_UPDATE_AND_INV_VLPI:
1591 return its_vlpi_prop_update(d, info);
1598 static struct irq_chip its_irq_chip = {
1600 .irq_mask = its_mask_irq,
1601 .irq_unmask = its_unmask_irq,
1602 .irq_eoi = irq_chip_eoi_parent,
1603 .irq_set_affinity = its_set_affinity,
1604 .irq_compose_msi_msg = its_irq_compose_msi_msg,
1605 .irq_set_irqchip_state = its_irq_set_irqchip_state,
1606 .irq_set_vcpu_affinity = its_irq_set_vcpu_affinity,
1611 * How we allocate LPIs:
1613 * lpi_range_list contains ranges of LPIs that are available to
1614 * allocate from. To allocate LPIs, just pick the first range that
1615 * fits the required allocation, and reduce it by the required
1616 * amount. Once empty, remove the range from the list.
1618 * To free a range of LPIs, add a free range to the list, sort it and
1619 * merge the result if the new range happens to be adjacent to an
1620 * already free block.
1622 * The consequence of the above is that allocation cost is low, but
1623 * freeing is expensive. We assume that freeing rarely occurs.
1625 #define ITS_MAX_LPI_NRBITS 16 /* 64K LPIs */
1627 static DEFINE_MUTEX(lpi_range_lock);
1628 static LIST_HEAD(lpi_range_list);
1631 struct list_head entry;
1632 u32 base_id;
1633 u32 span;
1636 static struct lpi_range *mk_lpi_range(u32 base, u32 span)
1638 struct lpi_range *range;
1640 range = kmalloc(sizeof(*range), GFP_KERNEL);
1642 range->base_id = base;
1649 static int alloc_lpi_range(u32 nr_lpis, u32 *base)
1651 struct lpi_range *range, *tmp;
1654 mutex_lock(&lpi_range_lock);
1656 list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) {
1657 if (range->span >= nr_lpis) {
1658 *base = range->base_id;
1659 range->base_id += nr_lpis;
1660 range->span -= nr_lpis;
1662 if (range->span == 0) {
1663 list_del(&range->entry);
1672 mutex_unlock(&lpi_range_lock);
1674 pr_debug("ITS: alloc %u:%u\n", *base, nr_lpis);
1678 static void merge_lpi_ranges(struct lpi_range *a, struct lpi_range *b)
1680 if (&a->entry == &lpi_range_list || &b->entry == &lpi_range_list)
1682 if (a->base_id + a->span != b->base_id)
1684 b->base_id = a->base_id;
1686 list_del(&a->entry);
1690 static int free_lpi_range(u32 base, u32 nr_lpis)
1692 struct lpi_range *new, *old;
1694 new = mk_lpi_range(base, nr_lpis);
1698 mutex_lock(&lpi_range_lock);
1700 list_for_each_entry_reverse(old, &lpi_range_list, entry) {
1701 if (old->base_id < base)
1705 * old is the last element with ->base_id smaller than base,
1706 * so new goes right after it. If there are no elements with
1707 * ->base_id smaller than base, &old->entry ends up pointing
1708 * at the head of the list, and inserting new at the start of
1709 * the list is the right thing to do in that case as well.
1711 list_add(&new->entry, &old->entry);
1713 * Now check if we can merge with the preceding and/or
1716 merge_lpi_ranges(old, new);
1717 merge_lpi_ranges(new, list_next_entry(new, entry));
1719 mutex_unlock(&lpi_range_lock);
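/*
 * For example, freeing [8192:8223] while [8224:8703] is already on the
 * free list produces a single [8192:8703] range, since the end of the
 * first range is adjacent to the start of the second
 * (a->base_id + a->span == b->base_id).
 */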
1723 static int __init its_lpi_init(u32 id_bits)
1725 u32 lpis = (1UL << id_bits) - 8192;
1729 numlpis = 1UL << GICD_TYPER_NUM_LPIS(gic_rdists->gicd_typer);
1731 if (numlpis > 2 && !WARN_ON(numlpis > lpis)) {
1733 pr_info("ITS: Using hypervisor restricted LPI range [%u]\n",
1738 * Initializing the allocator is just the same as freeing the
1739 * full range of LPIs.
1741 err = free_lpi_range(8192, lpis);
1742 pr_debug("ITS: Allocator initialized for %u LPIs\n", lpis);
1746 static unsigned long *its_lpi_alloc(int nr_irqs, u32 *base, int *nr_ids)
1748 unsigned long *bitmap = NULL;
1752 err = alloc_lpi_range(nr_irqs, base);
1757 } while (nr_irqs > 0);
1765 bitmap = kcalloc(BITS_TO_LONGS(nr_irqs), sizeof (long), GFP_ATOMIC);
1773 *base = *nr_ids = 0;
1778 static void its_lpi_free(unsigned long *bitmap, u32 base, u32 nr_ids)
1780 WARN_ON(free_lpi_range(base, nr_ids));
1784 static void gic_reset_prop_table(void *va)
1786 /* Priority 0xa0, Group-1, disabled */
1787 memset(va, LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1, LPI_PROPBASE_SZ);
1789 /* Make sure the GIC will observe the written configuration */
1790 gic_flush_dcache_to_poc(va, LPI_PROPBASE_SZ);
1793 static struct page *its_allocate_prop_table(gfp_t gfp_flags)
1795 struct page *prop_page;
1797 prop_page = alloc_pages(gfp_flags, get_order(LPI_PROPBASE_SZ));
1801 gic_reset_prop_table(page_address(prop_page));
1806 static void its_free_prop_table(struct page *prop_page)
1808 free_pages((unsigned long)page_address(prop_page),
1809 get_order(LPI_PROPBASE_SZ));
1812 static bool gic_check_reserved_range(phys_addr_t addr, unsigned long size)
1814 phys_addr_t start, end, addr_end;
1818 * We don't bother checking for a kdump kernel as by
1819 * construction, the LPI tables are out of this kernel's memory map.
1822 if (is_kdump_kernel())
1825 addr_end = addr + size - 1;
1827 for_each_reserved_mem_region(i, &start, &end) {
1828 if (addr >= start && addr_end <= end)
1832 /* Not found, not a good sign... */
1833 pr_warn("GICv3: Expected reserved range [%pa:%pa], not found\n",
1835 add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
1839 static int gic_reserve_range(phys_addr_t addr, unsigned long size)
1841 if (efi_enabled(EFI_CONFIG_TABLES))
1842 return efi_mem_reserve_persistent(addr, size);
1847 static int __init its_setup_lpi_prop_table(void)
1849 if (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) {
1852 val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER);
1853 lpi_id_bits = (val & GICR_PROPBASER_IDBITS_MASK) + 1;
1855 gic_rdists->prop_table_pa = val & GENMASK_ULL(51, 12);
1856 gic_rdists->prop_table_va = memremap(gic_rdists->prop_table_pa,
1859 gic_reset_prop_table(gic_rdists->prop_table_va);
1863 lpi_id_bits = min_t(u32,
1864 GICD_TYPER_ID_BITS(gic_rdists->gicd_typer),
1865 ITS_MAX_LPI_NRBITS);
1866 page = its_allocate_prop_table(GFP_NOWAIT);
1868 pr_err("Failed to allocate PROPBASE\n");
1872 gic_rdists->prop_table_pa = page_to_phys(page);
1873 gic_rdists->prop_table_va = page_address(page);
1874 WARN_ON(gic_reserve_range(gic_rdists->prop_table_pa,
1878 pr_info("GICv3: using LPI property table @%pa\n",
1879 &gic_rdists->prop_table_pa);
1881 return its_lpi_init(lpi_id_bits);
1884 static const char *its_base_type_string[] = {
1885 [GITS_BASER_TYPE_DEVICE] = "Devices",
1886 [GITS_BASER_TYPE_VCPU] = "Virtual CPUs",
1887 [GITS_BASER_TYPE_RESERVED3] = "Reserved (3)",
1888 [GITS_BASER_TYPE_COLLECTION] = "Interrupt Collections",
1889 [GITS_BASER_TYPE_RESERVED5] = "Reserved (5)",
1890 [GITS_BASER_TYPE_RESERVED6] = "Reserved (6)",
1891 [GITS_BASER_TYPE_RESERVED7] = "Reserved (7)",
1894 static u64 its_read_baser(struct its_node *its, struct its_baser *baser)
1896 u32 idx = baser - its->tables;
1898 return gits_read_baser(its->base + GITS_BASER + (idx << 3));
1901 static void its_write_baser(struct its_node *its, struct its_baser *baser,
1904 u32 idx = baser - its->tables;
1906 gits_write_baser(val, its->base + GITS_BASER + (idx << 3));
1907 baser->val = its_read_baser(its, baser);
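/*
 * The eight GITS_BASERn registers are laid out contiguously at
 * GITS_BASER + n * 8, which is where the (idx << 3) offset above comes
 * from.
 */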
1910 static int its_setup_baser(struct its_node *its, struct its_baser *baser,
1911 u64 cache, u64 shr, u32 psz, u32 order,
1914 u64 val = its_read_baser(its, baser);
1915 u64 esz = GITS_BASER_ENTRY_SIZE(val);
1916 u64 type = GITS_BASER_TYPE(val);
1917 u64 baser_phys, tmp;
1923 alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
1924 if (alloc_pages > GITS_BASER_PAGES_MAX) {
1925 pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n",
1926 &its->phys_base, its_base_type_string[type],
1927 alloc_pages, GITS_BASER_PAGES_MAX);
1928 alloc_pages = GITS_BASER_PAGES_MAX;
1929 order = get_order(GITS_BASER_PAGES_MAX * psz);
1932 page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, order);
1936 base = (void *)page_address(page);
1937 baser_phys = virt_to_phys(base);
1939 /* Check if the physical address of the memory is above 48bits */
1940 if (IS_ENABLED(CONFIG_ARM64_64K_PAGES) && (baser_phys >> 48)) {
1942 /* 52bit PA is supported only when PageSize=64K */
1943 if (psz != SZ_64K) {
1944 pr_err("ITS: no 52bit PA support when psz=%d\n", psz);
1945 free_pages((unsigned long)base, order);
1949 /* Convert 52bit PA to 48bit field */
1950 baser_phys = GITS_BASER_PHYS_52_to_48(baser_phys);
1955 (type << GITS_BASER_TYPE_SHIFT) |
1956 ((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) |
1957 ((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT) |
1962 val |= indirect ? GITS_BASER_INDIRECT : 0x0;
1966 val |= GITS_BASER_PAGE_SIZE_4K;
1969 val |= GITS_BASER_PAGE_SIZE_16K;
1972 val |= GITS_BASER_PAGE_SIZE_64K;
1976 its_write_baser(its, baser, val);
1979 if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) {
1981 * Shareability didn't stick. Just use
1982 * whatever the read reported, which is likely
1983 * to be the only thing this ITS
1984 * supports. If that's zero, make it
1985 * non-cacheable as well.
1987 shr = tmp & GITS_BASER_SHAREABILITY_MASK;
1989 cache = GITS_BASER_nC;
1990 gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order));
1995 if ((val ^ tmp) & GITS_BASER_PAGE_SIZE_MASK) {
1997 * Page size didn't stick. Let's try a smaller
1998 * size and retry. If we reach 4K, then
1999 * something is horribly wrong...
2001 free_pages((unsigned long)base, order);
2007 goto retry_alloc_baser;
2010 goto retry_alloc_baser;
2015 pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n",
2016 &its->phys_base, its_base_type_string[type],
2018 free_pages((unsigned long)base, order);
2022 baser->order = order;
2025 tmp = indirect ? GITS_LVL1_ENTRY_SIZE : esz;
2027 pr_info("ITS@%pa: allocated %d %s @%lx (%s, esz %d, psz %dK, shr %d)\n",
2028 &its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / (int)tmp),
2029 its_base_type_string[type],
2030 (unsigned long)virt_to_phys(base),
2031 indirect ? "indirect" : "flat", (int)esz,
2032 psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);
2037 static bool its_parse_indirect_baser(struct its_node *its,
2038 struct its_baser *baser,
2039 u32 psz, u32 *order, u32 ids)
2041 u64 tmp = its_read_baser(its, baser);
2042 u64 type = GITS_BASER_TYPE(tmp);
2043 u64 esz = GITS_BASER_ENTRY_SIZE(tmp);
2044 u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb;
2045 u32 new_order = *order;
2046 bool indirect = false;
2048 /* No need to enable indirection if memory requirement < (psz * 2) bytes */
2049 if ((esz << ids) > (psz * 2)) {
2051 * Find out whether hw supports a single or two-level table by
2052 * reading bit at offset '62' after writing '1' to it.
2054 its_write_baser(its, baser, val | GITS_BASER_INDIRECT);
2055 indirect = !!(baser->val & GITS_BASER_INDIRECT);
2059 * The size of a lvl2 table is equal to the ITS page size ('psz').
2060 * To compute the lvl1 table size, subtract from 'ids' the number
2061 * of ID bits covered by a single lvl2 table; the lvl1 table then
2062 * needs one GITS_LVL1_ENTRY_SIZE entry per remaining ID value.
2065 ids -= ilog2(psz / (int)esz);
2066 esz = GITS_LVL1_ENTRY_SIZE;
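/*
 * For example, assuming a 64kB ITS page and 8-byte device table
 * entries: a single lvl2 page covers 8192 DeviceIDs (13 ID bits), so
 * an ITS reporting 20 DeviceID bits needs a lvl1 table of
 * 2^(20 - 13) = 128 entries, i.e. 1kB of 8-byte lvl1 entries.
 */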
2071 * Allocate as many entries as required to fit the
2072 * range of device IDs that the ITS can grok... The ID
2073 * space being incredibly sparse, this results in a
2074 * massive waste of memory if two-level device table
2075 * feature is not supported by hardware.
2077 new_order = max_t(u32, get_order(esz << ids), new_order);
2078 if (new_order >= MAX_ORDER) {
2079 new_order = MAX_ORDER - 1;
2080 ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz);
2081 pr_warn("ITS@%pa: %s Table too large, reduce ids %llu->%u\n",
2082 &its->phys_base, its_base_type_string[type],
2083 device_ids(its), ids);
2091 static void its_free_tables(struct its_node *its)
2095 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
2096 if (its->tables[i].base) {
2097 free_pages((unsigned long)its->tables[i].base,
2098 its->tables[i].order);
2099 its->tables[i].base = NULL;
2104 static int its_alloc_tables(struct its_node *its)
2106 u64 shr = GITS_BASER_InnerShareable;
2107 u64 cache = GITS_BASER_RaWaWb;
2111 if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375)
2112 /* erratum 24313: ignore memory access type */
2113 cache = GITS_BASER_nCnB;
2115 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
2116 struct its_baser *baser = its->tables + i;
2117 u64 val = its_read_baser(its, baser);
2118 u64 type = GITS_BASER_TYPE(val);
2119 u32 order = get_order(psz);
2120 bool indirect = false;
2123 case GITS_BASER_TYPE_NONE:
2126 case GITS_BASER_TYPE_DEVICE:
2127 indirect = its_parse_indirect_baser(its, baser,
2132 case GITS_BASER_TYPE_VCPU:
2133 indirect = its_parse_indirect_baser(its, baser,
2135 ITS_MAX_VPEID_BITS);
2139 err = its_setup_baser(its, baser, cache, shr, psz, order, indirect);
2141 its_free_tables(its);
2145 /* Update settings which will be used for next BASERn */
2147 cache = baser->val & GITS_BASER_CACHEABILITY_MASK;
2148 shr = baser->val & GITS_BASER_SHAREABILITY_MASK;
2154 static int its_alloc_collections(struct its_node *its)
2158 its->collections = kcalloc(nr_cpu_ids, sizeof(*its->collections),
2160 if (!its->collections)
2163 for (i = 0; i < nr_cpu_ids; i++)
2164 its->collections[i].target_address = ~0ULL;
2169 static struct page *its_allocate_pending_table(gfp_t gfp_flags)
2171 struct page *pend_page;
2173 pend_page = alloc_pages(gfp_flags | __GFP_ZERO,
2174 get_order(LPI_PENDBASE_SZ));
2178 /* Make sure the GIC will observe the zero-ed page */
2179 gic_flush_dcache_to_poc(page_address(pend_page), LPI_PENDBASE_SZ);
2184 static void its_free_pending_table(struct page *pt)
2186 free_pages((unsigned long)page_address(pt), get_order(LPI_PENDBASE_SZ));
2190 * Booting with kdump and LPIs enabled is generally fine. Any other
2191 * case is wrong in the absence of firmware/EFI support.
2193 static bool enabled_lpis_allowed(void)
2198 /* Check whether the property table is in a reserved region */
2199 val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER);
2200 addr = val & GENMASK_ULL(51, 12);
2202 return gic_check_reserved_range(addr, LPI_PROPBASE_SZ);
2205 static int __init allocate_lpi_tables(void)
2211 * If LPIs are enabled while we run this from the boot CPU,
2212 * flag the RD tables as pre-allocated if the stars do align.
2214 val = readl_relaxed(gic_data_rdist_rd_base() + GICR_CTLR);
2215 if ((val & GICR_CTLR_ENABLE_LPIS) && enabled_lpis_allowed()) {
2216 gic_rdists->flags |= (RDIST_FLAGS_RD_TABLES_PREALLOCATED |
2217 RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING);
2218 pr_info("GICv3: Using preallocated redistributor tables\n");
2221 err = its_setup_lpi_prop_table();
2226 * We allocate all the pending tables anyway, as we may have a
2227 * mix of RDs that have had LPIs enabled, and some that
2228 * don't. We'll free the unused ones as each CPU comes online.
2230 for_each_possible_cpu(cpu) {
2231 struct page *pend_page;
2233 pend_page = its_allocate_pending_table(GFP_NOWAIT);
2235 pr_err("Failed to allocate PENDBASE for CPU%d\n", cpu);
2239 gic_data_rdist_cpu(cpu)->pend_page = pend_page;
2245 static u64 its_clear_vpend_valid(void __iomem *vlpi_base)
2247 u32 count = 1000000; /* 1s! */
2251 val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
2252 val &= ~GICR_VPENDBASER_Valid;
2253 gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
2256 val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
2257 clean = !(val & GICR_VPENDBASER_Dirty);
2263 } while (!clean && count);
2268 static void its_cpu_init_lpis(void)
2270 void __iomem *rbase = gic_data_rdist_rd_base();
2271 struct page *pend_page;
2275 if (gic_data_rdist()->lpi_enabled)
2278 val = readl_relaxed(rbase + GICR_CTLR);
2279 if ((gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) &&
2280 (val & GICR_CTLR_ENABLE_LPIS)) {
2282 * Check that we get the same property table on all
2283 * RDs. If we don't, this is hopeless.
2285 paddr = gicr_read_propbaser(rbase + GICR_PROPBASER);
2286 paddr &= GENMASK_ULL(51, 12);
2287 if (WARN_ON(gic_rdists->prop_table_pa != paddr))
2288 add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
2290 paddr = gicr_read_pendbaser(rbase + GICR_PENDBASER);
2291 paddr &= GENMASK_ULL(51, 16);
2293 WARN_ON(!gic_check_reserved_range(paddr, LPI_PENDBASE_SZ));
2294 its_free_pending_table(gic_data_rdist()->pend_page);
2295 gic_data_rdist()->pend_page = NULL;
2300 pend_page = gic_data_rdist()->pend_page;
2301 paddr = page_to_phys(pend_page);
2302 WARN_ON(gic_reserve_range(paddr, LPI_PENDBASE_SZ));
2305 val = (gic_rdists->prop_table_pa |
2306 GICR_PROPBASER_InnerShareable |
2307 GICR_PROPBASER_RaWaWb |
2308 ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK));
2310 gicr_write_propbaser(val, rbase + GICR_PROPBASER);
2311 tmp = gicr_read_propbaser(rbase + GICR_PROPBASER);
2313 if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) {
2314 if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) {
2316 * The HW reports non-shareable, we must
2317 * remove the cacheability attributes as well.
2320 val &= ~(GICR_PROPBASER_SHAREABILITY_MASK |
2321 GICR_PROPBASER_CACHEABILITY_MASK);
2322 val |= GICR_PROPBASER_nC;
2323 gicr_write_propbaser(val, rbase + GICR_PROPBASER);
2325 pr_info_once("GIC: using cache flushing for LPI property table\n");
2326 gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING;
2330 val = (page_to_phys(pend_page) |
2331 GICR_PENDBASER_InnerShareable |
2332 GICR_PENDBASER_RaWaWb);
2334 gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
2335 tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER);
2337 if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) {
2339 * The HW reports non-shareable, we must remove the
2340 * cacheability attributes as well.
2342 val &= ~(GICR_PENDBASER_SHAREABILITY_MASK |
2343 GICR_PENDBASER_CACHEABILITY_MASK);
2344 val |= GICR_PENDBASER_nC;
2345 gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
2349 val = readl_relaxed(rbase + GICR_CTLR);
2350 val |= GICR_CTLR_ENABLE_LPIS;
2351 writel_relaxed(val, rbase + GICR_CTLR);
2353 if (gic_rdists->has_vlpis) {
2354 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
2357 * It's possible for a CPU to receive VLPIs before it is
2358 * scheduled as a vPE, especially for the first CPU, and a
2359 * VLPI with an INTID larger than 2^(IDbits+1) will be considered
2360 * as out of range and dropped by the GIC.
2361 * So we initialize IDbits to a known value to avoid VLPI drops.
2363 val = (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
2364 pr_debug("GICv4: CPU%d: Init IDbits to 0x%llx for GICR_VPROPBASER\n",
2365 smp_processor_id(), val);
2366 gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
2369 * Also clear Valid bit of GICR_VPENDBASER, in case some
2370 * ancient programming gets left in and has possibility of
2371 * corrupting memory.
2373 val = its_clear_vpend_valid(vlpi_base);
2374 WARN_ON(val & GICR_VPENDBASER_Dirty);
2377 /* Make sure the GIC has seen the above */
2380 gic_data_rdist()->lpi_enabled = true;
2381 pr_info("GICv3: CPU%d: using %s LPI pending table @%pa\n",
2383 gic_data_rdist()->pend_page ? "allocated" : "reserved",
2387 static void its_cpu_init_collection(struct its_node *its)
2389 int cpu = smp_processor_id();
2392 /* avoid cross-node collections and their mapping */
2393 if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
2394 struct device_node *cpu_node;
2396 cpu_node = of_get_cpu_node(cpu, NULL);
2397 if (its->numa_node != NUMA_NO_NODE &&
2398 its->numa_node != of_node_to_nid(cpu_node))
2403 * We now have to bind each collection to its target redistributor.
2406 if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) {
2408 * This ITS wants the physical address of the redistributor.
2411 target = gic_data_rdist()->phys_base;
2413 /* This ITS wants a linear CPU number. */
2414 target = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
2415 target = GICR_TYPER_CPU_NUMBER(target) << 16;
2418 /* Perform collection mapping */
2419 its->collections[cpu].target_address = target;
2420 its->collections[cpu].col_id = cpu;
2422 its_send_mapc(its, &its->collections[cpu], 1);
2423 its_send_invall(its, &its->collections[cpu]);
2426 static void its_cpu_init_collections(void)
2428 struct its_node *its;
2430 raw_spin_lock(&its_lock);
2432 list_for_each_entry(its, &its_nodes, entry)
2433 its_cpu_init_collection(its);
2435 raw_spin_unlock(&its_lock);
2438 static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
2440 struct its_device *its_dev = NULL, *tmp;
2441 unsigned long flags;
2443 raw_spin_lock_irqsave(&its->lock, flags);
2445 list_for_each_entry(tmp, &its->its_device_list, entry) {
2446 if (tmp->device_id == dev_id) {
2452 raw_spin_unlock_irqrestore(&its->lock, flags);
2457 static struct its_baser *its_get_baser(struct its_node *its, u32 type)
2461 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
2462 if (GITS_BASER_TYPE(its->tables[i].val) == type)
2463 return &its->tables[i];
2469 static bool its_alloc_table_entry(struct its_node *its,
2470 struct its_baser *baser, u32 id)
2476 /* Don't allow device id that exceeds single, flat table limit */
2477 esz = GITS_BASER_ENTRY_SIZE(baser->val);
2478 if (!(baser->val & GITS_BASER_INDIRECT))
2479 return (id < (PAGE_ORDER_TO_SIZE(baser->order) / esz));
2481 /* Compute 1st level table index & check if that exceeds table limit */
2482 idx = id >> ilog2(baser->psz / esz);
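/*
 * Illustrative example (hypothetical numbers): with a 64kB ITS page
 * size and 8-byte entries, each level-2 page covers 8192 IDs, so an
 * ID of 20000 selects level-1 slot 2.
 */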
2483 if (idx >= (PAGE_ORDER_TO_SIZE(baser->order) / GITS_LVL1_ENTRY_SIZE))
2486 table = baser->base;
2488 /* Allocate memory for 2nd level table */
2490 page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
2491 get_order(baser->psz));
2495 /* Flush Lvl2 table to PoC if hw doesn't support coherency */
2496 if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
2497 gic_flush_dcache_to_poc(page_address(page), baser->psz);
2499 table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);
2501 /* Flush Lvl1 entry to PoC if hw doesn't support coherency */
2502 if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
2503 gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE);
2505 /* Ensure updated table contents are visible to ITS hardware */
2512 static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
2514 struct its_baser *baser;
2516 baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE);
2518 /* Don't allow device id that exceeds ITS hardware limit */
2520 return (ilog2(dev_id) < device_ids(its));
2522 return its_alloc_table_entry(its, baser, dev_id);
2525 static bool its_alloc_vpe_table(u32 vpe_id)
2527 struct its_node *its;
2530 * Make sure the L2 tables are allocated on *all* v4 ITSs. We
2531 * could try and only do it on ITSs corresponding to devices
2532 * that have interrupts targeted at this VPE, but the
2533 * complexity becomes crazy (and you have tons of memory anyway).
2536 list_for_each_entry(its, &its_nodes, entry) {
2537 struct its_baser *baser;
2542 baser = its_get_baser(its, GITS_BASER_TYPE_VCPU);
2546 if (!its_alloc_table_entry(its, baser, vpe_id))
2553 static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
2554 int nvecs, bool alloc_lpis)
2556 struct its_device *dev;
2557 unsigned long *lpi_map = NULL;
2558 unsigned long flags;
2559 u16 *col_map = NULL;
2566 if (!its_alloc_device_table(its, dev_id))
2569 if (WARN_ON(!is_power_of_2(nvecs)))
2570 nvecs = roundup_pow_of_two(nvecs);
2572 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
2574 * Even if the device wants a single LPI, the ITT must be
2575 * sized as a power of two (and you need at least one bit...).
2577 nr_ites = max(2, nvecs);
2578 sz = nr_ites * (FIELD_GET(GITS_TYPER_ITT_ENTRY_SIZE, its->typer) + 1);
2579 sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
2580 itt = kzalloc_node(sz, GFP_KERNEL, its->numa_node);
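/*
 * The ITS_ITT_ALIGN - 1 slack allows the ITT base handed to the ITS
 * via MAPD to be aligned up to 256 bytes within this buffer, which is
 * the granularity the command encoding can express.
 */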
2582 lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis);
2584 col_map = kcalloc(nr_lpis, sizeof(*col_map),
2587 col_map = kcalloc(nr_ites, sizeof(*col_map), GFP_KERNEL);
2592 if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) {
2600 gic_flush_dcache_to_poc(itt, sz);
2604 dev->nr_ites = nr_ites;
2605 dev->event_map.lpi_map = lpi_map;
2606 dev->event_map.col_map = col_map;
2607 dev->event_map.lpi_base = lpi_base;
2608 dev->event_map.nr_lpis = nr_lpis;
2609 mutex_init(&dev->event_map.vlpi_lock);
2610 dev->device_id = dev_id;
2611 INIT_LIST_HEAD(&dev->entry);
2613 raw_spin_lock_irqsave(&its->lock, flags);
2614 list_add(&dev->entry, &its->its_device_list);
2615 raw_spin_unlock_irqrestore(&its->lock, flags);
2617 /* Map device to its ITT */
2618 its_send_mapd(dev, 1);
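/*
 * MAPD associates the DeviceID with this ITT and its size; the size
 * is encoded as ilog2(nr_ites) - 1, which is why nr_ites was forced
 * to a power of two above.
 */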
2623 static void its_free_device(struct its_device *its_dev)
2625 unsigned long flags;
2627 raw_spin_lock_irqsave(&its_dev->its->lock, flags);
2628 list_del(&its_dev->entry);
2629 raw_spin_unlock_irqrestore(&its_dev->its->lock, flags);
2630 kfree(its_dev->event_map.col_map);
2631 kfree(its_dev->itt);
2635 static int its_alloc_device_irq(struct its_device *dev, int nvecs, irq_hw_number_t *hwirq)
2639 /* Find a free LPI region in lpi_map and allocate them. */
2640 idx = bitmap_find_free_region(dev->event_map.lpi_map,
2641 dev->event_map.nr_lpis,
2642 get_count_order(nvecs));
2646 *hwirq = dev->event_map.lpi_base + idx;
2651 static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
2652 int nvec, msi_alloc_info_t *info)
2654 struct its_node *its;
2655 struct its_device *its_dev;
2656 struct msi_domain_info *msi_info;
2661 * We ignore "dev" entirely, and rely on the dev_id that has
2662 * been passed via the scratchpad. This limits this domain's
2663 * usefulness to upper layers that definitely know that they
2664 * are built on top of the ITS.
2666 dev_id = info->scratchpad[0].ul;
2668 msi_info = msi_get_domain_info(domain);
2669 its = msi_info->data;
2671 if (!gic_rdists->has_direct_lpi &&
2673 vpe_proxy.dev->its == its &&
2674 dev_id == vpe_proxy.dev->device_id) {
2675 /* Bad luck. Get yourself a better implementation */
2676 WARN_ONCE(1, "DevId %x clashes with GICv4 VPE proxy device\n",
2681 mutex_lock(&its->dev_alloc_lock);
2682 its_dev = its_find_device(its, dev_id);
2685 * We have already seen this ID, probably through
2686 * another alias (PCI bridge of some sort). No need to
2687 * create the device.
2689 its_dev->shared = true;
2690 pr_debug("Reusing ITT for devID %x\n", dev_id);
2694 its_dev = its_create_device(its, dev_id, nvec, true);
2700 pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec));
2702 mutex_unlock(&its->dev_alloc_lock);
2703 info->scratchpad[0].ptr = its_dev;
2707 static struct msi_domain_ops its_msi_domain_ops = {
2708 .msi_prepare = its_msi_prepare,
2711 static int its_irq_gic_domain_alloc(struct irq_domain *domain,
2713 irq_hw_number_t hwirq)
2715 struct irq_fwspec fwspec;
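/*
 * Build a parent fwspec: a DT parent uses the three-cell GICv3
 * binding with the LPI class, while a bare fwnode parent (typically
 * ACPI) only takes a hwirq/trigger pair.
 */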
2717 if (irq_domain_get_of_node(domain->parent)) {
2718 fwspec.fwnode = domain->parent->fwnode;
2719 fwspec.param_count = 3;
2720 fwspec.param[0] = GIC_IRQ_TYPE_LPI;
2721 fwspec.param[1] = hwirq;
2722 fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
2723 } else if (is_fwnode_irqchip(domain->parent->fwnode)) {
2724 fwspec.fwnode = domain->parent->fwnode;
2725 fwspec.param_count = 2;
2726 fwspec.param[0] = hwirq;
2727 fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
2732 return irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
2735 static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
2736 unsigned int nr_irqs, void *args)
2738 msi_alloc_info_t *info = args;
2739 struct its_device *its_dev = info->scratchpad[0].ptr;
2740 struct its_node *its = its_dev->its;
2741 irq_hw_number_t hwirq;
2745 err = its_alloc_device_irq(its_dev, nr_irqs, &hwirq);
2749 err = iommu_dma_prepare_msi(info->desc, its->get_msi_base(its_dev));
2753 for (i = 0; i < nr_irqs; i++) {
2754 err = its_irq_gic_domain_alloc(domain, virq + i, hwirq + i);
2758 irq_domain_set_hwirq_and_chip(domain, virq + i,
2759 hwirq + i, &its_irq_chip, its_dev);
2760 irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq + i)));
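/*
 * LPIs only ever target a single CPU, so flag the interrupt as
 * single-target for the effective affinity machinery.
 */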
2761 pr_debug("ID:%d pID:%d vID:%d\n",
2762 (int)(hwirq + i - its_dev->event_map.lpi_base),
2763 (int)(hwirq + i), virq + i);
2769 static int its_irq_domain_activate(struct irq_domain *domain,
2770 struct irq_data *d, bool reserve)
2772 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
2773 u32 event = its_get_event_id(d);
2774 const struct cpumask *cpu_mask = cpu_online_mask;
2777 /* get the cpu_mask of the local node */
2778 if (its_dev->its->numa_node >= 0)
2779 cpu_mask = cpumask_of_node(its_dev->its->numa_node);
2781 /* Bind the LPI to the first possible CPU */
2782 cpu = cpumask_first_and(cpu_mask, cpu_online_mask);
2783 if (cpu >= nr_cpu_ids) {
2784 if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144)
2787 cpu = cpumask_first(cpu_online_mask);
2790 its_dev->event_map.col_map[event] = cpu;
2791 irq_data_update_effective_affinity(d, cpumask_of(cpu));
2793 /* Map the GIC IRQ and event to the device */
2794 its_send_mapti(its_dev, d->hwirq, event);
2798 static void its_irq_domain_deactivate(struct irq_domain *domain,
2801 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
2802 u32 event = its_get_event_id(d);
2804 /* Stop the delivery of interrupts */
2805 its_send_discard(its_dev, event);
2808 static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
2809 unsigned int nr_irqs)
2811 struct irq_data *d = irq_domain_get_irq_data(domain, virq);
2812 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
2813 struct its_node *its = its_dev->its;
2816 bitmap_release_region(its_dev->event_map.lpi_map,
2817 its_get_event_id(irq_domain_get_irq_data(domain, virq)),
2818 get_count_order(nr_irqs));
2820 for (i = 0; i < nr_irqs; i++) {
2821 struct irq_data *data = irq_domain_get_irq_data(domain,
2823 /* Nuke the entry in the domain */
2824 irq_domain_reset_irq_data(data);
2827 mutex_lock(&its->dev_alloc_lock);
2830 * If all interrupts have been freed, start mopping the
2831 * floor. This is conditioned on the device not being shared.
2833 if (!its_dev->shared &&
2834 bitmap_empty(its_dev->event_map.lpi_map,
2835 its_dev->event_map.nr_lpis)) {
2836 its_lpi_free(its_dev->event_map.lpi_map,
2837 its_dev->event_map.lpi_base,
2838 its_dev->event_map.nr_lpis);
2840 /* Unmap device/itt */
2841 its_send_mapd(its_dev, 0);
2842 its_free_device(its_dev);
2845 mutex_unlock(&its->dev_alloc_lock);
2847 irq_domain_free_irqs_parent(domain, virq, nr_irqs);
2850 static const struct irq_domain_ops its_domain_ops = {
2851 .alloc = its_irq_domain_alloc,
2852 .free = its_irq_domain_free,
2853 .activate = its_irq_domain_activate,
2854 .deactivate = its_irq_domain_deactivate,
2860 * If a GICv4 doesn't implement Direct LPIs (which is extremely
2861 * likely), the only way to perform an invalidate is to use a fake
2862 * device to issue an INV command, implying that the LPI has first
2863 * been mapped to some event on that device. Since this is not exactly
2864 * cheap, we try to keep that mapping around as long as possible, and
2865 * only issue an UNMAP if we're short on available slots.
2867 * Broken by design(tm).
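*
* The resulting flow is sketched by the helpers below: MAPTI the
* doorbell LPI onto a free event of the proxy device, then issue the
* required command (INV/INT/CLR/MOVI) against that event.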
2869 static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe)
2871 /* Already unmapped? */
2872 if (vpe->vpe_proxy_event == -1)
2875 its_send_discard(vpe_proxy.dev, vpe->vpe_proxy_event);
2876 vpe_proxy.vpes[vpe->vpe_proxy_event] = NULL;
2879 * We don't track empty slots at all, so let's move the
2880 * next_victim pointer if we can quickly reuse that slot
2881 * instead of nuking an existing entry. Not clear that this is
2882 * always a win though, and this might just generate a ripple
2883 * effect... Let's just hope VPEs don't migrate too often.
2885 if (vpe_proxy.vpes[vpe_proxy.next_victim])
2886 vpe_proxy.next_victim = vpe->vpe_proxy_event;
2888 vpe->vpe_proxy_event = -1;
2891 static void its_vpe_db_proxy_unmap(struct its_vpe *vpe)
2893 if (!gic_rdists->has_direct_lpi) {
2894 unsigned long flags;
2896 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
2897 its_vpe_db_proxy_unmap_locked(vpe);
2898 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
2902 static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe)
2904 /* Already mapped? */
2905 if (vpe->vpe_proxy_event != -1)
2908 /* This slot was already allocated. Kick the other VPE out. */
2909 if (vpe_proxy.vpes[vpe_proxy.next_victim])
2910 its_vpe_db_proxy_unmap_locked(vpe_proxy.vpes[vpe_proxy.next_victim]);
2912 /* Map the new VPE instead */
2913 vpe_proxy.vpes[vpe_proxy.next_victim] = vpe;
2914 vpe->vpe_proxy_event = vpe_proxy.next_victim;
2915 vpe_proxy.next_victim = (vpe_proxy.next_victim + 1) % vpe_proxy.dev->nr_ites;
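/*
 * next_victim walks the proxy ITT round-robin; the next time it lands
 * on a slot that is still occupied, the check at the top of this
 * function evicts the previous owner first.
 */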
2917 vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = vpe->col_idx;
2918 its_send_mapti(vpe_proxy.dev, vpe->vpe_db_lpi, vpe->vpe_proxy_event);
2921 static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to)
2923 unsigned long flags;
2924 struct its_collection *target_col;
2926 if (gic_rdists->has_direct_lpi) {
2927 void __iomem *rdbase;
2929 rdbase = per_cpu_ptr(gic_rdists->rdist, from)->rd_base;
2930 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
2931 wait_for_syncr(rdbase);
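/*
 * With DirectLPI there is no proxy mapping to move: any pending
 * doorbell on the source redistributor is simply cleared through
 * GICR_CLRLPIR and we are done.
 */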
2936 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
2938 its_vpe_db_proxy_map_locked(vpe);
2940 target_col = &vpe_proxy.dev->its->collections[to];
2941 its_send_movi(vpe_proxy.dev, target_col, vpe->vpe_proxy_event);
2942 vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = to;
2944 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
2947 static int its_vpe_set_affinity(struct irq_data *d,
2948 const struct cpumask *mask_val,
2951 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
2952 int cpu = cpumask_first(mask_val);
2955 * Changing affinity is mega expensive, so let's be as lazy as
2956 * we can and only do it if we really have to. Also, if mapped
2957 * into the proxy device, we need to move the doorbell
2958 * interrupt to its new location.
2960 if (vpe->col_idx != cpu) {
2961 int from = vpe->col_idx;
2964 its_send_vmovp(vpe);
2965 its_vpe_db_proxy_move(vpe, from, cpu);
2968 irq_data_update_effective_affinity(d, cpumask_of(cpu));
2970 return IRQ_SET_MASK_OK_DONE;
2973 static void its_vpe_schedule(struct its_vpe *vpe)
2975 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
2978 /* Schedule the VPE */
2979 val = virt_to_phys(page_address(vpe->its_vm->vprop_page)) &
2980 GENMASK_ULL(51, 12);
2981 val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
2982 val |= GICR_VPROPBASER_RaWb;
2983 val |= GICR_VPROPBASER_InnerShareable;
2984 gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
2986 val = virt_to_phys(page_address(vpe->vpt_page)) &
2987 GENMASK_ULL(51, 16);
2988 val |= GICR_VPENDBASER_RaWaWb;
2989 val |= GICR_VPENDBASER_NonShareable;
2991 * There is no good way of finding out if the pending table is
2992 * empty as we can race against the doorbell interrupt very
2993 * easily. So in the end, vpe->pending_last is only an
2994 * indication that the vcpu has something pending, not one
2995 * that the pending table is empty. A good implementation
2996 * would be able to read its coarse map pretty quickly anyway,
2997 * making this a tolerable issue.
2999 val |= GICR_VPENDBASER_PendingLast;
3000 val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0;
3001 val |= GICR_VPENDBASER_Valid;
3002 gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
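/*
 * Setting GICR_VPENDBASER.Valid makes the vPE resident on this
 * redistributor: from now on its VLPIs are delivered directly, using
 * the vprop/vpend tables programmed above.
 */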
3005 static void its_vpe_deschedule(struct its_vpe *vpe)
3007 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
3010 val = its_clear_vpend_valid(vlpi_base);
3012 if (unlikely(val & GICR_VPENDBASER_Dirty)) {
3013 pr_err_ratelimited("ITS virtual pending table not cleaning\n");
3015 vpe->pending_last = true;
3017 vpe->idai = !!(val & GICR_VPENDBASER_IDAI);
3018 vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
3022 static void its_vpe_invall(struct its_vpe *vpe)
3024 struct its_node *its;
3026 list_for_each_entry(its, &its_nodes, entry) {
3030 if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr])
3034 * Sending a VINVALL to a single ITS is enough, as all
3035 * we need is to reach the redistributors.
3037 its_send_vinvall(its, vpe);
3042 static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
3044 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3045 struct its_cmd_info *info = vcpu_info;
3047 switch (info->cmd_type) {
3049 its_vpe_schedule(vpe);
3052 case DESCHEDULE_VPE:
3053 its_vpe_deschedule(vpe);
3057 its_vpe_invall(vpe);
3065 static void its_vpe_send_cmd(struct its_vpe *vpe,
3066 void (*cmd)(struct its_device *, u32))
3068 unsigned long flags;
3070 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
3072 its_vpe_db_proxy_map_locked(vpe);
3073 cmd(vpe_proxy.dev, vpe->vpe_proxy_event);
3075 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
3078 static void its_vpe_send_inv(struct irq_data *d)
3080 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3082 if (gic_rdists->has_direct_lpi) {
3083 void __iomem *rdbase;
3085 /* Target the redistributor this VPE is currently known on */
3086 rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
3087 gic_write_lpir(d->parent_data->hwirq, rdbase + GICR_INVLPIR);
3088 wait_for_syncr(rdbase);
3090 its_vpe_send_cmd(vpe, its_send_inv);
3094 static void its_vpe_mask_irq(struct irq_data *d)
3097 * We need to mask the LPI, which is described by the parent
3098 * irq_data. Instead of calling into the parent (which won't
3099 * exactly do the right thing), let's simply use the
3100 * parent_data pointer. Yes, I'm naughty.
3102 lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0);
3103 its_vpe_send_inv(d);
3106 static void its_vpe_unmask_irq(struct irq_data *d)
3108 /* Same hack as above... */
3109 lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED);
3110 its_vpe_send_inv(d);
3113 static int its_vpe_set_irqchip_state(struct irq_data *d,
3114 enum irqchip_irq_state which,
3117 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3119 if (which != IRQCHIP_STATE_PENDING)
3122 if (gic_rdists->has_direct_lpi) {
3123 void __iomem *rdbase;
3125 rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
3127 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR);
3129 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
3130 wait_for_syncr(rdbase);
3134 its_vpe_send_cmd(vpe, its_send_int);
3136 its_vpe_send_cmd(vpe, its_send_clear);
3142 static struct irq_chip its_vpe_irq_chip = {
3143 .name = "GICv4-vpe",
3144 .irq_mask = its_vpe_mask_irq,
3145 .irq_unmask = its_vpe_unmask_irq,
3146 .irq_eoi = irq_chip_eoi_parent,
3147 .irq_set_affinity = its_vpe_set_affinity,
3148 .irq_set_irqchip_state = its_vpe_set_irqchip_state,
3149 .irq_set_vcpu_affinity = its_vpe_set_vcpu_affinity,
3152 static int its_vpe_id_alloc(void)
3154 return ida_simple_get(&its_vpeid_ida, 0, ITS_MAX_VPEID, GFP_KERNEL);
3157 static void its_vpe_id_free(u16 id)
3159 ida_simple_remove(&its_vpeid_ida, id);
3162 static int its_vpe_init(struct its_vpe *vpe)
3164 struct page *vpt_page;
3167 /* Allocate vpe_id */
3168 vpe_id = its_vpe_id_alloc();
3173 vpt_page = its_allocate_pending_table(GFP_KERNEL);
3175 its_vpe_id_free(vpe_id);
3179 if (!its_alloc_vpe_table(vpe_id)) {
3180 its_vpe_id_free(vpe_id);
3181 its_free_pending_table(vpt_page);
3185 vpe->vpe_id = vpe_id;
3186 vpe->vpt_page = vpt_page;
3187 vpe->vpe_proxy_event = -1;
3192 static void its_vpe_teardown(struct its_vpe *vpe)
3194 its_vpe_db_proxy_unmap(vpe);
3195 its_vpe_id_free(vpe->vpe_id);
3196 its_free_pending_table(vpe->vpt_page);
3199 static void its_vpe_irq_domain_free(struct irq_domain *domain,
3201 unsigned int nr_irqs)
3203 struct its_vm *vm = domain->host_data;
3206 irq_domain_free_irqs_parent(domain, virq, nr_irqs);
3208 for (i = 0; i < nr_irqs; i++) {
3209 struct irq_data *data = irq_domain_get_irq_data(domain,
3211 struct its_vpe *vpe = irq_data_get_irq_chip_data(data);
3213 BUG_ON(vm != vpe->its_vm);
3215 clear_bit(data->hwirq, vm->db_bitmap);
3216 its_vpe_teardown(vpe);
3217 irq_domain_reset_irq_data(data);
3220 if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) {
3221 its_lpi_free(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis);
3222 its_free_prop_table(vm->vprop_page);
3226 static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
3227 unsigned int nr_irqs, void *args)
3229 struct its_vm *vm = args;
3230 unsigned long *bitmap;
3231 struct page *vprop_page;
3232 int base, nr_ids, i, err = 0;
3236 bitmap = its_lpi_alloc(roundup_pow_of_two(nr_irqs), &base, &nr_ids);
3240 if (nr_ids < nr_irqs) {
3241 its_lpi_free(bitmap, base, nr_ids);
3245 vprop_page = its_allocate_prop_table(GFP_KERNEL);
3247 its_lpi_free(bitmap, base, nr_ids);
3251 vm->db_bitmap = bitmap;
3252 vm->db_lpi_base = base;
3253 vm->nr_db_lpis = nr_ids;
3254 vm->vprop_page = vprop_page;
3256 for (i = 0; i < nr_irqs; i++) {
3257 vm->vpes[i]->vpe_db_lpi = base + i;
3258 err = its_vpe_init(vm->vpes[i]);
3261 err = its_irq_gic_domain_alloc(domain, virq + i,
3262 vm->vpes[i]->vpe_db_lpi);
3265 irq_domain_set_hwirq_and_chip(domain, virq + i, i,
3266 &its_vpe_irq_chip, vm->vpes[i]);
3272 its_vpe_irq_domain_free(domain, virq, i - 1);
3274 its_lpi_free(bitmap, base, nr_ids);
3275 its_free_prop_table(vprop_page);
3281 static int its_vpe_irq_domain_activate(struct irq_domain *domain,
3282 struct irq_data *d, bool reserve)
3284 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3285 struct its_node *its;
3287 /* If we use the list map, we issue VMAPP on demand... */
3291 /* Map the VPE to the first possible CPU */
3292 vpe->col_idx = cpumask_first(cpu_online_mask);
3294 list_for_each_entry(its, &its_nodes, entry) {
3298 its_send_vmapp(its, vpe, true);
3299 its_send_vinvall(its, vpe);
3302 irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
3307 static void its_vpe_irq_domain_deactivate(struct irq_domain *domain,
3310 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3311 struct its_node *its;
3314 * If we use the list map, we unmap the VPE once no VLPIs are
3315 * associated with the VM.
3320 list_for_each_entry(its, &its_nodes, entry) {
3324 its_send_vmapp(its, vpe, false);
3328 static const struct irq_domain_ops its_vpe_domain_ops = {
3329 .alloc = its_vpe_irq_domain_alloc,
3330 .free = its_vpe_irq_domain_free,
3331 .activate = its_vpe_irq_domain_activate,
3332 .deactivate = its_vpe_irq_domain_deactivate,
3335 static int its_force_quiescent(void __iomem *base)
3337 u32 count = 1000000; /* 1s */
3340 val = readl_relaxed(base + GITS_CTLR);
3342 * The GIC architecture specification requires the ITS to be both
3343 * disabled and quiescent so that writes to GITS_BASER<n> or
3344 * GITS_CBASER do not have UNPREDICTABLE results.
3346 if ((val & GITS_CTLR_QUIESCENT) && !(val & GITS_CTLR_ENABLE))
3349 /* Disable the generation of all interrupts to this ITS */
3350 val &= ~(GITS_CTLR_ENABLE | GITS_CTLR_ImDe);
3351 writel_relaxed(val, base + GITS_CTLR);
3353 /* Poll GITS_CTLR and wait until ITS becomes quiescent */
3355 val = readl_relaxed(base + GITS_CTLR);
3356 if (val & GITS_CTLR_QUIESCENT)
3368 static bool __maybe_unused its_enable_quirk_cavium_22375(void *data)
3370 struct its_node *its = data;
3372 /* erratum 22375: only alloc 8MB table size (20 bits) */
3373 its->typer &= ~GITS_TYPER_DEVBITS;
3374 its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, 20 - 1);
3375 its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375;
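/*
 * DEVBITS is encoded as "number of bits minus one", so 20 - 1 caps
 * DeviceIDs at 20 bits; with a typical 8-byte entry (illustrative)
 * that is the 8MB flat table mentioned above.
 */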
3380 static bool __maybe_unused its_enable_quirk_cavium_23144(void *data)
3382 struct its_node *its = data;
3384 its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144;
3389 static bool __maybe_unused its_enable_quirk_qdf2400_e0065(void *data)
3391 struct its_node *its = data;
3393 /* On QDF2400, the size of the ITE is 16 bytes */
3394 its->typer &= ~GITS_TYPER_ITT_ENTRY_SIZE;
3395 its->typer |= FIELD_PREP(GITS_TYPER_ITT_ENTRY_SIZE, 16 - 1);
3400 static u64 its_irq_get_msi_base_pre_its(struct its_device *its_dev)
3402 struct its_node *its = its_dev->its;
3405 * The Socionext Synquacer SoC has a so-called 'pre-ITS',
3406 * which maps 32-bit writes targeted at a separate window of
3407 * size '4 << device_id_bits' onto writes to GITS_TRANSLATER
3408 * with device ID taken from bits [device_id_bits + 1:2] of
3409 * the window offset.
3411 return its->pre_its_base + (its_dev->device_id << 2);
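/*
 * Illustrative example (made-up DeviceID): a device with DeviceID 3
 * gets pre_its_base + 0xc as its doorbell; the pre-ITS turns the
 * write into a GITS_TRANSLATER access with DeviceID 3 recovered from
 * bits [device_id_bits + 1:2] of the window offset.
 */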
3414 static bool __maybe_unused its_enable_quirk_socionext_synquacer(void *data)
3416 struct its_node *its = data;
3417 u32 pre_its_window[2];
3420 if (!fwnode_property_read_u32_array(its->fwnode_handle,
3421 "socionext,synquacer-pre-its",
3423 ARRAY_SIZE(pre_its_window))) {
3425 its->pre_its_base = pre_its_window[0];
3426 its->get_msi_base = its_irq_get_msi_base_pre_its;
3428 ids = ilog2(pre_its_window[1]) - 2;
3429 if (device_ids(its) > ids) {
3430 its->typer &= ~GITS_TYPER_DEVBITS;
3431 its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, ids - 1);
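/*
 * Illustrative sizing: a 4MB pre-ITS window would yield
 * ilog2(SZ_4M) - 2 = 20 usable DeviceID bits, and GITS_TYPER.Devbits
 * is clamped to match.
 */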
3434 /* the pre-ITS breaks isolation, so disable MSI remapping */
3435 its->msi_domain_flags &= ~IRQ_DOMAIN_FLAG_MSI_REMAP;
3441 static bool __maybe_unused its_enable_quirk_hip07_161600802(void *data)
3443 struct its_node *its = data;
3446 * Hip07 insists on using the wrong address for the VLPI
3447 * page. Trick it into doing the right thing...
3449 its->vlpi_redist_offset = SZ_128K;
3453 static const struct gic_quirk its_quirks[] = {
3454 #ifdef CONFIG_CAVIUM_ERRATUM_22375
3456 .desc = "ITS: Cavium errata 22375, 24313",
3457 .iidr = 0xa100034c, /* ThunderX pass 1.x */
3459 .init = its_enable_quirk_cavium_22375,
3462 #ifdef CONFIG_CAVIUM_ERRATUM_23144
3464 .desc = "ITS: Cavium erratum 23144",
3465 .iidr = 0xa100034c, /* ThunderX pass 1.x */
3467 .init = its_enable_quirk_cavium_23144,
3470 #ifdef CONFIG_QCOM_QDF2400_ERRATUM_0065
3472 .desc = "ITS: QDF2400 erratum 0065",
3473 .iidr = 0x00001070, /* QDF2400 ITS rev 1.x */
3475 .init = its_enable_quirk_qdf2400_e0065,
3478 #ifdef CONFIG_SOCIONEXT_SYNQUACER_PREITS
3481 * The Socionext Synquacer SoC incorporates ARM's own GIC-500
3482 * implementation, but with a 'pre-ITS' added that requires
3483 * special handling in software.
3485 .desc = "ITS: Socionext Synquacer pre-ITS",
3488 .init = its_enable_quirk_socionext_synquacer,
3491 #ifdef CONFIG_HISILICON_ERRATUM_161600802
3493 .desc = "ITS: Hip07 erratum 161600802",
3496 .init = its_enable_quirk_hip07_161600802,
3503 static void its_enable_quirks(struct its_node *its)
3505 u32 iidr = readl_relaxed(its->base + GITS_IIDR);
3507 gic_enable_quirks(iidr, its_quirks, its);
3510 static int its_save_disable(void)
3512 struct its_node *its;
3515 raw_spin_lock(&its_lock);
3516 list_for_each_entry(its, &its_nodes, entry) {
3519 if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE))
3523 its->ctlr_save = readl_relaxed(base + GITS_CTLR);
3524 err = its_force_quiescent(base);
3526 pr_err("ITS@%pa: failed to quiesce: %d\n",
3527 &its->phys_base, err);
3528 writel_relaxed(its->ctlr_save, base + GITS_CTLR);
3532 its->cbaser_save = gits_read_cbaser(base + GITS_CBASER);
3537 list_for_each_entry_continue_reverse(its, &its_nodes, entry) {
3540 if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE))
3544 writel_relaxed(its->ctlr_save, base + GITS_CTLR);
3547 raw_spin_unlock(&its_lock);
3552 static void its_restore_enable(void)
3554 struct its_node *its;
3557 raw_spin_lock(&its_lock);
3558 list_for_each_entry(its, &its_nodes, entry) {
3562 if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE))
3568 * Make sure that the ITS is disabled. If it fails to quiesce,
3569 * don't restore it since writing to CBASER or BASER<n>
3570 * registers is undefined according to the GICv3 ITS specification.
3573 ret = its_force_quiescent(base);
3575 pr_err("ITS@%pa: failed to quiesce on resume: %d\n",
3576 &its->phys_base, ret);
3580 gits_write_cbaser(its->cbaser_save, base + GITS_CBASER);
3583 * Writing CBASER resets CREADR to 0, so make CWRITER and
3584 * cmd_write line up with it.
3586 its->cmd_write = its->cmd_base;
3587 gits_write_cwriter(0, base + GITS_CWRITER);
3589 /* Restore GITS_BASER from the value cache. */
3590 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
3591 struct its_baser *baser = &its->tables[i];
3593 if (!(baser->val & GITS_BASER_VALID))
3596 its_write_baser(its, baser, baser->val);
3598 writel_relaxed(its->ctlr_save, base + GITS_CTLR);
3601 * Reinit the collection if it's stored in the ITS. This is
3602 * indicated by the col_id being less than the HCC field.
3603 * CID < HCC, as specified by the GICv3 architecture.
3605 if (its->collections[smp_processor_id()].col_id <
3606 GITS_TYPER_HCC(gic_read_typer(base + GITS_TYPER)))
3607 its_cpu_init_collection(its);
3609 raw_spin_unlock(&its_lock);
3612 static struct syscore_ops its_syscore_ops = {
3613 .suspend = its_save_disable,
3614 .resume = its_restore_enable,
3617 static int its_init_domain(struct fwnode_handle *handle, struct its_node *its)
3619 struct irq_domain *inner_domain;
3620 struct msi_domain_info *info;
3622 info = kzalloc(sizeof(*info), GFP_KERNEL);
3626 inner_domain = irq_domain_create_tree(handle, &its_domain_ops, its);
3627 if (!inner_domain) {
3632 inner_domain->parent = its_parent;
3633 irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS);
3634 inner_domain->flags |= its->msi_domain_flags;
3635 info->ops = &its_msi_domain_ops;
3637 inner_domain->host_data = info;
3642 static int its_init_vpe_domain(void)
3644 struct its_node *its;
3648 if (gic_rdists->has_direct_lpi) {
3649 pr_info("ITS: Using DirectLPI for VPE invalidation\n");
3653 /* Any ITS will do, even if not v4 */
3654 its = list_first_entry(&its_nodes, struct its_node, entry);
3656 entries = roundup_pow_of_two(nr_cpu_ids);
3657 vpe_proxy.vpes = kcalloc(entries, sizeof(*vpe_proxy.vpes),
3659 if (!vpe_proxy.vpes) {
3660 pr_err("ITS: Can't allocate GICv4 proxy device array\n");
3664 /* Use the last possible DevID */
3665 devid = GENMASK(device_ids(its) - 1, 0);
3666 vpe_proxy.dev = its_create_device(its, devid, entries, false);
3667 if (!vpe_proxy.dev) {
3668 kfree(vpe_proxy.vpes);
3669 pr_err("ITS: Can't allocate GICv4 proxy device\n");
3673 BUG_ON(entries > vpe_proxy.dev->nr_ites);
3675 raw_spin_lock_init(&vpe_proxy.lock);
3676 vpe_proxy.next_victim = 0;
3677 pr_info("ITS: Allocated DevID %x as GICv4 proxy device (%d slots)\n",
3678 devid, vpe_proxy.dev->nr_ites);
3683 static int __init its_compute_its_list_map(struct resource *res,
3684 void __iomem *its_base)
3690 * This is assumed to be done early enough that we're
3691 * guaranteed to be single-threaded, hence no
3692 * locking. Should this change, we should address that.
3695 its_number = find_first_zero_bit(&its_list_map, GICv4_ITS_LIST_MAX);
3696 if (its_number >= GICv4_ITS_LIST_MAX) {
3697 pr_err("ITS@%pa: No ITSList entry available!\n",
3702 ctlr = readl_relaxed(its_base + GITS_CTLR);
3703 ctlr &= ~GITS_CTLR_ITS_NUMBER;
3704 ctlr |= its_number << GITS_CTLR_ITS_NUMBER_SHIFT;
3705 writel_relaxed(ctlr, its_base + GITS_CTLR);
3706 ctlr = readl_relaxed(its_base + GITS_CTLR);
3707 if ((ctlr & GITS_CTLR_ITS_NUMBER) != (its_number << GITS_CTLR_ITS_NUMBER_SHIFT)) {
3708 its_number = ctlr & GITS_CTLR_ITS_NUMBER;
3709 its_number >>= GITS_CTLR_ITS_NUMBER_SHIFT;
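/*
 * Some implementations hardwire the ITS number: if our write did not
 * stick, adopt whatever the hardware reports, provided that number
 * has not already been claimed by another ITS.
 */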
3712 if (test_and_set_bit(its_number, &its_list_map)) {
3713 pr_err("ITS@%pa: Duplicate ITSList entry %d\n",
3714 &res->start, its_number);
3721 static int __init its_probe_one(struct resource *res,
3722 struct fwnode_handle *handle, int numa_node)
3724 struct its_node *its;
3725 void __iomem *its_base;
3727 u64 baser, tmp, typer;
3731 its_base = ioremap(res->start, resource_size(res));
3733 pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start);
3737 val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK;
3738 if (val != 0x30 && val != 0x40) {
3739 pr_warn("ITS@%pa: No ITS detected, giving up\n", &res->start);
3744 err = its_force_quiescent(its_base);
3746 pr_warn("ITS@%pa: Failed to quiesce, giving up\n", &res->start);
3750 pr_info("ITS %pR\n", res);
3752 its = kzalloc(sizeof(*its), GFP_KERNEL);
3758 raw_spin_lock_init(&its->lock);
3759 mutex_init(&its->dev_alloc_lock);
3760 INIT_LIST_HEAD(&its->entry);
3761 INIT_LIST_HEAD(&its->its_device_list);
3762 typer = gic_read_typer(its_base + GITS_TYPER);
3764 its->base = its_base;
3765 its->phys_base = res->start;
3767 if (!(typer & GITS_TYPER_VMOVP)) {
3768 err = its_compute_its_list_map(res, its_base);
3774 pr_info("ITS@%pa: Using ITS number %d\n",
3777 pr_info("ITS@%pa: Single VMOVP capable\n", &res->start);
3781 its->numa_node = numa_node;
3783 page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
3784 get_order(ITS_CMD_QUEUE_SZ));
3789 its->cmd_base = (void *)page_address(page);
3790 its->cmd_write = its->cmd_base;
3791 its->fwnode_handle = handle;
3792 its->get_msi_base = its_irq_get_msi_base;
3793 its->msi_domain_flags = IRQ_DOMAIN_FLAG_MSI_REMAP;
3795 its_enable_quirks(its);
3797 err = its_alloc_tables(its);
3801 err = its_alloc_collections(its);
3803 goto out_free_tables;
3805 baser = (virt_to_phys(its->cmd_base) |
3806 GITS_CBASER_RaWaWb |
3807 GITS_CBASER_InnerShareable |
3808 (ITS_CMD_QUEUE_SZ / SZ_4K - 1) |
3811 gits_write_cbaser(baser, its->base + GITS_CBASER);
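/*
 * GITS_CBASER.Size holds the number of 4kB pages allocated to the
 * command queue minus one, hence the ITS_CMD_QUEUE_SZ / SZ_4K - 1
 * term above.
 */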
3812 tmp = gits_read_cbaser(its->base + GITS_CBASER);
3814 if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) {
3815 if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) {
3817 * The HW reports non-shareable, we must
3818 * remove the cacheability attributes as well.
3821 baser &= ~(GITS_CBASER_SHAREABILITY_MASK |
3822 GITS_CBASER_CACHEABILITY_MASK);
3823 baser |= GITS_CBASER_nC;
3824 gits_write_cbaser(baser, its->base + GITS_CBASER);
3826 pr_info("ITS: using cache flushing for cmd queue\n");
3827 its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING;
3830 gits_write_cwriter(0, its->base + GITS_CWRITER);
3831 ctlr = readl_relaxed(its->base + GITS_CTLR);
3832 ctlr |= GITS_CTLR_ENABLE;
3834 ctlr |= GITS_CTLR_ImDe;
3835 writel_relaxed(ctlr, its->base + GITS_CTLR);
3837 if (GITS_TYPER_HCC(typer))
3838 its->flags |= ITS_FLAGS_SAVE_SUSPEND_STATE;
3840 err = its_init_domain(handle, its);
3842 goto out_free_tables;
3844 raw_spin_lock(&its_lock);
3845 list_add(&its->entry, &its_nodes);
3846 raw_spin_unlock(&its_lock);
3851 its_free_tables(its);
3853 free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ));
3858 pr_err("ITS@%pa: failed probing (%d)\n", &res->start, err);
3862 static bool gic_rdists_supports_plpis(void)
3864 return !!(gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS);
3867 static int redist_disable_lpis(void)
3869 void __iomem *rbase = gic_data_rdist_rd_base();
3870 u64 timeout = USEC_PER_SEC;
3873 if (!gic_rdists_supports_plpis()) {
3874 pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
3878 val = readl_relaxed(rbase + GICR_CTLR);
3879 if (!(val & GICR_CTLR_ENABLE_LPIS))
3883 * If coming via a CPU hotplug event, we don't need to disable
3884 * LPIs before trying to re-enable them. They are already
3885 * configured and all is well in the world.
3887 * If running with preallocated tables, there is nothing to do.
3889 if (gic_data_rdist()->lpi_enabled ||
3890 (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED))
3894 * From that point on, we only try to do some damage control.
3896 pr_warn("GICv3: CPU%d: Booted with LPIs enabled, memory probably corrupted\n",
3897 smp_processor_id());
3898 add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
3901 val &= ~GICR_CTLR_ENABLE_LPIS;
3902 writel_relaxed(val, rbase + GICR_CTLR);
3904 /* Make sure any change to GICR_CTLR is observable by the GIC */
3908 * Software must observe RWP==0 after clearing GICR_CTLR.EnableLPIs
3909 * from 1 to 0 before programming GICR_PEND{PROP}BASER registers.
3910 * Error out if we time out waiting for RWP to clear.
3912 while (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_RWP) {
3914 pr_err("CPU%d: Timeout while disabling LPIs\n",
3915 smp_processor_id());
3923 * After it has been written to 1, it is IMPLEMENTATION
3924 * DEFINED whether GICR_CTLR.EnableLPIs becomes RES1 or can be
3925 * cleared to 0. Error out if clearing the bit failed.
3927 if (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_ENABLE_LPIS) {
3928 pr_err("CPU%d: Failed to disable LPIs\n", smp_processor_id());
3935 int its_cpu_init(void)
3937 if (!list_empty(&its_nodes)) {
3940 ret = redist_disable_lpis();
3944 its_cpu_init_lpis();
3945 its_cpu_init_collections();
3951 static const struct of_device_id its_device_id[] = {
3952 { .compatible = "arm,gic-v3-its", },
3956 static int __init its_of_probe(struct device_node *node)
3958 struct device_node *np;
3959 struct resource res;
3961 for (np = of_find_matching_node(node, its_device_id); np;
3962 np = of_find_matching_node(np, its_device_id)) {
3963 if (!of_device_is_available(np))
3965 if (!of_property_read_bool(np, "msi-controller")) {
3966 pr_warn("%pOF: no msi-controller property, ITS ignored\n",
3971 if (of_address_to_resource(np, 0, &res)) {
3972 pr_warn("%pOF: no regs?\n", np);
3976 its_probe_one(&res, &np->fwnode, of_node_to_nid(np));
3983 #define ACPI_GICV3_ITS_MEM_SIZE (SZ_128K)
3985 #ifdef CONFIG_ACPI_NUMA
3986 struct its_srat_map {
3993 static struct its_srat_map *its_srat_maps __initdata;
3994 static int its_in_srat __initdata;
3996 static int __init acpi_get_its_numa_node(u32 its_id)
4000 for (i = 0; i < its_in_srat; i++) {
4001 if (its_id == its_srat_maps[i].its_id)
4002 return its_srat_maps[i].numa_node;
4004 return NUMA_NO_NODE;
4007 static int __init gic_acpi_match_srat_its(union acpi_subtable_headers *header,
4008 const unsigned long end)
4013 static int __init gic_acpi_parse_srat_its(union acpi_subtable_headers *header,
4014 const unsigned long end)
4017 struct acpi_srat_gic_its_affinity *its_affinity;
4019 its_affinity = (struct acpi_srat_gic_its_affinity *)header;
4023 if (its_affinity->header.length < sizeof(*its_affinity)) {
4024 pr_err("SRAT: Invalid header length %d in ITS affinity\n",
4025 its_affinity->header.length);
4029 node = acpi_map_pxm_to_node(its_affinity->proximity_domain);
4031 if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) {
4032 pr_err("SRAT: Invalid NUMA node %d in ITS affinity\n", node);
4036 its_srat_maps[its_in_srat].numa_node = node;
4037 its_srat_maps[its_in_srat].its_id = its_affinity->its_id;
4039 pr_info("SRAT: PXM %d -> ITS %d -> Node %d\n",
4040 its_affinity->proximity_domain, its_affinity->its_id, node);
4045 static void __init acpi_table_parse_srat_its(void)
4049 count = acpi_table_parse_entries(ACPI_SIG_SRAT,
4050 sizeof(struct acpi_table_srat),
4051 ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
4052 gic_acpi_match_srat_its, 0);
4056 its_srat_maps = kmalloc_array(count, sizeof(struct its_srat_map),
4058 if (!its_srat_maps) {
4059 pr_warn("SRAT: Failed to allocate memory for its_srat_maps!\n");
4063 acpi_table_parse_entries(ACPI_SIG_SRAT,
4064 sizeof(struct acpi_table_srat),
4065 ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
4066 gic_acpi_parse_srat_its, 0);
4069 /* free the its_srat_maps after ITS probing */
4070 static void __init acpi_its_srat_maps_free(void)
4072 kfree(its_srat_maps);
4075 static void __init acpi_table_parse_srat_its(void) { }
4076 static int __init acpi_get_its_numa_node(u32 its_id) { return NUMA_NO_NODE; }
4077 static void __init acpi_its_srat_maps_free(void) { }
4080 static int __init gic_acpi_parse_madt_its(union acpi_subtable_headers *header,
4081 const unsigned long end)
4083 struct acpi_madt_generic_translator *its_entry;
4084 struct fwnode_handle *dom_handle;
4085 struct resource res;
4088 its_entry = (struct acpi_madt_generic_translator *)header;
4089 memset(&res, 0, sizeof(res));
4090 res.start = its_entry->base_address;
4091 res.end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1;
4092 res.flags = IORESOURCE_MEM;
4094 dom_handle = irq_domain_alloc_fwnode(&res.start);
4096 pr_err("ITS@%pa: Unable to allocate GICv3 ITS domain token\n",
4101 err = iort_register_domain_token(its_entry->translation_id, res.start,
4104 pr_err("ITS@%pa: Unable to register GICv3 ITS domain token (ITS ID %d) to IORT\n",
4105 &res.start, its_entry->translation_id);
4109 err = its_probe_one(&res, dom_handle,
4110 acpi_get_its_numa_node(its_entry->translation_id));
4114 iort_deregister_domain_token(its_entry->translation_id);
4116 irq_domain_free_fwnode(dom_handle);
4120 static void __init its_acpi_probe(void)
4122 acpi_table_parse_srat_its();
4123 acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
4124 gic_acpi_parse_madt_its, 0);
4125 acpi_its_srat_maps_free();
4128 static void __init its_acpi_probe(void) { }
4131 int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
4132 struct irq_domain *parent_domain)
4134 struct device_node *of_node;
4135 struct its_node *its;
4136 bool has_v4 = false;
4139 its_parent = parent_domain;
4140 of_node = to_of_node(handle);
4142 its_of_probe(of_node);
4146 if (list_empty(&its_nodes)) {
4147 pr_warn("ITS: No ITS available, not enabling LPIs\n");
4151 gic_rdists = rdists;
4153 err = allocate_lpi_tables();
4157 list_for_each_entry(its, &its_nodes, entry)
4158 has_v4 |= is_v4(its);
4160 if (has_v4 & rdists->has_vlpis) {
4161 if (its_init_vpe_domain() ||
4162 its_init_v4(parent_domain, &its_vpe_domain_ops)) {
4163 rdists->has_vlpis = false;
4164 pr_err("ITS: Disabling GICv4 support\n");
4168 register_syscore_ops(&its_syscore_ops);