drivers/irqchip/irq-gic-v3-its.c
1 /*
2  * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
3  * Author: Marc Zyngier <marc.zyngier@arm.com>
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License version 2 as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
16  */
17
18 #include <linux/acpi.h>
19 #include <linux/acpi_iort.h>
20 #include <linux/bitmap.h>
21 #include <linux/cpu.h>
22 #include <linux/delay.h>
23 #include <linux/dma-iommu.h>
24 #include <linux/interrupt.h>
25 #include <linux/irqdomain.h>
26 #include <linux/list.h>
27 #include <linux/list_sort.h>
28 #include <linux/log2.h>
29 #include <linux/mm.h>
30 #include <linux/msi.h>
31 #include <linux/of.h>
32 #include <linux/of_address.h>
33 #include <linux/of_irq.h>
34 #include <linux/of_pci.h>
35 #include <linux/of_platform.h>
36 #include <linux/percpu.h>
37 #include <linux/slab.h>
38 #include <linux/syscore_ops.h>
39
40 #include <linux/irqchip.h>
41 #include <linux/irqchip/arm-gic-v3.h>
42 #include <linux/irqchip/arm-gic-v4.h>
43
44 #include <asm/cputype.h>
45 #include <asm/exception.h>
46
47 #include "irq-gic-common.h"
48
49 #define ITS_FLAGS_CMDQ_NEEDS_FLUSHING           (1ULL << 0)
50 #define ITS_FLAGS_WORKAROUND_CAVIUM_22375       (1ULL << 1)
51 #define ITS_FLAGS_WORKAROUND_CAVIUM_23144       (1ULL << 2)
52 #define ITS_FLAGS_SAVE_SUSPEND_STATE            (1ULL << 3)
53
54 #define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING     (1 << 0)
55
56 static u32 lpi_id_bits;
57
58 /*
59  * We allocate memory for PROPBASE to cover 2 ^ lpi_id_bits LPIs to
60  * deal with (one configuration byte per interrupt). PENDBASE has to
61  * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI).
62  */
63 #define LPI_NRBITS              lpi_id_bits
64 #define LPI_PROPBASE_SZ         ALIGN(BIT(LPI_NRBITS), SZ_64K)
65 #define LPI_PENDBASE_SZ         ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K)
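/*
 * For example, with lpi_id_bits == 16 this works out to a 64kB
 * property table (one configuration byte per interrupt ID) and a
 * pending bitmap of 65536 / 8 = 8kB, rounded up to the required
 * 64kB alignment.
 */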
66
67 #define LPI_PROP_DEFAULT_PRIO   0xa0
68
69 /*
70  * Collection structure - just an ID, and a redistributor address to
71  * ping. We use one per CPU as a bag of interrupts assigned to this
72  * CPU.
73  */
74 struct its_collection {
75         u64                     target_address;
76         u16                     col_id;
77 };
78
79 /*
80  * The ITS_BASER structure - contains memory information, cached
81  * value of BASER register configuration and ITS page size.
82  */
83 struct its_baser {
84         void            *base;
85         u64             val;
86         u32             order;
87         u32             psz;
88 };
89
90 struct its_device;
91
92 /*
93  * The ITS structure - contains most of the infrastructure, with the
94  * top-level MSI domain, the command queue, the collections, and the
95  * list of devices writing to it.
96  */
97 struct its_node {
98         raw_spinlock_t          lock;
99         struct list_head        entry;
100         void __iomem            *base;
101         phys_addr_t             phys_base;
102         struct its_cmd_block    *cmd_base;
103         struct its_cmd_block    *cmd_write;
104         struct its_baser        tables[GITS_BASER_NR_REGS];
105         struct its_collection   *collections;
106         struct fwnode_handle    *fwnode_handle;
107         u64                     (*get_msi_base)(struct its_device *its_dev);
108         u64                     cbaser_save;
109         u32                     ctlr_save;
110         struct list_head        its_device_list;
111         u64                     flags;
112         unsigned long           list_nr;
113         u32                     ite_size;
114         u32                     device_ids;
115         int                     numa_node;
116         unsigned int            msi_domain_flags;
117         u32                     pre_its_base; /* for Socionext Synquacer */
118         bool                    is_v4;
119         int                     vlpi_redist_offset;
120 };
121
122 #define ITS_ITT_ALIGN           SZ_256
123
124 /* The maximum number of VPEID bits supported by VLPI commands */
125 #define ITS_MAX_VPEID_BITS      (16)
126 #define ITS_MAX_VPEID           (1 << (ITS_MAX_VPEID_BITS))
127
128 /* Convert page order to size in bytes */
129 #define PAGE_ORDER_TO_SIZE(o)   (PAGE_SIZE << (o))
130
131 struct event_lpi_map {
132         unsigned long           *lpi_map;
133         u16                     *col_map;
134         irq_hw_number_t         lpi_base;
135         int                     nr_lpis;
136         struct mutex            vlpi_lock;
137         struct its_vm           *vm;
138         struct its_vlpi_map     *vlpi_maps;
139         int                     nr_vlpis;
140 };
141
142 /*
143  * The ITS view of a device - belongs to an ITS, owns an interrupt
144  * translation table, and a list of interrupts.  If some of its
145  * LPIs are injected into a guest (GICv4), the event_map.vm field
146  * indicates which one.
147  */
148 struct its_device {
149         struct list_head        entry;
150         struct its_node         *its;
151         struct event_lpi_map    event_map;
152         void                    *itt;
153         u32                     nr_ites;
154         u32                     device_id;
155 };
156
157 static struct {
158         raw_spinlock_t          lock;
159         struct its_device       *dev;
160         struct its_vpe          **vpes;
161         int                     next_victim;
162 } vpe_proxy;
163
164 static LIST_HEAD(its_nodes);
165 static DEFINE_RAW_SPINLOCK(its_lock);
166 static struct rdists *gic_rdists;
167 static struct irq_domain *its_parent;
168
169 static unsigned long its_list_map;
170 static u16 vmovp_seq_num;
171 static DEFINE_RAW_SPINLOCK(vmovp_lock);
172
173 static DEFINE_IDA(its_vpeid_ida);
174
175 #define gic_data_rdist()                (raw_cpu_ptr(gic_rdists->rdist))
176 #define gic_data_rdist_rd_base()        (gic_data_rdist()->rd_base)
177 #define gic_data_rdist_vlpi_base()      (gic_data_rdist_rd_base() + SZ_128K)
178
179 static struct its_collection *dev_event_to_col(struct its_device *its_dev,
180                                                u32 event)
181 {
182         struct its_node *its = its_dev->its;
183
184         return its->collections + its_dev->event_map.col_map[event];
185 }
186
187 static struct its_collection *valid_col(struct its_collection *col)
188 {
189         if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(15, 0)))
190                 return NULL;
191
192         return col;
193 }
194
195 static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe)
196 {
197         if (valid_col(its->collections + vpe->col_idx))
198                 return vpe;
199
200         return NULL;
201 }
202
203 /*
204  * ITS command descriptors - parameters to be encoded in a command
205  * block.
206  */
207 struct its_cmd_desc {
208         union {
209                 struct {
210                         struct its_device *dev;
211                         u32 event_id;
212                 } its_inv_cmd;
213
214                 struct {
215                         struct its_device *dev;
216                         u32 event_id;
217                 } its_clear_cmd;
218
219                 struct {
220                         struct its_device *dev;
221                         u32 event_id;
222                 } its_int_cmd;
223
224                 struct {
225                         struct its_device *dev;
226                         int valid;
227                 } its_mapd_cmd;
228
229                 struct {
230                         struct its_collection *col;
231                         int valid;
232                 } its_mapc_cmd;
233
234                 struct {
235                         struct its_device *dev;
236                         u32 phys_id;
237                         u32 event_id;
238                 } its_mapti_cmd;
239
240                 struct {
241                         struct its_device *dev;
242                         struct its_collection *col;
243                         u32 event_id;
244                 } its_movi_cmd;
245
246                 struct {
247                         struct its_device *dev;
248                         u32 event_id;
249                 } its_discard_cmd;
250
251                 struct {
252                         struct its_collection *col;
253                 } its_invall_cmd;
254
255                 struct {
256                         struct its_vpe *vpe;
257                 } its_vinvall_cmd;
258
259                 struct {
260                         struct its_vpe *vpe;
261                         struct its_collection *col;
262                         bool valid;
263                 } its_vmapp_cmd;
264
265                 struct {
266                         struct its_vpe *vpe;
267                         struct its_device *dev;
268                         u32 virt_id;
269                         u32 event_id;
270                         bool db_enabled;
271                 } its_vmapti_cmd;
272
273                 struct {
274                         struct its_vpe *vpe;
275                         struct its_device *dev;
276                         u32 event_id;
277                         bool db_enabled;
278                 } its_vmovi_cmd;
279
280                 struct {
281                         struct its_vpe *vpe;
282                         struct its_collection *col;
283                         u16 seq_num;
284                         u16 its_list;
285                 } its_vmovp_cmd;
286         };
287 };
288
289 /*
290  * The ITS command block, which is what the ITS actually parses.
291  */
292 struct its_cmd_block {
293         u64     raw_cmd[4];
294 };
295
296 #define ITS_CMD_QUEUE_SZ                SZ_64K
297 #define ITS_CMD_QUEUE_NR_ENTRIES        (ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block))
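/*
 * Each command block is four 64bit words (32 bytes), so a 64kB
 * command queue holds 2048 entries.
 */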
298
299 typedef struct its_collection *(*its_cmd_builder_t)(struct its_node *,
300                                                     struct its_cmd_block *,
301                                                     struct its_cmd_desc *);
302
303 typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_node *,
304                                               struct its_cmd_block *,
305                                               struct its_cmd_desc *);
306
307 static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l)
308 {
309         u64 mask = GENMASK_ULL(h, l);
310         *raw_cmd &= ~mask;
311         *raw_cmd |= (val << l) & mask;
312 }
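/*
 * For example, its_mask_encode(&cmd->raw_cmd[0], devid, 63, 32)
 * clears bits [63:32] of the first command word and writes the
 * device ID into that field, leaving all other bits untouched.
 */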
313
314 static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr)
315 {
316         its_mask_encode(&cmd->raw_cmd[0], cmd_nr, 7, 0);
317 }
318
319 static void its_encode_devid(struct its_cmd_block *cmd, u32 devid)
320 {
321         its_mask_encode(&cmd->raw_cmd[0], devid, 63, 32);
322 }
323
324 static void its_encode_event_id(struct its_cmd_block *cmd, u32 id)
325 {
326         its_mask_encode(&cmd->raw_cmd[1], id, 31, 0);
327 }
328
329 static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id)
330 {
331         its_mask_encode(&cmd->raw_cmd[1], phys_id, 63, 32);
332 }
333
334 static void its_encode_size(struct its_cmd_block *cmd, u8 size)
335 {
336         its_mask_encode(&cmd->raw_cmd[1], size, 4, 0);
337 }
338
339 static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr)
340 {
341         its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 51, 8);
342 }
343
344 static void its_encode_valid(struct its_cmd_block *cmd, int valid)
345 {
346         its_mask_encode(&cmd->raw_cmd[2], !!valid, 63, 63);
347 }
348
349 static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr)
350 {
351         its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 51, 16);
352 }
353
354 static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
355 {
356         its_mask_encode(&cmd->raw_cmd[2], col, 15, 0);
357 }
358
359 static void its_encode_vpeid(struct its_cmd_block *cmd, u16 vpeid)
360 {
361         its_mask_encode(&cmd->raw_cmd[1], vpeid, 47, 32);
362 }
363
364 static void its_encode_virt_id(struct its_cmd_block *cmd, u32 virt_id)
365 {
366         its_mask_encode(&cmd->raw_cmd[2], virt_id, 31, 0);
367 }
368
369 static void its_encode_db_phys_id(struct its_cmd_block *cmd, u32 db_phys_id)
370 {
371         its_mask_encode(&cmd->raw_cmd[2], db_phys_id, 63, 32);
372 }
373
374 static void its_encode_db_valid(struct its_cmd_block *cmd, bool db_valid)
375 {
376         its_mask_encode(&cmd->raw_cmd[2], db_valid, 0, 0);
377 }
378
379 static void its_encode_seq_num(struct its_cmd_block *cmd, u16 seq_num)
380 {
381         its_mask_encode(&cmd->raw_cmd[0], seq_num, 47, 32);
382 }
383
384 static void its_encode_its_list(struct its_cmd_block *cmd, u16 its_list)
385 {
386         its_mask_encode(&cmd->raw_cmd[1], its_list, 15, 0);
387 }
388
389 static void its_encode_vpt_addr(struct its_cmd_block *cmd, u64 vpt_pa)
390 {
391         its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 51, 16);
392 }
393
394 static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size)
395 {
396         its_mask_encode(&cmd->raw_cmd[3], vpt_size, 4, 0);
397 }
398
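/*
 * The ITS consumes command blocks as little-endian data, so each
 * 64bit word must be byte-swapped on a big-endian kernel. On a
 * little-endian build, cpu_to_le64() is a no-op.
 */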
399 static inline void its_fixup_cmd(struct its_cmd_block *cmd)
400 {
401         /* Let's fixup BE commands */
402         cmd->raw_cmd[0] = cpu_to_le64(cmd->raw_cmd[0]);
403         cmd->raw_cmd[1] = cpu_to_le64(cmd->raw_cmd[1]);
404         cmd->raw_cmd[2] = cpu_to_le64(cmd->raw_cmd[2]);
405         cmd->raw_cmd[3] = cpu_to_le64(cmd->raw_cmd[3]);
406 }
407
408 static struct its_collection *its_build_mapd_cmd(struct its_node *its,
409                                                  struct its_cmd_block *cmd,
410                                                  struct its_cmd_desc *desc)
411 {
412         unsigned long itt_addr;
413         u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites);
414
415         itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt);
416         itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN);
417
418         its_encode_cmd(cmd, GITS_CMD_MAPD);
419         its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id);
420         its_encode_size(cmd, size - 1);
421         its_encode_itt(cmd, itt_addr);
422         its_encode_valid(cmd, desc->its_mapd_cmd.valid);
423
424         its_fixup_cmd(cmd);
425
426         return NULL;
427 }
428
429 static struct its_collection *its_build_mapc_cmd(struct its_node *its,
430                                                  struct its_cmd_block *cmd,
431                                                  struct its_cmd_desc *desc)
432 {
433         its_encode_cmd(cmd, GITS_CMD_MAPC);
434         its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);
435         its_encode_target(cmd, desc->its_mapc_cmd.col->target_address);
436         its_encode_valid(cmd, desc->its_mapc_cmd.valid);
437
438         its_fixup_cmd(cmd);
439
440         return desc->its_mapc_cmd.col;
441 }
442
443 static struct its_collection *its_build_mapti_cmd(struct its_node *its,
444                                                   struct its_cmd_block *cmd,
445                                                   struct its_cmd_desc *desc)
446 {
447         struct its_collection *col;
448
449         col = dev_event_to_col(desc->its_mapti_cmd.dev,
450                                desc->its_mapti_cmd.event_id);
451
452         its_encode_cmd(cmd, GITS_CMD_MAPTI);
453         its_encode_devid(cmd, desc->its_mapti_cmd.dev->device_id);
454         its_encode_event_id(cmd, desc->its_mapti_cmd.event_id);
455         its_encode_phys_id(cmd, desc->its_mapti_cmd.phys_id);
456         its_encode_collection(cmd, col->col_id);
457
458         its_fixup_cmd(cmd);
459
460         return valid_col(col);
461 }
462
463 static struct its_collection *its_build_movi_cmd(struct its_node *its,
464                                                  struct its_cmd_block *cmd,
465                                                  struct its_cmd_desc *desc)
466 {
467         struct its_collection *col;
468
469         col = dev_event_to_col(desc->its_movi_cmd.dev,
470                                desc->its_movi_cmd.event_id);
471
472         its_encode_cmd(cmd, GITS_CMD_MOVI);
473         its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id);
474         its_encode_event_id(cmd, desc->its_movi_cmd.event_id);
475         its_encode_collection(cmd, desc->its_movi_cmd.col->col_id);
476
477         its_fixup_cmd(cmd);
478
479         return valid_col(col);
480 }
481
482 static struct its_collection *its_build_discard_cmd(struct its_node *its,
483                                                     struct its_cmd_block *cmd,
484                                                     struct its_cmd_desc *desc)
485 {
486         struct its_collection *col;
487
488         col = dev_event_to_col(desc->its_discard_cmd.dev,
489                                desc->its_discard_cmd.event_id);
490
491         its_encode_cmd(cmd, GITS_CMD_DISCARD);
492         its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id);
493         its_encode_event_id(cmd, desc->its_discard_cmd.event_id);
494
495         its_fixup_cmd(cmd);
496
497         return valid_col(col);
498 }
499
500 static struct its_collection *its_build_inv_cmd(struct its_node *its,
501                                                 struct its_cmd_block *cmd,
502                                                 struct its_cmd_desc *desc)
503 {
504         struct its_collection *col;
505
506         col = dev_event_to_col(desc->its_inv_cmd.dev,
507                                desc->its_inv_cmd.event_id);
508
509         its_encode_cmd(cmd, GITS_CMD_INV);
510         its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
511         its_encode_event_id(cmd, desc->its_inv_cmd.event_id);
512
513         its_fixup_cmd(cmd);
514
515         return valid_col(col);
516 }
517
518 static struct its_collection *its_build_int_cmd(struct its_node *its,
519                                                 struct its_cmd_block *cmd,
520                                                 struct its_cmd_desc *desc)
521 {
522         struct its_collection *col;
523
524         col = dev_event_to_col(desc->its_int_cmd.dev,
525                                desc->its_int_cmd.event_id);
526
527         its_encode_cmd(cmd, GITS_CMD_INT);
528         its_encode_devid(cmd, desc->its_int_cmd.dev->device_id);
529         its_encode_event_id(cmd, desc->its_int_cmd.event_id);
530
531         its_fixup_cmd(cmd);
532
533         return valid_col(col);
534 }
535
536 static struct its_collection *its_build_clear_cmd(struct its_node *its,
537                                                   struct its_cmd_block *cmd,
538                                                   struct its_cmd_desc *desc)
539 {
540         struct its_collection *col;
541
542         col = dev_event_to_col(desc->its_clear_cmd.dev,
543                                desc->its_clear_cmd.event_id);
544
545         its_encode_cmd(cmd, GITS_CMD_CLEAR);
546         its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id);
547         its_encode_event_id(cmd, desc->its_clear_cmd.event_id);
548
549         its_fixup_cmd(cmd);
550
551         return valid_col(col);
552 }
553
554 static struct its_collection *its_build_invall_cmd(struct its_node *its,
555                                                    struct its_cmd_block *cmd,
556                                                    struct its_cmd_desc *desc)
557 {
558         its_encode_cmd(cmd, GITS_CMD_INVALL);
559         its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);
560
561         its_fixup_cmd(cmd);
562
563         return NULL;
564 }
565
566 static struct its_vpe *its_build_vinvall_cmd(struct its_node *its,
567                                              struct its_cmd_block *cmd,
568                                              struct its_cmd_desc *desc)
569 {
570         its_encode_cmd(cmd, GITS_CMD_VINVALL);
571         its_encode_vpeid(cmd, desc->its_vinvall_cmd.vpe->vpe_id);
572
573         its_fixup_cmd(cmd);
574
575         return valid_vpe(its, desc->its_vinvall_cmd.vpe);
576 }
577
578 static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
579                                            struct its_cmd_block *cmd,
580                                            struct its_cmd_desc *desc)
581 {
582         unsigned long vpt_addr;
583         u64 target;
584
585         vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page));
586         target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset;
587
588         its_encode_cmd(cmd, GITS_CMD_VMAPP);
589         its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id);
590         its_encode_valid(cmd, desc->its_vmapp_cmd.valid);
591         its_encode_target(cmd, target);
592         its_encode_vpt_addr(cmd, vpt_addr);
593         its_encode_vpt_size(cmd, LPI_NRBITS - 1);
594
595         its_fixup_cmd(cmd);
596
597         return valid_vpe(its, desc->its_vmapp_cmd.vpe);
598 }
599
600 static struct its_vpe *its_build_vmapti_cmd(struct its_node *its,
601                                             struct its_cmd_block *cmd,
602                                             struct its_cmd_desc *desc)
603 {
604         u32 db;
605
606         if (desc->its_vmapti_cmd.db_enabled)
607                 db = desc->its_vmapti_cmd.vpe->vpe_db_lpi;
608         else
609                 db = 1023;
610
611         its_encode_cmd(cmd, GITS_CMD_VMAPTI);
612         its_encode_devid(cmd, desc->its_vmapti_cmd.dev->device_id);
613         its_encode_vpeid(cmd, desc->its_vmapti_cmd.vpe->vpe_id);
614         its_encode_event_id(cmd, desc->its_vmapti_cmd.event_id);
615         its_encode_db_phys_id(cmd, db);
616         its_encode_virt_id(cmd, desc->its_vmapti_cmd.virt_id);
617
618         its_fixup_cmd(cmd);
619
620         return valid_vpe(its, desc->its_vmapti_cmd.vpe);
621 }
622
623 static struct its_vpe *its_build_vmovi_cmd(struct its_node *its,
624                                            struct its_cmd_block *cmd,
625                                            struct its_cmd_desc *desc)
626 {
627         u32 db;
628
629         if (desc->its_vmovi_cmd.db_enabled)
630                 db = desc->its_vmovi_cmd.vpe->vpe_db_lpi;
631         else
632                 db = 1023;
633
634         its_encode_cmd(cmd, GITS_CMD_VMOVI);
635         its_encode_devid(cmd, desc->its_vmovi_cmd.dev->device_id);
636         its_encode_vpeid(cmd, desc->its_vmovi_cmd.vpe->vpe_id);
637         its_encode_event_id(cmd, desc->its_vmovi_cmd.event_id);
638         its_encode_db_phys_id(cmd, db);
639         its_encode_db_valid(cmd, true);
640
641         its_fixup_cmd(cmd);
642
643         return valid_vpe(its, desc->its_vmovi_cmd.vpe);
644 }
645
646 static struct its_vpe *its_build_vmovp_cmd(struct its_node *its,
647                                            struct its_cmd_block *cmd,
648                                            struct its_cmd_desc *desc)
649 {
650         u64 target;
651
652         target = desc->its_vmovp_cmd.col->target_address + its->vlpi_redist_offset;
653         its_encode_cmd(cmd, GITS_CMD_VMOVP);
654         its_encode_seq_num(cmd, desc->its_vmovp_cmd.seq_num);
655         its_encode_its_list(cmd, desc->its_vmovp_cmd.its_list);
656         its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id);
657         its_encode_target(cmd, target);
658
659         its_fixup_cmd(cmd);
660
661         return valid_vpe(its, desc->its_vmovp_cmd.vpe);
662 }
663
664 static u64 its_cmd_ptr_to_offset(struct its_node *its,
665                                  struct its_cmd_block *ptr)
666 {
667         return (ptr - its->cmd_base) * sizeof(*ptr);
668 }
669
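/*
 * The command queue is used as a classic ring buffer: it is deemed
 * full when advancing the write index by one would make it equal to
 * the read index, so one slot is always left unused in order to tell
 * a full queue apart from an empty one.
 */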
670 static int its_queue_full(struct its_node *its)
671 {
672         int widx;
673         int ridx;
674
675         widx = its->cmd_write - its->cmd_base;
676         ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block);
677
678         /* This is incredibly unlikely to happen, unless the ITS locks up. */
679         if (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx)
680                 return 1;
681
682         return 0;
683 }
684
685 static struct its_cmd_block *its_allocate_entry(struct its_node *its)
686 {
687         struct its_cmd_block *cmd;
688         u32 count = 1000000;    /* 1s! */
689
690         while (its_queue_full(its)) {
691                 count--;
692                 if (!count) {
693                         pr_err_ratelimited("ITS queue not draining\n");
694                         return NULL;
695                 }
696                 cpu_relax();
697                 udelay(1);
698         }
699
700         cmd = its->cmd_write++;
701
702         /* Handle queue wrapping */
703         if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES))
704                 its->cmd_write = its->cmd_base;
705
706         /* Clear command */
707         cmd->raw_cmd[0] = 0;
708         cmd->raw_cmd[1] = 0;
709         cmd->raw_cmd[2] = 0;
710         cmd->raw_cmd[3] = 0;
711
712         return cmd;
713 }
714
715 static struct its_cmd_block *its_post_commands(struct its_node *its)
716 {
717         u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write);
718
719         writel_relaxed(wr, its->base + GITS_CWRITER);
720
721         return its->cmd_write;
722 }
723
724 static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
725 {
726         /*
727          * Make sure the commands written to memory are observable by
728          * the ITS.
729          */
730         if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING)
731                 gic_flush_dcache_to_poc(cmd, sizeof(*cmd));
732         else
733                 dsb(ishst);
734 }
735
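/*
 * Completion is detected by polling GITS_CREADR (all indices here are
 * byte offsets into the queue). In the direct case (no wrap between
 * 'from' and 'to') we are done once the read pointer has reached
 * 'to'; in the wrapped case we are done once it lies in [to, from),
 * i.e. it has wrapped past 'to' but not yet caught up with 'from'.
 */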
736 static int its_wait_for_range_completion(struct its_node *its,
737                                          struct its_cmd_block *from,
738                                          struct its_cmd_block *to)
739 {
740         u64 rd_idx, from_idx, to_idx;
741         u32 count = 1000000;    /* 1s! */
742
743         from_idx = its_cmd_ptr_to_offset(its, from);
744         to_idx = its_cmd_ptr_to_offset(its, to);
745
746         while (1) {
747                 rd_idx = readl_relaxed(its->base + GITS_CREADR);
748
749                 /* Direct case */
750                 if (from_idx < to_idx && rd_idx >= to_idx)
751                         break;
752
753                 /* Wrapped case */
754                 if (from_idx >= to_idx && rd_idx >= to_idx && rd_idx < from_idx)
755                         break;
756
757                 count--;
758                 if (!count) {
759                         pr_err_ratelimited("ITS queue timeout (%llu %llu %llu)\n",
760                                            from_idx, to_idx, rd_idx);
761                         return -1;
762                 }
763                 cpu_relax();
764                 udelay(1);
765         }
766
767         return 0;
768 }
769
770 /* Warning, macro hell follows */
771 #define BUILD_SINGLE_CMD_FUNC(name, buildtype, synctype, buildfn)       \
772 void name(struct its_node *its,                                         \
773           buildtype builder,                                            \
774           struct its_cmd_desc *desc)                                    \
775 {                                                                       \
776         struct its_cmd_block *cmd, *sync_cmd, *next_cmd;                \
777         synctype *sync_obj;                                             \
778         unsigned long flags;                                            \
779                                                                         \
780         raw_spin_lock_irqsave(&its->lock, flags);                       \
781                                                                         \
782         cmd = its_allocate_entry(its);                                  \
783         if (!cmd) {             /* We're soooooo screwed... */         \
784                 raw_spin_unlock_irqrestore(&its->lock, flags);          \
785                 return;                                                 \
786         }                                                               \
787         sync_obj = builder(its, cmd, desc);                             \
788         its_flush_cmd(its, cmd);                                        \
789                                                                         \
790         if (sync_obj) {                                                 \
791                 sync_cmd = its_allocate_entry(its);                     \
792                 if (!sync_cmd)                                          \
793                         goto post;                                      \
794                                                                         \
795                 buildfn(its, sync_cmd, sync_obj);                       \
796                 its_flush_cmd(its, sync_cmd);                           \
797         }                                                               \
798                                                                         \
799 post:                                                                   \
800         next_cmd = its_post_commands(its);                              \
801         raw_spin_unlock_irqrestore(&its->lock, flags);                  \
802                                                                         \
803         if (its_wait_for_range_completion(its, cmd, next_cmd))          \
804                 pr_err_ratelimited("ITS cmd %ps failed\n", builder);    \
805 }
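/*
 * The macro above generates a complete "queue one command" helper:
 * grab a slot, build the command, optionally follow it with a
 * SYNC/VSYNC targeting whatever object the builder returned, publish
 * the new write pointer and wait for the ITS to consume the range.
 * It is instantiated twice below: once for physical commands (SYNC on
 * a collection) and once for GICv4 virtual commands (VSYNC on a vPE).
 */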
806
807 static void its_build_sync_cmd(struct its_node *its,
808                                struct its_cmd_block *sync_cmd,
809                                struct its_collection *sync_col)
810 {
811         its_encode_cmd(sync_cmd, GITS_CMD_SYNC);
812         its_encode_target(sync_cmd, sync_col->target_address);
813
814         its_fixup_cmd(sync_cmd);
815 }
816
817 static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t,
818                              struct its_collection, its_build_sync_cmd)
819
820 static void its_build_vsync_cmd(struct its_node *its,
821                                 struct its_cmd_block *sync_cmd,
822                                 struct its_vpe *sync_vpe)
823 {
824         its_encode_cmd(sync_cmd, GITS_CMD_VSYNC);
825         its_encode_vpeid(sync_cmd, sync_vpe->vpe_id);
826
827         its_fixup_cmd(sync_cmd);
828 }
829
830 static BUILD_SINGLE_CMD_FUNC(its_send_single_vcommand, its_cmd_vbuilder_t,
831                              struct its_vpe, its_build_vsync_cmd)
832
833 static void its_send_int(struct its_device *dev, u32 event_id)
834 {
835         struct its_cmd_desc desc;
836
837         desc.its_int_cmd.dev = dev;
838         desc.its_int_cmd.event_id = event_id;
839
840         its_send_single_command(dev->its, its_build_int_cmd, &desc);
841 }
842
843 static void its_send_clear(struct its_device *dev, u32 event_id)
844 {
845         struct its_cmd_desc desc;
846
847         desc.its_clear_cmd.dev = dev;
848         desc.its_clear_cmd.event_id = event_id;
849
850         its_send_single_command(dev->its, its_build_clear_cmd, &desc);
851 }
852
853 static void its_send_inv(struct its_device *dev, u32 event_id)
854 {
855         struct its_cmd_desc desc;
856
857         desc.its_inv_cmd.dev = dev;
858         desc.its_inv_cmd.event_id = event_id;
859
860         its_send_single_command(dev->its, its_build_inv_cmd, &desc);
861 }
862
863 static void its_send_mapd(struct its_device *dev, int valid)
864 {
865         struct its_cmd_desc desc;
866
867         desc.its_mapd_cmd.dev = dev;
868         desc.its_mapd_cmd.valid = !!valid;
869
870         its_send_single_command(dev->its, its_build_mapd_cmd, &desc);
871 }
872
873 static void its_send_mapc(struct its_node *its, struct its_collection *col,
874                           int valid)
875 {
876         struct its_cmd_desc desc;
877
878         desc.its_mapc_cmd.col = col;
879         desc.its_mapc_cmd.valid = !!valid;
880
881         its_send_single_command(its, its_build_mapc_cmd, &desc);
882 }
883
884 static void its_send_mapti(struct its_device *dev, u32 irq_id, u32 id)
885 {
886         struct its_cmd_desc desc;
887
888         desc.its_mapti_cmd.dev = dev;
889         desc.its_mapti_cmd.phys_id = irq_id;
890         desc.its_mapti_cmd.event_id = id;
891
892         its_send_single_command(dev->its, its_build_mapti_cmd, &desc);
893 }
894
895 static void its_send_movi(struct its_device *dev,
896                           struct its_collection *col, u32 id)
897 {
898         struct its_cmd_desc desc;
899
900         desc.its_movi_cmd.dev = dev;
901         desc.its_movi_cmd.col = col;
902         desc.its_movi_cmd.event_id = id;
903
904         its_send_single_command(dev->its, its_build_movi_cmd, &desc);
905 }
906
907 static void its_send_discard(struct its_device *dev, u32 id)
908 {
909         struct its_cmd_desc desc;
910
911         desc.its_discard_cmd.dev = dev;
912         desc.its_discard_cmd.event_id = id;
913
914         its_send_single_command(dev->its, its_build_discard_cmd, &desc);
915 }
916
917 static void its_send_invall(struct its_node *its, struct its_collection *col)
918 {
919         struct its_cmd_desc desc;
920
921         desc.its_invall_cmd.col = col;
922
923         its_send_single_command(its, its_build_invall_cmd, &desc);
924 }
925
926 static void its_send_vmapti(struct its_device *dev, u32 id)
927 {
928         struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id];
929         struct its_cmd_desc desc;
930
931         desc.its_vmapti_cmd.vpe = map->vpe;
932         desc.its_vmapti_cmd.dev = dev;
933         desc.its_vmapti_cmd.virt_id = map->vintid;
934         desc.its_vmapti_cmd.event_id = id;
935         desc.its_vmapti_cmd.db_enabled = map->db_enabled;
936
937         its_send_single_vcommand(dev->its, its_build_vmapti_cmd, &desc);
938 }
939
940 static void its_send_vmovi(struct its_device *dev, u32 id)
941 {
942         struct its_vlpi_map *map = &dev->event_map.vlpi_maps[id];
943         struct its_cmd_desc desc;
944
945         desc.its_vmovi_cmd.vpe = map->vpe;
946         desc.its_vmovi_cmd.dev = dev;
947         desc.its_vmovi_cmd.event_id = id;
948         desc.its_vmovi_cmd.db_enabled = map->db_enabled;
949
950         its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc);
951 }
952
953 static void its_send_vmapp(struct its_node *its,
954                            struct its_vpe *vpe, bool valid)
955 {
956         struct its_cmd_desc desc;
957
958         desc.its_vmapp_cmd.vpe = vpe;
959         desc.its_vmapp_cmd.valid = valid;
960         desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx];
961
962         its_send_single_vcommand(its, its_build_vmapp_cmd, &desc);
963 }
964
965 static void its_send_vmovp(struct its_vpe *vpe)
966 {
967         struct its_cmd_desc desc;
968         struct its_node *its;
969         unsigned long flags;
970         int col_id = vpe->col_idx;
971
972         desc.its_vmovp_cmd.vpe = vpe;
973         desc.its_vmovp_cmd.its_list = (u16)its_list_map;
974
975         if (!its_list_map) {
976                 its = list_first_entry(&its_nodes, struct its_node, entry);
977                 desc.its_vmovp_cmd.seq_num = 0;
978                 desc.its_vmovp_cmd.col = &its->collections[col_id];
979                 its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
980                 return;
981         }
982
983         /*
984          * Yet another marvel of the architecture. If using the
985          * its_list "feature", we need to make sure that all ITSs
986          * receive all VMOVP commands in the same order. The only way
987          * to guarantee this is to make vmovp a serialization point.
988          *
989          * Wall <-- Head.
990          */
991         raw_spin_lock_irqsave(&vmovp_lock, flags);
992
993         desc.its_vmovp_cmd.seq_num = vmovp_seq_num++;
994
995         /* Emit VMOVPs */
996         list_for_each_entry(its, &its_nodes, entry) {
997                 if (!its->is_v4)
998                         continue;
999
1000                 if (!vpe->its_vm->vlpi_count[its->list_nr])
1001                         continue;
1002
1003                 desc.its_vmovp_cmd.col = &its->collections[col_id];
1004                 its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
1005         }
1006
1007         raw_spin_unlock_irqrestore(&vmovp_lock, flags);
1008 }
1009
1010 static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe)
1011 {
1012         struct its_cmd_desc desc;
1013
1014         desc.its_vinvall_cmd.vpe = vpe;
1015         its_send_single_vcommand(its, its_build_vinvall_cmd, &desc);
1016 }
1017
1018 /*
1019  * irqchip functions - assumes MSI, mostly.
1020  */
1021
1022 static inline u32 its_get_event_id(struct irq_data *d)
1023 {
1024         struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1025         return d->hwirq - its_dev->event_map.lpi_base;
1026 }
1027
1028 static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
1029 {
1030         irq_hw_number_t hwirq;
1031         struct page *prop_page;
1032         u8 *cfg;
1033
1034         if (irqd_is_forwarded_to_vcpu(d)) {
1035                 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1036                 u32 event = its_get_event_id(d);
1037                 struct its_vlpi_map *map;
1038
1039                 prop_page = its_dev->event_map.vm->vprop_page;
1040                 map = &its_dev->event_map.vlpi_maps[event];
1041                 hwirq = map->vintid;
1042
1043                 /* Remember the updated property */
1044                 map->properties &= ~clr;
1045                 map->properties |= set | LPI_PROP_GROUP1;
1046         } else {
1047                 prop_page = gic_rdists->prop_page;
1048                 hwirq = d->hwirq;
1049         }
1050
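        /*
         * LPI INTIDs start at 8192 and the property table holds one
         * configuration byte per LPI, so the byte for this interrupt
         * lives at offset (hwirq - 8192).
         */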
1051         cfg = page_address(prop_page) + hwirq - 8192;
1052         *cfg &= ~clr;
1053         *cfg |= set | LPI_PROP_GROUP1;
1054
1055         /*
1056          * Make the above write visible to the redistributors.
1057          * And yes, we're flushing exactly: One. Single. Byte.
1058          * Humpf...
1059          */
1060         if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING)
1061                 gic_flush_dcache_to_poc(cfg, sizeof(*cfg));
1062         else
1063                 dsb(ishst);
1064 }
1065
1066 static void lpi_update_config(struct irq_data *d, u8 clr, u8 set)
1067 {
1068         struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1069
1070         lpi_write_config(d, clr, set);
1071         its_send_inv(its_dev, its_get_event_id(d));
1072 }
1073
1074 static void its_vlpi_set_doorbell(struct irq_data *d, bool enable)
1075 {
1076         struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1077         u32 event = its_get_event_id(d);
1078
1079         if (its_dev->event_map.vlpi_maps[event].db_enabled == enable)
1080                 return;
1081
1082         its_dev->event_map.vlpi_maps[event].db_enabled = enable;
1083
1084         /*
1085          * More fun with the architecture:
1086          *
1087          * Ideally, we'd issue a VMAPTI to set the doorbell to its LPI
1088          * value or to 1023, depending on the enable bit. But that
1089          * would be issuing a mapping for an /existing/ DevID+EventID
1090          * pair, which is UNPREDICTABLE. Instead, let's issue a VMOVI
1091          * to the /same/ vPE, using this opportunity to adjust the
1092          * doorbell. Mouahahahaha. We loves it, Precious.
1093          */
1094         its_send_vmovi(its_dev, event);
1095 }
1096
1097 static void its_mask_irq(struct irq_data *d)
1098 {
1099         if (irqd_is_forwarded_to_vcpu(d))
1100                 its_vlpi_set_doorbell(d, false);
1101
1102         lpi_update_config(d, LPI_PROP_ENABLED, 0);
1103 }
1104
1105 static void its_unmask_irq(struct irq_data *d)
1106 {
1107         if (irqd_is_forwarded_to_vcpu(d))
1108                 its_vlpi_set_doorbell(d, true);
1109
1110         lpi_update_config(d, 0, LPI_PROP_ENABLED);
1111 }
1112
1113 static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
1114                             bool force)
1115 {
1116         unsigned int cpu;
1117         const struct cpumask *cpu_mask = cpu_online_mask;
1118         struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1119         struct its_collection *target_col;
1120         u32 id = its_get_event_id(d);
1121
1122         /* A forwarded interrupt should use irq_set_vcpu_affinity */
1123         if (irqd_is_forwarded_to_vcpu(d))
1124                 return -EINVAL;
1125
1126         /* an LPI cannot be routed to a redistributor on a foreign NUMA node */
1127         if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
1128                 if (its_dev->its->numa_node >= 0) {
1129                         cpu_mask = cpumask_of_node(its_dev->its->numa_node);
1130                         if (!cpumask_intersects(mask_val, cpu_mask))
1131                                 return -EINVAL;
1132                 }
1133         }
1134
1135         cpu = cpumask_any_and(mask_val, cpu_mask);
1136
1137         if (cpu >= nr_cpu_ids)
1138                 return -EINVAL;
1139
1140         /* no need to move the LPI if the target CPU is the one it is already mapped to */
1141         if (cpu != its_dev->event_map.col_map[id]) {
1142                 target_col = &its_dev->its->collections[cpu];
1143                 its_send_movi(its_dev, target_col, id);
1144                 its_dev->event_map.col_map[id] = cpu;
1145                 irq_data_update_effective_affinity(d, cpumask_of(cpu));
1146         }
1147
1148         return IRQ_SET_MASK_OK_DONE;
1149 }
1150
1151 static u64 its_irq_get_msi_base(struct its_device *its_dev)
1152 {
1153         struct its_node *its = its_dev->its;
1154
1155         return its->phys_base + GITS_TRANSLATER;
1156 }
1157
1158 static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
1159 {
1160         struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1161         struct its_node *its;
1162         u64 addr;
1163
1164         its = its_dev->its;
1165         addr = its->get_msi_base(its_dev);
1166
1167         msg->address_lo         = lower_32_bits(addr);
1168         msg->address_hi         = upper_32_bits(addr);
1169         msg->data               = its_get_event_id(d);
1170
1171         iommu_dma_map_msi_msg(d->irq, msg);
1172 }
1173
1174 static int its_irq_set_irqchip_state(struct irq_data *d,
1175                                      enum irqchip_irq_state which,
1176                                      bool state)
1177 {
1178         struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1179         u32 event = its_get_event_id(d);
1180
1181         if (which != IRQCHIP_STATE_PENDING)
1182                 return -EINVAL;
1183
1184         if (state)
1185                 its_send_int(its_dev, event);
1186         else
1187                 its_send_clear(its_dev, event);
1188
1189         return 0;
1190 }
1191
1192 static void its_map_vm(struct its_node *its, struct its_vm *vm)
1193 {
1194         unsigned long flags;
1195
1196         /* Not using the ITS list? Everything is always mapped. */
1197         if (!its_list_map)
1198                 return;
1199
1200         raw_spin_lock_irqsave(&vmovp_lock, flags);
1201
1202         /*
1203          * If the VM wasn't mapped yet, iterate over the vpes and get
1204          * them mapped now.
1205          */
1206         vm->vlpi_count[its->list_nr]++;
1207
1208         if (vm->vlpi_count[its->list_nr] == 1) {
1209                 int i;
1210
1211                 for (i = 0; i < vm->nr_vpes; i++) {
1212                         struct its_vpe *vpe = vm->vpes[i];
1213                         struct irq_data *d = irq_get_irq_data(vpe->irq);
1214
1215                         /* Map the VPE to the first possible CPU */
1216                         vpe->col_idx = cpumask_first(cpu_online_mask);
1217                         its_send_vmapp(its, vpe, true);
1218                         its_send_vinvall(its, vpe);
1219                         irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
1220                 }
1221         }
1222
1223         raw_spin_unlock_irqrestore(&vmovp_lock, flags);
1224 }
1225
1226 static void its_unmap_vm(struct its_node *its, struct its_vm *vm)
1227 {
1228         unsigned long flags;
1229
1230         /* Not using the ITS list? Everything is always mapped. */
1231         if (!its_list_map)
1232                 return;
1233
1234         raw_spin_lock_irqsave(&vmovp_lock, flags);
1235
1236         if (!--vm->vlpi_count[its->list_nr]) {
1237                 int i;
1238
1239                 for (i = 0; i < vm->nr_vpes; i++)
1240                         its_send_vmapp(its, vm->vpes[i], false);
1241         }
1242
1243         raw_spin_unlock_irqrestore(&vmovp_lock, flags);
1244 }
1245
1246 static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
1247 {
1248         struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1249         u32 event = its_get_event_id(d);
1250         int ret = 0;
1251
1252         if (!info->map)
1253                 return -EINVAL;
1254
1255         mutex_lock(&its_dev->event_map.vlpi_lock);
1256
1257         if (!its_dev->event_map.vm) {
1258                 struct its_vlpi_map *maps;
1259
1260                 maps = kcalloc(its_dev->event_map.nr_lpis, sizeof(*maps),
1261                                GFP_KERNEL);
1262                 if (!maps) {
1263                         ret = -ENOMEM;
1264                         goto out;
1265                 }
1266
1267                 its_dev->event_map.vm = info->map->vm;
1268                 its_dev->event_map.vlpi_maps = maps;
1269         } else if (its_dev->event_map.vm != info->map->vm) {
1270                 ret = -EINVAL;
1271                 goto out;
1272         }
1273
1274         /* Get our private copy of the mapping information */
1275         its_dev->event_map.vlpi_maps[event] = *info->map;
1276
1277         if (irqd_is_forwarded_to_vcpu(d)) {
1278                 /* Already mapped, move it around */
1279                 its_send_vmovi(its_dev, event);
1280         } else {
1281                 /* Ensure all the VPEs are mapped on this ITS */
1282                 its_map_vm(its_dev->its, info->map->vm);
1283
1284                 /*
1285                  * Flag the interrupt as forwarded so that we can
1286                  * start poking the virtual property table.
1287                  */
1288                 irqd_set_forwarded_to_vcpu(d);
1289
1290                 /* Write out the property to the prop table */
1291                 lpi_write_config(d, 0xff, info->map->properties);
1292
1293                 /* Drop the physical mapping */
1294                 its_send_discard(its_dev, event);
1295
1296                 /* and install the virtual one */
1297                 its_send_vmapti(its_dev, event);
1298
1299                 /* Increment the number of VLPIs */
1300                 its_dev->event_map.nr_vlpis++;
1301         }
1302
1303 out:
1304         mutex_unlock(&its_dev->event_map.vlpi_lock);
1305         return ret;
1306 }
1307
1308 static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info)
1309 {
1310         struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1311         u32 event = its_get_event_id(d);
1312         int ret = 0;
1313
1314         mutex_lock(&its_dev->event_map.vlpi_lock);
1315
1316         if (!its_dev->event_map.vm ||
1317             !its_dev->event_map.vlpi_maps[event].vm) {
1318                 ret = -EINVAL;
1319                 goto out;
1320         }
1321
1322         /* Copy our mapping information to the incoming request */
1323         *info->map = its_dev->event_map.vlpi_maps[event];
1324
1325 out:
1326         mutex_unlock(&its_dev->event_map.vlpi_lock);
1327         return ret;
1328 }
1329
1330 static int its_vlpi_unmap(struct irq_data *d)
1331 {
1332         struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1333         u32 event = its_get_event_id(d);
1334         int ret = 0;
1335
1336         mutex_lock(&its_dev->event_map.vlpi_lock);
1337
1338         if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) {
1339                 ret = -EINVAL;
1340                 goto out;
1341         }
1342
1343         /* Drop the virtual mapping */
1344         its_send_discard(its_dev, event);
1345
1346         /* and restore the physical one */
1347         irqd_clr_forwarded_to_vcpu(d);
1348         its_send_mapti(its_dev, d->hwirq, event);
1349         lpi_update_config(d, 0xff, (LPI_PROP_DEFAULT_PRIO |
1350                                     LPI_PROP_ENABLED |
1351                                     LPI_PROP_GROUP1));
1352
1353         /* Potentially unmap the VM from this ITS */
1354         its_unmap_vm(its_dev->its, its_dev->event_map.vm);
1355
1356         /*
1357          * Drop the refcount and make the device available again if
1358          * this was the last VLPI.
1359          */
1360         if (!--its_dev->event_map.nr_vlpis) {
1361                 its_dev->event_map.vm = NULL;
1362                 kfree(its_dev->event_map.vlpi_maps);
1363         }
1364
1365 out:
1366         mutex_unlock(&its_dev->event_map.vlpi_lock);
1367         return ret;
1368 }
1369
1370 static int its_vlpi_prop_update(struct irq_data *d, struct its_cmd_info *info)
1371 {
1372         struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1373
1374         if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d))
1375                 return -EINVAL;
1376
1377         if (info->cmd_type == PROP_UPDATE_AND_INV_VLPI)
1378                 lpi_update_config(d, 0xff, info->config);
1379         else
1380                 lpi_write_config(d, 0xff, info->config);
1381         its_vlpi_set_doorbell(d, !!(info->config & LPI_PROP_ENABLED));
1382
1383         return 0;
1384 }
1385
1386 static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
1387 {
1388         struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1389         struct its_cmd_info *info = vcpu_info;
1390
1391         /* Need a v4 ITS */
1392         if (!its_dev->its->is_v4)
1393                 return -EINVAL;
1394
1395         /* Unmap request? */
1396         if (!info)
1397                 return its_vlpi_unmap(d);
1398
1399         switch (info->cmd_type) {
1400         case MAP_VLPI:
1401                 return its_vlpi_map(d, info);
1402
1403         case GET_VLPI:
1404                 return its_vlpi_get(d, info);
1405
1406         case PROP_UPDATE_VLPI:
1407         case PROP_UPDATE_AND_INV_VLPI:
1408                 return its_vlpi_prop_update(d, info);
1409
1410         default:
1411                 return -EINVAL;
1412         }
1413 }
1414
1415 static struct irq_chip its_irq_chip = {
1416         .name                   = "ITS",
1417         .irq_mask               = its_mask_irq,
1418         .irq_unmask             = its_unmask_irq,
1419         .irq_eoi                = irq_chip_eoi_parent,
1420         .irq_set_affinity       = its_set_affinity,
1421         .irq_compose_msi_msg    = its_irq_compose_msi_msg,
1422         .irq_set_irqchip_state  = its_irq_set_irqchip_state,
1423         .irq_set_vcpu_affinity  = its_irq_set_vcpu_affinity,
1424 };
1425
1426
1427 /*
1428  * How we allocate LPIs:
1429  *
1430  * lpi_range_list contains ranges of LPIs that are available to
1431  * allocate from. To allocate LPIs, just pick the first range that
1432  * fits the required allocation, and reduce it by the required
1433  * amount. Once empty, remove the range from the list.
1434  *
1435  * To free a range of LPIs, add a free range to the list, sort it and
1436  * merge the result if the new range happens to be adjacent to an
1437  * already free block.
1438  *
1439  * The consequence of the above is that allocation cost is low, but
1440  * freeing is expensive. We assume that freeing rarely occurs.
1441  */
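/*
 * For example, starting from a single free range covering
 * [8192, 8192 + N): allocating 32 LPIs hands out base 8192 and
 * shrinks the range to start at 8224; freeing those 32 LPIs later
 * adds [8192, 8224) back to the list, and the sort + merge pass
 * below coalesces it with the adjacent free range again.
 */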
1442
1443 static DEFINE_MUTEX(lpi_range_lock);
1444 static LIST_HEAD(lpi_range_list);
1445
1446 struct lpi_range {
1447         struct list_head        entry;
1448         u32                     base_id;
1449         u32                     span;
1450 };
1451
1452 static struct lpi_range *mk_lpi_range(u32 base, u32 span)
1453 {
1454         struct lpi_range *range;
1455
1456         range = kzalloc(sizeof(*range), GFP_KERNEL);
1457         if (range) {
1458                 INIT_LIST_HEAD(&range->entry);
1459                 range->base_id = base;
1460                 range->span = span;
1461         }
1462
1463         return range;
1464 }
1465
1466 static int lpi_range_cmp(void *priv, struct list_head *a, struct list_head *b)
1467 {
1468         struct lpi_range *ra, *rb;
1469
1470         ra = container_of(a, struct lpi_range, entry);
1471         rb = container_of(b, struct lpi_range, entry);
1472
1473         return rb->base_id - ra->base_id;
1474 }
1475
1476 static void merge_lpi_ranges(void)
1477 {
1478         struct lpi_range *range, *tmp;
1479
1480         list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) {
1481                 if (!list_is_last(&range->entry, &lpi_range_list) &&
1482                     (tmp->base_id == (range->base_id + range->span))) {
1483                         tmp->base_id = range->base_id;
1484                         tmp->span += range->span;
1485                         list_del(&range->entry);
1486                         kfree(range);
1487                 }
1488         }
1489 }
1490
1491 static int alloc_lpi_range(u32 nr_lpis, u32 *base)
1492 {
1493         struct lpi_range *range, *tmp;
1494         int err = -ENOSPC;
1495
1496         mutex_lock(&lpi_range_lock);
1497
1498         list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) {
1499                 if (range->span >= nr_lpis) {
1500                         *base = range->base_id;
1501                         range->base_id += nr_lpis;
1502                         range->span -= nr_lpis;
1503
1504                         if (range->span == 0) {
1505                                 list_del(&range->entry);
1506                                 kfree(range);
1507                         }
1508
1509                         err = 0;
1510                         break;
1511                 }
1512         }
1513
1514         mutex_unlock(&lpi_range_lock);
1515
1516         pr_debug("ITS: alloc %u:%u\n", *base, nr_lpis);
1517         return err;
1518 }
1519
1520 static int free_lpi_range(u32 base, u32 nr_lpis)
1521 {
1522         struct lpi_range *new;
1523         int err = 0;
1524
1525         mutex_lock(&lpi_range_lock);
1526
1527         new = mk_lpi_range(base, nr_lpis);
1528         if (!new) {
1529                 err = -ENOMEM;
1530                 goto out;
1531         }
1532
1533         list_add(&new->entry, &lpi_range_list);
1534         list_sort(NULL, &lpi_range_list, lpi_range_cmp);
1535         merge_lpi_ranges();
1536 out:
1537         mutex_unlock(&lpi_range_lock);
1538         return err;
1539 }
1540
1541 static int __init its_lpi_init(u32 id_bits)
1542 {
1543         u32 lpis = (1UL << id_bits) - 8192;
1544         u32 numlpis;
1545         int err;
1546
1547         numlpis = 1UL << GICD_TYPER_NUM_LPIS(gic_rdists->gicd_typer);
1548
1549         if (numlpis > 2 && !WARN_ON(numlpis > lpis)) {
1550                 lpis = numlpis;
1551                 pr_info("ITS: Using hypervisor restricted LPI range [%u]\n",
1552                         lpis);
1553         }
1554
1555         /*
1556          * Initializing the allocator is just the same as freeing the
1557          * full range of LPIs.
1558          */
1559         err = free_lpi_range(8192, lpis);
1560         pr_debug("ITS: Allocator initialized for %u LPIs\n", lpis);
1561         return err;
1562 }
1563
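/*
 * Allocate a contiguous block of LPIs. If no range of nr_irqs LPIs is
 * available, the request is halved and retried until it either
 * succeeds or shrinks to nothing; on success the caller gets the
 * (possibly smaller) block via *base/*nr_ids, plus a zeroed bitmap it
 * can use to track which of those IDs are in use.
 */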
1564 static unsigned long *its_lpi_alloc(int nr_irqs, u32 *base, int *nr_ids)
1565 {
1566         unsigned long *bitmap = NULL;
1567         int err = 0;
1568
1569         do {
1570                 err = alloc_lpi_range(nr_irqs, base);
1571                 if (!err)
1572                         break;
1573
1574                 nr_irqs /= 2;
1575         } while (nr_irqs > 0);
1576
1577         if (err)
1578                 goto out;
1579
1580         bitmap = kcalloc(BITS_TO_LONGS(nr_irqs), sizeof (long), GFP_ATOMIC);
1581         if (!bitmap)
1582                 goto out;
1583
1584         *nr_ids = nr_irqs;
1585
1586 out:
1587         if (!bitmap)
1588                 *base = *nr_ids = 0;
1589
1590         return bitmap;
1591 }
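
/*
 * its_lpi_alloc() degrades the request rather than failing outright: a
 * hypothetical caller asking for 32 LPIs when the largest free span is
 * only 20 gets -ENOSPC once, the request is halved to 16 and succeeds,
 * and *nr_ids reports 16. A caller therefore has to check nr_ids, e.g.:
 *
 *	bitmap = its_lpi_alloc(nr_irqs, &base, &nr_ids);
 *	if (bitmap && nr_ids < nr_irqs)
 *		// got a smaller power-of-two chunk than requested
 *
 * which is exactly what its_vpe_irq_domain_alloc() does further down.
 */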
1592
1593 static void its_lpi_free(unsigned long *bitmap, u32 base, u32 nr_ids)
1594 {
1595         WARN_ON(free_lpi_range(base, nr_ids));
1596         kfree(bitmap);
1597 }
1598
1599 static struct page *its_allocate_prop_table(gfp_t gfp_flags)
1600 {
1601         struct page *prop_page;
1602
1603         prop_page = alloc_pages(gfp_flags, get_order(LPI_PROPBASE_SZ));
1604         if (!prop_page)
1605                 return NULL;
1606
1607         /* Priority 0xa0, Group-1, disabled */
1608         memset(page_address(prop_page),
1609                LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1,
1610                LPI_PROPBASE_SZ);
1611
1612         /* Make sure the GIC will observe the written configuration */
1613         gic_flush_dcache_to_poc(page_address(prop_page), LPI_PROPBASE_SZ);
1614
1615         return prop_page;
1616 }
1617
1618 static void its_free_prop_table(struct page *prop_page)
1619 {
1620         free_pages((unsigned long)page_address(prop_page),
1621                    get_order(LPI_PROPBASE_SZ));
1622 }
1623
1624 static int __init its_alloc_lpi_tables(void)
1625 {
1626         phys_addr_t paddr;
1627
1628         lpi_id_bits = GICD_TYPER_ID_BITS(gic_rdists->gicd_typer);
1629         gic_rdists->prop_page = its_allocate_prop_table(GFP_NOWAIT);
1630         if (!gic_rdists->prop_page) {
1631                 pr_err("Failed to allocate PROPBASE\n");
1632                 return -ENOMEM;
1633         }
1634
1635         paddr = page_to_phys(gic_rdists->prop_page);
1636         pr_info("GIC: using LPI property table @%pa\n", &paddr);
1637
1638         return its_lpi_init(lpi_id_bits);
1639 }
1640
1641 static const char *its_base_type_string[] = {
1642         [GITS_BASER_TYPE_DEVICE]        = "Devices",
1643         [GITS_BASER_TYPE_VCPU]          = "Virtual CPUs",
1644         [GITS_BASER_TYPE_RESERVED3]     = "Reserved (3)",
1645         [GITS_BASER_TYPE_COLLECTION]    = "Interrupt Collections",
1646         [GITS_BASER_TYPE_RESERVED5]     = "Reserved (5)",
1647         [GITS_BASER_TYPE_RESERVED6]     = "Reserved (6)",
1648         [GITS_BASER_TYPE_RESERVED7]     = "Reserved (7)",
1649 };
1650
1651 static u64 its_read_baser(struct its_node *its, struct its_baser *baser)
1652 {
1653         u32 idx = baser - its->tables;
1654
1655         return gits_read_baser(its->base + GITS_BASER + (idx << 3));
1656 }
1657
1658 static void its_write_baser(struct its_node *its, struct its_baser *baser,
1659                             u64 val)
1660 {
1661         u32 idx = baser - its->tables;
1662
1663         gits_write_baser(val, its->base + GITS_BASER + (idx << 3));
1664         baser->val = its_read_baser(its, baser);
1665 }
1666
1667 static int its_setup_baser(struct its_node *its, struct its_baser *baser,
1668                            u64 cache, u64 shr, u32 psz, u32 order,
1669                            bool indirect)
1670 {
1671         u64 val = its_read_baser(its, baser);
1672         u64 esz = GITS_BASER_ENTRY_SIZE(val);
1673         u64 type = GITS_BASER_TYPE(val);
1674         u64 baser_phys, tmp;
1675         u32 alloc_pages;
1676         void *base;
1677
1678 retry_alloc_baser:
1679         alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
1680         if (alloc_pages > GITS_BASER_PAGES_MAX) {
1681                 pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n",
1682                         &its->phys_base, its_base_type_string[type],
1683                         alloc_pages, GITS_BASER_PAGES_MAX);
1684                 alloc_pages = GITS_BASER_PAGES_MAX;
1685                 order = get_order(GITS_BASER_PAGES_MAX * psz);
1686         }
1687
1688         base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
1689         if (!base)
1690                 return -ENOMEM;
1691
1692         baser_phys = virt_to_phys(base);
1693
1694         /* Check if the physical address of the memory is above 48bits */
1695         if (IS_ENABLED(CONFIG_ARM64_64K_PAGES) && (baser_phys >> 48)) {
1696
1697                 /* 52bit PA is supported only when PageSize=64K */
1698                 if (psz != SZ_64K) {
1699                         pr_err("ITS: no 52bit PA support when psz=%d\n", psz);
1700                         free_pages((unsigned long)base, order);
1701                         return -ENXIO;
1702                 }
1703
1704                 /* Convert 52bit PA to 48bit field */
1705                 baser_phys = GITS_BASER_PHYS_52_to_48(baser_phys);
1706         }
1707
1708 retry_baser:
1709         val = (baser_phys                                        |
1710                 (type << GITS_BASER_TYPE_SHIFT)                  |
1711                 ((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT)       |
1712                 ((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT)    |
1713                 cache                                            |
1714                 shr                                              |
1715                 GITS_BASER_VALID);
1716
1717         val |=  indirect ? GITS_BASER_INDIRECT : 0x0;
1718
1719         switch (psz) {
1720         case SZ_4K:
1721                 val |= GITS_BASER_PAGE_SIZE_4K;
1722                 break;
1723         case SZ_16K:
1724                 val |= GITS_BASER_PAGE_SIZE_16K;
1725                 break;
1726         case SZ_64K:
1727                 val |= GITS_BASER_PAGE_SIZE_64K;
1728                 break;
1729         }
1730
1731         its_write_baser(its, baser, val);
1732         tmp = baser->val;
1733
1734         if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) {
1735                 /*
1736                  * Shareability didn't stick. Just use
1737                  * whatever the read reported, which is likely
1738                  * to be the only thing this redistributor
1739                  * supports. If that's zero, make it
1740                  * non-cacheable as well.
1741                  */
1742                 shr = tmp & GITS_BASER_SHAREABILITY_MASK;
1743                 if (!shr) {
1744                         cache = GITS_BASER_nC;
1745                         gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order));
1746                 }
1747                 goto retry_baser;
1748         }
1749
1750         if ((val ^ tmp) & GITS_BASER_PAGE_SIZE_MASK) {
1751                 /*
1752                  * Page size didn't stick. Let's try a smaller
1753                  * size and retry. If we reach 4K, then
1754                  * something is horribly wrong...
1755                  */
1756                 free_pages((unsigned long)base, order);
1757                 baser->base = NULL;
1758
1759                 switch (psz) {
1760                 case SZ_16K:
1761                         psz = SZ_4K;
1762                         goto retry_alloc_baser;
1763                 case SZ_64K:
1764                         psz = SZ_16K;
1765                         goto retry_alloc_baser;
1766                 }
1767         }
1768
1769         if (val != tmp) {
1770                 pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n",
1771                        &its->phys_base, its_base_type_string[type],
1772                        val, tmp);
1773                 free_pages((unsigned long)base, order);
1774                 return -ENXIO;
1775         }
1776
1777         baser->order = order;
1778         baser->base = base;
1779         baser->psz = psz;
1780         tmp = indirect ? GITS_LVL1_ENTRY_SIZE : esz;
1781
1782         pr_info("ITS@%pa: allocated %d %s @%lx (%s, esz %d, psz %dK, shr %d)\n",
1783                 &its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / (int)tmp),
1784                 its_base_type_string[type],
1785                 (unsigned long)virt_to_phys(base),
1786                 indirect ? "indirect" : "flat", (int)esz,
1787                 psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);
1788
1789         return 0;
1790 }
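
/*
 * A sizing sketch for the BASER programming above, assuming 4K kernel
 * pages: its_alloc_tables() starts with psz = SZ_64K and order =
 * get_order(SZ_64K) = 4, so PAGE_ORDER_TO_SIZE(order) = 64K and
 * alloc_pages = 1 (a Size field of 0 in GITS_BASER). If the write-back
 * of GITS_BASER shows the 64K page size did not stick, the buffer is
 * freed and the whole allocation is retried with psz = SZ_16K and then
 * SZ_4K. If only the shareability is rejected, the value read back is
 * reused as-is and, when it reads back as zero, the table is downgraded
 * to non-cacheable and flushed to the point of coherency by hand.
 */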
1791
1792 static bool its_parse_indirect_baser(struct its_node *its,
1793                                      struct its_baser *baser,
1794                                      u32 psz, u32 *order, u32 ids)
1795 {
1796         u64 tmp = its_read_baser(its, baser);
1797         u64 type = GITS_BASER_TYPE(tmp);
1798         u64 esz = GITS_BASER_ENTRY_SIZE(tmp);
1799         u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb;
1800         u32 new_order = *order;
1801         bool indirect = false;
1802
1803         /* No need to enable indirection if the memory requirement is < (psz * 2) bytes */
1804         if ((esz << ids) > (psz * 2)) {
1805                 /*
1806                  * Find out whether hw supports a single or two-level table by
1807                  * reading bit at offset '62' after writing '1' to it.
1808                  */
1809                 its_write_baser(its, baser, val | GITS_BASER_INDIRECT);
1810                 indirect = !!(baser->val & GITS_BASER_INDIRECT);
1811
1812                 if (indirect) {
1813                         /*
1814                          * Each lvl2 table is a single ITS page ('psz') in size.
1815                          * To compute the lvl1 table size, subtract from 'ids'
1816                          * (as reported by the ITS hardware) the ID bits covered
1817                          * by one lvl2 table; the lvl1 table then needs one
1818                          * GITS_LVL1_ENTRY_SIZE entry per remaining ID.
1819                          */
1820                         ids -= ilog2(psz / (int)esz);
1821                         esz = GITS_LVL1_ENTRY_SIZE;
1822                 }
1823         }
1824
1825         /*
1826          * Allocate as many entries as required to fit the
1827          * range of device IDs that the ITS can grok... The ID
1828          * space being incredibly sparse, this results in a
1829          * massive waste of memory if two-level device table
1830          * feature is not supported by hardware.
1831          */
1832         new_order = max_t(u32, get_order(esz << ids), new_order);
1833         if (new_order >= MAX_ORDER) {
1834                 new_order = MAX_ORDER - 1;
1835                 ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz);
1836                 pr_warn("ITS@%pa: %s Table too large, reduce ids %u->%u\n",
1837                         &its->phys_base, its_base_type_string[type],
1838                         its->device_ids, ids);
1839         }
1840
1841         *order = new_order;
1842
1843         return indirect;
1844 }
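
/*
 * A worked example of the two-level decision above, with hypothetical
 * numbers: psz = SZ_64K, esz = 8 and ids = 20 would need a flat table
 * of 8 << 20 = 8MB, which is more than 2 * psz, so indirection is
 * probed. If GITS_BASER_INDIRECT sticks, one 64K lvl2 page resolves
 * 64K / 8 = 8192 IDs (13 bits), leaving 20 - 13 = 7 bits for the lvl1
 * table: 128 entries of GITS_LVL1_ENTRY_SIZE (8 bytes) = 1K, i.e. a
 * single page instead of 8MB, with lvl2 pages only allocated on demand
 * by its_alloc_table_entry().
 */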
1845
1846 static void its_free_tables(struct its_node *its)
1847 {
1848         int i;
1849
1850         for (i = 0; i < GITS_BASER_NR_REGS; i++) {
1851                 if (its->tables[i].base) {
1852                         free_pages((unsigned long)its->tables[i].base,
1853                                    its->tables[i].order);
1854                         its->tables[i].base = NULL;
1855                 }
1856         }
1857 }
1858
1859 static int its_alloc_tables(struct its_node *its)
1860 {
1861         u64 shr = GITS_BASER_InnerShareable;
1862         u64 cache = GITS_BASER_RaWaWb;
1863         u32 psz = SZ_64K;
1864         int err, i;
1865
1866         if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375)
1867                 /* erratum 24313: ignore memory access type */
1868                 cache = GITS_BASER_nCnB;
1869
1870         for (i = 0; i < GITS_BASER_NR_REGS; i++) {
1871                 struct its_baser *baser = its->tables + i;
1872                 u64 val = its_read_baser(its, baser);
1873                 u64 type = GITS_BASER_TYPE(val);
1874                 u32 order = get_order(psz);
1875                 bool indirect = false;
1876
1877                 switch (type) {
1878                 case GITS_BASER_TYPE_NONE:
1879                         continue;
1880
1881                 case GITS_BASER_TYPE_DEVICE:
1882                         indirect = its_parse_indirect_baser(its, baser,
1883                                                             psz, &order,
1884                                                             its->device_ids);
                        break;
1885                 case GITS_BASER_TYPE_VCPU:
1886                         indirect = its_parse_indirect_baser(its, baser,
1887                                                             psz, &order,
1888                                                             ITS_MAX_VPEID_BITS);
1889                         break;
1890                 }
1891
1892                 err = its_setup_baser(its, baser, cache, shr, psz, order, indirect);
1893                 if (err < 0) {
1894                         its_free_tables(its);
1895                         return err;
1896                 }
1897
1898                 /* Update settings which will be used for next BASERn */
1899                 psz = baser->psz;
1900                 cache = baser->val & GITS_BASER_CACHEABILITY_MASK;
1901                 shr = baser->val & GITS_BASER_SHAREABILITY_MASK;
1902         }
1903
1904         return 0;
1905 }
1906
1907 static int its_alloc_collections(struct its_node *its)
1908 {
1909         int i;
1910
1911         its->collections = kcalloc(nr_cpu_ids, sizeof(*its->collections),
1912                                    GFP_KERNEL);
1913         if (!its->collections)
1914                 return -ENOMEM;
1915
1916         for (i = 0; i < nr_cpu_ids; i++)
1917                 its->collections[i].target_address = ~0ULL;
1918
1919         return 0;
1920 }
1921
1922 static struct page *its_allocate_pending_table(gfp_t gfp_flags)
1923 {
1924         struct page *pend_page;
1925         /*
1926          * The pending pages have to be at least 64kB aligned,
1927          * hence the 'max(LPI_PENDBASE_SZ, SZ_64K)' below.
1928          */
1929         pend_page = alloc_pages(gfp_flags | __GFP_ZERO,
1930                                 get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K)));
1931         if (!pend_page)
1932                 return NULL;
1933
1934         /* Make sure the GIC will observe the zero-ed page */
1935         gic_flush_dcache_to_poc(page_address(pend_page), LPI_PENDBASE_SZ);
1936
1937         return pend_page;
1938 }
1939
1940 static void its_free_pending_table(struct page *pt)
1941 {
1942         free_pages((unsigned long)page_address(pt),
1943                    get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K)));
1944 }
1945
1946 static void its_cpu_init_lpis(void)
1947 {
1948         void __iomem *rbase = gic_data_rdist_rd_base();
1949         struct page *pend_page;
1950         u64 val, tmp;
1951
1952         /* If we didn't allocate the pending table yet, do it now */
1953         pend_page = gic_data_rdist()->pend_page;
1954         if (!pend_page) {
1955                 phys_addr_t paddr;
1956
1957                 pend_page = its_allocate_pending_table(GFP_NOWAIT);
1958                 if (!pend_page) {
1959                         pr_err("Failed to allocate PENDBASE for CPU%d\n",
1960                                smp_processor_id());
1961                         return;
1962                 }
1963
1964                 paddr = page_to_phys(pend_page);
1965                 pr_info("CPU%d: using LPI pending table @%pa\n",
1966                         smp_processor_id(), &paddr);
1967                 gic_data_rdist()->pend_page = pend_page;
1968         }
1969
1970         /* set PROPBASE */
1971         val = (page_to_phys(gic_rdists->prop_page) |
1972                GICR_PROPBASER_InnerShareable |
1973                GICR_PROPBASER_RaWaWb |
1974                ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK));
1975
1976         gicr_write_propbaser(val, rbase + GICR_PROPBASER);
1977         tmp = gicr_read_propbaser(rbase + GICR_PROPBASER);
1978
1979         if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) {
1980                 if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) {
1981                         /*
1982                          * The HW reports non-shareable, we must
1983                          * remove the cacheability attributes as
1984                          * well.
1985                          */
1986                         val &= ~(GICR_PROPBASER_SHAREABILITY_MASK |
1987                                  GICR_PROPBASER_CACHEABILITY_MASK);
1988                         val |= GICR_PROPBASER_nC;
1989                         gicr_write_propbaser(val, rbase + GICR_PROPBASER);
1990                 }
1991                 pr_info_once("GIC: using cache flushing for LPI property table\n");
1992                 gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING;
1993         }
1994
1995         /* set PENDBASE */
1996         val = (page_to_phys(pend_page) |
1997                GICR_PENDBASER_InnerShareable |
1998                GICR_PENDBASER_RaWaWb);
1999
2000         gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
2001         tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER);
2002
2003         if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) {
2004                 /*
2005                  * The HW reports non-shareable, we must remove the
2006                  * cacheability attributes as well.
2007                  */
2008                 val &= ~(GICR_PENDBASER_SHAREABILITY_MASK |
2009                          GICR_PENDBASER_CACHEABILITY_MASK);
2010                 val |= GICR_PENDBASER_nC;
2011                 gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
2012         }
2013
2014         /* Enable LPIs */
2015         val = readl_relaxed(rbase + GICR_CTLR);
2016         val |= GICR_CTLR_ENABLE_LPIS;
2017         writel_relaxed(val, rbase + GICR_CTLR);
2018
2019         /* Make sure the GIC has seen the above */
2020         dsb(sy);
2021 }
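
/*
 * For a sense of scale, with lpi_id_bits = 16 the PROPBASER write above
 * programs IDbits = 15, covering LPI INTIDs up to 65535 with the 64K
 * property table allocated in its_alloc_lpi_tables(). The read-back
 * dance on both PROPBASER and PENDBASER exists because implementations
 * may fix the shareability/cacheability attributes; when shareability
 * reads back as zero the driver drops to non-cacheable mappings and
 * relies on explicit flushes (RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING).
 */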
2022
2023 static void its_cpu_init_collection(struct its_node *its)
2024 {
2025         int cpu = smp_processor_id();
2026         u64 target;
2027
2028         /* avoid cross-node collections and their mapping */
2029         if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
2030                 struct device_node *cpu_node;
2031
2032                 cpu_node = of_get_cpu_node(cpu, NULL);
2033                 if (its->numa_node != NUMA_NO_NODE &&
2034                         its->numa_node != of_node_to_nid(cpu_node))
2035                         return;
2036         }
2037
2038         /*
2039          * We now have to bind each collection to its target
2040          * redistributor.
2041          */
2042         if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) {
2043                 /*
2044                  * This ITS wants the physical address of the
2045                  * redistributor.
2046                  */
2047                 target = gic_data_rdist()->phys_base;
2048         } else {
2049                 /* This ITS wants a linear CPU number. */
2050                 target = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
2051                 target = GICR_TYPER_CPU_NUMBER(target) << 16;
2052         }
2053
2054         /* Perform collection mapping */
2055         its->collections[cpu].target_address = target;
2056         its->collections[cpu].col_id = cpu;
2057
2058         its_send_mapc(its, &its->collections[cpu], 1);
2059         its_send_invall(its, &its->collections[cpu]);
2060 }
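
/*
 * Example of the two target encodings above (addresses are made up):
 * an ITS with GITS_TYPER.PTA == 1 is given the redistributor's physical
 * base, e.g. 0x40100000, while an ITS with PTA == 0 is given the linear
 * processor number from GICR_TYPER shifted into place, so CPU 3 becomes
 * 0x00030000. Whichever form is used ends up as the RDbase of the MAPC
 * command issued by its_send_mapc() above.
 */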
2061
2062 static void its_cpu_init_collections(void)
2063 {
2064         struct its_node *its;
2065
2066         raw_spin_lock(&its_lock);
2067
2068         list_for_each_entry(its, &its_nodes, entry)
2069                 its_cpu_init_collection(its);
2070
2071         raw_spin_unlock(&its_lock);
2072 }
2073
2074 static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
2075 {
2076         struct its_device *its_dev = NULL, *tmp;
2077         unsigned long flags;
2078
2079         raw_spin_lock_irqsave(&its->lock, flags);
2080
2081         list_for_each_entry(tmp, &its->its_device_list, entry) {
2082                 if (tmp->device_id == dev_id) {
2083                         its_dev = tmp;
2084                         break;
2085                 }
2086         }
2087
2088         raw_spin_unlock_irqrestore(&its->lock, flags);
2089
2090         return its_dev;
2091 }
2092
2093 static struct its_baser *its_get_baser(struct its_node *its, u32 type)
2094 {
2095         int i;
2096
2097         for (i = 0; i < GITS_BASER_NR_REGS; i++) {
2098                 if (GITS_BASER_TYPE(its->tables[i].val) == type)
2099                         return &its->tables[i];
2100         }
2101
2102         return NULL;
2103 }
2104
2105 static bool its_alloc_table_entry(struct its_baser *baser, u32 id)
2106 {
2107         struct page *page;
2108         u32 esz, idx;
2109         __le64 *table;
2110
2111         /* Don't allow device id that exceeds single, flat table limit */
2112         esz = GITS_BASER_ENTRY_SIZE(baser->val);
2113         if (!(baser->val & GITS_BASER_INDIRECT))
2114                 return (id < (PAGE_ORDER_TO_SIZE(baser->order) / esz));
2115
2116         /* Compute 1st level table index & check if that exceeds table limit */
2117         idx = id >> ilog2(baser->psz / esz);
2118         if (idx >= (PAGE_ORDER_TO_SIZE(baser->order) / GITS_LVL1_ENTRY_SIZE))
2119                 return false;
2120
2121         table = baser->base;
2122
2123         /* Allocate memory for 2nd level table */
2124         if (!table[idx]) {
2125                 page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(baser->psz));
2126                 if (!page)
2127                         return false;
2128
2129                 /* Flush Lvl2 table to PoC if hw doesn't support coherency */
2130                 if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
2131                         gic_flush_dcache_to_poc(page_address(page), baser->psz);
2132
2133                 table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);
2134
2135                 /* Flush Lvl1 entry to PoC if hw doesn't support coherency */
2136                 if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
2137                         gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE);
2138
2139                 /* Ensure updated table contents are visible to ITS hardware */
2140                 dsb(sy);
2141         }
2142
2143         return true;
2144 }
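
/*
 * Indexing sketch for the indirect case above, with hypothetical
 * values: psz = SZ_64K and esz = 8 means one lvl2 page covers
 * 64K / 8 = 8192 IDs, so ID 20000 selects lvl1 slot idx = 20000 >> 13
 * = 2. The first use of that slot allocates a zeroed 64K lvl2 page,
 * stores its physical address with GITS_BASER_VALID in table[2], and
 * flushes both levels to the point of coherency if the ITS is not
 * coherent, before the dsb makes the update observable to the ITS.
 */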
2145
2146 static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
2147 {
2148         struct its_baser *baser;
2149
2150         baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE);
2151
2152         /* Don't allow device id that exceeds ITS hardware limit */
2153         if (!baser)
2154                 return (ilog2(dev_id) < its->device_ids);
2155
2156         return its_alloc_table_entry(baser, dev_id);
2157 }
2158
2159 static bool its_alloc_vpe_table(u32 vpe_id)
2160 {
2161         struct its_node *its;
2162
2163         /*
2164          * Make sure the L2 tables are allocated on *all* v4 ITSs. We
2165          * could try and only do it on ITSs corresponding to devices
2166          * that have interrupts targeted at this VPE, but the
2167          * complexity becomes crazy (and you have tons of memory
2168          * anyway, right?).
2169          */
2170         list_for_each_entry(its, &its_nodes, entry) {
2171                 struct its_baser *baser;
2172
2173                 if (!its->is_v4)
2174                         continue;
2175
2176                 baser = its_get_baser(its, GITS_BASER_TYPE_VCPU);
2177                 if (!baser)
2178                         return false;
2179
2180                 if (!its_alloc_table_entry(baser, vpe_id))
2181                         return false;
2182         }
2183
2184         return true;
2185 }
2186
2187 static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
2188                                             int nvecs, bool alloc_lpis)
2189 {
2190         struct its_device *dev;
2191         unsigned long *lpi_map = NULL;
2192         unsigned long flags;
2193         u16 *col_map = NULL;
2194         void *itt;
2195         int lpi_base;
2196         int nr_lpis;
2197         int nr_ites;
2198         int sz;
2199
2200         if (!its_alloc_device_table(its, dev_id))
2201                 return NULL;
2202
2203         if (WARN_ON(!is_power_of_2(nvecs)))
2204                 nvecs = roundup_pow_of_two(nvecs);
2205
2206         dev = kzalloc(sizeof(*dev), GFP_KERNEL);
2207         /*
2208          * Even if the device wants a single LPI, the ITT must be
2209          * sized as a power of two (and you need at least one bit...).
2210          */
2211         nr_ites = max(2, nvecs);
2212         sz = nr_ites * its->ite_size;
2213         sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
2214         itt = kzalloc(sz, GFP_KERNEL);
2215         if (alloc_lpis) {
2216                 lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis);
2217                 if (lpi_map)
2218                         col_map = kcalloc(nr_lpis, sizeof(*col_map),
2219                                           GFP_KERNEL);
2220         } else {
2221                 col_map = kcalloc(nr_ites, sizeof(*col_map), GFP_KERNEL);
2222                 nr_lpis = 0;
2223                 lpi_base = 0;
2224         }
2225
2226         if (!dev || !itt ||  !col_map || (!lpi_map && alloc_lpis)) {
2227                 kfree(dev);
2228                 kfree(itt);
2229                 kfree(lpi_map);
2230                 kfree(col_map);
2231                 return NULL;
2232         }
2233
2234         gic_flush_dcache_to_poc(itt, sz);
2235
2236         dev->its = its;
2237         dev->itt = itt;
2238         dev->nr_ites = nr_ites;
2239         dev->event_map.lpi_map = lpi_map;
2240         dev->event_map.col_map = col_map;
2241         dev->event_map.lpi_base = lpi_base;
2242         dev->event_map.nr_lpis = nr_lpis;
2243         mutex_init(&dev->event_map.vlpi_lock);
2244         dev->device_id = dev_id;
2245         INIT_LIST_HEAD(&dev->entry);
2246
2247         raw_spin_lock_irqsave(&its->lock, flags);
2248         list_add(&dev->entry, &its->its_device_list);
2249         raw_spin_unlock_irqrestore(&its->lock, flags);
2250
2251         /* Map device to its ITT */
2252         its_send_mapd(dev, 1);
2253
2254         return dev;
2255 }
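
/*
 * ITT sizing sketch for the allocation above, assuming an 8-byte ITE
 * (GITS_TYPER dependent; the QDF2400 quirk below bumps it to 16): a
 * request for nvecs = 30 hits the WARN_ON and is rounded up to 32, so
 * nr_ites = 32 and sz = 32 * 8 = 256 bytes, padded by ITS_ITT_ALIGN - 1
 * so that the 256-byte-aligned address programmed by MAPD still has
 * room for the whole table. nr_ites never goes below 2 because MAPD
 * encodes the ITT size as a number of EventID bits, and one bit is the
 * minimum.
 */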
2256
2257 static void its_free_device(struct its_device *its_dev)
2258 {
2259         unsigned long flags;
2260
2261         raw_spin_lock_irqsave(&its_dev->its->lock, flags);
2262         list_del(&its_dev->entry);
2263         raw_spin_unlock_irqrestore(&its_dev->its->lock, flags);
2264         kfree(its_dev->itt);
2265         kfree(its_dev);
2266 }
2267
2268 static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq)
2269 {
2270         int idx;
2271
2272         idx = find_first_zero_bit(dev->event_map.lpi_map,
2273                                   dev->event_map.nr_lpis);
2274         if (idx == dev->event_map.nr_lpis)
2275                 return -ENOSPC;
2276
2277         *hwirq = dev->event_map.lpi_base + idx;
2278         set_bit(idx, dev->event_map.lpi_map);
2279
2280         return 0;
2281 }
2282
2283 static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
2284                            int nvec, msi_alloc_info_t *info)
2285 {
2286         struct its_node *its;
2287         struct its_device *its_dev;
2288         struct msi_domain_info *msi_info;
2289         u32 dev_id;
2290
2291         /*
2292          * We ignore "dev" entirely, and rely on the dev_id that has
2293          * been passed via the scratchpad. This limits this domain's
2294          * usefulness to upper layers that definitely know that they
2295          * are built on top of the ITS.
2296          */
2297         dev_id = info->scratchpad[0].ul;
2298
2299         msi_info = msi_get_domain_info(domain);
2300         its = msi_info->data;
2301
2302         if (!gic_rdists->has_direct_lpi &&
2303             vpe_proxy.dev &&
2304             vpe_proxy.dev->its == its &&
2305             dev_id == vpe_proxy.dev->device_id) {
2306                 /* Bad luck. Get yourself a better implementation */
2307                 WARN_ONCE(1, "DevId %x clashes with GICv4 VPE proxy device\n",
2308                           dev_id);
2309                 return -EINVAL;
2310         }
2311
2312         its_dev = its_find_device(its, dev_id);
2313         if (its_dev) {
2314                 /*
2315                  * We already have seen this ID, probably through
2316                  * another alias (PCI bridge of some sort). No need to
2317                  * create the device.
2318                  */
2319                 pr_debug("Reusing ITT for devID %x\n", dev_id);
2320                 goto out;
2321         }
2322
2323         its_dev = its_create_device(its, dev_id, nvec, true);
2324         if (!its_dev)
2325                 return -ENOMEM;
2326
2327         pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec));
2328 out:
2329         info->scratchpad[0].ptr = its_dev;
2330         return 0;
2331 }
2332
2333 static struct msi_domain_ops its_msi_domain_ops = {
2334         .msi_prepare    = its_msi_prepare,
2335 };
2336
2337 static int its_irq_gic_domain_alloc(struct irq_domain *domain,
2338                                     unsigned int virq,
2339                                     irq_hw_number_t hwirq)
2340 {
2341         struct irq_fwspec fwspec;
2342
2343         if (irq_domain_get_of_node(domain->parent)) {
2344                 fwspec.fwnode = domain->parent->fwnode;
2345                 fwspec.param_count = 3;
2346                 fwspec.param[0] = GIC_IRQ_TYPE_LPI;
2347                 fwspec.param[1] = hwirq;
2348                 fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
2349         } else if (is_fwnode_irqchip(domain->parent->fwnode)) {
2350                 fwspec.fwnode = domain->parent->fwnode;
2351                 fwspec.param_count = 2;
2352                 fwspec.param[0] = hwirq;
2353                 fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
2354         } else {
2355                 return -EINVAL;
2356         }
2357
2358         return irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
2359 }
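
/*
 * The fwspec above is an internal contract with the parent GICv3
 * domain, not a DT binding: with an OF parent, allocating physical LPI
 * 8192 forwards the three cells { GIC_IRQ_TYPE_LPI, 8192,
 * IRQ_TYPE_EDGE_RISING }, and the GICv3 driver uses the hwirq cell
 * unchanged for LPIs (no SPI/PPI offsetting). ACPI or other fwnode
 * parents get the two-cell form carrying just the hwirq and trigger.
 */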
2360
2361 static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
2362                                 unsigned int nr_irqs, void *args)
2363 {
2364         msi_alloc_info_t *info = args;
2365         struct its_device *its_dev = info->scratchpad[0].ptr;
2366         irq_hw_number_t hwirq;
2367         int err;
2368         int i;
2369
2370         for (i = 0; i < nr_irqs; i++) {
2371                 err = its_alloc_device_irq(its_dev, &hwirq);
2372                 if (err)
2373                         return err;
2374
2375                 err = its_irq_gic_domain_alloc(domain, virq + i, hwirq);
2376                 if (err)
2377                         return err;
2378
2379                 irq_domain_set_hwirq_and_chip(domain, virq + i,
2380                                               hwirq, &its_irq_chip, its_dev);
2381                 irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq + i)));
2382                 pr_debug("ID:%d pID:%d vID:%d\n",
2383                          (int)(hwirq - its_dev->event_map.lpi_base),
2384                          (int) hwirq, virq + i);
2385         }
2386
2387         return 0;
2388 }
2389
2390 static int its_irq_domain_activate(struct irq_domain *domain,
2391                                    struct irq_data *d, bool reserve)
2392 {
2393         struct its_device *its_dev = irq_data_get_irq_chip_data(d);
2394         u32 event = its_get_event_id(d);
2395         const struct cpumask *cpu_mask = cpu_online_mask;
2396         int cpu;
2397
2398         /* get the cpu_mask of local node */
2399         if (its_dev->its->numa_node >= 0)
2400                 cpu_mask = cpumask_of_node(its_dev->its->numa_node);
2401
2402         /* Bind the LPI to the first possible CPU */
2403         cpu = cpumask_first_and(cpu_mask, cpu_online_mask);
2404         if (cpu >= nr_cpu_ids) {
2405                 if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144)
2406                         return -EINVAL;
2407
2408                 cpu = cpumask_first(cpu_online_mask);
2409         }
2410
2411         its_dev->event_map.col_map[event] = cpu;
2412         irq_data_update_effective_affinity(d, cpumask_of(cpu));
2413
2414         /* Map the GIC IRQ and event to the device */
2415         its_send_mapti(its_dev, d->hwirq, event);
2416         return 0;
2417 }
2418
2419 static void its_irq_domain_deactivate(struct irq_domain *domain,
2420                                       struct irq_data *d)
2421 {
2422         struct its_device *its_dev = irq_data_get_irq_chip_data(d);
2423         u32 event = its_get_event_id(d);
2424
2425         /* Stop the delivery of interrupts */
2426         its_send_discard(its_dev, event);
2427 }
2428
2429 static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
2430                                 unsigned int nr_irqs)
2431 {
2432         struct irq_data *d = irq_domain_get_irq_data(domain, virq);
2433         struct its_device *its_dev = irq_data_get_irq_chip_data(d);
2434         int i;
2435
2436         for (i = 0; i < nr_irqs; i++) {
2437                 struct irq_data *data = irq_domain_get_irq_data(domain,
2438                                                                 virq + i);
2439                 u32 event = its_get_event_id(data);
2440
2441                 /* Mark interrupt index as unused */
2442                 clear_bit(event, its_dev->event_map.lpi_map);
2443
2444                 /* Nuke the entry in the domain */
2445                 irq_domain_reset_irq_data(data);
2446         }
2447
2448         /* If all interrupts have been freed, start mopping the floor */
2449         if (bitmap_empty(its_dev->event_map.lpi_map,
2450                          its_dev->event_map.nr_lpis)) {
2451                 its_lpi_free(its_dev->event_map.lpi_map,
2452                              its_dev->event_map.lpi_base,
2453                              its_dev->event_map.nr_lpis);
2454                 kfree(its_dev->event_map.col_map);
2455
2456                 /* Unmap device/itt */
2457                 its_send_mapd(its_dev, 0);
2458                 its_free_device(its_dev);
2459         }
2460
2461         irq_domain_free_irqs_parent(domain, virq, nr_irqs);
2462 }
2463
2464 static const struct irq_domain_ops its_domain_ops = {
2465         .alloc                  = its_irq_domain_alloc,
2466         .free                   = its_irq_domain_free,
2467         .activate               = its_irq_domain_activate,
2468         .deactivate             = its_irq_domain_deactivate,
2469 };
2470
2471 /*
2472  * This is insane.
2473  *
2474  * If a GICv4 doesn't implement Direct LPIs (which is extremely
2475  * likely), the only way to perform an invalidate is to use a fake
2476  * device to issue an INV command, implying that the LPI has first
2477  * been mapped to some event on that device. Since this is not exactly
2478  * cheap, we try to keep that mapping around as long as possible, and
2479  * only issue an UNMAP if we're short on available slots.
2480  *
2481  * Broken by design(tm).
2482  */
2483 static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe)
2484 {
2485         /* Already unmapped? */
2486         if (vpe->vpe_proxy_event == -1)
2487                 return;
2488
2489         its_send_discard(vpe_proxy.dev, vpe->vpe_proxy_event);
2490         vpe_proxy.vpes[vpe->vpe_proxy_event] = NULL;
2491
2492         /*
2493          * We don't track empty slots at all, so let's move the
2494          * next_victim pointer if we can quickly reuse that slot
2495          * instead of nuking an existing entry. Not clear that this is
2496          * always a win though, and this might just generate a ripple
2497          * effect... Let's just hope VPEs don't migrate too often.
2498          */
2499         if (vpe_proxy.vpes[vpe_proxy.next_victim])
2500                 vpe_proxy.next_victim = vpe->vpe_proxy_event;
2501
2502         vpe->vpe_proxy_event = -1;
2503 }
2504
2505 static void its_vpe_db_proxy_unmap(struct its_vpe *vpe)
2506 {
2507         if (!gic_rdists->has_direct_lpi) {
2508                 unsigned long flags;
2509
2510                 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
2511                 its_vpe_db_proxy_unmap_locked(vpe);
2512                 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
2513         }
2514 }
2515
2516 static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe)
2517 {
2518         /* Already mapped? */
2519         if (vpe->vpe_proxy_event != -1)
2520                 return;
2521
2522         /* This slot was already allocated. Kick the other VPE out. */
2523         if (vpe_proxy.vpes[vpe_proxy.next_victim])
2524                 its_vpe_db_proxy_unmap_locked(vpe_proxy.vpes[vpe_proxy.next_victim]);
2525
2526         /* Map the new VPE instead */
2527         vpe_proxy.vpes[vpe_proxy.next_victim] = vpe;
2528         vpe->vpe_proxy_event = vpe_proxy.next_victim;
2529         vpe_proxy.next_victim = (vpe_proxy.next_victim + 1) % vpe_proxy.dev->nr_ites;
2530
2531         vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = vpe->col_idx;
2532         its_send_mapti(vpe_proxy.dev, vpe->vpe_db_lpi, vpe->vpe_proxy_event);
2533 }
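
/*
 * Proxy slot rotation sketch, with made-up numbers: assume the proxy
 * device was created with 4 event slots. Mapping the doorbells of vpes
 * A, B, C and D fills events 0..3 and leaves next_victim back at 0;
 * mapping a fifth vpe E then DISCARDs A's doorbell, marks A as
 * unmapped (vpe_proxy_event = -1) and MAPTIs E's doorbell LPI onto
 * event 0. All of this exists only so that INV, INT and CLEAR commands
 * have a (DeviceID, EventID) pair to name a doorbell when the
 * redistributors lack direct LPI support.
 */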
2534
2535 static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to)
2536 {
2537         unsigned long flags;
2538         struct its_collection *target_col;
2539
2540         if (gic_rdists->has_direct_lpi) {
2541                 void __iomem *rdbase;
2542
2543                 rdbase = per_cpu_ptr(gic_rdists->rdist, from)->rd_base;
2544                 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
2545                 while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
2546                         cpu_relax();
2547
2548                 return;
2549         }
2550
2551         raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
2552
2553         its_vpe_db_proxy_map_locked(vpe);
2554
2555         target_col = &vpe_proxy.dev->its->collections[to];
2556         its_send_movi(vpe_proxy.dev, target_col, vpe->vpe_proxy_event);
2557         vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = to;
2558
2559         raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
2560 }
2561
2562 static int its_vpe_set_affinity(struct irq_data *d,
2563                                 const struct cpumask *mask_val,
2564                                 bool force)
2565 {
2566         struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
2567         int cpu = cpumask_first(mask_val);
2568
2569         /*
2570          * Changing affinity is mega expensive, so let's be as lazy as
2571          * we can and only do it if we really have to. Also, if mapped
2572          * into the proxy device, we need to move the doorbell
2573          * interrupt to its new location.
2574          */
2575         if (vpe->col_idx != cpu) {
2576                 int from = vpe->col_idx;
2577
2578                 vpe->col_idx = cpu;
2579                 its_send_vmovp(vpe);
2580                 its_vpe_db_proxy_move(vpe, from, cpu);
2581         }
2582
2583         irq_data_update_effective_affinity(d, cpumask_of(cpu));
2584
2585         return IRQ_SET_MASK_OK_DONE;
2586 }
2587
2588 static void its_vpe_schedule(struct its_vpe *vpe)
2589 {
2590         void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
2591         u64 val;
2592
2593         /* Schedule the VPE */
2594         val  = virt_to_phys(page_address(vpe->its_vm->vprop_page)) &
2595                 GENMASK_ULL(51, 12);
2596         val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
2597         val |= GICR_VPROPBASER_RaWb;
2598         val |= GICR_VPROPBASER_InnerShareable;
2599         gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
2600
2601         val  = virt_to_phys(page_address(vpe->vpt_page)) &
2602                 GENMASK_ULL(51, 16);
2603         val |= GICR_VPENDBASER_RaWaWb;
2604         val |= GICR_VPENDBASER_NonShareable;
2605         /*
2606          * There is no good way of finding out if the pending table is
2607          * empty as we can race against the doorbell interrupt very
2608          * easily. So in the end, vpe->pending_last is only an
2609          * indication that the vcpu has something pending, not one
2610          * that the pending table is empty. A good implementation
2611          * would be able to read its coarse map pretty quickly anyway,
2612          * making this a tolerable issue.
2613          */
2614         val |= GICR_VPENDBASER_PendingLast;
2615         val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0;
2616         val |= GICR_VPENDBASER_Valid;
2617         gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
2618 }
2619
2620 static void its_vpe_deschedule(struct its_vpe *vpe)
2621 {
2622         void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
2623         u32 count = 1000000;    /* 1s! */
2624         bool clean;
2625         u64 val;
2626
2627         /* We're being scheduled out */
2628         val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
2629         val &= ~GICR_VPENDBASER_Valid;
2630         gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
2631
2632         do {
2633                 val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
2634                 clean = !(val & GICR_VPENDBASER_Dirty);
2635                 if (!clean) {
2636                         count--;
2637                         cpu_relax();
2638                         udelay(1);
2639                 }
2640         } while (!clean && count);
2641
2642         if (unlikely(!clean && !count)) {
2643                 pr_err_ratelimited("ITS virtual pending table not cleaning\n");
2644                 vpe->idai = false;
2645                 vpe->pending_last = true;
2646         } else {
2647                 vpe->idai = !!(val & GICR_VPENDBASER_IDAI);
2648                 vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
2649         }
2650 }
2651
2652 static void its_vpe_invall(struct its_vpe *vpe)
2653 {
2654         struct its_node *its;
2655
2656         list_for_each_entry(its, &its_nodes, entry) {
2657                 if (!its->is_v4)
2658                         continue;
2659
2660                 if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr])
2661                         continue;
2662
2663                 /*
2664                  * Sending a VINVALL to a single ITS is enough, as all
2665                  * we need is to reach the redistributors.
2666                  */
2667                 its_send_vinvall(its, vpe);
2668                 return;
2669         }
2670 }
2671
2672 static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
2673 {
2674         struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
2675         struct its_cmd_info *info = vcpu_info;
2676
2677         switch (info->cmd_type) {
2678         case SCHEDULE_VPE:
2679                 its_vpe_schedule(vpe);
2680                 return 0;
2681
2682         case DESCHEDULE_VPE:
2683                 its_vpe_deschedule(vpe);
2684                 return 0;
2685
2686         case INVALL_VPE:
2687                 its_vpe_invall(vpe);
2688                 return 0;
2689
2690         default:
2691                 return -EINVAL;
2692         }
2693 }
2694
2695 static void its_vpe_send_cmd(struct its_vpe *vpe,
2696                              void (*cmd)(struct its_device *, u32))
2697 {
2698         unsigned long flags;
2699
2700         raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
2701
2702         its_vpe_db_proxy_map_locked(vpe);
2703         cmd(vpe_proxy.dev, vpe->vpe_proxy_event);
2704
2705         raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
2706 }
2707
2708 static void its_vpe_send_inv(struct irq_data *d)
2709 {
2710         struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
2711
2712         if (gic_rdists->has_direct_lpi) {
2713                 void __iomem *rdbase;
2714
2715                 rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
2716                 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_INVLPIR);
2717                 while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
2718                         cpu_relax();
2719         } else {
2720                 its_vpe_send_cmd(vpe, its_send_inv);
2721         }
2722 }
2723
2724 static void its_vpe_mask_irq(struct irq_data *d)
2725 {
2726         /*
2727          * We need to mask the LPI, which is described by the parent
2728          * irq_data. Instead of calling into the parent (which won't
2729          * exactly do the right thing), let's simply use the
2730          * parent_data pointer. Yes, I'm naughty.
2731          */
2732         lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0);
2733         its_vpe_send_inv(d);
2734 }
2735
2736 static void its_vpe_unmask_irq(struct irq_data *d)
2737 {
2738         /* Same hack as above... */
2739         lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED);
2740         its_vpe_send_inv(d);
2741 }
2742
2743 static int its_vpe_set_irqchip_state(struct irq_data *d,
2744                                      enum irqchip_irq_state which,
2745                                      bool state)
2746 {
2747         struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
2748
2749         if (which != IRQCHIP_STATE_PENDING)
2750                 return -EINVAL;
2751
2752         if (gic_rdists->has_direct_lpi) {
2753                 void __iomem *rdbase;
2754
2755                 rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
2756                 if (state) {
2757                         gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR);
2758                 } else {
2759                         gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
2760                         while (gic_read_lpir(rdbase + GICR_SYNCR) & 1)
2761                                 cpu_relax();
2762                 }
2763         } else {
2764                 if (state)
2765                         its_vpe_send_cmd(vpe, its_send_int);
2766                 else
2767                         its_vpe_send_cmd(vpe, its_send_clear);
2768         }
2769
2770         return 0;
2771 }
2772
2773 static struct irq_chip its_vpe_irq_chip = {
2774         .name                   = "GICv4-vpe",
2775         .irq_mask               = its_vpe_mask_irq,
2776         .irq_unmask             = its_vpe_unmask_irq,
2777         .irq_eoi                = irq_chip_eoi_parent,
2778         .irq_set_affinity       = its_vpe_set_affinity,
2779         .irq_set_irqchip_state  = its_vpe_set_irqchip_state,
2780         .irq_set_vcpu_affinity  = its_vpe_set_vcpu_affinity,
2781 };
2782
2783 static int its_vpe_id_alloc(void)
2784 {
2785         return ida_simple_get(&its_vpeid_ida, 0, ITS_MAX_VPEID, GFP_KERNEL);
2786 }
2787
2788 static void its_vpe_id_free(u16 id)
2789 {
2790         ida_simple_remove(&its_vpeid_ida, id);
2791 }
2792
2793 static int its_vpe_init(struct its_vpe *vpe)
2794 {
2795         struct page *vpt_page;
2796         int vpe_id;
2797
2798         /* Allocate vpe_id */
2799         vpe_id = its_vpe_id_alloc();
2800         if (vpe_id < 0)
2801                 return vpe_id;
2802
2803         /* Allocate VPT */
2804         vpt_page = its_allocate_pending_table(GFP_KERNEL);
2805         if (!vpt_page) {
2806                 its_vpe_id_free(vpe_id);
2807                 return -ENOMEM;
2808         }
2809
2810         if (!its_alloc_vpe_table(vpe_id)) {
2811                 its_vpe_id_free(vpe_id);
2812                 its_free_pending_table(vpt_page);
2813                 return -ENOMEM;
2814         }
2815
2816         vpe->vpe_id = vpe_id;
2817         vpe->vpt_page = vpt_page;
2818         vpe->vpe_proxy_event = -1;
2819
2820         return 0;
2821 }
2822
2823 static void its_vpe_teardown(struct its_vpe *vpe)
2824 {
2825         its_vpe_db_proxy_unmap(vpe);
2826         its_vpe_id_free(vpe->vpe_id);
2827         its_free_pending_table(vpe->vpt_page);
2828 }
2829
2830 static void its_vpe_irq_domain_free(struct irq_domain *domain,
2831                                     unsigned int virq,
2832                                     unsigned int nr_irqs)
2833 {
2834         struct its_vm *vm = domain->host_data;
2835         int i;
2836
2837         irq_domain_free_irqs_parent(domain, virq, nr_irqs);
2838
2839         for (i = 0; i < nr_irqs; i++) {
2840                 struct irq_data *data = irq_domain_get_irq_data(domain,
2841                                                                 virq + i);
2842                 struct its_vpe *vpe = irq_data_get_irq_chip_data(data);
2843
2844                 BUG_ON(vm != vpe->its_vm);
2845
2846                 clear_bit(data->hwirq, vm->db_bitmap);
2847                 its_vpe_teardown(vpe);
2848                 irq_domain_reset_irq_data(data);
2849         }
2850
2851         if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) {
2852                 its_lpi_free(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis);
2853                 its_free_prop_table(vm->vprop_page);
2854         }
2855 }
2856
2857 static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
2858                                     unsigned int nr_irqs, void *args)
2859 {
2860         struct its_vm *vm = args;
2861         unsigned long *bitmap;
2862         struct page *vprop_page;
2863         int base, nr_ids, i, err = 0;
2864
2865         BUG_ON(!vm);
2866
2867         bitmap = its_lpi_alloc(roundup_pow_of_two(nr_irqs), &base, &nr_ids);
2868         if (!bitmap)
2869                 return -ENOMEM;
2870
2871         if (nr_ids < nr_irqs) {
2872                 its_lpi_free(bitmap, base, nr_ids);
2873                 return -ENOMEM;
2874         }
2875
2876         vprop_page = its_allocate_prop_table(GFP_KERNEL);
2877         if (!vprop_page) {
2878                 its_lpi_free(bitmap, base, nr_ids);
2879                 return -ENOMEM;
2880         }
2881
2882         vm->db_bitmap = bitmap;
2883         vm->db_lpi_base = base;
2884         vm->nr_db_lpis = nr_ids;
2885         vm->vprop_page = vprop_page;
2886
2887         for (i = 0; i < nr_irqs; i++) {
2888                 vm->vpes[i]->vpe_db_lpi = base + i;
2889                 err = its_vpe_init(vm->vpes[i]);
2890                 if (err)
2891                         break;
2892                 err = its_irq_gic_domain_alloc(domain, virq + i,
2893                                                vm->vpes[i]->vpe_db_lpi);
2894                 if (err)
2895                         break;
2896                 irq_domain_set_hwirq_and_chip(domain, virq + i, i,
2897                                               &its_vpe_irq_chip, vm->vpes[i]);
2898                 set_bit(i, bitmap);
2899         }
2900
2901         if (err) {
2902                 if (i > 0)
2903                         its_vpe_irq_domain_free(domain, virq, i);
2904
2905                 its_lpi_free(bitmap, base, nr_ids);
2906                 its_free_prop_table(vprop_page);
2907         }
2908
2909         return err;
2910 }
2911
2912 static int its_vpe_irq_domain_activate(struct irq_domain *domain,
2913                                        struct irq_data *d, bool reserve)
2914 {
2915         struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
2916         struct its_node *its;
2917
2918         /* If we use the list map, we issue VMAPP on demand... */
2919         if (its_list_map)
2920                 return 0;
2921
2922         /* Map the VPE to the first possible CPU */
2923         vpe->col_idx = cpumask_first(cpu_online_mask);
2924
2925         list_for_each_entry(its, &its_nodes, entry) {
2926                 if (!its->is_v4)
2927                         continue;
2928
2929                 its_send_vmapp(its, vpe, true);
2930                 its_send_vinvall(its, vpe);
2931         }
2932
2933         irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
2934
2935         return 0;
2936 }
2937
2938 static void its_vpe_irq_domain_deactivate(struct irq_domain *domain,
2939                                           struct irq_data *d)
2940 {
2941         struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
2942         struct its_node *its;
2943
2944         /*
2945          * If we use the list map, we unmap the VPE once no VLPIs are
2946          * associated with the VM.
2947          */
2948         if (its_list_map)
2949                 return;
2950
2951         list_for_each_entry(its, &its_nodes, entry) {
2952                 if (!its->is_v4)
2953                         continue;
2954
2955                 its_send_vmapp(its, vpe, false);
2956         }
2957 }
2958
2959 static const struct irq_domain_ops its_vpe_domain_ops = {
2960         .alloc                  = its_vpe_irq_domain_alloc,
2961         .free                   = its_vpe_irq_domain_free,
2962         .activate               = its_vpe_irq_domain_activate,
2963         .deactivate             = its_vpe_irq_domain_deactivate,
2964 };
2965
2966 static int its_force_quiescent(void __iomem *base)
2967 {
2968         u32 count = 1000000;    /* 1s */
2969         u32 val;
2970
2971         val = readl_relaxed(base + GITS_CTLR);
2972         /*
2973          * GIC architecture specification requires the ITS to be both
2974          * disabled and quiescent for writes to GITS_BASER<n> or
2975          * GITS_CBASER to not have UNPREDICTABLE results.
2976          */
2977         if ((val & GITS_CTLR_QUIESCENT) && !(val & GITS_CTLR_ENABLE))
2978                 return 0;
2979
2980         /* Disable the generation of all interrupts to this ITS */
2981         val &= ~(GITS_CTLR_ENABLE | GITS_CTLR_ImDe);
2982         writel_relaxed(val, base + GITS_CTLR);
2983
2984         /* Poll GITS_CTLR and wait until ITS becomes quiescent */
2985         while (1) {
2986                 val = readl_relaxed(base + GITS_CTLR);
2987                 if (val & GITS_CTLR_QUIESCENT)
2988                         return 0;
2989
2990                 count--;
2991                 if (!count)
2992                         return -EBUSY;
2993
2994                 cpu_relax();
2995                 udelay(1);
2996         }
2997 }
2998
2999 static bool __maybe_unused its_enable_quirk_cavium_22375(void *data)
3000 {
3001         struct its_node *its = data;
3002
3003         /* erratum 22375: only alloc 8MB table size */
3004         its->device_ids = 0x14;         /* 20 bits, 8MB */
3005         its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375;
3006
3007         return true;
3008 }
3009
3010 static bool __maybe_unused its_enable_quirk_cavium_23144(void *data)
3011 {
3012         struct its_node *its = data;
3013
3014         its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144;
3015
3016         return true;
3017 }
3018
3019 static bool __maybe_unused its_enable_quirk_qdf2400_e0065(void *data)
3020 {
3021         struct its_node *its = data;
3022
3023         /* On QDF2400, the size of the ITE is 16 bytes */
3024         its->ite_size = 16;
3025
3026         return true;
3027 }
3028
3029 static u64 its_irq_get_msi_base_pre_its(struct its_device *its_dev)
3030 {
3031         struct its_node *its = its_dev->its;
3032
3033         /*
3034          * The Socionext Synquacer SoC has a so-called 'pre-ITS',
3035          * which maps 32-bit writes targeted at a separate window of
3036          * size '4 << device_id_bits' onto writes to GITS_TRANSLATER
3037          * with device ID taken from bits [device_id_bits + 1:2] of
3038          * the window offset.
3039          */
3040         return its->pre_its_base + (its_dev->device_id << 2);
3041 }
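
/*
 * Doorbell arithmetic for the pre-ITS above, with a made-up property
 * value of "socionext,synquacer-pre-its = <0x58240000 0x200000>": the
 * quirk below derives ids = ilog2(0x200000) - 2 = 19, i.e. the window
 * covers 4 << 19 = 0x200000 bytes worth of DeviceIDs, and a device with
 * DeviceID 0x5 has to target its MSI writes at 0x58240000 + (0x5 << 2)
 * = 0x58240014 instead of GITS_TRANSLATER.
 */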
3042
3043 static bool __maybe_unused its_enable_quirk_socionext_synquacer(void *data)
3044 {
3045         struct its_node *its = data;
3046         u32 pre_its_window[2];
3047         u32 ids;
3048
3049         if (!fwnode_property_read_u32_array(its->fwnode_handle,
3050                                            "socionext,synquacer-pre-its",
3051                                            pre_its_window,
3052                                            ARRAY_SIZE(pre_its_window))) {
3053
3054                 its->pre_its_base = pre_its_window[0];
3055                 its->get_msi_base = its_irq_get_msi_base_pre_its;
3056
3057                 ids = ilog2(pre_its_window[1]) - 2;
3058                 if (its->device_ids > ids)
3059                         its->device_ids = ids;
3060
3061                 /* the pre-ITS breaks isolation, so disable MSI remapping */
3062                 its->msi_domain_flags &= ~IRQ_DOMAIN_FLAG_MSI_REMAP;
3063                 return true;
3064         }
3065         return false;
3066 }
3067
3068 static bool __maybe_unused its_enable_quirk_hip07_161600802(void *data)
3069 {
3070         struct its_node *its = data;
3071
3072         /*
3073          * Hip07 insists on using the wrong address for the VLPI
3074          * page. Trick it into doing the right thing...
3075          */
3076         its->vlpi_redist_offset = SZ_128K;
3077         return true;
3078 }
3079
3080 static const struct gic_quirk its_quirks[] = {
3081 #ifdef CONFIG_CAVIUM_ERRATUM_22375
3082         {
3083                 .desc   = "ITS: Cavium errata 22375, 24313",
3084                 .iidr   = 0xa100034c,   /* ThunderX pass 1.x */
3085                 .mask   = 0xffff0fff,
3086                 .init   = its_enable_quirk_cavium_22375,
3087         },
3088 #endif
3089 #ifdef CONFIG_CAVIUM_ERRATUM_23144
3090         {
3091                 .desc   = "ITS: Cavium erratum 23144",
3092                 .iidr   = 0xa100034c,   /* ThunderX pass 1.x */
3093                 .mask   = 0xffff0fff,
3094                 .init   = its_enable_quirk_cavium_23144,
3095         },
3096 #endif
3097 #ifdef CONFIG_QCOM_QDF2400_ERRATUM_0065
3098         {
3099                 .desc   = "ITS: QDF2400 erratum 0065",
3100                 .iidr   = 0x00001070, /* QDF2400 ITS rev 1.x */
3101                 .mask   = 0xffffffff,
3102                 .init   = its_enable_quirk_qdf2400_e0065,
3103         },
3104 #endif
3105 #ifdef CONFIG_SOCIONEXT_SYNQUACER_PREITS
3106         {
3107                 /*
3108                  * The Socionext Synquacer SoC incorporates ARM's own GIC-500
3109                  * implementation, but with a 'pre-ITS' added that requires
3110                  * special handling in software.
3111                  */
3112                 .desc   = "ITS: Socionext Synquacer pre-ITS",
3113                 .iidr   = 0x0001143b,
3114                 .mask   = 0xffffffff,
3115                 .init   = its_enable_quirk_socionext_synquacer,
3116         },
3117 #endif
3118 #ifdef CONFIG_HISILICON_ERRATUM_161600802
3119         {
3120                 .desc   = "ITS: Hip07 erratum 161600802",
3121                 .iidr   = 0x00000004,
3122                 .mask   = 0xffffffff,
3123                 .init   = its_enable_quirk_hip07_161600802,
3124         },
3125 #endif
3126         {
3127         }
3128 };
3129
3130 static void its_enable_quirks(struct its_node *its)
3131 {
3132         u32 iidr = readl_relaxed(its->base + GITS_IIDR);
3133
3134         gic_enable_quirks(iidr, its_quirks, its);
3135 }
3136
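/*
 * Suspend path: quiesce every ITS marked for state saving and cache its
 * GITS_CTLR and GITS_CBASER values so they can be reprogrammed on resume.
 * If any ITS fails to quiesce, re-enable the ones already disabled and
 * fail the suspend.
 */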
3137 static int its_save_disable(void)
3138 {
3139         struct its_node *its;
3140         int err = 0;
3141
3142         raw_spin_lock(&its_lock);
3143         list_for_each_entry(its, &its_nodes, entry) {
3144                 void __iomem *base;
3145
3146                 if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE))
3147                         continue;
3148
3149                 base = its->base;
3150                 its->ctlr_save = readl_relaxed(base + GITS_CTLR);
3151                 err = its_force_quiescent(base);
3152                 if (err) {
3153                         pr_err("ITS@%pa: failed to quiesce: %d\n",
3154                                &its->phys_base, err);
3155                         writel_relaxed(its->ctlr_save, base + GITS_CTLR);
3156                         goto err;
3157                 }
3158
3159                 its->cbaser_save = gits_read_cbaser(base + GITS_CBASER);
3160         }
3161
3162 err:
3163         if (err) {
3164                 list_for_each_entry_continue_reverse(its, &its_nodes, entry) {
3165                         void __iomem *base;
3166
3167                         if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE))
3168                                 continue;
3169
3170                         base = its->base;
3171                         writel_relaxed(its->ctlr_save, base + GITS_CTLR);
3172                 }
3173         }
3174         raw_spin_unlock(&its_lock);
3175
3176         return err;
3177 }
3178
3179 static void its_restore_enable(void)
3180 {
3181         struct its_node *its;
3182         int ret;
3183
3184         raw_spin_lock(&its_lock);
3185         list_for_each_entry(its, &its_nodes, entry) {
3186                 void __iomem *base;
3187                 int i;
3188
3189                 if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE))
3190                         continue;
3191
3192                 base = its->base;
3193
3194                 /*
3195                  * Make sure that the ITS is disabled. If it fails to quiesce,
3196                  * don't restore it since writing to CBASER or BASER<n>
3197                  * registers is undefined according to the GIC v3 ITS
3198                  * Specification.
3199                  */
3200                 ret = its_force_quiescent(base);
3201                 if (ret) {
3202                         pr_err("ITS@%pa: failed to quiesce on resume: %d\n",
3203                                &its->phys_base, ret);
3204                         continue;
3205                 }
3206
3207                 gits_write_cbaser(its->cbaser_save, base + GITS_CBASER);
3208
3209                 /*
3210                  * Writing CBASER resets CREADR to 0, so make CWRITER and
3211                  * cmd_write line up with it.
3212                  */
3213                 its->cmd_write = its->cmd_base;
3214                 gits_write_cwriter(0, base + GITS_CWRITER);
3215
3216                 /* Restore GITS_BASER from the value cache. */
3217                 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
3218                         struct its_baser *baser = &its->tables[i];
3219
3220                         if (!(baser->val & GITS_BASER_VALID))
3221                                 continue;
3222
3223                         its_write_baser(its, baser, baser->val);
3224                 }
3225                 writel_relaxed(its->ctlr_save, base + GITS_CTLR);
3226
3227                 /*
3228                  * Reinit the collection if it is stored in the ITS itself,
3229                  * i.e. when its col_id is less than the GITS_TYPER.HCC
3230                  * field (CID < HCC, per the GICv3 architecture spec).
3231                  */
3232                 if (its->collections[smp_processor_id()].col_id <
3233                     GITS_TYPER_HCC(gic_read_typer(base + GITS_TYPER)))
3234                         its_cpu_init_collection(its);
3235         }
3236         raw_spin_unlock(&its_lock);
3237 }
3238
3239 static struct syscore_ops its_syscore_ops = {
3240         .suspend = its_save_disable,
3241         .resume = its_restore_enable,
3242 };
3243
3244 static int its_init_domain(struct fwnode_handle *handle, struct its_node *its)
3245 {
3246         struct irq_domain *inner_domain;
3247         struct msi_domain_info *info;
3248
3249         info = kzalloc(sizeof(*info), GFP_KERNEL);
3250         if (!info)
3251                 return -ENOMEM;
3252
3253         inner_domain = irq_domain_create_tree(handle, &its_domain_ops, its);
3254         if (!inner_domain) {
3255                 kfree(info);
3256                 return -ENOMEM;
3257         }
3258
3259         inner_domain->parent = its_parent;
3260         irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS);
3261         inner_domain->flags |= its->msi_domain_flags;
3262         info->ops = &its_msi_domain_ops;
3263         info->data = its;
3264         inner_domain->host_data = info;
3265
3266         return 0;
3267 }
3268
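/*
 * GICv4 VPE plumbing: with DirectLPI the redistributors can invalidate
 * VPE doorbells directly, so nothing needs to be set up here. Otherwise,
 * create a proxy ITS device whose events are borrowed to issue doorbell
 * invalidations through the ITS command queue.
 */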
3269 static int its_init_vpe_domain(void)
3270 {
3271         struct its_node *its;
3272         u32 devid;
3273         int entries;
3274
3275         if (gic_rdists->has_direct_lpi) {
3276                 pr_info("ITS: Using DirectLPI for VPE invalidation\n");
3277                 return 0;
3278         }
3279
3280         /* Any ITS will do, even if not v4 */
3281         its = list_first_entry(&its_nodes, struct its_node, entry);
3282
3283         entries = roundup_pow_of_two(nr_cpu_ids);
3284         vpe_proxy.vpes = kcalloc(entries, sizeof(*vpe_proxy.vpes),
3285                                  GFP_KERNEL);
3286         if (!vpe_proxy.vpes) {
3287                 pr_err("ITS: Can't allocate GICv4 proxy device array\n");
3288                 return -ENOMEM;
3289         }
3290
3291         /* Use the last possible DevID */
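        /* e.g. GENMASK(19, 0) == 0xfffff for a 20-bit DeviceID space */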
3292         devid = GENMASK(its->device_ids - 1, 0);
3293         vpe_proxy.dev = its_create_device(its, devid, entries, false);
3294         if (!vpe_proxy.dev) {
3295                 kfree(vpe_proxy.vpes);
3296                 pr_err("ITS: Can't allocate GICv4 proxy device\n");
3297                 return -ENOMEM;
3298         }
3299
3300         BUG_ON(entries > vpe_proxy.dev->nr_ites);
3301
3302         raw_spin_lock_init(&vpe_proxy.lock);
3303         vpe_proxy.next_victim = 0;
3304         pr_info("ITS: Allocated DevID %x as GICv4 proxy device (%d slots)\n",
3305                 devid, vpe_proxy.dev->nr_ites);
3306
3307         return 0;
3308 }
3309
3310 static int __init its_compute_its_list_map(struct resource *res,
3311                                            void __iomem *its_base)
3312 {
3313         int its_number;
3314         u32 ctlr;
3315
3316         /*
3317          * This is assumed to run early enough that we are
3318          * guaranteed to be single-threaded, hence no locking.
3319          * Should that ever change, locking will need to be
3320          * added here.
3321          */
3322         its_number = find_first_zero_bit(&its_list_map, GICv4_ITS_LIST_MAX);
3323         if (its_number >= GICv4_ITS_LIST_MAX) {
3324                 pr_err("ITS@%pa: No ITSList entry available!\n",
3325                        &res->start);
3326                 return -EINVAL;
3327         }
3328
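        /*
         * Try to program our allocated number into GITS_CTLR.ITS_Number,
         * then read it back; if the implementation pinned the field to
         * something else, adopt the value the ITS reports.
         */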
3329         ctlr = readl_relaxed(its_base + GITS_CTLR);
3330         ctlr &= ~GITS_CTLR_ITS_NUMBER;
3331         ctlr |= its_number << GITS_CTLR_ITS_NUMBER_SHIFT;
3332         writel_relaxed(ctlr, its_base + GITS_CTLR);
3333         ctlr = readl_relaxed(its_base + GITS_CTLR);
3334         if ((ctlr & GITS_CTLR_ITS_NUMBER) != (its_number << GITS_CTLR_ITS_NUMBER_SHIFT)) {
3335                 its_number = ctlr & GITS_CTLR_ITS_NUMBER;
3336                 its_number >>= GITS_CTLR_ITS_NUMBER_SHIFT;
3337         }
3338
3339         if (test_and_set_bit(its_number, &its_list_map)) {
3340                 pr_err("ITS@%pa: Duplicate ITSList entry %d\n",
3341                        &res->start, its_number);
3342                 return -EINVAL;
3343         }
3344
3345         return its_number;
3346 }
3347
3348 static int __init its_probe_one(struct resource *res,
3349                                 struct fwnode_handle *handle, int numa_node)
3350 {
3351         struct its_node *its;
3352         void __iomem *its_base;
3353         u32 val, ctlr;
3354         u64 baser, tmp, typer;
3355         int err;
3356
3357         its_base = ioremap(res->start, resource_size(res));
3358         if (!its_base) {
3359                 pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start);
3360                 return -ENOMEM;
3361         }
3362
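        /*
         * Bits [7:4] of GITS_PIDR2 hold the architecture revision: a
         * masked value of 0x30 means GICv3, 0x40 means GICv4. Anything
         * else is not an ITS we know how to drive.
         */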
3363         val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK;
3364         if (val != 0x30 && val != 0x40) {
3365                 pr_warn("ITS@%pa: No ITS detected, giving up\n", &res->start);
3366                 err = -ENODEV;
3367                 goto out_unmap;
3368         }
3369
3370         err = its_force_quiescent(its_base);
3371         if (err) {
3372                 pr_warn("ITS@%pa: Failed to quiesce, giving up\n", &res->start);
3373                 goto out_unmap;
3374         }
3375
3376         pr_info("ITS %pR\n", res);
3377
3378         its = kzalloc(sizeof(*its), GFP_KERNEL);
3379         if (!its) {
3380                 err = -ENOMEM;
3381                 goto out_unmap;
3382         }
3383
3384         raw_spin_lock_init(&its->lock);
3385         INIT_LIST_HEAD(&its->entry);
3386         INIT_LIST_HEAD(&its->its_device_list);
3387         typer = gic_read_typer(its_base + GITS_TYPER);
3388         its->base = its_base;
3389         its->phys_base = res->start;
3390         its->ite_size = GITS_TYPER_ITT_ENTRY_SIZE(typer);
3391         its->device_ids = GITS_TYPER_DEVBITS(typer);
3392         its->is_v4 = !!(typer & GITS_TYPER_VLPIS);
3393         if (its->is_v4) {
3394                 if (!(typer & GITS_TYPER_VMOVP)) {
3395                         err = its_compute_its_list_map(res, its_base);
3396                         if (err < 0)
3397                                 goto out_free_its;
3398
3399                         its->list_nr = err;
3400
3401                         pr_info("ITS@%pa: Using ITS number %d\n",
3402                                 &res->start, err);
3403                 } else {
3404                         pr_info("ITS@%pa: Single VMOVP capable\n", &res->start);
3405                 }
3406         }
3407
3408         its->numa_node = numa_node;
3409
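        /*
         * The command queue must be physically contiguous and zero-filled,
         * since its physical address is handed straight to the ITS via
         * GITS_CBASER below.
         */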
3410         its->cmd_base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
3411                                                 get_order(ITS_CMD_QUEUE_SZ));
3412         if (!its->cmd_base) {
3413                 err = -ENOMEM;
3414                 goto out_free_its;
3415         }
3416         its->cmd_write = its->cmd_base;
3417         its->fwnode_handle = handle;
3418         its->get_msi_base = its_irq_get_msi_base;
3419         its->msi_domain_flags = IRQ_DOMAIN_FLAG_MSI_REMAP;
3420
3421         its_enable_quirks(its);
3422
3423         err = its_alloc_tables(its);
3424         if (err)
3425                 goto out_free_cmd;
3426
3427         err = its_alloc_collections(its);
3428         if (err)
3429                 goto out_free_tables;
3430
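        /*
         * Build the GITS_CBASER value: command queue physical address,
         * RaWaWb cacheability, Inner Shareable, the size field expressed
         * as (queue size in 4K pages) - 1, and the Valid bit.
         */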
3431         baser = (virt_to_phys(its->cmd_base)    |
3432                  GITS_CBASER_RaWaWb             |
3433                  GITS_CBASER_InnerShareable     |
3434                  (ITS_CMD_QUEUE_SZ / SZ_4K - 1) |
3435                  GITS_CBASER_VALID);
3436
3437         gits_write_cbaser(baser, its->base + GITS_CBASER);
3438         tmp = gits_read_cbaser(its->base + GITS_CBASER);
3439
3440         if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) {
3441                 if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) {
3442                         /*
3443                          * The HW reports non-shareable, so we
3444                          * must also remove the cacheability
3445                          * attributes.
3446                          */
3447                         baser &= ~(GITS_CBASER_SHAREABILITY_MASK |
3448                                    GITS_CBASER_CACHEABILITY_MASK);
3449                         baser |= GITS_CBASER_nC;
3450                         gits_write_cbaser(baser, its->base + GITS_CBASER);
3451                 }
3452                 pr_info("ITS: using cache flushing for cmd queue\n");
3453                 its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING;
3454         }
3455
3456         gits_write_cwriter(0, its->base + GITS_CWRITER);
3457         ctlr = readl_relaxed(its->base + GITS_CTLR);
3458         ctlr |= GITS_CTLR_ENABLE;
3459         if (its->is_v4)
3460                 ctlr |= GITS_CTLR_ImDe;
3461         writel_relaxed(ctlr, its->base + GITS_CTLR);
3462
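        /*
         * If the ITS holds collections internally (GITS_TYPER.HCC != 0),
         * mark it so its registers are saved on suspend and restored on
         * resume (see its_save_disable()/its_restore_enable()).
         */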
3463         if (GITS_TYPER_HCC(typer))
3464                 its->flags |= ITS_FLAGS_SAVE_SUSPEND_STATE;
3465
3466         err = its_init_domain(handle, its);
3467         if (err)
3468                 goto out_free_tables;
3469
3470         raw_spin_lock(&its_lock);
3471         list_add(&its->entry, &its_nodes);
3472         raw_spin_unlock(&its_lock);
3473
3474         return 0;
3475
3476 out_free_tables:
3477         its_free_tables(its);
3478 out_free_cmd:
3479         free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ));
3480 out_free_its:
3481         kfree(its);
3482 out_unmap:
3483         iounmap(its_base);
3484         pr_err("ITS@%pa: failed probing (%d)\n", &res->start, err);
3485         return err;
3486 }
3487
3488 static bool gic_rdists_supports_plpis(void)
3489 {
3490         return !!(gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS);
3491 }
3492
3493 static int redist_disable_lpis(void)
3494 {
3495         void __iomem *rbase = gic_data_rdist_rd_base();
3496         u64 timeout = USEC_PER_SEC;
3497         u64 val;
3498
3499         /*
3500          * If coming via a CPU hotplug event, we don't need to disable
3501          * LPIs before trying to re-enable them. They are already
3502          * configured and all is well in the world. Detect this case
3503          * by checking the allocation of the pending table for the
3504          * current CPU.
3505          */
3506         if (gic_data_rdist()->pend_page)
3507                 return 0;
3508
3509         if (!gic_rdists_supports_plpis()) {
3510                 pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
3511                 return -ENXIO;
3512         }
3513
3514         val = readl_relaxed(rbase + GICR_CTLR);
3515         if (!(val & GICR_CTLR_ENABLE_LPIS))
3516                 return 0;
3517
3518         pr_warn("CPU%d: Booted with LPIs enabled, memory probably corrupted\n",
3519                 smp_processor_id());
3520         add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
3521
3522         /* Disable LPIs */
3523         val &= ~GICR_CTLR_ENABLE_LPIS;
3524         writel_relaxed(val, rbase + GICR_CTLR);
3525
3526         /* Make sure any change to GICR_CTLR is observable by the GIC */
3527         dsb(sy);
3528
3529         /*
3530          * Software must observe RWP==0 after clearing GICR_CTLR.EnableLPIs
3531          * from 1 to 0 before programming GICR_PEND{PROP}BASER registers.
3532          * Error out if we time out waiting for RWP to clear.
3533          */
3534         while (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_RWP) {
3535                 if (!timeout) {
3536                         pr_err("CPU%d: Timeout while disabling LPIs\n",
3537                                smp_processor_id());
3538                         return -ETIMEDOUT;
3539                 }
3540                 udelay(1);
3541                 timeout--;
3542         }
3543
3544         /*
3545          * After it has been written to 1, it is IMPLEMENTATION
3546          * DEFINED whether GICR_CTLR.EnableLPIs becomes RES1 or can be
3547          * cleared to 0. Error out if clearing the bit failed.
3548          */
3549         if (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_ENABLE_LPIS) {
3550                 pr_err("CPU%d: Failed to disable LPIs\n", smp_processor_id());
3551                 return -EBUSY;
3552         }
3553
3554         return 0;
3555 }
3556
3557 int its_cpu_init(void)
3558 {
3559         if (!list_empty(&its_nodes)) {
3560                 int ret;
3561
3562                 ret = redist_disable_lpis();
3563                 if (ret)
3564                         return ret;
3565
3566                 its_cpu_init_lpis();
3567                 its_cpu_init_collections();
3568         }
3569
3570         return 0;
3571 }
3572
3573 static const struct of_device_id its_device_id[] = {
3574         {       .compatible     = "arm,gic-v3-its",     },
3575         {},
3576 };
3577
3578 static int __init its_of_probe(struct device_node *node)
3579 {
3580         struct device_node *np;
3581         struct resource res;
3582
3583         for (np = of_find_matching_node(node, its_device_id); np;
3584              np = of_find_matching_node(np, its_device_id)) {
3585                 if (!of_device_is_available(np))
3586                         continue;
3587                 if (!of_property_read_bool(np, "msi-controller")) {
3588                         pr_warn("%pOF: no msi-controller property, ITS ignored\n",
3589                                 np);
3590                         continue;
3591                 }
3592
3593                 if (of_address_to_resource(np, 0, &res)) {
3594                         pr_warn("%pOF: no regs?\n", np);
3595                         continue;
3596                 }
3597
3598                 its_probe_one(&res, &np->fwnode, of_node_to_nid(np));
3599         }
3600         return 0;
3601 }
3602
3603 #ifdef CONFIG_ACPI
3604
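/* 128K covers the ITS's two 64K register frames (control + translation). */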
3605 #define ACPI_GICV3_ITS_MEM_SIZE (SZ_128K)
3606
3607 #ifdef CONFIG_ACPI_NUMA
3608 struct its_srat_map {
3609         /* numa node id */
3610         u32     numa_node;
3611         /* GIC ITS ID */
3612         u32     its_id;
3613 };
3614
3615 static struct its_srat_map *its_srat_maps __initdata;
3616 static int its_in_srat __initdata;
3617
3618 static int __init acpi_get_its_numa_node(u32 its_id)
3619 {
3620         int i;
3621
3622         for (i = 0; i < its_in_srat; i++) {
3623                 if (its_id == its_srat_maps[i].its_id)
3624                         return its_srat_maps[i].numa_node;
3625         }
3626         return NUMA_NO_NODE;
3627 }
3628
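/*
 * No-op match handler: the first acpi_table_parse_entries() pass below is
 * only used for its return value, the number of GIC ITS affinity entries,
 * so that its_srat_maps can be sized before the real parse.
 */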
3629 static int __init gic_acpi_match_srat_its(struct acpi_subtable_header *header,
3630                                           const unsigned long end)
3631 {
3632         return 0;
3633 }
3634
3635 static int __init gic_acpi_parse_srat_its(struct acpi_subtable_header *header,
3636                          const unsigned long end)
3637 {
3638         int node;
3639         struct acpi_srat_gic_its_affinity *its_affinity;
3640
3641         its_affinity = (struct acpi_srat_gic_its_affinity *)header;
3642         if (!its_affinity)
3643                 return -EINVAL;
3644
3645         if (its_affinity->header.length < sizeof(*its_affinity)) {
3646                 pr_err("SRAT: Invalid header length %d in ITS affinity\n",
3647                         its_affinity->header.length);
3648                 return -EINVAL;
3649         }
3650
3651         node = acpi_map_pxm_to_node(its_affinity->proximity_domain);
3652
3653         if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) {
3654                 pr_err("SRAT: Invalid NUMA node %d in ITS affinity\n", node);
3655                 return 0;
3656         }
3657
3658         its_srat_maps[its_in_srat].numa_node = node;
3659         its_srat_maps[its_in_srat].its_id = its_affinity->its_id;
3660         its_in_srat++;
3661         pr_info("SRAT: PXM %d -> ITS %d -> Node %d\n",
3662                 its_affinity->proximity_domain, its_affinity->its_id, node);
3663
3664         return 0;
3665 }
3666
3667 static void __init acpi_table_parse_srat_its(void)
3668 {
3669         int count;
3670
3671         count = acpi_table_parse_entries(ACPI_SIG_SRAT,
3672                         sizeof(struct acpi_table_srat),
3673                         ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
3674                         gic_acpi_match_srat_its, 0);
3675         if (count <= 0)
3676                 return;
3677
3678         its_srat_maps = kmalloc_array(count, sizeof(struct its_srat_map),
3679                                       GFP_KERNEL);
3680         if (!its_srat_maps) {
3681                 pr_warn("SRAT: Failed to allocate memory for its_srat_maps!\n");
3682                 return;
3683         }
3684
3685         acpi_table_parse_entries(ACPI_SIG_SRAT,
3686                         sizeof(struct acpi_table_srat),
3687                         ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
3688                         gic_acpi_parse_srat_its, 0);
3689 }
3690
3691 /* free the its_srat_maps after ITS probing */
3692 static void __init acpi_its_srat_maps_free(void)
3693 {
3694         kfree(its_srat_maps);
3695 }
3696 #else
3697 static void __init acpi_table_parse_srat_its(void)      { }
3698 static int __init acpi_get_its_numa_node(u32 its_id) { return NUMA_NO_NODE; }
3699 static void __init acpi_its_srat_maps_free(void) { }
3700 #endif
3701
3702 static int __init gic_acpi_parse_madt_its(struct acpi_subtable_header *header,
3703                                           const unsigned long end)
3704 {
3705         struct acpi_madt_generic_translator *its_entry;
3706         struct fwnode_handle *dom_handle;
3707         struct resource res;
3708         int err;
3709
3710         its_entry = (struct acpi_madt_generic_translator *)header;
3711         memset(&res, 0, sizeof(res));
3712         res.start = its_entry->base_address;
3713         res.end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1;
3714         res.flags = IORESOURCE_MEM;
3715
3716         dom_handle = irq_domain_alloc_fwnode((void *)its_entry->base_address);
3717         if (!dom_handle) {
3718                 pr_err("ITS@%pa: Unable to allocate GICv3 ITS domain token\n",
3719                        &res.start);
3720                 return -ENOMEM;
3721         }
3722
3723         err = iort_register_domain_token(its_entry->translation_id, res.start,
3724                                          dom_handle);
3725         if (err) {
3726                 pr_err("ITS@%pa: Unable to register GICv3 ITS domain token (ITS ID %d) to IORT\n",
3727                        &res.start, its_entry->translation_id);
3728                 goto dom_err;
3729         }
3730
3731         err = its_probe_one(&res, dom_handle,
3732                         acpi_get_its_numa_node(its_entry->translation_id));
3733         if (!err)
3734                 return 0;
3735
3736         iort_deregister_domain_token(its_entry->translation_id);
3737 dom_err:
3738         irq_domain_free_fwnode(dom_handle);
3739         return err;
3740 }
3741
3742 static void __init its_acpi_probe(void)
3743 {
3744         acpi_table_parse_srat_its();
3745         acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
3746                               gic_acpi_parse_madt_its, 0);
3747         acpi_its_srat_maps_free();
3748 }
3749 #else
3750 static void __init its_acpi_probe(void) { }
3751 #endif
3752
3753 int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
3754                     struct irq_domain *parent_domain)
3755 {
3756         struct device_node *of_node;
3757         struct its_node *its;
3758         bool has_v4 = false;
3759         int err;
3760
3761         its_parent = parent_domain;
3762         of_node = to_of_node(handle);
3763         if (of_node)
3764                 its_of_probe(of_node);
3765         else
3766                 its_acpi_probe();
3767
3768         if (list_empty(&its_nodes)) {
3769                 pr_warn("ITS: No ITS available, not enabling LPIs\n");
3770                 return -ENXIO;
3771         }
3772
3773         gic_rdists = rdists;
3774         err = its_alloc_lpi_tables();
3775         if (err)
3776                 return err;
3777
3778         list_for_each_entry(its, &its_nodes, entry)
3779                 has_v4 |= its->is_v4;
3780
3781         if (has_v4 && rdists->has_vlpis) {
3782                 if (its_init_vpe_domain() ||
3783                     its_init_v4(parent_domain, &its_vpe_domain_ops)) {
3784                         rdists->has_vlpis = false;
3785                         pr_err("ITS: Disabling GICv4 support\n");
3786                 }
3787         }
3788
3789         register_syscore_ops(&its_syscore_ops);
3790
3791         return 0;
3792 }