// SPDX-License-Identifier: GPL-2.0
//
// regmap based irq_chip
//
// Copyright 2011 Wolfson Microelectronics plc
//
// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>

#include <linux/device.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/slab.h>

#include "internal.h"

struct regmap_irq_chip_data {
        struct mutex lock;
        struct irq_chip irq_chip;

        struct regmap *map;
        const struct regmap_irq_chip *chip;

        int irq_base;
        struct irq_domain *domain;

        int irq;
        int wake_count;

        void *status_reg_buf;
        unsigned int *main_status_buf;
        unsigned int *status_buf;
        unsigned int *mask_buf;
        unsigned int *mask_buf_def;
        unsigned int *wake_buf;
        unsigned int *type_buf;
        unsigned int *type_buf_def;

        unsigned int irq_reg_stride;
        unsigned int type_reg_stride;

        bool clear_status:1;
};

static inline const
struct regmap_irq *irq_to_regmap_irq(struct regmap_irq_chip_data *data,
                                     int irq)
{
        return &data->chip->irqs[irq];
}

static void regmap_irq_lock(struct irq_data *data)
{
        struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);

        mutex_lock(&d->lock);
}

static int regmap_irq_update_bits(struct regmap_irq_chip_data *d,
                                  unsigned int reg, unsigned int mask,
                                  unsigned int val)
{
        if (d->chip->mask_writeonly)
                return regmap_write_bits(d->map, reg, mask, val);
        else
                return regmap_update_bits(d->map, reg, mask, val);
}

static void regmap_irq_sync_unlock(struct irq_data *data)
{
        struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
        struct regmap *map = d->map;
        int i, ret;
        u32 reg;
        u32 unmask_offset;
        u32 val;

        if (d->chip->runtime_pm) {
                ret = pm_runtime_get_sync(map->dev);
                if (ret < 0)
                        dev_err(map->dev, "IRQ sync failed to resume: %d\n",
                                ret);
        }

        if (d->clear_status) {
                for (i = 0; i < d->chip->num_regs; i++) {
                        reg = d->chip->status_base +
                                (i * map->reg_stride * d->irq_reg_stride);

                        ret = regmap_read(map, reg, &val);
                        if (ret)
                                dev_err(d->map->dev,
                                        "Failed to clear the interrupt status bits\n");
                }

                d->clear_status = false;
        }

        /*
         * If there's been a change in the mask write it back to the
         * hardware.  We rely on the use of the regmap core cache to
         * suppress pointless writes.
         */
        for (i = 0; i < d->chip->num_regs; i++) {
                if (!d->chip->mask_base)
                        continue;

                reg = d->chip->mask_base +
                        (i * map->reg_stride * d->irq_reg_stride);
                if (d->chip->mask_invert) {
                        ret = regmap_irq_update_bits(d, reg,
                                         d->mask_buf_def[i], ~d->mask_buf[i]);
                } else if (d->chip->unmask_base) {
                        /* set mask with mask_base register */
                        ret = regmap_irq_update_bits(d, reg,
                                        d->mask_buf_def[i], ~d->mask_buf[i]);
                        if (ret < 0)
                                dev_err(d->map->dev,
                                        "Failed to sync unmasks in %x\n",
                                        reg);
                        unmask_offset = d->chip->unmask_base -
                                                        d->chip->mask_base;
                        /* clear mask with unmask_base register */
                        ret = regmap_irq_update_bits(d,
                                        reg + unmask_offset,
                                        d->mask_buf_def[i],
                                        d->mask_buf[i]);
                } else {
                        ret = regmap_irq_update_bits(d, reg,
                                         d->mask_buf_def[i], d->mask_buf[i]);
                }
                if (ret != 0)
                        dev_err(d->map->dev, "Failed to sync masks in %x\n",
                                reg);

                reg = d->chip->wake_base +
                        (i * map->reg_stride * d->irq_reg_stride);
                if (d->wake_buf) {
                        if (d->chip->wake_invert)
                                ret = regmap_irq_update_bits(d, reg,
                                                         d->mask_buf_def[i],
                                                         ~d->wake_buf[i]);
                        else
                                ret = regmap_irq_update_bits(d, reg,
                                                         d->mask_buf_def[i],
                                                         d->wake_buf[i]);
                        if (ret != 0)
                                dev_err(d->map->dev,
                                        "Failed to sync wakes in %x: %d\n",
                                        reg, ret);
                }

                if (!d->chip->init_ack_masked)
                        continue;
                /*
                 * Ack all the masked interrupts unconditionally: a masked
                 * interrupt that is left unacked will be ignored by the irq
                 * handler and may then cause an interrupt storm.
                 */
                if (d->mask_buf[i] && (d->chip->ack_base || d->chip->use_ack)) {
                        reg = d->chip->ack_base +
                                (i * map->reg_stride * d->irq_reg_stride);
                        /* some chips ack by writing 0 */
                        if (d->chip->ack_invert)
                                ret = regmap_write(map, reg, ~d->mask_buf[i]);
                        else
                                ret = regmap_write(map, reg, d->mask_buf[i]);
                        if (d->chip->clear_ack) {
                                if (d->chip->ack_invert && !ret)
                                        ret = regmap_write(map, reg,
                                                           d->mask_buf[i]);
                                else if (!ret)
                                        ret = regmap_write(map, reg,
                                                           ~d->mask_buf[i]);
                        }
                        if (ret != 0)
                                dev_err(d->map->dev, "Failed to ack 0x%x: %d\n",
                                        reg, ret);
                }
        }

        /* Don't update the type bits if we're using mask bits for irq type. */
        if (!d->chip->type_in_mask) {
                for (i = 0; i < d->chip->num_type_reg; i++) {
                        if (!d->type_buf_def[i])
                                continue;
                        reg = d->chip->type_base +
                                (i * map->reg_stride * d->type_reg_stride);
                        if (d->chip->type_invert)
                                ret = regmap_irq_update_bits(d, reg,
                                        d->type_buf_def[i], ~d->type_buf[i]);
                        else
                                ret = regmap_irq_update_bits(d, reg,
                                        d->type_buf_def[i], d->type_buf[i]);
                        if (ret != 0)
                                dev_err(d->map->dev, "Failed to sync type in %x\n",
                                        reg);
                }
        }

        if (d->chip->runtime_pm)
                pm_runtime_put(map->dev);

        /* If we've changed our wakeup count propagate it to the parent */
        if (d->wake_count < 0)
                for (i = d->wake_count; i < 0; i++)
                        irq_set_irq_wake(d->irq, 0);
        else if (d->wake_count > 0)
                for (i = 0; i < d->wake_count; i++)
                        irq_set_irq_wake(d->irq, 1);

        d->wake_count = 0;

        mutex_unlock(&d->lock);
}

static void regmap_irq_enable(struct irq_data *data)
{
        struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
        struct regmap *map = d->map;
        const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
        unsigned int mask, type;

        type = irq_data->type.type_falling_val | irq_data->type.type_rising_val;

        /*
         * The type_in_mask flag means that the underlying hardware uses
         * separate mask bits for rising and falling edge interrupts, but
         * we want to make them into a single virtual interrupt with
         * configurable edge.
         *
         * If the interrupt we're enabling defines the falling or rising
         * masks then instead of using the regular mask bits for this
         * interrupt, use the value previously written to the type buffer
         * at the corresponding offset in regmap_irq_set_type().
         */
        if (d->chip->type_in_mask && type)
                mask = d->type_buf[irq_data->reg_offset / map->reg_stride];
        else
                mask = irq_data->mask;

        if (d->chip->clear_on_unmask)
                d->clear_status = true;

        d->mask_buf[irq_data->reg_offset / map->reg_stride] &= ~mask;
}

static void regmap_irq_disable(struct irq_data *data)
{
        struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
        struct regmap *map = d->map;
        const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);

        d->mask_buf[irq_data->reg_offset / map->reg_stride] |= irq_data->mask;
}

static int regmap_irq_set_type(struct irq_data *data, unsigned int type)
{
        struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
        struct regmap *map = d->map;
        const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
        int reg;
        const struct regmap_irq_type *t = &irq_data->type;

        if ((t->types_supported & type) != type)
                return 0;

        reg = t->type_reg_offset / map->reg_stride;

        if (t->type_reg_mask)
                d->type_buf[reg] &= ~t->type_reg_mask;
        else
                d->type_buf[reg] &= ~(t->type_falling_val |
                                      t->type_rising_val |
                                      t->type_level_low_val |
                                      t->type_level_high_val);
        switch (type) {
        case IRQ_TYPE_EDGE_FALLING:
                d->type_buf[reg] |= t->type_falling_val;
                break;

        case IRQ_TYPE_EDGE_RISING:
                d->type_buf[reg] |= t->type_rising_val;
                break;

        case IRQ_TYPE_EDGE_BOTH:
                d->type_buf[reg] |= (t->type_falling_val |
                                        t->type_rising_val);
                break;

        case IRQ_TYPE_LEVEL_HIGH:
                d->type_buf[reg] |= t->type_level_high_val;
                break;

        case IRQ_TYPE_LEVEL_LOW:
                d->type_buf[reg] |= t->type_level_low_val;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}
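
/*
 * Illustrative sketch (not part of this driver): regmap_irq_set_type() only
 * acts on IRQs whose struct regmap_irq carries a type description.  A
 * hypothetical driver with edge-configurable interrupts might describe one
 * as below; every register offset and bit value here is made up.
 *
 *	static const struct regmap_irq example_irqs[] = {
 *		[0] = {
 *			.reg_offset = 0x0,
 *			.mask = BIT(0),
 *			.type = {
 *				.type_reg_offset  = 0x0,
 *				.types_supported  = IRQ_TYPE_EDGE_BOTH,
 *				.type_rising_val  = BIT(0),
 *				.type_falling_val = BIT(1),
 *				.type_reg_mask    = BIT(0) | BIT(1),
 *			},
 *		},
 *	};
 *
 * With such a description IRQ_TYPE_EDGE_BOTH sets both value bits in
 * d->type_buf, and regmap_irq_sync_unlock() later writes them to the
 * hardware (or they are used as the mask when type_in_mask is set).
 */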

static int regmap_irq_set_wake(struct irq_data *data, unsigned int on)
{
        struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
        struct regmap *map = d->map;
        const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);

        if (on) {
                if (d->wake_buf)
                        d->wake_buf[irq_data->reg_offset / map->reg_stride]
                                &= ~irq_data->mask;
                d->wake_count++;
        } else {
                if (d->wake_buf)
                        d->wake_buf[irq_data->reg_offset / map->reg_stride]
                                |= irq_data->mask;
                d->wake_count--;
        }

        return 0;
}

static const struct irq_chip regmap_irq_chip = {
        .irq_bus_lock           = regmap_irq_lock,
        .irq_bus_sync_unlock    = regmap_irq_sync_unlock,
        .irq_disable            = regmap_irq_disable,
        .irq_enable             = regmap_irq_enable,
        .irq_set_type           = regmap_irq_set_type,
        .irq_set_wake           = regmap_irq_set_wake,
};

static inline int read_sub_irq_data(struct regmap_irq_chip_data *data,
                                           unsigned int b)
{
        const struct regmap_irq_chip *chip = data->chip;
        struct regmap *map = data->map;
        struct regmap_irq_sub_irq_map *subreg;
        int i, ret = 0;

        if (!chip->sub_reg_offsets) {
                /* Assume linear mapping */
                ret = regmap_read(map, chip->status_base +
                                  (b * map->reg_stride * data->irq_reg_stride),
                                   &data->status_buf[b]);
        } else {
                subreg = &chip->sub_reg_offsets[b];
                for (i = 0; i < subreg->num_regs; i++) {
                        unsigned int offset = subreg->offset[i];

                        ret = regmap_read(map, chip->status_base + offset,
                                          &data->status_buf[offset]);
                        if (ret)
                                break;
                }
        }
        return ret;
}
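
/*
 * Illustrative sketch (hypothetical layout): when the bits of the main
 * status register do not map linearly onto consecutive sub status
 * registers, a chip description can spell the mapping out via
 * sub_reg_offsets.  For example, main status bit 0 covering two sub
 * registers and bit 1 covering one might look like:
 *
 *	static unsigned int bit0_offsets[] = { 0, 1 };
 *	static unsigned int bit1_offsets[] = { 2 };
 *
 *	static struct regmap_irq_sub_irq_map example_sub_reg_offsets[] = {
 *		REGMAP_IRQ_MAIN_REG_OFFSET(bit0_offsets),
 *		REGMAP_IRQ_MAIN_REG_OFFSET(bit1_offsets),
 *	};
 *
 * read_sub_irq_data() then reads status_base + offset for each listed
 * offset instead of assuming the linear layout used above.
 */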

static irqreturn_t regmap_irq_thread(int irq, void *d)
{
        struct regmap_irq_chip_data *data = d;
        const struct regmap_irq_chip *chip = data->chip;
        struct regmap *map = data->map;
        int ret, i;
        bool handled = false;
        u32 reg;

        if (chip->handle_pre_irq)
                chip->handle_pre_irq(chip->irq_drv_data);

        if (chip->runtime_pm) {
                ret = pm_runtime_get_sync(map->dev);
                if (ret < 0) {
                        dev_err(map->dev, "IRQ thread failed to resume: %d\n",
                                ret);
                        goto exit;
                }
        }
        /*
         * If the chip has a 'main status register', read only the status
         * registers with active IRQs.  Otherwise read in all the statuses,
         * using a single bulk read if possible in order to reduce the I/O
         * overhead.
         */

        if (chip->num_main_regs) {
                unsigned int max_main_bits;
                unsigned long size;

                size = chip->num_regs * sizeof(unsigned int);

                max_main_bits = (chip->num_main_status_bits) ?
                                 chip->num_main_status_bits : chip->num_regs;
                /* Clear the status buf as we don't read all status regs */
                memset(data->status_buf, 0, size);
                /*
                 * We could support bulk reads for the main status registers,
                 * but devices with a large number of them are not expected,
                 * so only single reads are supported for the sake of
                 * simplicity; bulk reads can be added if they are ever needed.
                 */
                for (i = 0; i < chip->num_main_regs; i++) {
                        ret = regmap_read(map, chip->main_status +
                                  (i * map->reg_stride
                                   * data->irq_reg_stride),
                                  &data->main_status_buf[i]);
                        if (ret) {
                                dev_err(map->dev,
                                        "Failed to read IRQ status %d\n",
                                        ret);
                                goto exit;
                        }
                }

                /* Read sub registers with active IRQs */
                for (i = 0; i < chip->num_main_regs; i++) {
                        unsigned int b;
                        const unsigned long mreg = data->main_status_buf[i];

                        for_each_set_bit(b, &mreg, map->format.val_bytes * 8) {
                                if (i * map->format.val_bytes * 8 + b >
                                    max_main_bits)
                                        break;
                                ret = read_sub_irq_data(data, b);

                                if (ret != 0) {
                                        dev_err(map->dev,
                                                "Failed to read IRQ status %d\n",
                                                ret);
                                        goto exit;
                                }
                        }

                }
        } else if (!map->use_single_read && map->reg_stride == 1 &&
                   data->irq_reg_stride == 1) {

                u8 *buf8 = data->status_reg_buf;
                u16 *buf16 = data->status_reg_buf;
                u32 *buf32 = data->status_reg_buf;

                BUG_ON(!data->status_reg_buf);

                ret = regmap_bulk_read(map, chip->status_base,
                                       data->status_reg_buf,
                                       chip->num_regs);
                if (ret != 0) {
                        dev_err(map->dev, "Failed to read IRQ status: %d\n",
                                ret);
                        goto exit;
                }

                for (i = 0; i < data->chip->num_regs; i++) {
                        switch (map->format.val_bytes) {
                        case 1:
                                data->status_buf[i] = buf8[i];
                                break;
                        case 2:
                                data->status_buf[i] = buf16[i];
                                break;
                        case 4:
                                data->status_buf[i] = buf32[i];
                                break;
                        default:
                                BUG();
                                goto exit;
                        }
                }

        } else {
                for (i = 0; i < data->chip->num_regs; i++) {
                        ret = regmap_read(map, chip->status_base +
                                          (i * map->reg_stride
                                           * data->irq_reg_stride),
                                          &data->status_buf[i]);

                        if (ret != 0) {
                                dev_err(map->dev,
                                        "Failed to read IRQ status: %d\n",
                                        ret);
                                goto exit;
                        }
                }
        }

        /*
         * Ignore masked IRQs and ack if we need to; we ack early so
         * there is no race between handling and acknowledging the
         * interrupt.  We assume that typically few of the interrupts
         * will fire simultaneously so don't worry about overhead from
         * doing a write per register.
         */
        for (i = 0; i < data->chip->num_regs; i++) {
                data->status_buf[i] &= ~data->mask_buf[i];

                if (data->status_buf[i] && (chip->ack_base || chip->use_ack)) {
                        reg = chip->ack_base +
                                (i * map->reg_stride * data->irq_reg_stride);
                        if (chip->ack_invert)
                                ret = regmap_write(map, reg,
                                                ~data->status_buf[i]);
                        else
                                ret = regmap_write(map, reg,
                                                data->status_buf[i]);
                        if (chip->clear_ack) {
                                if (chip->ack_invert && !ret)
                                        ret = regmap_write(map, reg,
                                                        data->status_buf[i]);
                                else if (!ret)
                                        ret = regmap_write(map, reg,
                                                        ~data->status_buf[i]);
                        }
                        if (ret != 0)
                                dev_err(map->dev, "Failed to ack 0x%x: %d\n",
                                        reg, ret);
                }
        }

        for (i = 0; i < chip->num_irqs; i++) {
                if (data->status_buf[chip->irqs[i].reg_offset /
                                     map->reg_stride] & chip->irqs[i].mask) {
                        handle_nested_irq(irq_find_mapping(data->domain, i));
                        handled = true;
                }
        }

exit:
        if (chip->runtime_pm)
                pm_runtime_put(map->dev);

        if (chip->handle_post_irq)
                chip->handle_post_irq(chip->irq_drv_data);

        if (handled)
                return IRQ_HANDLED;
        else
                return IRQ_NONE;
}

static int regmap_irq_map(struct irq_domain *h, unsigned int virq,
                          irq_hw_number_t hw)
{
        struct regmap_irq_chip_data *data = h->host_data;

        irq_set_chip_data(virq, data);
        irq_set_chip(virq, &data->irq_chip);
        irq_set_nested_thread(virq, 1);
        irq_set_parent(virq, data->irq);
        irq_set_noprobe(virq);

        return 0;
}

static const struct irq_domain_ops regmap_domain_ops = {
        .map    = regmap_irq_map,
        .xlate  = irq_domain_xlate_onetwocell,
};

/**
 * regmap_add_irq_chip_fwnode() - Use standard regmap IRQ controller handling
 *
 * @fwnode: The firmware node to which the IRQ domain should be added.
 * @map: The regmap for the device.
 * @irq: The IRQ the device uses to signal interrupts.
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
 * @irq_base: Allocate at specific IRQ number if irq_base > 0.
 * @chip: Configuration for the interrupt controller.
 * @data: Runtime data structure for the controller, allocated on success.
 *
 * Returns 0 on success or an errno on failure.
 *
 * In order for this to be efficient the chip really should use a
 * register cache.  The chip driver is responsible for restoring the
 * register values used by the IRQ controller over suspend and resume.
 */
int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
                               struct regmap *map, int irq,
                               int irq_flags, int irq_base,
                               const struct regmap_irq_chip *chip,
                               struct regmap_irq_chip_data **data)
{
        struct regmap_irq_chip_data *d;
        int i;
        int ret = -ENOMEM;
        int num_type_reg;
        u32 reg;
        u32 unmask_offset;

        if (chip->num_regs <= 0)
                return -EINVAL;

        if (chip->clear_on_unmask && (chip->ack_base || chip->use_ack))
                return -EINVAL;

        for (i = 0; i < chip->num_irqs; i++) {
                if (chip->irqs[i].reg_offset % map->reg_stride)
                        return -EINVAL;
                if (chip->irqs[i].reg_offset / map->reg_stride >=
                    chip->num_regs)
                        return -EINVAL;
        }

        if (irq_base) {
                irq_base = irq_alloc_descs(irq_base, 0, chip->num_irqs, 0);
                if (irq_base < 0) {
                        dev_warn(map->dev, "Failed to allocate IRQs: %d\n",
                                 irq_base);
                        return irq_base;
                }
        }

        d = kzalloc(sizeof(*d), GFP_KERNEL);
        if (!d)
                return -ENOMEM;

        if (chip->num_main_regs) {
                d->main_status_buf = kcalloc(chip->num_main_regs,
                                             sizeof(unsigned int),
                                             GFP_KERNEL);

                if (!d->main_status_buf)
                        goto err_alloc;
        }

        d->status_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
                                GFP_KERNEL);
        if (!d->status_buf)
                goto err_alloc;

        d->mask_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
                              GFP_KERNEL);
        if (!d->mask_buf)
                goto err_alloc;

        d->mask_buf_def = kcalloc(chip->num_regs, sizeof(unsigned int),
                                  GFP_KERNEL);
        if (!d->mask_buf_def)
                goto err_alloc;

        if (chip->wake_base) {
                d->wake_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
                                      GFP_KERNEL);
                if (!d->wake_buf)
                        goto err_alloc;
        }

        num_type_reg = chip->type_in_mask ? chip->num_regs : chip->num_type_reg;
        if (num_type_reg) {
                d->type_buf_def = kcalloc(num_type_reg,
                                          sizeof(unsigned int), GFP_KERNEL);
                if (!d->type_buf_def)
                        goto err_alloc;

                d->type_buf = kcalloc(num_type_reg, sizeof(unsigned int),
                                      GFP_KERNEL);
                if (!d->type_buf)
                        goto err_alloc;
        }

        d->irq_chip = regmap_irq_chip;
        d->irq_chip.name = chip->name;
        d->irq = irq;
        d->map = map;
        d->chip = chip;
        d->irq_base = irq_base;

        if (chip->irq_reg_stride)
                d->irq_reg_stride = chip->irq_reg_stride;
        else
                d->irq_reg_stride = 1;

        if (chip->type_reg_stride)
                d->type_reg_stride = chip->type_reg_stride;
        else
                d->type_reg_stride = 1;

        if (!map->use_single_read && map->reg_stride == 1 &&
            d->irq_reg_stride == 1) {
                d->status_reg_buf = kmalloc_array(chip->num_regs,
                                                  map->format.val_bytes,
                                                  GFP_KERNEL);
                if (!d->status_reg_buf)
                        goto err_alloc;
        }

        mutex_init(&d->lock);

        for (i = 0; i < chip->num_irqs; i++)
                d->mask_buf_def[chip->irqs[i].reg_offset / map->reg_stride]
                        |= chip->irqs[i].mask;

        /* Mask all the interrupts by default */
        for (i = 0; i < chip->num_regs; i++) {
                d->mask_buf[i] = d->mask_buf_def[i];
                if (!chip->mask_base)
                        continue;

                reg = chip->mask_base +
                        (i * map->reg_stride * d->irq_reg_stride);
                if (chip->mask_invert)
                        ret = regmap_irq_update_bits(d, reg,
                                         d->mask_buf[i], ~d->mask_buf[i]);
                else if (d->chip->unmask_base) {
                        unmask_offset = d->chip->unmask_base -
                                        d->chip->mask_base;
                        ret = regmap_irq_update_bits(d,
                                        reg + unmask_offset,
                                        d->mask_buf[i],
                                        d->mask_buf[i]);
                } else
                        ret = regmap_irq_update_bits(d, reg,
                                         d->mask_buf[i], d->mask_buf[i]);
                if (ret != 0) {
                        dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
                                reg, ret);
                        goto err_alloc;
                }

                if (!chip->init_ack_masked)
                        continue;

                /* Ack masked but set interrupts */
                reg = chip->status_base +
                        (i * map->reg_stride * d->irq_reg_stride);
                ret = regmap_read(map, reg, &d->status_buf[i]);
                if (ret != 0) {
                        dev_err(map->dev, "Failed to read IRQ status: %d\n",
                                ret);
                        goto err_alloc;
                }

                if (d->status_buf[i] && (chip->ack_base || chip->use_ack)) {
                        reg = chip->ack_base +
                                (i * map->reg_stride * d->irq_reg_stride);
                        if (chip->ack_invert)
                                ret = regmap_write(map, reg,
                                        ~(d->status_buf[i] & d->mask_buf[i]));
                        else
                                ret = regmap_write(map, reg,
                                        d->status_buf[i] & d->mask_buf[i]);
                        if (chip->clear_ack) {
                                if (chip->ack_invert && !ret)
                                        ret = regmap_write(map, reg,
                                                (d->status_buf[i] &
                                                 d->mask_buf[i]));
                                else if (!ret)
                                        ret = regmap_write(map, reg,
                                                ~(d->status_buf[i] &
                                                  d->mask_buf[i]));
                        }
                        if (ret != 0) {
                                dev_err(map->dev, "Failed to ack 0x%x: %d\n",
                                        reg, ret);
                                goto err_alloc;
                        }
                }
        }

        /* Wake is disabled by default */
        if (d->wake_buf) {
                for (i = 0; i < chip->num_regs; i++) {
                        d->wake_buf[i] = d->mask_buf_def[i];
                        reg = chip->wake_base +
                                (i * map->reg_stride * d->irq_reg_stride);

                        if (chip->wake_invert)
                                ret = regmap_irq_update_bits(d, reg,
                                                         d->mask_buf_def[i],
                                                         0);
                        else
                                ret = regmap_irq_update_bits(d, reg,
                                                         d->mask_buf_def[i],
                                                         d->wake_buf[i]);
                        if (ret != 0) {
                                dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
                                        reg, ret);
                                goto err_alloc;
                        }
                }
        }

        if (chip->num_type_reg && !chip->type_in_mask) {
                for (i = 0; i < chip->num_type_reg; ++i) {
                        reg = chip->type_base +
                                (i * map->reg_stride * d->type_reg_stride);

                        ret = regmap_read(map, reg, &d->type_buf_def[i]);

                        if (d->chip->type_invert)
                                d->type_buf_def[i] = ~d->type_buf_def[i];

                        if (ret) {
                                dev_err(map->dev, "Failed to get type defaults at 0x%x: %d\n",
                                        reg, ret);
                                goto err_alloc;
                        }
                }
        }

        if (irq_base)
                d->domain = irq_domain_add_legacy(to_of_node(fwnode),
                                                  chip->num_irqs, irq_base,
                                                  0, &regmap_domain_ops, d);
        else
                d->domain = irq_domain_add_linear(to_of_node(fwnode),
                                                  chip->num_irqs,
                                                  &regmap_domain_ops, d);
        if (!d->domain) {
                dev_err(map->dev, "Failed to create IRQ domain\n");
                ret = -ENOMEM;
                goto err_alloc;
        }

        ret = request_threaded_irq(irq, NULL, regmap_irq_thread,
                                   irq_flags | IRQF_ONESHOT,
                                   chip->name, d);
        if (ret != 0) {
                dev_err(map->dev, "Failed to request IRQ %d for %s: %d\n",
                        irq, chip->name, ret);
                goto err_domain;
        }

        *data = d;

        return 0;

err_domain:
        /* Should really dispose of the domain but... */
err_alloc:
        kfree(d->type_buf);
        kfree(d->type_buf_def);
        kfree(d->wake_buf);
        kfree(d->mask_buf_def);
        kfree(d->mask_buf);
        kfree(d->status_buf);
        kfree(d->status_reg_buf);
        kfree(d);
        return ret;
}
EXPORT_SYMBOL_GPL(regmap_add_irq_chip_fwnode);

/**
 * regmap_add_irq_chip() - Use standard regmap IRQ controller handling
 *
 * @map: The regmap for the device.
 * @irq: The IRQ the device uses to signal interrupts.
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
 * @irq_base: Allocate at specific IRQ number if irq_base > 0.
 * @chip: Configuration for the interrupt controller.
 * @data: Runtime data structure for the controller, allocated on success.
 *
 * Returns 0 on success or an errno on failure.
 *
 * This is the same as regmap_add_irq_chip_fwnode, except that the firmware
 * node of the regmap is used.
 */
int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
                        int irq_base, const struct regmap_irq_chip *chip,
                        struct regmap_irq_chip_data **data)
{
        return regmap_add_irq_chip_fwnode(dev_fwnode(map->dev), map, irq,
                                          irq_flags, irq_base, chip, data);
}
EXPORT_SYMBOL_GPL(regmap_add_irq_chip);
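
/*
 * Illustrative usage sketch (all names, registers and bit values are
 * hypothetical): a caller typically declares one struct regmap_irq per
 * interrupt, wraps them in a struct regmap_irq_chip and registers the
 * whole thing from probe():
 *
 *	static const struct regmap_irq example_irqs[] = {
 *		REGMAP_IRQ_REG(0, 0x0, BIT(0)),
 *		REGMAP_IRQ_REG(1, 0x0, BIT(1)),
 *	};
 *
 *	static const struct regmap_irq_chip example_irq_chip = {
 *		.name		= "example",
 *		.status_base	= 0x10,
 *		.mask_base	= 0x11,
 *		.ack_base	= 0x12,
 *		.num_regs	= 1,
 *		.irqs		= example_irqs,
 *		.num_irqs	= ARRAY_SIZE(example_irqs),
 *	};
 *
 *	ret = regmap_add_irq_chip(map, i2c->irq, IRQF_TRIGGER_LOW, 0,
 *				  &example_irq_chip, &irq_data);
 *
 * The returned irq_data is later passed to regmap_del_irq_chip() or to the
 * helpers below to look up the child interrupts.
 */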

/**
 * regmap_del_irq_chip() - Stop interrupt handling for a regmap IRQ chip
 *
 * @irq: Primary IRQ for the device
 * @d: &regmap_irq_chip_data allocated by regmap_add_irq_chip()
 *
 * This function also disposes of all mapped IRQs on the chip.
 */
void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
{
        unsigned int virq;
        int hwirq;

        if (!d)
                return;

        free_irq(irq, d);

        /* Dispose of all virtual IRQs from the IRQ domain before removing it */
        for (hwirq = 0; hwirq < d->chip->num_irqs; hwirq++) {
                /* Ignore this hwirq if there are holes in the IRQ list */
                if (!d->chip->irqs[hwirq].mask)
                        continue;

                /*
                 * Find the virtual IRQ for this hwirq on the chip and
                 * dispose of it if a mapping exists.
                 */
                virq = irq_find_mapping(d->domain, hwirq);
                if (virq)
                        irq_dispose_mapping(virq);
        }

        irq_domain_remove(d->domain);
        kfree(d->type_buf);
        kfree(d->type_buf_def);
        kfree(d->wake_buf);
        kfree(d->mask_buf_def);
        kfree(d->mask_buf);
        kfree(d->status_reg_buf);
        kfree(d->status_buf);
        kfree(d);
}
EXPORT_SYMBOL_GPL(regmap_del_irq_chip);
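
/*
 * Illustrative sketch (hypothetical names): a driver that registered the
 * chip with regmap_add_irq_chip() undoes it from its remove path with
 *
 *	regmap_del_irq_chip(i2c->irq, irq_data);
 *
 * where irq_data is the pointer returned through the @data argument at
 * registration time.  Drivers using the devm_ variants below do not need
 * an explicit call.
 */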

static void devm_regmap_irq_chip_release(struct device *dev, void *res)
{
        struct regmap_irq_chip_data *d = *(struct regmap_irq_chip_data **)res;

        regmap_del_irq_chip(d->irq, d);
}

static int devm_regmap_irq_chip_match(struct device *dev, void *res, void *data)
{
        struct regmap_irq_chip_data **r = res;

        if (!r || !*r) {
                WARN_ON(!r || !*r);
                return 0;
        }
        return *r == data;
}

/**
 * devm_regmap_add_irq_chip_fwnode() - Resource managed regmap_add_irq_chip_fwnode()
 *
 * @dev: The device to which the irq_chip belongs.
 * @fwnode: The firmware node to which the IRQ domain should be added.
 * @map: The regmap for the device.
 * @irq: The IRQ the device uses to signal interrupts
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
 * @irq_base: Allocate at specific IRQ number if irq_base > 0.
 * @chip: Configuration for the interrupt controller.
 * @data: Runtime data structure for the controller, allocated on success
 *
 * Returns 0 on success or an errno on failure.
 *
 * The &regmap_irq_chip_data will be automatically released when the device is
 * unbound.
 */
int devm_regmap_add_irq_chip_fwnode(struct device *dev,
                                    struct fwnode_handle *fwnode,
                                    struct regmap *map, int irq,
                                    int irq_flags, int irq_base,
                                    const struct regmap_irq_chip *chip,
                                    struct regmap_irq_chip_data **data)
{
        struct regmap_irq_chip_data **ptr, *d;
        int ret;

        ptr = devres_alloc(devm_regmap_irq_chip_release, sizeof(*ptr),
                           GFP_KERNEL);
        if (!ptr)
                return -ENOMEM;

        ret = regmap_add_irq_chip_fwnode(fwnode, map, irq, irq_flags, irq_base,
                                         chip, &d);
        if (ret < 0) {
                devres_free(ptr);
                return ret;
        }

        *ptr = d;
        devres_add(dev, ptr);
        *data = d;
        return 0;
}
EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip_fwnode);

/**
 * devm_regmap_add_irq_chip() - Resource managed regmap_add_irq_chip()
 *
 * @dev: The device to which the irq_chip belongs.
 * @map: The regmap for the device.
 * @irq: The IRQ the device uses to signal interrupts
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
 * @irq_base: Allocate at specific IRQ number if irq_base > 0.
 * @chip: Configuration for the interrupt controller.
 * @data: Runtime data structure for the controller, allocated on success
 *
 * Returns 0 on success or an errno on failure.
 *
 * The &regmap_irq_chip_data will be automatically released when the device is
 * unbound.
 */
int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq,
                             int irq_flags, int irq_base,
                             const struct regmap_irq_chip *chip,
                             struct regmap_irq_chip_data **data)
{
        return devm_regmap_add_irq_chip_fwnode(dev, dev_fwnode(map->dev), map,
                                               irq, irq_flags, irq_base, chip,
                                               data);
}
EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip);
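
/*
 * Illustrative sketch (hypothetical driver): with the devm_ variants the
 * teardown is tied to the device lifetime, so probe() needs no matching
 * remove-time call:
 *
 *	static int example_probe(struct i2c_client *i2c)
 *	{
 *		struct regmap_irq_chip_data *irq_data;
 *		struct regmap *map;
 *		int ret;
 *
 *		map = devm_regmap_init_i2c(i2c, &example_regmap_config);
 *		if (IS_ERR(map))
 *			return PTR_ERR(map);
 *
 *		ret = devm_regmap_add_irq_chip(&i2c->dev, map, i2c->irq,
 *					       IRQF_ONESHOT, 0,
 *					       &example_irq_chip, &irq_data);
 *		if (ret)
 *			return ret;
 *
 *		return 0;
 *	}
 */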

/**
 * devm_regmap_del_irq_chip() - Resource managed regmap_del_irq_chip()
 *
 * @dev: Device for which the resource was allocated.
 * @irq: Primary IRQ for the device.
 * @data: &regmap_irq_chip_data allocated by regmap_add_irq_chip().
 *
 * A resource managed version of regmap_del_irq_chip().
 */
void devm_regmap_del_irq_chip(struct device *dev, int irq,
                              struct regmap_irq_chip_data *data)
{
        int rc;

        WARN_ON(irq != data->irq);
        rc = devres_release(dev, devm_regmap_irq_chip_release,
                            devm_regmap_irq_chip_match, data);

        if (rc != 0)
                WARN_ON(rc);
}
EXPORT_SYMBOL_GPL(devm_regmap_del_irq_chip);

/**
 * regmap_irq_chip_get_base() - Retrieve interrupt base for a regmap IRQ chip
 *
 * @data: regmap irq controller to operate on.
 *
 * Useful for drivers to request their own IRQs.
 */
int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data)
{
        WARN_ON(!data->irq_base);
        return data->irq_base;
}
EXPORT_SYMBOL_GPL(regmap_irq_chip_get_base);

/**
 * regmap_irq_get_virq() - Map an interrupt on a chip to a virtual IRQ
 *
 * @data: regmap irq controller to operate on.
 * @irq: index of the interrupt requested in the chip IRQs.
 *
 * Useful for drivers to request their own IRQs.
 */
int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq)
{
        /* Handle holes in the IRQ list */
        if (!data->chip->irqs[irq].mask)
                return -EINVAL;

        return irq_create_mapping(data->domain, irq);
}
EXPORT_SYMBOL_GPL(regmap_irq_get_virq);
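
/*
 * Illustrative sketch (hypothetical index and handler names): a sub-driver
 * that needs one of the chip's interrupts maps the chip-relative index to
 * a Linux IRQ number and requests it as usual.  irq_create_mapping()
 * returns 0 when no mapping can be created, so both cases are checked:
 *
 *	virq = regmap_irq_get_virq(irq_data, EXAMPLE_IRQ_ALARM);
 *	if (virq <= 0)
 *		return virq ? virq : -ENXIO;
 *
 *	ret = devm_request_threaded_irq(dev, virq, NULL, example_alarm_handler,
 *					IRQF_ONESHOT, "example-alarm", priv);
 */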

/**
 * regmap_irq_get_domain() - Retrieve the irq_domain for the chip
 *
 * @data: regmap_irq controller to operate on.
 *
 * Useful for drivers to request their own IRQs and for integration
 * with subsystems.  For ease of integration NULL is accepted as a
 * domain, allowing devices to just call this even if no domain is
 * allocated.
 */
struct irq_domain *regmap_irq_get_domain(struct regmap_irq_chip_data *data)
{
        if (data)
                return data->domain;
        else
                return NULL;
}
EXPORT_SYMBOL_GPL(regmap_irq_get_domain);