1 // SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 */
5 #include <linux/bitops.h>
6 #include <linux/debugfs.h>
7 #include <linux/slab.h>
9 #include "dpu_core_irq.h"
11 #include "dpu_hw_interrupts.h"
12 #include "dpu_hw_util.h"
13 #include "dpu_hw_mdss.h"
14 #include "dpu_trace.h"
/*
 * Register offsets in MDSS register file for the interrupt registers
 * w.r.t. the MDP base
 */
20 #define MDP_SSPP_TOP0_OFF 0x0
21 #define MDP_INTF_0_OFF 0x6A000
22 #define MDP_INTF_1_OFF 0x6A800
23 #define MDP_INTF_2_OFF 0x6B000
24 #define MDP_INTF_3_OFF 0x6B800
25 #define MDP_INTF_4_OFF 0x6C000
26 #define MDP_INTF_5_OFF 0x6C800
27 #define MDP_AD4_0_OFF 0x7C000
28 #define MDP_AD4_1_OFF 0x7D000
29 #define MDP_AD4_INTR_EN_OFF 0x41c
30 #define MDP_AD4_INTR_CLEAR_OFF 0x424
31 #define MDP_AD4_INTR_STATUS_OFF 0x420
32 #define MDP_INTF_0_OFF_REV_7xxx 0x34000
33 #define MDP_INTF_1_OFF_REV_7xxx 0x35000
34 #define MDP_INTF_2_OFF_REV_7xxx 0x36000
35 #define MDP_INTF_3_OFF_REV_7xxx 0x37000
36 #define MDP_INTF_4_OFF_REV_7xxx 0x38000
37 #define MDP_INTF_5_OFF_REV_7xxx 0x39000
40 * struct dpu_intr_reg - array of DPU register sets
41 * @clr_off: offset to CLEAR reg
42 * @en_off: offset to ENABLE reg
43 * @status_off: offset to STATUS reg
52 * struct dpu_intr_reg - List of DPU interrupt registers
54 * When making changes be sure to sync with dpu_hw_intr_reg
56 static const struct dpu_intr_reg dpu_intr_set[] = {
58 MDP_SSPP_TOP0_OFF+INTR_CLEAR,
59 MDP_SSPP_TOP0_OFF+INTR_EN,
60 MDP_SSPP_TOP0_OFF+INTR_STATUS
63 MDP_SSPP_TOP0_OFF+INTR2_CLEAR,
64 MDP_SSPP_TOP0_OFF+INTR2_EN,
65 MDP_SSPP_TOP0_OFF+INTR2_STATUS
68 MDP_SSPP_TOP0_OFF+HIST_INTR_CLEAR,
69 MDP_SSPP_TOP0_OFF+HIST_INTR_EN,
70 MDP_SSPP_TOP0_OFF+HIST_INTR_STATUS
73 MDP_INTF_0_OFF+INTF_INTR_CLEAR,
74 MDP_INTF_0_OFF+INTF_INTR_EN,
75 MDP_INTF_0_OFF+INTF_INTR_STATUS
78 MDP_INTF_1_OFF+INTF_INTR_CLEAR,
79 MDP_INTF_1_OFF+INTF_INTR_EN,
80 MDP_INTF_1_OFF+INTF_INTR_STATUS
83 MDP_INTF_2_OFF+INTF_INTR_CLEAR,
84 MDP_INTF_2_OFF+INTF_INTR_EN,
85 MDP_INTF_2_OFF+INTF_INTR_STATUS
88 MDP_INTF_3_OFF+INTF_INTR_CLEAR,
89 MDP_INTF_3_OFF+INTF_INTR_EN,
90 MDP_INTF_3_OFF+INTF_INTR_STATUS
93 MDP_INTF_4_OFF+INTF_INTR_CLEAR,
94 MDP_INTF_4_OFF+INTF_INTR_EN,
95 MDP_INTF_4_OFF+INTF_INTR_STATUS
98 MDP_INTF_5_OFF+INTF_INTR_CLEAR,
99 MDP_INTF_5_OFF+INTF_INTR_EN,
100 MDP_INTF_5_OFF+INTF_INTR_STATUS
103 MDP_AD4_0_OFF + MDP_AD4_INTR_CLEAR_OFF,
104 MDP_AD4_0_OFF + MDP_AD4_INTR_EN_OFF,
105 MDP_AD4_0_OFF + MDP_AD4_INTR_STATUS_OFF,
108 MDP_AD4_1_OFF + MDP_AD4_INTR_CLEAR_OFF,
109 MDP_AD4_1_OFF + MDP_AD4_INTR_EN_OFF,
110 MDP_AD4_1_OFF + MDP_AD4_INTR_STATUS_OFF,
113 MDP_INTF_0_OFF_REV_7xxx+INTF_INTR_CLEAR,
114 MDP_INTF_0_OFF_REV_7xxx+INTF_INTR_EN,
115 MDP_INTF_0_OFF_REV_7xxx+INTF_INTR_STATUS
118 MDP_INTF_1_OFF_REV_7xxx+INTF_INTR_CLEAR,
119 MDP_INTF_1_OFF_REV_7xxx+INTF_INTR_EN,
120 MDP_INTF_1_OFF_REV_7xxx+INTF_INTR_STATUS
123 MDP_INTF_2_OFF_REV_7xxx+INTF_INTR_CLEAR,
124 MDP_INTF_2_OFF_REV_7xxx+INTF_INTR_EN,
125 MDP_INTF_2_OFF_REV_7xxx+INTF_INTR_STATUS
128 MDP_INTF_3_OFF_REV_7xxx+INTF_INTR_CLEAR,
129 MDP_INTF_3_OFF_REV_7xxx+INTF_INTR_EN,
130 MDP_INTF_3_OFF_REV_7xxx+INTF_INTR_STATUS
133 MDP_INTF_4_OFF_REV_7xxx+INTF_INTR_CLEAR,
134 MDP_INTF_4_OFF_REV_7xxx+INTF_INTR_EN,
135 MDP_INTF_4_OFF_REV_7xxx+INTF_INTR_STATUS
138 MDP_INTF_5_OFF_REV_7xxx+INTF_INTR_CLEAR,
139 MDP_INTF_5_OFF_REV_7xxx+INTF_INTR_EN,
140 MDP_INTF_5_OFF_REV_7xxx+INTF_INTR_STATUS
/* Map a flat irq index to its register bank (32 irq bits per bank)... */
#define DPU_IRQ_REG(irq_idx)	((irq_idx) / 32)
/* ...and to its bit mask within that bank. Arguments are parenthesized so
 * expressions like DPU_IRQ_REG(a + b) expand correctly.
 */
#define DPU_IRQ_MASK(irq_idx)	(BIT((irq_idx) % 32))
148 * dpu_core_irq_callback_handler - dispatch core interrupts
149 * @dpu_kms: Pointer to DPU's KMS structure
150 * @irq_idx: interrupt index
152 static void dpu_core_irq_callback_handler(struct dpu_kms *dpu_kms, int irq_idx)
154 struct dpu_irq_callback *cb;
156 VERB("irq_idx=%d\n", irq_idx);
158 if (list_empty(&dpu_kms->hw_intr->irq_cb_tbl[irq_idx]))
159 DRM_ERROR("no registered cb, idx:%d\n", irq_idx);
161 atomic_inc(&dpu_kms->hw_intr->irq_counts[irq_idx]);
164 * Perform registered function callback
166 list_for_each_entry(cb, &dpu_kms->hw_intr->irq_cb_tbl[irq_idx], list)
168 cb->func(cb->arg, irq_idx);
171 irqreturn_t dpu_core_irq(struct dpu_kms *dpu_kms)
173 struct dpu_hw_intr *intr = dpu_kms->hw_intr;
179 unsigned long irq_flags;
184 spin_lock_irqsave(&intr->irq_lock, irq_flags);
185 for (reg_idx = 0; reg_idx < ARRAY_SIZE(dpu_intr_set); reg_idx++) {
186 if (!test_bit(reg_idx, &intr->irq_mask))
189 /* Read interrupt status */
190 irq_status = DPU_REG_READ(&intr->hw, dpu_intr_set[reg_idx].status_off);
192 /* Read enable mask */
193 enable_mask = DPU_REG_READ(&intr->hw, dpu_intr_set[reg_idx].en_off);
195 /* and clear the interrupt */
197 DPU_REG_WRITE(&intr->hw, dpu_intr_set[reg_idx].clr_off,
200 /* Finally update IRQ status based on enable mask */
201 irq_status &= enable_mask;
207 * Search through matching intr status.
209 while ((bit = ffs(irq_status)) != 0) {
210 irq_idx = DPU_IRQ_IDX(reg_idx, bit - 1);
212 dpu_core_irq_callback_handler(dpu_kms, irq_idx);
215 * When callback finish, clear the irq_status
216 * with the matching mask. Once irq_status
217 * is all cleared, the search can be stopped.
219 irq_status &= ~BIT(bit - 1);
223 /* ensure register writes go through */
226 spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
231 static int dpu_hw_intr_enable_irq_locked(struct dpu_hw_intr *intr, int irq_idx)
234 const struct dpu_intr_reg *reg;
235 const char *dbgstr = NULL;
236 uint32_t cache_irq_mask;
241 if (irq_idx < 0 || irq_idx >= intr->total_irqs) {
242 pr_err("invalid IRQ index: [%d]\n", irq_idx);
247 * The cache_irq_mask and hardware RMW operations needs to be done
248 * under irq_lock and it's the caller's responsibility to ensure that's
251 assert_spin_locked(&intr->irq_lock);
253 reg_idx = DPU_IRQ_REG(irq_idx);
254 reg = &dpu_intr_set[reg_idx];
256 cache_irq_mask = intr->cache_irq_mask[reg_idx];
257 if (cache_irq_mask & DPU_IRQ_MASK(irq_idx)) {
258 dbgstr = "DPU IRQ already set:";
260 dbgstr = "DPU IRQ enabled:";
262 cache_irq_mask |= DPU_IRQ_MASK(irq_idx);
263 /* Cleaning any pending interrupt */
264 DPU_REG_WRITE(&intr->hw, reg->clr_off, DPU_IRQ_MASK(irq_idx));
265 /* Enabling interrupts with the new mask */
266 DPU_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);
268 /* ensure register write goes through */
271 intr->cache_irq_mask[reg_idx] = cache_irq_mask;
274 pr_debug("%s MASK:0x%.8lx, CACHE-MASK:0x%.8x\n", dbgstr,
275 DPU_IRQ_MASK(irq_idx), cache_irq_mask);
280 static int dpu_hw_intr_disable_irq_locked(struct dpu_hw_intr *intr, int irq_idx)
283 const struct dpu_intr_reg *reg;
284 const char *dbgstr = NULL;
285 uint32_t cache_irq_mask;
290 if (irq_idx < 0 || irq_idx >= intr->total_irqs) {
291 pr_err("invalid IRQ index: [%d]\n", irq_idx);
296 * The cache_irq_mask and hardware RMW operations needs to be done
297 * under irq_lock and it's the caller's responsibility to ensure that's
300 assert_spin_locked(&intr->irq_lock);
302 reg_idx = DPU_IRQ_REG(irq_idx);
303 reg = &dpu_intr_set[reg_idx];
305 cache_irq_mask = intr->cache_irq_mask[reg_idx];
306 if ((cache_irq_mask & DPU_IRQ_MASK(irq_idx)) == 0) {
307 dbgstr = "DPU IRQ is already cleared:";
309 dbgstr = "DPU IRQ mask disable:";
311 cache_irq_mask &= ~DPU_IRQ_MASK(irq_idx);
312 /* Disable interrupts based on the new mask */
313 DPU_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);
314 /* Cleaning any pending interrupt */
315 DPU_REG_WRITE(&intr->hw, reg->clr_off, DPU_IRQ_MASK(irq_idx));
317 /* ensure register write goes through */
320 intr->cache_irq_mask[reg_idx] = cache_irq_mask;
323 pr_debug("%s MASK:0x%.8lx, CACHE-MASK:0x%.8x\n", dbgstr,
324 DPU_IRQ_MASK(irq_idx), cache_irq_mask);
329 static void dpu_clear_irqs(struct dpu_kms *dpu_kms)
331 struct dpu_hw_intr *intr = dpu_kms->hw_intr;
337 for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++) {
338 if (test_bit(i, &intr->irq_mask))
339 DPU_REG_WRITE(&intr->hw,
340 dpu_intr_set[i].clr_off, 0xffffffff);
343 /* ensure register writes go through */
347 static void dpu_disable_all_irqs(struct dpu_kms *dpu_kms)
349 struct dpu_hw_intr *intr = dpu_kms->hw_intr;
355 for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++) {
356 if (test_bit(i, &intr->irq_mask))
357 DPU_REG_WRITE(&intr->hw,
358 dpu_intr_set[i].en_off, 0x00000000);
361 /* ensure register writes go through */
365 u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, int irq_idx, bool clear)
367 struct dpu_hw_intr *intr = dpu_kms->hw_intr;
369 unsigned long irq_flags;
376 DPU_ERROR("[%pS] invalid irq_idx=%d\n",
377 __builtin_return_address(0), irq_idx);
381 if (irq_idx < 0 || irq_idx >= intr->total_irqs) {
382 pr_err("invalid IRQ index: [%d]\n", irq_idx);
386 spin_lock_irqsave(&intr->irq_lock, irq_flags);
388 reg_idx = DPU_IRQ_REG(irq_idx);
389 intr_status = DPU_REG_READ(&intr->hw,
390 dpu_intr_set[reg_idx].status_off) &
391 DPU_IRQ_MASK(irq_idx);
392 if (intr_status && clear)
393 DPU_REG_WRITE(&intr->hw, dpu_intr_set[reg_idx].clr_off,
396 /* ensure register writes go through */
399 spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
404 static void __intr_offset(struct dpu_mdss_cfg *m,
405 void __iomem *addr, struct dpu_hw_blk_reg_map *hw)
408 hw->blk_off = m->mdp[0].base;
409 hw->hwversion = m->hwversion;
412 struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
413 struct dpu_mdss_cfg *m)
415 struct dpu_hw_intr *intr;
418 return ERR_PTR(-EINVAL);
420 intr = kzalloc(sizeof(*intr), GFP_KERNEL);
422 return ERR_PTR(-ENOMEM);
424 __intr_offset(m, addr, &intr->hw);
426 intr->total_irqs = ARRAY_SIZE(dpu_intr_set) * 32;
428 intr->cache_irq_mask = kcalloc(ARRAY_SIZE(dpu_intr_set), sizeof(u32),
430 if (intr->cache_irq_mask == NULL) {
432 return ERR_PTR(-ENOMEM);
435 intr->irq_mask = m->mdss_irqs;
437 spin_lock_init(&intr->irq_lock);
442 void dpu_hw_intr_destroy(struct dpu_hw_intr *intr)
445 kfree(intr->cache_irq_mask);
447 kfree(intr->irq_cb_tbl);
448 kfree(intr->irq_counts);
454 int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, int irq_idx,
455 struct dpu_irq_callback *register_irq_cb)
457 unsigned long irq_flags;
459 if (!dpu_kms->hw_intr->irq_cb_tbl) {
460 DPU_ERROR("invalid params\n");
464 if (!register_irq_cb || !register_irq_cb->func) {
465 DPU_ERROR("invalid irq_cb:%d func:%d\n",
466 register_irq_cb != NULL,
468 register_irq_cb->func != NULL : -1);
472 if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->total_irqs) {
473 DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
477 VERB("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);
479 spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
480 trace_dpu_core_irq_register_callback(irq_idx, register_irq_cb);
481 list_del_init(®ister_irq_cb->list);
482 list_add_tail(®ister_irq_cb->list,
483 &dpu_kms->hw_intr->irq_cb_tbl[irq_idx]);
484 if (list_is_first(®ister_irq_cb->list,
485 &dpu_kms->hw_intr->irq_cb_tbl[irq_idx])) {
486 int ret = dpu_hw_intr_enable_irq_locked(
490 DPU_ERROR("Fail to enable IRQ for irq_idx:%d\n",
493 spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);
498 int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx,
499 struct dpu_irq_callback *register_irq_cb)
501 unsigned long irq_flags;
503 if (!dpu_kms->hw_intr->irq_cb_tbl) {
504 DPU_ERROR("invalid params\n");
508 if (!register_irq_cb || !register_irq_cb->func) {
509 DPU_ERROR("invalid irq_cb:%d func:%d\n",
510 register_irq_cb != NULL,
512 register_irq_cb->func != NULL : -1);
516 if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->total_irqs) {
517 DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
521 VERB("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);
523 spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
524 trace_dpu_core_irq_unregister_callback(irq_idx, register_irq_cb);
525 list_del_init(®ister_irq_cb->list);
526 /* empty callback list but interrupt is still enabled */
527 if (list_empty(&dpu_kms->hw_intr->irq_cb_tbl[irq_idx])) {
528 int ret = dpu_hw_intr_disable_irq_locked(
532 DPU_ERROR("Fail to disable IRQ for irq_idx:%d\n",
534 VERB("irq_idx=%d ret=%d\n", irq_idx, ret);
536 spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);
541 #ifdef CONFIG_DEBUG_FS
542 static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v)
544 struct dpu_kms *dpu_kms = s->private;
545 struct dpu_irq_callback *cb;
546 unsigned long irq_flags;
547 int i, irq_count, cb_count;
549 if (WARN_ON(!dpu_kms->hw_intr->irq_cb_tbl))
552 for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++) {
553 spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
555 irq_count = atomic_read(&dpu_kms->hw_intr->irq_counts[i]);
556 list_for_each_entry(cb, &dpu_kms->hw_intr->irq_cb_tbl[i], list)
558 spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);
560 if (irq_count || cb_count)
561 seq_printf(s, "idx:%d irq:%d cb:%d\n",
562 i, irq_count, cb_count);
568 DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_core_irq);
570 void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
571 struct dentry *parent)
573 debugfs_create_file("core_irq", 0600, parent, dpu_kms,
574 &dpu_debugfs_core_irq_fops);
578 void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms)
582 pm_runtime_get_sync(&dpu_kms->pdev->dev);
583 dpu_clear_irqs(dpu_kms);
584 dpu_disable_all_irqs(dpu_kms);
585 pm_runtime_put_sync(&dpu_kms->pdev->dev);
587 /* Create irq callbacks for all possible irq_idx */
588 dpu_kms->hw_intr->irq_cb_tbl = kcalloc(dpu_kms->hw_intr->total_irqs,
589 sizeof(struct list_head), GFP_KERNEL);
590 dpu_kms->hw_intr->irq_counts = kcalloc(dpu_kms->hw_intr->total_irqs,
591 sizeof(atomic_t), GFP_KERNEL);
592 for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++) {
593 INIT_LIST_HEAD(&dpu_kms->hw_intr->irq_cb_tbl[i]);
594 atomic_set(&dpu_kms->hw_intr->irq_counts[i], 0);
598 void dpu_core_irq_uninstall(struct dpu_kms *dpu_kms)
602 pm_runtime_get_sync(&dpu_kms->pdev->dev);
603 for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++)
604 if (!list_empty(&dpu_kms->hw_intr->irq_cb_tbl[i]))
605 DPU_ERROR("irq_idx=%d still enabled/registered\n", i);
607 dpu_clear_irqs(dpu_kms);
608 dpu_disable_all_irqs(dpu_kms);
609 pm_runtime_put_sync(&dpu_kms->pdev->dev);