drivers/gpu/ipu-v3/ipu-common.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Copyright (c) 2010 Sascha Hauer <s.hauer@pengutronix.de>
4  * Copyright (C) 2005-2009 Freescale Semiconductor, Inc.
5  */
6 #include <linux/module.h>
7 #include <linux/export.h>
8 #include <linux/types.h>
9 #include <linux/reset.h>
10 #include <linux/platform_device.h>
11 #include <linux/err.h>
12 #include <linux/spinlock.h>
13 #include <linux/delay.h>
14 #include <linux/interrupt.h>
15 #include <linux/io.h>
16 #include <linux/clk.h>
17 #include <linux/list.h>
18 #include <linux/irq.h>
19 #include <linux/irqchip/chained_irq.h>
20 #include <linux/irqdomain.h>
21 #include <linux/of_device.h>
22 #include <linux/of_graph.h>
23
24 #include <drm/drm_fourcc.h>
25
26 #include <video/imx-ipu-v3.h>
27 #include "ipu-prv.h"
28
29 static inline u32 ipu_cm_read(struct ipu_soc *ipu, unsigned offset)
30 {
31         return readl(ipu->cm_reg + offset);
32 }
33
34 static inline void ipu_cm_write(struct ipu_soc *ipu, u32 value, unsigned offset)
35 {
36         writel(value, ipu->cm_reg + offset);
37 }
38
39 int ipu_get_num(struct ipu_soc *ipu)
40 {
41         return ipu->id;
42 }
43 EXPORT_SYMBOL_GPL(ipu_get_num);
44
45 void ipu_srm_dp_update(struct ipu_soc *ipu, bool sync)
46 {
47         u32 val;
48
49         val = ipu_cm_read(ipu, IPU_SRM_PRI2);
50         val &= ~DP_S_SRM_MODE_MASK;
51         val |= sync ? DP_S_SRM_MODE_NEXT_FRAME :
52                       DP_S_SRM_MODE_NOW;
53         ipu_cm_write(ipu, val, IPU_SRM_PRI2);
54 }
55 EXPORT_SYMBOL_GPL(ipu_srm_dp_update);
56
57 enum ipu_color_space ipu_drm_fourcc_to_colorspace(u32 drm_fourcc)
58 {
59         switch (drm_fourcc) {
60         case DRM_FORMAT_ARGB1555:
61         case DRM_FORMAT_ABGR1555:
62         case DRM_FORMAT_RGBA5551:
63         case DRM_FORMAT_BGRA5551:
64         case DRM_FORMAT_RGB565:
65         case DRM_FORMAT_BGR565:
66         case DRM_FORMAT_RGB888:
67         case DRM_FORMAT_BGR888:
68         case DRM_FORMAT_ARGB4444:
69         case DRM_FORMAT_XRGB8888:
70         case DRM_FORMAT_XBGR8888:
71         case DRM_FORMAT_RGBX8888:
72         case DRM_FORMAT_BGRX8888:
73         case DRM_FORMAT_ARGB8888:
74         case DRM_FORMAT_ABGR8888:
75         case DRM_FORMAT_RGBA8888:
76         case DRM_FORMAT_BGRA8888:
77         case DRM_FORMAT_RGB565_A8:
78         case DRM_FORMAT_BGR565_A8:
79         case DRM_FORMAT_RGB888_A8:
80         case DRM_FORMAT_BGR888_A8:
81         case DRM_FORMAT_RGBX8888_A8:
82         case DRM_FORMAT_BGRX8888_A8:
83                 return IPUV3_COLORSPACE_RGB;
84         case DRM_FORMAT_YUYV:
85         case DRM_FORMAT_UYVY:
86         case DRM_FORMAT_YUV420:
87         case DRM_FORMAT_YVU420:
88         case DRM_FORMAT_YUV422:
89         case DRM_FORMAT_YVU422:
90         case DRM_FORMAT_YUV444:
91         case DRM_FORMAT_YVU444:
92         case DRM_FORMAT_NV12:
93         case DRM_FORMAT_NV21:
94         case DRM_FORMAT_NV16:
95         case DRM_FORMAT_NV61:
96                 return IPUV3_COLORSPACE_YUV;
97         default:
98                 return IPUV3_COLORSPACE_UNKNOWN;
99         }
100 }
101 EXPORT_SYMBOL_GPL(ipu_drm_fourcc_to_colorspace);
102
103 enum ipu_color_space ipu_pixelformat_to_colorspace(u32 pixelformat)
104 {
105         switch (pixelformat) {
106         case V4L2_PIX_FMT_YUV420:
107         case V4L2_PIX_FMT_YVU420:
108         case V4L2_PIX_FMT_YUV422P:
109         case V4L2_PIX_FMT_UYVY:
110         case V4L2_PIX_FMT_YUYV:
111         case V4L2_PIX_FMT_NV12:
112         case V4L2_PIX_FMT_NV21:
113         case V4L2_PIX_FMT_NV16:
114         case V4L2_PIX_FMT_NV61:
115                 return IPUV3_COLORSPACE_YUV;
116         case V4L2_PIX_FMT_RGB565:
117         case V4L2_PIX_FMT_BGR24:
118         case V4L2_PIX_FMT_RGB24:
119         case V4L2_PIX_FMT_ABGR32:
120         case V4L2_PIX_FMT_XBGR32:
121         case V4L2_PIX_FMT_BGRA32:
122         case V4L2_PIX_FMT_BGRX32:
123         case V4L2_PIX_FMT_RGBA32:
124         case V4L2_PIX_FMT_RGBX32:
125         case V4L2_PIX_FMT_ARGB32:
126         case V4L2_PIX_FMT_XRGB32:
127         case V4L2_PIX_FMT_RGB32:
128         case V4L2_PIX_FMT_BGR32:
129                 return IPUV3_COLORSPACE_RGB;
130         default:
131                 return IPUV3_COLORSPACE_UNKNOWN;
132         }
133 }
134 EXPORT_SYMBOL_GPL(ipu_pixelformat_to_colorspace);
135
136 bool ipu_pixelformat_is_planar(u32 pixelformat)
137 {
138         switch (pixelformat) {
139         case V4L2_PIX_FMT_YUV420:
140         case V4L2_PIX_FMT_YVU420:
141         case V4L2_PIX_FMT_YUV422P:
142         case V4L2_PIX_FMT_NV12:
143         case V4L2_PIX_FMT_NV21:
144         case V4L2_PIX_FMT_NV16:
145         case V4L2_PIX_FMT_NV61:
146                 return true;
147         }
148
149         return false;
150 }
151 EXPORT_SYMBOL_GPL(ipu_pixelformat_is_planar);
152
153 enum ipu_color_space ipu_mbus_code_to_colorspace(u32 mbus_code)
154 {
155         switch (mbus_code & 0xf000) {
156         case 0x1000:
157                 return IPUV3_COLORSPACE_RGB;
158         case 0x2000:
159                 return IPUV3_COLORSPACE_YUV;
160         default:
161                 return IPUV3_COLORSPACE_UNKNOWN;
162         }
163 }
164 EXPORT_SYMBOL_GPL(ipu_mbus_code_to_colorspace);
165
166 int ipu_stride_to_bytes(u32 pixel_stride, u32 pixelformat)
167 {
168         switch (pixelformat) {
169         case V4L2_PIX_FMT_YUV420:
170         case V4L2_PIX_FMT_YVU420:
171         case V4L2_PIX_FMT_YUV422P:
172         case V4L2_PIX_FMT_NV12:
173         case V4L2_PIX_FMT_NV21:
174         case V4L2_PIX_FMT_NV16:
175         case V4L2_PIX_FMT_NV61:
176                 /*
177                  * For the planar YUV formats, the stride passed to
178                  * cpmem must be the stride in bytes of the Y plane,
179                  * and since all the planar YUV formats have an 8-bit
180                  * Y component this equals the stride in pixels.
181                  */
182                 return (8 * pixel_stride) >> 3;
183         case V4L2_PIX_FMT_RGB565:
184         case V4L2_PIX_FMT_YUYV:
185         case V4L2_PIX_FMT_UYVY:
186                 return (16 * pixel_stride) >> 3;
187         case V4L2_PIX_FMT_BGR24:
188         case V4L2_PIX_FMT_RGB24:
189                 return (24 * pixel_stride) >> 3;
190         case V4L2_PIX_FMT_BGR32:
191         case V4L2_PIX_FMT_RGB32:
192         case V4L2_PIX_FMT_XBGR32:
193         case V4L2_PIX_FMT_XRGB32:
194                 return (32 * pixel_stride) >> 3;
195         default:
196                 break;
197         }
198
199         return -EINVAL;
200 }
201 EXPORT_SYMBOL_GPL(ipu_stride_to_bytes);
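
/*
 * Illustrative sketch, not part of this driver: how a hypothetical capture
 * driver might derive the byte stride programmed into the CPMEM from a V4L2
 * format, using the helpers above.  The function name, the "pix" argument
 * and the log wording are assumptions made for the example only.
 */
static int example_format_to_stride(struct device *dev,
                                    const struct v4l2_pix_format *pix)
{
        int stride = ipu_stride_to_bytes(pix->width, pix->pixelformat);

        if (stride < 0) {
                dev_err(dev, "unsupported pixelformat %08x\n",
                        pix->pixelformat);
                return stride;
        }

        /* For planar formats this is the stride of the Y plane only. */
        if (ipu_pixelformat_is_planar(pix->pixelformat))
                dev_dbg(dev, "planar format, Y stride is %d bytes\n", stride);

        return stride;
}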
202
203 int ipu_degrees_to_rot_mode(enum ipu_rotate_mode *mode, int degrees,
204                             bool hflip, bool vflip)
205 {
206         u32 r90, vf, hf;
207
208         switch (degrees) {
209         case 0:
210                 vf = hf = r90 = 0;
211                 break;
212         case 90:
213                 vf = hf = 0;
214                 r90 = 1;
215                 break;
216         case 180:
217                 vf = hf = 1;
218                 r90 = 0;
219                 break;
220         case 270:
221                 vf = hf = r90 = 1;
222                 break;
223         default:
224                 return -EINVAL;
225         }
226
227         hf ^= (u32)hflip;
228         vf ^= (u32)vflip;
229
230         *mode = (enum ipu_rotate_mode)((r90 << 2) | (hf << 1) | vf);
231         return 0;
232 }
233 EXPORT_SYMBOL_GPL(ipu_degrees_to_rot_mode);
234
235 int ipu_rot_mode_to_degrees(int *degrees, enum ipu_rotate_mode mode,
236                             bool hflip, bool vflip)
237 {
238         u32 r90, vf, hf;
239
240         r90 = ((u32)mode >> 2) & 0x1;
241         hf = ((u32)mode >> 1) & 0x1;
242         vf = ((u32)mode >> 0) & 0x1;
243         hf ^= (u32)hflip;
244         vf ^= (u32)vflip;
245
246         switch ((enum ipu_rotate_mode)((r90 << 2) | (hf << 1) | vf)) {
247         case IPU_ROTATE_NONE:
248                 *degrees = 0;
249                 break;
250         case IPU_ROTATE_90_RIGHT:
251                 *degrees = 90;
252                 break;
253         case IPU_ROTATE_180:
254                 *degrees = 180;
255                 break;
256         case IPU_ROTATE_90_LEFT:
257                 *degrees = 270;
258                 break;
259         default:
260                 return -EINVAL;
261         }
262
263         return 0;
264 }
265 EXPORT_SYMBOL_GPL(ipu_rot_mode_to_degrees);
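
/*
 * Illustrative sketch, not part of this driver: a round trip through the two
 * rotation helpers above.  The values are arbitrary example inputs.
 */
static int example_rotation_round_trip(void)
{
        enum ipu_rotate_mode mode;
        int degrees, ret;

        /* 90 degrees clockwise plus a vertical flip */
        ret = ipu_degrees_to_rot_mode(&mode, 90, false, true);
        if (ret)
                return ret;

        /* passing the same flips back in recovers the 90 degree rotation */
        ret = ipu_rot_mode_to_degrees(&degrees, mode, false, true);
        if (ret)
                return ret;

        return degrees == 90 ? 0 : -EINVAL;
}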
266
267 struct ipuv3_channel *ipu_idmac_get(struct ipu_soc *ipu, unsigned num)
268 {
269         struct ipuv3_channel *channel;
270
271         dev_dbg(ipu->dev, "%s %d\n", __func__, num);
272
273         if (num > 63)
274                 return ERR_PTR(-ENODEV);
275
276         mutex_lock(&ipu->channel_lock);
277
278         list_for_each_entry(channel, &ipu->channels, list) {
279                 if (channel->num == num) {
280                         channel = ERR_PTR(-EBUSY);
281                         goto out;
282                 }
283         }
284
285         channel = kzalloc(sizeof(*channel), GFP_KERNEL);
286         if (!channel) {
287                 channel = ERR_PTR(-ENOMEM);
288                 goto out;
289         }
290
291         channel->num = num;
292         channel->ipu = ipu;
293         list_add(&channel->list, &ipu->channels);
294
295 out:
296         mutex_unlock(&ipu->channel_lock);
297
298         return channel;
299 }
300 EXPORT_SYMBOL_GPL(ipu_idmac_get);
301
302 void ipu_idmac_put(struct ipuv3_channel *channel)
303 {
304         struct ipu_soc *ipu = channel->ipu;
305
306         dev_dbg(ipu->dev, "%s %d\n", __func__, channel->num);
307
308         mutex_lock(&ipu->channel_lock);
309
310         list_del(&channel->list);
311         kfree(channel);
312
313         mutex_unlock(&ipu->channel_lock);
314 }
315 EXPORT_SYMBOL_GPL(ipu_idmac_put);
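
/*
 * Illustrative sketch, not part of this driver: the usual acquire/release
 * pattern for an IDMAC channel.  A real client would keep the channel for as
 * long as it owns the DMA flow; it is released again immediately here only
 * to keep the example short.
 */
static int example_claim_channel(struct ipu_soc *ipu)
{
        struct ipuv3_channel *ch;

        ch = ipu_idmac_get(ipu, IPUV3_CHANNEL_MEM_BG_SYNC);
        if (IS_ERR(ch))
                return PTR_ERR(ch);     /* e.g. -EBUSY if already claimed */

        /* ... program the CPMEM, enable the channel, run the transfer ... */

        ipu_idmac_put(ch);
        return 0;
}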
316
317 #define idma_mask(ch)                   (1 << ((ch) & 0x1f))
318
319 /*
320  * This is an undocumented feature: writing a one to a channel's bit in
321  * IPU_CHA_CUR_BUF or IPU_CHA_TRIPLE_CUR_BUF resets the channel's
322  * internal current buffer pointer so that transfers start from buffer
323  * 0 on the next channel enable (that's the theory anyway; the imx6 TRM
324  * only says these are read-only registers). This operation is required
325  * for channel linking to work correctly: for instance, video capture
326  * pipelines that carry out image rotations will fail after the first
327  * streaming run unless this function is called for each channel before
328  * the channels are re-enabled.
329  */
330 static void __ipu_idmac_reset_current_buffer(struct ipuv3_channel *channel)
331 {
332         struct ipu_soc *ipu = channel->ipu;
333         unsigned int chno = channel->num;
334
335         ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_CUR_BUF(chno));
336 }
337
338 void ipu_idmac_set_double_buffer(struct ipuv3_channel *channel,
339                 bool doublebuffer)
340 {
341         struct ipu_soc *ipu = channel->ipu;
342         unsigned long flags;
343         u32 reg;
344
345         spin_lock_irqsave(&ipu->lock, flags);
346
347         reg = ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(channel->num));
348         if (doublebuffer)
349                 reg |= idma_mask(channel->num);
350         else
351                 reg &= ~idma_mask(channel->num);
352         ipu_cm_write(ipu, reg, IPU_CHA_DB_MODE_SEL(channel->num));
353
354         __ipu_idmac_reset_current_buffer(channel);
355
356         spin_unlock_irqrestore(&ipu->lock, flags);
357 }
358 EXPORT_SYMBOL_GPL(ipu_idmac_set_double_buffer);
359
360 static const struct {
361         int chnum;
362         u32 reg;
363         int shift;
364 } idmac_lock_en_info[] = {
365         { .chnum =  5, .reg = IDMAC_CH_LOCK_EN_1, .shift =  0, },
366         { .chnum = 11, .reg = IDMAC_CH_LOCK_EN_1, .shift =  2, },
367         { .chnum = 12, .reg = IDMAC_CH_LOCK_EN_1, .shift =  4, },
368         { .chnum = 14, .reg = IDMAC_CH_LOCK_EN_1, .shift =  6, },
369         { .chnum = 15, .reg = IDMAC_CH_LOCK_EN_1, .shift =  8, },
370         { .chnum = 20, .reg = IDMAC_CH_LOCK_EN_1, .shift = 10, },
371         { .chnum = 21, .reg = IDMAC_CH_LOCK_EN_1, .shift = 12, },
372         { .chnum = 22, .reg = IDMAC_CH_LOCK_EN_1, .shift = 14, },
373         { .chnum = 23, .reg = IDMAC_CH_LOCK_EN_1, .shift = 16, },
374         { .chnum = 27, .reg = IDMAC_CH_LOCK_EN_1, .shift = 18, },
375         { .chnum = 28, .reg = IDMAC_CH_LOCK_EN_1, .shift = 20, },
376         { .chnum = 45, .reg = IDMAC_CH_LOCK_EN_2, .shift =  0, },
377         { .chnum = 46, .reg = IDMAC_CH_LOCK_EN_2, .shift =  2, },
378         { .chnum = 47, .reg = IDMAC_CH_LOCK_EN_2, .shift =  4, },
379         { .chnum = 48, .reg = IDMAC_CH_LOCK_EN_2, .shift =  6, },
380         { .chnum = 49, .reg = IDMAC_CH_LOCK_EN_2, .shift =  8, },
381         { .chnum = 50, .reg = IDMAC_CH_LOCK_EN_2, .shift = 10, },
382 };
383
384 int ipu_idmac_lock_enable(struct ipuv3_channel *channel, int num_bursts)
385 {
386         struct ipu_soc *ipu = channel->ipu;
387         unsigned long flags;
388         u32 bursts, regval;
389         int i;
390
391         switch (num_bursts) {
392         case 0:
393         case 1:
394                 bursts = 0x00; /* locking disabled */
395                 break;
396         case 2:
397                 bursts = 0x01;
398                 break;
399         case 4:
400                 bursts = 0x02;
401                 break;
402         case 8:
403                 bursts = 0x03;
404                 break;
405         default:
406                 return -EINVAL;
407         }
408
409         /*
410          * IPUv3EX / i.MX51 has a different register layout, and on IPUv3M /
411          * i.MX53 channel arbitration locking doesn't seem to work properly.
412          * Allow enabling the lock feature on IPUv3H / i.MX6 only.
413          */
414         if (bursts && ipu->ipu_type != IPUV3H)
415                 return -EINVAL;
416
417         for (i = 0; i < ARRAY_SIZE(idmac_lock_en_info); i++) {
418                 if (channel->num == idmac_lock_en_info[i].chnum)
419                         break;
420         }
421         if (i >= ARRAY_SIZE(idmac_lock_en_info))
422                 return -EINVAL;
423
424         spin_lock_irqsave(&ipu->lock, flags);
425
426         regval = ipu_idmac_read(ipu, idmac_lock_en_info[i].reg);
427         regval &= ~(0x03 << idmac_lock_en_info[i].shift);
428         regval |= (bursts << idmac_lock_en_info[i].shift);
429         ipu_idmac_write(ipu, regval, idmac_lock_en_info[i].reg);
430
431         spin_unlock_irqrestore(&ipu->lock, flags);
432
433         return 0;
434 }
435 EXPORT_SYMBOL_GPL(ipu_idmac_lock_enable);
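
/*
 * Illustrative sketch, not part of this driver: opting in to burst locking
 * for a channel.  As the comment above explains this only works on
 * IPUv3H/i.MX6, so callers are expected to tolerate -EINVAL elsewhere.  The
 * burst count of 8 is just an example value.
 */
static void example_enable_burst_locking(struct ipuv3_channel *ch)
{
        int ret = ipu_idmac_lock_enable(ch, 8);

        if (ret == -EINVAL)
                dev_dbg(ch->ipu->dev,
                        "burst locking not supported for channel %d\n",
                        ch->num);
}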
436
437 int ipu_module_enable(struct ipu_soc *ipu, u32 mask)
438 {
439         unsigned long lock_flags;
440         u32 val;
441
442         spin_lock_irqsave(&ipu->lock, lock_flags);
443
444         val = ipu_cm_read(ipu, IPU_DISP_GEN);
445
446         if (mask & IPU_CONF_DI0_EN)
447                 val |= IPU_DI0_COUNTER_RELEASE;
448         if (mask & IPU_CONF_DI1_EN)
449                 val |= IPU_DI1_COUNTER_RELEASE;
450
451         ipu_cm_write(ipu, val, IPU_DISP_GEN);
452
453         val = ipu_cm_read(ipu, IPU_CONF);
454         val |= mask;
455         ipu_cm_write(ipu, val, IPU_CONF);
456
457         spin_unlock_irqrestore(&ipu->lock, lock_flags);
458
459         return 0;
460 }
461 EXPORT_SYMBOL_GPL(ipu_module_enable);
462
463 int ipu_module_disable(struct ipu_soc *ipu, u32 mask)
464 {
465         unsigned long lock_flags;
466         u32 val;
467
468         spin_lock_irqsave(&ipu->lock, lock_flags);
469
470         val = ipu_cm_read(ipu, IPU_CONF);
471         val &= ~mask;
472         ipu_cm_write(ipu, val, IPU_CONF);
473
474         val = ipu_cm_read(ipu, IPU_DISP_GEN);
475
476         if (mask & IPU_CONF_DI0_EN)
477                 val &= ~IPU_DI0_COUNTER_RELEASE;
478         if (mask & IPU_CONF_DI1_EN)
479                 val &= ~IPU_DI1_COUNTER_RELEASE;
480
481         ipu_cm_write(ipu, val, IPU_DISP_GEN);
482
483         spin_unlock_irqrestore(&ipu->lock, lock_flags);
484
485         return 0;
486 }
487 EXPORT_SYMBOL_GPL(ipu_module_disable);
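
/*
 * Illustrative sketch, not part of this driver: sub-module drivers bracket
 * their active period with ipu_module_enable()/ipu_module_disable().  The
 * CSI0 mask is used here only because it already appears in this file; the
 * stream on/off callbacks are hypothetical.
 */
static int example_csi0_stream_on(struct ipu_soc *ipu)
{
        return ipu_module_enable(ipu, IPU_CONF_CSI0_EN);
}

static void example_csi0_stream_off(struct ipu_soc *ipu)
{
        ipu_module_disable(ipu, IPU_CONF_CSI0_EN);
}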
488
489 int ipu_idmac_get_current_buffer(struct ipuv3_channel *channel)
490 {
491         struct ipu_soc *ipu = channel->ipu;
492         unsigned int chno = channel->num;
493
494         return (ipu_cm_read(ipu, IPU_CHA_CUR_BUF(chno)) & idma_mask(chno)) ? 1 : 0;
495 }
496 EXPORT_SYMBOL_GPL(ipu_idmac_get_current_buffer);
497
498 bool ipu_idmac_buffer_is_ready(struct ipuv3_channel *channel, u32 buf_num)
499 {
500         struct ipu_soc *ipu = channel->ipu;
501         unsigned long flags;
502         u32 reg = 0;
503
504         spin_lock_irqsave(&ipu->lock, flags);
505         switch (buf_num) {
506         case 0:
507                 reg = ipu_cm_read(ipu, IPU_CHA_BUF0_RDY(channel->num));
508                 break;
509         case 1:
510                 reg = ipu_cm_read(ipu, IPU_CHA_BUF1_RDY(channel->num));
511                 break;
512         case 2:
513                 reg = ipu_cm_read(ipu, IPU_CHA_BUF2_RDY(channel->num));
514                 break;
515         }
516         spin_unlock_irqrestore(&ipu->lock, flags);
517
518         return ((reg & idma_mask(channel->num)) != 0);
519 }
520 EXPORT_SYMBOL_GPL(ipu_idmac_buffer_is_ready);
521
522 void ipu_idmac_select_buffer(struct ipuv3_channel *channel, u32 buf_num)
523 {
524         struct ipu_soc *ipu = channel->ipu;
525         unsigned int chno = channel->num;
526         unsigned long flags;
527
528         spin_lock_irqsave(&ipu->lock, flags);
529
530         /* Mark buffer as ready. */
531         if (buf_num == 0)
532                 ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_BUF0_RDY(chno));
533         else
534                 ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_BUF1_RDY(chno));
535
536         spin_unlock_irqrestore(&ipu->lock, flags);
537 }
538 EXPORT_SYMBOL_GPL(ipu_idmac_select_buffer);
539
540 void ipu_idmac_clear_buffer(struct ipuv3_channel *channel, u32 buf_num)
541 {
542         struct ipu_soc *ipu = channel->ipu;
543         unsigned int chno = channel->num;
544         unsigned long flags;
545
546         spin_lock_irqsave(&ipu->lock, flags);
547
548         ipu_cm_write(ipu, 0xF0300000, IPU_GPR); /* write one to clear */
549         switch (buf_num) {
550         case 0:
551                 ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_BUF0_RDY(chno));
552                 break;
553         case 1:
554                 ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_BUF1_RDY(chno));
555                 break;
556         case 2:
557                 ipu_cm_write(ipu, idma_mask(chno), IPU_CHA_BUF2_RDY(chno));
558                 break;
559         default:
560                 break;
561         }
562         ipu_cm_write(ipu, 0x0, IPU_GPR); /* write one to set */
563
564         spin_unlock_irqrestore(&ipu->lock, flags);
565 }
566 EXPORT_SYMBOL_GPL(ipu_idmac_clear_buffer);
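
/*
 * Illustrative sketch, not part of this driver: a double-buffered client's
 * frame-done path.  When a frame completes, the CPU refills the buffer the
 * DMA engine is *not* about to use and marks it ready again; the actual
 * CPMEM update is left out.
 */
static void example_requeue_done_buffer(struct ipuv3_channel *ch)
{
        /* buffer the DMA engine will fetch next: 0 or 1 */
        int cur = ipu_idmac_get_current_buffer(ch);
        /* the other buffer has just completed and can be refilled */
        int done = !cur;

        if (!ipu_idmac_buffer_is_ready(ch, done)) {
                /* ... point CPMEM buffer 'done' at the next frame here ... */
                ipu_idmac_select_buffer(ch, done);
        }
}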
567
568 int ipu_idmac_enable_channel(struct ipuv3_channel *channel)
569 {
570         struct ipu_soc *ipu = channel->ipu;
571         u32 val;
572         unsigned long flags;
573
574         spin_lock_irqsave(&ipu->lock, flags);
575
576         val = ipu_idmac_read(ipu, IDMAC_CHA_EN(channel->num));
577         val |= idma_mask(channel->num);
578         ipu_idmac_write(ipu, val, IDMAC_CHA_EN(channel->num));
579
580         spin_unlock_irqrestore(&ipu->lock, flags);
581
582         return 0;
583 }
584 EXPORT_SYMBOL_GPL(ipu_idmac_enable_channel);
585
586 bool ipu_idmac_channel_busy(struct ipu_soc *ipu, unsigned int chno)
587 {
588         return (ipu_idmac_read(ipu, IDMAC_CHA_BUSY(chno)) & idma_mask(chno));
589 }
590 EXPORT_SYMBOL_GPL(ipu_idmac_channel_busy);
591
592 int ipu_idmac_wait_busy(struct ipuv3_channel *channel, int ms)
593 {
594         struct ipu_soc *ipu = channel->ipu;
595         unsigned long timeout;
596
597         timeout = jiffies + msecs_to_jiffies(ms);
598         while (ipu_idmac_read(ipu, IDMAC_CHA_BUSY(channel->num)) &
599                         idma_mask(channel->num)) {
600                 if (time_after(jiffies, timeout))
601                         return -ETIMEDOUT;
602                 cpu_relax();
603         }
604
605         return 0;
606 }
607 EXPORT_SYMBOL_GPL(ipu_idmac_wait_busy);
608
609 int ipu_idmac_disable_channel(struct ipuv3_channel *channel)
610 {
611         struct ipu_soc *ipu = channel->ipu;
612         u32 val;
613         unsigned long flags;
614
615         spin_lock_irqsave(&ipu->lock, flags);
616
617         /* Disable DMA channel(s) */
618         val = ipu_idmac_read(ipu, IDMAC_CHA_EN(channel->num));
619         val &= ~idma_mask(channel->num);
620         ipu_idmac_write(ipu, val, IDMAC_CHA_EN(channel->num));
621
622         __ipu_idmac_reset_current_buffer(channel);
623
624         /* Set channel buffers NOT to be ready */
625         ipu_cm_write(ipu, 0xf0000000, IPU_GPR); /* write one to clear */
626
627         if (ipu_cm_read(ipu, IPU_CHA_BUF0_RDY(channel->num)) &
628                         idma_mask(channel->num)) {
629                 ipu_cm_write(ipu, idma_mask(channel->num),
630                              IPU_CHA_BUF0_RDY(channel->num));
631         }
632
633         if (ipu_cm_read(ipu, IPU_CHA_BUF1_RDY(channel->num)) &
634                         idma_mask(channel->num)) {
635                 ipu_cm_write(ipu, idma_mask(channel->num),
636                              IPU_CHA_BUF1_RDY(channel->num));
637         }
638
639         ipu_cm_write(ipu, 0x0, IPU_GPR); /* write one to set */
640
641         /* Reset the double buffer */
642         val = ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(channel->num));
643         val &= ~idma_mask(channel->num);
644         ipu_cm_write(ipu, val, IPU_CHA_DB_MODE_SEL(channel->num));
645
646         spin_unlock_irqrestore(&ipu->lock, flags);
647
648         return 0;
649 }
650 EXPORT_SYMBOL_GPL(ipu_idmac_disable_channel);
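
/*
 * Illustrative sketch, not part of this driver: a typical channel stop
 * sequence.  Waiting for the channel to go idle before disabling it avoids
 * cutting a frame off in the middle; the 50 ms timeout is an arbitrary
 * example value.
 */
static void example_stop_channel(struct ipuv3_channel *ch)
{
        if (ipu_idmac_wait_busy(ch, 50))
                dev_warn(ch->ipu->dev, "channel %d did not go idle\n",
                         ch->num);

        ipu_idmac_disable_channel(ch);
}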
651
652 /*
653  * The imx6 rev. D TRM says that enabling the WM feature will increase
654  * a channel's priority. Refer to Table 36-8 Calculated priority value.
655  * The sub-module that is the sink or source for the channel must enable
656  * the watermark signal for this to take effect (SMFC_WM for instance).
657  */
658 void ipu_idmac_enable_watermark(struct ipuv3_channel *channel, bool enable)
659 {
660         struct ipu_soc *ipu = channel->ipu;
661         unsigned long flags;
662         u32 val;
663
664         spin_lock_irqsave(&ipu->lock, flags);
665
666         val = ipu_idmac_read(ipu, IDMAC_WM_EN(channel->num));
667         if (enable)
668                 val |= 1 << (channel->num % 32);
669         else
670                 val &= ~(1 << (channel->num % 32));
671         ipu_idmac_write(ipu, val, IDMAC_WM_EN(channel->num));
672
673         spin_unlock_irqrestore(&ipu->lock, flags);
674 }
675 EXPORT_SYMBOL_GPL(ipu_idmac_enable_watermark);
676
677 static int ipu_memory_reset(struct ipu_soc *ipu)
678 {
679         unsigned long timeout;
680
681         ipu_cm_write(ipu, 0x807FFFFF, IPU_MEM_RST);
682
683         timeout = jiffies + msecs_to_jiffies(1000);
684         while (ipu_cm_read(ipu, IPU_MEM_RST) & 0x80000000) {
685                 if (time_after(jiffies, timeout))
686                         return -ETIME;
687                 cpu_relax();
688         }
689
690         return 0;
691 }
692
693 /*
694  * Set the source mux for the given CSI. Selects either parallel or
695  * MIPI CSI2 sources.
696  */
697 void ipu_set_csi_src_mux(struct ipu_soc *ipu, int csi_id, bool mipi_csi2)
698 {
699         unsigned long flags;
700         u32 val, mask;
701
702         mask = (csi_id == 1) ? IPU_CONF_CSI1_DATA_SOURCE :
703                 IPU_CONF_CSI0_DATA_SOURCE;
704
705         spin_lock_irqsave(&ipu->lock, flags);
706
707         val = ipu_cm_read(ipu, IPU_CONF);
708         if (mipi_csi2)
709                 val |= mask;
710         else
711                 val &= ~mask;
712         ipu_cm_write(ipu, val, IPU_CONF);
713
714         spin_unlock_irqrestore(&ipu->lock, flags);
715 }
716 EXPORT_SYMBOL_GPL(ipu_set_csi_src_mux);
717
718 /*
719  * Set the source mux for the IC. Selects either CSI[01] or the VDI.
720  */
721 void ipu_set_ic_src_mux(struct ipu_soc *ipu, int csi_id, bool vdi)
722 {
723         unsigned long flags;
724         u32 val;
725
726         spin_lock_irqsave(&ipu->lock, flags);
727
728         val = ipu_cm_read(ipu, IPU_CONF);
729         if (vdi)
730                 val |= IPU_CONF_IC_INPUT;
731         else
732                 val &= ~IPU_CONF_IC_INPUT;
733
734         if (csi_id == 1)
735                 val |= IPU_CONF_CSI_SEL;
736         else
737                 val &= ~IPU_CONF_CSI_SEL;
738
739         ipu_cm_write(ipu, val, IPU_CONF);
740
741         spin_unlock_irqrestore(&ipu->lock, flags);
742 }
743 EXPORT_SYMBOL_GPL(ipu_set_ic_src_mux);
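
/*
 * Illustrative sketch, not part of this driver: routing camera data from the
 * MIPI CSI-2 receiver through CSI1 straight into the IC, bypassing the VDI.
 * A capture pipeline would typically do this once while setting up its
 * media links.
 */
static void example_route_csi1_to_ic(struct ipu_soc *ipu)
{
        /* CSI1 takes its data from the MIPI CSI-2 gasket ... */
        ipu_set_csi_src_mux(ipu, 1, true);
        /* ... and the IC takes its input directly from CSI1 */
        ipu_set_ic_src_mux(ipu, 1, false);
}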
744
745
746 /* Frame Synchronization Unit Channel Linking */
747
748 struct fsu_link_reg_info {
749         int chno;
750         u32 reg;
751         u32 mask;
752         u32 val;
753 };
754
755 struct fsu_link_info {
756         struct fsu_link_reg_info src;
757         struct fsu_link_reg_info sink;
758 };
759
760 static const struct fsu_link_info fsu_link_info[] = {
761         {
762                 .src  = { IPUV3_CHANNEL_IC_PRP_ENC_MEM, IPU_FS_PROC_FLOW2,
763                           FS_PRP_ENC_DEST_SEL_MASK, FS_PRP_ENC_DEST_SEL_IRT_ENC },
764                 .sink = { IPUV3_CHANNEL_MEM_ROT_ENC, IPU_FS_PROC_FLOW1,
765                           FS_PRPENC_ROT_SRC_SEL_MASK, FS_PRPENC_ROT_SRC_SEL_ENC },
766         }, {
767                 .src =  { IPUV3_CHANNEL_IC_PRP_VF_MEM, IPU_FS_PROC_FLOW2,
768                           FS_PRPVF_DEST_SEL_MASK, FS_PRPVF_DEST_SEL_IRT_VF },
769                 .sink = { IPUV3_CHANNEL_MEM_ROT_VF, IPU_FS_PROC_FLOW1,
770                           FS_PRPVF_ROT_SRC_SEL_MASK, FS_PRPVF_ROT_SRC_SEL_VF },
771         }, {
772                 .src =  { IPUV3_CHANNEL_IC_PP_MEM, IPU_FS_PROC_FLOW2,
773                           FS_PP_DEST_SEL_MASK, FS_PP_DEST_SEL_IRT_PP },
774                 .sink = { IPUV3_CHANNEL_MEM_ROT_PP, IPU_FS_PROC_FLOW1,
775                           FS_PP_ROT_SRC_SEL_MASK, FS_PP_ROT_SRC_SEL_PP },
776         }, {
777                 .src =  { IPUV3_CHANNEL_CSI_DIRECT, 0 },
778                 .sink = { IPUV3_CHANNEL_CSI_VDI_PREV, IPU_FS_PROC_FLOW1,
779                           FS_VDI_SRC_SEL_MASK, FS_VDI_SRC_SEL_CSI_DIRECT },
780         },
781 };
782
783 static const struct fsu_link_info *find_fsu_link_info(int src, int sink)
784 {
785         int i;
786
787         for (i = 0; i < ARRAY_SIZE(fsu_link_info); i++) {
788                 if (src == fsu_link_info[i].src.chno &&
789                     sink == fsu_link_info[i].sink.chno)
790                         return &fsu_link_info[i];
791         }
792
793         return NULL;
794 }
795
796 /*
797  * Links a source channel to a sink channel in the FSU.
798  */
799 int ipu_fsu_link(struct ipu_soc *ipu, int src_ch, int sink_ch)
800 {
801         const struct fsu_link_info *link;
802         u32 src_reg, sink_reg;
803         unsigned long flags;
804
805         link = find_fsu_link_info(src_ch, sink_ch);
806         if (!link)
807                 return -EINVAL;
808
809         spin_lock_irqsave(&ipu->lock, flags);
810
811         if (link->src.mask) {
812                 src_reg = ipu_cm_read(ipu, link->src.reg);
813                 src_reg &= ~link->src.mask;
814                 src_reg |= link->src.val;
815                 ipu_cm_write(ipu, src_reg, link->src.reg);
816         }
817
818         if (link->sink.mask) {
819                 sink_reg = ipu_cm_read(ipu, link->sink.reg);
820                 sink_reg &= ~link->sink.mask;
821                 sink_reg |= link->sink.val;
822                 ipu_cm_write(ipu, sink_reg, link->sink.reg);
823         }
824
825         spin_unlock_irqrestore(&ipu->lock, flags);
826         return 0;
827 }
828 EXPORT_SYMBOL_GPL(ipu_fsu_link);
829
830 /*
831  * Unlinks source and sink channels in the FSU.
832  */
833 int ipu_fsu_unlink(struct ipu_soc *ipu, int src_ch, int sink_ch)
834 {
835         const struct fsu_link_info *link;
836         u32 src_reg, sink_reg;
837         unsigned long flags;
838
839         link = find_fsu_link_info(src_ch, sink_ch);
840         if (!link)
841                 return -EINVAL;
842
843         spin_lock_irqsave(&ipu->lock, flags);
844
845         if (link->src.mask) {
846                 src_reg = ipu_cm_read(ipu, link->src.reg);
847                 src_reg &= ~link->src.mask;
848                 ipu_cm_write(ipu, src_reg, link->src.reg);
849         }
850
851         if (link->sink.mask) {
852                 sink_reg = ipu_cm_read(ipu, link->sink.reg);
853                 sink_reg &= ~link->sink.mask;
854                 ipu_cm_write(ipu, sink_reg, link->sink.reg);
855         }
856
857         spin_unlock_irqrestore(&ipu->lock, flags);
858         return 0;
859 }
860 EXPORT_SYMBOL_GPL(ipu_fsu_unlink);
861
862 /* Link IDMAC channels in the FSU */
863 int ipu_idmac_link(struct ipuv3_channel *src, struct ipuv3_channel *sink)
864 {
865         return ipu_fsu_link(src->ipu, src->num, sink->num);
866 }
867 EXPORT_SYMBOL_GPL(ipu_idmac_link);
868
869 /* Unlink IDMAC channels in the FSU */
870 int ipu_idmac_unlink(struct ipuv3_channel *src, struct ipuv3_channel *sink)
871 {
872         return ipu_fsu_unlink(src->ipu, src->num, sink->num);
873 }
874 EXPORT_SYMBOL_GPL(ipu_idmac_unlink);
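
/*
 * Illustrative sketch, not part of this driver: linking the viewfinder
 * preprocessor output to the rotator input, one of the pairs listed in
 * fsu_link_info[] above, so that the rotator is triggered by the FSU when
 * the preprocessor finishes a frame instead of by software.
 */
static int example_link_vf_to_rotator(struct ipu_soc *ipu)
{
        int ret;

        ret = ipu_fsu_link(ipu, IPUV3_CHANNEL_IC_PRP_VF_MEM,
                           IPUV3_CHANNEL_MEM_ROT_VF);
        if (ret)
                return ret;

        /* ... run the rotation ... */

        return ipu_fsu_unlink(ipu, IPUV3_CHANNEL_IC_PRP_VF_MEM,
                              IPUV3_CHANNEL_MEM_ROT_VF);
}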
875
876 struct ipu_devtype {
877         const char *name;
878         unsigned long cm_ofs;
879         unsigned long cpmem_ofs;
880         unsigned long srm_ofs;
881         unsigned long tpm_ofs;
882         unsigned long csi0_ofs;
883         unsigned long csi1_ofs;
884         unsigned long ic_ofs;
885         unsigned long disp0_ofs;
886         unsigned long disp1_ofs;
887         unsigned long dc_tmpl_ofs;
888         unsigned long vdi_ofs;
889         enum ipuv3_type type;
890 };
891
892 static struct ipu_devtype ipu_type_imx51 = {
893         .name = "IPUv3EX",
894         .cm_ofs = 0x1e000000,
895         .cpmem_ofs = 0x1f000000,
896         .srm_ofs = 0x1f040000,
897         .tpm_ofs = 0x1f060000,
898         .csi0_ofs = 0x1e030000,
899         .csi1_ofs = 0x1e038000,
900         .ic_ofs = 0x1e020000,
901         .disp0_ofs = 0x1e040000,
902         .disp1_ofs = 0x1e048000,
903         .dc_tmpl_ofs = 0x1f080000,
904         .vdi_ofs = 0x1e068000,
905         .type = IPUV3EX,
906 };
907
908 static struct ipu_devtype ipu_type_imx53 = {
909         .name = "IPUv3M",
910         .cm_ofs = 0x06000000,
911         .cpmem_ofs = 0x07000000,
912         .srm_ofs = 0x07040000,
913         .tpm_ofs = 0x07060000,
914         .csi0_ofs = 0x06030000,
915         .csi1_ofs = 0x06038000,
916         .ic_ofs = 0x06020000,
917         .disp0_ofs = 0x06040000,
918         .disp1_ofs = 0x06048000,
919         .dc_tmpl_ofs = 0x07080000,
920         .vdi_ofs = 0x06068000,
921         .type = IPUV3M,
922 };
923
924 static struct ipu_devtype ipu_type_imx6q = {
925         .name = "IPUv3H",
926         .cm_ofs = 0x00200000,
927         .cpmem_ofs = 0x00300000,
928         .srm_ofs = 0x00340000,
929         .tpm_ofs = 0x00360000,
930         .csi0_ofs = 0x00230000,
931         .csi1_ofs = 0x00238000,
932         .ic_ofs = 0x00220000,
933         .disp0_ofs = 0x00240000,
934         .disp1_ofs = 0x00248000,
935         .dc_tmpl_ofs = 0x00380000,
936         .vdi_ofs = 0x00268000,
937         .type = IPUV3H,
938 };
939
940 static const struct of_device_id imx_ipu_dt_ids[] = {
941         { .compatible = "fsl,imx51-ipu", .data = &ipu_type_imx51, },
942         { .compatible = "fsl,imx53-ipu", .data = &ipu_type_imx53, },
943         { .compatible = "fsl,imx6q-ipu", .data = &ipu_type_imx6q, },
944         { .compatible = "fsl,imx6qp-ipu", .data = &ipu_type_imx6q, },
945         { /* sentinel */ }
946 };
947 MODULE_DEVICE_TABLE(of, imx_ipu_dt_ids);
948
949 static int ipu_submodules_init(struct ipu_soc *ipu,
950                 struct platform_device *pdev, unsigned long ipu_base,
951                 struct clk *ipu_clk)
952 {
953         char *unit;
954         int ret;
955         struct device *dev = &pdev->dev;
956         const struct ipu_devtype *devtype = ipu->devtype;
957
958         ret = ipu_cpmem_init(ipu, dev, ipu_base + devtype->cpmem_ofs);
959         if (ret) {
960                 unit = "cpmem";
961                 goto err_cpmem;
962         }
963
964         ret = ipu_csi_init(ipu, dev, 0, ipu_base + devtype->csi0_ofs,
965                            IPU_CONF_CSI0_EN, ipu_clk);
966         if (ret) {
967                 unit = "csi0";
968                 goto err_csi_0;
969         }
970
971         ret = ipu_csi_init(ipu, dev, 1, ipu_base + devtype->csi1_ofs,
972                            IPU_CONF_CSI1_EN, ipu_clk);
973         if (ret) {
974                 unit = "csi1";
975                 goto err_csi_1;
976         }
977
978         ret = ipu_ic_init(ipu, dev,
979                           ipu_base + devtype->ic_ofs,
980                           ipu_base + devtype->tpm_ofs);
981         if (ret) {
982                 unit = "ic";
983                 goto err_ic;
984         }
985
986         ret = ipu_vdi_init(ipu, dev, ipu_base + devtype->vdi_ofs,
987                            IPU_CONF_VDI_EN | IPU_CONF_ISP_EN |
988                            IPU_CONF_IC_INPUT);
989         if (ret) {
990                 unit = "vdi";
991                 goto err_vdi;
992         }
993
994         ret = ipu_image_convert_init(ipu, dev);
995         if (ret) {
996                 unit = "image_convert";
997                 goto err_image_convert;
998         }
999
1000         ret = ipu_di_init(ipu, dev, 0, ipu_base + devtype->disp0_ofs,
1001                           IPU_CONF_DI0_EN, ipu_clk);
1002         if (ret) {
1003                 unit = "di0";
1004                 goto err_di_0;
1005         }
1006
1007         ret = ipu_di_init(ipu, dev, 1, ipu_base + devtype->disp1_ofs,
1008                         IPU_CONF_DI1_EN, ipu_clk);
1009         if (ret) {
1010                 unit = "di1";
1011                 goto err_di_1;
1012         }
1013
1014         ret = ipu_dc_init(ipu, dev, ipu_base + devtype->cm_ofs +
1015                         IPU_CM_DC_REG_OFS, ipu_base + devtype->dc_tmpl_ofs);
1016         if (ret) {
1017                 unit = "dc_template";
1018                 goto err_dc;
1019         }
1020
1021         ret = ipu_dmfc_init(ipu, dev, ipu_base +
1022                         devtype->cm_ofs + IPU_CM_DMFC_REG_OFS, ipu_clk);
1023         if (ret) {
1024                 unit = "dmfc";
1025                 goto err_dmfc;
1026         }
1027
1028         ret = ipu_dp_init(ipu, dev, ipu_base + devtype->srm_ofs);
1029         if (ret) {
1030                 unit = "dp";
1031                 goto err_dp;
1032         }
1033
1034         ret = ipu_smfc_init(ipu, dev, ipu_base +
1035                         devtype->cm_ofs + IPU_CM_SMFC_REG_OFS);
1036         if (ret) {
1037                 unit = "smfc";
1038                 goto err_smfc;
1039         }
1040
1041         return 0;
1042
1043 err_smfc:
1044         ipu_dp_exit(ipu);
1045 err_dp:
1046         ipu_dmfc_exit(ipu);
1047 err_dmfc:
1048         ipu_dc_exit(ipu);
1049 err_dc:
1050         ipu_di_exit(ipu, 1);
1051 err_di_1:
1052         ipu_di_exit(ipu, 0);
1053 err_di_0:
1054         ipu_image_convert_exit(ipu);
1055 err_image_convert:
1056         ipu_vdi_exit(ipu);
1057 err_vdi:
1058         ipu_ic_exit(ipu);
1059 err_ic:
1060         ipu_csi_exit(ipu, 1);
1061 err_csi_1:
1062         ipu_csi_exit(ipu, 0);
1063 err_csi_0:
1064         ipu_cpmem_exit(ipu);
1065 err_cpmem:
1066         dev_err(&pdev->dev, "init %s failed with %d\n", unit, ret);
1067         return ret;
1068 }
1069
1070 static void ipu_irq_handle(struct ipu_soc *ipu, const int *regs, int num_regs)
1071 {
1072         unsigned long status;
1073         int i, bit, irq;
1074
1075         for (i = 0; i < num_regs; i++) {
1076
1077                 status = ipu_cm_read(ipu, IPU_INT_STAT(regs[i]));
1078                 status &= ipu_cm_read(ipu, IPU_INT_CTRL(regs[i]));
1079
1080                 for_each_set_bit(bit, &status, 32) {
1081                         irq = irq_linear_revmap(ipu->domain,
1082                                                 regs[i] * 32 + bit);
1083                         if (irq)
1084                                 generic_handle_irq(irq);
1085                 }
1086         }
1087 }
1088
1089 static void ipu_irq_handler(struct irq_desc *desc)
1090 {
1091         struct ipu_soc *ipu = irq_desc_get_handler_data(desc);
1092         struct irq_chip *chip = irq_desc_get_chip(desc);
1093         static const int int_reg[] = { 0, 1, 2, 3, 10, 11, 12, 13, 14};
1094
1095         chained_irq_enter(chip, desc);
1096
1097         ipu_irq_handle(ipu, int_reg, ARRAY_SIZE(int_reg));
1098
1099         chained_irq_exit(chip, desc);
1100 }
1101
1102 static void ipu_err_irq_handler(struct irq_desc *desc)
1103 {
1104         struct ipu_soc *ipu = irq_desc_get_handler_data(desc);
1105         struct irq_chip *chip = irq_desc_get_chip(desc);
1106         static const int int_reg[] = { 4, 5, 8, 9};
1107
1108         chained_irq_enter(chip, desc);
1109
1110         ipu_irq_handle(ipu, int_reg, ARRAY_SIZE(int_reg));
1111
1112         chained_irq_exit(chip, desc);
1113 }
1114
1115 int ipu_map_irq(struct ipu_soc *ipu, int irq)
1116 {
1117         int virq;
1118
1119         virq = irq_linear_revmap(ipu->domain, irq);
1120         if (!virq)
1121                 virq = irq_create_mapping(ipu->domain, irq);
1122
1123         return virq;
1124 }
1125 EXPORT_SYMBOL_GPL(ipu_map_irq);
1126
1127 int ipu_idmac_channel_irq(struct ipu_soc *ipu, struct ipuv3_channel *channel,
1128                 enum ipu_channel_irq irq_type)
1129 {
1130         return ipu_map_irq(ipu, irq_type + channel->num);
1131 }
1132 EXPORT_SYMBOL_GPL(ipu_idmac_channel_irq);
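
/*
 * Illustrative sketch, not part of this driver: requesting the end-of-frame
 * interrupt of a channel.  IPU_IRQ_EOF comes from <video/imx-ipu-v3.h>; the
 * handler, its name and the context pointer are made up for the example.
 */
static irqreturn_t example_eof_irq(int irq, void *dev_id)
{
        /* ... complete the current frame, requeue the next buffer ... */
        return IRQ_HANDLED;
}

static int example_request_eof_irq(struct ipu_soc *ipu,
                                   struct ipuv3_channel *ch, void *ctx)
{
        int irq = ipu_idmac_channel_irq(ipu, ch, IPU_IRQ_EOF);

        return devm_request_irq(ipu->dev, irq, example_eof_irq, 0,
                                "example-eof", ctx);
}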
1133
1134 static void ipu_submodules_exit(struct ipu_soc *ipu)
1135 {
1136         ipu_smfc_exit(ipu);
1137         ipu_dp_exit(ipu);
1138         ipu_dmfc_exit(ipu);
1139         ipu_dc_exit(ipu);
1140         ipu_di_exit(ipu, 1);
1141         ipu_di_exit(ipu, 0);
1142         ipu_image_convert_exit(ipu);
1143         ipu_vdi_exit(ipu);
1144         ipu_ic_exit(ipu);
1145         ipu_csi_exit(ipu, 1);
1146         ipu_csi_exit(ipu, 0);
1147         ipu_cpmem_exit(ipu);
1148 }
1149
1150 static int platform_remove_devices_fn(struct device *dev, void *unused)
1151 {
1152         struct platform_device *pdev = to_platform_device(dev);
1153
1154         platform_device_unregister(pdev);
1155
1156         return 0;
1157 }
1158
1159 static void platform_device_unregister_children(struct platform_device *pdev)
1160 {
1161         device_for_each_child(&pdev->dev, NULL, platform_remove_devices_fn);
1162 }
1163
1164 struct ipu_platform_reg {
1165         struct ipu_client_platformdata pdata;
1166         const char *name;
1167 };
1168
1169 /* These must be in the order of the corresponding device tree port nodes */
1170 static struct ipu_platform_reg client_reg[] = {
1171         {
1172                 .pdata = {
1173                         .csi = 0,
1174                         .dma[0] = IPUV3_CHANNEL_CSI0,
1175                         .dma[1] = -EINVAL,
1176                 },
1177                 .name = "imx-ipuv3-csi",
1178         }, {
1179                 .pdata = {
1180                         .csi = 1,
1181                         .dma[0] = IPUV3_CHANNEL_CSI1,
1182                         .dma[1] = -EINVAL,
1183                 },
1184                 .name = "imx-ipuv3-csi",
1185         }, {
1186                 .pdata = {
1187                         .di = 0,
1188                         .dc = 5,
1189                         .dp = IPU_DP_FLOW_SYNC_BG,
1190                         .dma[0] = IPUV3_CHANNEL_MEM_BG_SYNC,
1191                         .dma[1] = IPUV3_CHANNEL_MEM_FG_SYNC,
1192                 },
1193                 .name = "imx-ipuv3-crtc",
1194         }, {
1195                 .pdata = {
1196                         .di = 1,
1197                         .dc = 1,
1198                         .dp = -EINVAL,
1199                         .dma[0] = IPUV3_CHANNEL_MEM_DC_SYNC,
1200                         .dma[1] = -EINVAL,
1201                 },
1202                 .name = "imx-ipuv3-crtc",
1203         },
1204 };
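
/*
 * For reference, the IPU node in the device tree is expected to expose its
 * ports in the same order as client_reg[] above.  A rough sketch only (the
 * devicetree bindings documentation is authoritative; labels and the unit
 * address are example values):
 *
 *      ipu1: ipu@2400000 {
 *              ...
 *              ipu1_csi0: port@0 { ... };      (CSI0 capture)
 *              ipu1_csi1: port@1 { ... };      (CSI1 capture)
 *              ipu1_di0:  port@2 { ... };      (display interface 0)
 *              ipu1_di1:  port@3 { ... };      (display interface 1)
 *      };
 */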
1205
1206 static DEFINE_MUTEX(ipu_client_id_mutex);
1207 static int ipu_client_id;
1208
1209 static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
1210 {
1211         struct device *dev = ipu->dev;
1212         unsigned i;
1213         int id, ret;
1214
1215         mutex_lock(&ipu_client_id_mutex);
1216         id = ipu_client_id;
1217         ipu_client_id += ARRAY_SIZE(client_reg);
1218         mutex_unlock(&ipu_client_id_mutex);
1219
1220         for (i = 0; i < ARRAY_SIZE(client_reg); i++) {
1221                 struct ipu_platform_reg *reg = &client_reg[i];
1222                 struct platform_device *pdev;
1223                 struct device_node *of_node;
1224
1225                 /* Associate subdevice with the corresponding port node */
1226                 of_node = of_graph_get_port_by_id(dev->of_node, i);
1227                 if (!of_node) {
1228                         dev_info(dev,
1229                                  "no port@%d node in %pOF, not using %s%d\n",
1230                                  i, dev->of_node,
1231                                  (i / 2) ? "DI" : "CSI", i % 2);
1232                         continue;
1233                 }
1234
1235                 pdev = platform_device_alloc(reg->name, id++);
1236                 if (!pdev) {
1237                         ret = -ENOMEM;
1238                         goto err_register;
1239                 }
1240
1241                 pdev->dev.parent = dev;
1242
1243                 reg->pdata.of_node = of_node;
1244                 ret = platform_device_add_data(pdev, &reg->pdata,
1245                                                sizeof(reg->pdata));
1246                 if (!ret)
1247                         ret = platform_device_add(pdev);
1248                 if (ret) {
1249                         platform_device_put(pdev);
1250                         goto err_register;
1251                 }
1252         }
1253
1254         return 0;
1255
1256 err_register:
1257         platform_device_unregister_children(to_platform_device(dev));
1258
1259         return ret;
1260 }
1261
1262
1263 static int ipu_irq_init(struct ipu_soc *ipu)
1264 {
1265         struct irq_chip_generic *gc;
1266         struct irq_chip_type *ct;
1267         unsigned long unused[IPU_NUM_IRQS / 32] = {
1268                 0x400100d0, 0xffe000fd,
1269                 0x400100d0, 0xffe000fd,
1270                 0x400100d0, 0xffe000fd,
1271                 0x4077ffff, 0xffe7e1fd,
1272                 0x23fffffe, 0x8880fff0,
1273                 0xf98fe7d0, 0xfff81fff,
1274                 0x400100d0, 0xffe000fd,
1275                 0x00000000,
1276         };
1277         int ret, i;
1278
1279         ipu->domain = irq_domain_add_linear(ipu->dev->of_node, IPU_NUM_IRQS,
1280                                             &irq_generic_chip_ops, ipu);
1281         if (!ipu->domain) {
1282                 dev_err(ipu->dev, "failed to add irq domain\n");
1283                 return -ENODEV;
1284         }
1285
1286         ret = irq_alloc_domain_generic_chips(ipu->domain, 32, 1, "IPU",
1287                                              handle_level_irq, 0, 0, 0);
1288         if (ret < 0) {
1289                 dev_err(ipu->dev, "failed to alloc generic irq chips\n");
1290                 irq_domain_remove(ipu->domain);
1291                 return ret;
1292         }
1293
1294         /* Mask and clear all interrupts */
1295         for (i = 0; i < IPU_NUM_IRQS; i += 32) {
1296                 ipu_cm_write(ipu, 0, IPU_INT_CTRL(i / 32));
1297                 ipu_cm_write(ipu, ~unused[i / 32], IPU_INT_STAT(i / 32));
1298         }
1299
1300         for (i = 0; i < IPU_NUM_IRQS; i += 32) {
1301                 gc = irq_get_domain_generic_chip(ipu->domain, i);
1302                 gc->reg_base = ipu->cm_reg;
1303                 gc->unused = unused[i / 32];
1304                 ct = gc->chip_types;
1305                 ct->chip.irq_ack = irq_gc_ack_set_bit;
1306                 ct->chip.irq_mask = irq_gc_mask_clr_bit;
1307                 ct->chip.irq_unmask = irq_gc_mask_set_bit;
1308                 ct->regs.ack = IPU_INT_STAT(i / 32);
1309                 ct->regs.mask = IPU_INT_CTRL(i / 32);
1310         }
1311
1312         irq_set_chained_handler_and_data(ipu->irq_sync, ipu_irq_handler, ipu);
1313         irq_set_chained_handler_and_data(ipu->irq_err, ipu_err_irq_handler,
1314                                          ipu);
1315
1316         return 0;
1317 }
1318
1319 static void ipu_irq_exit(struct ipu_soc *ipu)
1320 {
1321         int i, irq;
1322
1323         irq_set_chained_handler_and_data(ipu->irq_err, NULL, NULL);
1324         irq_set_chained_handler_and_data(ipu->irq_sync, NULL, NULL);
1325
1326         /* TODO: remove irq_domain_generic_chips */
1327
1328         for (i = 0; i < IPU_NUM_IRQS; i++) {
1329                 irq = irq_linear_revmap(ipu->domain, i);
1330                 if (irq)
1331                         irq_dispose_mapping(irq);
1332         }
1333
1334         irq_domain_remove(ipu->domain);
1335 }
1336
1337 void ipu_dump(struct ipu_soc *ipu)
1338 {
1339         int i;
1340
1341         dev_dbg(ipu->dev, "IPU_CONF = \t0x%08X\n",
1342                 ipu_cm_read(ipu, IPU_CONF));
1343         dev_dbg(ipu->dev, "IDMAC_CONF = \t0x%08X\n",
1344                 ipu_idmac_read(ipu, IDMAC_CONF));
1345         dev_dbg(ipu->dev, "IDMAC_CHA_EN1 = \t0x%08X\n",
1346                 ipu_idmac_read(ipu, IDMAC_CHA_EN(0)));
1347         dev_dbg(ipu->dev, "IDMAC_CHA_EN2 = \t0x%08X\n",
1348                 ipu_idmac_read(ipu, IDMAC_CHA_EN(32)));
1349         dev_dbg(ipu->dev, "IDMAC_CHA_PRI1 = \t0x%08X\n",
1350                 ipu_idmac_read(ipu, IDMAC_CHA_PRI(0)));
1351         dev_dbg(ipu->dev, "IDMAC_CHA_PRI2 = \t0x%08X\n",
1352                 ipu_idmac_read(ipu, IDMAC_CHA_PRI(32)));
1353         dev_dbg(ipu->dev, "IDMAC_BAND_EN1 = \t0x%08X\n",
1354                 ipu_idmac_read(ipu, IDMAC_BAND_EN(0)));
1355         dev_dbg(ipu->dev, "IDMAC_BAND_EN2 = \t0x%08X\n",
1356                 ipu_idmac_read(ipu, IDMAC_BAND_EN(32)));
1357         dev_dbg(ipu->dev, "IPU_CHA_DB_MODE_SEL0 = \t0x%08X\n",
1358                 ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(0)));
1359         dev_dbg(ipu->dev, "IPU_CHA_DB_MODE_SEL1 = \t0x%08X\n",
1360                 ipu_cm_read(ipu, IPU_CHA_DB_MODE_SEL(32)));
1361         dev_dbg(ipu->dev, "IPU_FS_PROC_FLOW1 = \t0x%08X\n",
1362                 ipu_cm_read(ipu, IPU_FS_PROC_FLOW1));
1363         dev_dbg(ipu->dev, "IPU_FS_PROC_FLOW2 = \t0x%08X\n",
1364                 ipu_cm_read(ipu, IPU_FS_PROC_FLOW2));
1365         dev_dbg(ipu->dev, "IPU_FS_PROC_FLOW3 = \t0x%08X\n",
1366                 ipu_cm_read(ipu, IPU_FS_PROC_FLOW3));
1367         dev_dbg(ipu->dev, "IPU_FS_DISP_FLOW1 = \t0x%08X\n",
1368                 ipu_cm_read(ipu, IPU_FS_DISP_FLOW1));
1369         for (i = 0; i < 15; i++)
1370                 dev_dbg(ipu->dev, "IPU_INT_CTRL(%d) = \t%08X\n", i,
1371                         ipu_cm_read(ipu, IPU_INT_CTRL(i)));
1372 }
1373 EXPORT_SYMBOL_GPL(ipu_dump);
1374
1375 static int ipu_probe(struct platform_device *pdev)
1376 {
1377         struct device_node *np = pdev->dev.of_node;
1378         struct ipu_soc *ipu;
1379         struct resource *res;
1380         unsigned long ipu_base;
1381         int ret, irq_sync, irq_err;
1382         const struct ipu_devtype *devtype;
1383
1384         devtype = of_device_get_match_data(&pdev->dev);
1385         if (!devtype)
1386                 return -EINVAL;
1387
1388         irq_sync = platform_get_irq(pdev, 0);
1389         irq_err = platform_get_irq(pdev, 1);
1390         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1391
1392         dev_dbg(&pdev->dev, "irq_sync: %d irq_err: %d\n",
1393                         irq_sync, irq_err);
1394
1395         if (!res || irq_sync < 0 || irq_err < 0)
1396                 return -ENODEV;
1397
1398         ipu_base = res->start;
1399
1400         ipu = devm_kzalloc(&pdev->dev, sizeof(*ipu), GFP_KERNEL);
1401         if (!ipu)
1402                 return -ENODEV;
1403
1404         ipu->id = of_alias_get_id(np, "ipu");
1405         if (ipu->id < 0)
1406                 ipu->id = 0;
1407
1408         if (of_device_is_compatible(np, "fsl,imx6qp-ipu") &&
1409             IS_ENABLED(CONFIG_DRM)) {
1410                 ipu->prg_priv = ipu_prg_lookup_by_phandle(&pdev->dev,
1411                                                           "fsl,prg", ipu->id);
1412                 if (!ipu->prg_priv)
1413                         return -EPROBE_DEFER;
1414         }
1415
1416         ipu->devtype = devtype;
1417         ipu->ipu_type = devtype->type;
1418
1419         spin_lock_init(&ipu->lock);
1420         mutex_init(&ipu->channel_lock);
1421         INIT_LIST_HEAD(&ipu->channels);
1422
1423         dev_dbg(&pdev->dev, "cm_reg:   0x%08lx\n",
1424                         ipu_base + devtype->cm_ofs);
1425         dev_dbg(&pdev->dev, "idmac:    0x%08lx\n",
1426                         ipu_base + devtype->cm_ofs + IPU_CM_IDMAC_REG_OFS);
1427         dev_dbg(&pdev->dev, "cpmem:    0x%08lx\n",
1428                         ipu_base + devtype->cpmem_ofs);
1429         dev_dbg(&pdev->dev, "csi0:    0x%08lx\n",
1430                         ipu_base + devtype->csi0_ofs);
1431         dev_dbg(&pdev->dev, "csi1:    0x%08lx\n",
1432                         ipu_base + devtype->csi1_ofs);
1433         dev_dbg(&pdev->dev, "ic:      0x%08lx\n",
1434                         ipu_base + devtype->ic_ofs);
1435         dev_dbg(&pdev->dev, "disp0:    0x%08lx\n",
1436                         ipu_base + devtype->disp0_ofs);
1437         dev_dbg(&pdev->dev, "disp1:    0x%08lx\n",
1438                         ipu_base + devtype->disp1_ofs);
1439         dev_dbg(&pdev->dev, "srm:      0x%08lx\n",
1440                         ipu_base + devtype->srm_ofs);
1441         dev_dbg(&pdev->dev, "tpm:      0x%08lx\n",
1442                         ipu_base + devtype->tpm_ofs);
1443         dev_dbg(&pdev->dev, "dc:       0x%08lx\n",
1444                         ipu_base + devtype->cm_ofs + IPU_CM_DC_REG_OFS);
1445         dev_dbg(&pdev->dev, "ic:       0x%08lx\n",
1446                         ipu_base + devtype->cm_ofs + IPU_CM_IC_REG_OFS);
1447         dev_dbg(&pdev->dev, "dmfc:     0x%08lx\n",
1448                         ipu_base + devtype->cm_ofs + IPU_CM_DMFC_REG_OFS);
1449         dev_dbg(&pdev->dev, "vdi:      0x%08lx\n",
1450                         ipu_base + devtype->vdi_ofs);
1451
1452         ipu->cm_reg = devm_ioremap(&pdev->dev,
1453                         ipu_base + devtype->cm_ofs, PAGE_SIZE);
1454         ipu->idmac_reg = devm_ioremap(&pdev->dev,
1455                         ipu_base + devtype->cm_ofs + IPU_CM_IDMAC_REG_OFS,
1456                         PAGE_SIZE);
1457
1458         if (!ipu->cm_reg || !ipu->idmac_reg)
1459                 return -ENOMEM;
1460
1461         ipu->clk = devm_clk_get(&pdev->dev, "bus");
1462         if (IS_ERR(ipu->clk)) {
1463                 ret = PTR_ERR(ipu->clk);
1464                 dev_err(&pdev->dev, "clk_get failed with %d\n", ret);
1465                 return ret;
1466         }
1467
1468         platform_set_drvdata(pdev, ipu);
1469
1470         ret = clk_prepare_enable(ipu->clk);
1471         if (ret) {
1472                 dev_err(&pdev->dev, "clk_prepare_enable failed: %d\n", ret);
1473                 return ret;
1474         }
1475
1476         ipu->dev = &pdev->dev;
1477         ipu->irq_sync = irq_sync;
1478         ipu->irq_err = irq_err;
1479
1480         ret = device_reset(&pdev->dev);
1481         if (ret) {
1482                 dev_err(&pdev->dev, "failed to reset: %d\n", ret);
1483                 goto out_failed_reset;
1484         }
1485         ret = ipu_memory_reset(ipu);
1486         if (ret)
1487                 goto out_failed_reset;
1488
1489         ret = ipu_irq_init(ipu);
1490         if (ret)
1491                 goto out_failed_irq;
1492
1493         /* Set MCU_T to divide MCU access window into 2 */
1494         ipu_cm_write(ipu, 0x00400000L | (IPU_MCU_T_DEFAULT << 18),
1495                         IPU_DISP_GEN);
1496
1497         ret = ipu_submodules_init(ipu, pdev, ipu_base, ipu->clk);
1498         if (ret)
1499                 goto failed_submodules_init;
1500
1501         ret = ipu_add_client_devices(ipu, ipu_base);
1502         if (ret) {
1503                 dev_err(&pdev->dev, "adding client devices failed with %d\n",
1504                                 ret);
1505                 goto failed_add_clients;
1506         }
1507
1508         dev_info(&pdev->dev, "%s probed\n", devtype->name);
1509
1510         return 0;
1511
1512 failed_add_clients:
1513         ipu_submodules_exit(ipu);
1514 failed_submodules_init:
1515         ipu_irq_exit(ipu);
1516 out_failed_irq:
1517 out_failed_reset:
1518         clk_disable_unprepare(ipu->clk);
1519         return ret;
1520 }
1521
1522 static int ipu_remove(struct platform_device *pdev)
1523 {
1524         struct ipu_soc *ipu = platform_get_drvdata(pdev);
1525
1526         platform_device_unregister_children(pdev);
1527         ipu_submodules_exit(ipu);
1528         ipu_irq_exit(ipu);
1529
1530         clk_disable_unprepare(ipu->clk);
1531
1532         return 0;
1533 }
1534
1535 static struct platform_driver imx_ipu_driver = {
1536         .driver = {
1537                 .name = "imx-ipuv3",
1538                 .of_match_table = imx_ipu_dt_ids,
1539         },
1540         .probe = ipu_probe,
1541         .remove = ipu_remove,
1542 };
1543
1544 static struct platform_driver * const drivers[] = {
1545 #if IS_ENABLED(CONFIG_DRM)
1546         &ipu_pre_drv,
1547         &ipu_prg_drv,
1548 #endif
1549         &imx_ipu_driver,
1550 };
1551
1552 static int __init imx_ipu_init(void)
1553 {
1554         return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
1555 }
1556 module_init(imx_ipu_init);
1557
1558 static void __exit imx_ipu_exit(void)
1559 {
1560         platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
1561 }
1562 module_exit(imx_ipu_exit);
1563
1564 MODULE_ALIAS("platform:imx-ipuv3");
1565 MODULE_DESCRIPTION("i.MX IPU v3 driver");
1566 MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
1567 MODULE_LICENSE("GPL");