Merge branches 'clk-range', 'clk-uniphier', 'clk-apple' and 'clk-qcom' into clk-next
[linux-2.6-microblaze.git] / drivers / gpu / drm / bridge / nwl-dsi.c
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * i.MX8 NWL MIPI DSI host driver
4  *
5  * Copyright (C) 2017 NXP
6  * Copyright (C) 2020 Purism SPC
7  */
8
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/irq.h>
#include <linux/math64.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/mux/consumer.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/phy/phy.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/reset.h>
#include <linux/string.h>
#include <linux/sys_soc.h>
#include <linux/time64.h>
24
25 #include <drm/drm_atomic_state_helper.h>
26 #include <drm/drm_bridge.h>
27 #include <drm/drm_mipi_dsi.h>
28 #include <drm/drm_of.h>
29 #include <drm/drm_panel.h>
30 #include <drm/drm_print.h>
31
32 #include <video/mipi_display.h>
33
34 #include "nwl-dsi.h"
35
36 #define DRV_NAME "nwl-dsi"
37
38 /* i.MX8 NWL quirks */
39 /* i.MX8MQ errata E11418 */
40 #define E11418_HS_MODE_QUIRK    BIT(0)
41
42 #define NWL_DSI_MIPI_FIFO_TIMEOUT msecs_to_jiffies(500)
43
44 enum transfer_direction {
45         DSI_PACKET_SEND,
46         DSI_PACKET_RECEIVE,
47 };
48
49 #define NWL_DSI_ENDPOINT_LCDIF 0
50 #define NWL_DSI_ENDPOINT_DCSS 1
51
52 struct nwl_dsi_transfer {
53         const struct mipi_dsi_msg *msg;
54         struct mipi_dsi_packet packet;
55         struct completion completed;
56
57         int status; /* status of transmission */
58         enum transfer_direction direction;
59         bool need_bta;
60         u8 cmd;
61         u16 rx_word_count;
62         size_t tx_len; /* in bytes */
63         size_t rx_len; /* in bytes */
64 };
65
66 struct nwl_dsi {
67         struct drm_bridge bridge;
68         struct mipi_dsi_host dsi_host;
69         struct drm_bridge *panel_bridge;
70         struct device *dev;
71         struct phy *phy;
72         union phy_configure_opts phy_cfg;
73         unsigned int quirks;
74
75         struct regmap *regmap;
76         int irq;
77         /*
78          * The DSI host controller needs this reset sequence according to NWL:
79          * 1. Deassert pclk reset to get access to DSI regs
80          * 2. Configure DSI Host and DPHY and enable DPHY
81          * 3. Deassert ESC and BYTE resets to allow host TX operations)
82          * 4. Send DSI cmds to configure peripheral (handled by panel drv)
83          * 5. Deassert DPI reset so DPI receives pixels and starts sending
84          *    DSI data
85          *
86          * TODO: Since panel_bridges do their DSI setup in enable we
87          * currently have 4. and 5. swapped.
88          */
89         struct reset_control *rst_byte;
90         struct reset_control *rst_esc;
91         struct reset_control *rst_dpi;
92         struct reset_control *rst_pclk;
93         struct mux_control *mux;
94
95         /* DSI clocks */
96         struct clk *phy_ref_clk;
97         struct clk *rx_esc_clk;
98         struct clk *tx_esc_clk;
99         struct clk *core_clk;
100         /*
101          * hardware bug: the i.MX8MQ needs this clock on during reset
102          * even when not using LCDIF.
103          */
104         struct clk *lcdif_clk;
105
106         /* dsi lanes */
107         u32 lanes;
108         enum mipi_dsi_pixel_format format;
109         struct drm_display_mode mode;
110         unsigned long dsi_mode_flags;
111         int error;
112
113         struct nwl_dsi_transfer *xfer;
114 };
115
/*
 * Register map description for the DSI host controller: 32-bit
 * registers addressed with 16-bit offsets, 4-byte stride, ending at
 * the last IRQ mask register.
 */
static const struct regmap_config nwl_dsi_regmap_config = {
	.reg_bits = 16,
	.val_bits = 32,
	.reg_stride = 4,
	.max_register = NWL_DSI_IRQ_MASK2,
	.name = DRV_NAME,
};
123
124 static inline struct nwl_dsi *bridge_to_dsi(struct drm_bridge *bridge)
125 {
126         return container_of(bridge, struct nwl_dsi, bridge);
127 }
128
129 static int nwl_dsi_clear_error(struct nwl_dsi *dsi)
130 {
131         int ret = dsi->error;
132
133         dsi->error = 0;
134         return ret;
135 }
136
137 static void nwl_dsi_write(struct nwl_dsi *dsi, unsigned int reg, u32 val)
138 {
139         int ret;
140
141         if (dsi->error)
142                 return;
143
144         ret = regmap_write(dsi->regmap, reg, val);
145         if (ret < 0) {
146                 DRM_DEV_ERROR(dsi->dev,
147                               "Failed to write NWL DSI reg 0x%x: %d\n", reg,
148                               ret);
149                 dsi->error = ret;
150         }
151 }
152
153 static u32 nwl_dsi_read(struct nwl_dsi *dsi, u32 reg)
154 {
155         unsigned int val;
156         int ret;
157
158         if (dsi->error)
159                 return 0;
160
161         ret = regmap_read(dsi->regmap, reg, &val);
162         if (ret < 0) {
163                 DRM_DEV_ERROR(dsi->dev, "Failed to read NWL DSI reg 0x%x: %d\n",
164                               reg, ret);
165                 dsi->error = ret;
166         }
167         return val;
168 }
169
170 static int nwl_dsi_get_dpi_pixel_format(enum mipi_dsi_pixel_format format)
171 {
172         switch (format) {
173         case MIPI_DSI_FMT_RGB565:
174                 return NWL_DSI_PIXEL_FORMAT_16;
175         case MIPI_DSI_FMT_RGB666:
176                 return NWL_DSI_PIXEL_FORMAT_18L;
177         case MIPI_DSI_FMT_RGB666_PACKED:
178                 return NWL_DSI_PIXEL_FORMAT_18;
179         case MIPI_DSI_FMT_RGB888:
180                 return NWL_DSI_PIXEL_FORMAT_24;
181         default:
182                 return -EINVAL;
183         }
184 }
185
/*
 * ps2bc - Picoseconds to byte clock cycles
 *
 * The byte clock runs at pixel_clock * bpp / (lanes * 8).  mode.clock
 * is in kHz (DRM convention) and @ps in picoseconds, so the powers of
 * ten net out to the single NSEC_PER_SEC (1e9) divisor below.  Rounded
 * up so programmed timings never undershoot the requested duration.
 */
static u32 ps2bc(struct nwl_dsi *dsi, unsigned long long ps)
{
	u32 bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);

	return DIV64_U64_ROUND_UP(ps * dsi->mode.clock * bpp,
				  dsi->lanes * 8ULL * NSEC_PER_SEC);
}
196
197 /*
198  * ui2bc - UI time periods to byte clock cycles
199  */
200 static u32 ui2bc(unsigned int ui)
201 {
202         return DIV_ROUND_UP(ui, BITS_PER_BYTE);
203 }
204
205 /*
206  * us2bc - micro seconds to lp clock cycles
207  */
208 static u32 us2lp(u32 lp_clk_rate, unsigned long us)
209 {
210         return DIV_ROUND_UP(us * lp_clk_rate, USEC_PER_SEC);
211 }
212
/*
 * Program the DSI host configuration registers: lane count,
 * (non-)continuous clock behaviour and the timing values derived from
 * the cached DPHY configuration.
 *
 * Returns 0, or the first latched regmap error.
 */
static int nwl_dsi_config_host(struct nwl_dsi *dsi)
{
	u32 cycles;
	struct phy_configure_opts_mipi_dphy *cfg = &dsi->phy_cfg.mipi_dphy;

	if (dsi->lanes < 1 || dsi->lanes > 4)
		return -EINVAL;

	DRM_DEV_DEBUG_DRIVER(dsi->dev, "DSI Lanes %d\n", dsi->lanes);
	nwl_dsi_write(dsi, NWL_DSI_CFG_NUM_LANES, dsi->lanes - 1);

	/* EoTp auto-insertion is enabled together with non-continuous clock */
	if (dsi->dsi_mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) {
		nwl_dsi_write(dsi, NWL_DSI_CFG_NONCONTINUOUS_CLK, 0x01);
		nwl_dsi_write(dsi, NWL_DSI_CFG_AUTOINSERT_EOTP, 0x01);
	} else {
		nwl_dsi_write(dsi, NWL_DSI_CFG_NONCONTINUOUS_CLK, 0x00);
		nwl_dsi_write(dsi, NWL_DSI_CFG_AUTOINSERT_EOTP, 0x00);
	}

	/* values in byte clock cycles */
	cycles = ui2bc(cfg->clk_pre);
	DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_t_pre: 0x%x\n", cycles);
	nwl_dsi_write(dsi, NWL_DSI_CFG_T_PRE, cycles);
	/* T_POST = LP->HS transition time plus the clk_pre contribution */
	cycles = ps2bc(dsi, cfg->lpx + cfg->clk_prepare + cfg->clk_zero);
	DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_tx_gap (pre): 0x%x\n", cycles);
	cycles += ui2bc(cfg->clk_pre);
	DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_t_post: 0x%x\n", cycles);
	nwl_dsi_write(dsi, NWL_DSI_CFG_T_POST, cycles);
	cycles = ps2bc(dsi, cfg->hs_exit);
	DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_tx_gap: 0x%x\n", cycles);
	nwl_dsi_write(dsi, NWL_DSI_CFG_TX_GAP, cycles);

	nwl_dsi_write(dsi, NWL_DSI_CFG_EXTRA_CMDS_AFTER_EOTP, 0x01);
	/* Zero the HS TX / LP RX / BTA timeout counters */
	nwl_dsi_write(dsi, NWL_DSI_CFG_HTX_TO_COUNT, 0x00);
	nwl_dsi_write(dsi, NWL_DSI_CFG_LRX_H_TO_COUNT, 0x00);
	nwl_dsi_write(dsi, NWL_DSI_CFG_BTA_H_TO_COUNT, 0x00);
	/* In LP clock cycles */
	cycles = us2lp(cfg->lp_clk_rate, cfg->wakeup);
	DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_twakeup: 0x%x\n", cycles);
	nwl_dsi_write(dsi, NWL_DSI_CFG_TWAKEUP, cycles);

	return nwl_dsi_clear_error(dsi);
}
256
/*
 * Program the DPI input interface: color coding, sync polarities,
 * video mode (burst vs. sync pulses/events) and the horizontal and
 * vertical timing registers derived from the cached display mode.
 *
 * Returns 0, or the first latched regmap error.
 */
static int nwl_dsi_config_dpi(struct nwl_dsi *dsi)
{
	u32 mode;
	int color_format;
	bool burst_mode;
	int hfront_porch, hback_porch, vfront_porch, vback_porch;
	int hsync_len, vsync_len;

	/* Derive porch/sync lengths from the mode's absolute timings */
	hfront_porch = dsi->mode.hsync_start - dsi->mode.hdisplay;
	hsync_len = dsi->mode.hsync_end - dsi->mode.hsync_start;
	hback_porch = dsi->mode.htotal - dsi->mode.hsync_end;

	vfront_porch = dsi->mode.vsync_start - dsi->mode.vdisplay;
	vsync_len = dsi->mode.vsync_end - dsi->mode.vsync_start;
	vback_porch = dsi->mode.vtotal - dsi->mode.vsync_end;

	DRM_DEV_DEBUG_DRIVER(dsi->dev, "hfront_porch = %d\n", hfront_porch);
	DRM_DEV_DEBUG_DRIVER(dsi->dev, "hback_porch = %d\n", hback_porch);
	DRM_DEV_DEBUG_DRIVER(dsi->dev, "hsync_len = %d\n", hsync_len);
	DRM_DEV_DEBUG_DRIVER(dsi->dev, "hdisplay = %d\n", dsi->mode.hdisplay);
	DRM_DEV_DEBUG_DRIVER(dsi->dev, "vfront_porch = %d\n", vfront_porch);
	DRM_DEV_DEBUG_DRIVER(dsi->dev, "vback_porch = %d\n", vback_porch);
	DRM_DEV_DEBUG_DRIVER(dsi->dev, "vsync_len = %d\n", vsync_len);
	DRM_DEV_DEBUG_DRIVER(dsi->dev, "vactive = %d\n", dsi->mode.vdisplay);
	DRM_DEV_DEBUG_DRIVER(dsi->dev, "clock = %d kHz\n", dsi->mode.clock);

	color_format = nwl_dsi_get_dpi_pixel_format(dsi->format);
	if (color_format < 0) {
		DRM_DEV_ERROR(dsi->dev, "Invalid color format 0x%x\n",
			      dsi->format);
		return color_format;
	}
	DRM_DEV_DEBUG_DRIVER(dsi->dev, "pixel fmt = %d\n", dsi->format);

	nwl_dsi_write(dsi, NWL_DSI_INTERFACE_COLOR_CODING, NWL_DSI_DPI_24_BIT);
	nwl_dsi_write(dsi, NWL_DSI_PIXEL_FORMAT, color_format);
	/*
	 * Adjusting input polarity based on the video mode results in
	 * a black screen so always pick active low:
	 */
	nwl_dsi_write(dsi, NWL_DSI_VSYNC_POLARITY,
		      NWL_DSI_VSYNC_POLARITY_ACTIVE_LOW);
	nwl_dsi_write(dsi, NWL_DSI_HSYNC_POLARITY,
		      NWL_DSI_HSYNC_POLARITY_ACTIVE_LOW);

	/* Burst only when requested without sync pulses */
	burst_mode = (dsi->dsi_mode_flags & MIPI_DSI_MODE_VIDEO_BURST) &&
		     !(dsi->dsi_mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE);

	if (burst_mode) {
		nwl_dsi_write(dsi, NWL_DSI_VIDEO_MODE, NWL_DSI_VM_BURST_MODE);
		nwl_dsi_write(dsi, NWL_DSI_PIXEL_FIFO_SEND_LEVEL, 256);
	} else {
		mode = ((dsi->dsi_mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) ?
				NWL_DSI_VM_BURST_MODE_WITH_SYNC_PULSES :
				NWL_DSI_VM_NON_BURST_MODE_WITH_SYNC_EVENTS);
		nwl_dsi_write(dsi, NWL_DSI_VIDEO_MODE, mode);
		nwl_dsi_write(dsi, NWL_DSI_PIXEL_FIFO_SEND_LEVEL,
			      dsi->mode.hdisplay);
	}

	nwl_dsi_write(dsi, NWL_DSI_HFP, hfront_porch);
	nwl_dsi_write(dsi, NWL_DSI_HBP, hback_porch);
	nwl_dsi_write(dsi, NWL_DSI_HSA, hsync_len);

	nwl_dsi_write(dsi, NWL_DSI_ENABLE_MULT_PKTS, 0x0);
	nwl_dsi_write(dsi, NWL_DSI_BLLP_MODE, 0x1);
	nwl_dsi_write(dsi, NWL_DSI_USE_NULL_PKT_BLLP, 0x0);
	nwl_dsi_write(dsi, NWL_DSI_VC, 0x0);

	nwl_dsi_write(dsi, NWL_DSI_PIXEL_PAYLOAD_SIZE, dsi->mode.hdisplay);
	nwl_dsi_write(dsi, NWL_DSI_VACTIVE, dsi->mode.vdisplay - 1);
	nwl_dsi_write(dsi, NWL_DSI_VBP, vback_porch);
	nwl_dsi_write(dsi, NWL_DSI_VFP, vfront_porch);

	return nwl_dsi_clear_error(dsi);
}
333
334 static int nwl_dsi_init_interrupts(struct nwl_dsi *dsi)
335 {
336         u32 irq_enable;
337
338         nwl_dsi_write(dsi, NWL_DSI_IRQ_MASK, 0xffffffff);
339         nwl_dsi_write(dsi, NWL_DSI_IRQ_MASK2, 0x7);
340
341         irq_enable = ~(u32)(NWL_DSI_TX_PKT_DONE_MASK |
342                             NWL_DSI_RX_PKT_HDR_RCVD_MASK |
343                             NWL_DSI_TX_FIFO_OVFLW_MASK |
344                             NWL_DSI_HS_TX_TIMEOUT_MASK);
345
346         nwl_dsi_write(dsi, NWL_DSI_IRQ_MASK, irq_enable);
347
348         return nwl_dsi_clear_error(dsi);
349 }
350
351 static int nwl_dsi_host_attach(struct mipi_dsi_host *dsi_host,
352                                struct mipi_dsi_device *device)
353 {
354         struct nwl_dsi *dsi = container_of(dsi_host, struct nwl_dsi, dsi_host);
355         struct device *dev = dsi->dev;
356
357         DRM_DEV_INFO(dev, "lanes=%u, format=0x%x flags=0x%lx\n", device->lanes,
358                      device->format, device->mode_flags);
359
360         if (device->lanes < 1 || device->lanes > 4)
361                 return -EINVAL;
362
363         dsi->lanes = device->lanes;
364         dsi->format = device->format;
365         dsi->dsi_mode_flags = device->mode_flags;
366
367         return 0;
368 }
369
/*
 * Consume RX status from the IRQ path and fill the transfer's rx_buf.
 *
 * Called repeatedly: the first pass parses the RX packet header (short
 * read responses are answered entirely from the header's word-count
 * field), subsequent passes drain the payload FIFO once the
 * payload-received status bit is set.
 *
 * Returns true when the transfer is finished (xfer->status then holds
 * the received byte count or a negative error), false when further
 * interrupts are needed to complete it.
 */
static bool nwl_dsi_read_packet(struct nwl_dsi *dsi, u32 status)
{
	struct device *dev = dsi->dev;
	struct nwl_dsi_transfer *xfer = dsi->xfer;
	int err;
	u8 *payload = xfer->msg->rx_buf;
	u32 val;
	u16 word_count;
	u8 channel;
	u8 data_type;

	xfer->status = 0;

	/* rx_word_count == 0 means the header has not been parsed yet */
	if (xfer->rx_word_count == 0) {
		if (!(status & NWL_DSI_RX_PKT_HDR_RCVD))
			return false;
		/* Get the RX header and parse it */
		val = nwl_dsi_read(dsi, NWL_DSI_RX_PKT_HEADER);
		err = nwl_dsi_clear_error(dsi);
		if (err)
			xfer->status = err;
		word_count = NWL_DSI_WC(val);
		channel = NWL_DSI_RX_VC(val);
		data_type = NWL_DSI_RX_DT(val);

		if (channel != xfer->msg->channel) {
			DRM_DEV_ERROR(dev,
				      "[%02X] Channel mismatch (%u != %u)\n",
				      xfer->cmd, channel, xfer->msg->channel);
			xfer->status = -EINVAL;
			return true;
		}

		switch (data_type) {
		/* Short responses carry their data in the word-count field */
		case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE:
		case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE:
			if (xfer->msg->rx_len > 1) {
				/* read second byte */
				payload[1] = word_count >> 8;
				++xfer->rx_len;
			}
			fallthrough;
		case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE:
		case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
			if (xfer->msg->rx_len > 0) {
				/* read first byte */
				payload[0] = word_count & 0xff;
				++xfer->rx_len;
			}
			xfer->status = xfer->rx_len;
			return true;
		case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
			word_count &= 0xff;
			DRM_DEV_ERROR(dev, "[%02X] DSI error report: 0x%02x\n",
				      xfer->cmd, word_count);
			xfer->status = -EPROTO;
			return true;
		}

		/* Long response: word_count is the payload length in bytes */
		if (word_count > xfer->msg->rx_len) {
			DRM_DEV_ERROR(dev,
				"[%02X] Receive buffer too small: %zu (< %u)\n",
				xfer->cmd, xfer->msg->rx_len, word_count);
			xfer->status = -EINVAL;
			return true;
		}

		xfer->rx_word_count = word_count;
	} else {
		/* Set word_count from previous header read */
		word_count = xfer->rx_word_count;
	}

	/* If RX payload is not yet received, wait for it */
	if (!(status & NWL_DSI_RX_PKT_PAYLOAD_DATA_RCVD))
		return false;

	/* Read the RX payload */
	while (word_count >= 4) {
		val = nwl_dsi_read(dsi, NWL_DSI_RX_PAYLOAD);
		payload[0] = (val >> 0) & 0xff;
		payload[1] = (val >> 8) & 0xff;
		payload[2] = (val >> 16) & 0xff;
		payload[3] = (val >> 24) & 0xff;
		payload += 4;
		xfer->rx_len += 4;
		word_count -= 4;
	}

	/* Unpack the 1..3 trailing bytes from the last FIFO word */
	if (word_count > 0) {
		val = nwl_dsi_read(dsi, NWL_DSI_RX_PAYLOAD);
		switch (word_count) {
		case 3:
			payload[2] = (val >> 16) & 0xff;
			++xfer->rx_len;
			fallthrough;
		case 2:
			payload[1] = (val >> 8) & 0xff;
			++xfer->rx_len;
			fallthrough;
		case 1:
			payload[0] = (val >> 0) & 0xff;
			++xfer->rx_len;
			break;
		}
	}

	xfer->status = xfer->rx_len;
	err = nwl_dsi_clear_error(dsi);
	if (err)
		xfer->status = err;

	return true;
}
484
485 static void nwl_dsi_finish_transmission(struct nwl_dsi *dsi, u32 status)
486 {
487         struct nwl_dsi_transfer *xfer = dsi->xfer;
488         bool end_packet = false;
489
490         if (!xfer)
491                 return;
492
493         if (xfer->direction == DSI_PACKET_SEND &&
494             status & NWL_DSI_TX_PKT_DONE) {
495                 xfer->status = xfer->tx_len;
496                 end_packet = true;
497         } else if (status & NWL_DSI_DPHY_DIRECTION &&
498                    ((status & (NWL_DSI_RX_PKT_HDR_RCVD |
499                                NWL_DSI_RX_PKT_PAYLOAD_DATA_RCVD)))) {
500                 end_packet = nwl_dsi_read_packet(dsi, status);
501         }
502
503         if (end_packet)
504                 complete(&xfer->completed);
505 }
506
507 static void nwl_dsi_begin_transmission(struct nwl_dsi *dsi)
508 {
509         struct nwl_dsi_transfer *xfer = dsi->xfer;
510         struct mipi_dsi_packet *pkt = &xfer->packet;
511         const u8 *payload;
512         size_t length;
513         u16 word_count;
514         u8 hs_mode;
515         u32 val;
516         u32 hs_workaround = 0;
517
518         /* Send the payload, if any */
519         length = pkt->payload_length;
520         payload = pkt->payload;
521
522         while (length >= 4) {
523                 val = *(u32 *)payload;
524                 hs_workaround |= !(val & 0xFFFF00);
525                 nwl_dsi_write(dsi, NWL_DSI_TX_PAYLOAD, val);
526                 payload += 4;
527                 length -= 4;
528         }
529         /* Send the rest of the payload */
530         val = 0;
531         switch (length) {
532         case 3:
533                 val |= payload[2] << 16;
534                 fallthrough;
535         case 2:
536                 val |= payload[1] << 8;
537                 hs_workaround |= !(val & 0xFFFF00);
538                 fallthrough;
539         case 1:
540                 val |= payload[0];
541                 nwl_dsi_write(dsi, NWL_DSI_TX_PAYLOAD, val);
542                 break;
543         }
544         xfer->tx_len = pkt->payload_length;
545
546         /*
547          * Send the header
548          * header[0] = Virtual Channel + Data Type
549          * header[1] = Word Count LSB (LP) or first param (SP)
550          * header[2] = Word Count MSB (LP) or second param (SP)
551          */
552         word_count = pkt->header[1] | (pkt->header[2] << 8);
553         if (hs_workaround && (dsi->quirks & E11418_HS_MODE_QUIRK)) {
554                 DRM_DEV_DEBUG_DRIVER(dsi->dev,
555                                      "Using hs mode workaround for cmd 0x%x\n",
556                                      xfer->cmd);
557                 hs_mode = 1;
558         } else {
559                 hs_mode = (xfer->msg->flags & MIPI_DSI_MSG_USE_LPM) ? 0 : 1;
560         }
561         val = NWL_DSI_WC(word_count) | NWL_DSI_TX_VC(xfer->msg->channel) |
562               NWL_DSI_TX_DT(xfer->msg->type) | NWL_DSI_HS_SEL(hs_mode) |
563               NWL_DSI_BTA_TX(xfer->need_bta);
564         nwl_dsi_write(dsi, NWL_DSI_PKT_CONTROL, val);
565
566         /* Send packet command */
567         nwl_dsi_write(dsi, NWL_DSI_SEND_PACKET, 0x1);
568 }
569
570 static ssize_t nwl_dsi_host_transfer(struct mipi_dsi_host *dsi_host,
571                                      const struct mipi_dsi_msg *msg)
572 {
573         struct nwl_dsi *dsi = container_of(dsi_host, struct nwl_dsi, dsi_host);
574         struct nwl_dsi_transfer xfer;
575         ssize_t ret = 0;
576
577         /* Create packet to be sent */
578         dsi->xfer = &xfer;
579         ret = mipi_dsi_create_packet(&xfer.packet, msg);
580         if (ret < 0) {
581                 dsi->xfer = NULL;
582                 return ret;
583         }
584
585         if ((msg->type & MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM ||
586              msg->type & MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM ||
587              msg->type & MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM ||
588              msg->type & MIPI_DSI_DCS_READ) &&
589             msg->rx_len > 0 && msg->rx_buf)
590                 xfer.direction = DSI_PACKET_RECEIVE;
591         else
592                 xfer.direction = DSI_PACKET_SEND;
593
594         xfer.need_bta = (xfer.direction == DSI_PACKET_RECEIVE);
595         xfer.need_bta |= (msg->flags & MIPI_DSI_MSG_REQ_ACK) ? 1 : 0;
596         xfer.msg = msg;
597         xfer.status = -ETIMEDOUT;
598         xfer.rx_word_count = 0;
599         xfer.rx_len = 0;
600         xfer.cmd = 0x00;
601         if (msg->tx_len > 0)
602                 xfer.cmd = ((u8 *)(msg->tx_buf))[0];
603         init_completion(&xfer.completed);
604
605         ret = clk_prepare_enable(dsi->rx_esc_clk);
606         if (ret < 0) {
607                 DRM_DEV_ERROR(dsi->dev, "Failed to enable rx_esc clk: %zd\n",
608                               ret);
609                 return ret;
610         }
611         DRM_DEV_DEBUG_DRIVER(dsi->dev, "Enabled rx_esc clk @%lu Hz\n",
612                              clk_get_rate(dsi->rx_esc_clk));
613
614         /* Initiate the DSI packet transmision */
615         nwl_dsi_begin_transmission(dsi);
616
617         if (!wait_for_completion_timeout(&xfer.completed,
618                                          NWL_DSI_MIPI_FIFO_TIMEOUT)) {
619                 DRM_DEV_ERROR(dsi_host->dev, "[%02X] DSI transfer timed out\n",
620                               xfer.cmd);
621                 ret = -ETIMEDOUT;
622         } else {
623                 ret = xfer.status;
624         }
625
626         clk_disable_unprepare(dsi->rx_esc_clk);
627
628         return ret;
629 }
630
/* Host operations exposed to attached DSI peripherals */
static const struct mipi_dsi_host_ops nwl_dsi_host_ops = {
	.attach = nwl_dsi_host_attach,
	.transfer = nwl_dsi_host_transfer,
};
635
636 static irqreturn_t nwl_dsi_irq_handler(int irq, void *data)
637 {
638         u32 irq_status;
639         struct nwl_dsi *dsi = data;
640
641         irq_status = nwl_dsi_read(dsi, NWL_DSI_IRQ_STATUS);
642
643         if (irq_status & NWL_DSI_TX_FIFO_OVFLW)
644                 DRM_DEV_ERROR_RATELIMITED(dsi->dev, "tx fifo overflow\n");
645
646         if (irq_status & NWL_DSI_HS_TX_TIMEOUT)
647                 DRM_DEV_ERROR_RATELIMITED(dsi->dev, "HS tx timeout\n");
648
649         if (irq_status & NWL_DSI_TX_PKT_DONE ||
650             irq_status & NWL_DSI_RX_PKT_HDR_RCVD ||
651             irq_status & NWL_DSI_RX_PKT_PAYLOAD_DATA_RCVD)
652                 nwl_dsi_finish_transmission(dsi, irq_status);
653
654         return IRQ_HANDLED;
655 }
656
/*
 * Bring up the DSI host for the cached mode: initialize and configure
 * the PHY, enable the TX escape clock, program the host and DPI
 * registers, power the PHY on and unmask the interrupts.  Unwinds all
 * acquired resources on failure.
 */
static int nwl_dsi_mode_set(struct nwl_dsi *dsi)
{
	struct device *dev = dsi->dev;
	union phy_configure_opts *phy_cfg = &dsi->phy_cfg;
	int ret;

	/* Lane count is set in the host attach callback */
	if (!dsi->lanes) {
		DRM_DEV_ERROR(dev, "Need DSI lanes: %d\n", dsi->lanes);
		return -EINVAL;
	}

	ret = phy_init(dsi->phy);
	if (ret < 0) {
		DRM_DEV_ERROR(dev, "Failed to init DSI phy: %d\n", ret);
		return ret;
	}

	ret = phy_configure(dsi->phy, phy_cfg);
	if (ret < 0) {
		DRM_DEV_ERROR(dev, "Failed to configure DSI phy: %d\n", ret);
		goto uninit_phy;
	}

	ret = clk_prepare_enable(dsi->tx_esc_clk);
	if (ret < 0) {
		DRM_DEV_ERROR(dsi->dev, "Failed to enable tx_esc clk: %d\n",
			      ret);
		goto uninit_phy;
	}
	DRM_DEV_DEBUG_DRIVER(dsi->dev, "Enabled tx_esc clk @%lu Hz\n",
			     clk_get_rate(dsi->tx_esc_clk));

	ret = nwl_dsi_config_host(dsi);
	if (ret < 0) {
		DRM_DEV_ERROR(dev, "Failed to set up DSI: %d", ret);
		goto disable_clock;
	}

	ret = nwl_dsi_config_dpi(dsi);
	if (ret < 0) {
		DRM_DEV_ERROR(dev, "Failed to set up DPI: %d", ret);
		goto disable_clock;
	}

	ret = phy_power_on(dsi->phy);
	if (ret < 0) {
		DRM_DEV_ERROR(dev, "Failed to power on DPHY (%d)\n", ret);
		goto disable_clock;
	}

	ret = nwl_dsi_init_interrupts(dsi);
	if (ret < 0)
		goto power_off_phy;

	return ret;

/* Error unwind, in reverse acquisition order */
power_off_phy:
	phy_power_off(dsi->phy);
disable_clock:
	clk_disable_unprepare(dsi->tx_esc_clk);
uninit_phy:
	phy_exit(dsi->phy);

	return ret;
}
722
723 static int nwl_dsi_disable(struct nwl_dsi *dsi)
724 {
725         struct device *dev = dsi->dev;
726
727         DRM_DEV_DEBUG_DRIVER(dev, "Disabling clocks and phy\n");
728
729         phy_power_off(dsi->phy);
730         phy_exit(dsi->phy);
731
732         /* Disabling the clock before the phy breaks enabling dsi again */
733         clk_disable_unprepare(dsi->tx_esc_clk);
734
735         return 0;
736 }
737
738 static void
739 nwl_dsi_bridge_atomic_disable(struct drm_bridge *bridge,
740                               struct drm_bridge_state *old_bridge_state)
741 {
742         struct nwl_dsi *dsi = bridge_to_dsi(bridge);
743         int ret;
744
745         nwl_dsi_disable(dsi);
746
747         ret = reset_control_assert(dsi->rst_dpi);
748         if (ret < 0) {
749                 DRM_DEV_ERROR(dsi->dev, "Failed to assert DPI: %d\n", ret);
750                 return;
751         }
752         ret = reset_control_assert(dsi->rst_byte);
753         if (ret < 0) {
754                 DRM_DEV_ERROR(dsi->dev, "Failed to assert ESC: %d\n", ret);
755                 return;
756         }
757         ret = reset_control_assert(dsi->rst_esc);
758         if (ret < 0) {
759                 DRM_DEV_ERROR(dsi->dev, "Failed to assert BYTE: %d\n", ret);
760                 return;
761         }
762         ret = reset_control_assert(dsi->rst_pclk);
763         if (ret < 0) {
764                 DRM_DEV_ERROR(dsi->dev, "Failed to assert PCLK: %d\n", ret);
765                 return;
766         }
767
768         clk_disable_unprepare(dsi->core_clk);
769         clk_disable_unprepare(dsi->lcdif_clk);
770
771         pm_runtime_put(dsi->dev);
772 }
773
774 static int nwl_dsi_get_dphy_params(struct nwl_dsi *dsi,
775                                    const struct drm_display_mode *mode,
776                                    union phy_configure_opts *phy_opts)
777 {
778         unsigned long rate;
779         int ret;
780
781         if (dsi->lanes < 1 || dsi->lanes > 4)
782                 return -EINVAL;
783
784         /*
785          * So far the DPHY spec minimal timings work for both mixel
786          * dphy and nwl dsi host
787          */
788         ret = phy_mipi_dphy_get_default_config(mode->clock * 1000,
789                 mipi_dsi_pixel_format_to_bpp(dsi->format), dsi->lanes,
790                 &phy_opts->mipi_dphy);
791         if (ret < 0)
792                 return ret;
793
794         rate = clk_get_rate(dsi->tx_esc_clk);
795         DRM_DEV_DEBUG_DRIVER(dsi->dev, "LP clk is @%lu Hz\n", rate);
796         phy_opts->mipi_dphy.lp_clk_rate = rate;
797
798         return 0;
799 }
800
801 static enum drm_mode_status
802 nwl_dsi_bridge_mode_valid(struct drm_bridge *bridge,
803                           const struct drm_display_info *info,
804                           const struct drm_display_mode *mode)
805 {
806         struct nwl_dsi *dsi = bridge_to_dsi(bridge);
807         int bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);
808
809         if (mode->clock * bpp > 15000000 * dsi->lanes)
810                 return MODE_CLOCK_HIGH;
811
812         if (mode->clock * bpp < 80000 * dsi->lanes)
813                 return MODE_CLOCK_LOW;
814
815         return MODE_OK;
816 }
817
818 static int nwl_dsi_bridge_atomic_check(struct drm_bridge *bridge,
819                                        struct drm_bridge_state *bridge_state,
820                                        struct drm_crtc_state *crtc_state,
821                                        struct drm_connector_state *conn_state)
822 {
823         struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
824
825         /* At least LCDIF + NWL needs active high sync */
826         adjusted_mode->flags |= (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
827         adjusted_mode->flags &= ~(DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC);
828
829         /*
830          * Do a full modeset if crtc_state->active is changed to be true.
831          * This ensures our ->mode_set() is called to get the DSI controller
832          * and the PHY ready to send DCS commands, when only the connector's
833          * DPMS is brought out of "Off" status.
834          */
835         if (crtc_state->active_changed && crtc_state->active)
836                 crtc_state->mode_changed = true;
837
838         return 0;
839 }
840
/*
 * Program the host for a new mode: compute and cache the D-PHY settings,
 * cache the adjusted mode, power up, enable the pixel/core clocks and walk
 * the documented DSI reset-out sequence (steps 1-3; step 5 happens in
 * ->atomic_enable()).
 */
static void
nwl_dsi_bridge_mode_set(struct drm_bridge *bridge,
			const struct drm_display_mode *mode,
			const struct drm_display_mode *adjusted_mode)
{
	struct nwl_dsi *dsi = bridge_to_dsi(bridge);
	struct device *dev = dsi->dev;
	union phy_configure_opts new_cfg;
	unsigned long phy_ref_rate;
	int ret;

	/* Derive the D-PHY configuration for the adjusted mode first. */
	ret = nwl_dsi_get_dphy_params(dsi, adjusted_mode, &new_cfg);
	if (ret < 0)
		return;

	phy_ref_rate = clk_get_rate(dsi->phy_ref_clk);
	DRM_DEV_DEBUG_DRIVER(dev, "PHY at ref rate: %lu\n", phy_ref_rate);
	/* Save the new desired phy config */
	memcpy(&dsi->phy_cfg, &new_cfg, sizeof(new_cfg));

	/* Cache the mode; it is consumed later when the host is programmed. */
	memcpy(&dsi->mode, adjusted_mode, sizeof(dsi->mode));
	drm_mode_debug_printmodeline(adjusted_mode);

	pm_runtime_get_sync(dev);

	/*
	 * NOTE(review): the early returns below leave the runtime-PM
	 * reference (and, for the later ones, already-enabled clocks)
	 * held. Confirm the disable path balances this, or add explicit
	 * cleanup on these error paths.
	 */
	if (clk_prepare_enable(dsi->lcdif_clk) < 0)
		return;
	if (clk_prepare_enable(dsi->core_clk) < 0)
		return;

	/* Step 1 from DSI reset-out instructions */
	ret = reset_control_deassert(dsi->rst_pclk);
	if (ret < 0) {
		DRM_DEV_ERROR(dev, "Failed to deassert PCLK: %d\n", ret);
		return;
	}

	/* Step 2 from DSI reset-out instructions */
	nwl_dsi_mode_set(dsi);

	/* Step 3 from DSI reset-out instructions */
	ret = reset_control_deassert(dsi->rst_esc);
	if (ret < 0) {
		DRM_DEV_ERROR(dev, "Failed to deassert ESC: %d\n", ret);
		return;
	}
	ret = reset_control_deassert(dsi->rst_byte);
	if (ret < 0) {
		DRM_DEV_ERROR(dev, "Failed to deassert BYTE: %d\n", ret);
		return;
	}
}
893
894 static void
895 nwl_dsi_bridge_atomic_enable(struct drm_bridge *bridge,
896                              struct drm_bridge_state *old_bridge_state)
897 {
898         struct nwl_dsi *dsi = bridge_to_dsi(bridge);
899         int ret;
900
901         /* Step 5 from DSI reset-out instructions */
902         ret = reset_control_deassert(dsi->rst_dpi);
903         if (ret < 0)
904                 DRM_DEV_ERROR(dsi->dev, "Failed to deassert DPI: %d\n", ret);
905 }
906
907 static int nwl_dsi_bridge_attach(struct drm_bridge *bridge,
908                                  enum drm_bridge_attach_flags flags)
909 {
910         struct nwl_dsi *dsi = bridge_to_dsi(bridge);
911         struct drm_bridge *panel_bridge;
912         struct drm_panel *panel;
913         int ret;
914
915         ret = drm_of_find_panel_or_bridge(dsi->dev->of_node, 1, 0, &panel,
916                                           &panel_bridge);
917         if (ret)
918                 return ret;
919
920         if (panel) {
921                 panel_bridge = drm_panel_bridge_add(panel);
922                 if (IS_ERR(panel_bridge))
923                         return PTR_ERR(panel_bridge);
924         }
925         dsi->panel_bridge = panel_bridge;
926
927         if (!dsi->panel_bridge)
928                 return -EPROBE_DEFER;
929
930         return drm_bridge_attach(bridge->encoder, dsi->panel_bridge, bridge,
931                                  flags);
932 }
933
934 static void nwl_dsi_bridge_detach(struct drm_bridge *bridge)
935 {       struct nwl_dsi *dsi = bridge_to_dsi(bridge);
936
937         drm_of_panel_bridge_remove(dsi->dev->of_node, 1, 0);
938 }
939
940 static u32 *nwl_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
941                                                  struct drm_bridge_state *bridge_state,
942                                                  struct drm_crtc_state *crtc_state,
943                                                  struct drm_connector_state *conn_state,
944                                                  u32 output_fmt,
945                                                  unsigned int *num_input_fmts)
946 {
947         u32 *input_fmts, input_fmt;
948
949         *num_input_fmts = 0;
950
951         switch (output_fmt) {
952         /* If MEDIA_BUS_FMT_FIXED is tested, return default bus format */
953         case MEDIA_BUS_FMT_FIXED:
954                 input_fmt = MEDIA_BUS_FMT_RGB888_1X24;
955                 break;
956         case MEDIA_BUS_FMT_RGB888_1X24:
957         case MEDIA_BUS_FMT_RGB666_1X18:
958         case MEDIA_BUS_FMT_RGB565_1X16:
959                 input_fmt = output_fmt;
960                 break;
961         default:
962                 return NULL;
963         }
964
965         input_fmts = kcalloc(1, sizeof(*input_fmts), GFP_KERNEL);
966         if (!input_fmts)
967                 return NULL;
968         input_fmts[0] = input_fmt;
969         *num_input_fmts = 1;
970
971         return input_fmts;
972 }
973
/*
 * Bridge operations: atomic state management is delegated to the generic
 * DRM helpers; mode validation/programming and downstream panel/bridge
 * chaining are implemented by the functions above.
 */
static const struct drm_bridge_funcs nwl_dsi_bridge_funcs = {
	.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_bridge_destroy_state,
	.atomic_reset		= drm_atomic_helper_bridge_reset,
	.atomic_check		= nwl_dsi_bridge_atomic_check,
	.atomic_enable		= nwl_dsi_bridge_atomic_enable,
	.atomic_disable		= nwl_dsi_bridge_atomic_disable,
	.atomic_get_input_bus_fmts = nwl_bridge_atomic_get_input_bus_fmts,
	.mode_set		= nwl_dsi_bridge_mode_set,
	.mode_valid		= nwl_dsi_bridge_mode_valid,
	.attach			= nwl_dsi_bridge_attach,
	.detach			= nwl_dsi_bridge_detach,
};
987
988 static int nwl_dsi_parse_dt(struct nwl_dsi *dsi)
989 {
990         struct platform_device *pdev = to_platform_device(dsi->dev);
991         struct clk *clk;
992         void __iomem *base;
993         int ret;
994
995         dsi->phy = devm_phy_get(dsi->dev, "dphy");
996         if (IS_ERR(dsi->phy)) {
997                 ret = PTR_ERR(dsi->phy);
998                 if (ret != -EPROBE_DEFER)
999                         DRM_DEV_ERROR(dsi->dev, "Could not get PHY: %d\n", ret);
1000                 return ret;
1001         }
1002
1003         clk = devm_clk_get(dsi->dev, "lcdif");
1004         if (IS_ERR(clk)) {
1005                 ret = PTR_ERR(clk);
1006                 DRM_DEV_ERROR(dsi->dev, "Failed to get lcdif clock: %d\n",
1007                               ret);
1008                 return ret;
1009         }
1010         dsi->lcdif_clk = clk;
1011
1012         clk = devm_clk_get(dsi->dev, "core");
1013         if (IS_ERR(clk)) {
1014                 ret = PTR_ERR(clk);
1015                 DRM_DEV_ERROR(dsi->dev, "Failed to get core clock: %d\n",
1016                               ret);
1017                 return ret;
1018         }
1019         dsi->core_clk = clk;
1020
1021         clk = devm_clk_get(dsi->dev, "phy_ref");
1022         if (IS_ERR(clk)) {
1023                 ret = PTR_ERR(clk);
1024                 DRM_DEV_ERROR(dsi->dev, "Failed to get phy_ref clock: %d\n",
1025                               ret);
1026                 return ret;
1027         }
1028         dsi->phy_ref_clk = clk;
1029
1030         clk = devm_clk_get(dsi->dev, "rx_esc");
1031         if (IS_ERR(clk)) {
1032                 ret = PTR_ERR(clk);
1033                 DRM_DEV_ERROR(dsi->dev, "Failed to get rx_esc clock: %d\n",
1034                               ret);
1035                 return ret;
1036         }
1037         dsi->rx_esc_clk = clk;
1038
1039         clk = devm_clk_get(dsi->dev, "tx_esc");
1040         if (IS_ERR(clk)) {
1041                 ret = PTR_ERR(clk);
1042                 DRM_DEV_ERROR(dsi->dev, "Failed to get tx_esc clock: %d\n",
1043                               ret);
1044                 return ret;
1045         }
1046         dsi->tx_esc_clk = clk;
1047
1048         dsi->mux = devm_mux_control_get(dsi->dev, NULL);
1049         if (IS_ERR(dsi->mux)) {
1050                 ret = PTR_ERR(dsi->mux);
1051                 if (ret != -EPROBE_DEFER)
1052                         DRM_DEV_ERROR(dsi->dev, "Failed to get mux: %d\n", ret);
1053                 return ret;
1054         }
1055
1056         base = devm_platform_ioremap_resource(pdev, 0);
1057         if (IS_ERR(base))
1058                 return PTR_ERR(base);
1059
1060         dsi->regmap =
1061                 devm_regmap_init_mmio(dsi->dev, base, &nwl_dsi_regmap_config);
1062         if (IS_ERR(dsi->regmap)) {
1063                 ret = PTR_ERR(dsi->regmap);
1064                 DRM_DEV_ERROR(dsi->dev, "Failed to create NWL DSI regmap: %d\n",
1065                               ret);
1066                 return ret;
1067         }
1068
1069         dsi->irq = platform_get_irq(pdev, 0);
1070         if (dsi->irq < 0) {
1071                 DRM_DEV_ERROR(dsi->dev, "Failed to get device IRQ: %d\n",
1072                               dsi->irq);
1073                 return dsi->irq;
1074         }
1075
1076         dsi->rst_pclk = devm_reset_control_get_exclusive(dsi->dev, "pclk");
1077         if (IS_ERR(dsi->rst_pclk)) {
1078                 DRM_DEV_ERROR(dsi->dev, "Failed to get pclk reset: %ld\n",
1079                               PTR_ERR(dsi->rst_pclk));
1080                 return PTR_ERR(dsi->rst_pclk);
1081         }
1082         dsi->rst_byte = devm_reset_control_get_exclusive(dsi->dev, "byte");
1083         if (IS_ERR(dsi->rst_byte)) {
1084                 DRM_DEV_ERROR(dsi->dev, "Failed to get byte reset: %ld\n",
1085                               PTR_ERR(dsi->rst_byte));
1086                 return PTR_ERR(dsi->rst_byte);
1087         }
1088         dsi->rst_esc = devm_reset_control_get_exclusive(dsi->dev, "esc");
1089         if (IS_ERR(dsi->rst_esc)) {
1090                 DRM_DEV_ERROR(dsi->dev, "Failed to get esc reset: %ld\n",
1091                               PTR_ERR(dsi->rst_esc));
1092                 return PTR_ERR(dsi->rst_esc);
1093         }
1094         dsi->rst_dpi = devm_reset_control_get_exclusive(dsi->dev, "dpi");
1095         if (IS_ERR(dsi->rst_dpi)) {
1096                 DRM_DEV_ERROR(dsi->dev, "Failed to get dpi reset: %ld\n",
1097                               PTR_ERR(dsi->rst_dpi));
1098                 return PTR_ERR(dsi->rst_dpi);
1099         }
1100         return 0;
1101 }
1102
1103 static int nwl_dsi_select_input(struct nwl_dsi *dsi)
1104 {
1105         struct device_node *remote;
1106         u32 use_dcss = 1;
1107         int ret;
1108
1109         remote = of_graph_get_remote_node(dsi->dev->of_node, 0,
1110                                           NWL_DSI_ENDPOINT_LCDIF);
1111         if (remote) {
1112                 use_dcss = 0;
1113         } else {
1114                 remote = of_graph_get_remote_node(dsi->dev->of_node, 0,
1115                                                   NWL_DSI_ENDPOINT_DCSS);
1116                 if (!remote) {
1117                         DRM_DEV_ERROR(dsi->dev,
1118                                       "No valid input endpoint found\n");
1119                         return -EINVAL;
1120                 }
1121         }
1122
1123         DRM_DEV_INFO(dsi->dev, "Using %s as input source\n",
1124                      (use_dcss) ? "DCSS" : "LCDIF");
1125         ret = mux_control_try_select(dsi->mux, use_dcss);
1126         if (ret < 0)
1127                 DRM_DEV_ERROR(dsi->dev, "Failed to select input: %d\n", ret);
1128
1129         of_node_put(remote);
1130         return ret;
1131 }
1132
1133 static int nwl_dsi_deselect_input(struct nwl_dsi *dsi)
1134 {
1135         int ret;
1136
1137         ret = mux_control_deselect(dsi->mux);
1138         if (ret < 0)
1139                 DRM_DEV_ERROR(dsi->dev, "Failed to deselect input: %d\n", ret);
1140
1141         return ret;
1142 }
1143
/* Input bus timing: the data-enable signal is active low. */
static const struct drm_bridge_timings nwl_dsi_timings = {
	.input_bus_flags = DRM_BUS_FLAG_DE_LOW,
};
1147
/* Devicetree compatibles bound by this driver. */
static const struct of_device_id nwl_dsi_dt_ids[] = {
	{ .compatible = "fsl,imx8mq-nwl-dsi", },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, nwl_dsi_dt_ids);
1153
/* SoC revisions that need quirks: i.MX8MQ rev 2.0 hits errata E11418. */
static const struct soc_device_attribute nwl_dsi_quirks_match[] = {
	{ .soc_id = "i.MX8MQ", .revision = "2.0",
	  .data = (void *)E11418_HS_MODE_QUIRK },
	{ /* sentinel. */ },
};
1159
1160 static int nwl_dsi_probe(struct platform_device *pdev)
1161 {
1162         struct device *dev = &pdev->dev;
1163         const struct soc_device_attribute *attr;
1164         struct nwl_dsi *dsi;
1165         int ret;
1166
1167         dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
1168         if (!dsi)
1169                 return -ENOMEM;
1170
1171         dsi->dev = dev;
1172
1173         ret = nwl_dsi_parse_dt(dsi);
1174         if (ret)
1175                 return ret;
1176
1177         ret = devm_request_irq(dev, dsi->irq, nwl_dsi_irq_handler, 0,
1178                                dev_name(dev), dsi);
1179         if (ret < 0) {
1180                 DRM_DEV_ERROR(dev, "Failed to request IRQ %d: %d\n", dsi->irq,
1181                               ret);
1182                 return ret;
1183         }
1184
1185         dsi->dsi_host.ops = &nwl_dsi_host_ops;
1186         dsi->dsi_host.dev = dev;
1187         ret = mipi_dsi_host_register(&dsi->dsi_host);
1188         if (ret) {
1189                 DRM_DEV_ERROR(dev, "Failed to register MIPI host: %d\n", ret);
1190                 return ret;
1191         }
1192
1193         attr = soc_device_match(nwl_dsi_quirks_match);
1194         if (attr)
1195                 dsi->quirks = (uintptr_t)attr->data;
1196
1197         dsi->bridge.driver_private = dsi;
1198         dsi->bridge.funcs = &nwl_dsi_bridge_funcs;
1199         dsi->bridge.of_node = dev->of_node;
1200         dsi->bridge.timings = &nwl_dsi_timings;
1201
1202         dev_set_drvdata(dev, dsi);
1203         pm_runtime_enable(dev);
1204
1205         ret = nwl_dsi_select_input(dsi);
1206         if (ret < 0) {
1207                 mipi_dsi_host_unregister(&dsi->dsi_host);
1208                 return ret;
1209         }
1210
1211         drm_bridge_add(&dsi->bridge);
1212         return 0;
1213 }
1214
/* Tear down everything set up by nwl_dsi_probe(). */
static int nwl_dsi_remove(struct platform_device *pdev)
{
	struct nwl_dsi *dsi = platform_get_drvdata(pdev);

	/* Release the input mux taken during probe. */
	nwl_dsi_deselect_input(dsi);
	mipi_dsi_host_unregister(&dsi->dsi_host);
	drm_bridge_remove(&dsi->bridge);
	pm_runtime_disable(&pdev->dev);
	return 0;
}
1225
/* Platform driver glue and module metadata. */
static struct platform_driver nwl_dsi_driver = {
	.probe		= nwl_dsi_probe,
	.remove		= nwl_dsi_remove,
	.driver		= {
		.of_match_table = nwl_dsi_dt_ids,
		.name	= DRV_NAME,
	},
};

module_platform_driver(nwl_dsi_driver);

MODULE_AUTHOR("NXP Semiconductor");
MODULE_AUTHOR("Purism SPC");
MODULE_DESCRIPTION("Northwest Logic MIPI-DSI driver");
MODULE_LICENSE("GPL"); /* GPLv2 or later */