Merge tag 'drm-misc-fixes-2018-02-28' of git://people.freedesktop.org/drm-misc into...
[linux-2.6-microblaze.git] / drivers / gpu / drm / nouveau / nv50_display.c
1 /*
2  * Copyright 2011 Red Hat Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: Ben Skeggs
23  */
24
25 #include <linux/dma-mapping.h>
26 #include <linux/hdmi.h>
27
28 #include <drm/drmP.h>
29 #include <drm/drm_atomic.h>
30 #include <drm/drm_atomic_helper.h>
31 #include <drm/drm_crtc_helper.h>
32 #include <drm/drm_dp_helper.h>
33 #include <drm/drm_fb_helper.h>
34 #include <drm/drm_plane_helper.h>
35 #include <drm/drm_edid.h>
36
37 #include <nvif/class.h>
38 #include <nvif/cl0002.h>
39 #include <nvif/cl5070.h>
40 #include <nvif/cl507a.h>
41 #include <nvif/cl507b.h>
42 #include <nvif/cl507c.h>
43 #include <nvif/cl507d.h>
44 #include <nvif/cl507e.h>
45 #include <nvif/event.h>
46
47 #include "nouveau_drv.h"
48 #include "nouveau_dma.h"
49 #include "nouveau_gem.h"
50 #include "nouveau_connector.h"
51 #include "nouveau_encoder.h"
52 #include "nouveau_crtc.h"
53 #include "nouveau_fence.h"
54 #include "nouveau_fbcon.h"
55 #include "nv50_display.h"
56
/* Total number of EVO DMA channels managed here: 1 core + 4 base + 4 overlay. */
#define EVO_DMA_NR 9

/* Logical EVO channel indices, parameterized by head number 'c'. */
#define EVO_MASTER  (0x00)
#define EVO_FLIP(c) (0x01 + (c))
#define EVO_OVLY(c) (0x05 + (c))
#define EVO_OIMM(c) (0x09 + (c))
#define EVO_CURS(c) (0x0d + (c))

/* offsets in shared sync bo of various structures */
/* Each channel owns a 0x100-byte slot; 'o' is the offset within that slot. */
#define EVO_SYNC(c, o) ((c) * 0x0100 + (o))
#define EVO_MAST_NTFY     EVO_SYNC(      0, 0x00)
#define EVO_FLIP_SEM0(c)  EVO_SYNC((c) + 1, 0x00)
#define EVO_FLIP_SEM1(c)  EVO_SYNC((c) + 1, 0x10)
#define EVO_FLIP_NTFY0(c) EVO_SYNC((c) + 1, 0x20)
#define EVO_FLIP_NTFY1(c) EVO_SYNC((c) + 1, 0x30)
72
73 /******************************************************************************
74  * Atomic state
75  *****************************************************************************/
/* Downcast a drm_atomic_state embedded in an nv50_atom back to the wrapper. */
#define nv50_atom(p) container_of((p), struct nv50_atom, state)

/* Driver-private atomic state wrapping the DRM core state. */
struct nv50_atom {
	struct drm_atomic_state state;

	struct list_head outp;		/* list of nv50_outp_atom entries touched by this commit */
	bool lock_core;			/* commit must serialize against the core channel */
	bool flush_disable;		/* encoder disables must be flushed before modeset */
};
85
/* Per-encoder (output path) atomic state tracked during a commit. */
struct nv50_outp_atom {
	struct list_head head;		/* entry on nv50_atom.outp */

	struct drm_encoder *encoder;	/* encoder this state applies to */
	bool flush_disable;		/* this output's disable needs an early flush */

	/* Bits to clear on the output; 'mask' aliases the bitfield for bulk tests. */
	union {
		struct {
			bool ctrl:1;
		};
		u8 mask;
	} clr;

	/* Bits to set on the output; 'mask' aliases the bitfield for bulk tests. */
	union {
		struct {
			bool ctrl:1;
		};
		u8 mask;
	} set;
};
106
/* Downcast a drm_crtc_state embedded in an nv50_head_atom back to the wrapper. */
#define nv50_head_atom(p) container_of((p), struct nv50_head_atom, state)

/*
 * Per-head (CRTC) atomic state: everything needed to program one display
 * head through the core EVO channel, plus clr/set dirty masks.
 */
struct nv50_head_atom {
	struct drm_crtc_state state;

	/* Scaler input (i) and output (o) dimensions. */
	struct {
		u16 iW;
		u16 iH;
		u16 oW;
		u16 oH;
	} view;

	/* Raw display timings derived from the DRM mode. */
	struct nv50_head_mode {
		bool interlace;
		u32 clock;		/* pixel clock */
		struct {
			u16 active;
			u16 synce;
			u16 blanke;
			u16 blanks;
		} h;			/* horizontal timings */
		struct {
			u32 active;
			u16 synce;
			u16 blanke;
			u16 blanks;
			u16 blank2s;	/* second-field blanking (interlace) */
			u16 blank2e;
			u16 blankus;	/* blanking period in microseconds */
		} v;			/* vertical timings */
	} mode;

	/* Input LUT (gamma) surface. */
	struct {
		bool visible;
		u32 handle;		/* ctxdma handle */
		u64 offset:40;		/* VRAM offset of the LUT buffer */
		u8  mode:4;
	} lut;

	/* Core channel scanout surface. */
	struct {
		bool visible;
		u32 handle;
		u64 offset:40;
		u8  format;
		u8  kind:7;		/* memory kind (tiling) */
		u8  layout:1;		/* 0 = block-linear, 1 = pitch-linear */
		u8  block:4;
		u32 pitch:20;
		u16 x;
		u16 y;
		u16 w;
		u16 h;
	} core;

	/* Hardware cursor surface. */
	struct {
		bool visible;
		u32 handle;
		u64 offset:40;
		u8  layout:1;
		u8  format:1;
	} curs;

	/* Base channel framebuffer parameters mirrored into the core channel. */
	struct {
		u8  depth;
		u8  cpp;		/* bytes per pixel */
		u16 x;
		u16 y;
		u16 w;
		u16 h;
	} base;

	/* Overlay channel parameters mirrored into the core channel. */
	struct {
		u8 cpp;
	} ovly;

	/* Dithering configuration. */
	struct {
		bool enable:1;
		u8 bits:2;
		u8 mode:4;
	} dither;

	/* Procamp (color processing); saturation as 12-bit cos/sin pair. */
	struct {
		struct {
			u16 cos:12;
			u16 sin:12;
		} sat;
	} procamp;

	/* Features to disable this commit; 'mask' aliases the bits. */
	union {
		struct {
			bool ilut:1;
			bool core:1;
			bool curs:1;
		};
		u8 mask;
	} clr;

	/* Features to (re)program this commit; 'mask' aliases the bits. */
	union {
		struct {
			bool ilut:1;
			bool core:1;
			bool curs:1;
			bool view:1;
			bool mode:1;
			bool base:1;
			bool ovly:1;
			bool dither:1;
			bool procamp:1;
		};
		u16 mask;
	} set;
};
219
/*
 * Fetch (adding to the atomic state if necessary) the nv50_head_atom for
 * 'crtc'.  Returns an ERR_PTR on failure, propagated from the DRM core.
 */
static inline struct nv50_head_atom *
nv50_head_atom_get(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
	struct drm_crtc_state *crtcs = drm_atomic_get_crtc_state(state, crtc);
	if (!IS_ERR(crtcs))
		return nv50_head_atom(crtcs);
	return (void *)crtcs;
}
228
/* Downcast a drm_plane_state embedded in an nv50_wndw_atom back to the wrapper. */
#define nv50_wndw_atom(p) container_of((p), struct nv50_wndw_atom, state)

/* Per-plane (window) atomic state with clr/set dirty masks. */
struct nv50_wndw_atom {
	struct drm_plane_state state;
	u8 interval;			/* flip interval; 0 for async page flips */

	struct drm_rect clip;		/* clip rectangle, set to the mode's active area */

	/* Completion notifier in the shared sync buffer. */
	struct {
		u32  handle;		/* ctxdma handle */
		u16  offset:12;		/* offset within the sync bo */
		bool awaken:1;
	} ntfy;

	/* Flip-sync semaphore in the shared sync buffer. */
	struct {
		u32 handle;
		u16 offset:12;
		u32 acquire;		/* value to acquire on */
		u32 release;		/* value to release with */
	} sema;

	struct {
		u8 enable:2;		/* LUT usage for this window */
	} lut;

	/* Scanout image parameters (format/tiling as programmed to hardware). */
	struct {
		u8  mode:2;		/* 0 = vsynced, 1 = immediate */
		u8  interval:4;

		u8  format;
		u8  kind:7;		/* memory kind (tiling) */
		u8  layout:1;		/* 0 = block-linear, 1 = pitch-linear */
		u8  block:4;
		u32 pitch:20;
		u16 w;
		u16 h;

		u32 handle;
		u64 offset;
	} image;

	/* Window position (used by cursor-style windows). */
	struct {
		u16 x;
		u16 y;
	} point;

	/* State to tear down this commit; 'mask' aliases the bits. */
	union {
		struct {
			bool ntfy:1;
			bool sema:1;
			bool image:1;
		};
		u8 mask;
	} clr;

	/* State to program this commit; 'mask' aliases the bits. */
	union {
		struct {
			bool ntfy:1;
			bool sema:1;
			bool image:1;
			bool lut:1;
			bool point:1;
		};
		u8 mask;
	} set;
};
295
296 /******************************************************************************
297  * EVO channel
298  *****************************************************************************/
299
/* Base object for any EVO channel (PIO or DMA). */
struct nv50_chan {
	struct nvif_object user;	/* channel object; user.oclass identifies hw class */
	struct nvif_device *device;
};
304
/*
 * Create an EVO channel object of the newest class supported by both the
 * driver and the hardware.
 *
 * 'oclass' is a zero-terminated list ordered newest-first; the first entry
 * also present in the display object's class list is used.  On success the
 * channel is also mapped.  'head' is unused here; the callers pass it for
 * symmetry with the DMA-channel path.
 *
 * Returns 0 on success, -ENOSYS if no class matched, or a negative errno.
 */
static int
nv50_chan_create(struct nvif_device *device, struct nvif_object *disp,
		 const s32 *oclass, u8 head, void *data, u32 size,
		 struct nv50_chan *chan)
{
	struct nvif_sclass *sclass;
	int ret, i, n;

	chan->device = device;

	/* Query the classes the display object actually supports. */
	ret = n = nvif_object_sclass_get(disp, &sclass);
	if (ret < 0)
		return ret;

	/* Outer loop walks preference order, so the newest match wins. */
	while (oclass[0]) {
		for (i = 0; i < n; i++) {
			if (sclass[i].oclass == oclass[0]) {
				ret = nvif_object_init(disp, 0, oclass[0],
						       data, size, &chan->user);
				if (ret == 0)
					nvif_object_map(&chan->user, NULL, 0);
				nvif_object_sclass_put(&sclass);
				return ret;
			}
		}
		oclass++;
	}

	nvif_object_sclass_put(&sclass);
	return -ENOSYS;
}
336
/* Tear down an EVO channel object created by nv50_chan_create(). */
static void
nv50_chan_destroy(struct nv50_chan *chan)
{
	nvif_object_fini(&chan->user);
}
342
343 /******************************************************************************
344  * PIO EVO channel
345  *****************************************************************************/
346
/* PIO (non-DMA) EVO channel, e.g. cursor and overlay-immediate channels. */
struct nv50_pioc {
	struct nv50_chan base;
};

/* Destroy a PIO EVO channel. */
static void
nv50_pioc_destroy(struct nv50_pioc *pioc)
{
	nv50_chan_destroy(&pioc->base);
}
356
/* Create a PIO EVO channel; thin wrapper over nv50_chan_create(). */
static int
nv50_pioc_create(struct nvif_device *device, struct nvif_object *disp,
		 const s32 *oclass, u8 head, void *data, u32 size,
		 struct nv50_pioc *pioc)
{
	return nv50_chan_create(device, disp, oclass, head, data, size,
				&pioc->base);
}
365
366 /******************************************************************************
367  * Overlay Immediate
368  *****************************************************************************/
369
/* Overlay-immediate PIO channel for one head. */
struct nv50_oimm {
	struct nv50_pioc base;
};

/*
 * Create the overlay-immediate channel for 'head', trying hardware classes
 * newest-first until one is supported.
 */
static int
nv50_oimm_create(struct nvif_device *device, struct nvif_object *disp,
		 int head, struct nv50_oimm *oimm)
{
	struct nv50_disp_cursor_v0 args = {
		.head = head,
	};
	static const s32 oclass[] = {
		GK104_DISP_OVERLAY,
		GF110_DISP_OVERLAY,
		GT214_DISP_OVERLAY,
		G82_DISP_OVERLAY,
		NV50_DISP_OVERLAY,
		0
	};

	return nv50_pioc_create(device, disp, oclass, head, &args, sizeof(args),
				&oimm->base);
}
393
394 /******************************************************************************
395  * DMA EVO channel
396  *****************************************************************************/
397
/* A DMA context object (framebuffer access) cached on its owning channel. */
struct nv50_dmac_ctxdma {
	struct list_head head;		/* entry on nv50_dmac.ctxdma */
	struct nvif_object object;
};

/* DMA EVO channel: a PIO channel plus a pushbuf and shared DMA objects. */
struct nv50_dmac {
	struct nv50_chan base;
	dma_addr_t handle;		/* DMA address of the pushbuf page */
	u32 *ptr;			/* CPU mapping of the pushbuf page */

	struct nvif_object sync;	/* ctxdma covering the shared sync bo */
	struct nvif_object vram;	/* ctxdma covering all of VRAM */
	struct list_head ctxdma;	/* cached framebuffer ctxdmas */

	/* Protects against concurrent pushbuf access to this channel, lock is
	 * grabbed by evo_wait (if the pushbuf reservation is successful) and
	 * dropped again by evo_kick. */
	struct mutex lock;
};
417
/* Destroy a cached ctxdma: release the object, unlink and free it. */
static void
nv50_dmac_ctxdma_del(struct nv50_dmac_ctxdma *ctxdma)
{
	nvif_object_fini(&ctxdma->object);
	list_del(&ctxdma->head);
	kfree(ctxdma);
}
425
/*
 * Look up (or create and cache) the ctxdma object the channel needs to scan
 * out framebuffer 'fb'.  The handle encodes the memory kind, so all
 * framebuffers with the same tiling share one ctxdma.
 *
 * Returns the ctxdma, or an ERR_PTR on allocation/init failure.
 */
static struct nv50_dmac_ctxdma *
nv50_dmac_ctxdma_new(struct nv50_dmac *dmac, struct nouveau_framebuffer *fb)
{
	struct nouveau_drm *drm = nouveau_drm(fb->base.dev);
	struct nv50_dmac_ctxdma *ctxdma;
	const u8    kind = fb->nvbo->kind;
	const u32 handle = 0xfb000000 | kind;	/* one handle per memory kind */
	struct {
		struct nv_dma_v0 base;
		union {
			struct nv50_dma_v0 nv50;
			struct gf100_dma_v0 gf100;
			struct gf119_dma_v0 gf119;
		};
	} args = {};
	u32 argc = sizeof(args.base);
	int ret;

	/* Reuse a previously-created ctxdma for this kind, if any. */
	list_for_each_entry(ctxdma, &dmac->ctxdma, head) {
		if (ctxdma->object.handle == handle)
			return ctxdma;
	}

	if (!(ctxdma = kzalloc(sizeof(*ctxdma), GFP_KERNEL)))
		return ERR_PTR(-ENOMEM);
	/* Linked before init so the error path can use nv50_dmac_ctxdma_del(). */
	list_add(&ctxdma->head, &dmac->ctxdma);

	args.base.target = NV_DMA_V0_TARGET_VRAM;
	args.base.access = NV_DMA_V0_ACCESS_RDWR;
	args.base.start  = 0;
	args.base.limit  = drm->client.device.info.ram_user - 1;

	/* Chipset-specific tail of the DMA object arguments. */
	if (drm->client.device.info.chipset < 0x80) {
		args.nv50.part = NV50_DMA_V0_PART_256;
		argc += sizeof(args.nv50);
	} else
	if (drm->client.device.info.chipset < 0xc0) {
		args.nv50.part = NV50_DMA_V0_PART_256;
		args.nv50.kind = kind;
		argc += sizeof(args.nv50);
	} else
	if (drm->client.device.info.chipset < 0xd0) {
		args.gf100.kind = kind;
		argc += sizeof(args.gf100);
	} else {
		args.gf119.page = GF119_DMA_V0_PAGE_LP;
		args.gf119.kind = kind;
		argc += sizeof(args.gf119);
	}

	ret = nvif_object_init(&dmac->base.user, handle, NV_DMA_IN_MEMORY,
			       &args, argc, &ctxdma->object);
	if (ret) {
		nv50_dmac_ctxdma_del(ctxdma);
		return ERR_PTR(ret);
	}

	return ctxdma;
}
485
/*
 * Tear down a DMA EVO channel: cached ctxdmas, the vram/sync DMA objects,
 * the channel itself, and finally the coherent pushbuf page.
 */
static void
nv50_dmac_destroy(struct nv50_dmac *dmac, struct nvif_object *disp)
{
	struct nvif_device *device = dmac->base.device;
	struct nv50_dmac_ctxdma *ctxdma, *ctxtmp;

	/* _safe variant: nv50_dmac_ctxdma_del() unlinks entries as we walk. */
	list_for_each_entry_safe(ctxdma, ctxtmp, &dmac->ctxdma, head) {
		nv50_dmac_ctxdma_del(ctxdma);
	}

	nvif_object_fini(&dmac->vram);
	nvif_object_fini(&dmac->sync);

	nv50_chan_destroy(&dmac->base);

	/* ptr is only set once dma_alloc_coherent() succeeded in create(). */
	if (dmac->ptr) {
		struct device *dev = nvxx_device(device)->dev;
		dma_free_coherent(dev, PAGE_SIZE, dmac->ptr, dmac->handle);
	}
}
506
507 static int
508 nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp,
509                  const s32 *oclass, u8 head, void *data, u32 size, u64 syncbuf,
510                  struct nv50_dmac *dmac)
511 {
512         struct nv50_disp_core_channel_dma_v0 *args = data;
513         struct nvif_object pushbuf;
514         int ret;
515
516         mutex_init(&dmac->lock);
517         INIT_LIST_HEAD(&dmac->ctxdma);
518
519         dmac->ptr = dma_alloc_coherent(nvxx_device(device)->dev, PAGE_SIZE,
520                                        &dmac->handle, GFP_KERNEL);
521         if (!dmac->ptr)
522                 return -ENOMEM;
523
524         ret = nvif_object_init(&device->object, 0, NV_DMA_FROM_MEMORY,
525                                &(struct nv_dma_v0) {
526                                         .target = NV_DMA_V0_TARGET_PCI_US,
527                                         .access = NV_DMA_V0_ACCESS_RD,
528                                         .start = dmac->handle + 0x0000,
529                                         .limit = dmac->handle + 0x0fff,
530                                }, sizeof(struct nv_dma_v0), &pushbuf);
531         if (ret)
532                 return ret;
533
534         args->pushbuf = nvif_handle(&pushbuf);
535
536         ret = nv50_chan_create(device, disp, oclass, head, data, size,
537                                &dmac->base);
538         nvif_object_fini(&pushbuf);
539         if (ret)
540                 return ret;
541
542         ret = nvif_object_init(&dmac->base.user, 0xf0000000, NV_DMA_IN_MEMORY,
543                                &(struct nv_dma_v0) {
544                                         .target = NV_DMA_V0_TARGET_VRAM,
545                                         .access = NV_DMA_V0_ACCESS_RDWR,
546                                         .start = syncbuf + 0x0000,
547                                         .limit = syncbuf + 0x0fff,
548                                }, sizeof(struct nv_dma_v0),
549                                &dmac->sync);
550         if (ret)
551                 return ret;
552
553         ret = nvif_object_init(&dmac->base.user, 0xf0000001, NV_DMA_IN_MEMORY,
554                                &(struct nv_dma_v0) {
555                                         .target = NV_DMA_V0_TARGET_VRAM,
556                                         .access = NV_DMA_V0_ACCESS_RDWR,
557                                         .start = 0,
558                                         .limit = device->info.ram_user - 1,
559                                }, sizeof(struct nv_dma_v0),
560                                &dmac->vram);
561         if (ret)
562                 return ret;
563
564         return ret;
565 }
566
567 /******************************************************************************
568  * Core
569  *****************************************************************************/
570
/* The core (master) EVO channel. */
struct nv50_mast {
	struct nv50_dmac base;
};

/*
 * Create the core channel, trying hardware classes newest-first.  The
 * pushbuf handle in 'args' is overwritten by nv50_dmac_create().
 */
static int
nv50_core_create(struct nvif_device *device, struct nvif_object *disp,
		 u64 syncbuf, struct nv50_mast *core)
{
	struct nv50_disp_core_channel_dma_v0 args = {
		.pushbuf = 0xb0007d00,
	};
	static const s32 oclass[] = {
		GP102_DISP_CORE_CHANNEL_DMA,
		GP100_DISP_CORE_CHANNEL_DMA,
		GM200_DISP_CORE_CHANNEL_DMA,
		GM107_DISP_CORE_CHANNEL_DMA,
		GK110_DISP_CORE_CHANNEL_DMA,
		GK104_DISP_CORE_CHANNEL_DMA,
		GF110_DISP_CORE_CHANNEL_DMA,
		GT214_DISP_CORE_CHANNEL_DMA,
		GT206_DISP_CORE_CHANNEL_DMA,
		GT200_DISP_CORE_CHANNEL_DMA,
		G82_DISP_CORE_CHANNEL_DMA,
		NV50_DISP_CORE_CHANNEL_DMA,
		0
	};

	return nv50_dmac_create(device, disp, oclass, 0, &args, sizeof(args),
				syncbuf, &core->base);
}
601
602 /******************************************************************************
603  * Base
604  *****************************************************************************/
605
/* Base (primary plane) channel, plus the flip-sync semaphore bookkeeping. */
struct nv50_sync {
	struct nv50_dmac base;
	u32 addr;	/* sync-bo offset of this channel's semaphore */
	u32 data;	/* last semaphore value used */
};

/* Create the base channel for 'head', trying classes newest-first. */
static int
nv50_base_create(struct nvif_device *device, struct nvif_object *disp,
		 int head, u64 syncbuf, struct nv50_sync *base)
{
	struct nv50_disp_base_channel_dma_v0 args = {
		.pushbuf = 0xb0007c00 | head,
		.head = head,
	};
	static const s32 oclass[] = {
		GK110_DISP_BASE_CHANNEL_DMA,
		GK104_DISP_BASE_CHANNEL_DMA,
		GF110_DISP_BASE_CHANNEL_DMA,
		GT214_DISP_BASE_CHANNEL_DMA,
		GT200_DISP_BASE_CHANNEL_DMA,
		G82_DISP_BASE_CHANNEL_DMA,
		NV50_DISP_BASE_CHANNEL_DMA,
		0
	};

	return nv50_dmac_create(device, disp, oclass, head, &args, sizeof(args),
				syncbuf, &base->base);
}
634
635 /******************************************************************************
636  * Overlay
637  *****************************************************************************/
638
/* Overlay DMA channel for one head. */
struct nv50_ovly {
	struct nv50_dmac base;
};

/* Create the overlay channel for 'head', trying classes newest-first. */
static int
nv50_ovly_create(struct nvif_device *device, struct nvif_object *disp,
		 int head, u64 syncbuf, struct nv50_ovly *ovly)
{
	struct nv50_disp_overlay_channel_dma_v0 args = {
		.pushbuf = 0xb0007e00 | head,
		.head = head,
	};
	static const s32 oclass[] = {
		GK104_DISP_OVERLAY_CONTROL_DMA,
		GF110_DISP_OVERLAY_CONTROL_DMA,
		GT214_DISP_OVERLAY_CHANNEL_DMA,
		GT200_DISP_OVERLAY_CHANNEL_DMA,
		G82_DISP_OVERLAY_CHANNEL_DMA,
		NV50_DISP_OVERLAY_CHANNEL_DMA,
		0
	};

	return nv50_dmac_create(device, disp, oclass, head, &args, sizeof(args),
				syncbuf, &ovly->base);
}
664
/* Per-head (CRTC) container: base CRTC plus the head's private channels. */
struct nv50_head {
	struct nouveau_crtc base;
	struct {
		struct nouveau_bo *nvbo[2];	/* double-buffered gamma LUTs */
		int next;			/* index of the LUT to use next */
	} lut;
	struct nv50_ovly ovly;
	struct nv50_oimm oimm;
};

/* Accessors from a drm_crtc down to the nv50-specific structures. */
#define nv50_head(c) ((struct nv50_head *)nouveau_crtc(c))
#define nv50_ovly(c) (&nv50_head(c)->ovly)
#define nv50_oimm(c) (&nv50_head(c)->oimm)
#define nv50_chan(c) (&(c)->base.base)
/* Hardware class of a channel; used for generation checks. */
#define nv50_vers(c) nv50_chan(c)->user.oclass
680
/* Top-level display state stored in nouveau_display()->priv. */
struct nv50_disp {
	struct nvif_object *disp;	/* display control object */
	struct nv50_mast mast;		/* core (master) channel */

	struct nouveau_bo *sync;	/* shared sync buffer (see EVO_SYNC) */

	struct mutex mutex;
};

/* Fetch the nv50_disp from a drm_device. */
static struct nv50_disp *
nv50_disp(struct drm_device *dev)
{
	return nouveau_display(dev)->priv;
}

/* Shorthand for the device's core channel. */
#define nv50_mast(d) (&nv50_disp(d)->mast)
697
698 /******************************************************************************
699  * EVO channel helpers
700  *****************************************************************************/
701 static u32 *
702 evo_wait(void *evoc, int nr)
703 {
704         struct nv50_dmac *dmac = evoc;
705         struct nvif_device *device = dmac->base.device;
706         u32 put = nvif_rd32(&dmac->base.user, 0x0000) / 4;
707
708         mutex_lock(&dmac->lock);
709         if (put + nr >= (PAGE_SIZE / 4) - 8) {
710                 dmac->ptr[put] = 0x20000000;
711
712                 nvif_wr32(&dmac->base.user, 0x0000, 0x00000000);
713                 if (nvif_msec(device, 2000,
714                         if (!nvif_rd32(&dmac->base.user, 0x0004))
715                                 break;
716                 ) < 0) {
717                         mutex_unlock(&dmac->lock);
718                         pr_err("nouveau: evo channel stalled\n");
719                         return NULL;
720                 }
721
722                 put = 0;
723         }
724
725         return dmac->ptr + put;
726 }
727
/*
 * Submit everything written since evo_wait() by advancing the PUT register
 * to 'push', then drop the channel mutex taken by evo_wait().
 */
static void
evo_kick(u32 *push, void *evoc)
{
	struct nv50_dmac *dmac = evoc;
	nvif_wr32(&dmac->base.user, 0x0000, (push - dmac->ptr) << 2);
	mutex_unlock(&dmac->lock);
}
735
/* Emit a method header (size 's' in dwords, method 'm') at cursor 'p',
 * advancing the cursor.  Traced when KMS debugging is enabled.
 */
#define evo_mthd(p, m, s) do {                                          \
	const u32 _m = (m), _s = (s);                                   \
	if (drm_debug & DRM_UT_KMS)                                     \
		pr_err("%04x %d %s\n", _m, _s, __func__);               \
	*((p)++) = ((_s << 18) | _m);                                   \
} while(0)

/* Emit one data dword at cursor 'p', advancing the cursor. */
#define evo_data(p, d) do {                                             \
	const u32 _d = (d);                                             \
	if (drm_debug & DRM_UT_KMS)                                     \
		pr_err("\t%08x\n", _d);                                 \
	*((p)++) = _d;                                                  \
} while(0)
749
750 /******************************************************************************
751  * Plane
752  *****************************************************************************/
/* Downcast a drm_plane embedded in an nv50_wndw back to the wrapper. */
#define nv50_wndw(p) container_of((p), struct nv50_wndw, plane)

/* A hardware window (plane) and the channel used to program it. */
struct nv50_wndw {
	const struct nv50_wndw_func *func;	/* per-window-type operations */
	struct nv50_dmac *dmac;

	struct drm_plane plane;

	struct nvif_notify notify;
	u16 ntfy;		/* sync-bo offset of this window's notifier */
	u16 sema;		/* sync-bo offset of this window's semaphore */
	u32 data;		/* last semaphore value */
};

/*
 * Per-window-type operations.  The set/clr hooks push the corresponding
 * state to hardware; update() submits and returns an interlock mask.
 */
struct nv50_wndw_func {
	void *(*dtor)(struct nv50_wndw *);
	int (*acquire)(struct nv50_wndw *, struct nv50_wndw_atom *asyw,
		       struct nv50_head_atom *asyh);
	void (*release)(struct nv50_wndw *, struct nv50_wndw_atom *asyw,
			struct nv50_head_atom *asyh);
	void (*prepare)(struct nv50_wndw *, struct nv50_head_atom *asyh,
			struct nv50_wndw_atom *asyw);

	void (*sema_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
	void (*sema_clr)(struct nv50_wndw *);
	void (*ntfy_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
	void (*ntfy_clr)(struct nv50_wndw *);
	int (*ntfy_wait_begun)(struct nv50_wndw *, struct nv50_wndw_atom *);
	void (*image_set)(struct nv50_wndw *, struct nv50_wndw_atom *);
	void (*image_clr)(struct nv50_wndw *);
	void (*lut)(struct nv50_wndw *, struct nv50_wndw_atom *);
	void (*point)(struct nv50_wndw *, struct nv50_wndw_atom *);

	u32 (*update)(struct nv50_wndw *, u32 interlock);
};
788
789 static int
790 nv50_wndw_wait_armed(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
791 {
792         if (asyw->set.ntfy)
793                 return wndw->func->ntfy_wait_begun(wndw, asyw);
794         return 0;
795 }
796
797 static u32
798 nv50_wndw_flush_clr(struct nv50_wndw *wndw, u32 interlock, bool flush,
799                     struct nv50_wndw_atom *asyw)
800 {
801         if (asyw->clr.sema && (!asyw->set.sema || flush))
802                 wndw->func->sema_clr(wndw);
803         if (asyw->clr.ntfy && (!asyw->set.ntfy || flush))
804                 wndw->func->ntfy_clr(wndw);
805         if (asyw->clr.image && (!asyw->set.image || flush))
806                 wndw->func->image_clr(wndw);
807
808         return flush ? wndw->func->update(wndw, interlock) : 0;
809 }
810
811 static u32
812 nv50_wndw_flush_set(struct nv50_wndw *wndw, u32 interlock,
813                     struct nv50_wndw_atom *asyw)
814 {
815         if (interlock) {
816                 asyw->image.mode = 0;
817                 asyw->image.interval = 1;
818         }
819
820         if (asyw->set.sema ) wndw->func->sema_set (wndw, asyw);
821         if (asyw->set.ntfy ) wndw->func->ntfy_set (wndw, asyw);
822         if (asyw->set.image) wndw->func->image_set(wndw, asyw);
823         if (asyw->set.lut  ) wndw->func->lut      (wndw, asyw);
824         if (asyw->set.point) wndw->func->point    (wndw, asyw);
825
826         return wndw->func->update(wndw, interlock);
827 }
828
/*
 * atomic_check path for a window being disabled: let the window type
 * release its state, and zero the handles so the commit code knows the
 * notifier/semaphore are no longer in use.
 */
static void
nv50_wndw_atomic_check_release(struct nv50_wndw *wndw,
			       struct nv50_wndw_atom *asyw,
			       struct nv50_head_atom *asyh)
{
	struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
	NV_ATOMIC(drm, "%s release\n", wndw->plane.name);
	wndw->func->release(wndw, asyw, asyh);
	asyw->ntfy.handle = 0;
	asyw->sema.handle = 0;
}
840
/*
 * atomic_check path for a visible window: translate the framebuffer and
 * flip flags into the hardware image parameters (tiling layout, block
 * size, pitch, flip mode/interval), then let the window type validate.
 *
 * Returns 0 on success or a negative errno from the acquire hook.
 */
static int
nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw,
			       struct nv50_wndw_atom *asyw,
			       struct nv50_head_atom *asyh)
{
	struct nouveau_framebuffer *fb = nouveau_framebuffer(asyw->state.fb);
	struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
	int ret;

	NV_ATOMIC(drm, "%s acquire\n", wndw->plane.name);
	/* Clip to the head's active display area. */
	asyw->clip.x1 = 0;
	asyw->clip.y1 = 0;
	asyw->clip.x2 = asyh->state.mode.hdisplay;
	asyw->clip.y2 = asyh->state.mode.vdisplay;

	asyw->image.w = fb->base.width;
	asyw->image.h = fb->base.height;
	asyw->image.kind = fb->nvbo->kind;

	/* Async flips don't wait for vblank (interval 0). */
	if (asyh->state.pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC)
		asyw->interval = 0;
	else
		asyw->interval = 1;

	if (asyw->image.kind) {
		/* Tiled (block-linear) surface; block-height encoding moved
		 * within the mode field on Fermi (0xc0) and later.
		 */
		asyw->image.layout = 0;
		if (drm->client.device.info.chipset >= 0xc0)
			asyw->image.block = fb->nvbo->mode >> 4;
		else
			asyw->image.block = fb->nvbo->mode;
		asyw->image.pitch = (fb->base.pitches[0] / 4) << 4;
	} else {
		/* Linear surface: pitch in bytes, no block size. */
		asyw->image.layout = 1;
		asyw->image.block  = 0;
		asyw->image.pitch  = fb->base.pitches[0];
	}

	ret = wndw->func->acquire(wndw, asyw, asyh);
	if (ret)
		return ret;

	/* mode 1 = immediate flip (interval unused); mode 0 = vsynced. */
	if (asyw->set.image) {
		if (!(asyw->image.mode = asyw->interval ? 0 : 1))
			asyw->image.interval = asyw->interval;
		else
			asyw->image.interval = 0;
	}

	return 0;
}
891
/*
 * Plane atomic_check: decide whether this window is being acquired,
 * released, or left alone, and compute the clr/set dirty masks.
 *
 * asyv = window visible in the new state; varm = currently armed on the
 * old CRTC; asym = new CRTC needs a full modeset.
 */
static int
nv50_wndw_atomic_check(struct drm_plane *plane, struct drm_plane_state *state)
{
	struct nouveau_drm *drm = nouveau_drm(plane->dev);
	struct nv50_wndw *wndw = nv50_wndw(plane);
	struct nv50_wndw_atom *armw = nv50_wndw_atom(wndw->plane.state);
	struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
	struct nv50_head_atom *harm = NULL, *asyh = NULL;
	bool varm = false, asyv = false, asym = false;
	int ret;

	NV_ATOMIC(drm, "%s atomic_check\n", plane->name);
	if (asyw->state.crtc) {
		asyh = nv50_head_atom_get(asyw->state.state, asyw->state.crtc);
		if (IS_ERR(asyh))
			return PTR_ERR(asyh);
		asym = drm_atomic_crtc_needs_modeset(&asyh->state);
		asyv = asyh->state.active;
	}

	if (armw->state.crtc) {
		harm = nv50_head_atom_get(asyw->state.state, armw->state.crtc);
		if (IS_ERR(harm))
			return PTR_ERR(harm);
		varm = harm->state.crtc->state->active;
	}

	if (asyv) {
		/* Window will be visible: update position and acquire. */
		asyw->point.x = asyw->state.crtc_x;
		asyw->point.y = asyw->state.crtc_y;
		if (memcmp(&armw->point, &asyw->point, sizeof(asyw->point)))
			asyw->set.point = true;

		ret = nv50_wndw_atomic_check_acquire(wndw, asyw, asyh);
		if (ret)
			return ret;
	} else
	if (varm) {
		/* Was visible, won't be: release. */
		nv50_wndw_atomic_check_release(wndw, asyw, harm);
	} else {
		/* Not visible before or after: nothing to do. */
		return 0;
	}

	/* On disable or modeset, tear down whatever the armed state holds. */
	if (!asyv || asym) {
		asyw->clr.ntfy = armw->ntfy.handle != 0;
		asyw->clr.sema = armw->sema.handle != 0;
		if (wndw->func->image_clr)
			asyw->clr.image = armw->image.handle != 0;
		asyw->set.lut = wndw->func->lut && asyv;
	}

	return 0;
}
945
946 static void
947 nv50_wndw_cleanup_fb(struct drm_plane *plane, struct drm_plane_state *old_state)
948 {
949         struct nouveau_framebuffer *fb = nouveau_framebuffer(old_state->fb);
950         struct nouveau_drm *drm = nouveau_drm(plane->dev);
951
952         NV_ATOMIC(drm, "%s cleanup: %p\n", plane->name, old_state->fb);
953         if (!old_state->fb)
954                 return;
955
956         nouveau_bo_unpin(fb->nvbo);
957 }
958
/* Plane prepare_fb: pin the new framebuffer's BO into VRAM, create (or
 * look up) a ctxdma for it on the window's channel, and stash the
 * fence/handle/offset that the commit will program.
 *
 * NOTE(review): if nv50_head_atom_get() fails below, we return after
 * the pin without unpinning here — presumably the atomic helpers'
 * failure path invokes cleanup_fb() to undo it; verify against
 * drm_atomic_helper_prepare_planes() semantics.
 */
static int
nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
{
	struct nouveau_framebuffer *fb = nouveau_framebuffer(state->fb);
	struct nouveau_drm *drm = nouveau_drm(plane->dev);
	struct nv50_wndw *wndw = nv50_wndw(plane);
	struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
	struct nv50_head_atom *asyh;
	struct nv50_dmac_ctxdma *ctxdma;
	int ret;

	NV_ATOMIC(drm, "%s prepare: %p\n", plane->name, state->fb);
	if (!asyw->state.fb)
		return 0;

	ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM, true);
	if (ret)
		return ret;

	ctxdma = nv50_dmac_ctxdma_new(wndw->dmac, fb);
	if (IS_ERR(ctxdma)) {
		nouveau_bo_unpin(fb->nvbo);
		return PTR_ERR(ctxdma);
	}

	/* Wait on the BO's exclusive fence before scanning out from it. */
	asyw->state.fence = reservation_object_get_excl_rcu(fb->nvbo->bo.resv);
	asyw->image.handle = ctxdma->object.handle;
	asyw->image.offset = fb->nvbo->bo.offset;

	if (wndw->func->prepare) {
		asyh = nv50_head_atom_get(asyw->state.state, asyw->state.crtc);
		if (IS_ERR(asyh))
			return PTR_ERR(asyh);

		wndw->func->prepare(wndw, asyh, asyw);
	}

	return 0;
}
998
/* Atomic helper vtable shared by all window (plane) types. */
static const struct drm_plane_helper_funcs
nv50_wndw_helper = {
	.prepare_fb = nv50_wndw_prepare_fb,
	.cleanup_fb = nv50_wndw_cleanup_fb,
	.atomic_check = nv50_wndw_atomic_check,
};
1005
1006 static void
1007 nv50_wndw_atomic_destroy_state(struct drm_plane *plane,
1008                                struct drm_plane_state *state)
1009 {
1010         struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
1011         __drm_atomic_helper_plane_destroy_state(&asyw->state);
1012         kfree(asyw);
1013 }
1014
1015 static struct drm_plane_state *
1016 nv50_wndw_atomic_duplicate_state(struct drm_plane *plane)
1017 {
1018         struct nv50_wndw_atom *armw = nv50_wndw_atom(plane->state);
1019         struct nv50_wndw_atom *asyw;
1020         if (!(asyw = kmalloc(sizeof(*asyw), GFP_KERNEL)))
1021                 return NULL;
1022         __drm_atomic_helper_plane_duplicate_state(plane, &asyw->state);
1023         asyw->interval = 1;
1024         asyw->sema = armw->sema;
1025         asyw->ntfy = armw->ntfy;
1026         asyw->image = armw->image;
1027         asyw->point = armw->point;
1028         asyw->lut = armw->lut;
1029         asyw->clr.mask = 0;
1030         asyw->set.mask = 0;
1031         return &asyw->state;
1032 }
1033
1034 static void
1035 nv50_wndw_reset(struct drm_plane *plane)
1036 {
1037         struct nv50_wndw_atom *asyw;
1038
1039         if (WARN_ON(!(asyw = kzalloc(sizeof(*asyw), GFP_KERNEL))))
1040                 return;
1041
1042         if (plane->state)
1043                 plane->funcs->atomic_destroy_state(plane, plane->state);
1044         plane->state = &asyw->state;
1045         plane->state->plane = plane;
1046         plane->state->rotation = DRM_MODE_ROTATE_0;
1047 }
1048
1049 static void
1050 nv50_wndw_destroy(struct drm_plane *plane)
1051 {
1052         struct nv50_wndw *wndw = nv50_wndw(plane);
1053         void *data;
1054         nvif_notify_fini(&wndw->notify);
1055         data = wndw->func->dtor(wndw);
1056         drm_plane_cleanup(&wndw->plane);
1057         kfree(data);
1058 }
1059
/* DRM plane vtable shared by all window (plane) types. */
static const struct drm_plane_funcs
nv50_wndw = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = nv50_wndw_destroy,
	.reset = nv50_wndw_reset,
	.atomic_duplicate_state = nv50_wndw_atomic_duplicate_state,
	.atomic_destroy_state = nv50_wndw_atomic_destroy_state,
};
1069
/* Suspend-time teardown: stop delivery of the window's notify event. */
static void
nv50_wndw_fini(struct nv50_wndw *wndw)
{
	nvif_notify_put(&wndw->notify);
}
1075
/* Resume-time setup: (re)enable delivery of the window's notify event. */
static void
nv50_wndw_init(struct nv50_wndw *wndw)
{
	nvif_notify_get(&wndw->notify);
}
1081
/* Common window constructor: record the type-specific vtable and DMA
 * channel, register the DRM plane (named "<name>-<index>"), and attach
 * the shared atomic helpers.
 */
static int
nv50_wndw_ctor(const struct nv50_wndw_func *func, struct drm_device *dev,
	       enum drm_plane_type type, const char *name, int index,
	       struct nv50_dmac *dmac, const u32 *format, int nformat,
	       struct nv50_wndw *wndw)
{
	int ret;

	wndw->func = func;
	wndw->dmac = dmac;

	ret = drm_universal_plane_init(dev, &wndw->plane, 0, &nv50_wndw,
				       format, nformat, NULL,
				       type, "%s-%d", name, index);
	if (ret)
		return ret;

	drm_plane_helper_add(&wndw->plane, &nv50_wndw_helper);
	return 0;
}
1102
1103 /******************************************************************************
1104  * Cursor plane
1105  *****************************************************************************/
#define nv50_curs(p) container_of((p), struct nv50_curs, wndw)

/* Cursor plane: a window driven through an immediate cursor channel
 * object rather than a DMA push buffer.
 */
struct nv50_curs {
	struct nv50_wndw wndw;
	struct nvif_object chan;	/* NV50_DISP_CURSOR (or newer) object */
};
1112
/* Kick the cursor channel's update method (0x0080).  Cursor updates
 * take effect immediately, so no interlock mask is returned.
 */
static u32
nv50_curs_update(struct nv50_wndw *wndw, u32 interlock)
{
	struct nv50_curs *curs = nv50_curs(wndw);
	nvif_wr32(&curs->chan, 0x0080, 0x00000000);
	return 0;
}
1120
/* Move the cursor: y in the high 16 bits, x in the low 16 bits. */
static void
nv50_curs_point(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
	struct nv50_curs *curs = nv50_curs(wndw);
	nvif_wr32(&curs->chan, 0x0084, (asyw->point.y << 16) | asyw->point.x);
}
1127
/* Propagate the cursor image's ctxdma handle and VRAM offset into the
 * head state; only flag a cursor reprogram if either actually changed
 * and the cursor is visible.
 */
static void
nv50_curs_prepare(struct nv50_wndw *wndw, struct nv50_head_atom *asyh,
		  struct nv50_wndw_atom *asyw)
{
	u32 handle = nv50_disp(wndw->plane.dev)->mast.base.vram.handle;
	u32 offset = asyw->image.offset;
	if (asyh->curs.handle != handle || asyh->curs.offset != offset) {
		asyh->curs.handle = handle;
		asyh->curs.offset = offset;
		asyh->set.curs = asyh->curs.visible;
	}
}
1140
/* Release: the head no longer shows this cursor. */
static void
nv50_curs_release(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
		  struct nv50_head_atom *asyh)
{
	asyh->curs.visible = false;
}
1147
/* Validate cursor plane state.  Hardware constraints enforced here:
 * the cursor image must be square, either 32x32 (layout 0) or 64x64
 * (layout 1), and ARGB8888 (format 1).  No scaling is permitted.
 */
static int
nv50_curs_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
		  struct nv50_head_atom *asyh)
{
	int ret;

	ret = drm_atomic_helper_check_plane_state(&asyw->state, &asyh->state,
						  &asyw->clip,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  true, true);
	asyh->curs.visible = asyw->state.visible;
	if (ret || !asyh->curs.visible)
		return ret;

	switch (asyw->state.fb->width) {
	case 32: asyh->curs.layout = 0; break;
	case 64: asyh->curs.layout = 1; break;
	default:
		return -EINVAL;
	}

	/* Cursor images must be square. */
	if (asyw->state.fb->width != asyw->state.fb->height)
		return -EINVAL;

	switch (asyw->state.fb->format->format) {
	case DRM_FORMAT_ARGB8888: asyh->curs.format = 1; break;
	default:
		WARN_ON(1);
		return -EINVAL;
	}

	return 0;
}
1182
/* Cursor dtor: destroy the channel object; caller frees the returned
 * allocation (see nv50_wndw_destroy()).
 */
static void *
nv50_curs_dtor(struct nv50_wndw *wndw)
{
	struct nv50_curs *curs = nv50_curs(wndw);
	nvif_object_fini(&curs->chan);
	return curs;
}
1190
/* Only format the cursor hardware accepts (see nv50_curs_acquire()). */
static const u32
nv50_curs_format[] = {
	DRM_FORMAT_ARGB8888,
};
1195
/* Window vtable for cursor planes. */
static const struct nv50_wndw_func
nv50_curs = {
	.dtor = nv50_curs_dtor,
	.acquire = nv50_curs_acquire,
	.release = nv50_curs_release,
	.prepare = nv50_curs_prepare,
	.point = nv50_curs_point,
	.update = nv50_curs_update,
};
1205
/* Create the cursor plane for a head: pick the newest supported cursor
 * class, construct the window, then allocate the per-head cursor
 * channel object.
 *
 * NOTE(review): on nvif_object_init() failure curs is not freed here;
 * *pcurs is already set and the plane was registered, so teardown is
 * presumably left to the caller via nv50_wndw_destroy() — verify.
 */
static int
nv50_curs_new(struct nouveau_drm *drm, struct nv50_head *head,
	      struct nv50_curs **pcurs)
{
	/* Newest-first list of cursor classes to probe. */
	static const struct nvif_mclass curses[] = {
		{ GK104_DISP_CURSOR, 0 },
		{ GF110_DISP_CURSOR, 0 },
		{ GT214_DISP_CURSOR, 0 },
		{   G82_DISP_CURSOR, 0 },
		{  NV50_DISP_CURSOR, 0 },
		{}
	};
	struct nv50_disp_cursor_v0 args = {
		.head = head->base.index,
	};
	struct nv50_disp *disp = nv50_disp(drm->dev);
	struct nv50_curs *curs;
	int cid, ret;

	cid = nvif_mclass(disp->disp, curses);
	if (cid < 0) {
		NV_ERROR(drm, "No supported cursor immediate class\n");
		return cid;
	}

	if (!(curs = *pcurs = kzalloc(sizeof(*curs), GFP_KERNEL)))
		return -ENOMEM;

	ret = nv50_wndw_ctor(&nv50_curs, drm->dev, DRM_PLANE_TYPE_CURSOR,
			     "curs", head->base.index, &disp->mast.base,
			     nv50_curs_format, ARRAY_SIZE(nv50_curs_format),
			     &curs->wndw);
	if (ret) {
		kfree(curs);
		return ret;
	}

	ret = nvif_object_init(disp->disp, 0, curses[cid].oclass, &args,
			       sizeof(args), &curs->chan);
	if (ret) {
		NV_ERROR(drm, "curs%04x allocation failed: %d\n",
			 curses[cid].oclass, ret);
		return ret;
	}

	return 0;
}
1253
1254 /******************************************************************************
1255  * Primary plane
1256  *****************************************************************************/
#define nv50_base(p) container_of((p), struct nv50_base, wndw)

/* Primary (base) plane: a window driven through a base DMA channel. */
struct nv50_base {
	struct nv50_wndw wndw;
	struct nv50_sync chan;	/* base channel + flip-sync state */
	int id;			/* head index this base channel serves */
};
1264
/* Base channel uevent handler: nothing to do, keep it armed. */
static int
nv50_base_notify(struct nvif_notify *notify)
{
	return NVIF_NOTIFY_KEEP;
}
1270
/* Enable/disable LUT usage for the base image (method 0x00e0, bit 30). */
static void
nv50_base_lut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
	struct nv50_base *base = nv50_base(wndw);
	u32 *push;
	if ((push = evo_wait(&base->chan, 2))) {
		evo_mthd(push, 0x00e0, 1);
		evo_data(push, asyw->lut.enable << 30);
		evo_kick(push, &base->chan);
	}
}
1282
/* Clear the base image: zero the presentation control (0x0084) and
 * unbind the image ctxdma (0x00c0).
 */
static void
nv50_base_image_clr(struct nv50_wndw *wndw)
{
	struct nv50_base *base = nv50_base(wndw);
	u32 *push;
	if ((push = evo_wait(&base->chan, 4))) {
		evo_mthd(push, 0x0084, 1);
		evo_data(push, 0x00000000);
		evo_mthd(push, 0x00c0, 1);
		evo_data(push, 0x00000000);
		evo_kick(push, &base->chan);
	}
}
1296
/* Program the base image: presentation mode/interval (0x0084), image
 * ctxdma (0x00c0), then the surface description.  The surface method
 * offset and field layout depend on the channel class generation:
 * pre-G82, G82..pre-GF110 (no kind field), and GF110+ (methods at
 * 0x0400, layout bit moved to bit 24).
 */
static void
nv50_base_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
	struct nv50_base *base = nv50_base(wndw);
	const s32 oclass = base->chan.base.base.user.oclass;
	u32 *push;
	if ((push = evo_wait(&base->chan, 10))) {
		evo_mthd(push, 0x0084, 1);
		evo_data(push, (asyw->image.mode << 8) |
			       (asyw->image.interval << 4));
		evo_mthd(push, 0x00c0, 1);
		evo_data(push, asyw->image.handle);
		if (oclass < G82_DISP_BASE_CHANNEL_DMA) {
			evo_mthd(push, 0x0800, 5);
			evo_data(push, asyw->image.offset >> 8);
			evo_data(push, 0x00000000);
			evo_data(push, (asyw->image.h << 16) | asyw->image.w);
			evo_data(push, (asyw->image.layout << 20) |
					asyw->image.pitch |
					asyw->image.block);
			evo_data(push, (asyw->image.kind << 16) |
				       (asyw->image.format << 8));
		} else
		if (oclass < GF110_DISP_BASE_CHANNEL_DMA) {
			evo_mthd(push, 0x0800, 5);
			evo_data(push, asyw->image.offset >> 8);
			evo_data(push, 0x00000000);
			evo_data(push, (asyw->image.h << 16) | asyw->image.w);
			evo_data(push, (asyw->image.layout << 20) |
					asyw->image.pitch |
					asyw->image.block);
			evo_data(push, asyw->image.format << 8);
		} else {
			evo_mthd(push, 0x0400, 5);
			evo_data(push, asyw->image.offset >> 8);
			evo_data(push, 0x00000000);
			evo_data(push, (asyw->image.h << 16) | asyw->image.w);
			evo_data(push, (asyw->image.layout << 24) |
					asyw->image.pitch |
					asyw->image.block);
			evo_data(push, asyw->image.format << 8);
		}
		evo_kick(push, &base->chan);
	}
}
1342
/* Disable completion notification on the base channel (method 0x00a4). */
static void
nv50_base_ntfy_clr(struct nv50_wndw *wndw)
{
	struct nv50_base *base = nv50_base(wndw);
	u32 *push;
	if ((push = evo_wait(&base->chan, 2))) {
		evo_mthd(push, 0x00a4, 1);
		evo_data(push, 0x00000000);
		evo_kick(push, &base->chan);
	}
}
1354
/* Configure the flip-completion notifier: control word (awaken flag in
 * bit 30 plus buffer offset) and the notifier ctxdma handle.
 */
static void
nv50_base_ntfy_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
	struct nv50_base *base = nv50_base(wndw);
	u32 *push;
	if ((push = evo_wait(&base->chan, 3))) {
		evo_mthd(push, 0x00a0, 2);
		evo_data(push, (asyw->ntfy.awaken << 30) | asyw->ntfy.offset);
		evo_data(push, asyw->ntfy.handle);
		evo_kick(push, &base->chan);
	}
}
1367
/* Unbind the flip semaphore ctxdma (method 0x0094). */
static void
nv50_base_sema_clr(struct nv50_wndw *wndw)
{
	struct nv50_base *base = nv50_base(wndw);
	u32 *push;
	if ((push = evo_wait(&base->chan, 2))) {
		evo_mthd(push, 0x0094, 1);
		evo_data(push, 0x00000000);
		evo_kick(push, &base->chan);
	}
}
1379
/* Configure the flip semaphore: offset, acquire value, release value
 * and ctxdma handle (methods 0x0088..0x0094).
 */
static void
nv50_base_sema_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
	struct nv50_base *base = nv50_base(wndw);
	u32 *push;
	if ((push = evo_wait(&base->chan, 5))) {
		evo_mthd(push, 0x0088, 4);
		evo_data(push, asyw->sema.offset);
		evo_data(push, asyw->sema.acquire);
		evo_data(push, asyw->sema.release);
		evo_data(push, asyw->sema.handle);
		evo_kick(push, &base->chan);
	}
}
1394
/* Kick UPDATE (method 0x0080) on the base channel, and return the
 * interlock bitmask the core channel should wait on for this head.
 * The per-head bit spacing differs: 8 bits per head pre-GF110,
 * 4 bits per head on GF110+.
 */
static u32
nv50_base_update(struct nv50_wndw *wndw, u32 interlock)
{
	struct nv50_base *base = nv50_base(wndw);
	u32 *push;

	if (!(push = evo_wait(&base->chan, 2)))
		return 0;
	evo_mthd(push, 0x0080, 1);
	evo_data(push, interlock);
	evo_kick(push, &base->chan);

	if (base->chan.base.base.user.oclass < GF110_DISP_BASE_CHANNEL_DMA)
		return interlock ? 2 << (base->id * 8) : 0;
	return interlock ? 2 << (base->id * 4) : 0;
}
1411
/* Poll (up to 2s) the flip notifier in the shared sync buffer until its
 * status field reports that scanout of the new image has begun.
 * NOTE(review): status is the top two bits of the first word; the
 * 0x40000000 pattern is taken to mean "begun" — inferred from this
 * poll, confirm against the EVO notifier layout.
 */
static int
nv50_base_ntfy_wait_begun(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
	struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
	struct nv50_disp *disp = nv50_disp(wndw->plane.dev);
	if (nvif_msec(&drm->client.device, 2000ULL,
		u32 data = nouveau_bo_rd32(disp->sync, asyw->ntfy.offset / 4);
		if ((data & 0xc0000000) == 0x40000000)
			break;
		usleep_range(1, 2);
	) < 0)
		return -ETIMEDOUT;
	return 0;
}
1426
/* Release: mark no base framebuffer on this head (cpp of 0 disables
 * the base usage bounds, see nv50_head_base()).
 */
static void
nv50_base_release(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
		  struct nv50_head_atom *asyh)
{
	asyh->base.cpp = 0;
}
1433
/* Validate primary plane state: reject formats without a depth, forbid
 * scaling, record geometry into the head state, and translate the DRM
 * fourcc into the hardware surface format code.
 */
static int
nv50_base_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw,
		  struct nv50_head_atom *asyh)
{
	const struct drm_framebuffer *fb = asyw->state.fb;
	int ret;

	if (!fb->format->depth)
		return -EINVAL;

	ret = drm_atomic_helper_check_plane_state(&asyw->state, &asyh->state,
						  &asyw->clip,
						  DRM_PLANE_HELPER_NO_SCALING,
						  DRM_PLANE_HELPER_NO_SCALING,
						  false, true);
	if (ret)
		return ret;

	asyh->base.depth = fb->format->depth;
	asyh->base.cpp = fb->format->cpp[0];
	asyh->base.x = asyw->state.src.x1 >> 16;	/* src is 16.16 fixed point */
	asyh->base.y = asyw->state.src.y1 >> 16;
	asyh->base.w = asyw->state.fb->width;
	asyh->base.h = asyw->state.fb->height;

	/* Hardware surface format codes for each supported fourcc. */
	switch (fb->format->format) {
	case DRM_FORMAT_C8         : asyw->image.format = 0x1e; break;
	case DRM_FORMAT_RGB565     : asyw->image.format = 0xe8; break;
	case DRM_FORMAT_XRGB1555   :
	case DRM_FORMAT_ARGB1555   : asyw->image.format = 0xe9; break;
	case DRM_FORMAT_XRGB8888   :
	case DRM_FORMAT_ARGB8888   : asyw->image.format = 0xcf; break;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010: asyw->image.format = 0xd1; break;
	case DRM_FORMAT_XBGR8888   :
	case DRM_FORMAT_ABGR8888   : asyw->image.format = 0xd5; break;
	default:
		WARN_ON(1);
		return -EINVAL;
	}

	asyw->lut.enable = 1;
	asyw->set.image = true;
	return 0;
}
1479
/* Base dtor: destroy the DMA channel; caller frees the returned
 * allocation (see nv50_wndw_destroy()).
 */
static void *
nv50_base_dtor(struct nv50_wndw *wndw)
{
	struct nv50_disp *disp = nv50_disp(wndw->plane.dev);
	struct nv50_base *base = nv50_base(wndw);
	nv50_dmac_destroy(&base->chan.base, disp->disp);
	return base;
}
1488
/* Formats accepted by the primary plane (see nv50_base_acquire()). */
static const u32
nv50_base_format[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_ARGB1555,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_ABGR2101010,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ABGR8888,
};
1502
/* Window vtable for primary (base) planes. */
static const struct nv50_wndw_func
nv50_base = {
	.dtor = nv50_base_dtor,
	.acquire = nv50_base_acquire,
	.release = nv50_base_release,
	.sema_set = nv50_base_sema_set,
	.sema_clr = nv50_base_sema_clr,
	.ntfy_set = nv50_base_ntfy_set,
	.ntfy_clr = nv50_base_ntfy_clr,
	.ntfy_wait_begun = nv50_base_ntfy_wait_begun,
	.image_set = nv50_base_image_set,
	.image_clr = nv50_base_image_clr,
	.lut = nv50_base_lut,
	.update = nv50_base_update,
};
1518
/* Create the primary plane for a head: construct the window, allocate
 * its base DMA channel, and hook the channel's completion uevent.
 *
 * NOTE(review): failures after nv50_wndw_ctor() return without freeing
 * base; *pbase is set and the plane registered, so cleanup is
 * presumably left to plane teardown via nv50_wndw_destroy() — verify.
 */
static int
nv50_base_new(struct nouveau_drm *drm, struct nv50_head *head,
	      struct nv50_base **pbase)
{
	struct nv50_disp *disp = nv50_disp(drm->dev);
	struct nv50_base *base;
	int ret;

	if (!(base = *pbase = kzalloc(sizeof(*base), GFP_KERNEL)))
		return -ENOMEM;
	base->id = head->base.index;
	/* Per-head notifier/semaphore slots in the shared sync buffer. */
	base->wndw.ntfy = EVO_FLIP_NTFY0(base->id);
	base->wndw.sema = EVO_FLIP_SEM0(base->id);
	base->wndw.data = 0x00000000;

	ret = nv50_wndw_ctor(&nv50_base, drm->dev, DRM_PLANE_TYPE_PRIMARY,
			     "base", base->id, &base->chan.base,
			     nv50_base_format, ARRAY_SIZE(nv50_base_format),
			     &base->wndw);
	if (ret) {
		kfree(base);
		return ret;
	}

	ret = nv50_base_create(&drm->client.device, disp->disp, base->id,
			       disp->sync->bo.offset, &base->chan);
	if (ret)
		return ret;

	return nvif_notify_init(&base->chan.base.base.user, nv50_base_notify,
				false,
				NV50_DISP_BASE_CHANNEL_DMA_V0_NTFY_UEVENT,
				&(struct nvif_notify_uevent_req) {},
				sizeof(struct nvif_notify_uevent_req),
				sizeof(struct nvif_notify_uevent_rep),
				&base->wndw.notify);
}
1556
1557 /******************************************************************************
1558  * Head
1559  *****************************************************************************/
/* Program the head's procamp (saturation sin/cos) via the core channel.
 * Method offset differs between pre-GF110 and GF110+ core classes.
 */
static void
nv50_head_procamp(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
	u32 *push;
	if ((push = evo_wait(core, 2))) {
		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
			evo_mthd(push, 0x08a8 + (head->base.index * 0x400), 1);
		else
			evo_mthd(push, 0x0498 + (head->base.index * 0x300), 1);
		evo_data(push, (asyh->procamp.sat.sin << 20) |
			       (asyh->procamp.sat.cos << 8));
		evo_kick(push, core);
	}
}
1575
/* Program the head's dithering control (mode/bits/enable).  The method
 * offset differs across pre-GF110, GF110, and GK104+ core classes.
 */
static void
nv50_head_dither(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
	u32 *push;
	if ((push = evo_wait(core, 2))) {
		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
			evo_mthd(push, 0x08a0 + (head->base.index * 0x0400), 1);
		else
		if (core->base.user.oclass < GK104_DISP_CORE_CHANNEL_DMA)
			evo_mthd(push, 0x0490 + (head->base.index * 0x0300), 1);
		else
			evo_mthd(push, 0x04a0 + (head->base.index * 0x0300), 1);
		evo_data(push, (asyh->dither.mode << 3) |
			       (asyh->dither.bits << 1) |
				asyh->dither.enable);
		evo_kick(push, core);
	}
}
1595
/* Program the head's overlay usage bounds from the bytes-per-pixel of
 * the base framebuffer; bit 0 enables the bounds.  Note there is no
 * 1-byte case here, unlike nv50_head_base() — presumably the overlay
 * hardware has no 8bpp mode.
 */
static void
nv50_head_ovly(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
	u32 bounds = 0;
	u32 *push;

	if (asyh->base.cpp) {
		switch (asyh->base.cpp) {
		case 8: bounds |= 0x00000500; break;
		case 4: bounds |= 0x00000300; break;
		case 2: bounds |= 0x00000100; break;
		default:
			WARN_ON(1);
			break;
		}
		bounds |= 0x00000001;
	}

	if ((push = evo_wait(core, 2))) {
		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
			evo_mthd(push, 0x0904 + head->base.index * 0x400, 1);
		else
			evo_mthd(push, 0x04d4 + head->base.index * 0x300, 1);
		evo_data(push, bounds);
		evo_kick(push, core);
	}
}
1624
/* Program the head's base usage bounds from the bytes-per-pixel of the
 * base framebuffer; bit 0 enables the bounds (cpp == 0, set by
 * nv50_base_release(), leaves them disabled).
 */
static void
nv50_head_base(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
	u32 bounds = 0;
	u32 *push;

	if (asyh->base.cpp) {
		switch (asyh->base.cpp) {
		case 8: bounds |= 0x00000500; break;
		case 4: bounds |= 0x00000300; break;
		case 2: bounds |= 0x00000100; break;
		case 1: bounds |= 0x00000000; break;
		default:
			WARN_ON(1);
			break;
		}
		bounds |= 0x00000001;
	}

	if ((push = evo_wait(core, 2))) {
		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
			evo_mthd(push, 0x0900 + head->base.index * 0x400, 1);
		else
			evo_mthd(push, 0x04d0 + head->base.index * 0x300, 1);
		evo_data(push, bounds);
		evo_kick(push, core);
	}
}
1654
/* Hide the hardware cursor on this head (control word 0x05000000 with
 * the enable bit clear), and on G82+ also unbind the cursor ctxdma.
 * Method offsets differ across pre-G82, G82..pre-GF110, and GF110+.
 */
static void
nv50_head_curs_clr(struct nv50_head *head)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
	u32 *push;
	if ((push = evo_wait(core, 4))) {
		if (core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) {
			evo_mthd(push, 0x0880 + head->base.index * 0x400, 1);
			evo_data(push, 0x05000000);
		} else
		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
			evo_mthd(push, 0x0880 + head->base.index * 0x400, 1);
			evo_data(push, 0x05000000);
			evo_mthd(push, 0x089c + head->base.index * 0x400, 1);
			evo_data(push, 0x00000000);
		} else {
			evo_mthd(push, 0x0480 + head->base.index * 0x300, 1);
			evo_data(push, 0x05000000);
			evo_mthd(push, 0x048c + head->base.index * 0x300, 1);
			evo_data(push, 0x00000000);
		}
		evo_kick(push, core);
	}
}
1679
1680 static void
1681 nv50_head_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh)
1682 {
1683         struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
1684         u32 *push;
1685         if ((push = evo_wait(core, 5))) {
1686                 if (core->base.user.oclass < G82_DISP_BASE_CHANNEL_DMA) {
1687                         evo_mthd(push, 0x0880 + head->base.index * 0x400, 2);
1688                         evo_data(push, 0x80000000 | (asyh->curs.layout << 26) |
1689                                                     (asyh->curs.format << 24));
1690                         evo_data(push, asyh->curs.offset >> 8);
1691                 } else
1692                 if (core->base.user.oclass < GF110_DISP_BASE_CHANNEL_DMA) {
1693                         evo_mthd(push, 0x0880 + head->base.index * 0x400, 2);
1694                         evo_data(push, 0x80000000 | (asyh->curs.layout << 26) |
1695                                                     (asyh->curs.format << 24));
1696                         evo_data(push, asyh->curs.offset >> 8);
1697                         evo_mthd(push, 0x089c + head->base.index * 0x400, 1);
1698                         evo_data(push, asyh->curs.handle);
1699                 } else {
1700                         evo_mthd(push, 0x0480 + head->base.index * 0x300, 2);
1701                         evo_data(push, 0x80000000 | (asyh->curs.layout << 26) |
1702                                                     (asyh->curs.format << 24));
1703                         evo_data(push, asyh->curs.offset >> 8);
1704                         evo_mthd(push, 0x048c + head->base.index * 0x300, 1);
1705                         evo_data(push, asyh->curs.handle);
1706                 }
1707                 evo_kick(push, core);
1708         }
1709 }
1710
/* Unbind the head's core-image (ISO) ctxdma via the core channel. */
static void
nv50_head_core_clr(struct nv50_head *head)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
	u32 *push;
	if ((push = evo_wait(core, 2))) {
		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA)
			evo_mthd(push, 0x0874 + head->base.index * 0x400, 1);
		else
			evo_mthd(push, 0x0474 + head->base.index * 0x300, 1);
		evo_data(push, 0x00000000);
		evo_kick(push, core);
	}
}
1725
/* Program the core channel's primary (ISO) surface for this head:
 * offset, size, layout/pitch/block, format and context DMA handle,
 * plus the surface position.  Three method layouts exist depending
 * on the core channel class: original NV50, G82..GT21x, and GF110+
 * (which uses a 0x300 per-head stride and different offsets).
 */
static void
nv50_head_core_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
	u32 *push;
	if ((push = evo_wait(core, 9))) {
		if (core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) {
			/* Original NV50 layout. */
			evo_mthd(push, 0x0860 + head->base.index * 0x400, 1);
			evo_data(push, asyh->core.offset >> 8);
			evo_mthd(push, 0x0868 + head->base.index * 0x400, 4);
			evo_data(push, (asyh->core.h << 16) | asyh->core.w);
			evo_data(push, asyh->core.layout << 20 |
				       (asyh->core.pitch >> 8) << 8 |
				       asyh->core.block);
			evo_data(push, asyh->core.kind << 16 |
				       asyh->core.format << 8);
			evo_data(push, asyh->core.handle);
			evo_mthd(push, 0x08c0 + head->base.index * 0x400, 1);
			evo_data(push, (asyh->core.y << 16) | asyh->core.x);
			/* EVO will complain with INVALID_STATE if we have an
			 * active cursor and (re)specify HeadSetContextDmaIso
			 * without also updating HeadSetOffsetCursor.
			 */
			asyh->set.curs = asyh->curs.visible;
		} else
		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
			/* G82..GT21x: same offsets, but no "kind" field. */
			evo_mthd(push, 0x0860 + head->base.index * 0x400, 1);
			evo_data(push, asyh->core.offset >> 8);
			evo_mthd(push, 0x0868 + head->base.index * 0x400, 4);
			evo_data(push, (asyh->core.h << 16) | asyh->core.w);
			evo_data(push, asyh->core.layout << 20 |
				       (asyh->core.pitch >> 8) << 8 |
				       asyh->core.block);
			evo_data(push, asyh->core.format << 8);
			evo_data(push, asyh->core.handle);
			evo_mthd(push, 0x08c0 + head->base.index * 0x400, 1);
			evo_data(push, (asyh->core.y << 16) | asyh->core.x);
		} else {
			/* GF110+: 0x300 stride, layout moved to bit 24. */
			evo_mthd(push, 0x0460 + head->base.index * 0x300, 1);
			evo_data(push, asyh->core.offset >> 8);
			evo_mthd(push, 0x0468 + head->base.index * 0x300, 4);
			evo_data(push, (asyh->core.h << 16) | asyh->core.w);
			evo_data(push, asyh->core.layout << 24 |
				       (asyh->core.pitch >> 8) << 8 |
				       asyh->core.block);
			evo_data(push, asyh->core.format << 8);
			evo_data(push, asyh->core.handle);
			evo_mthd(push, 0x04b0 + head->base.index * 0x300, 1);
			evo_data(push, (asyh->core.y << 16) | asyh->core.x);
		}
		evo_kick(push, core);
	}
}
1779
/* Disable the gamma LUT for this head.  On G82+ the LUT context DMA
 * must also be cleared; GF110+ uses different offsets/values again.
 */
static void
nv50_head_lut_clr(struct nv50_head *head)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
	u32 *push;
	if ((push = evo_wait(core, 4))) {
		if (core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) {
			evo_mthd(push, 0x0840 + (head->base.index * 0x400), 1);
			evo_data(push, 0x40000000);
		} else
		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
			evo_mthd(push, 0x0840 + (head->base.index * 0x400), 1);
			evo_data(push, 0x40000000);
			/* Clear the LUT context DMA handle. */
			evo_mthd(push, 0x085c + (head->base.index * 0x400), 1);
			evo_data(push, 0x00000000);
		} else {
			evo_mthd(push, 0x0440 + (head->base.index * 0x300), 1);
			evo_data(push, 0x03000000);
			evo_mthd(push, 0x045c + (head->base.index * 0x300), 1);
			evo_data(push, 0x00000000);
		}
		evo_kick(push, core);
	}
}
1804
1805 static void
1806 nv50_head_lut_load(struct drm_property_blob *blob, int mode,
1807                    struct nouveau_bo *nvbo)
1808 {
1809         struct drm_color_lut *in = (struct drm_color_lut *)blob->data;
1810         void __iomem *lut = (u8 *)nvbo_kmap_obj_iovirtual(nvbo);
1811         const int size = blob->length / sizeof(*in);
1812         int bits, shift, i;
1813         u16 zero, r, g, b;
1814
1815         /* This can't happen.. But it shuts the compiler up. */
1816         if (WARN_ON(size != 256))
1817                 return;
1818
1819         switch (mode) {
1820         case 0: /* LORES. */
1821         case 1: /* HIRES. */
1822                 bits = 11;
1823                 shift = 3;
1824                 zero = 0x0000;
1825                 break;
1826         case 7: /* INTERPOLATE_257_UNITY_RANGE. */
1827                 bits = 14;
1828                 shift = 0;
1829                 zero = 0x6000;
1830                 break;
1831         default:
1832                 WARN_ON(1);
1833                 return;
1834         }
1835
1836         for (i = 0; i < size; i++) {
1837                 r = (drm_color_lut_extract(in[i].  red, bits) + zero) << shift;
1838                 g = (drm_color_lut_extract(in[i].green, bits) + zero) << shift;
1839                 b = (drm_color_lut_extract(in[i]. blue, bits) + zero) << shift;
1840                 writew(r, lut + (i * 0x08) + 0);
1841                 writew(g, lut + (i * 0x08) + 2);
1842                 writew(b, lut + (i * 0x08) + 4);
1843         }
1844
1845         /* INTERPOLATE modes require a "next" entry to interpolate with,
1846          * so we replicate the last entry to deal with this for now.
1847          */
1848         writew(r, lut + (i * 0x08) + 0);
1849         writew(g, lut + (i * 0x08) + 2);
1850         writew(b, lut + (i * 0x08) + 4);
1851 }
1852
/* Enable the gamma LUT for this head, pointing the hardware at the
 * buffer filled by nv50_head_lut_load().  Pre-GF110 encodes the LUT
 * mode at bit 30; GF110+ at bit 24, with extra reserved words.
 */
static void
nv50_head_lut_set(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
	u32 *push;
	if ((push = evo_wait(core, 7))) {
		if (core->base.user.oclass < G82_DISP_CORE_CHANNEL_DMA) {
			evo_mthd(push, 0x0840 + (head->base.index * 0x400), 2);
			evo_data(push, 0x80000000 | asyh->lut.mode << 30);
			evo_data(push, asyh->lut.offset >> 8);
		} else
		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
			evo_mthd(push, 0x0840 + (head->base.index * 0x400), 2);
			evo_data(push, 0x80000000 | asyh->lut.mode << 30);
			evo_data(push, asyh->lut.offset >> 8);
			/* LUT context DMA handle. */
			evo_mthd(push, 0x085c + (head->base.index * 0x400), 1);
			evo_data(push, asyh->lut.handle);
		} else {
			evo_mthd(push, 0x0440 + (head->base.index * 0x300), 4);
			evo_data(push, 0x80000000 | asyh->lut.mode << 24);
			evo_data(push, asyh->lut.offset >> 8);
			evo_data(push, 0x00000000);
			evo_data(push, 0x00000000);
			evo_mthd(push, 0x045c + (head->base.index * 0x300), 1);
			evo_data(push, asyh->lut.handle);
		}
		evo_kick(push, core);
	}
}
1882
/* Program the display timings computed by nv50_head_atomic_check_mode()
 * into the core channel.  Pre-GF110 takes the pixel clock in kHz and an
 * interlace flag up front; GF110+ programs the clock (in Hz) separately
 * after the timing words.
 */
static void
nv50_head_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
	struct nv50_head_mode *m = &asyh->mode;
	u32 *push;
	if ((push = evo_wait(core, 14))) {
		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
			evo_mthd(push, 0x0804 + (head->base.index * 0x400), 2);
			evo_data(push, 0x00800000 | m->clock);
			evo_data(push, m->interlace ? 0x00000002 : 0x00000000);
			evo_mthd(push, 0x0810 + (head->base.index * 0x400), 7);
			evo_data(push, 0x00000000);
			evo_data(push, (m->v.active  << 16) | m->h.active );
			evo_data(push, (m->v.synce   << 16) | m->h.synce  );
			evo_data(push, (m->v.blanke  << 16) | m->h.blanke );
			evo_data(push, (m->v.blanks  << 16) | m->h.blanks );
			evo_data(push, (m->v.blank2e << 16) | m->v.blank2s);
			evo_data(push, asyh->mode.v.blankus);
			evo_mthd(push, 0x082c + (head->base.index * 0x400), 1);
			evo_data(push, 0x00000000);
		} else {
			evo_mthd(push, 0x0410 + (head->base.index * 0x300), 6);
			evo_data(push, 0x00000000);
			evo_data(push, (m->v.active  << 16) | m->h.active );
			evo_data(push, (m->v.synce   << 16) | m->h.synce  );
			evo_data(push, (m->v.blanke  << 16) | m->h.blanke );
			evo_data(push, (m->v.blanks  << 16) | m->h.blanks );
			evo_data(push, (m->v.blank2e << 16) | m->v.blank2s);
			evo_mthd(push, 0x042c + (head->base.index * 0x300), 2);
			evo_data(push, 0x00000000); /* ??? */
			evo_data(push, 0xffffff00);
			evo_mthd(push, 0x0450 + (head->base.index * 0x300), 3);
			evo_data(push, m->clock * 1000);
			evo_data(push, 0x00200000); /* ??? */
			evo_data(push, m->clock * 1000);
		}
		evo_kick(push, core);
	}
}
1923
/* Program the head's scaler: input (iW x iH) and output (oW x oH)
 * viewport sizes computed by nv50_head_atomic_check_view().  GF110+
 * takes one extra output-size word.
 */
static void
nv50_head_view(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->mast.base;
	u32 *push;
	if ((push = evo_wait(core, 10))) {
		if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
			evo_mthd(push, 0x08a4 + (head->base.index * 0x400), 1);
			evo_data(push, 0x00000000);
			evo_mthd(push, 0x08c8 + (head->base.index * 0x400), 1);
			evo_data(push, (asyh->view.iH << 16) | asyh->view.iW);
			evo_mthd(push, 0x08d8 + (head->base.index * 0x400), 2);
			evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
			evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
		} else {
			evo_mthd(push, 0x0494 + (head->base.index * 0x300), 1);
			evo_data(push, 0x00000000);
			evo_mthd(push, 0x04b8 + (head->base.index * 0x300), 1);
			evo_data(push, (asyh->view.iH << 16) | asyh->view.iW);
			evo_mthd(push, 0x04c0 + (head->base.index * 0x300), 3);
			evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
			evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
			evo_data(push, (asyh->view.oH << 16) | asyh->view.oW);
		}
		evo_kick(push, core);
	}
}
1951
1952 static void
1953 nv50_head_flush_clr(struct nv50_head *head, struct nv50_head_atom *asyh, bool y)
1954 {
1955         if (asyh->clr.ilut && (!asyh->set.ilut || y))
1956                 nv50_head_lut_clr(head);
1957         if (asyh->clr.core && (!asyh->set.core || y))
1958                 nv50_head_core_clr(head);
1959         if (asyh->clr.curs && (!asyh->set.curs || y))
1960                 nv50_head_curs_clr(head);
1961 }
1962
1963 static void
1964 nv50_head_flush_set(struct nv50_head *head, struct nv50_head_atom *asyh)
1965 {
1966         if (asyh->set.view   ) nv50_head_view    (head, asyh);
1967         if (asyh->set.mode   ) nv50_head_mode    (head, asyh);
1968         if (asyh->set.ilut   ) {
1969                 struct nouveau_bo *nvbo = head->lut.nvbo[head->lut.next];
1970                 struct drm_property_blob *blob = asyh->state.gamma_lut;
1971                 if (blob)
1972                         nv50_head_lut_load(blob, asyh->lut.mode, nvbo);
1973                 asyh->lut.offset = nvbo->bo.offset;
1974                 head->lut.next ^= 1;
1975                 nv50_head_lut_set(head, asyh);
1976         }
1977         if (asyh->set.core   ) nv50_head_core_set(head, asyh);
1978         if (asyh->set.curs   ) nv50_head_curs_set(head, asyh);
1979         if (asyh->set.base   ) nv50_head_base    (head, asyh);
1980         if (asyh->set.ovly   ) nv50_head_ovly    (head, asyh);
1981         if (asyh->set.dither ) nv50_head_dither  (head, asyh);
1982         if (asyh->set.procamp) nv50_head_procamp (head, asyh);
1983 }
1984
1985 static void
1986 nv50_head_atomic_check_procamp(struct nv50_head_atom *armh,
1987                                struct nv50_head_atom *asyh,
1988                                struct nouveau_conn_atom *asyc)
1989 {
1990         const int vib = asyc->procamp.color_vibrance - 100;
1991         const int hue = asyc->procamp.vibrant_hue - 90;
1992         const int adj = (vib > 0) ? 50 : 0;
1993         asyh->procamp.sat.cos = ((vib * 2047 + adj) / 100) & 0xfff;
1994         asyh->procamp.sat.sin = ((hue * 2047) / 100) & 0xfff;
1995         asyh->set.procamp = true;
1996 }
1997
1998 static void
1999 nv50_head_atomic_check_dither(struct nv50_head_atom *armh,
2000                               struct nv50_head_atom *asyh,
2001                               struct nouveau_conn_atom *asyc)
2002 {
2003         struct drm_connector *connector = asyc->state.connector;
2004         u32 mode = 0x00;
2005
2006         if (asyc->dither.mode == DITHERING_MODE_AUTO) {
2007                 if (asyh->base.depth > connector->display_info.bpc * 3)
2008                         mode = DITHERING_MODE_DYNAMIC2X2;
2009         } else {
2010                 mode = asyc->dither.mode;
2011         }
2012
2013         if (asyc->dither.depth == DITHERING_DEPTH_AUTO) {
2014                 if (connector->display_info.bpc >= 8)
2015                         mode |= DITHERING_DEPTH_8BPC;
2016         } else {
2017                 mode |= asyc->dither.depth;
2018         }
2019
2020         asyh->dither.enable = mode;
2021         asyh->dither.bits = mode >> 1;
2022         asyh->dither.mode = mode >> 3;
2023         asyh->set.dither = true;
2024 }
2025
/* Compute the scaler's input/output viewport (asyh->view) from the
 * user mode, the adjusted output mode, and the connector's scaling
 * and underscan properties.
 */
static void
nv50_head_atomic_check_view(struct nv50_head_atom *armh,
			    struct nv50_head_atom *asyh,
			    struct nouveau_conn_atom *asyc)
{
	struct drm_connector *connector = asyc->state.connector;
	struct drm_display_mode *omode = &asyh->state.adjusted_mode;
	struct drm_display_mode *umode = &asyh->state.mode;
	int mode = asyc->scaler.mode;
	struct edid *edid;
	int umode_vdisplay, omode_hdisplay, omode_vdisplay;

	/* EDID is only needed to auto-detect HDMI for underscan below. */
	if (connector->edid_blob_ptr)
		edid = (struct edid *)connector->edid_blob_ptr->data;
	else
		edid = NULL;

	if (!asyc->scaler.full) {
		if (mode == DRM_MODE_SCALE_NONE)
			omode = umode;
	} else {
		/* Non-EDID LVDS/eDP mode. */
		mode = DRM_MODE_SCALE_FULLSCREEN;
	}

	/* For the user-specified mode, we must ignore doublescan and
	 * the like, but honor frame packing.
	 */
	umode_vdisplay = umode->vdisplay;
	if ((umode->flags & DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING)
		umode_vdisplay += umode->vtotal;
	asyh->view.iW = umode->hdisplay;
	asyh->view.iH = umode_vdisplay;
	/* For the output mode, we can just use the stock helper. */
	drm_mode_get_hv_timing(omode, &omode_hdisplay, &omode_vdisplay);
	asyh->view.oW = omode_hdisplay;
	asyh->view.oH = omode_vdisplay;

	/* Add overscan compensation if necessary, will keep the aspect
	 * ratio the same as the backend mode unless overridden by the
	 * user setting both hborder and vborder properties.
	 */
	if ((asyc->scaler.underscan.mode == UNDERSCAN_ON ||
	    (asyc->scaler.underscan.mode == UNDERSCAN_AUTO &&
	     drm_detect_hdmi_monitor(edid)))) {
		u32 bX = asyc->scaler.underscan.hborder;
		u32 bY = asyc->scaler.underscan.vborder;
		/* 13.19 fixed-point output aspect ratio. */
		u32 r = (asyh->view.oH << 19) / asyh->view.oW;

		if (bX) {
			asyh->view.oW -= (bX * 2);
			if (bY) asyh->view.oH -= (bY * 2);
			else    asyh->view.oH  = ((asyh->view.oW * r) + (r / 2)) >> 19;
		} else {
			/* Default border: oW/16 + 32 pixels. */
			asyh->view.oW -= (asyh->view.oW >> 4) + 32;
			if (bY) asyh->view.oH -= (bY * 2);
			else    asyh->view.oH  = ((asyh->view.oW * r) + (r / 2)) >> 19;
		}
	}

	/* Handle CENTER/ASPECT scaling, taking into account the areas
	 * removed already for overscan compensation.
	 */
	switch (mode) {
	case DRM_MODE_SCALE_CENTER:
		asyh->view.oW = min((u16)umode->hdisplay, asyh->view.oW);
		asyh->view.oH = min((u16)umode_vdisplay, asyh->view.oH);
		/* fall-through */
	case DRM_MODE_SCALE_ASPECT:
		if (asyh->view.oH < asyh->view.oW) {
			u32 r = (asyh->view.iW << 19) / asyh->view.iH;
			asyh->view.oW = ((asyh->view.oH * r) + (r / 2)) >> 19;
		} else {
			u32 r = (asyh->view.iH << 19) / asyh->view.iW;
			asyh->view.oH = ((asyh->view.oW * r) + (r / 2)) >> 19;
		}
		break;
	default:
		break;
	}

	asyh->set.view = true;
}
2109
/* Decide whether the gamma LUT should be enabled for the new state,
 * and pick the hardware LUT mode (see nv50_head_lut_load()).
 */
static void
nv50_head_atomic_check_lut(struct nv50_head *head,
			   struct nv50_head_atom *armh,
			   struct nv50_head_atom *asyh)
{
	struct nv50_disp *disp = nv50_disp(head->base.base.dev);

	/* An I8 surface without an input LUT makes no sense, and
	 * EVO will throw an error if you try.
	 *
	 * Legacy clients actually cause this due to the order in
	 * which they call ioctls, so we will enable the LUT with
	 * whatever contents the buffer already contains to avoid
	 * triggering the error check.
	 */
	if (!asyh->state.gamma_lut && asyh->base.cpp != 1) {
		/* No LUT wanted: drop the handle, clear if it was on. */
		asyh->lut.handle = 0;
		asyh->clr.ilut = armh->lut.visible;
		return;
	}

	if (disp->disp->oclass < GF110_DISP) {
		/* LORES for 8bpp (I8) surfaces, HIRES otherwise. */
		asyh->lut.mode = (asyh->base.cpp == 1) ? 0 : 1;
		asyh->set.ilut = true;
	} else {
		/* INTERPOLATE_257_UNITY_RANGE; only reload on LUT change. */
		asyh->lut.mode = 7;
		asyh->set.ilut = asyh->state.color_mgmt_changed;
	}
	asyh->lut.handle = disp->mast.base.vram.handle;
}
2140
/* Convert the DRM adjusted mode into the hardware timing parameters
 * stored in asyh->mode, including the interlace adjustments.
 */
static void
nv50_head_atomic_check_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
{
	struct drm_display_mode *mode = &asyh->state.adjusted_mode;
	struct nv50_head_mode *m = &asyh->mode;
	u32 blankus;

	drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V | CRTC_STEREO_DOUBLE);

	/*
	 * DRM modes are defined in terms of a repeating interval
	 * starting with the active display area.  The hardware modes
	 * are defined in terms of a repeating interval starting one
	 * unit (pixel or line) into the sync pulse.  So, add bias.
	 */

	m->h.active = mode->crtc_htotal;
	m->h.synce  = mode->crtc_hsync_end - mode->crtc_hsync_start - 1;
	m->h.blanke = mode->crtc_hblank_end - mode->crtc_hsync_start - 1;
	m->h.blanks = m->h.blanke + mode->crtc_hdisplay;

	m->v.active = mode->crtc_vtotal;
	m->v.synce  = mode->crtc_vsync_end - mode->crtc_vsync_start - 1;
	m->v.blanke = mode->crtc_vblank_end - mode->crtc_vsync_start - 1;
	m->v.blanks = m->v.blanke + mode->crtc_vdisplay;

	/*XXX: Safe underestimate, even "0" works */
	/* Vertical blanking period in microseconds. */
	blankus = (m->v.active - mode->crtc_vdisplay - 2) * m->h.active;
	blankus *= 1000;
	blankus /= mode->crtc_clock;
	m->v.blankus = blankus;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* Second-field blanking window for interlaced modes. */
		m->v.blank2e =  m->v.active + m->v.blanke;
		m->v.blank2s =  m->v.blank2e + mode->crtc_vdisplay;
		m->v.active  = (m->v.active * 2) + 1;
		m->interlace = true;
	} else {
		m->v.blank2e = 0;
		m->v.blank2s = 1;
		m->interlace = false;
	}
	m->clock = mode->crtc_clock;

	asyh->set.mode = true;
}
2187
/* Atomic check for a head (CRTC): derive the full nv50_head_atom
 * (timings, LUT, scaler, core/cursor visibility, set/clr masks) from
 * the requested state and the connector state routed to this CRTC.
 * Always returns 0; failures are not possible in this path.
 */
static int
nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state)
{
	struct nouveau_drm *drm = nouveau_drm(crtc->dev);
	struct nv50_disp *disp = nv50_disp(crtc->dev);
	struct nv50_head *head = nv50_head(crtc);
	struct nv50_head_atom *armh = nv50_head_atom(crtc->state);
	struct nv50_head_atom *asyh = nv50_head_atom(state);
	struct nouveau_conn_atom *asyc = NULL;
	struct drm_connector_state *conns;
	struct drm_connector *conn;
	int i;

	NV_ATOMIC(drm, "%s atomic_check %d\n", crtc->name, asyh->state.active);
	if (asyh->state.active) {
		/* Find the (single) connector driven by this CRTC. */
		for_each_new_connector_in_state(asyh->state.state, conn, conns, i) {
			if (conns->crtc == crtc) {
				asyc = nouveau_conn_atom(conns);
				break;
			}
		}

		if (armh->state.active) {
			if (asyc) {
				if (asyh->state.mode_changed)
					asyc->set.scaler = true;
				if (armh->base.depth != asyh->base.depth)
					asyc->set.dither = true;
			}
		} else {
			/* Head is turning on: program everything. */
			if (asyc)
				asyc->set.mask = ~0;
			asyh->set.mask = ~0;
		}

		if (asyh->state.mode_changed)
			nv50_head_atomic_check_mode(head, asyh);

		if (asyh->state.color_mgmt_changed ||
		    asyh->base.cpp != armh->base.cpp)
			nv50_head_atomic_check_lut(head, armh, asyh);
		asyh->lut.visible = asyh->lut.handle != 0;

		if (asyc) {
			if (asyc->set.scaler)
				nv50_head_atomic_check_view(armh, asyh, asyc);
			if (asyc->set.dither)
				nv50_head_atomic_check_dither(armh, asyh, asyc);
			if (asyc->set.procamp)
				nv50_head_atomic_check_procamp(armh, asyh, asyc);
		}

		if ((asyh->core.visible = (asyh->base.cpp != 0))) {
			/* Core surface mirrors the primary plane. */
			asyh->core.x = asyh->base.x;
			asyh->core.y = asyh->base.y;
			asyh->core.w = asyh->base.w;
			asyh->core.h = asyh->base.h;
		} else
		if ((asyh->core.visible = asyh->curs.visible) ||
		    (asyh->core.visible = asyh->lut.visible)) {
			/*XXX: We need to either find some way of having the
			 *     primary base layer appear black, while still
			 *     being able to display the other layers, or we
			 *     need to allocate a dummy black surface here.
			 */
			asyh->core.x = 0;
			asyh->core.y = 0;
			asyh->core.w = asyh->state.mode.hdisplay;
			asyh->core.h = asyh->state.mode.vdisplay;
		}
		asyh->core.handle = disp->mast.base.vram.handle;
		asyh->core.offset = 0;
		asyh->core.format = 0xcf;
		asyh->core.kind = 0;
		asyh->core.layout = 1;
		asyh->core.block = 0;
		asyh->core.pitch = ALIGN(asyh->core.w, 64) * 4;
		asyh->set.base = armh->base.cpp != asyh->base.cpp;
		asyh->set.ovly = armh->ovly.cpp != asyh->ovly.cpp;
	} else {
		/* Head inactive: nothing is visible. */
		asyh->lut.visible = false;
		asyh->core.visible = false;
		asyh->curs.visible = false;
		asyh->base.cpp = 0;
		asyh->ovly.cpp = 0;
	}

	if (!drm_atomic_crtc_needs_modeset(&asyh->state)) {
		/* Fast path: only flag resources that actually changed. */
		if (asyh->core.visible) {
			if (memcmp(&armh->core, &asyh->core, sizeof(asyh->core)))
				asyh->set.core = true;
		} else
		if (armh->core.visible) {
			asyh->clr.core = true;
		}

		if (asyh->curs.visible) {
			if (memcmp(&armh->curs, &asyh->curs, sizeof(asyh->curs)))
				asyh->set.curs = true;
		} else
		if (armh->curs.visible) {
			asyh->clr.curs = true;
		}
	} else {
		/* Full modeset: clear what was on, set what should be. */
		asyh->clr.ilut = armh->lut.visible;
		asyh->clr.core = armh->core.visible;
		asyh->clr.curs = armh->curs.visible;
		asyh->set.ilut = asyh->lut.visible;
		asyh->set.core = asyh->core.visible;
		asyh->set.curs = asyh->curs.visible;
	}

	/* Any core-channel work requires serializing the commit. */
	if (asyh->clr.mask || asyh->set.mask)
		nv50_atom(asyh->state.state)->lock_core = true;
	return 0;
}
2304
/* CRTC helper vtable: only atomic_check is needed; the modeset
 * programming itself happens via nv50_head_flush_set()/_clr().
 */
static const struct drm_crtc_helper_funcs
nv50_head_help = {
	.atomic_check = nv50_head_atomic_check,
};
2309
2310 static void
2311 nv50_head_atomic_destroy_state(struct drm_crtc *crtc,
2312                                struct drm_crtc_state *state)
2313 {
2314         struct nv50_head_atom *asyh = nv50_head_atom(state);
2315         __drm_atomic_helper_crtc_destroy_state(&asyh->state);
2316         kfree(asyh);
2317 }
2318
2319 static struct drm_crtc_state *
2320 nv50_head_atomic_duplicate_state(struct drm_crtc *crtc)
2321 {
2322         struct nv50_head_atom *armh = nv50_head_atom(crtc->state);
2323         struct nv50_head_atom *asyh;
2324         if (!(asyh = kmalloc(sizeof(*asyh), GFP_KERNEL)))
2325                 return NULL;
2326         __drm_atomic_helper_crtc_duplicate_state(crtc, &asyh->state);
2327         asyh->view = armh->view;
2328         asyh->mode = armh->mode;
2329         asyh->lut  = armh->lut;
2330         asyh->core = armh->core;
2331         asyh->curs = armh->curs;
2332         asyh->base = armh->base;
2333         asyh->ovly = armh->ovly;
2334         asyh->dither = armh->dither;
2335         asyh->procamp = armh->procamp;
2336         asyh->clr.mask = 0;
2337         asyh->set.mask = 0;
2338         return &asyh->state;
2339 }
2340
/* Local stand-in for a DRM core helper: free any existing CRTC state
 * and install @state as the new software state.
 * NOTE(review): the double-underscore name mimics DRM-core internals;
 * consider renaming if a core helper of the same name appears.
 */
static void
__drm_atomic_helper_crtc_reset(struct drm_crtc *crtc,
			       struct drm_crtc_state *state)
{
	if (crtc->state)
		crtc->funcs->atomic_destroy_state(crtc, crtc->state);
	crtc->state = state;
	crtc->state->crtc = crtc;
}
2350
2351 static void
2352 nv50_head_reset(struct drm_crtc *crtc)
2353 {
2354         struct nv50_head_atom *asyh;
2355
2356         if (WARN_ON(!(asyh = kzalloc(sizeof(*asyh), GFP_KERNEL))))
2357                 return;
2358
2359         __drm_atomic_helper_crtc_reset(crtc, &asyh->state);
2360 }
2361
/* Tear down a head: destroy its overlay DMA and immediate channels,
 * release both gamma LUT buffers, then clean up and free the CRTC.
 */
static void
nv50_head_destroy(struct drm_crtc *crtc)
{
	struct nv50_disp *disp = nv50_disp(crtc->dev);
	struct nv50_head *head = nv50_head(crtc);
	int i;

	nv50_dmac_destroy(&head->ovly.base, disp->disp);
	nv50_pioc_destroy(&head->oimm.base);

	for (i = 0; i < ARRAY_SIZE(head->lut.nvbo); i++)
		nouveau_bo_unmap_unpin_unref(&head->lut.nvbo[i]);

	drm_crtc_cleanup(crtc);
	/* head is allocated with the CRTC embedded first, so this frees it. */
	kfree(crtc);
}
2378
/* CRTC vtable: atomic helpers for legacy entry points, plus the
 * nouveau-specific state lifecycle handlers above.
 */
static const struct drm_crtc_funcs
nv50_head_func = {
	.reset = nv50_head_reset,
	.gamma_set = drm_atomic_helper_legacy_gamma_set,
	.destroy = nv50_head_destroy,
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = nv50_head_atomic_duplicate_state,
	.atomic_destroy_state = nv50_head_atomic_destroy_state,
};
2389
/* Create head @index: allocate primary and cursor planes, register the
 * CRTC, allocate the double-buffered gamma LUT BOs (1025 entries of 8
 * bytes — 257 used, see nv50_head_lut_load()), and create the overlay
 * immediate/DMA channels.  Returns 0 or a negative error code.
 */
static int
nv50_head_create(struct drm_device *dev, int index)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nvif_device *device = &drm->client.device;
	struct nv50_disp *disp = nv50_disp(dev);
	struct nv50_head *head;
	struct nv50_base *base;
	struct nv50_curs *curs;
	struct drm_crtc *crtc;
	int ret, i;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (!head)
		return -ENOMEM;

	head->base.index = index;
	ret = nv50_base_new(drm, head, &base);
	if (ret == 0)
		ret = nv50_curs_new(drm, head, &curs);
	if (ret) {
		/* NOTE(review): if nv50_base_new() succeeded but
		 * nv50_curs_new() failed, the base plane appears to be
		 * leaked here — confirm against plane cleanup paths.
		 */
		kfree(head);
		return ret;
	}

	crtc = &head->base.base;
	/* NOTE(review): return value of drm_crtc_init_with_planes() is
	 * ignored; verify failure here is impossible or handle it.
	 */
	drm_crtc_init_with_planes(dev, crtc, &base->wndw.plane,
				  &curs->wndw.plane, &nv50_head_func,
				  "head-%d", head->base.index);
	drm_crtc_helper_add(crtc, &nv50_head_help);
	drm_mode_crtc_set_gamma_size(crtc, 256);

	for (i = 0; i < ARRAY_SIZE(head->lut.nvbo); i++) {
		ret = nouveau_bo_new_pin_map(&drm->client, 1025 * 8, 0x100,
					     TTM_PL_FLAG_VRAM,
					     &head->lut.nvbo[i]);
		if (ret)
			goto out;
	}

	/* allocate overlay resources */
	ret = nv50_oimm_create(device, disp->disp, index, &head->oimm);
	if (ret)
		goto out;

	ret = nv50_ovly_create(device, disp->disp, index, disp->sync->bo.offset,
			       &head->ovly);
	if (ret)
		goto out;

out:
	/* Any failure after CRTC init tears the whole head down. */
	if (ret)
		nv50_head_destroy(crtc);
	return ret;
}
2445
2446 /******************************************************************************
2447  * Output path helpers
2448  *****************************************************************************/
2449 static void
2450 nv50_outp_release(struct nouveau_encoder *nv_encoder)
2451 {
2452         struct nv50_disp *disp = nv50_disp(nv_encoder->base.base.dev);
2453         struct {
2454                 struct nv50_disp_mthd_v1 base;
2455         } args = {
2456                 .base.version = 1,
2457                 .base.method = NV50_DISP_MTHD_V1_RELEASE,
2458                 .base.hasht  = nv_encoder->dcb->hasht,
2459                 .base.hashm  = nv_encoder->dcb->hashm,
2460         };
2461
2462         nvif_mthd(disp->disp, 0, &args, sizeof(args));
2463         nv_encoder->or = -1;
2464         nv_encoder->link = 0;
2465 }
2466
2467 static int
2468 nv50_outp_acquire(struct nouveau_encoder *nv_encoder)
2469 {
2470         struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
2471         struct nv50_disp *disp = nv50_disp(drm->dev);
2472         struct {
2473                 struct nv50_disp_mthd_v1 base;
2474                 struct nv50_disp_acquire_v0 info;
2475         } args = {
2476                 .base.version = 1,
2477                 .base.method = NV50_DISP_MTHD_V1_ACQUIRE,
2478                 .base.hasht  = nv_encoder->dcb->hasht,
2479                 .base.hashm  = nv_encoder->dcb->hashm,
2480         };
2481         int ret;
2482
2483         ret = nvif_mthd(disp->disp, 0, &args, sizeof(args));
2484         if (ret) {
2485                 NV_ERROR(drm, "error acquiring output path: %d\n", ret);
2486                 return ret;
2487         }
2488
2489         nv_encoder->or = args.info.or;
2490         nv_encoder->link = args.info.link;
2491         return 0;
2492 }
2493
2494 static int
2495 nv50_outp_atomic_check_view(struct drm_encoder *encoder,
2496                             struct drm_crtc_state *crtc_state,
2497                             struct drm_connector_state *conn_state,
2498                             struct drm_display_mode *native_mode)
2499 {
2500         struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
2501         struct drm_display_mode *mode = &crtc_state->mode;
2502         struct drm_connector *connector = conn_state->connector;
2503         struct nouveau_conn_atom *asyc = nouveau_conn_atom(conn_state);
2504         struct nouveau_drm *drm = nouveau_drm(encoder->dev);
2505
2506         NV_ATOMIC(drm, "%s atomic_check\n", encoder->name);
2507         asyc->scaler.full = false;
2508         if (!native_mode)
2509                 return 0;
2510
2511         if (asyc->scaler.mode == DRM_MODE_SCALE_NONE) {
2512                 switch (connector->connector_type) {
2513                 case DRM_MODE_CONNECTOR_LVDS:
2514                 case DRM_MODE_CONNECTOR_eDP:
2515                         /* Force use of scaler for non-EDID modes. */
2516                         if (adjusted_mode->type & DRM_MODE_TYPE_DRIVER)
2517                                 break;
2518                         mode = native_mode;
2519                         asyc->scaler.full = true;
2520                         break;
2521                 default:
2522                         break;
2523                 }
2524         } else {
2525                 mode = native_mode;
2526         }
2527
2528         if (!drm_mode_equal(adjusted_mode, mode)) {
2529                 drm_mode_copy(adjusted_mode, mode);
2530                 crtc_state->mode_changed = true;
2531         }
2532
2533         return 0;
2534 }
2535
2536 static int
2537 nv50_outp_atomic_check(struct drm_encoder *encoder,
2538                        struct drm_crtc_state *crtc_state,
2539                        struct drm_connector_state *conn_state)
2540 {
2541         struct nouveau_connector *nv_connector =
2542                 nouveau_connector(conn_state->connector);
2543         return nv50_outp_atomic_check_view(encoder, crtc_state, conn_state,
2544                                            nv_connector->native_mode);
2545 }
2546
2547 /******************************************************************************
2548  * DAC
2549  *****************************************************************************/
/* Detach the DAC from its head and release its output resources. */
static void
nv50_dac_disable(struct drm_encoder *encoder)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nv50_mast *mast = nv50_mast(encoder->dev);
	const int or = nv_encoder->or;
	u32 *push;

	if (nv_encoder->crtc) {
		push = evo_wait(mast, 4);
		if (push) {
			/* Write 0 to the per-OR control method to unbind the
			 * head; the method offset differs pre/post GF110.
			 */
			if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
				evo_mthd(push, 0x0400 + (or * 0x080), 1);
				evo_data(push, 0x00000000);
			} else {
				evo_mthd(push, 0x0180 + (or * 0x020), 1);
				evo_data(push, 0x00000000);
			}
			evo_kick(push, mast);
		}
	}

	nv_encoder->crtc = NULL;
	nv50_outp_release(nv_encoder);
}
2575
/* Bind the DAC to its head and program sync polarity for the new mode. */
static void
nv50_dac_enable(struct drm_encoder *encoder)
{
	struct nv50_mast *mast = nv50_mast(encoder->dev);
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
	struct drm_display_mode *mode = &nv_crtc->base.state->adjusted_mode;
	u32 *push;

	/* Acquire an output resource (OR) before programming it. */
	nv50_outp_acquire(nv_encoder);

	push = evo_wait(mast, 8);
	if (push) {
		if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
			/* Pre-GF110: per-OR method takes head owner mask plus
			 * sync polarity flags.
			 */
			u32 syncs = 0x00000000;

			if (mode->flags & DRM_MODE_FLAG_NHSYNC)
				syncs |= 0x00000001;
			if (mode->flags & DRM_MODE_FLAG_NVSYNC)
				syncs |= 0x00000002;

			evo_mthd(push, 0x0400 + (nv_encoder->or * 0x080), 2);
			evo_data(push, 1 << nv_crtc->index);
			evo_data(push, syncs);
		} else {
			/* GF110+: sync/interlace are programmed per-head; the
			 * OR method takes only the owner mask.
			 * NOTE(review): 0x31ec6000 is an opaque hardware
			 * constant — presumably from the binary driver.
			 */
			u32 magic = 0x31ec6000 | (nv_crtc->index << 25);
			u32 syncs = 0x00000001;

			if (mode->flags & DRM_MODE_FLAG_NHSYNC)
				syncs |= 0x00000008;
			if (mode->flags & DRM_MODE_FLAG_NVSYNC)
				syncs |= 0x00000010;

			if (mode->flags & DRM_MODE_FLAG_INTERLACE)
				magic |= 0x00000001;

			evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2);
			evo_data(push, syncs);
			evo_data(push, magic);
			evo_mthd(push, 0x0180 + (nv_encoder->or * 0x020), 1);
			evo_data(push, 1 << nv_crtc->index);
		}

		evo_kick(push, mast);
	}

	nv_encoder->crtc = encoder->crtc;
}
2624
2625 static enum drm_connector_status
2626 nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
2627 {
2628         struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
2629         struct nv50_disp *disp = nv50_disp(encoder->dev);
2630         struct {
2631                 struct nv50_disp_mthd_v1 base;
2632                 struct nv50_disp_dac_load_v0 load;
2633         } args = {
2634                 .base.version = 1,
2635                 .base.method = NV50_DISP_MTHD_V1_DAC_LOAD,
2636                 .base.hasht  = nv_encoder->dcb->hasht,
2637                 .base.hashm  = nv_encoder->dcb->hashm,
2638         };
2639         int ret;
2640
2641         args.load.data = nouveau_drm(encoder->dev)->vbios.dactestval;
2642         if (args.load.data == 0)
2643                 args.load.data = 340;
2644
2645         ret = nvif_mthd(disp->disp, 0, &args, sizeof(args));
2646         if (ret || !args.load.load)
2647                 return connector_status_disconnected;
2648
2649         return connector_status_connected;
2650 }
2651
/* Encoder helper vfuncs for analog (DAC) outputs. */
static const struct drm_encoder_helper_funcs
nv50_dac_help = {
	.atomic_check = nv50_outp_atomic_check,
	.enable = nv50_dac_enable,
	.disable = nv50_dac_disable,
	.detect = nv50_dac_detect
};
2659
static void
nv50_dac_destroy(struct drm_encoder *encoder)
{
	/* Unregister from DRM, then free the embedding nouveau_encoder. */
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}
2666
/* Base encoder vfuncs for analog (DAC) outputs. */
static const struct drm_encoder_funcs
nv50_dac_func = {
	.destroy = nv50_dac_destroy,
};
2671
2672 static int
2673 nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
2674 {
2675         struct nouveau_drm *drm = nouveau_drm(connector->dev);
2676         struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
2677         struct nvkm_i2c_bus *bus;
2678         struct nouveau_encoder *nv_encoder;
2679         struct drm_encoder *encoder;
2680         int type = DRM_MODE_ENCODER_DAC;
2681
2682         nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
2683         if (!nv_encoder)
2684                 return -ENOMEM;
2685         nv_encoder->dcb = dcbe;
2686
2687         bus = nvkm_i2c_bus_find(i2c, dcbe->i2c_index);
2688         if (bus)
2689                 nv_encoder->i2c = &bus->i2c;
2690
2691         encoder = to_drm_encoder(nv_encoder);
2692         encoder->possible_crtcs = dcbe->heads;
2693         encoder->possible_clones = 0;
2694         drm_encoder_init(connector->dev, encoder, &nv50_dac_func, type,
2695                          "dac-%04x-%04x", dcbe->hasht, dcbe->hashm);
2696         drm_encoder_helper_add(encoder, &nv50_dac_help);
2697
2698         drm_mode_connector_attach_encoder(connector, encoder);
2699         return 0;
2700 }
2701
2702 /******************************************************************************
2703  * Audio
2704  *****************************************************************************/
/* Disable HDA audio on the SOR/head pair by submitting the ELD method
 * with no ELD payload.
 */
static void
nv50_audio_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nv50_disp *disp = nv50_disp(encoder->dev);
	struct {
		struct nv50_disp_mthd_v1 base;
		struct nv50_disp_sor_hda_eld_v0 eld;
	} args = {
		.base.version = 1,
		.base.method  = NV50_DISP_MTHD_V1_SOR_HDA_ELD,
		.base.hasht   = nv_encoder->dcb->hasht,
		/* Head index is encoded into the hash mask. */
		.base.hashm   = (0xf0ff & nv_encoder->dcb->hashm) |
				(0x0100 << nv_crtc->index),
	};

	nvif_mthd(disp->disp, 0, &args, sizeof(args));
}
2723
/* Push the connector's ELD to the display engine to enable HDA audio. */
static void
nv50_audio_enable(struct drm_encoder *encoder, struct drm_display_mode *mode)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
	struct nouveau_connector *nv_connector;
	struct nv50_disp *disp = nv50_disp(encoder->dev);
	struct __packed {
		struct {
			struct nv50_disp_mthd_v1 mthd;
			struct nv50_disp_sor_hda_eld_v0 eld;
		} base;
		u8 data[sizeof(nv_connector->base.eld)];
	} args = {
		.base.mthd.version = 1,
		.base.mthd.method  = NV50_DISP_MTHD_V1_SOR_HDA_ELD,
		.base.mthd.hasht   = nv_encoder->dcb->hasht,
		/* Head index is encoded into the hash mask. */
		.base.mthd.hashm   = (0xf0ff & nv_encoder->dcb->hashm) |
				     (0x0100 << nv_crtc->index),
	};

	/* NOTE(review): nouveau_encoder_connector_get() looks like it could
	 * return NULL, which would fault below — confirm callers always
	 * have a connector bound here.
	 */
	nv_connector = nouveau_encoder_connector_get(nv_encoder);
	if (!drm_detect_monitor_audio(nv_connector->edid))
		return;

	memcpy(args.data, nv_connector->base.eld, sizeof(args.data));

	/* Only transfer the used portion of the ELD buffer. */
	nvif_mthd(disp->disp, 0, &args,
		  sizeof(args.base) + drm_eld_size(args.data));
}
2754
2755 /******************************************************************************
2756  * HDMI
2757  *****************************************************************************/
/* Power down HDMI on the SOR/head pair (pwr.state is left zero). */
static void
nv50_hdmi_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nv50_disp *disp = nv50_disp(encoder->dev);
	struct {
		struct nv50_disp_mthd_v1 base;
		struct nv50_disp_sor_hdmi_pwr_v0 pwr;
	} args = {
		.base.version = 1,
		.base.method = NV50_DISP_MTHD_V1_SOR_HDMI_PWR,
		.base.hasht  = nv_encoder->dcb->hasht,
		/* Head index is encoded into the hash mask. */
		.base.hashm  = (0xf0ff & nv_encoder->dcb->hashm) |
			       (0x0100 << nv_crtc->index),
	};

	nvif_mthd(disp->disp, 0, &args, sizeof(args));
}
2776
/* Power up HDMI on the SOR/head pair, sending AVI/vendor infoframes and
 * audio configuration for the given mode.
 */
static void
nv50_hdmi_enable(struct drm_encoder *encoder, struct drm_display_mode *mode)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
	struct nv50_disp *disp = nv50_disp(encoder->dev);
	struct {
		struct nv50_disp_mthd_v1 base;
		struct nv50_disp_sor_hdmi_pwr_v0 pwr;
		u8 infoframes[2 * 17]; /* two frames, up to 17 bytes each */
	} args = {
		.base.version = 1,
		.base.method = NV50_DISP_MTHD_V1_SOR_HDMI_PWR,
		.base.hasht  = nv_encoder->dcb->hasht,
		/* Head index is encoded into the hash mask. */
		.base.hashm  = (0xf0ff & nv_encoder->dcb->hashm) |
			       (0x0100 << nv_crtc->index),
		.pwr.state = 1,
		.pwr.rekey = 56, /* binary driver, and tegra, constant */
	};
	struct nouveau_connector *nv_connector;
	u32 max_ac_packet;
	union hdmi_infoframe avi_frame;
	union hdmi_infoframe vendor_frame;
	int ret;
	int size;

	/* Only enable the HDMI block for actual HDMI sinks. */
	nv_connector = nouveau_encoder_connector_get(nv_encoder);
	if (!drm_detect_hdmi_monitor(nv_connector->edid))
		return;

	ret = drm_hdmi_avi_infoframe_from_display_mode(&avi_frame.avi, mode,
						       false);
	if (!ret) {
		/* We have an AVI InfoFrame, populate it to the display */
		args.pwr.avi_infoframe_length
			= hdmi_infoframe_pack(&avi_frame, args.infoframes, 17);
	}

	/* The vendor frame is packed directly after the AVI frame. */
	ret = drm_hdmi_vendor_infoframe_from_display_mode(&vendor_frame.vendor.hdmi,
							  &nv_connector->base, mode);
	if (!ret) {
		/* We have a Vendor InfoFrame, populate it to the display */
		args.pwr.vendor_infoframe_length
			= hdmi_infoframe_pack(&vendor_frame,
					      args.infoframes
					      + args.pwr.avi_infoframe_length,
					      17);
	}

	/* Audio packet budget derived from horizontal blanking. */
	max_ac_packet  = mode->htotal - mode->hdisplay;
	max_ac_packet -= args.pwr.rekey;
	max_ac_packet -= 18; /* constant from tegra */
	args.pwr.max_ac_packet = max_ac_packet / 32;

	/* Only send the bytes actually used by the packed infoframes. */
	size = sizeof(args.base)
		+ sizeof(args.pwr)
		+ args.pwr.avi_infoframe_length
		+ args.pwr.vendor_infoframe_length;
	nvif_mthd(disp->disp, 0, &args, size);
	nv50_audio_enable(encoder, mode);
}
2838
2839 /******************************************************************************
2840  * MST
2841  *****************************************************************************/
/* container_of() accessors for the MST master/connector/encoder wrappers. */
#define nv50_mstm(p) container_of((p), struct nv50_mstm, mgr)
#define nv50_mstc(p) container_of((p), struct nv50_mstc, connector)
#define nv50_msto(p) container_of((p), struct nv50_msto, encoder)
2845
/* MST master: per-output-path state for a DP MST topology. */
struct nv50_mstm {
	struct nouveau_encoder *outp;		/* physical output path */

	struct drm_dp_mst_topology_mgr mgr;	/* DRM MST topology manager */
	struct nv50_msto *msto[4];		/* stream encoders, indexed by head */

	bool modified;	/* payloads changed; prepare/cleanup pending */
	bool disabled;	/* last link dropped; output release pending */
	int links;	/* number of currently-enabled streams */
};
2856
/* MST connector: one per DP MST port in the topology. */
struct nv50_mstc {
	struct nv50_mstm *mstm;		/* owning MST master */
	struct drm_dp_mst_port *port;	/* topology port (checked for NULL) */
	struct drm_connector connector;

	struct drm_display_mode *native;	/* cached native mode */
	struct edid *edid;

	int pbn;	/* bandwidth required by the current mode */
};
2867
/* MST stream encoder: routes one head onto the MST link. */
struct nv50_msto {
	struct drm_encoder encoder;

	struct nv50_head *head;	/* head currently driving this stream */
	struct nv50_mstc *mstc;	/* connector currently bound to it */
	bool disabled;		/* set on disable, cleared in cleanup */
};
2875
2876 static struct drm_dp_payload *
2877 nv50_msto_payload(struct nv50_msto *msto)
2878 {
2879         struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
2880         struct nv50_mstc *mstc = msto->mstc;
2881         struct nv50_mstm *mstm = mstc->mstm;
2882         int vcpi = mstc->port->vcpi.vcpi, i;
2883
2884         NV_ATOMIC(drm, "%s: vcpi %d\n", msto->encoder.name, vcpi);
2885         for (i = 0; i < mstm->mgr.max_payloads; i++) {
2886                 struct drm_dp_payload *payload = &mstm->mgr.payloads[i];
2887                 NV_ATOMIC(drm, "%s: %d: vcpi %d start 0x%02x slots 0x%02x\n",
2888                           mstm->outp->base.base.name, i, payload->vcpi,
2889                           payload->start_slot, payload->num_slots);
2890         }
2891
2892         for (i = 0; i < mstm->mgr.max_payloads; i++) {
2893                 struct drm_dp_payload *payload = &mstm->mgr.payloads[i];
2894                 if (payload->vcpi == vcpi)
2895                         return payload;
2896         }
2897
2898         return NULL;
2899 }
2900
/* Post-commit cleanup for one MST stream encoder. */
static void
nv50_msto_cleanup(struct nv50_msto *msto)
{
	struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
	struct nv50_mstc *mstc = msto->mstc;
	struct nv50_mstm *mstm = mstc->mstm;

	NV_ATOMIC(drm, "%s: msto cleanup\n", msto->encoder.name);
	/* If the VCPI no longer appears in the payload table, return the
	 * allocation to the topology manager.
	 */
	if (mstc->port && mstc->port->vcpi.vcpi > 0 && !nv50_msto_payload(msto))
		drm_dp_mst_deallocate_vcpi(&mstm->mgr, mstc->port);
	/* A disabled stream drops its head/connector bindings entirely. */
	if (msto->disabled) {
		msto->mstc = NULL;
		msto->head = NULL;
		msto->disabled = false;
	}
}
2917
/* Program the display core with this stream's VCPI timeslot range. */
static void
nv50_msto_prepare(struct nv50_msto *msto)
{
	struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
	struct nv50_mstc *mstc = msto->mstc;
	struct nv50_mstm *mstm = mstc->mstm;
	struct {
		struct nv50_disp_mthd_v1 base;
		struct nv50_disp_sor_dp_mst_vcpi_v0 vcpi;
	} args = {
		.base.version = 1,
		.base.method = NV50_DISP_MTHD_V1_SOR_DP_MST_VCPI,
		.base.hasht  = mstm->outp->dcb->hasht,
		/* Head index is encoded into the hash mask. */
		.base.hashm  = (0xf0ff & mstm->outp->dcb->hashm) |
			       (0x0100 << msto->head->base.index),
	};

	NV_ATOMIC(drm, "%s: msto prepare\n", msto->encoder.name);
	/* With no active VCPI, the all-zero args clear the stream's slots. */
	if (mstc->port && mstc->port->vcpi.vcpi > 0) {
		struct drm_dp_payload *payload = nv50_msto_payload(msto);
		if (payload) {
			args.vcpi.start_slot = payload->start_slot;
			args.vcpi.num_slots = payload->num_slots;
			args.vcpi.pbn = mstc->port->vcpi.pbn;
			args.vcpi.aligned_pbn = mstc->port->vcpi.aligned_pbn;
		}
	}

	NV_ATOMIC(drm, "%s: %s: %02x %02x %04x %04x\n",
		  msto->encoder.name, msto->head->base.base.name,
		  args.vcpi.start_slot, args.vcpi.num_slots,
		  args.vcpi.pbn, args.vcpi.aligned_pbn);
	nvif_mthd(&drm->display->disp, 0, &args, sizeof(args));
}
2952
/* Validate an MST stream: compute required bandwidth and check the link
 * has enough free timeslots, then run the common view check.
 */
static int
nv50_msto_atomic_check(struct drm_encoder *encoder,
		       struct drm_crtc_state *crtc_state,
		       struct drm_connector_state *conn_state)
{
	struct nv50_mstc *mstc = nv50_mstc(conn_state->connector);
	struct nv50_mstm *mstm = mstc->mstm;
	int bpp = conn_state->connector->display_info.bpc * 3;
	int slots;

	/* Convert pixel clock + depth into PBN units; cached for enable. */
	mstc->pbn = drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.clock, bpp);

	slots = drm_dp_find_vcpi_slots(&mstm->mgr, mstc->pbn);
	if (slots < 0)
		return slots;

	return nv50_outp_atomic_check_view(encoder, crtc_state, conn_state,
					   mstc->native);
}
2972
/* Enable an MST stream: allocate its VCPI and route the head through
 * the physical output path.
 */
static void
nv50_msto_enable(struct drm_encoder *encoder)
{
	struct nv50_head *head = nv50_head(encoder->crtc);
	struct nv50_msto *msto = nv50_msto(encoder);
	struct nv50_mstc *mstc = NULL;
	struct nv50_mstm *mstm = NULL;
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	u8 proto, depth;
	int slots;
	bool r;

	/* Find the MST connector whose state routes to this encoder. */
	drm_connector_list_iter_begin(encoder->dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->state->best_encoder == &msto->encoder) {
			mstc = nv50_mstc(connector);
			mstm = mstc->mstm;
			break;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	if (WARN_ON(!mstc))
		return;

	/* Allocate the VCPI sized during atomic_check (mstc->pbn). */
	slots = drm_dp_find_vcpi_slots(&mstm->mgr, mstc->pbn);
	r = drm_dp_mst_allocate_vcpi(&mstm->mgr, mstc->port, mstc->pbn, slots);
	WARN_ON(!r);

	/* First active stream acquires the physical output path. */
	if (!mstm->links++)
		nv50_outp_acquire(mstm->outp);

	/* NOTE(review): proto/depth values are opaque hardware protocol
	 * selectors — presumably matching the SOR sublink and pixel depth.
	 */
	if (mstm->outp->link & 1)
		proto = 0x8;
	else
		proto = 0x9;

	switch (mstc->connector.display_info.bpc) {
	case  6: depth = 0x2; break;
	case  8: depth = 0x5; break;
	case 10:
	default: depth = 0x6; break;
	}

	mstm->outp->update(mstm->outp, head->base.index,
			   &head->base.base.state->adjusted_mode, proto, depth);

	msto->head = head;
	msto->mstc = mstc;
	mstm->modified = true;
}
3025
/* Disable an MST stream; actual teardown finishes in msto_cleanup. */
static void
nv50_msto_disable(struct drm_encoder *encoder)
{
	struct nv50_msto *msto = nv50_msto(encoder);
	struct nv50_mstc *mstc = msto->mstc;
	struct nv50_mstm *mstm = mstc->mstm;

	/* Return this stream's timeslots; the port may already be gone. */
	if (mstc->port)
		drm_dp_mst_reset_vcpi_slots(&mstm->mgr, mstc->port);

	mstm->outp->update(mstm->outp, msto->head->base.index, NULL, 0, 0);
	mstm->modified = true;
	/* Last stream down: flag the output path for release. */
	if (!--mstm->links)
		mstm->disabled = true;
	msto->disabled = true;
}
3042
/* Encoder helper vfuncs for MST stream encoders. */
static const struct drm_encoder_helper_funcs
nv50_msto_help = {
	.disable = nv50_msto_disable,
	.enable = nv50_msto_enable,
	.atomic_check = nv50_msto_atomic_check,
};
3049
static void
nv50_msto_destroy(struct drm_encoder *encoder)
{
	/* Unregister the encoder, then free the containing nv50_msto. */
	drm_encoder_cleanup(encoder);
	kfree(nv50_msto(encoder));
}
3057
/* Base encoder vfuncs for MST stream encoders. */
static const struct drm_encoder_funcs
nv50_msto = {
	.destroy = nv50_msto_destroy,
};
3062
3063 static int
3064 nv50_msto_new(struct drm_device *dev, u32 heads, const char *name, int id,
3065               struct nv50_msto **pmsto)
3066 {
3067         struct nv50_msto *msto;
3068         int ret;
3069
3070         if (!(msto = *pmsto = kzalloc(sizeof(*msto), GFP_KERNEL)))
3071                 return -ENOMEM;
3072
3073         ret = drm_encoder_init(dev, &msto->encoder, &nv50_msto,
3074                                DRM_MODE_ENCODER_DPMST, "%s-mst-%d", name, id);
3075         if (ret) {
3076                 kfree(*pmsto);
3077                 *pmsto = NULL;
3078                 return ret;
3079         }
3080
3081         drm_encoder_helper_add(&msto->encoder, &nv50_msto_help);
3082         msto->encoder.possible_crtcs = heads;
3083         return 0;
3084 }
3085
3086 static struct drm_encoder *
3087 nv50_mstc_atomic_best_encoder(struct drm_connector *connector,
3088                               struct drm_connector_state *connector_state)
3089 {
3090         struct nv50_head *head = nv50_head(connector_state->crtc);
3091         struct nv50_mstc *mstc = nv50_mstc(connector);
3092         if (mstc->port) {
3093                 struct nv50_mstm *mstm = mstc->mstm;
3094                 return &mstm->msto[head->base.index]->encoder;
3095         }
3096         return NULL;
3097 }
3098
3099 static struct drm_encoder *
3100 nv50_mstc_best_encoder(struct drm_connector *connector)
3101 {
3102         struct nv50_mstc *mstc = nv50_mstc(connector);
3103         if (mstc->port) {
3104                 struct nv50_mstm *mstm = mstc->mstm;
3105                 return &mstm->msto[0]->encoder;
3106         }
3107         return NULL;
3108 }
3109
static enum drm_mode_status
nv50_mstc_mode_valid(struct drm_connector *connector,
		     struct drm_display_mode *mode)
{
	/* No per-mode filtering here; bandwidth is validated later by the
	 * VCPI slot check in nv50_msto_atomic_check().
	 */
	return MODE_OK;
}
3116
/* Fetch the EDID over the MST topology and build the mode list. */
static int
nv50_mstc_get_modes(struct drm_connector *connector)
{
	struct nv50_mstc *mstc = nv50_mstc(connector);
	int ret = 0;

	mstc->edid = drm_dp_mst_get_edid(&mstc->connector, mstc->port->mgr, mstc->port);
	drm_mode_connector_update_edid_property(&mstc->connector, mstc->edid);
	if (mstc->edid)
		ret = drm_add_edid_modes(&mstc->connector, mstc->edid);

	/* Assume 8bpc when the EDID doesn't say otherwise. */
	if (!mstc->connector.display_info.bpc)
		mstc->connector.display_info.bpc = 8;

	/* Refresh the cached native mode used by atomic_check. */
	if (mstc->native)
		drm_mode_destroy(mstc->connector.dev, mstc->native);
	mstc->native = nouveau_conn_native_mode(&mstc->connector);
	return ret;
}
3136
/* Connector helper vfuncs for MST connectors. */
static const struct drm_connector_helper_funcs
nv50_mstc_help = {
	.get_modes = nv50_mstc_get_modes,
	.mode_valid = nv50_mstc_mode_valid,
	.best_encoder = nv50_mstc_best_encoder,
	.atomic_best_encoder = nv50_mstc_atomic_best_encoder,
};
3144
static enum drm_connector_status
nv50_mstc_detect(struct drm_connector *connector, bool force)
{
	struct nv50_mstc *mstc = nv50_mstc(connector);
	/* A NULL port means the device is gone from the topology. */
	if (!mstc->port)
		return connector_status_disconnected;
	return drm_dp_mst_detect_port(connector, mstc->port->mgr, mstc->port);
}
3153
static void
nv50_mstc_destroy(struct drm_connector *connector)
{
	/* Unregister the connector, then free the containing nv50_mstc. */
	drm_connector_cleanup(connector);
	kfree(nv50_mstc(connector));
}
3161
/* Connector vfuncs for MST connectors; state handling is shared with the
 * regular nouveau connector code.
 */
static const struct drm_connector_funcs
nv50_mstc = {
	.reset = nouveau_conn_reset,
	.detect = nv50_mstc_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = nv50_mstc_destroy,
	.atomic_duplicate_state = nouveau_conn_atomic_duplicate_state,
	.atomic_destroy_state = nouveau_conn_atomic_destroy_state,
	.atomic_set_property = nouveau_conn_atomic_set_property,
	.atomic_get_property = nouveau_conn_atomic_get_property,
};
3173
/* Create a connector for a newly-discovered MST port.  Returns 0 or a
 * negative errno; on success *pmstc holds the new connector.
 */
static int
nv50_mstc_new(struct nv50_mstm *mstm, struct drm_dp_mst_port *port,
	      const char *path, struct nv50_mstc **pmstc)
{
	struct drm_device *dev = mstm->outp->base.base.dev;
	struct nv50_mstc *mstc;
	int ret, i;

	if (!(mstc = *pmstc = kzalloc(sizeof(*mstc), GFP_KERNEL)))
		return -ENOMEM;
	mstc->mstm = mstm;
	mstc->port = port;

	ret = drm_connector_init(dev, &mstc->connector, &nv50_mstc,
				 DRM_MODE_CONNECTOR_DisplayPort);
	if (ret) {
		kfree(*pmstc);
		*pmstc = NULL;
		return ret;
	}

	drm_connector_helper_add(&mstc->connector, &nv50_mstc_help);

	/* Allocate the initial connector state via the vfunc table. */
	mstc->connector.funcs->reset(&mstc->connector);
	nouveau_conn_attach_properties(&mstc->connector);

	/* Attach every existing stream encoder as a possible route. */
	for (i = 0; i < ARRAY_SIZE(mstm->msto) && mstm->msto[i]; i++)
		drm_mode_connector_attach_encoder(&mstc->connector, &mstm->msto[i]->encoder);

	drm_object_attach_property(&mstc->connector.base, dev->mode_config.path_property, 0);
	drm_object_attach_property(&mstc->connector.base, dev->mode_config.tile_property, 0);
	drm_mode_connector_set_path_property(&mstc->connector, path);
	return 0;
}
3208
3209 static void
3210 nv50_mstm_cleanup(struct nv50_mstm *mstm)
3211 {
3212         struct nouveau_drm *drm = nouveau_drm(mstm->outp->base.base.dev);
3213         struct drm_encoder *encoder;
3214         int ret;
3215
3216         NV_ATOMIC(drm, "%s: mstm cleanup\n", mstm->outp->base.base.name);
3217         ret = drm_dp_check_act_status(&mstm->mgr);
3218
3219         ret = drm_dp_update_payload_part2(&mstm->mgr);
3220
3221         drm_for_each_encoder(encoder, mstm->outp->base.base.dev) {
3222                 if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
3223                         struct nv50_msto *msto = nv50_msto(encoder);
3224                         struct nv50_mstc *mstc = msto->mstc;
3225                         if (mstc && mstc->mstm == mstm)
3226                                 nv50_msto_cleanup(msto);
3227                 }
3228         }
3229
3230         mstm->modified = false;
3231 }
3232
3233 static void
3234 nv50_mstm_prepare(struct nv50_mstm *mstm)
3235 {
3236         struct nouveau_drm *drm = nouveau_drm(mstm->outp->base.base.dev);
3237         struct drm_encoder *encoder;
3238         int ret;
3239
3240         NV_ATOMIC(drm, "%s: mstm prepare\n", mstm->outp->base.base.name);
3241         ret = drm_dp_update_payload_part1(&mstm->mgr);
3242
3243         drm_for_each_encoder(encoder, mstm->outp->base.base.dev) {
3244                 if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
3245                         struct nv50_msto *msto = nv50_msto(encoder);
3246                         struct nv50_mstc *mstc = msto->mstc;
3247                         if (mstc && mstc->mstm == mstm)
3248                                 nv50_msto_prepare(msto);
3249                 }
3250         }
3251
3252         if (mstm->disabled) {
3253                 if (!mstm->links)
3254                         nv50_outp_release(mstm->outp);
3255                 mstm->disabled = false;
3256         }
3257 }
3258
3259 static void
3260 nv50_mstm_hotplug(struct drm_dp_mst_topology_mgr *mgr)
3261 {
3262         struct nv50_mstm *mstm = nv50_mstm(mgr);
3263         drm_kms_helper_hotplug_event(mstm->outp->base.base.dev);
3264 }
3265
/* MST topology callback: the port backing @connector has disappeared. */
static void
nv50_mstm_destroy_connector(struct drm_dp_mst_topology_mgr *mgr,
                            struct drm_connector *connector)
{
        struct nouveau_drm *drm = nouveau_drm(connector->dev);
        struct nv50_mstc *mstc = nv50_mstc(connector);

        drm_connector_unregister(&mstc->connector);

        /* Remove from fbdev and clear mstc->port under the modeset locks,
         * so concurrent modeset code never sees a half-torn-down connector.
         */
        drm_modeset_lock_all(drm->dev);
        drm_fb_helper_remove_one_connector(&drm->fbcon->helper, &mstc->connector);
        mstc->port = NULL;
        drm_modeset_unlock_all(drm->dev);

        /* NOTE(review): presumably drops the reference the topology manager
         * held on the connector — confirm against the MST helper contract.
         */
        drm_connector_unreference(&mstc->connector);
}
3282
/* MST topology callback: expose a freshly-created MST connector to
 * userspace and to the fbdev helper.
 */
static void
nv50_mstm_register_connector(struct drm_connector *connector)
{
        struct nouveau_drm *drm = nouveau_drm(connector->dev);

        /* fbdev bookkeeping happens under the modeset locks. */
        drm_modeset_lock_all(drm->dev);
        drm_fb_helper_add_one_connector(&drm->fbcon->helper, connector);
        drm_modeset_unlock_all(drm->dev);

        drm_connector_register(connector);
}
3294
3295 static struct drm_connector *
3296 nv50_mstm_add_connector(struct drm_dp_mst_topology_mgr *mgr,
3297                         struct drm_dp_mst_port *port, const char *path)
3298 {
3299         struct nv50_mstm *mstm = nv50_mstm(mgr);
3300         struct nv50_mstc *mstc;
3301         int ret;
3302
3303         ret = nv50_mstc_new(mstm, port, path, &mstc);
3304         if (ret) {
3305                 if (mstc)
3306                         mstc->connector.funcs->destroy(&mstc->connector);
3307                 return NULL;
3308         }
3309
3310         return &mstc->connector;
3311 }
3312
/* Callbacks invoked by the DP MST topology manager as ports appear,
 * disappear, or the topology changes.
 */
static const struct drm_dp_mst_topology_cbs
nv50_mstm = {
        .add_connector = nv50_mstm_add_connector,
        .register_connector = nv50_mstm_register_connector,
        .destroy_connector = nv50_mstm_destroy_connector,
        .hotplug = nv50_mstm_hotplug,
};
3320
3321 void
3322 nv50_mstm_service(struct nv50_mstm *mstm)
3323 {
3324         struct drm_dp_aux *aux = mstm ? mstm->mgr.aux : NULL;
3325         bool handled = true;
3326         int ret;
3327         u8 esi[8] = {};
3328
3329         if (!aux)
3330                 return;
3331
3332         while (handled) {
3333                 ret = drm_dp_dpcd_read(aux, DP_SINK_COUNT_ESI, esi, 8);
3334                 if (ret != 8) {
3335                         drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, false);
3336                         return;
3337                 }
3338
3339                 drm_dp_mst_hpd_irq(&mstm->mgr, esi, &handled);
3340                 if (!handled)
3341                         break;
3342
3343                 drm_dp_dpcd_write(aux, DP_SINK_COUNT_ESI + 1, &esi[1], 3);
3344         }
3345 }
3346
3347 void
3348 nv50_mstm_remove(struct nv50_mstm *mstm)
3349 {
3350         if (mstm)
3351                 drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, false);
3352 }
3353
/* Program both the sink and the SOR for MST on/off.
 *
 * @dpcd:  the sink's cached DP_DPCD_REV; DP_MSTM_CTRL only exists on
 *         DPCD 1.2+ sinks, so older sinks skip the AUX write.
 * @state: non-zero to enable MST, zero to disable.
 *
 * Returns 0 on success or a negative error code.
 */
static int
nv50_mstm_enable(struct nv50_mstm *mstm, u8 dpcd, int state)
{
        struct nouveau_encoder *outp = mstm->outp;
        struct {
                struct nv50_disp_mthd_v1 base;
                struct nv50_disp_sor_dp_mst_link_v0 mst;
        } args = {
                .base.version = 1,
                .base.method = NV50_DISP_MTHD_V1_SOR_DP_MST_LINK,
                .base.hasht = outp->dcb->hasht,
                .base.hashm = outp->dcb->hashm,
                .mst.state = state,
        };
        struct nouveau_drm *drm = nouveau_drm(outp->base.base.dev);
        struct nvif_object *disp = &drm->display->disp;
        int ret;

        if (dpcd >= 0x12) {
                /* Read-modify-write the sink's MST enable bit.  Note that
                 * @dpcd is reused as scratch for the register value here.
                 */
                ret = drm_dp_dpcd_readb(mstm->mgr.aux, DP_MSTM_CTRL, &dpcd);
                if (ret < 0)
                        return ret;

                dpcd &= ~DP_MST_EN;
                if (state)
                        dpcd |= DP_MST_EN;

                ret = drm_dp_dpcd_writeb(mstm->mgr.aux, DP_MSTM_CTRL, dpcd);
                if (ret < 0)
                        return ret;
        }

        /* Tell the display core to (un)configure the SOR's MST link. */
        return nvif_mthd(disp, 0, &args, sizeof(args));
}
3388
/* Probe a sink for MST support and enable/disable it accordingly.
 *
 * @dpcd:  in/out DPCD cache; dpcd[0] is DP_DPCD_REV and is downgraded to
 *         0x11 here if the sink lacks MST capability, dpcd[1] receives
 *         DP_MSTM_CAP.
 * @allow: whether the caller permits MST to be enabled at all.
 *
 * Returns the resulting MST state (mgr.mst_state) on success, or a
 * negative error code.
 */
int
nv50_mstm_detect(struct nv50_mstm *mstm, u8 dpcd[8], int allow)
{
        int ret, state = 0;

        if (!mstm)
                return 0;

        if (dpcd[0] >= 0x12) {
                ret = drm_dp_dpcd_readb(mstm->mgr.aux, DP_MSTM_CAP, &dpcd[1]);
                if (ret < 0)
                        return ret;

                if (!(dpcd[1] & DP_MST_CAP))
                        dpcd[0] = 0x11;
                else
                        state = allow;
        }

        ret = nv50_mstm_enable(mstm, dpcd[0], state);
        if (ret)
                return ret;

        ret = drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, state);
        if (ret)
                /* Topology setup failed: back out the sink/SOR enable. */
                return nv50_mstm_enable(mstm, dpcd[0], 0);

        return mstm->mgr.mst_state;
}
3418
3419 static void
3420 nv50_mstm_fini(struct nv50_mstm *mstm)
3421 {
3422         if (mstm && mstm->mgr.mst_state)
3423                 drm_dp_mst_topology_mgr_suspend(&mstm->mgr);
3424 }
3425
3426 static void
3427 nv50_mstm_init(struct nv50_mstm *mstm)
3428 {
3429         if (mstm && mstm->mgr.mst_state)
3430                 drm_dp_mst_topology_mgr_resume(&mstm->mgr);
3431 }
3432
3433 static void
3434 nv50_mstm_del(struct nv50_mstm **pmstm)
3435 {
3436         struct nv50_mstm *mstm = *pmstm;
3437         if (mstm) {
3438                 kfree(*pmstm);
3439                 *pmstm = NULL;
3440         }
3441 }
3442
/* Allocate and initialise MST state ("mstm") for a DP encoder, including
 * one MST stream encoder (msto) per possible head.
 *
 * @aux_max:      maximum AUX transfer size for the topology manager.
 * @conn_base_id: base connector id handed to the topology manager.
 *
 * On failure *pmstm may still point at the allocation; NOTE(review):
 * presumably the caller's teardown path (nv50_mstm_del) frees it —
 * confirm against nv50_sor_destroy.
 */
static int
nv50_mstm_new(struct nouveau_encoder *outp, struct drm_dp_aux *aux, int aux_max,
              int conn_base_id, struct nv50_mstm **pmstm)
{
        const int max_payloads = hweight8(outp->dcb->heads);
        struct drm_device *dev = outp->base.base.dev;
        struct nv50_mstm *mstm;
        int ret, i;
        u8 dpcd;

        /* This is a workaround for some monitors not functioning
         * correctly in MST mode on initial module load.  I think
         * some bad interaction with the VBIOS may be responsible.
         *
         * A good ol' off and on again seems to work here ;)
         */
        ret = drm_dp_dpcd_readb(aux, DP_DPCD_REV, &dpcd);
        if (ret >= 0 && dpcd >= 0x12)
                drm_dp_dpcd_writeb(aux, DP_MSTM_CTRL, 0);

        if (!(mstm = *pmstm = kzalloc(sizeof(*mstm), GFP_KERNEL)))
                return -ENOMEM;
        mstm->outp = outp;
        mstm->mgr.cbs = &nv50_mstm;

        ret = drm_dp_mst_topology_mgr_init(&mstm->mgr, dev, aux, aux_max,
                                           max_payloads, conn_base_id);
        if (ret)
                return ret;

        /* One stream encoder per head that may drive this output. */
        for (i = 0; i < max_payloads; i++) {
                ret = nv50_msto_new(dev, outp->dcb->heads, outp->base.base.name,
                                    i, &mstm->msto[i]);
                if (ret)
                        return ret;
        }

        return 0;
}
3482
3483 /******************************************************************************
3484  * SOR
3485  *****************************************************************************/
/* Push an updated SOR control word to the core EVO channel for @head.
 *
 * A NULL @mode detaches the head from this SOR; otherwise the protocol,
 * head mask, sync polarity and depth are merged into nv_encoder->ctrl
 * and written out.
 */
static void
nv50_sor_update(struct nouveau_encoder *nv_encoder, u8 head,
                struct drm_display_mode *mode, u8 proto, u8 depth)
{
        struct nv50_dmac *core = &nv50_mast(nv_encoder->base.base.dev)->base;
        u32 *push;

        if (!mode) {
                nv_encoder->ctrl &= ~BIT(head);
                /* If no heads remain attached, clear the whole word. */
                if (!(nv_encoder->ctrl & 0x0000000f))
                        nv_encoder->ctrl = 0;
        } else {
                nv_encoder->ctrl |= proto << 8;
                nv_encoder->ctrl |= BIT(head);
        }

        if ((push = evo_wait(core, 6))) {
                if (core->base.user.oclass < GF110_DISP_CORE_CHANNEL_DMA) {
                        /* Pre-GF110: sync polarity and depth are encoded in
                         * the SOR control method itself.
                         */
                        if (mode) {
                                if (mode->flags & DRM_MODE_FLAG_NHSYNC)
                                        nv_encoder->ctrl |= 0x00001000;
                                if (mode->flags & DRM_MODE_FLAG_NVSYNC)
                                        nv_encoder->ctrl |= 0x00002000;
                                nv_encoder->ctrl |= depth << 16;
                        }
                        evo_mthd(push, 0x0600 + (nv_encoder->or * 0x40), 1);
                } else {
                        /* GF110+: syncs/depth go through a per-head method;
                         * the SOR control carries only protocol + head mask.
                         * The 0x31ec6000 "magic" value is hardware-specific
                         * and undocumented here — do not change lightly.
                         */
                        if (mode) {
                                u32 magic = 0x31ec6000 | (head << 25);
                                u32 syncs = 0x00000001;
                                if (mode->flags & DRM_MODE_FLAG_NHSYNC)
                                        syncs |= 0x00000008;
                                if (mode->flags & DRM_MODE_FLAG_NVSYNC)
                                        syncs |= 0x00000010;
                                if (mode->flags & DRM_MODE_FLAG_INTERLACE)
                                        magic |= 0x00000001;

                                evo_mthd(push, 0x0404 + (head * 0x300), 2);
                                evo_data(push, syncs | (depth << 6));
                                evo_data(push, magic);
                        }
                        evo_mthd(push, 0x0200 + (nv_encoder->or * 0x20), 1);
                }
                evo_data(push, nv_encoder->ctrl);
                evo_kick(push, core);
        }
}
3533
/* Disable a SOR encoder: power the DP sink down to D3, detach the head,
 * shut off audio/HDMI infoframes, and release the output resources.
 */
static void
nv50_sor_disable(struct drm_encoder *encoder)
{
        struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
        struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);

        nv_encoder->crtc = NULL;

        if (nv_crtc) {
                struct nvkm_i2c_aux *aux = nv_encoder->aux;
                u8 pwr;

                /* DP sinks only: set DP_SET_POWER to D3 (power save).
                 * Best-effort — the write result is intentionally ignored.
                 */
                if (aux) {
                        int ret = nvkm_rdaux(aux, DP_SET_POWER, &pwr, 1);
                        if (ret == 0) {
                                pwr &= ~DP_SET_POWER_MASK;
                                pwr |=  DP_SET_POWER_D3;
                                nvkm_wraux(aux, DP_SET_POWER, &pwr, 1);
                        }
                }

                /* NULL mode detaches the head from this SOR. */
                nv_encoder->update(nv_encoder, nv_crtc->index, NULL, 0, 0);
                nv50_audio_disable(encoder, nv_crtc);
                nv50_hdmi_disable(&nv_encoder->base.base, nv_crtc);
                nv50_outp_release(nv_encoder);
        }
}
3561
/* Enable a SOR encoder for the mode on its attached CRTC.
 *
 * Selects the hardware protocol and colour depth from the DCB output
 * type (TMDS/LVDS/DP), runs the LVDS VBIOS script where needed, and
 * finally pushes the SOR control update.
 */
static void
nv50_sor_enable(struct drm_encoder *encoder)
{
        struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
        struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
        struct drm_display_mode *mode = &nv_crtc->base.state->adjusted_mode;
        struct {
                struct nv50_disp_mthd_v1 base;
                struct nv50_disp_sor_lvds_script_v0 lvds;
        } lvds = {
                .base.version = 1,
                .base.method  = NV50_DISP_MTHD_V1_SOR_LVDS_SCRIPT,
                .base.hasht   = nv_encoder->dcb->hasht,
                .base.hashm   = nv_encoder->dcb->hashm,
        };
        struct nv50_disp *disp = nv50_disp(encoder->dev);
        struct drm_device *dev = encoder->dev;
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct nouveau_connector *nv_connector;
        struct nvbios *bios = &drm->vbios;
        u8 proto = 0xf;        /* overwritten below; 0xf = invalid */
        u8 depth = 0x0;

        nv_connector = nouveau_encoder_connector_get(nv_encoder);
        nv_encoder->crtc = encoder->crtc;
        nv50_outp_acquire(nv_encoder);

        switch (nv_encoder->dcb->type) {
        case DCB_OUTPUT_TMDS:
                if (nv_encoder->link & 1) {
                        proto = 0x1;
                        /* Only enable dual-link if:
                         *  - Need to (i.e. rate > 165MHz)
                         *  - DCB says we can
                         *  - Not an HDMI monitor, since there's no dual-link
                         *    on HDMI.
                         */
                        if (mode->clock >= 165000 &&
                            nv_encoder->dcb->duallink_possible &&
                            !drm_detect_hdmi_monitor(nv_connector->edid))
                                proto |= 0x4;
                } else {
                        proto = 0x2;
                }

                nv50_hdmi_enable(&nv_encoder->base.base, mode);
                break;
        case DCB_OUTPUT_LVDS:
                proto = 0x0;

                /* Build the LVDS script flags: 0x0100 = dual-link,
                 * 0x0200 = 24-bit panel, from VBIOS/EDID/strap data.
                 */
                if (bios->fp_no_ddc) {
                        if (bios->fp.dual_link)
                                lvds.lvds.script |= 0x0100;
                        if (bios->fp.if_is_24bit)
                                lvds.lvds.script |= 0x0200;
                } else {
                        /* NOTE(review): SPWG panels encode dual-link in EDID
                         * byte 121 — confirm against the SPWG spec.
                         */
                        if (nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
                                if (((u8 *)nv_connector->edid)[121] == 2)
                                        lvds.lvds.script |= 0x0100;
                        } else
                        if (mode->clock >= bios->fp.duallink_transition_clk) {
                                lvds.lvds.script |= 0x0100;
                        }

                        if (lvds.lvds.script & 0x0100) {
                                if (bios->fp.strapless_is_24bit & 2)
                                        lvds.lvds.script |= 0x0200;
                        } else {
                                if (bios->fp.strapless_is_24bit & 1)
                                        lvds.lvds.script |= 0x0200;
                        }

                        if (nv_connector->base.display_info.bpc == 8)
                                lvds.lvds.script |= 0x0200;
                }

                nvif_mthd(disp->disp, 0, &lvds, sizeof(lvds));
                break;
        case DCB_OUTPUT_DP:
                /* Map bpc to the hardware depth field. */
                if (nv_connector->base.display_info.bpc == 6)
                        depth = 0x2;
                else
                if (nv_connector->base.display_info.bpc == 8)
                        depth = 0x5;
                else
                        depth = 0x6;

                if (nv_encoder->link & 1)
                        proto = 0x8;
                else
                        proto = 0x9;

                nv50_audio_enable(encoder, mode);
                break;
        default:
                BUG();
                break;
        }

        nv_encoder->update(nv_encoder, nv_crtc->index, mode, proto, depth);
}
3663
/* Atomic helper callbacks for SOR encoders. */
static const struct drm_encoder_helper_funcs
nv50_sor_help = {
        .atomic_check = nv50_outp_atomic_check,
        .enable = nv50_sor_enable,
        .disable = nv50_sor_disable,
};
3670
3671 static void
3672 nv50_sor_destroy(struct drm_encoder *encoder)
3673 {
3674         struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
3675         nv50_mstm_del(&nv_encoder->dp.mstm);
3676         drm_encoder_cleanup(encoder);
3677         kfree(encoder);
3678 }
3679
/* Core encoder callbacks for SOR encoders. */
static const struct drm_encoder_funcs
nv50_sor_func = {
        .destroy = nv50_sor_destroy,
};
3684
3685 static int
3686 nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
3687 {
3688         struct nouveau_connector *nv_connector = nouveau_connector(connector);
3689         struct nouveau_drm *drm = nouveau_drm(connector->dev);
3690         struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
3691         struct nouveau_encoder *nv_encoder;
3692         struct drm_encoder *encoder;
3693         int type, ret;
3694
3695         switch (dcbe->type) {
3696         case DCB_OUTPUT_LVDS: type = DRM_MODE_ENCODER_LVDS; break;
3697         case DCB_OUTPUT_TMDS:
3698         case DCB_OUTPUT_DP:
3699         default:
3700                 type = DRM_MODE_ENCODER_TMDS;
3701                 break;
3702         }
3703
3704         nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
3705         if (!nv_encoder)
3706                 return -ENOMEM;
3707         nv_encoder->dcb = dcbe;
3708         nv_encoder->update = nv50_sor_update;
3709
3710         encoder = to_drm_encoder(nv_encoder);
3711         encoder->possible_crtcs = dcbe->heads;
3712         encoder->possible_clones = 0;
3713         drm_encoder_init(connector->dev, encoder, &nv50_sor_func, type,
3714                          "sor-%04x-%04x", dcbe->hasht, dcbe->hashm);
3715         drm_encoder_helper_add(encoder, &nv50_sor_help);
3716
3717         drm_mode_connector_attach_encoder(connector, encoder);
3718
3719         if (dcbe->type == DCB_OUTPUT_DP) {
3720                 struct nv50_disp *disp = nv50_disp(encoder->dev);
3721                 struct nvkm_i2c_aux *aux =
3722                         nvkm_i2c_aux_find(i2c, dcbe->i2c_index);
3723                 if (aux) {
3724                         if (disp->disp->oclass < GF110_DISP) {
3725                                 /* HW has no support for address-only
3726                                  * transactions, so we're required to
3727                                  * use custom I2C-over-AUX code.
3728                                  */
3729                                 nv_encoder->i2c = &aux->i2c;
3730                         } else {
3731                                 nv_encoder->i2c = &nv_connector->aux.ddc;
3732                         }
3733                         nv_encoder->aux = aux;
3734                 }
3735
3736                 /*TODO: Use DP Info Table to check for support. */
3737                 if (disp->disp->oclass >= GF110_DISP) {
3738                         ret = nv50_mstm_new(nv_encoder, &nv_connector->aux, 16,
3739                                             nv_connector->base.base.id,
3740                                             &nv_encoder->dp.mstm);
3741                         if (ret)
3742                                 return ret;
3743                 }
3744         } else {
3745                 struct nvkm_i2c_bus *bus =
3746                         nvkm_i2c_bus_find(i2c, dcbe->i2c_index);
3747                 if (bus)
3748                         nv_encoder->i2c = &bus->i2c;
3749         }
3750
3751         return 0;
3752 }
3753
3754 /******************************************************************************
3755  * PIOR
3756  *****************************************************************************/
/* Atomic check for PIOR (external encoder) outputs: run the common output
 * check, then double the adjusted clock.
 * NOTE(review): presumably the PIOR runs its interface clock at twice the
 * pixel clock — confirm against the hardware documentation.
 */
static int
nv50_pior_atomic_check(struct drm_encoder *encoder,
                       struct drm_crtc_state *crtc_state,
                       struct drm_connector_state *conn_state)
{
        int ret = nv50_outp_atomic_check(encoder, crtc_state, conn_state);
        if (ret)
                return ret;
        crtc_state->adjusted_mode.clock *= 2;
        return 0;
}
3768
/* Disable a PIOR encoder: zero its control method (pre-GF110 only) and
 * release the output resources.
 */
static void
nv50_pior_disable(struct drm_encoder *encoder)
{
        struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
        struct nv50_mast *mast = nv50_mast(encoder->dev);
        const int or = nv_encoder->or;
        u32 *push;

        if (nv_encoder->crtc) {
                push = evo_wait(mast, 4);
                if (push) {
                        if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
                                /* Clear the PIOR control word for this OR. */
                                evo_mthd(push, 0x0700 + (or * 0x040), 1);
                                evo_data(push, 0x00000000);
                        }
                        evo_kick(push, mast);
                }
        }

        nv_encoder->crtc = NULL;
        nv50_outp_release(nv_encoder);
}
3791
/* Enable a PIOR encoder: derive depth from the connector's bpc, protocol
 * from the DCB output type, and push the control word (pre-GF110 only).
 */
static void
nv50_pior_enable(struct drm_encoder *encoder)
{
        struct nv50_mast *mast = nv50_mast(encoder->dev);
        struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
        struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
        struct nouveau_connector *nv_connector;
        struct drm_display_mode *mode = &nv_crtc->base.state->adjusted_mode;
        u8 owner = 1 << nv_crtc->index;        /* head ownership bit */
        u8 proto, depth;
        u32 *push;

        nv50_outp_acquire(nv_encoder);

        /* bpc -> hardware depth field. */
        nv_connector = nouveau_encoder_connector_get(nv_encoder);
        switch (nv_connector->base.display_info.bpc) {
        case 10: depth = 0x6; break;
        case  8: depth = 0x5; break;
        case  6: depth = 0x2; break;
        default: depth = 0x0; break;
        }

        switch (nv_encoder->dcb->type) {
        case DCB_OUTPUT_TMDS:
        case DCB_OUTPUT_DP:
                proto = 0x0;
                break;
        default:
                BUG();
                break;
        }

        push = evo_wait(mast, 8);
        if (push) {
                if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
                        u32 ctrl = (depth << 16) | (proto << 8) | owner;
                        if (mode->flags & DRM_MODE_FLAG_NHSYNC)
                                ctrl |= 0x00001000;
                        if (mode->flags & DRM_MODE_FLAG_NVSYNC)
                                ctrl |= 0x00002000;
                        evo_mthd(push, 0x0700 + (nv_encoder->or * 0x040), 1);
                        evo_data(push, ctrl);
                }

                evo_kick(push, mast);
        }

        nv_encoder->crtc = encoder->crtc;
}
3841
/* Atomic helper callbacks for PIOR encoders. */
static const struct drm_encoder_helper_funcs
nv50_pior_help = {
        .atomic_check = nv50_pior_atomic_check,
        .enable = nv50_pior_enable,
        .disable = nv50_pior_disable,
};
3848
/* Destroy a PIOR encoder.  @encoder is embedded at the start of the
 * containing nouveau_encoder allocation, so freeing it frees the whole
 * object.
 */
static void
nv50_pior_destroy(struct drm_encoder *encoder)
{
        drm_encoder_cleanup(encoder);
        kfree(encoder);
}
3855
/* Core encoder callbacks for PIOR encoders. */
static const struct drm_encoder_funcs
nv50_pior_func = {
        .destroy = nv50_pior_destroy,
};
3860
3861 static int
3862 nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
3863 {
3864         struct nouveau_connector *nv_connector = nouveau_connector(connector);
3865         struct nouveau_drm *drm = nouveau_drm(connector->dev);
3866         struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
3867         struct nvkm_i2c_bus *bus = NULL;
3868         struct nvkm_i2c_aux *aux = NULL;
3869         struct i2c_adapter *ddc;
3870         struct nouveau_encoder *nv_encoder;
3871         struct drm_encoder *encoder;
3872         int type;
3873
3874         switch (dcbe->type) {
3875         case DCB_OUTPUT_TMDS:
3876                 bus  = nvkm_i2c_bus_find(i2c, NVKM_I2C_BUS_EXT(dcbe->extdev));
3877                 ddc  = bus ? &bus->i2c : NULL;
3878                 type = DRM_MODE_ENCODER_TMDS;
3879                 break;
3880         case DCB_OUTPUT_DP:
3881                 aux  = nvkm_i2c_aux_find(i2c, NVKM_I2C_AUX_EXT(dcbe->extdev));
3882                 ddc  = aux ? &nv_connector->aux.ddc : NULL;
3883                 type = DRM_MODE_ENCODER_TMDS;
3884                 break;
3885         default:
3886                 return -ENODEV;
3887         }
3888
3889         nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
3890         if (!nv_encoder)
3891                 return -ENOMEM;
3892         nv_encoder->dcb = dcbe;
3893         nv_encoder->i2c = ddc;
3894         nv_encoder->aux = aux;
3895
3896         encoder = to_drm_encoder(nv_encoder);
3897         encoder->possible_crtcs = dcbe->heads;
3898         encoder->possible_clones = 0;
3899         drm_encoder_init(connector->dev, encoder, &nv50_pior_func, type,
3900                          "pior-%04x-%04x", dcbe->hasht, dcbe->hashm);
3901         drm_encoder_helper_add(encoder, &nv50_pior_help);
3902
3903         drm_mode_connector_attach_encoder(connector, encoder);
3904         return 0;
3905 }
3906
3907 /******************************************************************************
3908  * Atomic
3909  *****************************************************************************/
3910
/* Kick the core EVO channel with @interlock and wait for completion.
 *
 * MST state is prepared before the flush and cleaned up afterwards, so
 * payload table updates bracket the hardware update.
 */
static void
nv50_disp_atomic_commit_core(struct nouveau_drm *drm, u32 interlock)
{
        struct nv50_disp *disp = nv50_disp(drm->dev);
        struct nv50_dmac *core = &disp->mast.base;
        struct nv50_mstm *mstm;
        struct drm_encoder *encoder;
        u32 *push;

        NV_ATOMIC(drm, "commit core %08x\n", interlock);

        /* Prepare modified MST managers (owned by real encoders, not the
         * virtual DPMST stream encoders).
         */
        drm_for_each_encoder(encoder, drm->dev) {
                if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
                        mstm = nouveau_encoder(encoder)->dp.mstm;
                        if (mstm && mstm->modified)
                                nv50_mstm_prepare(mstm);
                }
            }

        if ((push = evo_wait(core, 5))) {
                /* Submit the update, then poll the sync buffer (cleared
                 * just before the kick) until the hardware writes it back,
                 * with a 2 second timeout.
                 */
                evo_mthd(push, 0x0084, 1);
                evo_data(push, 0x80000000);
                evo_mthd(push, 0x0080, 2);
                evo_data(push, interlock);
                evo_data(push, 0x00000000);
                nouveau_bo_wr32(disp->sync, 0, 0x00000000);
                evo_kick(push, core);
                if (nvif_msec(&drm->client.device, 2000ULL,
                        if (nouveau_bo_rd32(disp->sync, 0))
                                break;
                        usleep_range(1, 2);
                ) < 0)
                        NV_ERROR(drm, "EVO timeout\n");
        }

        drm_for_each_encoder(encoder, drm->dev) {
                if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
                        mstm = nouveau_encoder(encoder)->dp.mstm;
                        if (mstm && mstm->modified)
                                nv50_mstm_cleanup(mstm);
                }
        }
}
3954
3955 static void
3956 nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
3957 {
3958         struct drm_device *dev = state->dev;
3959         struct drm_crtc_state *new_crtc_state, *old_crtc_state;
3960         struct drm_crtc *crtc;
3961         struct drm_plane_state *new_plane_state;
3962         struct drm_plane *plane;
3963         struct nouveau_drm *drm = nouveau_drm(dev);
3964         struct nv50_disp *disp = nv50_disp(dev);
3965         struct nv50_atom *atom = nv50_atom(state);
3966         struct nv50_outp_atom *outp, *outt;
3967         u32 interlock_core = 0;
3968         u32 interlock_chan = 0;
3969         int i;
3970
3971         NV_ATOMIC(drm, "commit %d %d\n", atom->lock_core, atom->flush_disable);
3972         drm_atomic_helper_wait_for_fences(dev, state, false);
3973         drm_atomic_helper_wait_for_dependencies(state);
3974         drm_atomic_helper_update_legacy_modeset_state(dev, state);
3975
3976         if (atom->lock_core)
3977                 mutex_lock(&disp->mutex);
3978
3979         /* Disable head(s). */
3980         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
3981                 struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
3982                 struct nv50_head *head = nv50_head(crtc);
3983
3984                 NV_ATOMIC(drm, "%s: clr %04x (set %04x)\n", crtc->name,
3985                           asyh->clr.mask, asyh->set.mask);
3986                 if (old_crtc_state->active && !new_crtc_state->active)
3987                         drm_crtc_vblank_off(crtc);
3988
3989                 if (asyh->clr.mask) {
3990                         nv50_head_flush_clr(head, asyh, atom->flush_disable);
3991                         interlock_core |= 1;
3992                 }
3993         }
3994
3995         /* Disable plane(s). */
3996         for_each_new_plane_in_state(state, plane, new_plane_state, i) {
3997                 struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
3998                 struct nv50_wndw *wndw = nv50_wndw(plane);
3999
4000                 NV_ATOMIC(drm, "%s: clr %02x (set %02x)\n", plane->name,
4001                           asyw->clr.mask, asyw->set.mask);
4002                 if (!asyw->clr.mask)
4003                         continue;
4004
4005                 interlock_chan |= nv50_wndw_flush_clr(wndw, interlock_core,
4006                                                       atom->flush_disable,
4007                                                       asyw);
4008         }
4009
4010         /* Disable output path(s). */
4011         list_for_each_entry(outp, &atom->outp, head) {
4012                 const struct drm_encoder_helper_funcs *help;
4013                 struct drm_encoder *encoder;
4014
4015                 encoder = outp->encoder;
4016                 help = encoder->helper_private;
4017
4018                 NV_ATOMIC(drm, "%s: clr %02x (set %02x)\n", encoder->name,
4019                           outp->clr.mask, outp->set.mask);
4020
4021                 if (outp->clr.mask) {
4022                         help->disable(encoder);
4023                         interlock_core |= 1;
4024                         if (outp->flush_disable) {
4025                                 nv50_disp_atomic_commit_core(drm, interlock_chan);
4026                                 interlock_core = 0;
4027                                 interlock_chan = 0;
4028                         }
4029                 }
4030         }
4031
4032         /* Flush disable. */
4033         if (interlock_core) {
4034                 if (atom->flush_disable) {
4035                         nv50_disp_atomic_commit_core(drm, interlock_chan);
4036                         interlock_core = 0;
4037                         interlock_chan = 0;
4038                 }
4039         }
4040
4041         /* Update output path(s). */
4042         list_for_each_entry_safe(outp, outt, &atom->outp, head) {
4043                 const struct drm_encoder_helper_funcs *help;
4044                 struct drm_encoder *encoder;
4045
4046                 encoder = outp->encoder;
4047                 help = encoder->helper_private;
4048
4049                 NV_ATOMIC(drm, "%s: set %02x (clr %02x)\n", encoder->name,
4050                           outp->set.mask, outp->clr.mask);
4051
4052                 if (outp->set.mask) {
4053                         help->enable(encoder);
4054                         interlock_core = 1;
4055                 }
4056
4057                 list_del(&outp->head);
4058                 kfree(outp);
4059         }
4060
4061         /* Update head(s). */
4062         for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
4063                 struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
4064                 struct nv50_head *head = nv50_head(crtc);
4065
4066                 NV_ATOMIC(drm, "%s: set %04x (clr %04x)\n", crtc->name,
4067                           asyh->set.mask, asyh->clr.mask);
4068
4069                 if (asyh->set.mask) {
4070                         nv50_head_flush_set(head, asyh);
4071                         interlock_core = 1;
4072                 }
4073
4074                 if (new_crtc_state->active) {
4075                         if (!old_crtc_state->active)
4076                                 drm_crtc_vblank_on(crtc);
4077                         if (new_crtc_state->event)
4078                                 drm_crtc_vblank_get(crtc);
4079                 }
4080         }
4081
4082         /* Update plane(s). */
4083         for_each_new_plane_in_state(state, plane, new_plane_state, i) {
4084                 struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
4085                 struct nv50_wndw *wndw = nv50_wndw(plane);
4086
4087                 NV_ATOMIC(drm, "%s: set %02x (clr %02x)\n", plane->name,
4088                           asyw->set.mask, asyw->clr.mask);
4089                 if ( !asyw->set.mask &&
4090                     (!asyw->clr.mask || atom->flush_disable))
4091                         continue;
4092
4093                 interlock_chan |= nv50_wndw_flush_set(wndw, interlock_core, asyw);
4094         }
4095
4096         /* Flush update. */
4097         if (interlock_core) {
4098                 if (!interlock_chan && atom->state.legacy_cursor_update) {
4099                         u32 *push = evo_wait(&disp->mast, 2);
4100                         if (push) {
4101                                 evo_mthd(push, 0x0080, 1);
4102                                 evo_data(push, 0x00000000);
4103                                 evo_kick(push, &disp->mast);
4104                         }
4105                 } else {
4106                         nv50_disp_atomic_commit_core(drm, interlock_chan);
4107                 }
4108         }
4109
4110         if (atom->lock_core)
4111                 mutex_unlock(&disp->mutex);
4112
4113         /* Wait for HW to signal completion. */
4114         for_each_new_plane_in_state(state, plane, new_plane_state, i) {
4115                 struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
4116                 struct nv50_wndw *wndw = nv50_wndw(plane);
4117                 int ret = nv50_wndw_wait_armed(wndw, asyw);
4118                 if (ret)
4119                         NV_ERROR(drm, "%s: timeout\n", plane->name);
4120         }
4121
4122         for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
4123                 if (new_crtc_state->event) {
4124                         unsigned long flags;
4125                         /* Get correct count/ts if racing with vblank irq */
4126                         if (new_crtc_state->active)
4127                                 drm_crtc_accurate_vblank_count(crtc);
4128                         spin_lock_irqsave(&crtc->dev->event_lock, flags);
4129                         drm_crtc_send_vblank_event(crtc, new_crtc_state->event);
4130                         spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
4131
4132                         new_crtc_state->event = NULL;
4133                         if (new_crtc_state->active)
4134                                 drm_crtc_vblank_put(crtc);
4135                 }
4136         }
4137
4138         drm_atomic_helper_commit_hw_done(state);
4139         drm_atomic_helper_cleanup_planes(dev, state);
4140         drm_atomic_helper_commit_cleanup_done(state);
4141         drm_atomic_state_put(state);
4142 }
4143
4144 static void
4145 nv50_disp_atomic_commit_work(struct work_struct *work)
4146 {
4147         struct drm_atomic_state *state =
4148                 container_of(work, typeof(*state), commit_work);
4149         nv50_disp_atomic_commit_tail(state);
4150 }
4151
4152 static int
4153 nv50_disp_atomic_commit(struct drm_device *dev,
4154                         struct drm_atomic_state *state, bool nonblock)
4155 {
4156         struct nouveau_drm *drm = nouveau_drm(dev);
4157         struct nv50_disp *disp = nv50_disp(dev);
4158         struct drm_plane_state *new_plane_state;
4159         struct drm_plane *plane;
4160         struct drm_crtc *crtc;
4161         bool active = false;
4162         int ret, i;
4163
4164         ret = pm_runtime_get_sync(dev->dev);
4165         if (ret < 0 && ret != -EACCES)
4166                 return ret;
4167
4168         ret = drm_atomic_helper_setup_commit(state, nonblock);
4169         if (ret)
4170                 goto done;
4171
4172         INIT_WORK(&state->commit_work, nv50_disp_atomic_commit_work);
4173
4174         ret = drm_atomic_helper_prepare_planes(dev, state);
4175         if (ret)
4176                 goto done;
4177
4178         if (!nonblock) {
4179                 ret = drm_atomic_helper_wait_for_fences(dev, state, true);
4180                 if (ret)
4181                         goto err_cleanup;
4182         }
4183
4184         ret = drm_atomic_helper_swap_state(state, true);
4185         if (ret)
4186                 goto err_cleanup;
4187
4188         for_each_new_plane_in_state(state, plane, new_plane_state, i) {
4189                 struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
4190                 struct nv50_wndw *wndw = nv50_wndw(plane);
4191
4192                 if (asyw->set.image) {
4193                         asyw->ntfy.handle = wndw->dmac->sync.handle;
4194                         asyw->ntfy.offset = wndw->ntfy;
4195                         asyw->ntfy.awaken = false;
4196                         asyw->set.ntfy = true;
4197                         nouveau_bo_wr32(disp->sync, wndw->ntfy / 4, 0x00000000);
4198                         wndw->ntfy ^= 0x10;
4199                 }
4200         }
4201
4202         drm_atomic_state_get(state);
4203
4204         if (nonblock)
4205                 queue_work(system_unbound_wq, &state->commit_work);
4206         else
4207                 nv50_disp_atomic_commit_tail(state);
4208
4209         drm_for_each_crtc(crtc, dev) {
4210                 if (crtc->state->enable) {
4211                         if (!drm->have_disp_power_ref) {
4212                                 drm->have_disp_power_ref = true;
4213                                 return 0;
4214                         }
4215                         active = true;
4216                         break;
4217                 }
4218         }
4219
4220         if (!active && drm->have_disp_power_ref) {
4221                 pm_runtime_put_autosuspend(dev->dev);
4222                 drm->have_disp_power_ref = false;
4223         }
4224
4225 err_cleanup:
4226         if (ret)
4227                 drm_atomic_helper_cleanup_planes(dev, state);
4228 done:
4229         pm_runtime_put_autosuspend(dev->dev);
4230         return ret;
4231 }
4232
4233 static struct nv50_outp_atom *
4234 nv50_disp_outp_atomic_add(struct nv50_atom *atom, struct drm_encoder *encoder)
4235 {
4236         struct nv50_outp_atom *outp;
4237
4238         list_for_each_entry(outp, &atom->outp, head) {
4239                 if (outp->encoder == encoder)
4240                         return outp;
4241         }
4242
4243         outp = kzalloc(sizeof(*outp), GFP_KERNEL);
4244         if (!outp)
4245                 return ERR_PTR(-ENOMEM);
4246
4247         list_add(&outp->head, &atom->outp);
4248         outp->encoder = encoder;
4249         return outp;
4250 }
4251
4252 static int
4253 nv50_disp_outp_atomic_check_clr(struct nv50_atom *atom,
4254                                 struct drm_connector_state *old_connector_state)
4255 {
4256         struct drm_encoder *encoder = old_connector_state->best_encoder;
4257         struct drm_crtc_state *old_crtc_state, *new_crtc_state;
4258         struct drm_crtc *crtc;
4259         struct nv50_outp_atom *outp;
4260
4261         if (!(crtc = old_connector_state->crtc))
4262                 return 0;
4263
4264         old_crtc_state = drm_atomic_get_old_crtc_state(&atom->state, crtc);
4265         new_crtc_state = drm_atomic_get_new_crtc_state(&atom->state, crtc);
4266         if (old_crtc_state->active && drm_atomic_crtc_needs_modeset(new_crtc_state)) {
4267                 outp = nv50_disp_outp_atomic_add(atom, encoder);
4268                 if (IS_ERR(outp))
4269                         return PTR_ERR(outp);
4270
4271                 if (outp->encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
4272                         outp->flush_disable = true;
4273                         atom->flush_disable = true;
4274                 }
4275                 outp->clr.ctrl = true;
4276                 atom->lock_core = true;
4277         }
4278
4279         return 0;
4280 }
4281
4282 static int
4283 nv50_disp_outp_atomic_check_set(struct nv50_atom *atom,
4284                                 struct drm_connector_state *connector_state)
4285 {
4286         struct drm_encoder *encoder = connector_state->best_encoder;
4287         struct drm_crtc_state *new_crtc_state;
4288         struct drm_crtc *crtc;
4289         struct nv50_outp_atom *outp;
4290
4291         if (!(crtc = connector_state->crtc))
4292                 return 0;
4293
4294         new_crtc_state = drm_atomic_get_new_crtc_state(&atom->state, crtc);
4295         if (new_crtc_state->active && drm_atomic_crtc_needs_modeset(new_crtc_state)) {
4296                 outp = nv50_disp_outp_atomic_add(atom, encoder);
4297                 if (IS_ERR(outp))
4298                         return PTR_ERR(outp);
4299
4300                 outp->set.ctrl = true;
4301                 atom->lock_core = true;
4302         }
4303
4304         return 0;
4305 }
4306
4307 static int
4308 nv50_disp_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
4309 {
4310         struct nv50_atom *atom = nv50_atom(state);
4311         struct drm_connector_state *old_connector_state, *new_connector_state;
4312         struct drm_connector *connector;
4313         int ret, i;
4314
4315         ret = drm_atomic_helper_check(dev, state);
4316         if (ret)
4317                 return ret;
4318
4319         for_each_oldnew_connector_in_state(state, connector, old_connector_state, new_connector_state, i) {
4320                 ret = nv50_disp_outp_atomic_check_clr(atom, old_connector_state);
4321                 if (ret)
4322                         return ret;
4323
4324                 ret = nv50_disp_outp_atomic_check_set(atom, new_connector_state);
4325                 if (ret)
4326                         return ret;
4327         }
4328
4329         return 0;
4330 }
4331
4332 static void
4333 nv50_disp_atomic_state_clear(struct drm_atomic_state *state)
4334 {
4335         struct nv50_atom *atom = nv50_atom(state);
4336         struct nv50_outp_atom *outp, *outt;
4337
4338         list_for_each_entry_safe(outp, outt, &atom->outp, head) {
4339                 list_del(&outp->head);
4340                 kfree(outp);
4341         }
4342
4343         drm_atomic_state_default_clear(state);
4344 }
4345
4346 static void
4347 nv50_disp_atomic_state_free(struct drm_atomic_state *state)
4348 {
4349         struct nv50_atom *atom = nv50_atom(state);
4350         drm_atomic_state_default_release(&atom->state);
4351         kfree(atom);
4352 }
4353
4354 static struct drm_atomic_state *
4355 nv50_disp_atomic_state_alloc(struct drm_device *dev)
4356 {
4357         struct nv50_atom *atom;
4358         if (!(atom = kzalloc(sizeof(*atom), GFP_KERNEL)) ||
4359             drm_atomic_state_init(dev, &atom->state) < 0) {
4360                 kfree(atom);
4361                 return NULL;
4362         }
4363         INIT_LIST_HEAD(&atom->outp);
4364         return &atom->state;
4365 }
4366
/* Mode-config entry points: route atomic state handling through the
 * nv50-specific implementations above (which layer output-path tracking
 * and core-channel locking on top of the generic DRM atomic helpers).
 */
static const struct drm_mode_config_funcs
nv50_disp_func = {
        .fb_create = nouveau_user_framebuffer_create,
        .output_poll_changed = drm_fb_helper_output_poll_changed,
        .atomic_check = nv50_disp_atomic_check,
        .atomic_commit = nv50_disp_atomic_commit,
        .atomic_state_alloc = nv50_disp_atomic_state_alloc,
        .atomic_state_clear = nv50_disp_atomic_state_clear,
        .atomic_state_free = nv50_disp_atomic_state_free,
};
4377
4378 /******************************************************************************
4379  * Init
4380  *****************************************************************************/
4381
4382 void
4383 nv50_display_fini(struct drm_device *dev)
4384 {
4385         struct nouveau_encoder *nv_encoder;
4386         struct drm_encoder *encoder;
4387         struct drm_plane *plane;
4388
4389         drm_for_each_plane(plane, dev) {
4390                 struct nv50_wndw *wndw = nv50_wndw(plane);
4391                 if (plane->funcs != &nv50_wndw)
4392                         continue;
4393                 nv50_wndw_fini(wndw);
4394         }
4395
4396         list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
4397                 if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
4398                         nv_encoder = nouveau_encoder(encoder);
4399                         nv50_mstm_fini(nv_encoder->dp.mstm);
4400                 }
4401         }
4402 }
4403
4404 int
4405 nv50_display_init(struct drm_device *dev)
4406 {
4407         struct drm_encoder *encoder;
4408         struct drm_plane *plane;
4409         u32 *push;
4410
4411         push = evo_wait(nv50_mast(dev), 32);
4412         if (!push)
4413                 return -EBUSY;
4414
4415         evo_mthd(push, 0x0088, 1);
4416         evo_data(push, nv50_mast(dev)->base.sync.handle);
4417         evo_kick(push, nv50_mast(dev));
4418
4419         list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
4420                 if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
4421                         struct nouveau_encoder *nv_encoder =
4422                                 nouveau_encoder(encoder);
4423                         nv50_mstm_init(nv_encoder->dp.mstm);
4424                 }
4425         }
4426
4427         drm_for_each_plane(plane, dev) {
4428                 struct nv50_wndw *wndw = nv50_wndw(plane);
4429                 if (plane->funcs != &nv50_wndw)
4430                         continue;
4431                 nv50_wndw_init(wndw);
4432         }
4433
4434         return 0;
4435 }
4436
/* Tear down everything nv50_display_create() set up; also used as its
 * error-path cleanup, so every step must tolerate partial initialization.
 * Statement order matters: the core channel goes first, then the sync
 * buffer it referenced.
 */
void
nv50_display_destroy(struct drm_device *dev)
{
        struct nv50_disp *disp = nv50_disp(dev);

        nv50_dmac_destroy(&disp->mast.base, disp->disp);

        /* NOTE(review): unmap is called without the NULL guard that
         * protects unpin — presumably nouveau_bo_unmap() and
         * nouveau_bo_ref() tolerate a NULL bo; confirm before changing.
         */
        nouveau_bo_unmap(disp->sync);
        if (disp->sync)
                nouveau_bo_unpin(disp->sync);
        nouveau_bo_ref(NULL, &disp->sync);

        nouveau_display(dev)->priv = NULL;
        kfree(disp);
}
4452
/* "atomic" module parameter: when non-zero, nv50_display_create() adds
 * DRIVER_ATOMIC to the driver features, exposing the atomic ioctl to
 * userspace.  Permissions 0400: readable by root only, not writable.
 */
MODULE_PARM_DESC(atomic, "Expose atomic ioctl (default: disabled)");
static int nouveau_atomic = 0;
module_param_named(atomic, nouveau_atomic, int, 0400);
4456
/* Create and register the nv50+ display engine: shared sync buffer, core
 * (master) EVO channel, one CRTC per hardware head, and encoder/connector
 * objects from the VBIOS DCB table.  On any fatal error, everything set up
 * so far is torn down via nv50_display_destroy().
 *
 * Returns 0 on success or a negative errno.
 */
int
nv50_display_create(struct drm_device *dev)
{
        struct nvif_device *device = &nouveau_drm(dev)->client.device;
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct dcb_table *dcb = &drm->vbios.dcb;
        struct drm_connector *connector, *tmp;
        struct nv50_disp *disp;
        struct dcb_output *dcbe;
        int crtcs, ret, i;

        disp = kzalloc(sizeof(*disp), GFP_KERNEL);
        if (!disp)
                return -ENOMEM;

        mutex_init(&disp->mutex);

        /* Hook the nv50 implementation into the generic nouveau display. */
        nouveau_display(dev)->priv = disp;
        nouveau_display(dev)->dtor = nv50_display_destroy;
        nouveau_display(dev)->init = nv50_display_init;
        nouveau_display(dev)->fini = nv50_display_fini;
        disp->disp = &nouveau_display(dev)->disp;
        dev->mode_config.funcs = &nv50_disp_func;
        dev->driver->driver_features |= DRIVER_PREFER_XBGR_30BPP;
        if (nouveau_atomic)
                dev->driver->driver_features |= DRIVER_ATOMIC;

        /* small shared memory area we use for notifiers and semaphores */
        ret = nouveau_bo_new(&drm->client, 4096, 0x1000, TTM_PL_FLAG_VRAM,
                             0, 0x0000, NULL, NULL, &disp->sync);
        if (!ret) {
                ret = nouveau_bo_pin(disp->sync, TTM_PL_FLAG_VRAM, true);
                if (!ret) {
                        ret = nouveau_bo_map(disp->sync);
                        if (ret)
                                nouveau_bo_unpin(disp->sync);
                }
                /* Unwind the allocation if pin or map failed. */
                if (ret)
                        nouveau_bo_ref(NULL, &disp->sync);
        }

        if (ret)
                goto out;

        /* allocate master evo channel */
        ret = nv50_core_create(device, disp->disp, disp->sync->bo.offset,
                              &disp->mast);
        if (ret)
                goto out;

        /* create crtc objects to represent the hw heads */
        if (disp->disp->oclass >= GF110_DISP)
                crtcs = nvif_rd32(&device->object, 0x612004) & 0xf;
        else
                crtcs = 0x3; /* pre-GF110: fixed mask of two heads */

        for (i = 0; i < fls(crtcs); i++) {
                /* Skip heads the hardware reports as absent. */
                if (!(crtcs & (1 << i)))
                        continue;
                ret = nv50_head_create(dev, i);
                if (ret)
                        goto out;
        }

        /* create encoder/connector objects based on VBIOS DCB table */
        for (i = 0, dcbe = &dcb->entry[0]; i < dcb->entries; i++, dcbe++) {
                connector = nouveau_connector_create(dev, dcbe->connector);
                if (IS_ERR(connector))
                        continue;

                if (dcbe->location == DCB_LOC_ON_CHIP) {
                        switch (dcbe->type) {
                        case DCB_OUTPUT_TMDS:
                        case DCB_OUTPUT_LVDS:
                        case DCB_OUTPUT_DP:
                                ret = nv50_sor_create(connector, dcbe);
                                break;
                        case DCB_OUTPUT_ANALOG:
                                ret = nv50_dac_create(connector, dcbe);
                                break;
                        default:
                                ret = -ENODEV;
                                break;
                        }
                } else {
                        /* Off-chip outputs are driven through a PIOR. */
                        ret = nv50_pior_create(connector, dcbe);
                }

                if (ret) {
                        /* Encoder failures are non-fatal: warn, clear ret,
                         * and continue with the remaining DCB entries.
                         */
                        NV_WARN(drm, "failed to create encoder %d/%d/%d: %d\n",
                                     dcbe->location, dcbe->type,
                                     ffs(dcbe->or) - 1, ret);
                        ret = 0;
                }
        }

        /* cull any connectors we created that don't have an encoder */
        list_for_each_entry_safe(connector, tmp, &dev->mode_config.connector_list, head) {
                if (connector->encoder_ids[0])
                        continue;

                NV_WARN(drm, "%s has no encoders, removing\n",
                        connector->name);
                connector->funcs->destroy(connector);
        }

out:
        if (ret)
                nv50_display_destroy(dev);
        return ret;
}