// SPDX-License-Identifier: MIT
#include <linux/string.h>
#include <drm/drm_crtc.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_vblank.h>
#include <drm/drm_vblank_work.h>

#include <nvif/class.h>
#include <nvif/cl0002.h>
#include <nvif/timer.h>

#include <nvhw/class/cl907d.h>

#include "nouveau_drv.h"
#include "core.h"
#include "head.h"
#include "wndw.h"
#include "handles.h"
#include "crc.h"

static const char * const nv50_crc_sources[] = {
        [NV50_CRC_SOURCE_NONE] = "none",
        [NV50_CRC_SOURCE_AUTO] = "auto",
        [NV50_CRC_SOURCE_RG] = "rg",
        [NV50_CRC_SOURCE_OUTP_ACTIVE] = "outp-active",
        [NV50_CRC_SOURCE_OUTP_COMPLETE] = "outp-complete",
        [NV50_CRC_SOURCE_OUTP_INACTIVE] = "outp-inactive",
};

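/*
 * Note: these source names are what userspace writes to the DRM CRC debugfs
 * interface (typically <debugfs>/dri/<card>/crtc-<N>/crc/control) in order to
 * select a CRC source for this CRTC.
 */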
static int nv50_crc_parse_source(const char *buf, enum nv50_crc_source *s)
{
        int i;

        if (!buf) {
                *s = NV50_CRC_SOURCE_NONE;
                return 0;
        }

        i = match_string(nv50_crc_sources, ARRAY_SIZE(nv50_crc_sources), buf);
        if (i < 0)
                return i;

        *s = i;
        return 0;
}

int
nv50_crc_verify_source(struct drm_crtc *crtc, const char *source_name,
                       size_t *values_cnt)
{
        struct nouveau_drm *drm = nouveau_drm(crtc->dev);
        enum nv50_crc_source source;

        if (nv50_crc_parse_source(source_name, &source) < 0) {
                NV_DEBUG(drm, "unknown source %s\n", source_name);
                return -EINVAL;
        }

        *values_cnt = 1;
        return 0;
}

const char *const *nv50_crc_get_sources(struct drm_crtc *crtc, size_t *count)
{
        *count = ARRAY_SIZE(nv50_crc_sources);
        return nv50_crc_sources;
}

static void
nv50_crc_program_ctx(struct nv50_head *head,
                     struct nv50_crc_notifier_ctx *ctx)
{
        struct nv50_disp *disp = nv50_disp(head->base.base.dev);
        struct nv50_core *core = disp->core;
        u32 interlock[NV50_DISP_INTERLOCK__SIZE] = { 0 };

        core->func->crc->set_ctx(head, ctx);
        core->func->update(core, interlock, false);
}

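/*
 * CRC notifier contexts are double-buffered: while the hardware writes
 * entries into one context, this vblank work (scheduled shortly before the
 * current context is expected to fill up, see flip_threshold) programs the
 * other context and flips to it so that capture can continue uninterrupted.
 */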
static void nv50_crc_ctx_flip_work(struct kthread_work *base)
{
        struct drm_vblank_work *work = to_drm_vblank_work(base);
        struct nv50_crc *crc = container_of(work, struct nv50_crc, flip_work);
        struct nv50_head *head = container_of(crc, struct nv50_head, crc);
        struct drm_crtc *crtc = &head->base.base;
        struct nv50_disp *disp = nv50_disp(crtc->dev);
        u8 new_idx = crc->ctx_idx ^ 1;

        /*
         * We don't want to accidentally wait for longer than the vblank, so
         * try again on the next vblank if we can't grab the lock.
         */
        if (!mutex_trylock(&disp->mutex)) {
                DRM_DEV_DEBUG_KMS(crtc->dev->dev,
                                  "Lock contended, delaying CRC ctx flip for head-%d\n",
                                  head->base.index);
                drm_vblank_work_schedule(work,
                                         drm_crtc_vblank_count(crtc) + 1,
                                         true);
                return;
        }

        DRM_DEV_DEBUG_KMS(crtc->dev->dev,
                          "Flipping notifier ctx for head %d (%d -> %d)\n",
                          drm_crtc_index(crtc), crc->ctx_idx, new_idx);

        nv50_crc_program_ctx(head, NULL);
        nv50_crc_program_ctx(head, &crc->ctx[new_idx]);
        mutex_unlock(&disp->mutex);

        spin_lock_irq(&crc->lock);
        crc->ctx_changed = true;
        spin_unlock_irq(&crc->lock);
}

static inline void nv50_crc_reset_ctx(struct nv50_crc_notifier_ctx *ctx)
{
        memset_io(ctx->mem.object.map.ptr, 0, ctx->mem.object.map.size);
}

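/*
 * Drain any CRC entries the hardware has completed in the current notifier
 * context into the DRM CRC queue. We stop at the first zero entry, which is
 * treated as a slot the hardware hasn't written yet.
 */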
static void
nv50_crc_get_entries(struct nv50_head *head,
                     const struct nv50_crc_func *func,
                     enum nv50_crc_source source)
{
        struct drm_crtc *crtc = &head->base.base;
        struct nv50_crc *crc = &head->crc;
        u32 output_crc;

        while (crc->entry_idx < func->num_entries) {
                /*
                 * While Nvidia's documentation says CRCs are written on each
                 * subsequent vblank after being enabled, in practice they
                 * aren't written immediately.
                 */
                output_crc = func->get_entry(head, &crc->ctx[crc->ctx_idx],
                                             source, crc->entry_idx);
                if (!output_crc)
                        return;

                drm_crtc_add_crc_entry(crtc, true, crc->frame, &output_crc);
                crc->frame++;
                crc->entry_idx++;
        }
}

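/*
 * Called on each vblank for this head: reports any newly completed CRC
 * entries and finishes a pending notifier context flip once the hardware is
 * done with the old context.
 */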
void nv50_crc_handle_vblank(struct nv50_head *head)
{
        struct drm_crtc *crtc = &head->base.base;
        struct nv50_crc *crc = &head->crc;
        const struct nv50_crc_func *func =
                nv50_disp(head->base.base.dev)->core->func->crc;
        struct nv50_crc_notifier_ctx *ctx;
        bool need_reschedule = false;

        if (!func)
                return;

        /*
         * We don't lose events if we aren't able to report CRCs until the
         * next vblank, so only report CRCs if the locks we need aren't
         * contended, to avoid missing an actual vblank event.
         */
        if (!spin_trylock(&crc->lock))
                return;

        if (!crc->src)
                goto out;

        ctx = &crc->ctx[crc->ctx_idx];
        if (crc->ctx_changed && func->ctx_finished(head, ctx)) {
                nv50_crc_get_entries(head, func, crc->src);

                crc->ctx_idx ^= 1;
                crc->entry_idx = 0;
                crc->ctx_changed = false;

                /*
                 * Unfortunately when notifier contexts are changed during CRC
                 * capture, we will inevitably lose the CRC entry for the
                 * frame where the hardware actually latched onto the first
                 * UPDATE. According to Nvidia's hardware engineers, there's
                 * no workaround for this.
                 *
                 * Now, we could try to be smart here and calculate the number
                 * of missed CRCs based on audit timestamps, but those were
                 * removed starting with volta. Since we always flush our
                 * updates back-to-back without waiting, we'll just be
                 * optimistic and assume we always miss exactly one frame.
                 */
                DRM_DEV_DEBUG_KMS(head->base.base.dev->dev,
                                  "Notifier ctx flip for head-%d finished, lost CRC for frame %llu\n",
                                  head->base.index, crc->frame);
                crc->frame++;

                nv50_crc_reset_ctx(ctx);
                need_reschedule = true;
        }

        nv50_crc_get_entries(head, func, crc->src);

        if (need_reschedule)
                drm_vblank_work_schedule(&crc->flip_work,
                                         drm_crtc_vblank_count(crtc)
                                         + crc->flip_threshold
                                         - crc->entry_idx,
                                         true);

out:
        spin_unlock(&crc->lock);
}

static void nv50_crc_wait_ctx_finished(struct nv50_head *head,
                                       const struct nv50_crc_func *func,
                                       struct nv50_crc_notifier_ctx *ctx)
{
        struct drm_device *dev = head->base.base.dev;
        struct nouveau_drm *drm = nouveau_drm(dev);
        s64 ret;

        ret = nvif_msec(&drm->client.device, 50,
                        if (func->ctx_finished(head, ctx)) break;);
        if (ret == -ETIMEDOUT)
                NV_ERROR(drm,
                         "CRC notifier ctx for head %d not finished after 50ms\n",
                         head->base.index);
        else if (ret)
                NV_ATOMIC(drm,
                          "CRC notifier ctx for head-%d finished after %lldns\n",
                          head->base.index, ret);
}

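/*
 * The nv50_crc_atomic_*() helpers below are intended to be called from the
 * atomic commit path: vblank reporting is stopped and notifier contexts are
 * (re)initialized before the relevant flush, while reporting is restarted and
 * old contexts are released only once the hardware has been updated.
 */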
void nv50_crc_atomic_stop_reporting(struct drm_atomic_state *state)
{
        struct drm_crtc_state *crtc_state;
        struct drm_crtc *crtc;
        int i;

        for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
                struct nv50_head *head = nv50_head(crtc);
                struct nv50_head_atom *asyh = nv50_head_atom(crtc_state);
                struct nv50_crc *crc = &head->crc;

                if (!asyh->clr.crc)
                        continue;

                spin_lock_irq(&crc->lock);
                crc->src = NV50_CRC_SOURCE_NONE;
                spin_unlock_irq(&crc->lock);

                drm_crtc_vblank_put(crtc);
                drm_vblank_work_cancel_sync(&crc->flip_work);

                NV_ATOMIC(nouveau_drm(crtc->dev),
                          "CRC reporting on vblank for head-%d disabled\n",
                          head->base.index);

                /* CRC generation is still enabled in hw; we'll just report
                 * any remaining CRC entries ourselves after it gets disabled
                 * in hardware.
                 */
        }
}

void nv50_crc_atomic_init_notifier_contexts(struct drm_atomic_state *state)
{
        struct drm_crtc_state *new_crtc_state;
        struct drm_crtc *crtc;
        int i;

        for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
                struct nv50_head *head = nv50_head(crtc);
                struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
                struct nv50_crc *crc = &head->crc;
                int i;

                if (!asyh->set.crc)
                        continue;

                crc->entry_idx = 0;
                crc->ctx_changed = false;
                for (i = 0; i < ARRAY_SIZE(crc->ctx); i++)
                        nv50_crc_reset_ctx(&crc->ctx[i]);
        }
}

void nv50_crc_atomic_release_notifier_contexts(struct drm_atomic_state *state)
{
        const struct nv50_crc_func *func =
                nv50_disp(state->dev)->core->func->crc;
        struct drm_crtc_state *new_crtc_state;
        struct drm_crtc *crtc;
        int i;

        for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
                struct nv50_head *head = nv50_head(crtc);
                struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
                struct nv50_crc *crc = &head->crc;
                struct nv50_crc_notifier_ctx *ctx = &crc->ctx[crc->ctx_idx];

                if (!asyh->clr.crc)
                        continue;

                if (crc->ctx_changed) {
                        nv50_crc_wait_ctx_finished(head, func, ctx);
                        ctx = &crc->ctx[crc->ctx_idx ^ 1];
                }
                nv50_crc_wait_ctx_finished(head, func, ctx);
        }
}

void nv50_crc_atomic_start_reporting(struct drm_atomic_state *state)
{
        struct drm_crtc_state *crtc_state;
        struct drm_crtc *crtc;
        int i;

        for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
                struct nv50_head *head = nv50_head(crtc);
                struct nv50_head_atom *asyh = nv50_head_atom(crtc_state);
                struct nv50_crc *crc = &head->crc;
                u64 vbl_count;

                if (!asyh->set.crc)
                        continue;

                drm_crtc_vblank_get(crtc);

                spin_lock_irq(&crc->lock);
                vbl_count = drm_crtc_vblank_count(crtc);
                crc->frame = vbl_count;
                crc->src = asyh->crc.src;
                drm_vblank_work_schedule(&crc->flip_work,
                                         vbl_count + crc->flip_threshold,
                                         true);
                spin_unlock_irq(&crc->lock);

                NV_ATOMIC(nouveau_drm(crtc->dev),
                          "CRC reporting on vblank for head-%d enabled\n",
                          head->base.index);
        }
}

int nv50_crc_atomic_check_head(struct nv50_head *head,
                               struct nv50_head_atom *asyh,
                               struct nv50_head_atom *armh)
{
        struct nv50_atom *atom = nv50_atom(asyh->state.state);
        struct drm_device *dev = head->base.base.dev;
        struct nv50_disp *disp = nv50_disp(dev);
        bool changed = armh->crc.src != asyh->crc.src;

        if (!armh->crc.src && !asyh->crc.src) {
                asyh->set.crc = false;
                asyh->clr.crc = false;
                return 0;
        }

        /* While we don't care about entry tags, Volta+ hw always needs the
         * controlling wndw channel programmed to a wndw that's owned by our
         * head
         */
        if (asyh->crc.src && disp->disp->object.oclass >= GV100_DISP &&
            !(BIT(asyh->crc.wndw) & asyh->wndw.owned)) {
                if (!asyh->wndw.owned) {
                        /* TODO: once we support flexible channel ownership,
                         * we should write some code here to handle attempting
                         * to "steal" a plane: e.g. take a plane that is
                         * currently not-visible and owned by another head,
                         * and reassign it to this head. If we fail to do so,
                         * we should reject the mode outright as CRC capture
                         * then becomes impossible.
                         */
                        NV_ATOMIC(nouveau_drm(dev),
                                  "No available wndws for CRC readback\n");
                        return -EINVAL;
                }
                asyh->crc.wndw = ffs(asyh->wndw.owned) - 1;
        }

        if (drm_atomic_crtc_needs_modeset(&asyh->state) || changed ||
            armh->crc.wndw != asyh->crc.wndw) {
                asyh->clr.crc = armh->crc.src && armh->state.active;
                asyh->set.crc = asyh->crc.src && asyh->state.active;
                if (changed)
                        asyh->set.or |= armh->or.crc_raster !=
                                        asyh->or.crc_raster;

                if (asyh->clr.crc && asyh->set.crc)
                        atom->flush_disable = true;
        } else {
                asyh->set.crc = false;
                asyh->clr.crc = false;
        }

        return 0;
}

void nv50_crc_atomic_check_outp(struct nv50_atom *atom)
{
        struct drm_crtc *crtc;
        struct drm_crtc_state *old_crtc_state, *new_crtc_state;
        int i;

        if (atom->flush_disable)
                return;

        for_each_oldnew_crtc_in_state(&atom->state, crtc, old_crtc_state,
                                      new_crtc_state, i) {
                struct nv50_head_atom *armh = nv50_head_atom(old_crtc_state);
                struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
                struct nv50_outp_atom *outp_atom;
                struct nouveau_encoder *outp =
                        nv50_real_outp(nv50_head_atom_get_encoder(armh));
                struct drm_encoder *encoder = &outp->base.base;

                if (!asyh->clr.crc)
                        continue;

                /*
                 * Re-programming ORs can't be done in the same flush as
                 * disabling CRCs
                 */
                list_for_each_entry(outp_atom, &atom->outp, head) {
                        if (outp_atom->encoder == encoder) {
                                if (outp_atom->set.mask) {
                                        atom->flush_disable = true;
                                        return;
                                } else {
                                        break;
                                }
                        }
                }
        }
}

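/*
 * Map a user-visible CRC source onto the hardware source type for this
 * output: off-chip encoders use the PIOR tap, on-chip DP uses SF, analog uses
 * the DAC, and everything else goes through the SOR.
 */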
static enum nv50_crc_source_type
nv50_crc_source_type(struct nouveau_encoder *outp,
                     enum nv50_crc_source source)
{
        struct dcb_output *dcbe = outp->dcb;

        switch (source) {
        case NV50_CRC_SOURCE_NONE: return NV50_CRC_SOURCE_TYPE_NONE;
        case NV50_CRC_SOURCE_RG:   return NV50_CRC_SOURCE_TYPE_RG;
        default:                   break;
        }

        if (dcbe->location != DCB_LOC_ON_CHIP)
                return NV50_CRC_SOURCE_TYPE_PIOR;

        switch (dcbe->type) {
        case DCB_OUTPUT_DP:     return NV50_CRC_SOURCE_TYPE_SF;
        case DCB_OUTPUT_ANALOG: return NV50_CRC_SOURCE_TYPE_DAC;
        default:                return NV50_CRC_SOURCE_TYPE_SOR;
        }
}

void nv50_crc_atomic_set(struct nv50_head *head,
                         struct nv50_head_atom *asyh)
{
        struct drm_crtc *crtc = &head->base.base;
        struct drm_device *dev = crtc->dev;
        struct nv50_crc *crc = &head->crc;
        const struct nv50_crc_func *func = nv50_disp(dev)->core->func->crc;
        struct nouveau_encoder *outp =
                nv50_real_outp(nv50_head_atom_get_encoder(asyh));

        func->set_src(head, outp->or,
                      nv50_crc_source_type(outp, asyh->crc.src),
                      &crc->ctx[crc->ctx_idx], asyh->crc.wndw);
}

void nv50_crc_atomic_clr(struct nv50_head *head)
{
        const struct nv50_crc_func *func =
                nv50_disp(head->base.base.dev)->core->func->crc;

        func->set_src(head, 0, NV50_CRC_SOURCE_TYPE_NONE, NULL, 0);
}

static inline int
nv50_crc_raster_type(enum nv50_crc_source source)
{
        switch (source) {
        case NV50_CRC_SOURCE_NONE:
        case NV50_CRC_SOURCE_AUTO:
        case NV50_CRC_SOURCE_RG:
        case NV50_CRC_SOURCE_OUTP_ACTIVE:
                return NV907D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_ACTIVE_RASTER;
        case NV50_CRC_SOURCE_OUTP_COMPLETE:
                return NV907D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_COMPLETE_RASTER;
        case NV50_CRC_SOURCE_OUTP_INACTIVE:
                return NV907D_HEAD_SET_CONTROL_OUTPUT_RESOURCE_CRC_MODE_NON_ACTIVE_RASTER;
        }

        return 0;
}

/* We handle mapping the memory for CRC notifiers ourselves, since each
 * notifier needs its own handle
 */
static inline int
nv50_crc_ctx_init(struct nv50_head *head, struct nvif_mmu *mmu,
                  struct nv50_crc_notifier_ctx *ctx, size_t len, int idx)
{
        struct nv50_core *core = nv50_disp(head->base.base.dev)->core;
        int ret;

        ret = nvif_mem_ctor_map(mmu, "kmsCrcNtfy", NVIF_MEM_VRAM, len, &ctx->mem);
        if (ret)
                return ret;

        ret = nvif_object_ctor(&core->chan.base.user, "kmsCrcNtfyCtxDma",
                               NV50_DISP_HANDLE_CRC_CTX(head, idx),
                               NV_DMA_IN_MEMORY,
                               &(struct nv_dma_v0) {
                                        .target = NV_DMA_V0_TARGET_VRAM,
                                        .access = NV_DMA_V0_ACCESS_RDWR,
                                        .start = ctx->mem.addr,
                                        .limit =  ctx->mem.addr
                                                + ctx->mem.size - 1,
                               }, sizeof(struct nv_dma_v0),
                               &ctx->ntfy);
        if (ret)
                goto fail_fini;

        return 0;

fail_fini:
        nvif_mem_dtor(&ctx->mem);
        return ret;
}

static inline void
nv50_crc_ctx_fini(struct nv50_crc_notifier_ctx *ctx)
{
        nvif_object_dtor(&ctx->ntfy);
        nvif_mem_dtor(&ctx->mem);
}

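/*
 * drm_crtc_funcs.set_crc_source hook: invoked when userspace selects a CRC
 * source through the DRM CRC debugfs ABI (e.g. by writing "auto" to
 * crtc-<N>/crc/control and then reading frame/CRC pairs back from
 * crtc-<N>/crc/data). Enabling a source allocates the notifier contexts and
 * commits the new state; selecting "none" tears everything back down.
 */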
int nv50_crc_set_source(struct drm_crtc *crtc, const char *source_str)
{
        struct drm_device *dev = crtc->dev;
        struct drm_atomic_state *state;
        struct drm_modeset_acquire_ctx ctx;
        struct nv50_head *head = nv50_head(crtc);
        struct nv50_crc *crc = &head->crc;
        const struct nv50_crc_func *func = nv50_disp(dev)->core->func->crc;
        struct nvif_mmu *mmu = &nouveau_drm(dev)->client.mmu;
        struct nv50_head_atom *asyh;
        struct drm_crtc_state *crtc_state;
        enum nv50_crc_source source;
        int ret = 0, ctx_flags = 0, i;

        ret = nv50_crc_parse_source(source_str, &source);
        if (ret)
                return ret;

        /*
         * We don't want the user to accidentally interrupt us while we're
         * disabling CRCs, so only allow interruptible locking when enabling.
         */
        if (source)
                ctx_flags |= DRM_MODESET_ACQUIRE_INTERRUPTIBLE;
        drm_modeset_acquire_init(&ctx, ctx_flags);

        state = drm_atomic_state_alloc(dev);
        if (!state) {
                ret = -ENOMEM;
                goto out_acquire_fini;
        }
        state->acquire_ctx = &ctx;

        if (source) {
                for (i = 0; i < ARRAY_SIZE(head->crc.ctx); i++) {
                        ret = nv50_crc_ctx_init(head, mmu, &crc->ctx[i],
                                                func->notifier_len, i);
                        if (ret)
                                goto out_ctx_fini;
                }
        }

retry:
        crtc_state = drm_atomic_get_crtc_state(state, &head->base.base);
        if (IS_ERR(crtc_state)) {
                ret = PTR_ERR(crtc_state);
                if (ret == -EDEADLK)
                        goto deadlock;
                else if (ret)
                        goto out_drop_locks;
        }
        asyh = nv50_head_atom(crtc_state);
        asyh->crc.src = source;
        asyh->or.crc_raster = nv50_crc_raster_type(source);

        ret = drm_atomic_commit(state);
        if (ret == -EDEADLK)
                goto deadlock;
        else if (ret)
                goto out_drop_locks;

        if (!source) {
                /*
                 * If the user specified a custom flip threshold through
                 * debugfs, reset it
                 */
                crc->flip_threshold = func->flip_threshold;
        }

out_drop_locks:
        drm_modeset_drop_locks(&ctx);
out_ctx_fini:
        if (!source || ret) {
                for (i = 0; i < ARRAY_SIZE(crc->ctx); i++)
                        nv50_crc_ctx_fini(&crc->ctx[i]);
        }
        drm_atomic_state_put(state);
out_acquire_fini:
        drm_modeset_acquire_fini(&ctx);
        return ret;

deadlock:
        drm_atomic_state_clear(state);
        drm_modeset_backoff(&ctx);
        goto retry;
}

static int
nv50_crc_debugfs_flip_threshold_get(struct seq_file *m, void *data)
{
        struct nv50_head *head = m->private;
        struct drm_crtc *crtc = &head->base.base;
        struct nv50_crc *crc = &head->crc;
        int ret;

        ret = drm_modeset_lock_single_interruptible(&crtc->mutex);
        if (ret)
                return ret;

        seq_printf(m, "%d\n", crc->flip_threshold);

        drm_modeset_unlock(&crtc->mutex);
        return ret;
}

static int
nv50_crc_debugfs_flip_threshold_open(struct inode *inode, struct file *file)
{
        return single_open(file, nv50_crc_debugfs_flip_threshold_get,
                           inode->i_private);
}

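/*
 * Writes to the flip_threshold debugfs file set the threshold used for the
 * next capture: -1 restores the hardware default, while values larger than
 * the default (or below -1) are rejected. Changing the threshold while a CRC
 * source is active returns -EBUSY.
 */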
static ssize_t
nv50_crc_debugfs_flip_threshold_set(struct file *file,
                                    const char __user *ubuf, size_t len,
                                    loff_t *offp)
{
        struct seq_file *m = file->private_data;
        struct nv50_head *head = m->private;
        struct nv50_head_atom *armh;
        struct drm_crtc *crtc = &head->base.base;
        struct nouveau_drm *drm = nouveau_drm(crtc->dev);
        struct nv50_crc *crc = &head->crc;
        const struct nv50_crc_func *func =
                nv50_disp(crtc->dev)->core->func->crc;
        int value, ret;

        ret = kstrtoint_from_user(ubuf, len, 10, &value);
        if (ret)
                return ret;

        if (value > func->flip_threshold)
                return -EINVAL;
        else if (value == -1)
                value = func->flip_threshold;
        else if (value < -1)
                return -EINVAL;

        ret = drm_modeset_lock_single_interruptible(&crtc->mutex);
        if (ret)
                return ret;

        armh = nv50_head_atom(crtc->state);
        if (armh->crc.src) {
                ret = -EBUSY;
                goto out;
        }

        NV_DEBUG(drm,
                 "Changing CRC flip threshold for next capture on head-%d to %d\n",
                 head->base.index, value);
        crc->flip_threshold = value;
        ret = len;

out:
        drm_modeset_unlock(&crtc->mutex);
        return ret;
}

static const struct file_operations nv50_crc_flip_threshold_fops = {
        .owner = THIS_MODULE,
        .open = nv50_crc_debugfs_flip_threshold_open,
        .read = seq_read,
        .write = nv50_crc_debugfs_flip_threshold_set,
};

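/*
 * Expose the per-head "nv_crc" debugfs directory underneath the CRTC's
 * debugfs entry. Assuming the standard DRM debugfs layout, the threshold
 * would typically be adjusted with something like:
 *   echo 20 > /sys/kernel/debug/dri/0/crtc-0/nv_crc/flip_threshold
 */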
int nv50_head_crc_late_register(struct nv50_head *head)
{
        struct drm_crtc *crtc = &head->base.base;
        const struct nv50_crc_func *func =
                nv50_disp(crtc->dev)->core->func->crc;
        struct dentry *root;

        if (!func || !crtc->debugfs_entry)
                return 0;

        root = debugfs_create_dir("nv_crc", crtc->debugfs_entry);
        debugfs_create_file("flip_threshold", 0644, root, head,
                            &nv50_crc_flip_threshold_fops);

        return 0;
}

static inline void
nv50_crc_init_head(struct nv50_disp *disp, const struct nv50_crc_func *func,
                   struct nv50_head *head)
{
        struct nv50_crc *crc = &head->crc;

        crc->flip_threshold = func->flip_threshold;
        spin_lock_init(&crc->lock);
        drm_vblank_work_init(&crc->flip_work, &head->base.base,
                             nv50_crc_ctx_flip_work);
}

void nv50_crc_init(struct drm_device *dev)
{
        struct nv50_disp *disp = nv50_disp(dev);
        struct drm_crtc *crtc;
        const struct nv50_crc_func *func = disp->core->func->crc;

        if (!func)
                return;

        drm_for_each_crtc(crtc, dev)
                nv50_crc_init_head(disp, func, nv50_head(crtc));
}