/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "gk104.h"
#include "cgrp.h"
#include "changk104.h"

#include <core/client.h>
#include <core/gpuobj.h>
#include <subdev/bar.h>
#include <subdev/fault.h>
#include <subdev/timer.h>
#include <subdev/top.h>
#include <engine/sw.h>

#include <nvif/class.h>
#include <nvif/cl0080.h>

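/* Layout of the ENGINE_STATUS register (0x002640 + (engn * 0x08)), as
 * implied by the decode in gk104_fifo_engine_status() below; bit names
 * are inferred from how the driver uses them, not from official docs:
 *
 *   31      engine busy          15     context switch in progress
 *   30     engine faulted        14     save in progress
 *   28      next ctx is a TSG    13     load in progress
 *   27:16   next TSG/chan id     12     prev ctx is a TSG
 *                                11:0   prev TSG/chan id
 */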
struct gk104_fifo_engine_status {
	bool busy;
	bool faulted;
	bool chsw;
	bool save;
	bool load;
	struct {
		bool tsg;
		u32 id;
	} prev, next, *chan;
};

static void
gk104_fifo_engine_status(struct gk104_fifo *fifo, int engn,
			 struct gk104_fifo_engine_status *status)
{
	struct nvkm_engine *engine = fifo->engine[engn].engine;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x08));

	status->busy     = !!(stat & 0x80000000);
	status->faulted  = !!(stat & 0x40000000);
	status->next.tsg = !!(stat & 0x10000000);
	status->next.id  =   (stat & 0x0fff0000) >> 16;
	status->chsw     = !!(stat & 0x00008000);
	status->save     = !!(stat & 0x00004000);
	status->load     = !!(stat & 0x00002000);
	status->prev.tsg = !!(stat & 0x00001000);
	status->prev.id  =   (stat & 0x00000fff);
	status->chan     = NULL;

	if (status->busy && status->chsw) {
		if (status->load && status->save) {
			if (engine && nvkm_engine_chsw_load(engine))
				status->chan = &status->next;
			else
				status->chan = &status->prev;
		} else
		if (status->load) {
			status->chan = &status->next;
		} else {
			status->chan = &status->prev;
		}
	} else
	if (status->load) {
		status->chan = &status->prev;
	}

	nvkm_debug(subdev, "engine %02d: busy %d faulted %d chsw %d "
			   "save %d load %d %sid %d%s-> %sid %d%s\n",
		   engn, status->busy, status->faulted,
		   status->chsw, status->save, status->load,
		   status->prev.tsg ? "tsg" : "ch", status->prev.id,
		   status->chan == &status->prev ? "*" : " ",
		   status->next.tsg ? "tsg" : "ch", status->next.id,
		   status->chan == &status->next ? "*" : " ");
}

static int
gk104_fifo_class_new(struct nvkm_fifo *base, const struct nvkm_oclass *oclass,
		     void *argv, u32 argc, struct nvkm_object **pobject)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	if (oclass->engn == &fifo->func->chan) {
		const struct gk104_fifo_chan_user *user = oclass->engn;
		return user->ctor(fifo, oclass, argv, argc, pobject);
	} else
	if (oclass->engn == &fifo->func->user) {
		const struct gk104_fifo_user_user *user = oclass->engn;
		return user->ctor(oclass, argv, argc, pobject);
	}
	WARN_ON(1);
	return -EINVAL;
}

static int
gk104_fifo_class_get(struct nvkm_fifo *base, int index,
		     struct nvkm_oclass *oclass)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	int c = 0;

	if (fifo->func->user.ctor && c++ == index) {
		oclass->base =  fifo->func->user.user;
		oclass->engn = &fifo->func->user;
		return 0;
	}

	if (fifo->func->chan.ctor && c++ == index) {
		oclass->base =  fifo->func->chan.user;
		oclass->engn = &fifo->func->chan;
		return 0;
	}

	return c;
}

static void
gk104_fifo_uevent_fini(struct nvkm_fifo *fifo)
{
	struct nvkm_device *device = fifo->engine.subdev.device;
	nvkm_mask(device, 0x002140, 0x80000000, 0x00000000);
}

static void
gk104_fifo_uevent_init(struct nvkm_fifo *fifo)
{
	struct nvkm_device *device = fifo->engine.subdev.device;
	nvkm_mask(device, 0x002140, 0x80000000, 0x80000000);
}

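/* Runlists are double-buffered: each commit builds the new list in the
 * spare buffer, flips .next, then points the hardware at the fresh copy.
 * A minimal sketch of the submit sequence performed below, assuming the
 * register usage shown is complete:
 *
 *	nvkm_wr32(device, 0x002270, base >> 12 | target << 28);  aperture/base
 *	nvkm_wr32(device, 0x002274, runl << 20 | nr);            runlist+count
 *	poll 0x002284 + (runl * 0x08) until bit 20 clears        wait for HW
 */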
void
gk104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl)
{
	const struct gk104_fifo_runlist_func *func = fifo->func->runlist;
	struct gk104_fifo_chan *chan;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_memory *mem;
	struct nvkm_fifo_cgrp *cgrp;
	int nr = 0;
	int target;

	mutex_lock(&subdev->mutex);
	mem = fifo->runlist[runl].mem[fifo->runlist[runl].next];
	fifo->runlist[runl].next = !fifo->runlist[runl].next;

	nvkm_kmap(mem);
	list_for_each_entry(chan, &fifo->runlist[runl].chan, head) {
		func->chan(chan, mem, nr++ * func->size);
	}

	list_for_each_entry(cgrp, &fifo->runlist[runl].cgrp, head) {
		func->cgrp(cgrp, mem, nr++ * func->size);
		list_for_each_entry(chan, &cgrp->chan, head) {
			func->chan(chan, mem, nr++ * func->size);
		}
	}
	nvkm_done(mem);

	switch (nvkm_memory_target(mem)) {
	case NVKM_MEM_TARGET_VRAM: target = 0; break;
	case NVKM_MEM_TARGET_NCOH: target = 3; break;
	default:
		WARN_ON(1);
		goto unlock;
	}

	nvkm_wr32(device, 0x002270, (nvkm_memory_addr(mem) >> 12) |
				    (target << 28));
	nvkm_wr32(device, 0x002274, (runl << 20) | nr);

	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x002284 + (runl * 0x08)) & 0x00100000))
			break;
	) < 0)
		nvkm_error(subdev, "runlist %d update timeout\n", runl);
unlock:
	mutex_unlock(&subdev->mutex);
}

void
gk104_fifo_runlist_remove(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan)
{
	struct nvkm_fifo_cgrp *cgrp = chan->cgrp;
	mutex_lock(&fifo->base.engine.subdev.mutex);
	if (!list_empty(&chan->head)) {
		list_del_init(&chan->head);
		if (cgrp && !--cgrp->chan_nr)
			list_del_init(&cgrp->head);
	}
	mutex_unlock(&fifo->base.engine.subdev.mutex);
}

void
gk104_fifo_runlist_insert(struct gk104_fifo *fifo, struct gk104_fifo_chan *chan)
{
	struct nvkm_fifo_cgrp *cgrp = chan->cgrp;
	mutex_lock(&fifo->base.engine.subdev.mutex);
	if (cgrp) {
		if (!cgrp->chan_nr++)
			list_add_tail(&cgrp->head, &fifo->runlist[chan->runl].cgrp);
		list_add_tail(&chan->head, &cgrp->chan);
	} else {
		list_add_tail(&chan->head, &fifo->runlist[chan->runl].chan);
	}
	mutex_unlock(&fifo->base.engine.subdev.mutex);
}

void
gk104_fifo_runlist_chan(struct gk104_fifo_chan *chan,
			struct nvkm_memory *memory, u32 offset)
{
	nvkm_wo32(memory, offset + 0, chan->base.chid);
	nvkm_wo32(memory, offset + 4, 0x00000000);
}

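/* Each gk104-format runlist entry is two 32-bit words (hence .size = 8
 * below); for a bare channel, word 0 holds the channel id and word 1 is
 * zero, as written by gk104_fifo_runlist_chan() above.
 */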
const struct gk104_fifo_runlist_func
gk104_fifo_runlist = {
	.size = 8,
	.chan = gk104_fifo_runlist_chan,
};

static void
gk104_fifo_recover_work(struct work_struct *w)
{
	struct gk104_fifo *fifo = container_of(w, typeof(*fifo), recover.work);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_engine *engine;
	unsigned long flags;
	u32 engm, runm, todo;
	int engn, runl;

	spin_lock_irqsave(&fifo->base.lock, flags);
	runm = fifo->recover.runm;
	engm = fifo->recover.engm;
	fifo->recover.engm = 0;
	fifo->recover.runm = 0;
	spin_unlock_irqrestore(&fifo->base.lock, flags);

	nvkm_mask(device, 0x002630, runm, runm);

	for (todo = engm; engn = __ffs(todo), todo; todo &= ~BIT(engn)) {
		if ((engine = fifo->engine[engn].engine)) {
			nvkm_subdev_fini(&engine->subdev, false);
			WARN_ON(nvkm_subdev_init(&engine->subdev));
		}
	}

	for (todo = runm; runl = __ffs(todo), todo; todo &= ~BIT(runl))
		gk104_fifo_runlist_commit(fifo, runl);

	nvkm_wr32(device, 0x00262c, runm);
	nvkm_mask(device, 0x002630, runm, 0x00000000);
}

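/* Recovery is split in two: the gk104_fifo_recover_*() helpers below run
 * under fifo->base.lock, marking runlists/engines for recovery and
 * blocking further scheduling, while the heavy lifting (engine reset and
 * runlist resubmission) is deferred to gk104_fifo_recover_work() above.
 */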
static void gk104_fifo_recover_engn(struct gk104_fifo *fifo, int engn);

static void
gk104_fifo_recover_runl(struct gk104_fifo *fifo, int runl)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	const u32 runm = BIT(runl);

	assert_spin_locked(&fifo->base.lock);
	if (fifo->recover.runm & runm)
		return;
	fifo->recover.runm |= runm;

	/* Block runlist to prevent channel assignment(s) from changing. */
	nvkm_mask(device, 0x002630, runm, runm);

	/* Schedule recovery. */
	nvkm_warn(subdev, "runlist %d: scheduled for recovery\n", runl);
	schedule_work(&fifo->recover.work);
}

static struct gk104_fifo_chan *
gk104_fifo_recover_chid(struct gk104_fifo *fifo, int runl, int chid)
{
	struct gk104_fifo_chan *chan;
	struct nvkm_fifo_cgrp *cgrp;

	list_for_each_entry(chan, &fifo->runlist[runl].chan, head) {
		if (chan->base.chid == chid) {
			list_del_init(&chan->head);
			return chan;
		}
	}

	list_for_each_entry(cgrp, &fifo->runlist[runl].cgrp, head) {
		if (cgrp->id == chid) {
			chan = list_first_entry(&cgrp->chan, typeof(*chan), head);
			list_del_init(&chan->head);
			if (!--cgrp->chan_nr)
				list_del_init(&cgrp->head);
			return chan;
		}
	}

	return NULL;
}

static void
gk104_fifo_recover_chan(struct nvkm_fifo *base, int chid)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	const u32  stat = nvkm_rd32(device, 0x800004 + (chid * 0x08));
	const u32  runl = (stat & 0x000f0000) >> 16;
	const bool used = (stat & 0x00000001);
	unsigned long engn, engm = fifo->runlist[runl].engm;
	struct gk104_fifo_chan *chan;

	assert_spin_locked(&fifo->base.lock);
	if (!used)
		return;

	/* Lookup SW state for channel, and mark it as dead. */
	chan = gk104_fifo_recover_chid(fifo, runl, chid);
	if (chan) {
		chan->killed = true;
		nvkm_fifo_kevent(&fifo->base, chid);
	}

	/* Disable channel. */
	nvkm_wr32(device, 0x800004 + (chid * 0x08), stat | 0x00000800);
	nvkm_warn(subdev, "channel %d: killed\n", chid);

	/* Block channel assignments from changing during recovery. */
	gk104_fifo_recover_runl(fifo, runl);

	/* Schedule recovery for any engines the channel is on. */
	for_each_set_bit(engn, &engm, fifo->engine_nr) {
		struct gk104_fifo_engine_status status;
		gk104_fifo_engine_status(fifo, engn, &status);
		if (!status.chan || status.chan->id != chid)
			continue;
		gk104_fifo_recover_engn(fifo, engn);
	}
}

static void
gk104_fifo_recover_engn(struct gk104_fifo *fifo, int engn)
{
	struct nvkm_engine *engine = fifo->engine[engn].engine;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	const u32 runl = fifo->engine[engn].runl;
	const u32 engm = BIT(engn);
	struct gk104_fifo_engine_status status;
	int mmui = -1;

	assert_spin_locked(&fifo->base.lock);
	if (fifo->recover.engm & engm)
		return;
	fifo->recover.engm |= engm;

	/* Block channel assignments from changing during recovery. */
	gk104_fifo_recover_runl(fifo, runl);

	/* Determine which channel (if any) is currently on the engine. */
	gk104_fifo_engine_status(fifo, engn, &status);
	if (status.chan) {
		/* The channel is no longer viable, kill it. */
		gk104_fifo_recover_chan(&fifo->base, status.chan->id);
	}

	/* Determine MMU fault ID for the engine, if we're not being
	 * called from the fault handler already.
	 */
	if (!status.faulted && engine) {
		mmui = nvkm_top_fault_id(device, engine->subdev.index);
		if (mmui < 0) {
			const struct nvkm_enum *en = fifo->func->fault.engine;
			for (; en && en->name; en++) {
				if (en->data2 == engine->subdev.index) {
					mmui = en->value;
					break;
				}
			}
		}
		WARN_ON(mmui < 0);
	}

	/* Trigger a MMU fault for the engine.
	 *
	 * No good idea why this is needed, but nvgpu does something similar,
	 * and it makes recovery from CTXSW_TIMEOUT a lot more reliable.
	 */
	if (mmui >= 0) {
		nvkm_wr32(device, 0x002a30 + (engn * 0x04), 0x00000100 | mmui);

		/* Wait for fault to trigger. */
		nvkm_msec(device, 2000,
			gk104_fifo_engine_status(fifo, engn, &status);
			if (status.faulted)
				break;
		);

		/* Release MMU fault trigger, and ACK the fault. */
		nvkm_wr32(device, 0x002a30 + (engn * 0x04), 0x00000000);
		nvkm_wr32(device, 0x00259c, BIT(mmui));
		nvkm_wr32(device, 0x002100, 0x10000000);
	}

	/* Schedule recovery. */
	nvkm_warn(subdev, "engine %d: scheduled for recovery\n", engn);
	schedule_work(&fifo->recover.work);
}

static void
gk104_fifo_fault(struct nvkm_fifo *base, struct nvkm_fault_data *info)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	const struct nvkm_enum *er, *ee, *ec, *ea;
	struct nvkm_engine *engine = NULL;
	struct nvkm_fifo_chan *chan;
	unsigned long flags;
	char ct[8] = "HUB/", en[16] = "";
	int engn;

	er = nvkm_enum_find(fifo->func->fault.reason, info->reason);
	ee = nvkm_enum_find(fifo->func->fault.engine, info->engine);
	if (info->hub) {
		ec = nvkm_enum_find(fifo->func->fault.hubclient, info->client);
	} else {
		ec = nvkm_enum_find(fifo->func->fault.gpcclient, info->client);
		snprintf(ct, sizeof(ct), "GPC%d/", info->gpc);
	}
	ea = nvkm_enum_find(fifo->func->fault.access, info->access);

	if (ee && ee->data2) {
		switch (ee->data2) {
		case NVKM_SUBDEV_BAR:
			nvkm_mask(device, 0x001704, 0x00000000, 0x00000000);
			break;
		case NVKM_SUBDEV_INSTMEM:
			nvkm_mask(device, 0x001714, 0x00000000, 0x00000000);
			break;
		case NVKM_ENGINE_IFB:
			nvkm_mask(device, 0x001718, 0x00000000, 0x00000000);
			break;
		default:
			engine = nvkm_device_engine(device, ee->data2);
			break;
		}
	}

	if (ee == NULL) {
		enum nvkm_devidx engidx = nvkm_top_fault(device, info->engine);
		if (engidx < NVKM_SUBDEV_NR) {
			const char *src = nvkm_subdev_name[engidx];
			char *dst = en;
			do {
				*dst++ = toupper(*src++);
			} while (*src);
			engine = nvkm_device_engine(device, engidx);
		}
	} else {
		snprintf(en, sizeof(en), "%s", ee->name);
	}

	spin_lock_irqsave(&fifo->base.lock, flags);
	chan = nvkm_fifo_chan_inst_locked(&fifo->base, info->inst);

	nvkm_error(subdev,
		   "fault %02x [%s] at %016llx engine %02x [%s] client %02x "
		   "[%s%s] reason %02x [%s] on channel %d [%010llx %s]\n",
		   info->access, ea ? ea->name : "", info->addr,
		   info->engine, ee ? ee->name : en,
		   info->client, ct, ec ? ec->name : "",
		   info->reason, er ? er->name : "", chan ? chan->chid : -1,
		   info->inst, chan ? chan->object.client->name : "unknown");

	/* Kill the channel that caused the fault. */
	if (chan)
		gk104_fifo_recover_chan(&fifo->base, chan->chid);

	/* Channel recovery will probably have already done this for the
	 * correct engine(s), but just in case we can't find the channel
	 * information...
	 */
	for (engn = 0; engn < fifo->engine_nr && engine; engn++) {
		if (fifo->engine[engn].engine == engine) {
			gk104_fifo_recover_engn(fifo, engn);
			break;
		}
	}

	spin_unlock_irqrestore(&fifo->base.lock, flags);
}

static const struct nvkm_enum
gk104_fifo_bind_reason[] = {
	{ 0x01, "BIND_NOT_UNBOUND" },
	{ 0x02, "SNOOP_WITHOUT_BAR1" },
	{ 0x03, "UNBIND_WHILE_RUNNING" },
	{ 0x05, "INVALID_RUNLIST" },
	{ 0x06, "INVALID_CTX_TGT" },
	{ 0x0b, "UNBIND_WHILE_PARKED" },
	{}
};

static void
gk104_fifo_intr_bind(struct gk104_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 intr = nvkm_rd32(device, 0x00252c);
	u32 code = intr & 0x000000ff;
	const struct nvkm_enum *en =
		nvkm_enum_find(gk104_fifo_bind_reason, code);

	nvkm_error(subdev, "BIND_ERROR %02x [%s]\n", code, en ? en->name : "");
}

static const struct nvkm_enum
gk104_fifo_sched_reason[] = {
	{ 0x0a, "CTXSW_TIMEOUT" },
	{}
};

static void
gk104_fifo_intr_sched_ctxsw(struct gk104_fifo *fifo)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	unsigned long flags, engm = 0;
	u32 engn;

	/* We need to ACK the SCHED_ERROR here, and prevent it reasserting,
	 * as MMU_FAULT cannot be triggered while it's pending.
	 */
	spin_lock_irqsave(&fifo->base.lock, flags);
	nvkm_mask(device, 0x002140, 0x00000100, 0x00000000);
	nvkm_wr32(device, 0x002100, 0x00000100);

	for (engn = 0; engn < fifo->engine_nr; engn++) {
		struct gk104_fifo_engine_status status;

		gk104_fifo_engine_status(fifo, engn, &status);
		if (!status.busy || !status.chsw)
			continue;

		engm |= BIT(engn);
	}

	for_each_set_bit(engn, &engm, fifo->engine_nr)
		gk104_fifo_recover_engn(fifo, engn);

	nvkm_mask(device, 0x002140, 0x00000100, 0x00000100);
	spin_unlock_irqrestore(&fifo->base.lock, flags);
}

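/* SCHED_ERROR dispatch: of the error codes logged below, only 0x0a
 * (CTXSW_TIMEOUT) receives active handling, via the context-switch
 * recovery path above; anything else is merely reported.
 */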
static void
gk104_fifo_intr_sched(struct gk104_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 intr = nvkm_rd32(device, 0x00254c);
	u32 code = intr & 0x000000ff;
	const struct nvkm_enum *en =
		nvkm_enum_find(gk104_fifo_sched_reason, code);

	nvkm_error(subdev, "SCHED_ERROR %02x [%s]\n", code, en ? en->name : "");

	switch (code) {
	case 0x0a:
		gk104_fifo_intr_sched_ctxsw(fifo);
		break;
	default:
		break;
	}
}

static void
gk104_fifo_intr_chsw(struct gk104_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x00256c);

	nvkm_error(subdev, "CHSW_ERROR %08x\n", stat);
	nvkm_wr32(device, 0x00256c, stat);
}

static void
gk104_fifo_intr_dropped_fault(struct gk104_fifo *fifo)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x00259c);

	nvkm_error(subdev, "DROPPED_MMU_FAULT %08x\n", stat);
}

static void
gk104_fifo_intr_fault(struct gk104_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 inst = nvkm_rd32(device, 0x002800 + (unit * 0x10));
	u32 valo = nvkm_rd32(device, 0x002804 + (unit * 0x10));
	u32 vahi = nvkm_rd32(device, 0x002808 + (unit * 0x10));
	u32 type = nvkm_rd32(device, 0x00280c + (unit * 0x10));
	struct nvkm_fault_data info;

	info.inst   =  (u64)inst << 12;
	info.addr   = ((u64)vahi << 32) | valo;
	info.time   = 0;
	info.engine = unit;
	info.valid  = 1;
	info.gpc    = (type & 0x1f000000) >> 24;
	info.client = (type & 0x00001f00) >> 8;
	info.access = (type & 0x00000080) >> 7;
	info.hub    = (type & 0x00000040) >> 6;
	info.reason = (type & 0x000000ff);

	nvkm_fifo_fault(&fifo->base, &info);
}

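/* The per-unit MMU fault info registers at 0x002800 + (unit * 0x10) are
 * decoded above as: instance block address (<< 12), fault address lo/hi,
 * and a type word holding gpc (28:24), client (12:8), access (7), hub (6)
 * and reason (7:0); field positions are inferred from the code, not from
 * official documentation.
 */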
static const struct nvkm_bitfield gk104_fifo_pbdma_intr_0[] = {
	{ 0x00000001, "MEMREQ" },
	{ 0x00000002, "MEMACK_TIMEOUT" },
	{ 0x00000004, "MEMACK_EXTRA" },
	{ 0x00000008, "MEMDAT_TIMEOUT" },
	{ 0x00000010, "MEMDAT_EXTRA" },
	{ 0x00000020, "MEMFLUSH" },
	{ 0x00000040, "MEMOP" },
	{ 0x00000080, "LBCONNECT" },
	{ 0x00000100, "LBREQ" },
	{ 0x00000200, "LBACK_TIMEOUT" },
	{ 0x00000400, "LBACK_EXTRA" },
	{ 0x00000800, "LBDAT_TIMEOUT" },
	{ 0x00001000, "LBDAT_EXTRA" },
	{ 0x00002000, "GPFIFO" },
	{ 0x00004000, "GPPTR" },
	{ 0x00008000, "GPENTRY" },
	{ 0x00010000, "GPCRC" },
	{ 0x00020000, "PBPTR" },
	{ 0x00040000, "PBENTRY" },
	{ 0x00080000, "PBCRC" },
	{ 0x00100000, "XBARCONNECT" },
	{ 0x00200000, "METHOD" },
	{ 0x00400000, "METHODCRC" },
	{ 0x00800000, "DEVICE" },
	{ 0x02000000, "SEMAPHORE" },
	{ 0x04000000, "ACQUIRE" },
	{ 0x08000000, "PRI" },
	{ 0x20000000, "NO_CTXSW_SEG" },
	{ 0x40000000, "PBSEG" },
	{ 0x80000000, "SIGNATURE" },
	{}
};

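/* Note that bit 23 (DEVICE) doubles as the software-method trap: when it
 * fires, the handler below first offers the method to the SW engine class
 * via nvkm_sw_mthd(), and only logs the interrupt if nothing claims it.
 */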
static void
gk104_fifo_intr_pbdma_0(struct gk104_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = nvkm_rd32(device, 0x04010c + (unit * 0x2000));
	u32 stat = nvkm_rd32(device, 0x040108 + (unit * 0x2000)) & mask;
	u32 addr = nvkm_rd32(device, 0x0400c0 + (unit * 0x2000));
	u32 data = nvkm_rd32(device, 0x0400c4 + (unit * 0x2000));
	u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff;
	u32 subc = (addr & 0x00070000) >> 16;
	u32 mthd = (addr & 0x00003ffc);
	u32 show = stat;
	struct nvkm_fifo_chan *chan;
	unsigned long flags;
	char msg[128];

	if (stat & 0x00800000) {
		if (device->sw) {
			if (nvkm_sw_mthd(device->sw, chid, subc, mthd, data))
				show &= ~0x00800000;
		}
	}

	nvkm_wr32(device, 0x0400c0 + (unit * 0x2000), 0x80600008);

	if (show) {
		nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_0, show);
		chan = nvkm_fifo_chan_chid(&fifo->base, chid, &flags);
		nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d [%010llx %s] "
				   "subc %d mthd %04x data %08x\n",
			   unit, show, msg, chid, chan ? chan->inst->addr : 0,
			   chan ? chan->object.client->name : "unknown",
			   subc, mthd, data);
		nvkm_fifo_chan_put(&fifo->base, flags, &chan);
	}

	nvkm_wr32(device, 0x040108 + (unit * 0x2000), stat);
}

static const struct nvkm_bitfield gk104_fifo_pbdma_intr_1[] = {
	{ 0x00000001, "HCE_RE_ILLEGAL_OP" },
	{ 0x00000002, "HCE_RE_ALIGNB" },
	{ 0x00000004, "HCE_PRIV" },
	{ 0x00000008, "HCE_ILLEGAL_MTHD" },
	{ 0x00000010, "HCE_ILLEGAL_CLASS" },
	{}
};

static void
gk104_fifo_intr_pbdma_1(struct gk104_fifo *fifo, int unit)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = nvkm_rd32(device, 0x04014c + (unit * 0x2000));
	u32 stat = nvkm_rd32(device, 0x040148 + (unit * 0x2000)) & mask;
	u32 chid = nvkm_rd32(device, 0x040120 + (unit * 0x2000)) & 0xfff;
	char msg[128];

	if (stat) {
		nvkm_snprintbf(msg, sizeof(msg), gk104_fifo_pbdma_intr_1, stat);
		nvkm_error(subdev, "PBDMA%d: %08x [%s] ch %d %08x %08x\n",
			   unit, stat, msg, chid,
			   nvkm_rd32(device, 0x040150 + (unit * 0x2000)),
			   nvkm_rd32(device, 0x040154 + (unit * 0x2000)));
	}

	nvkm_wr32(device, 0x040148 + (unit * 0x2000), stat);
}

static void
gk104_fifo_intr_runlist(struct gk104_fifo *fifo)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 mask = nvkm_rd32(device, 0x002a00);
	while (mask) {
		int runl = __ffs(mask);
		wake_up(&fifo->runlist[runl].wait);
		nvkm_wr32(device, 0x002a00, 1 << runl);
		mask &= ~(1 << runl);
	}
}

static void
gk104_fifo_intr_engine(struct gk104_fifo *fifo)
{
	nvkm_fifo_uevent(&fifo->base);
}

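/* Top-level PFIFO interrupt dispatch.  The status bits in 0x002100
 * (masked by 0x002140) handled below are, as named by their error
 * messages: 0x00000001 BIND_ERROR, 0x00000010 PIO_ERROR, 0x00000100
 * SCHED_ERROR, 0x00010000 CHSW_ERROR, 0x00800000 FB_FLUSH_TIMEOUT,
 * 0x01000000 LB_ERROR, 0x08000000 DROPPED_MMU_FAULT, 0x10000000
 * MMU_FAULT, 0x20000000 PBDMA, plus runlist updates (0x40000000) and
 * engine (non-stall) events (0x80000000).
 */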
static void
gk104_fifo_intr(struct nvkm_fifo *base)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = nvkm_rd32(device, 0x002140);
	u32 stat = nvkm_rd32(device, 0x002100) & mask;

	if (stat & 0x00000001) {
		gk104_fifo_intr_bind(fifo);
		nvkm_wr32(device, 0x002100, 0x00000001);
		stat &= ~0x00000001;
	}

	if (stat & 0x00000010) {
		nvkm_error(subdev, "PIO_ERROR\n");
		nvkm_wr32(device, 0x002100, 0x00000010);
		stat &= ~0x00000010;
	}

	if (stat & 0x00000100) {
		gk104_fifo_intr_sched(fifo);
		nvkm_wr32(device, 0x002100, 0x00000100);
		stat &= ~0x00000100;
	}

	if (stat & 0x00010000) {
		gk104_fifo_intr_chsw(fifo);
		nvkm_wr32(device, 0x002100, 0x00010000);
		stat &= ~0x00010000;
	}

	if (stat & 0x00800000) {
		nvkm_error(subdev, "FB_FLUSH_TIMEOUT\n");
		nvkm_wr32(device, 0x002100, 0x00800000);
		stat &= ~0x00800000;
	}

	if (stat & 0x01000000) {
		nvkm_error(subdev, "LB_ERROR\n");
		nvkm_wr32(device, 0x002100, 0x01000000);
		stat &= ~0x01000000;
	}

	if (stat & 0x08000000) {
		gk104_fifo_intr_dropped_fault(fifo);
		nvkm_wr32(device, 0x002100, 0x08000000);
		stat &= ~0x08000000;
	}

	if (stat & 0x10000000) {
		u32 mask = nvkm_rd32(device, 0x00259c);
		while (mask) {
			u32 unit = __ffs(mask);
			gk104_fifo_intr_fault(fifo, unit);
			nvkm_wr32(device, 0x00259c, (1 << unit));
			mask &= ~(1 << unit);
		}
		stat &= ~0x10000000;
	}

	if (stat & 0x20000000) {
		u32 mask = nvkm_rd32(device, 0x0025a0);
		while (mask) {
			u32 unit = __ffs(mask);
			gk104_fifo_intr_pbdma_0(fifo, unit);
			gk104_fifo_intr_pbdma_1(fifo, unit);
			nvkm_wr32(device, 0x0025a0, (1 << unit));
			mask &= ~(1 << unit);
		}
		stat &= ~0x20000000;
	}

	if (stat & 0x40000000) {
		gk104_fifo_intr_runlist(fifo);
		stat &= ~0x40000000;
	}

	if (stat & 0x80000000) {
		nvkm_wr32(device, 0x002100, 0x80000000);
		gk104_fifo_intr_engine(fifo);
		stat &= ~0x80000000;
	}

	if (stat) {
		nvkm_error(subdev, "INTR %08x\n", stat);
		nvkm_mask(device, 0x002140, stat, 0x00000000);
		nvkm_wr32(device, 0x002100, stat);
	}
}

static void
gk104_fifo_fini(struct nvkm_fifo *base)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	flush_work(&fifo->recover.work);
	/* Allow MMU fault interrupts, even when we're not using FIFO. */
	nvkm_mask(device, 0x002140, 0x10000000, 0x10000000);
}

static int
gk104_fifo_info(struct nvkm_fifo *base, u64 mthd, u64 *data)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	switch (mthd) {
	case NV_DEVICE_FIFO_RUNLISTS:
		*data = (1ULL << fifo->runlist_nr) - 1;
		return 0;
	case NV_DEVICE_FIFO_RUNLIST_ENGINES(0)...
	     NV_DEVICE_FIFO_RUNLIST_ENGINES(63): {
		int runl = mthd - NV_DEVICE_FIFO_RUNLIST_ENGINES(0), engn;
		if (runl < fifo->runlist_nr) {
			unsigned long engm = fifo->runlist[runl].engm;
			struct nvkm_engine *engine;
			*data = 0;
			for_each_set_bit(engn, &engm, fifo->engine_nr) {
				if ((engine = fifo->engine[engn].engine))
					*data |= BIT_ULL(engine->subdev.index);
			}
			return 0;
		}
		return -EINVAL;
	}
	default:
		return -EINVAL;
	}
}

static int
gk104_fifo_oneinit(struct nvkm_fifo *base)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_vmm *bar = nvkm_bar_bar1_vmm(device);
	int engn, runl, pbid, ret, i, j;
	enum nvkm_devidx engidx;
	u32 *map;

	/* Determine number of PBDMAs by checking valid enable bits. */
	nvkm_wr32(device, 0x000204, 0xffffffff);
	fifo->pbdma_nr = hweight32(nvkm_rd32(device, 0x000204));
	nvkm_debug(subdev, "%d PBDMA(s)\n", fifo->pbdma_nr);

	/* Read PBDMA->runlist(s) mapping from HW. */
	if (!(map = kzalloc(sizeof(*map) * fifo->pbdma_nr, GFP_KERNEL)))
		return -ENOMEM;

	for (i = 0; i < fifo->pbdma_nr; i++)
		map[i] = nvkm_rd32(device, 0x002390 + (i * 0x04));

	/* Determine runlist configuration from topology device info. */
	i = 0;
	while ((int)(engidx = nvkm_top_engine(device, i++, &runl, &engn)) >= 0) {
		/* Determine which PBDMA handles requests for this engine. */
		for (j = 0, pbid = -1; j < fifo->pbdma_nr; j++) {
			if (map[j] & (1 << runl)) {
				pbid = j;
				break;
			}
		}

		nvkm_debug(subdev, "engine %2d: runlist %2d pbdma %2d (%s)\n",
			   engn, runl, pbid, nvkm_subdev_name[engidx]);

		fifo->engine[engn].engine = nvkm_device_engine(device, engidx);
		fifo->engine[engn].runl = runl;
		fifo->engine[engn].pbid = pbid;
		fifo->engine_nr = max(fifo->engine_nr, engn + 1);
		fifo->runlist[runl].engm |= 1 << engn;
		fifo->runlist_nr = max(fifo->runlist_nr, runl + 1);
	}

	kfree(map);

	for (i = 0; i < fifo->runlist_nr; i++) {
		for (j = 0; j < ARRAY_SIZE(fifo->runlist[i].mem); j++) {
			ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
					      fifo->base.nr * 2/* TSG+chan */ *
					      fifo->func->runlist->size,
					      0x1000, false,
					      &fifo->runlist[i].mem[j]);
			if (ret)
				return ret;
		}

		init_waitqueue_head(&fifo->runlist[i].wait);
		INIT_LIST_HEAD(&fifo->runlist[i].cgrp);
		INIT_LIST_HEAD(&fifo->runlist[i].chan);
	}

	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
			      fifo->base.nr * 0x200, 0x1000, true,
			      &fifo->user.mem);
	if (ret)
		return ret;

	ret = nvkm_vmm_get(bar, 12, nvkm_memory_size(fifo->user.mem),
			   &fifo->user.bar);
	if (ret)
		return ret;

	return nvkm_memory_map(fifo->user.mem, 0, bar, fifo->user.bar, NULL, 0);
}

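/* One-time setup above sizes each runlist buffer at two entries per
 * channel (TSG header + channel) and maps USERD through BAR1;
 * gk104_fifo_init() below then enables the PBDMA units, resets their
 * interrupt state, and points the hardware at USERD via 0x002254.
 */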
static void
gk104_fifo_init(struct nvkm_fifo *base)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	int i;

	/* Enable PBDMAs. */
	nvkm_wr32(device, 0x000204, (1 << fifo->pbdma_nr) - 1);

	/* PBDMA[n] */
	for (i = 0; i < fifo->pbdma_nr; i++) {
		nvkm_mask(device, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
		nvkm_wr32(device, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
		nvkm_wr32(device, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
	}

	/* PBDMA[n].HCE */
	for (i = 0; i < fifo->pbdma_nr; i++) {
		nvkm_wr32(device, 0x040148 + (i * 0x2000), 0xffffffff); /* INTR */
		nvkm_wr32(device, 0x04014c + (i * 0x2000), 0xffffffff); /* INTREN */
	}

	nvkm_wr32(device, 0x002254, 0x10000000 | fifo->user.bar->addr >> 12);

	if (fifo->func->init_pbdma_timeout)
		fifo->func->init_pbdma_timeout(fifo);

	nvkm_wr32(device, 0x002100, 0xffffffff);
	nvkm_wr32(device, 0x002140, 0x7fffffff);
}

static void *
gk104_fifo_dtor(struct nvkm_fifo *base)
{
	struct gk104_fifo *fifo = gk104_fifo(base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	int i;

	nvkm_vmm_put(nvkm_bar_bar1_vmm(device), &fifo->user.bar);
	nvkm_memory_unref(&fifo->user.mem);

	for (i = 0; i < fifo->runlist_nr; i++) {
		nvkm_memory_unref(&fifo->runlist[i].mem[1]);
		nvkm_memory_unref(&fifo->runlist[i].mem[0]);
	}

	return fifo;
}

static const struct nvkm_fifo_func
gk104_fifo_ = {
	.dtor = gk104_fifo_dtor,
	.oneinit = gk104_fifo_oneinit,
	.info = gk104_fifo_info,
	.init = gk104_fifo_init,
	.fini = gk104_fifo_fini,
	.intr = gk104_fifo_intr,
	.fault = gk104_fifo_fault,
	.uevent_init = gk104_fifo_uevent_init,
	.uevent_fini = gk104_fifo_uevent_fini,
	.recover_chan = gk104_fifo_recover_chan,
	.class_get = gk104_fifo_class_get,
	.class_new = gk104_fifo_class_new,
};

int
gk104_fifo_new_(const struct gk104_fifo_func *func, struct nvkm_device *device,
		int index, int nr, struct nvkm_fifo **pfifo)
{
	struct gk104_fifo *fifo;

	if (!(fifo = kzalloc(sizeof(*fifo), GFP_KERNEL)))
		return -ENOMEM;
	fifo->func = func;
	INIT_WORK(&fifo->recover.work, gk104_fifo_recover_work);
	*pfifo = &fifo->base;

	return nvkm_fifo_ctor(&gk104_fifo_, device, index, nr, &fifo->base);
}

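/* Later chips (gk110, gm107, gp100, ...) appear to reuse this code by
 * passing their own gk104_fifo_func (fault tables, runlist format,
 * channel classes) to gk104_fifo_new_(); the gk104 instance below is the
 * Kepler baseline.
 */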
const struct nvkm_enum
gk104_fifo_fault_access[] = {
	{ 0x0, "READ" },
	{ 0x1, "WRITE" },
	{}
};

const struct nvkm_enum
gk104_fifo_fault_engine[] = {
	{ 0x00, "GR", NULL, NVKM_ENGINE_GR },
	{ 0x01, "DISPLAY" },
	{ 0x02, "CAPTURE" },
	{ 0x03, "IFB", NULL, NVKM_ENGINE_IFB },
	{ 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
	{ 0x05, "BAR2", NULL, NVKM_SUBDEV_INSTMEM },
	{ 0x06, "SCHED" },
	{ 0x07, "HOST0", NULL, NVKM_ENGINE_FIFO },
	{ 0x08, "HOST1", NULL, NVKM_ENGINE_FIFO },
	{ 0x09, "HOST2", NULL, NVKM_ENGINE_FIFO },
	{ 0x0a, "HOST3", NULL, NVKM_ENGINE_FIFO },
	{ 0x0b, "HOST4", NULL, NVKM_ENGINE_FIFO },
	{ 0x0c, "HOST5", NULL, NVKM_ENGINE_FIFO },
	{ 0x0d, "HOST6", NULL, NVKM_ENGINE_FIFO },
	{ 0x0e, "HOST7", NULL, NVKM_ENGINE_FIFO },
	{ 0x0f, "HOSTSR" },
	{ 0x10, "MSVLD", NULL, NVKM_ENGINE_MSVLD },
	{ 0x11, "MSPPP", NULL, NVKM_ENGINE_MSPPP },
	{ 0x13, "PERF" },
	{ 0x14, "MSPDEC", NULL, NVKM_ENGINE_MSPDEC },
	{ 0x15, "CE0", NULL, NVKM_ENGINE_CE0 },
	{ 0x16, "CE1", NULL, NVKM_ENGINE_CE1 },
	{ 0x17, "PMU" },
	{ 0x18, "PTP" },
	{ 0x19, "MSENC", NULL, NVKM_ENGINE_MSENC },
	{ 0x1b, "CE2", NULL, NVKM_ENGINE_CE2 },
	{}
};

const struct nvkm_enum
gk104_fifo_fault_reason[] = {
	{ 0x00, "PDE" },
	{ 0x01, "PDE_SIZE" },
	{ 0x02, "PTE" },
	{ 0x03, "VA_LIMIT_VIOLATION" },
	{ 0x04, "UNBOUND_INST_BLOCK" },
	{ 0x05, "PRIV_VIOLATION" },
	{ 0x06, "RO_VIOLATION" },
	{ 0x07, "WO_VIOLATION" },
	{ 0x08, "PITCH_MASK_VIOLATION" },
	{ 0x09, "WORK_CREATION" },
	{ 0x0a, "UNSUPPORTED_APERTURE" },
	{ 0x0b, "COMPRESSION_FAILURE" },
	{ 0x0c, "UNSUPPORTED_KIND" },
	{ 0x0d, "REGION_VIOLATION" },
	{ 0x0e, "BOTH_PTES_VALID" },
	{ 0x0f, "INFO_TYPE_POISONED" },
	{}
};

const struct nvkm_enum
gk104_fifo_fault_hubclient[] = {
	{ 0x00, "VIP" },
	{ 0x01, "CE0" },
	{ 0x02, "CE1" },
	{ 0x03, "DNISO" },
	{ 0x04, "FE" },
	{ 0x05, "FECS" },
	{ 0x06, "HOST" },
	{ 0x07, "HOST_CPU" },
	{ 0x08, "HOST_CPU_NB" },
	{ 0x09, "ISO" },
	{ 0x0a, "MMU" },
	{ 0x0b, "MSPDEC" },
	{ 0x0c, "MSPPP" },
	{ 0x0d, "MSVLD" },
	{ 0x0e, "NISO" },
	{ 0x0f, "P2P" },
	{ 0x10, "PD" },
	{ 0x11, "PERF" },
	{ 0x12, "PMU" },
	{ 0x13, "RASTERTWOD" },
	{ 0x14, "SCC" },
	{ 0x15, "SCC_NB" },
	{ 0x16, "SEC" },
	{ 0x17, "SSYNC" },
	{ 0x18, "GR_CE" },
	{ 0x19, "CE2" },
	{ 0x1a, "XV" },
	{ 0x1b, "MMU_NB" },
	{ 0x1c, "MSENC" },
	{ 0x1d, "DFALCON" },
	{ 0x1e, "SKED" },
	{ 0x1f, "AFALCON" },
	{}
};

const struct nvkm_enum
gk104_fifo_fault_gpcclient[] = {
	{ 0x00, "L1_0" }, { 0x01, "T1_0" }, { 0x02, "PE_0" },
	{ 0x03, "L1_1" }, { 0x04, "T1_1" }, { 0x05, "PE_1" },
	{ 0x06, "L1_2" }, { 0x07, "T1_2" }, { 0x08, "PE_2" },
	{ 0x09, "L1_3" }, { 0x0a, "T1_3" }, { 0x0b, "PE_3" },
	{ 0x0c, "RAST" },
	{ 0x0d, "GCC" },
	{ 0x0e, "GPCCS" },
	{ 0x0f, "PROP_0" },
	{ 0x10, "PROP_1" },
	{ 0x11, "PROP_2" },
	{ 0x12, "PROP_3" },
	{ 0x13, "L1_4" }, { 0x14, "T1_4" }, { 0x15, "PE_4" },
	{ 0x16, "L1_5" }, { 0x17, "T1_5" }, { 0x18, "PE_5" },
	{ 0x19, "L1_6" }, { 0x1a, "T1_6" }, { 0x1b, "PE_6" },
	{ 0x1c, "L1_7" }, { 0x1d, "T1_7" }, { 0x1e, "PE_7" },
	{ 0x1f, "GPM" },
	{ 0x20, "LTP_UTLB_0" },
	{ 0x21, "LTP_UTLB_1" },
	{ 0x22, "LTP_UTLB_2" },
	{ 0x23, "LTP_UTLB_3" },
	{ 0x24, "GPC_RGG_UTLB" },
	{}
};

static const struct gk104_fifo_func
gk104_fifo = {
	.fault.access = gk104_fifo_fault_access,
	.fault.engine = gk104_fifo_fault_engine,
	.fault.reason = gk104_fifo_fault_reason,
	.fault.hubclient = gk104_fifo_fault_hubclient,
	.fault.gpcclient = gk104_fifo_fault_gpcclient,
	.runlist = &gk104_fifo_runlist,
	.chan = {{0,0,KEPLER_CHANNEL_GPFIFO_A}, gk104_fifo_gpfifo_new },
};

int
gk104_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
{
	return gk104_fifo_new_(&gk104_fifo, device, index, 4096, pfifo);
}