drm/nouveau/fifo: add runlist block()/allow()
author     Ben Skeggs <bskeggs@redhat.com>
           Wed, 1 Jun 2022 10:47:31 +0000 (20:47 +1000)
committer  Ben Skeggs <bskeggs@redhat.com>
           Wed, 9 Nov 2022 00:44:48 +0000 (10:44 +1000)
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Reviewed-by: Lyude Paul <lyude@redhat.com>
13 files changed:
drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
drivers/gpu/drm/nouveau/nvkm/engine/fifo/runl.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/runl.h
drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c

drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
index f250b0f..748f3f1 100644
@@ -211,11 +211,24 @@ nvkm_fifo_chan_child_new(const struct nvkm_oclass *oclass, void *data, u32 size,
 void
 nvkm_chan_cctx_bind(struct nvkm_chan *chan, struct nvkm_oproxy *oproxy, struct nvkm_cctx *cctx)
 {
+       struct nvkm_cgrp *cgrp = chan->cgrp;
+       struct nvkm_runl *runl = cgrp->runl;
+
+       /* Prevent the channels in the channel group from being rescheduled, and
+        * kick them off the host and any engine(s) they're loaded on.
+        */
+       if (cgrp->hw)
+               nvkm_runl_block(runl);
+
        /* Update context pointer. */
        if (cctx)
                nvkm_fifo_chan_child_init(nvkm_oproxy(oproxy->object));
        else
                nvkm_fifo_chan_child_fini(nvkm_oproxy(oproxy->object), false);
+
+       /* Resume normal operation. */
+       if (cgrp->hw)
+               nvkm_runl_allow(runl);
 }
 
 void
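
The hunk above brackets the context-pointer update with nvkm_runl_block()/nvkm_runl_allow() whenever the channel group is backed by hardware (cgrp->hw), so the channel cannot be rescheduled while its engine context is being switched. A minimal sketch of that bracketing pattern follows; update_while_blocked() and do_context_update() are hypothetical names standing in for nvkm_chan_cctx_bind() and its child init/fini calls, not something introduced by this patch.

    /* Sketch only: the "quiesce the runlist around an update" shape used by
     * nvkm_chan_cctx_bind() above.  do_context_update() is a hypothetical
     * placeholder for the real child init/fini work.
     */
    static void
    update_while_blocked(struct nvkm_chan *chan)
    {
            struct nvkm_cgrp *cgrp = chan->cgrp;
            struct nvkm_runl *runl = cgrp->runl;

            if (cgrp->hw)                   /* only hardware channel groups need this */
                    nvkm_runl_block(runl);  /* take a reference; stop rescheduling */

            do_context_update(chan);        /* placeholder for the real work */

            if (cgrp->hw)
                    nvkm_runl_allow(runl);  /* drop the reference; resume at zero */
    }
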
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gf100.c
index 70a2609..fcfc241 100644
@@ -115,6 +115,18 @@ gf100_runq = {
        .intr_0_names = gf100_runq_intr_0_names,
 };
 
+static void
+gf100_runl_allow(struct nvkm_runl *runl, u32 engm)
+{
+       nvkm_mask(runl->fifo->engine.subdev.device, 0x002630, engm, 0x00000000);
+}
+
+static void
+gf100_runl_block(struct nvkm_runl *runl, u32 engm)
+{
+       nvkm_mask(runl->fifo->engine.subdev.device, 0x002630, engm, engm);
+}
+
 static bool
 gf100_runl_pending(struct nvkm_runl *runl)
 {
@@ -181,6 +193,8 @@ static const struct nvkm_runl_func
 gf100_runl = {
        .wait = nv50_runl_wait,
        .pending = gf100_runl_pending,
+       .block = gf100_runl_block,
+       .allow = gf100_runl_allow,
 };
 
 static void
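
Both hooks above rely on nvkm_mask() for a read-modify-write of the scheduling-disable register at 0x002630. Assuming nvkm_mask() keeps its usual nvkm_rd32()/nvkm_wr32() definition, the call is roughly equivalent to the open-coded sketch below (the helper name is illustrative only):

    /* Rough open-coded equivalent of nvkm_mask(device, 0x002630, mask, data),
     * shown for illustration; assumes the usual read-modify-write semantics of
     * the helper.
     */
    static u32
    example_mask_0x2630(struct nvkm_device *device, u32 mask, u32 data)
    {
            u32 prev = nvkm_rd32(device, 0x002630);             /* read current disable bits */

            nvkm_wr32(device, 0x002630, (prev & ~mask) | data); /* update only the masked bits */
            return prev;                                        /* nvkm_mask() returns the old value */
    }

gf100_runl_block() passes engm as both mask and data, setting the per-engine disable bits, while gf100_runl_allow() passes zero data to clear them again.
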
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
index c816654..12aebf8 100644
@@ -197,6 +197,18 @@ gk104_runq = {
        .intr_0_names = gk104_runq_intr_0_names,
 };
 
+void
+gk104_runl_allow(struct nvkm_runl *runl, u32 engm)
+{
+       nvkm_mask(runl->fifo->engine.subdev.device, 0x002630, BIT(runl->id), 0x00000000);
+}
+
+void
+gk104_runl_block(struct nvkm_runl *runl, u32 engm)
+{
+       nvkm_mask(runl->fifo->engine.subdev.device, 0x002630, BIT(runl->id), BIT(runl->id));
+}
+
 bool
 gk104_runl_pending(struct nvkm_runl *runl)
 {
@@ -306,6 +318,8 @@ static const struct nvkm_runl_func
 gk104_runl = {
        .wait = nv50_runl_wait,
        .pending = gk104_runl_pending,
+       .block = gk104_runl_block,
+       .allow = gk104_runl_allow,
 };
 
 int
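
Note that on gk104 and later the engm argument is accepted only to match the nvkm_runl_func interface; both hooks toggle the single per-runlist bit BIT(runl->id) in 0x002630 rather than per-engine bits. For a runlist with id 2, for example, the calls reduce to the sketch below (the wrapper name is hypothetical):

    /* Illustration only: what gk104_runl_block()/gk104_runl_allow() boil down
     * to for runl->id == 2, independent of the engm value passed in.
     */
    static void
    example_toggle_runlist2(struct nvkm_device *device, bool block)
    {
            const u32 bit = BIT(2);         /* runlist-disable bit for runlist 2 */

            nvkm_mask(device, 0x002630, bit, block ? bit : 0x00000000);
    }
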
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk110.c
index bcc78b3..134de3c 100644
@@ -62,6 +62,8 @@ const struct nvkm_runl_func
 gk110_runl = {
        .wait = nv50_runl_wait,
        .pending = gk104_runl_pending,
+       .block = gk104_runl_block,
+       .allow = gk104_runl_allow,
 };
 
 int
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gm107.c
index b6a2210..d3b2aa7 100644
@@ -56,6 +56,8 @@ const struct nvkm_runl_func
 gm107_runl = {
        .wait = nv50_runl_wait,
        .pending = gk104_runl_pending,
+       .block = gk104_runl_block,
+       .allow = gk104_runl_allow,
 };
 
 static const struct nvkm_enum
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c
index b810175..4dd3fb0 100644
@@ -33,6 +33,8 @@ static const struct nvkm_runl_func
 gp100_runl = {
        .wait = nv50_runl_wait,
        .pending = gk104_runl_pending,
+       .block = gk104_runl_block,
+       .allow = gk104_runl_allow,
 };
 
 static const struct nvkm_enum
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogv100.c
index 34a8e79..6e74fdc 100644
@@ -37,15 +37,12 @@ gv100_fifo_gpfifo_submit_token(struct nvkm_fifo_chan *chan)
 static int
 gv100_fifo_gpfifo_engine_valid(struct gk104_fifo_chan *chan, bool ce, bool valid)
 {
-       struct nvkm_subdev *subdev = &chan->base.fifo->engine.subdev;
-       struct nvkm_device *device = subdev->device;
        const u32 mask = ce ? 0x00020000 : 0x00010000;
        const u32 data = valid ? mask : 0x00000000;
        int ret;
 
        /* Block runlist to prevent the channel from being rescheduled. */
        mutex_lock(&chan->fifo->base.mutex);
-       nvkm_mask(device, 0x002630, BIT(chan->runl), BIT(chan->runl));
 
        /* Preempt the channel. */
        ret = gk104_fifo_gpfifo_kick_locked(chan);
@@ -57,7 +54,6 @@ gv100_fifo_gpfifo_engine_valid(struct gk104_fifo_chan *chan, bool ce, bool valid
        }
 
        /* Resume runlist. */
-       nvkm_mask(device, 0x002630, BIT(chan->runl), 0);
        mutex_unlock(&chan->fifo->base.mutex);
        return ret;
 }
drivers/gpu/drm/nouveau/nvkm/engine/fifo/gv100.c
index 7ad04e9..c147c26 100644
@@ -86,6 +86,8 @@ static const struct nvkm_runl_func
 gv100_runl = {
        .wait = nv50_runl_wait,
        .pending = gk104_runl_pending,
+       .block = gk104_runl_block,
+       .allow = gk104_runl_allow,
 };
 
 const struct nvkm_enum
drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c
index 85f22fc..446e65d 100644
@@ -129,14 +129,13 @@ nv04_engn = {
 };
 
 void
-nv04_fifo_pause(struct nvkm_fifo *base, unsigned long *pflags)
-__acquires(fifo->base.lock)
+nv04_fifo_pause(struct nvkm_fifo *fifo, unsigned long *pflags)
+__acquires(fifo->lock)
 {
-       struct nv04_fifo *fifo = nv04_fifo(base);
-       struct nvkm_device *device = fifo->base.engine.subdev.device;
+       struct nvkm_device *device = fifo->engine.subdev.device;
        unsigned long flags;
 
-       spin_lock_irqsave(&fifo->base.lock, flags);
+       spin_lock_irqsave(&fifo->lock, flags);
        *pflags = flags;
 
        nvkm_wr32(device, NV03_PFIFO_CACHES, 0x00000000);
@@ -165,17 +164,16 @@ __acquires(fifo->base.lock)
 }
 
 void
-nv04_fifo_start(struct nvkm_fifo *base, unsigned long *pflags)
-__releases(fifo->base.lock)
+nv04_fifo_start(struct nvkm_fifo *fifo, unsigned long *pflags)
+__releases(fifo->lock)
 {
-       struct nv04_fifo *fifo = nv04_fifo(base);
-       struct nvkm_device *device = fifo->base.engine.subdev.device;
+       struct nvkm_device *device = fifo->engine.subdev.device;
        unsigned long flags = *pflags;
 
        nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000001);
        nvkm_wr32(device, NV03_PFIFO_CACHES, 0x00000001);
 
-       spin_unlock_irqrestore(&fifo->base.lock, flags);
+       spin_unlock_irqrestore(&fifo->lock, flags);
 }
 
 const struct nvkm_runl_func
drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
index bab3cfb..16fe77e 100644
@@ -134,6 +134,8 @@ extern const struct nvkm_enum gk104_fifo_mmu_fault_gpcclient[];
 void gk104_fifo_recover_chan(struct nvkm_fifo *, int);
 int gk104_fifo_engine_id(struct nvkm_fifo *, struct nvkm_engine *);
 bool gk104_runl_pending(struct nvkm_runl *);
+void gk104_runl_block(struct nvkm_runl *, u32);
+void gk104_runl_allow(struct nvkm_runl *, u32);
 extern const struct nvkm_runq_func gk104_runq;
 void gk104_runq_init(struct nvkm_runq *);
 bool gk104_runq_intr(struct nvkm_runq *, struct nvkm_runl *);
drivers/gpu/drm/nouveau/nvkm/engine/fifo/runl.c
index 0e1d703..438e884 100644
@@ -82,6 +82,34 @@ nvkm_runl_update_pending(struct nvkm_runl *runl)
        return true;
 }
 
+void
+nvkm_runl_allow(struct nvkm_runl *runl)
+{
+       struct nvkm_fifo *fifo = runl->fifo;
+       unsigned long flags;
+
+       spin_lock_irqsave(&fifo->lock, flags);
+       if (!--runl->blocked) {
+               RUNL_TRACE(runl, "running");
+               runl->func->allow(runl, ~0);
+       }
+       spin_unlock_irqrestore(&fifo->lock, flags);
+}
+
+void
+nvkm_runl_block(struct nvkm_runl *runl)
+{
+       struct nvkm_fifo *fifo = runl->fifo;
+       unsigned long flags;
+
+       spin_lock_irqsave(&fifo->lock, flags);
+       if (!runl->blocked++) {
+               RUNL_TRACE(runl, "stopped");
+               runl->func->block(runl, ~0);
+       }
+       spin_unlock_irqrestore(&fifo->lock, flags);
+}
+
 void
 nvkm_runl_del(struct nvkm_runl *runl)
 {
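
nvkm_runl_block() and nvkm_runl_allow() are reference counted under fifo->lock: only the 0 -> 1 transition calls runl->func->block(), and only the 1 -> 0 transition calls runl->func->allow(), so nested callers compose safely. A small sketch of that contract (the function name is illustrative only):

    /* Sketch of the reference-counted contract implemented above; hardware is
     * touched only on the outermost block/allow pair.
     */
    static void
    example_nested_block(struct nvkm_runl *runl)
    {
            nvkm_runl_block(runl);  /* blocked: 0 -> 1, runl->func->block(runl, ~0) runs */
            nvkm_runl_block(runl);  /* blocked: 1 -> 2, hardware untouched */

            /* ... work that needs the runlist to stay quiescent ... */

            nvkm_runl_allow(runl);  /* blocked: 2 -> 1, hardware untouched */
            nvkm_runl_allow(runl);  /* blocked: 1 -> 0, runl->func->allow(runl, ~0) runs */
    }
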
drivers/gpu/drm/nouveau/nvkm/engine/fifo/runl.h
index 7682731..aebd8a7 100644
@@ -26,6 +26,8 @@ struct nvkm_runl {
        const struct nvkm_runl_func {
                int (*wait)(struct nvkm_runl *);
                bool (*pending)(struct nvkm_runl *);
+               void (*block)(struct nvkm_runl *, u32 engm);
+               void (*allow)(struct nvkm_runl *, u32 engm);
        } *func;
        struct nvkm_fifo *fifo;
        int id;
@@ -44,6 +46,8 @@ struct nvkm_runl {
        int chan_nr;
        struct mutex mutex;
 
+       int blocked;
+
        struct list_head head;
 };
 
@@ -52,6 +56,8 @@ struct nvkm_runl *nvkm_runl_get(struct nvkm_fifo *, int runi, u32 addr);
 struct nvkm_engn *nvkm_runl_add(struct nvkm_runl *, int engi, const struct nvkm_engn_func *,
                                enum nvkm_subdev_type, int inst);
 void nvkm_runl_del(struct nvkm_runl *);
+void nvkm_runl_block(struct nvkm_runl *);
+void nvkm_runl_allow(struct nvkm_runl *);
 bool nvkm_runl_update_pending(struct nvkm_runl *);
 
 struct nvkm_chan *nvkm_runl_chan_get_chid(struct nvkm_runl *, int chid, unsigned long *irqflags);
drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c
index f0564fa..7d3c9d8 100644
@@ -68,6 +68,8 @@ static const struct nvkm_runl_func
 tu102_runl = {
        .wait = nv50_runl_wait,
        .pending = tu102_runl_pending,
+       .block = gk104_runl_block,
+       .allow = gk104_runl_allow,
 };
 
 static const struct nvkm_enum