void
nvkm_chan_cctx_bind(struct nvkm_chan *chan, struct nvkm_oproxy *oproxy, struct nvkm_cctx *cctx)
{
+ struct nvkm_cgrp *cgrp = chan->cgrp;
+ struct nvkm_runl *runl = cgrp->runl;
+
+ /* Prevent the channels in this channel group from being rescheduled,
+ * and kick them off the host and any engine(s) they're loaded on.
+ */
+ if (cgrp->hw)
+ nvkm_runl_block(runl);
+
/* Update context pointer. */
if (cctx)
nvkm_fifo_chan_child_init(nvkm_oproxy(oproxy->object));
else
nvkm_fifo_chan_child_fini(nvkm_oproxy(oproxy->object), false);
+
+ /* Resume normal operation. */
+ if (cgrp->hw)
+ nvkm_runl_allow(runl);
}
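
The function above establishes the pattern every user of the new interface follows: block the runlist (for channel groups that exist on the host, i.e. cgrp->hw), perform the update that must not race against the scheduler, then allow the runlist again. A minimal sketch of that bracketing, where update_fn is a hypothetical placeholder for the racy work, not a real nvkm callback:

	/* Hedged sketch of the block/allow bracket used by
	 * nvkm_chan_cctx_bind(); update_fn is illustrative only.
	 */
	static void
	example_update_under_block(struct nvkm_chan *chan,
				   void (*update_fn)(struct nvkm_chan *))
	{
		struct nvkm_runl *runl = chan->cgrp->runl;

		nvkm_runl_block(runl);	/* channels can no longer be rescheduled */
		update_fn(chan);	/* safe to update shared channel state */
		nvkm_runl_allow(runl);	/* resume normal scheduling */
	}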
void
.intr_0_names = gf100_runq_intr_0_names,
};
+static void
+gf100_runl_allow(struct nvkm_runl *runl, u32 engm)
+{
+ nvkm_mask(runl->fifo->engine.subdev.device, 0x002630, engm, 0x00000000);
+}
+
+static void
+gf100_runl_block(struct nvkm_runl *runl, u32 engm)
+{
+ nvkm_mask(runl->fifo->engine.subdev.device, 0x002630, engm, engm);
+}
+
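
On gf100, register 0x002630 carries one scheduling-disable bit per engine, so the helper applies the caller's engine mask (engm) directly. Assuming nvkm_mask() is nouveau's usual read-modify-write accessor (read, clear the mask bits, OR in the data, write back), the block path is roughly equivalent to:

	/* Open-coded sketch of gf100_runl_block(runl, engm); illustrative
	 * only, the nvkm_mask() form above is what the driver uses. For
	 * block, mask == data, so this reduces to setting the engm bits.
	 */
	static void
	gf100_runl_block_sketch(struct nvkm_runl *runl, u32 engm)
	{
		struct nvkm_device *device = runl->fifo->engine.subdev.device;
		u32 data = nvkm_rd32(device, 0x002630);

		nvkm_wr32(device, 0x002630, (data & ~engm) | engm);
	}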
static bool
gf100_runl_pending(struct nvkm_runl *runl)
{
gf100_runl = {
.wait = nv50_runl_wait,
.pending = gf100_runl_pending,
+ .block = gf100_runl_block,
+ .allow = gf100_runl_allow,
};
static void
.intr_0_names = gk104_runq_intr_0_names,
};
+void
+gk104_runl_allow(struct nvkm_runl *runl, u32 engm)
+{
+ nvkm_mask(runl->fifo->engine.subdev.device, 0x002630, BIT(runl->id), 0x00000000);
+}
+
+void
+gk104_runl_block(struct nvkm_runl *runl, u32 engm)
+{
+ nvkm_mask(runl->fifo->engine.subdev.device, 0x002630, BIT(runl->id), BIT(runl->id));
+}
+
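
From gk104 onward the same register is indexed by runlist rather than by engine, so these helpers derive the bit from runl->id and ignore engm entirely; the parameter exists only to satisfy the nvkm_runl_func signature shared with gf100. Illustratively:

	/* Both calls touch exactly BIT(runl->id) in 0x002630; the engine
	 * mask argument has no effect on gk104-era hardware.
	 */
	gk104_runl_block(runl, ~0);
	gk104_runl_block(runl, BIT(3));	/* identical register access */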
bool
gk104_runl_pending(struct nvkm_runl *runl)
{
gk104_runl = {
.wait = nv50_runl_wait,
.pending = gk104_runl_pending,
+ .block = gk104_runl_block,
+ .allow = gk104_runl_allow,
};
int
gk110_runl = {
.wait = nv50_runl_wait,
.pending = gk104_runl_pending,
+ .block = gk104_runl_block,
+ .allow = gk104_runl_allow,
};
int
gm107_runl = {
.wait = nv50_runl_wait,
.pending = gk104_runl_pending,
+ .block = gk104_runl_block,
+ .allow = gk104_runl_allow,
};
static const struct nvkm_enum
gp100_runl = {
.wait = nv50_runl_wait,
.pending = gk104_runl_pending,
+ .block = gk104_runl_block,
+ .allow = gk104_runl_allow,
};
static const struct nvkm_enum
static int
gv100_fifo_gpfifo_engine_valid(struct gk104_fifo_chan *chan, bool ce, bool valid)
{
- struct nvkm_subdev *subdev = &chan->base.fifo->engine.subdev;
- struct nvkm_device *device = subdev->device;
const u32 mask = ce ? 0x00020000 : 0x00010000;
const u32 data = valid ? mask : 0x00000000;
int ret;
/* Block runlist to prevent the channel from being rescheduled. */
mutex_lock(&chan->fifo->base.mutex);
- nvkm_mask(device, 0x002630, BIT(chan->runl), BIT(chan->runl));
+ nvkm_runl_block(chan->base.cgrp->runl);
/* Preempt the channel. */
ret = gk104_fifo_gpfifo_kick_locked(chan);
}
/* Resume runlist. */
- nvkm_mask(device, 0x002630, BIT(chan->runl), 0);
+ nvkm_runl_allow(chan->base.cgrp->runl);
mutex_unlock(&chan->fifo->base.mutex);
return ret;
}
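
With the raw 0x002630 pokes gone, the subdev/device locals become unused and are dropped. The ordering is what matters here: the runlist must be blocked before the preempt, otherwise host could reschedule the channel the moment the kick completes. In outline (the mutex serialises engine_valid callers; nvkm_runl_block() only takes fifo->lock internally, just long enough to flip the disable bit):

	mutex_lock(&chan->fifo->base.mutex);	/* serialise engine_valid updates */
	nvkm_runl_block(chan->base.cgrp->runl);	/* stop rescheduling first */
	/* ... preempt the channel, flip the engine-valid bits ... */
	nvkm_runl_allow(chan->base.cgrp->runl);
	mutex_unlock(&chan->fifo->base.mutex);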
gv100_runl = {
.wait = nv50_runl_wait,
.pending = gk104_runl_pending,
+ .block = gk104_runl_block,
+ .allow = gk104_runl_allow,
};
const struct nvkm_enum
};
void
-nv04_fifo_pause(struct nvkm_fifo *base, unsigned long *pflags)
-__acquires(fifo->base.lock)
+nv04_fifo_pause(struct nvkm_fifo *fifo, unsigned long *pflags)
+__acquires(fifo->lock)
{
- struct nv04_fifo *fifo = nv04_fifo(base);
- struct nvkm_device *device = fifo->base.engine.subdev.device;
+ struct nvkm_device *device = fifo->engine.subdev.device;
unsigned long flags;
- spin_lock_irqsave(&fifo->base.lock, flags);
+ spin_lock_irqsave(&fifo->lock, flags);
*pflags = flags;
nvkm_wr32(device, NV03_PFIFO_CACHES, 0x00000000);
}
void
-nv04_fifo_start(struct nvkm_fifo *base, unsigned long *pflags)
-__releases(fifo->base.lock)
+nv04_fifo_start(struct nvkm_fifo *fifo, unsigned long *pflags)
+__releases(fifo->lock)
{
- struct nv04_fifo *fifo = nv04_fifo(base);
- struct nvkm_device *device = fifo->base.engine.subdev.device;
+ struct nvkm_device *device = fifo->engine.subdev.device;
unsigned long flags = *pflags;
nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000001);
nvkm_wr32(device, NV03_PFIFO_CACHES, 0x00000001);
- spin_unlock_irqrestore(&fifo->base.lock, flags);
+ spin_unlock_irqrestore(&fifo->lock, flags);
}
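
This hunk is a pure de-layering cleanup: pause/start never touched nv04_fifo-specific state, so the nv04_fifo() downcast and the ->base indirection can go, and the sparse annotations now name the lock the functions actually take. A hedged usage sketch of the intended pairing, with pflags carrying the IRQ state between the two halves:

	/* Stop PFIFO cache pulls, touch state that must not race a
	 * channel switch, then restart.
	 */
	unsigned long flags;

	nv04_fifo_pause(fifo, &flags);	/* takes fifo->lock, disables caches */
	/* ... safely prod PFIFO registers ... */
	nv04_fifo_start(fifo, &flags);	/* re-enables caches, drops fifo->lock */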
const struct nvkm_runl_func
void gk104_fifo_recover_chan(struct nvkm_fifo *, int);
int gk104_fifo_engine_id(struct nvkm_fifo *, struct nvkm_engine *);
bool gk104_runl_pending(struct nvkm_runl *);
+void gk104_runl_block(struct nvkm_runl *, u32);
+void gk104_runl_allow(struct nvkm_runl *, u32);
extern const struct nvkm_runq_func gk104_runq;
void gk104_runq_init(struct nvkm_runq *);
bool gk104_runq_intr(struct nvkm_runq *, struct nvkm_runl *);
return true;
}
+void
+nvkm_runl_allow(struct nvkm_runl *runl)
+{
+ struct nvkm_fifo *fifo = runl->fifo;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fifo->lock, flags);
+ if (!--runl->blocked) {
+ RUNL_TRACE(runl, "running");
+ runl->func->allow(runl, ~0);
+ }
+ spin_unlock_irqrestore(&fifo->lock, flags);
+}
+
+void
+nvkm_runl_block(struct nvkm_runl *runl)
+{
+ struct nvkm_fifo *fifo = runl->fifo;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fifo->lock, flags);
+ if (!runl->blocked++) {
+ RUNL_TRACE(runl, "stopped");
+ runl->func->block(runl, ~0);
+ }
+ spin_unlock_irqrestore(&fifo->lock, flags);
+}
+
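
Note that blocked is a nesting count, not a flag: only the 0->1 transition writes the hardware disable bit and only the 1->0 transition clears it, so independent callers may overlap safely. For example:

	nvkm_runl_block(runl);	/* blocked 0->1: func->block() hits the register */
	nvkm_runl_block(runl);	/* blocked 1->2: no register access */
	nvkm_runl_allow(runl);	/* blocked 2->1: runlist stays blocked */
	nvkm_runl_allow(runl);	/* blocked 1->0: func->allow() re-enables */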
void
nvkm_runl_del(struct nvkm_runl *runl)
{
const struct nvkm_runl_func {
int (*wait)(struct nvkm_runl *);
bool (*pending)(struct nvkm_runl *);
+ void (*block)(struct nvkm_runl *, u32 engm);
+ void (*allow)(struct nvkm_runl *, u32 engm);
} *func;
struct nvkm_fifo *fifo;
int id;
int chan_nr;
struct mutex mutex;
+ int blocked; /* block/allow nesting depth; scheduling disabled while non-zero */
+
struct list_head head;
};
struct nvkm_engn *nvkm_runl_add(struct nvkm_runl *, int engi, const struct nvkm_engn_func *,
enum nvkm_subdev_type, int inst);
void nvkm_runl_del(struct nvkm_runl *);
+void nvkm_runl_block(struct nvkm_runl *);
+void nvkm_runl_allow(struct nvkm_runl *);
bool nvkm_runl_update_pending(struct nvkm_runl *);
struct nvkm_chan *nvkm_runl_chan_get_chid(struct nvkm_runl *, int chid, unsigned long *irqflags);
tu102_runl = {
.wait = nv50_runl_wait,
.pending = tu102_runl_pending,
+ .block = gk104_runl_block,
+ .allow = gk104_runl_allow,
};
static const struct nvkm_enum