#include <nvif/cl0080.h>
#include <nvif/unpack.h>
-void
-nvkm_fifo_recover_chan(struct nvkm_fifo *fifo, int chid)
-{
-	unsigned long flags;
-	if (WARN_ON(!fifo->func->recover_chan))
-		return;
-	spin_lock_irqsave(&fifo->lock, flags);
-	fifo->func->recover_chan(fifo, chid);
-	spin_unlock_irqrestore(&fifo->lock, flags);
-}
-
void
nvkm_fifo_pause(struct nvkm_fifo *fifo, unsigned long *flags)
{
struct nvkm_cgrp {
	const struct nvkm_cgrp_func {
+		void (*preempt)(struct nvkm_cgrp *);
	} *func;
	char name[64];
	struct nvkm_runl *runl;
		nvkm_runl_block(runl);
	else
		nvkm_chan_block(chan);
+	nvkm_chan_preempt(chan, true);
	/* Update context pointer. */
	if (cctx)
	return ret;
}
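+/* Preempt a channel off host.  The caller must hold the runlist mutex; with
+ * wait=true, block until the preempt request completes or times out.
+ */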
+int
+nvkm_chan_preempt_locked(struct nvkm_chan *chan, bool wait)
+{
+	struct nvkm_runl *runl = chan->cgrp->runl;
+
+	CHAN_TRACE(chan, "preempt");
+	chan->func->preempt(chan);
+	if (!wait)
+		return 0;
+
+	return nvkm_runl_preempt_wait(runl);
+}
+
+/* As above, but takes the runlist mutex itself; channels without a preempt
+ * method are skipped.
+ */
+int
+nvkm_chan_preempt(struct nvkm_chan *chan, bool wait)
+{
+	int ret;
+
+	if (!chan->func->preempt)
+		return 0;
+
+	mutex_lock(&chan->cgrp->runl->mutex);
+	ret = nvkm_chan_preempt_locked(chan, wait);
+	mutex_unlock(&chan->cgrp->runl->mutex);
+	return ret;
+}
+
+
static int
nvkm_fifo_chan_map(struct nvkm_object *object, void *argv, u32 argc,
		   enum nvkm_object_map *type, u64 *addr, u64 *size)
	if (atomic_inc_return(&chan->errored) == 1) {
		CHAN_ERROR(chan, "errored - disabling channel");
		nvkm_chan_block_locked(chan);
+		if (preempt)
+			chan->func->preempt(chan);
		nvkm_event_ntfy(&chan->cgrp->runl->chid->event, chan->id, NVKM_CHAN_EVENT_ERRORED);
	}
	spin_unlock_irqrestore(&chan->lock, flags);
		void (*unbind)(struct nvkm_chan *);
		void (*start)(struct nvkm_chan *);
		void (*stop)(struct nvkm_chan *);
+		void (*preempt)(struct nvkm_chan *);
		u32 (*doorbell_handle)(struct nvkm_chan *);
	void *(*dtor)(struct nvkm_fifo_chan *);
void nvkm_chan_allow(struct nvkm_chan *);
void nvkm_chan_block(struct nvkm_chan *);
void nvkm_chan_error(struct nvkm_chan *, bool preempt);
+int nvkm_chan_preempt(struct nvkm_chan *, bool wait);
+int nvkm_chan_preempt_locked(struct nvkm_chan *, bool wait);
int nvkm_chan_cctx_get(struct nvkm_chan *, struct nvkm_engn *, struct nvkm_cctx **,
		       struct nvkm_client * /*TODO: remove need for this */);
void nvkm_chan_cctx_put(struct nvkm_chan *, struct nvkm_cctx **);
struct nvkm_object *);
void gk104_fifo_gpfifo_engine_dtor(struct nvkm_fifo_chan *,
				   struct nvkm_engine *);
-int gk104_fifo_gpfifo_kick(struct gk104_fifo_chan *);
-int gk104_fifo_gpfifo_kick_locked(struct gk104_fifo_chan *);
int gv100_fifo_gpfifo_new(struct gk104_fifo *, const struct nvkm_oclass *,
			  void *data, u32 size, struct nvkm_object **);
#include <nvif/class.h>
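+/* Writing a channel ID to host register 0x002634 requests preemption of that
+ * channel.
+ */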
+void
+gf100_chan_preempt(struct nvkm_chan *chan)
+{
+	nvkm_wr32(chan->cgrp->runl->fifo->engine.subdev.device, 0x002634, chan->id);
+}
+
static void
gf100_chan_stop(struct nvkm_chan *chan)
{
	.unbind = gf100_chan_unbind,
	.start = gf100_chan_start,
	.stop = gf100_chan_stop,
+	.preempt = gf100_chan_preempt,
};
static const struct nvkm_engn_func
	.intr_0_names = gf100_runq_intr_0_names,
};
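+/* Bit 20 of 0x002634 stays set while a preempt request is still outstanding. */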
+bool
+gf100_runl_preempt_pending(struct nvkm_runl *runl)
+{
+	return nvkm_rd32(runl->fifo->engine.subdev.device, 0x002634) & 0x00100000;
+}
+
static void
gf100_runl_allow(struct nvkm_runl *runl, u32 engm)
{
	.pending = gf100_runl_pending,
	.block = gf100_runl_block,
	.allow = gf100_runl_allow,
+	.preempt_pending = gf100_runl_preempt_pending,
};
static void
	.unbind = gk104_chan_unbind,
	.start = gk104_chan_start,
	.stop = gk104_chan_stop,
+	.preempt = gf100_chan_preempt,
};
void
	.pending = gk104_runl_pending,
	.block = gk104_runl_block,
	.allow = gk104_runl_allow,
+	.preempt_pending = gf100_runl_preempt_pending,
};
int
#include "changk104.h"
#include <core/memory.h>
+#include <subdev/timer.h>
#include <nvif/class.h>
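+/* A channel that belongs to a hardware channel group (TSG) must be preempted
+ * via its group; standalone channels use the per-channel method.
+ */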
+void
+gk110_chan_preempt(struct nvkm_chan *chan)
+{
+	struct nvkm_cgrp *cgrp = chan->cgrp;
+
+	if (cgrp->hw) {
+		cgrp->func->preempt(cgrp);
+		return;
+	}
+
+	gf100_chan_preempt(chan);
+}
+
const struct nvkm_chan_func
gk110_chan = {
	.bind = gk104_chan_bind,
	.unbind = gk104_chan_unbind,
	.start = gk104_chan_start,
	.stop = gk104_chan_stop,
+	.preempt = gk110_chan_preempt,
};
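+/* Bit 24 of 0x002634 selects preemption of a whole channel group (TSG) by
+ * group ID, rather than of a single channel.
+ */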
+static void
+gk110_cgrp_preempt(struct nvkm_cgrp *cgrp)
+{
+	nvkm_wr32(cgrp->runl->fifo->engine.subdev.device, 0x002634, 0x01000000 | cgrp->id);
+}
+
const struct nvkm_cgrp_func
gk110_cgrp = {
+	.preempt = gk110_cgrp_preempt,
};
void
	.pending = gk104_runl_pending,
	.block = gk104_runl_block,
	.allow = gk104_runl_allow,
+	.preempt_pending = gf100_runl_preempt_pending,
};
int
	.unbind = gk104_chan_unbind,
	.start = gk104_chan_start,
	.stop = gk104_chan_stop,
+	.preempt = gk110_chan_preempt,
};
static void
	.pending = gk104_runl_pending,
	.block = gk104_runl_block,
	.allow = gk104_runl_allow,
+	.preempt_pending = gf100_runl_preempt_pending,
};
static const struct nvkm_enum
	.pending = gk104_runl_pending,
	.block = gk104_runl_block,
	.allow = gk104_runl_allow,
+	.preempt_pending = gf100_runl_preempt_pending,
};
static const struct nvkm_enum
{
	const u32 offset = gf100_fifo_gpfifo_engine_addr(engine);
	struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
-	struct nvkm_subdev *subdev = &chan->fifo->base.engine.subdev;
-	struct nvkm_device *device = subdev->device;
	struct nvkm_gpuobj *inst = chan->base.inst;
	int ret = 0;
-	mutex_lock(&chan->fifo->base.mutex);
-	nvkm_wr32(device, 0x002634, chan->base.chid);
-	if (nvkm_msec(device, 2000,
-		if (nvkm_rd32(device, 0x002634) == chan->base.chid)
-			break;
-	) < 0) {
-		nvkm_error(subdev, "channel %d [%s] kick timeout\n",
-			   chan->base.chid, chan->base.object.client->name);
-		ret = -ETIMEDOUT;
-	}
-	mutex_unlock(&chan->fifo->base.mutex);
-
-	if (ret && suspend)
-		return ret;
-
	if (offset) {
		nvkm_kmap(inst);
		nvkm_wo32(inst, offset + 0x00, 0x00000000);
#include <nvif/cla06f.h>
#include <nvif/unpack.h>
-int
-gk104_fifo_gpfifo_kick_locked(struct gk104_fifo_chan *chan)
-{
-	struct gk104_fifo *fifo = chan->fifo;
-	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
-	struct nvkm_device *device = subdev->device;
-	struct nvkm_client *client = chan->base.object.client;
-	struct nvkm_fifo_cgrp *cgrp = chan->cgrp;
-	int ret = 0;
-
-	if (cgrp)
-		nvkm_wr32(device, 0x002634, cgrp->id | 0x01000000);
-	else
-		nvkm_wr32(device, 0x002634, chan->base.chid);
-	if (nvkm_msec(device, 2000,
-		if (!(nvkm_rd32(device, 0x002634) & 0x00100000))
-			break;
-	) < 0) {
-		nvkm_error(subdev, "%s %d [%s] kick timeout\n",
-			   cgrp ? "tsg" : "channel",
-			   cgrp ? cgrp->id : chan->base.chid, client->name);
-		nvkm_fifo_recover_chan(&fifo->base, chan->base.chid);
-		ret = -ETIMEDOUT;
-	}
-	return ret;
-}
-
-int
-gk104_fifo_gpfifo_kick(struct gk104_fifo_chan *chan)
-{
-	int ret;
-	mutex_lock(&chan->base.fifo->mutex);
-	ret = gk104_fifo_gpfifo_kick_locked(chan);
-	mutex_unlock(&chan->base.fifo->mutex);
-	return ret;
-}
-
static u32
gk104_fifo_gpfifo_engine_addr(struct nvkm_engine *engine)
{
	struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
	struct nvkm_gpuobj *inst = chan->base.inst;
	u32 offset = gk104_fifo_gpfifo_engine_addr(engine);
-	int ret;
-
-	ret = gk104_fifo_gpfifo_kick(chan);
-	if (ret && suspend)
-		return ret;
	if (offset) {
		nvkm_kmap(inst);
		nvkm_done(inst);
	}
-	return ret;
+	return 0;
}
static int
	if (!list_empty(&chan->head)) {
		gk104_fifo_runlist_remove(fifo, chan);
-		gk104_fifo_gpfifo_kick(chan);
		gk104_fifo_runlist_update(fifo, chan->runl);
	}
}
{
	const u32 mask = ce ? 0x00020000 : 0x00010000;
	const u32 data = valid ? mask : 0x00000000;
-	int ret;
-
-	/* Block runlist to prevent the channel from being rescheduled. */
-	mutex_lock(&chan->fifo->base.mutex);
-	/* Preempt the channel. */
-	ret = gk104_fifo_gpfifo_kick_locked(chan);
-	if (ret == 0) {
+	if (1) { /* Preemption is now handled by the core's nvkm_chan_preempt(). */
		/* Update engine context validity. */
		nvkm_kmap(chan->base.inst);
		nvkm_mo32(chan->base.inst, 0x0ac, mask, data);
		nvkm_done(chan->base.inst);
	}
-	/* Resume runlist. */
-	mutex_unlock(&chan->fifo->base.mutex);
-	return ret;
+	return 0;
}
int
	.unbind = gk104_chan_unbind,
	.start = gk104_chan_start,
	.stop = gk104_chan_stop,
+	.preempt = gk110_chan_preempt,
	.doorbell_handle = gv100_chan_doorbell_handle,
};
	.pending = gk104_runl_pending,
	.block = gk104_runl_block,
	.allow = gk104_runl_allow,
+	.preempt_pending = gf100_runl_preempt_pending,
};
const struct nvkm_enum
struct gk104_fifo;
struct gk104_fifo_chan;
-void nvkm_fifo_recover_chan(struct nvkm_fifo *, int chid);
-
struct nvkm_fifo_chan_oclass;
struct nvkm_fifo_func {
	void *(*dtor)(struct nvkm_fifo *);
void nv50_chan_unbind(struct nvkm_chan *);
void nv50_chan_start(struct nvkm_chan *);
void nv50_chan_stop(struct nvkm_chan *);
+void nv50_chan_preempt(struct nvkm_chan *);
extern const struct nvkm_event_func g84_fifo_nonstall;
extern const struct nvkm_engn_func g84_engn;
void gf100_fifo_mmu_fault_recover(struct nvkm_fifo *, struct nvkm_fault_data *);
extern const struct nvkm_enum gf100_fifo_mmu_fault_access[];
extern const struct nvkm_event_func gf100_fifo_nonstall;
+bool gf100_runl_preempt_pending(struct nvkm_runl *);
void gf100_runq_init(struct nvkm_runq *);
bool gf100_runq_intr(struct nvkm_runq *, struct nvkm_runl *);
extern const struct nvkm_engn_func gf100_engn_sw;
+void gf100_chan_preempt(struct nvkm_chan *);
int gk104_fifo_chid_nr(struct nvkm_fifo *);
int gk104_fifo_runl_ctor(struct nvkm_fifo *);
extern const struct nvkm_runl_func gk110_runl;
extern const struct nvkm_cgrp_func gk110_cgrp;
extern const struct nvkm_chan_func gk110_chan;
+void gk110_chan_preempt(struct nvkm_chan *);
extern const struct nvkm_runq_func gk208_runq;
void gk208_runq_init(struct nvkm_runq *);
#include "priv.h"
#include <core/gpuobj.h>
+#include <subdev/timer.h>
#include <subdev/top.h>
struct nvkm_chan *
	return NULL;
}
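+/* Poll the runlist until no preempt request is pending, or until the channel
+ * timeout expires.
+ */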
+int
+nvkm_runl_preempt_wait(struct nvkm_runl *runl)
+{
+	return nvkm_msec(runl->fifo->engine.subdev.device, runl->fifo->timeout.chan_msec,
+		if (!runl->func->preempt_pending(runl))
+			break;
+
+		usleep_range(1, 2);
+	) < 0 ? -ETIMEDOUT : 0;
+}
+
bool
nvkm_runl_update_pending(struct nvkm_runl *runl)
{
		bool (*pending)(struct nvkm_runl *);
		void (*block)(struct nvkm_runl *, u32 engm);
		void (*allow)(struct nvkm_runl *, u32 engm);
+		bool (*preempt_pending)(struct nvkm_runl *);
	} *func;
	struct nvkm_fifo *fifo;
	int id;
void nvkm_runl_block(struct nvkm_runl *);
void nvkm_runl_allow(struct nvkm_runl *);
bool nvkm_runl_update_pending(struct nvkm_runl *);
+int nvkm_runl_preempt_wait(struct nvkm_runl *);
struct nvkm_chan *nvkm_runl_chan_get_chid(struct nvkm_runl *, int chid, unsigned long *irqflags);
struct nvkm_chan *nvkm_runl_chan_get_inst(struct nvkm_runl *, u64 inst, unsigned long *irqflags);
	.unbind = gk104_chan_unbind,
	.start = tu102_chan_start,
	.stop = gk104_chan_stop,
+	.preempt = gk110_chan_preempt,
	.doorbell_handle = tu102_chan_doorbell_handle,
};
	.pending = tu102_runl_pending,
	.block = gk104_runl_block,
	.allow = gk104_runl_allow,
+	.preempt_pending = gf100_runl_preempt_pending,
};
static const struct nvkm_enum
	int ret;
	nvkm_chan_block(chan);
+	nvkm_chan_preempt(chan, true);
	ret = chan->object.func->fini(&chan->object, suspend);
	if (ret && suspend)