union { int id; int chid; }; /*FIXME: remove later */
+ spinlock_t lock;
+ atomic_t blocked;
+ atomic_t errored;
+
struct list_head cctxs;
struct nvkm_fifo *fifo;
struct list_head chan;
spinlock_t lock;
struct mutex mutex;
-
-#define NVKM_FIFO_EVENT_KILLED BIT(0)
- struct nvkm_event kevent; /* channel killed */
};
void nvkm_fifo_fault(struct nvkm_fifo *, struct nvkm_fault_data *);
return fifo->func->mmu_fault->recover(fifo, info);
}
-void
-nvkm_fifo_kevent(struct nvkm_fifo *fifo, int chid)
-{
- nvkm_event_ntfy(&fifo->kevent, chid, NVKM_FIFO_EVENT_KILLED);
-}
-
-static const struct nvkm_event_func
-nvkm_fifo_kevent_func = {
-};
-
static int
nvkm_fifo_class_new(struct nvkm_device *device, const struct nvkm_oclass *oclass,
void *argv, u32 argc, struct nvkm_object **pobject)
if (fifo->func->dtor)
data = fifo->func->dtor(fifo);
- nvkm_event_fini(&fifo->kevent);
nvkm_event_fini(&fifo->nonstall.event);
mutex_destroy(&fifo->mutex);
return data;
return ret;
}
- return nvkm_event_init(&nvkm_fifo_kevent_func, &fifo->engine.subdev, 1, nr, &fifo->kevent);
+ return 0;
}
#include "chan.h"
#include "chid.h"
#include "cgrp.h"
+#include "chid.h"
#include "runl.h"
#include "priv.h"
*/
if (cgrp->hw)
nvkm_runl_block(runl);
+ else
+ nvkm_chan_block(chan);
/* Update context pointer. */
if (cctx)
/* Resume normal operation. */
if (cgrp->hw)
nvkm_runl_allow(runl);
+ else
+ nvkm_chan_allow(chan);
}
void
return ret;
}
-static int
-nvkm_fifo_chan_uevent(struct nvkm_object *object, void *argv, u32 argc, struct nvkm_uevent *uevent)
-{
- struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
- union nvif_chan_event_args *args = argv;
-
- switch (args->v0.type) {
- case NVIF_CHAN_EVENT_V0_KILLED:
- return nvkm_uevent_add(uevent, &chan->fifo->kevent, chan->chid,
- NVKM_FIFO_EVENT_KILLED, NULL);
- default:
- break;
- }
-
- return -ENOSYS;
-}
-
static int
nvkm_fifo_chan_map(struct nvkm_object *object, void *argv, u32 argc,
enum nvkm_object_map *type, u64 *addr, u64 *size)
nvkm_fifo_chan_fini(struct nvkm_object *object, bool suspend)
{
struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
- chan->func->fini(chan);
+ if (chan->func->fini)
+ chan->func->fini(chan);
return 0;
}
nvkm_fifo_chan_init(struct nvkm_object *object)
{
struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
- chan->func->init(chan);
+ if (chan->func->init)
+ chan->func->init(chan);
return 0;
}
+/* Disable channel execution.  Blocking nests: only the 0->1 transition
+ * of chan->blocked invokes the HW stop() hook.  Callers hold chan->lock
+ * (see nvkm_chan_block()/nvkm_chan_error()).
+ */
+static void
+nvkm_chan_block_locked(struct nvkm_chan *chan)
+{
+ CHAN_TRACE(chan, "block %d", atomic_read(&chan->blocked));
+ if (atomic_inc_return(&chan->blocked) == 1)
+ chan->func->stop(chan);
+}
+
+/* Mark a channel as errored.  Only the first error acts: the channel is
+ * blocked (stopped) and NVKM_CHAN_EVENT_ERRORED is raised on the chid
+ * event, which backs the NVIF_CHAN_EVENT_V0_KILLED uevent.
+ * NOTE(review): 'preempt' is unused in this hunk — confirm it is
+ * consumed by a follow-up change.
+ */
+void
+nvkm_chan_error(struct nvkm_chan *chan, bool preempt)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ if (atomic_inc_return(&chan->errored) == 1) {
+ CHAN_ERROR(chan, "errored - disabling channel");
+ nvkm_chan_block_locked(chan);
+ nvkm_event_ntfy(&chan->cgrp->runl->chid->event, chan->id, NVKM_CHAN_EVENT_ERRORED);
+ }
+ spin_unlock_irqrestore(&chan->lock, flags);
+}
+
+/* Block (stop) a channel; may nest, pairs with nvkm_chan_allow(). */
+void
+nvkm_chan_block(struct nvkm_chan *chan)
+{
+ spin_lock_irq(&chan->lock);
+ nvkm_chan_block_locked(chan);
+ spin_unlock_irq(&chan->lock);
+}
+
+/* Undo one nvkm_chan_block(); the final unblock (blocked hits zero)
+ * restarts the channel via the HW start() hook.
+ */
+void
+nvkm_chan_allow(struct nvkm_chan *chan)
+{
+ spin_lock_irq(&chan->lock);
+ CHAN_TRACE(chan, "allow %d", atomic_read(&chan->blocked));
+ if (atomic_dec_and_test(&chan->blocked))
+ chan->func->start(chan);
+ spin_unlock_irq(&chan->lock);
+}
+
void
nvkm_chan_del(struct nvkm_chan **pchan)
{
.init = nvkm_fifo_chan_init,
.fini = nvkm_fifo_chan_fini,
.map = nvkm_fifo_chan_map,
- .uevent = nvkm_fifo_chan_uevent,
};
int
func->engine_fini = fn->engine_fini;
func->object_ctor = fn->object_ctor;
func->object_dtor = fn->object_dtor;
- func->submit_token = fn->submit_token;
chan->func = func;
chan->id = -1;
+ spin_lock_init(&chan->lock);
+ atomic_set(&chan->blocked, 1);
+ atomic_set(&chan->errored, 0);
nvkm_object_ctor(&nvkm_fifo_chan_func, oclass, &chan->object);
chan->fifo = fifo;
struct nvkm_chan_func {
void (*bind)(struct nvkm_chan *);
void (*unbind)(struct nvkm_chan *);
+ void (*start)(struct nvkm_chan *);
+ void (*stop)(struct nvkm_chan *);
+ u32 (*doorbell_handle)(struct nvkm_chan *);
void *(*dtor)(struct nvkm_fifo_chan *);
void (*init)(struct nvkm_fifo_chan *);
bool suspend);
int (*object_ctor)(struct nvkm_fifo_chan *, struct nvkm_object *);
void (*object_dtor)(struct nvkm_fifo_chan *, int);
- u32 (*submit_token)(struct nvkm_fifo_chan *);
};
int nvkm_fifo_chan_ctor(const struct nvkm_fifo_chan_func *, struct nvkm_fifo *,
u32 engm, int bar, u32 base, u32 user,
const struct nvkm_oclass *, struct nvkm_fifo_chan *);
void nvkm_chan_del(struct nvkm_chan **);
+void nvkm_chan_allow(struct nvkm_chan *);
+void nvkm_chan_block(struct nvkm_chan *);
+void nvkm_chan_error(struct nvkm_chan *, bool preempt);
int nvkm_chan_cctx_get(struct nvkm_chan *, struct nvkm_engn *, struct nvkm_cctx **,
struct nvkm_client * /*TODO: remove need for this */);
void nvkm_chan_cctx_put(struct nvkm_chan *, struct nvkm_cctx **);
extern const struct nvkm_fifo_chan_func nv04_fifo_dma_func;
void *nv04_fifo_dma_dtor(struct nvkm_fifo_chan *);
-void nv04_fifo_dma_init(struct nvkm_fifo_chan *);
-void nv04_fifo_dma_fini(struct nvkm_fifo_chan *);
void nv04_fifo_dma_object_dtor(struct nvkm_fifo_chan *, int);
extern const struct nvkm_fifo_chan_oclass nv04_fifo_dma_oclass;
{
struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
struct nv50_fifo *fifo = chan->fifo;
- struct nvkm_device *device = fifo->base.engine.subdev.device;
- u32 chid = chan->base.chid;
- /* remove channel from runlist, fifo will unload context */
- nvkm_mask(device, 0x002600 + (chid * 4), 0x80000000, 0x00000000);
nv50_fifo_runlist_update(fifo);
}
{
struct nv50_fifo_chan *chan = nv50_fifo_chan(base);
struct nv50_fifo *fifo = chan->fifo;
- struct nvkm_device *device = fifo->base.engine.subdev.device;
- u32 chid = chan->base.chid;
- nvkm_mask(device, 0x002600 + (chid * 4), 0x80000000, 0x80000000);
nv50_fifo_runlist_update(fifo);
}
const struct nvkm_fifo_chan_func
nv04_fifo_dma_func = {
.dtor = nv04_fifo_dma_dtor,
- .init = nv04_fifo_dma_init,
- .fini = nv04_fifo_dma_fini,
.object_ctor = nv04_fifo_dma_object_ctor,
.object_dtor = nv04_fifo_dma_object_dtor,
};
static const struct nvkm_fifo_chan_func
nv40_fifo_dma_func = {
.dtor = nv04_fifo_dma_dtor,
- .init = nv04_fifo_dma_init,
- .fini = nv04_fifo_dma_fini,
.engine_ctor = nv40_fifo_dma_engine_ctor,
.engine_dtor = nv40_fifo_dma_engine_dtor,
.engine_init = nv40_fifo_dma_engine_init,
g84_chan = {
.bind = g84_chan_bind,
.unbind = nv50_chan_unbind,
+ .start = nv50_chan_start,
+ .stop = nv50_chan_stop,
};
const struct nvkm_engn_func
#include <nvif/class.h>
+/* Disable the channel: clear bit0 of 0x003004 + chid*8 (the register
+ * write previously open-coded in gf100 gpfifo fini/kick paths).
+ */
+static void
+gf100_chan_stop(struct nvkm_chan *chan)
+{
+ struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device;
+
+ nvkm_mask(device, 0x003004 + (chan->id * 8), 0x00000001, 0x00000000);
+}
+
+/* Enable the channel: the 0x001f0001 write previously open-coded in
+ * gf100 gpfifo init.
+ */
+static void
+gf100_chan_start(struct nvkm_chan *chan)
+{
+ struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device;
+
+ nvkm_wr32(device, 0x003004 + (chan->id * 8), 0x001f0001);
+}
+
static void gf100_fifo_intr_engine(struct nvkm_fifo *);
static void
gf100_chan = {
.bind = gf100_chan_bind,
.unbind = gf100_chan_unbind,
+ .start = gf100_chan_start,
+ .stop = gf100_chan_stop,
};
static const struct nvkm_engn_func
struct gf100_fifo_chan *chan)
{
struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
- struct nvkm_device *device = subdev->device;
u32 chid = chan->base.chid;
int engi = gf100_fifo_engine_id(&fifo->base, engine);
engine->subdev.name, chid);
assert_spin_locked(&fifo->base.lock);
- nvkm_mask(device, 0x003004 + (chid * 0x08), 0x00000001, 0x00000000);
+ nvkm_chan_error(&chan->base, false);
list_del_init(&chan->head);
chan->killed = true;
if (engi >= 0 && engi != GF100_FIFO_ENGN_SW)
fifo->recover.mask |= BIT(engi);
schedule_work(&fifo->recover.work);
- nvkm_fifo_kevent(&fifo->base, chid);
}
static const struct nvkm_enum
#include <nvif/class.h>
+/* Disable the channel: set bit 0x800 of 0x800004 + chid*8, matching the
+ * write removed from gk104 gpfifo fini and the recover paths.
+ */
+void
+gk104_chan_stop(struct nvkm_chan *chan)
+{
+ struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device;
+
+ nvkm_mask(device, 0x800004 + (chan->id * 8), 0x00000800, 0x00000800);
+}
+
+/* Enable the channel: set bit 0x400 of 0x800004 + chid*8, matching the
+ * write removed from gk104 gpfifo init.
+ */
+void
+gk104_chan_start(struct nvkm_chan *chan)
+{
+ struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device;
+
+ nvkm_mask(device, 0x800004 + (chan->id * 8), 0x00000400, 0x00000400);
+}
+
void
gk104_chan_unbind(struct nvkm_chan *chan)
{
gk104_chan = {
.bind = gk104_chan_bind,
.unbind = gk104_chan_unbind,
+ .start = gk104_chan_start,
+ .stop = gk104_chan_stop,
};
void
chan = gk104_fifo_recover_chid(fifo, runl, chid);
if (chan) {
chan->killed = true;
- nvkm_fifo_kevent(&fifo->base, chid);
+ nvkm_chan_error(&chan->base, false);
}
- /* Disable channel. */
- nvkm_wr32(device, 0x800004 + (chid * 0x08), stat | 0x00000800);
- nvkm_warn(subdev, "channel %d: killed\n", chid);
-
/* Block channel assignments from changing during recovery. */
gk104_fifo_recover_runl(fifo, runl);
gk110_chan = {
.bind = gk104_chan_bind,
.unbind = gk104_chan_unbind,
+ .start = gk104_chan_start,
+ .stop = gk104_chan_stop,
};
const struct nvkm_cgrp_func
gm107_chan = {
.bind = gk104_chan_bind_inst,
.unbind = gk104_chan_unbind,
+ .start = gk104_chan_start,
+ .stop = gk104_chan_stop,
};
static void
{
struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
struct gf100_fifo *fifo = chan->fifo;
- struct nvkm_device *device = fifo->base.engine.subdev.device;
- u32 coff = chan->base.chid * 8;
if (!list_empty(&chan->head) && !chan->killed) {
gf100_fifo_runlist_remove(fifo, chan);
- nvkm_mask(device, 0x003004 + coff, 0x00000001, 0x00000000);
gf100_fifo_runlist_commit(fifo);
}
}
{
struct gf100_fifo_chan *chan = gf100_fifo_chan(base);
struct gf100_fifo *fifo = chan->fifo;
- struct nvkm_device *device = fifo->base.engine.subdev.device;
- u32 coff = chan->base.chid * 8;
if (list_empty(&chan->head) && !chan->killed) {
gf100_fifo_runlist_insert(fifo, chan);
- nvkm_wr32(device, 0x003004 + coff, 0x001f0001);
gf100_fifo_runlist_commit(fifo);
}
}
{
struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
struct gk104_fifo *fifo = chan->fifo;
- struct nvkm_device *device = fifo->base.engine.subdev.device;
- u32 coff = chan->base.chid * 8;
if (!list_empty(&chan->head)) {
gk104_fifo_runlist_remove(fifo, chan);
- nvkm_mask(device, 0x800004 + coff, 0x00000800, 0x00000800);
gk104_fifo_gpfifo_kick(chan);
gk104_fifo_runlist_update(fifo, chan->runl);
}
{
struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
struct gk104_fifo *fifo = chan->fifo;
- struct nvkm_device *device = fifo->base.engine.subdev.device;
- u32 coff = chan->base.chid * 8;
if (list_empty(&chan->head) && !chan->killed) {
gk104_fifo_runlist_insert(fifo, chan);
- nvkm_mask(device, 0x800004 + coff, 0x00000400, 0x00000400);
gk104_fifo_runlist_update(fifo, chan->runl);
- nvkm_mask(device, 0x800004 + coff, 0x00000400, 0x00000400);
}
}
#include <nvif/clc36f.h>
#include <nvif/unpack.h>
-static u32
-gv100_fifo_gpfifo_submit_token(struct nvkm_fifo_chan *chan)
-{
- return chan->chid;
-}
-
static int
gv100_fifo_gpfifo_engine_valid(struct gk104_fifo_chan *chan, bool ce, bool valid)
{
.engine_dtor = gk104_fifo_gpfifo_engine_dtor,
.engine_init = gv100_fifo_gpfifo_engine_init,
.engine_fini = gv100_fifo_gpfifo_engine_fini,
- .submit_token = gv100_fifo_gpfifo_submit_token,
};
int
*chid = chan->base.chid;
*inst = chan->base.inst->addr;
- *token = chan->base.func->submit_token(&chan->base);
+ *token = chan->base.func->doorbell_handle(&chan->base);
/* Hack to support GPUs where even individual channels should be
* part of a channel group.
#include <nvif/clc36f.h>
#include <nvif/unpack.h>
-static u32
-tu102_fifo_gpfifo_submit_token(struct nvkm_fifo_chan *base)
-{
- struct gk104_fifo_chan *chan = gk104_fifo_chan(base);
- return (chan->runl << 16) | chan->base.chid;
-}
-
static const struct nvkm_fifo_chan_func
tu102_fifo_gpfifo = {
.dtor = gk104_fifo_gpfifo_dtor,
.engine_dtor = gk104_fifo_gpfifo_engine_dtor,
.engine_init = gv100_fifo_gpfifo_engine_init,
.engine_fini = gv100_fifo_gpfifo_engine_fini,
- .submit_token = tu102_fifo_gpfifo_submit_token,
};
int
#include <nvif/class.h>
+/* GV100 doorbell token is just the channel ID — replaces the removed
+ * gv100_fifo_gpfifo_submit_token().
+ */
+static u32
+gv100_chan_doorbell_handle(struct nvkm_chan *chan)
+{
+ return chan->id;
+}
+
static const struct nvkm_chan_func
gv100_chan = {
.bind = gk104_chan_bind_inst,
.unbind = gk104_chan_unbind,
+ /* run-state control + usermode doorbell token */
+ .start = gk104_chan_start,
+ .stop = gk104_chan_stop,
+ .doorbell_handle = gv100_chan_doorbell_handle,
};
const struct nvkm_engn_func
};
void
-nv04_fifo_dma_fini(struct nvkm_fifo_chan *base)
+nv04_chan_stop(struct nvkm_chan *chan)
{
- struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
- struct nv04_fifo *fifo = chan->fifo;
+ struct nv04_fifo *fifo = nv04_fifo(chan->cgrp->runl->fifo);
struct nvkm_device *device = fifo->base.engine.subdev.device;
struct nvkm_memory *fctx = device->imem->ramfc;
const struct nv04_fifo_ramfc *c;
unsigned long flags;
- u32 data = chan->ramfc;
+ u32 data = nv04_fifo_chan(chan)->ramfc;
u32 chid;
/* prevent fifo context switches */
/* if this channel is active, replace it with a null context */
chid = nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH1) & fifo->base.chid->mask;
- if (chid == chan->base.chid) {
+ if (chid == chan->id) {
nvkm_mask(device, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0);
nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 0);
nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0);
}
/* restore normal operation, after disabling dma mode */
- nvkm_mask(device, NV04_PFIFO_MODE, 1 << chan->base.chid, 0);
+ nvkm_mask(device, NV04_PFIFO_MODE, BIT(chan->id), 0);
nvkm_wr32(device, NV03_PFIFO_CACHES, 1);
spin_unlock_irqrestore(&fifo->base.lock, flags);
}
+/* Enable PFIFO DMA mode for this channel (NV04_PFIFO_MODE bit = chid),
+ * under the fifo lock — replaces nv04_fifo_dma_init().
+ */
void
-nv04_fifo_dma_init(struct nvkm_fifo_chan *base)
+nv04_chan_start(struct nvkm_chan *chan)
{
- struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
- struct nv04_fifo *fifo = chan->fifo;
- struct nvkm_device *device = fifo->base.engine.subdev.device;
- u32 mask = 1 << chan->base.chid;
+ struct nvkm_fifo *fifo = chan->cgrp->runl->fifo;
unsigned long flags;
- spin_lock_irqsave(&fifo->base.lock, flags);
- nvkm_mask(device, NV04_PFIFO_MODE, mask, mask);
- spin_unlock_irqrestore(&fifo->base.lock, flags);
+
+ spin_lock_irqsave(&fifo->lock, flags);
+ nvkm_mask(fifo->engine.subdev.device, NV04_PFIFO_MODE, BIT(chan->id), BIT(chan->id));
+ spin_unlock_irqrestore(&fifo->lock, flags);
}
static const struct nvkm_chan_func
nv04_chan = {
+ /* run-state control via NV04_PFIFO_MODE */
+ .start = nv04_chan_start,
+ .stop = nv04_chan_stop,
};
const struct nvkm_cgrp_func
static const struct nvkm_chan_func
nv10_chan = {
+ /* reuses nv04 PFIFO_MODE-based run-state control */
+ .start = nv04_chan_start,
+ .stop = nv04_chan_stop,
};
int
static const struct nvkm_chan_func
nv17_chan = {
+ /* reuses nv04 PFIFO_MODE-based run-state control */
+ .start = nv04_chan_start,
+ .stop = nv04_chan_stop,
};
static void
static const struct nvkm_chan_func
nv40_chan = {
+ /* reuses nv04 PFIFO_MODE-based run-state control */
+ .start = nv04_chan_start,
+ .stop = nv04_chan_stop,
};
static const struct nvkm_engn_func
#include <nvif/class.h>
+/* Remove the channel from PFIFO: clear bit31 of 0x002600 + chid*4, as
+ * previously done inline in the nv50 fifo chan fini path.
+ */
+void
+nv50_chan_stop(struct nvkm_chan *chan)
+{
+ struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device;
+
+ nvkm_mask(device, 0x002600 + (chan->id * 4), 0x80000000, 0x00000000);
+}
+
+/* Re-activate the channel: set bit31 of 0x002600 + chid*4, as
+ * previously done inline in the nv50 fifo chan init path.
+ */
+void
+nv50_chan_start(struct nvkm_chan *chan)
+{
+ struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device;
+
+ nvkm_mask(device, 0x002600 + (chan->id * 4), 0x80000000, 0x80000000);
+}
+
void
nv50_chan_unbind(struct nvkm_chan *chan)
{
nv50_chan = {
.bind = nv50_chan_bind,
.unbind = nv50_chan_unbind,
+ .start = nv50_chan_start,
+ .stop = nv50_chan_stop,
};
static const struct nvkm_engn_func
struct gk104_fifo;
struct gk104_fifo_chan;
-void nvkm_fifo_kevent(struct nvkm_fifo *, int chid);
void nvkm_fifo_recover_chan(struct nvkm_fifo *, int chid);
struct nvkm_fifo_chan_oclass;
extern const struct nvkm_runl_func nv04_runl;
extern const struct nvkm_engn_func nv04_engn;
extern const struct nvkm_cgrp_func nv04_cgrp;
+void nv04_chan_start(struct nvkm_chan *);
+void nv04_chan_stop(struct nvkm_chan *);
int nv10_fifo_chid_nr(struct nvkm_fifo *);
int nv50_runl_wait(struct nvkm_runl *);
extern const struct nvkm_engn_func nv50_engn_sw;
void nv50_chan_unbind(struct nvkm_chan *);
+void nv50_chan_start(struct nvkm_chan *);
+void nv50_chan_stop(struct nvkm_chan *);
extern const struct nvkm_event_func g84_fifo_nonstall;
extern const struct nvkm_engn_func g84_engn;
void gk104_chan_bind(struct nvkm_chan *);
void gk104_chan_bind_inst(struct nvkm_chan *);
void gk104_chan_unbind(struct nvkm_chan *);
+void gk104_chan_start(struct nvkm_chan *);
+void gk104_chan_stop(struct nvkm_chan *);
int gk110_fifo_chid_ctor(struct nvkm_fifo *, int);
extern const struct nvkm_runl_func gk110_runl;
u32 addr;
struct nvkm_chid *cgid;
+#define NVKM_CHAN_EVENT_ERRORED BIT(0)
struct nvkm_chid *chid;
struct list_head engns;
#include <core/memory.h>
#include <subdev/mc.h>
+#include <subdev/vfn.h>
#include <nvif/class.h>
+/* Turing doorbell token encodes runlist in the high half: (runl << 16) |
+ * chid — replaces the removed tu102_fifo_gpfifo_submit_token().
+ */
+static u32
+tu102_chan_doorbell_handle(struct nvkm_chan *chan)
+{
+ return (chan->cgrp->runl->id << 16) | chan->id;
+}
+
+/* Start the channel (gk104 enable bit), then ring the usermode doorbell
+ * at vfn user region + 0x0090 with this channel's token.
+ * NOTE(review): relies on doorbell_handle being set in tu102_chan.
+ */
+static void
+tu102_chan_start(struct nvkm_chan *chan)
+{
+ struct nvkm_device *device = chan->cgrp->runl->fifo->engine.subdev.device;
+
+ gk104_chan_start(chan);
+ nvkm_wr32(device, device->vfn->addr.user + 0x0090, chan->func->doorbell_handle(chan));
+}
+
static const struct nvkm_chan_func
tu102_chan = {
.bind = gk104_chan_bind_inst,
.unbind = gk104_chan_unbind,
+ /* start also rings the usermode doorbell */
+ .start = tu102_chan_start,
+ .stop = gk104_chan_stop,
+ .doorbell_handle = tu102_chan_doorbell_handle,
};
static bool
chan = tu102_fifo_recover_chid(fifo, runl, chid);
if (chan) {
chan->killed = true;
- nvkm_fifo_kevent(&fifo->base, chid);
+ nvkm_chan_error(&chan->base, false);
}
- /* Disable channel. */
- nvkm_wr32(device, 0x800004 + (chid * 0x08), stat | 0x00000800);
- nvkm_warn(subdev, "channel %d: killed\n", chid);
-
/* Block channel assignments from changing during recovery. */
tu102_fifo_recover_runl(fifo, runl);
#define nvkm_uchan(p) container_of((p), struct nvkm_uchan, object)
#include "cgrp.h"
#include "chan.h"
+#include "chid.h"
#include "runl.h"
#include <core/oproxy.h>
return nvkm_uevent_add(uevent, &runl->fifo->nonstall.event, 0,
NVKM_FIFO_NONSTALL_EVENT, NULL);
case NVIF_CHAN_EVENT_V0_KILLED:
- return chan->object.func->uevent(&chan->object, argv, argc, uevent);
+ return nvkm_uevent_add(uevent, &runl->chid->event, chan->id,
+ NVKM_CHAN_EVENT_ERRORED, NULL);
default:
break;
}
struct nvkm_chan *chan = nvkm_uchan(object)->chan;
int ret;
+ nvkm_chan_block(chan);
+
ret = chan->object.func->fini(&chan->object, suspend);
if (ret && suspend)
return ret;
{
struct nvkm_chan *chan = nvkm_uchan(object)->chan;
+ if (atomic_read(&chan->errored))
+ return 0;
+
if (chan->func->bind)
chan->func->bind(chan);
+ nvkm_chan_allow(chan);
+
return chan->object.func->init(&chan->object);
}