Channel instance-block allocation and VMM setup are currently provided by the per-chipset {chan,dma,gpfifo}*.c implementations, and those are going away. Move the handling into the common channel code, driven by a static per-chipset nvkm_chan_func_inst description.
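
As a sketch of what each chipset now provides (modelled on the gf100 hunk below; "gxxx" is a placeholder, not a real chipset):

    const struct nvkm_chan_func_inst
    gxxx_chan_inst = {
        .size = 0x1000, /* instance block size, in bytes */
        .zero = true,   /* scrub the allocation before use */
        .vmm  = true,   /* join the channel's VMM to the block */
    };

    static const struct nvkm_chan_func
    gxxx_chan = {
        .inst = &gxxx_chan_inst,
        /* ... bind/unbind/start/stop hooks as before ... */
    };

The common code then allocates the instance block with nvkm_gpuobj_new() using these parameters (always 0x1000-aligned), and joins the channel's VMM to it when .vmm is set.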
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Reviewed-by: Lyude Paul <lyude@redhat.com>
struct nvkm_cgrp *cgrp;
int runq;
+ struct nvkm_gpuobj *inst;
+ struct nvkm_vmm *vmm;
union { int id; int chid; }; /*FIXME: remove later */
spinlock_t lock;
struct nvkm_object object;
struct list_head head;
- struct nvkm_gpuobj *inst;
struct nvkm_gpuobj *push;
- struct nvkm_vmm *vmm;
u64 addr;
u32 size;
chan->cgrp = nvkm_cgrp_ref(cgrp);
}
- /* instance memory */
- ret = nvkm_gpuobj_new(device, size, align, zero, NULL, &chan->inst);
- if (ret)
+ /* Allocate instance block. */
+ ret = nvkm_gpuobj_new(device, func->inst->size, 0x1000, func->inst->zero, NULL,
+ &chan->inst);
+ if (ret) {
+ RUNL_DEBUG(runl, "inst %d", ret);
return ret;
-
- /* allocate push buffer ctxdma instance */
- if (push) {
- dmaobj = nvkm_dmaobj_search(client, push);
- if (IS_ERR(dmaobj))
- return PTR_ERR(dmaobj);
-
- ret = nvkm_object_bind(&dmaobj->object, chan->inst, -16,
- &chan->push);
- if (ret)
- return ret;
}
- /* channel address space */
- if (hvmm) {
+	/* Initialise virtual address space. */
+ if (func->inst->vmm) {
struct nvkm_vmm *vmm = nvkm_uvmm_search(client, hvmm);
if (IS_ERR(vmm))
return PTR_ERR(vmm);
- if (vmm->mmu != device->mmu)
+ if (WARN_ON(vmm->mmu != device->mmu))
return -EINVAL;
ret = nvkm_vmm_join(vmm, chan->inst->memory);
- if (ret)
+ if (ret) {
+ RUNL_DEBUG(runl, "vmm %d", ret);
return ret;
+ }
chan->vmm = nvkm_vmm_ref(vmm);
}
+	/* Allocate push buffer ctxdma instance. */
+ if (push) {
+ dmaobj = nvkm_dmaobj_search(client, push);
+ if (IS_ERR(dmaobj))
+ return PTR_ERR(dmaobj);
+
+ ret = nvkm_object_bind(&dmaobj->object, chan->inst, -16,
+ &chan->push);
+ if (ret)
+ return ret;
+ }
+
/* Allocate channel ID. */
chan->id = nvkm_chid_get(runl->chid, chan);
if (chan->id < 0) {
};
struct nvkm_chan_func {
+	const struct nvkm_chan_func_inst {
+		u32 size;	/* instance block size; allocated 0x1000-aligned */
+		bool zero;	/* zero-fill the instance block at allocation */
+		bool vmm;	/* channel requires a VMM, joined to the instance block */
+	} *inst;
+
void (*bind)(struct nvkm_chan *);
void (*unbind)(struct nvkm_chan *);
void (*start)(struct nvkm_chan *);
const struct nvkm_chan_func
g84_chan = {
+ .inst = &nv50_chan_inst,
.bind = g84_chan_bind,
.unbind = nv50_chan_unbind,
.start = nv50_chan_start,
nvkm_wr32(device, 0x003000 + (chan->id * 8), 0xc0000000 | chan->inst->addr >> 12);
}
+const struct nvkm_chan_func_inst
+gf100_chan_inst = {
+ .size = 0x1000,
+ .zero = true,
+ .vmm = true,
+};
+
static const struct nvkm_chan_func
gf100_chan = {
+ .inst = &gf100_chan_inst,
.bind = gf100_chan_bind,
.unbind = gf100_chan_unbind,
.start = gf100_chan_start,
static const struct nvkm_chan_func
gk104_chan = {
+ .inst = &gf100_chan_inst,
.bind = gk104_chan_bind,
.unbind = gk104_chan_unbind,
.start = gk104_chan_start,
const struct nvkm_chan_func
gk110_chan = {
+ .inst = &gf100_chan_inst,
.bind = gk104_chan_bind,
.unbind = gk104_chan_unbind,
.start = gk104_chan_start,
const struct nvkm_chan_func
gm107_chan = {
+ .inst = &gf100_chan_inst,
.bind = gk104_chan_bind_inst,
.unbind = gk104_chan_unbind,
.start = gk104_chan_start,
static const struct nvkm_chan_func
gv100_chan = {
+ .inst = &gf100_chan_inst,
.bind = gk104_chan_bind_inst,
.unbind = gk104_chan_unbind,
.start = gk104_chan_start,
spin_unlock_irqrestore(&fifo->lock, flags);
}
+const struct nvkm_chan_func_inst
+nv04_chan_inst = {
+ .size = 0x1000,
+};
+
static const struct nvkm_chan_func
nv04_chan = {
+ .inst = &nv04_chan_inst,
.start = nv04_chan_start,
.stop = nv04_chan_stop,
};
static const struct nvkm_chan_func
nv10_chan = {
+ .inst = &nv04_chan_inst,
.start = nv04_chan_start,
.stop = nv04_chan_stop,
};
static const struct nvkm_chan_func
nv17_chan = {
+ .inst = &nv04_chan_inst,
.start = nv04_chan_start,
.stop = nv04_chan_stop,
};
static const struct nvkm_chan_func
nv40_chan = {
+ .inst = &nv04_chan_inst,
.start = nv04_chan_start,
.stop = nv04_chan_stop,
};
nvkm_wr32(device, 0x002600 + (chan->id * 4), nv50_fifo_chan(chan)->ramfc->addr >> 12);
}
+const struct nvkm_chan_func_inst
+nv50_chan_inst = {
+ .size = 0x10000,
+ .vmm = true,
+};
+
static const struct nvkm_chan_func
nv50_chan = {
+ .inst = &nv50_chan_inst,
.bind = nv50_chan_bind,
.unbind = nv50_chan_unbind,
.start = nv50_chan_start,
extern const struct nvkm_runl_func nv04_runl;
extern const struct nvkm_engn_func nv04_engn;
extern const struct nvkm_cgrp_func nv04_cgrp;
+extern const struct nvkm_chan_func_inst nv04_chan_inst;
void nv04_chan_start(struct nvkm_chan *);
void nv04_chan_stop(struct nvkm_chan *);
int nv50_runl_update(struct nvkm_runl *);
int nv50_runl_wait(struct nvkm_runl *);
extern const struct nvkm_engn_func nv50_engn_sw;
+extern const struct nvkm_chan_func_inst nv50_chan_inst;
void nv50_chan_unbind(struct nvkm_chan *);
void nv50_chan_start(struct nvkm_chan *);
void nv50_chan_stop(struct nvkm_chan *);
void gf100_engn_mmu_fault_trigger(struct nvkm_engn *);
bool gf100_engn_mmu_fault_triggered(struct nvkm_engn *);
extern const struct nvkm_engn_func gf100_engn_sw;
+extern const struct nvkm_chan_func_inst gf100_chan_inst;
void gf100_chan_preempt(struct nvkm_chan *);
int gk104_fifo_chid_nr(struct nvkm_fifo *);
static const struct nvkm_chan_func
tu102_chan = {
+ .inst = &gf100_chan_inst,
.bind = gk104_chan_bind_inst,
.unbind = gk104_chan_unbind,
.start = tu102_chan_start,