job = p->job;
p->job = NULL;
- r = drm_sched_job_init(&job->base, &ring->sched, entity, p->filp);
+ r = drm_sched_job_init(&job->base, entity, p->filp);
if (r) {
amdgpu_job_free(job);
amdgpu_mn_unlock(p->mn);
@@ ... @@ amdgpu_ctx_init
failed:
for (j = 0; j < i; j++)
- drm_sched_entity_destroy(&adev->rings[j]->sched,
- &ctx->rings[j].entity);
+ drm_sched_entity_destroy(&ctx->rings[j].entity);
kfree(ctx->fences);
ctx->fences = NULL;
return r;
@@ ... @@ amdgpu_ctx_do_release
if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
continue;
- drm_sched_entity_destroy(&ctx->adev->rings[i]->sched,
- &ctx->rings[i].entity);
+ drm_sched_entity_destroy(&ctx->rings[i].entity);
}
amdgpu_ctx_fini(ref);
@@ ... @@ amdgpu_ctx_mgr_entity_flush
if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
continue;
- max_wait = drm_sched_entity_flush(&ctx->adev->rings[i]->sched,
- &ctx->rings[i].entity, max_wait);
+ max_wait = drm_sched_entity_flush(&ctx->rings[i].entity,
+ max_wait);
}
}
mutex_unlock(&mgr->lock);
@@ ... @@ amdgpu_ctx_mgr_entity_fini
continue;
if (kref_read(&ctx->refcount) == 1)
- drm_sched_entity_fini(&ctx->adev->rings[i]->sched,
- &ctx->rings[i].entity);
+ drm_sched_entity_fini(&ctx->rings[i].entity);
else
DRM_ERROR("ctx %p is still alive\n", ctx);
}
@@ ... @@ amdgpu_job_submit
if (!f)
return -EINVAL;
- r = drm_sched_job_init(&job->base, entity->sched, entity, owner);
+ r = drm_sched_job_init(&job->base, entity, owner);
if (r)
return r;
@@ ... @@ amdgpu_ttm_set_buffer_funcs_status
return;
}
} else {
- drm_sched_entity_destroy(adev->mman.entity.sched,
- &adev->mman.entity);
+ drm_sched_entity_destroy(&adev->mman.entity);
dma_fence_put(man->move);
man->move = NULL;
}
@@ ... @@ amdgpu_uvd_sw_fini
{
int i, j;
- drm_sched_entity_destroy(&adev->uvd.inst->ring.sched,
- &adev->uvd.entity);
+ drm_sched_entity_destroy(&adev->uvd.entity);
for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
kfree(adev->uvd.inst[j].saved_bo);
@@ ... @@ amdgpu_vce_sw_fini
if (adev->vce.vcpu_bo == NULL)
return 0;
- drm_sched_entity_destroy(&adev->vce.ring[0].sched, &adev->vce.entity);
+ drm_sched_entity_destroy(&adev->vce.entity);
amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr,
(void **)&adev->vce.cpu_addr);
@@ ... @@ amdgpu_vm_init
vm->root.base.bo = NULL;
error_free_sched_entity:
- drm_sched_entity_destroy(&ring->sched, &vm->entity);
+ drm_sched_entity_destroy(&vm->entity);
return r;
}
@@ ... @@ amdgpu_vm_fini
spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
}
- drm_sched_entity_destroy(vm->entity.sched, &vm->entity);
+ drm_sched_entity_destroy(&vm->entity);
if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
dev_err(adev->dev, "still active bo inside vm\n");
@@ ... @@ etnaviv_postclose
gpu->lastctx = NULL;
mutex_unlock(&gpu->lock);
- drm_sched_entity_destroy(&gpu->sched,
- &ctx->sched_entity[i]);
+ drm_sched_entity_destroy(&ctx->sched_entity[i]);
}
}
@@ ... @@ etnaviv_sched_push_job
{
int ret;
- ret = drm_sched_job_init(&submit->sched_job, &submit->gpu->sched,
- sched_entity, submit->cmdbuf.ctx);
+ ret = drm_sched_job_init(&submit->sched_job, sched_entity,
+ submit->cmdbuf.ctx);
if (ret)
return ret;
@@ ... @@ drm_sched_entity_flush
*
* Returns the remaining time in jiffies from the input timeout
*/
-long drm_sched_entity_flush(struct drm_gpu_scheduler *sched,
- struct drm_sched_entity *entity, long timeout)
+long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
{
+ struct drm_gpu_scheduler *sched;
long ret = timeout;
+ sched = entity->sched;
if (!drm_sched_entity_is_initialized(sched, entity))
return ret;
@@ ... @@ drm_sched_entity_fini
/**
* Goes over the entity and signals all jobs with an error code if the process was killed.
*
*/
-void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
- struct drm_sched_entity *entity)
+void drm_sched_entity_fini(struct drm_sched_entity *entity)
{
+ struct drm_gpu_scheduler *sched;
+ sched = entity->sched;
drm_sched_entity_set_rq(entity, NULL);
/* Consumption of existing IBs wasn't completed. Forcefully remove them here. */
@@ ... @@ drm_sched_entity_destroy
*
* Calls drm_sched_entity_flush() and drm_sched_entity_fini()
*/
-void drm_sched_entity_destroy(struct drm_gpu_scheduler *sched,
- struct drm_sched_entity *entity)
+void drm_sched_entity_destroy(struct drm_sched_entity *entity)
{
- drm_sched_entity_flush(sched, entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
- drm_sched_entity_fini(sched, entity);
+ drm_sched_entity_flush(entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
+ drm_sched_entity_fini(entity);
}
EXPORT_SYMBOL(drm_sched_entity_destroy);
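Review aside: the call-site change is uniform across the driver hunks above; every teardown path goes from a (scheduler, entity) pair to the entity alone, with no behavioral difference. A representative before/after, modeled on the amdgpu_vm hunk (an illustration, not an extra change in this patch):

/* before: the caller had to name the scheduler the entity was created on */
drm_sched_entity_destroy(&ring->sched, &vm->entity);

/* after: the entity already records its scheduler, so one argument suffices */
drm_sched_entity_destroy(&vm->entity);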
@@ ... @@ drm_sched_job_init
* Returns 0 for success, negative error code otherwise.
*/
int drm_sched_job_init(struct drm_sched_job *job,
- struct drm_gpu_scheduler *sched,
struct drm_sched_entity *entity,
void *owner)
{
+ struct drm_gpu_scheduler *sched = entity->sched;
+
job->sched = sched;
job->entity = entity;
job->s_priority = entity->rq - sched->sched_rq;
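Note on the s_priority line above: entity->rq points into the scheduler's sched_rq[] array, so the subtraction recovers the priority index via pointer arithmetic. A sketch (the specific priority value is an assumption for illustration):

/* if entity->rq == &sched->sched_rq[DRM_SCHED_PRIORITY_NORMAL], then
 * entity->rq - sched->sched_rq == DRM_SCHED_PRIORITY_NORMAL
 */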
@@ ... @@
static void
v3d_postclose(struct drm_device *dev, struct drm_file *file)
{
- struct v3d_dev *v3d = to_v3d_dev(dev);
struct v3d_file_priv *v3d_priv = file->driver_priv;
enum v3d_queue q;
for (q = 0; q < V3D_MAX_QUEUES; q++) {
- drm_sched_entity_destroy(&v3d->queue[q].sched,
- &v3d_priv->sched_entity[q]);
+ drm_sched_entity_destroy(&v3d_priv->sched_entity[q]);
}
kfree(v3d_priv);
@@ ... @@ v3d_submit_cl_ioctl
mutex_lock(&v3d->sched_lock);
if (exec->bin.start != exec->bin.end) {
ret = drm_sched_job_init(&exec->bin.base,
- &v3d->queue[V3D_BIN].sched,
&v3d_priv->sched_entity[V3D_BIN],
v3d_priv);
if (ret)
}
ret = drm_sched_job_init(&exec->render.base,
- &v3d->queue[V3D_RENDER].sched,
&v3d_priv->sched_entity[V3D_RENDER],
v3d_priv);
if (ret)
@@ ... @@ drm_sched_entity_init
struct drm_sched_rq **rq_list,
unsigned int num_rq_list,
atomic_t *guilty);
-long drm_sched_entity_flush(struct drm_gpu_scheduler *sched,
- struct drm_sched_entity *entity, long timeout);
-void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
- struct drm_sched_entity *entity);
-void drm_sched_entity_destroy(struct drm_gpu_scheduler *sched,
- struct drm_sched_entity *entity);
+long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout);
+void drm_sched_entity_fini(struct drm_sched_entity *entity);
+void drm_sched_entity_destroy(struct drm_sched_entity *entity);
void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
struct drm_sched_entity *entity);
void drm_sched_entity_set_rq(struct drm_sched_entity *entity,
@@ ... @@
void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
void drm_sched_fence_finished(struct drm_sched_fence *fence);
int drm_sched_job_init(struct drm_sched_job *job,
- struct drm_gpu_scheduler *sched,
struct drm_sched_entity *entity,
void *owner);
void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched,
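Putting the reworked surface together: once drm_sched_entity_init() has bound an entity to its run queue, the scheduler never has to be spelled out again. A minimal lifecycle sketch against the prototypes above (my_entity, my_job, file_priv and the single normal-priority run queue are assumed names for illustration, not code from this patch):

struct drm_sched_rq *rq = &sched->sched_rq[DRM_SCHED_PRIORITY_NORMAL];
int r;

/* init: the entity records its run queue, and thus its scheduler */
r = drm_sched_entity_init(&my_entity, &rq, 1, NULL);
if (r)
return r;

/* submit: the scheduler is derived from the entity */
r = drm_sched_job_init(&my_job->base, &my_entity, file_priv);
if (r)
return r;
drm_sched_entity_push_job(&my_job->base, &my_entity);

/* teardown: destroy = flush(MAX_WAIT_SCHED_ENTITY_Q_EMPTY) + fini */
drm_sched_entity_destroy(&my_entity);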