goto free_chunk;
}
- mutex_lock(&p->ctx->lock);
-
/* skip guilty context job */
if (atomic_read(&p->ctx->guilty) == 1) {
ret = -ECANCELED;
goto free_chunk;
}
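
The hunk above only drops the locking; the guilty-context check itself is unchanged. A minimal stand-alone sketch of that pattern (plain C11 atomics and hypothetical names such as sub_ctx/submit_begin, not the driver code): a context flagged guilty, e.g. after it caused a GPU reset, has any new submission refused with -ECANCELED before work is queued.

#include <errno.h>
#include <stdatomic.h>

/* Hypothetical stand-in for the context: only the guilty flag matters here. */
struct sub_ctx {
	atomic_int guilty;	/* set once the context caused a GPU hang */
};

/* Refuse new submissions from a guilty context, as the check above does. */
static int submit_begin(struct sub_ctx *ctx)
{
	if (atomic_load(&ctx->guilty) == 1)
		return -ECANCELED;
	return 0;
}
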
dma_fence_put(parser->fence);
if (parser->ctx) {
- mutex_unlock(&parser->ctx->lock);
amdgpu_ctx_put(parser->ctx);
}
if (parser->bo_list)
amdgpu_bo_list_put(parser->bo_list);

static int amdgpu_cs_dependencies(struct amdgpu_device *adev, struct amdgpu_cs_parser *p)
{
int i, r;

- /* TODO: Investigate why we still need the context lock */
- mutex_unlock(&p->ctx->lock);
-
for (i = 0; i < p->nchunks; ++i) {
struct amdgpu_cs_chunk *chunk;

chunk = &p->chunks[i];

switch (chunk->chunk_id) {
case AMDGPU_CHUNK_ID_DEPENDENCIES:
case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
r = amdgpu_cs_process_fence_dep(p, chunk);
if (r)
- goto out;
+ return r;
break;
case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
r = amdgpu_cs_process_syncobj_in_dep(p, chunk);
if (r)
- goto out;
+ return r;
break;
case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
r = amdgpu_cs_process_syncobj_out_dep(p, chunk);
if (r)
- goto out;
+ return r;
break;
case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
r = amdgpu_cs_process_syncobj_timeline_in_dep(p, chunk);
if (r)
- goto out;
+ return r;
break;
case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
r = amdgpu_cs_process_syncobj_timeline_out_dep(p, chunk);
if (r)
- goto out;
+ return r;
break;
}
}
-out:
- mutex_lock(&p->ctx->lock);
- return r;
+ return 0;
}
static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
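
With the unlock/relock bracket gone, amdgpu_cs_dependencies() no longer needs a single exit point: each failing chunk handler can return its error directly instead of funneling through the old out: label just to re-take ctx->lock, and the success path returns 0 explicitly. A minimal sketch of the two shapes (plain pthreads, hypothetical names deps_old/deps_new/process_one, not the driver code):

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Hypothetical per-chunk work; fails on negative input. */
static int process_one(int item)
{
	return item < 0 ? -1 : 0;
}

/* Old shape: entered with 'lock' held, dropped around the loop; every
 * exit must re-take the lock, so errors funnel through one label. */
static int deps_old(const int *items, int n)
{
	int i, r = 0;

	pthread_mutex_unlock(&lock);
	for (i = 0; i < n; ++i) {
		r = process_one(items[i]);
		if (r)
			goto out;
	}
out:
	pthread_mutex_lock(&lock);
	return r;
}

/* New shape: no lock to rebalance, so each failure returns directly. */
static int deps_new(const int *items, int n)
{
	int i, r;

	for (i = 0; i < n; ++i) {
		r = process_one(items[i]);
		if (r)
			return r;
	}
	return 0;
}
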
goto out;
r = amdgpu_cs_submit(&parser, cs);
-
out:
amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
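
In amdgpu_cs_ioctl() above, amdgpu_cs_parser_fini() runs on success and failure alike; under the removed scheme that call was also what released the ctx->lock taken back in amdgpu_cs_parser_init(), so the critical section spanned several functions. A sketch of why such cross-function lock ownership is fragile (plain pthreads, hypothetical stand-ins parser_init/parser_fini/cs_ioctl, not the driver code):

#include <pthread.h>

struct ctx {
	pthread_mutex_t lock;	/* assumed initialized elsewhere */
};

/* Old scheme: init() takes the lock ... */
static int parser_init(struct ctx *c)
{
	pthread_mutex_lock(&c->lock);
	return 0;
}

/* ... and only fini() drops it again. */
static void parser_fini(struct ctx *c)
{
	pthread_mutex_unlock(&c->lock);
}

static int cs_ioctl(struct ctx *c)
{
	int r = parser_init(c);

	if (r)
		return r;

	/* Every exit from here on must reach parser_fini(), or the
	 * lock leaks; removing the lock removes that constraint. */
	parser_fini(c);
	return 0;
}
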
kref_init(&ctx->refcount);
ctx->mgr = mgr;
spin_lock_init(&ctx->ring_lock);
- mutex_init(&ctx->lock);
ctx->reset_counter = atomic_read(&mgr->adev->gpu_reset_counter);
ctx->reset_counter_query = ctx->reset_counter;

drm_dev_exit(idx);
}
- mutex_destroy(&ctx->lock);
kfree(ctx);
}
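
Removing the mutex member also removes its whole lifecycle: the mutex_init() dropped from amdgpu_ctx_init() above pairs with the mutex_destroy() dropped from amdgpu_ctx_fini(). A stand-alone sketch of that init/teardown symmetry (plain pthreads, hypothetical names my_ctx/ctx_create/ctx_destroy, not the driver code):

#include <pthread.h>
#include <stdlib.h>

struct my_ctx {
	pthread_spinlock_t ring_lock;
	/* pthread_mutex_t lock;  (the removed member lived here) */
};

static struct my_ctx *ctx_create(void)
{
	struct my_ctx *c = calloc(1, sizeof(*c));

	if (!c)
		return NULL;
	pthread_spin_init(&c->ring_lock, PTHREAD_PROCESS_PRIVATE);
	/* pthread_mutex_init(&c->lock, NULL);  (removed with the member) */
	return c;
}

static void ctx_destroy(struct my_ctx *c)
{
	/* pthread_mutex_destroy(&c->lock);  (must be removed too) */
	pthread_spin_destroy(&c->ring_lock);
	free(c);
}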