#ifdef CONFIG_DEBUG_MUTEXES
/**
- * dma_resv_reset_shared_max - reset shared fences for debugging
+ * dma_resv_reset_max_fences - reset shared fences for debugging
* @obj: the dma_resv object to reset
*
* Reset the number of pre-reserved shared slots to test that drivers do
* correct slot allocation using dma_resv_reserve_fences(). See also
* &dma_resv_list.shared_max.
*/
-void dma_resv_reset_shared_max(struct dma_resv *obj)
+void dma_resv_reset_max_fences(struct dma_resv *obj)
{
struct dma_resv_list *fences = dma_resv_shared_list(obj);
if (fences)
fences->shared_max = fences->shared_count;
}
-EXPORT_SYMBOL(dma_resv_reset_shared_max);
+EXPORT_SYMBOL(dma_resv_reset_max_fences);
#endif
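Resetting shared_max on unlock makes over-allocation bugs deterministic to catch. As a rough, hypothetical illustration (example_buggy_reservation() is not part of the patch): only one slot is reserved, so the second add exceeds the reservation and hits the debug check in the add path when CONFIG_DEBUG_MUTEXES is enabled.

static void example_buggy_reservation(struct dma_resv *resv,
				      struct dma_fence *a,
				      struct dma_fence *b)
{
	if (dma_resv_lock(resv, NULL))
		return;

	/* Reserve room for a single fence only ... */
	if (!dma_resv_reserve_fences(resv, 1)) {
		dma_resv_add_fence(resv, a, DMA_RESV_USAGE_READ);

		/* ... so this second add exceeds the reservation and is
		 * caught with CONFIG_DEBUG_MUTEXES enabled.
		 */
		dma_resv_add_fence(resv, b, DMA_RESV_USAGE_READ);
	}

	dma_resv_unlock(resv);
}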
/**
*
* See also &dma_resv.fence for a discussion of the semantics.
*/
-void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
+static void dma_resv_add_shared_fence(struct dma_resv *obj,
+ struct dma_fence *fence)
{
struct dma_resv_list *fobj;
struct dma_fence *old;
write_seqcount_end(&obj->seq);
dma_fence_put(old);
}
-EXPORT_SYMBOL(dma_resv_add_shared_fence);
/**
* dma_resv_replace_fences - replace fences in the dma_resv obj
* @obj: the reservation object
* @context: the context of the fences to replace
* @replacement: the new fence to use instead
+ * @usage: how the new fence is used, see enum dma_resv_usage
*
* Replace fences with a specified context with a new fence. Only valid if the
* operation represented by the original fence no longer has access to the
* resources protected by the dma_resv object once the new fence completes.
*
* An example is replacing a preemption fence with a page table update fence,
* which makes the resource inaccessible to the original operation.
*/
void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
- struct dma_fence *replacement)
+ struct dma_fence *replacement,
+ enum dma_resv_usage usage)
{
struct dma_resv_list *list;
struct dma_fence *old;
unsigned int i;
+ /* Only readers supported for now */
+ WARN_ON(usage != DMA_RESV_USAGE_READ);
+
dma_resv_assert_held(obj);
write_seqcount_begin(&obj->seq);
* Add a fence to the exclusive slot. @obj must be locked with dma_resv_lock().
* See also &dma_resv.fence_excl for a discussion of the semantics.
*/
-void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
+static void dma_resv_add_excl_fence(struct dma_resv *obj,
+ struct dma_fence *fence)
{
struct dma_fence *old_fence = dma_resv_excl_fence(obj);
dma_fence_put(old_fence);
}
-EXPORT_SYMBOL(dma_resv_add_excl_fence);
+
+/**
+ * dma_resv_add_fence - Add a fence to the dma_resv obj
+ * @obj: the reservation object
+ * @fence: the fence to add
+ * @usage: how the fence is used, see enum dma_resv_usage
+ *
+ * Add a fence to a slot. @obj must be locked with dma_resv_lock() and
+ * dma_resv_reserve_fences() must have been called.
+ *
+ * See also &dma_resv.fence for a discussion of the semantics.
+ */
+void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence,
+ enum dma_resv_usage usage)
+{
+ if (usage == DMA_RESV_USAGE_WRITE)
+ dma_resv_add_excl_fence(obj, fence);
+ else
+ dma_resv_add_shared_fence(obj, fence);
+}
+EXPORT_SYMBOL(dma_resv_add_fence);
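As a rough, driver-side illustration (not part of the patch, names hypothetical): where code previously picked between dma_resv_add_excl_fence() and dma_resv_add_shared_fence(), it now reserves a slot and passes the usage explicitly.

static int example_attach_fence(struct dma_resv *resv,
				struct dma_fence *fence, bool is_write)
{
	int ret;

	ret = dma_resv_lock(resv, NULL);
	if (ret)
		return ret;

	/* A slot must have been reserved before the fence is added. */
	ret = dma_resv_reserve_fences(resv, 1);
	if (!ret)
		dma_resv_add_fence(resv, fence,
				   is_write ? DMA_RESV_USAGE_WRITE :
				   DMA_RESV_USAGE_READ);

	dma_resv_unlock(resv);
	return ret;
}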
/* Restart the iterator by initializing all the necessary fields, but not the
* relation to the dma_resv object. */
}
dma_fence_get(f);
- if (dma_resv_iter_is_exclusive(&cursor))
+ if (dma_resv_iter_usage(&cursor) == DMA_RESV_USAGE_WRITE)
excl = f;
else
RCU_INIT_POINTER(list->shared[list->shared_count++], f);
*/
void dma_resv_describe(struct dma_resv *obj, struct seq_file *seq)
{
+ static const char *usage[] = { "write", "read" };
struct dma_resv_iter cursor;
struct dma_fence *fence;
dma_resv_for_each_fence(&cursor, obj, DMA_RESV_USAGE_READ, fence) {
seq_printf(seq, "\t%s fence:",
- dma_resv_iter_is_exclusive(&cursor) ?
- "Exclusive" : "Shared");
+ usage[dma_resv_iter_usage(&cursor)]);
dma_fence_describe(fence, seq);
}
}
return r;
}
-static int test_signaling(void *arg, enum dma_resv_usage usage)
+static int test_signaling(void *arg)
{
+ enum dma_resv_usage usage = (unsigned long)arg;
struct dma_resv resv;
struct dma_fence *f;
int r;
goto err_unlock;
}
- if (usage >= DMA_RESV_USAGE_READ)
- dma_resv_add_shared_fence(&resv, f);
- else
- dma_resv_add_excl_fence(&resv, f);
-
+ dma_resv_add_fence(&resv, f, usage);
if (dma_resv_test_signaled(&resv, usage)) {
pr_err("Resv unexpectedly signaled\n");
r = -EINVAL;
return r;
}
-static int test_excl_signaling(void *arg)
-{
- return test_signaling(arg, DMA_RESV_USAGE_WRITE);
-}
-
-static int test_shared_signaling(void *arg)
-{
- return test_signaling(arg, DMA_RESV_USAGE_READ);
-}
-
-static int test_for_each(void *arg, enum dma_resv_usage usage)
+static int test_for_each(void *arg)
{
+ enum dma_resv_usage usage = (unsigned long)arg;
struct dma_resv_iter cursor;
struct dma_fence *f, *fence;
struct dma_resv resv;
goto err_unlock;
}
- if (usage >= DMA_RESV_USAGE_READ)
- dma_resv_add_shared_fence(&resv, f);
- else
- dma_resv_add_excl_fence(&resv, f);
+ dma_resv_add_fence(&resv, f, usage);
r = -ENOENT;
dma_resv_for_each_fence(&cursor, &resv, usage, fence) {
r = -EINVAL;
goto err_unlock;
}
- if (dma_resv_iter_is_exclusive(&cursor) !=
- (usage >= DMA_RESV_USAGE_READ)) {
+ if (dma_resv_iter_usage(&cursor) != usage) {
pr_err("Unexpected fence usage\n");
r = -EINVAL;
goto err_unlock;
return r;
}
-static int test_excl_for_each(void *arg)
-{
- return test_for_each(arg, DMA_RESV_USAGE_WRITE);
-}
-
-static int test_shared_for_each(void *arg)
-{
- return test_for_each(arg, DMA_RESV_USAGE_READ);
-}
-
-static int test_for_each_unlocked(void *arg, enum dma_resv_usage usage)
+static int test_for_each_unlocked(void *arg)
{
+ enum dma_resv_usage usage = (unsigned long)arg;
struct dma_resv_iter cursor;
struct dma_fence *f, *fence;
struct dma_resv resv;
goto err_free;
}
- if (usage >= DMA_RESV_USAGE_READ)
- dma_resv_add_shared_fence(&resv, f);
- else
- dma_resv_add_excl_fence(&resv, f);
+ dma_resv_add_fence(&resv, f, usage);
dma_resv_unlock(&resv);
r = -ENOENT;
r = -EINVAL;
goto err_iter_end;
}
- if (dma_resv_iter_is_exclusive(&cursor) !=
- (usage >= DMA_RESV_USAGE_READ)) {
+ if (dma_resv_iter_usage(&cursor) != usage) {
pr_err("Unexpected fence usage\n");
r = -EINVAL;
goto err_iter_end;
return r;
}
-static int test_excl_for_each_unlocked(void *arg)
-{
- return test_for_each_unlocked(arg, DMA_RESV_USAGE_WRITE);
-}
-
-static int test_shared_for_each_unlocked(void *arg)
-{
- return test_for_each_unlocked(arg, DMA_RESV_USAGE_READ);
-}
-
-static int test_get_fences(void *arg, enum dma_resv_usage usage)
+static int test_get_fences(void *arg)
{
+ enum dma_resv_usage usage = (unsigned long)arg;
struct dma_fence *f, **fences = NULL;
struct dma_resv resv;
int r, i;
goto err_resv;
}
- if (usage >= DMA_RESV_USAGE_READ)
- dma_resv_add_shared_fence(&resv, f);
- else
- dma_resv_add_excl_fence(&resv, f);
+ dma_resv_add_fence(&resv, f, usage);
dma_resv_unlock(&resv);
r = dma_resv_get_fences(&resv, usage, &i, &fences);
return r;
}
-static int test_excl_get_fences(void *arg)
-{
- return test_get_fences(arg, DMA_RESV_USAGE_WRITE);
-}
-
-static int test_shared_get_fences(void *arg)
-{
- return test_get_fences(arg, DMA_RESV_USAGE_READ);
-}
-
int dma_resv(void)
{
static const struct subtest tests[] = {
SUBTEST(sanitycheck),
- SUBTEST(test_excl_signaling),
- SUBTEST(test_shared_signaling),
- SUBTEST(test_excl_for_each),
- SUBTEST(test_shared_for_each),
- SUBTEST(test_excl_for_each_unlocked),
- SUBTEST(test_shared_for_each_unlocked),
- SUBTEST(test_excl_get_fences),
- SUBTEST(test_shared_get_fences),
+ SUBTEST(test_signaling),
+ SUBTEST(test_for_each),
+ SUBTEST(test_for_each_unlocked),
+ SUBTEST(test_get_fences),
};
+ enum dma_resv_usage usage;
+ int r;
spin_lock_init(&fence_lock);
- return subtests(tests, NULL);
+ for (usage = DMA_RESV_USAGE_WRITE; usage <= DMA_RESV_USAGE_READ;
+ ++usage) {
+ r = subtests(tests, (void *)(unsigned long)usage);
+ if (r)
+ return r;
+ }
+ return 0;
}
*/
replacement = dma_fence_get_stub();
dma_resv_replace_fences(bo->tbo.base.resv, ef->base.context,
- replacement);
+ replacement, DMA_RESV_USAGE_READ);
dma_fence_put(replacement);
return 0;
}
bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
p->uf_entry.priority = 0;
p->uf_entry.tv.bo = &bo->tbo;
- /* One for TTM and one for the CS job */
- p->uf_entry.tv.num_shared = 2;
+ /* One for TTM and two for the CS job */
+ p->uf_entry.tv.num_shared = 3;
drm_gem_object_put(gobj);
return;
}
- if (shared)
- dma_resv_add_shared_fence(resv, fence);
- else
- dma_resv_add_excl_fence(resv, fence);
+ dma_resv_add_fence(resv, fence, shared ? DMA_RESV_USAGE_READ :
+ DMA_RESV_USAGE_WRITE);
}
/**
for (i = 0; i < submit->nr_bos; i++) {
struct drm_gem_object *obj = &submit->bos[i].obj->base;
+ bool write = submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE;
- if (submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE)
- dma_resv_add_excl_fence(obj->resv,
- submit->out_fence);
- else
- dma_resv_add_shared_fence(obj->resv,
- submit->out_fence);
-
+ dma_resv_add_fence(obj->resv, submit->out_fence, write ?
+ DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ);
submit_unlock_object(submit, i);
}
}
if (dma_resv_iter_is_restarted(&cursor))
args->busy = 0;
- if (dma_resv_iter_is_exclusive(&cursor))
- /* Translate the exclusive fence to the READ *and* WRITE engine */
+ if (dma_resv_iter_usage(&cursor) <= DMA_RESV_USAGE_WRITE)
+ /* Translate the write fences to the READ *and* WRITE engine */
args->busy |= busy_check_writer(fence);
else
- /* Translate shared fences to READ set of engines */
+ /* Translate read fences to READ set of engines */
args->busy |= busy_check_reader(fence);
}
dma_resv_iter_end(&cursor);
obj->base.resv, NULL, true,
i915_fence_timeout(i915),
I915_FENCE_GFP);
- dma_resv_add_excl_fence(obj->base.resv, &clflush->base.dma);
+ dma_resv_add_fence(obj->base.resv, &clflush->base.dma,
+ DMA_RESV_USAGE_WRITE);
dma_fence_work_commit(&clflush->base);
/*
* We must have successfully populated the pages(since we are
if (IS_ERR_OR_NULL(copy_fence))
return PTR_ERR_OR_ZERO(copy_fence);
- dma_resv_add_excl_fence(dst_bo->base.resv, copy_fence);
- dma_resv_add_shared_fence(src_bo->base.resv, copy_fence);
-
+ dma_resv_add_fence(dst_bo->base.resv, copy_fence, DMA_RESV_USAGE_WRITE);
+ dma_resv_add_fence(src_bo->base.resv, copy_fence, DMA_RESV_USAGE_READ);
dma_fence_put(copy_fence);
return 0;
if (rq) {
err = dma_resv_reserve_fences(obj->base.resv, 1);
if (!err)
- dma_resv_add_excl_fence(obj->base.resv,
- &rq->fence);
+ dma_resv_add_fence(obj->base.resv, &rq->fence,
+ DMA_RESV_USAGE_WRITE);
i915_gem_object_set_moving_fence(obj, &rq->fence);
i915_request_put(rq);
}
expand32(POISON_INUSE), &rq);
i915_gem_object_unpin_pages(obj);
if (rq) {
- dma_resv_add_excl_fence(obj->base.resv, &rq->fence);
+ dma_resv_add_fence(obj->base.resv, &rq->fence,
+ DMA_RESV_USAGE_WRITE);
i915_gem_object_set_moving_fence(obj, &rq->fence);
i915_request_put(rq);
}
}
if (fence) {
- dma_resv_add_excl_fence(vma->obj->base.resv, fence);
+ dma_resv_add_fence(vma->obj->base.resv, fence,
+ DMA_RESV_USAGE_WRITE);
obj->write_domain = I915_GEM_DOMAIN_RENDER;
obj->read_domains = 0;
}
}
if (fence) {
- dma_resv_add_shared_fence(vma->obj->base.resv, fence);
+ dma_resv_add_fence(vma->obj->base.resv, fence,
+ DMA_RESV_USAGE_READ);
obj->write_domain = 0;
}
}
goto out_rpm;
}
- dma_resv_add_shared_fence(obj->base.resv, fence);
+ dma_resv_add_fence(obj->base.resv, fence, DMA_RESV_USAGE_READ);
dma_fence_put(fence);
out_rpm:
obj->mm.pages->sgl, I915_CACHE_NONE,
true, 0xdeadbeaf, &rq);
if (rq) {
- dma_resv_add_excl_fence(obj->base.resv, &rq->fence);
+ dma_resv_add_fence(obj->base.resv, &rq->fence,
+ DMA_RESV_USAGE_WRITE);
i915_request_put(rq);
}
fence = lima_sched_context_queue_task(submit->task);
for (i = 0; i < submit->nr_bos; i++) {
- if (submit->bos[i].flags & LIMA_SUBMIT_BO_WRITE)
- dma_resv_add_excl_fence(lima_bo_resv(bos[i]), fence);
- else
- dma_resv_add_shared_fence(lima_bo_resv(bos[i]), fence);
+ dma_resv_add_fence(lima_bo_resv(bos[i]), fence,
+ submit->bos[i].flags & LIMA_SUBMIT_BO_WRITE ?
+ DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ);
}
drm_gem_unlock_reservations((struct drm_gem_object **)bos,
struct drm_gem_object *obj = &submit->bos[i].obj->base;
if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
- dma_resv_add_excl_fence(obj->resv, submit->user_fence);
+ dma_resv_add_fence(obj->resv, submit->user_fence,
+ DMA_RESV_USAGE_WRITE);
else if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
- dma_resv_add_shared_fence(obj->resv, submit->user_fence);
+ dma_resv_add_fence(obj->resv, submit->user_fence,
+ DMA_RESV_USAGE_READ);
}
}
{
struct dma_resv *resv = nvbo->bo.base.resv;
- if (exclusive)
- dma_resv_add_excl_fence(resv, &fence->base);
- else if (fence)
- dma_resv_add_shared_fence(resv, &fence->base);
+ if (!fence)
+ return;
+
+ dma_resv_add_fence(resv, &fence->base, exclusive ?
+ DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ);
}
static void
dma_resv_for_each_fence(&cursor, resv,
dma_resv_usage_rw(exclusive),
fence) {
+ enum dma_resv_usage usage;
struct nouveau_fence *f;
- if (i == 0 && dma_resv_iter_is_exclusive(&cursor))
+ usage = dma_resv_iter_usage(&cursor);
+ if (i == 0 && usage == DMA_RESV_USAGE_WRITE)
continue;
f = nouveau_local_fence(fence, chan->drm);
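The conversions above keep using the dma_resv_usage_rw() helper, which is not shown in this excerpt. Assuming the usual inclusive ordering of enum dma_resv_usage, its mapping is roughly the sketch below (example_usage_rw() is only an illustration).

/* A new writer has to wait for existing readers and writers, so it asks
 * for all fences up to DMA_RESV_USAGE_READ; a new reader only has to
 * wait for the writers.
 */
static inline enum dma_resv_usage example_usage_rw(bool write)
{
	return write ? DMA_RESV_USAGE_READ : DMA_RESV_USAGE_WRITE;
}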
int i;
for (i = 0; i < bo_count; i++)
- dma_resv_add_excl_fence(bos[i]->resv, fence);
+ dma_resv_add_fence(bos[i]->resv, fence, DMA_RESV_USAGE_WRITE);
}
int panfrost_job_push(struct panfrost_job *job)
list_for_each_entry(entry, &release->bos, head) {
bo = entry->bo;
- dma_resv_add_shared_fence(bo->base.resv, &release->base);
+ dma_resv_add_fence(bo->base.resv, &release->base,
+ DMA_RESV_USAGE_READ);
ttm_bo_move_to_lru_tail_unlocked(bo);
dma_resv_unlock(bo->base.resv);
}
return;
}
- if (shared)
- dma_resv_add_shared_fence(resv, &fence->base);
- else
- dma_resv_add_excl_fence(resv, &fence->base);
+ dma_resv_add_fence(resv, &fence->base, shared ?
+ DMA_RESV_USAGE_READ : DMA_RESV_USAGE_WRITE);
}
return ret;
}
- dma_resv_add_shared_fence(bo->base.resv, fence);
+ dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_WRITE);
ret = dma_resv_reserve_fences(bo->base.resv, 1);
if (unlikely(ret)) {
if (ret)
return ret;
- dma_resv_add_excl_fence(&ghost_obj->base._resv, fence);
+ dma_resv_add_fence(&ghost_obj->base._resv, fence,
+ DMA_RESV_USAGE_WRITE);
/**
* If we're not moving to fixed memory, the TTM object
struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
int ret = 0;
- dma_resv_add_excl_fence(bo->base.resv, fence);
+ dma_resv_add_fence(bo->base.resv, fence, DMA_RESV_USAGE_WRITE);
if (!evict)
ret = ttm_bo_move_to_ghost(bo, fence, man->use_tt);
else if (!from->use_tt && pipeline)
list_for_each_entry(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;
- if (entry->num_shared)
- dma_resv_add_shared_fence(bo->base.resv, fence);
- else
- dma_resv_add_excl_fence(bo->base.resv, fence);
+ dma_resv_add_fence(bo->base.resv, fence, entry->num_shared ?
+ DMA_RESV_USAGE_READ : DMA_RESV_USAGE_WRITE);
ttm_bo_move_to_lru_tail_unlocked(bo);
dma_resv_unlock(bo->base.resv);
}
for (i = 0; i < job->bo_count; i++) {
/* XXX: Use shared fences for read-only objects. */
- dma_resv_add_excl_fence(job->bo[i]->resv,
- job->done_fence);
+ dma_resv_add_fence(job->bo[i]->resv, job->done_fence,
+ DMA_RESV_USAGE_WRITE);
}
drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx);
bo = to_vc4_bo(&exec->bo[i]->base);
bo->seqno = seqno;
- dma_resv_add_shared_fence(bo->base.base.resv, exec->fence);
+ dma_resv_add_fence(bo->base.base.resv, exec->fence,
+ DMA_RESV_USAGE_READ);
}
list_for_each_entry(bo, &exec->unref_list, unref_head) {
/* Expose the fence via the dma-buf */
dma_resv_lock(resv, NULL);
ret = dma_resv_reserve_fences(resv, 1);
- if (!ret) {
- if (arg->flags & VGEM_FENCE_WRITE)
- dma_resv_add_excl_fence(resv, fence);
- else
- dma_resv_add_shared_fence(resv, fence);
- }
+ if (!ret)
+ dma_resv_add_fence(resv, fence, arg->flags & VGEM_FENCE_WRITE ?
+ DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ);
dma_resv_unlock(resv);
/* Record the fence in our idr for later signaling */
int i;
for (i = 0; i < objs->nents; i++)
- dma_resv_add_excl_fence(objs->objs[i]->resv, fence);
+ dma_resv_add_fence(objs->objs[i]->resv, fence,
+ DMA_RESV_USAGE_WRITE);
}
void virtio_gpu_array_put_free(struct virtio_gpu_object_array *objs)
ret = dma_resv_reserve_fences(bo->base.resv, 1);
if (!ret)
- dma_resv_add_excl_fence(bo->base.resv, &fence->base);
+ dma_resv_add_fence(bo->base.resv, &fence->base,
+ DMA_RESV_USAGE_WRITE);
else
/* Last resort fallback when we are OOM */
dma_fence_wait(&fence->base, false);
* e.g. exposed in `Implicit Fence Poll Support`_ must follow the
* below rules.
*
- * - Drivers must add a shared fence through dma_resv_add_shared_fence()
- * for anything the userspace API considers a read access. This highly
- * depends upon the API and window system.
+ * - Drivers must add a read fence through dma_resv_add_fence() with the
+ * DMA_RESV_USAGE_READ flag for anything the userspace API considers a
+ * read access. This highly depends upon the API and window system.
*
- * - Similarly drivers must set the exclusive fence through
- * dma_resv_add_excl_fence() for anything the userspace API considers
- * write access.
+ * - Similarly drivers must add a write fence through
+ * dma_resv_add_fence() with the DMA_RESV_USAGE_WRITE flag for
+ * anything the userspace API considers write access.
*
- * - Drivers may just always set the exclusive fence, since that only
+ * - Drivers may just always add a write fence, since that only
*   causes unnecessary synchronization, but no correctness issues.
*
* - Some drivers only expose a synchronous userspace API with no
* Dynamic importers, see dma_buf_attachment_is_dynamic(), have
* additional constraints on how they set up fences:
*
- * - Dynamic importers must obey the exclusive fence and wait for it to
+ * - Dynamic importers must obey the write fences and wait for them to
* signal before allowing access to the buffer's underlying storage
* through the device.
*
/** @fence: the currently handled fence */
struct dma_fence *fence;
+ /** @fence_usage: the usage of the current fence */
+ enum dma_resv_usage fence_usage;
+
/** @seq: sequence number to check for modifications */
unsigned int seq;
}
/**
- * dma_resv_iter_is_exclusive - test if the current fence is the exclusive one
+ * dma_resv_iter_usage - Return the usage of the current fence
* @cursor: the cursor of the current position
*
- * Returns true if the currently returned fence is the exclusive one.
+ * Returns the usage of the currently processed fence.
*/
-static inline bool dma_resv_iter_is_exclusive(struct dma_resv_iter *cursor)
+static inline enum dma_resv_usage
+dma_resv_iter_usage(struct dma_resv_iter *cursor)
{
- return cursor->index == 0;
+ return cursor->fence_usage;
}
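For illustration, iterating while inspecting the per-fence usage could look like this hypothetical helper; the reservation object must be held since dma_resv_for_each_fence() is the locked variant.

static unsigned int example_count_writers(struct dma_resv *resv)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	unsigned int writers = 0;

	/* Asking for DMA_RESV_USAGE_READ returns write fences as well. */
	dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_READ, fence) {
		if (dma_resv_iter_usage(&cursor) == DMA_RESV_USAGE_WRITE)
			writers++;
	}

	return writers;
}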
/**
#define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base)
#ifdef CONFIG_DEBUG_MUTEXES
-void dma_resv_reset_shared_max(struct dma_resv *obj);
+void dma_resv_reset_max_fences(struct dma_resv *obj);
#else
-static inline void dma_resv_reset_shared_max(struct dma_resv *obj) {}
+static inline void dma_resv_reset_max_fences(struct dma_resv *obj) {}
#endif
/**
*/
static inline void dma_resv_unlock(struct dma_resv *obj)
{
- dma_resv_reset_shared_max(obj);
+ dma_resv_reset_max_fences(obj);
ww_mutex_unlock(&obj->lock);
}
void dma_resv_init(struct dma_resv *obj);
void dma_resv_fini(struct dma_resv *obj);
int dma_resv_reserve_fences(struct dma_resv *obj, unsigned int num_fences);
-void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence);
+void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence,
+ enum dma_resv_usage usage);
void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
- struct dma_fence *fence);
-void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence);
+ struct dma_fence *fence,
+ enum dma_resv_usage usage);
int dma_resv_get_fences(struct dma_resv *obj, enum dma_resv_usage usage,
unsigned int *num_fences, struct dma_fence ***fences);
int dma_resv_get_singleton(struct dma_resv *obj, enum dma_resv_usage usage,
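To round this off, a hypothetical caller of the usage-filtered dma_resv_get_fences() could look like the sketch below; the fence array is allocated by the function and put/freed by the caller.

static int example_wait_for_writers(struct dma_resv *resv)
{
	struct dma_fence **fences;
	unsigned int i, count;
	int ret;

	/* Snapshot only the fences a CPU read would have to wait for. */
	ret = dma_resv_get_fences(resv, DMA_RESV_USAGE_WRITE,
				  &count, &fences);
	if (ret)
		return ret;

	for (i = 0; i < count; i++) {
		dma_fence_wait(fences[i], false);
		dma_fence_put(fences[i]);
	}
	kfree(fences);

	return 0;
}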