# SPDX-License-Identifier: GPL-2.0-only
 obj-y := dma-buf.o dma-fence.o dma-fence-array.o dma-fence-chain.o \
-        reservation.o seqno-fence.o
+        dma-resv.o seqno-fence.o
 obj-$(CONFIG_SYNC_FILE)                += sync_file.o
 obj-$(CONFIG_SW_SYNC)          += sw_sync.o sync_debug.o
 obj-$(CONFIG_UDMABUF)          += udmabuf.o
 
 #include <linux/module.h>
 #include <linux/seq_file.h>
 #include <linux/poll.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 #include <linux/mm.h>
 #include <linux/mount.h>
 #include <linux/pseudo_fs.h>
        list_del(&dmabuf->list_node);
        mutex_unlock(&db_list.lock);
 
-       if (dmabuf->resv == (struct reservation_object *)&dmabuf[1])
-               reservation_object_fini(dmabuf->resv);
+       if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
+               dma_resv_fini(dmabuf->resv);
 
        module_put(dmabuf->owner);
        kfree(dmabuf);
  * To support cross-device and cross-driver synchronization of buffer access
  * implicit fences (represented internally in the kernel with &struct dma_fence) can
  * be attached to a &dma_buf. The glue for that and a few related things are
- * provided in the &reservation_object structure.
+ * provided in the &dma_resv structure.
  *
  * Userspace can query the state of these implicitly tracked fences using poll()
  * and related system calls:
 static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
 {
        struct dma_buf *dmabuf;
-       struct reservation_object *resv;
-       struct reservation_object_list *fobj;
+       struct dma_resv *resv;
+       struct dma_resv_list *fobj;
        struct dma_fence *fence_excl;
        __poll_t events;
        unsigned shared_count;
                return 0;
 
        rcu_read_lock();
-       reservation_object_fences(resv, &fence_excl, &fobj, &shared_count);
+       dma_resv_fences(resv, &fence_excl, &fobj, &shared_count);
        if (fence_excl && (!(events & EPOLLOUT) || shared_count == 0)) {
                struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
                __poll_t pevents = EPOLLIN;
 struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
 {
        struct dma_buf *dmabuf;
-       struct reservation_object *resv = exp_info->resv;
+       struct dma_resv *resv = exp_info->resv;
        struct file *file;
        size_t alloc_size = sizeof(struct dma_buf);
        int ret;
 
        if (!exp_info->resv)
-               alloc_size += sizeof(struct reservation_object);
+               alloc_size += sizeof(struct dma_resv);
        else
                /* prevent &dma_buf[1] == dma_buf->resv */
                alloc_size += 1;
        dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;
 
        if (!resv) {
-               resv = (struct reservation_object *)&dmabuf[1];
-               reservation_object_init(resv);
+               resv = (struct dma_resv *)&dmabuf[1];
+               dma_resv_init(resv);
        }
        dmabuf->resv = resv;
 
 {
        bool write = (direction == DMA_BIDIRECTIONAL ||
                      direction == DMA_TO_DEVICE);
-       struct reservation_object *resv = dmabuf->resv;
+       struct dma_resv *resv = dmabuf->resv;
        long ret;
 
        /* Wait on any implicit rendering fences */
-       ret = reservation_object_wait_timeout_rcu(resv, write, true,
+       ret = dma_resv_wait_timeout_rcu(resv, write, true,
                                                  MAX_SCHEDULE_TIMEOUT);
        if (ret < 0)
                return ret;
        int ret;
        struct dma_buf *buf_obj;
        struct dma_buf_attachment *attach_obj;
-       struct reservation_object *robj;
-       struct reservation_object_list *fobj;
+       struct dma_resv *robj;
+       struct dma_resv_list *fobj;
        struct dma_fence *fence;
        int count = 0, attach_count, shared_count, i;
        size_t size = 0;
 
                robj = buf_obj->resv;
                rcu_read_lock();
-               reservation_object_fences(robj, &fence, &fobj, &shared_count);
+               dma_resv_fences(robj, &fence, &fobj, &shared_count);
                rcu_read_unlock();
 
                if (fence)
 
  *
  * - Then there's also implicit fencing, where the synchronization points are
  *   implicitly passed around as part of shared &dma_buf instances. Such
- *   implicit fences are stored in &struct reservation_object through the
+ *   implicit fences are stored in &struct dma_resv through the
  *   &dma_buf.resv pointer.
  */
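
To make the paragraph above concrete, here is a hedged sketch of how a consumer can honour those implicit fences before touching the buffer. It assumes `dmabuf` is a &struct dma_buf the caller already holds a reference to; the helper name is illustrative and not from this patch:

    #include <linux/dma-buf.h>
    #include <linux/dma-resv.h>
    #include <linux/sched.h>

    /* Block until every implicit fence attached to the buffer has signaled. */
    static long wait_for_implicit_fences(struct dma_buf *dmabuf)
    {
            /* wait_all=true: include shared (read) fences, intr=true: allow signals */
            return dma_resv_wait_timeout_rcu(dmabuf->resv, true, true,
                                             MAX_SCHEDULE_TIMEOUT);
    }
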
 
 
--- /dev/null
+/*
+ * Copyright (C) 2012-2014 Canonical Ltd (Maarten Lankhorst)
+ *
+ * Based on bo.c which bears the following copyright notice,
+ * but is dual licensed:
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#include <linux/dma-resv.h>
+#include <linux/export.h>
+
+/**
+ * DOC: Reservation Object Overview
+ *
+ * The reservation object provides a mechanism to manage shared and
+ * exclusive fences associated with a buffer.  A reservation object
+ * can have one exclusive fence attached (normally associated with
+ * write operations) and/or N shared fences (read operations).  The RCU
+ * mechanism is used to protect read access to fences from locked
+ * write-side updates.
+ */
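
As a hedged illustration of the model described in the overview above (not part of the file; `obj`, `write_fence` and `read_fence` are assumed to be owned by the caller), a writer installs the exclusive fence and a reader reserves a slot and adds a shared fence, both under the ww_mutex:

    #include <linux/dma-resv.h>
    #include <linux/dma-fence.h>

    static int publish_fences(struct dma_resv *obj,
                              struct dma_fence *write_fence,
                              struct dma_fence *read_fence)
    {
            int ret;

            ret = dma_resv_lock(obj, NULL); /* NULL: no ww_acquire_ctx needed here */
            if (ret)
                    return ret;

            /* the writer's fence goes into the single exclusive slot */
            dma_resv_add_excl_fence(obj, write_fence);

            /* readers need a reserved slot before adding a shared fence */
            ret = dma_resv_reserve_shared(obj, 1);
            if (!ret)
                    dma_resv_add_shared_fence(obj, read_fence);

            dma_resv_unlock(obj);
            return ret;
    }
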
+
+DEFINE_WD_CLASS(reservation_ww_class);
+EXPORT_SYMBOL(reservation_ww_class);
+
+/**
+ * dma_resv_list_alloc - allocate fence list
+ * @shared_max: number of fences we need space for
+ *
+ * Allocate a new dma_resv_list and make sure to correctly initialize
+ * shared_max.
+ */
+static struct dma_resv_list *dma_resv_list_alloc(unsigned int shared_max)
+{
+       struct dma_resv_list *list;
+
+       list = kmalloc(offsetof(typeof(*list), shared[shared_max]), GFP_KERNEL);
+       if (!list)
+               return NULL;
+
+       list->shared_max = (ksize(list) - offsetof(typeof(*list), shared)) /
+               sizeof(*list->shared);
+
+       return list;
+}
+
+/**
+ * dma_resv_list_free - free fence list
+ * @list: list to free
+ *
+ * Free a dma_resv_list and make sure to drop all references.
+ */
+static void dma_resv_list_free(struct dma_resv_list *list)
+{
+       unsigned int i;
+
+       if (!list)
+               return;
+
+       for (i = 0; i < list->shared_count; ++i)
+               dma_fence_put(rcu_dereference_protected(list->shared[i], true));
+
+       kfree_rcu(list, rcu);
+}
+
+/**
+ * dma_resv_init - initialize a reservation object
+ * @obj: the reservation object
+ */
+void dma_resv_init(struct dma_resv *obj)
+{
+       ww_mutex_init(&obj->lock, &reservation_ww_class);
+       RCU_INIT_POINTER(obj->fence, NULL);
+       RCU_INIT_POINTER(obj->fence_excl, NULL);
+}
+EXPORT_SYMBOL(dma_resv_init);
+
+/**
+ * dma_resv_fini - destroys a reservation object
+ * @obj: the reservation object
+ */
+void dma_resv_fini(struct dma_resv *obj)
+{
+       struct dma_resv_list *fobj;
+       struct dma_fence *excl;
+
+       /*
+        * This object should be dead and all references to it must
+        * have been released, so there is no need for RCU protection.
+        */
+       excl = rcu_dereference_protected(obj->fence_excl, 1);
+       if (excl)
+               dma_fence_put(excl);
+
+       fobj = rcu_dereference_protected(obj->fence, 1);
+       dma_resv_list_free(fobj);
+       ww_mutex_destroy(&obj->lock);
+}
+EXPORT_SYMBOL(dma_resv_fini);
+
+/**
+ * dma_resv_reserve_shared - Reserve space to add shared fences to
+ * a dma_resv.
+ * @obj: reservation object
+ * @num_fences: number of fences we want to add
+ *
+ * Should be called before dma_resv_add_shared_fence().  Must
+ * be called with obj->lock held.
+ *
+ * RETURNS
+ * Zero for success, or -errno
+ */
+int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences)
+{
+       struct dma_resv_list *old, *new;
+       unsigned int i, j, k, max;
+
+       dma_resv_assert_held(obj);
+
+       old = dma_resv_get_list(obj);
+
+       if (old && old->shared_max) {
+               if ((old->shared_count + num_fences) <= old->shared_max)
+                       return 0;
+               else
+                       max = max(old->shared_count + num_fences,
+                                 old->shared_max * 2);
+       } else {
+               max = 4;
+       }
+
+       new = dma_resv_list_alloc(max);
+       if (!new)
+               return -ENOMEM;
+
+       /*
+        * No need to bump the fence refcounts here: RCU readers must
+        * use kref_get_unless_zero() anyway, and the references held
+        * by the old struct are simply carried over to the new one.
+        */
+       for (i = 0, j = 0, k = max; i < (old ? old->shared_count : 0); ++i) {
+               struct dma_fence *fence;
+
+               fence = rcu_dereference_protected(old->shared[i],
+                                                 dma_resv_held(obj));
+               if (dma_fence_is_signaled(fence))
+                       RCU_INIT_POINTER(new->shared[--k], fence);
+               else
+                       RCU_INIT_POINTER(new->shared[j++], fence);
+       }
+       new->shared_count = j;
+
+       /*
+        * We are not changing the effective set of fences here so can
+        * merely update the pointer to the new array; both existing
+        * readers and new readers will see exactly the same set of
+        * active (unsignaled) shared fences. Individual fences and the
+        * old array are protected by RCU and so will not vanish under
+        * the gaze of the rcu_read_lock() readers.
+        */
+       rcu_assign_pointer(obj->fence, new);
+
+       if (!old)
+               return 0;
+
+       /* Drop the references to the signaled fences */
+       for (i = k; i < max; ++i) {
+               struct dma_fence *fence;
+
+               fence = rcu_dereference_protected(new->shared[i],
+                                                 dma_resv_held(obj));
+               dma_fence_put(fence);
+       }
+       kfree_rcu(old, rcu);
+
+       return 0;
+}
+EXPORT_SYMBOL(dma_resv_reserve_shared);
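
The num_fences argument lets a caller reserve room for a whole batch up front and then add the fences one by one without re-growing the array. A hedged sketch of that pattern follows (the fences array, its count and the function name come from a hypothetical caller, which is assumed to already hold obj->lock):

    #include <linux/dma-resv.h>
    #include <linux/dma-fence.h>

    static int add_fence_batch(struct dma_resv *obj,
                               struct dma_fence **fences, unsigned int count)
    {
            unsigned int i;
            int ret;

            dma_resv_assert_held(obj);      /* caller holds the ww_mutex */

            /* grow the shared list once for the whole batch */
            ret = dma_resv_reserve_shared(obj, count);
            if (ret)
                    return ret;

            for (i = 0; i < count; ++i)
                    dma_resv_add_shared_fence(obj, fences[i]);

            return 0;
    }
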
+
+/**
+ * dma_resv_add_shared_fence - Add a fence to a shared slot
+ * @obj: the reservation object
+ * @fence: the shared fence to add
+ *
+ * Add a fence to a shared slot. obj->lock must be held and
+ * dma_resv_reserve_shared() must have been called beforehand.
+ */
+void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
+{
+       struct dma_resv_list *fobj;
+       struct dma_fence *old;
+       unsigned int i, count;
+
+       dma_fence_get(fence);
+
+       dma_resv_assert_held(obj);
+
+       fobj = dma_resv_get_list(obj);
+       count = fobj->shared_count;
+
+       for (i = 0; i < count; ++i) {
+
+               old = rcu_dereference_protected(fobj->shared[i],
+                                               dma_resv_held(obj));
+               if (old->context == fence->context ||
+                   dma_fence_is_signaled(old))
+                       goto replace;
+       }
+
+       BUG_ON(fobj->shared_count >= fobj->shared_max);
+       old = NULL;
+       count++;
+
+replace:
+       RCU_INIT_POINTER(fobj->shared[i], fence);
+       /* pointer update must be visible before we extend the shared_count */
+       smp_store_mb(fobj->shared_count, count);
+       dma_fence_put(old);
+}
+EXPORT_SYMBOL(dma_resv_add_shared_fence);
+
+/**
+ * dma_resv_add_excl_fence - Add an exclusive fence.
+ * @obj: the reservation object
+ * @fence: the exclusive fence to add
+ *
+ * Add a fence to the exclusive slot.  The obj->lock must be held.
+ */
+void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
+{
+       struct dma_fence *old_fence = dma_resv_get_excl(obj);
+       struct dma_resv_list *old;
+       u32 i = 0;
+
+       dma_resv_assert_held(obj);
+
+       old = dma_resv_get_list(obj);
+       if (old)
+               i = old->shared_count;
+
+       if (fence)
+               dma_fence_get(fence);
+
+       preempt_disable();
+       rcu_assign_pointer(obj->fence_excl, fence);
+       /* pointer update must be visible before we modify the shared_count */
+       if (old)
+               smp_store_mb(old->shared_count, 0);
+       preempt_enable();
+
+       /* inplace update, no shared fences */
+       while (i--)
+               dma_fence_put(rcu_dereference_protected(old->shared[i],
+                                               dma_resv_held(obj)));
+
+       dma_fence_put(old_fence);
+}
+EXPORT_SYMBOL(dma_resv_add_excl_fence);
+
+/**
+ * dma_resv_copy_fences - Copy all fences from src to dst.
+ * @dst: the destination reservation object
+ * @src: the source reservation object
+ *
+ * Copy all fences from src to dst. dst->lock must be held.
+ */
+int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
+{
+       struct dma_resv_list *src_list, *dst_list;
+       struct dma_fence *old, *new;
+       unsigned int i, shared_count;
+
+       dma_resv_assert_held(dst);
+
+       rcu_read_lock();
+
+retry:
+       dma_resv_fences(src, &new, &src_list, &shared_count);
+       if (shared_count) {
+               rcu_read_unlock();
+
+               dst_list = dma_resv_list_alloc(shared_count);
+               if (!dst_list)
+                       return -ENOMEM;
+
+               rcu_read_lock();
+               dma_resv_fences(src, &new, &src_list, &shared_count);
+               if (!src_list || shared_count > dst_list->shared_max) {
+                       kfree(dst_list);
+                       goto retry;
+               }
+
+               dst_list->shared_count = 0;
+               for (i = 0; i < shared_count; ++i) {
+                       struct dma_fence *fence;
+
+                       fence = rcu_dereference(src_list->shared[i]);
+                       if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+                                    &fence->flags))
+                               continue;
+
+                       if (!dma_fence_get_rcu(fence)) {
+                               dma_resv_list_free(dst_list);
+                               goto retry;
+                       }
+
+                       if (dma_fence_is_signaled(fence)) {
+                               dma_fence_put(fence);
+                               continue;
+                       }
+
+                       rcu_assign_pointer(dst_list->shared[dst_list->shared_count++], fence);
+               }
+       } else {
+               dst_list = NULL;
+       }
+
+       if (new && !dma_fence_get_rcu(new)) {
+               dma_resv_list_free(dst_list);
+               goto retry;
+       }
+       rcu_read_unlock();
+
+       src_list = dma_resv_get_list(dst);
+       old = dma_resv_get_excl(dst);
+
+       preempt_disable();
+       rcu_assign_pointer(dst->fence_excl, new);
+       rcu_assign_pointer(dst->fence, dst_list);
+       preempt_enable();
+
+       dma_resv_list_free(src_list);
+       dma_fence_put(old);
+
+       return 0;
+}
+EXPORT_SYMBOL(dma_resv_copy_fences);
+
+/**
+ * dma_resv_get_fences_rcu - Get an object's shared and exclusive
+ * fences without the update-side lock held
+ * @obj: the reservation object
+ * @pfence_excl: the returned exclusive fence (or NULL)
+ * @pshared_count: the number of shared fences returned
+ * @pshared: the array of shared fence ptrs returned (array is krealloc'd to
+ * the required size, and must be freed by caller)
+ *
+ * Retrieve all fences from the reservation object. If the pointer for the
+ * exclusive fence is not specified the fence is put into the array of the
+ * shared fences as well. Returns either zero or -ENOMEM.
+ */
+int dma_resv_get_fences_rcu(struct dma_resv *obj,
+                           struct dma_fence **pfence_excl,
+                           unsigned *pshared_count,
+                           struct dma_fence ***pshared)
+{
+       struct dma_fence **shared = NULL;
+       struct dma_fence *fence_excl;
+       unsigned int shared_count;
+       int ret = 1;
+
+       do {
+               struct dma_resv_list *fobj;
+               unsigned int i;
+               size_t sz = 0;
+
+               i = 0;
+
+               rcu_read_lock();
+               dma_resv_fences(obj, &fence_excl, &fobj, &shared_count);
+
+               if (fence_excl && !dma_fence_get_rcu(fence_excl))
+                       goto unlock;
+
+               if (fobj)
+                       sz += sizeof(*shared) * fobj->shared_max;
+
+               if (!pfence_excl && fence_excl)
+                       sz += sizeof(*shared);
+
+               if (sz) {
+                       struct dma_fence **nshared;
+
+                       nshared = krealloc(shared, sz,
+                                          GFP_NOWAIT | __GFP_NOWARN);
+                       if (!nshared) {
+                               rcu_read_unlock();
+
+                               dma_fence_put(fence_excl);
+                               fence_excl = NULL;
+
+                               nshared = krealloc(shared, sz, GFP_KERNEL);
+                               if (nshared) {
+                                       shared = nshared;
+                                       continue;
+                               }
+
+                               ret = -ENOMEM;
+                               break;
+                       }
+                       shared = nshared;
+                       for (i = 0; i < shared_count; ++i) {
+                               shared[i] = rcu_dereference(fobj->shared[i]);
+                               if (!dma_fence_get_rcu(shared[i]))
+                                       break;
+                       }
+               }
+
+               if (i != shared_count) {
+                       while (i--)
+                               dma_fence_put(shared[i]);
+                       dma_fence_put(fence_excl);
+                       goto unlock;
+               }
+
+               ret = 0;
+unlock:
+               rcu_read_unlock();
+       } while (ret);
+
+       if (pfence_excl)
+               *pfence_excl = fence_excl;
+       else if (fence_excl)
+               shared[shared_count++] = fence_excl;
+
+       if (!shared_count) {
+               kfree(shared);
+               shared = NULL;
+       }
+
+       *pshared_count = shared_count;
+       *pshared = shared;
+       return ret;
+}
+EXPORT_SYMBOL_GPL(dma_resv_get_fences_rcu);
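
Because this helper returns references, the caller has to drop every fence and free the krealloc'd array itself. A hedged sketch of that obligation (the function name is illustrative, not from this patch):

    #include <linux/dma-resv.h>
    #include <linux/dma-fence.h>
    #include <linux/slab.h>

    static int snapshot_and_release(struct dma_resv *obj)
    {
            struct dma_fence *excl, **shared;
            unsigned int count, i;
            int ret;

            ret = dma_resv_get_fences_rcu(obj, &excl, &count, &shared);
            if (ret)
                    return ret;             /* -ENOMEM */

            /* ... inspect or wait on the snapshotted fences here ... */

            for (i = 0; i < count; ++i)
                    dma_fence_put(shared[i]);
            kfree(shared);                  /* may be NULL when count == 0 */
            dma_fence_put(excl);            /* dma_fence_put() accepts NULL */
            return 0;
    }
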
+
+/**
+ * dma_resv_wait_timeout_rcu - Wait on a reservation object's
+ * shared and/or exclusive fences.
+ * @obj: the reservation object
+ * @wait_all: if true, wait on all fences, else wait on just exclusive fence
+ * @intr: if true, do interruptible wait
+ * @timeout: timeout value in jiffies or zero to return immediately
+ *
+ * RETURNS
+ * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
+ * greater than zero on success.
+ */
+long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
+                              bool wait_all, bool intr,
+                              unsigned long timeout)
+{
+       struct dma_resv_list *fobj;
+       struct dma_fence *fence;
+       unsigned shared_count;
+       long ret = timeout ? timeout : 1;
+       int i;
+
+retry:
+       rcu_read_lock();
+       i = -1;
+
+       dma_resv_fences(obj, &fence, &fobj, &shared_count);
+       if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
+               if (!dma_fence_get_rcu(fence))
+                       goto unlock_retry;
+
+               if (dma_fence_is_signaled(fence)) {
+                       dma_fence_put(fence);
+                       fence = NULL;
+               }
+
+       } else {
+               fence = NULL;
+       }
+
+       if (wait_all) {
+               for (i = 0; !fence && i < shared_count; ++i) {
+                       struct dma_fence *lfence = rcu_dereference(fobj->shared[i]);
+
+                       if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+                                    &lfence->flags))
+                               continue;
+
+                       if (!dma_fence_get_rcu(lfence))
+                               goto unlock_retry;
+
+                       if (dma_fence_is_signaled(lfence)) {
+                               dma_fence_put(lfence);
+                               continue;
+                       }
+
+                       fence = lfence;
+                       break;
+               }
+       }
+
+       rcu_read_unlock();
+       if (fence) {
+               ret = dma_fence_wait_timeout(fence, intr, ret);
+               dma_fence_put(fence);
+               if (ret > 0 && wait_all && (i + 1 < shared_count))
+                       goto retry;
+       }
+       return ret;
+
+unlock_retry:
+       rcu_read_unlock();
+       goto retry;
+}
+EXPORT_SYMBOL_GPL(dma_resv_wait_timeout_rcu);
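
A hedged sketch of how a caller typically folds the three-way return value (error / timeout / remaining jiffies) into an errno; the helper name and the 100 ms budget are illustrative, not from this patch:

    #include <linux/dma-resv.h>
    #include <linux/errno.h>
    #include <linux/jiffies.h>

    static int wait_until_idle(struct dma_resv *obj)
    {
            long ret;

            /* wait_all=true: shared and exclusive fences, intr=false: uninterruptible */
            ret = dma_resv_wait_timeout_rcu(obj, true, false,
                                            msecs_to_jiffies(100));
            if (ret < 0)
                    return ret;             /* e.g. -ERESTARTSYS when intr=true */
            if (ret == 0)
                    return -ETIMEDOUT;      /* fences still pending */
            return 0;                       /* > 0: signaled, jiffies left over */
    }
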
+
+
+static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
+{
+       struct dma_fence *fence, *lfence = passed_fence;
+       int ret = 1;
+
+       if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) {
+               fence = dma_fence_get_rcu(lfence);
+               if (!fence)
+                       return -1;
+
+               ret = !!dma_fence_is_signaled(fence);
+               dma_fence_put(fence);
+       }
+       return ret;
+}
+
+/**
+ * dma_resv_test_signaled_rcu - Test if a reservation object's
+ * fences have been signaled.
+ * @obj: the reservation object
+ * @test_all: if true, test all fences, otherwise only test the exclusive
+ * fence
+ *
+ * RETURNS
+ * true if all fences are signaled, else false
+ */
+bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
+{
+       struct dma_resv_list *fobj;
+       struct dma_fence *fence_excl;
+       unsigned shared_count;
+       int ret;
+
+       rcu_read_lock();
+retry:
+       ret = true;
+
+       dma_resv_fences(obj, &fence_excl, &fobj, &shared_count);
+       if (test_all) {
+               unsigned i;
+
+               for (i = 0; i < shared_count; ++i) {
+                       struct dma_fence *fence = rcu_dereference(fobj->shared[i]);
+
+                       ret = dma_resv_test_signaled_single(fence);
+                       if (ret < 0)
+                               goto retry;
+                       else if (!ret)
+                               break;
+               }
+       }
+
+       if (!shared_count && fence_excl) {
+               ret = dma_resv_test_signaled_single(fence_excl);
+               if (ret < 0)
+                       goto retry;
+       }
+
+       rcu_read_unlock();
+       return ret;
+}
+EXPORT_SYMBOL_GPL(dma_resv_test_signaled_rcu);
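
For a non-blocking check, the same question can be asked without waiting. A hedged sketch mirroring how the amdgpu eviction path later in this patch uses it (the helper name is illustrative):

    #include <linux/dma-resv.h>

    /* true when both the exclusive and all shared fences have signaled */
    static bool object_is_idle(struct dma_resv *obj)
    {
            return dma_resv_test_signaled_rcu(obj, true);
    }
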
 
+++ /dev/null
-/*
- * Copyright (C) 2012-2014 Canonical Ltd (Maarten Lankhorst)
- *
- * Based on bo.c which bears the following copyright notice,
- * but is dual licensed:
- *
- * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-/*
- * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
- */
-
-#include <linux/reservation.h>
-#include <linux/export.h>
-
-/**
- * DOC: Reservation Object Overview
- *
- * The reservation object provides a mechanism to manage shared and
- * exclusive fences associated with a buffer.  A reservation object
- * can have attached one exclusive fence (normally associated with
- * write operations) or N shared fences (read operations).  The RCU
- * mechanism is used to protect read access to fences from locked
- * write-side updates.
- */
-
-DEFINE_WD_CLASS(reservation_ww_class);
-EXPORT_SYMBOL(reservation_ww_class);
-
-/**
- * reservation_object_list_alloc - allocate fence list
- * @shared_max: number of fences we need space for
- *
- * Allocate a new reservation_object_list and make sure to correctly initialize
- * shared_max.
- */
-static struct reservation_object_list *
-reservation_object_list_alloc(unsigned int shared_max)
-{
-       struct reservation_object_list *list;
-
-       list = kmalloc(offsetof(typeof(*list), shared[shared_max]), GFP_KERNEL);
-       if (!list)
-               return NULL;
-
-       list->shared_max = (ksize(list) - offsetof(typeof(*list), shared)) /
-               sizeof(*list->shared);
-
-       return list;
-}
-
-/**
- * reservation_object_list_free - free fence list
- * @list: list to free
- *
- * Free a reservation_object_list and make sure to drop all references.
- */
-static void reservation_object_list_free(struct reservation_object_list *list)
-{
-       unsigned int i;
-
-       if (!list)
-               return;
-
-       for (i = 0; i < list->shared_count; ++i)
-               dma_fence_put(rcu_dereference_protected(list->shared[i], true));
-
-       kfree_rcu(list, rcu);
-}
-
-/**
- * reservation_object_init - initialize a reservation object
- * @obj: the reservation object
- */
-void reservation_object_init(struct reservation_object *obj)
-{
-       ww_mutex_init(&obj->lock, &reservation_ww_class);
-       RCU_INIT_POINTER(obj->fence, NULL);
-       RCU_INIT_POINTER(obj->fence_excl, NULL);
-}
-EXPORT_SYMBOL(reservation_object_init);
-
-/**
- * reservation_object_fini - destroys a reservation object
- * @obj: the reservation object
- */
-void reservation_object_fini(struct reservation_object *obj)
-{
-       struct reservation_object_list *fobj;
-       struct dma_fence *excl;
-
-       /*
-        * This object should be dead and all references must have
-        * been released to it, so no need to be protected with rcu.
-        */
-       excl = rcu_dereference_protected(obj->fence_excl, 1);
-       if (excl)
-               dma_fence_put(excl);
-
-       fobj = rcu_dereference_protected(obj->fence, 1);
-       reservation_object_list_free(fobj);
-       ww_mutex_destroy(&obj->lock);
-}
-EXPORT_SYMBOL(reservation_object_fini);
-
-/**
- * reservation_object_reserve_shared - Reserve space to add shared fences to
- * a reservation_object.
- * @obj: reservation object
- * @num_fences: number of fences we want to add
- *
- * Should be called before reservation_object_add_shared_fence().  Must
- * be called with obj->lock held.
- *
- * RETURNS
- * Zero for success, or -errno
- */
-int reservation_object_reserve_shared(struct reservation_object *obj,
-                                     unsigned int num_fences)
-{
-       struct reservation_object_list *old, *new;
-       unsigned int i, j, k, max;
-
-       reservation_object_assert_held(obj);
-
-       old = reservation_object_get_list(obj);
-
-       if (old && old->shared_max) {
-               if ((old->shared_count + num_fences) <= old->shared_max)
-                       return 0;
-               else
-                       max = max(old->shared_count + num_fences,
-                                 old->shared_max * 2);
-       } else {
-               max = 4;
-       }
-
-       new = reservation_object_list_alloc(max);
-       if (!new)
-               return -ENOMEM;
-
-       /*
-        * no need to bump fence refcounts, rcu_read access
-        * requires the use of kref_get_unless_zero, and the
-        * references from the old struct are carried over to
-        * the new.
-        */
-       for (i = 0, j = 0, k = max; i < (old ? old->shared_count : 0); ++i) {
-               struct dma_fence *fence;
-
-               fence = rcu_dereference_protected(old->shared[i],
-                                                 reservation_object_held(obj));
-               if (dma_fence_is_signaled(fence))
-                       RCU_INIT_POINTER(new->shared[--k], fence);
-               else
-                       RCU_INIT_POINTER(new->shared[j++], fence);
-       }
-       new->shared_count = j;
-
-       /*
-        * We are not changing the effective set of fences here so can
-        * merely update the pointer to the new array; both existing
-        * readers and new readers will see exactly the same set of
-        * active (unsignaled) shared fences. Individual fences and the
-        * old array are protected by RCU and so will not vanish under
-        * the gaze of the rcu_read_lock() readers.
-        */
-       rcu_assign_pointer(obj->fence, new);
-
-       if (!old)
-               return 0;
-
-       /* Drop the references to the signaled fences */
-       for (i = k; i < max; ++i) {
-               struct dma_fence *fence;
-
-               fence = rcu_dereference_protected(new->shared[i],
-                                                 reservation_object_held(obj));
-               dma_fence_put(fence);
-       }
-       kfree_rcu(old, rcu);
-
-       return 0;
-}
-EXPORT_SYMBOL(reservation_object_reserve_shared);
-
-/**
- * reservation_object_add_shared_fence - Add a fence to a shared slot
- * @obj: the reservation object
- * @fence: the shared fence to add
- *
- * Add a fence to a shared slot, obj->lock must be held, and
- * reservation_object_reserve_shared() has been called.
- */
-void reservation_object_add_shared_fence(struct reservation_object *obj,
-                                        struct dma_fence *fence)
-{
-       struct reservation_object_list *fobj;
-       struct dma_fence *old;
-       unsigned int i, count;
-
-       dma_fence_get(fence);
-
-       reservation_object_assert_held(obj);
-
-       fobj = reservation_object_get_list(obj);
-       count = fobj->shared_count;
-
-       for (i = 0; i < count; ++i) {
-
-               old = rcu_dereference_protected(fobj->shared[i],
-                                               reservation_object_held(obj));
-               if (old->context == fence->context ||
-                   dma_fence_is_signaled(old))
-                       goto replace;
-       }
-
-       BUG_ON(fobj->shared_count >= fobj->shared_max);
-       old = NULL;
-       count++;
-
-replace:
-       RCU_INIT_POINTER(fobj->shared[i], fence);
-       /* pointer update must be visible before we extend the shared_count */
-       smp_store_mb(fobj->shared_count, count);
-       dma_fence_put(old);
-}
-EXPORT_SYMBOL(reservation_object_add_shared_fence);
-
-/**
- * reservation_object_add_excl_fence - Add an exclusive fence.
- * @obj: the reservation object
- * @fence: the shared fence to add
- *
- * Add a fence to the exclusive slot.  The obj->lock must be held.
- */
-void reservation_object_add_excl_fence(struct reservation_object *obj,
-                                      struct dma_fence *fence)
-{
-       struct dma_fence *old_fence = reservation_object_get_excl(obj);
-       struct reservation_object_list *old;
-       u32 i = 0;
-
-       reservation_object_assert_held(obj);
-
-       old = reservation_object_get_list(obj);
-       if (old)
-               i = old->shared_count;
-
-       if (fence)
-               dma_fence_get(fence);
-
-       preempt_disable();
-       rcu_assign_pointer(obj->fence_excl, fence);
-       /* pointer update must be visible before we modify the shared_count */
-       if (old)
-               smp_store_mb(old->shared_count, 0);
-       preempt_enable();
-
-       /* inplace update, no shared fences */
-       while (i--)
-               dma_fence_put(rcu_dereference_protected(old->shared[i],
-                                               reservation_object_held(obj)));
-
-       dma_fence_put(old_fence);
-}
-EXPORT_SYMBOL(reservation_object_add_excl_fence);
-
-/**
-* reservation_object_copy_fences - Copy all fences from src to dst.
-* @dst: the destination reservation object
-* @src: the source reservation object
-*
-* Copy all fences from src to dst. dst-lock must be held.
-*/
-int reservation_object_copy_fences(struct reservation_object *dst,
-                                  struct reservation_object *src)
-{
-       struct reservation_object_list *src_list, *dst_list;
-       struct dma_fence *old, *new;
-       unsigned int i, shared_count;
-
-       reservation_object_assert_held(dst);
-
-       rcu_read_lock();
-
-retry:
-       reservation_object_fences(src, &new, &src_list, &shared_count);
-       if (shared_count) {
-               rcu_read_unlock();
-
-               dst_list = reservation_object_list_alloc(shared_count);
-               if (!dst_list)
-                       return -ENOMEM;
-
-               rcu_read_lock();
-               reservation_object_fences(src, &new, &src_list, &shared_count);
-               if (!src_list || shared_count > dst_list->shared_max) {
-                       kfree(dst_list);
-                       goto retry;
-               }
-
-               dst_list->shared_count = 0;
-               for (i = 0; i < shared_count; ++i) {
-                       struct dma_fence *fence;
-
-                       fence = rcu_dereference(src_list->shared[i]);
-                       if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
-                                    &fence->flags))
-                               continue;
-
-                       if (!dma_fence_get_rcu(fence)) {
-                               reservation_object_list_free(dst_list);
-                               goto retry;
-                       }
-
-                       if (dma_fence_is_signaled(fence)) {
-                               dma_fence_put(fence);
-                               continue;
-                       }
-
-                       rcu_assign_pointer(dst_list->shared[dst_list->shared_count++], fence);
-               }
-       } else {
-               dst_list = NULL;
-       }
-
-       if (new && !dma_fence_get_rcu(new)) {
-               reservation_object_list_free(dst_list);
-               goto retry;
-       }
-       rcu_read_unlock();
-
-       src_list = reservation_object_get_list(dst);
-       old = reservation_object_get_excl(dst);
-
-       preempt_disable();
-       rcu_assign_pointer(dst->fence_excl, new);
-       rcu_assign_pointer(dst->fence, dst_list);
-       preempt_enable();
-
-       reservation_object_list_free(src_list);
-       dma_fence_put(old);
-
-       return 0;
-}
-EXPORT_SYMBOL(reservation_object_copy_fences);
-
-/**
- * reservation_object_get_fences_rcu - Get an object's shared and exclusive
- * fences without update side lock held
- * @obj: the reservation object
- * @pfence_excl: the returned exclusive fence (or NULL)
- * @pshared_count: the number of shared fences returned
- * @pshared: the array of shared fence ptrs returned (array is krealloc'd to
- * the required size, and must be freed by caller)
- *
- * Retrieve all fences from the reservation object. If the pointer for the
- * exclusive fence is not specified the fence is put into the array of the
- * shared fences as well. Returns either zero or -ENOMEM.
- */
-int reservation_object_get_fences_rcu(struct reservation_object *obj,
-                                     struct dma_fence **pfence_excl,
-                                     unsigned *pshared_count,
-                                     struct dma_fence ***pshared)
-{
-       struct dma_fence **shared = NULL;
-       struct dma_fence *fence_excl;
-       unsigned int shared_count;
-       int ret = 1;
-
-       do {
-               struct reservation_object_list *fobj;
-               unsigned int i;
-               size_t sz = 0;
-
-               i = 0;
-
-               rcu_read_lock();
-               reservation_object_fences(obj, &fence_excl, &fobj,
-                                         &shared_count);
-
-               if (fence_excl && !dma_fence_get_rcu(fence_excl))
-                       goto unlock;
-
-               if (fobj)
-                       sz += sizeof(*shared) * fobj->shared_max;
-
-               if (!pfence_excl && fence_excl)
-                       sz += sizeof(*shared);
-
-               if (sz) {
-                       struct dma_fence **nshared;
-
-                       nshared = krealloc(shared, sz,
-                                          GFP_NOWAIT | __GFP_NOWARN);
-                       if (!nshared) {
-                               rcu_read_unlock();
-
-                               dma_fence_put(fence_excl);
-                               fence_excl = NULL;
-
-                               nshared = krealloc(shared, sz, GFP_KERNEL);
-                               if (nshared) {
-                                       shared = nshared;
-                                       continue;
-                               }
-
-                               ret = -ENOMEM;
-                               break;
-                       }
-                       shared = nshared;
-                       for (i = 0; i < shared_count; ++i) {
-                               shared[i] = rcu_dereference(fobj->shared[i]);
-                               if (!dma_fence_get_rcu(shared[i]))
-                                       break;
-                       }
-               }
-
-               if (i != shared_count) {
-                       while (i--)
-                               dma_fence_put(shared[i]);
-                       dma_fence_put(fence_excl);
-                       goto unlock;
-               }
-
-               ret = 0;
-unlock:
-               rcu_read_unlock();
-       } while (ret);
-
-       if (pfence_excl)
-               *pfence_excl = fence_excl;
-       else if (fence_excl)
-               shared[++shared_count] = fence_excl;
-
-       if (!shared_count) {
-               kfree(shared);
-               shared = NULL;
-       }
-
-       *pshared_count = shared_count;
-       *pshared = shared;
-       return ret;
-}
-EXPORT_SYMBOL_GPL(reservation_object_get_fences_rcu);
-
-/**
- * reservation_object_wait_timeout_rcu - Wait on reservation's objects
- * shared and/or exclusive fences.
- * @obj: the reservation object
- * @wait_all: if true, wait on all fences, else wait on just exclusive fence
- * @intr: if true, do interruptible wait
- * @timeout: timeout value in jiffies or zero to return immediately
- *
- * RETURNS
- * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
- * greater than zer on success.
- */
-long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
-                                        bool wait_all, bool intr,
-                                        unsigned long timeout)
-{
-       struct reservation_object_list *fobj;
-       struct dma_fence *fence;
-       unsigned shared_count;
-       long ret = timeout ? timeout : 1;
-       int i;
-
-retry:
-       rcu_read_lock();
-       i = -1;
-
-       reservation_object_fences(obj, &fence, &fobj, &shared_count);
-       if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
-               if (!dma_fence_get_rcu(fence))
-                       goto unlock_retry;
-
-               if (dma_fence_is_signaled(fence)) {
-                       dma_fence_put(fence);
-                       fence = NULL;
-               }
-
-       } else {
-               fence = NULL;
-       }
-
-       if (wait_all) {
-               for (i = 0; !fence && i < shared_count; ++i) {
-                       struct dma_fence *lfence = rcu_dereference(fobj->shared[i]);
-
-                       if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
-                                    &lfence->flags))
-                               continue;
-
-                       if (!dma_fence_get_rcu(lfence))
-                               goto unlock_retry;
-
-                       if (dma_fence_is_signaled(lfence)) {
-                               dma_fence_put(lfence);
-                               continue;
-                       }
-
-                       fence = lfence;
-                       break;
-               }
-       }
-
-       rcu_read_unlock();
-       if (fence) {
-               ret = dma_fence_wait_timeout(fence, intr, ret);
-               dma_fence_put(fence);
-               if (ret > 0 && wait_all && (i + 1 < shared_count))
-                       goto retry;
-       }
-       return ret;
-
-unlock_retry:
-       rcu_read_unlock();
-       goto retry;
-}
-EXPORT_SYMBOL_GPL(reservation_object_wait_timeout_rcu);
-
-
-static inline int
-reservation_object_test_signaled_single(struct dma_fence *passed_fence)
-{
-       struct dma_fence *fence, *lfence = passed_fence;
-       int ret = 1;
-
-       if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) {
-               fence = dma_fence_get_rcu(lfence);
-               if (!fence)
-                       return -1;
-
-               ret = !!dma_fence_is_signaled(fence);
-               dma_fence_put(fence);
-       }
-       return ret;
-}
-
-/**
- * reservation_object_test_signaled_rcu - Test if a reservation object's
- * fences have been signaled.
- * @obj: the reservation object
- * @test_all: if true, test all fences, otherwise only test the exclusive
- * fence
- *
- * RETURNS
- * true if all fences signaled, else false
- */
-bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
-                                         bool test_all)
-{
-       struct reservation_object_list *fobj;
-       struct dma_fence *fence_excl;
-       unsigned shared_count;
-       int ret;
-
-       rcu_read_lock();
-retry:
-       ret = true;
-
-       reservation_object_fences(obj, &fence_excl, &fobj, &shared_count);
-       if (test_all) {
-               unsigned i;
-
-               for (i = 0; i < shared_count; ++i) {
-                       struct dma_fence *fence = rcu_dereference(fobj->shared[i]);
-
-                       ret = reservation_object_test_signaled_single(fence);
-                       if (ret < 0)
-                               goto retry;
-                       else if (!ret)
-                               break;
-               }
-       }
-
-       if (!shared_count && fence_excl) {
-               ret = reservation_object_test_signaled_single(fence_excl);
-               if (ret < 0)
-                       goto retry;
-       }
-
-       rcu_read_unlock();
-       return ret;
-}
-EXPORT_SYMBOL_GPL(reservation_object_test_signaled_rcu);
 
 static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
                                        struct amdgpu_amdkfd_fence *ef)
 {
-       struct reservation_object *resv = bo->tbo.base.resv;
-       struct reservation_object_list *old, *new;
+       struct dma_resv *resv = bo->tbo.base.resv;
+       struct dma_resv_list *old, *new;
        unsigned int i, j, k;
 
        if (!ef)
                return -EINVAL;
 
-       old = reservation_object_get_list(resv);
+       old = dma_resv_get_list(resv);
        if (!old)
                return 0;
 
                struct dma_fence *f;
 
                f = rcu_dereference_protected(old->shared[i],
-                                             reservation_object_held(resv));
+                                             dma_resv_held(resv));
 
                if (f->context == ef->base.context)
                        RCU_INIT_POINTER(new->shared[--j], f);
                struct dma_fence *f;
 
                f = rcu_dereference_protected(new->shared[i],
-                                             reservation_object_held(resv));
+                                             dma_resv_held(resv));
                dma_fence_put(f);
        }
        kfree_rcu(old, rcu);
                                  AMDGPU_FENCE_OWNER_KFD, false);
        if (ret)
                goto wait_pd_fail;
-       ret = reservation_object_reserve_shared(vm->root.base.bo->tbo.base.resv, 1);
+       ret = dma_resv_reserve_shared(vm->root.base.bo->tbo.base.resv, 1);
        if (ret)
                goto reserve_shared_fail;
        amdgpu_bo_fence(vm->root.base.bo,
         * Add process eviction fence to bo so they can
         * evict each other.
         */
-       ret = reservation_object_reserve_shared(gws_bo->tbo.base.resv, 1);
+       ret = dma_resv_reserve_shared(gws_bo->tbo.base.resv, 1);
        if (ret)
                goto reserve_shared_fail;
        amdgpu_bo_fence(gws_bo, &process_info->eviction_fence->base, true);
 
 
        list_for_each_entry(e, &p->validated, tv.head) {
                struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
-               struct reservation_object *resv = bo->tbo.base.resv;
+               struct dma_resv *resv = bo->tbo.base.resv;
 
                r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp,
                                     amdgpu_bo_explicit_sync(bo));
        *map = mapping;
 
        /* Double check that the BO is reserved by this CS */
-       if (reservation_object_locking_ctx((*bo)->tbo.base.resv) != &parser->ticket)
+       if (dma_resv_locking_ctx((*bo)->tbo.base.resv) != &parser->ticket)
                return -EINVAL;
 
        if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
 
                goto unpin;
        }
 
-       r = reservation_object_get_fences_rcu(new_abo->tbo.base.resv, &work->excl,
+       r = dma_resv_get_fences_rcu(new_abo->tbo.base.resv, &work->excl,
                                              &work->shared_count,
                                              &work->shared);
        if (unlikely(r != 0)) {
 
 }
 
 static int
-__reservation_object_make_exclusive(struct reservation_object *obj)
+__dma_resv_make_exclusive(struct dma_resv *obj)
 {
        struct dma_fence **fences;
        unsigned int count;
        int r;
 
-       if (!reservation_object_get_list(obj)) /* no shared fences to convert */
+       if (!dma_resv_get_list(obj)) /* no shared fences to convert */
                return 0;
 
-       r = reservation_object_get_fences_rcu(obj, NULL, &count, &fences);
+       r = dma_resv_get_fences_rcu(obj, NULL, &count, &fences);
        if (r)
                return r;
 
        if (count == 0) {
                /* Now that was unexpected. */
        } else if (count == 1) {
-               reservation_object_add_excl_fence(obj, fences[0]);
+               dma_resv_add_excl_fence(obj, fences[0]);
                dma_fence_put(fences[0]);
                kfree(fences);
        } else {
                if (!array)
                        goto err_fences_put;
 
-               reservation_object_add_excl_fence(obj, &array->base);
+               dma_resv_add_excl_fence(obj, &array->base);
                dma_fence_put(&array->base);
        }
 
                 * fences on the reservation object into a single exclusive
                 * fence.
                 */
-               r = __reservation_object_make_exclusive(bo->tbo.base.resv);
+               r = __dma_resv_make_exclusive(bo->tbo.base.resv);
                if (r)
                        goto error_unreserve;
        }
                                 struct dma_buf_attachment *attach,
                                 struct sg_table *sg)
 {
-       struct reservation_object *resv = attach->dmabuf->resv;
+       struct dma_resv *resv = attach->dmabuf->resv;
        struct amdgpu_device *adev = dev->dev_private;
        struct amdgpu_bo *bo;
        struct amdgpu_bo_param bp;
        bp.flags = 0;
        bp.type = ttm_bo_type_sg;
        bp.resv = resv;
-       reservation_object_lock(resv, NULL);
+       dma_resv_lock(resv, NULL);
        ret = amdgpu_bo_create(adev, &bp, &bo);
        if (ret)
                goto error;
        if (attach->dmabuf->ops != &amdgpu_dmabuf_ops)
                bo->prime_shared_count = 1;
 
-       reservation_object_unlock(resv);
+       dma_resv_unlock(resv);
        return &bo->tbo.base;
 
 error:
-       reservation_object_unlock(resv);
+       dma_resv_unlock(resv);
        return ERR_PTR(ret);
 }
 
 
 int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
                             int alignment, u32 initial_domain,
                             u64 flags, enum ttm_bo_type type,
-                            struct reservation_object *resv,
+                            struct dma_resv *resv,
                             struct drm_gem_object **obj)
 {
        struct amdgpu_bo *bo;
        union drm_amdgpu_gem_create *args = data;
        uint64_t flags = args->in.domain_flags;
        uint64_t size = args->in.bo_size;
-       struct reservation_object *resv = NULL;
+       struct dma_resv *resv = NULL;
        struct drm_gem_object *gobj;
        uint32_t handle;
        int r;
                return -ENOENT;
        }
        robj = gem_to_amdgpu_bo(gobj);
-       ret = reservation_object_wait_timeout_rcu(robj->tbo.base.resv, true, true,
+       ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true,
                                                  timeout);
 
        /* ret == 0 means not signaled,
 
 int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
                             int alignment, u32 initial_domain,
                             u64 flags, enum ttm_bo_type type,
-                            struct reservation_object *resv,
+                            struct dma_resv *resv,
                             struct drm_gem_object **obj);
 
 int amdgpu_mode_dumb_create(struct drm_file *file_priv,
 
  *
  * Free the pasid only after all the fences in resv are signaled.
  */
-void amdgpu_pasid_free_delayed(struct reservation_object *resv,
+void amdgpu_pasid_free_delayed(struct dma_resv *resv,
                               unsigned int pasid)
 {
        struct dma_fence *fence, **fences;
        unsigned count;
        int r;
 
-       r = reservation_object_get_fences_rcu(resv, NULL, &count, &fences);
+       r = dma_resv_get_fences_rcu(resv, NULL, &count, &fences);
        if (r)
                goto fallback;
 
        /* Not enough memory for the delayed delete, as last resort
         * block for all the fences to complete.
         */
-       reservation_object_wait_timeout_rcu(resv, true, false,
+       dma_resv_wait_timeout_rcu(resv, true, false,
                                            MAX_SCHEDULE_TIMEOUT);
        amdgpu_pasid_free(pasid);
 }
 
 
 int amdgpu_pasid_alloc(unsigned int bits);
 void amdgpu_pasid_free(unsigned int pasid);
-void amdgpu_pasid_free_delayed(struct reservation_object *resv,
+void amdgpu_pasid_free_delayed(struct dma_resv *resv,
                               unsigned int pasid);
 
 bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
 
                if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, end))
                        continue;
 
-               r = reservation_object_wait_timeout_rcu(bo->tbo.base.resv,
+               r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv,
                        true, false, MAX_SCHEDULE_TIMEOUT);
                if (r <= 0)
                        DRM_ERROR("(%ld) failed to wait for user bo\n", r);
 
 
 fail_unreserve:
        if (!bp->resv)
-               reservation_object_unlock(bo->tbo.base.resv);
+               dma_resv_unlock(bo->tbo.base.resv);
        amdgpu_bo_unref(&bo);
        return r;
 }
 
        if ((flags & AMDGPU_GEM_CREATE_SHADOW) && !(adev->flags & AMD_IS_APU)) {
                if (!bp->resv)
-                       WARN_ON(reservation_object_lock((*bo_ptr)->tbo.base.resv,
+                       WARN_ON(dma_resv_lock((*bo_ptr)->tbo.base.resv,
                                                        NULL));
 
                r = amdgpu_bo_create_shadow(adev, bp->size, *bo_ptr);
 
                if (!bp->resv)
-                       reservation_object_unlock((*bo_ptr)->tbo.base.resv);
+                       dma_resv_unlock((*bo_ptr)->tbo.base.resv);
 
                if (r)
                        amdgpu_bo_unref(bo_ptr);
                return 0;
        }
 
-       r = reservation_object_wait_timeout_rcu(bo->tbo.base.resv, false, false,
+       r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, false, false,
                                                MAX_SCHEDULE_TIMEOUT);
        if (r < 0)
                return r;
  */
 void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
 {
-       reservation_object_assert_held(bo->tbo.base.resv);
+       dma_resv_assert_held(bo->tbo.base.resv);
 
        if (tiling_flags)
                *tiling_flags = bo->tiling_flags;
 void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
                     bool shared)
 {
-       struct reservation_object *resv = bo->tbo.base.resv;
+       struct dma_resv *resv = bo->tbo.base.resv;
 
        if (shared)
-               reservation_object_add_shared_fence(resv, fence);
+               dma_resv_add_shared_fence(resv, fence);
        else
-               reservation_object_add_excl_fence(resv, fence);
+               dma_resv_add_excl_fence(resv, fence);
 }
 
 /**
 u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
 {
        WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
-       WARN_ON_ONCE(!reservation_object_is_locked(bo->tbo.base.resv) &&
+       WARN_ON_ONCE(!dma_resv_is_locked(bo->tbo.base.resv) &&
                     !bo->pin_count && bo->tbo.type != ttm_bo_type_kernel);
        WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
        WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
 
        u32                             preferred_domain;
        u64                             flags;
        enum ttm_bo_type                type;
-       struct reservation_object       *resv;
+       struct dma_resv *resv;
 };
 
 /* bo virtual addresses in a vm */
 
  */
 int amdgpu_sync_resv(struct amdgpu_device *adev,
                     struct amdgpu_sync *sync,
-                    struct reservation_object *resv,
+                    struct dma_resv *resv,
                     void *owner, bool explicit_sync)
 {
-       struct reservation_object_list *flist;
+       struct dma_resv_list *flist;
        struct dma_fence *f;
        void *fence_owner;
        unsigned i;
                return -EINVAL;
 
        /* always sync to the exclusive fence */
-       f = reservation_object_get_excl(resv);
+       f = dma_resv_get_excl(resv);
        r = amdgpu_sync_fence(adev, sync, f, false);
 
-       flist = reservation_object_get_list(resv);
+       flist = dma_resv_get_list(resv);
        if (!flist || r)
                return r;
 
        for (i = 0; i < flist->shared_count; ++i) {
                f = rcu_dereference_protected(flist->shared[i],
-                                             reservation_object_held(resv));
+                                             dma_resv_held(resv));
                /* We only want to trigger KFD eviction fences on
                 * evict or move jobs. Skip KFD fences otherwise.
                 */
 
 #include <linux/hashtable.h>
 
 struct dma_fence;
-struct reservation_object;
+struct dma_resv;
 struct amdgpu_device;
 struct amdgpu_ring;
 
                      struct dma_fence *f, bool explicit);
 int amdgpu_sync_resv(struct amdgpu_device *adev,
                     struct amdgpu_sync *sync,
-                    struct reservation_object *resv,
+                    struct dma_resv *resv,
                     void *owner,
                     bool explicit_sync);
 struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
 
                               struct amdgpu_copy_mem *src,
                               struct amdgpu_copy_mem *dst,
                               uint64_t size,
-                              struct reservation_object *resv,
+                              struct dma_resv *resv,
                               struct dma_fence **f)
 {
        struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
 {
        unsigned long num_pages = bo->mem.num_pages;
        struct drm_mm_node *node = bo->mem.mm_node;
-       struct reservation_object_list *flist;
+       struct dma_resv_list *flist;
        struct dma_fence *f;
        int i;
 
         * cleanly handle page faults.
         */
        if (bo->type == ttm_bo_type_kernel &&
-           !reservation_object_test_signaled_rcu(bo->base.resv, true))
+           !dma_resv_test_signaled_rcu(bo->base.resv, true))
                return false;
 
        /* If bo is a KFD BO, check if the bo belongs to the current process.
         * If true, then return false as any KFD process needs all its BOs to
         * be resident to run successfully
         */
-       flist = reservation_object_get_list(bo->base.resv);
+       flist = dma_resv_get_list(bo->base.resv);
        if (flist) {
                for (i = 0; i < flist->shared_count; ++i) {
                        f = rcu_dereference_protected(flist->shared[i],
-                               reservation_object_held(bo->base.resv));
+                               dma_resv_held(bo->base.resv));
                        if (amdkfd_fence_check_mm(f, current->mm))
                                return false;
                }
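
The loop above is the recurring locked walk over a reservation object's shared fence list; the same rcu_dereference_protected()/dma_resv_held() pairing appears again in msm, nouveau, radeon and TTM below. A minimal sketch, with a hypothetical callback in place of the driver-specific check:

#include <linux/dma-resv.h>

/* Illustrative only: visit each shared fence while the resv lock is held. */
static void example_for_each_shared(struct dma_resv *resv,
                                    void (*visit)(struct dma_fence *fence))
{
        struct dma_resv_list *flist = dma_resv_get_list(resv);
        struct dma_fence *fence;
        unsigned int i;

        dma_resv_assert_held(resv);

        for (i = 0; flist && i < flist->shared_count; ++i) {
                fence = rcu_dereference_protected(flist->shared[i],
                                                  dma_resv_held(resv));
                visit(fence);
        }
}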
 
 int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
                       uint64_t dst_offset, uint32_t byte_count,
-                      struct reservation_object *resv,
+                      struct dma_resv *resv,
                       struct dma_fence **fence, bool direct_submit,
                       bool vm_needs_flush)
 {
 
 int amdgpu_fill_buffer(struct amdgpu_bo *bo,
                       uint32_t src_data,
-                      struct reservation_object *resv,
+                      struct dma_resv *resv,
                       struct dma_fence **fence)
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 
 
 int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
                       uint64_t dst_offset, uint32_t byte_count,
-                      struct reservation_object *resv,
+                      struct dma_resv *resv,
                       struct dma_fence **fence, bool direct_submit,
                       bool vm_needs_flush);
 int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
                               struct amdgpu_copy_mem *src,
                               struct amdgpu_copy_mem *dst,
                               uint64_t size,
-                              struct reservation_object *resv,
+                              struct dma_resv *resv,
                               struct dma_fence **f);
 int amdgpu_fill_buffer(struct amdgpu_bo *bo,
                        uint32_t src_data,
-                       struct reservation_object *resv,
+                       struct dma_resv *resv,
                        struct dma_fence **fence);
 
 int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
 
        ib->length_dw = 16;
 
        if (direct) {
-               r = reservation_object_wait_timeout_rcu(bo->tbo.base.resv,
+               r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv,
                                                        true, false,
                                                        msecs_to_jiffies(10));
                if (r == 0)
 
                        ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
                        pages_addr = ttm->dma_address;
                }
-               exclusive = reservation_object_get_excl(bo->tbo.base.resv);
+               exclusive = dma_resv_get_excl(bo->tbo.base.resv);
        }
 
        if (bo) {
  */
 static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 {
-       struct reservation_object *resv = vm->root.base.bo->tbo.base.resv;
+       struct dma_resv *resv = vm->root.base.bo->tbo.base.resv;
        struct dma_fence *excl, **shared;
        unsigned i, shared_count;
        int r;
 
-       r = reservation_object_get_fences_rcu(resv, &excl,
+       r = dma_resv_get_fences_rcu(resv, &excl,
                                              &shared_count, &shared);
        if (r) {
                /* Not enough memory to grab the fence list, as last resort
                 * block for all the fences to complete.
                 */
-               reservation_object_wait_timeout_rcu(resv, true, false,
+               dma_resv_wait_timeout_rcu(resv, true, false,
                                                    MAX_SCHEDULE_TIMEOUT);
                return;
        }
                           struct amdgpu_vm *vm)
 {
        struct amdgpu_bo_va *bo_va, *tmp;
-       struct reservation_object *resv;
+       struct dma_resv *resv;
        bool clear;
        int r;
 
                spin_unlock(&vm->invalidated_lock);
 
                /* Try to reserve the BO to avoid clearing its ptes */
-               if (!amdgpu_vm_debug && reservation_object_trylock(resv))
+               if (!amdgpu_vm_debug && dma_resv_trylock(resv))
                        clear = false;
                /* Somebody else is using the BO right now */
                else
                        return r;
 
                if (!clear)
-                       reservation_object_unlock(resv);
+                       dma_resv_unlock(resv);
                spin_lock(&vm->invalidated_lock);
        }
        spin_unlock(&vm->invalidated_lock);
                        struct amdgpu_bo *bo;
 
                        bo = mapping->bo_va->base.bo;
-                       if (reservation_object_locking_ctx(bo->tbo.base.resv) !=
+                       if (dma_resv_locking_ctx(bo->tbo.base.resv) !=
                            ticket)
                                continue;
                }
  */
 long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
 {
-       return reservation_object_wait_timeout_rcu(vm->root.base.bo->tbo.base.resv,
+       return dma_resv_wait_timeout_rcu(vm->root.base.bo->tbo.base.resv,
                                                   true, true, timeout);
 }
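
amdgpu_vm_wait_idle() above is the simplest consumer of the renamed wait helper. For reference, a hedged sketch of the call and its return convention (greater than zero on success, zero on timeout, negative on error), with illustrative naming:

#include <linux/dma-resv.h>

/* Wait for all fences (exclusive and shared) on @resv, interruptibly. */
static long example_wait_idle(struct dma_resv *resv, unsigned long timeout)
{
        return dma_resv_wait_timeout_rcu(resv, true /* wait_all */,
                                         true /* intr */, timeout);
}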
 
        if (r)
                goto error_free_root;
 
-       r = reservation_object_reserve_shared(root->tbo.base.resv, 1);
+       r = dma_resv_reserve_shared(root->tbo.base.resv, 1);
        if (r)
                goto error_unreserve;
 
 
                 * deadlock during GPU reset when this fence will not signal
                 * but we hold reservation lock for the BO.
                 */
-               r = reservation_object_wait_timeout_rcu(abo->tbo.base.resv, true,
+               r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
                                                        false,
                                                        msecs_to_jiffies(5000));
                if (unlikely(r <= 0))
 
  * As a contrast, with implicit fencing the kernel keeps track of any
  * ongoing rendering, and automatically ensures that the atomic update waits
  * for any pending rendering to complete. For shared buffers represented with
- * a &struct dma_buf this is tracked in &struct reservation_object.
+ * a &struct dma_buf this is tracked in &struct dma_resv.
  * Implicit syncing is how Linux traditionally worked (e.g. DRI2/3 on X.org),
  * whereas explicit fencing is what Android wants.
  *
 
        kref_init(&obj->refcount);
        obj->handle_count = 0;
        obj->size = size;
-       reservation_object_init(&obj->_resv);
+       dma_resv_init(&obj->_resv);
        if (!obj->resv)
                obj->resv = &obj->_resv;
 
 EXPORT_SYMBOL(drm_gem_object_lookup);
 
 /**
- * drm_gem_reservation_object_wait - Wait on GEM object's reservation's objects
- * shared and/or exclusive fences.
- * @filep: DRM file private date
+ * drm_gem_dma_resv_wait - Wait on the shared and/or exclusive fences of a
+ * GEM object's reservation object.
+ * @filep: DRM file private data
  * @handle: userspace handle
  * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
  * greater than 0 on success.
  */
-long drm_gem_reservation_object_wait(struct drm_file *filep, u32 handle,
+long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
                                    bool wait_all, unsigned long timeout)
 {
        long ret;
                return -EINVAL;
        }
 
-       ret = reservation_object_wait_timeout_rcu(obj->resv, wait_all,
+       ret = dma_resv_wait_timeout_rcu(obj->resv, wait_all,
                                                  true, timeout);
        if (ret == 0)
                ret = -ETIME;
 
        return ret;
 }
-EXPORT_SYMBOL(drm_gem_reservation_object_wait);
+EXPORT_SYMBOL(drm_gem_dma_resv_wait);
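
Drivers typically call the renamed helper from their wait ioctls, as lima does further down. A hypothetical wrapper (the function and parameter names are illustrative, not from any in-tree driver):

#include <drm/drm_gem.h>
#include <drm/drm_utils.h>

static int example_gem_wait(struct drm_file *file, u32 handle,
                            bool wait_all, s64 timeout_ns)
{
        long ret = drm_gem_dma_resv_wait(file, handle, wait_all,
                                         drm_timeout_abs_to_jiffies(timeout_ns));

        if (ret == 0)
                return -ETIMEDOUT;      /* cf. the lima hunk below */

        return ret < 0 ? ret : 0;
}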
 
 /**
  * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
        if (obj->filp)
                fput(obj->filp);
 
-       reservation_object_fini(&obj->_resv);
+       dma_resv_fini(&obj->_resv);
        drm_gem_free_mmap_offset(obj);
 }
 EXPORT_SYMBOL(drm_gem_object_release);
        if (contended != -1) {
                struct drm_gem_object *obj = objs[contended];
 
-               ret = reservation_object_lock_slow_interruptible(obj->resv,
+               ret = dma_resv_lock_slow_interruptible(obj->resv,
                                                                 acquire_ctx);
                if (ret) {
                        ww_acquire_done(acquire_ctx);
                if (i == contended)
                        continue;
 
-               ret = reservation_object_lock_interruptible(objs[i]->resv,
+               ret = dma_resv_lock_interruptible(objs[i]->resv,
                                                            acquire_ctx);
                if (ret) {
                        int j;
 
                        for (j = 0; j < i; j++)
-                               reservation_object_unlock(objs[j]->resv);
+                               dma_resv_unlock(objs[j]->resv);
 
                        if (contended != -1 && contended >= i)
-                               reservation_object_unlock(objs[contended]->resv);
+                               dma_resv_unlock(objs[contended]->resv);
 
                        if (ret == -EDEADLK) {
                                contended = i;
        int i;
 
        for (i = 0; i < count; i++)
-               reservation_object_unlock(objs[i]->resv);
+               dma_resv_unlock(objs[i]->resv);
 
        ww_acquire_fini(acquire_ctx);
 }
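
The two hunks above cover drm_gem_lock_reservations() and drm_gem_unlock_reservations(), which wrap the ww-mutex backoff dance around the renamed lock primitives. A hedged sketch of a typical caller (names illustrative, error handling trimmed):

#include <drm/drm_gem.h>
#include <linux/dma-resv.h>

static int example_attach_excl_fence(struct drm_gem_object **objs, int count,
                                     struct dma_fence *fence)
{
        struct ww_acquire_ctx ctx;
        int i, ret;

        ret = drm_gem_lock_reservations(objs, count, &ctx);
        if (ret)
                return ret;

        for (i = 0; i < count; i++)
                dma_resv_add_excl_fence(objs[i]->resv, fence);

        drm_gem_unlock_reservations(objs, count, &ctx);
        return 0;
}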
 
        if (!write) {
                struct dma_fence *fence =
-                       reservation_object_get_excl_rcu(obj->resv);
+                       dma_resv_get_excl_rcu(obj->resv);
 
                return drm_gem_fence_array_add(fence_array, fence);
        }
 
-       ret = reservation_object_get_fences_rcu(obj->resv, NULL,
+       ret = dma_resv_get_fences_rcu(obj->resv, NULL,
                                                &fence_count, &fences);
        if (ret || !fence_count)
                return ret;
 
 
 #include <linux/dma-buf.h>
 #include <linux/dma-fence.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 #include <linux/slab.h>
 
 #include <drm/drm_atomic.h>
                return 0;
 
        obj = drm_gem_fb_get_obj(state->fb, 0);
-       fence = reservation_object_get_excl_rcu(obj->resv);
+       fence = dma_resv_get_excl_rcu(obj->resv);
        drm_atomic_set_fence_for_plane(state, fence);
 
        return 0;
 
        }
 
        if (op & ETNA_PREP_NOSYNC) {
-               if (!reservation_object_test_signaled_rcu(obj->resv,
+               if (!dma_resv_test_signaled_rcu(obj->resv,
                                                          write))
                        return -EBUSY;
        } else {
                unsigned long remain = etnaviv_timeout_to_jiffies(timeout);
 
-               ret = reservation_object_wait_timeout_rcu(obj->resv,
+               ret = dma_resv_wait_timeout_rcu(obj->resv,
                                                          write, true, remain);
                if (ret <= 0)
                        return ret == 0 ? -ETIMEDOUT : ret;
 static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 {
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
-       struct reservation_object *robj = obj->resv;
-       struct reservation_object_list *fobj;
+       struct dma_resv *robj = obj->resv;
+       struct dma_resv_list *fobj;
        struct dma_fence *fence;
        unsigned long off = drm_vma_node_start(&obj->vma_node);
 
 
 #ifndef __ETNAVIV_GEM_H__
 #define __ETNAVIV_GEM_H__
 
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 #include "etnaviv_cmdbuf.h"
 #include "etnaviv_drv.h"
 
 
  */
 
 #include <linux/dma-fence-array.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 #include <linux/sync_file.h>
 #include "etnaviv_cmdbuf.h"
 #include "etnaviv_drv.h"
 
        for (i = 0; i < submit->nr_bos; i++) {
                struct etnaviv_gem_submit_bo *bo = &submit->bos[i];
-               struct reservation_object *robj = bo->obj->base.resv;
+               struct dma_resv *robj = bo->obj->base.resv;
 
                if (!(bo->flags & ETNA_SUBMIT_BO_WRITE)) {
-                       ret = reservation_object_reserve_shared(robj, 1);
+                       ret = dma_resv_reserve_shared(robj, 1);
                        if (ret)
                                return ret;
                }
                        continue;
 
                if (bo->flags & ETNA_SUBMIT_BO_WRITE) {
-                       ret = reservation_object_get_fences_rcu(robj, &bo->excl,
+                       ret = dma_resv_get_fences_rcu(robj, &bo->excl,
                                                                &bo->nr_shared,
                                                                &bo->shared);
                        if (ret)
                                return ret;
                } else {
-                       bo->excl = reservation_object_get_excl_rcu(robj);
+                       bo->excl = dma_resv_get_excl_rcu(robj);
                }
 
        }
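
The etnaviv submit path above illustrates the usual implicit-sync rule: writers must wait for every fence on the object, readers only for the exclusive one. A minimal sketch of that rule with illustrative names:

#include <linux/dma-resv.h>

static int example_get_dependencies(struct dma_resv *resv, bool write,
                                    struct dma_fence **excl,
                                    unsigned int *nr_shared,
                                    struct dma_fence ***shared)
{
        if (write)
                return dma_resv_get_fences_rcu(resv, excl, nr_shared, shared);

        *excl = dma_resv_get_excl_rcu(resv);
        *nr_shared = 0;
        *shared = NULL;
        return 0;
}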
                struct drm_gem_object *obj = &submit->bos[i].obj->base;
 
                if (submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE)
-                       reservation_object_add_excl_fence(obj->resv,
+                       dma_resv_add_excl_fence(obj->resv,
                                                          submit->out_fence);
                else
-                       reservation_object_add_shared_fence(obj->resv,
+                       dma_resv_add_shared_fence(obj->resv,
                                                            submit->out_fence);
 
                submit_unlock_object(submit, i);
 
 #include <linux/intel-iommu.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 #include <linux/slab.h>
 #include <linux/vgaarb.h>
 
                if (ret < 0)
                        return ret;
 
-               fence = reservation_object_get_excl_rcu(obj->base.resv);
+               fence = dma_resv_get_excl_rcu(obj->base.resv);
                if (fence) {
                        add_rps_boost_after_vblank(new_state->crtc, fence);
                        dma_fence_put(fence);
 
 {
        struct drm_i915_gem_busy *args = data;
        struct drm_i915_gem_object *obj;
-       struct reservation_object_list *list;
+       struct dma_resv_list *list;
        unsigned int i, shared_count;
        struct dma_fence *excl;
        int err;
         * Alternatively, we can trade that extra information on read/write
         * activity with
         *      args->busy =
-        *              !reservation_object_test_signaled_rcu(obj->resv, true);
+        *              !dma_resv_test_signaled_rcu(obj->resv, true);
         * to report the overall busyness. This is what the wait-ioctl does.
         *
         */
-       reservation_object_fences(obj->base.resv, &excl, &list, &shared_count);
+       dma_resv_fences(obj->base.resv, &excl, &list, &shared_count);
 
        /* Translate the exclusive fence to the READ *and* WRITE engine */
        args->busy = busy_check_writer(excl);
 
                                                true, I915_FENCE_TIMEOUT,
                                                I915_FENCE_GFP);
 
-               reservation_object_add_excl_fence(obj->base.resv,
+               dma_resv_add_excl_fence(obj->base.resv,
                                                  &clflush->dma);
 
                i915_sw_fence_commit(&clflush->wait);
 
        if (err < 0) {
                dma_fence_set_error(&work->dma, err);
        } else {
-               reservation_object_add_excl_fence(obj->base.resv, &work->dma);
+               dma_resv_add_excl_fence(obj->base.resv, &work->dma);
                err = 0;
        }
        i915_gem_object_unlock(obj);
 
 
 #include <linux/dma-buf.h>
 #include <linux/highmem.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 
 #include "i915_drv.h"
 #include "i915_gem_object.h"
 
  */
 
 #include <linux/intel-iommu.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 #include <linux/sync_file.h>
 #include <linux/uaccess.h>
 
                goto skip_request;
 
        i915_vma_lock(batch);
-       GEM_BUG_ON(!reservation_object_test_signaled_rcu(batch->resv, true));
+       GEM_BUG_ON(!dma_resv_test_signaled_rcu(batch->resv, true));
        err = i915_vma_move_to_active(batch, rq, 0);
        i915_vma_unlock(batch);
        if (err)
 
        if (!eb->reloc_cache.vaddr &&
            (DBG_FORCE_RELOC == FORCE_GPU_RELOC ||
-            !reservation_object_test_signaled_rcu(vma->resv, true))) {
+            !dma_resv_test_signaled_rcu(vma->resv, true))) {
                const unsigned int gen = eb->reloc_cache.gen;
                unsigned int len;
                u32 *batch;
 
                                            I915_FENCE_GFP) < 0)
                goto err;
 
-       reservation_object_add_excl_fence(obj->base.resv, &stub->dma);
+       dma_resv_add_excl_fence(obj->base.resv, &stub->dma);
 
        return &stub->dma;
 
 
        __drm_gem_object_put(&obj->base);
 }
 
-#define assert_object_held(obj) reservation_object_assert_held((obj)->base.resv)
+#define assert_object_held(obj) dma_resv_assert_held((obj)->base.resv)
 
 static inline void i915_gem_object_lock(struct drm_i915_gem_object *obj)
 {
-       reservation_object_lock(obj->base.resv, NULL);
+       dma_resv_lock(obj->base.resv, NULL);
 }
 
 static inline int
 i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj)
 {
-       return reservation_object_lock_interruptible(obj->base.resv, NULL);
+       return dma_resv_lock_interruptible(obj->base.resv, NULL);
 }
 
 static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
 {
-       reservation_object_unlock(obj->base.resv);
+       dma_resv_unlock(obj->base.resv);
 }
 
 struct dma_fence *
        struct dma_fence *fence;
 
        rcu_read_lock();
-       fence = reservation_object_get_excl_rcu(obj->base.resv);
+       fence = dma_resv_get_excl_rcu(obj->base.resv);
        rcu_read_unlock();
 
        if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
 
 }
 
 static long
-i915_gem_object_wait_reservation(struct reservation_object *resv,
+i915_gem_object_wait_reservation(struct dma_resv *resv,
                                 unsigned int flags,
                                 long timeout)
 {
                unsigned int count, i;
                int ret;
 
-               ret = reservation_object_get_fences_rcu(resv,
+               ret = dma_resv_get_fences_rcu(resv,
                                                        &excl, &count, &shared);
                if (ret)
                        return ret;
                 */
                prune_fences = count && timeout >= 0;
        } else {
-               excl = reservation_object_get_excl_rcu(resv);
+               excl = dma_resv_get_excl_rcu(resv);
        }
 
        if (excl && timeout >= 0)
         * Opportunistically prune the fences iff we know they have *all* been
         * signaled.
         */
-       if (prune_fences && reservation_object_trylock(resv)) {
-               if (reservation_object_test_signaled_rcu(resv, true))
-                       reservation_object_add_excl_fence(resv, NULL);
-               reservation_object_unlock(resv);
+       if (prune_fences && dma_resv_trylock(resv)) {
+               if (dma_resv_test_signaled_rcu(resv, true))
+                       dma_resv_add_excl_fence(resv, NULL);
+               dma_resv_unlock(resv);
        }
 
        return timeout;
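
The trylock block above is i915's opportunistic pruning: once every fence has signaled, installing a NULL exclusive fence drops the stale shared list. A standalone sketch of the same idea (illustrative only):

#include <linux/dma-resv.h>

static void example_prune_if_idle(struct dma_resv *resv)
{
        if (!dma_resv_trylock(resv))
                return;

        if (dma_resv_test_signaled_rcu(resv, true))
                dma_resv_add_excl_fence(resv, NULL);

        dma_resv_unlock(resv);
}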
                unsigned int count, i;
                int ret;
 
-               ret = reservation_object_get_fences_rcu(obj->base.resv,
+               ret = dma_resv_get_fences_rcu(obj->base.resv,
                                                        &excl, &count, &shared);
                if (ret)
                        return ret;
 
                kfree(shared);
        } else {
-               excl = reservation_object_get_excl_rcu(obj->base.resv);
+               excl = dma_resv_get_excl_rcu(obj->base.resv);
        }
 
        if (excl) {
 
 #include <linux/mm_types.h>
 #include <linux/perf_event.h>
 #include <linux/pm_qos.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 #include <linux/shmem_fs.h>
 #include <linux/stackdepot.h>
 
 
 #include <drm/i915_drm.h>
 #include <linux/dma-fence-array.h>
 #include <linux/kthread.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 #include <linux/shmem_fs.h>
 #include <linux/slab.h>
 #include <linux/stop_machine.h>
 
        list_for_each_entry(obj, list, batch_pool_link) {
                /* The batches are strictly LRU ordered */
                if (i915_gem_object_is_active(obj)) {
-                       struct reservation_object *resv = obj->base.resv;
+                       struct dma_resv *resv = obj->base.resv;
 
-                       if (!reservation_object_test_signaled_rcu(resv, true))
+                       if (!dma_resv_test_signaled_rcu(resv, true))
                                break;
 
                        i915_retire_requests(pool->engine->i915);
                         * than replace the existing fence.
                         */
                        if (rcu_access_pointer(resv->fence)) {
-                               reservation_object_lock(resv, NULL);
-                               reservation_object_add_excl_fence(resv, NULL);
-                               reservation_object_unlock(resv);
+                               dma_resv_lock(resv, NULL);
+                               dma_resv_add_excl_fence(resv, NULL);
+                               dma_resv_unlock(resv);
                        }
                }
 
-               GEM_BUG_ON(!reservation_object_test_signaled_rcu(obj->base.resv,
+               GEM_BUG_ON(!dma_resv_test_signaled_rcu(obj->base.resv,
                                                                 true));
 
                if (obj->base.size >= size)
 
                struct dma_fence **shared;
                unsigned int count, i;
 
-               ret = reservation_object_get_fences_rcu(obj->base.resv,
+               ret = dma_resv_get_fences_rcu(obj->base.resv,
                                                        &excl, &count, &shared);
                if (ret)
                        return ret;
                        dma_fence_put(shared[i]);
                kfree(shared);
        } else {
-               excl = reservation_object_get_excl_rcu(obj->base.resv);
+               excl = dma_resv_get_excl_rcu(obj->base.resv);
        }
 
        if (excl) {
 
 #include <linux/slab.h>
 #include <linux/dma-fence.h>
 #include <linux/irq_work.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 
 #include "i915_sw_fence.h"
 #include "i915_selftest.h"
 }
 
 int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
-                                   struct reservation_object *resv,
+                                   struct dma_resv *resv,
                                    const struct dma_fence_ops *exclude,
                                    bool write,
                                    unsigned long timeout,
                struct dma_fence **shared;
                unsigned int count, i;
 
-               ret = reservation_object_get_fences_rcu(resv,
+               ret = dma_resv_get_fences_rcu(resv,
                                                        &excl, &count, &shared);
                if (ret)
                        return ret;
                        dma_fence_put(shared[i]);
                kfree(shared);
        } else {
-               excl = reservation_object_get_excl_rcu(resv);
+               excl = dma_resv_get_excl_rcu(resv);
        }
 
        if (ret >= 0 && excl && excl->ops != exclude) {
 
 #include <linux/wait.h>
 
 struct completion;
-struct reservation_object;
+struct dma_resv;
 
 struct i915_sw_fence {
        wait_queue_head_t wait;
                                  gfp_t gfp);
 
 int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
-                                   struct reservation_object *resv,
+                                   struct dma_resv *resv,
                                    const struct dma_fence_ops *exclude,
                                    bool write,
                                    unsigned long timeout,
 
                return;
 
        /* Prune the shared fence arrays iff completely idle (inc. external) */
-       if (reservation_object_trylock(obj->base.resv)) {
-               if (reservation_object_test_signaled_rcu(obj->base.resv, true))
-                       reservation_object_add_excl_fence(obj->base.resv, NULL);
-               reservation_object_unlock(obj->base.resv);
+       if (dma_resv_trylock(obj->base.resv)) {
+               if (dma_resv_test_signaled_rcu(obj->base.resv, true))
+                       dma_resv_add_excl_fence(obj->base.resv, NULL);
+               dma_resv_unlock(obj->base.resv);
        }
 
        /*
                         struct i915_request *rq,
                         unsigned int flags)
 {
-       struct reservation_object *resv = vma->resv;
+       struct dma_resv *resv = vma->resv;
 
        /*
         * Ignore errors from failing to allocate the new fence, we can't
         * synchronisation leading to rendering corruption.
         */
        if (flags & EXEC_OBJECT_WRITE)
-               reservation_object_add_excl_fence(resv, &rq->fence);
-       else if (reservation_object_reserve_shared(resv, 1) == 0)
-               reservation_object_add_shared_fence(resv, &rq->fence);
+               dma_resv_add_excl_fence(resv, &rq->fence);
+       else if (dma_resv_reserve_shared(resv, 1) == 0)
+               dma_resv_add_shared_fence(resv, &rq->fence);
 }
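
export_fence() above shows why dma_resv_reserve_shared() is renamed alongside the add helpers: a shared slot has to be reserved (and may fail with -ENOMEM) before dma_resv_add_shared_fence(), which itself cannot fail. A hedged sketch, with illustrative naming and the reservation error returned instead of ignored:

#include <linux/dma-resv.h>

static int example_move_to_active(struct dma_resv *resv,
                                  struct dma_fence *fence, bool write)
{
        int ret = 0;

        dma_resv_assert_held(resv);

        if (write) {
                dma_resv_add_excl_fence(resv, fence);
        } else {
                ret = dma_resv_reserve_shared(resv, 1);
                if (!ret)
                        dma_resv_add_shared_fence(resv, fence);
        }

        return ret;
}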
 
 int i915_vma_move_to_active(struct i915_vma *vma,
 
        struct i915_address_space *vm;
        const struct i915_vma_ops *ops;
        struct i915_fence_reg *fence;
-       struct reservation_object *resv; /** Alias of obj->resv */
+       struct dma_resv *resv; /** Alias of obj->resv */
        struct sg_table *pages;
        void __iomem *iomap;
        void *private; /* owned by creator */
 void i915_vma_reopen(struct i915_vma *vma);
 void i915_vma_destroy(struct i915_vma *vma);
 
-#define assert_vma_held(vma) reservation_object_assert_held((vma)->resv)
+#define assert_vma_held(vma) dma_resv_assert_held((vma)->resv)
 
 static inline void i915_vma_lock(struct i915_vma *vma)
 {
-       reservation_object_lock(vma->resv, NULL);
+       dma_resv_lock(vma->resv, NULL);
 }
 
 static inline void i915_vma_unlock(struct i915_vma *vma)
 {
-       reservation_object_unlock(vma->resv);
+       dma_resv_unlock(vma->resv);
 }
 
 int __i915_vma_do_pin(struct i915_vma *vma,
 
        int err = 0;
 
        if (!write) {
-               err = reservation_object_reserve_shared(bo->gem.resv, 1);
+               err = dma_resv_reserve_shared(bo->gem.resv, 1);
                if (err)
                        return err;
        }
 
        for (i = 0; i < submit->nr_bos; i++) {
                if (submit->bos[i].flags & LIMA_SUBMIT_BO_WRITE)
-                       reservation_object_add_excl_fence(bos[i]->gem.resv, fence);
+                       dma_resv_add_excl_fence(bos[i]->gem.resv, fence);
                else
-                       reservation_object_add_shared_fence(bos[i]->gem.resv, fence);
+                       dma_resv_add_shared_fence(bos[i]->gem.resv, fence);
        }
 
        lima_gem_unlock_bos(bos, submit->nr_bos, &ctx);
 
        timeout = drm_timeout_abs_to_jiffies(timeout_ns);
 
-       ret = drm_gem_reservation_object_wait(file, handle, write, timeout);
+       ret = drm_gem_dma_resv_wait(file, handle, write, timeout);
        if (ret == 0)
                ret = timeout ? -ETIMEDOUT : -EBUSY;
 
 
  */
 
 #include <linux/dma-buf.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 
 #include <drm/drm_modeset_helper.h>
 #include <drm/drm_fb_helper.h>
 
 int msm_gem_sync_object(struct drm_gem_object *obj,
                struct msm_fence_context *fctx, bool exclusive)
 {
-       struct reservation_object_list *fobj;
+       struct dma_resv_list *fobj;
        struct dma_fence *fence;
        int i, ret;
 
-       fobj = reservation_object_get_list(obj->resv);
+       fobj = dma_resv_get_list(obj->resv);
        if (!fobj || (fobj->shared_count == 0)) {
-               fence = reservation_object_get_excl(obj->resv);
+               fence = dma_resv_get_excl(obj->resv);
                /* don't need to wait on our own fences, since ring is fifo */
                if (fence && (fence->context != fctx->context)) {
                        ret = dma_fence_wait(fence, true);
 
        for (i = 0; i < fobj->shared_count; i++) {
                fence = rcu_dereference_protected(fobj->shared[i],
-                                               reservation_object_held(obj->resv));
+                                               dma_resv_held(obj->resv));
                if (fence->context != fctx->context) {
                        ret = dma_fence_wait(fence, true);
                        if (ret)
        WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
        msm_obj->gpu = gpu;
        if (exclusive)
-               reservation_object_add_excl_fence(obj->resv, fence);
+               dma_resv_add_excl_fence(obj->resv, fence);
        else
-               reservation_object_add_shared_fence(obj->resv, fence);
+               dma_resv_add_shared_fence(obj->resv, fence);
        list_del_init(&msm_obj->mm_list);
        list_add_tail(&msm_obj->mm_list, &gpu->active_list);
 }
                op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
        long ret;
 
-       ret = reservation_object_wait_timeout_rcu(obj->resv, write,
+       ret = dma_resv_wait_timeout_rcu(obj->resv, write,
                                                  true,  remain);
        if (ret == 0)
                return remain == 0 ? -EBUSY : -ETIMEDOUT;
 void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 {
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
-       struct reservation_object *robj = obj->resv;
-       struct reservation_object_list *fobj;
+       struct dma_resv *robj = obj->resv;
+       struct dma_resv_list *fobj;
        struct dma_fence *fence;
        struct msm_gem_vma *vma;
        uint64_t off = drm_vma_node_start(&obj->vma_node);
 
 #define __MSM_GEM_H__
 
 #include <linux/kref.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 #include "msm_drv.h"
 
 /* Additional internal-use only BO flags: */
 
                         * strange place to call it.  OTOH this is a
                         * convenient can-fail point to hook it in.
                         */
-                       ret = reservation_object_reserve_shared(msm_obj->base.resv,
+                       ret = dma_resv_reserve_shared(msm_obj->base.resv,
                                                                1);
                        if (ret)
                                return ret;
 
 #include <linux/of_graph.h>
 #include <linux/of_reserved_mem.h>
 #include <linux/pm_runtime.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 #include <linux/spinlock.h>
 
 #include <drm/drm_atomic.h>
 
                asyw->image.handle[0] = ctxdma->object.handle;
        }
 
-       asyw->state.fence = reservation_object_get_excl_rcu(fb->nvbo->bo.base.resv);
+       asyw->state.fence = dma_resv_get_excl_rcu(fb->nvbo->bo.base.resv);
        asyw->image.offset[0] = fb->nvbo->bo.offset;
 
        if (wndw->func->prepare) {
 
 int
 nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
               uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
-              struct sg_table *sg, struct reservation_object *robj,
+              struct sg_table *sg, struct dma_resv *robj,
               struct nouveau_bo **pnvbo)
 {
        struct nouveau_drm *drm = cli->drm;
 {
        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
        struct drm_device *dev = drm->dev;
-       struct dma_fence *fence = reservation_object_get_excl(bo->base.resv);
+       struct dma_fence *fence = dma_resv_get_excl(bo->base.resv);
 
        nv10_bo_put_tile_region(dev, *old_tile, fence);
        *old_tile = new_tile;
 void
 nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool exclusive)
 {
-       struct reservation_object *resv = nvbo->bo.base.resv;
+       struct dma_resv *resv = nvbo->bo.base.resv;
 
        if (exclusive)
-               reservation_object_add_excl_fence(resv, &fence->base);
+               dma_resv_add_excl_fence(resv, &fence->base);
        else if (fence)
-               reservation_object_add_shared_fence(resv, &fence->base);
+               dma_resv_add_shared_fence(resv, &fence->base);
 }
 
 struct ttm_bo_driver nouveau_bo_driver = {
 
 void nouveau_bo_move_init(struct nouveau_drm *);
 int  nouveau_bo_new(struct nouveau_cli *, u64 size, int align, u32 flags,
                    u32 tile_mode, u32 tile_flags, struct sg_table *sg,
-                   struct reservation_object *robj,
+                   struct dma_resv *robj,
                    struct nouveau_bo **);
 int  nouveau_bo_pin(struct nouveau_bo *, u32 flags, bool contig);
 int  nouveau_bo_unpin(struct nouveau_bo *);
 
 {
        struct nouveau_fence_chan *fctx = chan->fence;
        struct dma_fence *fence;
-       struct reservation_object *resv = nvbo->bo.base.resv;
-       struct reservation_object_list *fobj;
+       struct dma_resv *resv = nvbo->bo.base.resv;
+       struct dma_resv_list *fobj;
        struct nouveau_fence *f;
        int ret = 0, i;
 
        if (!exclusive) {
-               ret = reservation_object_reserve_shared(resv, 1);
+               ret = dma_resv_reserve_shared(resv, 1);
 
                if (ret)
                        return ret;
        }
 
-       fobj = reservation_object_get_list(resv);
-       fence = reservation_object_get_excl(resv);
+       fobj = dma_resv_get_list(resv);
+       fence = dma_resv_get_excl(resv);
 
        if (fence && (!exclusive || !fobj || !fobj->shared_count)) {
                struct nouveau_channel *prev = NULL;
                bool must_wait = true;
 
                fence = rcu_dereference_protected(fobj->shared[i],
-                                               reservation_object_held(resv));
+                                               dma_resv_held(resv));
 
                f = nouveau_local_fence(fence, chan->drm);
                if (f) {
 
                return -ENOENT;
        nvbo = nouveau_gem_object(gem);
 
-       lret = reservation_object_wait_timeout_rcu(nvbo->bo.base.resv, write, true,
+       lret = dma_resv_wait_timeout_rcu(nvbo->bo.base.resv, write, true,
                                                   no_wait ? 0 : 30 * HZ);
        if (!lret)
                ret = -EBUSY;
 
 {
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct nouveau_bo *nvbo;
-       struct reservation_object *robj = attach->dmabuf->resv;
+       struct dma_resv *robj = attach->dmabuf->resv;
        u32 flags = 0;
        int ret;
 
        flags = TTM_PL_FLAG_TT;
 
-       reservation_object_lock(robj, NULL);
+       dma_resv_lock(robj, NULL);
        ret = nouveau_bo_new(&drm->client, attach->dmabuf->size, 0, flags, 0, 0,
                             sg, robj, &nvbo);
-       reservation_object_unlock(robj);
+       dma_resv_unlock(robj);
        if (ret)
                return ERR_PTR(ret);
 
 
        if (!gem_obj)
                return -ENOENT;
 
-       ret = reservation_object_wait_timeout_rcu(gem_obj->resv, true,
+       ret = dma_resv_wait_timeout_rcu(gem_obj->resv, true,
                                                  true, timeout);
        if (!ret)
                ret = timeout ? -ETIMEDOUT : -EBUSY;
 
 #include <linux/io.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 #include <drm/gpu_scheduler.h>
 #include <drm/panfrost_drm.h>
 
        int i;
 
        for (i = 0; i < bo_count; i++)
-               implicit_fences[i] = reservation_object_get_excl_rcu(bos[i]->resv);
+               implicit_fences[i] = dma_resv_get_excl_rcu(bos[i]->resv);
 }
 
 static void panfrost_attach_object_fences(struct drm_gem_object **bos,
        int i;
 
        for (i = 0; i < bo_count; i++)
-               reservation_object_add_excl_fence(bos[i]->resv, fence);
+               dma_resv_add_excl_fence(bos[i]->resv, fence);
 }
 
 int panfrost_job_push(struct panfrost_job *job)
 
        struct qxl_bo *bo;
 
        list_for_each_entry(bo, &qdev->gem.objects, list) {
-               struct reservation_object_list *fobj;
+               struct dma_resv_list *fobj;
                int rel;
 
                rcu_read_lock();
 
                        return ret;
        }
 
-       ret = reservation_object_reserve_shared(bo->tbo.base.resv, 1);
+       ret = dma_resv_reserve_shared(bo->tbo.base.resv, 1);
        if (ret)
                return ret;
 
        list_for_each_entry(entry, &release->bos, head) {
                bo = entry->bo;
 
-               reservation_object_add_shared_fence(bo->base.resv, &release->base);
+               dma_resv_add_shared_fence(bo->base.resv, &release->base);
                ttm_bo_add_to_lru(bo);
-               reservation_object_unlock(bo->base.resv);
+               dma_resv_unlock(bo->base.resv);
        }
        spin_unlock(&glob->lru_lock);
        ww_acquire_fini(&release->ticket);
 
 struct radeon_fence *cik_copy_cpdma(struct radeon_device *rdev,
                                    uint64_t src_offset, uint64_t dst_offset,
                                    unsigned num_gpu_pages,
-                                   struct reservation_object *resv)
+                                   struct dma_resv *resv)
 {
        struct radeon_fence *fence;
        struct radeon_sync sync;
 
 struct radeon_fence *cik_copy_dma(struct radeon_device *rdev,
                                  uint64_t src_offset, uint64_t dst_offset,
                                  unsigned num_gpu_pages,
-                                 struct reservation_object *resv)
+                                 struct dma_resv *resv)
 {
        struct radeon_fence *fence;
        struct radeon_sync sync;
 
                                        uint64_t src_offset,
                                        uint64_t dst_offset,
                                        unsigned num_gpu_pages,
-                                       struct reservation_object *resv)
+                                       struct dma_resv *resv)
 {
        struct radeon_fence *fence;
        struct radeon_sync sync;
 
                                    uint64_t src_offset,
                                    uint64_t dst_offset,
                                    unsigned num_gpu_pages,
-                                   struct reservation_object *resv)
+                                   struct dma_resv *resv)
 {
        struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        struct radeon_fence *fence;
 
                                   uint64_t src_offset,
                                   uint64_t dst_offset,
                                   unsigned num_gpu_pages,
-                                  struct reservation_object *resv)
+                                  struct dma_resv *resv)
 {
        struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
        struct radeon_fence *fence;
 
 struct radeon_fence *r600_copy_cpdma(struct radeon_device *rdev,
                                     uint64_t src_offset, uint64_t dst_offset,
                                     unsigned num_gpu_pages,
-                                    struct reservation_object *resv)
+                                    struct dma_resv *resv)
 {
        struct radeon_fence *fence;
        struct radeon_sync sync;
 
 struct radeon_fence *r600_copy_dma(struct radeon_device *rdev,
                                   uint64_t src_offset, uint64_t dst_offset,
                                   unsigned num_gpu_pages,
-                                  struct reservation_object *resv)
+                                  struct dma_resv *resv)
 {
        struct radeon_fence *fence;
        struct radeon_sync sync;
 
                       struct radeon_fence *fence);
 int radeon_sync_resv(struct radeon_device *rdev,
                     struct radeon_sync *sync,
-                    struct reservation_object *resv,
+                    struct dma_resv *resv,
                     bool shared);
 int radeon_sync_rings(struct radeon_device *rdev,
                      struct radeon_sync *sync,
                                             uint64_t src_offset,
                                             uint64_t dst_offset,
                                             unsigned num_gpu_pages,
-                                            struct reservation_object *resv);
+                                            struct dma_resv *resv);
                u32 blit_ring_index;
                struct radeon_fence *(*dma)(struct radeon_device *rdev,
                                            uint64_t src_offset,
                                            uint64_t dst_offset,
                                            unsigned num_gpu_pages,
-                                           struct reservation_object *resv);
+                                           struct dma_resv *resv);
                u32 dma_ring_index;
                /* method used for bo copy */
                struct radeon_fence *(*copy)(struct radeon_device *rdev,
                                             uint64_t src_offset,
                                             uint64_t dst_offset,
                                             unsigned num_gpu_pages,
-                                            struct reservation_object *resv);
+                                            struct dma_resv *resv);
                /* ring used for bo copies */
                u32 copy_ring_index;
        } copy;
 
                                    uint64_t src_offset,
                                    uint64_t dst_offset,
                                    unsigned num_gpu_pages,
-                                   struct reservation_object *resv);
+                                   struct dma_resv *resv);
 int r100_set_surface_reg(struct radeon_device *rdev, int reg,
                         uint32_t tiling_flags, uint32_t pitch,
                         uint32_t offset, uint32_t obj_size);
                                   uint64_t src_offset,
                                   uint64_t dst_offset,
                                   unsigned num_gpu_pages,
-                                  struct reservation_object *resv);
+                                  struct dma_resv *resv);
 void r200_set_safe_registers(struct radeon_device *rdev);
 
 /*
 struct radeon_fence *r600_copy_cpdma(struct radeon_device *rdev,
                                     uint64_t src_offset, uint64_t dst_offset,
                                     unsigned num_gpu_pages,
-                                    struct reservation_object *resv);
+                                    struct dma_resv *resv);
 struct radeon_fence *r600_copy_dma(struct radeon_device *rdev,
                                   uint64_t src_offset, uint64_t dst_offset,
                                   unsigned num_gpu_pages,
-                                  struct reservation_object *resv);
+                                  struct dma_resv *resv);
 void r600_hpd_init(struct radeon_device *rdev);
 void r600_hpd_fini(struct radeon_device *rdev);
 bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
 struct radeon_fence *rv770_copy_dma(struct radeon_device *rdev,
                                    uint64_t src_offset, uint64_t dst_offset,
                                    unsigned num_gpu_pages,
-                                   struct reservation_object *resv);
+                                   struct dma_resv *resv);
 u32 rv770_get_xclk(struct radeon_device *rdev);
 int rv770_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
 int rv770_get_temp(struct radeon_device *rdev);
 struct radeon_fence *evergreen_copy_dma(struct radeon_device *rdev,
                                        uint64_t src_offset, uint64_t dst_offset,
                                        unsigned num_gpu_pages,
-                                       struct reservation_object *resv);
+                                       struct dma_resv *resv);
 int evergreen_get_temp(struct radeon_device *rdev);
 int evergreen_get_allowed_info_register(struct radeon_device *rdev,
                                        u32 reg, u32 *val);
 struct radeon_fence *si_copy_dma(struct radeon_device *rdev,
                                 uint64_t src_offset, uint64_t dst_offset,
                                 unsigned num_gpu_pages,
-                                struct reservation_object *resv);
+                                struct dma_resv *resv);
 
 void si_dma_vm_copy_pages(struct radeon_device *rdev,
                          struct radeon_ib *ib,
 struct radeon_fence *cik_copy_dma(struct radeon_device *rdev,
                                  uint64_t src_offset, uint64_t dst_offset,
                                  unsigned num_gpu_pages,
-                                 struct reservation_object *resv);
+                                 struct dma_resv *resv);
 struct radeon_fence *cik_copy_cpdma(struct radeon_device *rdev,
                                    uint64_t src_offset, uint64_t dst_offset,
                                    unsigned num_gpu_pages,
-                                   struct reservation_object *resv);
+                                   struct dma_resv *resv);
 int cik_sdma_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
 int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
 bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
 
 static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size,
                                    uint64_t saddr, uint64_t daddr,
                                    int flag, int n,
-                                   struct reservation_object *resv)
+                                   struct dma_resv *resv)
 {
        unsigned long start_jiffies;
        unsigned long end_jiffies;
 
        int r;
 
        list_for_each_entry(reloc, &p->validated, tv.head) {
-               struct reservation_object *resv;
+               struct dma_resv *resv;
 
                resv = reloc->robj->tbo.base.resv;
                r = radeon_sync_resv(p->rdev, &p->ib.sync, resv,
 
                DRM_ERROR("failed to pin new rbo buffer before flip\n");
                goto cleanup;
        }
-       work->fence = dma_fence_get(reservation_object_get_excl(new_rbo->tbo.base.resv));
+       work->fence = dma_fence_get(dma_resv_get_excl(new_rbo->tbo.base.resv));
        radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL);
        radeon_bo_unreserve(new_rbo);
 
 
        }
        if (domain == RADEON_GEM_DOMAIN_CPU) {
                /* Asking for cpu access wait for object idle */
-               r = reservation_object_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);
+               r = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);
                if (!r)
                        r = -EBUSY;
 
        }
        robj = gem_to_radeon_bo(gobj);
 
-       r = reservation_object_test_signaled_rcu(robj->tbo.base.resv, true);
+       r = dma_resv_test_signaled_rcu(robj->tbo.base.resv, true);
        if (r == 0)
                r = -EBUSY;
        else
        }
        robj = gem_to_radeon_bo(gobj);
 
-       ret = reservation_object_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);
+       ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);
        if (ret == 0)
                r = -EBUSY;
        else if (ret < 0)
 
                                continue;
                        }
 
-                       r = reservation_object_wait_timeout_rcu(bo->tbo.base.resv,
+                       r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv,
                                true, false, MAX_SCHEDULE_TIMEOUT);
                        if (r <= 0)
                                DRM_ERROR("(%ld) failed to wait for user bo\n", r);
 
 int radeon_bo_create(struct radeon_device *rdev,
                     unsigned long size, int byte_align, bool kernel,
                     u32 domain, u32 flags, struct sg_table *sg,
-                    struct reservation_object *resv,
+                    struct dma_resv *resv,
                     struct radeon_bo **bo_ptr)
 {
        struct radeon_bo *bo;
        int steal;
        int i;
 
-       reservation_object_assert_held(bo->tbo.base.resv);
+       dma_resv_assert_held(bo->tbo.base.resv);
 
        if (!bo->tiling_flags)
                return 0;
                                uint32_t *tiling_flags,
                                uint32_t *pitch)
 {
-       reservation_object_assert_held(bo->tbo.base.resv);
+       dma_resv_assert_held(bo->tbo.base.resv);
 
        if (tiling_flags)
                *tiling_flags = bo->tiling_flags;
                                bool force_drop)
 {
        if (!force_drop)
-               reservation_object_assert_held(bo->tbo.base.resv);
+               dma_resv_assert_held(bo->tbo.base.resv);
 
        if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
                return 0;
 void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
                     bool shared)
 {
-       struct reservation_object *resv = bo->tbo.base.resv;
+       struct dma_resv *resv = bo->tbo.base.resv;
 
        if (shared)
-               reservation_object_add_shared_fence(resv, &fence->base);
+               dma_resv_add_shared_fence(resv, &fence->base);
        else
-               reservation_object_add_excl_fence(resv, &fence->base);
+               dma_resv_add_excl_fence(resv, &fence->base);
 }
 
                            unsigned long size, int byte_align,
                            bool kernel, u32 domain, u32 flags,
                            struct sg_table *sg,
-                           struct reservation_object *resv,
+                           struct dma_resv *resv,
                            struct radeon_bo **bo_ptr);
 extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr);
 extern void radeon_bo_kunmap(struct radeon_bo *bo);
 
                                                        struct dma_buf_attachment *attach,
                                                        struct sg_table *sg)
 {
-       struct reservation_object *resv = attach->dmabuf->resv;
+       struct dma_resv *resv = attach->dmabuf->resv;
        struct radeon_device *rdev = dev->dev_private;
        struct radeon_bo *bo;
        int ret;
 
-       reservation_object_lock(resv, NULL);
+       dma_resv_lock(resv, NULL);
        ret = radeon_bo_create(rdev, attach->dmabuf->size, PAGE_SIZE, false,
                               RADEON_GEM_DOMAIN_GTT, 0, sg, resv, &bo);
-       reservation_object_unlock(resv);
+       dma_resv_unlock(resv);
        if (ret)
                return ERR_PTR(ret);
 
 
  */
 int radeon_sync_resv(struct radeon_device *rdev,
                     struct radeon_sync *sync,
-                    struct reservation_object *resv,
+                    struct dma_resv *resv,
                     bool shared)
 {
-       struct reservation_object_list *flist;
+       struct dma_resv_list *flist;
        struct dma_fence *f;
        struct radeon_fence *fence;
        unsigned i;
        int r = 0;
 
        /* always sync to the exclusive fence */
-       f = reservation_object_get_excl(resv);
+       f = dma_resv_get_excl(resv);
        fence = f ? to_radeon_fence(f) : NULL;
        if (fence && fence->rdev == rdev)
                radeon_sync_fence(sync, fence);
        else if (f)
                r = dma_fence_wait(f, true);
 
-       flist = reservation_object_get_list(resv);
+       flist = dma_resv_get_list(resv);
        if (shared || !flist || r)
                return r;
 
        for (i = 0; i < flist->shared_count; ++i) {
                f = rcu_dereference_protected(flist->shared[i],
-                                             reservation_object_held(resv));
+                                             dma_resv_held(resv));
                fence = to_radeon_fence(f);
                if (fence && fence->rdev == rdev)
                        radeon_sync_fence(sync, fence);
 
                return -EINVAL;
        }
 
-       f = reservation_object_get_excl(bo->tbo.base.resv);
+       f = dma_resv_get_excl(bo->tbo.base.resv);
        if (f) {
                r = radeon_fence_wait((struct radeon_fence *)f, false);
                if (r) {
 
                int r;
 
                radeon_sync_resv(rdev, &ib->sync, pt->tbo.base.resv, true);
-               r = reservation_object_reserve_shared(pt->tbo.base.resv, 1);
+               r = dma_resv_reserve_shared(pt->tbo.base.resv, 1);
                if (r)
                        return r;
 
 
 struct radeon_fence *rv770_copy_dma(struct radeon_device *rdev,
                                    uint64_t src_offset, uint64_t dst_offset,
                                    unsigned num_gpu_pages,
-                                   struct reservation_object *resv)
+                                   struct dma_resv *resv)
 {
        struct radeon_fence *fence;
        struct radeon_sync sync;
 
 struct radeon_fence *si_copy_dma(struct radeon_device *rdev,
                                 uint64_t src_offset, uint64_t dst_offset,
                                 unsigned num_gpu_pages,
-                                struct reservation_object *resv)
+                                struct dma_resv *resv)
 {
        struct radeon_fence *fence;
        struct radeon_sync sync;
 
 #include <linux/file.h>
 #include <linux/module.h>
 #include <linux/atomic.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 
 static void ttm_bo_global_kobj_release(struct kobject *kobj);
 
        atomic_dec(&bo->bdev->glob->bo_count);
        dma_fence_put(bo->moving);
        if (!ttm_bo_uses_embedded_gem_object(bo))
-               reservation_object_fini(&bo->base._resv);
+               dma_resv_fini(&bo->base._resv);
        mutex_destroy(&bo->wu_mutex);
        bo->destroy(bo);
        ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man;
 
-       reservation_object_assert_held(bo->base.resv);
+       dma_resv_assert_held(bo->base.resv);
 
        if (!list_empty(&bo->lru))
                return;
 void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
                             struct ttm_lru_bulk_move *bulk)
 {
-       reservation_object_assert_held(bo->base.resv);
+       dma_resv_assert_held(bo->base.resv);
 
        ttm_bo_del_from_lru(bo);
        ttm_bo_add_to_lru(bo);
                if (!pos->first)
                        continue;
 
-               reservation_object_assert_held(pos->first->base.resv);
-               reservation_object_assert_held(pos->last->base.resv);
+               dma_resv_assert_held(pos->first->base.resv);
+               dma_resv_assert_held(pos->last->base.resv);
 
                man = &pos->first->bdev->man[TTM_PL_TT];
                list_bulk_move_tail(&man->lru[i], &pos->first->lru,
                if (!pos->first)
                        continue;
 
-               reservation_object_assert_held(pos->first->base.resv);
-               reservation_object_assert_held(pos->last->base.resv);
+               dma_resv_assert_held(pos->first->base.resv);
+               dma_resv_assert_held(pos->last->base.resv);
 
                man = &pos->first->bdev->man[TTM_PL_VRAM];
                list_bulk_move_tail(&man->lru[i], &pos->first->lru,
                if (!pos->first)
                        continue;
 
-               reservation_object_assert_held(pos->first->base.resv);
-               reservation_object_assert_held(pos->last->base.resv);
+               dma_resv_assert_held(pos->first->base.resv);
+               dma_resv_assert_held(pos->last->base.resv);
 
                lru = &pos->first->bdev->glob->swap_lru[i];
                list_bulk_move_tail(lru, &pos->first->swap, &pos->last->swap);
        if (bo->base.resv == &bo->base._resv)
                return 0;
 
-       BUG_ON(!reservation_object_trylock(&bo->base._resv));
+       BUG_ON(!dma_resv_trylock(&bo->base._resv));
 
-       r = reservation_object_copy_fences(&bo->base._resv, bo->base.resv);
+       r = dma_resv_copy_fences(&bo->base._resv, bo->base.resv);
        if (r)
-               reservation_object_unlock(&bo->base._resv);
+               dma_resv_unlock(&bo->base._resv);
 
        return r;
 }
 
 static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
 {
-       struct reservation_object_list *fobj;
+       struct dma_resv_list *fobj;
        struct dma_fence *fence;
        int i;
 
-       fobj = reservation_object_get_list(&bo->base._resv);
-       fence = reservation_object_get_excl(&bo->base._resv);
+       fobj = dma_resv_get_list(&bo->base._resv);
+       fence = dma_resv_get_excl(&bo->base._resv);
        if (fence && !fence->ops->signaled)
                dma_fence_enable_sw_signaling(fence);
 
        for (i = 0; fobj && i < fobj->shared_count; ++i) {
                fence = rcu_dereference_protected(fobj->shared[i],
-                                       reservation_object_held(bo->base.resv));
+                                       dma_resv_held(bo->base.resv));
 
                if (!fence->ops->signaled)
                        dma_fence_enable_sw_signaling(fence);
                /* Last resort, if we fail to allocate memory for the
                 * fences block for the BO to become idle
                 */
-               reservation_object_wait_timeout_rcu(bo->base.resv, true, false,
+               dma_resv_wait_timeout_rcu(bo->base.resv, true, false,
                                                    30 * HZ);
                spin_lock(&glob->lru_lock);
                goto error;
        }
 
        spin_lock(&glob->lru_lock);
-       ret = reservation_object_trylock(bo->base.resv) ? 0 : -EBUSY;
+       ret = dma_resv_trylock(bo->base.resv) ? 0 : -EBUSY;
        if (!ret) {
-               if (reservation_object_test_signaled_rcu(&bo->base._resv, true)) {
+               if (dma_resv_test_signaled_rcu(&bo->base._resv, true)) {
                        ttm_bo_del_from_lru(bo);
                        spin_unlock(&glob->lru_lock);
                        if (bo->base.resv != &bo->base._resv)
-                               reservation_object_unlock(&bo->base._resv);
+                               dma_resv_unlock(&bo->base._resv);
 
                        ttm_bo_cleanup_memtype_use(bo);
-                       reservation_object_unlock(bo->base.resv);
+                       dma_resv_unlock(bo->base.resv);
                        return;
                }
 
                        ttm_bo_add_to_lru(bo);
                }
 
-               reservation_object_unlock(bo->base.resv);
+               dma_resv_unlock(bo->base.resv);
        }
        if (bo->base.resv != &bo->base._resv)
-               reservation_object_unlock(&bo->base._resv);
+               dma_resv_unlock(&bo->base._resv);
 
 error:
        kref_get(&bo->list_kref);
                               bool unlock_resv)
 {
        struct ttm_bo_global *glob = bo->bdev->glob;
-       struct reservation_object *resv;
+       struct dma_resv *resv;
        int ret;
 
        if (unlikely(list_empty(&bo->ddestroy)))
        else
                resv = &bo->base._resv;
 
-       if (reservation_object_test_signaled_rcu(resv, true))
+       if (dma_resv_test_signaled_rcu(resv, true))
                ret = 0;
        else
                ret = -EBUSY;
                long lret;
 
                if (unlock_resv)
-                       reservation_object_unlock(bo->base.resv);
+                       dma_resv_unlock(bo->base.resv);
                spin_unlock(&glob->lru_lock);
 
-               lret = reservation_object_wait_timeout_rcu(resv, true,
+               lret = dma_resv_wait_timeout_rcu(resv, true,
                                                           interruptible,
                                                           30 * HZ);
 
                        return -EBUSY;
 
                spin_lock(&glob->lru_lock);
-               if (unlock_resv && !reservation_object_trylock(bo->base.resv)) {
+               if (unlock_resv && !dma_resv_trylock(bo->base.resv)) {
                        /*
                         * We raced, and lost, someone else holds the reservation now,
                         * and is probably busy in ttm_bo_cleanup_memtype_use.
 
        if (ret || unlikely(list_empty(&bo->ddestroy))) {
                if (unlock_resv)
-                       reservation_object_unlock(bo->base.resv);
+                       dma_resv_unlock(bo->base.resv);
                spin_unlock(&glob->lru_lock);
                return ret;
        }
        ttm_bo_cleanup_memtype_use(bo);
 
        if (unlock_resv)
-               reservation_object_unlock(bo->base.resv);
+               dma_resv_unlock(bo->base.resv);
 
        return 0;
 }
 
                if (remove_all || bo->base.resv != &bo->base._resv) {
                        spin_unlock(&glob->lru_lock);
-                       reservation_object_lock(bo->base.resv, NULL);
+                       dma_resv_lock(bo->base.resv, NULL);
 
                        spin_lock(&glob->lru_lock);
                        ttm_bo_cleanup_refs(bo, false, !remove_all, true);
 
-               } else if (reservation_object_trylock(bo->base.resv)) {
+               } else if (dma_resv_trylock(bo->base.resv)) {
                        ttm_bo_cleanup_refs(bo, false, !remove_all, true);
                } else {
                        spin_unlock(&glob->lru_lock);
        struct ttm_placement placement;
        int ret = 0;
 
-       reservation_object_assert_held(bo->base.resv);
+       dma_resv_assert_held(bo->base.resv);
 
        placement.num_placement = 0;
        placement.num_busy_placement = 0;
        bool ret = false;
 
        if (bo->base.resv == ctx->resv) {
-               reservation_object_assert_held(bo->base.resv);
+               dma_resv_assert_held(bo->base.resv);
                if (ctx->flags & TTM_OPT_FLAG_ALLOW_RES_EVICT
                    || !list_empty(&bo->ddestroy))
                        ret = true;
                if (busy)
                        *busy = false;
        } else {
-               ret = reservation_object_trylock(bo->base.resv);
+               ret = dma_resv_trylock(bo->base.resv);
                *locked = ret;
                if (busy)
                        *busy = !ret;
                return -EBUSY;
 
        if (ctx->interruptible)
-               r = reservation_object_lock_interruptible(busy_bo->base.resv,
+               r = dma_resv_lock_interruptible(busy_bo->base.resv,
                                                          ticket);
        else
-               r = reservation_object_lock(busy_bo->base.resv, ticket);
+               r = dma_resv_lock(busy_bo->base.resv, ticket);
 
        /*
         * TODO: It would be better to keep the BO locked until allocation is at
         * of TTM.
         */
        if (!r)
-               reservation_object_unlock(busy_bo->base.resv);
+               dma_resv_unlock(busy_bo->base.resv);
 
        return r == -EDEADLK ? -EBUSY : r;
 }
                        if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked,
                                                            &busy)) {
                                if (busy && !busy_bo && ticket !=
-                                   reservation_object_locking_ctx(bo->base.resv))
+                                   dma_resv_locking_ctx(bo->base.resv))
                                        busy_bo = bo;
                                continue;
                        }
                        if (place && !bdev->driver->eviction_valuable(bo,
                                                                      place)) {
                                if (locked)
-                                       reservation_object_unlock(bo->base.resv);
+                                       dma_resv_unlock(bo->base.resv);
                                continue;
                        }
                        break;
        spin_unlock(&man->move_lock);
 
        if (fence) {
-               reservation_object_add_shared_fence(bo->base.resv, fence);
+               dma_resv_add_shared_fence(bo->base.resv, fence);
 
-               ret = reservation_object_reserve_shared(bo->base.resv, 1);
+               ret = dma_resv_reserve_shared(bo->base.resv, 1);
                if (unlikely(ret)) {
                        dma_fence_put(fence);
                        return ret;
        struct ww_acquire_ctx *ticket;
        int ret;
 
-       ticket = reservation_object_locking_ctx(bo->base.resv);
+       ticket = dma_resv_locking_ctx(bo->base.resv);
        do {
                ret = (*man->func->get_node)(man, bo, place, mem);
                if (unlikely(ret != 0))
        bool type_found = false;
        int i, ret;
 
-       ret = reservation_object_reserve_shared(bo->base.resv, 1);
+       ret = dma_resv_reserve_shared(bo->base.resv, 1);
        if (unlikely(ret))
                return ret;
 
        int ret = 0;
        struct ttm_mem_reg mem;
 
-       reservation_object_assert_held(bo->base.resv);
+       dma_resv_assert_held(bo->base.resv);
 
        mem.num_pages = bo->num_pages;
        mem.size = mem.num_pages << PAGE_SHIFT;
        int ret;
        uint32_t new_flags;
 
-       reservation_object_assert_held(bo->base.resv);
+       dma_resv_assert_held(bo->base.resv);
        /*
         * Check whether we need to move buffer.
         */
                         struct ttm_operation_ctx *ctx,
                         size_t acc_size,
                         struct sg_table *sg,
-                        struct reservation_object *resv,
+                        struct dma_resv *resv,
                         void (*destroy) (struct ttm_buffer_object *))
 {
        int ret = 0;
        bo->sg = sg;
        if (resv) {
                bo->base.resv = resv;
-               reservation_object_assert_held(bo->base.resv);
+               dma_resv_assert_held(bo->base.resv);
        } else {
                bo->base.resv = &bo->base._resv;
        }
                 * bo.gem is not initialized, so we have to setup the
                 * struct elements we want use regardless.
                 */
-               reservation_object_init(&bo->base._resv);
+               dma_resv_init(&bo->base._resv);
                drm_vma_node_reset(&bo->base.vma_node);
        }
        atomic_inc(&bo->bdev->glob->bo_count);
         * since otherwise lockdep will be angered in radeon.
         */
        if (!resv) {
-               locked = reservation_object_trylock(bo->base.resv);
+               locked = dma_resv_trylock(bo->base.resv);
                WARN_ON(!locked);
        }
 
                bool interruptible,
                size_t acc_size,
                struct sg_table *sg,
-               struct reservation_object *resv,
+               struct dma_resv *resv,
                void (*destroy) (struct ttm_buffer_object *))
 {
        struct ttm_operation_ctx ctx = { interruptible, false };
        long timeout = 15 * HZ;
 
        if (no_wait) {
-               if (reservation_object_test_signaled_rcu(bo->base.resv, true))
+               if (dma_resv_test_signaled_rcu(bo->base.resv, true))
                        return 0;
                else
                        return -EBUSY;
        }
 
-       timeout = reservation_object_wait_timeout_rcu(bo->base.resv, true,
+       timeout = dma_resv_wait_timeout_rcu(bo->base.resv, true,
                                                      interruptible, timeout);
        if (timeout < 0)
                return timeout;
        if (timeout == 0)
                return -EBUSY;
 
-       reservation_object_add_excl_fence(bo->base.resv, NULL);
+       dma_resv_add_excl_fence(bo->base.resv, NULL);
        return 0;
 }
 EXPORT_SYMBOL(ttm_bo_wait);
         * already swapped buffer.
         */
        if (locked)
-               reservation_object_unlock(bo->base.resv);
+               dma_resv_unlock(bo->base.resv);
        kref_put(&bo->list_kref, ttm_bo_release_list);
        return ret;
 }
        ret = mutex_lock_interruptible(&bo->wu_mutex);
        if (unlikely(ret != 0))
                return -ERESTARTSYS;
-       if (!reservation_object_is_locked(bo->base.resv))
+       if (!dma_resv_is_locked(bo->base.resv))
                goto out_unlock;
-       ret = reservation_object_lock_interruptible(bo->base.resv, NULL);
+       ret = dma_resv_lock_interruptible(bo->base.resv, NULL);
        if (ret == -EINTR)
                ret = -ERESTARTSYS;
        if (unlikely(ret != 0))
                goto out_unlock;
-       reservation_object_unlock(bo->base.resv);
+       dma_resv_unlock(bo->base.resv);
 
 out_unlock:
        mutex_unlock(&bo->wu_mutex);
 
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/module.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 
 struct ttm_transfer_obj {
        struct ttm_buffer_object base;
        fbo->base.destroy = &ttm_transfered_destroy;
        fbo->base.acc_size = 0;
        fbo->base.base.resv = &fbo->base.base._resv;
-       reservation_object_init(fbo->base.base.resv);
-       ret = reservation_object_trylock(fbo->base.base.resv);
+       dma_resv_init(fbo->base.base.resv);
+       ret = dma_resv_trylock(fbo->base.base.resv);
        WARN_ON(!ret);
 
        *new_obj = &fbo->base;
        int ret;
        struct ttm_buffer_object *ghost_obj;
 
-       reservation_object_add_excl_fence(bo->base.resv, fence);
+       dma_resv_add_excl_fence(bo->base.resv, fence);
        if (evict) {
                ret = ttm_bo_wait(bo, false, false);
                if (ret)
                if (ret)
                        return ret;
 
-               reservation_object_add_excl_fence(ghost_obj->base.resv, fence);
+               dma_resv_add_excl_fence(ghost_obj->base.resv, fence);
 
                /**
                 * If we're not moving to fixed memory, the TTM object
 
        int ret;
 
-       reservation_object_add_excl_fence(bo->base.resv, fence);
+       dma_resv_add_excl_fence(bo->base.resv, fence);
 
        if (!evict) {
                struct ttm_buffer_object *ghost_obj;
                if (ret)
                        return ret;
 
-               reservation_object_add_excl_fence(ghost_obj->base.resv, fence);
+               dma_resv_add_excl_fence(ghost_obj->base.resv, fence);
 
                /**
                 * If we're not moving to fixed memory, the TTM object
        if (ret)
                return ret;
 
-       ret = reservation_object_copy_fences(ghost->base.resv, bo->base.resv);
+       ret = dma_resv_copy_fences(ghost->base.resv, bo->base.resv);
        /* Last resort, wait for the BO to be idle when we are OOM */
        if (ret)
                ttm_bo_wait(bo, false, false);
 
                ttm_bo_get(bo);
                up_read(&vmf->vma->vm_mm->mmap_sem);
                (void) dma_fence_wait(bo->moving, true);
-               reservation_object_unlock(bo->base.resv);
+               dma_resv_unlock(bo->base.resv);
                ttm_bo_put(bo);
                goto out_unlock;
        }
         * for reserve, and if it fails, retry the fault after waiting
         * for the buffer to become unreserved.
         */
-       if (unlikely(!reservation_object_trylock(bo->base.resv))) {
+       if (unlikely(!dma_resv_trylock(bo->base.resv))) {
                if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
                        if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
                                ttm_bo_get(bo);
 out_io_unlock:
        ttm_mem_io_unlock(man);
 out_unlock:
-       reservation_object_unlock(bo->base.resv);
+       dma_resv_unlock(bo->base.resv);
        return ret;
 }
 
 
        list_for_each_entry_continue_reverse(entry, list, head) {
                struct ttm_buffer_object *bo = entry->bo;
 
-               reservation_object_unlock(bo->base.resv);
+               dma_resv_unlock(bo->base.resv);
        }
 }
 
 
                if (list_empty(&bo->lru))
                        ttm_bo_add_to_lru(bo);
-               reservation_object_unlock(bo->base.resv);
+               dma_resv_unlock(bo->base.resv);
        }
        spin_unlock(&glob->lru_lock);
 
 
                ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), ticket);
                if (!ret && unlikely(atomic_read(&bo->cpu_writers) > 0)) {
-                       reservation_object_unlock(bo->base.resv);
+                       dma_resv_unlock(bo->base.resv);
 
                        ret = -EBUSY;
 
                        if (!entry->num_shared)
                                continue;
 
-                       ret = reservation_object_reserve_shared(bo->base.resv,
+                       ret = dma_resv_reserve_shared(bo->base.resv,
                                                                entry->num_shared);
                        if (!ret)
                                continue;
 
                if (ret == -EDEADLK) {
                        if (intr) {
-                               ret = reservation_object_lock_slow_interruptible(bo->base.resv,
+                               ret = dma_resv_lock_slow_interruptible(bo->base.resv,
                                                                                 ticket);
                        } else {
-                               reservation_object_lock_slow(bo->base.resv, ticket);
+                               dma_resv_lock_slow(bo->base.resv, ticket);
                                ret = 0;
                        }
                }
 
                if (!ret && entry->num_shared)
-                       ret = reservation_object_reserve_shared(bo->base.resv,
+                       ret = dma_resv_reserve_shared(bo->base.resv,
                                                                entry->num_shared);
 
                if (unlikely(ret != 0)) {
        list_for_each_entry(entry, list, head) {
                bo = entry->bo;
                if (entry->num_shared)
-                       reservation_object_add_shared_fence(bo->base.resv, fence);
+                       dma_resv_add_shared_fence(bo->base.resv, fence);
                else
-                       reservation_object_add_excl_fence(bo->base.resv, fence);
+                       dma_resv_add_excl_fence(bo->base.resv, fence);
                if (list_empty(&bo->lru))
                        ttm_bo_add_to_lru(bo);
                else
                        ttm_bo_move_to_lru_tail(bo, NULL);
-               reservation_object_unlock(bo->base.resv);
+               dma_resv_unlock(bo->base.resv);
        }
        spin_unlock(&glob->lru_lock);
        if (ticket)
 
        struct ttm_bo_device *bdev = bo->bdev;
        uint32_t page_flags = 0;
 
-       reservation_object_assert_held(bo->base.resv);
+       dma_resv_assert_held(bo->base.resv);
 
        if (bdev->need_dma32)
                page_flags |= TTM_PAGE_FLAG_DMA32;
 
        if (args->pad != 0)
                return -EINVAL;
 
-       ret = drm_gem_reservation_object_wait(file_priv, args->handle,
+       ret = drm_gem_dma_resv_wait(file_priv, args->handle,
                                              true, timeout_jiffies);
 
        /* Decrement the user's timeout, in case we got interrupted
 
        for (i = 0; i < job->bo_count; i++) {
                /* XXX: Use shared fences for read-only objects. */
-               reservation_object_add_excl_fence(job->bo[i]->resv,
+               dma_resv_add_excl_fence(job->bo[i]->resv,
                                                  job->done_fence);
        }
 
 
                bo = to_vc4_bo(&exec->bo[i]->base);
                bo->seqno = seqno;
 
-               reservation_object_add_shared_fence(bo->base.base.resv, exec->fence);
+               dma_resv_add_shared_fence(bo->base.base.resv, exec->fence);
        }
 
        list_for_each_entry(bo, &exec->unref_list, unref_head) {
                bo = to_vc4_bo(&exec->rcl_write_bo[i]->base);
                bo->write_seqno = seqno;
 
-               reservation_object_add_excl_fence(bo->base.base.resv, exec->fence);
+               dma_resv_add_excl_fence(bo->base.base.resv, exec->fence);
        }
 }
 
        for (i = 0; i < exec->bo_count; i++) {
                bo = &exec->bo[i]->base;
 
-               ret = reservation_object_reserve_shared(bo->resv, 1);
+               ret = dma_resv_reserve_shared(bo->resv, 1);
                if (ret) {
                        vc4_unlock_bo_reservations(dev, exec, acquire_ctx);
                        return ret;
 
  */
 
 #include <linux/dma-buf.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 
 #include <drm/drm_file.h>
 
 {
        struct drm_vgem_fence_attach *arg = data;
        struct vgem_file *vfile = file->driver_priv;
-       struct reservation_object *resv;
+       struct dma_resv *resv;
        struct drm_gem_object *obj;
        struct dma_fence *fence;
        int ret;
 
        /* Check for a conflicting fence */
        resv = obj->resv;
-       if (!reservation_object_test_signaled_rcu(resv,
+       if (!dma_resv_test_signaled_rcu(resv,
                                                  arg->flags & VGEM_FENCE_WRITE)) {
                ret = -EBUSY;
                goto err_fence;
 
        /* Expose the fence via the dma-buf */
        ret = 0;
-       reservation_object_lock(resv, NULL);
+       dma_resv_lock(resv, NULL);
        if (arg->flags & VGEM_FENCE_WRITE)
-               reservation_object_add_excl_fence(resv, fence);
-       else if ((ret = reservation_object_reserve_shared(resv, 1)) == 0)
-               reservation_object_add_shared_fence(resv, fence);
-       reservation_object_unlock(resv);
+               dma_resv_add_excl_fence(resv, fence);
+       else if ((ret = dma_resv_reserve_shared(resv, 1)) == 0)
+               dma_resv_add_shared_fence(resv, fence);
+       dma_resv_unlock(resv);
 
        /* Record the fence in our idr for later signaling */
        if (ret == 0) {
 
                (vgdev, qobj->hw_res_handle,
                 vfpriv->ctx_id, offset, args->level,
                 &box, fence);
-       reservation_object_add_excl_fence(qobj->tbo.base.resv,
+       dma_resv_add_excl_fence(qobj->tbo.base.resv,
                                          &fence->f);
 
        dma_fence_put(&fence->f);
                        (vgdev, qobj,
                         vfpriv ? vfpriv->ctx_id : 0, offset,
                         args->level, &box, fence);
-               reservation_object_add_excl_fence(qobj->tbo.base.resv,
+               dma_resv_add_excl_fence(qobj->tbo.base.resv,
                                                  &fence->f);
                dma_fence_put(&fence->f);
        }
 
                         0, 0, vgfb->fence);
                ret = virtio_gpu_object_reserve(bo, false);
                if (!ret) {
-                       reservation_object_add_excl_fence(bo->tbo.base.resv,
+                       dma_resv_add_excl_fence(bo->tbo.base.resv,
                                                          &vgfb->fence->f);
                        dma_fence_put(&vgfb->fence->f);
                        vgfb->fence = NULL;
 
 
        /* Buffer objects need to be either pinned or reserved: */
        if (!(dst->mem.placement & TTM_PL_FLAG_NO_EVICT))
-               reservation_object_assert_held(dst->base.resv);
+               dma_resv_assert_held(dst->base.resv);
        if (!(src->mem.placement & TTM_PL_FLAG_NO_EVICT))
-               reservation_object_assert_held(src->base.resv);
+               dma_resv_assert_held(src->base.resv);
 
        if (dst->ttm->state == tt_unpopulated) {
                ret = dst->ttm->bdev->driver->ttm_tt_populate(dst->ttm, &ctx);
 
        uint32_t old_mem_type = bo->mem.mem_type;
        int ret;
 
-       reservation_object_assert_held(bo->base.resv);
+       dma_resv_assert_held(bo->base.resv);
 
        if (pin) {
                if (vbo->pin_count++ > 0)
                bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
                long lret;
 
-               lret = reservation_object_wait_timeout_rcu
+               lret = dma_resv_wait_timeout_rcu
                        (bo->base.resv, true, true,
                         nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
                if (!lret)
 
        if (fence == NULL) {
                vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
-               reservation_object_add_excl_fence(bo->base.resv, &fence->base);
+               dma_resv_add_excl_fence(bo->base.resv, &fence->base);
                dma_fence_put(&fence->base);
        } else
-               reservation_object_add_excl_fence(bo->base.resv, &fence->base);
+               dma_resv_add_excl_fence(bo->base.resv, &fence->base);
 }
 
 
 
        } *cmd;
 
        WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
-       reservation_object_assert_held(bo->base.resv);
+       dma_resv_assert_held(bo->base.resv);
 
        cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
        if (!cmd)
                return 0;
 
        WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
-       reservation_object_assert_held(bo->base.resv);
+       dma_resv_assert_held(bo->base.resv);
 
        mutex_lock(&dev_priv->binding_mutex);
        if (!vcotbl->scrubbed)
 
 
        if (switch_backup && new_backup != res->backup) {
                if (res->backup) {
-                       reservation_object_assert_held(res->backup->base.base.resv);
+                       dma_resv_assert_held(res->backup->base.base.resv);
                        list_del_init(&res->mob_head);
                        vmw_bo_unreference(&res->backup);
                }
 
                if (new_backup) {
                        res->backup = vmw_bo_reference(new_backup);
-                       reservation_object_assert_held(new_backup->base.base.resv);
+                       dma_resv_assert_held(new_backup->base.base.resv);
                        list_add_tail(&res->mob_head, &new_backup->res_list);
                } else {
                        res->backup = NULL;
                .num_shared = 0
        };
 
-       reservation_object_assert_held(vbo->base.base.resv);
+       dma_resv_assert_held(vbo->base.base.resv);
        list_for_each_entry_safe(res, next, &vbo->res_list, mob_head) {
                if (!res->func->unbind)
                        continue;
 
 
 struct device_node;
 struct videomode;
-struct reservation_object;
+struct dma_resv;
 struct dma_buf_attachment;
 
 struct pci_dev;
 
  */
 
 #include <linux/kref.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 
 #include <drm/drm_vma_manager.h>
 
         *
         * Normally (@resv == &@_resv) except for imported GEM objects.
         */
-       struct reservation_object *resv;
+       struct dma_resv *resv;
 
        /**
         * @_resv:
         *
         * This is unused for imported GEM objects.
         */
-       struct reservation_object _resv;
+       struct dma_resv _resv;
 
        /**
         * @funcs:
 int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
                           int count, struct drm_gem_object ***objs_out);
 struct drm_gem_object *drm_gem_object_lookup(struct drm_file *filp, u32 handle);
-long drm_gem_reservation_object_wait(struct drm_file *filep, u32 handle,
+long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
                                    bool wait_all, unsigned long timeout);
 int drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
                              struct ww_acquire_ctx *acquire_ctx);
 
 #include <linux/mutex.h>
 #include <linux/mm.h>
 #include <linux/bitmap.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 
 struct ttm_bo_global;
 
 struct ttm_operation_ctx {
        bool interruptible;
        bool no_wait_gpu;
-       struct reservation_object *resv;
+       struct dma_resv *resv;
        uint64_t bytes_moved;
        uint32_t flags;
 };
  * @page_alignment: Data alignment in pages.
  * @ctx: TTM operation context for memory allocation.
  * @acc_size: Accounted size for this object.
- * @resv: Pointer to a reservation_object, or NULL to let ttm allocate one.
+ * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one.
  * @destroy: Destroy function. Use NULL for kfree().
  *
  * This function initializes a pre-allocated struct ttm_buffer_object.
                         struct ttm_operation_ctx *ctx,
                         size_t acc_size,
                         struct sg_table *sg,
-                        struct reservation_object *resv,
+                        struct dma_resv *resv,
                         void (*destroy) (struct ttm_buffer_object *));
 
 /**
  * point to the shmem object backing a GEM object if TTM is used to back a
  * GEM user interface.
  * @acc_size: Accounted size for this object.
- * @resv: Pointer to a reservation_object, or NULL to let ttm allocate one.
+ * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one.
  * @destroy: Destroy function. Use NULL for kfree().
  *
  * This function initializes a pre-allocated struct ttm_buffer_object.
                unsigned long size, enum ttm_bo_type type,
                struct ttm_placement *placement,
                uint32_t page_alignment, bool interrubtible, size_t acc_size,
-               struct sg_table *sg, struct reservation_object *resv,
+               struct sg_table *sg, struct dma_resv *resv,
                void (*destroy) (struct ttm_buffer_object *));
 
 /**
 
 #include <linux/workqueue.h>
 #include <linux/fs.h>
 #include <linux/spinlock.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 
 #include "ttm_bo_api.h"
 #include "ttm_memory.h"
                if (WARN_ON(ticket))
                        return -EBUSY;
 
-               success = reservation_object_trylock(bo->base.resv);
+               success = dma_resv_trylock(bo->base.resv);
                return success ? 0 : -EBUSY;
        }
 
        if (interruptible)
-               ret = reservation_object_lock_interruptible(bo->base.resv, ticket);
+               ret = dma_resv_lock_interruptible(bo->base.resv, ticket);
        else
-               ret = reservation_object_lock(bo->base.resv, ticket);
+               ret = dma_resv_lock(bo->base.resv, ticket);
        if (ret == -EINTR)
                return -ERESTARTSYS;
        return ret;
        WARN_ON(!kref_read(&bo->kref));
 
        if (interruptible)
-               ret = reservation_object_lock_slow_interruptible(bo->base.resv,
+               ret = dma_resv_lock_slow_interruptible(bo->base.resv,
                                                                 ticket);
        else
-               reservation_object_lock_slow(bo->base.resv, ticket);
+               dma_resv_lock_slow(bo->base.resv, ticket);
 
        if (likely(ret == 0))
                ttm_bo_del_sub_from_lru(bo);
        else
                ttm_bo_move_to_lru_tail(bo, NULL);
        spin_unlock(&bo->bdev->glob->lru_lock);
-       reservation_object_unlock(bo->base.resv);
+       dma_resv_unlock(bo->base.resv);
 }
 
 /*
 
        struct module *owner;
        struct list_head list_node;
        void *priv;
-       struct reservation_object *resv;
+       struct dma_resv *resv;
 
        /* poll support */
        wait_queue_head_t poll;
        const struct dma_buf_ops *ops;
        size_t size;
        int flags;
-       struct reservation_object *resv;
+       struct dma_resv *resv;
        void *priv;
 };
 
 
 }
 
 /**
- * dma_fence_get_rcu - get a fence from a reservation_object_list with
+ * dma_fence_get_rcu - get a fence from a dma_resv_list with
  *                     rcu read lock
  * @fence: fence to increase refcount of
  *
  * so long as the caller is using RCU on the pointer to the fence.
  *
  * An alternative mechanism is to employ a seqlock to protect a bunch of
- * fences, such as used by struct reservation_object. When using a seqlock,
+ * fences, such as used by struct dma_resv. When using a seqlock,
  * the seqlock must be taken before and checked after a reference to the
  * fence is acquired (as shown here).
  *
 
--- /dev/null
+/*
+ * Header file for reservations for dma-buf and ttm
+ *
+ * Copyright(C) 2011 Linaro Limited. All rights reserved.
+ * Copyright (C) 2012-2013 Canonical Ltd
+ * Copyright (C) 2012 Texas Instruments
+ *
+ * Authors:
+ * Rob Clark <robdclark@gmail.com>
+ * Maarten Lankhorst <maarten.lankhorst@canonical.com>
+ * Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ *
+ * Based on bo.c which bears the following copyright notice,
+ * but is dual licensed:
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _LINUX_RESERVATION_H
+#define _LINUX_RESERVATION_H
+
+#include <linux/ww_mutex.h>
+#include <linux/dma-fence.h>
+#include <linux/slab.h>
+#include <linux/seqlock.h>
+#include <linux/rcupdate.h>
+
+extern struct ww_class reservation_ww_class;
+
+/**
+ * struct dma_resv_list - a list of shared fences
+ * @rcu: for internal use
+ * @shared_count: number of fences in the shared table
+ * @shared_max: allocated capacity of the shared fence table
+ * @shared: shared fence table
+ */
+struct dma_resv_list {
+       struct rcu_head rcu;
+       u32 shared_count, shared_max;
+       struct dma_fence __rcu *shared[];
+};
+
+/**
+ * struct dma_resv - a reservation object manages fences for a buffer
+ * @lock: update side lock
+ * @fence_excl: the exclusive fence, if there is one currently
+ * @fence: list of current shared fences
+ */
+struct dma_resv {
+       struct ww_mutex lock;
+
+       struct dma_fence __rcu *fence_excl;
+       struct dma_resv_list __rcu *fence;
+};
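+
+/*
+ * Minimal life-cycle sketch: a dma_resv is normally embedded in the buffer
+ * object it protects and shares its lifetime ("struct my_buffer" below is a
+ * placeholder, not part of this header):
+ *
+ *      struct my_buffer {
+ *              struct dma_resv resv;
+ *      };
+ *
+ *      dma_resv_init(&buf->resv);
+ *      ... attach and consume fences under dma_resv_lock() ...
+ *      dma_resv_fini(&buf->resv);
+ */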
+
+#define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base)
+#define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base)
+
+/**
+ * dma_resv_get_excl - get the reservation object's
+ * exclusive fence, with update-side lock held
+ * @obj: the reservation object
+ *
+ * Returns the exclusive fence (if any).  Does NOT take a
+ * reference.  The obj->lock must be held.
+ *
+ * RETURNS
+ * The exclusive fence or NULL
+ */
+static inline struct dma_fence *dma_resv_get_excl(struct dma_resv *obj)
+{
+       return rcu_dereference_protected(obj->fence_excl,
+                                        dma_resv_held(obj));
+}
+
+/**
+ * dma_resv_get_list - get the reservation object's
+ * shared fence list, with update-side lock held
+ * @obj: the reservation object
+ *
+ * Returns the shared fence list.  Does NOT take references to
+ * the fence.  The obj->lock must be held.
+ */
+static inline struct dma_resv_list *dma_resv_get_list(struct dma_resv *obj)
+{
+       return rcu_dereference_protected(obj->fence,
+                                        dma_resv_held(obj));
+}
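+
+/*
+ * Illustrative sketch of walking the shared list with the update-side lock
+ * held, as the drivers converted in this patch do; @resv is assumed to be
+ * locked by the caller and no extra fence references are taken:
+ *
+ *      struct dma_resv_list *list = dma_resv_get_list(resv);
+ *      unsigned int i;
+ *
+ *      for (i = 0; list && i < list->shared_count; ++i) {
+ *              struct dma_fence *f;
+ *
+ *              f = rcu_dereference_protected(list->shared[i],
+ *                                            dma_resv_held(resv));
+ *              ... inspect f ...
+ *      }
+ */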
+
+/**
+ * dma_resv_fences - read consistent fence pointers
+ * @obj: reservation object where we get the fences from
+ * @excl: pointer for the exclusive fence
+ * @list: pointer for the shared fence list
+ * @shared_count: pointer for the number of shared fences
+ *
+ * Make sure we have a consistent exclusive fence and shared fence list.
+ * Must be called with rcu read side lock held.
+ */
+static inline void dma_resv_fences(struct dma_resv *obj,
+                                  struct dma_fence **excl,
+                                  struct dma_resv_list **list,
+                                  u32 *shared_count)
+{
+       do {
+               *excl = rcu_dereference(obj->fence_excl);
+               *list = rcu_dereference(obj->fence);
+               *shared_count = *list ? (*list)->shared_count : 0;
+               smp_rmb(); /* See dma_resv_add_excl_fence */
+       } while (rcu_access_pointer(obj->fence_excl) != *excl);
+}
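+
+/*
+ * Read-side sketch, assuming the caller only needs a momentary snapshot and
+ * takes no long-lived fence references:
+ *
+ *      struct dma_resv_list *list;
+ *      struct dma_fence *excl;
+ *      u32 count;
+ *
+ *      rcu_read_lock();
+ *      dma_resv_fences(obj, &excl, &list, &count);
+ *      ... inspect excl and list->shared[0] .. list->shared[count - 1] ...
+ *      rcu_read_unlock();
+ */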
+
+/**
+ * dma_resv_get_excl_rcu - get the reservation object's
+ * exclusive fence, without lock held.
+ * @obj: the reservation object
+ *
+ * If there is an exclusive fence, this atomically increments its
+ * reference count and returns it.
+ *
+ * RETURNS
+ * The exclusive fence or NULL if none
+ */
+static inline struct dma_fence *dma_resv_get_excl_rcu(struct dma_resv *obj)
+{
+       struct dma_fence *fence;
+
+       if (!rcu_access_pointer(obj->fence_excl))
+               return NULL;
+
+       rcu_read_lock();
+       fence = dma_fence_get_rcu_safe(&obj->fence_excl);
+       rcu_read_unlock();
+
+       return fence;
+}
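+
+/*
+ * Lockless wait sketch: grab a reference to the exclusive fence and wait on
+ * it outside of any reservation lock (error handling omitted):
+ *
+ *      struct dma_fence *excl = dma_resv_get_excl_rcu(obj);
+ *
+ *      if (excl) {
+ *              dma_fence_wait(excl, true);
+ *              dma_fence_put(excl);
+ *      }
+ */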
+
+/**
+ * dma_resv_lock - lock the reservation object
+ * @obj: the reservation object
+ * @ctx: the locking context
+ *
+ * Locks the reservation object for exclusive access and modification. Note
+ * that the lock is only against other writers; readers will run concurrently
+ * with a writer under RCU, and obtain a consistent snapshot of the fences
+ * via dma_resv_fences().
+ *
+ * As the reservation object may be locked by multiple parties in an
+ * undefined order, a #ww_acquire_ctx is passed to unwind if a cycle
+ * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation
+ * object may be locked by itself by passing NULL as @ctx.
+ */
+static inline int dma_resv_lock(struct dma_resv *obj,
+                               struct ww_acquire_ctx *ctx)
+{
+       return ww_mutex_lock(&obj->lock, ctx);
+}
+
+/**
+ * dma_resv_lock_interruptible - lock the reservation object
+ * @obj: the reservation object
+ * @ctx: the locking context
+ *
+ * Locks the reservation object interruptibly for exclusive access and
+ * modification. Note that the lock is only against other writers; readers
+ * will run concurrently with a writer under RCU, and obtain a consistent
+ * snapshot of the fences via dma_resv_fences().
+ *
+ * As the reservation object may be locked by multiple parties in an
+ * undefined order, a #ww_acquire_ctx is passed to unwind if a cycle
+ * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation
+ * object may be locked by itself by passing NULL as @ctx.
+ */
+static inline int dma_resv_lock_interruptible(struct dma_resv *obj,
+                                             struct ww_acquire_ctx *ctx)
+{
+       return ww_mutex_lock_interruptible(&obj->lock, ctx);
+}
+
+/**
+ * dma_resv_lock_slow - slowpath lock the reservation object
+ * @obj: the reservation object
+ * @ctx: the locking context
+ *
+ * Acquires the reservation object after a die case. This function
+ * will sleep until the lock becomes available. See dma_resv_lock() as
+ * well.
+ */
+static inline void dma_resv_lock_slow(struct dma_resv *obj,
+                                     struct ww_acquire_ctx *ctx)
+{
+       ww_mutex_lock_slow(&obj->lock, ctx);
+}
+
+/**
+ * dma_resv_lock_slow_interruptible - slowpath lock the reservation
+ * object, interruptible
+ * @obj: the reservation object
+ * @ctx: the locking context
+ *
+ * Acquires the reservation object interruptibly after a die case. This function
+ * will sleep until the lock becomes available. See
+ * dma_resv_lock_interruptible() as well.
+ */
+static inline int dma_resv_lock_slow_interruptible(struct dma_resv *obj,
+                                                  struct ww_acquire_ctx *ctx)
+{
+       return ww_mutex_lock_slow_interruptible(&obj->lock, ctx);
+}
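+
+/*
+ * Deadlock back-off sketch for locking two objects @a and @b (placeholder
+ * names) with one acquire context; on -EDEADLK every held lock is dropped
+ * and the contended object is re-acquired on the slow path before retrying:
+ *
+ *      struct ww_acquire_ctx ctx;
+ *      int ret;
+ *
+ *      ww_acquire_init(&ctx, &reservation_ww_class);
+ *      ret = dma_resv_lock(&a->resv, &ctx);
+ *      if (!ret) {
+ *              ret = dma_resv_lock(&b->resv, &ctx);
+ *              if (ret == -EDEADLK) {
+ *                      dma_resv_unlock(&a->resv);
+ *                      ret = dma_resv_lock_slow_interruptible(&b->resv, &ctx);
+ *                      ... retry locking a, then continue or back off ...
+ *              }
+ *      }
+ *      ...
+ *      ww_acquire_fini(&ctx);
+ */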
+
+/**
+ * dma_resv_trylock - trylock the reservation object
+ * @obj: the reservation object
+ *
+ * Tries to lock the reservation object for exclusive access and modification.
+ * Note that the lock is only against other writers; readers will run
+ * concurrently with a writer under RCU, and obtain a consistent snapshot of
+ * the fences via dma_resv_fences().
+ *
+ * Also note that since no context is provided, no deadlock protection is
+ * possible.
+ *
+ * Returns true if the lock was acquired, false otherwise.
+ */
+static inline bool __must_check dma_resv_trylock(struct dma_resv *obj)
+{
+       return ww_mutex_trylock(&obj->lock);
+}
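+
+/*
+ * Opportunistic locking sketch, as used by the TTM eviction paths converted
+ * in this patch; if the object is contended it is simply skipped:
+ *
+ *      if (dma_resv_trylock(resv)) {
+ *              ... evict or clean up the object ...
+ *              dma_resv_unlock(resv);
+ *      }
+ */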
+
+/**
+ * dma_resv_is_locked - is the reservation object locked
+ * @obj: the reservation object
+ *
+ * Returns true if the mutex is locked, false if unlocked.
+ */
+static inline bool dma_resv_is_locked(struct dma_resv *obj)
+{
+       return ww_mutex_is_locked(&obj->lock);
+}
+
+/**
+ * dma_resv_locking_ctx - returns the context used to lock the object
+ * @obj: the reservation object
+ *
+ * Returns the context used to lock a reservation object or NULL if no context
+ * was used or the object is not locked at all.
+ */
+static inline struct ww_acquire_ctx *dma_resv_locking_ctx(struct dma_resv *obj)
+{
+       return READ_ONCE(obj->lock.ctx);
+}
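+
+/*
+ * Sketch of how an eviction loop can tell whether it already holds an object
+ * through its own acquire context ("ticket" is the caller's ww_acquire_ctx):
+ *
+ *      if (dma_resv_locking_ctx(bo->base.resv) == ticket)
+ *              ... already locked as part of this transaction, skip it ...
+ */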
+
+/**
+ * dma_resv_unlock - unlock the reservation object
+ * @obj: the reservation object
+ *
+ * Unlocks the reservation object following exclusive access.
+ */
+static inline void dma_resv_unlock(struct dma_resv *obj)
+{
+#ifdef CONFIG_DEBUG_MUTEXES
+       /* Test shared fence slot reservation */
+       if (rcu_access_pointer(obj->fence)) {
+               struct dma_resv_list *fence = dma_resv_get_list(obj);
+
+               fence->shared_max = fence->shared_count;
+       }
+#endif
+       ww_mutex_unlock(&obj->lock);
+}
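+
+/*
+ * Fence publication sketch, mirroring the vgem conversion in this patch:
+ * writers install an exclusive fence, readers reserve a slot and add a
+ * shared fence, all under the reservation lock:
+ *
+ *      int ret = 0;
+ *
+ *      dma_resv_lock(resv, NULL);
+ *      if (write)
+ *              dma_resv_add_excl_fence(resv, fence);
+ *      else if ((ret = dma_resv_reserve_shared(resv, 1)) == 0)
+ *              dma_resv_add_shared_fence(resv, fence);
+ *      dma_resv_unlock(resv);
+ */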
+
+void dma_resv_init(struct dma_resv *obj);
+void dma_resv_fini(struct dma_resv *obj);
+int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences);
+void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence);
+
+void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence);
+
+int dma_resv_get_fences_rcu(struct dma_resv *obj,
+                           struct dma_fence **pfence_excl,
+                           unsigned *pshared_count,
+                           struct dma_fence ***pshared);
+
+int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src);
+
+long dma_resv_wait_timeout_rcu(struct dma_resv *obj, bool wait_all, bool intr,
+                              unsigned long timeout);
+
+bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all);
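+
+/*
+ * Wait/poll sketch combining the two helpers above ("no_wait" and "intr" are
+ * caller-provided flags in this example):
+ *
+ *      long timeout;
+ *
+ *      if (no_wait)
+ *              return dma_resv_test_signaled_rcu(resv, true) ? 0 : -EBUSY;
+ *
+ *      timeout = dma_resv_wait_timeout_rcu(resv, true, intr, 30 * HZ);
+ *      if (timeout < 0)
+ *              return timeout;
+ *      if (timeout == 0)
+ *              return -EBUSY;
+ */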
+
+#endif /* _LINUX_RESERVATION_H */
 
+++ /dev/null
-/*
- * Header file for reservations for dma-buf and ttm
- *
- * Copyright(C) 2011 Linaro Limited. All rights reserved.
- * Copyright (C) 2012-2013 Canonical Ltd
- * Copyright (C) 2012 Texas Instruments
- *
- * Authors:
- * Rob Clark <robdclark@gmail.com>
- * Maarten Lankhorst <maarten.lankhorst@canonical.com>
- * Thomas Hellstrom <thellstrom-at-vmware-dot-com>
- *
- * Based on bo.c which bears the following copyright notice,
- * but is dual licensed:
- *
- * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-#ifndef _LINUX_RESERVATION_H
-#define _LINUX_RESERVATION_H
-
-#include <linux/ww_mutex.h>
-#include <linux/dma-fence.h>
-#include <linux/slab.h>
-#include <linux/seqlock.h>
-#include <linux/rcupdate.h>
-
-extern struct ww_class reservation_ww_class;
-
-/**
- * struct reservation_object_list - a list of shared fences
- * @rcu: for internal use
- * @shared_count: table of shared fences
- * @shared_max: for growing shared fence table
- * @shared: shared fence table
- */
-struct reservation_object_list {
-       struct rcu_head rcu;
-       u32 shared_count, shared_max;
-       struct dma_fence __rcu *shared[];
-};
-
-/**
- * struct reservation_object - a reservation object manages fences for a buffer
- * @lock: update side lock
- * @seq: sequence count for managing RCU read-side synchronization
- * @fence_excl: the exclusive fence, if there is one currently
- * @fence: list of current shared fences
- */
-struct reservation_object {
-       struct ww_mutex lock;
-
-       struct dma_fence __rcu *fence_excl;
-       struct reservation_object_list __rcu *fence;
-};
-
-#define reservation_object_held(obj) lockdep_is_held(&(obj)->lock.base)
-#define reservation_object_assert_held(obj) \
-       lockdep_assert_held(&(obj)->lock.base)
-
-/**
- * reservation_object_get_excl - get the reservation object's
- * exclusive fence, with update-side lock held
- * @obj: the reservation object
- *
- * Returns the exclusive fence (if any).  Does NOT take a
- * reference. Writers must hold obj->lock, readers may only
- * hold a RCU read side lock.
- *
- * RETURNS
- * The exclusive fence or NULL
- */
-static inline struct dma_fence *
-reservation_object_get_excl(struct reservation_object *obj)
-{
-       return rcu_dereference_protected(obj->fence_excl,
-                                        reservation_object_held(obj));
-}
-
-/**
- * reservation_object_get_list - get the reservation object's
- * shared fence list, with update-side lock held
- * @obj: the reservation object
- *
- * Returns the shared fence list.  Does NOT take references to
- * the fence.  The obj->lock must be held.
- */
-static inline struct reservation_object_list *
-reservation_object_get_list(struct reservation_object *obj)
-{
-       return rcu_dereference_protected(obj->fence,
-                                        reservation_object_held(obj));
-}
-
-/**
- * reservation_object_fences - read consistent fence pointers
- * @obj: reservation object where we get the fences from
- * @excl: pointer for the exclusive fence
- * @list: pointer for the shared fence list
- *
- * Make sure we have a consisten exclusive fence and shared fence list.
- * Must be called with rcu read side lock held.
- */
-static inline void
-reservation_object_fences(struct reservation_object *obj,
-                         struct dma_fence **excl,
-                         struct reservation_object_list **list,
-                         u32 *shared_count)
-{
-       do {
-               *excl = rcu_dereference(obj->fence_excl);
-               *list = rcu_dereference(obj->fence);
-               *shared_count = *list ? (*list)->shared_count : 0;
-               smp_rmb(); /* See reservation_object_add_excl_fence */
-       } while (rcu_access_pointer(obj->fence_excl) != *excl);
-}
-
-/**
- * reservation_object_get_excl_rcu - get the reservation object's
- * exclusive fence, without lock held.
- * @obj: the reservation object
- *
- * If there is an exclusive fence, this atomically increments it's
- * reference count and returns it.
- *
- * RETURNS
- * The exclusive fence or NULL if none
- */
-static inline struct dma_fence *
-reservation_object_get_excl_rcu(struct reservation_object *obj)
-{
-       struct dma_fence *fence;
-
-       if (!rcu_access_pointer(obj->fence_excl))
-               return NULL;
-
-       rcu_read_lock();
-       fence = dma_fence_get_rcu_safe(&obj->fence_excl);
-       rcu_read_unlock();
-
-       return fence;
-}
-
-/**
- * reservation_object_lock - lock the reservation object
- * @obj: the reservation object
- * @ctx: the locking context
- *
- * Locks the reservation object for exclusive access and modification. Note,
- * that the lock is only against other writers, readers will run concurrently
- * with a writer under RCU. The seqlock is used to notify readers if they
- * overlap with a writer.
- *
- * As the reservation object may be locked by multiple parties in an
- * undefined order, a #ww_acquire_ctx is passed to unwind if a cycle
- * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation
- * object may be locked by itself by passing NULL as @ctx.
- */
-static inline int
-reservation_object_lock(struct reservation_object *obj,
-                       struct ww_acquire_ctx *ctx)
-{
-       return ww_mutex_lock(&obj->lock, ctx);
-}
-
-/**
- * reservation_object_lock_interruptible - lock the reservation object
- * @obj: the reservation object
- * @ctx: the locking context
- *
- * Locks the reservation object interruptible for exclusive access and
- * modification. Note, that the lock is only against other writers, readers
- * will run concurrently with a writer under RCU. The seqlock is used to
- * notify readers if they overlap with a writer.
- *
- * As the reservation object may be locked by multiple parties in an
- * undefined order, a #ww_acquire_ctx is passed to unwind if a cycle
- * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation
- * object may be locked by itself by passing NULL as @ctx.
- */
-static inline int
-reservation_object_lock_interruptible(struct reservation_object *obj,
-                                     struct ww_acquire_ctx *ctx)
-{
-       return ww_mutex_lock_interruptible(&obj->lock, ctx);
-}
-
-/**
- * reservation_object_lock_slow - slowpath lock the reservation object
- * @obj: the reservation object
- * @ctx: the locking context
- *
- * Acquires the reservation object after a die case. This function
- * will sleep until the lock becomes available. See reservation_object_lock() as
- * well.
- */
-static inline void
-reservation_object_lock_slow(struct reservation_object *obj,
-                            struct ww_acquire_ctx *ctx)
-{
-       ww_mutex_lock_slow(&obj->lock, ctx);
-}
-
-/**
- * reservation_object_lock_slow_interruptible - slowpath lock the reservation
- * object, interruptible
- * @obj: the reservation object
- * @ctx: the locking context
- *
- * Acquires the reservation object interruptible after a die case. This function
- * will sleep until the lock becomes available. See
- * reservation_object_lock_interruptible() as well.
- */
-static inline int
-reservation_object_lock_slow_interruptible(struct reservation_object *obj,
-                                          struct ww_acquire_ctx *ctx)
-{
-       return ww_mutex_lock_slow_interruptible(&obj->lock, ctx);
-}
-
-/**
- * reservation_object_trylock - trylock the reservation object
- * @obj: the reservation object
- *
- * Tries to lock the reservation object for exclusive access and modification.
- * Note, that the lock is only against other writers, readers will run
- * concurrently with a writer under RCU. The seqlock is used to notify readers
- * if they overlap with a writer.
- *
- * Also note that since no context is provided, no deadlock protection is
- * possible.
- *
- * Returns true if the lock was acquired, false otherwise.
- */
-static inline bool __must_check
-reservation_object_trylock(struct reservation_object *obj)
-{
-       return ww_mutex_trylock(&obj->lock);
-}
-
-/**
- * reservation_object_is_locked - is the reservation object locked
- * @obj: the reservation object
- *
- * Returns true if the mutex is locked, false if unlocked.
- */
-static inline bool
-reservation_object_is_locked(struct reservation_object *obj)
-{
-       return ww_mutex_is_locked(&obj->lock);
-}
-
-/**
- * reservation_object_locking_ctx - returns the context used to lock the object
- * @obj: the reservation object
- *
- * Returns the context used to lock a reservation object or NULL if no context
- * was used or the object is not locked at all.
- */
-static inline struct ww_acquire_ctx *
-reservation_object_locking_ctx(struct reservation_object *obj)
-{
-       return READ_ONCE(obj->lock.ctx);
-}
-
-/**
- * reservation_object_unlock - unlock the reservation object
- * @obj: the reservation object
- *
- * Unlocks the reservation object following exclusive access.
- */
-static inline void
-reservation_object_unlock(struct reservation_object *obj)
-{
-#ifdef CONFIG_DEBUG_MUTEXES
-       /* Test shared fence slot reservation */
-       if (rcu_access_pointer(obj->fence)) {
-               struct reservation_object_list *fence =
-                       reservation_object_get_list(obj);
-
-               fence->shared_max = fence->shared_count;
-       }
-#endif
-       ww_mutex_unlock(&obj->lock);
-}
-
-void reservation_object_init(struct reservation_object *obj);
-void reservation_object_fini(struct reservation_object *obj);
-int reservation_object_reserve_shared(struct reservation_object *obj,
-                                     unsigned int num_fences);
-void reservation_object_add_shared_fence(struct reservation_object *obj,
-                                        struct dma_fence *fence);
-
-void reservation_object_add_excl_fence(struct reservation_object *obj,
-                                      struct dma_fence *fence);
-
-int reservation_object_get_fences_rcu(struct reservation_object *obj,
-                                     struct dma_fence **pfence_excl,
-                                     unsigned *pshared_count,
-                                     struct dma_fence ***pshared);
-
-int reservation_object_copy_fences(struct reservation_object *dst,
-                                  struct reservation_object *src);
-
-long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
-                                        bool wait_all, bool intr,
-                                        unsigned long timeout);
-
-bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
-                                         bool test_all);
-
-#endif /* _LINUX_RESERVATION_H */