return ret;
 
        fb_obj_bump_render_priority(obj);
-       intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_DIRTYFB);
+       i915_gem_object_flush_frontbuffer(obj, ORIGIN_DIRTYFB);
 
        if (!new_plane_state->uapi.fence) { /* implicit fencing */
                struct dma_fence *fence;
 
                vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
        spin_unlock(&obj->vma.lock);
 
-       obj->frontbuffer = NULL;
+       RCU_INIT_POINTER(obj->frontbuffer, NULL);
        spin_unlock(&to_i915(obj->base.dev)->fb_tracking.lock);
 
        i915_gem_object_put(obj);
-       kfree(front);
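+       /*
+        * The pointer was cleared under fb_tracking.lock above, but lockless
+        * readers in __intel_frontbuffer_get() may still be dereferencing the
+        * old value under rcu_read_lock(), so defer the free by a grace period.
+        */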
+       kfree_rcu(front, rcu);
 }
 
 struct intel_frontbuffer *
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct intel_frontbuffer *front;
 
-       spin_lock(&i915->fb_tracking.lock);
-       front = obj->frontbuffer;
-       if (front)
-               kref_get(&front->ref);
-       spin_unlock(&i915->fb_tracking.lock);
+       front = __intel_frontbuffer_get(obj);
        if (front)
                return front;
 
                         i915_active_may_sleep(frontbuffer_retire));
 
        spin_lock(&i915->fb_tracking.lock);
-       if (obj->frontbuffer) {
+       if (rcu_access_pointer(obj->frontbuffer)) {
                kfree(front);
-               front = obj->frontbuffer;
+               front = rcu_dereference_protected(obj->frontbuffer, true);
                kref_get(&front->ref);
        } else {
                i915_gem_object_get(obj);
-               obj->frontbuffer = front;
+               rcu_assign_pointer(obj->frontbuffer, front);
        }
        spin_unlock(&i915->fb_tracking.lock);
 
 
 #include <linux/atomic.h>
 #include <linux/kref.h>
 
+#include "gem/i915_gem_object_types.h"
 #include "i915_active.h"
 
 struct drm_i915_private;
-struct drm_i915_gem_object;
 
 enum fb_op_origin {
        ORIGIN_GTT,
        atomic_t bits;
        struct i915_active write;
        struct drm_i915_gem_object *obj;
+       struct rcu_head rcu;
 };
 
 void intel_frontbuffer_flip_prepare(struct drm_i915_private *i915,
 void intel_frontbuffer_flip(struct drm_i915_private *i915,
                            unsigned frontbuffer_bits);
 
+void intel_frontbuffer_put(struct intel_frontbuffer *front);
+
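+/*
+ * Acquire a reference on obj->frontbuffer without taking fb_tracking.lock:
+ * kref_get_unless_zero() under the RCU read lock, then re-read the pointer
+ * to make sure we did not race against the final put or a replacement.
+ */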
+static inline struct intel_frontbuffer *
+__intel_frontbuffer_get(const struct drm_i915_gem_object *obj)
+{
+       struct intel_frontbuffer *front;
+
+       if (likely(!rcu_access_pointer(obj->frontbuffer)))
+               return NULL;
+
+       rcu_read_lock();
+       do {
+               front = rcu_dereference(obj->frontbuffer);
+               if (!front)
+                       break;
+
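+               /*
+                * A zero refcount means frontbuffer_release() is already in
+                * progress; retry and re-read the pointer.
+                */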
+               if (unlikely(!kref_get_unless_zero(&front->ref)))
+                       continue;
+
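+               /*
+                * If the pointer changed while we were acquiring the
+                * reference, we grabbed a stale frontbuffer: drop it and
+                * start over.
+                */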
+               if (likely(front == rcu_access_pointer(obj->frontbuffer)))
+                       break;
+
+               intel_frontbuffer_put(front);
+       } while (1);
+       rcu_read_unlock();
+
+       return front;
+}
+
 struct intel_frontbuffer *
 intel_frontbuffer_get(struct drm_i915_gem_object *obj);
 
                             struct intel_frontbuffer *new,
                             unsigned int frontbuffer_bits);
 
-void intel_frontbuffer_put(struct intel_frontbuffer *front);
-
 #endif /* __INTEL_FRONTBUFFER_H__ */
 
                                       struct i915_vma *vma)
 {
        enum pipe pipe = overlay->crtc->pipe;
+       struct intel_frontbuffer *from = NULL, *to = NULL;
 
        WARN_ON(overlay->old_vma);
 
-       intel_frontbuffer_track(overlay->vma ? overlay->vma->obj->frontbuffer : NULL,
-                               vma ? vma->obj->frontbuffer : NULL,
-                               INTEL_FRONTBUFFER_OVERLAY(pipe));
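+       /*
+        * Hold local references across intel_frontbuffer_track() so that
+        * neither frontbuffer can be released mid-update.
+        */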
+       if (overlay->vma)
+               from = intel_frontbuffer_get(overlay->vma->obj);
+       if (vma)
+               to = intel_frontbuffer_get(vma->obj);
+
+       intel_frontbuffer_track(from, to, INTEL_FRONTBUFFER_OVERLAY(pipe));
+
+       if (to)
+               intel_frontbuffer_put(to);
+       if (from)
+               intel_frontbuffer_put(from);
 
        intel_frontbuffer_flip_prepare(overlay->i915,
                                       INTEL_FRONTBUFFER_OVERLAY(pipe));
                ret = PTR_ERR(vma);
                goto out_pin_section;
        }
-       intel_frontbuffer_flush(new_bo->frontbuffer, ORIGIN_DIRTYFB);
+       i915_gem_object_flush_frontbuffer(new_bo, ORIGIN_DIRTYFB);
 
        if (!overlay->active) {
                u32 oconfig;
 
 {
        GEM_BUG_ON(!i915_gem_object_has_pages(obj));
        drm_clflush_sg(obj->mm.pages);
-       intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);
+
+       i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
 }
 
 static int clflush_work(struct dma_fence_work *base)
 
        i915_gem_object_unlock(obj);
 
        if (write_domain)
-               intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CPU);
+               i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
 
 out_unpin:
        i915_gem_object_unpin_pages(obj);
        }
 
 out:
-       intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CPU);
+       i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
        obj->mm.dirty = true;
        /* return with the pages pinned */
        return 0;
 
                }
                spin_unlock(&obj->vma.lock);
 
-               intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);
+               i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
                break;
 
        case I915_GEM_DOMAIN_WC:
        obj->write_domain = 0;
 }
 
+void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
+                                        enum fb_op_origin origin)
+{
+       struct intel_frontbuffer *front;
+
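+       /* Pin the frontbuffer, if any, so it cannot be freed while we flush. */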
+       front = __intel_frontbuffer_get(obj);
+       if (front) {
+               intel_frontbuffer_flush(front, origin);
+               intel_frontbuffer_put(front);
+       }
+}
+
+void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
+                                             enum fb_op_origin origin)
+{
+       struct intel_frontbuffer *front;
+
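+       /* As for the flush above: pin the frontbuffer across the call. */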
+       front = __intel_frontbuffer_get(obj);
+       if (front) {
+               intel_frontbuffer_invalidate(front, origin);
+               intel_frontbuffer_put(front);
+       }
+}
+
 void i915_gem_init__objects(struct drm_i915_private *i915)
 {
        INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
 
 
 #include <drm/i915_drm.h>
 
+#include "display/intel_frontbuffer.h"
 #include "i915_gem_object_types.h"
-
 #include "i915_gem_gtt.h"
 
 void i915_gem_init__objects(struct drm_i915_private *i915);
                                  unsigned int flags,
                                  const struct i915_sched_attr *attr);
 
+void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
+                                        enum fb_op_origin origin);
+void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
+                                             enum fb_op_origin origin);
+
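+/*
+ * Peek at the __rcu pointer with rcu_access_pointer() so that the common
+ * case of an object with no frontbuffer stays a single load, with no
+ * out-of-line call.
+ */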
+static inline void
+i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
+                                 enum fb_op_origin origin)
+{
+       if (unlikely(rcu_access_pointer(obj->frontbuffer)))
+               __i915_gem_object_flush_frontbuffer(obj, origin);
+}
+
+static inline void
+i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
+                                      enum fb_op_origin origin)
+{
+       if (unlikely(rcu_access_pointer(obj->frontbuffer)))
+               __i915_gem_object_invalidate_frontbuffer(obj, origin);
+}
+
 #endif
 
         */
        u16 write_domain;
 
-       struct intel_frontbuffer *frontbuffer;
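+       /* Writers must hold i915->fb_tracking.lock; readers may use RCU. */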
+       struct intel_frontbuffer __rcu *frontbuffer;
 
        /** Current tiling stride for the object, if it's tiled. */
        unsigned int tiling_and_stride;
 
         * We manually control the domain here and pretend that it
         * remains coherent i.e. in the GTT domain, like shmem_pwrite.
         */
-       intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CPU);
+       i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
 
        if (copy_from_user(vaddr, user_data, args->size))
                return -EFAULT;
        drm_clflush_virt_range(vaddr, args->size);
        intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);
 
-       intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);
+       i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
        return 0;
 }
 
                goto out_unpin;
        }
 
-       intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CPU);
+       i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
 
        user_data = u64_to_user_ptr(args->data_ptr);
        offset = args->offset;
        }
 
        intel_gt_flush_ggtt_writes(ggtt->vm.gt);
-       intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);
+       i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
 
        i915_gem_object_unlock_fence(obj, fence);
 out_unpin:
                offset = 0;
        }
 
-       intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_CPU);
+       i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
        i915_gem_object_unlock_fence(obj, fence);
 
        return ret;
 
                return err;
 
        if (flags & EXEC_OBJECT_WRITE) {
-               if (intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CS))
-                       i915_active_add_request(&obj->frontbuffer->write, rq);
+               struct intel_frontbuffer *front;
+
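+               /*
+                * Pin the frontbuffer so it cannot be released between the
+                * invalidate and tracking the write request.
+                */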
+               front = __intel_frontbuffer_get(obj);
+               if (unlikely(front)) {
+                       if (intel_frontbuffer_invalidate(front, ORIGIN_CS))
+                               i915_active_add_request(&front->write, rq);
+                       intel_frontbuffer_put(front);
+               }
 
                dma_resv_add_excl_fence(vma->resv, &rq->fence);
                obj->write_domain = I915_GEM_DOMAIN_RENDER;