                        list_move(&rq->sched.link, pl);
                        set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
 
+                       /* Check in case we roll back so far that we wrap [size/2] */
+                       if (intel_ring_direction(rq->ring,
+                                                intel_ring_wrap(rq->ring,
+                                                                rq->tail),
+                                                rq->ring->tail) > 0)
+                               rq->context->lrc.desc |= CTX_DESC_FORCE_RESTORE;
+
                        active = rq;
                } else {
                        struct intel_engine_cs *owner = rq->context->engine;
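
For reference, the two helpers the hunk above leans on are small inlines in
intel_ring.h. A sketch of their assumed, era-appropriate shape (not part of
this patch), where ring->wrap is the left shift that moves a tail delta's
[size / 2] bit into the sign bit:

        static inline u32 intel_ring_wrap(const struct intel_ring *ring, u32 pos)
        {
                return pos & (ring->size - 1); /* ring->size is a power of two */
        }

        static inline int intel_ring_direction(const struct intel_ring *ring,
                                               u32 next, u32 prev)
        {
                /* >0: next is ahead of prev; <0: behind; 0: same offset */
                return (next - prev) << ring->wrap;
        }
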
         * HW has a tendency to ignore us rewinding the TAIL to the end of
         * an earlier request.
         */
+       GEM_BUG_ON(ce->lrc_reg_state[CTX_RING_TAIL] != rq->ring->tail);
+       prev = rq->ring->tail;
        tail = intel_ring_set_tail(rq->ring, rq->tail);
-       prev = ce->lrc_reg_state[CTX_RING_TAIL];
        if (unlikely(intel_ring_direction(rq->ring, tail, prev) <= 0))
                desc |= CTX_DESC_FORCE_RESTORE;
        ce->lrc_reg_state[CTX_RING_TAIL] = tail;
        return 0;
 }
 
+static void assert_request_valid(struct i915_request *rq)
+{
+       struct intel_ring *ring __maybe_unused = rq->ring;
+
+       /* Can we unwind this request without appearing to go forwards? */
+       GEM_BUG_ON(intel_ring_direction(ring, rq->wa_tail, rq->head) <= 0);
+}
+
 /*
  * Reserve space for 2 NOOPs at the end of each request to be
  * used as a workaround for not being allowed to do lite
        *cs++ = MI_NOOP;
        request->wa_tail = intel_ring_offset(request, cs);
 
+       /* Check that the entire request is less than half the ring */
+       assert_request_valid(request);
+
        return cs;
 }
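
The new assert encodes the precision limit spelled out in the selftest below:
the direction test only sees the offset delta, so a step of ring->size / 2 or
more is indistinguishable from a step the other way. A toy userspace model
(hypothetical values, 4K ring) of how a request spanning more than half the
ring makes an unwind read as forward motion:

        #include <stdint.h>
        #include <stdio.h>

        #define RING_SIZE 4096u
        #define RING_WRAP (32 - 12) /* BITS_PER_TYPE(u32) - ilog2(RING_SIZE) */

        /* toy model of the direction test: the sign of the shifted delta */
        static int direction(uint32_t next, uint32_t prev)
        {
                return (int32_t)((next - prev) << RING_WRAP);
        }

        int main(void)
        {
                /* 512 bytes forwards across the wrap point: positive */
                printf("%d\n", direction(0x100, 0xf00) > 0);   /* 1 */
                /* rewinding 2304 bytes (> size/2) also reads as forwards */
                printf("%d\n", direction(0x000, 0x900) > 0);   /* 1 */
                return 0;
        }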
 
 
        void *vaddr;
 };
 
+static struct intel_context *mocs_context_create(struct intel_engine_cs *engine)
+{
+       struct intel_context *ce;
+
+       ce = intel_context_create(engine);
+       if (IS_ERR(ce))
+               return ce;
+
+       /* We build large requests to read the registers from the ring */
+       ce->ring = __intel_context_ring_size(SZ_16K);
+
+       return ce;
+}
+
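
The SZ_16K override works because ce->ring does not point at a real ring
until the context is pinned; before that, the desired size is encoded in the
pointer value itself. A sketch of the assumed intel_context.h helper:

        /* assumed shape: the size is smuggled in the pointer until first pin */
        static inline struct intel_ring *__intel_context_ring_size(u64 sz)
        {
                return u64_to_ptr(struct intel_ring, sz);
        }

The MOCS tests below need the headroom because they build requests large
enough to read back every MOCS register, which can overflow the smaller
default ring.
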
 static int request_add_sync(struct i915_request *rq, int err)
 {
        i915_request_get(rq);
        for_each_engine(engine, gt, id) {
                struct intel_context *ce;
 
-               ce = intel_context_create(engine);
+               ce = mocs_context_create(engine);
                if (IS_ERR(ce)) {
                        err = PTR_ERR(ce);
                        break;
        for_each_engine(engine, gt, id) {
                struct intel_context *ce;
 
-               ce = intel_context_create(engine);
+               ce = mocs_context_create(engine);
                if (IS_ERR(ce)) {
                        err = PTR_ERR(ce);
                        break;
 
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_ring.c
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+static struct intel_ring *mock_ring(unsigned long sz)
+{
+       struct intel_ring *ring;
+
+       ring = kzalloc(sizeof(*ring) + sz, GFP_KERNEL);
+       if (!ring)
+               return NULL;
+
+       kref_init(&ring->ref);
+       ring->size = sz;
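+       /* shift a tail delta so its [size / 2] bit lands in the sign bit */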
+       ring->wrap = BITS_PER_TYPE(ring->size) - ilog2(sz);
+       ring->effective_size = sz;
+       ring->vaddr = (void *)(ring + 1);
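+       /* mark the mock as pinned; a real ring is only valid while pinned */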
+       atomic_set(&ring->pin_count, 1);
+
+       intel_ring_update_space(ring);
+
+       return ring;
+}
+
+static void mock_ring_free(struct intel_ring *ring)
+{
+       kfree(ring);
+}
+
+static int check_ring_direction(struct intel_ring *ring,
+                               u32 next, u32 prev,
+                               int expected)
+{
+       int result;
+
+       result = intel_ring_direction(ring, next, prev);
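+       /* the helper returns a scaled delta; reduce it to a bare sign */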
+       if (result < 0)
+               result = -1;
+       else if (result > 0)
+               result = 1;
+
+       if (result != expected) {
+               pr_err("intel_ring_direction(%u, %u):%d != %d\n",
+                      next, prev, result, expected);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int check_ring_step(struct intel_ring *ring, u32 x, u32 step)
+{
+       u32 prev = x, next = intel_ring_wrap(ring, x + step);
+       int err = 0;
+
+       err |= check_ring_direction(ring, next, next,  0);
+       err |= check_ring_direction(ring, prev, prev,  0);
+       err |= check_ring_direction(ring, next, prev,  1);
+       err |= check_ring_direction(ring, prev, next, -1);
+
+       return err;
+}
+
+static int check_ring_offset(struct intel_ring *ring, u32 x, u32 step)
+{
+       int err = 0;
+
+       err |= check_ring_step(ring, x, step);
+       err |= check_ring_step(ring, intel_ring_wrap(ring, x + 1), step);
+       err |= check_ring_step(ring, intel_ring_wrap(ring, x - 1), step);
+
+       return err;
+}
+
+static int igt_ring_direction(void *dummy)
+{
+       struct intel_ring *ring;
+       unsigned int half = 2048;
+       int step, err = 0;
+
+       ring = mock_ring(2 * half);
+       if (!ring)
+               return -ENOMEM;
+
+       GEM_BUG_ON(ring->size != 2 * half);
+
+       /* Precision of wrap detection is limited to ring->size / 2 */
+       for (step = 1; step < half; step <<= 1) {
+               err |= check_ring_offset(ring, 0, step);
+               err |= check_ring_offset(ring, half, step);
+       }
+       err |= check_ring_step(ring, 0, half - 64);
+
+       /* And check unwrapped handling for good measure */
+       err |= check_ring_offset(ring, 0, 2 * half + 64);
+       err |= check_ring_offset(ring, 3 * half, 1);
+
+       mock_ring_free(ring);
+       return err;
+}
+
+int intel_ring_mock_selftests(void)
+{
+       static const struct i915_subtest tests[] = {
+               SUBTEST(igt_ring_direction),
+       };
+
+       return i915_subtests(tests, NULL);
+}
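
For the new subtests to actually run, the entry point presumably also needs a
matching registration in selftests/i915_mock_selftests.h (not shown in this
excerpt), along the lines of:

        selftest(ring, intel_ring_mock_selftests)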