seq_printf(m, "No flip due on pipe %c (plane %c)\n",
                                   pipe, plane);
                } else {
-                       if (!work->pending) {
+                       if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
                                seq_printf(m, "Flip queued on pipe %c (plane %c)\n",
                                           pipe, plane);
                        } else {
                                seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
                                           pipe, plane);
                        }
                        if (work->enable_stall_check)
                                seq_printf(m, "Stall check enabled, ");
                        else
                                seq_printf(m, "Stall check waiting for page flip ioctl, ");
-                       seq_printf(m, "%d prepares\n", work->pending);
+                       seq_printf(m, "%d prepares\n", atomic_read(&work->pending));
 
                        if (work->old_fb_obj) {
                                struct drm_i915_gem_object *obj = work->old_fb_obj;
 
 
        spin_lock_irqsave(&dev->event_lock, flags);
        work = intel_crtc->unpin_work;
-       if (work == NULL || !work->pending) {
+
+       /* Ensure we don't miss a work->pending update ... */
+       smp_rmb();
+
+       if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
                spin_unlock_irqrestore(&dev->event_lock, flags);
                return;
        }
 
+       /* and that the unpin work is consistent wrt ->pending. */
+       smp_rmb();
+
        intel_crtc->unpin_work = NULL;
 
        if (work->event)
                to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
        unsigned long flags;
 
+       /* NB: An MMIO update of the plane base pointer will also
+        * generate a page-flip completion irq, i.e. every modeset
+        * is also accompanied by a spurious intel_prepare_page_flip().
+        */
        spin_lock_irqsave(&dev->event_lock, flags);
-       if (intel_crtc->unpin_work) {
-               if ((++intel_crtc->unpin_work->pending) > 1)
-                       DRM_ERROR("Prepared flip multiple times\n");
-       } else {
-               DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
-       }
+       if (intel_crtc->unpin_work)
+               atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
        spin_unlock_irqrestore(&dev->event_lock, flags);
 }
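
The comparisons against INTEL_FLIP_COMPLETE and the atomic_inc_not_zero() above rely on work->pending becoming an atomic_t with three states. Those definitions are not part of this excerpt; as a reading aid, they are presumably along these lines (the names are taken from the diff, the exact layout is an assumption):

#define INTEL_FLIP_INACTIVE     0       /* unpin_work allocated, flip not yet emitted */
#define INTEL_FLIP_PENDING      1       /* flip emitted to the ring (intel_mark_page_flip_active) */
#define INTEL_FLIP_COMPLETE     2       /* flip-done irq seen (intel_prepare_page_flip) */

struct intel_unpin_work {
        /* other members unchanged and omitted here */
        atomic_t pending;               /* previously a plain counter */
};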
 
+static inline void intel_mark_page_flip_active(struct intel_crtc *intel_crtc)
+{
+       /* Ensure that the work item is consistent when activating it ... */
+       smp_wmb();
+       atomic_set(&intel_crtc->unpin_work->pending, INTEL_FLIP_PENDING);
+       /* and that it is marked active as soon as the irq could fire. */
+       smp_wmb();
+}
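
Taken together, the three touch points form a small state machine: the queue_flip paths set the work to PENDING once the flip is actually emitted, the flip-done irq advances PENDING to COMPLETE via atomic_inc_not_zero() (so the spurious irq generated by an MMIO modeset cannot advance a still-INACTIVE work), and the finish path only unpins once the state has reached COMPLETE; the smp_wmb() here pairs with the smp_rmb() in the finish path. Below is a stand-alone user-space sketch of that life-cycle, using C11 atomics in place of the kernel's atomic_t helpers; it is an illustration only, not part of the patch:

#include <assert.h>
#include <stdatomic.h>

#define INTEL_FLIP_INACTIVE     0
#define INTEL_FLIP_PENDING      1
#define INTEL_FLIP_COMPLETE     2

static atomic_int pending = INTEL_FLIP_INACTIVE;

/* queue_flip path: flip commands have been emitted to the ring */
static void mark_page_flip_active(void)
{
        atomic_store(&pending, INTEL_FLIP_PENDING);
}

/* flip-done irq, possibly spurious (e.g. caused by a modeset) */
static void prepare_page_flip(void)
{
        int old = atomic_load(&pending);

        /* equivalent of atomic_inc_not_zero(): only advance a queued flip */
        while (old != 0 &&
               !atomic_compare_exchange_weak(&pending, &old, old + 1))
                ;
}

/* vblank: may the old framebuffer be unpinned yet? */
static int finish_page_flip(void)
{
        return atomic_load(&pending) >= INTEL_FLIP_COMPLETE;
}

int main(void)
{
        prepare_page_flip();            /* spurious irq from a modeset ... */
        assert(!finish_page_flip());    /* ... completes nothing */

        mark_page_flip_active();        /* real flip queued */
        prepare_page_flip();            /* its completion irq */
        assert(finish_page_flip());     /* now the unpin work may run */
        return 0;
}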
+
 static int intel_gen2_queue_flip(struct drm_device *dev,
                                 struct drm_crtc *crtc,
                                 struct drm_framebuffer *fb,
        intel_ring_emit(ring, fb->pitches[0]);
        intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
        intel_ring_emit(ring, 0); /* aux display base address, unused */
+
+       intel_mark_page_flip_active(intel_crtc);
        intel_ring_advance(ring);
        return 0;
 
        intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
        intel_ring_emit(ring, MI_NOOP);
 
+       intel_mark_page_flip_active(intel_crtc);
        intel_ring_advance(ring);
        return 0;
 
        pf = 0;
        pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
        intel_ring_emit(ring, pf | pipesrc);
+
+       intel_mark_page_flip_active(intel_crtc);
        intel_ring_advance(ring);
        return 0;
 
        pf = 0;
        pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
        intel_ring_emit(ring, pf | pipesrc);
+
+       intel_mark_page_flip_active(intel_crtc);
        intel_ring_advance(ring);
        return 0;
 
        intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
        intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
        intel_ring_emit(ring, (MI_NOOP));
+
+       intel_mark_page_flip_active(intel_crtc);
        intel_ring_advance(ring);
        return 0;