xhci_zero_in_ctx(xhci, virt_dev);
 }
 
+/*
+ * Move an endpoint ring's hardware dequeue pointer past a stalled TD.
+ *
+ * Computes a new dequeue state just beyond ep_ring->stopped_td (recorded
+ * when the endpoint halted) and queues a Set TR Dequeue Pointer command
+ * for this slot/endpoint, so the controller will not attempt to resend
+ * the stalled TD on the next doorbell ring.
+ *
+ * NOTE(review): this only queues the command; callers appear responsible
+ * for ringing the command doorbell (xhci_ring_cmd_db) afterwards — confirm.
+ * Presumably called with xhci->lock held — verify against callers.
+ * @ep is accepted but not referenced in this body.
+ */
+void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
+               struct usb_device *udev, struct usb_host_endpoint *ep,
+               unsigned int ep_index, struct xhci_ring *ep_ring)
+{
+       struct xhci_dequeue_state deq_state;
+
+       xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n");
+       /* We need to move the HW's dequeue pointer past this TD,
+        * or it will attempt to resend it on the next doorbell ring.
+        */
+       xhci_find_new_dequeue_state(xhci, udev->slot_id,
+                       ep_index, ep_ring->stopped_td, &deq_state);
+
+       xhci_dbg(xhci, "Queueing new dequeue state\n");
+       xhci_queue_new_dequeue_state(xhci, ep_ring,
+                       udev->slot_id,
+                       ep_index, &deq_state);
+}
+
 /* Deal with stalled endpoints.  The core should have sent the control message
  * to clear the halt condition.  However, we need to make the xHCI hardware
  * reset its sequence number, since a device will expect a sequence number of
        unsigned int ep_index;
        unsigned long flags;
        int ret;
-       struct xhci_dequeue_state deq_state;
        struct xhci_ring *ep_ring;
 
        xhci = hcd_to_xhci(hcd);
                                ep->desc.bEndpointAddress);
                return;
        }
+       if (usb_endpoint_xfer_control(&ep->desc)) {
+               xhci_dbg(xhci, "Control endpoint stall already handled.\n");
+               return;
+       }
 
        xhci_dbg(xhci, "Queueing reset endpoint command\n");
        spin_lock_irqsave(&xhci->lock, flags);
         * command.  Better hope that last command worked!
         */
        if (!ret) {
-               xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n");
-               /* We need to move the HW's dequeue pointer past this TD,
-                * or it will attempt to resend it on the next doorbell ring.
-                */
-               xhci_find_new_dequeue_state(xhci, udev->slot_id,
-                               ep_index, ep_ring->stopped_td, &deq_state);
-               xhci_dbg(xhci, "Queueing new dequeue state\n");
-               xhci_queue_new_dequeue_state(xhci, ep_ring,
-                               udev->slot_id,
-                               ep_index, &deq_state);
+               xhci_cleanup_stalled_ring(xhci, udev, ep, ep_index, ep_ring);
                kfree(ep_ring->stopped_td);
                xhci_ring_cmd_db(xhci);
        }
 
 {
        struct xhci_virt_device *xdev;
        struct xhci_ring *ep_ring;
+       unsigned int slot_id;
        int ep_index;
        struct xhci_td *td = 0;
        dma_addr_t event_dma;
        struct xhci_ep_ctx *ep_ctx;
 
        xhci_dbg(xhci, "In %s\n", __func__);
-       xdev = xhci->devs[TRB_TO_SLOT_ID(event->flags)];
+       slot_id = TRB_TO_SLOT_ID(event->flags);
+       xdev = xhci->devs[slot_id];
        if (!xdev) {
                xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n");
                return -ENODEV;
                        xhci_warn(xhci, "WARN: short transfer on control ep\n");
                        status = -EREMOTEIO;
                        break;
+               case COMP_STALL:
+                       /* Did we transfer part of the data (middle) phase? */
+                       if (event_trb != ep_ring->dequeue &&
+                                       event_trb != td->last_trb)
+                               td->urb->actual_length =
+                                       td->urb->transfer_buffer_length
+                                       - TRB_LEN(event->transfer_len);
+                       else
+                               td->urb->actual_length = 0;
+
+                       ep_ring->stopped_td = td;
+                       ep_ring->stopped_trb = event_trb;
+                       xhci_queue_reset_ep(xhci, slot_id, ep_index);
+                       xhci_cleanup_stalled_ring(xhci,
+                                       td->urb->dev,
+                                       td->urb->ep,
+                                       ep_index, ep_ring);
+                       xhci_ring_cmd_db(xhci);
+                       goto td_cleanup;
                default:
                        /* Others already handled above */
                        break;
                        inc_deq(xhci, ep_ring, false);
                }
 
+td_cleanup:
                /* Clean up the endpoint's TD list */
                urb = td->urb;
                list_del(&td->td_list);
                        list_del(&td->cancelled_td_list);
                        ep_ring->cancels_pending--;
                }
-               /* Leave the TD around for the reset endpoint function to use */
-               if (GET_COMP_CODE(event->transfer_len) != COMP_STALL) {
+               /* Leave the TD around for the reset endpoint function to use
+                * (but only if it's not a control endpoint, since we already
+                * queued the Set TR dequeue pointer command for stalled
+                * control endpoints).
+                */
+               if (usb_endpoint_xfer_control(&urb->ep->desc) ||
+                       GET_COMP_CODE(event->transfer_len) != COMP_STALL) {
                        kfree(td);
                }
                urb->hcpriv = NULL;
 
  */
 #define        FORCE_EVENT     (0x1)
 #define ERROR_COUNT(p) (((p) & 0x3) << 1)
+#define CTX_TO_EP_TYPE(p)      (((p) >> 3) & 0x7)
 #define EP_TYPE(p)     ((p) << 3)
 #define ISOC_OUT_EP    1
 #define BULK_OUT_EP    2
 void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
                struct xhci_ring *ep_ring, unsigned int slot_id,
                unsigned int ep_index, struct xhci_dequeue_state *deq_state);
+void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
+               struct usb_device *udev, struct usb_host_endpoint *ep,
+               unsigned int ep_index, struct xhci_ring *ep_ring);
 
 /* xHCI roothub code */
 int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex,