struct transport_container session_cont;
 };
 
+/* Worker that handles connection failure for unresponsive connections
+ * entirely in kernel space, without a round trip to userspace.
+ */
+static void stop_conn_work_fn(struct work_struct *work);
+static DECLARE_WORK(stop_conn_work, stop_conn_work_fn);
+
 static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
 static struct workqueue_struct *iscsi_eh_timer_workq;
 
 static LIST_HEAD(sesslist);
 static DEFINE_SPINLOCK(sesslock);
 static LIST_HEAD(connlist);
+static LIST_HEAD(connlist_err);
 static DEFINE_SPINLOCK(connlock);
 
 static uint32_t iscsi_conn_get_sid(struct iscsi_cls_conn *conn)
 
        mutex_init(&conn->ep_mutex);
        INIT_LIST_HEAD(&conn->conn_list);
+       INIT_LIST_HEAD(&conn->conn_list_err);
        conn->transport = transport;
        conn->cid = cid;
 
 
        spin_lock_irqsave(&connlock, flags);
        list_del(&conn->conn_list);
+       list_del(&conn->conn_list_err);
        spin_unlock_irqrestore(&connlock, flags);
 
        transport_unregister_device(&conn->dev);
 }
 EXPORT_SYMBOL_GPL(iscsi_offload_mesg);
 
+static void stop_conn_work_fn(struct work_struct *work)
+{
+       struct iscsi_cls_conn *conn, *tmp;
+       unsigned long flags;
+       LIST_HEAD(recovery_list);
+
+       /* Atomically snapshot the failed-connection list onto a private
+        * list so connlock is not held while each connection is stopped.
+        */
+       spin_lock_irqsave(&connlock, flags);
+       if (list_empty(&connlist_err)) {
+               spin_unlock_irqrestore(&connlock, flags);
+               return;
+       }
+       list_splice_init(&connlist_err, &recovery_list);
+       spin_unlock_irqrestore(&connlock, flags);
+
+       /* _safe variant: each entry is unlinked (list_del_init) inside
+        * the loop body.
+        */
+       list_for_each_entry_safe(conn, tmp, &recovery_list, conn_list_err) {
+               uint32_t sid = iscsi_conn_get_sid(conn);
+               struct iscsi_cls_session *session;
+
+               /* Taken per iteration (not around the whole loop) so other
+                * iscsi operations can interleave between connections.
+                */
+               mutex_lock(&rx_queue_mutex);
+
+               session = iscsi_session_lookup(sid);
+               if (session) {
+                       if (system_state != SYSTEM_RUNNING) {
+                               /* System is halting/rebooting: skip recovery
+                                * (recovery_tmo = 0) and terminate the
+                                * connection immediately.
+                                */
+                               session->recovery_tmo = 0;
+                               conn->transport->stop_conn(conn,
+                                                          STOP_CONN_TERM);
+                       } else {
+                               conn->transport->stop_conn(conn,
+                                                          STOP_CONN_RECOVER);
+                       }
+               }
+
+               /* Re-init the node so a later list_empty() check on this
+                * conn (e.g. in destroy) sees it as no longer pending.
+                */
+               list_del_init(&conn->conn_list_err);
+
+               mutex_unlock(&rx_queue_mutex);
+
+               /* we don't want to hold rx_queue_mutex for too long,
+                * for instance if many conns failed at the same time,
+                * since this stalls other iscsi maintenance operations.
+                * Give other users a chance to proceed.
+                */
+               cond_resched();
+       }
+}
+
 void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
 {
        struct nlmsghdr *nlh;
        struct iscsi_uevent *ev;
        struct iscsi_internal *priv;
        int len = nlmsg_total_size(sizeof(*ev));
+       unsigned long flags;
+
+       spin_lock_irqsave(&connlock, flags);
+       list_add(&conn->conn_list_err, &connlist_err);
+       spin_unlock_irqrestore(&connlock, flags);
+       queue_work(system_unbound_wq, &stop_conn_work);
 
        priv = iscsi_if_transport_lookup(conn->transport);
        if (!priv)
 iscsi_if_destroy_conn(struct iscsi_transport *transport, struct iscsi_uevent *ev)
 {
        struct iscsi_cls_conn *conn;
+       unsigned long flags;
 
        conn = iscsi_conn_lookup(ev->u.d_conn.sid, ev->u.d_conn.cid);
        if (!conn)
                return -EINVAL;
 
+       spin_lock_irqsave(&connlock, flags);
+       if (!list_empty(&conn->conn_list_err)) {
+               spin_unlock_irqrestore(&connlock, flags);
+               return -EAGAIN;
+       }
+       spin_unlock_irqrestore(&connlock, flags);
+
        ISCSI_DBG_TRANS_CONN(conn, "Destroying transport conn\n");
        if (transport->destroy_conn)
                transport->destroy_conn(conn);