#define ISM_UNREG_SBA  0x11
 #define ISM_UNREG_IEQ  0x12
 
-#define ISM_ERROR      0xFFFF
-
 struct ism_req_hdr {
        u32 cmd;
        u16 : 16;
 
 #define ISM_EVENT_GID  1
 #define ISM_EVENT_SWR  2
 
+#define ISM_ERROR      0xFFFF
+
 struct smcd_event {
        u32 type;
        u32 code;
 
        return smc_cdc_get_slot_and_msg_send(conn);
 }
 
+/* Cancel pending close_work and tx_work of a connection and force the
+ * socket into SMC_CLOSED state.
+ *
+ * Caller must hold the sock lock on entry; it is held again on return.
+ * The lock is dropped around the cancel_*_sync() calls — NOTE(review):
+ * presumably because the work handlers themselves take the sock lock,
+ * so cancelling synchronously while holding it could deadlock; confirm
+ * against the close_work/tx_work implementations.
+ */
+static void smc_close_cancel_work(struct smc_sock *smc)
+{
+       struct sock *sk = &smc->sk;
+
+       release_sock(sk);
+       cancel_work_sync(&smc->conn.close_work);
+       cancel_delayed_work_sync(&smc->conn.tx_work);
+       lock_sock(sk);
+       sk->sk_state = SMC_CLOSED;
+}
+
 /* terminate smc socket abnormally - active abort
  * link group is terminated, i.e. RDMA communication no longer possible
  */
        switch (sk->sk_state) {
        case SMC_ACTIVE:
                sk->sk_state = SMC_PEERABORTWAIT;
-               release_sock(sk);
-               cancel_delayed_work_sync(&smc->conn.tx_work);
-               lock_sock(sk);
+               smc_close_cancel_work(smc);
                sk->sk_state = SMC_CLOSED;
                sock_put(sk); /* passive closing */
                break;
        case SMC_APPCLOSEWAIT1:
        case SMC_APPCLOSEWAIT2:
-               release_sock(sk);
-               cancel_delayed_work_sync(&smc->conn.tx_work);
-               lock_sock(sk);
+               smc_close_cancel_work(smc);
                sk->sk_state = SMC_CLOSED;
                sock_put(sk); /* postponed passive closing */
                break;
        case SMC_PEERCLOSEWAIT1:
        case SMC_PEERCLOSEWAIT2:
        case SMC_PEERFINCLOSEWAIT:
+               sk->sk_state = SMC_PEERABORTWAIT;
+               smc_close_cancel_work(smc);
                sk->sk_state = SMC_CLOSED;
                smc_conn_free(&smc->conn);
                release_clcsock = true;
                break;
        case SMC_PROCESSABORT:
        case SMC_APPFINCLOSEWAIT:
+               sk->sk_state = SMC_PEERABORTWAIT;
+               smc_close_cancel_work(smc);
                sk->sk_state = SMC_CLOSED;
+               smc_conn_free(&smc->conn);
+               release_clcsock = true;
                break;
        case SMC_INIT:
        case SMC_PEERABORTWAIT:
 
 
        if (!lgr->is_smcd && lnk->state != SMC_LNK_INACTIVE)
                smc_llc_link_inactive(lnk);
-       if (lgr->is_smcd)
+       if (lgr->is_smcd && !lgr->terminating)
                smc_ism_signal_shutdown(lgr);
        smc_lgr_free(lgr);
 }
        if (!lgr)
                return;
        if (lgr->is_smcd) {
-               smc_ism_unset_conn(conn);
+               if (!list_empty(&lgr->list))
+                       smc_ism_unset_conn(conn);
                tasklet_kill(&conn->rx_tsklet);
        } else {
                smc_cdc_tx_dismiss_slots(conn);
 {
        smc_lgr_free_bufs(lgr);
        if (lgr->is_smcd) {
-               smc_ism_put_vlan(lgr->smcd, lgr->vlan_id);
-               put_device(&lgr->smcd->dev);
+               if (!lgr->terminating) {
+                       smc_ism_put_vlan(lgr->smcd, lgr->vlan_id);
+                       put_device(&lgr->smcd->dev);
+               }
        } else {
                smc_link_clear(&lgr->lnk[SMC_SINGLE_LINK]);
                put_device(&lgr->lnk[SMC_SINGLE_LINK].smcibdev->ibdev->dev);
        spin_unlock_bh(lgr_lock);
 }
 
+/* Unregister all DMBs (RMB buffers) of an SMC-D link group from the ISM
+ * device.  Walks every rmbs[] size bucket of the link group.
+ *
+ * The len adjustment restores the length that was used at registration
+ * time — NOTE(review): apparently sizeof(struct smcd_cdc_msg) was
+ * subtracted from buf_desc->len after registering; confirm against the
+ * DMB registration path.  Because len is modified in place, this
+ * function is not idempotent and must be called at most once per link
+ * group (termination path only).
+ *
+ * NOTE(review): the rmbs lists are traversed without taking a list lock —
+ * presumably safe because no connections can add/remove buffers during
+ * termination; verify.
+ */
+static void smcd_unregister_all_dmbs(struct smc_link_group *lgr)
+{
+       int i;
+
+       for (i = 0; i < SMC_RMBE_SIZES; i++) {
+               struct smc_buf_desc *buf_desc;
+
+               list_for_each_entry(buf_desc, &lgr->rmbs[i], list) {
+                       buf_desc->len += sizeof(struct smcd_cdc_msg);
+                       smc_ism_unregister_dmb(lgr->smcd, buf_desc);
+               }
+       }
+}
+
 static void smc_sk_wake_ups(struct smc_sock *smc)
 {
        smc->sk.sk_write_space(&smc->sk);
        conn->killed = 1;
        smc->sk.sk_err = ECONNABORTED;
        smc_sk_wake_ups(smc);
-       if (conn->lgr->is_smcd)
+       if (conn->lgr->is_smcd) {
+               smc_ism_unset_conn(conn);
                tasklet_kill(&conn->rx_tsklet);
+       }
        smc_lgr_unregister_conn(conn);
        smc_close_active_abort(smc);
 }
 
+/* Device-facing cleanup of a link group during abnormal termination.
+ *
+ * SMC-D: signal shutdown to the peer, unregister all DMBs from the ISM
+ * device, and drop the vlan and device references here — this pairs with
+ * the lgr->terminating checks elsewhere in this patch that skip the same
+ * steps in the regular free path, so they are done exactly once.
+ *
+ * SMC-R: wake up anyone sleeping on the link's wr_reg_wait queue so
+ * waiters are not stuck on a dying link.
+ */
+static void smc_lgr_cleanup(struct smc_link_group *lgr)
+{
+       if (lgr->is_smcd) {
+               smc_ism_signal_shutdown(lgr);
+               smcd_unregister_all_dmbs(lgr);
+               smc_ism_put_vlan(lgr->smcd, lgr->vlan_id);
+               put_device(&lgr->smcd->dev);
+       } else {
+               struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK];
+
+               wake_up(&lnk->wr_reg_wait);
+       }
+}
+
 /* terminate link group */
 static void __smc_lgr_terminate(struct smc_link_group *lgr)
 {
                node = rb_first(&lgr->conns_all);
        }
        read_unlock_bh(&lgr->conns_lock);
-       if (!lgr->is_smcd)
-               wake_up(&lgr->lnk[SMC_SINGLE_LINK].wr_reg_wait);
+       smc_lgr_cleanup(lgr);
        smc_lgr_schedule_free_work_fast(lgr);
 }
 
 
+/* Unregister a DMB from the ISM device.
+ *
+ * Made safe to call more than once: a zero dma_addr means the DMB was
+ * never registered or has already been unregistered, so the call is a
+ * no-op returning 0.
+ */
 int smc_ism_unregister_dmb(struct smcd_dev *smcd, struct smc_buf_desc *dmb_desc)
 {
        struct smcd_dmb dmb;
+       int rc = 0;
+
+       /* nothing registered (or already unregistered) — see comment above */
+       if (!dmb_desc->dma_addr)
+               return rc;
 
        memset(&dmb, 0, sizeof(dmb));
        dmb.dmb_tok = dmb_desc->token;
        dmb.cpu_addr = dmb_desc->cpu_addr;
        dmb.dma_addr = dmb_desc->dma_addr;
        dmb.dmb_len = dmb_desc->len;
-       return smcd->ops->unregister_dmb(smcd, &dmb);
+       rc = smcd->ops->unregister_dmb(smcd, &dmb);
+       /* Clear the descriptor on success, and also on ISM_ERROR —
+        * NOTE(review): presumably ISM_ERROR means the device side is
+        * already gone, so the mapping must be treated as released either
+        * way to prevent a second unregister attempt; confirm against the
+        * ISM driver's error semantics.
+        */
+       if (!rc || rc == ISM_ERROR) {
+               dmb_desc->cpu_addr = NULL;
+               dmb_desc->dma_addr = 0;
+       }
+
+       return rc;
 }
 
 int smc_ism_register_dmb(struct smc_link_group *lgr, int dmb_len,
 
        spin_lock_irqsave(&smcd->lock, flags);
        conn = smcd->conn[dmbno];
-       if (conn)
+       if (conn && !conn->killed)
                tasklet_schedule(&conn->rx_tsklet);
        spin_unlock_irqrestore(&smcd->lock, flags);
 }