The following lockdep warning reports a possible deadlock arising
from the way ar->lock and ar->list_lock are used.
  (&(&ar->lock)->rlock){+.-...}, at: [<ffffffffa0492d13>] ath6kl_indicate_tx_activity+0x83/0x110 [ath6kl]
 but this lock took another, SOFTIRQ-unsafe lock in the past:
  (&(&ar->list_lock)->rlock){+.+...}
 and interrupts could create inverse lock ordering between them.
 other info that might help us debug this:
  Possible interrupt unsafe locking scenario:
        CPU0                    CPU1
        ----                    ----
   lock(&(&ar->list_lock)->rlock);
                                local_irq_disable();
                                lock(&(&ar->lock)->rlock);
                                lock(&(&ar->list_lock)->rlock);
   <Interrupt>
     lock(&(&ar->lock)->rlock);
  *** DEADLOCK ***
Softirqs have to be disabled when acquiring ar->list_lock to avoid
the above deadlock condition. When the above warning was printed, the
interface was still up and running without any issue.
Reported-by: Kalle Valo <kvalo@qca.qualcomm.com>
Signed-off-by: Vasanthakumar Thiagarajan <vthiagar@qca.qualcomm.com>
Signed-off-by: Kalle Valo <kvalo@qca.qualcomm.com>
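
For context, here is a minimal sketch (not driver code; only
ath6kl_indicate_tx_activity, ar->lock and ar->list_lock come from the
log above, the rest is illustrative) of the inversion lockdep warns
about and of the pattern the patch switches to. ar->lock is taken from
softirq context and, while held, the driver also takes ar->list_lock,
so holding ar->list_lock with softirqs enabled risks a same-CPU
deadlock:

	/* Before: ar->list_lock is held with softirqs still enabled. */
	spin_lock(&ar->list_lock);
	/*
	 * <softirq on this CPU>
	 *   ath6kl_indicate_tx_activity()
	 *     spin_lock(&ar->lock);
	 *       -> spins if another CPU already holds ar->lock while
	 *          waiting for ar->list_lock, which we hold but cannot
	 *          release because the softirq preempted us: deadlock.
	 */
	spin_unlock(&ar->list_lock);

	/*
	 * After: bottom halves stay disabled while ar->list_lock is
	 * held, so the softirq above can never interleave on this CPU.
	 */
	spin_lock_bh(&ar->list_lock);
	/* ... walk or modify ar->vif_list ... */
	spin_unlock_bh(&ar->list_lock);

The diff below applies this _bh conversion to every ar->list_lock
acquisition.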
 
        struct ath6kl *ar = wiphy_priv(wiphy);
        struct ath6kl_vif *vif = netdev_priv(ndev);
 
-       spin_lock(&ar->list_lock);
+       spin_lock_bh(&ar->list_lock);
        list_del(&vif->list);
-       spin_unlock(&ar->list_lock);
+       spin_unlock_bh(&ar->list_lock);
 
        ath6kl_cleanup_vif(vif, test_bit(WMI_READY, &ar->flag));
 
        if (type == NL80211_IFTYPE_ADHOC)
                ar->ibss_if_active = true;
 
-       spin_lock(&ar->list_lock);
+       spin_lock_bh(&ar->list_lock);
        list_add_tail(&vif->list, &ar->vif_list);
-       spin_unlock(&ar->list_lock);
+       spin_unlock_bh(&ar->list_lock);
 
        return ndev;
 
 
                return;
        }
 
-       spin_lock(&ar->list_lock);
+       spin_lock_bh(&ar->list_lock);
        list_for_each_entry_safe(vif, tmp_vif, &ar->vif_list, list) {
                list_del(&vif->list);
-               spin_unlock(&ar->list_lock);
+               spin_unlock_bh(&ar->list_lock);
                ath6kl_cleanup_vif(vif, test_bit(WMI_READY, &ar->flag));
                rtnl_lock();
                ath6kl_deinit_if_data(vif);
                rtnl_unlock();
-               spin_lock(&ar->list_lock);
+               spin_lock_bh(&ar->list_lock);
        }
-       spin_unlock(&ar->list_lock);
+       spin_unlock_bh(&ar->list_lock);
 
        clear_bit(WMI_READY, &ar->flag);
 
 
 {
        struct ath6kl_vif *vif;
 
-       spin_lock(&ar->list_lock);
+       spin_lock_bh(&ar->list_lock);
        if (list_empty(&ar->vif_list)) {
-               spin_unlock(&ar->list_lock);
+               spin_unlock_bh(&ar->list_lock);
                return NULL;
        }
 
        vif = list_first_entry(&ar->vif_list, struct ath6kl_vif, list);
 
-       spin_unlock(&ar->list_lock);
+       spin_unlock_bh(&ar->list_lock);
 
        return vif;
 }
 
 
 stop_adhoc_netq:
        /* FIXME: Locking */
-       spin_lock(&ar->list_lock);
+       spin_lock_bh(&ar->list_lock);
        list_for_each_entry(vif, &ar->vif_list, list) {
                if (vif->nw_type == ADHOC_NETWORK) {
-                       spin_unlock(&ar->list_lock);
+                       spin_unlock_bh(&ar->list_lock);
 
                        spin_lock_bh(&vif->if_lock);
                        set_bit(NETQ_STOPPED, &vif->flags);
                        return action;
                }
        }
-       spin_unlock(&ar->list_lock);
+       spin_unlock_bh(&ar->list_lock);
 
        return action;
 }
        __skb_queue_purge(&skb_queue);
 
        /* FIXME: Locking */
-       spin_lock(&ar->list_lock);
+       spin_lock_bh(&ar->list_lock);
        list_for_each_entry(vif, &ar->vif_list, list) {
                if (test_bit(CONNECTED, &vif->flags) &&
                    !flushing[vif->fw_vif_idx]) {
-                       spin_unlock(&ar->list_lock);
+                       spin_unlock_bh(&ar->list_lock);
                        netif_wake_queue(vif->ndev);
-                       spin_lock(&ar->list_lock);
+                       spin_lock_bh(&ar->list_lock);
                }
        }
-       spin_unlock(&ar->list_lock);
+       spin_unlock_bh(&ar->list_lock);
 
        if (wake_event)
                wake_up(&ar->event_wq);
 
                return NULL;
 
        /* FIXME: Locking */
-       spin_lock(&ar->list_lock);
+       spin_lock_bh(&ar->list_lock);
        list_for_each_entry(vif, &ar->vif_list, list) {
                if (vif->fw_vif_idx == if_idx) {
                        found = vif;
                        break;
                }
        }
-       spin_unlock(&ar->list_lock);
+       spin_unlock_bh(&ar->list_lock);
 
        return found;
 }