rtw89: ser: control hci interrupts on/off by state
authorZong-Zhe Yang <kevin_yang@realtek.com>
Mon, 14 Mar 2022 07:12:46 +0000 (15:12 +0800)
committerKalle Valo <kvalo@kernel.org>
Wed, 6 Apr 2022 07:46:02 +0000 (10:46 +0300)
While SER (system error recovery) is in progress, something is under
recovery. So, disable interrupts (excluding the halt interrupt, which
may still be used during SER) to avoid unexpected behavior, and enable
interrupts again once SER is done.
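
Roughly, the flow looks like this minimal user-space sketch (the register
names and the ~0 "normal" masks are stand-ins, not the driver's real
values; the halt notification is assumed to sit behind a separate enable,
which is why it keeps working while these registers are zeroed):

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the PCIe interrupt-mask registers the patch zeroes. */
static unsigned int himr00, himr10;
static bool under_recovery;

static void recovery_start(void)        /* SER begins */
{
        under_recovery = true;
        himr00 = 0;                     /* mask everything here... */
        himr10 = 0;                     /* ...halt stays enabled elsewhere */
}

static void recovery_complete(void)     /* SER is done */
{
        under_recovery = false;
        himr00 = ~0u;                   /* hypothetical normal masks */
        himr10 = ~0u;
}

int main(void)
{
        recovery_start();
        printf("recovering: %d\n", under_recovery);
        recovery_complete();
        printf("recovering: %d\n", under_recovery);
        return 0;
}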

Signed-off-by: Zong-Zhe Yang <kevin_yang@realtek.com>
Signed-off-by: Ping-Ke Shih <pkshih@realtek.com>
Signed-off-by: Kalle Valo <kvalo@kernel.org>
Link: https://lore.kernel.org/r/20220314071250.40292-5-pkshih@realtek.com
drivers/net/wireless/realtek/rtw89/core.h
drivers/net/wireless/realtek/rtw89/pci.c
drivers/net/wireless/realtek/rtw89/pci.h
drivers/net/wireless/realtek/rtw89/ser.c

index 771722132c53b3ff4eb613e51d6212536414bb26..5efa5ca372c66a93a7d3ee9c8a0e5e4683daf68d 100644 (file)
@@ -2025,6 +2025,13 @@ struct rtw89_hci_ops {
        int (*mac_lv1_rcvy)(struct rtw89_dev *rtwdev, enum rtw89_lv1_rcvy_step step);
        void (*dump_err_status)(struct rtw89_dev *rtwdev);
        int (*napi_poll)(struct napi_struct *napi, int budget);
+
+       /* Each HCI instance handles locking inside its recovery_start and
+        * recovery_complete callbacks, plus anything needing care under SER,
+        * e.g. turning interrupts on/off except the halt notification one.
+        */
+       void (*recovery_start)(struct rtw89_dev *rtwdev);
+       void (*recovery_complete)(struct rtw89_dev *rtwdev);
 };
 
 struct rtw89_hci_info {
@@ -3033,6 +3040,18 @@ static inline void rtw89_hci_flush_queues(struct rtw89_dev *rtwdev, u32 queues,
                return rtwdev->hci.ops->flush_queues(rtwdev, queues, drop);
 }
 
+static inline void rtw89_hci_recovery_start(struct rtw89_dev *rtwdev)
+{
+       if (rtwdev->hci.ops->recovery_start)
+               rtwdev->hci.ops->recovery_start(rtwdev);
+}
+
+static inline void rtw89_hci_recovery_complete(struct rtw89_dev *rtwdev)
+{
+       if (rtwdev->hci.ops->recovery_complete)
+               rtwdev->hci.ops->recovery_complete(rtwdev);
+}
+
 static inline u8 rtw89_read8(struct rtw89_dev *rtwdev, u32 addr)
 {
        return rtwdev->hci.ops->read8(rtwdev, addr);
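
Because the wrappers above check the hook before calling it, a bus backend
with nothing to do across SER can simply leave both pointers unset. A
self-contained sketch of the same optional-callback pattern (all names
here are hypothetical, not driver code):

#include <stdio.h>

struct hci_ops {
        void (*recovery_start)(void);
        void (*recovery_complete)(void);
};

/* Same shape as the wrappers: only call a hook if the backend set it. */
static void hci_recovery_start(const struct hci_ops *ops)
{
        if (ops->recovery_start)
                ops->recovery_start();
}

static void pci_recovery_start(void)
{
        puts("pci: interrupts masked for SER");
}

int main(void)
{
        const struct hci_ops pci = { .recovery_start = pci_recovery_start };
        const struct hci_ops bare = { 0 };  /* backend without the hooks */

        hci_recovery_start(&pci);   /* runs the callback */
        hci_recovery_start(&bare);  /* safely a no-op */
        return 0;
}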
index e79bfc335b446b2f82f6c2416b8b8a13238077ef..32e8283e22f3ba816ced5c2ef7233fed3d940ffe 100644 (file)
@@ -647,6 +647,29 @@ static void rtw89_pci_disable_intr(struct rtw89_dev *rtwdev,
        rtw89_write32(rtwdev, R_AX_PCIE_HIMR10, 0);
 }
 
+static void rtw89_pci_ops_recovery_start(struct rtw89_dev *rtwdev)
+{
+       struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+       unsigned long flags;
+
+       spin_lock_irqsave(&rtwpci->irq_lock, flags);
+       rtwpci->under_recovery = true;
+       rtw89_write32(rtwdev, R_AX_PCIE_HIMR00, 0);
+       rtw89_write32(rtwdev, R_AX_PCIE_HIMR10, 0);
+       spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
+}
+
+static void rtw89_pci_ops_recovery_complete(struct rtw89_dev *rtwdev)
+{
+       struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+       unsigned long flags;
+
+       spin_lock_irqsave(&rtwpci->irq_lock, flags);
+       rtwpci->under_recovery = false;
+       rtw89_pci_enable_intr(rtwdev, rtwpci);
+       spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
+}
+
 static irqreturn_t rtw89_pci_interrupt_threadfn(int irq, void *dev)
 {
        struct rtw89_dev *rtwdev = dev;
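
Both callbacks above change under_recovery and the hardware masks under
irq_lock, so the interrupt path never sees the flag and the masks out of
step. The invariant, as a simplified pthread sketch (not driver code):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t irq_lock = PTHREAD_MUTEX_INITIALIZER;
static bool under_recovery;
static unsigned int himr;       /* stand-in for the HIMR registers */

static void recovery_start(void)
{
        pthread_mutex_lock(&irq_lock);
        under_recovery = true;  /* flag and mask change as one unit */
        himr = 0;
        pthread_mutex_unlock(&irq_lock);
}

static void recovery_complete(void)
{
        pthread_mutex_lock(&irq_lock);
        under_recovery = false;
        himr = ~0u;             /* hypothetical normal mask */
        pthread_mutex_unlock(&irq_lock);
}

int main(void)
{
        recovery_start();
        recovery_complete();
        return 0;
}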
@@ -664,6 +687,9 @@ static irqreturn_t rtw89_pci_interrupt_threadfn(int irq, void *dev)
        if (unlikely(isrs.halt_c2h_isrs & B_AX_HALT_C2H_INT_EN))
                rtw89_ser_notify(rtwdev, rtw89_mac_get_err_status(rtwdev));
 
+       if (unlikely(rtwpci->under_recovery))
+               return IRQ_HANDLED;
+
        if (likely(rtwpci->running)) {
                local_bh_disable();
                napi_schedule(&rtwdev->napi);
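
The ordering in the threaded handler is the point: the halt C2H event is
serviced before the under_recovery early return, so SER notifications
still get through while the normal path (scheduling NAPI) is skipped. A
compact sketch of that gate (the bit value and messages are made up):

#include <stdbool.h>
#include <stdio.h>

#define HALT_C2H 0x1u                    /* hypothetical ISR bit */

static bool under_recovery;

static void interrupt_threadfn(unsigned int isrs)
{
        if (isrs & HALT_C2H)
                puts("notify SER");      /* serviced even in recovery */

        if (under_recovery)
                return;                  /* skip the normal RX/TX path */

        puts("schedule NAPI");
}

int main(void)
{
        under_recovery = true;
        interrupt_threadfn(HALT_C2H);    /* prints only "notify SER" */
        under_recovery = false;
        interrupt_threadfn(0);           /* normal path resumes */
        return 0;
}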
@@ -2931,6 +2957,9 @@ static const struct rtw89_hci_ops rtw89_pci_ops = {
        .mac_lv1_rcvy   = rtw89_pci_ops_mac_lv1_recovery,
        .dump_err_status = rtw89_pci_ops_dump_err_status,
        .napi_poll      = rtw89_pci_napi_poll,
+
+       .recovery_start = rtw89_pci_ops_recovery_start,
+       .recovery_complete = rtw89_pci_ops_recovery_complete,
 };
 
 int rtw89_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
index b84acd0d0582ac5f25c35307916e4f2f0abe5335..2c8030af3e72f1edaa92f3b849dbd41364acd756 100644 (file)
@@ -594,6 +594,7 @@ struct rtw89_pci {
        /* protect TRX resources (exclude RXQ) */
        spinlock_t trx_lock;
        bool running;
+       bool under_recovery;
        struct rtw89_pci_tx_ring tx_rings[RTW89_TXCH_NUM];
        struct rtw89_pci_rx_ring rx_rings[RTW89_RXCH_NUM];
        struct sk_buff_head h2c_queue;
index 5327b97b9c728b24fced92528533cc227b7bacd9..a20389cde7e239b136fe5a253393232af38be1cc 100644 (file)
@@ -302,8 +302,11 @@ static void hal_send_m4_event(struct rtw89_ser *ser)
 /* state handler */
 static void ser_idle_st_hdl(struct rtw89_ser *ser, u8 evt)
 {
+       struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
+
        switch (evt) {
        case SER_EV_STATE_IN:
+               rtw89_hci_recovery_complete(rtwdev);
                break;
        case SER_EV_L1_RESET:
                ser_state_goto(ser, SER_RESET_TRX_ST);
@@ -312,6 +315,7 @@ static void ser_idle_st_hdl(struct rtw89_ser *ser, u8 evt)
                ser_state_goto(ser, SER_L2_RESET_ST);
                break;
        case SER_EV_STATE_OUT:
+               rtw89_hci_recovery_start(rtwdev);
        default:
                break;
        }
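
So the hooks are driven purely by idle-state transitions: entering idle
(SER_EV_STATE_IN) means recovery has finished, leaving it
(SER_EV_STATE_OUT) means recovery is starting. A standalone sketch of the
same entry/exit pattern:

#include <stdio.h>

enum ser_evt { SER_EV_STATE_IN, SER_EV_STATE_OUT, SER_EV_OTHER };

/* Mirrors ser_idle_st_hdl(): complete on entering idle, start on
 * leaving it; other events are ignored here. */
static void idle_state_handler(enum ser_evt evt)
{
        switch (evt) {
        case SER_EV_STATE_IN:
                puts("recovery_complete: interrupts back on");
                break;
        case SER_EV_STATE_OUT:
                puts("recovery_start: interrupts masked");
                /* falls through to the default break, as in the patch */
        default:
                break;
        }
}

int main(void)
{
        idle_state_handler(SER_EV_STATE_OUT);  /* SER kicks off */
        idle_state_handler(SER_EV_STATE_IN);   /* SER finished  */
        return 0;
}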