/* to avoid rule conflict, when user configure rule by ethtool,
         * we need to clear all arfs rules
         */
+       spin_lock_bh(&hdev->fd_rule_lock);
        hclge_clear_arfs_rules(handle);
 
-       spin_lock_bh(&hdev->fd_rule_lock);
        ret = hclge_fd_config_rule(hdev, rule);
 
        spin_unlock_bh(&hdev->fd_rule_lock);
        return ret;
 }
 
+/* the caller must hold fd_rule_lock */
 static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
                                     bool clear_list)
 {
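For reference, a sketch of the call sequence this hunk leaves behind in hclge_add_fd_entry(), reassembled from the lines above (not a verbatim copy of the resulting file): fd_rule_lock now covers both the arfs flush and the ethtool rule programming.

        spin_lock_bh(&hdev->fd_rule_lock);
        hclge_clear_arfs_rules(handle);         /* now runs with the lock held */

        ret = hclge_fd_config_rule(hdev, rule);

        spin_unlock_bh(&hdev->fd_rule_lock);
        return ret;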
@@ ... @@ static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
        if (!hnae3_dev_fd_supported(hdev))
                return;
 
-       spin_lock_bh(&hdev->fd_rule_lock);
        for_each_set_bit(location, hdev->fd_bmap,
                         hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
                hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
@@ ... @@ static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
                bitmap_zero(hdev->fd_bmap,
                            hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
        }
-
-       spin_unlock_bh(&hdev->fd_rule_lock);
 }
 
 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
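With the locking stripped out of hclge_del_all_fd_entries() itself, every caller has to provide the serialization. A minimal caller sketch, mirroring the hclge_enable_fd() hunk further down (the clear flag is whatever the caller computed beforehand):

        spin_lock_bh(&hdev->fd_rule_lock);
        hclge_del_all_fd_entries(handle, clear);
        spin_unlock_bh(&hdev->fd_rule_lock);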
@@ ... @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
                                      u16 flow_id, struct flow_keys *fkeys)
 {
        struct hclge_vport *vport = hclge_get_vport(handle);
-       struct hclge_fd_rule_tuples new_tuples;
+       struct hclge_fd_rule_tuples new_tuples = {};
        struct hclge_dev *hdev = vport->back;
        struct hclge_fd_rule *rule;
        u16 tmp_queue_id;
        if (!hnae3_dev_fd_supported(hdev))
                return -EOPNOTSUPP;
 
-       memset(&new_tuples, 0, sizeof(new_tuples));
-       hclge_fd_get_flow_tuples(fkeys, &new_tuples);
-
-       spin_lock_bh(&hdev->fd_rule_lock);
-
        /* when there is already fd rule existed add by user,
         * arfs should not work
         */
+       spin_lock_bh(&hdev->fd_rule_lock);
        if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
                spin_unlock_bh(&hdev->fd_rule_lock);
                return -EOPNOTSUPP;
        }
 
+       hclge_fd_get_flow_tuples(fkeys, &new_tuples);
+
        /* check is there flow director filter existed for this flow,
         * if not, create a new filter for it;
         * if filter exist with different queue id, modify the filter;
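For reference, a condensed sketch of the arfs add path after this hunk; the rule lookup/creation that follows the tuple extraction is omitted. Keeping the HCLGE_FD_EP_ACTIVE check and the tuple handling inside one fd_rule_lock section is presumably what stops an ethtool rule from being installed in between:

        spin_lock_bh(&hdev->fd_rule_lock);
        if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
                spin_unlock_bh(&hdev->fd_rule_lock);
                return -EOPNOTSUPP;     /* user-configured rules take precedence */
        }

        hclge_fd_get_flow_tuples(fkeys, &new_tuples);
        /* ... look up or create the arfs rule, still under fd_rule_lock ... */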
@@ ... @@
 #endif
 }
 
+/* the caller must hold fd_rule_lock */
 static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
 {
 #ifdef CONFIG_RFS_ACCEL
@@ ... @@ static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
 
        hdev->fd_en = enable;
        clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
-       if (!enable)
+
+       if (!enable) {
+               spin_lock_bh(&hdev->fd_rule_lock);
                hclge_del_all_fd_entries(handle, clear);
-       else
+               spin_unlock_bh(&hdev->fd_rule_lock);
+       } else {
                hclge_restore_fd_entries(handle);
+       }
 }
 
 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
@@ ... @@ static void hclge_ae_stop(struct hnae3_handle *handle)
        int i;
 
        set_bit(HCLGE_STATE_DOWN, &hdev->state);
-
+       spin_lock_bh(&hdev->fd_rule_lock);
        hclge_clear_arfs_rules(handle);
+       spin_unlock_bh(&hdev->fd_rule_lock);
 
        /* If it is not PF reset, the firmware will disable the MAC,
         * so it only need to stop phy here.