iwlwifi: mvm: rs: introduce new API for rate scaling
author      Gregory Greenman <gregory.greenman@intel.com>
            Wed, 1 Nov 2017 05:16:29 +0000 (07:16 +0200)
committer   Luca Coelho <luciano.coelho@intel.com>
            Tue, 5 Dec 2017 19:01:40 +0000 (21:01 +0200)
New devices will have the rate scaling algorithm running in the firmware.
With this feature, the driver's responsibility is to provide an initial
configuration and to handle notifications about recent rates and
some other parameters. Debugfs hooks will still be available for
reading the current rate/statistics and setting a fixed rate.
The old API is still supported, but the two APIs cannot be used
simultaneously.

This is the first patch in the series. It adds a new TLV specifying
FW support for the new API and updates lq_sta to support two types
of rate scaling.
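
To illustrate how the two variants are meant to be selected (a minimal
sketch only; the helper iwl_mvm_has_tlc_offload() and the init function
below are hypothetical and not part of this patch), code touching rate
scaling state would key off the new capability bit and access only the
matching member of the lq_sta union:

/*
 * Illustrative only: true when the firmware runs the rate scaling
 * (TLC) algorithm itself, false for the legacy driver-side RS.
 */
static inline bool iwl_mvm_has_tlc_offload(struct iwl_mvm *mvm)
{
	return fw_has_capa(&mvm->fw->ucode_capa,
			   IWL_UCODE_TLV_CAPA_TLC_OFFLOAD);
}

/* Illustrative only: initialize the union member matching the FW capability. */
static void iwl_mvm_rs_init_sketch(struct iwl_mvm *mvm,
				   struct iwl_mvm_sta *mvmsta)
{
	if (iwl_mvm_has_tlc_offload(mvm)) {
		/* FW-based RS: only lq_sta.rs_fw is valid. */
		mvmsta->lq_sta.rs_fw.pers.sta_id = mvmsta->sta_id;
		mvmsta->lq_sta.rs_fw.pers.last_rssi = S8_MIN;
	} else {
		/* Driver-based RS: only lq_sta.rs_drv is valid. */
		mvmsta->lq_sta.rs_drv.pers.last_rssi = S8_MIN;
	}
}

Since the two implementations are never active at the same time, keeping
rs_fw and rs_drv in a union avoids growing struct iwl_mvm_sta for the
unused variant.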

Signed-off-by: Gregory Greenman <gregory.greenman@intel.com>
Signed-off-by: Luca Coelho <luciano.coelho@intel.com>
drivers/net/wireless/intel/iwlwifi/fw/file.h
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
drivers/net/wireless/intel/iwlwifi/mvm/rs.c
drivers/net/wireless/intel/iwlwifi/mvm/rs.h
drivers/net/wireless/intel/iwlwifi/mvm/rx.c
drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
drivers/net/wireless/intel/iwlwifi/mvm/sta.c
drivers/net/wireless/intel/iwlwifi/mvm/sta.h

index 17367283f85f5ff6c45c513d41685db25a0d2f8b..4687d016f6767ea219cbf5b5a37be1a312327448 100644 (file)
@@ -310,6 +310,7 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
  * @IWL_UCODE_TLV_CAPA_BT_COEX_RRC: supports BT Coex RRC
  * @IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT: supports gscan
  * @IWL_UCODE_TLV_CAPA_STA_PM_NOTIF: firmware will send STA PM notification
+ * @IWL_UCODE_TLV_CAPA_TLC_OFFLOAD: firmware implements rate scaling algorithm
  * @IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE: extended DTS measurement
  * @IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS: supports short PM timeouts
  * @IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT: supports bt-coex Multi-priority LUT
@@ -364,6 +365,7 @@ enum iwl_ucode_tlv_capa {
        IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT          = (__force iwl_ucode_tlv_capa_t)39,
        IWL_UCODE_TLV_CAPA_CDB_SUPPORT                  = (__force iwl_ucode_tlv_capa_t)40,
        IWL_UCODE_TLV_CAPA_D0I3_END_FIRST               = (__force iwl_ucode_tlv_capa_t)41,
+       IWL_UCODE_TLV_CAPA_TLC_OFFLOAD                  = (__force iwl_ucode_tlv_capa_t)43,
        IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE         = (__force iwl_ucode_tlv_capa_t)64,
        IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS            = (__force iwl_ucode_tlv_capa_t)65,
        IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT             = (__force iwl_ucode_tlv_capa_t)67,
index 5a646dc6ad877b699d3a544e975f95be48ac8eac..4ff099f462fb628a2260627dc36497745887b71d 100644 (file)
@@ -420,6 +420,12 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
        ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
        ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
        ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR);
+
+       if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TLC_OFFLOAD)) {
+               ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW);
+               ieee80211_hw_set(hw, HAS_RATE_CONTROL);
+       }
+
        if (iwl_mvm_has_new_rx_api(mvm))
                ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
 
index 59c5ec3f46cb1df30f48af0a8762b988c994c113..a565bae770b86868e6cbfbfb088b7d1c25d87918 100644 (file)
@@ -1593,7 +1593,7 @@ int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init);
 void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg);
 int rs_pretty_print_rate(char *buf, const u32 rate);
 void rs_update_last_rssi(struct iwl_mvm *mvm,
-                        struct iwl_lq_sta *lq_sta,
+                        struct iwl_mvm_sta *mvmsta,
                         struct ieee80211_rx_status *rx_status);
 
 /* power management */
index fbfa5eafcc935eac86f4324fd40e03981d2251d5..739c47dbc6e885763d303ce628d45c7ee60b1d09 100644 (file)
@@ -809,7 +809,7 @@ static int rs_collect_tlc_data(struct iwl_mvm *mvm,
                return -EINVAL;
 
        if (tbl->column != RS_COLUMN_INVALID) {
-               struct lq_sta_pers *pers = &mvmsta->lq_sta.pers;
+               struct lq_sta_pers *pers = &mvmsta->lq_sta.rs_drv.pers;
 
                pers->tx_stats[tbl->column][scale_index].total += attempts;
                pers->tx_stats[tbl->column][scale_index].success += successes;
@@ -1206,7 +1206,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
        u8 lq_color = RS_DRV_DATA_LQ_COLOR_GET(tlc_info);
        u32 tx_resp_hwrate = (uintptr_t)info->status.status_driver_data[1];
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
-       struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta;
+       struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta.rs_drv;
 
        /* Treat uninitialized rate scaling data same as non-existing. */
        if (!lq_sta) {
@@ -2785,9 +2785,10 @@ out:
 
 /* Save info about RSSI of last Rx */
 void rs_update_last_rssi(struct iwl_mvm *mvm,
-                        struct iwl_lq_sta *lq_sta,
+                        struct iwl_mvm_sta *mvmsta,
                         struct ieee80211_rx_status *rx_status)
 {
+       struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta.rs_drv;
        int i;
 
        lq_sta->pers.chains = rx_status->chains;
@@ -2859,12 +2860,11 @@ static void rs_initialize_lq(struct iwl_mvm *mvm,
 static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta,
                        struct ieee80211_tx_rate_control *txrc)
 {
-       struct sk_buff *skb = txrc->skb;
-       struct iwl_op_mode *op_mode __maybe_unused =
-                       (struct iwl_op_mode *)mvm_r;
+       struct iwl_op_mode *op_mode = mvm_r;
        struct iwl_mvm *mvm __maybe_unused = IWL_OP_MODE_GET_MVM(op_mode);
+       struct sk_buff *skb = txrc->skb;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-       struct iwl_lq_sta *lq_sta = mvm_sta;
+       struct iwl_lq_sta *lq_sta;
        struct rs_rate *optimal_rate;
        u32 last_ucode_rate;
 
@@ -2876,18 +2876,14 @@ static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta,
                mvm_sta = NULL;
        }
 
-       /* TODO: handle rate_idx_mask and rate_idx_mcs_mask */
-
-       /* Treat uninitialized rate scaling data same as non-existing. */
-       if (lq_sta && !lq_sta->pers.drv) {
-               IWL_DEBUG_RATE(mvm, "Rate scaling not initialized yet.\n");
-               mvm_sta = NULL;
-       }
-
        /* Send management frames and NO_ACK data using lowest rate. */
        if (rate_control_send_low(sta, mvm_sta, txrc))
                return;
 
+       if (!mvm_sta)
+               return;
+
+       lq_sta = mvm_sta;
        iwl_mvm_hwrate_to_tx_rate(lq_sta->last_rate_n_flags,
                                  info->band, &info->control.rates[0]);
        info->control.rates[0].count = 1;
@@ -2910,7 +2906,7 @@ static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta,
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
        struct iwl_op_mode *op_mode = (struct iwl_op_mode *)mvm_rate;
        struct iwl_mvm *mvm  = IWL_OP_MODE_GET_MVM(op_mode);
-       struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta;
+       struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta.rs_drv;
 
        IWL_DEBUG_RATE(mvm, "create station rate scale window\n");
 
@@ -2924,7 +2920,7 @@ static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta,
        memset(lq_sta->pers.chain_signal, 0, sizeof(lq_sta->pers.chain_signal));
        lq_sta->pers.last_rssi = S8_MIN;
 
-       return &mvmsta->lq_sta;
+       return lq_sta;
 }
 
 static int rs_vht_highest_rx_mcs_index(struct ieee80211_sta_vht_cap *vht_cap,
@@ -3117,7 +3113,7 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
        struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
        struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
-       struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta;
+       struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta.rs_drv;
        struct ieee80211_supported_band *sband;
        unsigned long supp; /* must be unsigned long for for_each_set_bit */
 
@@ -3383,7 +3379,7 @@ static void rs_bfer_active_iter(void *_data,
 {
        struct rs_bfer_active_iter_data *data = _data;
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
-       struct iwl_lq_cmd *lq_cmd = &mvmsta->lq_sta.lq;
+       struct iwl_lq_cmd *lq_cmd = &mvmsta->lq_sta.rs_drv.lq;
        u32 ss_params = le32_to_cpu(lq_cmd->ss_params);
 
        if (sta == data->exclude_sta)
@@ -3495,7 +3491,8 @@ static void rs_set_lq_ss_params(struct iwl_mvm *mvm,
 
        /* Disallow BFER on another STA if active and we're a higher priority */
        if (rs_bfer_priority_cmp(mvmsta, bfer_mvmsta) > 0) {
-               struct iwl_lq_cmd *bfersta_lq_cmd = &bfer_mvmsta->lq_sta.lq;
+               struct iwl_lq_cmd *bfersta_lq_cmd =
+                       &bfer_mvmsta->lq_sta.rs_drv.lq;
                u32 bfersta_ss_params = le32_to_cpu(bfersta_lq_cmd->ss_params);
 
                bfersta_ss_params &= ~LQ_SS_BFER_ALLOWED;
@@ -3697,7 +3694,7 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
 
        struct iwl_lq_sta *lq_sta = file->private_data;
        struct iwl_mvm_sta *mvmsta =
-               container_of(lq_sta, struct iwl_mvm_sta, lq_sta);
+               container_of(lq_sta, struct iwl_mvm_sta, lq_sta.rs_drv);
        struct iwl_mvm *mvm;
        struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
        struct rs_rate *rate = &tbl->rate;
@@ -3990,7 +3987,7 @@ static void rs_add_debugfs(void *mvm, void *priv_sta, struct dentry *dir)
        struct iwl_lq_sta *lq_sta = priv_sta;
        struct iwl_mvm_sta *mvmsta;
 
-       mvmsta = container_of(lq_sta, struct iwl_mvm_sta, lq_sta);
+       mvmsta = container_of(lq_sta, struct iwl_mvm_sta, lq_sta.rs_drv);
 
        if (!mvmsta->vif)
                return;
@@ -4065,7 +4062,7 @@ void iwl_mvm_rate_control_unregister(void)
 int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
                          bool enable)
 {
-       struct iwl_lq_cmd *lq = &mvmsta->lq_sta.lq;
+       struct iwl_lq_cmd *lq = &mvmsta->lq_sta.rs_drv.lq;
 
        lockdep_assert_held(&mvm->mutex);
 
index 32b4d66debea224c0ae88c0ff8b7f1a48b120118..5075a8061c8220bb8b871ce207814ec0c2759621 100644 (file)
@@ -217,6 +217,38 @@ struct iwl_rate_mcs_info {
        char    mcs[IWL_MAX_MCS_DISPLAY_SIZE];
 };
 
+/**
+ * struct iwl_lq_sta_rs_fw - rate and related statistics for RS in FW
+ * @last_rate_n_flags: last rate reported by FW
+ * @sta_id: the id of the station
+#ifdef CONFIG_MAC80211_DEBUGFS
+ * @dbg_fixed_rate: for debug, use fixed rate if not 0
+ * @dbg_agg_frame_count_lim: for debug, max number of frames in A-MPDU
+#endif
+ * @chains: bitmask of chains reported in %chain_signal
+ * @chain_signal: per chain signal strength
+ * @last_rssi: last rssi reported
+ * @drv: pointer back to the driver data
+ */
+
+struct iwl_lq_sta_rs_fw {
+       /* last tx rate_n_flags */
+       u32 last_rate_n_flags;
+
+       /* persistent fields - initialized only once - keep last! */
+       struct lq_sta_pers_rs_fw {
+               u32 sta_id;
+#ifdef CONFIG_MAC80211_DEBUGFS
+               u32 dbg_fixed_rate;
+               u16 dbg_agg_frame_count_lim;
+#endif
+               u8 chains;
+               s8 chain_signal[IEEE80211_MAX_CHAINS];
+               s8 last_rssi;
+               struct iwl_mvm *drv;
+       } pers;
+};
+
 /**
  * struct iwl_rate_scale_data -- tx success history for one rate
  */
@@ -406,5 +438,4 @@ struct iwl_mvm_sta;
 
 int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
                          bool enable);
-
 #endif /* __rs__ */
index 7cbea7890b0cfdcf2e6eaf13a2540a92948730b4..63a57f0a16eff4d1e00fe4293d660dd8d03b1537 100644 (file)
@@ -383,7 +383,7 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
                                                                 false);
                }
 
-               rs_update_last_rssi(mvm, &mvmsta->lq_sta, rx_status);
+               rs_update_last_rssi(mvm, mvmsta, rx_status);
 
                if (iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_RSSI) &&
                    ieee80211_is_beacon(hdr->frame_control)) {
index f444ff3a33968a29c0c1b954357829e05ca8140a..00d048bd2d3538f7f2d09cb5c68e448c7d4b2af5 100644 (file)
@@ -933,7 +933,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
                                                                 false);
                }
 
-               rs_update_last_rssi(mvm, &mvmsta->lq_sta, rx_status);
+               rs_update_last_rssi(mvm, mvmsta, rx_status);
 
                if (iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_RSSI) &&
                    ieee80211_is_beacon(hdr->frame_control)) {
index 8351232472eb54713a2df89be72d95d3b5cf2a8e..12875f773a860d3286e82acae7b0219c55555568 100644 (file)
@@ -2548,6 +2548,14 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                .aggregate = true,
        };
 
+       /*
+        * When FW supports TLC_OFFLOAD, it also implements Tx aggregation
+        * manager, so this function should never be called in this case.
+        */
+       if (WARN_ON_ONCE(fw_has_capa(&mvm->fw->ucode_capa,
+                                    IWL_UCODE_TLV_CAPA_TLC_OFFLOAD)))
+               return -EINVAL;
+
        BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
                     != IWL_MAX_TID_COUNT);
 
@@ -2645,12 +2653,12 @@ out:
         */
        mvmsta->max_agg_bufsize =
                min(mvmsta->max_agg_bufsize, buf_size);
-       mvmsta->lq_sta.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;
+       mvmsta->lq_sta.rs_drv.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;
 
        IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
                     sta->addr, tid);
 
-       return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.lq, false);
+       return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.rs_drv.lq, false);
 }
 
 static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
index aedabe101cf0f09681a7c61cfa6d6eb326b033a6..5ffd6adbc38399f2d2e430b7f0c3a098241ac85a 100644 (file)
@@ -383,6 +383,8 @@ struct iwl_mvm_rxq_dup_data {
  * and from Tx response flow, it needs a spinlock.
  * @tid_data: per tid data + mgmt. Look at %iwl_mvm_tid_data.
  * @tid_to_baid: a simple map of TID to baid
+ * @lq_sta: holds rate scaling data, either for the case when RS is done in
+ *     the driver - %rs_drv or in the FW - %rs_fw.
  * @reserved_queue: the queue reserved for this STA for DQA purposes
  *     Every STA has is given one reserved queue to allow it to operate. If no
  *     such queue can be guaranteed, the STA addition will fail.
@@ -417,7 +419,10 @@ struct iwl_mvm_sta {
        spinlock_t lock;
        struct iwl_mvm_tid_data tid_data[IWL_MAX_TID_COUNT + 1];
        u8 tid_to_baid[IWL_MAX_TID_COUNT];
-       struct iwl_lq_sta lq_sta;
+       union {
+               struct iwl_lq_sta_rs_fw rs_fw;
+               struct iwl_lq_sta rs_drv;
+       } lq_sta;
        struct ieee80211_vif *vif;
        struct iwl_mvm_key_pn __rcu *ptk_pn[4];
        struct iwl_mvm_rxq_dup_data *dup_data;