struct mt76_rx_status *status,
struct ieee80211_supported_band *sband,
__le32 *rxv, u8 *mode);
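+
+/* TX completion helpers shared by the connac2 (mt7915/mt7921) drivers */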
+void mt76_connac2_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi);
+void mt76_connac2_txwi_free(struct mt76_dev *dev, struct mt76_txwi_cache *t,
+ struct ieee80211_sta *sta,
+ struct list_head *free_list);
+void mt76_connac2_tx_token_put(struct mt76_dev *dev);
#endif /* __MT76_CONNAC_H */
return 0;
}
EXPORT_SYMBOL_GPL(mt76_connac2_mac_fill_rx_rate);
+
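+/* Kick off a TX BA session the first time an HT/HE station sends a
+ * QoS-data frame on a given TID.
+ */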
+void mt76_connac2_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
+{
+ struct mt76_wcid *wcid;
+ u16 fc, tid;
+ u32 val;
+
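+ /* A-MPDU aggregation is only negotiated with HT/HE peers */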
+ if (!sta ||
+ !(sta->deflink.ht_cap.ht_supported || sta->deflink.he_cap.has_he))
+ return;
+
+ tid = le32_get_bits(txwi[1], MT_TXD1_TID);
+ if (tid >= 6) /* skip VO queue */
+ return;
+
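+ /* rebuild the 802.11 frame-control type/subtype from TXD2 and only
+ * consider QoS-data frames
+ */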
+ val = le32_to_cpu(txwi[2]);
+ fc = FIELD_GET(MT_TXD2_FRAME_TYPE, val) << 2 |
+ FIELD_GET(MT_TXD2_SUB_TYPE, val) << 4;
+ if (unlikely(fc != (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA)))
+ return;
+
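+ /* request a BA session only once per TID */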
+ wcid = (struct mt76_wcid *)sta->drv_priv;
+ if (!test_and_set_bit(tid, &wcid->ampdu_state))
+ ieee80211_start_tx_ba_session(sta, tid, 0);
+}
+EXPORT_SYMBOL_GPL(mt76_connac2_tx_check_aggr);
+
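+/* Unmap and complete the skb attached to a txwi cache entry; the owning
+ * station is taken from @sta or recovered from the WLAN index in the txwi.
+ */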
+void mt76_connac2_txwi_free(struct mt76_dev *dev, struct mt76_txwi_cache *t,
+ struct ieee80211_sta *sta,
+ struct list_head *free_list)
+{
+ struct mt76_wcid *wcid;
+ __le32 *txwi;
+ u16 wcid_idx;
+
+ mt76_connac_txp_skb_unmap(dev, t);
+ if (!t->skb)
+ goto out;
+
+ txwi = (__le32 *)mt76_get_txwi_ptr(dev, t);
+ if (sta) {
+ wcid = (struct mt76_wcid *)sta->drv_priv;
+ wcid_idx = wcid->idx;
+ } else {
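+ /* no station from the caller: look up the wcid from TXD1 and
+ * queue it for status polling if it backs a station entry
+ */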
+ wcid_idx = le32_get_bits(txwi[1], MT_TXD1_WLAN_IDX);
+ wcid = rcu_dereference(dev->wcid[wcid_idx]);
+
+ if (wcid && wcid->sta) {
+ sta = container_of((void *)wcid, struct ieee80211_sta,
+ drv_priv);
+ spin_lock_bh(&dev->sta_poll_lock);
+ if (list_empty(&wcid->poll_list))
+ list_add_tail(&wcid->poll_list,
+ &dev->sta_poll_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
+ }
+ }
+
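+ /* EAPOL frames never trigger BA session setup */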
+ if (sta && likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE)))
+ mt76_connac2_tx_check_aggr(sta, txwi);
+
+ __mt76_tx_complete_skb(dev, wcid_idx, t->skb, free_list);
+out:
+ t->skb = NULL;
+ mt76_put_txwi(dev, t);
+}
+EXPORT_SYMBOL_GPL(mt76_connac2_txwi_free);
+
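+/* Drop every outstanding TX token and free the attached txwi entries;
+ * used on the reset and teardown paths before the token IDR is destroyed.
+ */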
+void mt76_connac2_tx_token_put(struct mt76_dev *dev)
+{
+ struct mt76_txwi_cache *txwi;
+ int id;
+
+ spin_lock_bh(&dev->token_lock);
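+ /* no completion will be reported for these tokens anymore,
+ * so complete the pending frames here
+ */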
+ idr_for_each_entry(&dev->token, txwi, id) {
+ mt76_connac2_txwi_free(dev, txwi, NULL, NULL);
+ dev->token_count--;
+ }
+ spin_unlock_bh(&dev->token_lock);
+ idr_destroy(&dev->token);
+}
+EXPORT_SYMBOL_GPL(mt76_connac2_tx_token_put);
static void mt7915_stop_hardware(struct mt7915_dev *dev)
{
mt7915_mcu_exit(dev);
- mt7915_tx_token_put(dev);
+ mt76_connac2_tx_token_put(&dev->mt76);
mt7915_dma_cleanup(dev);
tasklet_disable(&dev->mt76.irq_tasklet);
return MT_TXD_SIZE + sizeof(*txp);
}
-static void
-mt7915_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
-{
- struct mt7915_sta *msta;
- u16 fc, tid;
- u32 val;
-
- if (!sta || !(sta->deflink.ht_cap.ht_supported || sta->deflink.he_cap.has_he))
- return;
-
- tid = le32_get_bits(txwi[1], MT_TXD1_TID);
- if (tid >= 6) /* skip VO queue */
- return;
-
- val = le32_to_cpu(txwi[2]);
- fc = FIELD_GET(MT_TXD2_FRAME_TYPE, val) << 2 |
- FIELD_GET(MT_TXD2_SUB_TYPE, val) << 4;
- if (unlikely(fc != (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA)))
- return;
-
- msta = (struct mt7915_sta *)sta->drv_priv;
- if (!test_and_set_bit(tid, &msta->wcid.ampdu_state))
- ieee80211_start_tx_ba_session(sta, tid, 0);
-}
-
-static void
-mt7915_txwi_free(struct mt7915_dev *dev, struct mt76_txwi_cache *t,
- struct ieee80211_sta *sta, struct list_head *free_list)
-{
- struct mt76_dev *mdev = &dev->mt76;
- struct mt7915_sta *msta;
- struct mt76_wcid *wcid;
- __le32 *txwi;
- u16 wcid_idx;
-
- mt76_connac_txp_skb_unmap(mdev, t);
- if (!t->skb)
- goto out;
-
- txwi = (__le32 *)mt76_get_txwi_ptr(mdev, t);
- if (sta) {
- wcid = (struct mt76_wcid *)sta->drv_priv;
- wcid_idx = wcid->idx;
- } else {
- wcid_idx = le32_get_bits(txwi[1], MT_TXD1_WLAN_IDX);
- wcid = rcu_dereference(dev->mt76.wcid[wcid_idx]);
-
- if (wcid && wcid->sta) {
- msta = container_of(wcid, struct mt7915_sta, wcid);
- sta = container_of((void *)msta, struct ieee80211_sta,
- drv_priv);
- spin_lock_bh(&mdev->sta_poll_lock);
- if (list_empty(&msta->wcid.poll_list))
- list_add_tail(&msta->wcid.poll_list,
- &mdev->sta_poll_list);
- spin_unlock_bh(&mdev->sta_poll_lock);
- }
- }
-
- if (sta && likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE)))
- mt7915_tx_check_aggr(sta, txwi);
-
- __mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list);
-
-out:
- t->skb = NULL;
- mt76_put_txwi(mdev, t);
-}
-
static void
mt7915_mac_tx_free_prepare(struct mt7915_dev *dev)
{
if (!txwi)
continue;
- mt7915_txwi_free(dev, txwi, sta, &free_list);
+ mt76_connac2_txwi_free(mdev, txwi, sta, &free_list);
}
}
if (!txwi)
continue;
- mt7915_txwi_free(dev, txwi, NULL, &free_list);
+ mt76_connac2_txwi_free(mdev, txwi, NULL, &free_list);
}
mt7915_mac_tx_free_done(dev, &free_list, wake);
mt7915_update_vif_beacon, mphy_ext->hw);
}
-void mt7915_tx_token_put(struct mt7915_dev *dev)
-{
- struct mt76_txwi_cache *txwi;
- int id;
-
- spin_lock_bh(&dev->mt76.token_lock);
- idr_for_each_entry(&dev->mt76.token, txwi, id) {
- mt7915_txwi_free(dev, txwi, NULL, NULL);
- dev->mt76.token_count--;
- }
- spin_unlock_bh(&dev->mt76.token_lock);
- idr_destroy(&dev->mt76.token);
-}
-
static int
mt7915_mac_restart(struct mt7915_dev *dev)
{
napi_disable(&dev->mt76.tx_napi);
/* token reinit */
- mt7915_tx_token_put(dev);
+ mt76_connac2_tx_token_put(&dev->mt76);
idr_init(&dev->mt76.token);
mt7915_dma_reset(dev, true);
if (mt7915_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
mt7915_dma_reset(dev, false);
- mt7915_tx_token_put(dev);
+ mt76_connac2_tx_token_put(&dev->mt76);
idr_init(&dev->mt76.token);
mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_INIT);
enum mt76_txq_id qid, struct mt76_wcid *wcid,
struct ieee80211_sta *sta,
struct mt76_tx_info *tx_info);
-void mt7915_tx_token_put(struct mt7915_dev *dev);
void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb, u32 *info);
bool mt7915_rx_check(struct mt76_dev *mdev, void *data, int len);
return 0;
}
-static void mt7921_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
-{
- struct mt7921_sta *msta;
- u16 fc, tid;
- u32 val;
-
- if (!sta || !(sta->deflink.ht_cap.ht_supported || sta->deflink.he_cap.has_he))
- return;
-
- tid = le32_get_bits(txwi[1], MT_TXD1_TID);
- if (tid >= 6) /* skip VO queue */
- return;
-
- val = le32_to_cpu(txwi[2]);
- fc = FIELD_GET(MT_TXD2_FRAME_TYPE, val) << 2 |
- FIELD_GET(MT_TXD2_SUB_TYPE, val) << 4;
- if (unlikely(fc != (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA)))
- return;
-
- msta = (struct mt7921_sta *)sta->drv_priv;
- if (!test_and_set_bit(tid, &msta->wcid.ampdu_state))
- ieee80211_start_tx_ba_session(sta, tid, 0);
-}
-
void mt7921_mac_add_txs(struct mt7921_dev *dev, void *data)
{
struct mt7921_sta *msta = NULL;
rcu_read_unlock();
}
-void mt7921_txwi_free(struct mt7921_dev *dev, struct mt76_txwi_cache *t,
- struct ieee80211_sta *sta, bool clear_status,
- struct list_head *free_list)
-{
- struct mt76_dev *mdev = &dev->mt76;
- __le32 *txwi;
- u16 wcid_idx;
-
- mt76_connac_txp_skb_unmap(mdev, t);
- if (!t->skb)
- goto out;
-
- txwi = (__le32 *)mt76_get_txwi_ptr(mdev, t);
- if (sta) {
- struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
-
- if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE)))
- mt7921_tx_check_aggr(sta, txwi);
-
- wcid_idx = wcid->idx;
- } else {
- wcid_idx = le32_get_bits(txwi[1], MT_TXD1_WLAN_IDX);
- }
-
- __mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list);
-out:
- t->skb = NULL;
- mt76_put_txwi(mdev, t);
-}
-EXPORT_SYMBOL_GPL(mt7921_txwi_free);
-
static void mt7921_mac_tx_free(struct mt7921_dev *dev, void *data, int len)
{
struct mt76_connac_tx_free *free = data;
if (!txwi)
continue;
- mt7921_txwi_free(dev, txwi, sta, stat, &free_list);
+ mt76_connac2_txwi_free(mdev, txwi, sta, &free_list);
}
if (wake)
sta = wcid_to_sta(wcid);
if (sta && likely(e->skb->protocol != cpu_to_be16(ETH_P_PAE)))
- mt7921_tx_check_aggr(sta, txwi);
+ mt76_connac2_tx_check_aggr(sta, txwi);
skb_pull(e->skb, headroom);
mt76_tx_complete_skb(mdev, e->wcid, e->skb);
struct mt76_tx_info *tx_info);
void mt7921_tx_worker(struct mt76_worker *w);
-void mt7921_tx_token_put(struct mt7921_dev *dev);
bool mt7921_rx_check(struct mt76_dev *mdev, void *data, int len);
void mt7921_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb, u32 *info);
void *data, int len);
int mt7921_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg,
struct netlink_callback *cb, void *data, int len);
-void mt7921_txwi_free(struct mt7921_dev *dev, struct mt76_txwi_cache *t,
- struct ieee80211_sta *sta, bool clear_status,
- struct list_head *free_list);
int mt7921_mcu_parse_response(struct mt76_dev *mdev, int cmd,
struct sk_buff *skb, int seq);
cancel_work_sync(&pm->wake_work);
cancel_work_sync(&dev->reset_work);
- mt7921_tx_token_put(dev);
+ mt76_connac2_tx_token_put(&dev->mt76);
__mt7921_mcu_drv_pmctrl(dev);
mt7921_dma_cleanup(dev);
mt7921_wfsys_reset(dev);
return 0;
}
-void mt7921_tx_token_put(struct mt7921_dev *dev)
-{
- struct mt76_txwi_cache *txwi;
- int id;
-
- spin_lock_bh(&dev->mt76.token_lock);
- idr_for_each_entry(&dev->mt76.token, txwi, id) {
- mt7921_txwi_free(dev, txwi, NULL, false, NULL);
- dev->mt76.token_count--;
- }
- spin_unlock_bh(&dev->mt76.token_lock);
- idr_destroy(&dev->mt76.token);
-}
-
int mt7921e_mac_reset(struct mt7921_dev *dev)
{
int i, err;
napi_disable(&dev->mt76.napi[MT_RXQ_MCU_WA]);
napi_disable(&dev->mt76.tx_napi);
- mt7921_tx_token_put(dev);
+ mt76_connac2_tx_token_put(&dev->mt76);
idr_init(&dev->mt76.token);
mt7921_wpdma_reset(dev, true);