rcu_read_unlock();
}
-void wfx_skb_dtor(struct wfx_dev *wdev, struct sk_buff *skb)
+static void wfx_skb_dtor(struct wfx_dev *wdev, struct sk_buff *skb)
{
struct hif_msg *hif = (struct hif_msg *)skb->data;
struct hif_req_tx *req = (struct hif_req_tx *)hif->body;
wfx_skb_dtor(wvif->wdev, skb);
}
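+/* Implementation of the mac80211 flush callback: for each AC requested in
+ * "queues", drop the queued frames if "drop" is set, then wait (at most one
+ * second) for the remaining frames to be sent. A NULL vif matches the frames
+ * of every vif. On a frozen chip, don't wait and instead reclaim the frames
+ * already sent to the hardware.
+ */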
+void wfx_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop)
+{
+ struct wfx_dev *wdev = hw->priv;
+ struct sk_buff_head dropped;
+ struct wfx_queue *queue;
+ struct sk_buff *skb;
+ int vif_id = -1;
+ int i;
+
+ if (vif)
+ vif_id = ((struct wfx_vif *)vif->drv_priv)->id;
+ skb_queue_head_init(&dropped);
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ if (!(BIT(i) & queues))
+ continue;
+ queue = &wdev->tx_queue[i];
+ if (drop)
+ wfx_tx_queue_drop(wdev, queue, vif_id, &dropped);
+ if (wdev->chip_frozen)
+ continue;
+ if (wait_event_timeout(wdev->tx_dequeue,
+ wfx_tx_queue_empty(wdev, queue, vif_id),
+ msecs_to_jiffies(1000)) <= 0)
+ dev_warn(wdev->dev, "frames queued while flushing tx queues?");
+ }
+ wfx_tx_flush(wdev);
+ if (wdev->chip_frozen)
+ wfx_pending_drop(wdev, &dropped);
+ while ((skb = skb_dequeue(&dropped)) != NULL)
+ wfx_skb_dtor(wdev, skb);
+}
wfx_tx_flush(wdev);
}
-/* If successful, LOCKS the TX queue! */
-void wfx_tx_queues_wait_empty_vif(struct wfx_vif *wvif)
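+/* Initialize the pending queue, the dequeue waitqueue and the per-AC queues. */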
+void wfx_tx_queues_init(struct wfx_dev *wdev)
{
int i;
- bool done;
- struct wfx_queue *queue;
- struct sk_buff *item;
- struct wfx_dev *wdev = wvif->wdev;
- struct hif_msg *hif;
- if (wvif->wdev->chip_frozen) {
- wfx_tx_lock_flush(wdev);
- wfx_tx_queues_clear(wdev);
- return;
+ skb_queue_head_init(&wdev->tx_pending);
+ init_waitqueue_head(&wdev->tx_dequeue);
+ for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
+ skb_queue_head_init(&wdev->tx_queue[i].normal);
+ skb_queue_head_init(&wdev->tx_queue[i].cab);
}
-
- do {
- done = true;
- wfx_tx_lock_flush(wdev);
- for (i = 0; i < IEEE80211_NUM_ACS && done; ++i) {
- queue = &wdev->tx_queue[i];
- spin_lock_bh(&queue->normal.lock);
- skb_queue_walk(&queue->normal, item) {
- hif = (struct hif_msg *)item->data;
- if (hif->interface == wvif->id)
- done = false;
- }
- spin_unlock_bh(&queue->normal.lock);
- spin_lock_bh(&queue->cab.lock);
- skb_queue_walk(&queue->cab, item) {
- hif = (struct hif_msg *)item->data;
- if (hif->interface == wvif->id)
- done = false;
- }
- spin_unlock_bh(&queue->cab.lock);
- }
- if (!done) {
- wfx_tx_unlock(wdev);
- msleep(20);
- }
- } while (!done);
}
-static void wfx_tx_queue_clear(struct wfx_dev *wdev, struct wfx_queue *queue,
- struct sk_buff_head *gc_list)
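+/* Sanity check: warn if any frame is still queued in the driver or pending
+ * in the hardware.
+ */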
+void wfx_tx_queues_check_empty(struct wfx_dev *wdev)
{
- struct sk_buff *item;
+ int i;
- while ((item = skb_dequeue(&queue->normal)) != NULL)
- skb_queue_head(gc_list, item);
- while ((item = skb_dequeue(&queue->cab)) != NULL)
- skb_queue_head(gc_list, item);
+ WARN_ON(!skb_queue_empty_lockless(&wdev->tx_pending));
+ for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
+ WARN_ON(atomic_read(&wdev->tx_queue[i].pending_frames));
+ WARN_ON(!skb_queue_empty_lockless(&wdev->tx_queue[i].normal));
+ WARN_ON(!skb_queue_empty_lockless(&wdev->tx_queue[i].cab));
+ }
}
-void wfx_tx_queues_clear(struct wfx_dev *wdev)
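+/* Return true if skb_queue holds no frame for vif_id (any vif if vif_id is
+ * negative). The queue is walked under its lock.
+ */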
+static bool __wfx_tx_queue_empty(struct wfx_dev *wdev,
+ struct sk_buff_head *skb_queue, int vif_id)
{
- int i;
- struct sk_buff *item;
- struct sk_buff_head gc_list;
+ struct hif_msg *hif_msg;
+ struct sk_buff *skb;
- skb_queue_head_init(&gc_list);
- for (i = 0; i < IEEE80211_NUM_ACS; ++i)
- wfx_tx_queue_clear(wdev, &wdev->tx_queue[i], &gc_list);
- wake_up(&wdev->tx_dequeue);
- while ((item = skb_dequeue(&gc_list)) != NULL)
- wfx_skb_dtor(wdev, item);
+ spin_lock_bh(&skb_queue->lock);
+ skb_queue_walk(skb_queue, skb) {
+ hif_msg = (struct hif_msg *)skb->data;
+ if (vif_id < 0 || hif_msg->interface == vif_id) {
+ spin_unlock_bh(&skb_queue->lock);
+ return false;
+ }
+ }
+ spin_unlock_bh(&skb_queue->lock);
+ return true;
}
-void wfx_tx_queues_init(struct wfx_dev *wdev)
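+/* Return true if neither the normal nor the CAB (Content After Beacon) queue
+ * holds a frame for vif_id.
+ */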
+bool wfx_tx_queue_empty(struct wfx_dev *wdev,
+ struct wfx_queue *queue, int vif_id)
{
- int i;
-
- memset(wdev->tx_queue, 0, sizeof(wdev->tx_queue));
- skb_queue_head_init(&wdev->tx_pending);
- init_waitqueue_head(&wdev->tx_dequeue);
+ return __wfx_tx_queue_empty(wdev, &queue->normal, vif_id) &&
+ __wfx_tx_queue_empty(wdev, &queue->cab, vif_id);
+}
- for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
- skb_queue_head_init(&wdev->tx_queue[i].normal);
- skb_queue_head_init(&wdev->tx_queue[i].cab);
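+/* Unlink the frames of vif_id (any vif if negative) from skb_queue and
+ * collect them on "dropped".
+ */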
+static void __wfx_tx_queue_drop(struct wfx_dev *wdev,
+ struct sk_buff_head *skb_queue, int vif_id,
+ struct sk_buff_head *dropped)
+{
+ struct sk_buff *skb, *tmp;
+ struct hif_msg *hif_msg;
+
+ spin_lock_bh(&skb_queue->lock);
+ skb_queue_walk_safe(skb_queue, skb, tmp) {
+ hif_msg = (struct hif_msg *)skb->data;
+ if (vif_id < 0 || hif_msg->interface == vif_id) {
+ __skb_unlink(skb, skb_queue);
+ skb_queue_head(dropped, skb);
+ }
}
+ spin_unlock_bh(&skb_queue->lock);
}
-void wfx_tx_queues_deinit(struct wfx_dev *wdev)
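+/* Drop the frames of one AC queue (CAB first), then wake up anyone waiting
+ * on tx_dequeue.
+ */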
+void wfx_tx_queue_drop(struct wfx_dev *wdev, struct wfx_queue *queue,
+ int vif_id, struct sk_buff_head *dropped)
{
- WARN_ON(!skb_queue_empty(&wdev->tx_pending));
- wfx_tx_queues_clear(wdev);
+ __wfx_tx_queue_drop(wdev, &queue->cab, vif_id, dropped);
+ __wfx_tx_queue_drop(wdev, &queue->normal, vif_id, dropped);
+ wake_up(&wdev->tx_dequeue);
}
void wfx_tx_queues_put(struct wfx_dev *wdev, struct sk_buff *skb)
return 0;
}
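+/* Give back the frames already sent to the hardware. Only meaningful on a
+ * frozen chip, since the firmware will never complete them.
+ */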
+void wfx_pending_drop(struct wfx_dev *wdev, struct sk_buff_head *dropped)
+{
+ struct wfx_queue *queue;
+ struct sk_buff *skb;
+
+ WARN(!wdev->chip_frozen, "%s should only be used to recover a frozen device",
+ __func__);
+ while ((skb = skb_dequeue(&wdev->tx_pending)) != NULL) {
+ WARN_ON(skb_get_queue_mapping(skb) > 3);
+ queue = &wdev->tx_queue[skb_get_queue_mapping(skb)];
+ WARN_ON(!atomic_read(&queue->pending_frames));
+ atomic_dec(&queue->pending_frames);
+ skb_queue_head(dropped, skb);
+ }
+}
+
struct sk_buff *wfx_pending_get(struct wfx_dev *wdev, u32 packet_id)
{
struct wfx_queue *queue;
return false;
}
-bool wfx_tx_queues_empty(struct wfx_dev *wdev)
-{
- int i;
-
- for (i = 0; i < IEEE80211_NUM_ACS; i++)
- if (!skb_queue_empty_lockless(&wdev->tx_queue[i].normal) ||
- !skb_queue_empty_lockless(&wdev->tx_queue[i].cab))
- return false;
- return true;
-}
-
static bool wfx_handle_tx_data(struct wfx_dev *wdev, struct sk_buff *skb)
{
struct hif_req_tx *req = wfx_skb_txreq(skb);
if (!skb)
return NULL;
skb_queue_tail(&wdev->tx_pending, skb);
- if (wfx_tx_queues_empty(wdev))
- wake_up(&wdev->tx_dequeue);
+ wake_up(&wdev->tx_dequeue);
// FIXME: is this call to wfx_handle_tx_data() still useful?
if (wfx_handle_tx_data(wdev, skb))
continue;
return 0;
}
-static int __wfx_flush(struct wfx_dev *wdev, bool drop)
-{
- for (;;) {
- if (drop)
- wfx_tx_queues_clear(wdev);
- if (wait_event_timeout(wdev->tx_dequeue,
- wfx_tx_queues_empty(wdev),
- 2 * HZ) <= 0)
- return -ETIMEDOUT;
- wfx_tx_flush(wdev);
- if (wfx_tx_queues_empty(wdev))
- return 0;
- dev_warn(wdev->dev, "frames queued while flushing tx queues");
- }
-}
-
-void wfx_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- u32 queues, bool drop)
-{
- // FIXME: only flush requested vif and queues
- __wfx_flush(hw->priv, drop);
-}
-
/* WSM callbacks */
static void wfx_event_report_rssi(struct wfx_vif *wvif, u8 raw_rcpi_rssi)
skb = ieee80211_beacon_get_tim(wvif->wdev->hw, wvif->vif,
&tim_offset, &tim_length);
- if (!skb) {
- __wfx_flush(wvif->wdev, true);
+ if (!skb)
return -ENOENT;
- }
tim_ptr = skb->data + tim_offset;
if (tim_offset && tim_length >= 6) {
}
wvif->state = WFX_STATE_PASSIVE;
- wfx_tx_queues_wait_empty_vif(wvif);
- wfx_tx_unlock(wdev);
/* FIXME: In addition to resetting the MAC address, try to reset the interface */
hif_set_macaddr(wvif, NULL);
{
struct wfx_dev *wdev = hw->priv;
- wfx_tx_lock_flush(wdev);
- mutex_lock(&wdev->conf_mutex);
- wfx_tx_queues_clear(wdev);
- mutex_unlock(&wdev->conf_mutex);
- wfx_tx_unlock(wdev);
- WARN(atomic_read(&wdev->tx_lock), "tx_lock is locked");
+ wfx_tx_queues_check_empty(wdev);
}