Diffstat (limited to 'package/kernel/mac80211/patches/345-mac80211-Move-reorder-sensitive-TX-handlers-to-after.patch')
-rw-r--r--  package/kernel/mac80211/patches/345-mac80211-Move-reorder-sensitive-TX-handlers-to-after.patch  478
1 file changed, 0 insertions(+), 478 deletions(-)
diff --git a/package/kernel/mac80211/patches/345-mac80211-Move-reorder-sensitive-TX-handlers-to-after.patch b/package/kernel/mac80211/patches/345-mac80211-Move-reorder-sensitive-TX-handlers-to-after.patch
deleted file mode 100644
index aba1ff4532..0000000000
--- a/package/kernel/mac80211/patches/345-mac80211-Move-reorder-sensitive-TX-handlers-to-after.patch
+++ /dev/null
@@ -1,478 +0,0 @@
-From: Felix Fietkau <nbd@nbd.name>
-Date: Sun, 4 Sep 2016 17:46:24 +0200
-Subject: [PATCH] mac80211: fix sequence number assignment for PS response
- frames
-
-When using intermediate queues, sequence number allocation is deferred
-until dequeue. This doesn't work for PS response frames, which bypass
-those queues.
-
-Signed-off-by: Felix Fietkau <nbd@nbd.name>
----
-
---- a/net/mac80211/tx.c
-+++ b/net/mac80211/tx.c
-@@ -38,6 +38,12 @@
- #include "wme.h"
- #include "rate.h"
-
-+static int invoke_tx_handlers_late(struct ieee80211_tx_data *tx);
-+static bool ieee80211_xmit_fast_finish(struct ieee80211_sub_if_data *sdata,
-+ struct sta_info *sta, u8 pn_offs,
-+ struct ieee80211_key_conf *key_conf,
-+ struct sk_buff *skb);
-+
- /* misc utils */
-
- static inline void ieee80211_tx_stats(struct net_device *dev, u32 len)
-@@ -849,8 +855,7 @@ ieee80211_tx_h_sequence(struct ieee80211
- tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
- tx->sta->tx_stats.msdu[tid]++;
-
-- if (!tx->sta->sta.txq[0])
-- hdr->seq_ctrl = ieee80211_tx_next_seq(tx->sta, tid);
-+ hdr->seq_ctrl = ieee80211_tx_next_seq(tx->sta, tid);
-
- return TX_CONTINUE;
- }
-@@ -1398,6 +1403,7 @@ void ieee80211_txq_init(struct ieee80211
- fq_tin_init(&txqi->tin);
- fq_flow_init(&txqi->def_flow);
- codel_vars_init(&txqi->def_cvars);
-+ __skb_queue_head_init(&txqi->frags);
-
- txqi->txq.vif = &sdata->vif;
-
-@@ -1420,6 +1426,7 @@ void ieee80211_txq_purge(struct ieee8021
- struct fq_tin *tin = &txqi->tin;
-
- fq_tin_reset(fq, tin, fq_skb_free_func);
-+ ieee80211_purge_tx_queue(&local->hw, &txqi->frags);
- }
-
- int ieee80211_txq_setup_flows(struct ieee80211_local *local)
-@@ -1476,12 +1483,19 @@ struct sk_buff *ieee80211_tx_dequeue(str
- struct sk_buff *skb = NULL;
- struct fq *fq = &local->fq;
- struct fq_tin *tin = &txqi->tin;
-+ struct ieee80211_tx_info *info;
-
- spin_lock_bh(&fq->lock);
-
- if (test_bit(IEEE80211_TXQ_STOP, &txqi->flags))
- goto out;
-
-+ /* Make sure fragments stay together. */
-+ skb = __skb_dequeue(&txqi->frags);
-+ if (skb)
-+ goto out;
-+
-+begin:
- skb = fq_tin_dequeue(fq, tin, fq_tin_dequeue_func);
- if (!skb)
- goto out;
-@@ -1489,16 +1503,38 @@ struct sk_buff *ieee80211_tx_dequeue(str
- ieee80211_set_skb_vif(skb, txqi);
-
- hdr = (struct ieee80211_hdr *)skb->data;
-- if (txq->sta && ieee80211_is_data_qos(hdr->frame_control)) {
-+ info = IEEE80211_SKB_CB(skb);
-+ if (txq->sta && info->control.flags & IEEE80211_TX_CTRL_FAST_XMIT) {
- struct sta_info *sta = container_of(txq->sta, struct sta_info,
- sta);
-- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-+ u8 pn_offs = 0;
-
-- hdr->seq_ctrl = ieee80211_tx_next_seq(sta, txq->tid);
-- if (test_bit(IEEE80211_TXQ_AMPDU, &txqi->flags))
-- info->flags |= IEEE80211_TX_CTL_AMPDU;
-- else
-- info->flags &= ~IEEE80211_TX_CTL_AMPDU;
-+ if (info->control.hw_key)
-+ pn_offs = ieee80211_padded_hdrlen(hw, hdr->frame_control);
-+
-+ ieee80211_xmit_fast_finish(sta->sdata, sta, pn_offs,
-+ info->control.hw_key, skb);
-+ } else {
-+ struct ieee80211_tx_data tx = { };
-+
-+ __skb_queue_head_init(&tx.skbs);
-+ tx.local = local;
-+ tx.skb = skb;
-+ tx.hdrlen = ieee80211_padded_hdrlen(hw, hdr->frame_control);
-+ if (txq->sta) {
-+ tx.sta = container_of(txq->sta, struct sta_info, sta);
-+ tx.sdata = tx.sta->sdata;
-+ } else {
-+ tx.sdata = vif_to_sdata(info->control.vif);
-+ }
-+
-+ if (invoke_tx_handlers_late(&tx))
-+ goto begin;
-+
-+ skb = __skb_dequeue(&tx.skbs);
-+
-+ if (!skb_queue_empty(&tx.skbs))
-+ skb_queue_splice_tail(&tx.skbs, &txqi->frags);
- }
-
- out:
-@@ -1512,6 +1548,47 @@ out:
- }
- EXPORT_SYMBOL(ieee80211_tx_dequeue);
-
-+static bool ieee80211_queue_skb(struct ieee80211_local *local,
-+ struct ieee80211_sub_if_data *sdata,
-+ struct sta_info *sta,
-+ struct sk_buff *skb)
-+{
-+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-+ struct fq *fq = &local->fq;
-+ struct ieee80211_vif *vif;
-+ struct txq_info *txqi;
-+ struct ieee80211_sta *pubsta;
-+
-+ if (!local->ops->wake_tx_queue ||
-+ sdata->vif.type == NL80211_IFTYPE_MONITOR)
-+ return false;
-+
-+ if (sta && sta->uploaded)
-+ pubsta = &sta->sta;
-+ else
-+ pubsta = NULL;
-+
-+ if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
-+ sdata = container_of(sdata->bss,
-+ struct ieee80211_sub_if_data, u.ap);
-+
-+ vif = &sdata->vif;
-+ txqi = ieee80211_get_txq(local, vif, pubsta, skb);
-+
-+ if (!txqi)
-+ return false;
-+
-+ info->control.vif = vif;
-+
-+ spin_lock_bh(&fq->lock);
-+ ieee80211_txq_enqueue(local, txqi, skb);
-+ spin_unlock_bh(&fq->lock);
-+
-+ drv_wake_tx_queue(local, txqi);
-+
-+ return true;
-+}
-+
- static bool ieee80211_tx_frags(struct ieee80211_local *local,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
-@@ -1519,9 +1596,7 @@ static bool ieee80211_tx_frags(struct ie
- bool txpending)
- {
- struct ieee80211_tx_control control = {};
-- struct fq *fq = &local->fq;
- struct sk_buff *skb, *tmp;
-- struct txq_info *txqi;
- unsigned long flags;
-
- skb_queue_walk_safe(skbs, skb, tmp) {
-@@ -1536,21 +1611,6 @@ static bool ieee80211_tx_frags(struct ie
- }
- #endif
-
-- txqi = ieee80211_get_txq(local, vif, sta, skb);
-- if (txqi) {
-- info->control.vif = vif;
--
-- __skb_unlink(skb, skbs);
--
-- spin_lock_bh(&fq->lock);
-- ieee80211_txq_enqueue(local, txqi, skb);
-- spin_unlock_bh(&fq->lock);
--
-- drv_wake_tx_queue(local, txqi);
--
-- continue;
-- }
--
- spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
- if (local->queue_stop_reasons[q] ||
- (!txpending && !skb_queue_empty(&local->pending[q]))) {
-@@ -1671,10 +1731,13 @@ static bool __ieee80211_tx(struct ieee80
- /*
- * Invoke TX handlers, return 0 on success and non-zero if the
- * frame was dropped or queued.
-+ *
-+ * The handlers are split into an early and late part. The latter is everything
-+ * that can be sensitive to reordering, and will be deferred to after packets
-+ * are dequeued from the intermediate queues (when they are enabled).
- */
--static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
-+static int invoke_tx_handlers_early(struct ieee80211_tx_data *tx)
- {
-- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
- ieee80211_tx_result res = TX_DROP;
-
- #define CALL_TXH(txh) \
-@@ -1688,16 +1751,42 @@ static int invoke_tx_handlers(struct iee
- CALL_TXH(ieee80211_tx_h_check_assoc);
- CALL_TXH(ieee80211_tx_h_ps_buf);
- CALL_TXH(ieee80211_tx_h_check_control_port_protocol);
-- CALL_TXH(ieee80211_tx_h_select_key);
-+
- if (!ieee80211_hw_check(&tx->local->hw, HAS_RATE_CONTROL))
- CALL_TXH(ieee80211_tx_h_rate_ctrl);
-
-+ txh_done:
-+ if (unlikely(res == TX_DROP)) {
-+ I802_DEBUG_INC(tx->local->tx_handlers_drop);
-+ if (tx->skb)
-+ ieee80211_free_txskb(&tx->local->hw, tx->skb);
-+ else
-+ ieee80211_purge_tx_queue(&tx->local->hw, &tx->skbs);
-+ return -1;
-+ } else if (unlikely(res == TX_QUEUED)) {
-+ I802_DEBUG_INC(tx->local->tx_handlers_queued);
-+ return -1;
-+ }
-+
-+ return 0;
-+}
-+
-+/*
-+ * Late handlers can be called while the sta lock is held. Handlers that can
-+ * cause packets to be generated will cause deadlock!
-+ */
-+static int invoke_tx_handlers_late(struct ieee80211_tx_data *tx)
-+{
-+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
-+ ieee80211_tx_result res = TX_CONTINUE;
-+
- if (unlikely(info->flags & IEEE80211_TX_INTFL_RETRANSMISSION)) {
- __skb_queue_tail(&tx->skbs, tx->skb);
- tx->skb = NULL;
- goto txh_done;
- }
-
-+ CALL_TXH(ieee80211_tx_h_select_key);
- CALL_TXH(ieee80211_tx_h_michael_mic_add);
- CALL_TXH(ieee80211_tx_h_sequence);
- CALL_TXH(ieee80211_tx_h_fragment);
-@@ -1724,6 +1813,15 @@ static int invoke_tx_handlers(struct iee
- return 0;
- }
-
-+static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
-+{
-+ int r = invoke_tx_handlers_early(tx);
-+ if (r)
-+ return r;
-+
-+ return invoke_tx_handlers_late(tx);
-+}
-+
- bool ieee80211_tx_prepare_skb(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, struct sk_buff *skb,
- int band, struct ieee80211_sta **sta)
-@@ -1798,7 +1896,13 @@ static bool ieee80211_tx(struct ieee8021
- info->hw_queue =
- sdata->vif.hw_queue[skb_get_queue_mapping(skb)];
-
-- if (!invoke_tx_handlers(&tx))
-+ if (invoke_tx_handlers_early(&tx))
-+ return false;
-+
-+ if (ieee80211_queue_skb(local, sdata, tx.sta, tx.skb))
-+ return true;
-+
-+ if (!invoke_tx_handlers_late(&tx))
- result = __ieee80211_tx(local, &tx.skbs, led_len,
- tx.sta, txpending);
-
-@@ -3181,7 +3285,7 @@ out:
- }
-
- static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata,
-- struct net_device *dev, struct sta_info *sta,
-+ struct sta_info *sta,
- struct ieee80211_fast_tx *fast_tx,
- struct sk_buff *skb)
- {
-@@ -3192,9 +3296,9 @@ static bool ieee80211_xmit_fast(struct i
- struct ethhdr eth;
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct ieee80211_hdr *hdr = (void *)fast_tx->hdr;
-- struct ieee80211_tx_data tx;
-- ieee80211_tx_result r;
- struct tid_ampdu_tx *tid_tx = NULL;
-+ ieee80211_tx_result r;
-+ struct ieee80211_tx_data tx;
- u8 tid = IEEE80211_NUM_TIDS;
-
- /* control port protocol needs a lot of special handling */
-@@ -3232,8 +3336,6 @@ static bool ieee80211_xmit_fast(struct i
- return true;
- }
-
-- ieee80211_tx_stats(dev, skb->len + extra_head);
--
- if ((hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) &&
- ieee80211_amsdu_aggregate(sdata, sta, fast_tx, skb))
- return true;
-@@ -3262,24 +3364,7 @@ static bool ieee80211_xmit_fast(struct i
- info->flags = IEEE80211_TX_CTL_FIRST_FRAGMENT |
- IEEE80211_TX_CTL_DONTFRAG |
- (tid_tx ? IEEE80211_TX_CTL_AMPDU : 0);
--
-- if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) {
-- *ieee80211_get_qos_ctl(hdr) = tid;
-- if (!sta->sta.txq[0])
-- hdr->seq_ctrl = ieee80211_tx_next_seq(sta, tid);
-- } else {
-- info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
-- hdr->seq_ctrl = cpu_to_le16(sdata->sequence_number);
-- sdata->sequence_number += 0x10;
-- }
--
-- if (skb_shinfo(skb)->gso_size)
-- sta->tx_stats.msdu[tid] +=
-- DIV_ROUND_UP(skb->len, skb_shinfo(skb)->gso_size);
-- else
-- sta->tx_stats.msdu[tid]++;
--
-- info->hw_queue = sdata->vif.hw_queue[skb_get_queue_mapping(skb)];
-+ info->control.flags = IEEE80211_TX_CTRL_FAST_XMIT;
-
- __skb_queue_head_init(&tx.skbs);
-
-@@ -3305,22 +3390,71 @@ static bool ieee80211_xmit_fast(struct i
- }
- }
-
-+ if (ieee80211_queue_skb(local, sdata, sta, skb))
-+ return true;
-+
-+ ieee80211_xmit_fast_finish(sdata, sta, fast_tx->pn_offs,
-+ &fast_tx->key->conf, skb);
-+
-+ if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
-+ sdata = container_of(sdata->bss,
-+ struct ieee80211_sub_if_data, u.ap);
-+
-+ __skb_queue_tail(&tx.skbs, skb);
-+ ieee80211_tx_frags(local, &sdata->vif, &sta->sta, &tx.skbs, false);
-+
-+ return true;
-+}
-+
-+/*
-+ * Can be called while the sta lock is held. Anything that can cause packets to
-+ * be generated will cause deadlock!
-+ */
-+static bool ieee80211_xmit_fast_finish(struct ieee80211_sub_if_data *sdata,
-+ struct sta_info *sta, u8 pn_offs,
-+ struct ieee80211_key_conf *key_conf,
-+ struct sk_buff *skb)
-+{
-+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-+ struct ieee80211_hdr *hdr = (void *)skb->data;
-+ u8 tid = IEEE80211_NUM_TIDS;
-+
-+ ieee80211_tx_stats(skb->dev, skb->len);
-+
-+ if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) {
-+ tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
-+ *ieee80211_get_qos_ctl(hdr) = tid;
-+ hdr->seq_ctrl = ieee80211_tx_next_seq(sta, tid);
-+ } else {
-+ info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
-+ hdr->seq_ctrl = cpu_to_le16(sdata->sequence_number);
-+ sdata->sequence_number += 0x10;
-+ }
-+
-+ if (skb_shinfo(skb)->gso_size)
-+ sta->tx_stats.msdu[tid] +=
-+ DIV_ROUND_UP(skb->len, skb_shinfo(skb)->gso_size);
-+ else
-+ sta->tx_stats.msdu[tid]++;
-+
-+ info->hw_queue = sdata->vif.hw_queue[skb_get_queue_mapping(skb)];
-+
- /* statistics normally done by ieee80211_tx_h_stats (but that
- * has to consider fragmentation, so is more complex)
- */
- sta->tx_stats.bytes[skb_get_queue_mapping(skb)] += skb->len;
- sta->tx_stats.packets[skb_get_queue_mapping(skb)]++;
-
-- if (fast_tx->pn_offs) {
-+ if (pn_offs && (key_conf->flags & IEEE80211_KEY_FLAG_GENERATE_IV)) {
- u64 pn;
-- u8 *crypto_hdr = skb->data + fast_tx->pn_offs;
-+ u8 *crypto_hdr = skb->data + pn_offs;
-
-- switch (fast_tx->key->conf.cipher) {
-+ switch (key_conf->cipher) {
- case WLAN_CIPHER_SUITE_CCMP:
- case WLAN_CIPHER_SUITE_CCMP_256:
- case WLAN_CIPHER_SUITE_GCMP:
- case WLAN_CIPHER_SUITE_GCMP_256:
-- pn = atomic64_inc_return(&fast_tx->key->conf.tx_pn);
-+ pn = atomic64_inc_return(&key_conf->tx_pn);
- crypto_hdr[0] = pn;
- crypto_hdr[1] = pn >> 8;
- crypto_hdr[4] = pn >> 16;
-@@ -3331,12 +3465,6 @@ static bool ieee80211_xmit_fast(struct i
- }
- }
-
-- if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
-- sdata = container_of(sdata->bss,
-- struct ieee80211_sub_if_data, u.ap);
--
-- __skb_queue_tail(&tx.skbs, skb);
-- ieee80211_tx_frags(local, &sdata->vif, &sta->sta, &tx.skbs, false);
- return true;
- }
-
-@@ -3364,7 +3492,7 @@ void __ieee80211_subif_start_xmit(struct
- fast_tx = rcu_dereference(sta->fast_tx);
-
- if (fast_tx &&
-- ieee80211_xmit_fast(sdata, dev, sta, fast_tx, skb))
-+ ieee80211_xmit_fast(sdata, sta, fast_tx, skb))
- goto out;
- }
-
---- a/include/net/mac80211.h
-+++ b/include/net/mac80211.h
-@@ -715,6 +715,7 @@ enum mac80211_tx_info_flags {
- * frame (PS-Poll or uAPSD).
- * @IEEE80211_TX_CTRL_RATE_INJECT: This frame is injected with rate information
- * @IEEE80211_TX_CTRL_AMSDU: This frame is an A-MSDU frame
-+ * @IEEE80211_TX_CTRL_FAST_XMIT: This frame is going through the fast_xmit path
- *
- * These flags are used in tx_info->control.flags.
- */
-@@ -723,6 +724,7 @@ enum mac80211_tx_control_flags {
- IEEE80211_TX_CTRL_PS_RESPONSE = BIT(1),
- IEEE80211_TX_CTRL_RATE_INJECT = BIT(2),
- IEEE80211_TX_CTRL_AMSDU = BIT(3),
-+ IEEE80211_TX_CTRL_FAST_XMIT = BIT(4),
- };
-
- /*
---- a/net/mac80211/ieee80211_i.h
-+++ b/net/mac80211/ieee80211_i.h
-@@ -814,11 +814,13 @@ enum txq_info_flags {
- * @def_flow: used as a fallback flow when a packet destined to @tin hashes to
- * a fq_flow which is already owned by a different tin
- * @def_cvars: codel vars for @def_flow
-+ * @frags: used to keep fragments created after dequeue
- */
- struct txq_info {
- struct fq_tin tin;
- struct fq_flow def_flow;
- struct codel_vars def_cvars;
-+ struct sk_buff_head frags;
- unsigned long flags;
-
- /* keep last! */