author    | Felix Fietkau <nbd@nbd.name> | 2016-09-05 19:12:05 +0200
committer | Felix Fietkau <nbd@nbd.name> | 2016-09-08 15:28:38 +0200
commit    | a194ffd4a89588bc75aeb9a27f59c36afd3d24bd (patch)
tree      | 2e10f14cdd16f5d7a32ef0e01ec2fc9ea73439f5 /package/kernel
parent    | 859d940c79e8cb1c66a3d0cccc18e9eac499415b (diff)
download  | upstream-a194ffd4a89588bc75aeb9a27f59c36afd3d24bd.tar.gz, upstream-a194ffd4a89588bc75aeb9a27f59c36afd3d24bd.tar.bz2, upstream-a194ffd4a89588bc75aeb9a27f59c36afd3d24bd.zip
mac80211: fix packet loss on fq reordering
Signed-off-by: Felix Fietkau <nbd@nbd.name>
Diffstat (limited to 'package/kernel')
3 files changed, 481 insertions, 110 deletions
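A short orientation before the patch body: the new 346 patch splits mac80211's TX handlers into an early set, run before a frame is placed on the fq/codel intermediate queues, and a late, reorder-sensitive set (key selection, Michael MIC, sequence numbering, fragmentation) that now runs only when the frame is dequeued, so sequence numbers and crypto PNs follow the actual transmit order. Below is a minimal, self-contained C sketch of that enqueue/dequeue split; every struct and function name in it is a simplified stand-in for illustration, not the real mac80211 API.

```c
/*
 * Simplified model of the early/late TX handler split: reorder-sensitive
 * work is deferred to dequeue time. All names here are stand-ins.
 */
#include <stdio.h>

#define QLEN 8

struct pkt {
	int id;                /* enqueue order, for illustration only */
	unsigned int seq;      /* assigned late, at dequeue time */
};

struct txq {
	struct pkt *q[QLEN];
	unsigned int head, tail;
	unsigned int next_seq; /* per-queue sequence counter */
};

/* "Early" handlers: work that is safe to do before queueing
 * (in mac80211: association check, PS buffering, rate control). */
static int tx_handlers_early(struct pkt *p)
{
	(void)p;
	return 0;              /* 0 = continue, non-zero = drop */
}

/* "Late" handlers: reorder-sensitive work, run only at dequeue
 * (in mac80211: key selection, sequence number, fragmentation, PN). */
static void tx_handlers_late(struct txq *txq, struct pkt *p)
{
	p->seq = txq->next_seq;
	txq->next_seq += 0x10; /* seq number lives in the upper bits of seq_ctrl */
}

static int txq_enqueue(struct txq *txq, struct pkt *p)
{
	if (txq->tail - txq->head >= QLEN)
		return -1;                  /* queue full */
	if (tx_handlers_early(p))
		return -1;                  /* dropped by early handlers */
	txq->q[txq->tail++ % QLEN] = p;
	return 0;
}

static struct pkt *txq_dequeue(struct txq *txq)
{
	struct pkt *p;

	if (txq->head == txq->tail)
		return NULL;                /* queue empty */
	p = txq->q[txq->head++ % QLEN];
	tx_handlers_late(txq, p);           /* seq assigned in transmit order */
	return p;
}

int main(void)
{
	struct txq txq = { .head = 0, .tail = 0, .next_seq = 0 };
	struct pkt pkts[4] = { { .id = 0 }, { .id = 1 }, { .id = 2 }, { .id = 3 } };
	struct pkt *p;
	int i;

	for (i = 0; i < 4; i++)
		txq_enqueue(&txq, &pkts[i]);

	/* Sequence numbers now follow the order frames actually leave the
	 * queue, even when the real fq scheduler dequeues them in a
	 * different order than they arrived. */
	while ((p = txq_dequeue(&txq)) != NULL)
		printf("pkt %d -> seq_ctrl 0x%04x\n", p->id, p->seq);

	return 0;
}
```

In the actual patch, this is the role of `invoke_tx_handlers_early()`/`invoke_tx_handlers_late()` and the `IEEE80211_TX_CTRL_FAST_XMIT` handling in `ieee80211_tx_dequeue()`, which corresponds to `txq_dequeue()` in the sketch above.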
diff --git a/package/kernel/mac80211/patches/346-mac80211-Move-reorder-sensitive-TX-handlers-to-after.patch b/package/kernel/mac80211/patches/346-mac80211-Move-reorder-sensitive-TX-handlers-to-after.patch new file mode 100644 index 0000000000..8ceed5195d --- /dev/null +++ b/package/kernel/mac80211/patches/346-mac80211-Move-reorder-sensitive-TX-handlers-to-after.patch @@ -0,0 +1,478 @@ +From: Felix Fietkau <nbd@nbd.name> +Date: Sun, 4 Sep 2016 17:46:24 +0200 +Subject: [PATCH] mac80211: fix sequence number assignment for PS response + frames + +When using intermediate queues, sequence number allocation is deferred +until dequeue. This doesn't work for PS response frames, which bypass +those queues. + +Signed-off-by: Felix Fietkau <nbd@nbd.name> +--- + +--- a/net/mac80211/tx.c ++++ b/net/mac80211/tx.c +@@ -38,6 +38,12 @@ + #include "wme.h" + #include "rate.h" + ++static int invoke_tx_handlers_late(struct ieee80211_tx_data *tx); ++static bool ieee80211_xmit_fast_finish(struct ieee80211_sub_if_data *sdata, ++ struct sta_info *sta, u8 pn_offs, ++ struct ieee80211_key_conf *key_conf, ++ struct sk_buff *skb); ++ + /* misc utils */ + + static inline void ieee80211_tx_stats(struct net_device *dev, u32 len) +@@ -849,8 +855,7 @@ ieee80211_tx_h_sequence(struct ieee80211 + tid = *qc & IEEE80211_QOS_CTL_TID_MASK; + tx->sta->tx_stats.msdu[tid]++; + +- if (!tx->sta->sta.txq[0]) +- hdr->seq_ctrl = ieee80211_tx_next_seq(tx->sta, tid); ++ hdr->seq_ctrl = ieee80211_tx_next_seq(tx->sta, tid); + + return TX_CONTINUE; + } +@@ -1398,6 +1403,7 @@ void ieee80211_txq_init(struct ieee80211 + fq_tin_init(&txqi->tin); + fq_flow_init(&txqi->def_flow); + codel_vars_init(&txqi->def_cvars); ++ __skb_queue_head_init(&txqi->frags); + + txqi->txq.vif = &sdata->vif; + +@@ -1420,6 +1426,7 @@ void ieee80211_txq_purge(struct ieee8021 + struct fq_tin *tin = &txqi->tin; + + fq_tin_reset(fq, tin, fq_skb_free_func); ++ ieee80211_purge_tx_queue(&local->hw, &txqi->frags); + } + + int ieee80211_txq_setup_flows(struct ieee80211_local *local) +@@ -1476,12 +1483,19 @@ struct sk_buff *ieee80211_tx_dequeue(str + struct sk_buff *skb = NULL; + struct fq *fq = &local->fq; + struct fq_tin *tin = &txqi->tin; ++ struct ieee80211_tx_info *info; + + spin_lock_bh(&fq->lock); + + if (test_bit(IEEE80211_TXQ_STOP, &txqi->flags)) + goto out; + ++ /* Make sure fragments stay together. 
*/ ++ skb = __skb_dequeue(&txqi->frags); ++ if (skb) ++ goto out; ++ ++begin: + skb = fq_tin_dequeue(fq, tin, fq_tin_dequeue_func); + if (!skb) + goto out; +@@ -1489,16 +1503,38 @@ struct sk_buff *ieee80211_tx_dequeue(str + ieee80211_set_skb_vif(skb, txqi); + + hdr = (struct ieee80211_hdr *)skb->data; +- if (txq->sta && ieee80211_is_data_qos(hdr->frame_control)) { ++ info = IEEE80211_SKB_CB(skb); ++ if (txq->sta && info->control.flags & IEEE80211_TX_CTRL_FAST_XMIT) { + struct sta_info *sta = container_of(txq->sta, struct sta_info, + sta); +- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); ++ u8 pn_offs = 0; + +- hdr->seq_ctrl = ieee80211_tx_next_seq(sta, txq->tid); +- if (test_bit(IEEE80211_TXQ_AMPDU, &txqi->flags)) +- info->flags |= IEEE80211_TX_CTL_AMPDU; +- else +- info->flags &= ~IEEE80211_TX_CTL_AMPDU; ++ if (info->control.hw_key) ++ pn_offs = ieee80211_padded_hdrlen(hw, hdr->frame_control); ++ ++ ieee80211_xmit_fast_finish(sta->sdata, sta, pn_offs, ++ info->control.hw_key, skb); ++ } else { ++ struct ieee80211_tx_data tx = { }; ++ ++ __skb_queue_head_init(&tx.skbs); ++ tx.local = local; ++ tx.skb = skb; ++ tx.hdrlen = ieee80211_padded_hdrlen(hw, hdr->frame_control); ++ if (txq->sta) { ++ tx.sta = container_of(txq->sta, struct sta_info, sta); ++ tx.sdata = tx.sta->sdata; ++ } else { ++ tx.sdata = vif_to_sdata(info->control.vif); ++ } ++ ++ if (invoke_tx_handlers_late(&tx)) ++ goto begin; ++ ++ skb = __skb_dequeue(&tx.skbs); ++ ++ if (!skb_queue_empty(&tx.skbs)) ++ skb_queue_splice_tail(&tx.skbs, &txqi->frags); + } + + out: +@@ -1512,6 +1548,47 @@ out: + } + EXPORT_SYMBOL(ieee80211_tx_dequeue); + ++static bool ieee80211_queue_skb(struct ieee80211_local *local, ++ struct ieee80211_sub_if_data *sdata, ++ struct sta_info *sta, ++ struct sk_buff *skb) ++{ ++ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); ++ struct fq *fq = &local->fq; ++ struct ieee80211_vif *vif; ++ struct txq_info *txqi; ++ struct ieee80211_sta *pubsta; ++ ++ if (!local->ops->wake_tx_queue || ++ sdata->vif.type == NL80211_IFTYPE_MONITOR) ++ return false; ++ ++ if (sta && sta->uploaded) ++ pubsta = &sta->sta; ++ else ++ pubsta = NULL; ++ ++ if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) ++ sdata = container_of(sdata->bss, ++ struct ieee80211_sub_if_data, u.ap); ++ ++ vif = &sdata->vif; ++ txqi = ieee80211_get_txq(local, vif, pubsta, skb); ++ ++ if (!txqi) ++ return false; ++ ++ info->control.vif = vif; ++ ++ spin_lock_bh(&fq->lock); ++ ieee80211_txq_enqueue(local, txqi, skb); ++ spin_unlock_bh(&fq->lock); ++ ++ drv_wake_tx_queue(local, txqi); ++ ++ return true; ++} ++ + static bool ieee80211_tx_frags(struct ieee80211_local *local, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, +@@ -1519,9 +1596,7 @@ static bool ieee80211_tx_frags(struct ie + bool txpending) + { + struct ieee80211_tx_control control = {}; +- struct fq *fq = &local->fq; + struct sk_buff *skb, *tmp; +- struct txq_info *txqi; + unsigned long flags; + + skb_queue_walk_safe(skbs, skb, tmp) { +@@ -1536,21 +1611,6 @@ static bool ieee80211_tx_frags(struct ie + } + #endif + +- txqi = ieee80211_get_txq(local, vif, sta, skb); +- if (txqi) { +- info->control.vif = vif; +- +- __skb_unlink(skb, skbs); +- +- spin_lock_bh(&fq->lock); +- ieee80211_txq_enqueue(local, txqi, skb); +- spin_unlock_bh(&fq->lock); +- +- drv_wake_tx_queue(local, txqi); +- +- continue; +- } +- + spin_lock_irqsave(&local->queue_stop_reason_lock, flags); + if (local->queue_stop_reasons[q] || + (!txpending && !skb_queue_empty(&local->pending[q]))) { +@@ -1671,10 +1731,13 @@ 
static bool __ieee80211_tx(struct ieee80 + /* + * Invoke TX handlers, return 0 on success and non-zero if the + * frame was dropped or queued. ++ * ++ * The handlers are split into an early and late part. The latter is everything ++ * that can be sensitive to reordering, and will be deferred to after packets ++ * are dequeued from the intermediate queues (when they are enabled). + */ +-static int invoke_tx_handlers(struct ieee80211_tx_data *tx) ++static int invoke_tx_handlers_early(struct ieee80211_tx_data *tx) + { +- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); + ieee80211_tx_result res = TX_DROP; + + #define CALL_TXH(txh) \ +@@ -1688,16 +1751,42 @@ static int invoke_tx_handlers(struct iee + CALL_TXH(ieee80211_tx_h_check_assoc); + CALL_TXH(ieee80211_tx_h_ps_buf); + CALL_TXH(ieee80211_tx_h_check_control_port_protocol); +- CALL_TXH(ieee80211_tx_h_select_key); ++ + if (!ieee80211_hw_check(&tx->local->hw, HAS_RATE_CONTROL)) + CALL_TXH(ieee80211_tx_h_rate_ctrl); + ++ txh_done: ++ if (unlikely(res == TX_DROP)) { ++ I802_DEBUG_INC(tx->local->tx_handlers_drop); ++ if (tx->skb) ++ ieee80211_free_txskb(&tx->local->hw, tx->skb); ++ else ++ ieee80211_purge_tx_queue(&tx->local->hw, &tx->skbs); ++ return -1; ++ } else if (unlikely(res == TX_QUEUED)) { ++ I802_DEBUG_INC(tx->local->tx_handlers_queued); ++ return -1; ++ } ++ ++ return 0; ++} ++ ++/* ++ * Late handlers can be called while the sta lock is held. Handlers that can ++ * cause packets to be generated will cause deadlock! ++ */ ++static int invoke_tx_handlers_late(struct ieee80211_tx_data *tx) ++{ ++ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); ++ ieee80211_tx_result res = TX_CONTINUE; ++ + if (unlikely(info->flags & IEEE80211_TX_INTFL_RETRANSMISSION)) { + __skb_queue_tail(&tx->skbs, tx->skb); + tx->skb = NULL; + goto txh_done; + } + ++ CALL_TXH(ieee80211_tx_h_select_key); + CALL_TXH(ieee80211_tx_h_michael_mic_add); + CALL_TXH(ieee80211_tx_h_sequence); + CALL_TXH(ieee80211_tx_h_fragment); +@@ -1724,6 +1813,15 @@ static int invoke_tx_handlers(struct iee + return 0; + } + ++static int invoke_tx_handlers(struct ieee80211_tx_data *tx) ++{ ++ int r = invoke_tx_handlers_early(tx); ++ if (r) ++ return r; ++ ++ return invoke_tx_handlers_late(tx); ++} ++ + bool ieee80211_tx_prepare_skb(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, struct sk_buff *skb, + int band, struct ieee80211_sta **sta) +@@ -1798,7 +1896,13 @@ static bool ieee80211_tx(struct ieee8021 + info->hw_queue = + sdata->vif.hw_queue[skb_get_queue_mapping(skb)]; + +- if (!invoke_tx_handlers(&tx)) ++ if (invoke_tx_handlers_early(&tx)) ++ return false; ++ ++ if (ieee80211_queue_skb(local, sdata, tx.sta, tx.skb)) ++ return true; ++ ++ if (!invoke_tx_handlers_late(&tx)) + result = __ieee80211_tx(local, &tx.skbs, led_len, + tx.sta, txpending); + +@@ -3181,7 +3285,7 @@ out: + } + + static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata, +- struct net_device *dev, struct sta_info *sta, ++ struct sta_info *sta, + struct ieee80211_fast_tx *fast_tx, + struct sk_buff *skb) + { +@@ -3192,9 +3296,9 @@ static bool ieee80211_xmit_fast(struct i + struct ethhdr eth; + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_hdr *hdr = (void *)fast_tx->hdr; +- struct ieee80211_tx_data tx; +- ieee80211_tx_result r; + struct tid_ampdu_tx *tid_tx = NULL; ++ ieee80211_tx_result r; ++ struct ieee80211_tx_data tx; + u8 tid = IEEE80211_NUM_TIDS; + + /* control port protocol needs a lot of special handling */ +@@ -3232,8 +3336,6 @@ static bool 
ieee80211_xmit_fast(struct i + return true; + } + +- ieee80211_tx_stats(dev, skb->len + extra_head); +- + if ((hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) && + ieee80211_amsdu_aggregate(sdata, sta, fast_tx, skb)) + return true; +@@ -3262,24 +3364,7 @@ static bool ieee80211_xmit_fast(struct i + info->flags = IEEE80211_TX_CTL_FIRST_FRAGMENT | + IEEE80211_TX_CTL_DONTFRAG | + (tid_tx ? IEEE80211_TX_CTL_AMPDU : 0); +- +- if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) { +- *ieee80211_get_qos_ctl(hdr) = tid; +- if (!sta->sta.txq[0]) +- hdr->seq_ctrl = ieee80211_tx_next_seq(sta, tid); +- } else { +- info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ; +- hdr->seq_ctrl = cpu_to_le16(sdata->sequence_number); +- sdata->sequence_number += 0x10; +- } +- +- if (skb_shinfo(skb)->gso_size) +- sta->tx_stats.msdu[tid] += +- DIV_ROUND_UP(skb->len, skb_shinfo(skb)->gso_size); +- else +- sta->tx_stats.msdu[tid]++; +- +- info->hw_queue = sdata->vif.hw_queue[skb_get_queue_mapping(skb)]; ++ info->control.flags = IEEE80211_TX_CTRL_FAST_XMIT; + + __skb_queue_head_init(&tx.skbs); + +@@ -3305,22 +3390,71 @@ static bool ieee80211_xmit_fast(struct i + } + } + ++ if (ieee80211_queue_skb(local, sdata, sta, skb)) ++ return true; ++ ++ ieee80211_xmit_fast_finish(sdata, sta, fast_tx->pn_offs, ++ &fast_tx->key->conf, skb); ++ ++ if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) ++ sdata = container_of(sdata->bss, ++ struct ieee80211_sub_if_data, u.ap); ++ ++ __skb_queue_tail(&tx.skbs, skb); ++ ieee80211_tx_frags(local, &sdata->vif, &sta->sta, &tx.skbs, false); ++ ++ return true; ++} ++ ++/* ++ * Can be called while the sta lock is held. Anything that can cause packets to ++ * be generated will cause deadlock! ++ */ ++static bool ieee80211_xmit_fast_finish(struct ieee80211_sub_if_data *sdata, ++ struct sta_info *sta, u8 pn_offs, ++ struct ieee80211_key_conf *key_conf, ++ struct sk_buff *skb) ++{ ++ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); ++ struct ieee80211_hdr *hdr = (void *)skb->data; ++ u8 tid = IEEE80211_NUM_TIDS; ++ ++ ieee80211_tx_stats(skb->dev, skb->len); ++ ++ if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) { ++ tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK; ++ *ieee80211_get_qos_ctl(hdr) = tid; ++ hdr->seq_ctrl = ieee80211_tx_next_seq(sta, tid); ++ } else { ++ info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ; ++ hdr->seq_ctrl = cpu_to_le16(sdata->sequence_number); ++ sdata->sequence_number += 0x10; ++ } ++ ++ if (skb_shinfo(skb)->gso_size) ++ sta->tx_stats.msdu[tid] += ++ DIV_ROUND_UP(skb->len, skb_shinfo(skb)->gso_size); ++ else ++ sta->tx_stats.msdu[tid]++; ++ ++ info->hw_queue = sdata->vif.hw_queue[skb_get_queue_mapping(skb)]; ++ + /* statistics normally done by ieee80211_tx_h_stats (but that + * has to consider fragmentation, so is more complex) + */ + sta->tx_stats.bytes[skb_get_queue_mapping(skb)] += skb->len; + sta->tx_stats.packets[skb_get_queue_mapping(skb)]++; + +- if (fast_tx->pn_offs) { ++ if (pn_offs) { + u64 pn; +- u8 *crypto_hdr = skb->data + fast_tx->pn_offs; ++ u8 *crypto_hdr = skb->data + pn_offs; + +- switch (fast_tx->key->conf.cipher) { ++ switch (key_conf->cipher) { + case WLAN_CIPHER_SUITE_CCMP: + case WLAN_CIPHER_SUITE_CCMP_256: + case WLAN_CIPHER_SUITE_GCMP: + case WLAN_CIPHER_SUITE_GCMP_256: +- pn = atomic64_inc_return(&fast_tx->key->conf.tx_pn); ++ pn = atomic64_inc_return(&key_conf->tx_pn); + crypto_hdr[0] = pn; + crypto_hdr[1] = pn >> 8; + crypto_hdr[4] = pn >> 16; +@@ -3331,12 +3465,6 @@ static bool ieee80211_xmit_fast(struct i + } + } 
+ +- if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) +- sdata = container_of(sdata->bss, +- struct ieee80211_sub_if_data, u.ap); +- +- __skb_queue_tail(&tx.skbs, skb); +- ieee80211_tx_frags(local, &sdata->vif, &sta->sta, &tx.skbs, false); + return true; + } + +@@ -3364,7 +3492,7 @@ void __ieee80211_subif_start_xmit(struct + fast_tx = rcu_dereference(sta->fast_tx); + + if (fast_tx && +- ieee80211_xmit_fast(sdata, dev, sta, fast_tx, skb)) ++ ieee80211_xmit_fast(sdata, sta, fast_tx, skb)) + goto out; + } + +--- a/include/net/mac80211.h ++++ b/include/net/mac80211.h +@@ -715,6 +715,7 @@ enum mac80211_tx_info_flags { + * frame (PS-Poll or uAPSD). + * @IEEE80211_TX_CTRL_RATE_INJECT: This frame is injected with rate information + * @IEEE80211_TX_CTRL_AMSDU: This frame is an A-MSDU frame ++ * @IEEE80211_TX_CTRL_FAST_XMIT: This frame is going through the fast_xmit path + * + * These flags are used in tx_info->control.flags. + */ +@@ -723,6 +724,7 @@ enum mac80211_tx_control_flags { + IEEE80211_TX_CTRL_PS_RESPONSE = BIT(1), + IEEE80211_TX_CTRL_RATE_INJECT = BIT(2), + IEEE80211_TX_CTRL_AMSDU = BIT(3), ++ IEEE80211_TX_CTRL_FAST_XMIT = BIT(4), + }; + + /* +--- a/net/mac80211/ieee80211_i.h ++++ b/net/mac80211/ieee80211_i.h +@@ -814,11 +814,13 @@ enum txq_info_flags { + * @def_flow: used as a fallback flow when a packet destined to @tin hashes to + * a fq_flow which is already owned by a different tin + * @def_cvars: codel vars for @def_flow ++ * @frags: used to keep fragments created after dequeue + */ + struct txq_info { + struct fq_tin tin; + struct fq_flow def_flow; + struct codel_vars def_cvars; ++ struct sk_buff_head frags; + unsigned long flags; + + /* keep last! */ diff --git a/package/kernel/mac80211/patches/346-mac80211-fix-sequence-number-assignment-for-PS-respo.patch b/package/kernel/mac80211/patches/346-mac80211-fix-sequence-number-assignment-for-PS-respo.patch deleted file mode 100644 index a82d12f1c1..0000000000 --- a/package/kernel/mac80211/patches/346-mac80211-fix-sequence-number-assignment-for-PS-respo.patch +++ /dev/null @@ -1,107 +0,0 @@ -From: Felix Fietkau <nbd@nbd.name> -Date: Sun, 4 Sep 2016 17:46:24 +0200 -Subject: [PATCH] mac80211: fix sequence number assignment for PS response - frames - -When using intermediate queues, sequence number allocation is deferred -until dequeue. This doesn't work for PS response frames, which bypass -those queues. 
- -Signed-off-by: Felix Fietkau <nbd@nbd.name> ---- - ---- a/net/mac80211/tx.c -+++ b/net/mac80211/tx.c -@@ -792,6 +792,36 @@ static __le16 ieee80211_tx_next_seq(stru - return ret; - } - -+static struct txq_info *ieee80211_get_txq(struct ieee80211_local *local, -+ struct ieee80211_vif *vif, -+ struct ieee80211_sta *pubsta, -+ struct sk_buff *skb) -+{ -+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; -+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); -+ struct ieee80211_txq *txq = NULL; -+ -+ if ((info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) || -+ (info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE)) -+ return NULL; -+ -+ if (!ieee80211_is_data(hdr->frame_control)) -+ return NULL; -+ -+ if (pubsta) { -+ u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK; -+ -+ txq = pubsta->txq[tid]; -+ } else if (vif) { -+ txq = vif->txq; -+ } -+ -+ if (!txq) -+ return NULL; -+ -+ return to_txq_info(txq); -+} -+ - static ieee80211_tx_result debug_noinline - ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx) - { -@@ -849,7 +879,8 @@ ieee80211_tx_h_sequence(struct ieee80211 - tid = *qc & IEEE80211_QOS_CTL_TID_MASK; - tx->sta->tx_stats.msdu[tid]++; - -- if (!tx->sta->sta.txq[0]) -+ if (!ieee80211_get_txq(tx->local, info->control.vif, &tx->sta->sta, -+ tx->skb)) - hdr->seq_ctrl = ieee80211_tx_next_seq(tx->sta, tid); - - return TX_CONTINUE; -@@ -1238,36 +1269,6 @@ ieee80211_tx_prepare(struct ieee80211_su - return TX_CONTINUE; - } - --static struct txq_info *ieee80211_get_txq(struct ieee80211_local *local, -- struct ieee80211_vif *vif, -- struct ieee80211_sta *pubsta, -- struct sk_buff *skb) --{ -- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; -- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); -- struct ieee80211_txq *txq = NULL; -- -- if ((info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) || -- (info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE)) -- return NULL; -- -- if (!ieee80211_is_data(hdr->frame_control)) -- return NULL; -- -- if (pubsta) { -- u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK; -- -- txq = pubsta->txq[tid]; -- } else if (vif) { -- txq = vif->txq; -- } -- -- if (!txq) -- return NULL; -- -- return to_txq_info(txq); --} -- - static void ieee80211_set_skb_enqueue_time(struct sk_buff *skb) - { - IEEE80211_SKB_CB(skb)->control.enqueue_time = codel_get_time(); -@@ -3265,7 +3266,7 @@ static bool ieee80211_xmit_fast(struct i - - if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) { - *ieee80211_get_qos_ctl(hdr) = tid; -- if (!sta->sta.txq[0]) -+ if (!ieee80211_get_txq(local, &sdata->vif, &sta->sta, skb)) - hdr->seq_ctrl = ieee80211_tx_next_seq(sta, tid); - } else { - info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ; diff --git a/package/kernel/mac80211/patches/522-mac80211_configure_antenna_gain.patch b/package/kernel/mac80211/patches/522-mac80211_configure_antenna_gain.patch index c40598dde6..aba065ee8e 100644 --- a/package/kernel/mac80211/patches/522-mac80211_configure_antenna_gain.patch +++ b/package/kernel/mac80211/patches/522-mac80211_configure_antenna_gain.patch @@ -18,7 +18,7 @@ const u8 *addr); --- a/include/net/mac80211.h +++ b/include/net/mac80211.h -@@ -1317,6 +1317,7 @@ enum ieee80211_smps_mode { +@@ -1319,6 +1319,7 @@ enum ieee80211_smps_mode { * * @power_level: requested transmit power (in dBm), backward compatibility * value only that is set to the minimum of all interfaces @@ -26,7 +26,7 @@ * * @chandef: the channel definition to tune to * @radar_enabled: whether radar detection is enabled -@@ -1337,6 +1338,7 @@ enum 
ieee80211_smps_mode { +@@ -1339,6 +1340,7 @@ enum ieee80211_smps_mode { struct ieee80211_conf { u32 flags; int power_level, dynamic_ps_timeout; @@ -87,7 +87,7 @@ CFG80211_TESTMODE_CMD(ieee80211_testmode_cmd) --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h -@@ -1338,6 +1338,7 @@ struct ieee80211_local { +@@ -1340,6 +1340,7 @@ struct ieee80211_local { int dynamic_ps_forced_timeout; int user_power_level; /* in dBm, for all interfaces */ |