author     Felix Fietkau <nbd@nbd.name>    2019-03-13 21:58:55 +0100
committer  Felix Fietkau <nbd@nbd.name>    2019-03-16 19:59:02 +0100
commit     6869ae2ab57c16b28404f2257c0ad9612cc3a0a2 (patch)
tree       05563896f10963614eb079ebd0e340240b2b8328 /package
parent     04e4b779cc64c85955910909b979c81177691a3b (diff)
mac80211: improve locking around the txq scheduling / airtime fairness API
Signed-off-by: Felix Fietkau <nbd@nbd.name>
Diffstat (limited to 'package')
-rw-r--r--   package/kernel/mac80211/patches/subsys/352-mac80211-rework-locking-for-txq-scheduling-airtime-f.patch   214
1 file changed, 214 insertions, 0 deletions
diff --git a/package/kernel/mac80211/patches/subsys/352-mac80211-rework-locking-for-txq-scheduling-airtime-f.patch b/package/kernel/mac80211/patches/subsys/352-mac80211-rework-locking-for-txq-scheduling-airtime-f.patch
new file mode 100644
index 0000000000..ef67dd1977
--- /dev/null
+++ b/package/kernel/mac80211/patches/subsys/352-mac80211-rework-locking-for-txq-scheduling-airtime-f.patch
@@ -0,0 +1,214 @@
+From: Felix Fietkau <nbd@nbd.name>
+Date: Wed, 13 Mar 2019 19:09:22 +0100
+Subject: [PATCH] mac80211: rework locking for txq scheduling / airtime
+ fairness
+
+Holding the lock around the entire duration of tx scheduling can create
+some nasty lock contention, especially when processing airtime information
+from the tx status or the rx path.
+Improve locking by only holding the active_txq_lock for lookups / scheduling
+list modifications.
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+
+--- a/include/net/mac80211.h
++++ b/include/net/mac80211.h
+@@ -6069,8 +6069,6 @@ struct sk_buff *ieee80211_tx_dequeue(str
+  * @hw: pointer as obtained from ieee80211_alloc_hw()
+  * @ac: AC number to return packets from.
+  *
+- * Should only be called between calls to ieee80211_txq_schedule_start()
+- * and ieee80211_txq_schedule_end().
+  * Returns the next txq if successful, %NULL if no queue is eligible. If a txq
+  * is returned, it should be returned with ieee80211_return_txq() after the
+  * driver has finished scheduling it.
+@@ -6078,51 +6076,41 @@ struct sk_buff *ieee80211_tx_dequeue(str
+ struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac);
+ 
+ /**
+- * ieee80211_return_txq - return a TXQ previously acquired by ieee80211_next_txq()
+- *
+- * @hw: pointer as obtained from ieee80211_alloc_hw()
+- * @txq: pointer obtained from station or virtual interface
+- *
+- * Should only be called between calls to ieee80211_txq_schedule_start()
+- * and ieee80211_txq_schedule_end().
+- */
+-void ieee80211_return_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
+-
+-/**
+- * ieee80211_txq_schedule_start - acquire locks for safe scheduling of an AC
++ * ieee80211_txq_schedule_start - start new scheduling round for TXQs
+  *
+  * @hw: pointer as obtained from ieee80211_alloc_hw()
+  * @ac: AC number to acquire locks for
+  *
+- * Acquire locks needed to schedule TXQs from the given AC. Should be called
+- * before ieee80211_next_txq() or ieee80211_return_txq().
++ * Should be called before ieee80211_next_txq() or ieee80211_return_txq().
+  */
+-void ieee80211_txq_schedule_start(struct ieee80211_hw *hw, u8 ac)
+-        __acquires(txq_lock);
++void ieee80211_txq_schedule_start(struct ieee80211_hw *hw, u8 ac);
++
++/* (deprecated) */
++static inline void ieee80211_txq_schedule_end(struct ieee80211_hw *hw, u8 ac)
++{
++}
+ 
+ /**
+- * ieee80211_txq_schedule_end - release locks for safe scheduling of an AC
++ * ieee80211_schedule_txq - schedule a TXQ for transmission
+  *
+  * @hw: pointer as obtained from ieee80211_alloc_hw()
+- * @ac: AC number to acquire locks for
++ * @txq: pointer obtained from station or virtual interface
+  *
+- * Release locks previously acquired by ieee80211_txq_schedule_end().
++ * Schedules a TXQ for transmission if it is not already scheduled.
+  */
+-void ieee80211_txq_schedule_end(struct ieee80211_hw *hw, u8 ac)
+-        __releases(txq_lock);
++void ieee80211_schedule_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
+ 
+ /**
+- * ieee80211_schedule_txq - schedule a TXQ for transmission
++ * ieee80211_return_txq - return a TXQ previously acquired by ieee80211_next_txq()
+  *
+  * @hw: pointer as obtained from ieee80211_alloc_hw()
+  * @txq: pointer obtained from station or virtual interface
+- *
+- * Schedules a TXQ for transmission if it is not already scheduled. Takes a
+- * lock, which means it must *not* be called between
+- * ieee80211_txq_schedule_start() and ieee80211_txq_schedule_end()
+  */
+-void ieee80211_schedule_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
+-        __acquires(txq_lock) __releases(txq_lock);
++static inline void
++ieee80211_return_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
++{
++        ieee80211_schedule_txq(hw, txq);
++}
+ 
+ /**
+  * ieee80211_txq_may_transmit - check whether TXQ is allowed to transmit
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -3619,16 +3619,17 @@ EXPORT_SYMBOL(ieee80211_tx_dequeue);
+ struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac)
+ {
+         struct ieee80211_local *local = hw_to_local(hw);
++        struct ieee80211_txq *ret = NULL;
+         struct txq_info *txqi = NULL;
+ 
+-        lockdep_assert_held(&local->active_txq_lock[ac]);
++        spin_lock_bh(&local->active_txq_lock[ac]);
+ 
+  begin:
+         txqi = list_first_entry_or_null(&local->active_txqs[ac],
+                                         struct txq_info,
+                                         schedule_order);
+         if (!txqi)
+-                return NULL;
++                goto out;
+ 
+         if (txqi->txq.sta) {
+                 struct sta_info *sta = container_of(txqi->txq.sta,
+@@ -3645,21 +3646,25 @@ struct ieee80211_txq *ieee80211_next_txq
+ 
+ 
+         if (txqi->schedule_round == local->schedule_round[ac])
+-                return NULL;
++                goto out;
+ 
+         list_del_init(&txqi->schedule_order);
+         txqi->schedule_round = local->schedule_round[ac];
+-        return &txqi->txq;
++        ret = &txqi->txq;
++
++out:
++        spin_unlock_bh(&local->active_txq_lock[ac]);
++        return ret;
+ }
+ EXPORT_SYMBOL(ieee80211_next_txq);
+ 
+-void ieee80211_return_txq(struct ieee80211_hw *hw,
+-                          struct ieee80211_txq *txq)
++void ieee80211_schedule_txq(struct ieee80211_hw *hw,
++                            struct ieee80211_txq *txq)
+ {
+         struct ieee80211_local *local = hw_to_local(hw);
+         struct txq_info *txqi = to_txq_info(txq);
+ 
+-        lockdep_assert_held(&local->active_txq_lock[txq->ac]);
++        spin_lock_bh(&local->active_txq_lock[txq->ac]);
+ 
+         if (list_empty(&txqi->schedule_order) &&
+             (!skb_queue_empty(&txqi->frags) || txqi->tin.backlog_packets)) {
+@@ -3679,18 +3684,7 @@ void ieee80211_return_txq(struct ieee802
+                 list_add_tail(&txqi->schedule_order,
+                               &local->active_txqs[txq->ac]);
+         }
+-}
+-EXPORT_SYMBOL(ieee80211_return_txq);
+ 
+-void ieee80211_schedule_txq(struct ieee80211_hw *hw,
+-                            struct ieee80211_txq *txq)
+-        __acquires(txq_lock) __releases(txq_lock)
+-{
+-        struct ieee80211_local *local = hw_to_local(hw);
+-        struct txq_info *txqi = to_txq_info(txq);
+-
+-        spin_lock_bh(&local->active_txq_lock[txq->ac]);
+-        ieee80211_return_txq(hw, txq);
+         spin_unlock_bh(&local->active_txq_lock[txq->ac]);
+ }
+ EXPORT_SYMBOL(ieee80211_schedule_txq);
+@@ -3703,7 +3697,7 @@ bool ieee80211_txq_may_transmit(struct i
+         struct sta_info *sta;
+         u8 ac = txq->ac;
+ 
+-        lockdep_assert_held(&local->active_txq_lock[ac]);
++        spin_lock_bh(&local->active_txq_lock[ac]);
+ 
+         if (!txqi->txq.sta)
+                 goto out;
+@@ -3733,34 +3727,27 @@ bool ieee80211_txq_may_transmit(struct i
+ 
+         sta->airtime[ac].deficit += sta->airtime_weight;
+ 
+         list_move_tail(&txqi->schedule_order, &local->active_txqs[ac]);
++        spin_unlock_bh(&local->active_txq_lock[ac]);
+ 
+         return false;
+ out:
+         if (!list_empty(&txqi->schedule_order))
+                 list_del_init(&txqi->schedule_order);
++        spin_unlock_bh(&local->active_txq_lock[ac]);
+ 
+         return true;
+ }
+ EXPORT_SYMBOL(ieee80211_txq_may_transmit);
+ 
+ void ieee80211_txq_schedule_start(struct ieee80211_hw *hw, u8 ac)
+-        __acquires(txq_lock)
+ {
+         struct ieee80211_local *local = hw_to_local(hw);
+ 
+         spin_lock_bh(&local->active_txq_lock[ac]);
+         local->schedule_round[ac]++;
+-}
+-EXPORT_SYMBOL(ieee80211_txq_schedule_start);
+-
+-void ieee80211_txq_schedule_end(struct ieee80211_hw *hw, u8 ac)
+-        __releases(txq_lock)
+-{
+-        struct ieee80211_local *local = hw_to_local(hw);
+-
+         spin_unlock_bh(&local->active_txq_lock[ac]);
+ }
+-EXPORT_SYMBOL(ieee80211_txq_schedule_end);
++EXPORT_SYMBOL(ieee80211_txq_schedule_start);
+ 
+ void __ieee80211_subif_start_xmit(struct sk_buff *skb,
+                                   struct net_device *dev,
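For context, a driver's TX scheduling path would use the reworked API roughly as in the sketch below. This is a minimal illustration, not code from this commit: the mydrv_* names are placeholders, and a real driver would add its own hardware-queue checks (or use ieee80211_txq_may_transmit(), which now also takes the lock internally). The point it shows is that after this patch, ieee80211_next_txq() and ieee80211_return_txq() acquire active_txq_lock only for the lookup / list update, so the driver no longer holds it across the whole scheduling round.

#include <net/mac80211.h>

/* Placeholder: a real driver would map and hand the frame to hardware here. */
static void mydrv_tx_frame(struct ieee80211_hw *hw, struct ieee80211_txq *txq,
                           struct sk_buff *skb)
{
        ieee80211_free_txskb(hw, skb);  /* stand-in so the sketch is complete */
}

/* One scheduling round for a single AC, sketched against the reworked API. */
static void mydrv_tx_schedule_ac(struct ieee80211_hw *hw, u8 ac)
{
        struct ieee80211_txq *txq;
        struct sk_buff *skb;

        /* With this patch, this only bumps schedule_round under the lock
         * and releases it again. */
        ieee80211_txq_schedule_start(hw, ac);

        /* next_txq() now locks/unlocks active_txq_lock internally, so
         * airtime updates from the tx-status or rx path no longer contend
         * with the whole loop below. */
        while ((txq = ieee80211_next_txq(hw, ac)) != NULL) {
                while ((skb = ieee80211_tx_dequeue(hw, txq)) != NULL)
                        mydrv_tx_frame(hw, txq, skb);

                /* return_txq() is now an inline wrapper around
                 * ieee80211_schedule_txq(): it re-queues the txq if it
                 * still has backlog. */
                ieee80211_return_txq(hw, txq);
        }

        /* Kept as a no-op inline for API compatibility. */
        ieee80211_txq_schedule_end(hw, ac);
}

The same contention relief applies to drivers built around ieee80211_txq_may_transmit() instead of the next_txq() loop, since that function also takes and releases active_txq_lock internally after this change.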