Diffstat (limited to 'package'): 8 files changed, 1695 insertions, 3 deletions
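The seven 33x patches below replace mac80211's virtual-time airtime scheduler with the older deficit round-robin (DRR) scheme: each station carries a per-AC deficit that is charged with the airtime it used and, once negative, is topped up by the station's weight while its queue rotates to the back of the schedule. As a rough orientation before the full diffs, here is a minimal user-space sketch of that principle; struct txq, sched_next() and report_airtime() are illustrative stand-ins rather than mac80211 API — the real logic is ieee80211_next_txq() and ieee80211_sta_register_airtime() in the 330 patch, which additionally handle locking, AQL checks and the schedule_round bookkeeping.

/*
 * Illustrative sketch only (not part of the patches): a minimal model of
 * deficit round-robin airtime scheduling over a few station queues.
 */
#include <stdio.h>

#define NUM_TXQ 3

struct txq {
	const char *name;
	int weight;   /* replenish quantum; mac80211's default airtime weight is 256 */
	int deficit;  /* charged with used airtime, may go negative */
	int backlog;  /* packets still queued */
};

/*
 * Return the queue that may transmit next.  The current head keeps
 * transmitting while its deficit is non-negative; once it goes negative it
 * receives one quantum (weight) and the rotation moves on -- the same
 * principle as ieee80211_next_txq() in the 330 patch, minus locking and AQL.
 */
static struct txq *sched_next(struct txq *q, int n, int *head)
{
	int backlog = 0;

	for (int i = 0; i < n; i++)
		backlog += q[i].backlog;
	if (!backlog)
		return NULL;

	for (;;) {
		struct txq *t = &q[*head];

		if (t->backlog && t->deficit >= 0)
			return t;
		if (t->backlog)
			t->deficit += t->weight;  /* replenish on rotation */
		*head = (*head + 1) % n;          /* move to the next queue */
	}
}

/* Charge the airtime a transmission used, as ieee80211_sta_register_airtime() does. */
static void report_airtime(struct txq *t, int airtime)
{
	t->deficit -= airtime;
	t->backlog--;
}

int main(void)
{
	struct txq q[NUM_TXQ] = {
		{ "sta0 (fast)", 256, 0, 6 },
		{ "sta1 (slow)", 256, 0, 6 },
		{ "sta2 (slow)", 256, 0, 6 },
	};
	int head = 0;
	struct txq *t;

	while ((t = sched_next(q, NUM_TXQ, &head))) {
		/* assume the slow stations need three times the airtime per packet */
		int airtime = (t == &q[0]) ? 100 : 300;

		printf("%-12s tx %3d us, deficit now %4d\n",
		       t->name, airtime, t->deficit - airtime);
		report_airtime(t, airtime);
	}
	return 0;
}

With equal weights and the slow stations needing three times the airtime per packet, the sketch's output shows sta0 sending roughly three packets for every one from sta1/sta2 while all three queues are backlogged — equal airtime rather than equal packet counts, which is the fairness property the DRR scheduler restored by these patches enforces.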
diff --git a/package/kernel/mac80211/patches/subsys/330-mac80211-switch-airtime-fairness-back-to-deficit-rou.patch b/package/kernel/mac80211/patches/subsys/330-mac80211-switch-airtime-fairness-back-to-deficit-rou.patch new file mode 100644 index 0000000000..f7ed81cd9a --- /dev/null +++ b/package/kernel/mac80211/patches/subsys/330-mac80211-switch-airtime-fairness-back-to-deficit-rou.patch @@ -0,0 +1,1249 @@ +From: Felix Fietkau <nbd@nbd.name> +Date: Sun, 19 Jun 2022 23:13:05 +0200 +Subject: [PATCH] mac80211: switch airtime fairness back to deficit round-robin + scheduling + +This reverts commits 6a789ba679d652587532cec2a0e0274fda172f3b and +2433647bc8d983a543e7d31b41ca2de1c7e2c198. + +The virtual time scheduler code has a number of issues: +- queues slowed down by hardware/firmware powersave handling were not properly + handled. +- on ath10k in push-pull mode, tx queues that the driver tries to pull from + were starved, causing excessive latency +- delay between tx enqueue and reported airtime use were causing excessively + bursty tx behavior + +The bursty behavior may also be present on the round-robin scheduler, but there +it is much easier to fix without introducing additional regressions + +Signed-off-by: Felix Fietkau <nbd@nbd.name> +--- + +--- a/include/net/mac80211.h ++++ b/include/net/mac80211.h +@@ -6666,6 +6666,9 @@ static inline void ieee80211_txq_schedul + { + } + ++void __ieee80211_schedule_txq(struct ieee80211_hw *hw, ++ struct ieee80211_txq *txq, bool force); ++ + /** + * ieee80211_schedule_txq - schedule a TXQ for transmission + * +@@ -6678,7 +6681,11 @@ static inline void ieee80211_txq_schedul + * The driver may call this function if it has buffered packets for + * this TXQ internally. + */ +-void ieee80211_schedule_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq); ++static inline void ++ieee80211_schedule_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq) ++{ ++ __ieee80211_schedule_txq(hw, txq, true); ++} + + /** + * ieee80211_return_txq - return a TXQ previously acquired by ieee80211_next_txq() +@@ -6690,8 +6697,12 @@ void ieee80211_schedule_txq(struct ieee8 + * The driver may set force=true if it has buffered packets for this TXQ + * internally. 
+ */ +-void ieee80211_return_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq, +- bool force); ++static inline void ++ieee80211_return_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq, ++ bool force) ++{ ++ __ieee80211_schedule_txq(hw, txq, force); ++} + + /** + * ieee80211_txq_may_transmit - check whether TXQ is allowed to transmit +--- a/net/mac80211/cfg.c ++++ b/net/mac80211/cfg.c +@@ -1554,38 +1554,6 @@ static void sta_apply_mesh_params(struct + #endif + } + +-static void sta_apply_airtime_params(struct ieee80211_local *local, +- struct sta_info *sta, +- struct station_parameters *params) +-{ +- u8 ac; +- +- for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { +- struct airtime_sched_info *air_sched = &local->airtime[ac]; +- struct airtime_info *air_info = &sta->airtime[ac]; +- struct txq_info *txqi; +- u8 tid; +- +- spin_lock_bh(&air_sched->lock); +- for (tid = 0; tid < IEEE80211_NUM_TIDS + 1; tid++) { +- if (air_info->weight == params->airtime_weight || +- !sta->sta.txq[tid] || +- ac != ieee80211_ac_from_tid(tid)) +- continue; +- +- airtime_weight_set(air_info, params->airtime_weight); +- +- txqi = to_txq_info(sta->sta.txq[tid]); +- if (RB_EMPTY_NODE(&txqi->schedule_order)) +- continue; +- +- ieee80211_update_airtime_weight(local, air_sched, +- 0, true); +- } +- spin_unlock_bh(&air_sched->lock); +- } +-} +- + static int sta_apply_parameters(struct ieee80211_local *local, + struct sta_info *sta, + struct station_parameters *params) +@@ -1773,8 +1741,7 @@ static int sta_apply_parameters(struct i + sta_apply_mesh_params(local, sta, params); + + if (params->airtime_weight) +- sta_apply_airtime_params(local, sta, params); +- ++ sta->airtime_weight = params->airtime_weight; + + /* set the STA state after all sta info from usermode has been set */ + if (test_sta_flag(sta, WLAN_STA_TDLS_PEER) || +--- a/net/mac80211/debugfs.c ++++ b/net/mac80211/debugfs.c +@@ -216,14 +216,14 @@ static ssize_t aql_txq_limit_read(struct + "VI %u %u\n" + "BE %u %u\n" + "BK %u %u\n", +- local->airtime[IEEE80211_AC_VO].aql_txq_limit_low, +- local->airtime[IEEE80211_AC_VO].aql_txq_limit_high, +- local->airtime[IEEE80211_AC_VI].aql_txq_limit_low, +- local->airtime[IEEE80211_AC_VI].aql_txq_limit_high, +- local->airtime[IEEE80211_AC_BE].aql_txq_limit_low, +- local->airtime[IEEE80211_AC_BE].aql_txq_limit_high, +- local->airtime[IEEE80211_AC_BK].aql_txq_limit_low, +- local->airtime[IEEE80211_AC_BK].aql_txq_limit_high); ++ local->aql_txq_limit_low[IEEE80211_AC_VO], ++ local->aql_txq_limit_high[IEEE80211_AC_VO], ++ local->aql_txq_limit_low[IEEE80211_AC_VI], ++ local->aql_txq_limit_high[IEEE80211_AC_VI], ++ local->aql_txq_limit_low[IEEE80211_AC_BE], ++ local->aql_txq_limit_high[IEEE80211_AC_BE], ++ local->aql_txq_limit_low[IEEE80211_AC_BK], ++ local->aql_txq_limit_high[IEEE80211_AC_BK]); + return simple_read_from_buffer(user_buf, count, ppos, + buf, len); + } +@@ -255,11 +255,11 @@ static ssize_t aql_txq_limit_write(struc + if (ac >= IEEE80211_NUM_ACS) + return -EINVAL; + +- q_limit_low_old = local->airtime[ac].aql_txq_limit_low; +- q_limit_high_old = local->airtime[ac].aql_txq_limit_high; ++ q_limit_low_old = local->aql_txq_limit_low[ac]; ++ q_limit_high_old = local->aql_txq_limit_high[ac]; + +- local->airtime[ac].aql_txq_limit_low = q_limit_low; +- local->airtime[ac].aql_txq_limit_high = q_limit_high; ++ local->aql_txq_limit_low[ac] = q_limit_low; ++ local->aql_txq_limit_high[ac] = q_limit_high; + + mutex_lock(&local->sta_mtx); + list_for_each_entry(sta, &local->sta_list, list) { +@@ -382,46 +382,6 @@ static const 
struct file_operations forc + .llseek = default_llseek, + }; + +-static ssize_t airtime_read(struct file *file, +- char __user *user_buf, +- size_t count, +- loff_t *ppos) +-{ +- struct ieee80211_local *local = file->private_data; +- char buf[200]; +- u64 v_t[IEEE80211_NUM_ACS]; +- u64 wt[IEEE80211_NUM_ACS]; +- int len = 0, ac; +- +- for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { +- spin_lock_bh(&local->airtime[ac].lock); +- v_t[ac] = local->airtime[ac].v_t; +- wt[ac] = local->airtime[ac].weight_sum; +- spin_unlock_bh(&local->airtime[ac].lock); +- } +- len = scnprintf(buf, sizeof(buf), +- "\tVO VI BE BK\n" +- "Virt-t\t%-10llu %-10llu %-10llu %-10llu\n" +- "Weight\t%-10llu %-10llu %-10llu %-10llu\n", +- v_t[0], +- v_t[1], +- v_t[2], +- v_t[3], +- wt[0], +- wt[1], +- wt[2], +- wt[3]); +- +- return simple_read_from_buffer(user_buf, count, ppos, +- buf, len); +-} +- +-static const struct file_operations airtime_ops = { +- .read = airtime_read, +- .open = simple_open, +- .llseek = default_llseek, +-}; +- + #ifdef CONFIG_PM + static ssize_t reset_write(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +@@ -672,11 +632,7 @@ void debugfs_hw_add(struct ieee80211_loc + if (local->ops->wake_tx_queue) + DEBUGFS_ADD_MODE(aqm, 0600); + +- if (wiphy_ext_feature_isset(local->hw.wiphy, +- NL80211_EXT_FEATURE_AIRTIME_FAIRNESS)) { +- DEBUGFS_ADD_MODE(airtime, 0600); +- DEBUGFS_ADD_MODE(airtime_flags, 0600); +- } ++ DEBUGFS_ADD_MODE(airtime_flags, 0600); + + DEBUGFS_ADD(aql_txq_limit); + debugfs_create_u32("aql_threshold", 0600, +--- a/net/mac80211/debugfs_netdev.c ++++ b/net/mac80211/debugfs_netdev.c +@@ -512,34 +512,6 @@ static ssize_t ieee80211_if_fmt_aqm( + } + IEEE80211_IF_FILE_R(aqm); + +-static ssize_t ieee80211_if_fmt_airtime( +- const struct ieee80211_sub_if_data *sdata, char *buf, int buflen) +-{ +- struct ieee80211_local *local = sdata->local; +- struct ieee80211_txq *txq = sdata->vif.txq; +- struct airtime_info *air_info; +- int len; +- +- if (!txq) +- return 0; +- +- spin_lock_bh(&local->airtime[txq->ac].lock); +- air_info = to_airtime_info(txq); +- len = scnprintf(buf, +- buflen, +- "RX: %llu us\nTX: %llu us\nWeight: %u\n" +- "Virt-T: %lld us\n", +- air_info->rx_airtime, +- air_info->tx_airtime, +- air_info->weight, +- air_info->v_t); +- spin_unlock_bh(&local->airtime[txq->ac].lock); +- +- return len; +-} +- +-IEEE80211_IF_FILE_R(airtime); +- + IEEE80211_IF_FILE(multicast_to_unicast, u.ap.multicast_to_unicast, HEX); + + /* IBSS attributes */ +@@ -685,10 +657,8 @@ static void add_common_files(struct ieee + + if (sdata->local->ops->wake_tx_queue && + sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE && +- sdata->vif.type != NL80211_IFTYPE_NAN) { ++ sdata->vif.type != NL80211_IFTYPE_NAN) + DEBUGFS_ADD(aqm); +- DEBUGFS_ADD(airtime); +- } + } + + static void add_sta_files(struct ieee80211_sub_if_data *sdata) +--- a/net/mac80211/debugfs_sta.c ++++ b/net/mac80211/debugfs_sta.c +@@ -202,7 +202,7 @@ static ssize_t sta_airtime_read(struct f + size_t bufsz = 400; + char *buf = kzalloc(bufsz, GFP_KERNEL), *p = buf; + u64 rx_airtime = 0, tx_airtime = 0; +- u64 v_t[IEEE80211_NUM_ACS]; ++ s64 deficit[IEEE80211_NUM_ACS]; + ssize_t rv; + int ac; + +@@ -210,18 +210,18 @@ static ssize_t sta_airtime_read(struct f + return -ENOMEM; + + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { +- spin_lock_bh(&local->airtime[ac].lock); ++ spin_lock_bh(&local->active_txq_lock[ac]); + rx_airtime += sta->airtime[ac].rx_airtime; + tx_airtime += sta->airtime[ac].tx_airtime; +- v_t[ac] = sta->airtime[ac].v_t; +- 
spin_unlock_bh(&local->airtime[ac].lock); ++ deficit[ac] = sta->airtime[ac].deficit; ++ spin_unlock_bh(&local->active_txq_lock[ac]); + } + + p += scnprintf(p, bufsz + buf - p, + "RX: %llu us\nTX: %llu us\nWeight: %u\n" +- "Virt-T: VO: %lld us VI: %lld us BE: %lld us BK: %lld us\n", +- rx_airtime, tx_airtime, sta->airtime[0].weight, +- v_t[0], v_t[1], v_t[2], v_t[3]); ++ "Deficit: VO: %lld us VI: %lld us BE: %lld us BK: %lld us\n", ++ rx_airtime, tx_airtime, sta->airtime_weight, ++ deficit[0], deficit[1], deficit[2], deficit[3]); + + rv = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf); + kfree(buf); +@@ -236,11 +236,11 @@ static ssize_t sta_airtime_write(struct + int ac; + + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { +- spin_lock_bh(&local->airtime[ac].lock); ++ spin_lock_bh(&local->active_txq_lock[ac]); + sta->airtime[ac].rx_airtime = 0; + sta->airtime[ac].tx_airtime = 0; +- sta->airtime[ac].v_t = 0; +- spin_unlock_bh(&local->airtime[ac].lock); ++ sta->airtime[ac].deficit = sta->airtime_weight; ++ spin_unlock_bh(&local->active_txq_lock[ac]); + } + + return count; +@@ -263,10 +263,10 @@ static ssize_t sta_aql_read(struct file + return -ENOMEM; + + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { +- spin_lock_bh(&local->airtime[ac].lock); ++ spin_lock_bh(&local->active_txq_lock[ac]); + q_limit_l[ac] = sta->airtime[ac].aql_limit_low; + q_limit_h[ac] = sta->airtime[ac].aql_limit_high; +- spin_unlock_bh(&local->airtime[ac].lock); ++ spin_unlock_bh(&local->active_txq_lock[ac]); + q_depth[ac] = atomic_read(&sta->airtime[ac].aql_tx_pending); + } + +--- a/net/mac80211/ieee80211_i.h ++++ b/net/mac80211/ieee80211_i.h +@@ -863,16 +863,20 @@ enum txq_info_flags { + * @def_flow: used as a fallback flow when a packet destined to @tin hashes to + * a fq_flow which is already owned by a different tin + * @def_cvars: codel vars for @def_flow +- * @schedule_order: used with ieee80211_local->active_txqs + * @frags: used to keep fragments created after dequeue ++ * @schedule_order: used with ieee80211_local->active_txqs ++ * @schedule_round: counter to prevent infinite loops on TXQ scheduling + */ + struct txq_info { + struct fq_tin tin; + struct codel_vars def_cvars; + struct codel_stats cstats; +- struct rb_node schedule_order; ++ ++ u16 schedule_round; ++ struct list_head schedule_order; + + struct sk_buff_head frags; ++ + unsigned long flags; + + /* keep last! 
*/ +@@ -949,8 +953,6 @@ struct ieee80211_sub_if_data { + struct ieee80211_tx_queue_params tx_conf[IEEE80211_NUM_ACS]; + struct mac80211_qos_map __rcu *qos_map; + +- struct airtime_info airtime[IEEE80211_NUM_ACS]; +- + struct work_struct csa_finalize_work; + bool csa_block_tx; /* write-protected by sdata_lock and local->mtx */ + struct cfg80211_chan_def csa_chandef; +@@ -1180,44 +1182,6 @@ enum mac80211_scan_state { + SCAN_ABORT, + }; + +-/** +- * struct airtime_sched_info - state used for airtime scheduling and AQL +- * +- * @lock: spinlock that protects all the fields in this struct +- * @active_txqs: rbtree of currently backlogged queues, sorted by virtual time +- * @schedule_pos: the current position maintained while a driver walks the tree +- * with ieee80211_next_txq() +- * @active_list: list of struct airtime_info structs that were active within +- * the last AIRTIME_ACTIVE_DURATION (100 ms), used to compute +- * weight_sum +- * @last_weight_update: used for rate limiting walking active_list +- * @last_schedule_time: tracks the last time a transmission was scheduled; used +- * for catching up v_t if no stations are eligible for +- * transmission. +- * @v_t: global virtual time; queues with v_t < this are eligible for +- * transmission +- * @weight_sum: total sum of all active stations used for dividing airtime +- * @weight_sum_reciprocal: reciprocal of weight_sum (to avoid divisions in fast +- * path - see comment above +- * IEEE80211_RECIPROCAL_DIVISOR_64) +- * @aql_txq_limit_low: AQL limit when total outstanding airtime +- * is < IEEE80211_AQL_THRESHOLD +- * @aql_txq_limit_high: AQL limit when total outstanding airtime +- * is > IEEE80211_AQL_THRESHOLD +- */ +-struct airtime_sched_info { +- spinlock_t lock; +- struct rb_root_cached active_txqs; +- struct rb_node *schedule_pos; +- struct list_head active_list; +- u64 last_weight_update; +- u64 last_schedule_activity; +- u64 v_t; +- u64 weight_sum; +- u64 weight_sum_reciprocal; +- u32 aql_txq_limit_low; +- u32 aql_txq_limit_high; +-}; + DECLARE_STATIC_KEY_FALSE(aql_disable); + + struct ieee80211_local { +@@ -1231,8 +1195,13 @@ struct ieee80211_local { + struct codel_params cparams; + + /* protects active_txqs and txqi->schedule_order */ +- struct airtime_sched_info airtime[IEEE80211_NUM_ACS]; ++ spinlock_t active_txq_lock[IEEE80211_NUM_ACS]; ++ struct list_head active_txqs[IEEE80211_NUM_ACS]; ++ u16 schedule_round[IEEE80211_NUM_ACS]; ++ + u16 airtime_flags; ++ u32 aql_txq_limit_low[IEEE80211_NUM_ACS]; ++ u32 aql_txq_limit_high[IEEE80211_NUM_ACS]; + u32 aql_threshold; + atomic_t aql_total_pending_airtime; + +@@ -1649,125 +1618,6 @@ static inline bool txq_has_queue(struct + return !(skb_queue_empty(&txqi->frags) && !txqi->tin.backlog_packets); + } + +-static inline struct airtime_info *to_airtime_info(struct ieee80211_txq *txq) +-{ +- struct ieee80211_sub_if_data *sdata; +- struct sta_info *sta; +- +- if (txq->sta) { +- sta = container_of(txq->sta, struct sta_info, sta); +- return &sta->airtime[txq->ac]; +- } +- +- sdata = vif_to_sdata(txq->vif); +- return &sdata->airtime[txq->ac]; +-} +- +-/* To avoid divisions in the fast path, we keep pre-computed reciprocals for +- * airtime weight calculations. There are two different weights to keep track +- * of: The per-station weight and the sum of weights per phy. +- * +- * For the per-station weights (kept in airtime_info below), we use 32-bit +- * reciprocals with a devisor of 2^19. 
This lets us keep the multiplications and +- * divisions for the station weights as 32-bit operations at the cost of a bit +- * of rounding error for high weights; but the choice of divisor keeps rounding +- * errors <10% for weights <2^15, assuming no more than 8ms of airtime is +- * reported at a time. +- * +- * For the per-phy sum of weights the values can get higher, so we use 64-bit +- * operations for those with a 32-bit divisor, which should avoid any +- * significant rounding errors. +- */ +-#define IEEE80211_RECIPROCAL_DIVISOR_64 0x100000000ULL +-#define IEEE80211_RECIPROCAL_SHIFT_64 32 +-#define IEEE80211_RECIPROCAL_DIVISOR_32 0x80000U +-#define IEEE80211_RECIPROCAL_SHIFT_32 19 +- +-static inline void airtime_weight_set(struct airtime_info *air_info, u16 weight) +-{ +- if (air_info->weight == weight) +- return; +- +- air_info->weight = weight; +- if (weight) { +- air_info->weight_reciprocal = +- IEEE80211_RECIPROCAL_DIVISOR_32 / weight; +- } else { +- air_info->weight_reciprocal = 0; +- } +-} +- +-static inline void airtime_weight_sum_set(struct airtime_sched_info *air_sched, +- int weight_sum) +-{ +- if (air_sched->weight_sum == weight_sum) +- return; +- +- air_sched->weight_sum = weight_sum; +- if (air_sched->weight_sum) { +- air_sched->weight_sum_reciprocal = IEEE80211_RECIPROCAL_DIVISOR_64; +- do_div(air_sched->weight_sum_reciprocal, air_sched->weight_sum); +- } else { +- air_sched->weight_sum_reciprocal = 0; +- } +-} +- +-/* A problem when trying to enforce airtime fairness is that we want to divide +- * the airtime between the currently *active* stations. However, basing this on +- * the instantaneous queue state of stations doesn't work, as queues tend to +- * oscillate very quickly between empty and occupied, leading to the scheduler +- * thinking only a single station is active when deciding whether to allow +- * transmission (and thus not throttling correctly). +- * +- * To fix this we use a timer-based notion of activity: a station is considered +- * active if it has been scheduled within the last 100 ms; we keep a separate +- * list of all the stations considered active in this manner, and lazily update +- * the total weight of active stations from this list (filtering the stations in +- * the list by their 'last active' time). +- * +- * We add one additional safeguard to guard against stations that manage to get +- * scheduled every 100 ms but don't transmit a lot of data, and thus don't use +- * up any airtime. Such stations would be able to get priority for an extended +- * period of time if they do start transmitting at full capacity again, and so +- * we add an explicit maximum for how far behind a station is allowed to fall in +- * the virtual airtime domain. This limit is set to a relatively high value of +- * 20 ms because the main mechanism for catching up idle stations is the active +- * state as described above; i.e., the hard limit should only be hit in +- * pathological cases. 
+- */ +-#define AIRTIME_ACTIVE_DURATION (100 * NSEC_PER_MSEC) +-#define AIRTIME_MAX_BEHIND 20000 /* 20 ms */ +- +-static inline bool airtime_is_active(struct airtime_info *air_info, u64 now) +-{ +- return air_info->last_scheduled >= now - AIRTIME_ACTIVE_DURATION; +-} +- +-static inline void airtime_set_active(struct airtime_sched_info *air_sched, +- struct airtime_info *air_info, u64 now) +-{ +- air_info->last_scheduled = now; +- air_sched->last_schedule_activity = now; +- list_move_tail(&air_info->list, &air_sched->active_list); +-} +- +-static inline bool airtime_catchup_v_t(struct airtime_sched_info *air_sched, +- u64 v_t, u64 now) +-{ +- air_sched->v_t = v_t; +- return true; +-} +- +-static inline void init_airtime_info(struct airtime_info *air_info, +- struct airtime_sched_info *air_sched) +-{ +- atomic_set(&air_info->aql_tx_pending, 0); +- air_info->aql_limit_low = air_sched->aql_txq_limit_low; +- air_info->aql_limit_high = air_sched->aql_txq_limit_high; +- airtime_weight_set(air_info, IEEE80211_DEFAULT_AIRTIME_WEIGHT); +- INIT_LIST_HEAD(&air_info->list); +-} +- + static inline int ieee80211_bssid_match(const u8 *raddr, const u8 *addr) + { + return ether_addr_equal(raddr, addr) || +@@ -2013,14 +1863,6 @@ int ieee80211_tx_control_port(struct wip + u64 *cookie); + int ieee80211_probe_mesh_link(struct wiphy *wiphy, struct net_device *dev, + const u8 *buf, size_t len); +-void ieee80211_resort_txq(struct ieee80211_hw *hw, +- struct ieee80211_txq *txq); +-void ieee80211_unschedule_txq(struct ieee80211_hw *hw, +- struct ieee80211_txq *txq, +- bool purge); +-void ieee80211_update_airtime_weight(struct ieee80211_local *local, +- struct airtime_sched_info *air_sched, +- u64 now, bool force); + + /* HT */ + void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata, +--- a/net/mac80211/iface.c ++++ b/net/mac80211/iface.c +@@ -2190,9 +2190,6 @@ int ieee80211_if_add(struct ieee80211_lo + } + } + +- for (i = 0; i < IEEE80211_NUM_ACS; i++) +- init_airtime_info(&sdata->airtime[i], &local->airtime[i]); +- + ieee80211_set_default_queues(sdata); + + sdata->ap_power_level = IEEE80211_UNSET_POWER_LEVEL; +--- a/net/mac80211/main.c ++++ b/net/mac80211/main.c +@@ -707,13 +707,10 @@ struct ieee80211_hw *ieee80211_alloc_hw_ + spin_lock_init(&local->queue_stop_reason_lock); + + for (i = 0; i < IEEE80211_NUM_ACS; i++) { +- struct airtime_sched_info *air_sched = &local->airtime[i]; +- +- air_sched->active_txqs = RB_ROOT_CACHED; +- INIT_LIST_HEAD(&air_sched->active_list); +- spin_lock_init(&air_sched->lock); +- air_sched->aql_txq_limit_low = IEEE80211_DEFAULT_AQL_TXQ_LIMIT_L; +- air_sched->aql_txq_limit_high = ++ INIT_LIST_HEAD(&local->active_txqs[i]); ++ spin_lock_init(&local->active_txq_lock[i]); ++ local->aql_txq_limit_low[i] = IEEE80211_DEFAULT_AQL_TXQ_LIMIT_L; ++ local->aql_txq_limit_high[i] = + IEEE80211_DEFAULT_AQL_TXQ_LIMIT_H; + } + +--- a/net/mac80211/rx.c ++++ b/net/mac80211/rx.c +@@ -1584,8 +1584,12 @@ static void sta_ps_start(struct sta_info + + for (tid = 0; tid < IEEE80211_NUM_TIDS; tid++) { + struct ieee80211_txq *txq = sta->sta.txq[tid]; ++ struct txq_info *txqi = to_txq_info(txq); + +- ieee80211_unschedule_txq(&local->hw, txq, false); ++ spin_lock(&local->active_txq_lock[txq->ac]); ++ if (!list_empty(&txqi->schedule_order)) ++ list_del_init(&txqi->schedule_order); ++ spin_unlock(&local->active_txq_lock[txq->ac]); + + if (txq_has_queue(txq)) + set_bit(tid, &sta->txq_buffered_tids); +--- a/net/mac80211/sta_info.c ++++ b/net/mac80211/sta_info.c +@@ -426,11 +426,15 @@ struct sta_info 
*sta_info_alloc(struct i + if (sta_prepare_rate_control(local, sta, gfp)) + goto free_txq; + ++ sta->airtime_weight = IEEE80211_DEFAULT_AIRTIME_WEIGHT; + + for (i = 0; i < IEEE80211_NUM_ACS; i++) { + skb_queue_head_init(&sta->ps_tx_buf[i]); + skb_queue_head_init(&sta->tx_filtered[i]); +- init_airtime_info(&sta->airtime[i], &local->airtime[i]); ++ sta->airtime[i].deficit = sta->airtime_weight; ++ atomic_set(&sta->airtime[i].aql_tx_pending, 0); ++ sta->airtime[i].aql_limit_low = local->aql_txq_limit_low[i]; ++ sta->airtime[i].aql_limit_high = local->aql_txq_limit_high[i]; + } + + for (i = 0; i < IEEE80211_NUM_TIDS; i++) +@@ -1889,59 +1893,24 @@ void ieee80211_sta_set_buffered(struct i + } + EXPORT_SYMBOL(ieee80211_sta_set_buffered); + +-void ieee80211_register_airtime(struct ieee80211_txq *txq, +- u32 tx_airtime, u32 rx_airtime) ++void ieee80211_sta_register_airtime(struct ieee80211_sta *pubsta, u8 tid, ++ u32 tx_airtime, u32 rx_airtime) + { +- struct ieee80211_sub_if_data *sdata = vif_to_sdata(txq->vif); +- struct ieee80211_local *local = sdata->local; +- u64 weight_sum, weight_sum_reciprocal; +- struct airtime_sched_info *air_sched; +- struct airtime_info *air_info; ++ struct sta_info *sta = container_of(pubsta, struct sta_info, sta); ++ struct ieee80211_local *local = sta->sdata->local; ++ u8 ac = ieee80211_ac_from_tid(tid); + u32 airtime = 0; + +- air_sched = &local->airtime[txq->ac]; +- air_info = to_airtime_info(txq); +- +- if (local->airtime_flags & AIRTIME_USE_TX) ++ if (sta->local->airtime_flags & AIRTIME_USE_TX) + airtime += tx_airtime; +- if (local->airtime_flags & AIRTIME_USE_RX) ++ if (sta->local->airtime_flags & AIRTIME_USE_RX) + airtime += rx_airtime; + +- /* Weights scale so the unit weight is 256 */ +- airtime <<= 8; +- +- spin_lock_bh(&air_sched->lock); +- +- air_info->tx_airtime += tx_airtime; +- air_info->rx_airtime += rx_airtime; +- +- if (air_sched->weight_sum) { +- weight_sum = air_sched->weight_sum; +- weight_sum_reciprocal = air_sched->weight_sum_reciprocal; +- } else { +- weight_sum = air_info->weight; +- weight_sum_reciprocal = air_info->weight_reciprocal; +- } +- +- /* Round the calculation of global vt */ +- air_sched->v_t += (u64)((airtime + (weight_sum >> 1)) * +- weight_sum_reciprocal) >> IEEE80211_RECIPROCAL_SHIFT_64; +- air_info->v_t += (u32)((airtime + (air_info->weight >> 1)) * +- air_info->weight_reciprocal) >> IEEE80211_RECIPROCAL_SHIFT_32; +- ieee80211_resort_txq(&local->hw, txq); +- +- spin_unlock_bh(&air_sched->lock); +-} +- +-void ieee80211_sta_register_airtime(struct ieee80211_sta *pubsta, u8 tid, +- u32 tx_airtime, u32 rx_airtime) +-{ +- struct ieee80211_txq *txq = pubsta->txq[tid]; +- +- if (!txq) +- return; +- +- ieee80211_register_airtime(txq, tx_airtime, rx_airtime); ++ spin_lock_bh(&local->active_txq_lock[ac]); ++ sta->airtime[ac].tx_airtime += tx_airtime; ++ sta->airtime[ac].rx_airtime += rx_airtime; ++ sta->airtime[ac].deficit -= airtime; ++ spin_unlock_bh(&local->active_txq_lock[ac]); + } + EXPORT_SYMBOL(ieee80211_sta_register_airtime); + +@@ -2385,7 +2354,7 @@ void sta_set_sinfo(struct sta_info *sta, + } + + if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_AIRTIME_WEIGHT))) { +- sinfo->airtime_weight = sta->airtime[0].weight; ++ sinfo->airtime_weight = sta->airtime_weight; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_AIRTIME_WEIGHT); + } + +--- a/net/mac80211/sta_info.h ++++ b/net/mac80211/sta_info.h +@@ -135,25 +135,18 @@ enum ieee80211_agg_stop_reason { + #define AIRTIME_USE_TX BIT(0) + #define AIRTIME_USE_RX BIT(1) + +- + struct 
airtime_info { + u64 rx_airtime; + u64 tx_airtime; +- u64 v_t; +- u64 last_scheduled; +- struct list_head list; ++ s64 deficit; + atomic_t aql_tx_pending; /* Estimated airtime for frames pending */ + u32 aql_limit_low; + u32 aql_limit_high; +- u32 weight_reciprocal; +- u16 weight; + }; + + void ieee80211_sta_update_pending_airtime(struct ieee80211_local *local, + struct sta_info *sta, u8 ac, + u16 tx_airtime, bool tx_completed); +-void ieee80211_register_airtime(struct ieee80211_txq *txq, +- u32 tx_airtime, u32 rx_airtime); + + struct sta_info; + +@@ -523,6 +516,7 @@ struct ieee80211_fragment_cache { + * @tid_seq: per-TID sequence numbers for sending to this STA + * @airtime: per-AC struct airtime_info describing airtime statistics for this + * station ++ * @airtime_weight: station weight for airtime fairness calculation purposes + * @ampdu_mlme: A-MPDU state machine state + * @mesh: mesh STA information + * @debugfs_dir: debug filesystem directory dentry +@@ -653,6 +647,7 @@ struct sta_info { + u16 tid_seq[IEEE80211_QOS_CTL_TID_MASK + 1]; + + struct airtime_info airtime[IEEE80211_NUM_ACS]; ++ u16 airtime_weight; + + /* + * Aggregation information, locked with lock. +--- a/net/mac80211/status.c ++++ b/net/mac80211/status.c +@@ -983,25 +983,6 @@ static void __ieee80211_tx_status(struct + if (!(info->flags & IEEE80211_TX_CTL_INJECTED) && acked) + ieee80211_frame_acked(sta, skb); + +- } else if (wiphy_ext_feature_isset(local->hw.wiphy, +- NL80211_EXT_FEATURE_AIRTIME_FAIRNESS)) { +- struct ieee80211_sub_if_data *sdata; +- struct ieee80211_txq *txq; +- u32 airtime; +- +- /* Account airtime to multicast queue */ +- sdata = ieee80211_sdata_from_skb(local, skb); +- +- if (sdata && (txq = sdata->vif.txq)) { +- airtime = info->status.tx_time ?: +- ieee80211_calc_expected_tx_airtime(hw, +- &sdata->vif, +- NULL, +- skb->len, +- false); +- +- ieee80211_register_airtime(txq, airtime, 0); +- } + } + + /* SNMP counters +--- a/net/mac80211/tx.c ++++ b/net/mac80211/tx.c +@@ -18,7 +18,6 @@ + #include <linux/bitmap.h> + #include <linux/rcupdate.h> + #include <linux/export.h> +-#include <linux/timekeeping.h> + #include <net/net_namespace.h> + #include <net/ieee80211_radiotap.h> + #include <net/cfg80211.h> +@@ -1480,7 +1479,7 @@ void ieee80211_txq_init(struct ieee80211 + codel_vars_init(&txqi->def_cvars); + codel_stats_init(&txqi->cstats); + __skb_queue_head_init(&txqi->frags); +- RB_CLEAR_NODE(&txqi->schedule_order); ++ INIT_LIST_HEAD(&txqi->schedule_order); + + txqi->txq.vif = &sdata->vif; + +@@ -1524,7 +1523,9 @@ void ieee80211_txq_purge(struct ieee8021 + ieee80211_purge_tx_queue(&local->hw, &txqi->frags); + spin_unlock_bh(&fq->lock); + +- ieee80211_unschedule_txq(&local->hw, &txqi->txq, true); ++ spin_lock_bh(&local->active_txq_lock[txqi->txq.ac]); ++ list_del_init(&txqi->schedule_order); ++ spin_unlock_bh(&local->active_txq_lock[txqi->txq.ac]); + } + + void ieee80211_txq_set_params(struct ieee80211_local *local) +@@ -3819,259 +3820,102 @@ EXPORT_SYMBOL(ieee80211_tx_dequeue); + struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac) + { + struct ieee80211_local *local = hw_to_local(hw); +- struct airtime_sched_info *air_sched; +- u64 now = ktime_get_coarse_boottime_ns(); + struct ieee80211_txq *ret = NULL; +- struct airtime_info *air_info; +- struct txq_info *txqi = NULL; +- struct rb_node *node; +- bool first = false; ++ struct txq_info *txqi = NULL, *head = NULL; ++ bool found_eligible_txq = false; + +- air_sched = &local->airtime[ac]; +- spin_lock_bh(&air_sched->lock); ++ 
spin_lock_bh(&local->active_txq_lock[ac]); + +- node = air_sched->schedule_pos; +- +-begin: +- if (!node) { +- node = rb_first_cached(&air_sched->active_txqs); +- first = true; +- } else { +- node = rb_next(node); +- } +- +- if (!node) +- goto out; +- +- txqi = container_of(node, struct txq_info, schedule_order); +- air_info = to_airtime_info(&txqi->txq); +- +- if (air_info->v_t > air_sched->v_t && +- (!first || !airtime_catchup_v_t(air_sched, air_info->v_t, now))) ++ begin: ++ txqi = list_first_entry_or_null(&local->active_txqs[ac], ++ struct txq_info, ++ schedule_order); ++ if (!txqi) + goto out; + +- if (!ieee80211_txq_airtime_check(hw, &txqi->txq)) { +- first = false; +- goto begin; +- } +- +- air_sched->schedule_pos = node; +- air_sched->last_schedule_activity = now; +- ret = &txqi->txq; +-out: +- spin_unlock_bh(&air_sched->lock); +- return ret; +-} +-EXPORT_SYMBOL(ieee80211_next_txq); +- +-static void __ieee80211_insert_txq(struct rb_root_cached *root, +- struct txq_info *txqi) +-{ +- struct rb_node **new = &root->rb_root.rb_node; +- struct airtime_info *old_air, *new_air; +- struct rb_node *parent = NULL; +- struct txq_info *__txqi; +- bool leftmost = true; +- +- while (*new) { +- parent = *new; +- __txqi = rb_entry(parent, struct txq_info, schedule_order); +- old_air = to_airtime_info(&__txqi->txq); +- new_air = to_airtime_info(&txqi->txq); +- +- if (new_air->v_t <= old_air->v_t) { +- new = &parent->rb_left; +- } else { +- new = &parent->rb_right; +- leftmost = false; +- } ++ if (txqi == head) { ++ if (!found_eligible_txq) ++ goto out; ++ else ++ found_eligible_txq = false; + } + +- rb_link_node(&txqi->schedule_order, parent, new); +- rb_insert_color_cached(&txqi->schedule_order, root, leftmost); +-} +- +-void ieee80211_resort_txq(struct ieee80211_hw *hw, +- struct ieee80211_txq *txq) +-{ +- struct airtime_info *air_info = to_airtime_info(txq); +- struct ieee80211_local *local = hw_to_local(hw); +- struct txq_info *txqi = to_txq_info(txq); +- struct airtime_sched_info *air_sched; +- +- air_sched = &local->airtime[txq->ac]; ++ if (!head) ++ head = txqi; + +- lockdep_assert_held(&air_sched->lock); +- +- if (!RB_EMPTY_NODE(&txqi->schedule_order)) { +- struct airtime_info *a_prev = NULL, *a_next = NULL; +- struct txq_info *t_prev, *t_next; +- struct rb_node *n_prev, *n_next; ++ if (txqi->txq.sta) { ++ struct sta_info *sta = container_of(txqi->txq.sta, ++ struct sta_info, sta); ++ bool aql_check = ieee80211_txq_airtime_check(hw, &txqi->txq); ++ s64 deficit = sta->airtime[txqi->txq.ac].deficit; + +- /* Erasing a node can cause an expensive rebalancing operation, +- * so we check the previous and next nodes first and only remove +- * and re-insert if the current node is not already in the +- * correct position. 
+- */ +- if ((n_prev = rb_prev(&txqi->schedule_order)) != NULL) { +- t_prev = container_of(n_prev, struct txq_info, +- schedule_order); +- a_prev = to_airtime_info(&t_prev->txq); +- } ++ if (aql_check) ++ found_eligible_txq = true; + +- if ((n_next = rb_next(&txqi->schedule_order)) != NULL) { +- t_next = container_of(n_next, struct txq_info, +- schedule_order); +- a_next = to_airtime_info(&t_next->txq); ++ if (deficit < 0) ++ sta->airtime[txqi->txq.ac].deficit += ++ sta->airtime_weight; ++ ++ if (deficit < 0 || !aql_check) { ++ list_move_tail(&txqi->schedule_order, ++ &local->active_txqs[txqi->txq.ac]); ++ goto begin; + } +- +- if ((!a_prev || a_prev->v_t <= air_info->v_t) && +- (!a_next || a_next->v_t > air_info->v_t)) +- return; +- +- if (air_sched->schedule_pos == &txqi->schedule_order) +- air_sched->schedule_pos = n_prev; +- +- rb_erase_cached(&txqi->schedule_order, +- &air_sched->active_txqs); +- RB_CLEAR_NODE(&txqi->schedule_order); +- __ieee80211_insert_txq(&air_sched->active_txqs, txqi); + } +-} +- +-void ieee80211_update_airtime_weight(struct ieee80211_local *local, +- struct airtime_sched_info *air_sched, +- u64 now, bool force) +-{ +- struct airtime_info *air_info, *tmp; +- u64 weight_sum = 0; +- +- if (unlikely(!now)) +- now = ktime_get_coarse_boottime_ns(); +- +- lockdep_assert_held(&air_sched->lock); +- +- if (!force && (air_sched->last_weight_update < +- now - AIRTIME_ACTIVE_DURATION)) +- return; +- +- list_for_each_entry_safe(air_info, tmp, +- &air_sched->active_list, list) { +- if (airtime_is_active(air_info, now)) +- weight_sum += air_info->weight; +- else +- list_del_init(&air_info->list); +- } +- airtime_weight_sum_set(air_sched, weight_sum); +- air_sched->last_weight_update = now; +-} + +-void ieee80211_schedule_txq(struct ieee80211_hw *hw, +- struct ieee80211_txq *txq) +- __acquires(txq_lock) __releases(txq_lock) +-{ +- struct ieee80211_local *local = hw_to_local(hw); +- struct txq_info *txqi = to_txq_info(txq); +- struct airtime_sched_info *air_sched; +- u64 now = ktime_get_coarse_boottime_ns(); +- struct airtime_info *air_info; +- u8 ac = txq->ac; +- bool was_active; + +- air_sched = &local->airtime[ac]; +- air_info = to_airtime_info(txq); +- +- spin_lock_bh(&air_sched->lock); +- was_active = airtime_is_active(air_info, now); +- airtime_set_active(air_sched, air_info, now); +- +- if (!RB_EMPTY_NODE(&txqi->schedule_order)) ++ if (txqi->schedule_round == local->schedule_round[ac]) + goto out; + +- /* If the station has been inactive for a while, catch up its v_t so it +- * doesn't get indefinite priority; see comment above the definition of +- * AIRTIME_MAX_BEHIND. 
+- */ +- if ((!was_active && air_info->v_t < air_sched->v_t) || +- air_info->v_t < air_sched->v_t - AIRTIME_MAX_BEHIND) +- air_info->v_t = air_sched->v_t; +- +- ieee80211_update_airtime_weight(local, air_sched, now, !was_active); +- __ieee80211_insert_txq(&air_sched->active_txqs, txqi); ++ list_del_init(&txqi->schedule_order); ++ txqi->schedule_round = local->schedule_round[ac]; ++ ret = &txqi->txq; + + out: +- spin_unlock_bh(&air_sched->lock); +-} +-EXPORT_SYMBOL(ieee80211_schedule_txq); +- +-static void __ieee80211_unschedule_txq(struct ieee80211_hw *hw, +- struct ieee80211_txq *txq, +- bool purge) +-{ +- struct ieee80211_local *local = hw_to_local(hw); +- struct txq_info *txqi = to_txq_info(txq); +- struct airtime_sched_info *air_sched; +- struct airtime_info *air_info; +- +- air_sched = &local->airtime[txq->ac]; +- air_info = to_airtime_info(&txqi->txq); +- +- lockdep_assert_held(&air_sched->lock); +- +- if (purge) { +- list_del_init(&air_info->list); +- ieee80211_update_airtime_weight(local, air_sched, 0, true); +- } +- +- if (RB_EMPTY_NODE(&txqi->schedule_order)) +- return; +- +- if (air_sched->schedule_pos == &txqi->schedule_order) +- air_sched->schedule_pos = rb_prev(&txqi->schedule_order); +- +- if (!purge) +- airtime_set_active(air_sched, air_info, +- ktime_get_coarse_boottime_ns()); +- +- rb_erase_cached(&txqi->schedule_order, +- &air_sched->active_txqs); +- RB_CLEAR_NODE(&txqi->schedule_order); ++ spin_unlock_bh(&local->active_txq_lock[ac]); ++ return ret; + } ++EXPORT_SYMBOL(ieee80211_next_txq); + +-void ieee80211_unschedule_txq(struct ieee80211_hw *hw, ++void __ieee80211_schedule_txq(struct ieee80211_hw *hw, + struct ieee80211_txq *txq, +- bool purge) +- __acquires(txq_lock) __releases(txq_lock) +-{ +- struct ieee80211_local *local = hw_to_local(hw); +- +- spin_lock_bh(&local->airtime[txq->ac].lock); +- __ieee80211_unschedule_txq(hw, txq, purge); +- spin_unlock_bh(&local->airtime[txq->ac].lock); +-} +- +-void ieee80211_return_txq(struct ieee80211_hw *hw, +- struct ieee80211_txq *txq, bool force) ++ bool force) + { + struct ieee80211_local *local = hw_to_local(hw); + struct txq_info *txqi = to_txq_info(txq); + +- spin_lock_bh(&local->airtime[txq->ac].lock); ++ spin_lock_bh(&local->active_txq_lock[txq->ac]); + +- if (!RB_EMPTY_NODE(&txqi->schedule_order) && !force && +- !txq_has_queue(txq)) +- __ieee80211_unschedule_txq(hw, txq, false); ++ if (list_empty(&txqi->schedule_order) && ++ (force || !skb_queue_empty(&txqi->frags) || ++ txqi->tin.backlog_packets)) { ++ /* If airtime accounting is active, always enqueue STAs at the ++ * head of the list to ensure that they only get moved to the ++ * back by the airtime DRR scheduler once they have a negative ++ * deficit. A station that already has a negative deficit will ++ * get immediately moved to the back of the list on the next ++ * call to ieee80211_next_txq(). 
++ */ ++ if (txqi->txq.sta && local->airtime_flags && ++ wiphy_ext_feature_isset(local->hw.wiphy, ++ NL80211_EXT_FEATURE_AIRTIME_FAIRNESS)) ++ list_add(&txqi->schedule_order, ++ &local->active_txqs[txq->ac]); ++ else ++ list_add_tail(&txqi->schedule_order, ++ &local->active_txqs[txq->ac]); ++ } + +- spin_unlock_bh(&local->airtime[txq->ac].lock); ++ spin_unlock_bh(&local->active_txq_lock[txq->ac]); + } +-EXPORT_SYMBOL(ieee80211_return_txq); ++EXPORT_SYMBOL(__ieee80211_schedule_txq); + + DEFINE_STATIC_KEY_FALSE(aql_disable); + + bool ieee80211_txq_airtime_check(struct ieee80211_hw *hw, + struct ieee80211_txq *txq) + { +- struct airtime_info *air_info = to_airtime_info(txq); ++ struct sta_info *sta; + struct ieee80211_local *local = hw_to_local(hw); + + if (!wiphy_ext_feature_isset(local->hw.wiphy, NL80211_EXT_FEATURE_AQL)) +@@ -4086,12 +3930,15 @@ bool ieee80211_txq_airtime_check(struct + if (unlikely(txq->tid == IEEE80211_NUM_TIDS)) + return true; + +- if (atomic_read(&air_info->aql_tx_pending) < air_info->aql_limit_low) ++ sta = container_of(txq->sta, struct sta_info, sta); ++ if (atomic_read(&sta->airtime[txq->ac].aql_tx_pending) < ++ sta->airtime[txq->ac].aql_limit_low) + return true; + + if (atomic_read(&local->aql_total_pending_airtime) < + local->aql_threshold && +- atomic_read(&air_info->aql_tx_pending) < air_info->aql_limit_high) ++ atomic_read(&sta->airtime[txq->ac].aql_tx_pending) < ++ sta->airtime[txq->ac].aql_limit_high) + return true; + + return false; +@@ -4101,59 +3948,60 @@ EXPORT_SYMBOL(ieee80211_txq_airtime_chec + bool ieee80211_txq_may_transmit(struct ieee80211_hw *hw, + struct ieee80211_txq *txq) + { +- struct txq_info *first_txqi = NULL, *txqi = to_txq_info(txq); + struct ieee80211_local *local = hw_to_local(hw); +- struct airtime_sched_info *air_sched; +- struct airtime_info *air_info; +- struct rb_node *node = NULL; +- bool ret = false; +- u64 now; +- ++ struct txq_info *iter, *tmp, *txqi = to_txq_info(txq); ++ struct sta_info *sta; ++ u8 ac = txq->ac; + +- if (!ieee80211_txq_airtime_check(hw, txq)) +- return false; ++ spin_lock_bh(&local->active_txq_lock[ac]); + +- air_sched = &local->airtime[txq->ac]; +- spin_lock_bh(&air_sched->lock); ++ if (!txqi->txq.sta) ++ goto out; + +- if (RB_EMPTY_NODE(&txqi->schedule_order)) ++ if (list_empty(&txqi->schedule_order)) + goto out; + +- now = ktime_get_coarse_boottime_ns(); ++ list_for_each_entry_safe(iter, tmp, &local->active_txqs[ac], ++ schedule_order) { ++ if (iter == txqi) ++ break; + +- /* Like in ieee80211_next_txq(), make sure the first station in the +- * scheduling order is eligible for transmission to avoid starvation. 
+- */ +- node = rb_first_cached(&air_sched->active_txqs); +- if (node) { +- first_txqi = container_of(node, struct txq_info, +- schedule_order); +- air_info = to_airtime_info(&first_txqi->txq); +- +- if (air_sched->v_t < air_info->v_t) +- airtime_catchup_v_t(air_sched, air_info->v_t, now); ++ if (!iter->txq.sta) { ++ list_move_tail(&iter->schedule_order, ++ &local->active_txqs[ac]); ++ continue; ++ } ++ sta = container_of(iter->txq.sta, struct sta_info, sta); ++ if (sta->airtime[ac].deficit < 0) ++ sta->airtime[ac].deficit += sta->airtime_weight; ++ list_move_tail(&iter->schedule_order, &local->active_txqs[ac]); + } + +- air_info = to_airtime_info(&txqi->txq); +- if (air_info->v_t <= air_sched->v_t) { +- air_sched->last_schedule_activity = now; +- ret = true; +- } ++ sta = container_of(txqi->txq.sta, struct sta_info, sta); ++ if (sta->airtime[ac].deficit >= 0) ++ goto out; ++ ++ sta->airtime[ac].deficit += sta->airtime_weight; ++ list_move_tail(&txqi->schedule_order, &local->active_txqs[ac]); ++ spin_unlock_bh(&local->active_txq_lock[ac]); + ++ return false; + out: +- spin_unlock_bh(&air_sched->lock); +- return ret; ++ if (!list_empty(&txqi->schedule_order)) ++ list_del_init(&txqi->schedule_order); ++ spin_unlock_bh(&local->active_txq_lock[ac]); ++ ++ return true; + } + EXPORT_SYMBOL(ieee80211_txq_may_transmit); + + void ieee80211_txq_schedule_start(struct ieee80211_hw *hw, u8 ac) + { + struct ieee80211_local *local = hw_to_local(hw); +- struct airtime_sched_info *air_sched = &local->airtime[ac]; + +- spin_lock_bh(&air_sched->lock); +- air_sched->schedule_pos = NULL; +- spin_unlock_bh(&air_sched->lock); ++ spin_lock_bh(&local->active_txq_lock[ac]); ++ local->schedule_round[ac]++; ++ spin_unlock_bh(&local->active_txq_lock[ac]); + } + EXPORT_SYMBOL(ieee80211_txq_schedule_start); + diff --git a/package/kernel/mac80211/patches/subsys/331-mac80211-make-sta-airtime-deficit-field-s32-instead-.patch b/package/kernel/mac80211/patches/subsys/331-mac80211-make-sta-airtime-deficit-field-s32-instead-.patch new file mode 100644 index 0000000000..c006d3762a --- /dev/null +++ b/package/kernel/mac80211/patches/subsys/331-mac80211-make-sta-airtime-deficit-field-s32-instead-.patch @@ -0,0 +1,52 @@ +From: Felix Fietkau <nbd@nbd.name> +Date: Mon, 20 Jun 2022 14:53:04 +0200 +Subject: [PATCH] mac80211: make sta airtime deficit field s32 instead of + s64 + +32 bit is more than enough range for the airtime deficit + +Signed-off-by: Felix Fietkau <nbd@nbd.name> +--- + +--- a/net/mac80211/debugfs_sta.c ++++ b/net/mac80211/debugfs_sta.c +@@ -202,7 +202,7 @@ static ssize_t sta_airtime_read(struct f + size_t bufsz = 400; + char *buf = kzalloc(bufsz, GFP_KERNEL), *p = buf; + u64 rx_airtime = 0, tx_airtime = 0; +- s64 deficit[IEEE80211_NUM_ACS]; ++ s32 deficit[IEEE80211_NUM_ACS]; + ssize_t rv; + int ac; + +@@ -219,7 +219,7 @@ static ssize_t sta_airtime_read(struct f + + p += scnprintf(p, bufsz + buf - p, + "RX: %llu us\nTX: %llu us\nWeight: %u\n" +- "Deficit: VO: %lld us VI: %lld us BE: %lld us BK: %lld us\n", ++ "Deficit: VO: %d us VI: %d us BE: %d us BK: %d us\n", + rx_airtime, tx_airtime, sta->airtime_weight, + deficit[0], deficit[1], deficit[2], deficit[3]); + +--- a/net/mac80211/sta_info.h ++++ b/net/mac80211/sta_info.h +@@ -138,7 +138,7 @@ enum ieee80211_agg_stop_reason { + struct airtime_info { + u64 rx_airtime; + u64 tx_airtime; +- s64 deficit; ++ s32 deficit; + atomic_t aql_tx_pending; /* Estimated airtime for frames pending */ + u32 aql_limit_low; + u32 aql_limit_high; +--- a/net/mac80211/tx.c ++++ 
b/net/mac80211/tx.c +@@ -3847,7 +3847,7 @@ struct ieee80211_txq *ieee80211_next_txq + struct sta_info *sta = container_of(txqi->txq.sta, + struct sta_info, sta); + bool aql_check = ieee80211_txq_airtime_check(hw, &txqi->txq); +- s64 deficit = sta->airtime[txqi->txq.ac].deficit; ++ s32 deficit = sta->airtime[txqi->txq.ac].deficit; + + if (aql_check) + found_eligible_txq = true; diff --git a/package/kernel/mac80211/patches/subsys/332-mac80211-consider-aql_tx_pending-when-checking-airti.patch b/package/kernel/mac80211/patches/subsys/332-mac80211-consider-aql_tx_pending-when-checking-airti.patch new file mode 100644 index 0000000000..c214294603 --- /dev/null +++ b/package/kernel/mac80211/patches/subsys/332-mac80211-consider-aql_tx_pending-when-checking-airti.patch @@ -0,0 +1,48 @@ +From: Felix Fietkau <nbd@nbd.name> +Date: Mon, 20 Jun 2022 14:59:09 +0200 +Subject: [PATCH] mac80211: consider aql_tx_pending when checking airtime + deficit + +When queueing packets for a station, deficit only gets added once the packets +have been transmitted, which could be much later. During that time, a lot of +temporary unfairness could happen, which could lead to bursty behavior. +Fix this by subtracting the aql_tx_pending when checking the deficit in tx +scheduling. + +Signed-off-by: Felix Fietkau <nbd@nbd.name> +--- + +--- a/net/mac80211/tx.c ++++ b/net/mac80211/tx.c +@@ -3817,6 +3817,13 @@ out: + } + EXPORT_SYMBOL(ieee80211_tx_dequeue); + ++static inline s32 ieee80211_sta_deficit(struct sta_info *sta, u8 ac) ++{ ++ struct airtime_info *air_info = &sta->airtime[ac]; ++ ++ return air_info->deficit - atomic_read(&air_info->aql_tx_pending); ++} ++ + struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac) + { + struct ieee80211_local *local = hw_to_local(hw); +@@ -3847,7 +3854,7 @@ struct ieee80211_txq *ieee80211_next_txq + struct sta_info *sta = container_of(txqi->txq.sta, + struct sta_info, sta); + bool aql_check = ieee80211_txq_airtime_check(hw, &txqi->txq); +- s32 deficit = sta->airtime[txqi->txq.ac].deficit; ++ s32 deficit = ieee80211_sta_deficit(sta, txqi->txq.ac); + + if (aql_check) + found_eligible_txq = true; +@@ -3972,7 +3979,7 @@ bool ieee80211_txq_may_transmit(struct i + continue; + } + sta = container_of(iter->txq.sta, struct sta_info, sta); +- if (sta->airtime[ac].deficit < 0) ++ if (ieee80211_sta_deficit(sta, ac) < 0) + sta->airtime[ac].deficit += sta->airtime_weight; + list_move_tail(&iter->schedule_order, &local->active_txqs[ac]); + } diff --git a/package/kernel/mac80211/patches/subsys/333-mac80211-keep-recently-active-tx-queues-in-schedulin.patch b/package/kernel/mac80211/patches/subsys/333-mac80211-keep-recently-active-tx-queues-in-schedulin.patch new file mode 100644 index 0000000000..317e4f0653 --- /dev/null +++ b/package/kernel/mac80211/patches/subsys/333-mac80211-keep-recently-active-tx-queues-in-schedulin.patch @@ -0,0 +1,118 @@ +From: Felix Fietkau <nbd@nbd.name> +Date: Mon, 20 Jun 2022 20:52:50 +0200 +Subject: [PATCH] mac80211: keep recently active tx queues in scheduling + list + +This allows proper deficit accounting to ensure that they don't carry their +deficit until the next time they become active + +Signed-off-by: Felix Fietkau <nbd@nbd.name> +--- + +--- a/net/mac80211/ieee80211_i.h ++++ b/net/mac80211/ieee80211_i.h +@@ -83,6 +83,13 @@ extern const u8 ieee80211_ac_to_qos_mask + + #define IEEE80211_MAX_NAN_INSTANCE_ID 255 + ++ ++/* ++ * Keep a station's queues on the active list for deficit accounting purposes ++ * if it was active or queued during the last 100ms 
++ */ ++#define AIRTIME_ACTIVE_DURATION (HZ / 10) ++ + struct ieee80211_bss { + u32 device_ts_beacon, device_ts_presp; + +--- a/net/mac80211/sta_info.h ++++ b/net/mac80211/sta_info.h +@@ -138,6 +138,7 @@ enum ieee80211_agg_stop_reason { + struct airtime_info { + u64 rx_airtime; + u64 tx_airtime; ++ u32 last_active; + s32 deficit; + atomic_t aql_tx_pending; /* Estimated airtime for frames pending */ + u32 aql_limit_low; +--- a/net/mac80211/tx.c ++++ b/net/mac80211/tx.c +@@ -3824,6 +3824,36 @@ static inline s32 ieee80211_sta_deficit( + return air_info->deficit - atomic_read(&air_info->aql_tx_pending); + } + ++static void ++ieee80211_txq_set_active(struct txq_info *txqi) ++{ ++ struct sta_info *sta; ++ ++ if (!txqi->txq.sta) ++ return; ++ ++ sta = container_of(txqi->txq.sta, struct sta_info, sta); ++ sta->airtime[txqi->txq.ac].last_active = (u32)jiffies; ++} ++ ++static bool ++ieee80211_txq_keep_active(struct txq_info *txqi) ++{ ++ struct sta_info *sta; ++ u32 diff; ++ ++ if (!txqi->txq.sta) ++ return false; ++ ++ sta = container_of(txqi->txq.sta, struct sta_info, sta); ++ if (ieee80211_sta_deficit(sta, txqi->txq.ac) >= 0) ++ return false; ++ ++ diff = (u32)jiffies - sta->airtime[txqi->txq.ac].last_active; ++ ++ return diff <= AIRTIME_ACTIVE_DURATION; ++} ++ + struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac) + { + struct ieee80211_local *local = hw_to_local(hw); +@@ -3870,7 +3900,6 @@ struct ieee80211_txq *ieee80211_next_txq + } + } + +- + if (txqi->schedule_round == local->schedule_round[ac]) + goto out; + +@@ -3890,12 +3919,13 @@ void __ieee80211_schedule_txq(struct iee + { + struct ieee80211_local *local = hw_to_local(hw); + struct txq_info *txqi = to_txq_info(txq); ++ bool has_queue; + + spin_lock_bh(&local->active_txq_lock[txq->ac]); + ++ has_queue = force || txq_has_queue(txq); + if (list_empty(&txqi->schedule_order) && +- (force || !skb_queue_empty(&txqi->frags) || +- txqi->tin.backlog_packets)) { ++ (has_queue || ieee80211_txq_keep_active(txqi))) { + /* If airtime accounting is active, always enqueue STAs at the + * head of the list to ensure that they only get moved to the + * back by the airtime DRR scheduler once they have a negative +@@ -3903,7 +3933,7 @@ void __ieee80211_schedule_txq(struct iee + * get immediately moved to the back of the list on the next + * call to ieee80211_next_txq(). 
+ */ +- if (txqi->txq.sta && local->airtime_flags && ++ if (txqi->txq.sta && local->airtime_flags && has_queue && + wiphy_ext_feature_isset(local->hw.wiphy, + NL80211_EXT_FEATURE_AIRTIME_FAIRNESS)) + list_add(&txqi->schedule_order, +@@ -3911,6 +3941,8 @@ void __ieee80211_schedule_txq(struct iee + else + list_add_tail(&txqi->schedule_order, + &local->active_txqs[txq->ac]); ++ if (has_queue) ++ ieee80211_txq_set_active(txqi); + } + + spin_unlock_bh(&local->active_txq_lock[txq->ac]); diff --git a/package/kernel/mac80211/patches/subsys/334-mac80211-add-a-per-PHY-AQL-limit-to-improve-fairness.patch b/package/kernel/mac80211/patches/subsys/334-mac80211-add-a-per-PHY-AQL-limit-to-improve-fairness.patch new file mode 100644 index 0000000000..89718a8273 --- /dev/null +++ b/package/kernel/mac80211/patches/subsys/334-mac80211-add-a-per-PHY-AQL-limit-to-improve-fairness.patch @@ -0,0 +1,131 @@ +From: Felix Fietkau <nbd@nbd.name> +Date: Mon, 20 Jun 2022 21:26:34 +0200 +Subject: [PATCH] mac80211: add a per-PHY AQL limit to improve fairness + +In order to maintain fairness, the amount of queueing needs to be limited +beyond the simple per-station AQL budget, otherwise the driver can simply +repeatedly do scheduling rounds until all queues that have not used their +AQL budget become eligble. + +To be conservative, use the high AQL limit for the first txq and add half +of the low AQL for each subsequent queue. + +Signed-off-by: Felix Fietkau <nbd@nbd.name> +--- + +--- a/net/mac80211/ieee80211_i.h ++++ b/net/mac80211/ieee80211_i.h +@@ -1211,6 +1211,7 @@ struct ieee80211_local { + u32 aql_txq_limit_high[IEEE80211_NUM_ACS]; + u32 aql_threshold; + atomic_t aql_total_pending_airtime; ++ atomic_t aql_ac_pending_airtime[IEEE80211_NUM_ACS]; + + const struct ieee80211_ops *ops; + +--- a/net/mac80211/main.c ++++ b/net/mac80211/main.c +@@ -712,6 +712,7 @@ struct ieee80211_hw *ieee80211_alloc_hw_ + local->aql_txq_limit_low[i] = IEEE80211_DEFAULT_AQL_TXQ_LIMIT_L; + local->aql_txq_limit_high[i] = + IEEE80211_DEFAULT_AQL_TXQ_LIMIT_H; ++ atomic_set(&local->aql_ac_pending_airtime[i], 0); + } + + local->airtime_flags = AIRTIME_USE_TX | AIRTIME_USE_RX; +--- a/net/mac80211/sta_info.c ++++ b/net/mac80211/sta_info.c +@@ -1929,6 +1929,7 @@ void ieee80211_sta_update_pending_airtim + &sta->airtime[ac].aql_tx_pending); + + atomic_add(tx_airtime, &local->aql_total_pending_airtime); ++ atomic_add(tx_airtime, &local->aql_ac_pending_airtime[ac]); + return; + } + +@@ -1940,14 +1941,17 @@ void ieee80211_sta_update_pending_airtim + tx_pending, 0); + } + ++ atomic_sub(tx_airtime, &local->aql_total_pending_airtime); + tx_pending = atomic_sub_return(tx_airtime, +- &local->aql_total_pending_airtime); ++ &local->aql_ac_pending_airtime[ac]); + if (WARN_ONCE(tx_pending < 0, + "Device %s AC %d pending airtime underflow: %u, %u", + wiphy_name(local->hw.wiphy), ac, tx_pending, +- tx_airtime)) +- atomic_cmpxchg(&local->aql_total_pending_airtime, ++ tx_airtime)) { ++ atomic_cmpxchg(&local->aql_ac_pending_airtime[ac], + tx_pending, 0); ++ atomic_sub(tx_pending, &local->aql_total_pending_airtime); ++ } + } + + int sta_info_move_state(struct sta_info *sta, +--- a/net/mac80211/tx.c ++++ b/net/mac80211/tx.c +@@ -3863,6 +3863,9 @@ struct ieee80211_txq *ieee80211_next_txq + + spin_lock_bh(&local->active_txq_lock[ac]); + ++ if (!local->schedule_round[ac]) ++ goto out; ++ + begin: + txqi = list_first_entry_or_null(&local->active_txqs[ac], + struct txq_info, +@@ -3984,6 +3987,25 @@ bool ieee80211_txq_airtime_check(struct + } + 
EXPORT_SYMBOL(ieee80211_txq_airtime_check); + ++static bool ++ieee80211_txq_schedule_airtime_check(struct ieee80211_local *local, u8 ac) ++{ ++ unsigned int num_txq = 0; ++ struct txq_info *txq; ++ u32 aql_limit; ++ ++ if (!wiphy_ext_feature_isset(local->hw.wiphy, NL80211_EXT_FEATURE_AQL)) ++ return true; ++ ++ list_for_each_entry(txq, &local->active_txqs[ac], schedule_order) ++ num_txq++; ++ ++ aql_limit = (num_txq - 1) * local->aql_txq_limit_low[ac] / 2 + ++ local->aql_txq_limit_high[ac]; ++ ++ return atomic_read(&local->aql_ac_pending_airtime[ac]) < aql_limit; ++} ++ + bool ieee80211_txq_may_transmit(struct ieee80211_hw *hw, + struct ieee80211_txq *txq) + { +@@ -4000,6 +4022,9 @@ bool ieee80211_txq_may_transmit(struct i + if (list_empty(&txqi->schedule_order)) + goto out; + ++ if (!ieee80211_txq_schedule_airtime_check(local, ac)) ++ goto out; ++ + list_for_each_entry_safe(iter, tmp, &local->active_txqs[ac], + schedule_order) { + if (iter == txqi) +@@ -4039,7 +4064,15 @@ void ieee80211_txq_schedule_start(struct + struct ieee80211_local *local = hw_to_local(hw); + + spin_lock_bh(&local->active_txq_lock[ac]); +- local->schedule_round[ac]++; ++ ++ if (ieee80211_txq_schedule_airtime_check(local, ac)) { ++ local->schedule_round[ac]++; ++ if (!local->schedule_round[ac]) ++ local->schedule_round[ac]++; ++ } else { ++ local->schedule_round[ac] = 0; ++ } ++ + spin_unlock_bh(&local->active_txq_lock[ac]); + } + EXPORT_SYMBOL(ieee80211_txq_schedule_start); diff --git a/package/kernel/mac80211/patches/subsys/335-mac80211-add-debugfs-file-to-display-per-phy-AQL-pen.patch b/package/kernel/mac80211/patches/subsys/335-mac80211-add-debugfs-file-to-display-per-phy-AQL-pen.patch new file mode 100644 index 0000000000..df45a520fa --- /dev/null +++ b/package/kernel/mac80211/patches/subsys/335-mac80211-add-debugfs-file-to-display-per-phy-AQL-pen.patch @@ -0,0 +1,58 @@ +From: Felix Fietkau <nbd@nbd.name> +Date: Sat, 25 Jun 2022 21:25:40 +0200 +Subject: [PATCH] mac80211: add debugfs file to display per-phy AQL pending + airtime + +Now that the global pending airtime is more relevant for airtime fairness, +it makes sense to make it accessible via debugfs for debugging + +Signed-off-by: Felix Fietkau <nbd@nbd.name> +--- + +--- a/net/mac80211/debugfs.c ++++ b/net/mac80211/debugfs.c +@@ -201,6 +201,36 @@ static const struct file_operations airt + .llseek = default_llseek, + }; + ++static ssize_t aql_pending_read(struct file *file, ++ char __user *user_buf, ++ size_t count, loff_t *ppos) ++{ ++ struct ieee80211_local *local = file->private_data; ++ char buf[400]; ++ int len = 0; ++ ++ len = scnprintf(buf, sizeof(buf), ++ "AC AQL pending\n" ++ "VO %u us\n" ++ "VI %u us\n" ++ "BE %u us\n" ++ "BK %u us\n" ++ "total %u us\n", ++ atomic_read(&local->aql_ac_pending_airtime[IEEE80211_AC_VO]), ++ atomic_read(&local->aql_ac_pending_airtime[IEEE80211_AC_VI]), ++ atomic_read(&local->aql_ac_pending_airtime[IEEE80211_AC_BE]), ++ atomic_read(&local->aql_ac_pending_airtime[IEEE80211_AC_BK]), ++ atomic_read(&local->aql_total_pending_airtime)); ++ return simple_read_from_buffer(user_buf, count, ppos, ++ buf, len); ++} ++ ++static const struct file_operations aql_pending_ops = { ++ .read = aql_pending_read, ++ .open = simple_open, ++ .llseek = default_llseek, ++}; ++ + static ssize_t aql_txq_limit_read(struct file *file, + char __user *user_buf, + size_t count, +@@ -628,6 +658,7 @@ void debugfs_hw_add(struct ieee80211_loc + DEBUGFS_ADD(hw_conf); + DEBUGFS_ADD_MODE(force_tx_status, 0600); + DEBUGFS_ADD_MODE(aql_enable, 0600); ++ 
DEBUGFS_ADD(aql_pending); + + if (local->ops->wake_tx_queue) + DEBUGFS_ADD_MODE(aqm, 0600); diff --git a/package/kernel/mac80211/patches/subsys/336-mac80211-only-accumulate-airtime-deficit-for-active-.patch b/package/kernel/mac80211/patches/subsys/336-mac80211-only-accumulate-airtime-deficit-for-active-.patch new file mode 100644 index 0000000000..35f07c1a97 --- /dev/null +++ b/package/kernel/mac80211/patches/subsys/336-mac80211-only-accumulate-airtime-deficit-for-active-.patch @@ -0,0 +1,36 @@ +From: Felix Fietkau <nbd@nbd.name> +Date: Sat, 25 Jun 2022 23:10:19 +0200 +Subject: [PATCH] mac80211: only accumulate airtime deficit for active + clients + +When a client does not generate any local tx activity, accumulating airtime +deficit for the round-robin scheduler can be harmful. If this goes on for too +long, the deficit could grow quite large, which might cause unreasonable +initial latency once the client becomes active + +Signed-off-by: Felix Fietkau <nbd@nbd.name> +--- + +--- a/net/mac80211/sta_info.c ++++ b/net/mac80211/sta_info.c +@@ -1900,6 +1900,7 @@ void ieee80211_sta_register_airtime(stru + struct ieee80211_local *local = sta->sdata->local; + u8 ac = ieee80211_ac_from_tid(tid); + u32 airtime = 0; ++ u32 diff; + + if (sta->local->airtime_flags & AIRTIME_USE_TX) + airtime += tx_airtime; +@@ -1909,7 +1910,11 @@ void ieee80211_sta_register_airtime(stru + spin_lock_bh(&local->active_txq_lock[ac]); + sta->airtime[ac].tx_airtime += tx_airtime; + sta->airtime[ac].rx_airtime += rx_airtime; +- sta->airtime[ac].deficit -= airtime; ++ ++ diff = (u32)jiffies - sta->airtime[ac].last_active; ++ if (diff <= AIRTIME_ACTIVE_DURATION) ++ sta->airtime[ac].deficit -= airtime; ++ + spin_unlock_bh(&local->active_txq_lock[ac]); + } + EXPORT_SYMBOL(ieee80211_sta_register_airtime); diff --git a/package/kernel/mac80211/patches/subsys/500-mac80211_configure_antenna_gain.patch b/package/kernel/mac80211/patches/subsys/500-mac80211_configure_antenna_gain.patch index 612b9d66ee..7c598ba3ff 100644 --- a/package/kernel/mac80211/patches/subsys/500-mac80211_configure_antenna_gain.patch +++ b/package/kernel/mac80211/patches/subsys/500-mac80211_configure_antenna_gain.patch @@ -57,7 +57,7 @@ __NL80211_ATTR_AFTER_LAST, --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c -@@ -2845,6 +2845,19 @@ static int ieee80211_get_tx_power(struct +@@ -2812,6 +2812,19 @@ static int ieee80211_get_tx_power(struct return 0; } @@ -77,7 +77,7 @@ static void ieee80211_rfkill_poll(struct wiphy *wiphy) { struct ieee80211_local *local = wiphy_priv(wiphy); -@@ -4549,6 +4562,7 @@ const struct cfg80211_ops mac80211_confi +@@ -4516,6 +4529,7 @@ const struct cfg80211_ops mac80211_confi .set_wiphy_params = ieee80211_set_wiphy_params, .set_tx_power = ieee80211_set_tx_power, .get_tx_power = ieee80211_get_tx_power, @@ -87,7 +87,7 @@ CFG80211_TESTMODE_DUMP(ieee80211_testmode_dump) --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h -@@ -1464,6 +1464,7 @@ struct ieee80211_local { +@@ -1443,6 +1443,7 @@ struct ieee80211_local { int dynamic_ps_forced_timeout; int user_power_level; /* in dBm, for all interfaces */ |