author | Felix Fietkau <nbd@nbd.name> | 2021-02-06 16:46:56 +0100
---|---|---
committer | Felix Fietkau <nbd@nbd.name> | 2021-02-14 19:41:07 +0100
commit | 072bfe21132b70ff348f094c5cab9b566d56b6e5 (patch) |
tree | d6510fab0db0aaeda164329e25ceb76ca4e81312 /package/kernel |
parent | b6066846adc62aa24dc9970683ad26e8f3f3b4c1 (diff) |
download | upstream-072bfe21132b70ff348f094c5cab9b566d56b6e5.tar.gz, upstream-072bfe21132b70ff348f094c5cab9b566d56b6e5.tar.bz2, upstream-072bfe21132b70ff348f094c5cab9b566d56b6e5.zip |
mac80211: add minstrel performance improvements
Reduce fluctuations in rate selection / statistics
Signed-off-by: Felix Fietkau <nbd@nbd.name>
Diffstat (limited to 'package/kernel')
 package/kernel/mac80211/patches/subsys/354-mac80211-minstrel_ht-reduce-fluctuations-in-rate-pro.patch | 30
 package/kernel/mac80211/patches/subsys/355-mac80211-minstrel_ht-rework-rate-downgrade-code-and-.patch | 151
2 files changed, 181 insertions, 0 deletions
diff --git a/package/kernel/mac80211/patches/subsys/354-mac80211-minstrel_ht-reduce-fluctuations-in-rate-pro.patch b/package/kernel/mac80211/patches/subsys/354-mac80211-minstrel_ht-reduce-fluctuations-in-rate-pro.patch
new file mode 100644
index 0000000000..16bcbc2ef9
--- /dev/null
+++ b/package/kernel/mac80211/patches/subsys/354-mac80211-minstrel_ht-reduce-fluctuations-in-rate-pro.patch
@@ -0,0 +1,30 @@
+From: Felix Fietkau <nbd@nbd.name>
+Date: Sat, 6 Feb 2021 16:08:01 +0100
+Subject: [PATCH] mac80211: minstrel_ht: reduce fluctuations in rate
+ probability stats
+
+In some scenarios when there is a lot of fluctuation in packet error rates,
+rate switching can be amplified when the statistics get skewed by time slots
+with very few tries.
+Make the input data to the moving average more smooth by adding the
+success/attempts count from the last stats window as well. This has the
+advantage of smoothing the data without introducing any extra lag to sampling
+rates.
+This significantly improves rate stability on a strong test link subjected to
+periodic noise bursts generated with a SDR
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+
+--- a/net/mac80211/rc80211_minstrel_ht.c
++++ b/net/mac80211/rc80211_minstrel_ht.c
+@@ -700,7 +700,8 @@ minstrel_ht_calc_rate_stats(struct minst
+ 	unsigned int cur_prob;
+ 
+ 	if (unlikely(mrs->attempts > 0)) {
+-		cur_prob = MINSTREL_FRAC(mrs->success, mrs->attempts);
++		cur_prob = MINSTREL_FRAC(mrs->success + mrs->last_success,
++					 mrs->attempts + mrs->last_attempts);
+ 		minstrel_filter_avg_add(&mrs->prob_avg,
+ 					&mrs->prob_avg_1, cur_prob);
+ 		mrs->att_hist += mrs->attempts;
diff --git a/package/kernel/mac80211/patches/subsys/355-mac80211-minstrel_ht-rework-rate-downgrade-code-and-.patch b/package/kernel/mac80211/patches/subsys/355-mac80211-minstrel_ht-rework-rate-downgrade-code-and-.patch
new file mode 100644
index 0000000000..aec2e07781
--- /dev/null
+++ b/package/kernel/mac80211/patches/subsys/355-mac80211-minstrel_ht-rework-rate-downgrade-code-and-.patch
@@ -0,0 +1,151 @@
+From: Felix Fietkau <nbd@nbd.name>
+Date: Sat, 6 Feb 2021 16:33:14 +0100
+Subject: [PATCH] mac80211: minstrel_ht: rework rate downgrade code and
+ max_prob rate selection
+
+The current fallback code for fast rate switching on potentially failing rates
+is triggering too often if there is some strong noise on the channel. This can
+lead to wild fluctuations in the rate selection.
+Additionally, switching down to max_prob_rate can create a significant gap down
+in throughput, especially when using only 2 spatial streams, because max_prob_rate
+is limited to using fewer streams than the max_tp rates.
+In order to improve throughput without reducing reliability too much, use the
+rate downgrade code for the max_prob_rate only, and allow the non-downgraded
+max_prob_rate to use as many spatial streams as the max_tp rates
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+
+--- a/net/mac80211/rc80211_minstrel_ht.c
++++ b/net/mac80211/rc80211_minstrel_ht.c
+@@ -511,6 +511,14 @@ minstrel_ht_set_best_prob_rate(struct mi
+ 	int cur_tp_avg, cur_group, cur_idx;
+ 	int max_gpr_group, max_gpr_idx;
+ 	int max_gpr_tp_avg, max_gpr_prob;
++	int min_dur;
++
++	min_dur = max(minstrel_get_duration(mi->max_tp_rate[0]),
++		      minstrel_get_duration(mi->max_tp_rate[1]));
++
++	/* make the rate at least 18% slower than max tp rates */
++	if (minstrel_get_duration(index) <= min_dur * 19 / 16)
++		return;
+ 
+ 	cur_group = MI_RATE_GROUP(index);
+ 	cur_idx = MI_RATE_IDX(index);
+@@ -532,11 +540,6 @@ minstrel_ht_set_best_prob_rate(struct mi
+ 	    !minstrel_ht_is_legacy_group(max_tp_group))
+ 		return;
+ 
+-	/* skip rates faster than max tp rate with lower prob */
+-	if (minstrel_get_duration(mi->max_tp_rate[0]) > minstrel_get_duration(index) &&
+-	    mrs->prob_avg < max_tp_prob)
+-		return;
+-
+ 	max_gpr_group = MI_RATE_GROUP(mg->max_group_prob_rate);
+ 	max_gpr_idx = MI_RATE_IDX(mg->max_group_prob_rate);
+ 	max_gpr_prob = mi->groups[max_gpr_group].rates[max_gpr_idx].prob_avg;
+@@ -594,40 +597,6 @@ minstrel_ht_assign_best_tp_rates(struct
+ 
+ }
+ 
+-/*
+- * Try to increase robustness of max_prob rate by decrease number of
+- * streams if possible.
+- */
+-static inline void
+-minstrel_ht_prob_rate_reduce_streams(struct minstrel_ht_sta *mi)
+-{
+-	struct minstrel_mcs_group_data *mg;
+-	int tmp_max_streams, group, tmp_idx, tmp_prob;
+-	int tmp_tp = 0;
+-
+-	if (!mi->sta->ht_cap.ht_supported)
+-		return;
+-
+-	group = MI_RATE_GROUP(mi->max_tp_rate[0]);
+-	tmp_max_streams = minstrel_mcs_groups[group].streams;
+-	for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) {
+-		mg = &mi->groups[group];
+-		if (!mi->supported[group] || group == MINSTREL_CCK_GROUP)
+-			continue;
+-
+-		tmp_idx = MI_RATE_IDX(mg->max_group_prob_rate);
+-		tmp_prob = mi->groups[group].rates[tmp_idx].prob_avg;
+-
+-		if (tmp_tp < minstrel_ht_get_tp_avg(mi, group, tmp_idx, tmp_prob) &&
+-		    (minstrel_mcs_groups[group].streams < tmp_max_streams)) {
+-			mi->max_prob_rate = mg->max_group_prob_rate;
+-			tmp_tp = minstrel_ht_get_tp_avg(mi, group,
+-							tmp_idx,
+-							tmp_prob);
+-		}
+-	}
+-}
+-
+ static u16
+ __minstrel_ht_get_sample_rate(struct minstrel_ht_sta *mi,
+ 			      enum minstrel_sample_type type)
+@@ -1111,8 +1080,6 @@ minstrel_ht_update_stats(struct minstrel
+ 
+ 	mi->max_prob_rate = tmp_max_prob_rate;
+ 
+-	/* Try to increase robustness of max_prob_rate*/
+-	minstrel_ht_prob_rate_reduce_streams(mi);
+ 	minstrel_ht_refill_sample_rates(mi);
+ 
+ #ifdef CPTCFG_MAC80211_DEBUGFS
+@@ -1157,7 +1124,7 @@ minstrel_ht_txstat_valid(struct minstrel
+ }
+ 
+ static void
+-minstrel_downgrade_rate(struct minstrel_ht_sta *mi, u16 *idx, bool primary)
++minstrel_downgrade_prob_rate(struct minstrel_ht_sta *mi, u16 *idx)
+ {
+ 	int group, orig_group;
+ 
+@@ -1172,11 +1139,7 @@ minstrel_downgrade_rate(struct minstrel_
+ 		    minstrel_mcs_groups[orig_group].streams)
+ 			continue;
+ 
+-		if (primary)
+-			*idx = mi->groups[group].max_group_tp_rate[0];
+-		else
+-			*idx = mi->groups[group].max_group_tp_rate[1];
+-		break;
++		*idx = mi->groups[group].max_group_prob_rate;
+ 	}
+ }
+ 
+@@ -1210,7 +1173,7 @@ minstrel_ht_tx_status(void *priv, struct
+ 	struct ieee80211_tx_info *info = st->info;
+ 	struct minstrel_ht_sta *mi = priv_sta;
+ 	struct ieee80211_tx_rate *ar = info->status.rates;
+-	struct minstrel_rate_stats *rate, *rate2;
++	struct minstrel_rate_stats *rate;
+ 	struct minstrel_priv *mp = priv;
+ 	u32 update_interval = mp->update_interval;
+ 	bool last, update = false;
+@@ -1256,18 +1219,13 @@ minstrel_ht_tx_status(void *priv, struct
+ 	/*
+ 	 * check for sudden death of spatial multiplexing,
+ 	 * downgrade to a lower number of streams if necessary.
++	 * only do this for the max_prob_rate to prevent spurious
++	 * rate fluctuations when the link changes suddenly
+ 	 */
+-	rate = minstrel_get_ratestats(mi, mi->max_tp_rate[0]);
++	rate = minstrel_get_ratestats(mi, mi->max_prob_rate);
+ 	if (rate->attempts > 30 &&
+ 	    rate->success < rate->attempts / 4) {
+-		minstrel_downgrade_rate(mi, &mi->max_tp_rate[0], true);
+-		update = true;
+-	}
+-
+-	rate2 = minstrel_get_ratestats(mi, mi->max_tp_rate[1]);
+-	if (rate2->attempts > 30 &&
+-	    rate2->success < rate2->attempts / 4) {
+-		minstrel_downgrade_rate(mi, &mi->max_tp_rate[1], false);
++		minstrel_downgrade_prob_rate(mi, &mi->max_prob_rate);
+ 		update = true;
+ 	}
+ }
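
To illustrate the first patch (354): the snippet below is a standalone sketch, not kernel code. It re-defines the MINSTREL_FRAC/MINSTREL_TRUNC fixed-point helpers locally (a MINSTREL_SCALE of 12 is an assumption mirroring rc80211_minstrel_ht.h) and uses made-up success/attempt counts to show how folding the previous stats window into the ratio damps a sparse, unlucky slot before it reaches the moving average.

/* Standalone sketch only; MINSTREL_SCALE/MINSTREL_FRAC are re-defined here
 * to mirror the kernel's fixed-point helpers (assumed scale of 12). */
#include <stdio.h>

#define MINSTREL_SCALE 12
#define MINSTREL_FRAC(val, div) (((val) << MINSTREL_SCALE) / (div))
#define MINSTREL_TRUNC(val)     ((val) >> MINSTREL_SCALE)

int main(void)
{
	/* hypothetical stats: a busy previous window, a sparse current one */
	unsigned int last_success = 90, last_attempts = 100;
	unsigned int success = 0, attempts = 2;	/* two tries, both failed */

	/* old input to the moving average: 0/2 -> 0%, a large downward jolt */
	unsigned int old_prob = MINSTREL_FRAC(success, attempts);

	/* new input: (0 + 90) / (2 + 100) -> ~88%, much smoother */
	unsigned int new_prob = MINSTREL_FRAC(success + last_success,
					      attempts + last_attempts);

	printf("old cur_prob: %u%%\n", MINSTREL_TRUNC(old_prob * 100));
	printf("new cur_prob: %u%%\n", MINSTREL_TRUNC(new_prob * 100));
	return 0;
}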
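
For the second patch (355), the new max_prob_rate selection rule can be restated in isolation. The helper below is hypothetical (the real check lives inline in minstrel_ht_set_best_prob_rate); it only demonstrates the 19/16 duration threshold, i.e. a fallback candidate must cost at least ~18.75% more airtime than the slower of the two max_tp rates.

/* Hypothetical restatement of the new duration check; durations are
 * per-frame airtime, so a larger duration means a slower rate. */
#include <stdbool.h>
#include <stdio.h>

static bool prob_rate_slow_enough(unsigned int cand_dur,
				  unsigned int tp0_dur, unsigned int tp1_dur)
{
	/* slower of the two max_tp rates, as in the patch's min_dur */
	unsigned int min_dur = tp0_dur > tp1_dur ? tp0_dur : tp1_dur;

	/* accept only candidates taking more than 19/16 (~1.19x) of that airtime */
	return cand_dur > min_dur * 19 / 16;
}

int main(void)
{
	/* made-up duration units */
	printf("%d\n", prob_rate_slow_enough(1100, 1000, 960)); /* 0: too close to max_tp */
	printf("%d\n", prob_rate_slow_enough(1300, 1000, 960)); /* 1: ~30% slower, eligible */
	return 0;
}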
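
Likewise, the "sudden death" trigger that patch 355 now applies only to max_prob_rate reduces to a simple predicate; the function name below is made up for illustration and is not part of the patch.

/* Illustration only: the downgrade trigger kept by the patch, now applied
 * to max_prob_rate instead of the two max_tp rates. */
#include <stdbool.h>
#include <stdio.h>

static bool sudden_death(unsigned int attempts, unsigned int success)
{
	/* more than 30 attempts with under 25% success */
	return attempts > 30 && success < attempts / 4;
}

int main(void)
{
	printf("%d\n", sudden_death(40, 5));	/* 1: 12.5% success -> downgrade */
	printf("%d\n", sudden_death(40, 15));	/* 0: 37.5% success -> keep rate */
	return 0;
}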