path: root/package/kernel/mac80211/patches/subsys/348-mac80211-minstrel_ht-reduce-the-need-to-sample-slowe.patch
From: Felix Fietkau <nbd@nbd.name>
Date: Fri, 22 Jan 2021 19:24:59 +0100
Subject: [PATCH] mac80211: minstrel_ht: reduce the need to sample slower
 rates

In order to be able to fall back to lower rates more gracefully, without too
much throughput fluctuation, initialize all untested rates below tested ones
to the maximum probability of the higher rates.
Usually this leads to untested lower rates being initialized with a
probability value of 100%, making them better fallback candidates without
having to rely on random probing.

Signed-off-by: Felix Fietkau <nbd@nbd.name>
---
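A rough standalone sketch of what the reversed per-group loop in the hunks
below effectively does (not part of the patch; prob[], attempts[], n_rates
and init_untested_rates() are illustrative stand-ins for mrs->prob_avg,
mrs->att_hist and MCS_GROUP_RATES in minstrel_ht_update_stats()):

#include <stdint.h>

/*
 * Walk a group's rates from fastest (highest index) to slowest.
 * Tested rates (attempts[i] != 0) raise last_prob to the best success
 * probability seen so far; untested rates inherit that value, so a slow
 * rate sitting below a well-performing fast rate starts out near 100%
 * instead of 0.
 */
static void init_untested_rates(uint16_t prob[], const uint32_t attempts[],
				int n_rates)
{
	uint16_t last_prob = 0;
	int i;

	for (i = n_rates - 1; i >= 0; i--) {
		if (attempts[i]) {
			if (prob[i] > last_prob)
				last_prob = prob[i];
		} else if (last_prob > prob[i]) {
			prob[i] = last_prob;
		}
	}
}

In the patch itself this logic sits directly after minstrel_ht_calc_rate_stats()
and uses the kernel's max() helper; the sketch only illustrates why untested
slow rates end up with the best probability of the faster, tested rates above
them.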

--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -791,14 +791,11 @@ minstrel_ht_calc_rate_stats(struct minst
 	unsigned int cur_prob;
 
 	if (unlikely(mrs->attempts > 0)) {
-		mrs->sample_skipped = 0;
 		cur_prob = MINSTREL_FRAC(mrs->success, mrs->attempts);
 		minstrel_filter_avg_add(&mrs->prob_avg,
 					&mrs->prob_avg_1, cur_prob);
 		mrs->att_hist += mrs->attempts;
 		mrs->succ_hist += mrs->success;
-	} else {
-		mrs->sample_skipped++;
 	}
 
 	mrs->last_success = mrs->success;
@@ -851,7 +848,6 @@ minstrel_ht_update_stats(struct minstrel
 		mi->ampdu_packets = 0;
 	}
 
-	mi->sample_slow = 0;
 	mi->sample_count = 0;
 
 	memset(tmp_mcs_tp_rate, 0, sizeof(tmp_mcs_tp_rate));
@@ -883,6 +879,7 @@ minstrel_ht_update_stats(struct minstrel
 	/* Find best rate sets within all MCS groups*/
 	for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) {
 		u16 *tp_rate = tmp_mcs_tp_rate;
+		u16 last_prob = 0;
 
 		mg = &mi->groups[group];
 		if (!mi->supported[group])
@@ -897,7 +894,7 @@ minstrel_ht_update_stats(struct minstrel
 		if (group == MINSTREL_CCK_GROUP && ht_supported)
 			tp_rate = tmp_legacy_tp_rate;
 
-		for (i = 0; i < MCS_GROUP_RATES; i++) {
+		for (i = MCS_GROUP_RATES - 1; i >= 0; i--) {
 			if (!(mi->supported[group] & BIT(i)))
 				continue;
 
@@ -906,6 +903,11 @@ minstrel_ht_update_stats(struct minstrel
 			mrs = &mg->rates[i];
 			mrs->retry_updated = false;
 			minstrel_ht_calc_rate_stats(mp, mrs);
+
+			if (mrs->att_hist)
+				last_prob = max(last_prob, mrs->prob_avg);
+			else
+				mrs->prob_avg = max(last_prob, mrs->prob_avg);
 			cur_prob = mrs->prob_avg;
 
 			if (minstrel_ht_get_tp_avg(mi, group, i, cur_prob) == 0)
@@ -1470,13 +1472,9 @@ minstrel_get_sample_rate(struct minstrel
 	if (sample_dur >= minstrel_get_duration(tp_rate2) &&
 	    (cur_max_tp_streams - 1 <
 	     minstrel_mcs_groups[sample_group].streams ||
-	     sample_dur >= minstrel_get_duration(mi->max_prob_rate))) {
-		if (mrs->sample_skipped < 20)
+	     sample_dur >= minstrel_get_duration(mi->max_prob_rate)))
 			return -1;
 
-		if (mi->sample_slow++ > 2)
-			return -1;
-	}
 	mi->sample_tries--;
 
 	return sample_idx;
--- a/net/mac80211/rc80211_minstrel_ht.h
+++ b/net/mac80211/rc80211_minstrel_ht.h
@@ -123,7 +123,6 @@ struct minstrel_rate_stats {
 	u8 retry_count;
 	u8 retry_count_rtscts;
 
-	u8 sample_skipped;
 	bool retry_updated;
 };
 
@@ -179,7 +178,6 @@ struct minstrel_ht_sta {
 	u8 sample_wait;
 	u8 sample_tries;
 	u8 sample_count;
-	u8 sample_slow;
 
 	enum minstrel_sample_mode sample_mode;
 	u16 sample_rate;