[openwrt/openwrt] mac80211: add significant minstrel_ht performance improvements

LEDE Commits lede-commits at lists.infradead.org
Mon Jan 25 06:19:46 EST 2021


nbd pushed a commit to openwrt/openwrt.git, branch master:
https://git.openwrt.org/37752336bdfb361d597b316cd5bb9d8dc6ac1762

commit 37752336bdfb361d597b316cd5bb9d8dc6ac1762
Author: Felix Fietkau <nbd at nbd.name>
AuthorDate: Sat Jan 23 00:17:31 2021 +0100

    mac80211: add significant minstrel_ht performance improvements
    
    Completely redesign the rate sampling approach
    
    Signed-off-by: Felix Fietkau <nbd at nbd.name>
---
 ...nstrel_ht-use-bitfields-to-encode-rate-in.patch | 409 +++++++++++
 ...nstrel_ht-update-total-packets-counter-in.patch |  54 ++
 ...nstrel_ht-reduce-the-need-to-sample-slowe.patch | 102 +++
 ...nstrel_ht-significantly-redesign-the-rate.patch | 767 +++++++++++++++++++++
 ...instrel_ht-show-sampling-rates-in-debugfs.patch |  59 ++
 ...nstrel_ht-remove-sample-rate-switching-co.patch | 279 ++++++++
 6 files changed, 1670 insertions(+)

diff --git a/package/kernel/mac80211/patches/subsys/346-mac80211-minstrel_ht-use-bitfields-to-encode-rate-in.patch b/package/kernel/mac80211/patches/subsys/346-mac80211-minstrel_ht-use-bitfields-to-encode-rate-in.patch
new file mode 100644
index 0000000000..a17725d69b
--- /dev/null
+++ b/package/kernel/mac80211/patches/subsys/346-mac80211-minstrel_ht-use-bitfields-to-encode-rate-in.patch
@@ -0,0 +1,409 @@
+From: Felix Fietkau <nbd at nbd.name>
+Date: Thu, 21 Jan 2021 18:29:30 +0100
+Subject: [PATCH] mac80211: minstrel_ht: use bitfields to encode rate
+ indexes
+
+Get rid of a lot of divisions and modulo operations
+Reduces code size and improves performance
+
+Signed-off-by: Felix Fietkau <nbd at nbd.name>
+---
+
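For illustration (not part of the patch), here is a minimal userspace sketch of the new encoding. The mask layout mirrors MI_RATE_IDX_MASK/MI_RATE_GROUP_MASK from the header change below, while the helper macros are simplified stand-ins for the kernel's GENMASK()/FIELD_PREP()/FIELD_GET(). It shows how packing the group and the per-group index into one u16 lets both parts be recovered with a shift and a mask instead of a division and a modulo.

/* Minimal userspace sketch of the rate-index encoding; the macros are
 * simplified stand-ins for the kernel helpers, not the in-tree code. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define MCS_GROUP_RATES		10

#define MI_RATE_IDX_MASK	0x000f	/* bits 3:0, like GENMASK(3, 0) */
#define MI_RATE_GROUP_MASK	0xfff0	/* bits 15:4, like GENMASK(15, 4) */

/* pack a (group, index) pair into one u16 rate identifier */
#define MI_RATE(_group, _idx) \
	((uint16_t)(((_group) << 4) | ((_idx) & MI_RATE_IDX_MASK)))
/* unpack without any division or modulo */
#define MI_RATE_IDX(_rate)	((_rate) & MI_RATE_IDX_MASK)
#define MI_RATE_GROUP(_rate)	((uint16_t)(_rate) >> 4)

int main(void)
{
	for (unsigned int group = 0; group < 12; group++) {
		for (unsigned int idx = 0; idx < MCS_GROUP_RATES; idx++) {
			uint16_t rate = MI_RATE(group, idx);

			/* old encoding: group * MCS_GROUP_RATES + idx,
			 * decoded with / and %; the bitfield form carries
			 * the same information using only shifts and masks */
			assert(MI_RATE_GROUP(rate) == group);
			assert(MI_RATE_IDX(rate) == idx);
		}
	}
	printf("bitfield encode/decode round-trips for all groups\n");
	return 0;
}
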
+--- a/net/mac80211/rc80211_minstrel_ht.c
++++ b/net/mac80211/rc80211_minstrel_ht.c
+@@ -379,14 +379,14 @@ out:
+ static inline struct minstrel_rate_stats *
+ minstrel_get_ratestats(struct minstrel_ht_sta *mi, int index)
+ {
+-	return &mi->groups[index / MCS_GROUP_RATES].rates[index % MCS_GROUP_RATES];
++	return &mi->groups[MI_RATE_GROUP(index)].rates[MI_RATE_IDX(index)];
+ }
+ 
+-static inline int
+-minstrel_get_duration(int index)
++static inline int minstrel_get_duration(int index)
+ {
+-	const struct mcs_group *group = &minstrel_mcs_groups[index / MCS_GROUP_RATES];
+-	unsigned int duration = group->duration[index % MCS_GROUP_RATES];
++	const struct mcs_group *group = &minstrel_mcs_groups[MI_RATE_GROUP(index)];
++	unsigned int duration = group->duration[MI_RATE_IDX(index)];
++
+ 	return duration << group->shift;
+ }
+ 
+@@ -398,7 +398,7 @@ minstrel_ht_avg_ampdu_len(struct minstre
+ 	if (mi->avg_ampdu_len)
+ 		return MINSTREL_TRUNC(mi->avg_ampdu_len);
+ 
+-	if (minstrel_ht_is_legacy_group(mi->max_tp_rate[0] / MCS_GROUP_RATES))
++	if (minstrel_ht_is_legacy_group(MI_RATE_GROUP(mi->max_tp_rate[0])))
+ 		return 1;
+ 
+ 	duration = minstrel_get_duration(mi->max_tp_rate[0]);
+@@ -465,14 +465,14 @@ minstrel_ht_sort_best_tp_rates(struct mi
+ 	int tmp_group, tmp_idx, tmp_tp_avg, tmp_prob;
+ 	int j = MAX_THR_RATES;
+ 
+-	cur_group = index / MCS_GROUP_RATES;
+-	cur_idx = index  % MCS_GROUP_RATES;
++	cur_group = MI_RATE_GROUP(index);
++	cur_idx = MI_RATE_IDX(index);
+ 	cur_prob = mi->groups[cur_group].rates[cur_idx].prob_avg;
+ 	cur_tp_avg = minstrel_ht_get_tp_avg(mi, cur_group, cur_idx, cur_prob);
+ 
+ 	do {
+-		tmp_group = tp_list[j - 1] / MCS_GROUP_RATES;
+-		tmp_idx = tp_list[j - 1] % MCS_GROUP_RATES;
++		tmp_group = MI_RATE_GROUP(tp_list[j - 1]);
++		tmp_idx = MI_RATE_IDX(tp_list[j - 1]);
+ 		tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_avg;
+ 		tmp_tp_avg = minstrel_ht_get_tp_avg(mi, tmp_group, tmp_idx,
+ 						    tmp_prob);
+@@ -504,23 +504,23 @@ minstrel_ht_set_best_prob_rate(struct mi
+ 	int max_gpr_group, max_gpr_idx;
+ 	int max_gpr_tp_avg, max_gpr_prob;
+ 
+-	cur_group = index / MCS_GROUP_RATES;
+-	cur_idx = index % MCS_GROUP_RATES;
+-	mg = &mi->groups[index / MCS_GROUP_RATES];
+-	mrs = &mg->rates[index % MCS_GROUP_RATES];
++	cur_group = MI_RATE_GROUP(index);
++	cur_idx = MI_RATE_IDX(index);
++	mg = &mi->groups[cur_group];
++	mrs = &mg->rates[cur_idx];
+ 
+-	tmp_group = *dest / MCS_GROUP_RATES;
+-	tmp_idx = *dest % MCS_GROUP_RATES;
++	tmp_group = MI_RATE_GROUP(*dest);
++	tmp_idx = MI_RATE_IDX(*dest);
+ 	tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_avg;
+ 	tmp_tp_avg = minstrel_ht_get_tp_avg(mi, tmp_group, tmp_idx, tmp_prob);
+ 
+ 	/* if max_tp_rate[0] is from MCS_GROUP max_prob_rate get selected from
+ 	 * MCS_GROUP as well as CCK_GROUP rates do not allow aggregation */
+-	max_tp_group = mi->max_tp_rate[0] / MCS_GROUP_RATES;
+-	max_tp_idx = mi->max_tp_rate[0] % MCS_GROUP_RATES;
++	max_tp_group = MI_RATE_GROUP(mi->max_tp_rate[0]);
++	max_tp_idx = MI_RATE_IDX(mi->max_tp_rate[0]);
+ 	max_tp_prob = mi->groups[max_tp_group].rates[max_tp_idx].prob_avg;
+ 
+-	if (minstrel_ht_is_legacy_group(index / MCS_GROUP_RATES) &&
++	if (minstrel_ht_is_legacy_group(MI_RATE_GROUP(index)) &&
+ 	    !minstrel_ht_is_legacy_group(max_tp_group))
+ 		return;
+ 
+@@ -529,8 +529,8 @@ minstrel_ht_set_best_prob_rate(struct mi
+ 	    mrs->prob_avg < max_tp_prob)
+ 		return;
+ 
+-	max_gpr_group = mg->max_group_prob_rate / MCS_GROUP_RATES;
+-	max_gpr_idx = mg->max_group_prob_rate % MCS_GROUP_RATES;
++	max_gpr_group = MI_RATE_GROUP(mg->max_group_prob_rate);
++	max_gpr_idx = MI_RATE_IDX(mg->max_group_prob_rate);
+ 	max_gpr_prob = mi->groups[max_gpr_group].rates[max_gpr_idx].prob_avg;
+ 
+ 	if (mrs->prob_avg > MINSTREL_FRAC(75, 100)) {
+@@ -567,13 +567,13 @@ minstrel_ht_assign_best_tp_rates(struct
+ 	unsigned int tmp_group, tmp_idx, tmp_cck_tp, tmp_mcs_tp, tmp_prob;
+ 	int i;
+ 
+-	tmp_group = tmp_legacy_tp_rate[0] / MCS_GROUP_RATES;
+-	tmp_idx = tmp_legacy_tp_rate[0] % MCS_GROUP_RATES;
++	tmp_group = MI_RATE_GROUP(tmp_legacy_tp_rate[0]);
++	tmp_idx = MI_RATE_IDX(tmp_legacy_tp_rate[0]);
+ 	tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_avg;
+ 	tmp_cck_tp = minstrel_ht_get_tp_avg(mi, tmp_group, tmp_idx, tmp_prob);
+ 
+-	tmp_group = tmp_mcs_tp_rate[0] / MCS_GROUP_RATES;
+-	tmp_idx = tmp_mcs_tp_rate[0] % MCS_GROUP_RATES;
++	tmp_group = MI_RATE_GROUP(tmp_mcs_tp_rate[0]);
++	tmp_idx = MI_RATE_IDX(tmp_mcs_tp_rate[0]);
+ 	tmp_prob = mi->groups[tmp_group].rates[tmp_idx].prob_avg;
+ 	tmp_mcs_tp = minstrel_ht_get_tp_avg(mi, tmp_group, tmp_idx, tmp_prob);
+ 
+@@ -600,14 +600,14 @@ minstrel_ht_prob_rate_reduce_streams(str
+ 	if (!mi->sta->ht_cap.ht_supported)
+ 		return;
+ 
+-	tmp_max_streams = minstrel_mcs_groups[mi->max_tp_rate[0] /
+-			  MCS_GROUP_RATES].streams;
++	group = MI_RATE_GROUP(mi->max_tp_rate[0]);
++	tmp_max_streams = minstrel_mcs_groups[group].streams;
+ 	for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) {
+ 		mg = &mi->groups[group];
+ 		if (!mi->supported[group] || group == MINSTREL_CCK_GROUP)
+ 			continue;
+ 
+-		tmp_idx = mg->max_group_prob_rate % MCS_GROUP_RATES;
++		tmp_idx = MI_RATE_IDX(mg->max_group_prob_rate);
+ 		tmp_prob = mi->groups[group].rates[tmp_idx].prob_avg;
+ 
+ 		if (tmp_tp < minstrel_ht_get_tp_avg(mi, group, tmp_idx, tmp_prob) &&
+@@ -644,8 +644,8 @@ minstrel_ht_find_probe_rates(struct mins
+ 	int i, g, max_dur;
+ 	int tp_idx;
+ 
+-	tp_group = &minstrel_mcs_groups[mi->max_tp_rate[0] / MCS_GROUP_RATES];
+-	tp_idx = mi->max_tp_rate[0] % MCS_GROUP_RATES;
++	tp_group = &minstrel_mcs_groups[MI_RATE_GROUP(mi->max_tp_rate[0])];
++	tp_idx = MI_RATE_IDX(mi->max_tp_rate[0]);
+ 
+ 	max_dur = minstrel_get_duration(mi->max_tp_rate[0]);
+ 	if (faster_rate)
+@@ -670,7 +670,7 @@ minstrel_ht_find_probe_rates(struct mins
+ 			if ((group->duration[i] << group->shift) > max_dur)
+ 				continue;
+ 
+-			idx = g * MCS_GROUP_RATES + i;
++			idx = MI_RATE(g, i);
+ 			if (idx == mi->max_tp_rate[0])
+ 				continue;
+ 
+@@ -712,10 +712,10 @@ minstrel_ht_rate_sample_switch(struct mi
+ 
+ 	/* If no suitable rate was found, try to pick the next one in the group */
+ 	if (!n_rates) {
+-		int g_idx = mi->max_tp_rate[0] / MCS_GROUP_RATES;
++		int g_idx = MI_RATE_GROUP(mi->max_tp_rate[0]);
+ 		u16 supported = mi->supported[g_idx];
+ 
+-		supported >>= mi->max_tp_rate[0] % MCS_GROUP_RATES;
++		supported >>= MI_RATE_IDX(mi->max_tp_rate[0]);
+ 		for (i = 0; supported; supported >>= 1, i++) {
+ 			if (!(supported & 1))
+ 				continue;
+@@ -856,22 +856,26 @@ minstrel_ht_update_stats(struct minstrel
+ 
+ 	memset(tmp_mcs_tp_rate, 0, sizeof(tmp_mcs_tp_rate));
+ 	memset(tmp_legacy_tp_rate, 0, sizeof(tmp_legacy_tp_rate));
++
+ 	if (mi->supported[MINSTREL_CCK_GROUP])
+-		for (j = 0; j < ARRAY_SIZE(tmp_legacy_tp_rate); j++)
+-			tmp_legacy_tp_rate[j] = MINSTREL_CCK_GROUP * MCS_GROUP_RATES;
++		group = MINSTREL_CCK_GROUP;
+ 	else if (mi->supported[MINSTREL_OFDM_GROUP])
+-		for (j = 0; j < ARRAY_SIZE(tmp_legacy_tp_rate); j++)
+-			tmp_legacy_tp_rate[j] = MINSTREL_OFDM_GROUP * MCS_GROUP_RATES;
++		group = MINSTREL_OFDM_GROUP;
++
++	index = MI_RATE(group, 0);
++	for (j = 0; j < ARRAY_SIZE(tmp_legacy_tp_rate); j++)
++		tmp_legacy_tp_rate[j] = index;
+ 
+ 	if (mi->supported[MINSTREL_VHT_GROUP_0])
+-		index = MINSTREL_VHT_GROUP_0 * MCS_GROUP_RATES;
++		group = MINSTREL_VHT_GROUP_0;
+ 	else if (ht_supported)
+-		index = MINSTREL_HT_GROUP_0 * MCS_GROUP_RATES;
++		group = MINSTREL_HT_GROUP_0;
+ 	else if (mi->supported[MINSTREL_CCK_GROUP])
+-		index = MINSTREL_CCK_GROUP * MCS_GROUP_RATES;
++		group = MINSTREL_CCK_GROUP;
+ 	else
+-		index = MINSTREL_OFDM_GROUP * MCS_GROUP_RATES;
++		group = MINSTREL_OFDM_GROUP;
+ 
++	index = MI_RATE(group, 0);
+ 	tmp_max_prob_rate = index;
+ 	for (j = 0; j < ARRAY_SIZE(tmp_mcs_tp_rate); j++)
+ 		tmp_mcs_tp_rate[j] = index;
+@@ -888,7 +892,7 @@ minstrel_ht_update_stats(struct minstrel
+ 
+ 		/* (re)Initialize group rate indexes */
+ 		for(j = 0; j < MAX_THR_RATES; j++)
+-			tmp_group_tp_rate[j] = MCS_GROUP_RATES * group;
++			tmp_group_tp_rate[j] = MI_RATE(group, 0);
+ 
+ 		if (group == MINSTREL_CCK_GROUP && ht_supported)
+ 			tp_rate = tmp_legacy_tp_rate;
+@@ -897,7 +901,7 @@ minstrel_ht_update_stats(struct minstrel
+ 			if (!(mi->supported[group] & BIT(i)))
+ 				continue;
+ 
+-			index = MCS_GROUP_RATES * group + i;
++			index = MI_RATE(group, i);
+ 
+ 			mrs = &mg->rates[i];
+ 			mrs->retry_updated = false;
+@@ -929,13 +933,13 @@ minstrel_ht_update_stats(struct minstrel
+ 			continue;
+ 
+ 		mg = &mi->groups[group];
+-		mg->max_group_prob_rate = MCS_GROUP_RATES * group;
++		mg->max_group_prob_rate = MI_RATE(group, 0);
+ 
+ 		for (i = 0; i < MCS_GROUP_RATES; i++) {
+ 			if (!(mi->supported[group] & BIT(i)))
+ 				continue;
+ 
+-			index = MCS_GROUP_RATES * group + i;
++			index = MI_RATE(group, i);
+ 
+ 			/* Find max probability rate per group and global */
+ 			minstrel_ht_set_best_prob_rate(mi, &tmp_max_prob_rate,
+@@ -1022,7 +1026,7 @@ minstrel_downgrade_rate(struct minstrel_
+ {
+ 	int group, orig_group;
+ 
+-	orig_group = group = *idx / MCS_GROUP_RATES;
++	orig_group = group = MI_RATE_GROUP(*idx);
+ 	while (group > 0) {
+ 		group--;
+ 
+@@ -1206,7 +1210,7 @@ minstrel_calc_retransmit(struct minstrel
+ 	ctime += (t_slot * cw) >> 1;
+ 	cw = min((cw << 1) | 1, mp->cw_max);
+ 
+-	if (minstrel_ht_is_legacy_group(index / MCS_GROUP_RATES)) {
++	if (minstrel_ht_is_legacy_group(MI_RATE_GROUP(index))) {
+ 		overhead = mi->overhead_legacy;
+ 		overhead_rtscts = mi->overhead_legacy_rtscts;
+ 	} else {
+@@ -1239,7 +1243,7 @@ static void
+ minstrel_ht_set_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
+                      struct ieee80211_sta_rates *ratetbl, int offset, int index)
+ {
+-	int group_idx = index / MCS_GROUP_RATES;
++	int group_idx = MI_RATE_GROUP(index);
+ 	const struct mcs_group *group = &minstrel_mcs_groups[group_idx];
+ 	struct minstrel_rate_stats *mrs;
+ 	u8 idx;
+@@ -1259,7 +1263,7 @@ minstrel_ht_set_rate(struct minstrel_pri
+ 		ratetbl->rate[offset].count_rts = mrs->retry_count_rtscts;
+ 	}
+ 
+-	index %= MCS_GROUP_RATES;
++	index = MI_RATE_IDX(index);
+ 	if (group_idx == MINSTREL_CCK_GROUP)
+ 		idx = mp->cck_rates[index % ARRAY_SIZE(mp->cck_rates)];
+ 	else if (group_idx == MINSTREL_OFDM_GROUP)
+@@ -1289,17 +1293,17 @@ minstrel_ht_set_rate(struct minstrel_pri
+ static inline int
+ minstrel_ht_get_prob_avg(struct minstrel_ht_sta *mi, int rate)
+ {
+-	int group = rate / MCS_GROUP_RATES;
+-	rate %= MCS_GROUP_RATES;
++	int group = MI_RATE_GROUP(rate);
++	rate = MI_RATE_IDX(rate);
+ 	return mi->groups[group].rates[rate].prob_avg;
+ }
+ 
+ static int
+ minstrel_ht_get_max_amsdu_len(struct minstrel_ht_sta *mi)
+ {
+-	int group = mi->max_prob_rate / MCS_GROUP_RATES;
++	int group = MI_RATE_GROUP(mi->max_prob_rate);
+ 	const struct mcs_group *g = &minstrel_mcs_groups[group];
+-	int rate = mi->max_prob_rate % MCS_GROUP_RATES;
++	int rate = MI_RATE_IDX(mi->max_prob_rate);
+ 	unsigned int duration;
+ 
+ 	/* Disable A-MSDU if max_prob_rate is bad */
+@@ -1405,7 +1409,7 @@ minstrel_get_sample_rate(struct minstrel
+ 		return -1;
+ 
+ 	mrs = &mg->rates[sample_idx];
+-	sample_idx += sample_group * MCS_GROUP_RATES;
++	sample_idx += MI_RATE(sample_group, 0);
+ 
+ 	tp_rate1 = mi->max_tp_rate[0];
+ 
+@@ -1455,8 +1459,7 @@ minstrel_get_sample_rate(struct minstrel
+ 	 * if the link is working perfectly.
+ 	 */
+ 
+-	cur_max_tp_streams = minstrel_mcs_groups[tp_rate1 /
+-		MCS_GROUP_RATES].streams;
++	cur_max_tp_streams = minstrel_mcs_groups[MI_RATE_GROUP(tp_rate1)].streams;
+ 	if (sample_dur >= minstrel_get_duration(tp_rate2) &&
+ 	    (cur_max_tp_streams - 1 <
+ 	     minstrel_mcs_groups[sample_group].streams ||
+@@ -1484,7 +1487,7 @@ minstrel_ht_get_rate(void *priv, struct
+ 	int sample_idx;
+ 
+ 	if (!(info->flags & IEEE80211_TX_CTL_AMPDU) &&
+-	    !minstrel_ht_is_legacy_group(mi->max_prob_rate / MCS_GROUP_RATES))
++	    !minstrel_ht_is_legacy_group(MI_RATE_GROUP(mi->max_prob_rate)))
+ 		minstrel_aggr_check(sta, txrc->skb);
+ 
+ 	info->flags |= mi->tx_flags;
+@@ -1512,8 +1515,8 @@ minstrel_ht_get_rate(void *priv, struct
+ 	if (sample_idx < 0)
+ 		return;
+ 
+-	sample_group = &minstrel_mcs_groups[sample_idx / MCS_GROUP_RATES];
+-	sample_idx %= MCS_GROUP_RATES;
++	sample_group = &minstrel_mcs_groups[MI_RATE_GROUP(sample_idx)];
++	sample_idx = MI_RATE_IDX(sample_idx);
+ 
+ 	if (sample_group == &minstrel_mcs_groups[MINSTREL_CCK_GROUP] &&
+ 	    (sample_idx >= 4) != txrc->short_preamble)
+@@ -1529,7 +1532,7 @@ minstrel_ht_get_rate(void *priv, struct
+ 		int idx = sample_idx % ARRAY_SIZE(mp->ofdm_rates[0]);
+ 		rate->idx = mp->ofdm_rates[mi->band][idx];
+ 	} else if (sample_group->flags & IEEE80211_TX_RC_VHT_MCS) {
+-		ieee80211_rate_set_vht(rate, sample_idx % MCS_GROUP_RATES,
++		ieee80211_rate_set_vht(rate, MI_RATE_IDX(sample_idx),
+ 				       sample_group->streams);
+ 	} else {
+ 		rate->idx = sample_idx + (sample_group->streams - 1) * 8;
+@@ -1898,8 +1901,8 @@ static u32 minstrel_ht_get_expected_thro
+ 	struct minstrel_ht_sta *mi = priv_sta;
+ 	int i, j, prob, tp_avg;
+ 
+-	i = mi->max_tp_rate[0] / MCS_GROUP_RATES;
+-	j = mi->max_tp_rate[0] % MCS_GROUP_RATES;
++	i = MI_RATE_GROUP(mi->max_tp_rate[0]);
++	j = MI_RATE_IDX(mi->max_tp_rate[0]);
+ 	prob = mi->groups[i].rates[j].prob_avg;
+ 
+ 	/* convert tp_avg from pkt per second in kbps */
+--- a/net/mac80211/rc80211_minstrel_ht.h
++++ b/net/mac80211/rc80211_minstrel_ht.h
+@@ -6,6 +6,8 @@
+ #ifndef __RC_MINSTREL_HT_H
+ #define __RC_MINSTREL_HT_H
+ 
++#include <linux/bitfield.h>
++
+ /* number of highest throughput rates to consider*/
+ #define MAX_THR_RATES 4
+ #define SAMPLE_COLUMNS	10	/* number of columns in sample table */
+@@ -57,6 +59,17 @@
+ 
+ #define MCS_GROUP_RATES		10
+ 
++#define MI_RATE_IDX_MASK	GENMASK(3, 0)
++#define MI_RATE_GROUP_MASK	GENMASK(15, 4)
++
++#define MI_RATE(_group, _idx)				\
++	(FIELD_PREP(MI_RATE_GROUP_MASK, _group) |	\
++	 FIELD_PREP(MI_RATE_IDX_MASK, _idx))
++
++#define MI_RATE_IDX(_rate) FIELD_GET(MI_RATE_IDX_MASK, _rate)
++#define MI_RATE_GROUP(_rate) FIELD_GET(MI_RATE_GROUP_MASK, _rate)
++
++
+ struct minstrel_priv {
+ 	struct ieee80211_hw *hw;
+ 	bool has_mrr;
+--- a/net/mac80211/rc80211_minstrel_ht_debugfs.c
++++ b/net/mac80211/rc80211_minstrel_ht_debugfs.c
+@@ -56,7 +56,7 @@ minstrel_ht_stats_dump(struct minstrel_h
+ 
+ 	for (j = 0; j < MCS_GROUP_RATES; j++) {
+ 		struct minstrel_rate_stats *mrs = &mi->groups[i].rates[j];
+-		int idx = i * MCS_GROUP_RATES + j;
++		int idx = MI_RATE(i, j);
+ 		unsigned int duration;
+ 
+ 		if (!(mi->supported[i] & BIT(j)))
+@@ -201,7 +201,7 @@ minstrel_ht_stats_csv_dump(struct minstr
+ 
+ 	for (j = 0; j < MCS_GROUP_RATES; j++) {
+ 		struct minstrel_rate_stats *mrs = &mi->groups[i].rates[j];
+-		int idx = i * MCS_GROUP_RATES + j;
++		int idx = MI_RATE(i, j);
+ 		unsigned int duration;
+ 
+ 		if (!(mi->supported[i] & BIT(j)))
diff --git a/package/kernel/mac80211/patches/subsys/347-mac80211-minstrel_ht-update-total-packets-counter-in.patch b/package/kernel/mac80211/patches/subsys/347-mac80211-minstrel_ht-update-total-packets-counter-in.patch
new file mode 100644
index 0000000000..a1cdf99e05
--- /dev/null
+++ b/package/kernel/mac80211/patches/subsys/347-mac80211-minstrel_ht-update-total-packets-counter-in.patch
@@ -0,0 +1,54 @@
+From: Felix Fietkau <nbd at nbd.name>
+Date: Fri, 22 Jan 2021 18:21:13 +0100
+Subject: [PATCH] mac80211: minstrel_ht: update total packets counter in tx
+ status path
+
+Keep the update in one place and prepare for further rework
+
+Signed-off-by: Felix Fietkau <nbd at nbd.name>
+---
+
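As a rough standalone sketch (not part of the patch), the counter handling that moves into the tx status path looks roughly like this; the struct and field names are illustrative stand-ins rather than the kernel's minstrel_ht_sta. Both counters are reset before the addition would overflow, then the whole A-MPDU is accounted at once.

/* Illustrative sketch only; types and names are made up for the example. */
#include <stdbool.h>
#include <stdio.h>

struct sta_counters {
	unsigned int total_packets;
	unsigned int sample_packets;
};

static void account_tx_status(struct sta_counters *c, unsigned int ampdu_len,
			      bool is_probe)
{
	/* wraparound: ~0u is the maximum value of an unsigned int */
	if (c->total_packets >= ~0u - ampdu_len) {
		c->total_packets = 0;
		c->sample_packets = 0;
	}

	c->total_packets += ampdu_len;
	if (is_probe)
		c->sample_packets += ampdu_len;
}

int main(void)
{
	struct sta_counters c = { .total_packets = ~0u - 2 };

	account_tx_status(&c, 4, false);	/* would overflow, reset first */
	printf("total=%u sample=%u\n", c.total_packets, c.sample_packets);
	return 0;
}
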
+--- a/net/mac80211/rc80211_minstrel_ht.c
++++ b/net/mac80211/rc80211_minstrel_ht.c
+@@ -1093,6 +1093,16 @@ minstrel_ht_tx_status(void *priv, struct
+ 		info->status.ampdu_len = 1;
+ 	}
+ 
++	/* wraparound */
++	if (mi->total_packets >= ~0 - info->status.ampdu_len) {
++		mi->total_packets = 0;
++		mi->sample_packets = 0;
++	}
++
++	mi->total_packets += info->status.ampdu_len;
++	if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
++		mi->sample_packets += info->status.ampdu_len;
++
+ 	mi->ampdu_packets++;
+ 	mi->ampdu_len += info->status.ampdu_len;
+ 
+@@ -1104,9 +1114,6 @@ minstrel_ht_tx_status(void *priv, struct
+ 		mi->sample_count--;
+ 	}
+ 
+-	if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)
+-		mi->sample_packets += info->status.ampdu_len;
+-
+ 	if (mi->sample_mode != MINSTREL_SAMPLE_IDLE)
+ 		rate_sample = minstrel_get_ratestats(mi, mi->sample_rate);
+ 
+@@ -1504,14 +1511,6 @@ minstrel_ht_get_rate(void *priv, struct
+ 	else
+ 		sample_idx = minstrel_get_sample_rate(mp, mi);
+ 
+-	mi->total_packets++;
+-
+-	/* wraparound */
+-	if (mi->total_packets == ~0) {
+-		mi->total_packets = 0;
+-		mi->sample_packets = 0;
+-	}
+-
+ 	if (sample_idx < 0)
+ 		return;
+ 
diff --git a/package/kernel/mac80211/patches/subsys/348-mac80211-minstrel_ht-reduce-the-need-to-sample-slowe.patch b/package/kernel/mac80211/patches/subsys/348-mac80211-minstrel_ht-reduce-the-need-to-sample-slowe.patch
new file mode 100644
index 0000000000..4b0ba3dbe5
--- /dev/null
+++ b/package/kernel/mac80211/patches/subsys/348-mac80211-minstrel_ht-reduce-the-need-to-sample-slowe.patch
@@ -0,0 +1,102 @@
+From: Felix Fietkau <nbd at nbd.name>
+Date: Fri, 22 Jan 2021 19:24:59 +0100
+Subject: [PATCH] mac80211: minstrel_ht: reduce the need to sample slower
+ rates
+
+In order to be able to fall back to lower rates more gracefully and without
+too many throughput fluctuations, initialize all untested rates below tested
+ones to the maximum probability of the higher rates.
+Usually this leads to untested lower rates getting initialized with a
+probability value of 100%, making them better candidates for fallback without
+having to rely on random probing.
+
+Signed-off-by: Felix Fietkau <nbd at nbd.name>
+---
+
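A rough userspace sketch of the idea (not taken from the patch itself): walk one group's rates from fastest to slowest, remember the best probability seen among tested rates, and seed every untested rate with it. MINSTREL_FRAC and the per-rate fields are simplified stand-ins for the kernel versions.

/* Illustrative sketch only; simplified stand-ins for the kernel types. */
#include <stdio.h>

#define MCS_GROUP_RATES	10
#define FRAC(a, b)	((a) * 1000 / (b))	/* stand-in for MINSTREL_FRAC */
#define MAX(a, b)	((a) > (b) ? (a) : (b))

struct rate_stats {
	unsigned int att_hist;	/* historical attempts (0 == never tested) */
	unsigned int prob_avg;	/* averaged success probability */
};

static void init_untested_rates(struct rate_stats rates[MCS_GROUP_RATES])
{
	unsigned int last_prob = 0;

	/* index 9 is the fastest rate of the group, 0 the slowest */
	for (int i = MCS_GROUP_RATES - 1; i >= 0; i--) {
		if (rates[i].att_hist)
			last_prob = MAX(last_prob, rates[i].prob_avg);
		else
			rates[i].prob_avg = MAX(last_prob, rates[i].prob_avg);
	}
}

int main(void)
{
	struct rate_stats rates[MCS_GROUP_RATES] = {
		[7] = { .att_hist = 50, .prob_avg = FRAC(98, 100) },
		[9] = { .att_hist = 80, .prob_avg = FRAC(60, 100) },
	};

	init_untested_rates(rates);

	/* untested rates 0..6 now start out at ~98%, making them usable
	 * fallback candidates without having been probed */
	for (int i = 0; i < MCS_GROUP_RATES; i++)
		printf("rate %d: prob_avg=%u\n", i, rates[i].prob_avg);
	return 0;
}
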
+--- a/net/mac80211/rc80211_minstrel_ht.c
++++ b/net/mac80211/rc80211_minstrel_ht.c
+@@ -791,14 +791,11 @@ minstrel_ht_calc_rate_stats(struct minst
+ 	unsigned int cur_prob;
+ 
+ 	if (unlikely(mrs->attempts > 0)) {
+-		mrs->sample_skipped = 0;
+ 		cur_prob = MINSTREL_FRAC(mrs->success, mrs->attempts);
+ 		minstrel_filter_avg_add(&mrs->prob_avg,
+ 					&mrs->prob_avg_1, cur_prob);
+ 		mrs->att_hist += mrs->attempts;
+ 		mrs->succ_hist += mrs->success;
+-	} else {
+-		mrs->sample_skipped++;
+ 	}
+ 
+ 	mrs->last_success = mrs->success;
+@@ -851,7 +848,6 @@ minstrel_ht_update_stats(struct minstrel
+ 		mi->ampdu_packets = 0;
+ 	}
+ 
+-	mi->sample_slow = 0;
+ 	mi->sample_count = 0;
+ 
+ 	memset(tmp_mcs_tp_rate, 0, sizeof(tmp_mcs_tp_rate));
+@@ -883,6 +879,7 @@ minstrel_ht_update_stats(struct minstrel
+ 	/* Find best rate sets within all MCS groups*/
+ 	for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) {
+ 		u16 *tp_rate = tmp_mcs_tp_rate;
++		u16 last_prob = 0;
+ 
+ 		mg = &mi->groups[group];
+ 		if (!mi->supported[group])
+@@ -897,7 +894,7 @@ minstrel_ht_update_stats(struct minstrel
+ 		if (group == MINSTREL_CCK_GROUP && ht_supported)
+ 			tp_rate = tmp_legacy_tp_rate;
+ 
+-		for (i = 0; i < MCS_GROUP_RATES; i++) {
++		for (i = MCS_GROUP_RATES - 1; i >= 0; i--) {
+ 			if (!(mi->supported[group] & BIT(i)))
+ 				continue;
+ 
+@@ -906,6 +903,11 @@ minstrel_ht_update_stats(struct minstrel
+ 			mrs = &mg->rates[i];
+ 			mrs->retry_updated = false;
+ 			minstrel_ht_calc_rate_stats(mp, mrs);
++
++			if (mrs->att_hist)
++				last_prob = max(last_prob, mrs->prob_avg);
++			else
++				mrs->prob_avg = max(last_prob, mrs->prob_avg);
+ 			cur_prob = mrs->prob_avg;
+ 
+ 			if (minstrel_ht_get_tp_avg(mi, group, i, cur_prob) == 0)
+@@ -1470,13 +1472,9 @@ minstrel_get_sample_rate(struct minstrel
+ 	if (sample_dur >= minstrel_get_duration(tp_rate2) &&
+ 	    (cur_max_tp_streams - 1 <
+ 	     minstrel_mcs_groups[sample_group].streams ||
+-	     sample_dur >= minstrel_get_duration(mi->max_prob_rate))) {
+-		if (mrs->sample_skipped < 20)
++	     sample_dur >= minstrel_get_duration(mi->max_prob_rate)))
+ 			return -1;
+ 
+-		if (mi->sample_slow++ > 2)
+-			return -1;
+-	}
+ 	mi->sample_tries--;
+ 
+ 	return sample_idx;
+--- a/net/mac80211/rc80211_minstrel_ht.h
++++ b/net/mac80211/rc80211_minstrel_ht.h
+@@ -123,7 +123,6 @@ struct minstrel_rate_stats {
+ 	u8 retry_count;
+ 	u8 retry_count_rtscts;
+ 
+-	u8 sample_skipped;
+ 	bool retry_updated;
+ };
+ 
+@@ -179,7 +178,6 @@ struct minstrel_ht_sta {
+ 	u8 sample_wait;
+ 	u8 sample_tries;
+ 	u8 sample_count;
+-	u8 sample_slow;
+ 
+ 	enum minstrel_sample_mode sample_mode;
+ 	u16 sample_rate;
diff --git a/package/kernel/mac80211/patches/subsys/349-mac80211-minstrel_ht-significantly-redesign-the-rate.patch b/package/kernel/mac80211/patches/subsys/349-mac80211-minstrel_ht-significantly-redesign-the-rate.patch
new file mode 100644
index 0000000000..6626804eb8
--- /dev/null
+++ b/package/kernel/mac80211/patches/subsys/349-mac80211-minstrel_ht-significantly-redesign-the-rate.patch
@@ -0,0 +1,767 @@
+From: Felix Fietkau <nbd at nbd.name>
+Date: Fri, 22 Jan 2021 23:57:50 +0100
+Subject: [PATCH] mac80211: minstrel_ht: significantly redesign the rate
+ probing strategy
+
+The biggest flaw in the current minstrel_ht is that it needs far too many
+probing packets to be able to quickly find the best rate.
+Depending on the wifi hardware and operating mode, this can significantly
+reduce throughput when not operating at the highest available data rate.
+
+In order to be able to significantly reduce the amount of rate sampling,
+we need a much smarter selection of probing rates.
+
+The new approach introduced by this patch maintains a limited set of
+available rates to be tested during a statistics window.
+
+They are split into distinct categories:
+- MINSTREL_SAMPLE_TYPE_INC - incremental rate upgrade:
+  Pick the next rate group and find the first rate that is faster than
+  the current max. throughput rate
+- MINSTREL_SAMPLE_TYPE_JUMP - random testing of higher rates:
+  Pick a random rate from the next group that is faster than the current
+  max throughput rate. This allows faster adaptation when the link changes
+  significantly
+- MINSTREL_SAMPLE_TYPE_SLOW - test a rate between max_prob, max_tp2 and
+  max_tp in order to reduce the gap between them
+
+In order to prioritize sampling, every 6 attempts are split into 3x INC,
+2x JUMP, 1x SLOW.
+
+Available rates are checked and refilled on every stats window update.
+
+With this approach, we finally get a very small throughput delta between
+fixing the optimal data rate manually and letting normal rate control
+operate.
+
+Signed-off-by: Felix Fietkau <nbd at nbd.name>
+---
+
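For illustration only, a minimal sketch of how the 3x INC / 2x JUMP / 1x SLOW split can be driven by a small static sequence table walked round-robin on each sampling attempt. The enum and table mirror the patch; the surrounding loop is invented for the example.

/* Illustrative sketch; the enum and sequence table follow the patch. */
#include <stdio.h>

enum minstrel_sample_type {
	MINSTREL_SAMPLE_TYPE_INC,
	MINSTREL_SAMPLE_TYPE_JUMP,
	MINSTREL_SAMPLE_TYPE_SLOW,
};

static const enum minstrel_sample_type sample_seq[] = {
	MINSTREL_SAMPLE_TYPE_INC,
	MINSTREL_SAMPLE_TYPE_JUMP,
	MINSTREL_SAMPLE_TYPE_INC,
	MINSTREL_SAMPLE_TYPE_JUMP,
	MINSTREL_SAMPLE_TYPE_INC,
	MINSTREL_SAMPLE_TYPE_SLOW,
};

#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

int main(void)
{
	static const char * const names[] = { "INC", "JUMP", "SLOW" };
	unsigned int seq = 0;

	/* every 6 attempts end up split 3:2:1 between the categories */
	for (int attempt = 0; attempt < 12; attempt++) {
		enum minstrel_sample_type type = sample_seq[seq];

		seq = (seq + 1) % ARRAY_SIZE(sample_seq);
		printf("attempt %2d: sample from %s bucket\n", attempt, names[type]);
	}
	return 0;
}
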
+--- a/net/mac80211/rc80211_minstrel_ht.c
++++ b/net/mac80211/rc80211_minstrel_ht.c
+@@ -266,6 +266,14 @@ const struct mcs_group minstrel_mcs_grou
+ const s16 minstrel_cck_bitrates[4] = { 10, 20, 55, 110 };
+ const s16 minstrel_ofdm_bitrates[8] = { 60, 90, 120, 180, 240, 360, 480, 540 };
+ static u8 sample_table[SAMPLE_COLUMNS][MCS_GROUP_RATES] __read_mostly;
++static const u8 minstrel_sample_seq[] = {
++	MINSTREL_SAMPLE_TYPE_INC,
++	MINSTREL_SAMPLE_TYPE_JUMP,
++	MINSTREL_SAMPLE_TYPE_INC,
++	MINSTREL_SAMPLE_TYPE_JUMP,
++	MINSTREL_SAMPLE_TYPE_INC,
++	MINSTREL_SAMPLE_TYPE_SLOW,
++};
+ 
+ static void
+ minstrel_ht_update_rates(struct minstrel_priv *mp, struct minstrel_ht_sta *mi);
+@@ -620,77 +628,31 @@ minstrel_ht_prob_rate_reduce_streams(str
+ 	}
+ }
+ 
+-static bool
+-minstrel_ht_probe_group(struct minstrel_ht_sta *mi, const struct mcs_group *tp_group,
+-						int tp_idx, const struct mcs_group *group)
+-{
+-	if (group->bw < tp_group->bw)
+-		return false;
+-
+-	if (group->streams == tp_group->streams)
+-		return true;
+-
+-	if (tp_idx < 4 && group->streams == tp_group->streams - 1)
+-		return true;
+-
+-	return group->streams == tp_group->streams + 1;
+-}
+-
+-static void
+-minstrel_ht_find_probe_rates(struct minstrel_ht_sta *mi, u16 *rates, int *n_rates,
+-			     bool faster_rate)
++static u16
++__minstrel_ht_get_sample_rate(struct minstrel_ht_sta *mi,
++			      enum minstrel_sample_type type)
+ {
+-	const struct mcs_group *group, *tp_group;
+-	int i, g, max_dur;
+-	int tp_idx;
+-
+-	tp_group = &minstrel_mcs_groups[MI_RATE_GROUP(mi->max_tp_rate[0])];
+-	tp_idx = MI_RATE_IDX(mi->max_tp_rate[0]);
+-
+-	max_dur = minstrel_get_duration(mi->max_tp_rate[0]);
+-	if (faster_rate)
+-		max_dur -= max_dur / 16;
+-
+-	for (g = 0; g < MINSTREL_GROUPS_NB; g++) {
+-		u16 supported = mi->supported[g];
+-
+-		if (!supported)
+-			continue;
++	u16 *rates = mi->sample[type].sample_rates;
++	u16 cur;
++	int i;
+ 
+-		group = &minstrel_mcs_groups[g];
+-		if (!minstrel_ht_probe_group(mi, tp_group, tp_idx, group))
++	for (i = 0; i < MINSTREL_SAMPLE_RATES; i++) {
++		if (!rates[i])
+ 			continue;
+ 
+-		for (i = 0; supported; supported >>= 1, i++) {
+-			int idx;
+-
+-			if (!(supported & 1))
+-				continue;
+-
+-			if ((group->duration[i] << group->shift) > max_dur)
+-				continue;
+-
+-			idx = MI_RATE(g, i);
+-			if (idx == mi->max_tp_rate[0])
+-				continue;
+-
+-			rates[(*n_rates)++] = idx;
+-			break;
+-		}
++		cur = rates[i];
++		rates[i] = 0;
++		return cur;
+ 	}
++
++	return 0;
+ }
+ 
+ static void
+ minstrel_ht_rate_sample_switch(struct minstrel_priv *mp,
+ 			       struct minstrel_ht_sta *mi)
+ {
+-	struct minstrel_rate_stats *mrs;
+-	u16 rates[MINSTREL_GROUPS_NB];
+-	int n_rates = 0;
+-	int probe_rate = 0;
+-	bool faster_rate;
+-	int i;
+-	u8 random;
++	u16 rate;
+ 
+ 	/*
+ 	 * Use rate switching instead of probing packets for devices with
+@@ -699,43 +661,11 @@ minstrel_ht_rate_sample_switch(struct mi
+ 	if (mp->hw->max_rates > 1)
+ 		return;
+ 
+-	/*
+-	 * If the current EWMA prob is >75%, look for a rate that's 6.25%
+-	 * faster than the max tp rate.
+-	 * If that fails, look again for a rate that is at least as fast
+-	 */
+-	mrs = minstrel_get_ratestats(mi, mi->max_tp_rate[0]);
+-	faster_rate = mrs->prob_avg > MINSTREL_FRAC(75, 100);
+-	minstrel_ht_find_probe_rates(mi, rates, &n_rates, faster_rate);
+-	if (!n_rates && faster_rate)
+-		minstrel_ht_find_probe_rates(mi, rates, &n_rates, false);
+-
+-	/* If no suitable rate was found, try to pick the next one in the group */
+-	if (!n_rates) {
+-		int g_idx = MI_RATE_GROUP(mi->max_tp_rate[0]);
+-		u16 supported = mi->supported[g_idx];
+-
+-		supported >>= MI_RATE_IDX(mi->max_tp_rate[0]);
+-		for (i = 0; supported; supported >>= 1, i++) {
+-			if (!(supported & 1))
+-				continue;
+-
+-			probe_rate = mi->max_tp_rate[0] + i;
+-			goto out;
+-		}
+-
++	rate = __minstrel_ht_get_sample_rate(mi, MINSTREL_SAMPLE_TYPE_INC);
++	if (!rate)
+ 		return;
+-	}
+ 
+-	i = 0;
+-	if (n_rates > 1) {
+-		random = prandom_u32();
+-		i = random % n_rates;
+-	}
+-	probe_rate = rates[i];
+-
+-out:
+-	mi->sample_rate = probe_rate;
++	mi->sample_rate = rate;
+ 	mi->sample_mode = MINSTREL_SAMPLE_ACTIVE;
+ }
+ 
+@@ -804,6 +734,274 @@ minstrel_ht_calc_rate_stats(struct minst
+ 	mrs->attempts = 0;
+ }
+ 
++static bool
++minstrel_ht_find_sample_rate(struct minstrel_ht_sta *mi, int type, int idx)
++{
++	int i;
++
++	for (i = 0; i < MINSTREL_SAMPLE_RATES; i++) {
++		u16 cur = mi->sample[type].sample_rates[i];
++
++		if (cur == idx)
++			return true;
++
++		if (!cur)
++			break;
++	}
++
++	return false;
++}
++
++static int
++minstrel_ht_move_sample_rates(struct minstrel_ht_sta *mi, int type,
++			      u32 fast_rate_dur, u32 slow_rate_dur)
++{
++	u16 *rates = mi->sample[type].sample_rates;
++	int i, j;
++
++	for (i = 0, j = 0; i < MINSTREL_SAMPLE_RATES; i++) {
++		u32 duration;
++		bool valid = false;
++		u16 cur;
++
++		cur = rates[i];
++		if (!cur)
++			continue;
++
++		duration = minstrel_get_duration(cur);
++		switch (type) {
++		case MINSTREL_SAMPLE_TYPE_SLOW:
++			valid = duration > fast_rate_dur &&
++				duration < slow_rate_dur;
++			break;
++		case MINSTREL_SAMPLE_TYPE_INC:
++		case MINSTREL_SAMPLE_TYPE_JUMP:
++			valid = duration < fast_rate_dur;
++			break;
++		default:
++			valid = false;
++			break;
++		}
++
++		if (!valid) {
++			rates[i] = 0;
++			continue;
++		}
++
++		if (i == j)
++			continue;
++
++		rates[j++] = cur;
++		rates[i] = 0;
++	}
++
++	return j;
++}
++
++static int
++minstrel_ht_group_min_rate_offset(struct minstrel_ht_sta *mi, int group,
++				  u32 max_duration)
++{
++	u16 supported = mi->supported[group];
++	int i;
++
++	for (i = 0; i < MCS_GROUP_RATES && supported; i++, supported >>= 1) {
++		if (!(supported & BIT(0)))
++			continue;
++
++		if (minstrel_get_duration(MI_RATE(group, i)) >= max_duration)
++			continue;
++
++		return i;
++	}
++
++	return -1;
++}
++
++/*
++ * Incremental update rates:
++ * Flip through groups and pick the first group rate that is faster than the
++ * highest currently selected rate
++ */
++static u16
++minstrel_ht_next_inc_rate(struct minstrel_ht_sta *mi, u32 fast_rate_dur)
++{
++	struct minstrel_mcs_group_data *mg;
++	u8 type = MINSTREL_SAMPLE_TYPE_INC;
++	int i, index = 0;
++	u8 group;
++
++	group = mi->sample[type].sample_group;
++	for (i = 0; i < ARRAY_SIZE(minstrel_mcs_groups); i++) {
++		group = (group + 1) % ARRAY_SIZE(minstrel_mcs_groups);
++		mg = &mi->groups[group];
++
++		index = minstrel_ht_group_min_rate_offset(mi, group,
++							  fast_rate_dur);
++		if (index < 0)
++			continue;
++
++		index = MI_RATE(group, index & 0xf);
++		if (!minstrel_ht_find_sample_rate(mi, type, index))
++			goto out;
++	}
++	index = 0;
++
++out:
++	mi->sample[type].sample_group = group;
++
++	return index;
++}
++
++static int
++minstrel_ht_next_group_sample_rate(struct minstrel_ht_sta *mi, int group,
++				   u16 supported, int offset)
++{
++	struct minstrel_mcs_group_data *mg = &mi->groups[group];
++	u16 idx;
++	int i;
++
++	for (i = 0; i < MCS_GROUP_RATES; i++) {
++		idx = sample_table[mg->column][mg->index];
++		if (++mg->index >= MCS_GROUP_RATES) {
++			mg->index = 0;
++			if (++mg->column >= ARRAY_SIZE(sample_table))
++				mg->column = 0;
++		}
++
++		if (idx < offset)
++			continue;
++
++		if (!(supported & BIT(idx)))
++			continue;
++
++		return MI_RATE(group, idx);
++	}
++
++	return -1;
++}
++
++/*
++ * Jump rates:
++ * Sample random rates, use those that are faster than the highest
++ * currently selected rate. Rates between the fastest and the slowest
++ * get sorted into the slow sample bucket, but only if it has room
++ */
++static u16
++minstrel_ht_next_jump_rate(struct minstrel_ht_sta *mi, u32 fast_rate_dur,
++			   u32 slow_rate_dur, int *slow_rate_ofs)
++{
++	struct minstrel_mcs_group_data *mg;
++	struct minstrel_rate_stats *mrs;
++	u32 max_duration = slow_rate_dur;
++	int i, index, offset;
++	u16 *slow_rates;
++	u16 supported;
++	u32 duration;
++	u8 group;
++
++	if (*slow_rate_ofs >= MINSTREL_SAMPLE_RATES)
++		max_duration = fast_rate_dur;
++
++	slow_rates = mi->sample[MINSTREL_SAMPLE_TYPE_SLOW].sample_rates;
++	group = mi->sample[MINSTREL_SAMPLE_TYPE_JUMP].sample_group;
++	for (i = 0; i < ARRAY_SIZE(minstrel_mcs_groups); i++) {
++		u8 type;
++
++		group = (group + 1) % ARRAY_SIZE(minstrel_mcs_groups);
++		mg = &mi->groups[group];
++
++		supported = mi->supported[group];
++		if (!supported)
++			continue;
++
++		offset = minstrel_ht_group_min_rate_offset(mi, group,
++							   max_duration);
++		if (offset < 0)
++			continue;
++
++		index = minstrel_ht_next_group_sample_rate(mi, group, supported,
++							   offset);
++		if (index < 0)
++			continue;
++
++		duration = minstrel_get_duration(index);
++		if (duration < fast_rate_dur)
++			type = MINSTREL_SAMPLE_TYPE_JUMP;
++		else
++			type = MINSTREL_SAMPLE_TYPE_SLOW;
++
++		if (minstrel_ht_find_sample_rate(mi, type, index))
++			continue;
++
++		if (type == MINSTREL_SAMPLE_TYPE_JUMP)
++			goto found;
++
++		if (*slow_rate_ofs >= MINSTREL_SAMPLE_RATES)
++			continue;
++
++		if (duration >= slow_rate_dur)
++			continue;
++
++		/* skip slow rates with high success probability */
++		mrs = minstrel_get_ratestats(mi, index);
++		if (mrs->prob_avg > MINSTREL_FRAC(95, 100))
++			continue;
++
++		slow_rates[(*slow_rate_ofs)++] = index;
++		if (*slow_rate_ofs >= MINSTREL_SAMPLE_RATES)
++			max_duration = fast_rate_dur;
++	}
++	index = 0;
++
++found:
++	mi->sample[MINSTREL_SAMPLE_TYPE_JUMP].sample_group = group;
++
++	return index;
++}
++
++static void
++minstrel_ht_refill_sample_rates(struct minstrel_ht_sta *mi)
++{
++	u32 prob_dur = minstrel_get_duration(mi->max_prob_rate);
++	u32 tp_dur = minstrel_get_duration(mi->max_tp_rate[0]);
++	u32 tp2_dur = minstrel_get_duration(mi->max_tp_rate[1]);
++	u32 fast_rate_dur = min(min(tp_dur, tp2_dur), prob_dur);
++	u32 slow_rate_dur = max(max(tp_dur, tp2_dur), prob_dur);
++	u16 *rates;
++	int i, j;
++
++	rates = mi->sample[MINSTREL_SAMPLE_TYPE_INC].sample_rates;
++	i = minstrel_ht_move_sample_rates(mi, MINSTREL_SAMPLE_TYPE_INC,
++					  fast_rate_dur, slow_rate_dur);
++	while (i < MINSTREL_SAMPLE_RATES) {
++		rates[i] = minstrel_ht_next_inc_rate(mi, tp_dur);
++		if (!rates[i])
++			break;
++
++		i++;
++	}
++
++	rates = mi->sample[MINSTREL_SAMPLE_TYPE_JUMP].sample_rates;
++	i = minstrel_ht_move_sample_rates(mi, MINSTREL_SAMPLE_TYPE_JUMP,
++					  fast_rate_dur, slow_rate_dur);
++	j = minstrel_ht_move_sample_rates(mi, MINSTREL_SAMPLE_TYPE_SLOW,
++					  fast_rate_dur, slow_rate_dur);
++	while (i < MINSTREL_SAMPLE_RATES) {
++		rates[i] = minstrel_ht_next_jump_rate(mi, fast_rate_dur,
++						      slow_rate_dur, &j);
++		if (!rates[i])
++			break;
++
++		i++;
++	}
++
++	for (i = 0; i < ARRAY_SIZE(mi->sample); i++)
++		memcpy(mi->sample[i].cur_sample_rates, mi->sample[i].sample_rates,
++		       sizeof(mi->sample[i].cur_sample_rates));
++}
++
++
+ /*
+  * Update rate statistics and select new primary rates
+  *
+@@ -848,8 +1046,6 @@ minstrel_ht_update_stats(struct minstrel
+ 		mi->ampdu_packets = 0;
+ 	}
+ 
+-	mi->sample_count = 0;
+-
+ 	memset(tmp_mcs_tp_rate, 0, sizeof(tmp_mcs_tp_rate));
+ 	memset(tmp_legacy_tp_rate, 0, sizeof(tmp_legacy_tp_rate));
+ 
+@@ -885,8 +1081,6 @@ minstrel_ht_update_stats(struct minstrel
+ 		if (!mi->supported[group])
+ 			continue;
+ 
+-		mi->sample_count++;
+-
+ 		/* (re)Initialize group rate indexes */
+ 		for(j = 0; j < MAX_THR_RATES; j++)
+ 			tmp_group_tp_rate[j] = MI_RATE(group, 0);
+@@ -953,9 +1147,7 @@ minstrel_ht_update_stats(struct minstrel
+ 
+ 	/* Try to increase robustness of max_prob_rate*/
+ 	minstrel_ht_prob_rate_reduce_streams(mi);
+-
+-	/* try to sample half of all available rates during each interval */
+-	mi->sample_count *= 4;
++	minstrel_ht_refill_sample_rates(mi);
+ 
+ 	if (sample)
+ 		minstrel_ht_rate_sample_switch(mp, mi);
+@@ -972,6 +1164,7 @@ minstrel_ht_update_stats(struct minstrel
+ 
+ 	/* Reset update timer */
+ 	mi->last_stats_update = jiffies;
++	mi->sample_time = jiffies;
+ }
+ 
+ static bool
+@@ -1002,28 +1195,6 @@ minstrel_ht_txstat_valid(struct minstrel
+ }
+ 
+ static void
+-minstrel_set_next_sample_idx(struct minstrel_ht_sta *mi)
+-{
+-	struct minstrel_mcs_group_data *mg;
+-
+-	for (;;) {
+-		mi->sample_group++;
+-		mi->sample_group %= ARRAY_SIZE(minstrel_mcs_groups);
+-		mg = &mi->groups[mi->sample_group];
+-
+-		if (!mi->supported[mi->sample_group])
+-			continue;
+-
+-		if (++mg->index >= MCS_GROUP_RATES) {
+-			mg->index = 0;
+-			if (++mg->column >= ARRAY_SIZE(sample_table))
+-				mg->column = 0;
+-		}
+-		break;
+-	}
+-}
+-
+-static void
+ minstrel_downgrade_rate(struct minstrel_ht_sta *mi, u16 *idx, bool primary)
+ {
+ 	int group, orig_group;
+@@ -1108,14 +1279,6 @@ minstrel_ht_tx_status(void *priv, struct
+ 	mi->ampdu_packets++;
+ 	mi->ampdu_len += info->status.ampdu_len;
+ 
+-	if (!mi->sample_wait && !mi->sample_tries && mi->sample_count > 0) {
+-		int avg_ampdu_len = minstrel_ht_avg_ampdu_len(mi);
+-
+-		mi->sample_wait = 16 + 2 * avg_ampdu_len;
+-		mi->sample_tries = 1;
+-		mi->sample_count--;
+-	}
+-
+ 	if (mi->sample_mode != MINSTREL_SAMPLE_IDLE)
+ 		rate_sample = minstrel_get_ratestats(mi, mi->sample_rate);
+ 
+@@ -1387,97 +1550,20 @@ minstrel_ht_update_rates(struct minstrel
+ 	rate_control_set_rates(mp->hw, mi->sta, rates);
+ }
+ 
+-static int
+-minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
++static u16
++minstrel_ht_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
+ {
+-	struct minstrel_rate_stats *mrs;
+-	struct minstrel_mcs_group_data *mg;
+-	unsigned int sample_dur, sample_group, cur_max_tp_streams;
+-	int tp_rate1, tp_rate2;
+-	int sample_idx = 0;
+-
+-	if (mp->hw->max_rates == 1 && mp->sample_switch &&
+-	    (mi->total_packets_cur >= SAMPLE_SWITCH_THR ||
+-	     mp->sample_switch == 1))
+-		return -1;
+-
+-	if (mi->sample_wait > 0) {
+-		mi->sample_wait--;
+-		return -1;
+-	}
+-
+-	if (!mi->sample_tries)
+-		return -1;
+-
+-	sample_group = mi->sample_group;
+-	mg = &mi->groups[sample_group];
+-	sample_idx = sample_table[mg->column][mg->index];
+-	minstrel_set_next_sample_idx(mi);
+-
+-	if (!(mi->supported[sample_group] & BIT(sample_idx)))
+-		return -1;
+-
+-	mrs = &mg->rates[sample_idx];
+-	sample_idx += MI_RATE(sample_group, 0);
+-
+-	tp_rate1 = mi->max_tp_rate[0];
++	u8 seq;
+ 
+-	/* Set tp_rate2 to the second highest max_tp_rate */
+-	if (minstrel_get_duration(mi->max_tp_rate[0]) >
+-	    minstrel_get_duration(mi->max_tp_rate[1])) {
+-		tp_rate2 = mi->max_tp_rate[0];
++	if (mp->hw->max_rates > 1) {
++		seq = mi->sample_seq;
++		mi->sample_seq = (seq + 1) % ARRAY_SIZE(minstrel_sample_seq);
++		seq = minstrel_sample_seq[seq];
+ 	} else {
+-		tp_rate2 = mi->max_tp_rate[1];
++		seq = MINSTREL_SAMPLE_TYPE_INC;
+ 	}
+ 
+-	/*
+-	 * Sampling might add some overhead (RTS, no aggregation)
+-	 * to the frame. Hence, don't use sampling for the highest currently
+-	 * used highest throughput or probability rate.
+-	 */
+-	if (sample_idx == mi->max_tp_rate[0] || sample_idx == mi->max_prob_rate)
+-		return -1;
+-
+-	/*
+-	 * Do not sample if the probability is already higher than 95%,
+-	 * or if the rate is 3 times slower than the current max probability
+-	 * rate, to avoid wasting airtime.
+-	 */
+-	sample_dur = minstrel_get_duration(sample_idx);
+-	if (mrs->prob_avg > MINSTREL_FRAC(95, 100) ||
+-	    minstrel_get_duration(mi->max_prob_rate) * 3 < sample_dur)
+-		return -1;
+-
+-
+-	/*
+-	 * For devices with no configurable multi-rate retry, skip sampling
+-	 * below the per-group max throughput rate, and only use one sampling
+-	 * attempt per rate
+-	 */
+-	if (mp->hw->max_rates == 1 &&
+-	    (minstrel_get_duration(mg->max_group_tp_rate[0]) < sample_dur ||
+-	     mrs->attempts))
+-		return -1;
+-
+-	/* Skip already sampled slow rates */
+-	if (sample_dur >= minstrel_get_duration(tp_rate1) && mrs->attempts)
+-		return -1;
+-
+-	/*
+-	 * Make sure that lower rates get sampled only occasionally,
+-	 * if the link is working perfectly.
+-	 */
+-
+-	cur_max_tp_streams = minstrel_mcs_groups[MI_RATE_GROUP(tp_rate1)].streams;
+-	if (sample_dur >= minstrel_get_duration(tp_rate2) &&
+-	    (cur_max_tp_streams - 1 <
+-	     minstrel_mcs_groups[sample_group].streams ||
+-	     sample_dur >= minstrel_get_duration(mi->max_prob_rate)))
+-			return -1;
+-
+-	mi->sample_tries--;
+-
+-	return sample_idx;
++	return __minstrel_ht_get_sample_rate(mi, seq);
+ }
+ 
+ static void
+@@ -1489,7 +1575,7 @@ minstrel_ht_get_rate(void *priv, struct
+ 	struct ieee80211_tx_rate *rate = &info->status.rates[0];
+ 	struct minstrel_ht_sta *mi = priv_sta;
+ 	struct minstrel_priv *mp = priv;
+-	int sample_idx;
++	u16 sample_idx;
+ 
+ 	if (!(info->flags & IEEE80211_TX_CTL_AMPDU) &&
+ 	    !minstrel_ht_is_legacy_group(MI_RATE_GROUP(mi->max_prob_rate)))
+@@ -1505,11 +1591,19 @@ minstrel_ht_get_rate(void *priv, struct
+ 	/* Don't use EAPOL frames for sampling on non-mrr hw */
+ 	if (mp->hw->max_rates == 1 &&
+ 	    (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO))
+-		sample_idx = -1;
+-	else
+-		sample_idx = minstrel_get_sample_rate(mp, mi);
++		return;
+ 
+-	if (sample_idx < 0)
++	if (mp->hw->max_rates == 1 && mp->sample_switch &&
++	    (mi->total_packets_cur >= SAMPLE_SWITCH_THR ||
++	     mp->sample_switch == 1))
++		return;
++
++	if (time_is_before_jiffies(mi->sample_time))
++		return;
++
++	mi->sample_time = jiffies + MINSTREL_SAMPLE_INTERVAL;
++	sample_idx = minstrel_ht_get_sample_rate(mp, mi);
++	if (!sample_idx)
+ 		return;
+ 
+ 	sample_group = &minstrel_mcs_groups[MI_RATE_GROUP(sample_idx)];
+@@ -1630,16 +1724,6 @@ minstrel_ht_update_caps(void *priv, stru
+ 
+ 	mi->avg_ampdu_len = MINSTREL_FRAC(1, 1);
+ 
+-	/* When using MRR, sample more on the first attempt, without delay */
+-	if (mp->has_mrr) {
+-		mi->sample_count = 16;
+-		mi->sample_wait = 0;
+-	} else {
+-		mi->sample_count = 8;
+-		mi->sample_wait = 8;
+-	}
+-	mi->sample_tries = 4;
+-
+ 	if (!use_vht) {
+ 		stbc = (ht_cap & IEEE80211_HT_CAP_RX_STBC) >>
+ 			IEEE80211_HT_CAP_RX_STBC_SHIFT;
+--- a/net/mac80211/rc80211_minstrel_ht.h
++++ b/net/mac80211/rc80211_minstrel_ht.h
+@@ -69,6 +69,8 @@
+ #define MI_RATE_IDX(_rate) FIELD_GET(MI_RATE_IDX_MASK, _rate)
+ #define MI_RATE_GROUP(_rate) FIELD_GET(MI_RATE_GROUP_MASK, _rate)
+ 
++#define MINSTREL_SAMPLE_RATES		5 /* rates per sample type */
++#define MINSTREL_SAMPLE_INTERVAL	(HZ / 50)
+ 
+ struct minstrel_priv {
+ 	struct ieee80211_hw *hw;
+@@ -126,6 +128,13 @@ struct minstrel_rate_stats {
+ 	bool retry_updated;
+ };
+ 
++enum minstrel_sample_type {
++	MINSTREL_SAMPLE_TYPE_INC,
++	MINSTREL_SAMPLE_TYPE_JUMP,
++	MINSTREL_SAMPLE_TYPE_SLOW,
++	__MINSTREL_SAMPLE_TYPE_MAX
++};
++
+ struct minstrel_mcs_group_data {
+ 	u8 index;
+ 	u8 column;
+@@ -144,6 +153,12 @@ enum minstrel_sample_mode {
+ 	MINSTREL_SAMPLE_PENDING,
+ };
+ 
++struct minstrel_sample_category {
++	u8 sample_group;
++	u16 sample_rates[MINSTREL_SAMPLE_RATES];
++	u16 cur_sample_rates[MINSTREL_SAMPLE_RATES];
++};
++
+ struct minstrel_ht_sta {
+ 	struct ieee80211_sta *sta;
+ 
+@@ -175,16 +190,14 @@ struct minstrel_ht_sta {
+ 	/* tx flags to add for frames for this sta */
+ 	u32 tx_flags;
+ 
+-	u8 sample_wait;
+-	u8 sample_tries;
+-	u8 sample_count;
++	unsigned long sample_time;
++	struct minstrel_sample_category sample[__MINSTREL_SAMPLE_TYPE_MAX];
++
++	u8 sample_seq;
+ 
+ 	enum minstrel_sample_mode sample_mode;
+ 	u16 sample_rate;
+ 
+-	/* current MCS group to be sampled */
+-	u8 sample_group;
+-
+ 	u8 band;
+ 
+ 	/* Bitfield of supported MCS rates of all groups */
diff --git a/package/kernel/mac80211/patches/subsys/350-mac80211-minstrel_ht-show-sampling-rates-in-debugfs.patch b/package/kernel/mac80211/patches/subsys/350-mac80211-minstrel_ht-show-sampling-rates-in-debugfs.patch
new file mode 100644
index 0000000000..41910e0d18
--- /dev/null
+++ b/package/kernel/mac80211/patches/subsys/350-mac80211-minstrel_ht-show-sampling-rates-in-debugfs.patch
@@ -0,0 +1,59 @@
+From: Felix Fietkau <nbd at nbd.name>
+Date: Sat, 23 Jan 2021 00:10:34 +0100
+Subject: [PATCH] mac80211: minstrel_ht: show sampling rates in debugfs
+
+This makes it easier to see what rates are going to be tested next
+
+Signed-off-by: Felix Fietkau <nbd at nbd.name>
+---
+
+--- a/net/mac80211/rc80211_minstrel_ht_debugfs.c
++++ b/net/mac80211/rc80211_minstrel_ht_debugfs.c
+@@ -32,6 +32,18 @@ minstrel_stats_release(struct inode *ino
+ 	return 0;
+ }
+ 
++static bool
++minstrel_ht_is_sample_rate(struct minstrel_ht_sta *mi, int idx)
++{
++	int type, i;
++
++	for (type = 0; type < ARRAY_SIZE(mi->sample); type++)
++		for (i = 0; i < MINSTREL_SAMPLE_RATES; i++)
++			if (mi->sample[type].cur_sample_rates[i] == idx)
++				return true;
++	return false;
++}
++
+ static char *
+ minstrel_ht_stats_dump(struct minstrel_ht_sta *mi, int i, char *p)
+ {
+@@ -84,6 +96,7 @@ minstrel_ht_stats_dump(struct minstrel_h
+ 		*(p++) = (idx == mi->max_tp_rate[2]) ? 'C' : ' ';
+ 		*(p++) = (idx == mi->max_tp_rate[3]) ? 'D' : ' ';
+ 		*(p++) = (idx == mi->max_prob_rate) ? 'P' : ' ';
++		*(p++) = minstrel_ht_is_sample_rate(mi, idx) ? 'S' : ' ';
+ 
+ 		if (gflags & IEEE80211_TX_RC_MCS) {
+ 			p += sprintf(p, "  MCS%-2u", (mg->streams - 1) * 8 + j);
+@@ -145,9 +158,9 @@ minstrel_ht_stats_open(struct inode *ino
+ 
+ 	p += sprintf(p, "\n");
+ 	p += sprintf(p,
+-		     "              best   ____________rate__________    ____statistics___    _____last____    ______sum-of________\n");
++		     "              best    ____________rate__________    ____statistics___    _____last____    ______sum-of________\n");
+ 	p += sprintf(p,
+-		     "mode guard #  rate  [name   idx airtime  max_tp]  [avg(tp) avg(prob)]  [retry|suc|att]  [#success | #attempts]\n");
++		     "mode guard #  rate   [name   idx airtime  max_tp]  [avg(tp) avg(prob)]  [retry|suc|att]  [#success | #attempts]\n");
+ 
+ 	p = minstrel_ht_stats_dump(mi, MINSTREL_CCK_GROUP, p);
+ 	for (i = 0; i < MINSTREL_CCK_GROUP; i++)
+@@ -228,6 +241,8 @@ minstrel_ht_stats_csv_dump(struct minstr
+ 		p += sprintf(p, "%s" ,((idx == mi->max_tp_rate[2]) ? "C" : ""));
+ 		p += sprintf(p, "%s" ,((idx == mi->max_tp_rate[3]) ? "D" : ""));
+ 		p += sprintf(p, "%s" ,((idx == mi->max_prob_rate) ? "P" : ""));
++		p += sprintf(p, "%s" ,((idx == mi->max_prob_rate) ? "P" : ""));
++		p += sprintf(p, "%s", (minstrel_ht_is_sample_rate(mi, idx) ? "S" : ""));
+ 
+ 		if (gflags & IEEE80211_TX_RC_MCS) {
+ 			p += sprintf(p, ",MCS%-2u,", (mg->streams - 1) * 8 + j);
diff --git a/package/kernel/mac80211/patches/subsys/351-mac80211-minstrel_ht-remove-sample-rate-switching-co.patch b/package/kernel/mac80211/patches/subsys/351-mac80211-minstrel_ht-remove-sample-rate-switching-co.patch
new file mode 100644
index 0000000000..64bd59b852
--- /dev/null
+++ b/package/kernel/mac80211/patches/subsys/351-mac80211-minstrel_ht-remove-sample-rate-switching-co.patch
@@ -0,0 +1,279 @@
+From: Felix Fietkau <nbd at nbd.name>
+Date: Sat, 23 Jan 2021 07:18:26 +0100
+Subject: [PATCH] mac80211: minstrel_ht: remove sample rate switching code for
+ constrained devices
+
+This was added to mitigate the effects of too much sampling on devices that
+use a static global fallback table instead of configurable multi-rate retry.
+Now that the sampling algorithm is improved, this code path no longer performs
+any better than the standard probing on affected devices.
+
+Signed-off-by: Felix Fietkau <nbd at nbd.name>
+---
+
+--- a/net/mac80211/rc80211_minstrel_ht.c
++++ b/net/mac80211/rc80211_minstrel_ht.c
+@@ -648,27 +648,6 @@ __minstrel_ht_get_sample_rate(struct min
+ 	return 0;
+ }
+ 
+-static void
+-minstrel_ht_rate_sample_switch(struct minstrel_priv *mp,
+-			       struct minstrel_ht_sta *mi)
+-{
+-	u16 rate;
+-
+-	/*
+-	 * Use rate switching instead of probing packets for devices with
+-	 * little control over retry fallback behavior
+-	 */
+-	if (mp->hw->max_rates > 1)
+-		return;
+-
+-	rate = __minstrel_ht_get_sample_rate(mi, MINSTREL_SAMPLE_TYPE_INC);
+-	if (!rate)
+-		return;
+-
+-	mi->sample_rate = rate;
+-	mi->sample_mode = MINSTREL_SAMPLE_ACTIVE;
+-}
+-
+ static inline int
+ minstrel_ewma(int old, int new, int weight)
+ {
+@@ -1012,8 +991,7 @@ minstrel_ht_refill_sample_rates(struct m
+  *    higher throughput rates, even if the probablity is a bit lower
+  */
+ static void
+-minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
+-			 bool sample)
++minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
+ {
+ 	struct minstrel_mcs_group_data *mg;
+ 	struct minstrel_rate_stats *mrs;
+@@ -1023,18 +1001,6 @@ minstrel_ht_update_stats(struct minstrel
+ 	u16 index;
+ 	bool ht_supported = mi->sta->ht_cap.ht_supported;
+ 
+-	mi->sample_mode = MINSTREL_SAMPLE_IDLE;
+-
+-	if (sample) {
+-		mi->total_packets_cur = mi->total_packets -
+-					mi->total_packets_last;
+-		mi->total_packets_last = mi->total_packets;
+-	}
+-	if (!mp->sample_switch)
+-		sample = false;
+-	if (mi->total_packets_cur < SAMPLE_SWITCH_THR && mp->sample_switch != 1)
+-	    sample = false;
+-
+ 	if (mi->ampdu_packets > 0) {
+ 		if (!ieee80211_hw_check(mp->hw, TX_STATUS_NO_AMPDU_LEN))
+ 			mi->avg_ampdu_len = minstrel_ewma(mi->avg_ampdu_len,
+@@ -1149,16 +1115,12 @@ minstrel_ht_update_stats(struct minstrel
+ 	minstrel_ht_prob_rate_reduce_streams(mi);
+ 	minstrel_ht_refill_sample_rates(mi);
+ 
+-	if (sample)
+-		minstrel_ht_rate_sample_switch(mp, mi);
+-
+ #ifdef CPTCFG_MAC80211_DEBUGFS
+ 	/* use fixed index if set */
+ 	if (mp->fixed_rate_idx != -1) {
+ 		for (i = 0; i < 4; i++)
+ 			mi->max_tp_rate[i] = mp->fixed_rate_idx;
+ 		mi->max_prob_rate = mp->fixed_rate_idx;
+-		mi->sample_mode = MINSTREL_SAMPLE_IDLE;
+ 	}
+ #endif
+ 
+@@ -1248,11 +1210,10 @@ minstrel_ht_tx_status(void *priv, struct
+ 	struct ieee80211_tx_info *info = st->info;
+ 	struct minstrel_ht_sta *mi = priv_sta;
+ 	struct ieee80211_tx_rate *ar = info->status.rates;
+-	struct minstrel_rate_stats *rate, *rate2, *rate_sample = NULL;
++	struct minstrel_rate_stats *rate, *rate2;
+ 	struct minstrel_priv *mp = priv;
+ 	u32 update_interval = mp->update_interval;
+ 	bool last, update = false;
+-	bool sample_status = false;
+ 	int i;
+ 
+ 	/* This packet was aggregated but doesn't carry status info */
+@@ -1279,49 +1240,18 @@ minstrel_ht_tx_status(void *priv, struct
+ 	mi->ampdu_packets++;
+ 	mi->ampdu_len += info->status.ampdu_len;
+ 
+-	if (mi->sample_mode != MINSTREL_SAMPLE_IDLE)
+-		rate_sample = minstrel_get_ratestats(mi, mi->sample_rate);
+-
+ 	last = !minstrel_ht_txstat_valid(mp, mi, &ar[0]);
+ 	for (i = 0; !last; i++) {
+ 		last = (i == IEEE80211_TX_MAX_RATES - 1) ||
+ 		       !minstrel_ht_txstat_valid(mp, mi, &ar[i + 1]);
+ 
+ 		rate = minstrel_ht_get_stats(mp, mi, &ar[i]);
+-		if (rate == rate_sample)
+-			sample_status = true;
+-
+ 		if (last)
+ 			rate->success += info->status.ampdu_ack_len;
+ 
+ 		rate->attempts += ar[i].count * info->status.ampdu_len;
+ 	}
+ 
+-	switch (mi->sample_mode) {
+-	case MINSTREL_SAMPLE_IDLE:
+-		if (mp->hw->max_rates > 1 ||
+-		     mi->total_packets_cur < SAMPLE_SWITCH_THR)
+-			update_interval /= 2;
+-		break;
+-
+-	case MINSTREL_SAMPLE_ACTIVE:
+-		if (!sample_status)
+-			break;
+-
+-		mi->sample_mode = MINSTREL_SAMPLE_PENDING;
+-		update = true;
+-		break;
+-
+-	case MINSTREL_SAMPLE_PENDING:
+-		if (sample_status)
+-			break;
+-
+-		update = true;
+-		minstrel_ht_update_stats(mp, mi, false);
+-		break;
+-	}
+-
+-
+ 	if (mp->hw->max_rates > 1) {
+ 		/*
+ 		 * check for sudden death of spatial multiplexing,
+@@ -1344,7 +1274,7 @@ minstrel_ht_tx_status(void *priv, struct
+ 
+ 	if (time_after(jiffies, mi->last_stats_update + update_interval)) {
+ 		update = true;
+-		minstrel_ht_update_stats(mp, mi, true);
++		minstrel_ht_update_stats(mp, mi);
+ 	}
+ 
+ 	if (update)
+@@ -1523,18 +1453,14 @@ static void
+ minstrel_ht_update_rates(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
+ {
+ 	struct ieee80211_sta_rates *rates;
+-	u16 first_rate = mi->max_tp_rate[0];
+ 	int i = 0;
+ 
+-	if (mi->sample_mode == MINSTREL_SAMPLE_ACTIVE)
+-		first_rate = mi->sample_rate;
+-
+ 	rates = kzalloc(sizeof(*rates), GFP_ATOMIC);
+ 	if (!rates)
+ 		return;
+ 
+ 	/* Start with max_tp_rate[0] */
+-	minstrel_ht_set_rate(mp, mi, rates, i++, first_rate);
++	minstrel_ht_set_rate(mp, mi, rates, i++, mi->max_tp_rate[0]);
+ 
+ 	if (mp->hw->max_rates >= 3) {
+ 		/* At least 3 tx rates supported, use max_tp_rate[1] next */
+@@ -1593,11 +1519,6 @@ minstrel_ht_get_rate(void *priv, struct
+ 	    (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO))
+ 		return;
+ 
+-	if (mp->hw->max_rates == 1 && mp->sample_switch &&
+-	    (mi->total_packets_cur >= SAMPLE_SWITCH_THR ||
+-	     mp->sample_switch == 1))
+-		return;
+-
+ 	if (time_is_before_jiffies(mi->sample_time))
+ 		return;
+ 
+@@ -1811,7 +1732,7 @@ minstrel_ht_update_caps(void *priv, stru
+ 	minstrel_ht_update_ofdm(mp, mi, sband, sta);
+ 
+ 	/* create an initial rate table with the lowest supported rates */
+-	minstrel_ht_update_stats(mp, mi, true);
++	minstrel_ht_update_stats(mp, mi);
+ 	minstrel_ht_update_rates(mp, mi);
+ }
+ 
+@@ -1927,8 +1848,6 @@ minstrel_ht_alloc(struct ieee80211_hw *h
+ 	if (!mp)
+ 		return NULL;
+ 
+-	mp->sample_switch = -1;
+-
+ 	/* contention window settings
+ 	 * Just an approximation. Using the per-queue values would complicate
+ 	 * the calculations and is probably unnecessary */
+@@ -1948,7 +1867,7 @@ minstrel_ht_alloc(struct ieee80211_hw *h
+ 		mp->has_mrr = true;
+ 
+ 	mp->hw = hw;
+-	mp->update_interval = HZ / 10;
++	mp->update_interval = HZ / 20;
+ 
+ 	minstrel_ht_init_cck_rates(mp);
+ 	for (i = 0; i < ARRAY_SIZE(mp->hw->wiphy->bands); i++)
+@@ -1966,8 +1885,6 @@ static void minstrel_ht_add_debugfs(stru
+ 	mp->fixed_rate_idx = (u32) -1;
+ 	debugfs_create_u32("fixed_rate_idx", S_IRUGO | S_IWUGO, debugfsdir,
+ 			   &mp->fixed_rate_idx);
+-	debugfs_create_u32("sample_switch", S_IRUGO | S_IWUSR, debugfsdir,
+-			   &mp->sample_switch);
+ }
+ #endif
+ 
+--- a/net/mac80211/rc80211_minstrel_ht.h
++++ b/net/mac80211/rc80211_minstrel_ht.h
+@@ -75,7 +75,6 @@
+ struct minstrel_priv {
+ 	struct ieee80211_hw *hw;
+ 	bool has_mrr;
+-	u32 sample_switch;
+ 	unsigned int cw_min;
+ 	unsigned int cw_max;
+ 	unsigned int max_retry;
+@@ -147,12 +146,6 @@ struct minstrel_mcs_group_data {
+ 	struct minstrel_rate_stats rates[MCS_GROUP_RATES];
+ };
+ 
+-enum minstrel_sample_mode {
+-	MINSTREL_SAMPLE_IDLE,
+-	MINSTREL_SAMPLE_ACTIVE,
+-	MINSTREL_SAMPLE_PENDING,
+-};
+-
+ struct minstrel_sample_category {
+ 	u8 sample_group;
+ 	u16 sample_rates[MINSTREL_SAMPLE_RATES];
+@@ -182,23 +175,19 @@ struct minstrel_ht_sta {
+ 	unsigned int overhead_legacy;
+ 	unsigned int overhead_legacy_rtscts;
+ 
+-	unsigned int total_packets_last;
+-	unsigned int total_packets_cur;
+ 	unsigned int total_packets;
+ 	unsigned int sample_packets;
+ 
+ 	/* tx flags to add for frames for this sta */
+ 	u32 tx_flags;
+ 
+-	unsigned long sample_time;
+-	struct minstrel_sample_category sample[__MINSTREL_SAMPLE_TYPE_MAX];
++	u8 band;
+ 
+ 	u8 sample_seq;
+-
+-	enum minstrel_sample_mode sample_mode;
+ 	u16 sample_rate;
+ 
+-	u8 band;
++	unsigned long sample_time;
++	struct minstrel_sample_category sample[__MINSTREL_SAMPLE_TYPE_MAX];
+ 
+ 	/* Bitfield of supported MCS rates of all groups */
+ 	u16 supported[MINSTREL_GROUPS_NB];


