Diffstat (limited to 'package/kernel/mac80211/patches/subsys')
11 files changed, 963 insertions, 1 deletion
diff --git a/package/kernel/mac80211/patches/subsys/380-mac80211-assure-all-fragments-are-encrypted.patch b/package/kernel/mac80211/patches/subsys/380-mac80211-assure-all-fragments-are-encrypted.patch new file mode 100644 index 0000000000..69398459f1 --- /dev/null +++ b/package/kernel/mac80211/patches/subsys/380-mac80211-assure-all-fragments-are-encrypted.patch @@ -0,0 +1,69 @@ +From: Mathy Vanhoef <Mathy.Vanhoef@kuleuven.be> +Date: Tue, 11 May 2021 20:02:42 +0200 +Subject: [PATCH] mac80211: assure all fragments are encrypted + +Do not mix plaintext and encrypted fragments in protected Wi-Fi +networks. This fixes CVE-2020-26147. + +Previously, an attacker was able to first forward a legitimate encrypted +fragment towards a victim, followed by a plaintext fragment. The +encrypted and plaintext fragment would then be reassembled. For further +details see Section 6.3 and Appendix D in the paper "Fragment and Forge: +Breaking Wi-Fi Through Frame Aggregation and Fragmentation". + +Because of this change there are now two equivalent conditions in the +code to determine if a received fragment requires sequential PNs, so we +also move this test to a separate function to make the code easier to +maintain. + +Cc: stable@vger.kernel.org +Signed-off-by: Mathy Vanhoef <Mathy.Vanhoef@kuleuven.be> +Signed-off-by: Johannes Berg <johannes.berg@intel.com> +--- + +--- a/net/mac80211/rx.c ++++ b/net/mac80211/rx.c +@@ -2204,6 +2204,16 @@ ieee80211_reassemble_find(struct ieee802 + return NULL; + } + ++static bool requires_sequential_pn(struct ieee80211_rx_data *rx, __le16 fc) ++{ ++ return rx->key && ++ (rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP || ++ rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP_256 || ++ rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP || ++ rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP_256) && ++ ieee80211_has_protected(fc); ++} ++ + static ieee80211_rx_result debug_noinline + ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) + { +@@ -2248,12 +2258,7 @@ ieee80211_rx_h_defragment(struct ieee802 + /* This is the first fragment of a new frame. 
*/ + entry = ieee80211_reassemble_add(rx->sdata, frag, seq, + rx->seqno_idx, &(rx->skb)); +- if (rx->key && +- (rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP || +- rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP_256 || +- rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP || +- rx->key->conf.cipher == WLAN_CIPHER_SUITE_GCMP_256) && +- ieee80211_has_protected(fc)) { ++ if (requires_sequential_pn(rx, fc)) { + int queue = rx->security_idx; + + /* Store CCMP/GCMP PN so that we can verify that the +@@ -2295,11 +2300,7 @@ ieee80211_rx_h_defragment(struct ieee802 + u8 pn[IEEE80211_CCMP_PN_LEN], *rpn; + int queue; + +- if (!rx->key || +- (rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP && +- rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP_256 && +- rx->key->conf.cipher != WLAN_CIPHER_SUITE_GCMP && +- rx->key->conf.cipher != WLAN_CIPHER_SUITE_GCMP_256)) ++ if (!requires_sequential_pn(rx, fc)) + return RX_DROP_UNUSABLE; + memcpy(pn, entry->last_pn, IEEE80211_CCMP_PN_LEN); + for (i = IEEE80211_CCMP_PN_LEN - 1; i >= 0; i--) { diff --git a/package/kernel/mac80211/patches/subsys/381-mac80211-prevent-mixed-key-and-fragment-cache-attack.patch b/package/kernel/mac80211/patches/subsys/381-mac80211-prevent-mixed-key-and-fragment-cache-attack.patch new file mode 100644 index 0000000000..de0f89a5b0 --- /dev/null +++ b/package/kernel/mac80211/patches/subsys/381-mac80211-prevent-mixed-key-and-fragment-cache-attack.patch @@ -0,0 +1,87 @@ +From: Mathy Vanhoef <Mathy.Vanhoef@kuleuven.be> +Date: Tue, 11 May 2021 20:02:43 +0200 +Subject: [PATCH] mac80211: prevent mixed key and fragment cache attacks + +Simultaneously prevent mixed key attacks (CVE-2020-24587) and fragment +cache attacks (CVE-2020-24586). This is accomplished by assigning a +unique color to every key (per interface) and using this to track which +key was used to decrypt a fragment. When reassembling frames, it is +now checked whether all fragments were decrypted using the same key. + +To assure that fragment cache attacks are also prevented, the ID that is +assigned to keys is unique even over (re)associations and (re)connects. +This means fragments separated by a (re)association or (re)connect will +not be reassembled. Because mac80211 now also prevents the reassembly of +mixed encrypted and plaintext fragments, all cache attacks are prevented. + +Cc: stable@vger.kernel.org +Signed-off-by: Mathy Vanhoef <Mathy.Vanhoef@kuleuven.be> +Signed-off-by: Johannes Berg <johannes.berg@intel.com> +--- + +--- a/net/mac80211/ieee80211_i.h ++++ b/net/mac80211/ieee80211_i.h +@@ -97,6 +97,7 @@ struct ieee80211_fragment_entry { + u8 rx_queue; + bool check_sequential_pn; /* needed for CCMP/GCMP */ + u8 last_pn[6]; /* PN of the last fragment if CCMP was used */ ++ unsigned int key_color; + }; + + +--- a/net/mac80211/key.c ++++ b/net/mac80211/key.c +@@ -799,6 +799,7 @@ int ieee80211_key_link(struct ieee80211_ + struct ieee80211_sub_if_data *sdata, + struct sta_info *sta) + { ++ static atomic_t key_color = ATOMIC_INIT(0); + struct ieee80211_key *old_key; + int idx = key->conf.keyidx; + bool pairwise = key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE; +@@ -850,6 +851,12 @@ int ieee80211_key_link(struct ieee80211_ + key->sdata = sdata; + key->sta = sta; + ++ /* ++ * Assign a unique ID to every key so we can easily prevent mixed ++ * key and fragment cache attacks. 
++ */ ++ key->color = atomic_inc_return(&key_color); ++ + increment_tailroom_need_count(sdata); + + ret = ieee80211_key_replace(sdata, sta, pairwise, old_key, key); +--- a/net/mac80211/key.h ++++ b/net/mac80211/key.h +@@ -128,6 +128,8 @@ struct ieee80211_key { + } debugfs; + #endif + ++ unsigned int color; ++ + /* + * key config, must be last because it contains key + * material as variable length member +--- a/net/mac80211/rx.c ++++ b/net/mac80211/rx.c +@@ -2265,6 +2265,7 @@ ieee80211_rx_h_defragment(struct ieee802 + * next fragment has a sequential PN value. + */ + entry->check_sequential_pn = true; ++ entry->key_color = rx->key->color; + memcpy(entry->last_pn, + rx->key->u.ccmp.rx_pn[queue], + IEEE80211_CCMP_PN_LEN); +@@ -2302,6 +2303,11 @@ ieee80211_rx_h_defragment(struct ieee802 + + if (!requires_sequential_pn(rx, fc)) + return RX_DROP_UNUSABLE; ++ ++ /* Prevent mixed key and fragment cache attacks */ ++ if (entry->key_color != rx->key->color) ++ return RX_DROP_UNUSABLE; ++ + memcpy(pn, entry->last_pn, IEEE80211_CCMP_PN_LEN); + for (i = IEEE80211_CCMP_PN_LEN - 1; i >= 0; i--) { + pn[i]++; diff --git a/package/kernel/mac80211/patches/subsys/382-mac80211-properly-handle-A-MSDUs-that-start-with-an-.patch b/package/kernel/mac80211/patches/subsys/382-mac80211-properly-handle-A-MSDUs-that-start-with-an-.patch new file mode 100644 index 0000000000..3fdabde219 --- /dev/null +++ b/package/kernel/mac80211/patches/subsys/382-mac80211-properly-handle-A-MSDUs-that-start-with-an-.patch @@ -0,0 +1,66 @@ +From: Mathy Vanhoef <Mathy.Vanhoef@kuleuven.be> +Date: Tue, 11 May 2021 20:02:44 +0200 +Subject: [PATCH] mac80211: properly handle A-MSDUs that start with an + RFC 1042 header + +Properly parse A-MSDUs whose first 6 bytes happen to equal a rfc1042 +header. This can occur in practice when the destination MAC address +equals AA:AA:03:00:00:00. More importantly, this simplifies the next +patch to mitigate A-MSDU injection attacks. 
+ +Cc: stable@vger.kernel.org +Signed-off-by: Mathy Vanhoef <Mathy.Vanhoef@kuleuven.be> +Signed-off-by: Johannes Berg <johannes.berg@intel.com> +--- + +--- a/include/net/cfg80211.h ++++ b/include/net/cfg80211.h +@@ -5628,7 +5628,7 @@ unsigned int ieee80211_get_mesh_hdrlen(s + */ + int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr, + const u8 *addr, enum nl80211_iftype iftype, +- u8 data_offset); ++ u8 data_offset, bool is_amsdu); + + /** + * ieee80211_data_to_8023 - convert an 802.11 data frame to 802.3 +@@ -5640,7 +5640,7 @@ int ieee80211_data_to_8023_exthdr(struct + static inline int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr, + enum nl80211_iftype iftype) + { +- return ieee80211_data_to_8023_exthdr(skb, NULL, addr, iftype, 0); ++ return ieee80211_data_to_8023_exthdr(skb, NULL, addr, iftype, 0, false); + } + + /** +--- a/net/mac80211/rx.c ++++ b/net/mac80211/rx.c +@@ -2696,7 +2696,7 @@ __ieee80211_rx_h_amsdu(struct ieee80211_ + if (ieee80211_data_to_8023_exthdr(skb, ðhdr, + rx->sdata->vif.addr, + rx->sdata->vif.type, +- data_offset)) ++ data_offset, true)) + return RX_DROP_UNUSABLE; + + ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr, +--- a/net/wireless/util.c ++++ b/net/wireless/util.c +@@ -541,7 +541,7 @@ EXPORT_SYMBOL(ieee80211_get_mesh_hdrlen) + + int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr, + const u8 *addr, enum nl80211_iftype iftype, +- u8 data_offset) ++ u8 data_offset, bool is_amsdu) + { + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; + struct { +@@ -629,7 +629,7 @@ int ieee80211_data_to_8023_exthdr(struct + skb_copy_bits(skb, hdrlen, &payload, sizeof(payload)); + tmp.h_proto = payload.proto; + +- if (likely((ether_addr_equal(payload.hdr, rfc1042_header) && ++ if (likely((!is_amsdu && ether_addr_equal(payload.hdr, rfc1042_header) && + tmp.h_proto != htons(ETH_P_AARP) && + tmp.h_proto != htons(ETH_P_IPX)) || + ether_addr_equal(payload.hdr, bridge_tunnel_header))) diff --git a/package/kernel/mac80211/patches/subsys/383-cfg80211-mitigate-A-MSDU-aggregation-attacks.patch b/package/kernel/mac80211/patches/subsys/383-cfg80211-mitigate-A-MSDU-aggregation-attacks.patch new file mode 100644 index 0000000000..8ea78dca84 --- /dev/null +++ b/package/kernel/mac80211/patches/subsys/383-cfg80211-mitigate-A-MSDU-aggregation-attacks.patch @@ -0,0 +1,40 @@ +From: Mathy Vanhoef <Mathy.Vanhoef@kuleuven.be> +Date: Tue, 11 May 2021 20:02:45 +0200 +Subject: [PATCH] cfg80211: mitigate A-MSDU aggregation attacks + +Mitigate A-MSDU injection attacks (CVE-2020-24588) by detecting if the +destination address of a subframe equals an RFC1042 (i.e., LLC/SNAP) +header, and if so dropping the complete A-MSDU frame. This mitigates +known attacks, although new (unknown) aggregation-based attacks may +remain possible. + +This defense works because in A-MSDU aggregation injection attacks, a +normal encrypted Wi-Fi frame is turned into an A-MSDU frame. This means +the first 6 bytes of the first A-MSDU subframe correspond to an RFC1042 +header. In other words, the destination MAC address of the first A-MSDU +subframe contains the start of an RFC1042 header during an aggregation +attack. We can detect this and thereby prevent this specific attack. +For details, see Section 7.2 of "Fragment and Forge: Breaking Wi-Fi +Through Frame Aggregation and Fragmentation". + +Note that for kernel 4.9 and above this patch depends on "mac80211: +properly handle A-MSDUs that start with a rfc1042 header". 
Otherwise +this patch has no impact and attacks will remain possible. + +Cc: stable@vger.kernel.org +Signed-off-by: Mathy Vanhoef <Mathy.Vanhoef@kuleuven.be> +Signed-off-by: Johannes Berg <johannes.berg@intel.com> +--- + +--- a/net/wireless/util.c ++++ b/net/wireless/util.c +@@ -775,6 +775,9 @@ void ieee80211_amsdu_to_8023s(struct sk_ + remaining = skb->len - offset; + if (subframe_len > remaining) + goto purge; ++ /* mitigate A-MSDU aggregation injection attacks */ ++ if (ether_addr_equal(eth.h_dest, rfc1042_header)) ++ goto purge; + + offset += sizeof(struct ethhdr); + last = remaining <= subframe_len + padding; diff --git a/package/kernel/mac80211/patches/subsys/384-mac80211-drop-A-MSDUs-on-old-ciphers.patch b/package/kernel/mac80211/patches/subsys/384-mac80211-drop-A-MSDUs-on-old-ciphers.patch new file mode 100644 index 0000000000..1b5084c37d --- /dev/null +++ b/package/kernel/mac80211/patches/subsys/384-mac80211-drop-A-MSDUs-on-old-ciphers.patch @@ -0,0 +1,54 @@ +From: Johannes Berg <johannes.berg@intel.com> +Date: Tue, 11 May 2021 20:02:46 +0200 +Subject: [PATCH] mac80211: drop A-MSDUs on old ciphers + +With old ciphers (WEP and TKIP) we shouldn't be using A-MSDUs +since A-MSDUs are only supported if we know that they are, and +the only practical way for that is HT support which doesn't +support old ciphers. + +However, we would normally accept them anyway. Since we check +the MMIC before deaggregating A-MSDUs, and the A-MSDU bit in +the QoS header is not protected in TKIP (or WEP), this enables +attacks similar to CVE-2020-24588. To prevent that, drop A-MSDUs +completely with old ciphers. + +Cc: stable@vger.kernel.org +Signed-off-by: Johannes Berg <johannes.berg@intel.com> +--- + +--- a/net/mac80211/rx.c ++++ b/net/mac80211/rx.c +@@ -6,7 +6,7 @@ + * Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net> + * Copyright 2013-2014 Intel Mobile Communications GmbH + * Copyright(c) 2015 - 2017 Intel Deutschland GmbH +- * Copyright (C) 2018-2020 Intel Corporation ++ * Copyright (C) 2018-2021 Intel Corporation + */ + + #include <linux/jiffies.h> +@@ -2753,6 +2753,23 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx + if (is_multicast_ether_addr(hdr->addr1)) + return RX_DROP_UNUSABLE; + ++ if (rx->key) { ++ /* ++ * We should not receive A-MSDUs on pre-HT connections, ++ * and HT connections cannot use old ciphers. Thus drop ++ * them, as in those cases we couldn't even have SPP ++ * A-MSDUs or such. ++ */ ++ switch (rx->key->conf.cipher) { ++ case WLAN_CIPHER_SUITE_WEP40: ++ case WLAN_CIPHER_SUITE_WEP104: ++ case WLAN_CIPHER_SUITE_TKIP: ++ return RX_DROP_UNUSABLE; ++ default: ++ break; ++ } ++ } ++ + return __ieee80211_rx_h_amsdu(rx, 0); + } + diff --git a/package/kernel/mac80211/patches/subsys/385-mac80211-add-fragment-cache-to-sta_info.patch b/package/kernel/mac80211/patches/subsys/385-mac80211-add-fragment-cache-to-sta_info.patch new file mode 100644 index 0000000000..b536126d38 --- /dev/null +++ b/package/kernel/mac80211/patches/subsys/385-mac80211-add-fragment-cache-to-sta_info.patch @@ -0,0 +1,313 @@ +From: Johannes Berg <johannes.berg@intel.com> +Date: Tue, 11 May 2021 20:02:47 +0200 +Subject: [PATCH] mac80211: add fragment cache to sta_info + +Prior patches protected against fragmentation cache attacks +by coloring keys, but this shows that it can lead to issues +when multiple stations use the same sequence number. Add a +fragment cache to struct sta_info (in addition to the one in +the interface) to separate fragments for different stations +properly. 
+ +This then automatically clear most of the fragment cache when a +station disconnects (or reassociates) from an AP, or when client +interfaces disconnect from the network, etc. + +On the way, also fix the comment there since this brings us in line +with the recommendation in 802.11-2016 ("An AP should support ..."). +Additionally, remove a useless condition (since there's no problem +purging an already empty list). + +Cc: stable@vger.kernel.org +Signed-off-by: Johannes Berg <johannes.berg@intel.com> +--- + +--- a/net/mac80211/ieee80211_i.h ++++ b/net/mac80211/ieee80211_i.h +@@ -50,12 +50,6 @@ struct ieee80211_local; + #define IEEE80211_ENCRYPT_HEADROOM 8 + #define IEEE80211_ENCRYPT_TAILROOM 18 + +-/* IEEE 802.11 (Ch. 9.5 Defragmentation) requires support for concurrent +- * reception of at least three fragmented frames. This limit can be increased +- * by changing this define, at the cost of slower frame reassembly and +- * increased memory use (about 2 kB of RAM per entry). */ +-#define IEEE80211_FRAGMENT_MAX 4 +- + /* power level hasn't been configured (or set to automatic) */ + #define IEEE80211_UNSET_POWER_LEVEL INT_MIN + +@@ -88,19 +82,6 @@ extern const u8 ieee80211_ac_to_qos_mask + + #define IEEE80211_MAX_NAN_INSTANCE_ID 255 + +-struct ieee80211_fragment_entry { +- struct sk_buff_head skb_list; +- unsigned long first_frag_time; +- u16 seq; +- u16 extra_len; +- u16 last_frag; +- u8 rx_queue; +- bool check_sequential_pn; /* needed for CCMP/GCMP */ +- u8 last_pn[6]; /* PN of the last fragment if CCMP was used */ +- unsigned int key_color; +-}; +- +- + struct ieee80211_bss { + u32 device_ts_beacon, device_ts_presp; + +@@ -912,9 +893,7 @@ struct ieee80211_sub_if_data { + + char name[IFNAMSIZ]; + +- /* Fragment table for host-based reassembly */ +- struct ieee80211_fragment_entry fragments[IEEE80211_FRAGMENT_MAX]; +- unsigned int fragment_next; ++ struct ieee80211_fragment_cache frags; + + /* TID bitmap for NoAck policy */ + u16 noack_map; +@@ -2329,4 +2308,7 @@ u32 ieee80211_calc_expected_tx_airtime(s + #define debug_noinline + #endif + ++void ieee80211_init_frag_cache(struct ieee80211_fragment_cache *cache); ++void ieee80211_destroy_frag_cache(struct ieee80211_fragment_cache *cache); ++ + #endif /* IEEE80211_I_H */ +--- a/net/mac80211/iface.c ++++ b/net/mac80211/iface.c +@@ -8,7 +8,7 @@ + * Copyright 2008, Johannes Berg <johannes@sipsolutions.net> + * Copyright 2013-2014 Intel Mobile Communications GmbH + * Copyright (c) 2016 Intel Deutschland GmbH +- * Copyright (C) 2018-2020 Intel Corporation ++ * Copyright (C) 2018-2021 Intel Corporation + */ + #include <linux/slab.h> + #include <linux/kernel.h> +@@ -679,16 +679,12 @@ static void ieee80211_set_multicast_list + */ + static void ieee80211_teardown_sdata(struct ieee80211_sub_if_data *sdata) + { +- int i; +- + /* free extra data */ + ieee80211_free_keys(sdata, false); + + ieee80211_debugfs_remove_netdev(sdata); + +- for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) +- __skb_queue_purge(&sdata->fragments[i].skb_list); +- sdata->fragment_next = 0; ++ ieee80211_destroy_frag_cache(&sdata->frags); + + if (ieee80211_vif_is_mesh(&sdata->vif)) + ieee80211_mesh_teardown_sdata(sdata); +@@ -2038,8 +2034,7 @@ int ieee80211_if_add(struct ieee80211_lo + sdata->wdev.wiphy = local->hw.wiphy; + sdata->local = local; + +- for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) +- skb_queue_head_init(&sdata->fragments[i].skb_list); ++ ieee80211_init_frag_cache(&sdata->frags); + + INIT_LIST_HEAD(&sdata->key_list); + +--- a/net/mac80211/rx.c ++++ b/net/mac80211/rx.c +@@ 
-2133,19 +2133,34 @@ ieee80211_rx_h_decrypt(struct ieee80211_ + return result; + } + ++void ieee80211_init_frag_cache(struct ieee80211_fragment_cache *cache) ++{ ++ int i; ++ ++ for (i = 0; i < ARRAY_SIZE(cache->entries); i++) ++ skb_queue_head_init(&cache->entries[i].skb_list); ++} ++ ++void ieee80211_destroy_frag_cache(struct ieee80211_fragment_cache *cache) ++{ ++ int i; ++ ++ for (i = 0; i < ARRAY_SIZE(cache->entries); i++) ++ __skb_queue_purge(&cache->entries[i].skb_list); ++} ++ + static inline struct ieee80211_fragment_entry * +-ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata, ++ieee80211_reassemble_add(struct ieee80211_fragment_cache *cache, + unsigned int frag, unsigned int seq, int rx_queue, + struct sk_buff **skb) + { + struct ieee80211_fragment_entry *entry; + +- entry = &sdata->fragments[sdata->fragment_next++]; +- if (sdata->fragment_next >= IEEE80211_FRAGMENT_MAX) +- sdata->fragment_next = 0; ++ entry = &cache->entries[cache->next++]; ++ if (cache->next >= IEEE80211_FRAGMENT_MAX) ++ cache->next = 0; + +- if (!skb_queue_empty(&entry->skb_list)) +- __skb_queue_purge(&entry->skb_list); ++ __skb_queue_purge(&entry->skb_list); + + __skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */ + *skb = NULL; +@@ -2160,14 +2175,14 @@ ieee80211_reassemble_add(struct ieee8021 + } + + static inline struct ieee80211_fragment_entry * +-ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata, ++ieee80211_reassemble_find(struct ieee80211_fragment_cache *cache, + unsigned int frag, unsigned int seq, + int rx_queue, struct ieee80211_hdr *hdr) + { + struct ieee80211_fragment_entry *entry; + int i, idx; + +- idx = sdata->fragment_next; ++ idx = cache->next; + for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) { + struct ieee80211_hdr *f_hdr; + struct sk_buff *f_skb; +@@ -2176,7 +2191,7 @@ ieee80211_reassemble_find(struct ieee802 + if (idx < 0) + idx = IEEE80211_FRAGMENT_MAX - 1; + +- entry = &sdata->fragments[idx]; ++ entry = &cache->entries[idx]; + if (skb_queue_empty(&entry->skb_list) || entry->seq != seq || + entry->rx_queue != rx_queue || + entry->last_frag + 1 != frag) +@@ -2217,6 +2232,7 @@ static bool requires_sequential_pn(struc + static ieee80211_rx_result debug_noinline + ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) + { ++ struct ieee80211_fragment_cache *cache = &rx->sdata->frags; + struct ieee80211_hdr *hdr; + u16 sc; + __le16 fc; +@@ -2238,6 +2254,9 @@ ieee80211_rx_h_defragment(struct ieee802 + goto out_no_led; + } + ++ if (rx->sta) ++ cache = &rx->sta->frags; ++ + if (likely(!ieee80211_has_morefrags(fc) && frag == 0)) + goto out; + +@@ -2256,7 +2275,7 @@ ieee80211_rx_h_defragment(struct ieee802 + + if (frag == 0) { + /* This is the first fragment of a new frame. */ +- entry = ieee80211_reassemble_add(rx->sdata, frag, seq, ++ entry = ieee80211_reassemble_add(cache, frag, seq, + rx->seqno_idx, &(rx->skb)); + if (requires_sequential_pn(rx, fc)) { + int queue = rx->security_idx; +@@ -2284,7 +2303,7 @@ ieee80211_rx_h_defragment(struct ieee802 + /* This is a fragment for a frame that should already be pending in + * fragment cache. Add this fragment to the end of the pending entry. 
+ */ +- entry = ieee80211_reassemble_find(rx->sdata, frag, seq, ++ entry = ieee80211_reassemble_find(cache, frag, seq, + rx->seqno_idx, hdr); + if (!entry) { + I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag); +--- a/net/mac80211/sta_info.c ++++ b/net/mac80211/sta_info.c +@@ -4,7 +4,7 @@ + * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> + * Copyright 2013-2014 Intel Mobile Communications GmbH + * Copyright (C) 2015 - 2017 Intel Deutschland GmbH +- * Copyright (C) 2018-2020 Intel Corporation ++ * Copyright (C) 2018-2021 Intel Corporation + */ + + #include <linux/module.h> +@@ -393,6 +393,8 @@ struct sta_info *sta_info_alloc(struct i + + u64_stats_init(&sta->rx_stats.syncp); + ++ ieee80211_init_frag_cache(&sta->frags); ++ + sta->sta_state = IEEE80211_STA_NONE; + + /* Mark TID as unreserved */ +@@ -1103,6 +1105,8 @@ static void __sta_info_destroy_part2(str + + ieee80211_sta_debugfs_remove(sta); + ++ ieee80211_destroy_frag_cache(&sta->frags); ++ + cleanup_single_sta(sta); + } + +--- a/net/mac80211/sta_info.h ++++ b/net/mac80211/sta_info.h +@@ -3,7 +3,7 @@ + * Copyright 2002-2005, Devicescape Software, Inc. + * Copyright 2013-2014 Intel Mobile Communications GmbH + * Copyright(c) 2015-2017 Intel Deutschland GmbH +- * Copyright(c) 2020 Intel Corporation ++ * Copyright(c) 2020-2021 Intel Corporation + */ + + #ifndef STA_INFO_H +@@ -439,6 +439,33 @@ struct ieee80211_sta_rx_stats { + }; + + /* ++ * IEEE 802.11-2016 (10.6 "Defragmentation") recommends support for "concurrent ++ * reception of at least one MSDU per access category per associated STA" ++ * on APs, or "at least one MSDU per access category" on other interface types. ++ * ++ * This limit can be increased by changing this define, at the cost of slower ++ * frame reassembly and increased memory use while fragments are pending. ++ */ ++#define IEEE80211_FRAGMENT_MAX 4 ++ ++struct ieee80211_fragment_entry { ++ struct sk_buff_head skb_list; ++ unsigned long first_frag_time; ++ u16 seq; ++ u16 extra_len; ++ u16 last_frag; ++ u8 rx_queue; ++ bool check_sequential_pn; /* needed for CCMP/GCMP */ ++ u8 last_pn[6]; /* PN of the last fragment if CCMP was used */ ++ unsigned int key_color; ++}; ++ ++struct ieee80211_fragment_cache { ++ struct ieee80211_fragment_entry entries[IEEE80211_FRAGMENT_MAX]; ++ unsigned int next; ++}; ++ ++/* + * The bandwidth threshold below which the per-station CoDel parameters will be + * scaled to be more lenient (to prevent starvation of slow stations). This + * value will be scaled by the number of active stations when it is being +@@ -531,6 +558,7 @@ struct ieee80211_sta_rx_stats { + * @status_stats.last_ack_signal: last ACK signal + * @status_stats.ack_signal_filled: last ACK signal validity + * @status_stats.avg_ack_signal: average ACK signal ++ * @frags: fragment cache + */ + struct sta_info { + /* General information, mostly static */ +@@ -639,6 +667,8 @@ struct sta_info { + + struct cfg80211_chan_def tdls_chandef; + ++ struct ieee80211_fragment_cache frags; ++ + /* keep last! 
*/ + struct ieee80211_sta sta; + }; diff --git a/package/kernel/mac80211/patches/subsys/386-mac80211-check-defrag-PN-against-current-frame.patch b/package/kernel/mac80211/patches/subsys/386-mac80211-check-defrag-PN-against-current-frame.patch new file mode 100644 index 0000000000..fb2747a609 --- /dev/null +++ b/package/kernel/mac80211/patches/subsys/386-mac80211-check-defrag-PN-against-current-frame.patch @@ -0,0 +1,109 @@ +From: Johannes Berg <johannes.berg@intel.com> +Date: Tue, 11 May 2021 20:02:48 +0200 +Subject: [PATCH] mac80211: check defrag PN against current frame + +As pointed out by Mathy Vanhoef, we implement the RX PN check +on fragmented frames incorrectly - we check against the last +received PN prior to the new frame, rather than to the one in +this frame itself. + +Prior patches addressed the security issue here, but in order +to be able to reason better about the code, fix it to really +compare against the current frame's PN, not the last stored +one. + +Cc: stable@vger.kernel.org +Signed-off-by: Johannes Berg <johannes.berg@intel.com> +--- + +--- a/net/mac80211/ieee80211_i.h ++++ b/net/mac80211/ieee80211_i.h +@@ -227,8 +227,15 @@ struct ieee80211_rx_data { + */ + int security_idx; + +- u32 tkip_iv32; +- u16 tkip_iv16; ++ union { ++ struct { ++ u32 iv32; ++ u16 iv16; ++ } tkip; ++ struct { ++ u8 pn[IEEE80211_CCMP_PN_LEN]; ++ } ccm_gcm; ++ }; + }; + + struct ieee80211_csa_settings { +--- a/net/mac80211/rx.c ++++ b/net/mac80211/rx.c +@@ -2318,7 +2318,6 @@ ieee80211_rx_h_defragment(struct ieee802 + if (entry->check_sequential_pn) { + int i; + u8 pn[IEEE80211_CCMP_PN_LEN], *rpn; +- int queue; + + if (!requires_sequential_pn(rx, fc)) + return RX_DROP_UNUSABLE; +@@ -2333,8 +2332,8 @@ ieee80211_rx_h_defragment(struct ieee802 + if (pn[i]) + break; + } +- queue = rx->security_idx; +- rpn = rx->key->u.ccmp.rx_pn[queue]; ++ ++ rpn = rx->ccm_gcm.pn; + if (memcmp(pn, rpn, IEEE80211_CCMP_PN_LEN)) + return RX_DROP_UNUSABLE; + memcpy(entry->last_pn, pn, IEEE80211_CCMP_PN_LEN); +--- a/net/mac80211/wpa.c ++++ b/net/mac80211/wpa.c +@@ -3,6 +3,7 @@ + * Copyright 2002-2004, Instant802 Networks, Inc. 
+ * Copyright 2008, Jouni Malinen <j@w1.fi> + * Copyright (C) 2016-2017 Intel Deutschland GmbH ++ * Copyright (C) 2020-2021 Intel Corporation + */ + + #include <linux/netdevice.h> +@@ -167,8 +168,8 @@ ieee80211_rx_h_michael_mic_verify(struct + + update_iv: + /* update IV in key information to be able to detect replays */ +- rx->key->u.tkip.rx[rx->security_idx].iv32 = rx->tkip_iv32; +- rx->key->u.tkip.rx[rx->security_idx].iv16 = rx->tkip_iv16; ++ rx->key->u.tkip.rx[rx->security_idx].iv32 = rx->tkip.iv32; ++ rx->key->u.tkip.rx[rx->security_idx].iv16 = rx->tkip.iv16; + + return RX_CONTINUE; + +@@ -294,8 +295,8 @@ ieee80211_crypto_tkip_decrypt(struct iee + key, skb->data + hdrlen, + skb->len - hdrlen, rx->sta->sta.addr, + hdr->addr1, hwaccel, rx->security_idx, +- &rx->tkip_iv32, +- &rx->tkip_iv16); ++ &rx->tkip.iv32, ++ &rx->tkip.iv16); + if (res != TKIP_DECRYPT_OK) + return RX_DROP_UNUSABLE; + +@@ -552,6 +553,8 @@ ieee80211_crypto_ccmp_decrypt(struct iee + } + + memcpy(key->u.ccmp.rx_pn[queue], pn, IEEE80211_CCMP_PN_LEN); ++ if (unlikely(ieee80211_is_frag(hdr))) ++ memcpy(rx->ccm_gcm.pn, pn, IEEE80211_CCMP_PN_LEN); + } + + /* Remove CCMP header and MIC */ +@@ -782,6 +785,8 @@ ieee80211_crypto_gcmp_decrypt(struct iee + } + + memcpy(key->u.gcmp.rx_pn[queue], pn, IEEE80211_GCMP_PN_LEN); ++ if (unlikely(ieee80211_is_frag(hdr))) ++ memcpy(rx->ccm_gcm.pn, pn, IEEE80211_CCMP_PN_LEN); + } + + /* Remove GCMP header and MIC */ diff --git a/package/kernel/mac80211/patches/subsys/387-mac80211-prevent-attacks-on-TKIP-WEP-as-well.patch b/package/kernel/mac80211/patches/subsys/387-mac80211-prevent-attacks-on-TKIP-WEP-as-well.patch new file mode 100644 index 0000000000..bc582a6cc2 --- /dev/null +++ b/package/kernel/mac80211/patches/subsys/387-mac80211-prevent-attacks-on-TKIP-WEP-as-well.patch @@ -0,0 +1,62 @@ +From: Johannes Berg <johannes.berg@intel.com> +Date: Tue, 11 May 2021 20:02:49 +0200 +Subject: [PATCH] mac80211: prevent attacks on TKIP/WEP as well + +Similar to the issues fixed in previous patches, TKIP and WEP +should be protected even if for TKIP we have the Michael MIC +protecting it, and WEP is broken anyway. + +However, this also somewhat protects potential other algorithms +that drivers might implement. + +Cc: stable@vger.kernel.org +Signed-off-by: Johannes Berg <johannes.berg@intel.com> +--- + +--- a/net/mac80211/rx.c ++++ b/net/mac80211/rx.c +@@ -2284,6 +2284,7 @@ ieee80211_rx_h_defragment(struct ieee802 + * next fragment has a sequential PN value. + */ + entry->check_sequential_pn = true; ++ entry->is_protected = true; + entry->key_color = rx->key->color; + memcpy(entry->last_pn, + rx->key->u.ccmp.rx_pn[queue], +@@ -2296,6 +2297,9 @@ ieee80211_rx_h_defragment(struct ieee802 + sizeof(rx->key->u.gcmp.rx_pn[queue])); + BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN != + IEEE80211_GCMP_PN_LEN); ++ } else if (rx->key && ieee80211_has_protected(fc)) { ++ entry->is_protected = true; ++ entry->key_color = rx->key->color; + } + return RX_QUEUED; + } +@@ -2337,6 +2341,14 @@ ieee80211_rx_h_defragment(struct ieee802 + if (memcmp(pn, rpn, IEEE80211_CCMP_PN_LEN)) + return RX_DROP_UNUSABLE; + memcpy(entry->last_pn, pn, IEEE80211_CCMP_PN_LEN); ++ } else if (entry->is_protected && ++ (!rx->key || !ieee80211_has_protected(fc) || ++ rx->key->color != entry->key_color)) { ++ /* Drop this as a mixed key or fragment cache attack, even ++ * if for TKIP Michael MIC should protect us, and WEP is a ++ * lost cause anyway. 
++ */ ++ return RX_DROP_UNUSABLE; + } + + skb_pull(rx->skb, ieee80211_hdrlen(fc)); +--- a/net/mac80211/sta_info.h ++++ b/net/mac80211/sta_info.h +@@ -455,7 +455,8 @@ struct ieee80211_fragment_entry { + u16 extra_len; + u16 last_frag; + u8 rx_queue; +- bool check_sequential_pn; /* needed for CCMP/GCMP */ ++ u8 check_sequential_pn:1, /* needed for CCMP/GCMP */ ++ is_protected:1; + u8 last_pn[6]; /* PN of the last fragment if CCMP was used */ + unsigned int key_color; + }; diff --git a/package/kernel/mac80211/patches/subsys/388-mac80211-do-not-accept-forward-invalid-EAPOL-frames.patch b/package/kernel/mac80211/patches/subsys/388-mac80211-do-not-accept-forward-invalid-EAPOL-frames.patch new file mode 100644 index 0000000000..9a0b78def1 --- /dev/null +++ b/package/kernel/mac80211/patches/subsys/388-mac80211-do-not-accept-forward-invalid-EAPOL-frames.patch @@ -0,0 +1,94 @@ +From: Johannes Berg <johannes.berg@intel.com> +Date: Tue, 11 May 2021 20:02:50 +0200 +Subject: [PATCH] mac80211: do not accept/forward invalid EAPOL frames + +EAPOL frames are used for authentication and key management between the +AP and each individual STA associated in the BSS. Those frames are not +supposed to be sent by one associated STA to another associated STA +(either unicast for broadcast/multicast). + +Similarly, in 802.11 they're supposed to be sent to the authenticator +(AP) address. + +Since it is possible for unexpected EAPOL frames to result in misbehavior +in supplicant implementations, it is better for the AP to not allow such +cases to be forwarded to other clients either directly, or indirectly if +the AP interface is part of a bridge. + +Accept EAPOL (control port) frames only if they're transmitted to the +own address, or, due to interoperability concerns, to the PAE group +address. + +Disable forwarding of EAPOL (or well, the configured control port +protocol) frames back to wireless medium in all cases. Previously, these +frames were accepted from fully authenticated and authorized stations +and also from unauthenticated stations for one of the cases. + +Additionally, to avoid forwarding by the bridge, rewrite the PAE group +address case to the local MAC address. + +Cc: stable@vger.kernel.org +Co-developed-by: Jouni Malinen <jouni@codeaurora.org> +Signed-off-by: Jouni Malinen <jouni@codeaurora.org> +Signed-off-by: Johannes Berg <johannes.berg@intel.com> +--- + +--- a/net/mac80211/rx.c ++++ b/net/mac80211/rx.c +@@ -2541,13 +2541,13 @@ static bool ieee80211_frame_allowed(stru + struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data; + + /* +- * Allow EAPOL frames to us/the PAE group address regardless +- * of whether the frame was encrypted or not. ++ * Allow EAPOL frames to us/the PAE group address regardless of ++ * whether the frame was encrypted or not, and always disallow ++ * all other destination addresses for them. 
+ */ +- if (ehdr->h_proto == rx->sdata->control_port_protocol && +- (ether_addr_equal(ehdr->h_dest, rx->sdata->vif.addr) || +- ether_addr_equal(ehdr->h_dest, pae_group_addr))) +- return true; ++ if (unlikely(ehdr->h_proto == rx->sdata->control_port_protocol)) ++ return ether_addr_equal(ehdr->h_dest, rx->sdata->vif.addr) || ++ ether_addr_equal(ehdr->h_dest, pae_group_addr); + + if (ieee80211_802_1x_port_control(rx) || + ieee80211_drop_unencrypted(rx, fc)) +@@ -2572,8 +2572,28 @@ static void ieee80211_deliver_skb_to_loc + cfg80211_rx_control_port(dev, skb, noencrypt); + dev_kfree_skb(skb); + } else { ++ struct ethhdr *ehdr = (void *)skb_mac_header(skb); ++ + memset(skb->cb, 0, sizeof(skb->cb)); + ++ /* ++ * 802.1X over 802.11 requires that the authenticator address ++ * be used for EAPOL frames. However, 802.1X allows the use of ++ * the PAE group address instead. If the interface is part of ++ * a bridge and we pass the frame with the PAE group address, ++ * then the bridge will forward it to the network (even if the ++ * client was not associated yet), which isn't supposed to ++ * happen. ++ * To avoid that, rewrite the destination address to our own ++ * address, so that the authenticator (e.g. hostapd) will see ++ * the frame, but bridge won't forward it anywhere else. Note ++ * that due to earlier filtering, the only other address can ++ * be the PAE group address. ++ */ ++ if (unlikely(skb->protocol == sdata->control_port_protocol && ++ !ether_addr_equal(ehdr->h_dest, sdata->vif.addr))) ++ ether_addr_copy(ehdr->h_dest, sdata->vif.addr); ++ + /* deliver to local stack */ + if (rx->list) + #if LINUX_VERSION_IS_GEQ(4,19,0) +@@ -2617,6 +2637,7 @@ ieee80211_deliver_skb(struct ieee80211_r + if ((sdata->vif.type == NL80211_IFTYPE_AP || + sdata->vif.type == NL80211_IFTYPE_AP_VLAN) && + !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) && ++ ehdr->h_proto != rx->sdata->control_port_protocol && + (sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->u.vlan.sta)) { + if (is_multicast_ether_addr(ehdr->h_dest) && + ieee80211_vif_get_num_mcast_if(sdata) != 0) { diff --git a/package/kernel/mac80211/patches/subsys/389-mac80211-extend-protection-against-mixed-key-and-fra.patch b/package/kernel/mac80211/patches/subsys/389-mac80211-extend-protection-against-mixed-key-and-fra.patch new file mode 100644 index 0000000000..17809263e9 --- /dev/null +++ b/package/kernel/mac80211/patches/subsys/389-mac80211-extend-protection-against-mixed-key-and-fra.patch @@ -0,0 +1,68 @@ +From: Wen Gong <wgong@codeaurora.org> +Date: Tue, 11 May 2021 20:02:51 +0200 +Subject: [PATCH] mac80211: extend protection against mixed key and + fragment cache attacks + +For some chips/drivers, e.g., QCA6174 with ath10k, the decryption is +done by the hardware, and the Protected bit in the Frame Control field +is cleared in the lower level driver before the frame is passed to +mac80211. In such cases, the condition for ieee80211_has_protected() is +not met in ieee80211_rx_h_defragment() of mac80211 and the new security +validation steps are not executed. + +Extend mac80211 to cover the case where the Protected bit has been +cleared, but the frame is indicated as having been decrypted by the +hardware. This extends protection against mixed key and fragment cache +attack for additional drivers/chips. This fixes CVE-2020-24586 and +CVE-2020-24587 for such cases. 
+ +Tested-on: QCA6174 hw3.2 PCI WLAN.RM.4.4.1-00110-QCARMSWP-1 + +Cc: stable@vger.kernel.org +Signed-off-by: Wen Gong <wgong@codeaurora.org> +Signed-off-by: Jouni Malinen <jouni@codeaurora.org> +Signed-off-by: Johannes Berg <johannes.berg@intel.com> +--- + +--- a/net/mac80211/rx.c ++++ b/net/mac80211/rx.c +@@ -2239,6 +2239,7 @@ ieee80211_rx_h_defragment(struct ieee802 + unsigned int frag, seq; + struct ieee80211_fragment_entry *entry; + struct sk_buff *skb; ++ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); + + hdr = (struct ieee80211_hdr *)rx->skb->data; + fc = hdr->frame_control; +@@ -2297,7 +2298,9 @@ ieee80211_rx_h_defragment(struct ieee802 + sizeof(rx->key->u.gcmp.rx_pn[queue])); + BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN != + IEEE80211_GCMP_PN_LEN); +- } else if (rx->key && ieee80211_has_protected(fc)) { ++ } else if (rx->key && ++ (ieee80211_has_protected(fc) || ++ (status->flag & RX_FLAG_DECRYPTED))) { + entry->is_protected = true; + entry->key_color = rx->key->color; + } +@@ -2342,13 +2345,19 @@ ieee80211_rx_h_defragment(struct ieee802 + return RX_DROP_UNUSABLE; + memcpy(entry->last_pn, pn, IEEE80211_CCMP_PN_LEN); + } else if (entry->is_protected && +- (!rx->key || !ieee80211_has_protected(fc) || ++ (!rx->key || ++ (!ieee80211_has_protected(fc) && ++ !(status->flag & RX_FLAG_DECRYPTED)) || + rx->key->color != entry->key_color)) { + /* Drop this as a mixed key or fragment cache attack, even + * if for TKIP Michael MIC should protect us, and WEP is a + * lost cause anyway. + */ + return RX_DROP_UNUSABLE; ++ } else if (entry->is_protected && rx->key && ++ entry->key_color != rx->key->color && ++ (status->flag & RX_FLAG_DECRYPTED)) { ++ return RX_DROP_UNUSABLE; + } + + skb_pull(rx->skb, ieee80211_hdrlen(fc)); diff --git a/package/kernel/mac80211/patches/subsys/500-mac80211_configure_antenna_gain.patch b/package/kernel/mac80211/patches/subsys/500-mac80211_configure_antenna_gain.patch index 5d5dcd51ad..eb967f77d7 100644 --- a/package/kernel/mac80211/patches/subsys/500-mac80211_configure_antenna_gain.patch +++ b/package/kernel/mac80211/patches/subsys/500-mac80211_configure_antenna_gain.patch @@ -87,7 +87,7 @@ CFG80211_TESTMODE_CMD(ieee80211_testmode_cmd) --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h -@@ -1403,6 +1403,7 @@ struct ieee80211_local { +@@ -1390,6 +1390,7 @@ struct ieee80211_local { int dynamic_ps_forced_timeout; int user_power_level; /* in dBm, for all interfaces */ |
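
For readers skimming this series, the core mitigation added by 383-cfg80211-mitigate-A-MSDU-aggregation-attacks.patch can be illustrated outside the kernel. The sketch below is a hypothetical user-space harness (not part of any patch; the function names are invented for illustration) that reproduces the destination-address test: if the first A-MSDU subframe's "destination MAC address" equals the start of an RFC 1042 LLC/SNAP header, the aggregate is treated as an injection attempt and rejected, mirroring the purge in ieee80211_amsdu_to_8023s().

        /* Standalone sketch, not from the patch: the A-MSDU mitigation test of
         * 383-cfg80211-mitigate-A-MSDU-aggregation-attacks.patch as a user-space
         * check. amsdu_subframe_dest_ok() is an invented name. */
        #include <stdbool.h>
        #include <stdio.h>
        #include <string.h>

        /* First 6 bytes of an RFC 1042 LLC/SNAP header: AA:AA:03:00:00:00 */
        static const unsigned char rfc1042_header[6] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };

        /* In an aggregation attack, a normal encrypted frame is replayed with the
         * A-MSDU bit set, so the first subframe's destination field is really the
         * start of an RFC 1042 header. Reject such subframes (and thus the A-MSDU). */
        static bool amsdu_subframe_dest_ok(const unsigned char dest[6])
        {
                return memcmp(dest, rfc1042_header, sizeof(rfc1042_header)) != 0;
        }

        int main(void)
        {
                const unsigned char normal_dest[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
                const unsigned char attack_dest[6] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };

                printf("normal subframe accepted: %s\n",
                       amsdu_subframe_dest_ok(normal_dest) ? "yes" : "no");
                printf("suspicious subframe accepted: %s\n",
                       amsdu_subframe_dest_ok(attack_dest) ? "yes" : "no");
                return 0;
        }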
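
Similarly, the "key color" scheme shared by patches 381, 385, 387 and 389 can be sketched in isolation. The demo below is a deliberate simplification under stated assumptions (demo_key, demo_frag_entry and the demo_* helpers are invented names; real mac80211 additionally tracks sequential PNs, per-station caches and the RX_FLAG_DECRYPTED case): every key install gets a unique ID, the ID of the key that protected fragment 0 is stored with the reassembly entry, and any later fragment handled under a different key, or no key at all, is dropped.

        /* Standalone sketch, not from the patches: the key-color idea behind
         * 381-mac80211-prevent-mixed-key-and-fragment-cache-attack.patch and
         * 387-mac80211-prevent-attacks-on-TKIP-WEP-as-well.patch. All names here
         * are invented for illustration. */
        #include <stdbool.h>
        #include <stdio.h>

        struct demo_key {
                unsigned int color;             /* unique per key install */
        };

        struct demo_frag_entry {
                bool is_protected;
                unsigned int key_color;
        };

        static unsigned int next_key_color;

        static void demo_key_install(struct demo_key *key)
        {
                key->color = ++next_key_color;  /* mirrors atomic_inc_return() in key.c */
        }

        /* First fragment: remember which key (if any) protected it. */
        static void demo_frag_start(struct demo_frag_entry *e, const struct demo_key *key)
        {
                e->is_protected = key != NULL;
                e->key_color = key ? key->color : 0;
        }

        /* Follow-up fragment: drop on key mismatch or missing protection. */
        static bool demo_frag_accept(const struct demo_frag_entry *e, const struct demo_key *key)
        {
                if (!e->is_protected)
                        return true;            /* plaintext defrag handled elsewhere */
                return key && key->color == e->key_color;
        }

        int main(void)
        {
                struct demo_key old_key, new_key;
                struct demo_frag_entry entry;

                demo_key_install(&old_key);
                demo_frag_start(&entry, &old_key);

                demo_key_install(&new_key);     /* e.g. after a reassociation/rekey */
                printf("fragment under same key accepted: %s\n",
                       demo_frag_accept(&entry, &old_key) ? "yes" : "no");
                printf("fragment under rekeyed key accepted: %s\n",
                       demo_frag_accept(&entry, &new_key) ? "yes" : "no");
                return 0;
        }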