From 196f3d586f11d96ba4ab60068cfb12420bcd20fd Mon Sep 17 00:00:00 2001
From: "Jason A. Donenfeld"
Date: Thu, 4 Mar 2021 13:37:13 -0700
Subject: kernel-5.4: bump to 5.4.102 and refresh patches
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

5.4.102 backported a lot of stuff that our WireGuard backport already
did, in addition to other patches we had, so those patches were removed
from that part of the series. In the process other patches were
refreshed or reworked to account for upstream changes.

This commit involved `update_kernel.sh -v -u 5.4`.

Cc: John Audia
Cc: David Bauer
Cc: Petr Štetiar
Signed-off-by: Jason A. Donenfeld
---
 include/kernel-version.mk | 4 +-
 .../450-fix-block-protection-clearing.patch | 2 +-
 ...pxa_camera-Use-the-new-set_mbus_config-op.patch | 4 +-
 .../401-bcm963xx_real_rootfs_length.patch | 2 +-
 ...uce-helper-for-nat-d-source-address-in-ne.patch | 148 -
 ...-0072-net-WireGuard-secure-network-tunnel.patch | 8071 ++++++++++++++++++++
 ...ss-zeroed-opts-from-icmp-v6-_ndo_send-bef.patch | 299 -
 ...elftests-import-harness-makefile-for-test.patch | 1078 +++
 ...-0074-net-WireGuard-secure-network-tunnel.patch | 8071 --------------------
 ...config-select-parent-dependency-for-crypt.patch | 30 +
 ...-global-fix-spelling-mistakes-in-comments.patch | 66 +
 ...elftests-import-harness-makefile-for-test.patch | 1078 ---
 ...config-select-parent-dependency-for-crypt.patch | 30 -
 ...ain-remove-unused-include-linux-version.h.patch | 28 +
 ...llowedips-use-kfree_rcu-instead-of-call_r.patch | 41 +
 ...-global-fix-spelling-mistakes-in-comments.patch | 66 -
 ...ain-remove-unused-include-linux-version.h.patch | 28 -
 ...elftests-remove-ancient-kernel-compatibil.patch | 373 +
 ...llowedips-use-kfree_rcu-instead-of-call_r.patch | 41 -
 ...ueueing-do-not-account-for-pfmemalloc-whe.patch | 39 +
 ...elftests-remove-ancient-kernel-compatibil.patch | 373 -
 ...ocket-mark-skbs-as-not-on-list-when-recei.patch | 34 +
 ...llowedips-fix-use-after-free-in-root_remo.patch | 164 +
 ...ueueing-do-not-account-for-pfmemalloc-whe.patch | 39 -
 ...oise-reject-peers-with-low-order-public-k.patch | 233 +
 ...ocket-mark-skbs-as-not-on-list-when-recei.patch | 34 -
 ...llowedips-fix-use-after-free-in-root_remo.patch | 164 -
 ...elftests-ensure-non-addition-of-peers-wit.patch | 34 +
 ...oise-reject-peers-with-low-order-public-k.patch | 233 -
 ...elftests-tie-socket-waiting-to-target-pid.patch | 77 +
 ...wireguard-device-use-icmp_ndo_send-helper.patch | 64 +
 ...elftests-ensure-non-addition-of-peers-wit.patch | 34 -
 ...elftests-reduce-complexity-and-fix-make-r.patch | 104 +
 ...elftests-tie-socket-waiting-to-target-pid.patch | 77 -
 ...wireguard-device-use-icmp_ndo_send-helper.patch | 64 -
 ...ard-receive-reset-last_under_load-to-zero.patch | 38 +
 ...elftests-reduce-complexity-and-fix-make-r.patch | 104 -
 ...-wireguard-send-account-for-mtu-0-devices.patch | 95 +
 ...ard-receive-reset-last_under_load-to-zero.patch | 38 -
 ...ocket-remove-extra-call-to-synchronize_ne.patch | 32 +
 ...elftests-remove-duplicated-include-sys-ty.patch | 27 +
 ...-wireguard-send-account-for-mtu-0-devices.patch | 95 -
 ...guard-queueing-account-for-skb-protocol-0.patch | 100 +
 ...ocket-remove-extra-call-to-synchronize_ne.patch | 32 -
 ...eceive-remove-dead-code-from-default-pack.patch | 35 +
 ...elftests-remove-duplicated-include-sys-ty.patch | 27 -
 ...oise-error-out-precomputed-DH-during-hand.patch | 224 +
 ...guard-queueing-account-for-skb-protocol-0.patch | 100 -
 ...eceive-remove-dead-code-from-default-pack.patch | 35 -
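For context, the bump described above can also be reproduced by hand in an
OpenWrt buildroot. The sketch below is illustrative only; the
`update_kernel.sh` helper is assumed to come from OpenWrt's separate
maintainer tooling and is not part of this tree, and only the version and
hash values are taken from this commit:

    # bump the point release and its checksum in include/kernel-version.mk
    #   LINUX_VERSION-5.4 = .102
    #   LINUX_KERNEL_HASH-5.4.102 = fd697ce1c3f6024d4ae77d4eb5a1552199407b60cb8e90bc621e23cbce639aed
    # then re-apply and refresh the target patches against the new kernel
    make target/linux/clean
    make target/linux/refresh V=s
    # or let the maintainer script drive the whole update, as this commit did:
    ./update_kernel.sh -v -u 5.4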
...end-remove-errant-newline-from-packet_enc.patch | 29 + ...oise-error-out-precomputed-DH-during-hand.patch | 224 - ...ueueing-cleanup-ptr_ring-in-error-path-of.patch | 35 + ...eceive-use-tunnel-helpers-for-decapsulati.patch | 50 + ...end-remove-errant-newline-from-packet_enc.patch | 29 - ...ueueing-cleanup-ptr_ring-in-error-path-of.patch | 35 - ...elftests-use-normal-kernel-stack-size-on-.patch | 28 + ...eceive-use-tunnel-helpers-for-decapsulati.patch | 50 - ...ocket-remove-errant-restriction-on-loopin.patch | 162 + ...elftests-use-normal-kernel-stack-size-on-.patch | 28 - ...end-receive-cond_resched-when-processing-.patch | 58 + ...elftests-initalize-ipv6-members-to-NULL-t.patch | 51 + ...ocket-remove-errant-restriction-on-loopin.patch | 162 - ...end-receive-cond_resched-when-processing-.patch | 58 - ...end-receive-use-explicit-unlikely-branch-.patch | 88 + ...elftests-initalize-ipv6-members-to-NULL-t.patch | 51 - ...d-selftests-use-newer-iproute2-for-gcc-10.patch | 31 + ...oise-read-preshared-key-while-taking-lock.patch | 61 + ...end-receive-use-explicit-unlikely-branch-.patch | 88 - ...ueueing-preserve-flow-hash-across-packet-.patch | 116 + ...d-selftests-use-newer-iproute2-for-gcc-10.patch | 31 - ...oise-read-preshared-key-while-taking-lock.patch | 61 - ...oise-separate-receive-counter-from-send-c.patch | 330 + ...oise-do-not-assign-initiation-time-in-if-.patch | 33 + ...ueueing-preserve-flow-hash-across-packet-.patch | 116 - ...rd-device-avoid-circular-netns-references.patch | 296 + ...oise-separate-receive-counter-from-send-c.patch | 330 - ...oise-do-not-assign-initiation-time-in-if-.patch | 33 - ...eceive-account-for-napi_gro_receive-never.patch | 42 + ...tunnel-add-header_ops-for-layer-3-devices.patch | 58 + ...rd-device-avoid-circular-netns-references.patch | 296 - ...mplement-header_ops-parse_protocol-for-AF.patch | 36 + ...eceive-account-for-napi_gro_receive-never.patch | 42 - ...tunnel-add-header_ops-for-layer-3-devices.patch | 58 - ...ueueing-make-use-of-ip_tunnel_parse_proto.patch | 68 + ...ink-consistently-use-NLA_POLICY_EXACT_LEN.patch | 49 + ...mplement-header_ops-parse_protocol-for-AF.patch | 36 - ...tlink-consistently-use-NLA_POLICY_MIN_LEN.patch | 39 + ...ueueing-make-use-of-ip_tunnel_parse_proto.patch | 68 - ...ink-consistently-use-NLA_POLICY_EXACT_LEN.patch | 49 - ...oise-take-lock-when-removing-handshake-en.patch | 127 + ...tlink-consistently-use-NLA_POLICY_MIN_LEN.patch | 39 - ...eerlookup-take-lock-before-checking-hash-.patch | 62 + ...oise-take-lock-when-removing-handshake-en.patch | 127 - ...elftests-check-that-route_me_harder-packe.patch | 56 + ...void-double-unlikely-notation-when-using-.patch | 55 + ...eerlookup-take-lock-before-checking-hash-.patch | 62 - ...elftests-check-that-route_me_harder-packe.patch | 56 - ...ard-socket-remove-bogus-__be32-annotation.patch | 52 + ...void-double-unlikely-notation-when-using-.patch | 55 - ...-selftests-test-multiple-parallel-streams.patch | 52 + ...eer-put-frequently-used-members-above-cac.patch | 42 + ...ard-socket-remove-bogus-__be32-annotation.patch | 52 - ...evice-do-not-generate-ICMP-for-non-IP-pac.patch | 47 + ...-selftests-test-multiple-parallel-streams.patch | 52 - ...eer-put-frequently-used-members-above-cac.patch | 42 - ...queueing-get-rid-of-per-peer-ring-buffers.patch | 560 ++ ...evice-do-not-generate-ICMP-for-non-IP-pac.patch | 47 - ...-kconfig-use-arm-chacha-even-with-no-neon.patch | 30 + ...queueing-get-rid-of-per-peer-ring-buffers.patch | 560 -- ...-kconfig-use-arm-chacha-even-with-no-neon.patch | 30 - 
...on-update-interface-mapping-for-ZTE-P685M.patch | 73 - .../linux/generic/hack-5.4/204-module_strip.patch | 4 +- .../generic/hack-5.4/221-module_exports.patch | 2 +- ...unlock_mx25l6406e_with_4bit_block_protect.patch | 6 +- ...-allow-NOR-driver-to-write-fewer-bytes-th.patch | 2 +- ...465-m25p80-mx-disable-software-protection.patch | 2 +- ...spi-nor-fix-Spansion-regressions-aliased-.patch | 4 +- ...-support-limiting-4K-sectors-support-base.patch | 6 +- .../476-mtd-spi-nor-add-eon-en25q128.patch | 2 +- .../479-mtd-spi-nor-add-xtx-xt25f128b.patch | 2 +- ...spi-nor-rework-broken-flash-reset-support.patch | 24 +- ...pi-nor-add-support-for-Gigadevice-GD25D05.patch | 2 +- ...nor-fix-4-byte-opcode-support-for-w25q256.patch | 4 +- ....6-mtd-spi-nor-Add-support-for-mx25r3235f.patch | 2 +- ...om-wdt-disable-pretimeout-on-timer-platfo.patch | 10 +- ...qcom-Use-bulk-clk-api-and-assert-on-error.patch | 4 +- ...ipq806x-PCI-qcom-Add-ipq8064-rev2-variant.patch | 2 +- ...CI-qcom-Support-pci-speed-set-for-ipq806x.patch | 4 +- .../linux/lantiq/patches-5.4/0152-lantiq-VPE.patch | 2 +- ...add-support-for-TLS-1.0-record-encryption.patch | 6 +- ...-net-virtual-network-device-for-Jailhouse.patch | 4 +- ...-spi-nor-Use-1-bit-mode-of-spansion-s25fs.patch | 2 +- ...b-0005-usb-dwc3-add-otg-properties-update.patch | 2 +- ...0310-dts-add-wmac-support-for-mt7622-rfb1.patch | 2 +- ...-propagate-resolved-link-config-via-mac_l.patch | 2 +- ...ediatek-Split-PCIe-node-for-MT2712-MT7622.patch | 4 +- ...t-mtk_eth_soc-add-support-for-coherent-DM.patch | 2 +- ...cie-mediatek-add-support-for-coherent-DMA.patch | 10 +- .../patches-5.4/1020-spi-nor-w25q512jv.patch | 2 +- ...rely-on-build_skb-in-mvneta_rx_swbm-poll-.patch | 2 +- .../006-net-mvneta-add-basic-XDP-support.patch | 8 +- ...-net-mvneta-make-tx-buffer-array-agnostic.patch | 2 +- .../009-net-mvneta-add-XDP_TX-support.patch | 2 +- ...er-disallow-XDP-program-on-hardware-buffe.patch | 2 +- ...fix-XDP-support-if-sw-bm-is-used-as-fallb.patch | 6 +- .../312-helios4-dts-status-led-alias.patch | 21 +- .../700-mvneta-tx-queue-workaround.patch | 4 +- ...mtd-nor-support-mtd-name-from-device-tree.patch | 4 +- .../patches-5.4/302-spi-nor-add-gd25q512.patch | 2 +- .../302-clocksource-add-rtl9300-driver.patch | 4 +- ...nner-a64-sopine-Add-Sopine-flash-partitio.patch | 2 +- 151 files changed, 13824 insertions(+), 14347 deletions(-) delete mode 100644 target/linux/generic/backport-5.4/080-wireguard-0072-icmp-introduce-helper-for-nat-d-source-address-in-ne.patch create mode 100644 target/linux/generic/backport-5.4/080-wireguard-0072-net-WireGuard-secure-network-tunnel.patch delete mode 100644 target/linux/generic/backport-5.4/080-wireguard-0073-net-icmp-pass-zeroed-opts-from-icmp-v6-_ndo_send-bef.patch create mode 100644 target/linux/generic/backport-5.4/080-wireguard-0073-wireguard-selftests-import-harness-makefile-for-test.patch delete mode 100644 target/linux/generic/backport-5.4/080-wireguard-0074-net-WireGuard-secure-network-tunnel.patch create mode 100644 target/linux/generic/backport-5.4/080-wireguard-0074-wireguard-Kconfig-select-parent-dependency-for-crypt.patch create mode 100644 target/linux/generic/backport-5.4/080-wireguard-0075-wireguard-global-fix-spelling-mistakes-in-comments.patch delete mode 100644 target/linux/generic/backport-5.4/080-wireguard-0075-wireguard-selftests-import-harness-makefile-for-test.patch delete mode 100644 target/linux/generic/backport-5.4/080-wireguard-0076-wireguard-Kconfig-select-parent-dependency-for-crypt.patch create mode 100644 
target/linux/generic/backport-5.4/080-wireguard-0076-wireguard-main-remove-unused-include-linux-version.h.patch create mode 100644 target/linux/generic/backport-5.4/080-wireguard-0077-wireguard-allowedips-use-kfree_rcu-instead-of-call_r.patch delete mode 100644 target/linux/generic/backport-5.4/080-wireguard-0077-wireguard-global-fix-spelling-mistakes-in-comments.patch delete mode 100644 target/linux/generic/backport-5.4/080-wireguard-0078-wireguard-main-remove-unused-include-linux-version.h.patch create mode 100644 target/linux/generic/backport-5.4/080-wireguard-0078-wireguard-selftests-remove-ancient-kernel-compatibil.patch delete mode 100644 target/linux/generic/backport-5.4/080-wireguard-0079-wireguard-allowedips-use-kfree_rcu-instead-of-call_r.patch create mode 100644 target/linux/generic/backport-5.4/080-wireguard-0079-wireguard-queueing-do-not-account-for-pfmemalloc-whe.patch delete mode 100644 target/linux/generic/backport-5.4/080-wireguard-0080-wireguard-selftests-remove-ancient-kernel-compatibil.patch create mode 100644 target/linux/generic/backport-5.4/080-wireguard-0080-wireguard-socket-mark-skbs-as-not-on-list-when-recei.patch create mode 100644 target/linux/generic/backport-5.4/080-wireguard-0081-wireguard-allowedips-fix-use-after-free-in-root_remo.patch delete mode 100644 target/linux/generic/backport-5.4/080-wireguard-0081-wireguard-queueing-do-not-account-for-pfmemalloc-whe.patch create mode 100644 target/linux/generic/backport-5.4/080-wireguard-0082-wireguard-noise-reject-peers-with-low-order-public-k.patch delete mode 100644 target/linux/generic/backport-5.4/080-wireguard-0082-wireguard-socket-mark-skbs-as-not-on-list-when-recei.patch delete mode 100644 target/linux/generic/backport-5.4/080-wireguard-0083-wireguard-allowedips-fix-use-after-free-in-root_remo.patch create mode 100644 target/linux/generic/backport-5.4/080-wireguard-0083-wireguard-selftests-ensure-non-addition-of-peers-wit.patch delete mode 100644 target/linux/generic/backport-5.4/080-wireguard-0084-wireguard-noise-reject-peers-with-low-order-public-k.patch create mode 100644 target/linux/generic/backport-5.4/080-wireguard-0084-wireguard-selftests-tie-socket-waiting-to-target-pid.patch create mode 100644 target/linux/generic/backport-5.4/080-wireguard-0085-wireguard-device-use-icmp_ndo_send-helper.patch delete mode 100644 target/linux/generic/backport-5.4/080-wireguard-0085-wireguard-selftests-ensure-non-addition-of-peers-wit.patch create mode 100644 target/linux/generic/backport-5.4/080-wireguard-0086-wireguard-selftests-reduce-complexity-and-fix-make-r.patch delete mode 100644 target/linux/generic/backport-5.4/080-wireguard-0086-wireguard-selftests-tie-socket-waiting-to-target-pid.patch delete mode 100644 target/linux/generic/backport-5.4/080-wireguard-0087-wireguard-device-use-icmp_ndo_send-helper.patch create mode 100644 target/linux/generic/backport-5.4/080-wireguard-0087-wireguard-receive-reset-last_under_load-to-zero.patch delete mode 100644 target/linux/generic/backport-5.4/080-wireguard-0088-wireguard-selftests-reduce-complexity-and-fix-make-r.patch create mode 100644 target/linux/generic/backport-5.4/080-wireguard-0088-wireguard-send-account-for-mtu-0-devices.patch delete mode 100644 target/linux/generic/backport-5.4/080-wireguard-0089-wireguard-receive-reset-last_under_load-to-zero.patch create mode 100644 target/linux/generic/backport-5.4/080-wireguard-0089-wireguard-socket-remove-extra-call-to-synchronize_ne.patch create mode 100644 
target/linux/generic/backport-5.4/080-wireguard-0090-wireguard-selftests-remove-duplicated-include-sys-ty.patch delete mode 100644 target/linux/generic/backport-5.4/080-wireguard-0090-wireguard-send-account-for-mtu-0-devices.patch create mode 100644 target/linux/generic/backport-5.4/080-wireguard-0091-wireguard-queueing-account-for-skb-protocol-0.patch delete mode 100644 target/linux/generic/backport-5.4/080-wireguard-0091-wireguard-socket-remove-extra-call-to-synchronize_ne.patch create mode 100644 target/linux/generic/backport-5.4/080-wireguard-0092-wireguard-receive-remove-dead-code-from-default-pack.patch delete mode 100644 target/linux/generic/backport-5.4/080-wireguard-0092-wireguard-selftests-remove-duplicated-include-sys-ty.patch create mode 100644 target/linux/generic/backport-5.4/080-wireguard-0093-wireguard-noise-error-out-precomputed-DH-during-hand.patch delete mode 100644 target/linux/generic/backport-5.4/080-wireguard-0093-wireguard-queueing-account-for-skb-protocol-0.patch delete mode 100644 target/linux/generic/backport-5.4/080-wireguard-0094-wireguard-receive-remove-dead-code-from-default-pack.patch create mode 100644 target/linux/generic/backport-5.4/080-wireguard-0094-wireguard-send-remove-errant-newline-from-packet_enc.patch delete mode 100644 target/linux/generic/backport-5.4/080-wireguard-0095-wireguard-noise-error-out-precomputed-DH-during-hand.patch create mode 100644 target/linux/generic/backport-5.4/080-wireguard-0095-wireguard-queueing-cleanup-ptr_ring-in-error-path-of.patch create mode 100644 target/linux/generic/backport-5.4/080-wireguard-0096-wireguard-receive-use-tunnel-helpers-for-decapsulati.patch delete mode 100644 target/linux/generic/backport-5.4/080-wireguard-0096-wireguard-send-remove-errant-newline-from-packet_enc.patch delete mode 100644 target/linux/generic/backport-5.4/080-wireguard-0097-wireguard-queueing-cleanup-ptr_ring-in-error-path-of.patch create mode 100644 target/linux/generic/backport-5.4/080-wireguard-0097-wireguard-selftests-use-normal-kernel-stack-size-on-.patch delete mode 100644 target/linux/generic/backport-5.4/080-wireguard-0098-wireguard-receive-use-tunnel-helpers-for-decapsulati.patch create mode 100644 target/linux/generic/backport-5.4/080-wireguard-0098-wireguard-socket-remove-errant-restriction-on-loopin.patch delete mode 100644 target/linux/generic/backport-5.4/080-wireguard-0099-wireguard-selftests-use-normal-kernel-stack-size-on-.patch create mode 100644 target/linux/generic/backport-5.4/080-wireguard-0099-wireguard-send-receive-cond_resched-when-processing-.patch create mode 100644 target/linux/generic/backport-5.4/080-wireguard-0100-wireguard-selftests-initalize-ipv6-members-to-NULL-t.patch delete mode 100644 target/linux/generic/backport-5.4/080-wireguard-0100-wireguard-socket-remove-errant-restriction-on-loopin.patch delete mode 100644 target/linux/generic/backport-5.4/080-wireguard-0101-wireguard-send-receive-cond_resched-when-processing-.patch create mode 100644 target/linux/generic/backport-5.4/080-wireguard-0101-wireguard-send-receive-use-explicit-unlikely-branch-.patch delete mode 100644 target/linux/generic/backport-5.4/080-wireguard-0102-wireguard-selftests-initalize-ipv6-members-to-NULL-t.patch create mode 100644 target/linux/generic/backport-5.4/080-wireguard-0102-wireguard-selftests-use-newer-iproute2-for-gcc-10.patch create mode 100644 target/linux/generic/backport-5.4/080-wireguard-0103-wireguard-noise-read-preshared-key-while-taking-lock.patch delete mode 100644 
target/linux/generic/backport-5.4/080-wireguard-0103-wireguard-send-receive-use-explicit-unlikely-branch-.patch create mode 100644 target/linux/generic/backport-5.4/080-wireguard-0104-wireguard-queueing-preserve-flow-hash-across-packet-.patch delete mode 100644 target/linux/generic/backport-5.4/080-wireguard-0104-wireguard-selftests-use-newer-iproute2-for-gcc-10.patch delete mode 100644 target/linux/generic/backport-5.4/080-wireguard-0105-wireguard-noise-read-preshared-key-while-taking-lock.patch create mode 100644 target/linux/generic/backport-5.4/080-wireguard-0105-wireguard-noise-separate-receive-counter-from-send-c.patch create mode 100644 target/linux/generic/backport-5.4/080-wireguard-0106-wireguard-noise-do-not-assign-initiation-time-in-if-.patch delete mode 100644 target/linux/generic/backport-5.4/080-wireguard-0106-wireguard-queueing-preserve-flow-hash-across-packet-.patch create mode 100644 target/linux/generic/backport-5.4/080-wireguard-0107-wireguard-device-avoid-circular-netns-references.patch delete mode 100644 target/linux/generic/backport-5.4/080-wireguard-0107-wireguard-noise-separate-receive-counter-from-send-c.patch delete mode 100644 target/linux/generic/backport-5.4/080-wireguard-0108-wireguard-noise-do-not-assign-initiation-time-in-if-.patch create mode 100644 target/linux/generic/backport-5.4/080-wireguard-0108-wireguard-receive-account-for-napi_gro_receive-never.patch create mode 100644 target/linux/generic/backport-5.4/080-wireguard-0109-net-ip_tunnel-add-header_ops-for-layer-3-devices.patch delete mode 100644 target/linux/generic/backport-5.4/080-wireguard-0109-wireguard-device-avoid-circular-netns-references.patch create mode 100644 target/linux/generic/backport-5.4/080-wireguard-0110-wireguard-implement-header_ops-parse_protocol-for-AF.patch delete mode 100644 target/linux/generic/backport-5.4/080-wireguard-0110-wireguard-receive-account-for-napi_gro_receive-never.patch delete mode 100644 target/linux/generic/backport-5.4/080-wireguard-0111-net-ip_tunnel-add-header_ops-for-layer-3-devices.patch create mode 100644 target/linux/generic/backport-5.4/080-wireguard-0111-wireguard-queueing-make-use-of-ip_tunnel_parse_proto.patch create mode 100644 target/linux/generic/backport-5.4/080-wireguard-0112-netlink-consistently-use-NLA_POLICY_EXACT_LEN.patch delete mode 100644 target/linux/generic/backport-5.4/080-wireguard-0112-wireguard-implement-header_ops-parse_protocol-for-AF.patch create mode 100644 target/linux/generic/backport-5.4/080-wireguard-0113-netlink-consistently-use-NLA_POLICY_MIN_LEN.patch delete mode 100644 target/linux/generic/backport-5.4/080-wireguard-0113-wireguard-queueing-make-use-of-ip_tunnel_parse_proto.patch delete mode 100644 target/linux/generic/backport-5.4/080-wireguard-0114-netlink-consistently-use-NLA_POLICY_EXACT_LEN.patch create mode 100644 target/linux/generic/backport-5.4/080-wireguard-0114-wireguard-noise-take-lock-when-removing-handshake-en.patch delete mode 100644 target/linux/generic/backport-5.4/080-wireguard-0115-netlink-consistently-use-NLA_POLICY_MIN_LEN.patch create mode 100644 target/linux/generic/backport-5.4/080-wireguard-0115-wireguard-peerlookup-take-lock-before-checking-hash-.patch delete mode 100644 target/linux/generic/backport-5.4/080-wireguard-0116-wireguard-noise-take-lock-when-removing-handshake-en.patch create mode 100644 target/linux/generic/backport-5.4/080-wireguard-0116-wireguard-selftests-check-that-route_me_harder-packe.patch create mode 100644 
target/linux/generic/backport-5.4/080-wireguard-0117-wireguard-avoid-double-unlikely-notation-when-using-.patch delete mode 100644 target/linux/generic/backport-5.4/080-wireguard-0117-wireguard-peerlookup-take-lock-before-checking-hash-.patch delete mode 100644 target/linux/generic/backport-5.4/080-wireguard-0118-wireguard-selftests-check-that-route_me_harder-packe.patch create mode 100644 target/linux/generic/backport-5.4/080-wireguard-0118-wireguard-socket-remove-bogus-__be32-annotation.patch delete mode 100644 target/linux/generic/backport-5.4/080-wireguard-0119-wireguard-avoid-double-unlikely-notation-when-using-.patch create mode 100644 target/linux/generic/backport-5.4/080-wireguard-0119-wireguard-selftests-test-multiple-parallel-streams.patch create mode 100644 target/linux/generic/backport-5.4/080-wireguard-0120-wireguard-peer-put-frequently-used-members-above-cac.patch delete mode 100644 target/linux/generic/backport-5.4/080-wireguard-0120-wireguard-socket-remove-bogus-__be32-annotation.patch create mode 100644 target/linux/generic/backport-5.4/080-wireguard-0121-wireguard-device-do-not-generate-ICMP-for-non-IP-pac.patch delete mode 100644 target/linux/generic/backport-5.4/080-wireguard-0121-wireguard-selftests-test-multiple-parallel-streams.patch delete mode 100644 target/linux/generic/backport-5.4/080-wireguard-0122-wireguard-peer-put-frequently-used-members-above-cac.patch create mode 100644 target/linux/generic/backport-5.4/080-wireguard-0122-wireguard-queueing-get-rid-of-per-peer-ring-buffers.patch delete mode 100644 target/linux/generic/backport-5.4/080-wireguard-0123-wireguard-device-do-not-generate-ICMP-for-non-IP-pac.patch create mode 100644 target/linux/generic/backport-5.4/080-wireguard-0123-wireguard-kconfig-use-arm-chacha-even-with-no-neon.patch delete mode 100644 target/linux/generic/backport-5.4/080-wireguard-0124-wireguard-queueing-get-rid-of-per-peer-ring-buffers.patch delete mode 100644 target/linux/generic/backport-5.4/080-wireguard-0125-wireguard-kconfig-use-arm-chacha-even-with-no-neon.patch delete mode 100644 target/linux/generic/backport-5.4/830-v5.12-0002-usb-serial-option-update-interface-mapping-for-ZTE-P685M.patch diff --git a/include/kernel-version.mk b/include/kernel-version.mk index d6bb2061ca..9fbd861440 100644 --- a/include/kernel-version.mk +++ b/include/kernel-version.mk @@ -6,10 +6,10 @@ ifdef CONFIG_TESTING_KERNEL KERNEL_PATCHVER:=$(KERNEL_TESTING_PATCHVER) endif -LINUX_VERSION-5.4 = .101 +LINUX_VERSION-5.4 = .102 LINUX_VERSION-5.10 = .18 -LINUX_KERNEL_HASH-5.4.101 = 4e118c072dbe3209ddeaff32ecc558f7e809d54e661550342079f1ee76d9349d +LINUX_KERNEL_HASH-5.4.102 = fd697ce1c3f6024d4ae77d4eb5a1552199407b60cb8e90bc621e23cbce639aed LINUX_KERNEL_HASH-5.10.18 = 3bc1ee2b1bf73b5ba936721953f3f9599fd165cef906cd5163c68d23cb9bb611 remove_uri_prefix=$(subst git://,,$(subst http://,,$(subst https://,,$(1)))) diff --git a/target/linux/ath79/patches-5.4/450-fix-block-protection-clearing.patch b/target/linux/ath79/patches-5.4/450-fix-block-protection-clearing.patch index dc76161892..863f9ddd70 100644 --- a/target/linux/ath79/patches-5.4/450-fix-block-protection-clearing.patch +++ b/target/linux/ath79/patches-5.4/450-fix-block-protection-clearing.patch @@ -17,7 +17,7 @@ Signed-off-by: Nick Hainke --- a/drivers/mtd/spi-nor/spi-nor.c +++ b/drivers/mtd/spi-nor/spi-nor.c -@@ -1985,7 +1985,7 @@ static int sr2_bit7_quad_enable(struct s +@@ -1987,7 +1987,7 @@ static int sr2_bit7_quad_enable(struct s static int spi_nor_clear_sr_bp(struct spi_nor *nor) { int ret; diff --git 
a/target/linux/bcm27xx/patches-5.4/950-0798-media-pxa_camera-Use-the-new-set_mbus_config-op.patch b/target/linux/bcm27xx/patches-5.4/950-0798-media-pxa_camera-Use-the-new-set_mbus_config-op.patch index d682cf842f..4e5c349072 100644 --- a/target/linux/bcm27xx/patches-5.4/950-0798-media-pxa_camera-Use-the-new-set_mbus_config-op.patch +++ b/target/linux/bcm27xx/patches-5.4/950-0798-media-pxa_camera-Use-the-new-set_mbus_config-op.patch @@ -118,7 +118,7 @@ Signed-off-by: Jacopo Mondi static void pxa_camera_setup_cicr(struct pxa_camera_dev *pcdev, unsigned long flags, __u32 pixfmt) { -@@ -1598,99 +1537,78 @@ static int pxa_camera_init_videobuf2(str +@@ -1601,99 +1540,78 @@ static int pxa_camera_init_videobuf2(str */ static int pxa_camera_set_bus_param(struct pxa_camera_dev *pcdev) { @@ -271,7 +271,7 @@ Signed-off-by: Jacopo Mondi } static const struct pxa_mbus_pixelfmt pxa_camera_formats[] = { -@@ -1738,11 +1656,6 @@ static int pxa_camera_get_formats(struct +@@ -1741,11 +1659,6 @@ static int pxa_camera_get_formats(struct return 0; } diff --git a/target/linux/bcm63xx/patches-5.4/401-bcm963xx_real_rootfs_length.patch b/target/linux/bcm63xx/patches-5.4/401-bcm963xx_real_rootfs_length.patch index e92ec2d94c..eccdf0d7c5 100644 --- a/target/linux/bcm63xx/patches-5.4/401-bcm963xx_real_rootfs_length.patch +++ b/target/linux/bcm63xx/patches-5.4/401-bcm963xx_real_rootfs_length.patch @@ -15,7 +15,7 @@ /* 240-255: Unused at present */ --- a/drivers/mtd/parsers/parser_imagetag.c +++ b/drivers/mtd/parsers/parser_imagetag.c -@@ -132,7 +132,8 @@ static int bcm963xx_parse_imagetag_parti +@@ -136,7 +136,8 @@ static int bcm963xx_parse_imagetag_parti } else { /* OpenWrt layout */ rootfsaddr = kerneladdr + kernellen; diff --git a/target/linux/generic/backport-5.4/080-wireguard-0072-icmp-introduce-helper-for-nat-d-source-address-in-ne.patch b/target/linux/generic/backport-5.4/080-wireguard-0072-icmp-introduce-helper-for-nat-d-source-address-in-ne.patch deleted file mode 100644 index 8b53cc6226..0000000000 --- a/target/linux/generic/backport-5.4/080-wireguard-0072-icmp-introduce-helper-for-nat-d-source-address-in-ne.patch +++ /dev/null @@ -1,148 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Tue, 11 Feb 2020 20:47:05 +0100 -Subject: [PATCH] icmp: introduce helper for nat'd source address in network - device context - -commit 0b41713b606694257b90d61ba7e2712d8457648b upstream. - -This introduces a helper function to be called only by network drivers -that wraps calls to icmp[v6]_send in a conntrack transformation, in case -NAT has been used. We don't want to pollute the non-driver path, though, -so we introduce this as a helper to be called by places that actually -make use of this, as suggested by Florian. - -Signed-off-by: Jason A. Donenfeld -Cc: Florian Westphal -Signed-off-by: David S. Miller -Signed-off-by: Jason A. 
Donenfeld ---- - include/linux/icmpv6.h | 10 ++++++++++ - include/net/icmp.h | 6 ++++++ - net/ipv4/icmp.c | 33 +++++++++++++++++++++++++++++++++ - net/ipv6/ip6_icmp.c | 34 ++++++++++++++++++++++++++++++++++ - 4 files changed, 83 insertions(+) - ---- a/include/linux/icmpv6.h -+++ b/include/linux/icmpv6.h -@@ -22,12 +22,22 @@ extern int inet6_unregister_icmp_sender( - int ip6_err_gen_icmpv6_unreach(struct sk_buff *skb, int nhs, int type, - unsigned int data_len); - -+#if IS_ENABLED(CONFIG_NF_NAT) -+void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info); -+#else -+#define icmpv6_ndo_send icmpv6_send -+#endif -+ - #else - - static inline void icmpv6_send(struct sk_buff *skb, - u8 type, u8 code, __u32 info) - { -+} - -+static inline void icmpv6_ndo_send(struct sk_buff *skb, -+ u8 type, u8 code, __u32 info) -+{ - } - #endif - ---- a/include/net/icmp.h -+++ b/include/net/icmp.h -@@ -43,6 +43,12 @@ static inline void icmp_send(struct sk_b - __icmp_send(skb_in, type, code, info, &IPCB(skb_in)->opt); - } - -+#if IS_ENABLED(CONFIG_NF_NAT) -+void icmp_ndo_send(struct sk_buff *skb_in, int type, int code, __be32 info); -+#else -+#define icmp_ndo_send icmp_send -+#endif -+ - int icmp_rcv(struct sk_buff *skb); - int icmp_err(struct sk_buff *skb, u32 info); - int icmp_init(void); ---- a/net/ipv4/icmp.c -+++ b/net/ipv4/icmp.c -@@ -750,6 +750,39 @@ out:; - } - EXPORT_SYMBOL(__icmp_send); - -+#if IS_ENABLED(CONFIG_NF_NAT) -+#include -+void icmp_ndo_send(struct sk_buff *skb_in, int type, int code, __be32 info) -+{ -+ struct sk_buff *cloned_skb = NULL; -+ enum ip_conntrack_info ctinfo; -+ struct nf_conn *ct; -+ __be32 orig_ip; -+ -+ ct = nf_ct_get(skb_in, &ctinfo); -+ if (!ct || !(ct->status & IPS_SRC_NAT)) { -+ icmp_send(skb_in, type, code, info); -+ return; -+ } -+ -+ if (skb_shared(skb_in)) -+ skb_in = cloned_skb = skb_clone(skb_in, GFP_ATOMIC); -+ -+ if (unlikely(!skb_in || skb_network_header(skb_in) < skb_in->head || -+ (skb_network_header(skb_in) + sizeof(struct iphdr)) > -+ skb_tail_pointer(skb_in) || skb_ensure_writable(skb_in, -+ skb_network_offset(skb_in) + sizeof(struct iphdr)))) -+ goto out; -+ -+ orig_ip = ip_hdr(skb_in)->saddr; -+ ip_hdr(skb_in)->saddr = ct->tuplehash[0].tuple.src.u3.ip; -+ icmp_send(skb_in, type, code, info); -+ ip_hdr(skb_in)->saddr = orig_ip; -+out: -+ consume_skb(cloned_skb); -+} -+EXPORT_SYMBOL(icmp_ndo_send); -+#endif - - static void icmp_socket_deliver(struct sk_buff *skb, u32 info) - { ---- a/net/ipv6/ip6_icmp.c -+++ b/net/ipv6/ip6_icmp.c -@@ -45,4 +45,38 @@ out: - rcu_read_unlock(); - } - EXPORT_SYMBOL(icmpv6_send); -+ -+#if IS_ENABLED(CONFIG_NF_NAT) -+#include -+void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info) -+{ -+ struct sk_buff *cloned_skb = NULL; -+ enum ip_conntrack_info ctinfo; -+ struct in6_addr orig_ip; -+ struct nf_conn *ct; -+ -+ ct = nf_ct_get(skb_in, &ctinfo); -+ if (!ct || !(ct->status & IPS_SRC_NAT)) { -+ icmpv6_send(skb_in, type, code, info); -+ return; -+ } -+ -+ if (skb_shared(skb_in)) -+ skb_in = cloned_skb = skb_clone(skb_in, GFP_ATOMIC); -+ -+ if (unlikely(!skb_in || skb_network_header(skb_in) < skb_in->head || -+ (skb_network_header(skb_in) + sizeof(struct ipv6hdr)) > -+ skb_tail_pointer(skb_in) || skb_ensure_writable(skb_in, -+ skb_network_offset(skb_in) + sizeof(struct ipv6hdr)))) -+ goto out; -+ -+ orig_ip = ipv6_hdr(skb_in)->saddr; -+ ipv6_hdr(skb_in)->saddr = ct->tuplehash[0].tuple.src.u3.in6; -+ icmpv6_send(skb_in, type, code, info); -+ ipv6_hdr(skb_in)->saddr = orig_ip; -+out: -+ 
consume_skb(cloned_skb); -+} -+EXPORT_SYMBOL(icmpv6_ndo_send); -+#endif - #endif diff --git a/target/linux/generic/backport-5.4/080-wireguard-0072-net-WireGuard-secure-network-tunnel.patch b/target/linux/generic/backport-5.4/080-wireguard-0072-net-WireGuard-secure-network-tunnel.patch new file mode 100644 index 0000000000..9e37bbb60c --- /dev/null +++ b/target/linux/generic/backport-5.4/080-wireguard-0072-net-WireGuard-secure-network-tunnel.patch @@ -0,0 +1,8071 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: "Jason A. Donenfeld" +Date: Mon, 9 Dec 2019 00:27:34 +0100 +Subject: [PATCH] net: WireGuard secure network tunnel + +commit e7096c131e5161fa3b8e52a650d7719d2857adfd upstream. + +WireGuard is a layer 3 secure networking tunnel made specifically for +the kernel, that aims to be much simpler and easier to audit than IPsec. +Extensive documentation and description of the protocol and +considerations, along with formal proofs of the cryptography, are +available at: + + * https://www.wireguard.com/ + * https://www.wireguard.com/papers/wireguard.pdf + +This commit implements WireGuard as a simple network device driver, +accessible in the usual RTNL way used by virtual network drivers. It +makes use of the udp_tunnel APIs, GRO, GSO, NAPI, and the usual set of +networking subsystem APIs. It has a somewhat novel multicore queueing +system designed for maximum throughput and minimal latency of encryption +operations, but it is implemented modestly using workqueues and NAPI. +Configuration is done via generic Netlink, and following a review from +the Netlink maintainer a year ago, several high profile userspace tools +have already implemented the API. + +This commit also comes with several different tests, both in-kernel +tests and out-of-kernel tests based on network namespaces, taking profit +of the fact that sockets used by WireGuard intentionally stay in the +namespace the WireGuard interface was originally created, exactly like +the semantics of userspace tun devices. See wireguard.com/netns/ for +pictures and examples. + +The source code is fairly short, but rather than combining everything +into a single file, WireGuard is developed as cleanly separable files, +making auditing and comprehension easier. Things are laid out as +follows: + + * noise.[ch], cookie.[ch], messages.h: These implement the bulk of the + cryptographic aspects of the protocol, and are mostly data-only in + nature, taking in buffers of bytes and spitting out buffers of + bytes. They also handle reference counting for their various shared + pieces of data, like keys and key lists. + + * ratelimiter.[ch]: Used as an integral part of cookie.[ch] for + ratelimiting certain types of cryptographic operations in accordance + with particular WireGuard semantics. + + * allowedips.[ch], peerlookup.[ch]: The main lookup structures of + WireGuard, the former being trie-like with particular semantics, an + integral part of the design of the protocol, and the latter just + being nice helper functions around the various hashtables we use. + + * device.[ch]: Implementation of functions for the netdevice and for + rtnl, responsible for maintaining the life of a given interface and + wiring it up to the rest of WireGuard. + + * peer.[ch]: Each interface has a list of peers, with helper functions + available here for creation, destruction, and reference counting. 
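+
+(The generic Netlink API referred to above is what iproute2 and the wg(8)
+tool drive in practice. As a rough usage sketch, not taken from this patch
+and with device name, addresses, and keys as placeholders, bringing up an
+interface looks like the following; the file-by-file description then
+continues below.
+
+    ip link add dev wg0 type wireguard   # RTNL: create the virtual device
+    wg genkey | tee wg0.key | wg pubkey > wg0.pub
+    wg set wg0 private-key ./wg0.key listen-port 51820
+    wg set wg0 peer <peer-public-key> \
+        allowed-ips 10.0.0.2/32 endpoint 192.0.2.1:51820
+    ip address add 10.0.0.1/24 dev wg0
+    ip link set wg0 up
+
+Because the underlying UDP socket stays in the namespace in which wg0 was
+created, the interface can later be moved elsewhere with
+`ip link set wg0 netns <ns>` while its encrypted traffic still leaves from
+the original namespace; this is the property exercised by the netns.sh
+self-test mentioned below.)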
+ + * socket.[ch]: Implementation of functions related to udp_socket and + the general set of kernel socket APIs, for sending and receiving + ciphertext UDP packets, and taking care of WireGuard-specific sticky + socket routing semantics for the automatic roaming. + + * netlink.[ch]: Userspace API entry point for configuring WireGuard + peers and devices. The API has been implemented by several userspace + tools and network management utility, and the WireGuard project + distributes the basic wg(8) tool. + + * queueing.[ch]: Shared function on the rx and tx path for handling + the various queues used in the multicore algorithms. + + * send.c: Handles encrypting outgoing packets in parallel on + multiple cores, before sending them in order on a single core, via + workqueues and ring buffers. Also handles sending handshake and cookie + messages as part of the protocol, in parallel. + + * receive.c: Handles decrypting incoming packets in parallel on + multiple cores, before passing them off in order to be ingested via + the rest of the networking subsystem with GRO via the typical NAPI + poll function. Also handles receiving handshake and cookie messages + as part of the protocol, in parallel. + + * timers.[ch]: Uses the timer wheel to implement protocol particular + event timeouts, and gives a set of very simple event-driven entry + point functions for callers. + + * main.c, version.h: Initialization and deinitialization of the module. + + * selftest/*.h: Runtime unit tests for some of the most security + sensitive functions. + + * tools/testing/selftests/wireguard/netns.sh: Aforementioned testing + script using network namespaces. + +This commit aims to be as self-contained as possible, implementing +WireGuard as a standalone module not needing much special handling or +coordination from the network subsystem. I expect for future +optimizations to the network stack to positively improve WireGuard, and +vice-versa, but for the time being, this exists as intentionally +standalone. + +We introduce a menu option for CONFIG_WIREGUARD, as well as providing a +verbose debug log and self-tests via CONFIG_WIREGUARD_DEBUG. + +Signed-off-by: Jason A. Donenfeld +Cc: David Miller +Cc: Greg KH +Cc: Linus Torvalds +Cc: Herbert Xu +Cc: linux-crypto@vger.kernel.org +Cc: linux-kernel@vger.kernel.org +Cc: netdev@vger.kernel.org +Signed-off-by: David S. Miller +[Jason: ported to 5.4 by doing the following: + - wg_get_device_start uses genl_family_attrbuf + - trival skb_redirect_reset change from 2c64605b590e is folded in + - skb_list_walk_safe was already backported prior] +Signed-off-by: Jason A. 
Donenfeld +--- + MAINTAINERS | 8 + + drivers/net/Kconfig | 41 + + drivers/net/Makefile | 1 + + drivers/net/wireguard/Makefile | 18 + + drivers/net/wireguard/allowedips.c | 381 +++++++++ + drivers/net/wireguard/allowedips.h | 59 ++ + drivers/net/wireguard/cookie.c | 236 ++++++ + drivers/net/wireguard/cookie.h | 59 ++ + drivers/net/wireguard/device.c | 458 ++++++++++ + drivers/net/wireguard/device.h | 65 ++ + drivers/net/wireguard/main.c | 64 ++ + drivers/net/wireguard/messages.h | 128 +++ + drivers/net/wireguard/netlink.c | 648 +++++++++++++++ + drivers/net/wireguard/netlink.h | 12 + + drivers/net/wireguard/noise.c | 828 +++++++++++++++++++ + drivers/net/wireguard/noise.h | 137 +++ + drivers/net/wireguard/peer.c | 240 ++++++ + drivers/net/wireguard/peer.h | 83 ++ + drivers/net/wireguard/peerlookup.c | 221 +++++ + drivers/net/wireguard/peerlookup.h | 64 ++ + drivers/net/wireguard/queueing.c | 53 ++ + drivers/net/wireguard/queueing.h | 197 +++++ + drivers/net/wireguard/ratelimiter.c | 223 +++++ + drivers/net/wireguard/ratelimiter.h | 19 + + drivers/net/wireguard/receive.c | 595 +++++++++++++ + drivers/net/wireguard/selftest/allowedips.c | 683 +++++++++++++++ + drivers/net/wireguard/selftest/counter.c | 104 +++ + drivers/net/wireguard/selftest/ratelimiter.c | 226 +++++ + drivers/net/wireguard/send.c | 413 +++++++++ + drivers/net/wireguard/socket.c | 437 ++++++++++ + drivers/net/wireguard/socket.h | 44 + + drivers/net/wireguard/timers.c | 243 ++++++ + drivers/net/wireguard/timers.h | 31 + + drivers/net/wireguard/version.h | 1 + + include/uapi/linux/wireguard.h | 196 +++++ + tools/testing/selftests/wireguard/netns.sh | 537 ++++++++++++ + 36 files changed, 7753 insertions(+) + create mode 100644 drivers/net/wireguard/Makefile + create mode 100644 drivers/net/wireguard/allowedips.c + create mode 100644 drivers/net/wireguard/allowedips.h + create mode 100644 drivers/net/wireguard/cookie.c + create mode 100644 drivers/net/wireguard/cookie.h + create mode 100644 drivers/net/wireguard/device.c + create mode 100644 drivers/net/wireguard/device.h + create mode 100644 drivers/net/wireguard/main.c + create mode 100644 drivers/net/wireguard/messages.h + create mode 100644 drivers/net/wireguard/netlink.c + create mode 100644 drivers/net/wireguard/netlink.h + create mode 100644 drivers/net/wireguard/noise.c + create mode 100644 drivers/net/wireguard/noise.h + create mode 100644 drivers/net/wireguard/peer.c + create mode 100644 drivers/net/wireguard/peer.h + create mode 100644 drivers/net/wireguard/peerlookup.c + create mode 100644 drivers/net/wireguard/peerlookup.h + create mode 100644 drivers/net/wireguard/queueing.c + create mode 100644 drivers/net/wireguard/queueing.h + create mode 100644 drivers/net/wireguard/ratelimiter.c + create mode 100644 drivers/net/wireguard/ratelimiter.h + create mode 100644 drivers/net/wireguard/receive.c + create mode 100644 drivers/net/wireguard/selftest/allowedips.c + create mode 100644 drivers/net/wireguard/selftest/counter.c + create mode 100644 drivers/net/wireguard/selftest/ratelimiter.c + create mode 100644 drivers/net/wireguard/send.c + create mode 100644 drivers/net/wireguard/socket.c + create mode 100644 drivers/net/wireguard/socket.h + create mode 100644 drivers/net/wireguard/timers.c + create mode 100644 drivers/net/wireguard/timers.h + create mode 100644 drivers/net/wireguard/version.h + create mode 100644 include/uapi/linux/wireguard.h + create mode 100755 tools/testing/selftests/wireguard/netns.sh + +--- a/MAINTAINERS ++++ b/MAINTAINERS +@@ -17584,6 +17584,14 @@ 
L: linux-gpio@vger.kernel.org + S: Maintained + F: drivers/gpio/gpio-ws16c48.c + ++WIREGUARD SECURE NETWORK TUNNEL ++M: Jason A. Donenfeld ++S: Maintained ++F: drivers/net/wireguard/ ++F: tools/testing/selftests/wireguard/ ++L: wireguard@lists.zx2c4.com ++L: netdev@vger.kernel.org ++ + WISTRON LAPTOP BUTTON DRIVER + M: Miloslav Trmac + S: Maintained +--- a/drivers/net/Kconfig ++++ b/drivers/net/Kconfig +@@ -71,6 +71,47 @@ config DUMMY + To compile this driver as a module, choose M here: the module + will be called dummy. + ++config WIREGUARD ++ tristate "WireGuard secure network tunnel" ++ depends on NET && INET ++ depends on IPV6 || !IPV6 ++ select NET_UDP_TUNNEL ++ select DST_CACHE ++ select CRYPTO ++ select CRYPTO_LIB_CURVE25519 ++ select CRYPTO_LIB_CHACHA20POLY1305 ++ select CRYPTO_LIB_BLAKE2S ++ select CRYPTO_CHACHA20_X86_64 if X86 && 64BIT ++ select CRYPTO_POLY1305_X86_64 if X86 && 64BIT ++ select CRYPTO_BLAKE2S_X86 if X86 && 64BIT ++ select CRYPTO_CURVE25519_X86 if X86 && 64BIT ++ select CRYPTO_CHACHA20_NEON if (ARM || ARM64) && KERNEL_MODE_NEON ++ select CRYPTO_POLY1305_NEON if ARM64 && KERNEL_MODE_NEON ++ select CRYPTO_POLY1305_ARM if ARM ++ select CRYPTO_CURVE25519_NEON if ARM && KERNEL_MODE_NEON ++ select CRYPTO_CHACHA_MIPS if CPU_MIPS32_R2 ++ select CRYPTO_POLY1305_MIPS if CPU_MIPS32 || (CPU_MIPS64 && 64BIT) ++ help ++ WireGuard is a secure, fast, and easy to use replacement for IPSec ++ that uses modern cryptography and clever networking tricks. It's ++ designed to be fairly general purpose and abstract enough to fit most ++ use cases, while at the same time remaining extremely simple to ++ configure. See www.wireguard.com for more info. ++ ++ It's safe to say Y or M here, as the driver is very lightweight and ++ is only in use when an administrator chooses to add an interface. ++ ++config WIREGUARD_DEBUG ++ bool "Debugging checks and verbose messages" ++ depends on WIREGUARD ++ help ++ This will write log messages for handshake and other events ++ that occur for a WireGuard interface. It will also perform some ++ extra validation checks and unit tests at various points. This is ++ only useful for debugging. ++ ++ Say N here unless you know what you're doing. ++ + config EQUALIZER + tristate "EQL (serial line load balancing) support" + ---help--- +--- a/drivers/net/Makefile ++++ b/drivers/net/Makefile +@@ -10,6 +10,7 @@ obj-$(CONFIG_BONDING) += bonding/ + obj-$(CONFIG_IPVLAN) += ipvlan/ + obj-$(CONFIG_IPVTAP) += ipvlan/ + obj-$(CONFIG_DUMMY) += dummy.o ++obj-$(CONFIG_WIREGUARD) += wireguard/ + obj-$(CONFIG_EQUALIZER) += eql.o + obj-$(CONFIG_IFB) += ifb.o + obj-$(CONFIG_MACSEC) += macsec.o +--- /dev/null ++++ b/drivers/net/wireguard/Makefile +@@ -0,0 +1,18 @@ ++ccflags-y := -O3 ++ccflags-y += -D'pr_fmt(fmt)=KBUILD_MODNAME ": " fmt' ++ccflags-$(CONFIG_WIREGUARD_DEBUG) += -DDEBUG ++wireguard-y := main.o ++wireguard-y += noise.o ++wireguard-y += device.o ++wireguard-y += peer.o ++wireguard-y += timers.o ++wireguard-y += queueing.o ++wireguard-y += send.o ++wireguard-y += receive.o ++wireguard-y += socket.o ++wireguard-y += peerlookup.o ++wireguard-y += allowedips.o ++wireguard-y += ratelimiter.o ++wireguard-y += cookie.o ++wireguard-y += netlink.o ++obj-$(CONFIG_WIREGUARD) := wireguard.o +--- /dev/null ++++ b/drivers/net/wireguard/allowedips.c +@@ -0,0 +1,381 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
++ */ ++ ++#include "allowedips.h" ++#include "peer.h" ++ ++static void swap_endian(u8 *dst, const u8 *src, u8 bits) ++{ ++ if (bits == 32) { ++ *(u32 *)dst = be32_to_cpu(*(const __be32 *)src); ++ } else if (bits == 128) { ++ ((u64 *)dst)[0] = be64_to_cpu(((const __be64 *)src)[0]); ++ ((u64 *)dst)[1] = be64_to_cpu(((const __be64 *)src)[1]); ++ } ++} ++ ++static void copy_and_assign_cidr(struct allowedips_node *node, const u8 *src, ++ u8 cidr, u8 bits) ++{ ++ node->cidr = cidr; ++ node->bit_at_a = cidr / 8U; ++#ifdef __LITTLE_ENDIAN ++ node->bit_at_a ^= (bits / 8U - 1U) % 8U; ++#endif ++ node->bit_at_b = 7U - (cidr % 8U); ++ node->bitlen = bits; ++ memcpy(node->bits, src, bits / 8U); ++} ++#define CHOOSE_NODE(parent, key) \ ++ parent->bit[(key[parent->bit_at_a] >> parent->bit_at_b) & 1] ++ ++static void node_free_rcu(struct rcu_head *rcu) ++{ ++ kfree(container_of(rcu, struct allowedips_node, rcu)); ++} ++ ++static void push_rcu(struct allowedips_node **stack, ++ struct allowedips_node __rcu *p, unsigned int *len) ++{ ++ if (rcu_access_pointer(p)) { ++ WARN_ON(IS_ENABLED(DEBUG) && *len >= 128); ++ stack[(*len)++] = rcu_dereference_raw(p); ++ } ++} ++ ++static void root_free_rcu(struct rcu_head *rcu) ++{ ++ struct allowedips_node *node, *stack[128] = { ++ container_of(rcu, struct allowedips_node, rcu) }; ++ unsigned int len = 1; ++ ++ while (len > 0 && (node = stack[--len])) { ++ push_rcu(stack, node->bit[0], &len); ++ push_rcu(stack, node->bit[1], &len); ++ kfree(node); ++ } ++} ++ ++static void root_remove_peer_lists(struct allowedips_node *root) ++{ ++ struct allowedips_node *node, *stack[128] = { root }; ++ unsigned int len = 1; ++ ++ while (len > 0 && (node = stack[--len])) { ++ push_rcu(stack, node->bit[0], &len); ++ push_rcu(stack, node->bit[1], &len); ++ if (rcu_access_pointer(node->peer)) ++ list_del(&node->peer_list); ++ } ++} ++ ++static void walk_remove_by_peer(struct allowedips_node __rcu **top, ++ struct wg_peer *peer, struct mutex *lock) ++{ ++#define REF(p) rcu_access_pointer(p) ++#define DEREF(p) rcu_dereference_protected(*(p), lockdep_is_held(lock)) ++#define PUSH(p) ({ \ ++ WARN_ON(IS_ENABLED(DEBUG) && len >= 128); \ ++ stack[len++] = p; \ ++ }) ++ ++ struct allowedips_node __rcu **stack[128], **nptr; ++ struct allowedips_node *node, *prev; ++ unsigned int len; ++ ++ if (unlikely(!peer || !REF(*top))) ++ return; ++ ++ for (prev = NULL, len = 0, PUSH(top); len > 0; prev = node) { ++ nptr = stack[len - 1]; ++ node = DEREF(nptr); ++ if (!node) { ++ --len; ++ continue; ++ } ++ if (!prev || REF(prev->bit[0]) == node || ++ REF(prev->bit[1]) == node) { ++ if (REF(node->bit[0])) ++ PUSH(&node->bit[0]); ++ else if (REF(node->bit[1])) ++ PUSH(&node->bit[1]); ++ } else if (REF(node->bit[0]) == prev) { ++ if (REF(node->bit[1])) ++ PUSH(&node->bit[1]); ++ } else { ++ if (rcu_dereference_protected(node->peer, ++ lockdep_is_held(lock)) == peer) { ++ RCU_INIT_POINTER(node->peer, NULL); ++ list_del_init(&node->peer_list); ++ if (!node->bit[0] || !node->bit[1]) { ++ rcu_assign_pointer(*nptr, DEREF( ++ &node->bit[!REF(node->bit[0])])); ++ call_rcu(&node->rcu, node_free_rcu); ++ node = DEREF(nptr); ++ } ++ } ++ --len; ++ } ++ } ++ ++#undef REF ++#undef DEREF ++#undef PUSH ++} ++ ++static unsigned int fls128(u64 a, u64 b) ++{ ++ return a ? 
fls64(a) + 64U : fls64(b); ++} ++ ++static u8 common_bits(const struct allowedips_node *node, const u8 *key, ++ u8 bits) ++{ ++ if (bits == 32) ++ return 32U - fls(*(const u32 *)node->bits ^ *(const u32 *)key); ++ else if (bits == 128) ++ return 128U - fls128( ++ *(const u64 *)&node->bits[0] ^ *(const u64 *)&key[0], ++ *(const u64 *)&node->bits[8] ^ *(const u64 *)&key[8]); ++ return 0; ++} ++ ++static bool prefix_matches(const struct allowedips_node *node, const u8 *key, ++ u8 bits) ++{ ++ /* This could be much faster if it actually just compared the common ++ * bits properly, by precomputing a mask bswap(~0 << (32 - cidr)), and ++ * the rest, but it turns out that common_bits is already super fast on ++ * modern processors, even taking into account the unfortunate bswap. ++ * So, we just inline it like this instead. ++ */ ++ return common_bits(node, key, bits) >= node->cidr; ++} ++ ++static struct allowedips_node *find_node(struct allowedips_node *trie, u8 bits, ++ const u8 *key) ++{ ++ struct allowedips_node *node = trie, *found = NULL; ++ ++ while (node && prefix_matches(node, key, bits)) { ++ if (rcu_access_pointer(node->peer)) ++ found = node; ++ if (node->cidr == bits) ++ break; ++ node = rcu_dereference_bh(CHOOSE_NODE(node, key)); ++ } ++ return found; ++} ++ ++/* Returns a strong reference to a peer */ ++static struct wg_peer *lookup(struct allowedips_node __rcu *root, u8 bits, ++ const void *be_ip) ++{ ++ /* Aligned so it can be passed to fls/fls64 */ ++ u8 ip[16] __aligned(__alignof(u64)); ++ struct allowedips_node *node; ++ struct wg_peer *peer = NULL; ++ ++ swap_endian(ip, be_ip, bits); ++ ++ rcu_read_lock_bh(); ++retry: ++ node = find_node(rcu_dereference_bh(root), bits, ip); ++ if (node) { ++ peer = wg_peer_get_maybe_zero(rcu_dereference_bh(node->peer)); ++ if (!peer) ++ goto retry; ++ } ++ rcu_read_unlock_bh(); ++ return peer; ++} ++ ++static bool node_placement(struct allowedips_node __rcu *trie, const u8 *key, ++ u8 cidr, u8 bits, struct allowedips_node **rnode, ++ struct mutex *lock) ++{ ++ struct allowedips_node *node = rcu_dereference_protected(trie, ++ lockdep_is_held(lock)); ++ struct allowedips_node *parent = NULL; ++ bool exact = false; ++ ++ while (node && node->cidr <= cidr && prefix_matches(node, key, bits)) { ++ parent = node; ++ if (parent->cidr == cidr) { ++ exact = true; ++ break; ++ } ++ node = rcu_dereference_protected(CHOOSE_NODE(parent, key), ++ lockdep_is_held(lock)); ++ } ++ *rnode = parent; ++ return exact; ++} ++ ++static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key, ++ u8 cidr, struct wg_peer *peer, struct mutex *lock) ++{ ++ struct allowedips_node *node, *parent, *down, *newnode; ++ ++ if (unlikely(cidr > bits || !peer)) ++ return -EINVAL; ++ ++ if (!rcu_access_pointer(*trie)) { ++ node = kzalloc(sizeof(*node), GFP_KERNEL); ++ if (unlikely(!node)) ++ return -ENOMEM; ++ RCU_INIT_POINTER(node->peer, peer); ++ list_add_tail(&node->peer_list, &peer->allowedips_list); ++ copy_and_assign_cidr(node, key, cidr, bits); ++ rcu_assign_pointer(*trie, node); ++ return 0; ++ } ++ if (node_placement(*trie, key, cidr, bits, &node, lock)) { ++ rcu_assign_pointer(node->peer, peer); ++ list_move_tail(&node->peer_list, &peer->allowedips_list); ++ return 0; ++ } ++ ++ newnode = kzalloc(sizeof(*newnode), GFP_KERNEL); ++ if (unlikely(!newnode)) ++ return -ENOMEM; ++ RCU_INIT_POINTER(newnode->peer, peer); ++ list_add_tail(&newnode->peer_list, &peer->allowedips_list); ++ copy_and_assign_cidr(newnode, key, cidr, bits); ++ ++ if (!node) { ++ down = 
rcu_dereference_protected(*trie, lockdep_is_held(lock)); ++ } else { ++ down = rcu_dereference_protected(CHOOSE_NODE(node, key), ++ lockdep_is_held(lock)); ++ if (!down) { ++ rcu_assign_pointer(CHOOSE_NODE(node, key), newnode); ++ return 0; ++ } ++ } ++ cidr = min(cidr, common_bits(down, key, bits)); ++ parent = node; ++ ++ if (newnode->cidr == cidr) { ++ rcu_assign_pointer(CHOOSE_NODE(newnode, down->bits), down); ++ if (!parent) ++ rcu_assign_pointer(*trie, newnode); ++ else ++ rcu_assign_pointer(CHOOSE_NODE(parent, newnode->bits), ++ newnode); ++ } else { ++ node = kzalloc(sizeof(*node), GFP_KERNEL); ++ if (unlikely(!node)) { ++ kfree(newnode); ++ return -ENOMEM; ++ } ++ INIT_LIST_HEAD(&node->peer_list); ++ copy_and_assign_cidr(node, newnode->bits, cidr, bits); ++ ++ rcu_assign_pointer(CHOOSE_NODE(node, down->bits), down); ++ rcu_assign_pointer(CHOOSE_NODE(node, newnode->bits), newnode); ++ if (!parent) ++ rcu_assign_pointer(*trie, node); ++ else ++ rcu_assign_pointer(CHOOSE_NODE(parent, node->bits), ++ node); ++ } ++ return 0; ++} ++ ++void wg_allowedips_init(struct allowedips *table) ++{ ++ table->root4 = table->root6 = NULL; ++ table->seq = 1; ++} ++ ++void wg_allowedips_free(struct allowedips *table, struct mutex *lock) ++{ ++ struct allowedips_node __rcu *old4 = table->root4, *old6 = table->root6; ++ ++ ++table->seq; ++ RCU_INIT_POINTER(table->root4, NULL); ++ RCU_INIT_POINTER(table->root6, NULL); ++ if (rcu_access_pointer(old4)) { ++ struct allowedips_node *node = rcu_dereference_protected(old4, ++ lockdep_is_held(lock)); ++ ++ root_remove_peer_lists(node); ++ call_rcu(&node->rcu, root_free_rcu); ++ } ++ if (rcu_access_pointer(old6)) { ++ struct allowedips_node *node = rcu_dereference_protected(old6, ++ lockdep_is_held(lock)); ++ ++ root_remove_peer_lists(node); ++ call_rcu(&node->rcu, root_free_rcu); ++ } ++} ++ ++int wg_allowedips_insert_v4(struct allowedips *table, const struct in_addr *ip, ++ u8 cidr, struct wg_peer *peer, struct mutex *lock) ++{ ++ /* Aligned so it can be passed to fls */ ++ u8 key[4] __aligned(__alignof(u32)); ++ ++ ++table->seq; ++ swap_endian(key, (const u8 *)ip, 32); ++ return add(&table->root4, 32, key, cidr, peer, lock); ++} ++ ++int wg_allowedips_insert_v6(struct allowedips *table, const struct in6_addr *ip, ++ u8 cidr, struct wg_peer *peer, struct mutex *lock) ++{ ++ /* Aligned so it can be passed to fls64 */ ++ u8 key[16] __aligned(__alignof(u64)); ++ ++ ++table->seq; ++ swap_endian(key, (const u8 *)ip, 128); ++ return add(&table->root6, 128, key, cidr, peer, lock); ++} ++ ++void wg_allowedips_remove_by_peer(struct allowedips *table, ++ struct wg_peer *peer, struct mutex *lock) ++{ ++ ++table->seq; ++ walk_remove_by_peer(&table->root4, peer, lock); ++ walk_remove_by_peer(&table->root6, peer, lock); ++} ++ ++int wg_allowedips_read_node(struct allowedips_node *node, u8 ip[16], u8 *cidr) ++{ ++ const unsigned int cidr_bytes = DIV_ROUND_UP(node->cidr, 8U); ++ swap_endian(ip, node->bits, node->bitlen); ++ memset(ip + cidr_bytes, 0, node->bitlen / 8U - cidr_bytes); ++ if (node->cidr) ++ ip[cidr_bytes - 1U] &= ~0U << (-node->cidr % 8U); ++ ++ *cidr = node->cidr; ++ return node->bitlen == 32 ? 
AF_INET : AF_INET6; ++} ++ ++/* Returns a strong reference to a peer */ ++struct wg_peer *wg_allowedips_lookup_dst(struct allowedips *table, ++ struct sk_buff *skb) ++{ ++ if (skb->protocol == htons(ETH_P_IP)) ++ return lookup(table->root4, 32, &ip_hdr(skb)->daddr); ++ else if (skb->protocol == htons(ETH_P_IPV6)) ++ return lookup(table->root6, 128, &ipv6_hdr(skb)->daddr); ++ return NULL; ++} ++ ++/* Returns a strong reference to a peer */ ++struct wg_peer *wg_allowedips_lookup_src(struct allowedips *table, ++ struct sk_buff *skb) ++{ ++ if (skb->protocol == htons(ETH_P_IP)) ++ return lookup(table->root4, 32, &ip_hdr(skb)->saddr); ++ else if (skb->protocol == htons(ETH_P_IPV6)) ++ return lookup(table->root6, 128, &ipv6_hdr(skb)->saddr); ++ return NULL; ++} ++ ++#include "selftest/allowedips.c" +--- /dev/null ++++ b/drivers/net/wireguard/allowedips.h +@@ -0,0 +1,59 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* ++ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. ++ */ ++ ++#ifndef _WG_ALLOWEDIPS_H ++#define _WG_ALLOWEDIPS_H ++ ++#include ++#include ++#include ++ ++struct wg_peer; ++ ++struct allowedips_node { ++ struct wg_peer __rcu *peer; ++ struct allowedips_node __rcu *bit[2]; ++ /* While it may seem scandalous that we waste space for v4, ++ * we're alloc'ing to the nearest power of 2 anyway, so this ++ * doesn't actually make a difference. ++ */ ++ u8 bits[16] __aligned(__alignof(u64)); ++ u8 cidr, bit_at_a, bit_at_b, bitlen; ++ ++ /* Keep rarely used list at bottom to be beyond cache line. */ ++ union { ++ struct list_head peer_list; ++ struct rcu_head rcu; ++ }; ++}; ++ ++struct allowedips { ++ struct allowedips_node __rcu *root4; ++ struct allowedips_node __rcu *root6; ++ u64 seq; ++}; ++ ++void wg_allowedips_init(struct allowedips *table); ++void wg_allowedips_free(struct allowedips *table, struct mutex *mutex); ++int wg_allowedips_insert_v4(struct allowedips *table, const struct in_addr *ip, ++ u8 cidr, struct wg_peer *peer, struct mutex *lock); ++int wg_allowedips_insert_v6(struct allowedips *table, const struct in6_addr *ip, ++ u8 cidr, struct wg_peer *peer, struct mutex *lock); ++void wg_allowedips_remove_by_peer(struct allowedips *table, ++ struct wg_peer *peer, struct mutex *lock); ++/* The ip input pointer should be __aligned(__alignof(u64))) */ ++int wg_allowedips_read_node(struct allowedips_node *node, u8 ip[16], u8 *cidr); ++ ++/* These return a strong reference to a peer: */ ++struct wg_peer *wg_allowedips_lookup_dst(struct allowedips *table, ++ struct sk_buff *skb); ++struct wg_peer *wg_allowedips_lookup_src(struct allowedips *table, ++ struct sk_buff *skb); ++ ++#ifdef DEBUG ++bool wg_allowedips_selftest(void); ++#endif ++ ++#endif /* _WG_ALLOWEDIPS_H */ +--- /dev/null ++++ b/drivers/net/wireguard/cookie.c +@@ -0,0 +1,236 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
++ */ ++ ++#include "cookie.h" ++#include "peer.h" ++#include "device.h" ++#include "messages.h" ++#include "ratelimiter.h" ++#include "timers.h" ++ ++#include ++#include ++ ++#include ++#include ++ ++void wg_cookie_checker_init(struct cookie_checker *checker, ++ struct wg_device *wg) ++{ ++ init_rwsem(&checker->secret_lock); ++ checker->secret_birthdate = ktime_get_coarse_boottime_ns(); ++ get_random_bytes(checker->secret, NOISE_HASH_LEN); ++ checker->device = wg; ++} ++ ++enum { COOKIE_KEY_LABEL_LEN = 8 }; ++static const u8 mac1_key_label[COOKIE_KEY_LABEL_LEN] = "mac1----"; ++static const u8 cookie_key_label[COOKIE_KEY_LABEL_LEN] = "cookie--"; ++ ++static void precompute_key(u8 key[NOISE_SYMMETRIC_KEY_LEN], ++ const u8 pubkey[NOISE_PUBLIC_KEY_LEN], ++ const u8 label[COOKIE_KEY_LABEL_LEN]) ++{ ++ struct blake2s_state blake; ++ ++ blake2s_init(&blake, NOISE_SYMMETRIC_KEY_LEN); ++ blake2s_update(&blake, label, COOKIE_KEY_LABEL_LEN); ++ blake2s_update(&blake, pubkey, NOISE_PUBLIC_KEY_LEN); ++ blake2s_final(&blake, key); ++} ++ ++/* Must hold peer->handshake.static_identity->lock */ ++void wg_cookie_checker_precompute_device_keys(struct cookie_checker *checker) ++{ ++ if (likely(checker->device->static_identity.has_identity)) { ++ precompute_key(checker->cookie_encryption_key, ++ checker->device->static_identity.static_public, ++ cookie_key_label); ++ precompute_key(checker->message_mac1_key, ++ checker->device->static_identity.static_public, ++ mac1_key_label); ++ } else { ++ memset(checker->cookie_encryption_key, 0, ++ NOISE_SYMMETRIC_KEY_LEN); ++ memset(checker->message_mac1_key, 0, NOISE_SYMMETRIC_KEY_LEN); ++ } ++} ++ ++void wg_cookie_checker_precompute_peer_keys(struct wg_peer *peer) ++{ ++ precompute_key(peer->latest_cookie.cookie_decryption_key, ++ peer->handshake.remote_static, cookie_key_label); ++ precompute_key(peer->latest_cookie.message_mac1_key, ++ peer->handshake.remote_static, mac1_key_label); ++} ++ ++void wg_cookie_init(struct cookie *cookie) ++{ ++ memset(cookie, 0, sizeof(*cookie)); ++ init_rwsem(&cookie->lock); ++} ++ ++static void compute_mac1(u8 mac1[COOKIE_LEN], const void *message, size_t len, ++ const u8 key[NOISE_SYMMETRIC_KEY_LEN]) ++{ ++ len = len - sizeof(struct message_macs) + ++ offsetof(struct message_macs, mac1); ++ blake2s(mac1, message, key, COOKIE_LEN, len, NOISE_SYMMETRIC_KEY_LEN); ++} ++ ++static void compute_mac2(u8 mac2[COOKIE_LEN], const void *message, size_t len, ++ const u8 cookie[COOKIE_LEN]) ++{ ++ len = len - sizeof(struct message_macs) + ++ offsetof(struct message_macs, mac2); ++ blake2s(mac2, message, cookie, COOKIE_LEN, len, COOKIE_LEN); ++} ++ ++static void make_cookie(u8 cookie[COOKIE_LEN], struct sk_buff *skb, ++ struct cookie_checker *checker) ++{ ++ struct blake2s_state state; ++ ++ if (wg_birthdate_has_expired(checker->secret_birthdate, ++ COOKIE_SECRET_MAX_AGE)) { ++ down_write(&checker->secret_lock); ++ checker->secret_birthdate = ktime_get_coarse_boottime_ns(); ++ get_random_bytes(checker->secret, NOISE_HASH_LEN); ++ up_write(&checker->secret_lock); ++ } ++ ++ down_read(&checker->secret_lock); ++ ++ blake2s_init_key(&state, COOKIE_LEN, checker->secret, NOISE_HASH_LEN); ++ if (skb->protocol == htons(ETH_P_IP)) ++ blake2s_update(&state, (u8 *)&ip_hdr(skb)->saddr, ++ sizeof(struct in_addr)); ++ else if (skb->protocol == htons(ETH_P_IPV6)) ++ blake2s_update(&state, (u8 *)&ipv6_hdr(skb)->saddr, ++ sizeof(struct in6_addr)); ++ blake2s_update(&state, (u8 *)&udp_hdr(skb)->source, sizeof(__be16)); ++ blake2s_final(&state, cookie); ++ ++ 
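/* Editorial annotation, not part of the upstream patch: the lines above derive
 * the cookie that a loaded responder can demand before doing expensive
 * handshake work. It is a 16-byte (COOKIE_LEN) keyed BLAKE2s over the
 * initiator's source address and UDP source port, keyed with the per-device
 * rotating secret:
 *
 *     cookie = BLAKE2s_keyed(checker->secret, src_ip || udp_src_port)
 *
 * Because the secret is regenerated every COOKIE_SECRET_MAX_AGE (two minutes,
 * per messages.h), cookies expire on their own and the responder keeps no
 * per-initiator state: wg_cookie_validate_packet() below simply recomputes the
 * cookie and checks the sender's mac2 against it.
 */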
up_read(&checker->secret_lock); ++} ++ ++enum cookie_mac_state wg_cookie_validate_packet(struct cookie_checker *checker, ++ struct sk_buff *skb, ++ bool check_cookie) ++{ ++ struct message_macs *macs = (struct message_macs *) ++ (skb->data + skb->len - sizeof(*macs)); ++ enum cookie_mac_state ret; ++ u8 computed_mac[COOKIE_LEN]; ++ u8 cookie[COOKIE_LEN]; ++ ++ ret = INVALID_MAC; ++ compute_mac1(computed_mac, skb->data, skb->len, ++ checker->message_mac1_key); ++ if (crypto_memneq(computed_mac, macs->mac1, COOKIE_LEN)) ++ goto out; ++ ++ ret = VALID_MAC_BUT_NO_COOKIE; ++ ++ if (!check_cookie) ++ goto out; ++ ++ make_cookie(cookie, skb, checker); ++ ++ compute_mac2(computed_mac, skb->data, skb->len, cookie); ++ if (crypto_memneq(computed_mac, macs->mac2, COOKIE_LEN)) ++ goto out; ++ ++ ret = VALID_MAC_WITH_COOKIE_BUT_RATELIMITED; ++ if (!wg_ratelimiter_allow(skb, dev_net(checker->device->dev))) ++ goto out; ++ ++ ret = VALID_MAC_WITH_COOKIE; ++ ++out: ++ return ret; ++} ++ ++void wg_cookie_add_mac_to_packet(void *message, size_t len, ++ struct wg_peer *peer) ++{ ++ struct message_macs *macs = (struct message_macs *) ++ ((u8 *)message + len - sizeof(*macs)); ++ ++ down_write(&peer->latest_cookie.lock); ++ compute_mac1(macs->mac1, message, len, ++ peer->latest_cookie.message_mac1_key); ++ memcpy(peer->latest_cookie.last_mac1_sent, macs->mac1, COOKIE_LEN); ++ peer->latest_cookie.have_sent_mac1 = true; ++ up_write(&peer->latest_cookie.lock); ++ ++ down_read(&peer->latest_cookie.lock); ++ if (peer->latest_cookie.is_valid && ++ !wg_birthdate_has_expired(peer->latest_cookie.birthdate, ++ COOKIE_SECRET_MAX_AGE - COOKIE_SECRET_LATENCY)) ++ compute_mac2(macs->mac2, message, len, ++ peer->latest_cookie.cookie); ++ else ++ memset(macs->mac2, 0, COOKIE_LEN); ++ up_read(&peer->latest_cookie.lock); ++} ++ ++void wg_cookie_message_create(struct message_handshake_cookie *dst, ++ struct sk_buff *skb, __le32 index, ++ struct cookie_checker *checker) ++{ ++ struct message_macs *macs = (struct message_macs *) ++ ((u8 *)skb->data + skb->len - sizeof(*macs)); ++ u8 cookie[COOKIE_LEN]; ++ ++ dst->header.type = cpu_to_le32(MESSAGE_HANDSHAKE_COOKIE); ++ dst->receiver_index = index; ++ get_random_bytes_wait(dst->nonce, COOKIE_NONCE_LEN); ++ ++ make_cookie(cookie, skb, checker); ++ xchacha20poly1305_encrypt(dst->encrypted_cookie, cookie, COOKIE_LEN, ++ macs->mac1, COOKIE_LEN, dst->nonce, ++ checker->cookie_encryption_key); ++} ++ ++void wg_cookie_message_consume(struct message_handshake_cookie *src, ++ struct wg_device *wg) ++{ ++ struct wg_peer *peer = NULL; ++ u8 cookie[COOKIE_LEN]; ++ bool ret; ++ ++ if (unlikely(!wg_index_hashtable_lookup(wg->index_hashtable, ++ INDEX_HASHTABLE_HANDSHAKE | ++ INDEX_HASHTABLE_KEYPAIR, ++ src->receiver_index, &peer))) ++ return; ++ ++ down_read(&peer->latest_cookie.lock); ++ if (unlikely(!peer->latest_cookie.have_sent_mac1)) { ++ up_read(&peer->latest_cookie.lock); ++ goto out; ++ } ++ ret = xchacha20poly1305_decrypt( ++ cookie, src->encrypted_cookie, sizeof(src->encrypted_cookie), ++ peer->latest_cookie.last_mac1_sent, COOKIE_LEN, src->nonce, ++ peer->latest_cookie.cookie_decryption_key); ++ up_read(&peer->latest_cookie.lock); ++ ++ if (ret) { ++ down_write(&peer->latest_cookie.lock); ++ memcpy(peer->latest_cookie.cookie, cookie, COOKIE_LEN); ++ peer->latest_cookie.birthdate = ktime_get_coarse_boottime_ns(); ++ peer->latest_cookie.is_valid = true; ++ peer->latest_cookie.have_sent_mac1 = false; ++ up_write(&peer->latest_cookie.lock); ++ } else { ++ net_dbg_ratelimited("%s: Could not 
decrypt invalid cookie response\n", ++ wg->dev->name); ++ } ++ ++out: ++ wg_peer_put(peer); ++} +--- /dev/null ++++ b/drivers/net/wireguard/cookie.h +@@ -0,0 +1,59 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* ++ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. ++ */ ++ ++#ifndef _WG_COOKIE_H ++#define _WG_COOKIE_H ++ ++#include "messages.h" ++#include ++ ++struct wg_peer; ++ ++struct cookie_checker { ++ u8 secret[NOISE_HASH_LEN]; ++ u8 cookie_encryption_key[NOISE_SYMMETRIC_KEY_LEN]; ++ u8 message_mac1_key[NOISE_SYMMETRIC_KEY_LEN]; ++ u64 secret_birthdate; ++ struct rw_semaphore secret_lock; ++ struct wg_device *device; ++}; ++ ++struct cookie { ++ u64 birthdate; ++ bool is_valid; ++ u8 cookie[COOKIE_LEN]; ++ bool have_sent_mac1; ++ u8 last_mac1_sent[COOKIE_LEN]; ++ u8 cookie_decryption_key[NOISE_SYMMETRIC_KEY_LEN]; ++ u8 message_mac1_key[NOISE_SYMMETRIC_KEY_LEN]; ++ struct rw_semaphore lock; ++}; ++ ++enum cookie_mac_state { ++ INVALID_MAC, ++ VALID_MAC_BUT_NO_COOKIE, ++ VALID_MAC_WITH_COOKIE_BUT_RATELIMITED, ++ VALID_MAC_WITH_COOKIE ++}; ++ ++void wg_cookie_checker_init(struct cookie_checker *checker, ++ struct wg_device *wg); ++void wg_cookie_checker_precompute_device_keys(struct cookie_checker *checker); ++void wg_cookie_checker_precompute_peer_keys(struct wg_peer *peer); ++void wg_cookie_init(struct cookie *cookie); ++ ++enum cookie_mac_state wg_cookie_validate_packet(struct cookie_checker *checker, ++ struct sk_buff *skb, ++ bool check_cookie); ++void wg_cookie_add_mac_to_packet(void *message, size_t len, ++ struct wg_peer *peer); ++ ++void wg_cookie_message_create(struct message_handshake_cookie *src, ++ struct sk_buff *skb, __le32 index, ++ struct cookie_checker *checker); ++void wg_cookie_message_consume(struct message_handshake_cookie *src, ++ struct wg_device *wg); ++ ++#endif /* _WG_COOKIE_H */ +--- /dev/null ++++ b/drivers/net/wireguard/device.c +@@ -0,0 +1,458 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. ++ */ ++ ++#include "queueing.h" ++#include "socket.h" ++#include "timers.h" ++#include "device.h" ++#include "ratelimiter.h" ++#include "peer.h" ++#include "messages.h" ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++static LIST_HEAD(device_list); ++ ++static int wg_open(struct net_device *dev) ++{ ++ struct in_device *dev_v4 = __in_dev_get_rtnl(dev); ++ struct inet6_dev *dev_v6 = __in6_dev_get(dev); ++ struct wg_device *wg = netdev_priv(dev); ++ struct wg_peer *peer; ++ int ret; ++ ++ if (dev_v4) { ++ /* At some point we might put this check near the ip_rt_send_ ++ * redirect call of ip_forward in net/ipv4/ip_forward.c, similar ++ * to the current secpath check. 
++ */ ++ IN_DEV_CONF_SET(dev_v4, SEND_REDIRECTS, false); ++ IPV4_DEVCONF_ALL(dev_net(dev), SEND_REDIRECTS) = false; ++ } ++ if (dev_v6) ++ dev_v6->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_NONE; ++ ++ ret = wg_socket_init(wg, wg->incoming_port); ++ if (ret < 0) ++ return ret; ++ mutex_lock(&wg->device_update_lock); ++ list_for_each_entry(peer, &wg->peer_list, peer_list) { ++ wg_packet_send_staged_packets(peer); ++ if (peer->persistent_keepalive_interval) ++ wg_packet_send_keepalive(peer); ++ } ++ mutex_unlock(&wg->device_update_lock); ++ return 0; ++} ++ ++#ifdef CONFIG_PM_SLEEP ++static int wg_pm_notification(struct notifier_block *nb, unsigned long action, ++ void *data) ++{ ++ struct wg_device *wg; ++ struct wg_peer *peer; ++ ++ /* If the machine is constantly suspending and resuming, as part of ++ * its normal operation rather than as a somewhat rare event, then we ++ * don't actually want to clear keys. ++ */ ++ if (IS_ENABLED(CONFIG_PM_AUTOSLEEP) || IS_ENABLED(CONFIG_ANDROID)) ++ return 0; ++ ++ if (action != PM_HIBERNATION_PREPARE && action != PM_SUSPEND_PREPARE) ++ return 0; ++ ++ rtnl_lock(); ++ list_for_each_entry(wg, &device_list, device_list) { ++ mutex_lock(&wg->device_update_lock); ++ list_for_each_entry(peer, &wg->peer_list, peer_list) { ++ del_timer(&peer->timer_zero_key_material); ++ wg_noise_handshake_clear(&peer->handshake); ++ wg_noise_keypairs_clear(&peer->keypairs); ++ } ++ mutex_unlock(&wg->device_update_lock); ++ } ++ rtnl_unlock(); ++ rcu_barrier(); ++ return 0; ++} ++ ++static struct notifier_block pm_notifier = { .notifier_call = wg_pm_notification }; ++#endif ++ ++static int wg_stop(struct net_device *dev) ++{ ++ struct wg_device *wg = netdev_priv(dev); ++ struct wg_peer *peer; ++ ++ mutex_lock(&wg->device_update_lock); ++ list_for_each_entry(peer, &wg->peer_list, peer_list) { ++ wg_packet_purge_staged_packets(peer); ++ wg_timers_stop(peer); ++ wg_noise_handshake_clear(&peer->handshake); ++ wg_noise_keypairs_clear(&peer->keypairs); ++ wg_noise_reset_last_sent_handshake(&peer->last_sent_handshake); ++ } ++ mutex_unlock(&wg->device_update_lock); ++ skb_queue_purge(&wg->incoming_handshakes); ++ wg_socket_reinit(wg, NULL, NULL); ++ return 0; ++} ++ ++static netdev_tx_t wg_xmit(struct sk_buff *skb, struct net_device *dev) ++{ ++ struct wg_device *wg = netdev_priv(dev); ++ struct sk_buff_head packets; ++ struct wg_peer *peer; ++ struct sk_buff *next; ++ sa_family_t family; ++ u32 mtu; ++ int ret; ++ ++ if (unlikely(wg_skb_examine_untrusted_ip_hdr(skb) != skb->protocol)) { ++ ret = -EPROTONOSUPPORT; ++ net_dbg_ratelimited("%s: Invalid IP packet\n", dev->name); ++ goto err; ++ } ++ ++ peer = wg_allowedips_lookup_dst(&wg->peer_allowedips, skb); ++ if (unlikely(!peer)) { ++ ret = -ENOKEY; ++ if (skb->protocol == htons(ETH_P_IP)) ++ net_dbg_ratelimited("%s: No peer has allowed IPs matching %pI4\n", ++ dev->name, &ip_hdr(skb)->daddr); ++ else if (skb->protocol == htons(ETH_P_IPV6)) ++ net_dbg_ratelimited("%s: No peer has allowed IPs matching %pI6\n", ++ dev->name, &ipv6_hdr(skb)->daddr); ++ goto err; ++ } ++ ++ family = READ_ONCE(peer->endpoint.addr.sa_family); ++ if (unlikely(family != AF_INET && family != AF_INET6)) { ++ ret = -EDESTADDRREQ; ++ net_dbg_ratelimited("%s: No valid endpoint has been configured or discovered for peer %llu\n", ++ dev->name, peer->internal_id); ++ goto err_peer; ++ } ++ ++ mtu = skb_dst(skb) ? 
dst_mtu(skb_dst(skb)) : dev->mtu; ++ ++ __skb_queue_head_init(&packets); ++ if (!skb_is_gso(skb)) { ++ skb_mark_not_on_list(skb); ++ } else { ++ struct sk_buff *segs = skb_gso_segment(skb, 0); ++ ++ if (unlikely(IS_ERR(segs))) { ++ ret = PTR_ERR(segs); ++ goto err_peer; ++ } ++ dev_kfree_skb(skb); ++ skb = segs; ++ } ++ ++ skb_list_walk_safe(skb, skb, next) { ++ skb_mark_not_on_list(skb); ++ ++ skb = skb_share_check(skb, GFP_ATOMIC); ++ if (unlikely(!skb)) ++ continue; ++ ++ /* We only need to keep the original dst around for icmp, ++ * so at this point we're in a position to drop it. ++ */ ++ skb_dst_drop(skb); ++ ++ PACKET_CB(skb)->mtu = mtu; ++ ++ __skb_queue_tail(&packets, skb); ++ } ++ ++ spin_lock_bh(&peer->staged_packet_queue.lock); ++ /* If the queue is getting too big, we start removing the oldest packets ++ * until it's small again. We do this before adding the new packet, so ++ * we don't remove GSO segments that are in excess. ++ */ ++ while (skb_queue_len(&peer->staged_packet_queue) > MAX_STAGED_PACKETS) { ++ dev_kfree_skb(__skb_dequeue(&peer->staged_packet_queue)); ++ ++dev->stats.tx_dropped; ++ } ++ skb_queue_splice_tail(&packets, &peer->staged_packet_queue); ++ spin_unlock_bh(&peer->staged_packet_queue.lock); ++ ++ wg_packet_send_staged_packets(peer); ++ ++ wg_peer_put(peer); ++ return NETDEV_TX_OK; ++ ++err_peer: ++ wg_peer_put(peer); ++err: ++ ++dev->stats.tx_errors; ++ if (skb->protocol == htons(ETH_P_IP)) ++ icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0); ++ else if (skb->protocol == htons(ETH_P_IPV6)) ++ icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0); ++ kfree_skb(skb); ++ return ret; ++} ++ ++static const struct net_device_ops netdev_ops = { ++ .ndo_open = wg_open, ++ .ndo_stop = wg_stop, ++ .ndo_start_xmit = wg_xmit, ++ .ndo_get_stats64 = ip_tunnel_get_stats64 ++}; ++ ++static void wg_destruct(struct net_device *dev) ++{ ++ struct wg_device *wg = netdev_priv(dev); ++ ++ rtnl_lock(); ++ list_del(&wg->device_list); ++ rtnl_unlock(); ++ mutex_lock(&wg->device_update_lock); ++ wg->incoming_port = 0; ++ wg_socket_reinit(wg, NULL, NULL); ++ /* The final references are cleared in the below calls to destroy_workqueue. */ ++ wg_peer_remove_all(wg); ++ destroy_workqueue(wg->handshake_receive_wq); ++ destroy_workqueue(wg->handshake_send_wq); ++ destroy_workqueue(wg->packet_crypt_wq); ++ wg_packet_queue_free(&wg->decrypt_queue, true); ++ wg_packet_queue_free(&wg->encrypt_queue, true); ++ rcu_barrier(); /* Wait for all the peers to be actually freed. 
*/ ++ wg_ratelimiter_uninit(); ++ memzero_explicit(&wg->static_identity, sizeof(wg->static_identity)); ++ skb_queue_purge(&wg->incoming_handshakes); ++ free_percpu(dev->tstats); ++ free_percpu(wg->incoming_handshakes_worker); ++ if (wg->have_creating_net_ref) ++ put_net(wg->creating_net); ++ kvfree(wg->index_hashtable); ++ kvfree(wg->peer_hashtable); ++ mutex_unlock(&wg->device_update_lock); ++ ++ pr_debug("%s: Interface deleted\n", dev->name); ++ free_netdev(dev); ++} ++ ++static const struct device_type device_type = { .name = KBUILD_MODNAME }; ++ ++static void wg_setup(struct net_device *dev) ++{ ++ struct wg_device *wg = netdev_priv(dev); ++ enum { WG_NETDEV_FEATURES = NETIF_F_HW_CSUM | NETIF_F_RXCSUM | ++ NETIF_F_SG | NETIF_F_GSO | ++ NETIF_F_GSO_SOFTWARE | NETIF_F_HIGHDMA }; ++ ++ dev->netdev_ops = &netdev_ops; ++ dev->hard_header_len = 0; ++ dev->addr_len = 0; ++ dev->needed_headroom = DATA_PACKET_HEAD_ROOM; ++ dev->needed_tailroom = noise_encrypted_len(MESSAGE_PADDING_MULTIPLE); ++ dev->type = ARPHRD_NONE; ++ dev->flags = IFF_POINTOPOINT | IFF_NOARP; ++ dev->priv_flags |= IFF_NO_QUEUE; ++ dev->features |= NETIF_F_LLTX; ++ dev->features |= WG_NETDEV_FEATURES; ++ dev->hw_features |= WG_NETDEV_FEATURES; ++ dev->hw_enc_features |= WG_NETDEV_FEATURES; ++ dev->mtu = ETH_DATA_LEN - MESSAGE_MINIMUM_LENGTH - ++ sizeof(struct udphdr) - ++ max(sizeof(struct ipv6hdr), sizeof(struct iphdr)); ++ ++ SET_NETDEV_DEVTYPE(dev, &device_type); ++ ++ /* We need to keep the dst around in case of icmp replies. */ ++ netif_keep_dst(dev); ++ ++ memset(wg, 0, sizeof(*wg)); ++ wg->dev = dev; ++} ++ ++static int wg_newlink(struct net *src_net, struct net_device *dev, ++ struct nlattr *tb[], struct nlattr *data[], ++ struct netlink_ext_ack *extack) ++{ ++ struct wg_device *wg = netdev_priv(dev); ++ int ret = -ENOMEM; ++ ++ wg->creating_net = src_net; ++ init_rwsem(&wg->static_identity.lock); ++ mutex_init(&wg->socket_update_lock); ++ mutex_init(&wg->device_update_lock); ++ skb_queue_head_init(&wg->incoming_handshakes); ++ wg_allowedips_init(&wg->peer_allowedips); ++ wg_cookie_checker_init(&wg->cookie_checker, wg); ++ INIT_LIST_HEAD(&wg->peer_list); ++ wg->device_update_gen = 1; ++ ++ wg->peer_hashtable = wg_pubkey_hashtable_alloc(); ++ if (!wg->peer_hashtable) ++ return ret; ++ ++ wg->index_hashtable = wg_index_hashtable_alloc(); ++ if (!wg->index_hashtable) ++ goto err_free_peer_hashtable; ++ ++ dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); ++ if (!dev->tstats) ++ goto err_free_index_hashtable; ++ ++ wg->incoming_handshakes_worker = ++ wg_packet_percpu_multicore_worker_alloc( ++ wg_packet_handshake_receive_worker, wg); ++ if (!wg->incoming_handshakes_worker) ++ goto err_free_tstats; ++ ++ wg->handshake_receive_wq = alloc_workqueue("wg-kex-%s", ++ WQ_CPU_INTENSIVE | WQ_FREEZABLE, 0, dev->name); ++ if (!wg->handshake_receive_wq) ++ goto err_free_incoming_handshakes; ++ ++ wg->handshake_send_wq = alloc_workqueue("wg-kex-%s", ++ WQ_UNBOUND | WQ_FREEZABLE, 0, dev->name); ++ if (!wg->handshake_send_wq) ++ goto err_destroy_handshake_receive; ++ ++ wg->packet_crypt_wq = alloc_workqueue("wg-crypt-%s", ++ WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 0, dev->name); ++ if (!wg->packet_crypt_wq) ++ goto err_destroy_handshake_send; ++ ++ ret = wg_packet_queue_init(&wg->encrypt_queue, wg_packet_encrypt_worker, ++ true, MAX_QUEUED_PACKETS); ++ if (ret < 0) ++ goto err_destroy_packet_crypt; ++ ++ ret = wg_packet_queue_init(&wg->decrypt_queue, wg_packet_decrypt_worker, ++ true, MAX_QUEUED_PACKETS); ++ if (ret < 0) ++ 
goto err_free_encrypt_queue; ++ ++ ret = wg_ratelimiter_init(); ++ if (ret < 0) ++ goto err_free_decrypt_queue; ++ ++ ret = register_netdevice(dev); ++ if (ret < 0) ++ goto err_uninit_ratelimiter; ++ ++ list_add(&wg->device_list, &device_list); ++ ++ /* We wait until the end to assign priv_destructor, so that ++ * register_netdevice doesn't call it for us if it fails. ++ */ ++ dev->priv_destructor = wg_destruct; ++ ++ pr_debug("%s: Interface created\n", dev->name); ++ return ret; ++ ++err_uninit_ratelimiter: ++ wg_ratelimiter_uninit(); ++err_free_decrypt_queue: ++ wg_packet_queue_free(&wg->decrypt_queue, true); ++err_free_encrypt_queue: ++ wg_packet_queue_free(&wg->encrypt_queue, true); ++err_destroy_packet_crypt: ++ destroy_workqueue(wg->packet_crypt_wq); ++err_destroy_handshake_send: ++ destroy_workqueue(wg->handshake_send_wq); ++err_destroy_handshake_receive: ++ destroy_workqueue(wg->handshake_receive_wq); ++err_free_incoming_handshakes: ++ free_percpu(wg->incoming_handshakes_worker); ++err_free_tstats: ++ free_percpu(dev->tstats); ++err_free_index_hashtable: ++ kvfree(wg->index_hashtable); ++err_free_peer_hashtable: ++ kvfree(wg->peer_hashtable); ++ return ret; ++} ++ ++static struct rtnl_link_ops link_ops __read_mostly = { ++ .kind = KBUILD_MODNAME, ++ .priv_size = sizeof(struct wg_device), ++ .setup = wg_setup, ++ .newlink = wg_newlink, ++}; ++ ++static int wg_netdevice_notification(struct notifier_block *nb, ++ unsigned long action, void *data) ++{ ++ struct net_device *dev = ((struct netdev_notifier_info *)data)->dev; ++ struct wg_device *wg = netdev_priv(dev); ++ ++ ASSERT_RTNL(); ++ ++ if (action != NETDEV_REGISTER || dev->netdev_ops != &netdev_ops) ++ return 0; ++ ++ if (dev_net(dev) == wg->creating_net && wg->have_creating_net_ref) { ++ put_net(wg->creating_net); ++ wg->have_creating_net_ref = false; ++ } else if (dev_net(dev) != wg->creating_net && ++ !wg->have_creating_net_ref) { ++ wg->have_creating_net_ref = true; ++ get_net(wg->creating_net); ++ } ++ return 0; ++} ++ ++static struct notifier_block netdevice_notifier = { ++ .notifier_call = wg_netdevice_notification ++}; ++ ++int __init wg_device_init(void) ++{ ++ int ret; ++ ++#ifdef CONFIG_PM_SLEEP ++ ret = register_pm_notifier(&pm_notifier); ++ if (ret) ++ return ret; ++#endif ++ ++ ret = register_netdevice_notifier(&netdevice_notifier); ++ if (ret) ++ goto error_pm; ++ ++ ret = rtnl_link_register(&link_ops); ++ if (ret) ++ goto error_netdevice; ++ ++ return 0; ++ ++error_netdevice: ++ unregister_netdevice_notifier(&netdevice_notifier); ++error_pm: ++#ifdef CONFIG_PM_SLEEP ++ unregister_pm_notifier(&pm_notifier); ++#endif ++ return ret; ++} ++ ++void wg_device_uninit(void) ++{ ++ rtnl_link_unregister(&link_ops); ++ unregister_netdevice_notifier(&netdevice_notifier); ++#ifdef CONFIG_PM_SLEEP ++ unregister_pm_notifier(&pm_notifier); ++#endif ++ rcu_barrier(); ++} +--- /dev/null ++++ b/drivers/net/wireguard/device.h +@@ -0,0 +1,65 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* ++ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
++ */ ++ ++#ifndef _WG_DEVICE_H ++#define _WG_DEVICE_H ++ ++#include "noise.h" ++#include "allowedips.h" ++#include "peerlookup.h" ++#include "cookie.h" ++ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++struct wg_device; ++ ++struct multicore_worker { ++ void *ptr; ++ struct work_struct work; ++}; ++ ++struct crypt_queue { ++ struct ptr_ring ring; ++ union { ++ struct { ++ struct multicore_worker __percpu *worker; ++ int last_cpu; ++ }; ++ struct work_struct work; ++ }; ++}; ++ ++struct wg_device { ++ struct net_device *dev; ++ struct crypt_queue encrypt_queue, decrypt_queue; ++ struct sock __rcu *sock4, *sock6; ++ struct net *creating_net; ++ struct noise_static_identity static_identity; ++ struct workqueue_struct *handshake_receive_wq, *handshake_send_wq; ++ struct workqueue_struct *packet_crypt_wq; ++ struct sk_buff_head incoming_handshakes; ++ int incoming_handshake_cpu; ++ struct multicore_worker __percpu *incoming_handshakes_worker; ++ struct cookie_checker cookie_checker; ++ struct pubkey_hashtable *peer_hashtable; ++ struct index_hashtable *index_hashtable; ++ struct allowedips peer_allowedips; ++ struct mutex device_update_lock, socket_update_lock; ++ struct list_head device_list, peer_list; ++ unsigned int num_peers, device_update_gen; ++ u32 fwmark; ++ u16 incoming_port; ++ bool have_creating_net_ref; ++}; ++ ++int wg_device_init(void); ++void wg_device_uninit(void); ++ ++#endif /* _WG_DEVICE_H */ +--- /dev/null ++++ b/drivers/net/wireguard/main.c +@@ -0,0 +1,64 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. ++ */ ++ ++#include "version.h" ++#include "device.h" ++#include "noise.h" ++#include "queueing.h" ++#include "ratelimiter.h" ++#include "netlink.h" ++ ++#include ++ ++#include ++#include ++#include ++#include ++#include ++ ++static int __init mod_init(void) ++{ ++ int ret; ++ ++#ifdef DEBUG ++ if (!wg_allowedips_selftest() || !wg_packet_counter_selftest() || ++ !wg_ratelimiter_selftest()) ++ return -ENOTRECOVERABLE; ++#endif ++ wg_noise_init(); ++ ++ ret = wg_device_init(); ++ if (ret < 0) ++ goto err_device; ++ ++ ret = wg_genetlink_init(); ++ if (ret < 0) ++ goto err_netlink; ++ ++ pr_info("WireGuard " WIREGUARD_VERSION " loaded. See www.wireguard.com for information.\n"); ++ pr_info("Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved.\n"); ++ ++ return 0; ++ ++err_netlink: ++ wg_device_uninit(); ++err_device: ++ return ret; ++} ++ ++static void __exit mod_exit(void) ++{ ++ wg_genetlink_uninit(); ++ wg_device_uninit(); ++} ++ ++module_init(mod_init); ++module_exit(mod_exit); ++MODULE_LICENSE("GPL v2"); ++MODULE_DESCRIPTION("WireGuard secure network tunnel"); ++MODULE_AUTHOR("Jason A. Donenfeld "); ++MODULE_VERSION(WIREGUARD_VERSION); ++MODULE_ALIAS_RTNL_LINK(KBUILD_MODNAME); ++MODULE_ALIAS_GENL_FAMILY(WG_GENL_NAME); +--- /dev/null ++++ b/drivers/net/wireguard/messages.h +@@ -0,0 +1,128 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* ++ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
++ */ ++ ++#ifndef _WG_MESSAGES_H ++#define _WG_MESSAGES_H ++ ++#include ++#include ++#include ++ ++#include ++#include ++#include ++ ++enum noise_lengths { ++ NOISE_PUBLIC_KEY_LEN = CURVE25519_KEY_SIZE, ++ NOISE_SYMMETRIC_KEY_LEN = CHACHA20POLY1305_KEY_SIZE, ++ NOISE_TIMESTAMP_LEN = sizeof(u64) + sizeof(u32), ++ NOISE_AUTHTAG_LEN = CHACHA20POLY1305_AUTHTAG_SIZE, ++ NOISE_HASH_LEN = BLAKE2S_HASH_SIZE ++}; ++ ++#define noise_encrypted_len(plain_len) ((plain_len) + NOISE_AUTHTAG_LEN) ++ ++enum cookie_values { ++ COOKIE_SECRET_MAX_AGE = 2 * 60, ++ COOKIE_SECRET_LATENCY = 5, ++ COOKIE_NONCE_LEN = XCHACHA20POLY1305_NONCE_SIZE, ++ COOKIE_LEN = 16 ++}; ++ ++enum counter_values { ++ COUNTER_BITS_TOTAL = 2048, ++ COUNTER_REDUNDANT_BITS = BITS_PER_LONG, ++ COUNTER_WINDOW_SIZE = COUNTER_BITS_TOTAL - COUNTER_REDUNDANT_BITS ++}; ++ ++enum limits { ++ REKEY_AFTER_MESSAGES = 1ULL << 60, ++ REJECT_AFTER_MESSAGES = U64_MAX - COUNTER_WINDOW_SIZE - 1, ++ REKEY_TIMEOUT = 5, ++ REKEY_TIMEOUT_JITTER_MAX_JIFFIES = HZ / 3, ++ REKEY_AFTER_TIME = 120, ++ REJECT_AFTER_TIME = 180, ++ INITIATIONS_PER_SECOND = 50, ++ MAX_PEERS_PER_DEVICE = 1U << 20, ++ KEEPALIVE_TIMEOUT = 10, ++ MAX_TIMER_HANDSHAKES = 90 / REKEY_TIMEOUT, ++ MAX_QUEUED_INCOMING_HANDSHAKES = 4096, /* TODO: replace this with DQL */ ++ MAX_STAGED_PACKETS = 128, ++ MAX_QUEUED_PACKETS = 1024 /* TODO: replace this with DQL */ ++}; ++ ++enum message_type { ++ MESSAGE_INVALID = 0, ++ MESSAGE_HANDSHAKE_INITIATION = 1, ++ MESSAGE_HANDSHAKE_RESPONSE = 2, ++ MESSAGE_HANDSHAKE_COOKIE = 3, ++ MESSAGE_DATA = 4 ++}; ++ ++struct message_header { ++ /* The actual layout of this that we want is: ++ * u8 type ++ * u8 reserved_zero[3] ++ * ++ * But it turns out that by encoding this as little endian, ++ * we achieve the same thing, and it makes checking faster. ++ */ ++ __le32 type; ++}; ++ ++struct message_macs { ++ u8 mac1[COOKIE_LEN]; ++ u8 mac2[COOKIE_LEN]; ++}; ++ ++struct message_handshake_initiation { ++ struct message_header header; ++ __le32 sender_index; ++ u8 unencrypted_ephemeral[NOISE_PUBLIC_KEY_LEN]; ++ u8 encrypted_static[noise_encrypted_len(NOISE_PUBLIC_KEY_LEN)]; ++ u8 encrypted_timestamp[noise_encrypted_len(NOISE_TIMESTAMP_LEN)]; ++ struct message_macs macs; ++}; ++ ++struct message_handshake_response { ++ struct message_header header; ++ __le32 sender_index; ++ __le32 receiver_index; ++ u8 unencrypted_ephemeral[NOISE_PUBLIC_KEY_LEN]; ++ u8 encrypted_nothing[noise_encrypted_len(0)]; ++ struct message_macs macs; ++}; ++ ++struct message_handshake_cookie { ++ struct message_header header; ++ __le32 receiver_index; ++ u8 nonce[COOKIE_NONCE_LEN]; ++ u8 encrypted_cookie[noise_encrypted_len(COOKIE_LEN)]; ++}; ++ ++struct message_data { ++ struct message_header header; ++ __le32 key_idx; ++ __le64 counter; ++ u8 encrypted_data[]; ++}; ++ ++#define message_data_len(plain_len) \ ++ (noise_encrypted_len(plain_len) + sizeof(struct message_data)) ++ ++enum message_alignments { ++ MESSAGE_PADDING_MULTIPLE = 16, ++ MESSAGE_MINIMUM_LENGTH = message_data_len(0) ++}; ++ ++#define SKB_HEADER_LEN \ ++ (max(sizeof(struct iphdr), sizeof(struct ipv6hdr)) + \ ++ sizeof(struct udphdr) + NET_SKB_PAD) ++#define DATA_PACKET_HEAD_ROOM \ ++ ALIGN(sizeof(struct message_data) + SKB_HEADER_LEN, 4) ++ ++enum { HANDSHAKE_DSCP = 0x88 /* AF41, plus 00 ECN */ }; ++ ++#endif /* _WG_MESSAGES_H */ +--- /dev/null ++++ b/drivers/net/wireguard/netlink.c +@@ -0,0 +1,648 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
++ */ ++ ++#include "netlink.h" ++#include "device.h" ++#include "peer.h" ++#include "socket.h" ++#include "queueing.h" ++#include "messages.h" ++ ++#include ++ ++#include ++#include ++#include ++#include ++ ++static struct genl_family genl_family; ++ ++static const struct nla_policy device_policy[WGDEVICE_A_MAX + 1] = { ++ [WGDEVICE_A_IFINDEX] = { .type = NLA_U32 }, ++ [WGDEVICE_A_IFNAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 }, ++ [WGDEVICE_A_PRIVATE_KEY] = { .type = NLA_EXACT_LEN, .len = NOISE_PUBLIC_KEY_LEN }, ++ [WGDEVICE_A_PUBLIC_KEY] = { .type = NLA_EXACT_LEN, .len = NOISE_PUBLIC_KEY_LEN }, ++ [WGDEVICE_A_FLAGS] = { .type = NLA_U32 }, ++ [WGDEVICE_A_LISTEN_PORT] = { .type = NLA_U16 }, ++ [WGDEVICE_A_FWMARK] = { .type = NLA_U32 }, ++ [WGDEVICE_A_PEERS] = { .type = NLA_NESTED } ++}; ++ ++static const struct nla_policy peer_policy[WGPEER_A_MAX + 1] = { ++ [WGPEER_A_PUBLIC_KEY] = { .type = NLA_EXACT_LEN, .len = NOISE_PUBLIC_KEY_LEN }, ++ [WGPEER_A_PRESHARED_KEY] = { .type = NLA_EXACT_LEN, .len = NOISE_SYMMETRIC_KEY_LEN }, ++ [WGPEER_A_FLAGS] = { .type = NLA_U32 }, ++ [WGPEER_A_ENDPOINT] = { .type = NLA_MIN_LEN, .len = sizeof(struct sockaddr) }, ++ [WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL] = { .type = NLA_U16 }, ++ [WGPEER_A_LAST_HANDSHAKE_TIME] = { .type = NLA_EXACT_LEN, .len = sizeof(struct __kernel_timespec) }, ++ [WGPEER_A_RX_BYTES] = { .type = NLA_U64 }, ++ [WGPEER_A_TX_BYTES] = { .type = NLA_U64 }, ++ [WGPEER_A_ALLOWEDIPS] = { .type = NLA_NESTED }, ++ [WGPEER_A_PROTOCOL_VERSION] = { .type = NLA_U32 } ++}; ++ ++static const struct nla_policy allowedip_policy[WGALLOWEDIP_A_MAX + 1] = { ++ [WGALLOWEDIP_A_FAMILY] = { .type = NLA_U16 }, ++ [WGALLOWEDIP_A_IPADDR] = { .type = NLA_MIN_LEN, .len = sizeof(struct in_addr) }, ++ [WGALLOWEDIP_A_CIDR_MASK] = { .type = NLA_U8 } ++}; ++ ++static struct wg_device *lookup_interface(struct nlattr **attrs, ++ struct sk_buff *skb) ++{ ++ struct net_device *dev = NULL; ++ ++ if (!attrs[WGDEVICE_A_IFINDEX] == !attrs[WGDEVICE_A_IFNAME]) ++ return ERR_PTR(-EBADR); ++ if (attrs[WGDEVICE_A_IFINDEX]) ++ dev = dev_get_by_index(sock_net(skb->sk), ++ nla_get_u32(attrs[WGDEVICE_A_IFINDEX])); ++ else if (attrs[WGDEVICE_A_IFNAME]) ++ dev = dev_get_by_name(sock_net(skb->sk), ++ nla_data(attrs[WGDEVICE_A_IFNAME])); ++ if (!dev) ++ return ERR_PTR(-ENODEV); ++ if (!dev->rtnl_link_ops || !dev->rtnl_link_ops->kind || ++ strcmp(dev->rtnl_link_ops->kind, KBUILD_MODNAME)) { ++ dev_put(dev); ++ return ERR_PTR(-EOPNOTSUPP); ++ } ++ return netdev_priv(dev); ++} ++ ++static int get_allowedips(struct sk_buff *skb, const u8 *ip, u8 cidr, ++ int family) ++{ ++ struct nlattr *allowedip_nest; ++ ++ allowedip_nest = nla_nest_start(skb, 0); ++ if (!allowedip_nest) ++ return -EMSGSIZE; ++ ++ if (nla_put_u8(skb, WGALLOWEDIP_A_CIDR_MASK, cidr) || ++ nla_put_u16(skb, WGALLOWEDIP_A_FAMILY, family) || ++ nla_put(skb, WGALLOWEDIP_A_IPADDR, family == AF_INET6 ? 
++ sizeof(struct in6_addr) : sizeof(struct in_addr), ip)) { ++ nla_nest_cancel(skb, allowedip_nest); ++ return -EMSGSIZE; ++ } ++ ++ nla_nest_end(skb, allowedip_nest); ++ return 0; ++} ++ ++struct dump_ctx { ++ struct wg_device *wg; ++ struct wg_peer *next_peer; ++ u64 allowedips_seq; ++ struct allowedips_node *next_allowedip; ++}; ++ ++#define DUMP_CTX(cb) ((struct dump_ctx *)(cb)->args) ++ ++static int ++get_peer(struct wg_peer *peer, struct sk_buff *skb, struct dump_ctx *ctx) ++{ ++ ++ struct nlattr *allowedips_nest, *peer_nest = nla_nest_start(skb, 0); ++ struct allowedips_node *allowedips_node = ctx->next_allowedip; ++ bool fail; ++ ++ if (!peer_nest) ++ return -EMSGSIZE; ++ ++ down_read(&peer->handshake.lock); ++ fail = nla_put(skb, WGPEER_A_PUBLIC_KEY, NOISE_PUBLIC_KEY_LEN, ++ peer->handshake.remote_static); ++ up_read(&peer->handshake.lock); ++ if (fail) ++ goto err; ++ ++ if (!allowedips_node) { ++ const struct __kernel_timespec last_handshake = { ++ .tv_sec = peer->walltime_last_handshake.tv_sec, ++ .tv_nsec = peer->walltime_last_handshake.tv_nsec ++ }; ++ ++ down_read(&peer->handshake.lock); ++ fail = nla_put(skb, WGPEER_A_PRESHARED_KEY, ++ NOISE_SYMMETRIC_KEY_LEN, ++ peer->handshake.preshared_key); ++ up_read(&peer->handshake.lock); ++ if (fail) ++ goto err; ++ ++ if (nla_put(skb, WGPEER_A_LAST_HANDSHAKE_TIME, ++ sizeof(last_handshake), &last_handshake) || ++ nla_put_u16(skb, WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL, ++ peer->persistent_keepalive_interval) || ++ nla_put_u64_64bit(skb, WGPEER_A_TX_BYTES, peer->tx_bytes, ++ WGPEER_A_UNSPEC) || ++ nla_put_u64_64bit(skb, WGPEER_A_RX_BYTES, peer->rx_bytes, ++ WGPEER_A_UNSPEC) || ++ nla_put_u32(skb, WGPEER_A_PROTOCOL_VERSION, 1)) ++ goto err; ++ ++ read_lock_bh(&peer->endpoint_lock); ++ if (peer->endpoint.addr.sa_family == AF_INET) ++ fail = nla_put(skb, WGPEER_A_ENDPOINT, ++ sizeof(peer->endpoint.addr4), ++ &peer->endpoint.addr4); ++ else if (peer->endpoint.addr.sa_family == AF_INET6) ++ fail = nla_put(skb, WGPEER_A_ENDPOINT, ++ sizeof(peer->endpoint.addr6), ++ &peer->endpoint.addr6); ++ read_unlock_bh(&peer->endpoint_lock); ++ if (fail) ++ goto err; ++ allowedips_node = ++ list_first_entry_or_null(&peer->allowedips_list, ++ struct allowedips_node, peer_list); ++ } ++ if (!allowedips_node) ++ goto no_allowedips; ++ if (!ctx->allowedips_seq) ++ ctx->allowedips_seq = peer->device->peer_allowedips.seq; ++ else if (ctx->allowedips_seq != peer->device->peer_allowedips.seq) ++ goto no_allowedips; ++ ++ allowedips_nest = nla_nest_start(skb, WGPEER_A_ALLOWEDIPS); ++ if (!allowedips_nest) ++ goto err; ++ ++ list_for_each_entry_from(allowedips_node, &peer->allowedips_list, ++ peer_list) { ++ u8 cidr, ip[16] __aligned(__alignof(u64)); ++ int family; ++ ++ family = wg_allowedips_read_node(allowedips_node, ip, &cidr); ++ if (get_allowedips(skb, ip, cidr, family)) { ++ nla_nest_end(skb, allowedips_nest); ++ nla_nest_end(skb, peer_nest); ++ ctx->next_allowedip = allowedips_node; ++ return -EMSGSIZE; ++ } ++ } ++ nla_nest_end(skb, allowedips_nest); ++no_allowedips: ++ nla_nest_end(skb, peer_nest); ++ ctx->next_allowedip = NULL; ++ ctx->allowedips_seq = 0; ++ return 0; ++err: ++ nla_nest_cancel(skb, peer_nest); ++ return -EMSGSIZE; ++} ++ ++static int wg_get_device_start(struct netlink_callback *cb) ++{ ++ struct nlattr **attrs = genl_family_attrbuf(&genl_family); ++ struct wg_device *wg; ++ int ret; ++ ++ ret = nlmsg_parse(cb->nlh, GENL_HDRLEN + genl_family.hdrsize, attrs, ++ genl_family.maxattr, device_policy, NULL); ++ if (ret < 0) ++ return ret; 
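/* Editorial annotation, not part of the upstream patch: this .start callback
 * runs once per netlink dump. The parsed WGDEVICE_A_IFINDEX/WGDEVICE_A_IFNAME
 * attribute is resolved to a wg_device just below, and the pointer is parked
 * in the callback's scratch space via DUMP_CTX(cb) so that
 * wg_get_device_dump() can resume the peer/allowed-IP walk across multiple
 * response skbs. lookup_interface() takes a reference on the underlying
 * net_device; wg_get_device_done() drops it with dev_put().
 */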
++ wg = lookup_interface(attrs, cb->skb); ++ if (IS_ERR(wg)) ++ return PTR_ERR(wg); ++ DUMP_CTX(cb)->wg = wg; ++ return 0; ++} ++ ++static int wg_get_device_dump(struct sk_buff *skb, struct netlink_callback *cb) ++{ ++ struct wg_peer *peer, *next_peer_cursor; ++ struct dump_ctx *ctx = DUMP_CTX(cb); ++ struct wg_device *wg = ctx->wg; ++ struct nlattr *peers_nest; ++ int ret = -EMSGSIZE; ++ bool done = true; ++ void *hdr; ++ ++ rtnl_lock(); ++ mutex_lock(&wg->device_update_lock); ++ cb->seq = wg->device_update_gen; ++ next_peer_cursor = ctx->next_peer; ++ ++ hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, ++ &genl_family, NLM_F_MULTI, WG_CMD_GET_DEVICE); ++ if (!hdr) ++ goto out; ++ genl_dump_check_consistent(cb, hdr); ++ ++ if (!ctx->next_peer) { ++ if (nla_put_u16(skb, WGDEVICE_A_LISTEN_PORT, ++ wg->incoming_port) || ++ nla_put_u32(skb, WGDEVICE_A_FWMARK, wg->fwmark) || ++ nla_put_u32(skb, WGDEVICE_A_IFINDEX, wg->dev->ifindex) || ++ nla_put_string(skb, WGDEVICE_A_IFNAME, wg->dev->name)) ++ goto out; ++ ++ down_read(&wg->static_identity.lock); ++ if (wg->static_identity.has_identity) { ++ if (nla_put(skb, WGDEVICE_A_PRIVATE_KEY, ++ NOISE_PUBLIC_KEY_LEN, ++ wg->static_identity.static_private) || ++ nla_put(skb, WGDEVICE_A_PUBLIC_KEY, ++ NOISE_PUBLIC_KEY_LEN, ++ wg->static_identity.static_public)) { ++ up_read(&wg->static_identity.lock); ++ goto out; ++ } ++ } ++ up_read(&wg->static_identity.lock); ++ } ++ ++ peers_nest = nla_nest_start(skb, WGDEVICE_A_PEERS); ++ if (!peers_nest) ++ goto out; ++ ret = 0; ++ /* If the last cursor was removed via list_del_init in peer_remove, then ++ * we just treat this the same as there being no more peers left. The ++ * reason is that seq_nr should indicate to userspace that this isn't a ++ * coherent dump anyway, so they'll try again. ++ */ ++ if (list_empty(&wg->peer_list) || ++ (ctx->next_peer && list_empty(&ctx->next_peer->peer_list))) { ++ nla_nest_cancel(skb, peers_nest); ++ goto out; ++ } ++ lockdep_assert_held(&wg->device_update_lock); ++ peer = list_prepare_entry(ctx->next_peer, &wg->peer_list, peer_list); ++ list_for_each_entry_continue(peer, &wg->peer_list, peer_list) { ++ if (get_peer(peer, skb, ctx)) { ++ done = false; ++ break; ++ } ++ next_peer_cursor = peer; ++ } ++ nla_nest_end(skb, peers_nest); ++ ++out: ++ if (!ret && !done && next_peer_cursor) ++ wg_peer_get(next_peer_cursor); ++ wg_peer_put(ctx->next_peer); ++ mutex_unlock(&wg->device_update_lock); ++ rtnl_unlock(); ++ ++ if (ret) { ++ genlmsg_cancel(skb, hdr); ++ return ret; ++ } ++ genlmsg_end(skb, hdr); ++ if (done) { ++ ctx->next_peer = NULL; ++ return 0; ++ } ++ ctx->next_peer = next_peer_cursor; ++ return skb->len; ++ ++ /* At this point, we can't really deal ourselves with safely zeroing out ++ * the private key material after usage. This will need an additional API ++ * in the kernel for marking skbs as zero_on_free. 
++ */ ++} ++ ++static int wg_get_device_done(struct netlink_callback *cb) ++{ ++ struct dump_ctx *ctx = DUMP_CTX(cb); ++ ++ if (ctx->wg) ++ dev_put(ctx->wg->dev); ++ wg_peer_put(ctx->next_peer); ++ return 0; ++} ++ ++static int set_port(struct wg_device *wg, u16 port) ++{ ++ struct wg_peer *peer; ++ ++ if (wg->incoming_port == port) ++ return 0; ++ list_for_each_entry(peer, &wg->peer_list, peer_list) ++ wg_socket_clear_peer_endpoint_src(peer); ++ if (!netif_running(wg->dev)) { ++ wg->incoming_port = port; ++ return 0; ++ } ++ return wg_socket_init(wg, port); ++} ++ ++static int set_allowedip(struct wg_peer *peer, struct nlattr **attrs) ++{ ++ int ret = -EINVAL; ++ u16 family; ++ u8 cidr; ++ ++ if (!attrs[WGALLOWEDIP_A_FAMILY] || !attrs[WGALLOWEDIP_A_IPADDR] || ++ !attrs[WGALLOWEDIP_A_CIDR_MASK]) ++ return ret; ++ family = nla_get_u16(attrs[WGALLOWEDIP_A_FAMILY]); ++ cidr = nla_get_u8(attrs[WGALLOWEDIP_A_CIDR_MASK]); ++ ++ if (family == AF_INET && cidr <= 32 && ++ nla_len(attrs[WGALLOWEDIP_A_IPADDR]) == sizeof(struct in_addr)) ++ ret = wg_allowedips_insert_v4( ++ &peer->device->peer_allowedips, ++ nla_data(attrs[WGALLOWEDIP_A_IPADDR]), cidr, peer, ++ &peer->device->device_update_lock); ++ else if (family == AF_INET6 && cidr <= 128 && ++ nla_len(attrs[WGALLOWEDIP_A_IPADDR]) == sizeof(struct in6_addr)) ++ ret = wg_allowedips_insert_v6( ++ &peer->device->peer_allowedips, ++ nla_data(attrs[WGALLOWEDIP_A_IPADDR]), cidr, peer, ++ &peer->device->device_update_lock); ++ ++ return ret; ++} ++ ++static int set_peer(struct wg_device *wg, struct nlattr **attrs) ++{ ++ u8 *public_key = NULL, *preshared_key = NULL; ++ struct wg_peer *peer = NULL; ++ u32 flags = 0; ++ int ret; ++ ++ ret = -EINVAL; ++ if (attrs[WGPEER_A_PUBLIC_KEY] && ++ nla_len(attrs[WGPEER_A_PUBLIC_KEY]) == NOISE_PUBLIC_KEY_LEN) ++ public_key = nla_data(attrs[WGPEER_A_PUBLIC_KEY]); ++ else ++ goto out; ++ if (attrs[WGPEER_A_PRESHARED_KEY] && ++ nla_len(attrs[WGPEER_A_PRESHARED_KEY]) == NOISE_SYMMETRIC_KEY_LEN) ++ preshared_key = nla_data(attrs[WGPEER_A_PRESHARED_KEY]); ++ ++ if (attrs[WGPEER_A_FLAGS]) ++ flags = nla_get_u32(attrs[WGPEER_A_FLAGS]); ++ ret = -EOPNOTSUPP; ++ if (flags & ~__WGPEER_F_ALL) ++ goto out; ++ ++ ret = -EPFNOSUPPORT; ++ if (attrs[WGPEER_A_PROTOCOL_VERSION]) { ++ if (nla_get_u32(attrs[WGPEER_A_PROTOCOL_VERSION]) != 1) ++ goto out; ++ } ++ ++ peer = wg_pubkey_hashtable_lookup(wg->peer_hashtable, ++ nla_data(attrs[WGPEER_A_PUBLIC_KEY])); ++ ret = 0; ++ if (!peer) { /* Peer doesn't exist yet. Add a new one. */ ++ if (flags & (WGPEER_F_REMOVE_ME | WGPEER_F_UPDATE_ONLY)) ++ goto out; ++ ++ /* The peer is new, so there aren't allowed IPs to remove. */ ++ flags &= ~WGPEER_F_REPLACE_ALLOWEDIPS; ++ ++ down_read(&wg->static_identity.lock); ++ if (wg->static_identity.has_identity && ++ !memcmp(nla_data(attrs[WGPEER_A_PUBLIC_KEY]), ++ wg->static_identity.static_public, ++ NOISE_PUBLIC_KEY_LEN)) { ++ /* We silently ignore peers that have the same public ++ * key as the device. The reason we do it silently is ++ * that we'd like for people to be able to reuse the ++ * same set of API calls across peers. ++ */ ++ up_read(&wg->static_identity.lock); ++ ret = 0; ++ goto out; ++ } ++ up_read(&wg->static_identity.lock); ++ ++ peer = wg_peer_create(wg, public_key, preshared_key); ++ if (IS_ERR(peer)) { ++ /* Similar to the above, if the key is invalid, we skip ++ * it without fanfare, so that services don't need to ++ * worry about doing key validation themselves. ++ */ ++ ret = PTR_ERR(peer) == -EKEYREJECTED ? 
0 : PTR_ERR(peer); ++ peer = NULL; ++ goto out; ++ } ++ /* Take additional reference, as though we've just been ++ * looked up. ++ */ ++ wg_peer_get(peer); ++ } ++ ++ if (flags & WGPEER_F_REMOVE_ME) { ++ wg_peer_remove(peer); ++ goto out; ++ } ++ ++ if (preshared_key) { ++ down_write(&peer->handshake.lock); ++ memcpy(&peer->handshake.preshared_key, preshared_key, ++ NOISE_SYMMETRIC_KEY_LEN); ++ up_write(&peer->handshake.lock); ++ } ++ ++ if (attrs[WGPEER_A_ENDPOINT]) { ++ struct sockaddr *addr = nla_data(attrs[WGPEER_A_ENDPOINT]); ++ size_t len = nla_len(attrs[WGPEER_A_ENDPOINT]); ++ ++ if ((len == sizeof(struct sockaddr_in) && ++ addr->sa_family == AF_INET) || ++ (len == sizeof(struct sockaddr_in6) && ++ addr->sa_family == AF_INET6)) { ++ struct endpoint endpoint = { { { 0 } } }; ++ ++ memcpy(&endpoint.addr, addr, len); ++ wg_socket_set_peer_endpoint(peer, &endpoint); ++ } ++ } ++ ++ if (flags & WGPEER_F_REPLACE_ALLOWEDIPS) ++ wg_allowedips_remove_by_peer(&wg->peer_allowedips, peer, ++ &wg->device_update_lock); ++ ++ if (attrs[WGPEER_A_ALLOWEDIPS]) { ++ struct nlattr *attr, *allowedip[WGALLOWEDIP_A_MAX + 1]; ++ int rem; ++ ++ nla_for_each_nested(attr, attrs[WGPEER_A_ALLOWEDIPS], rem) { ++ ret = nla_parse_nested(allowedip, WGALLOWEDIP_A_MAX, ++ attr, allowedip_policy, NULL); ++ if (ret < 0) ++ goto out; ++ ret = set_allowedip(peer, allowedip); ++ if (ret < 0) ++ goto out; ++ } ++ } ++ ++ if (attrs[WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL]) { ++ const u16 persistent_keepalive_interval = nla_get_u16( ++ attrs[WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL]); ++ const bool send_keepalive = ++ !peer->persistent_keepalive_interval && ++ persistent_keepalive_interval && ++ netif_running(wg->dev); ++ ++ peer->persistent_keepalive_interval = persistent_keepalive_interval; ++ if (send_keepalive) ++ wg_packet_send_keepalive(peer); ++ } ++ ++ if (netif_running(wg->dev)) ++ wg_packet_send_staged_packets(peer); ++ ++out: ++ wg_peer_put(peer); ++ if (attrs[WGPEER_A_PRESHARED_KEY]) ++ memzero_explicit(nla_data(attrs[WGPEER_A_PRESHARED_KEY]), ++ nla_len(attrs[WGPEER_A_PRESHARED_KEY])); ++ return ret; ++} ++ ++static int wg_set_device(struct sk_buff *skb, struct genl_info *info) ++{ ++ struct wg_device *wg = lookup_interface(info->attrs, skb); ++ u32 flags = 0; ++ int ret; ++ ++ if (IS_ERR(wg)) { ++ ret = PTR_ERR(wg); ++ goto out_nodev; ++ } ++ ++ rtnl_lock(); ++ mutex_lock(&wg->device_update_lock); ++ ++ if (info->attrs[WGDEVICE_A_FLAGS]) ++ flags = nla_get_u32(info->attrs[WGDEVICE_A_FLAGS]); ++ ret = -EOPNOTSUPP; ++ if (flags & ~__WGDEVICE_F_ALL) ++ goto out; ++ ++ ret = -EPERM; ++ if ((info->attrs[WGDEVICE_A_LISTEN_PORT] || ++ info->attrs[WGDEVICE_A_FWMARK]) && ++ !ns_capable(wg->creating_net->user_ns, CAP_NET_ADMIN)) ++ goto out; ++ ++ ++wg->device_update_gen; ++ ++ if (info->attrs[WGDEVICE_A_FWMARK]) { ++ struct wg_peer *peer; ++ ++ wg->fwmark = nla_get_u32(info->attrs[WGDEVICE_A_FWMARK]); ++ list_for_each_entry(peer, &wg->peer_list, peer_list) ++ wg_socket_clear_peer_endpoint_src(peer); ++ } ++ ++ if (info->attrs[WGDEVICE_A_LISTEN_PORT]) { ++ ret = set_port(wg, ++ nla_get_u16(info->attrs[WGDEVICE_A_LISTEN_PORT])); ++ if (ret) ++ goto out; ++ } ++ ++ if (flags & WGDEVICE_F_REPLACE_PEERS) ++ wg_peer_remove_all(wg); ++ ++ if (info->attrs[WGDEVICE_A_PRIVATE_KEY] && ++ nla_len(info->attrs[WGDEVICE_A_PRIVATE_KEY]) == ++ NOISE_PUBLIC_KEY_LEN) { ++ u8 *private_key = nla_data(info->attrs[WGDEVICE_A_PRIVATE_KEY]); ++ u8 public_key[NOISE_PUBLIC_KEY_LEN]; ++ struct wg_peer *peer, *temp; ++ ++ if 
(!crypto_memneq(wg->static_identity.static_private, ++ private_key, NOISE_PUBLIC_KEY_LEN)) ++ goto skip_set_private_key; ++ ++ /* We remove before setting, to prevent race, which means doing ++ * two 25519-genpub ops. ++ */ ++ if (curve25519_generate_public(public_key, private_key)) { ++ peer = wg_pubkey_hashtable_lookup(wg->peer_hashtable, ++ public_key); ++ if (peer) { ++ wg_peer_put(peer); ++ wg_peer_remove(peer); ++ } ++ } ++ ++ down_write(&wg->static_identity.lock); ++ wg_noise_set_static_identity_private_key(&wg->static_identity, ++ private_key); ++ list_for_each_entry_safe(peer, temp, &wg->peer_list, ++ peer_list) { ++ if (wg_noise_precompute_static_static(peer)) ++ wg_noise_expire_current_peer_keypairs(peer); ++ else ++ wg_peer_remove(peer); ++ } ++ wg_cookie_checker_precompute_device_keys(&wg->cookie_checker); ++ up_write(&wg->static_identity.lock); ++ } ++skip_set_private_key: ++ ++ if (info->attrs[WGDEVICE_A_PEERS]) { ++ struct nlattr *attr, *peer[WGPEER_A_MAX + 1]; ++ int rem; ++ ++ nla_for_each_nested(attr, info->attrs[WGDEVICE_A_PEERS], rem) { ++ ret = nla_parse_nested(peer, WGPEER_A_MAX, attr, ++ peer_policy, NULL); ++ if (ret < 0) ++ goto out; ++ ret = set_peer(wg, peer); ++ if (ret < 0) ++ goto out; ++ } ++ } ++ ret = 0; ++ ++out: ++ mutex_unlock(&wg->device_update_lock); ++ rtnl_unlock(); ++ dev_put(wg->dev); ++out_nodev: ++ if (info->attrs[WGDEVICE_A_PRIVATE_KEY]) ++ memzero_explicit(nla_data(info->attrs[WGDEVICE_A_PRIVATE_KEY]), ++ nla_len(info->attrs[WGDEVICE_A_PRIVATE_KEY])); ++ return ret; ++} ++ ++static const struct genl_ops genl_ops[] = { ++ { ++ .cmd = WG_CMD_GET_DEVICE, ++ .start = wg_get_device_start, ++ .dumpit = wg_get_device_dump, ++ .done = wg_get_device_done, ++ .flags = GENL_UNS_ADMIN_PERM ++ }, { ++ .cmd = WG_CMD_SET_DEVICE, ++ .doit = wg_set_device, ++ .flags = GENL_UNS_ADMIN_PERM ++ } ++}; ++ ++static struct genl_family genl_family __ro_after_init = { ++ .ops = genl_ops, ++ .n_ops = ARRAY_SIZE(genl_ops), ++ .name = WG_GENL_NAME, ++ .version = WG_GENL_VERSION, ++ .maxattr = WGDEVICE_A_MAX, ++ .module = THIS_MODULE, ++ .policy = device_policy, ++ .netnsok = true ++}; ++ ++int __init wg_genetlink_init(void) ++{ ++ return genl_register_family(&genl_family); ++} ++ ++void __exit wg_genetlink_uninit(void) ++{ ++ genl_unregister_family(&genl_family); ++} +--- /dev/null ++++ b/drivers/net/wireguard/netlink.h +@@ -0,0 +1,12 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* ++ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. ++ */ ++ ++#ifndef _WG_NETLINK_H ++#define _WG_NETLINK_H ++ ++int wg_genetlink_init(void); ++void wg_genetlink_uninit(void); ++ ++#endif /* _WG_NETLINK_H */ +--- /dev/null ++++ b/drivers/net/wireguard/noise.c +@@ -0,0 +1,828 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
++ */ ++ ++#include "noise.h" ++#include "device.h" ++#include "peer.h" ++#include "messages.h" ++#include "queueing.h" ++#include "peerlookup.h" ++ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++/* This implements Noise_IKpsk2: ++ * ++ * <- s ++ * ****** ++ * -> e, es, s, ss, {t} ++ * <- e, ee, se, psk, {} ++ */ ++ ++static const u8 handshake_name[37] = "Noise_IKpsk2_25519_ChaChaPoly_BLAKE2s"; ++static const u8 identifier_name[34] = "WireGuard v1 zx2c4 Jason@zx2c4.com"; ++static u8 handshake_init_hash[NOISE_HASH_LEN] __ro_after_init; ++static u8 handshake_init_chaining_key[NOISE_HASH_LEN] __ro_after_init; ++static atomic64_t keypair_counter = ATOMIC64_INIT(0); ++ ++void __init wg_noise_init(void) ++{ ++ struct blake2s_state blake; ++ ++ blake2s(handshake_init_chaining_key, handshake_name, NULL, ++ NOISE_HASH_LEN, sizeof(handshake_name), 0); ++ blake2s_init(&blake, NOISE_HASH_LEN); ++ blake2s_update(&blake, handshake_init_chaining_key, NOISE_HASH_LEN); ++ blake2s_update(&blake, identifier_name, sizeof(identifier_name)); ++ blake2s_final(&blake, handshake_init_hash); ++} ++ ++/* Must hold peer->handshake.static_identity->lock */ ++bool wg_noise_precompute_static_static(struct wg_peer *peer) ++{ ++ bool ret = true; ++ ++ down_write(&peer->handshake.lock); ++ if (peer->handshake.static_identity->has_identity) ++ ret = curve25519( ++ peer->handshake.precomputed_static_static, ++ peer->handshake.static_identity->static_private, ++ peer->handshake.remote_static); ++ else ++ memset(peer->handshake.precomputed_static_static, 0, ++ NOISE_PUBLIC_KEY_LEN); ++ up_write(&peer->handshake.lock); ++ return ret; ++} ++ ++bool wg_noise_handshake_init(struct noise_handshake *handshake, ++ struct noise_static_identity *static_identity, ++ const u8 peer_public_key[NOISE_PUBLIC_KEY_LEN], ++ const u8 peer_preshared_key[NOISE_SYMMETRIC_KEY_LEN], ++ struct wg_peer *peer) ++{ ++ memset(handshake, 0, sizeof(*handshake)); ++ init_rwsem(&handshake->lock); ++ handshake->entry.type = INDEX_HASHTABLE_HANDSHAKE; ++ handshake->entry.peer = peer; ++ memcpy(handshake->remote_static, peer_public_key, NOISE_PUBLIC_KEY_LEN); ++ if (peer_preshared_key) ++ memcpy(handshake->preshared_key, peer_preshared_key, ++ NOISE_SYMMETRIC_KEY_LEN); ++ handshake->static_identity = static_identity; ++ handshake->state = HANDSHAKE_ZEROED; ++ return wg_noise_precompute_static_static(peer); ++} ++ ++static void handshake_zero(struct noise_handshake *handshake) ++{ ++ memset(&handshake->ephemeral_private, 0, NOISE_PUBLIC_KEY_LEN); ++ memset(&handshake->remote_ephemeral, 0, NOISE_PUBLIC_KEY_LEN); ++ memset(&handshake->hash, 0, NOISE_HASH_LEN); ++ memset(&handshake->chaining_key, 0, NOISE_HASH_LEN); ++ handshake->remote_index = 0; ++ handshake->state = HANDSHAKE_ZEROED; ++} ++ ++void wg_noise_handshake_clear(struct noise_handshake *handshake) ++{ ++ wg_index_hashtable_remove( ++ handshake->entry.peer->device->index_hashtable, ++ &handshake->entry); ++ down_write(&handshake->lock); ++ handshake_zero(handshake); ++ up_write(&handshake->lock); ++ wg_index_hashtable_remove( ++ handshake->entry.peer->device->index_hashtable, ++ &handshake->entry); ++} ++ ++static struct noise_keypair *keypair_create(struct wg_peer *peer) ++{ ++ struct noise_keypair *keypair = kzalloc(sizeof(*keypair), GFP_KERNEL); ++ ++ if (unlikely(!keypair)) ++ return NULL; ++ keypair->internal_id = atomic64_inc_return(&keypair_counter); ++ keypair->entry.type = INDEX_HASHTABLE_KEYPAIR; ++ keypair->entry.peer = peer; ++ kref_init(&keypair->refcount); ++ return 
keypair; ++} ++ ++static void keypair_free_rcu(struct rcu_head *rcu) ++{ ++ kzfree(container_of(rcu, struct noise_keypair, rcu)); ++} ++ ++static void keypair_free_kref(struct kref *kref) ++{ ++ struct noise_keypair *keypair = ++ container_of(kref, struct noise_keypair, refcount); ++ ++ net_dbg_ratelimited("%s: Keypair %llu destroyed for peer %llu\n", ++ keypair->entry.peer->device->dev->name, ++ keypair->internal_id, ++ keypair->entry.peer->internal_id); ++ wg_index_hashtable_remove(keypair->entry.peer->device->index_hashtable, ++ &keypair->entry); ++ call_rcu(&keypair->rcu, keypair_free_rcu); ++} ++ ++void wg_noise_keypair_put(struct noise_keypair *keypair, bool unreference_now) ++{ ++ if (unlikely(!keypair)) ++ return; ++ if (unlikely(unreference_now)) ++ wg_index_hashtable_remove( ++ keypair->entry.peer->device->index_hashtable, ++ &keypair->entry); ++ kref_put(&keypair->refcount, keypair_free_kref); ++} ++ ++struct noise_keypair *wg_noise_keypair_get(struct noise_keypair *keypair) ++{ ++ RCU_LOCKDEP_WARN(!rcu_read_lock_bh_held(), ++ "Taking noise keypair reference without holding the RCU BH read lock"); ++ if (unlikely(!keypair || !kref_get_unless_zero(&keypair->refcount))) ++ return NULL; ++ return keypair; ++} ++ ++void wg_noise_keypairs_clear(struct noise_keypairs *keypairs) ++{ ++ struct noise_keypair *old; ++ ++ spin_lock_bh(&keypairs->keypair_update_lock); ++ ++ /* We zero the next_keypair before zeroing the others, so that ++ * wg_noise_received_with_keypair returns early before subsequent ones ++ * are zeroed. ++ */ ++ old = rcu_dereference_protected(keypairs->next_keypair, ++ lockdep_is_held(&keypairs->keypair_update_lock)); ++ RCU_INIT_POINTER(keypairs->next_keypair, NULL); ++ wg_noise_keypair_put(old, true); ++ ++ old = rcu_dereference_protected(keypairs->previous_keypair, ++ lockdep_is_held(&keypairs->keypair_update_lock)); ++ RCU_INIT_POINTER(keypairs->previous_keypair, NULL); ++ wg_noise_keypair_put(old, true); ++ ++ old = rcu_dereference_protected(keypairs->current_keypair, ++ lockdep_is_held(&keypairs->keypair_update_lock)); ++ RCU_INIT_POINTER(keypairs->current_keypair, NULL); ++ wg_noise_keypair_put(old, true); ++ ++ spin_unlock_bh(&keypairs->keypair_update_lock); ++} ++ ++void wg_noise_expire_current_peer_keypairs(struct wg_peer *peer) ++{ ++ struct noise_keypair *keypair; ++ ++ wg_noise_handshake_clear(&peer->handshake); ++ wg_noise_reset_last_sent_handshake(&peer->last_sent_handshake); ++ ++ spin_lock_bh(&peer->keypairs.keypair_update_lock); ++ keypair = rcu_dereference_protected(peer->keypairs.next_keypair, ++ lockdep_is_held(&peer->keypairs.keypair_update_lock)); ++ if (keypair) ++ keypair->sending.is_valid = false; ++ keypair = rcu_dereference_protected(peer->keypairs.current_keypair, ++ lockdep_is_held(&peer->keypairs.keypair_update_lock)); ++ if (keypair) ++ keypair->sending.is_valid = false; ++ spin_unlock_bh(&peer->keypairs.keypair_update_lock); ++} ++ ++static void add_new_keypair(struct noise_keypairs *keypairs, ++ struct noise_keypair *new_keypair) ++{ ++ struct noise_keypair *previous_keypair, *next_keypair, *current_keypair; ++ ++ spin_lock_bh(&keypairs->keypair_update_lock); ++ previous_keypair = rcu_dereference_protected(keypairs->previous_keypair, ++ lockdep_is_held(&keypairs->keypair_update_lock)); ++ next_keypair = rcu_dereference_protected(keypairs->next_keypair, ++ lockdep_is_held(&keypairs->keypair_update_lock)); ++ current_keypair = rcu_dereference_protected(keypairs->current_keypair, ++ lockdep_is_held(&keypairs->keypair_update_lock)); ++ if 
(new_keypair->i_am_the_initiator) { ++ /* If we're the initiator, it means we've sent a handshake, and ++ * received a confirmation response, which means this new ++ * keypair can now be used. ++ */ ++ if (next_keypair) { ++ /* If there already was a next keypair pending, we ++ * demote it to be the previous keypair, and free the ++ * existing current. Note that this means KCI can result ++ * in this transition. It would perhaps be more sound to ++ * always just get rid of the unused next keypair ++ * instead of putting it in the previous slot, but this ++ * might be a bit less robust. Something to think about ++ * for the future. ++ */ ++ RCU_INIT_POINTER(keypairs->next_keypair, NULL); ++ rcu_assign_pointer(keypairs->previous_keypair, ++ next_keypair); ++ wg_noise_keypair_put(current_keypair, true); ++ } else /* If there wasn't an existing next keypair, we replace ++ * the previous with the current one. ++ */ ++ rcu_assign_pointer(keypairs->previous_keypair, ++ current_keypair); ++ /* At this point we can get rid of the old previous keypair, and ++ * set up the new keypair. ++ */ ++ wg_noise_keypair_put(previous_keypair, true); ++ rcu_assign_pointer(keypairs->current_keypair, new_keypair); ++ } else { ++ /* If we're the responder, it means we can't use the new keypair ++ * until we receive confirmation via the first data packet, so ++ * we get rid of the existing previous one, the possibly ++ * existing next one, and slide in the new next one. ++ */ ++ rcu_assign_pointer(keypairs->next_keypair, new_keypair); ++ wg_noise_keypair_put(next_keypair, true); ++ RCU_INIT_POINTER(keypairs->previous_keypair, NULL); ++ wg_noise_keypair_put(previous_keypair, true); ++ } ++ spin_unlock_bh(&keypairs->keypair_update_lock); ++} ++ ++bool wg_noise_received_with_keypair(struct noise_keypairs *keypairs, ++ struct noise_keypair *received_keypair) ++{ ++ struct noise_keypair *old_keypair; ++ bool key_is_new; ++ ++ /* We first check without taking the spinlock. */ ++ key_is_new = received_keypair == ++ rcu_access_pointer(keypairs->next_keypair); ++ if (likely(!key_is_new)) ++ return false; ++ ++ spin_lock_bh(&keypairs->keypair_update_lock); ++ /* After locking, we double check that things didn't change from ++ * beneath us. ++ */ ++ if (unlikely(received_keypair != ++ rcu_dereference_protected(keypairs->next_keypair, ++ lockdep_is_held(&keypairs->keypair_update_lock)))) { ++ spin_unlock_bh(&keypairs->keypair_update_lock); ++ return false; ++ } ++ ++ /* When we've finally received the confirmation, we slide the next ++ * into the current, the current into the previous, and get rid of ++ * the old previous. 
++ */ ++ old_keypair = rcu_dereference_protected(keypairs->previous_keypair, ++ lockdep_is_held(&keypairs->keypair_update_lock)); ++ rcu_assign_pointer(keypairs->previous_keypair, ++ rcu_dereference_protected(keypairs->current_keypair, ++ lockdep_is_held(&keypairs->keypair_update_lock))); ++ wg_noise_keypair_put(old_keypair, true); ++ rcu_assign_pointer(keypairs->current_keypair, received_keypair); ++ RCU_INIT_POINTER(keypairs->next_keypair, NULL); ++ ++ spin_unlock_bh(&keypairs->keypair_update_lock); ++ return true; ++} ++ ++/* Must hold static_identity->lock */ ++void wg_noise_set_static_identity_private_key( ++ struct noise_static_identity *static_identity, ++ const u8 private_key[NOISE_PUBLIC_KEY_LEN]) ++{ ++ memcpy(static_identity->static_private, private_key, ++ NOISE_PUBLIC_KEY_LEN); ++ curve25519_clamp_secret(static_identity->static_private); ++ static_identity->has_identity = curve25519_generate_public( ++ static_identity->static_public, private_key); ++} ++ ++/* This is Hugo Krawczyk's HKDF: ++ * - https://eprint.iacr.org/2010/264.pdf ++ * - https://tools.ietf.org/html/rfc5869 ++ */ ++static void kdf(u8 *first_dst, u8 *second_dst, u8 *third_dst, const u8 *data, ++ size_t first_len, size_t second_len, size_t third_len, ++ size_t data_len, const u8 chaining_key[NOISE_HASH_LEN]) ++{ ++ u8 output[BLAKE2S_HASH_SIZE + 1]; ++ u8 secret[BLAKE2S_HASH_SIZE]; ++ ++ WARN_ON(IS_ENABLED(DEBUG) && ++ (first_len > BLAKE2S_HASH_SIZE || ++ second_len > BLAKE2S_HASH_SIZE || ++ third_len > BLAKE2S_HASH_SIZE || ++ ((second_len || second_dst || third_len || third_dst) && ++ (!first_len || !first_dst)) || ++ ((third_len || third_dst) && (!second_len || !second_dst)))); ++ ++ /* Extract entropy from data into secret */ ++ blake2s256_hmac(secret, data, chaining_key, data_len, NOISE_HASH_LEN); ++ ++ if (!first_dst || !first_len) ++ goto out; ++ ++ /* Expand first key: key = secret, data = 0x1 */ ++ output[0] = 1; ++ blake2s256_hmac(output, output, secret, 1, BLAKE2S_HASH_SIZE); ++ memcpy(first_dst, output, first_len); ++ ++ if (!second_dst || !second_len) ++ goto out; ++ ++ /* Expand second key: key = secret, data = first-key || 0x2 */ ++ output[BLAKE2S_HASH_SIZE] = 2; ++ blake2s256_hmac(output, output, secret, BLAKE2S_HASH_SIZE + 1, ++ BLAKE2S_HASH_SIZE); ++ memcpy(second_dst, output, second_len); ++ ++ if (!third_dst || !third_len) ++ goto out; ++ ++ /* Expand third key: key = secret, data = second-key || 0x3 */ ++ output[BLAKE2S_HASH_SIZE] = 3; ++ blake2s256_hmac(output, output, secret, BLAKE2S_HASH_SIZE + 1, ++ BLAKE2S_HASH_SIZE); ++ memcpy(third_dst, output, third_len); ++ ++out: ++ /* Clear sensitive data from stack */ ++ memzero_explicit(secret, BLAKE2S_HASH_SIZE); ++ memzero_explicit(output, BLAKE2S_HASH_SIZE + 1); ++} ++ ++static void symmetric_key_init(struct noise_symmetric_key *key) ++{ ++ spin_lock_init(&key->counter.receive.lock); ++ atomic64_set(&key->counter.counter, 0); ++ memset(key->counter.receive.backtrack, 0, ++ sizeof(key->counter.receive.backtrack)); ++ key->birthdate = ktime_get_coarse_boottime_ns(); ++ key->is_valid = true; ++} ++ ++static void derive_keys(struct noise_symmetric_key *first_dst, ++ struct noise_symmetric_key *second_dst, ++ const u8 chaining_key[NOISE_HASH_LEN]) ++{ ++ kdf(first_dst->key, second_dst->key, NULL, NULL, ++ NOISE_SYMMETRIC_KEY_LEN, NOISE_SYMMETRIC_KEY_LEN, 0, 0, ++ chaining_key); ++ symmetric_key_init(first_dst); ++ symmetric_key_init(second_dst); ++} ++ ++static bool __must_check mix_dh(u8 chaining_key[NOISE_HASH_LEN], ++ u8 
key[NOISE_SYMMETRIC_KEY_LEN], ++ const u8 private[NOISE_PUBLIC_KEY_LEN], ++ const u8 public[NOISE_PUBLIC_KEY_LEN]) ++{ ++ u8 dh_calculation[NOISE_PUBLIC_KEY_LEN]; ++ ++ if (unlikely(!curve25519(dh_calculation, private, public))) ++ return false; ++ kdf(chaining_key, key, NULL, dh_calculation, NOISE_HASH_LEN, ++ NOISE_SYMMETRIC_KEY_LEN, 0, NOISE_PUBLIC_KEY_LEN, chaining_key); ++ memzero_explicit(dh_calculation, NOISE_PUBLIC_KEY_LEN); ++ return true; ++} ++ ++static void mix_hash(u8 hash[NOISE_HASH_LEN], const u8 *src, size_t src_len) ++{ ++ struct blake2s_state blake; ++ ++ blake2s_init(&blake, NOISE_HASH_LEN); ++ blake2s_update(&blake, hash, NOISE_HASH_LEN); ++ blake2s_update(&blake, src, src_len); ++ blake2s_final(&blake, hash); ++} ++ ++static void mix_psk(u8 chaining_key[NOISE_HASH_LEN], u8 hash[NOISE_HASH_LEN], ++ u8 key[NOISE_SYMMETRIC_KEY_LEN], ++ const u8 psk[NOISE_SYMMETRIC_KEY_LEN]) ++{ ++ u8 temp_hash[NOISE_HASH_LEN]; ++ ++ kdf(chaining_key, temp_hash, key, psk, NOISE_HASH_LEN, NOISE_HASH_LEN, ++ NOISE_SYMMETRIC_KEY_LEN, NOISE_SYMMETRIC_KEY_LEN, chaining_key); ++ mix_hash(hash, temp_hash, NOISE_HASH_LEN); ++ memzero_explicit(temp_hash, NOISE_HASH_LEN); ++} ++ ++static void handshake_init(u8 chaining_key[NOISE_HASH_LEN], ++ u8 hash[NOISE_HASH_LEN], ++ const u8 remote_static[NOISE_PUBLIC_KEY_LEN]) ++{ ++ memcpy(hash, handshake_init_hash, NOISE_HASH_LEN); ++ memcpy(chaining_key, handshake_init_chaining_key, NOISE_HASH_LEN); ++ mix_hash(hash, remote_static, NOISE_PUBLIC_KEY_LEN); ++} ++ ++static void message_encrypt(u8 *dst_ciphertext, const u8 *src_plaintext, ++ size_t src_len, u8 key[NOISE_SYMMETRIC_KEY_LEN], ++ u8 hash[NOISE_HASH_LEN]) ++{ ++ chacha20poly1305_encrypt(dst_ciphertext, src_plaintext, src_len, hash, ++ NOISE_HASH_LEN, ++ 0 /* Always zero for Noise_IK */, key); ++ mix_hash(hash, dst_ciphertext, noise_encrypted_len(src_len)); ++} ++ ++static bool message_decrypt(u8 *dst_plaintext, const u8 *src_ciphertext, ++ size_t src_len, u8 key[NOISE_SYMMETRIC_KEY_LEN], ++ u8 hash[NOISE_HASH_LEN]) ++{ ++ if (!chacha20poly1305_decrypt(dst_plaintext, src_ciphertext, src_len, ++ hash, NOISE_HASH_LEN, ++ 0 /* Always zero for Noise_IK */, key)) ++ return false; ++ mix_hash(hash, src_ciphertext, src_len); ++ return true; ++} ++ ++static void message_ephemeral(u8 ephemeral_dst[NOISE_PUBLIC_KEY_LEN], ++ const u8 ephemeral_src[NOISE_PUBLIC_KEY_LEN], ++ u8 chaining_key[NOISE_HASH_LEN], ++ u8 hash[NOISE_HASH_LEN]) ++{ ++ if (ephemeral_dst != ephemeral_src) ++ memcpy(ephemeral_dst, ephemeral_src, NOISE_PUBLIC_KEY_LEN); ++ mix_hash(hash, ephemeral_src, NOISE_PUBLIC_KEY_LEN); ++ kdf(chaining_key, NULL, NULL, ephemeral_src, NOISE_HASH_LEN, 0, 0, ++ NOISE_PUBLIC_KEY_LEN, chaining_key); ++} ++ ++static void tai64n_now(u8 output[NOISE_TIMESTAMP_LEN]) ++{ ++ struct timespec64 now; ++ ++ ktime_get_real_ts64(&now); ++ ++ /* In order to prevent some sort of infoleak from precise timers, we ++ * round down the nanoseconds part to the closest rounded-down power of ++ * two to the maximum initiations per second allowed anyway by the ++ * implementation. 
++ */ ++ now.tv_nsec = ALIGN_DOWN(now.tv_nsec, ++ rounddown_pow_of_two(NSEC_PER_SEC / INITIATIONS_PER_SECOND)); ++ ++ /* https://cr.yp.to/libtai/tai64.html */ ++ *(__be64 *)output = cpu_to_be64(0x400000000000000aULL + now.tv_sec); ++ *(__be32 *)(output + sizeof(__be64)) = cpu_to_be32(now.tv_nsec); ++} ++ ++bool ++wg_noise_handshake_create_initiation(struct message_handshake_initiation *dst, ++ struct noise_handshake *handshake) ++{ ++ u8 timestamp[NOISE_TIMESTAMP_LEN]; ++ u8 key[NOISE_SYMMETRIC_KEY_LEN]; ++ bool ret = false; ++ ++ /* We need to wait for crng _before_ taking any locks, since ++ * curve25519_generate_secret uses get_random_bytes_wait. ++ */ ++ wait_for_random_bytes(); ++ ++ down_read(&handshake->static_identity->lock); ++ down_write(&handshake->lock); ++ ++ if (unlikely(!handshake->static_identity->has_identity)) ++ goto out; ++ ++ dst->header.type = cpu_to_le32(MESSAGE_HANDSHAKE_INITIATION); ++ ++ handshake_init(handshake->chaining_key, handshake->hash, ++ handshake->remote_static); ++ ++ /* e */ ++ curve25519_generate_secret(handshake->ephemeral_private); ++ if (!curve25519_generate_public(dst->unencrypted_ephemeral, ++ handshake->ephemeral_private)) ++ goto out; ++ message_ephemeral(dst->unencrypted_ephemeral, ++ dst->unencrypted_ephemeral, handshake->chaining_key, ++ handshake->hash); ++ ++ /* es */ ++ if (!mix_dh(handshake->chaining_key, key, handshake->ephemeral_private, ++ handshake->remote_static)) ++ goto out; ++ ++ /* s */ ++ message_encrypt(dst->encrypted_static, ++ handshake->static_identity->static_public, ++ NOISE_PUBLIC_KEY_LEN, key, handshake->hash); ++ ++ /* ss */ ++ kdf(handshake->chaining_key, key, NULL, ++ handshake->precomputed_static_static, NOISE_HASH_LEN, ++ NOISE_SYMMETRIC_KEY_LEN, 0, NOISE_PUBLIC_KEY_LEN, ++ handshake->chaining_key); ++ ++ /* {t} */ ++ tai64n_now(timestamp); ++ message_encrypt(dst->encrypted_timestamp, timestamp, ++ NOISE_TIMESTAMP_LEN, key, handshake->hash); ++ ++ dst->sender_index = wg_index_hashtable_insert( ++ handshake->entry.peer->device->index_hashtable, ++ &handshake->entry); ++ ++ handshake->state = HANDSHAKE_CREATED_INITIATION; ++ ret = true; ++ ++out: ++ up_write(&handshake->lock); ++ up_read(&handshake->static_identity->lock); ++ memzero_explicit(key, NOISE_SYMMETRIC_KEY_LEN); ++ return ret; ++} ++ ++struct wg_peer * ++wg_noise_handshake_consume_initiation(struct message_handshake_initiation *src, ++ struct wg_device *wg) ++{ ++ struct wg_peer *peer = NULL, *ret_peer = NULL; ++ struct noise_handshake *handshake; ++ bool replay_attack, flood_attack; ++ u8 key[NOISE_SYMMETRIC_KEY_LEN]; ++ u8 chaining_key[NOISE_HASH_LEN]; ++ u8 hash[NOISE_HASH_LEN]; ++ u8 s[NOISE_PUBLIC_KEY_LEN]; ++ u8 e[NOISE_PUBLIC_KEY_LEN]; ++ u8 t[NOISE_TIMESTAMP_LEN]; ++ u64 initiation_consumption; ++ ++ down_read(&wg->static_identity.lock); ++ if (unlikely(!wg->static_identity.has_identity)) ++ goto out; ++ ++ handshake_init(chaining_key, hash, wg->static_identity.static_public); ++ ++ /* e */ ++ message_ephemeral(e, src->unencrypted_ephemeral, chaining_key, hash); ++ ++ /* es */ ++ if (!mix_dh(chaining_key, key, wg->static_identity.static_private, e)) ++ goto out; ++ ++ /* s */ ++ if (!message_decrypt(s, src->encrypted_static, ++ sizeof(src->encrypted_static), key, hash)) ++ goto out; ++ ++ /* Lookup which peer we're actually talking to */ ++ peer = wg_pubkey_hashtable_lookup(wg->peer_hashtable, s); ++ if (!peer) ++ goto out; ++ handshake = &peer->handshake; ++ ++ /* ss */ ++ kdf(chaining_key, key, NULL, handshake->precomputed_static_static, ++ 
NOISE_HASH_LEN, NOISE_SYMMETRIC_KEY_LEN, 0, NOISE_PUBLIC_KEY_LEN, ++ chaining_key); ++ ++ /* {t} */ ++ if (!message_decrypt(t, src->encrypted_timestamp, ++ sizeof(src->encrypted_timestamp), key, hash)) ++ goto out; ++ ++ down_read(&handshake->lock); ++ replay_attack = memcmp(t, handshake->latest_timestamp, ++ NOISE_TIMESTAMP_LEN) <= 0; ++ flood_attack = (s64)handshake->last_initiation_consumption + ++ NSEC_PER_SEC / INITIATIONS_PER_SECOND > ++ (s64)ktime_get_coarse_boottime_ns(); ++ up_read(&handshake->lock); ++ if (replay_attack || flood_attack) ++ goto out; ++ ++ /* Success! Copy everything to peer */ ++ down_write(&handshake->lock); ++ memcpy(handshake->remote_ephemeral, e, NOISE_PUBLIC_KEY_LEN); ++ if (memcmp(t, handshake->latest_timestamp, NOISE_TIMESTAMP_LEN) > 0) ++ memcpy(handshake->latest_timestamp, t, NOISE_TIMESTAMP_LEN); ++ memcpy(handshake->hash, hash, NOISE_HASH_LEN); ++ memcpy(handshake->chaining_key, chaining_key, NOISE_HASH_LEN); ++ handshake->remote_index = src->sender_index; ++ if ((s64)(handshake->last_initiation_consumption - ++ (initiation_consumption = ktime_get_coarse_boottime_ns())) < 0) ++ handshake->last_initiation_consumption = initiation_consumption; ++ handshake->state = HANDSHAKE_CONSUMED_INITIATION; ++ up_write(&handshake->lock); ++ ret_peer = peer; ++ ++out: ++ memzero_explicit(key, NOISE_SYMMETRIC_KEY_LEN); ++ memzero_explicit(hash, NOISE_HASH_LEN); ++ memzero_explicit(chaining_key, NOISE_HASH_LEN); ++ up_read(&wg->static_identity.lock); ++ if (!ret_peer) ++ wg_peer_put(peer); ++ return ret_peer; ++} ++ ++bool wg_noise_handshake_create_response(struct message_handshake_response *dst, ++ struct noise_handshake *handshake) ++{ ++ u8 key[NOISE_SYMMETRIC_KEY_LEN]; ++ bool ret = false; ++ ++ /* We need to wait for crng _before_ taking any locks, since ++ * curve25519_generate_secret uses get_random_bytes_wait. 
++ */ ++ wait_for_random_bytes(); ++ ++ down_read(&handshake->static_identity->lock); ++ down_write(&handshake->lock); ++ ++ if (handshake->state != HANDSHAKE_CONSUMED_INITIATION) ++ goto out; ++ ++ dst->header.type = cpu_to_le32(MESSAGE_HANDSHAKE_RESPONSE); ++ dst->receiver_index = handshake->remote_index; ++ ++ /* e */ ++ curve25519_generate_secret(handshake->ephemeral_private); ++ if (!curve25519_generate_public(dst->unencrypted_ephemeral, ++ handshake->ephemeral_private)) ++ goto out; ++ message_ephemeral(dst->unencrypted_ephemeral, ++ dst->unencrypted_ephemeral, handshake->chaining_key, ++ handshake->hash); ++ ++ /* ee */ ++ if (!mix_dh(handshake->chaining_key, NULL, handshake->ephemeral_private, ++ handshake->remote_ephemeral)) ++ goto out; ++ ++ /* se */ ++ if (!mix_dh(handshake->chaining_key, NULL, handshake->ephemeral_private, ++ handshake->remote_static)) ++ goto out; ++ ++ /* psk */ ++ mix_psk(handshake->chaining_key, handshake->hash, key, ++ handshake->preshared_key); ++ ++ /* {} */ ++ message_encrypt(dst->encrypted_nothing, NULL, 0, key, handshake->hash); ++ ++ dst->sender_index = wg_index_hashtable_insert( ++ handshake->entry.peer->device->index_hashtable, ++ &handshake->entry); ++ ++ handshake->state = HANDSHAKE_CREATED_RESPONSE; ++ ret = true; ++ ++out: ++ up_write(&handshake->lock); ++ up_read(&handshake->static_identity->lock); ++ memzero_explicit(key, NOISE_SYMMETRIC_KEY_LEN); ++ return ret; ++} ++ ++struct wg_peer * ++wg_noise_handshake_consume_response(struct message_handshake_response *src, ++ struct wg_device *wg) ++{ ++ enum noise_handshake_state state = HANDSHAKE_ZEROED; ++ struct wg_peer *peer = NULL, *ret_peer = NULL; ++ struct noise_handshake *handshake; ++ u8 key[NOISE_SYMMETRIC_KEY_LEN]; ++ u8 hash[NOISE_HASH_LEN]; ++ u8 chaining_key[NOISE_HASH_LEN]; ++ u8 e[NOISE_PUBLIC_KEY_LEN]; ++ u8 ephemeral_private[NOISE_PUBLIC_KEY_LEN]; ++ u8 static_private[NOISE_PUBLIC_KEY_LEN]; ++ ++ down_read(&wg->static_identity.lock); ++ ++ if (unlikely(!wg->static_identity.has_identity)) ++ goto out; ++ ++ handshake = (struct noise_handshake *)wg_index_hashtable_lookup( ++ wg->index_hashtable, INDEX_HASHTABLE_HANDSHAKE, ++ src->receiver_index, &peer); ++ if (unlikely(!handshake)) ++ goto out; ++ ++ down_read(&handshake->lock); ++ state = handshake->state; ++ memcpy(hash, handshake->hash, NOISE_HASH_LEN); ++ memcpy(chaining_key, handshake->chaining_key, NOISE_HASH_LEN); ++ memcpy(ephemeral_private, handshake->ephemeral_private, ++ NOISE_PUBLIC_KEY_LEN); ++ up_read(&handshake->lock); ++ ++ if (state != HANDSHAKE_CREATED_INITIATION) ++ goto fail; ++ ++ /* e */ ++ message_ephemeral(e, src->unencrypted_ephemeral, chaining_key, hash); ++ ++ /* ee */ ++ if (!mix_dh(chaining_key, NULL, ephemeral_private, e)) ++ goto fail; ++ ++ /* se */ ++ if (!mix_dh(chaining_key, NULL, wg->static_identity.static_private, e)) ++ goto fail; ++ ++ /* psk */ ++ mix_psk(chaining_key, hash, key, handshake->preshared_key); ++ ++ /* {} */ ++ if (!message_decrypt(NULL, src->encrypted_nothing, ++ sizeof(src->encrypted_nothing), key, hash)) ++ goto fail; ++ ++ /* Success! Copy everything to peer */ ++ down_write(&handshake->lock); ++ /* It's important to check that the state is still the same, while we ++ * have an exclusive lock. 
++ */ ++ if (handshake->state != state) { ++ up_write(&handshake->lock); ++ goto fail; ++ } ++ memcpy(handshake->remote_ephemeral, e, NOISE_PUBLIC_KEY_LEN); ++ memcpy(handshake->hash, hash, NOISE_HASH_LEN); ++ memcpy(handshake->chaining_key, chaining_key, NOISE_HASH_LEN); ++ handshake->remote_index = src->sender_index; ++ handshake->state = HANDSHAKE_CONSUMED_RESPONSE; ++ up_write(&handshake->lock); ++ ret_peer = peer; ++ goto out; ++ ++fail: ++ wg_peer_put(peer); ++out: ++ memzero_explicit(key, NOISE_SYMMETRIC_KEY_LEN); ++ memzero_explicit(hash, NOISE_HASH_LEN); ++ memzero_explicit(chaining_key, NOISE_HASH_LEN); ++ memzero_explicit(ephemeral_private, NOISE_PUBLIC_KEY_LEN); ++ memzero_explicit(static_private, NOISE_PUBLIC_KEY_LEN); ++ up_read(&wg->static_identity.lock); ++ return ret_peer; ++} ++ ++bool wg_noise_handshake_begin_session(struct noise_handshake *handshake, ++ struct noise_keypairs *keypairs) ++{ ++ struct noise_keypair *new_keypair; ++ bool ret = false; ++ ++ down_write(&handshake->lock); ++ if (handshake->state != HANDSHAKE_CREATED_RESPONSE && ++ handshake->state != HANDSHAKE_CONSUMED_RESPONSE) ++ goto out; ++ ++ new_keypair = keypair_create(handshake->entry.peer); ++ if (!new_keypair) ++ goto out; ++ new_keypair->i_am_the_initiator = handshake->state == ++ HANDSHAKE_CONSUMED_RESPONSE; ++ new_keypair->remote_index = handshake->remote_index; ++ ++ if (new_keypair->i_am_the_initiator) ++ derive_keys(&new_keypair->sending, &new_keypair->receiving, ++ handshake->chaining_key); ++ else ++ derive_keys(&new_keypair->receiving, &new_keypair->sending, ++ handshake->chaining_key); ++ ++ handshake_zero(handshake); ++ rcu_read_lock_bh(); ++ if (likely(!READ_ONCE(container_of(handshake, struct wg_peer, ++ handshake)->is_dead))) { ++ add_new_keypair(keypairs, new_keypair); ++ net_dbg_ratelimited("%s: Keypair %llu created for peer %llu\n", ++ handshake->entry.peer->device->dev->name, ++ new_keypair->internal_id, ++ handshake->entry.peer->internal_id); ++ ret = wg_index_hashtable_replace( ++ handshake->entry.peer->device->index_hashtable, ++ &handshake->entry, &new_keypair->entry); ++ } else { ++ kzfree(new_keypair); ++ } ++ rcu_read_unlock_bh(); ++ ++out: ++ up_write(&handshake->lock); ++ return ret; ++} +--- /dev/null ++++ b/drivers/net/wireguard/noise.h +@@ -0,0 +1,137 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* ++ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
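For readers tracing the keypair rotation in add_new_keypair() and wg_noise_received_with_keypair() above, the following is a minimal standalone sketch of the same three-slot rotation (current/previous/next) in userspace C. It uses plain pointers and free() in place of RCU pointers, kref accounting and the keypair_update_lock, so it only illustrates the slot transitions, not the concurrency handling; the struct and function names are invented for the sketch.

/* Standalone sketch of the three-slot keypair rotation used above
 * (add_new_keypair / wg_noise_received_with_keypair), with plain
 * pointers instead of RCU and no locking. Compile: cc -o rotate rotate.c
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct keypair {
	unsigned long long id;
	bool i_am_the_initiator;
};

struct keypairs {
	struct keypair *current_keypair, *previous_keypair, *next_keypair;
};

static void put(struct keypair *kp)
{
	free(kp); /* The driver would kref_put() and defer freeing via RCU. */
}

static void add_new_keypair(struct keypairs *kps, struct keypair *new_kp)
{
	struct keypair *previous = kps->previous_keypair;
	struct keypair *next = kps->next_keypair;
	struct keypair *current = kps->current_keypair;

	if (new_kp->i_am_the_initiator) {
		/* Initiator: the confirmed handshake makes new_kp usable now. */
		if (next) {
			/* Demote the pending next keypair to previous, drop current. */
			kps->next_keypair = NULL;
			kps->previous_keypair = next;
			put(current);
		} else {
			kps->previous_keypair = current;
		}
		/* Drop the old previous keypair and install the new current one. */
		put(previous);
		kps->current_keypair = new_kp;
	} else {
		/* Responder: park new_kp in next until a data packet confirms it. */
		kps->next_keypair = new_kp;
		put(next);
		kps->previous_keypair = NULL;
		put(previous);
	}
}

static bool received_with_keypair(struct keypairs *kps, struct keypair *kp)
{
	if (kp != kps->next_keypair)
		return false;
	/* Confirmation arrived: next -> current, current -> previous. */
	put(kps->previous_keypair);
	kps->previous_keypair = kps->current_keypair;
	kps->current_keypair = kp;
	kps->next_keypair = NULL;
	return true;
}

int main(void)
{
	struct keypairs kps = { 0 };
	struct keypair *kp = calloc(1, sizeof(*kp));

	kp->id = 1;
	add_new_keypair(&kps, kp);            /* responder: parked in next */
	printf("confirmed: %d\n", received_with_keypair(&kps, kp));
	printf("current id: %llu\n", kps.current_keypair->id);
	put(kps.current_keypair);
	return 0;
}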
++ */ ++#ifndef _WG_NOISE_H ++#define _WG_NOISE_H ++ ++#include "messages.h" ++#include "peerlookup.h" ++ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++union noise_counter { ++ struct { ++ u64 counter; ++ unsigned long backtrack[COUNTER_BITS_TOTAL / BITS_PER_LONG]; ++ spinlock_t lock; ++ } receive; ++ atomic64_t counter; ++}; ++ ++struct noise_symmetric_key { ++ u8 key[NOISE_SYMMETRIC_KEY_LEN]; ++ union noise_counter counter; ++ u64 birthdate; ++ bool is_valid; ++}; ++ ++struct noise_keypair { ++ struct index_hashtable_entry entry; ++ struct noise_symmetric_key sending; ++ struct noise_symmetric_key receiving; ++ __le32 remote_index; ++ bool i_am_the_initiator; ++ struct kref refcount; ++ struct rcu_head rcu; ++ u64 internal_id; ++}; ++ ++struct noise_keypairs { ++ struct noise_keypair __rcu *current_keypair; ++ struct noise_keypair __rcu *previous_keypair; ++ struct noise_keypair __rcu *next_keypair; ++ spinlock_t keypair_update_lock; ++}; ++ ++struct noise_static_identity { ++ u8 static_public[NOISE_PUBLIC_KEY_LEN]; ++ u8 static_private[NOISE_PUBLIC_KEY_LEN]; ++ struct rw_semaphore lock; ++ bool has_identity; ++}; ++ ++enum noise_handshake_state { ++ HANDSHAKE_ZEROED, ++ HANDSHAKE_CREATED_INITIATION, ++ HANDSHAKE_CONSUMED_INITIATION, ++ HANDSHAKE_CREATED_RESPONSE, ++ HANDSHAKE_CONSUMED_RESPONSE ++}; ++ ++struct noise_handshake { ++ struct index_hashtable_entry entry; ++ ++ enum noise_handshake_state state; ++ u64 last_initiation_consumption; ++ ++ struct noise_static_identity *static_identity; ++ ++ u8 ephemeral_private[NOISE_PUBLIC_KEY_LEN]; ++ u8 remote_static[NOISE_PUBLIC_KEY_LEN]; ++ u8 remote_ephemeral[NOISE_PUBLIC_KEY_LEN]; ++ u8 precomputed_static_static[NOISE_PUBLIC_KEY_LEN]; ++ ++ u8 preshared_key[NOISE_SYMMETRIC_KEY_LEN]; ++ ++ u8 hash[NOISE_HASH_LEN]; ++ u8 chaining_key[NOISE_HASH_LEN]; ++ ++ u8 latest_timestamp[NOISE_TIMESTAMP_LEN]; ++ __le32 remote_index; ++ ++ /* Protects all members except the immutable (after noise_handshake_ ++ * init): remote_static, precomputed_static_static, static_identity. 
++ */ ++ struct rw_semaphore lock; ++}; ++ ++struct wg_device; ++ ++void wg_noise_init(void); ++bool wg_noise_handshake_init(struct noise_handshake *handshake, ++ struct noise_static_identity *static_identity, ++ const u8 peer_public_key[NOISE_PUBLIC_KEY_LEN], ++ const u8 peer_preshared_key[NOISE_SYMMETRIC_KEY_LEN], ++ struct wg_peer *peer); ++void wg_noise_handshake_clear(struct noise_handshake *handshake); ++static inline void wg_noise_reset_last_sent_handshake(atomic64_t *handshake_ns) ++{ ++ atomic64_set(handshake_ns, ktime_get_coarse_boottime_ns() - ++ (u64)(REKEY_TIMEOUT + 1) * NSEC_PER_SEC); ++} ++ ++void wg_noise_keypair_put(struct noise_keypair *keypair, bool unreference_now); ++struct noise_keypair *wg_noise_keypair_get(struct noise_keypair *keypair); ++void wg_noise_keypairs_clear(struct noise_keypairs *keypairs); ++bool wg_noise_received_with_keypair(struct noise_keypairs *keypairs, ++ struct noise_keypair *received_keypair); ++void wg_noise_expire_current_peer_keypairs(struct wg_peer *peer); ++ ++void wg_noise_set_static_identity_private_key( ++ struct noise_static_identity *static_identity, ++ const u8 private_key[NOISE_PUBLIC_KEY_LEN]); ++bool wg_noise_precompute_static_static(struct wg_peer *peer); ++ ++bool ++wg_noise_handshake_create_initiation(struct message_handshake_initiation *dst, ++ struct noise_handshake *handshake); ++struct wg_peer * ++wg_noise_handshake_consume_initiation(struct message_handshake_initiation *src, ++ struct wg_device *wg); ++ ++bool wg_noise_handshake_create_response(struct message_handshake_response *dst, ++ struct noise_handshake *handshake); ++struct wg_peer * ++wg_noise_handshake_consume_response(struct message_handshake_response *src, ++ struct wg_device *wg); ++ ++bool wg_noise_handshake_begin_session(struct noise_handshake *handshake, ++ struct noise_keypairs *keypairs); ++ ++#endif /* _WG_NOISE_H */ +--- /dev/null ++++ b/drivers/net/wireguard/peer.c +@@ -0,0 +1,240 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
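wg_noise_keypair_get() above and wg_peer_get_maybe_zero() in peer.c below both refuse to take a reference once the count has already dropped to zero, so an object found through an RCU lookup cannot be revived while it is being torn down. Below is a small userspace sketch of that "get unless zero" pattern using C11 atomics; the type and function names are invented for the sketch and RCU is left out entirely.

/* Userspace sketch of the "get unless zero" refcount pattern used by
 * wg_noise_keypair_get()/wg_peer_get_maybe_zero(): a lookup may only
 * take a new reference if the count is still nonzero, otherwise the
 * object is already dying. Compile: cc -std=c11 -o ref ref.c
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct obj {
	atomic_uint refcount;
};

/* Returns true if a reference was taken, false if the object is dying. */
static bool get_unless_zero(struct obj *o)
{
	unsigned int c = atomic_load(&o->refcount);

	while (c != 0) {
		if (atomic_compare_exchange_weak(&o->refcount, &c, c + 1))
			return true;
		/* c was reloaded by the failed exchange; retry. */
	}
	return false;
}

static void put(struct obj *o)
{
	if (atomic_fetch_sub(&o->refcount, 1) == 1)
		printf("last reference dropped, object would be freed\n");
}

int main(void)
{
	struct obj o = { .refcount = 1 };

	printf("got: %d\n", get_unless_zero(&o)); /* 1: count was nonzero */
	put(&o);
	put(&o);                                  /* count hits zero */
	printf("got: %d\n", get_unless_zero(&o)); /* 0: object is dying */
	return 0;
}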
++ */ ++ ++#include "peer.h" ++#include "device.h" ++#include "queueing.h" ++#include "timers.h" ++#include "peerlookup.h" ++#include "noise.h" ++ ++#include ++#include ++#include ++#include ++ ++static atomic64_t peer_counter = ATOMIC64_INIT(0); ++ ++struct wg_peer *wg_peer_create(struct wg_device *wg, ++ const u8 public_key[NOISE_PUBLIC_KEY_LEN], ++ const u8 preshared_key[NOISE_SYMMETRIC_KEY_LEN]) ++{ ++ struct wg_peer *peer; ++ int ret = -ENOMEM; ++ ++ lockdep_assert_held(&wg->device_update_lock); ++ ++ if (wg->num_peers >= MAX_PEERS_PER_DEVICE) ++ return ERR_PTR(ret); ++ ++ peer = kzalloc(sizeof(*peer), GFP_KERNEL); ++ if (unlikely(!peer)) ++ return ERR_PTR(ret); ++ peer->device = wg; ++ ++ if (!wg_noise_handshake_init(&peer->handshake, &wg->static_identity, ++ public_key, preshared_key, peer)) { ++ ret = -EKEYREJECTED; ++ goto err_1; ++ } ++ if (dst_cache_init(&peer->endpoint_cache, GFP_KERNEL)) ++ goto err_1; ++ if (wg_packet_queue_init(&peer->tx_queue, wg_packet_tx_worker, false, ++ MAX_QUEUED_PACKETS)) ++ goto err_2; ++ if (wg_packet_queue_init(&peer->rx_queue, NULL, false, ++ MAX_QUEUED_PACKETS)) ++ goto err_3; ++ ++ peer->internal_id = atomic64_inc_return(&peer_counter); ++ peer->serial_work_cpu = nr_cpumask_bits; ++ wg_cookie_init(&peer->latest_cookie); ++ wg_timers_init(peer); ++ wg_cookie_checker_precompute_peer_keys(peer); ++ spin_lock_init(&peer->keypairs.keypair_update_lock); ++ INIT_WORK(&peer->transmit_handshake_work, ++ wg_packet_handshake_send_worker); ++ rwlock_init(&peer->endpoint_lock); ++ kref_init(&peer->refcount); ++ skb_queue_head_init(&peer->staged_packet_queue); ++ wg_noise_reset_last_sent_handshake(&peer->last_sent_handshake); ++ set_bit(NAPI_STATE_NO_BUSY_POLL, &peer->napi.state); ++ netif_napi_add(wg->dev, &peer->napi, wg_packet_rx_poll, ++ NAPI_POLL_WEIGHT); ++ napi_enable(&peer->napi); ++ list_add_tail(&peer->peer_list, &wg->peer_list); ++ INIT_LIST_HEAD(&peer->allowedips_list); ++ wg_pubkey_hashtable_add(wg->peer_hashtable, peer); ++ ++wg->num_peers; ++ pr_debug("%s: Peer %llu created\n", wg->dev->name, peer->internal_id); ++ return peer; ++ ++err_3: ++ wg_packet_queue_free(&peer->tx_queue, false); ++err_2: ++ dst_cache_destroy(&peer->endpoint_cache); ++err_1: ++ kfree(peer); ++ return ERR_PTR(ret); ++} ++ ++struct wg_peer *wg_peer_get_maybe_zero(struct wg_peer *peer) ++{ ++ RCU_LOCKDEP_WARN(!rcu_read_lock_bh_held(), ++ "Taking peer reference without holding the RCU read lock"); ++ if (unlikely(!peer || !kref_get_unless_zero(&peer->refcount))) ++ return NULL; ++ return peer; ++} ++ ++static void peer_make_dead(struct wg_peer *peer) ++{ ++ /* Remove from configuration-time lookup structures. */ ++ list_del_init(&peer->peer_list); ++ wg_allowedips_remove_by_peer(&peer->device->peer_allowedips, peer, ++ &peer->device->device_update_lock); ++ wg_pubkey_hashtable_remove(peer->device->peer_hashtable, peer); ++ ++ /* Mark as dead, so that we don't allow jumping contexts after. */ ++ WRITE_ONCE(peer->is_dead, true); ++ ++ /* The caller must now synchronize_rcu() for this to take effect. */ ++} ++ ++static void peer_remove_after_dead(struct wg_peer *peer) ++{ ++ WARN_ON(!peer->is_dead); ++ ++ /* No more keypairs can be created for this peer, since is_dead protects ++ * add_new_keypair, so we can now destroy existing ones. ++ */ ++ wg_noise_keypairs_clear(&peer->keypairs); ++ ++ /* Destroy all ongoing timers that were in-flight at the beginning of ++ * this function. 
++ */ ++ wg_timers_stop(peer); ++ ++ /* The transition between packet encryption/decryption queues isn't ++ * guarded by is_dead, but each reference's life is strictly bounded by ++ * two generations: once for parallel crypto and once for serial ++ * ingestion, so we can simply flush twice, and be sure that we no ++ * longer have references inside these queues. ++ */ ++ ++ /* a) For encrypt/decrypt. */ ++ flush_workqueue(peer->device->packet_crypt_wq); ++ /* b.1) For send (but not receive, since that's napi). */ ++ flush_workqueue(peer->device->packet_crypt_wq); ++ /* b.2.1) For receive (but not send, since that's wq). */ ++ napi_disable(&peer->napi); ++ /* b.2.1) It's now safe to remove the napi struct, which must be done ++ * here from process context. ++ */ ++ netif_napi_del(&peer->napi); ++ ++ /* Ensure any workstructs we own (like transmit_handshake_work or ++ * clear_peer_work) no longer are in use. ++ */ ++ flush_workqueue(peer->device->handshake_send_wq); ++ ++ /* After the above flushes, a peer might still be active in a few ++ * different contexts: 1) from xmit(), before hitting is_dead and ++ * returning, 2) from wg_packet_consume_data(), before hitting is_dead ++ * and returning, 3) from wg_receive_handshake_packet() after a point ++ * where it has processed an incoming handshake packet, but where ++ * all calls to pass it off to timers fails because of is_dead. We won't ++ * have new references in (1) eventually, because we're removed from ++ * allowedips; we won't have new references in (2) eventually, because ++ * wg_index_hashtable_lookup will always return NULL, since we removed ++ * all existing keypairs and no more can be created; we won't have new ++ * references in (3) eventually, because we're removed from the pubkey ++ * hash table, which allows for a maximum of one handshake response, ++ * via the still-uncleared index hashtable entry, but not more than one, ++ * and in wg_cookie_message_consume, the lookup eventually gets a peer ++ * with a refcount of zero, so no new reference is taken. ++ */ ++ ++ --peer->device->num_peers; ++ wg_peer_put(peer); ++} ++ ++/* We have a separate "remove" function make sure that all active places where ++ * a peer is currently operating will eventually come to an end and not pass ++ * their reference onto another context. ++ */ ++void wg_peer_remove(struct wg_peer *peer) ++{ ++ if (unlikely(!peer)) ++ return; ++ lockdep_assert_held(&peer->device->device_update_lock); ++ ++ peer_make_dead(peer); ++ synchronize_rcu(); ++ peer_remove_after_dead(peer); ++} ++ ++void wg_peer_remove_all(struct wg_device *wg) ++{ ++ struct wg_peer *peer, *temp; ++ LIST_HEAD(dead_peers); ++ ++ lockdep_assert_held(&wg->device_update_lock); ++ ++ /* Avoid having to traverse individually for each one. */ ++ wg_allowedips_free(&wg->peer_allowedips, &wg->device_update_lock); ++ ++ list_for_each_entry_safe(peer, temp, &wg->peer_list, peer_list) { ++ peer_make_dead(peer); ++ list_add_tail(&peer->peer_list, &dead_peers); ++ } ++ synchronize_rcu(); ++ list_for_each_entry_safe(peer, temp, &dead_peers, peer_list) ++ peer_remove_after_dead(peer); ++} ++ ++static void rcu_release(struct rcu_head *rcu) ++{ ++ struct wg_peer *peer = container_of(rcu, struct wg_peer, rcu); ++ ++ dst_cache_destroy(&peer->endpoint_cache); ++ wg_packet_queue_free(&peer->rx_queue, false); ++ wg_packet_queue_free(&peer->tx_queue, false); ++ ++ /* The final zeroing takes care of clearing any remaining handshake key ++ * material and other potentially sensitive information. 
++ */ ++ kzfree(peer); ++} ++ ++static void kref_release(struct kref *refcount) ++{ ++ struct wg_peer *peer = container_of(refcount, struct wg_peer, refcount); ++ ++ pr_debug("%s: Peer %llu (%pISpfsc) destroyed\n", ++ peer->device->dev->name, peer->internal_id, ++ &peer->endpoint.addr); ++ ++ /* Remove ourself from dynamic runtime lookup structures, now that the ++ * last reference is gone. ++ */ ++ wg_index_hashtable_remove(peer->device->index_hashtable, ++ &peer->handshake.entry); ++ ++ /* Remove any lingering packets that didn't have a chance to be ++ * transmitted. ++ */ ++ wg_packet_purge_staged_packets(peer); ++ ++ /* Free the memory used. */ ++ call_rcu(&peer->rcu, rcu_release); ++} ++ ++void wg_peer_put(struct wg_peer *peer) ++{ ++ if (unlikely(!peer)) ++ return; ++ kref_put(&peer->refcount, kref_release); ++} +--- /dev/null ++++ b/drivers/net/wireguard/peer.h +@@ -0,0 +1,83 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* ++ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. ++ */ ++ ++#ifndef _WG_PEER_H ++#define _WG_PEER_H ++ ++#include "device.h" ++#include "noise.h" ++#include "cookie.h" ++ ++#include ++#include ++#include ++#include ++#include ++ ++struct wg_device; ++ ++struct endpoint { ++ union { ++ struct sockaddr addr; ++ struct sockaddr_in addr4; ++ struct sockaddr_in6 addr6; ++ }; ++ union { ++ struct { ++ struct in_addr src4; ++ /* Essentially the same as addr6->scope_id */ ++ int src_if4; ++ }; ++ struct in6_addr src6; ++ }; ++}; ++ ++struct wg_peer { ++ struct wg_device *device; ++ struct crypt_queue tx_queue, rx_queue; ++ struct sk_buff_head staged_packet_queue; ++ int serial_work_cpu; ++ struct noise_keypairs keypairs; ++ struct endpoint endpoint; ++ struct dst_cache endpoint_cache; ++ rwlock_t endpoint_lock; ++ struct noise_handshake handshake; ++ atomic64_t last_sent_handshake; ++ struct work_struct transmit_handshake_work, clear_peer_work; ++ struct cookie latest_cookie; ++ struct hlist_node pubkey_hash; ++ u64 rx_bytes, tx_bytes; ++ struct timer_list timer_retransmit_handshake, timer_send_keepalive; ++ struct timer_list timer_new_handshake, timer_zero_key_material; ++ struct timer_list timer_persistent_keepalive; ++ unsigned int timer_handshake_attempts; ++ u16 persistent_keepalive_interval; ++ bool timer_need_another_keepalive; ++ bool sent_lastminute_handshake; ++ struct timespec64 walltime_last_handshake; ++ struct kref refcount; ++ struct rcu_head rcu; ++ struct list_head peer_list; ++ struct list_head allowedips_list; ++ u64 internal_id; ++ struct napi_struct napi; ++ bool is_dead; ++}; ++ ++struct wg_peer *wg_peer_create(struct wg_device *wg, ++ const u8 public_key[NOISE_PUBLIC_KEY_LEN], ++ const u8 preshared_key[NOISE_SYMMETRIC_KEY_LEN]); ++ ++struct wg_peer *__must_check wg_peer_get_maybe_zero(struct wg_peer *peer); ++static inline struct wg_peer *wg_peer_get(struct wg_peer *peer) ++{ ++ kref_get(&peer->refcount); ++ return peer; ++} ++void wg_peer_put(struct wg_peer *peer); ++void wg_peer_remove(struct wg_peer *peer); ++void wg_peer_remove_all(struct wg_device *wg); ++ ++#endif /* _WG_PEER_H */ +--- /dev/null ++++ b/drivers/net/wireguard/peerlookup.c +@@ -0,0 +1,221 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
++ */ ++ ++#include "peerlookup.h" ++#include "peer.h" ++#include "noise.h" ++ ++static struct hlist_head *pubkey_bucket(struct pubkey_hashtable *table, ++ const u8 pubkey[NOISE_PUBLIC_KEY_LEN]) ++{ ++ /* siphash gives us a secure 64bit number based on a random key. Since ++ * the bits are uniformly distributed, we can then mask off to get the ++ * bits we need. ++ */ ++ const u64 hash = siphash(pubkey, NOISE_PUBLIC_KEY_LEN, &table->key); ++ ++ return &table->hashtable[hash & (HASH_SIZE(table->hashtable) - 1)]; ++} ++ ++struct pubkey_hashtable *wg_pubkey_hashtable_alloc(void) ++{ ++ struct pubkey_hashtable *table = kvmalloc(sizeof(*table), GFP_KERNEL); ++ ++ if (!table) ++ return NULL; ++ ++ get_random_bytes(&table->key, sizeof(table->key)); ++ hash_init(table->hashtable); ++ mutex_init(&table->lock); ++ return table; ++} ++ ++void wg_pubkey_hashtable_add(struct pubkey_hashtable *table, ++ struct wg_peer *peer) ++{ ++ mutex_lock(&table->lock); ++ hlist_add_head_rcu(&peer->pubkey_hash, ++ pubkey_bucket(table, peer->handshake.remote_static)); ++ mutex_unlock(&table->lock); ++} ++ ++void wg_pubkey_hashtable_remove(struct pubkey_hashtable *table, ++ struct wg_peer *peer) ++{ ++ mutex_lock(&table->lock); ++ hlist_del_init_rcu(&peer->pubkey_hash); ++ mutex_unlock(&table->lock); ++} ++ ++/* Returns a strong reference to a peer */ ++struct wg_peer * ++wg_pubkey_hashtable_lookup(struct pubkey_hashtable *table, ++ const u8 pubkey[NOISE_PUBLIC_KEY_LEN]) ++{ ++ struct wg_peer *iter_peer, *peer = NULL; ++ ++ rcu_read_lock_bh(); ++ hlist_for_each_entry_rcu_bh(iter_peer, pubkey_bucket(table, pubkey), ++ pubkey_hash) { ++ if (!memcmp(pubkey, iter_peer->handshake.remote_static, ++ NOISE_PUBLIC_KEY_LEN)) { ++ peer = iter_peer; ++ break; ++ } ++ } ++ peer = wg_peer_get_maybe_zero(peer); ++ rcu_read_unlock_bh(); ++ return peer; ++} ++ ++static struct hlist_head *index_bucket(struct index_hashtable *table, ++ const __le32 index) ++{ ++ /* Since the indices are random and thus all bits are uniformly ++ * distributed, we can find its bucket simply by masking. ++ */ ++ return &table->hashtable[(__force u32)index & ++ (HASH_SIZE(table->hashtable) - 1)]; ++} ++ ++struct index_hashtable *wg_index_hashtable_alloc(void) ++{ ++ struct index_hashtable *table = kvmalloc(sizeof(*table), GFP_KERNEL); ++ ++ if (!table) ++ return NULL; ++ ++ hash_init(table->hashtable); ++ spin_lock_init(&table->lock); ++ return table; ++} ++ ++/* At the moment, we limit ourselves to 2^20 total peers, which generally might ++ * amount to 2^20*3 items in this hashtable. The algorithm below works by ++ * picking a random number and testing it. We can see that these limits mean we ++ * usually succeed pretty quickly: ++ * ++ * >>> def calculation(tries, size): ++ * ... return (size / 2**32)**(tries - 1) * (1 - (size / 2**32)) ++ * ... ++ * >>> calculation(1, 2**20 * 3) ++ * 0.999267578125 ++ * >>> calculation(2, 2**20 * 3) ++ * 0.0007318854331970215 ++ * >>> calculation(3, 2**20 * 3) ++ * 5.360489012673497e-07 ++ * >>> calculation(4, 2**20 * 3) ++ * 3.9261394135792216e-10 ++ * ++ * At the moment, we don't do any masking, so this algorithm isn't exactly ++ * constant time in either the random guessing or in the hash list lookup. We ++ * could require a minimum of 3 tries, which would successfully mask the ++ * guessing. this would not, however, help with the growing hash lengths, which ++ * is another thing to consider moving forward. 
++ */ ++ ++__le32 wg_index_hashtable_insert(struct index_hashtable *table, ++ struct index_hashtable_entry *entry) ++{ ++ struct index_hashtable_entry *existing_entry; ++ ++ spin_lock_bh(&table->lock); ++ hlist_del_init_rcu(&entry->index_hash); ++ spin_unlock_bh(&table->lock); ++ ++ rcu_read_lock_bh(); ++ ++search_unused_slot: ++ /* First we try to find an unused slot, randomly, while unlocked. */ ++ entry->index = (__force __le32)get_random_u32(); ++ hlist_for_each_entry_rcu_bh(existing_entry, ++ index_bucket(table, entry->index), ++ index_hash) { ++ if (existing_entry->index == entry->index) ++ /* If it's already in use, we continue searching. */ ++ goto search_unused_slot; ++ } ++ ++ /* Once we've found an unused slot, we lock it, and then double-check ++ * that nobody else stole it from us. ++ */ ++ spin_lock_bh(&table->lock); ++ hlist_for_each_entry_rcu_bh(existing_entry, ++ index_bucket(table, entry->index), ++ index_hash) { ++ if (existing_entry->index == entry->index) { ++ spin_unlock_bh(&table->lock); ++ /* If it was stolen, we start over. */ ++ goto search_unused_slot; ++ } ++ } ++ /* Otherwise, we know we have it exclusively (since we're locked), ++ * so we insert. ++ */ ++ hlist_add_head_rcu(&entry->index_hash, ++ index_bucket(table, entry->index)); ++ spin_unlock_bh(&table->lock); ++ ++ rcu_read_unlock_bh(); ++ ++ return entry->index; ++} ++ ++bool wg_index_hashtable_replace(struct index_hashtable *table, ++ struct index_hashtable_entry *old, ++ struct index_hashtable_entry *new) ++{ ++ if (unlikely(hlist_unhashed(&old->index_hash))) ++ return false; ++ spin_lock_bh(&table->lock); ++ new->index = old->index; ++ hlist_replace_rcu(&old->index_hash, &new->index_hash); ++ ++ /* Calling init here NULLs out index_hash, and in fact after this ++ * function returns, it's theoretically possible for this to get ++ * reinserted elsewhere. That means the RCU lookup below might either ++ * terminate early or jump between buckets, in which case the packet ++ * simply gets dropped, which isn't terrible. ++ */ ++ INIT_HLIST_NODE(&old->index_hash); ++ spin_unlock_bh(&table->lock); ++ return true; ++} ++ ++void wg_index_hashtable_remove(struct index_hashtable *table, ++ struct index_hashtable_entry *entry) ++{ ++ spin_lock_bh(&table->lock); ++ hlist_del_init_rcu(&entry->index_hash); ++ spin_unlock_bh(&table->lock); ++} ++ ++/* Returns a strong reference to a entry->peer */ ++struct index_hashtable_entry * ++wg_index_hashtable_lookup(struct index_hashtable *table, ++ const enum index_hashtable_type type_mask, ++ const __le32 index, struct wg_peer **peer) ++{ ++ struct index_hashtable_entry *iter_entry, *entry = NULL; ++ ++ rcu_read_lock_bh(); ++ hlist_for_each_entry_rcu_bh(iter_entry, index_bucket(table, index), ++ index_hash) { ++ if (iter_entry->index == index) { ++ if (likely(iter_entry->type & type_mask)) ++ entry = iter_entry; ++ break; ++ } ++ } ++ if (likely(entry)) { ++ entry->peer = wg_peer_get_maybe_zero(entry->peer); ++ if (likely(entry->peer)) ++ *peer = entry->peer; ++ else ++ entry = NULL; ++ } ++ rcu_read_unlock_bh(); ++ return entry; ++} +--- /dev/null ++++ b/drivers/net/wireguard/peerlookup.h +@@ -0,0 +1,64 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* ++ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
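The comment above wg_index_hashtable_insert() estimates, with a short Python snippet, how many random 32-bit indices the insertion loop typically has to try before it finds a free one. The same arithmetic as a standalone C program, using only the formula and the 2^20 * 3 limit quoted in that comment:

/* Reproduces the probability table from the comment above
 * wg_index_hashtable_insert(): chance that exactly `tries` random
 * 32-bit indices are needed when at most 2^20 * 3 are occupied.
 * Compile: cc -o calc calc.c -lm
 */
#include <math.h>
#include <stdio.h>

static double calculation(int tries, double size)
{
	double p_taken = size / 4294967296.0; /* size / 2^32 */

	return pow(p_taken, tries - 1) * (1.0 - p_taken);
}

int main(void)
{
	const double size = (1 << 20) * 3.0; /* 2^20 peers, ~3 entries each */

	for (int tries = 1; tries <= 4; ++tries)
		printf("P(exactly %d tries) = %.12g\n",
		       tries, calculation(tries, size));
	return 0;
}

The first value printed matches the 0.999267578125 quoted in the comment, i.e. the random guess succeeds on the first try over 99.9% of the time at the configured peer limit.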
++ */ ++ ++#ifndef _WG_PEERLOOKUP_H ++#define _WG_PEERLOOKUP_H ++ ++#include "messages.h" ++ ++#include ++#include ++#include ++ ++struct wg_peer; ++ ++struct pubkey_hashtable { ++ /* TODO: move to rhashtable */ ++ DECLARE_HASHTABLE(hashtable, 11); ++ siphash_key_t key; ++ struct mutex lock; ++}; ++ ++struct pubkey_hashtable *wg_pubkey_hashtable_alloc(void); ++void wg_pubkey_hashtable_add(struct pubkey_hashtable *table, ++ struct wg_peer *peer); ++void wg_pubkey_hashtable_remove(struct pubkey_hashtable *table, ++ struct wg_peer *peer); ++struct wg_peer * ++wg_pubkey_hashtable_lookup(struct pubkey_hashtable *table, ++ const u8 pubkey[NOISE_PUBLIC_KEY_LEN]); ++ ++struct index_hashtable { ++ /* TODO: move to rhashtable */ ++ DECLARE_HASHTABLE(hashtable, 13); ++ spinlock_t lock; ++}; ++ ++enum index_hashtable_type { ++ INDEX_HASHTABLE_HANDSHAKE = 1U << 0, ++ INDEX_HASHTABLE_KEYPAIR = 1U << 1 ++}; ++ ++struct index_hashtable_entry { ++ struct wg_peer *peer; ++ struct hlist_node index_hash; ++ enum index_hashtable_type type; ++ __le32 index; ++}; ++ ++struct index_hashtable *wg_index_hashtable_alloc(void); ++__le32 wg_index_hashtable_insert(struct index_hashtable *table, ++ struct index_hashtable_entry *entry); ++bool wg_index_hashtable_replace(struct index_hashtable *table, ++ struct index_hashtable_entry *old, ++ struct index_hashtable_entry *new); ++void wg_index_hashtable_remove(struct index_hashtable *table, ++ struct index_hashtable_entry *entry); ++struct index_hashtable_entry * ++wg_index_hashtable_lookup(struct index_hashtable *table, ++ const enum index_hashtable_type type_mask, ++ const __le32 index, struct wg_peer **peer); ++ ++#endif /* _WG_PEERLOOKUP_H */ +--- /dev/null ++++ b/drivers/net/wireguard/queueing.c +@@ -0,0 +1,53 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. ++ */ ++ ++#include "queueing.h" ++ ++struct multicore_worker __percpu * ++wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr) ++{ ++ int cpu; ++ struct multicore_worker __percpu *worker = ++ alloc_percpu(struct multicore_worker); ++ ++ if (!worker) ++ return NULL; ++ ++ for_each_possible_cpu(cpu) { ++ per_cpu_ptr(worker, cpu)->ptr = ptr; ++ INIT_WORK(&per_cpu_ptr(worker, cpu)->work, function); ++ } ++ return worker; ++} ++ ++int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function, ++ bool multicore, unsigned int len) ++{ ++ int ret; ++ ++ memset(queue, 0, sizeof(*queue)); ++ ret = ptr_ring_init(&queue->ring, len, GFP_KERNEL); ++ if (ret) ++ return ret; ++ if (function) { ++ if (multicore) { ++ queue->worker = wg_packet_percpu_multicore_worker_alloc( ++ function, queue); ++ if (!queue->worker) ++ return -ENOMEM; ++ } else { ++ INIT_WORK(&queue->work, function); ++ } ++ } ++ return 0; ++} ++ ++void wg_packet_queue_free(struct crypt_queue *queue, bool multicore) ++{ ++ if (multicore) ++ free_percpu(queue->worker); ++ WARN_ON(!__ptr_ring_empty(&queue->ring)); ++ ptr_ring_cleanup(&queue->ring, NULL); ++} +--- /dev/null ++++ b/drivers/net/wireguard/queueing.h +@@ -0,0 +1,197 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* ++ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
++ */ ++ ++#ifndef _WG_QUEUEING_H ++#define _WG_QUEUEING_H ++ ++#include "peer.h" ++#include ++#include ++#include ++#include ++ ++struct wg_device; ++struct wg_peer; ++struct multicore_worker; ++struct crypt_queue; ++struct sk_buff; ++ ++/* queueing.c APIs: */ ++int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function, ++ bool multicore, unsigned int len); ++void wg_packet_queue_free(struct crypt_queue *queue, bool multicore); ++struct multicore_worker __percpu * ++wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr); ++ ++/* receive.c APIs: */ ++void wg_packet_receive(struct wg_device *wg, struct sk_buff *skb); ++void wg_packet_handshake_receive_worker(struct work_struct *work); ++/* NAPI poll function: */ ++int wg_packet_rx_poll(struct napi_struct *napi, int budget); ++/* Workqueue worker: */ ++void wg_packet_decrypt_worker(struct work_struct *work); ++ ++/* send.c APIs: */ ++void wg_packet_send_queued_handshake_initiation(struct wg_peer *peer, ++ bool is_retry); ++void wg_packet_send_handshake_response(struct wg_peer *peer); ++void wg_packet_send_handshake_cookie(struct wg_device *wg, ++ struct sk_buff *initiating_skb, ++ __le32 sender_index); ++void wg_packet_send_keepalive(struct wg_peer *peer); ++void wg_packet_purge_staged_packets(struct wg_peer *peer); ++void wg_packet_send_staged_packets(struct wg_peer *peer); ++/* Workqueue workers: */ ++void wg_packet_handshake_send_worker(struct work_struct *work); ++void wg_packet_tx_worker(struct work_struct *work); ++void wg_packet_encrypt_worker(struct work_struct *work); ++ ++enum packet_state { ++ PACKET_STATE_UNCRYPTED, ++ PACKET_STATE_CRYPTED, ++ PACKET_STATE_DEAD ++}; ++ ++struct packet_cb { ++ u64 nonce; ++ struct noise_keypair *keypair; ++ atomic_t state; ++ u32 mtu; ++ u8 ds; ++}; ++ ++#define PACKET_CB(skb) ((struct packet_cb *)((skb)->cb)) ++#define PACKET_PEER(skb) (PACKET_CB(skb)->keypair->entry.peer) ++ ++/* Returns either the correct skb->protocol value, or 0 if invalid. 
*/ ++static inline __be16 wg_skb_examine_untrusted_ip_hdr(struct sk_buff *skb) ++{ ++ if (skb_network_header(skb) >= skb->head && ++ (skb_network_header(skb) + sizeof(struct iphdr)) <= ++ skb_tail_pointer(skb) && ++ ip_hdr(skb)->version == 4) ++ return htons(ETH_P_IP); ++ if (skb_network_header(skb) >= skb->head && ++ (skb_network_header(skb) + sizeof(struct ipv6hdr)) <= ++ skb_tail_pointer(skb) && ++ ipv6_hdr(skb)->version == 6) ++ return htons(ETH_P_IPV6); ++ return 0; ++} ++ ++static inline void wg_reset_packet(struct sk_buff *skb) ++{ ++ const int pfmemalloc = skb->pfmemalloc; ++ ++ skb_scrub_packet(skb, true); ++ memset(&skb->headers_start, 0, ++ offsetof(struct sk_buff, headers_end) - ++ offsetof(struct sk_buff, headers_start)); ++ skb->pfmemalloc = pfmemalloc; ++ skb->queue_mapping = 0; ++ skb->nohdr = 0; ++ skb->peeked = 0; ++ skb->mac_len = 0; ++ skb->dev = NULL; ++#ifdef CONFIG_NET_SCHED ++ skb->tc_index = 0; ++#endif ++ skb_reset_redirect(skb); ++ skb->hdr_len = skb_headroom(skb); ++ skb_reset_mac_header(skb); ++ skb_reset_network_header(skb); ++ skb_reset_transport_header(skb); ++ skb_probe_transport_header(skb); ++ skb_reset_inner_headers(skb); ++} ++ ++static inline int wg_cpumask_choose_online(int *stored_cpu, unsigned int id) ++{ ++ unsigned int cpu = *stored_cpu, cpu_index, i; ++ ++ if (unlikely(cpu == nr_cpumask_bits || ++ !cpumask_test_cpu(cpu, cpu_online_mask))) { ++ cpu_index = id % cpumask_weight(cpu_online_mask); ++ cpu = cpumask_first(cpu_online_mask); ++ for (i = 0; i < cpu_index; ++i) ++ cpu = cpumask_next(cpu, cpu_online_mask); ++ *stored_cpu = cpu; ++ } ++ return cpu; ++} ++ ++/* This function is racy, in the sense that next is unlocked, so it could return ++ * the same CPU twice. A race-free version of this would be to instead store an ++ * atomic sequence number, do an increment-and-return, and then iterate through ++ * every possible CPU until we get to that index -- choose_cpu. However that's ++ * a bit slower, and it doesn't seem like this potential race actually ++ * introduces any performance loss, so we live with it. ++ */ ++static inline int wg_cpumask_next_online(int *next) ++{ ++ int cpu = *next; ++ ++ while (unlikely(!cpumask_test_cpu(cpu, cpu_online_mask))) ++ cpu = cpumask_next(cpu, cpu_online_mask) % nr_cpumask_bits; ++ *next = cpumask_next(cpu, cpu_online_mask) % nr_cpumask_bits; ++ return cpu; ++} ++ ++static inline int wg_queue_enqueue_per_device_and_peer( ++ struct crypt_queue *device_queue, struct crypt_queue *peer_queue, ++ struct sk_buff *skb, struct workqueue_struct *wq, int *next_cpu) ++{ ++ int cpu; ++ ++ atomic_set_release(&PACKET_CB(skb)->state, PACKET_STATE_UNCRYPTED); ++ /* We first queue this up for the peer ingestion, but the consumer ++ * will wait for the state to change to CRYPTED or DEAD before. ++ */ ++ if (unlikely(ptr_ring_produce_bh(&peer_queue->ring, skb))) ++ return -ENOSPC; ++ /* Then we queue it up in the device queue, which consumes the ++ * packet as soon as it can. ++ */ ++ cpu = wg_cpumask_next_online(next_cpu); ++ if (unlikely(ptr_ring_produce_bh(&device_queue->ring, skb))) ++ return -EPIPE; ++ queue_work_on(cpu, wq, &per_cpu_ptr(device_queue->worker, cpu)->work); ++ return 0; ++} ++ ++static inline void wg_queue_enqueue_per_peer(struct crypt_queue *queue, ++ struct sk_buff *skb, ++ enum packet_state state) ++{ ++ /* We take a reference, because as soon as we call atomic_set, the ++ * peer can be freed from below us. 
++ */ ++ struct wg_peer *peer = wg_peer_get(PACKET_PEER(skb)); ++ ++ atomic_set_release(&PACKET_CB(skb)->state, state); ++ queue_work_on(wg_cpumask_choose_online(&peer->serial_work_cpu, ++ peer->internal_id), ++ peer->device->packet_crypt_wq, &queue->work); ++ wg_peer_put(peer); ++} ++ ++static inline void wg_queue_enqueue_per_peer_napi(struct sk_buff *skb, ++ enum packet_state state) ++{ ++ /* We take a reference, because as soon as we call atomic_set, the ++ * peer can be freed from below us. ++ */ ++ struct wg_peer *peer = wg_peer_get(PACKET_PEER(skb)); ++ ++ atomic_set_release(&PACKET_CB(skb)->state, state); ++ napi_schedule(&peer->napi); ++ wg_peer_put(peer); ++} ++ ++#ifdef DEBUG ++bool wg_packet_counter_selftest(void); ++#endif ++ ++#endif /* _WG_QUEUEING_H */ +--- /dev/null ++++ b/drivers/net/wireguard/ratelimiter.c +@@ -0,0 +1,223 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. ++ */ ++ ++#include "ratelimiter.h" ++#include ++#include ++#include ++#include ++ ++static struct kmem_cache *entry_cache; ++static hsiphash_key_t key; ++static spinlock_t table_lock = __SPIN_LOCK_UNLOCKED("ratelimiter_table_lock"); ++static DEFINE_MUTEX(init_lock); ++static u64 init_refcnt; /* Protected by init_lock, hence not atomic. */ ++static atomic_t total_entries = ATOMIC_INIT(0); ++static unsigned int max_entries, table_size; ++static void wg_ratelimiter_gc_entries(struct work_struct *); ++static DECLARE_DEFERRABLE_WORK(gc_work, wg_ratelimiter_gc_entries); ++static struct hlist_head *table_v4; ++#if IS_ENABLED(CONFIG_IPV6) ++static struct hlist_head *table_v6; ++#endif ++ ++struct ratelimiter_entry { ++ u64 last_time_ns, tokens, ip; ++ void *net; ++ spinlock_t lock; ++ struct hlist_node hash; ++ struct rcu_head rcu; ++}; ++ ++enum { ++ PACKETS_PER_SECOND = 20, ++ PACKETS_BURSTABLE = 5, ++ PACKET_COST = NSEC_PER_SEC / PACKETS_PER_SECOND, ++ TOKEN_MAX = PACKET_COST * PACKETS_BURSTABLE ++}; ++ ++static void entry_free(struct rcu_head *rcu) ++{ ++ kmem_cache_free(entry_cache, ++ container_of(rcu, struct ratelimiter_entry, rcu)); ++ atomic_dec(&total_entries); ++} ++ ++static void entry_uninit(struct ratelimiter_entry *entry) ++{ ++ hlist_del_rcu(&entry->hash); ++ call_rcu(&entry->rcu, entry_free); ++} ++ ++/* Calling this function with a NULL work uninits all entries. */ ++static void wg_ratelimiter_gc_entries(struct work_struct *work) ++{ ++ const u64 now = ktime_get_coarse_boottime_ns(); ++ struct ratelimiter_entry *entry; ++ struct hlist_node *temp; ++ unsigned int i; ++ ++ for (i = 0; i < table_size; ++i) { ++ spin_lock(&table_lock); ++ hlist_for_each_entry_safe(entry, temp, &table_v4[i], hash) { ++ if (unlikely(!work) || ++ now - entry->last_time_ns > NSEC_PER_SEC) ++ entry_uninit(entry); ++ } ++#if IS_ENABLED(CONFIG_IPV6) ++ hlist_for_each_entry_safe(entry, temp, &table_v6[i], hash) { ++ if (unlikely(!work) || ++ now - entry->last_time_ns > NSEC_PER_SEC) ++ entry_uninit(entry); ++ } ++#endif ++ spin_unlock(&table_lock); ++ if (likely(work)) ++ cond_resched(); ++ } ++ if (likely(work)) ++ queue_delayed_work(system_power_efficient_wq, &gc_work, HZ); ++} ++ ++bool wg_ratelimiter_allow(struct sk_buff *skb, struct net *net) ++{ ++ /* We only take the bottom half of the net pointer, so that we can hash ++ * 3 words in the end. This way, siphash's len param fits into the final ++ * u32, and we don't incur an extra round. 
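The wg_cpumask_next_online() helper in queueing.h above hands out online CPUs round-robin to the parallel crypto workers, tolerating the benign race its comment describes. A rough userspace sketch of the same cursor walk, assuming a fixed, invented cpu_online[] array in place of the kernel cpumask API:

/* Userspace sketch of the round-robin CPU selection in
 * wg_cpumask_next_online(): walk a wrapping cursor over the set of
 * online CPUs, skipping offline ones. The cursor is deliberately not
 * synchronized, mirroring the "racy but harmless" comment above.
 * Compile: cc -o rr rr.c
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 8

static const bool cpu_online[NR_CPUS] = {
	true, true, false, true, false, false, true, true,
};

static int next_online(int cpu)
{
	do {
		cpu = (cpu + 1) % NR_CPUS;
	} while (!cpu_online[cpu]);
	return cpu;
}

/* Returns an online CPU and advances the caller's cursor past it. */
static int cpumask_next_online(int *next)
{
	int cpu = *next;

	if (!cpu_online[cpu])
		cpu = next_online(cpu);
	*next = next_online(cpu);
	return cpu;
}

int main(void)
{
	int cursor = 0;

	for (int i = 0; i < 10; ++i)
		printf("packet %d -> cpu %d\n", i, cpumask_next_online(&cursor));
	return 0;
}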
++ */ ++ const u32 net_word = (unsigned long)net; ++ struct ratelimiter_entry *entry; ++ struct hlist_head *bucket; ++ u64 ip; ++ ++ if (skb->protocol == htons(ETH_P_IP)) { ++ ip = (u64 __force)ip_hdr(skb)->saddr; ++ bucket = &table_v4[hsiphash_2u32(net_word, ip, &key) & ++ (table_size - 1)]; ++ } ++#if IS_ENABLED(CONFIG_IPV6) ++ else if (skb->protocol == htons(ETH_P_IPV6)) { ++ /* Only use 64 bits, so as to ratelimit the whole /64. */ ++ memcpy(&ip, &ipv6_hdr(skb)->saddr, sizeof(ip)); ++ bucket = &table_v6[hsiphash_3u32(net_word, ip >> 32, ip, &key) & ++ (table_size - 1)]; ++ } ++#endif ++ else ++ return false; ++ rcu_read_lock(); ++ hlist_for_each_entry_rcu(entry, bucket, hash) { ++ if (entry->net == net && entry->ip == ip) { ++ u64 now, tokens; ++ bool ret; ++ /* Quasi-inspired by nft_limit.c, but this is actually a ++ * slightly different algorithm. Namely, we incorporate ++ * the burst as part of the maximum tokens, rather than ++ * as part of the rate. ++ */ ++ spin_lock(&entry->lock); ++ now = ktime_get_coarse_boottime_ns(); ++ tokens = min_t(u64, TOKEN_MAX, ++ entry->tokens + now - ++ entry->last_time_ns); ++ entry->last_time_ns = now; ++ ret = tokens >= PACKET_COST; ++ entry->tokens = ret ? tokens - PACKET_COST : tokens; ++ spin_unlock(&entry->lock); ++ rcu_read_unlock(); ++ return ret; ++ } ++ } ++ rcu_read_unlock(); ++ ++ if (atomic_inc_return(&total_entries) > max_entries) ++ goto err_oom; ++ ++ entry = kmem_cache_alloc(entry_cache, GFP_KERNEL); ++ if (unlikely(!entry)) ++ goto err_oom; ++ ++ entry->net = net; ++ entry->ip = ip; ++ INIT_HLIST_NODE(&entry->hash); ++ spin_lock_init(&entry->lock); ++ entry->last_time_ns = ktime_get_coarse_boottime_ns(); ++ entry->tokens = TOKEN_MAX - PACKET_COST; ++ spin_lock(&table_lock); ++ hlist_add_head_rcu(&entry->hash, bucket); ++ spin_unlock(&table_lock); ++ return true; ++ ++err_oom: ++ atomic_dec(&total_entries); ++ return false; ++} ++ ++int wg_ratelimiter_init(void) ++{ ++ mutex_lock(&init_lock); ++ if (++init_refcnt != 1) ++ goto out; ++ ++ entry_cache = KMEM_CACHE(ratelimiter_entry, 0); ++ if (!entry_cache) ++ goto err; ++ ++ /* xt_hashlimit.c uses a slightly different algorithm for ratelimiting, ++ * but what it shares in common is that it uses a massive hashtable. So, ++ * we borrow their wisdom about good table sizes on different systems ++ * dependent on RAM. This calculation here comes from there. ++ */ ++ table_size = (totalram_pages() > (1U << 30) / PAGE_SIZE) ? 
8192 : ++ max_t(unsigned long, 16, roundup_pow_of_two( ++ (totalram_pages() << PAGE_SHIFT) / ++ (1U << 14) / sizeof(struct hlist_head))); ++ max_entries = table_size * 8; ++ ++ table_v4 = kvzalloc(table_size * sizeof(*table_v4), GFP_KERNEL); ++ if (unlikely(!table_v4)) ++ goto err_kmemcache; ++ ++#if IS_ENABLED(CONFIG_IPV6) ++ table_v6 = kvzalloc(table_size * sizeof(*table_v6), GFP_KERNEL); ++ if (unlikely(!table_v6)) { ++ kvfree(table_v4); ++ goto err_kmemcache; ++ } ++#endif ++ ++ queue_delayed_work(system_power_efficient_wq, &gc_work, HZ); ++ get_random_bytes(&key, sizeof(key)); ++out: ++ mutex_unlock(&init_lock); ++ return 0; ++ ++err_kmemcache: ++ kmem_cache_destroy(entry_cache); ++err: ++ --init_refcnt; ++ mutex_unlock(&init_lock); ++ return -ENOMEM; ++} ++ ++void wg_ratelimiter_uninit(void) ++{ ++ mutex_lock(&init_lock); ++ if (!init_refcnt || --init_refcnt) ++ goto out; ++ ++ cancel_delayed_work_sync(&gc_work); ++ wg_ratelimiter_gc_entries(NULL); ++ rcu_barrier(); ++ kvfree(table_v4); ++#if IS_ENABLED(CONFIG_IPV6) ++ kvfree(table_v6); ++#endif ++ kmem_cache_destroy(entry_cache); ++out: ++ mutex_unlock(&init_lock); ++} ++ ++#include "selftest/ratelimiter.c" +--- /dev/null ++++ b/drivers/net/wireguard/ratelimiter.h +@@ -0,0 +1,19 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* ++ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. ++ */ ++ ++#ifndef _WG_RATELIMITER_H ++#define _WG_RATELIMITER_H ++ ++#include ++ ++int wg_ratelimiter_init(void); ++void wg_ratelimiter_uninit(void); ++bool wg_ratelimiter_allow(struct sk_buff *skb, struct net *net); ++ ++#ifdef DEBUG ++bool wg_ratelimiter_selftest(void); ++#endif ++ ++#endif /* _WG_RATELIMITER_H */ +--- /dev/null ++++ b/drivers/net/wireguard/receive.c +@@ -0,0 +1,595 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. ++ */ ++ ++#include "queueing.h" ++#include "device.h" ++#include "peer.h" ++#include "timers.h" ++#include "messages.h" ++#include "cookie.h" ++#include "socket.h" ++ ++#include ++#include ++#include ++#include ++ ++/* Must be called with bh disabled. 
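wg_ratelimiter_allow() above implements a token bucket in which the burst allowance is folded into the bucket ceiling (TOKEN_MAX) rather than into the refill rate, as its comment notes. Below is a standalone userspace sketch of just that accounting, reusing the PACKETS_PER_SECOND/PACKETS_BURSTABLE constants from the patch but with an invented bucket struct and clock helper, and without the per-entry locking or hashtable.

/* Userspace sketch of the per-source token bucket in
 * wg_ratelimiter_allow(): tokens refill at the elapsed time in
 * nanoseconds, the burst is part of the bucket ceiling rather than the
 * rate, and each packet costs PACKET_COST tokens.
 * Compile: cc -o bucket bucket.c
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

enum {
	PACKETS_PER_SECOND = 20,
	PACKETS_BURSTABLE = 5,
	PACKET_COST = 1000000000 / PACKETS_PER_SECOND,
	TOKEN_MAX = PACKET_COST * PACKETS_BURSTABLE
};

struct bucket {
	uint64_t last_time_ns, tokens;
};

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

static bool allow(struct bucket *b)
{
	uint64_t now = now_ns();
	uint64_t tokens = b->tokens + (now - b->last_time_ns);

	if (tokens > TOKEN_MAX)
		tokens = TOKEN_MAX;
	b->last_time_ns = now;
	if (tokens >= PACKET_COST) {
		b->tokens = tokens - PACKET_COST;
		return true;
	}
	b->tokens = tokens;
	return false;
}

int main(void)
{
	/* Start like a freshly inserted entry after its first packet. */
	struct bucket b = {
		.last_time_ns = now_ns(),
		.tokens = TOKEN_MAX - PACKET_COST,
	};
	int allowed = 0;

	for (int i = 0; i < 100; ++i)
		allowed += allow(&b);
	/* Only the remaining burst gets through in a tight loop. */
	printf("%d of 100 back-to-back packets allowed\n", allowed);
	return 0;
}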
*/ ++static void update_rx_stats(struct wg_peer *peer, size_t len) ++{ ++ struct pcpu_sw_netstats *tstats = ++ get_cpu_ptr(peer->device->dev->tstats); ++ ++ u64_stats_update_begin(&tstats->syncp); ++ ++tstats->rx_packets; ++ tstats->rx_bytes += len; ++ peer->rx_bytes += len; ++ u64_stats_update_end(&tstats->syncp); ++ put_cpu_ptr(tstats); ++} ++ ++#define SKB_TYPE_LE32(skb) (((struct message_header *)(skb)->data)->type) ++ ++static size_t validate_header_len(struct sk_buff *skb) ++{ ++ if (unlikely(skb->len < sizeof(struct message_header))) ++ return 0; ++ if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_DATA) && ++ skb->len >= MESSAGE_MINIMUM_LENGTH) ++ return sizeof(struct message_data); ++ if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_HANDSHAKE_INITIATION) && ++ skb->len == sizeof(struct message_handshake_initiation)) ++ return sizeof(struct message_handshake_initiation); ++ if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_HANDSHAKE_RESPONSE) && ++ skb->len == sizeof(struct message_handshake_response)) ++ return sizeof(struct message_handshake_response); ++ if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_HANDSHAKE_COOKIE) && ++ skb->len == sizeof(struct message_handshake_cookie)) ++ return sizeof(struct message_handshake_cookie); ++ return 0; ++} ++ ++static int prepare_skb_header(struct sk_buff *skb, struct wg_device *wg) ++{ ++ size_t data_offset, data_len, header_len; ++ struct udphdr *udp; ++ ++ if (unlikely(wg_skb_examine_untrusted_ip_hdr(skb) != skb->protocol || ++ skb_transport_header(skb) < skb->head || ++ (skb_transport_header(skb) + sizeof(struct udphdr)) > ++ skb_tail_pointer(skb))) ++ return -EINVAL; /* Bogus IP header */ ++ udp = udp_hdr(skb); ++ data_offset = (u8 *)udp - skb->data; ++ if (unlikely(data_offset > U16_MAX || ++ data_offset + sizeof(struct udphdr) > skb->len)) ++ /* Packet has offset at impossible location or isn't big enough ++ * to have UDP fields. ++ */ ++ return -EINVAL; ++ data_len = ntohs(udp->len); ++ if (unlikely(data_len < sizeof(struct udphdr) || ++ data_len > skb->len - data_offset)) ++ /* UDP packet is reporting too small of a size or lying about ++ * its size. ++ */ ++ return -EINVAL; ++ data_len -= sizeof(struct udphdr); ++ data_offset = (u8 *)udp + sizeof(struct udphdr) - skb->data; ++ if (unlikely(!pskb_may_pull(skb, ++ data_offset + sizeof(struct message_header)) || ++ pskb_trim(skb, data_len + data_offset) < 0)) ++ return -EINVAL; ++ skb_pull(skb, data_offset); ++ if (unlikely(skb->len != data_len)) ++ /* Final len does not agree with calculated len */ ++ return -EINVAL; ++ header_len = validate_header_len(skb); ++ if (unlikely(!header_len)) ++ return -EINVAL; ++ __skb_push(skb, data_offset); ++ if (unlikely(!pskb_may_pull(skb, data_offset + header_len))) ++ return -EINVAL; ++ __skb_pull(skb, data_offset); ++ return 0; ++} ++ ++static void wg_receive_handshake_packet(struct wg_device *wg, ++ struct sk_buff *skb) ++{ ++ enum cookie_mac_state mac_state; ++ struct wg_peer *peer = NULL; ++ /* This is global, so that our load calculation applies to the whole ++ * system. We don't care about races with it at all. 
++ */ ++ static u64 last_under_load; ++ bool packet_needs_cookie; ++ bool under_load; ++ ++ if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_HANDSHAKE_COOKIE)) { ++ net_dbg_skb_ratelimited("%s: Receiving cookie response from %pISpfsc\n", ++ wg->dev->name, skb); ++ wg_cookie_message_consume( ++ (struct message_handshake_cookie *)skb->data, wg); ++ return; ++ } ++ ++ under_load = skb_queue_len(&wg->incoming_handshakes) >= ++ MAX_QUEUED_INCOMING_HANDSHAKES / 8; ++ if (under_load) ++ last_under_load = ktime_get_coarse_boottime_ns(); ++ else if (last_under_load) ++ under_load = !wg_birthdate_has_expired(last_under_load, 1); ++ mac_state = wg_cookie_validate_packet(&wg->cookie_checker, skb, ++ under_load); ++ if ((under_load && mac_state == VALID_MAC_WITH_COOKIE) || ++ (!under_load && mac_state == VALID_MAC_BUT_NO_COOKIE)) { ++ packet_needs_cookie = false; ++ } else if (under_load && mac_state == VALID_MAC_BUT_NO_COOKIE) { ++ packet_needs_cookie = true; ++ } else { ++ net_dbg_skb_ratelimited("%s: Invalid MAC of handshake, dropping packet from %pISpfsc\n", ++ wg->dev->name, skb); ++ return; ++ } ++ ++ switch (SKB_TYPE_LE32(skb)) { ++ case cpu_to_le32(MESSAGE_HANDSHAKE_INITIATION): { ++ struct message_handshake_initiation *message = ++ (struct message_handshake_initiation *)skb->data; ++ ++ if (packet_needs_cookie) { ++ wg_packet_send_handshake_cookie(wg, skb, ++ message->sender_index); ++ return; ++ } ++ peer = wg_noise_handshake_consume_initiation(message, wg); ++ if (unlikely(!peer)) { ++ net_dbg_skb_ratelimited("%s: Invalid handshake initiation from %pISpfsc\n", ++ wg->dev->name, skb); ++ return; ++ } ++ wg_socket_set_peer_endpoint_from_skb(peer, skb); ++ net_dbg_ratelimited("%s: Receiving handshake initiation from peer %llu (%pISpfsc)\n", ++ wg->dev->name, peer->internal_id, ++ &peer->endpoint.addr); ++ wg_packet_send_handshake_response(peer); ++ break; ++ } ++ case cpu_to_le32(MESSAGE_HANDSHAKE_RESPONSE): { ++ struct message_handshake_response *message = ++ (struct message_handshake_response *)skb->data; ++ ++ if (packet_needs_cookie) { ++ wg_packet_send_handshake_cookie(wg, skb, ++ message->sender_index); ++ return; ++ } ++ peer = wg_noise_handshake_consume_response(message, wg); ++ if (unlikely(!peer)) { ++ net_dbg_skb_ratelimited("%s: Invalid handshake response from %pISpfsc\n", ++ wg->dev->name, skb); ++ return; ++ } ++ wg_socket_set_peer_endpoint_from_skb(peer, skb); ++ net_dbg_ratelimited("%s: Receiving handshake response from peer %llu (%pISpfsc)\n", ++ wg->dev->name, peer->internal_id, ++ &peer->endpoint.addr); ++ if (wg_noise_handshake_begin_session(&peer->handshake, ++ &peer->keypairs)) { ++ wg_timers_session_derived(peer); ++ wg_timers_handshake_complete(peer); ++ /* Calling this function will either send any existing ++ * packets in the queue and not send a keepalive, which ++ * is the best case, Or, if there's nothing in the ++ * queue, it will send a keepalive, in order to give ++ * immediate confirmation of the session. 
++ */ ++ wg_packet_send_keepalive(peer); ++ } ++ break; ++ } ++ } ++ ++ if (unlikely(!peer)) { ++ WARN(1, "Somehow a wrong type of packet wound up in the handshake queue!\n"); ++ return; ++ } ++ ++ local_bh_disable(); ++ update_rx_stats(peer, skb->len); ++ local_bh_enable(); ++ ++ wg_timers_any_authenticated_packet_received(peer); ++ wg_timers_any_authenticated_packet_traversal(peer); ++ wg_peer_put(peer); ++} ++ ++void wg_packet_handshake_receive_worker(struct work_struct *work) ++{ ++ struct wg_device *wg = container_of(work, struct multicore_worker, ++ work)->ptr; ++ struct sk_buff *skb; ++ ++ while ((skb = skb_dequeue(&wg->incoming_handshakes)) != NULL) { ++ wg_receive_handshake_packet(wg, skb); ++ dev_kfree_skb(skb); ++ cond_resched(); ++ } ++} ++ ++static void keep_key_fresh(struct wg_peer *peer) ++{ ++ struct noise_keypair *keypair; ++ bool send = false; ++ ++ if (peer->sent_lastminute_handshake) ++ return; ++ ++ rcu_read_lock_bh(); ++ keypair = rcu_dereference_bh(peer->keypairs.current_keypair); ++ if (likely(keypair && READ_ONCE(keypair->sending.is_valid)) && ++ keypair->i_am_the_initiator && ++ unlikely(wg_birthdate_has_expired(keypair->sending.birthdate, ++ REJECT_AFTER_TIME - KEEPALIVE_TIMEOUT - REKEY_TIMEOUT))) ++ send = true; ++ rcu_read_unlock_bh(); ++ ++ if (send) { ++ peer->sent_lastminute_handshake = true; ++ wg_packet_send_queued_handshake_initiation(peer, false); ++ } ++} ++ ++static bool decrypt_packet(struct sk_buff *skb, struct noise_symmetric_key *key) ++{ ++ struct scatterlist sg[MAX_SKB_FRAGS + 8]; ++ struct sk_buff *trailer; ++ unsigned int offset; ++ int num_frags; ++ ++ if (unlikely(!key)) ++ return false; ++ ++ if (unlikely(!READ_ONCE(key->is_valid) || ++ wg_birthdate_has_expired(key->birthdate, REJECT_AFTER_TIME) || ++ key->counter.receive.counter >= REJECT_AFTER_MESSAGES)) { ++ WRITE_ONCE(key->is_valid, false); ++ return false; ++ } ++ ++ PACKET_CB(skb)->nonce = ++ le64_to_cpu(((struct message_data *)skb->data)->counter); ++ ++ /* We ensure that the network header is part of the packet before we ++ * call skb_cow_data, so that there's no chance that data is removed ++ * from the skb, so that later we can extract the original endpoint. ++ */ ++ offset = skb->data - skb_network_header(skb); ++ skb_push(skb, offset); ++ num_frags = skb_cow_data(skb, 0, &trailer); ++ offset += sizeof(struct message_data); ++ skb_pull(skb, offset); ++ if (unlikely(num_frags < 0 || num_frags > ARRAY_SIZE(sg))) ++ return false; ++ ++ sg_init_table(sg, num_frags); ++ if (skb_to_sgvec(skb, sg, 0, skb->len) <= 0) ++ return false; ++ ++ if (!chacha20poly1305_decrypt_sg_inplace(sg, skb->len, NULL, 0, ++ PACKET_CB(skb)->nonce, ++ key->key)) ++ return false; ++ ++ /* Another ugly situation of pushing and pulling the header so as to ++ * keep endpoint information intact. 
++ */ ++ skb_push(skb, offset); ++ if (pskb_trim(skb, skb->len - noise_encrypted_len(0))) ++ return false; ++ skb_pull(skb, offset); ++ ++ return true; ++} ++ ++/* This is RFC6479, a replay detection bitmap algorithm that avoids bitshifts */ ++static bool counter_validate(union noise_counter *counter, u64 their_counter) ++{ ++ unsigned long index, index_current, top, i; ++ bool ret = false; ++ ++ spin_lock_bh(&counter->receive.lock); ++ ++ if (unlikely(counter->receive.counter >= REJECT_AFTER_MESSAGES + 1 || ++ their_counter >= REJECT_AFTER_MESSAGES)) ++ goto out; ++ ++ ++their_counter; ++ ++ if (unlikely((COUNTER_WINDOW_SIZE + their_counter) < ++ counter->receive.counter)) ++ goto out; ++ ++ index = their_counter >> ilog2(BITS_PER_LONG); ++ ++ if (likely(their_counter > counter->receive.counter)) { ++ index_current = counter->receive.counter >> ilog2(BITS_PER_LONG); ++ top = min_t(unsigned long, index - index_current, ++ COUNTER_BITS_TOTAL / BITS_PER_LONG); ++ for (i = 1; i <= top; ++i) ++ counter->receive.backtrack[(i + index_current) & ++ ((COUNTER_BITS_TOTAL / BITS_PER_LONG) - 1)] = 0; ++ counter->receive.counter = their_counter; ++ } ++ ++ index &= (COUNTER_BITS_TOTAL / BITS_PER_LONG) - 1; ++ ret = !test_and_set_bit(their_counter & (BITS_PER_LONG - 1), ++ &counter->receive.backtrack[index]); ++ ++out: ++ spin_unlock_bh(&counter->receive.lock); ++ return ret; ++} ++ ++#include "selftest/counter.c" ++ ++static void wg_packet_consume_data_done(struct wg_peer *peer, ++ struct sk_buff *skb, ++ struct endpoint *endpoint) ++{ ++ struct net_device *dev = peer->device->dev; ++ unsigned int len, len_before_trim; ++ struct wg_peer *routed_peer; ++ ++ wg_socket_set_peer_endpoint(peer, endpoint); ++ ++ if (unlikely(wg_noise_received_with_keypair(&peer->keypairs, ++ PACKET_CB(skb)->keypair))) { ++ wg_timers_handshake_complete(peer); ++ wg_packet_send_staged_packets(peer); ++ } ++ ++ keep_key_fresh(peer); ++ ++ wg_timers_any_authenticated_packet_received(peer); ++ wg_timers_any_authenticated_packet_traversal(peer); ++ ++ /* A packet with length 0 is a keepalive packet */ ++ if (unlikely(!skb->len)) { ++ update_rx_stats(peer, message_data_len(0)); ++ net_dbg_ratelimited("%s: Receiving keepalive packet from peer %llu (%pISpfsc)\n", ++ dev->name, peer->internal_id, ++ &peer->endpoint.addr); ++ goto packet_processed; ++ } ++ ++ wg_timers_data_received(peer); ++ ++ if (unlikely(skb_network_header(skb) < skb->head)) ++ goto dishonest_packet_size; ++ if (unlikely(!(pskb_network_may_pull(skb, sizeof(struct iphdr)) && ++ (ip_hdr(skb)->version == 4 || ++ (ip_hdr(skb)->version == 6 && ++ pskb_network_may_pull(skb, sizeof(struct ipv6hdr))))))) ++ goto dishonest_packet_type; ++ ++ skb->dev = dev; ++ /* We've already verified the Poly1305 auth tag, which means this packet ++ * was not modified in transit. We can therefore tell the networking ++ * stack that all checksums of every layer of encapsulation have already ++ * been checked "by the hardware" and therefore is unneccessary to check ++ * again in software. 
++ */ ++ skb->ip_summed = CHECKSUM_UNNECESSARY; ++ skb->csum_level = ~0; /* All levels */ ++ skb->protocol = wg_skb_examine_untrusted_ip_hdr(skb); ++ if (skb->protocol == htons(ETH_P_IP)) { ++ len = ntohs(ip_hdr(skb)->tot_len); ++ if (unlikely(len < sizeof(struct iphdr))) ++ goto dishonest_packet_size; ++ if (INET_ECN_is_ce(PACKET_CB(skb)->ds)) ++ IP_ECN_set_ce(ip_hdr(skb)); ++ } else if (skb->protocol == htons(ETH_P_IPV6)) { ++ len = ntohs(ipv6_hdr(skb)->payload_len) + ++ sizeof(struct ipv6hdr); ++ if (INET_ECN_is_ce(PACKET_CB(skb)->ds)) ++ IP6_ECN_set_ce(skb, ipv6_hdr(skb)); ++ } else { ++ goto dishonest_packet_type; ++ } ++ ++ if (unlikely(len > skb->len)) ++ goto dishonest_packet_size; ++ len_before_trim = skb->len; ++ if (unlikely(pskb_trim(skb, len))) ++ goto packet_processed; ++ ++ routed_peer = wg_allowedips_lookup_src(&peer->device->peer_allowedips, ++ skb); ++ wg_peer_put(routed_peer); /* We don't need the extra reference. */ ++ ++ if (unlikely(routed_peer != peer)) ++ goto dishonest_packet_peer; ++ ++ if (unlikely(napi_gro_receive(&peer->napi, skb) == GRO_DROP)) { ++ ++dev->stats.rx_dropped; ++ net_dbg_ratelimited("%s: Failed to give packet to userspace from peer %llu (%pISpfsc)\n", ++ dev->name, peer->internal_id, ++ &peer->endpoint.addr); ++ } else { ++ update_rx_stats(peer, message_data_len(len_before_trim)); ++ } ++ return; ++ ++dishonest_packet_peer: ++ net_dbg_skb_ratelimited("%s: Packet has unallowed src IP (%pISc) from peer %llu (%pISpfsc)\n", ++ dev->name, skb, peer->internal_id, ++ &peer->endpoint.addr); ++ ++dev->stats.rx_errors; ++ ++dev->stats.rx_frame_errors; ++ goto packet_processed; ++dishonest_packet_type: ++ net_dbg_ratelimited("%s: Packet is neither ipv4 nor ipv6 from peer %llu (%pISpfsc)\n", ++ dev->name, peer->internal_id, &peer->endpoint.addr); ++ ++dev->stats.rx_errors; ++ ++dev->stats.rx_frame_errors; ++ goto packet_processed; ++dishonest_packet_size: ++ net_dbg_ratelimited("%s: Packet has incorrect size from peer %llu (%pISpfsc)\n", ++ dev->name, peer->internal_id, &peer->endpoint.addr); ++ ++dev->stats.rx_errors; ++ ++dev->stats.rx_length_errors; ++ goto packet_processed; ++packet_processed: ++ dev_kfree_skb(skb); ++} ++ ++int wg_packet_rx_poll(struct napi_struct *napi, int budget) ++{ ++ struct wg_peer *peer = container_of(napi, struct wg_peer, napi); ++ struct crypt_queue *queue = &peer->rx_queue; ++ struct noise_keypair *keypair; ++ struct endpoint endpoint; ++ enum packet_state state; ++ struct sk_buff *skb; ++ int work_done = 0; ++ bool free; ++ ++ if (unlikely(budget <= 0)) ++ return 0; ++ ++ while ((skb = __ptr_ring_peek(&queue->ring)) != NULL && ++ (state = atomic_read_acquire(&PACKET_CB(skb)->state)) != ++ PACKET_STATE_UNCRYPTED) { ++ __ptr_ring_discard_one(&queue->ring); ++ peer = PACKET_PEER(skb); ++ keypair = PACKET_CB(skb)->keypair; ++ free = true; ++ ++ if (unlikely(state != PACKET_STATE_CRYPTED)) ++ goto next; ++ ++ if (unlikely(!counter_validate(&keypair->receiving.counter, ++ PACKET_CB(skb)->nonce))) { ++ net_dbg_ratelimited("%s: Packet has invalid nonce %llu (max %llu)\n", ++ peer->device->dev->name, ++ PACKET_CB(skb)->nonce, ++ keypair->receiving.counter.receive.counter); ++ goto next; ++ } ++ ++ if (unlikely(wg_socket_endpoint_from_skb(&endpoint, skb))) ++ goto next; ++ ++ wg_reset_packet(skb); ++ wg_packet_consume_data_done(peer, skb, &endpoint); ++ free = false; ++ ++next: ++ wg_noise_keypair_put(keypair, false); ++ wg_peer_put(peer); ++ if (unlikely(free)) ++ dev_kfree_skb(skb); ++ ++ if (++work_done >= budget) ++ break; ++ } ++ 
++ if (work_done < budget) ++ napi_complete_done(napi, work_done); ++ ++ return work_done; ++} ++ ++void wg_packet_decrypt_worker(struct work_struct *work) ++{ ++ struct crypt_queue *queue = container_of(work, struct multicore_worker, ++ work)->ptr; ++ struct sk_buff *skb; ++ ++ while ((skb = ptr_ring_consume_bh(&queue->ring)) != NULL) { ++ enum packet_state state = likely(decrypt_packet(skb, ++ &PACKET_CB(skb)->keypair->receiving)) ? ++ PACKET_STATE_CRYPTED : PACKET_STATE_DEAD; ++ wg_queue_enqueue_per_peer_napi(skb, state); ++ } ++} ++ ++static void wg_packet_consume_data(struct wg_device *wg, struct sk_buff *skb) ++{ ++ __le32 idx = ((struct message_data *)skb->data)->key_idx; ++ struct wg_peer *peer = NULL; ++ int ret; ++ ++ rcu_read_lock_bh(); ++ PACKET_CB(skb)->keypair = ++ (struct noise_keypair *)wg_index_hashtable_lookup( ++ wg->index_hashtable, INDEX_HASHTABLE_KEYPAIR, idx, ++ &peer); ++ if (unlikely(!wg_noise_keypair_get(PACKET_CB(skb)->keypair))) ++ goto err_keypair; ++ ++ if (unlikely(READ_ONCE(peer->is_dead))) ++ goto err; ++ ++ ret = wg_queue_enqueue_per_device_and_peer(&wg->decrypt_queue, ++ &peer->rx_queue, skb, ++ wg->packet_crypt_wq, ++ &wg->decrypt_queue.last_cpu); ++ if (unlikely(ret == -EPIPE)) ++ wg_queue_enqueue_per_peer_napi(skb, PACKET_STATE_DEAD); ++ if (likely(!ret || ret == -EPIPE)) { ++ rcu_read_unlock_bh(); ++ return; ++ } ++err: ++ wg_noise_keypair_put(PACKET_CB(skb)->keypair, false); ++err_keypair: ++ rcu_read_unlock_bh(); ++ wg_peer_put(peer); ++ dev_kfree_skb(skb); ++} ++ ++void wg_packet_receive(struct wg_device *wg, struct sk_buff *skb) ++{ ++ if (unlikely(prepare_skb_header(skb, wg) < 0)) ++ goto err; ++ switch (SKB_TYPE_LE32(skb)) { ++ case cpu_to_le32(MESSAGE_HANDSHAKE_INITIATION): ++ case cpu_to_le32(MESSAGE_HANDSHAKE_RESPONSE): ++ case cpu_to_le32(MESSAGE_HANDSHAKE_COOKIE): { ++ int cpu; ++ ++ if (skb_queue_len(&wg->incoming_handshakes) > ++ MAX_QUEUED_INCOMING_HANDSHAKES || ++ unlikely(!rng_is_initialized())) { ++ net_dbg_skb_ratelimited("%s: Dropping handshake packet from %pISpfsc\n", ++ wg->dev->name, skb); ++ goto err; ++ } ++ skb_queue_tail(&wg->incoming_handshakes, skb); ++ /* Queues up a call to packet_process_queued_handshake_ ++ * packets(skb): ++ */ ++ cpu = wg_cpumask_next_online(&wg->incoming_handshake_cpu); ++ queue_work_on(cpu, wg->handshake_receive_wq, ++ &per_cpu_ptr(wg->incoming_handshakes_worker, cpu)->work); ++ break; ++ } ++ case cpu_to_le32(MESSAGE_DATA): ++ PACKET_CB(skb)->ds = ip_tunnel_get_dsfield(ip_hdr(skb), skb); ++ wg_packet_consume_data(wg, skb); ++ break; ++ default: ++ net_dbg_skb_ratelimited("%s: Invalid packet from %pISpfsc\n", ++ wg->dev->name, skb); ++ goto err; ++ } ++ return; ++ ++err: ++ dev_kfree_skb(skb); ++} +--- /dev/null ++++ b/drivers/net/wireguard/selftest/allowedips.c +@@ -0,0 +1,683 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. ++ * ++ * This contains some basic static unit tests for the allowedips data structure. ++ * It also has two additional modes that are disabled and meant to be used by ++ * folks directly playing with this file. If you define the macro ++ * DEBUG_PRINT_TRIE_GRAPHVIZ to be 1, then every time there's a full tree in ++ * memory, it will be printed out as KERN_DEBUG in a format that can be passed ++ * to graphviz (the dot command) to visualize it. 
If you define the macro ++ * DEBUG_RANDOM_TRIE to be 1, then there will be an extremely costly set of ++ * randomized tests done against a trivial implementation, which may take ++ * upwards of a half-hour to complete. There's no set of users who should be ++ * enabling these, and the only developers that should go anywhere near these ++ * nobs are the ones who are reading this comment. ++ */ ++ ++#ifdef DEBUG ++ ++#include ++ ++static __init void swap_endian_and_apply_cidr(u8 *dst, const u8 *src, u8 bits, ++ u8 cidr) ++{ ++ swap_endian(dst, src, bits); ++ memset(dst + (cidr + 7) / 8, 0, bits / 8 - (cidr + 7) / 8); ++ if (cidr) ++ dst[(cidr + 7) / 8 - 1] &= ~0U << ((8 - (cidr % 8)) % 8); ++} ++ ++static __init void print_node(struct allowedips_node *node, u8 bits) ++{ ++ char *fmt_connection = KERN_DEBUG "\t\"%p/%d\" -> \"%p/%d\";\n"; ++ char *fmt_declaration = KERN_DEBUG ++ "\t\"%p/%d\"[style=%s, color=\"#%06x\"];\n"; ++ char *style = "dotted"; ++ u8 ip1[16], ip2[16]; ++ u32 color = 0; ++ ++ if (bits == 32) { ++ fmt_connection = KERN_DEBUG "\t\"%pI4/%d\" -> \"%pI4/%d\";\n"; ++ fmt_declaration = KERN_DEBUG ++ "\t\"%pI4/%d\"[style=%s, color=\"#%06x\"];\n"; ++ } else if (bits == 128) { ++ fmt_connection = KERN_DEBUG "\t\"%pI6/%d\" -> \"%pI6/%d\";\n"; ++ fmt_declaration = KERN_DEBUG ++ "\t\"%pI6/%d\"[style=%s, color=\"#%06x\"];\n"; ++ } ++ if (node->peer) { ++ hsiphash_key_t key = { { 0 } }; ++ ++ memcpy(&key, &node->peer, sizeof(node->peer)); ++ color = hsiphash_1u32(0xdeadbeef, &key) % 200 << 16 | ++ hsiphash_1u32(0xbabecafe, &key) % 200 << 8 | ++ hsiphash_1u32(0xabad1dea, &key) % 200; ++ style = "bold"; ++ } ++ swap_endian_and_apply_cidr(ip1, node->bits, bits, node->cidr); ++ printk(fmt_declaration, ip1, node->cidr, style, color); ++ if (node->bit[0]) { ++ swap_endian_and_apply_cidr(ip2, ++ rcu_dereference_raw(node->bit[0])->bits, bits, ++ node->cidr); ++ printk(fmt_connection, ip1, node->cidr, ip2, ++ rcu_dereference_raw(node->bit[0])->cidr); ++ print_node(rcu_dereference_raw(node->bit[0]), bits); ++ } ++ if (node->bit[1]) { ++ swap_endian_and_apply_cidr(ip2, ++ rcu_dereference_raw(node->bit[1])->bits, ++ bits, node->cidr); ++ printk(fmt_connection, ip1, node->cidr, ip2, ++ rcu_dereference_raw(node->bit[1])->cidr); ++ print_node(rcu_dereference_raw(node->bit[1]), bits); ++ } ++} ++ ++static __init void print_tree(struct allowedips_node __rcu *top, u8 bits) ++{ ++ printk(KERN_DEBUG "digraph trie {\n"); ++ print_node(rcu_dereference_raw(top), bits); ++ printk(KERN_DEBUG "}\n"); ++} ++ ++enum { ++ NUM_PEERS = 2000, ++ NUM_RAND_ROUTES = 400, ++ NUM_MUTATED_ROUTES = 100, ++ NUM_QUERIES = NUM_RAND_ROUTES * NUM_MUTATED_ROUTES * 30 ++}; ++ ++struct horrible_allowedips { ++ struct hlist_head head; ++}; ++ ++struct horrible_allowedips_node { ++ struct hlist_node table; ++ union nf_inet_addr ip; ++ union nf_inet_addr mask; ++ u8 ip_version; ++ void *value; ++}; ++ ++static __init void horrible_allowedips_init(struct horrible_allowedips *table) ++{ ++ INIT_HLIST_HEAD(&table->head); ++} ++ ++static __init void horrible_allowedips_free(struct horrible_allowedips *table) ++{ ++ struct horrible_allowedips_node *node; ++ struct hlist_node *h; ++ ++ hlist_for_each_entry_safe(node, h, &table->head, table) { ++ hlist_del(&node->table); ++ kfree(node); ++ } ++} ++ ++static __init inline union nf_inet_addr horrible_cidr_to_mask(u8 cidr) ++{ ++ union nf_inet_addr mask; ++ ++ memset(&mask, 0x00, 128 / 8); ++ memset(&mask, 0xff, cidr / 8); ++ if (cidr % 32) ++ mask.all[cidr / 32] = (__force u32)htonl( ++ 
(0xFFFFFFFFUL << (32 - (cidr % 32))) & 0xFFFFFFFFUL); ++ return mask; ++} ++ ++static __init inline u8 horrible_mask_to_cidr(union nf_inet_addr subnet) ++{ ++ return hweight32(subnet.all[0]) + hweight32(subnet.all[1]) + ++ hweight32(subnet.all[2]) + hweight32(subnet.all[3]); ++} ++ ++static __init inline void ++horrible_mask_self(struct horrible_allowedips_node *node) ++{ ++ if (node->ip_version == 4) { ++ node->ip.ip &= node->mask.ip; ++ } else if (node->ip_version == 6) { ++ node->ip.ip6[0] &= node->mask.ip6[0]; ++ node->ip.ip6[1] &= node->mask.ip6[1]; ++ node->ip.ip6[2] &= node->mask.ip6[2]; ++ node->ip.ip6[3] &= node->mask.ip6[3]; ++ } ++} ++ ++static __init inline bool ++horrible_match_v4(const struct horrible_allowedips_node *node, ++ struct in_addr *ip) ++{ ++ return (ip->s_addr & node->mask.ip) == node->ip.ip; ++} ++ ++static __init inline bool ++horrible_match_v6(const struct horrible_allowedips_node *node, ++ struct in6_addr *ip) ++{ ++ return (ip->in6_u.u6_addr32[0] & node->mask.ip6[0]) == ++ node->ip.ip6[0] && ++ (ip->in6_u.u6_addr32[1] & node->mask.ip6[1]) == ++ node->ip.ip6[1] && ++ (ip->in6_u.u6_addr32[2] & node->mask.ip6[2]) == ++ node->ip.ip6[2] && ++ (ip->in6_u.u6_addr32[3] & node->mask.ip6[3]) == node->ip.ip6[3]; ++} ++ ++static __init void ++horrible_insert_ordered(struct horrible_allowedips *table, ++ struct horrible_allowedips_node *node) ++{ ++ struct horrible_allowedips_node *other = NULL, *where = NULL; ++ u8 my_cidr = horrible_mask_to_cidr(node->mask); ++ ++ hlist_for_each_entry(other, &table->head, table) { ++ if (!memcmp(&other->mask, &node->mask, ++ sizeof(union nf_inet_addr)) && ++ !memcmp(&other->ip, &node->ip, ++ sizeof(union nf_inet_addr)) && ++ other->ip_version == node->ip_version) { ++ other->value = node->value; ++ kfree(node); ++ return; ++ } ++ where = other; ++ if (horrible_mask_to_cidr(other->mask) <= my_cidr) ++ break; ++ } ++ if (!other && !where) ++ hlist_add_head(&node->table, &table->head); ++ else if (!other) ++ hlist_add_behind(&node->table, &where->table); ++ else ++ hlist_add_before(&node->table, &where->table); ++} ++ ++static __init int ++horrible_allowedips_insert_v4(struct horrible_allowedips *table, ++ struct in_addr *ip, u8 cidr, void *value) ++{ ++ struct horrible_allowedips_node *node = kzalloc(sizeof(*node), ++ GFP_KERNEL); ++ ++ if (unlikely(!node)) ++ return -ENOMEM; ++ node->ip.in = *ip; ++ node->mask = horrible_cidr_to_mask(cidr); ++ node->ip_version = 4; ++ node->value = value; ++ horrible_mask_self(node); ++ horrible_insert_ordered(table, node); ++ return 0; ++} ++ ++static __init int ++horrible_allowedips_insert_v6(struct horrible_allowedips *table, ++ struct in6_addr *ip, u8 cidr, void *value) ++{ ++ struct horrible_allowedips_node *node = kzalloc(sizeof(*node), ++ GFP_KERNEL); ++ ++ if (unlikely(!node)) ++ return -ENOMEM; ++ node->ip.in6 = *ip; ++ node->mask = horrible_cidr_to_mask(cidr); ++ node->ip_version = 6; ++ node->value = value; ++ horrible_mask_self(node); ++ horrible_insert_ordered(table, node); ++ return 0; ++} ++ ++static __init void * ++horrible_allowedips_lookup_v4(struct horrible_allowedips *table, ++ struct in_addr *ip) ++{ ++ struct horrible_allowedips_node *node; ++ void *ret = NULL; ++ ++ hlist_for_each_entry(node, &table->head, table) { ++ if (node->ip_version != 4) ++ continue; ++ if (horrible_match_v4(node, ip)) { ++ ret = node->value; ++ break; ++ } ++ } ++ return ret; ++} ++ ++static __init void * ++horrible_allowedips_lookup_v6(struct horrible_allowedips *table, ++ struct in6_addr *ip) ++{ ++ 
struct horrible_allowedips_node *node; ++ void *ret = NULL; ++ ++ hlist_for_each_entry(node, &table->head, table) { ++ if (node->ip_version != 6) ++ continue; ++ if (horrible_match_v6(node, ip)) { ++ ret = node->value; ++ break; ++ } ++ } ++ return ret; ++} ++ ++static __init bool randomized_test(void) ++{ ++ unsigned int i, j, k, mutate_amount, cidr; ++ u8 ip[16], mutate_mask[16], mutated[16]; ++ struct wg_peer **peers, *peer; ++ struct horrible_allowedips h; ++ DEFINE_MUTEX(mutex); ++ struct allowedips t; ++ bool ret = false; ++ ++ mutex_init(&mutex); ++ ++ wg_allowedips_init(&t); ++ horrible_allowedips_init(&h); ++ ++ peers = kcalloc(NUM_PEERS, sizeof(*peers), GFP_KERNEL); ++ if (unlikely(!peers)) { ++ pr_err("allowedips random self-test malloc: FAIL\n"); ++ goto free; ++ } ++ for (i = 0; i < NUM_PEERS; ++i) { ++ peers[i] = kzalloc(sizeof(*peers[i]), GFP_KERNEL); ++ if (unlikely(!peers[i])) { ++ pr_err("allowedips random self-test malloc: FAIL\n"); ++ goto free; ++ } ++ kref_init(&peers[i]->refcount); ++ } ++ ++ mutex_lock(&mutex); ++ ++ for (i = 0; i < NUM_RAND_ROUTES; ++i) { ++ prandom_bytes(ip, 4); ++ cidr = prandom_u32_max(32) + 1; ++ peer = peers[prandom_u32_max(NUM_PEERS)]; ++ if (wg_allowedips_insert_v4(&t, (struct in_addr *)ip, cidr, ++ peer, &mutex) < 0) { ++ pr_err("allowedips random self-test malloc: FAIL\n"); ++ goto free_locked; ++ } ++ if (horrible_allowedips_insert_v4(&h, (struct in_addr *)ip, ++ cidr, peer) < 0) { ++ pr_err("allowedips random self-test malloc: FAIL\n"); ++ goto free_locked; ++ } ++ for (j = 0; j < NUM_MUTATED_ROUTES; ++j) { ++ memcpy(mutated, ip, 4); ++ prandom_bytes(mutate_mask, 4); ++ mutate_amount = prandom_u32_max(32); ++ for (k = 0; k < mutate_amount / 8; ++k) ++ mutate_mask[k] = 0xff; ++ mutate_mask[k] = 0xff ++ << ((8 - (mutate_amount % 8)) % 8); ++ for (; k < 4; ++k) ++ mutate_mask[k] = 0; ++ for (k = 0; k < 4; ++k) ++ mutated[k] = (mutated[k] & mutate_mask[k]) | ++ (~mutate_mask[k] & ++ prandom_u32_max(256)); ++ cidr = prandom_u32_max(32) + 1; ++ peer = peers[prandom_u32_max(NUM_PEERS)]; ++ if (wg_allowedips_insert_v4(&t, ++ (struct in_addr *)mutated, ++ cidr, peer, &mutex) < 0) { ++ pr_err("allowedips random malloc: FAIL\n"); ++ goto free_locked; ++ } ++ if (horrible_allowedips_insert_v4(&h, ++ (struct in_addr *)mutated, cidr, peer)) { ++ pr_err("allowedips random self-test malloc: FAIL\n"); ++ goto free_locked; ++ } ++ } ++ } ++ ++ for (i = 0; i < NUM_RAND_ROUTES; ++i) { ++ prandom_bytes(ip, 16); ++ cidr = prandom_u32_max(128) + 1; ++ peer = peers[prandom_u32_max(NUM_PEERS)]; ++ if (wg_allowedips_insert_v6(&t, (struct in6_addr *)ip, cidr, ++ peer, &mutex) < 0) { ++ pr_err("allowedips random self-test malloc: FAIL\n"); ++ goto free_locked; ++ } ++ if (horrible_allowedips_insert_v6(&h, (struct in6_addr *)ip, ++ cidr, peer) < 0) { ++ pr_err("allowedips random self-test malloc: FAIL\n"); ++ goto free_locked; ++ } ++ for (j = 0; j < NUM_MUTATED_ROUTES; ++j) { ++ memcpy(mutated, ip, 16); ++ prandom_bytes(mutate_mask, 16); ++ mutate_amount = prandom_u32_max(128); ++ for (k = 0; k < mutate_amount / 8; ++k) ++ mutate_mask[k] = 0xff; ++ mutate_mask[k] = 0xff ++ << ((8 - (mutate_amount % 8)) % 8); ++ for (; k < 4; ++k) ++ mutate_mask[k] = 0; ++ for (k = 0; k < 4; ++k) ++ mutated[k] = (mutated[k] & mutate_mask[k]) | ++ (~mutate_mask[k] & ++ prandom_u32_max(256)); ++ cidr = prandom_u32_max(128) + 1; ++ peer = peers[prandom_u32_max(NUM_PEERS)]; ++ if (wg_allowedips_insert_v6(&t, ++ (struct in6_addr *)mutated, ++ cidr, peer, &mutex) < 0) { ++ 
pr_err("allowedips random self-test malloc: FAIL\n"); ++ goto free_locked; ++ } ++ if (horrible_allowedips_insert_v6( ++ &h, (struct in6_addr *)mutated, cidr, ++ peer)) { ++ pr_err("allowedips random self-test malloc: FAIL\n"); ++ goto free_locked; ++ } ++ } ++ } ++ ++ mutex_unlock(&mutex); ++ ++ if (IS_ENABLED(DEBUG_PRINT_TRIE_GRAPHVIZ)) { ++ print_tree(t.root4, 32); ++ print_tree(t.root6, 128); ++ } ++ ++ for (i = 0; i < NUM_QUERIES; ++i) { ++ prandom_bytes(ip, 4); ++ if (lookup(t.root4, 32, ip) != ++ horrible_allowedips_lookup_v4(&h, (struct in_addr *)ip)) { ++ pr_err("allowedips random self-test: FAIL\n"); ++ goto free; ++ } ++ } ++ ++ for (i = 0; i < NUM_QUERIES; ++i) { ++ prandom_bytes(ip, 16); ++ if (lookup(t.root6, 128, ip) != ++ horrible_allowedips_lookup_v6(&h, (struct in6_addr *)ip)) { ++ pr_err("allowedips random self-test: FAIL\n"); ++ goto free; ++ } ++ } ++ ret = true; ++ ++free: ++ mutex_lock(&mutex); ++free_locked: ++ wg_allowedips_free(&t, &mutex); ++ mutex_unlock(&mutex); ++ horrible_allowedips_free(&h); ++ if (peers) { ++ for (i = 0; i < NUM_PEERS; ++i) ++ kfree(peers[i]); ++ } ++ kfree(peers); ++ return ret; ++} ++ ++static __init inline struct in_addr *ip4(u8 a, u8 b, u8 c, u8 d) ++{ ++ static struct in_addr ip; ++ u8 *split = (u8 *)&ip; ++ ++ split[0] = a; ++ split[1] = b; ++ split[2] = c; ++ split[3] = d; ++ return &ip; ++} ++ ++static __init inline struct in6_addr *ip6(u32 a, u32 b, u32 c, u32 d) ++{ ++ static struct in6_addr ip; ++ __be32 *split = (__be32 *)&ip; ++ ++ split[0] = cpu_to_be32(a); ++ split[1] = cpu_to_be32(b); ++ split[2] = cpu_to_be32(c); ++ split[3] = cpu_to_be32(d); ++ return &ip; ++} ++ ++static __init struct wg_peer *init_peer(void) ++{ ++ struct wg_peer *peer = kzalloc(sizeof(*peer), GFP_KERNEL); ++ ++ if (!peer) ++ return NULL; ++ kref_init(&peer->refcount); ++ INIT_LIST_HEAD(&peer->allowedips_list); ++ return peer; ++} ++ ++#define insert(version, mem, ipa, ipb, ipc, ipd, cidr) \ ++ wg_allowedips_insert_v##version(&t, ip##version(ipa, ipb, ipc, ipd), \ ++ cidr, mem, &mutex) ++ ++#define maybe_fail() do { \ ++ ++i; \ ++ if (!_s) { \ ++ pr_info("allowedips self-test %zu: FAIL\n", i); \ ++ success = false; \ ++ } \ ++ } while (0) ++ ++#define test(version, mem, ipa, ipb, ipc, ipd) do { \ ++ bool _s = lookup(t.root##version, (version) == 4 ? 32 : 128, \ ++ ip##version(ipa, ipb, ipc, ipd)) == (mem); \ ++ maybe_fail(); \ ++ } while (0) ++ ++#define test_negative(version, mem, ipa, ipb, ipc, ipd) do { \ ++ bool _s = lookup(t.root##version, (version) == 4 ? 
32 : 128, \ ++ ip##version(ipa, ipb, ipc, ipd)) != (mem); \ ++ maybe_fail(); \ ++ } while (0) ++ ++#define test_boolean(cond) do { \ ++ bool _s = (cond); \ ++ maybe_fail(); \ ++ } while (0) ++ ++bool __init wg_allowedips_selftest(void) ++{ ++ bool found_a = false, found_b = false, found_c = false, found_d = false, ++ found_e = false, found_other = false; ++ struct wg_peer *a = init_peer(), *b = init_peer(), *c = init_peer(), ++ *d = init_peer(), *e = init_peer(), *f = init_peer(), ++ *g = init_peer(), *h = init_peer(); ++ struct allowedips_node *iter_node; ++ bool success = false; ++ struct allowedips t; ++ DEFINE_MUTEX(mutex); ++ struct in6_addr ip; ++ size_t i = 0, count = 0; ++ __be64 part; ++ ++ mutex_init(&mutex); ++ mutex_lock(&mutex); ++ wg_allowedips_init(&t); ++ ++ if (!a || !b || !c || !d || !e || !f || !g || !h) { ++ pr_err("allowedips self-test malloc: FAIL\n"); ++ goto free; ++ } ++ ++ insert(4, a, 192, 168, 4, 0, 24); ++ insert(4, b, 192, 168, 4, 4, 32); ++ insert(4, c, 192, 168, 0, 0, 16); ++ insert(4, d, 192, 95, 5, 64, 27); ++ /* replaces previous entry, and maskself is required */ ++ insert(4, c, 192, 95, 5, 65, 27); ++ insert(6, d, 0x26075300, 0x60006b00, 0, 0xc05f0543, 128); ++ insert(6, c, 0x26075300, 0x60006b00, 0, 0, 64); ++ insert(4, e, 0, 0, 0, 0, 0); ++ insert(6, e, 0, 0, 0, 0, 0); ++ /* replaces previous entry */ ++ insert(6, f, 0, 0, 0, 0, 0); ++ insert(6, g, 0x24046800, 0, 0, 0, 32); ++ /* maskself is required */ ++ insert(6, h, 0x24046800, 0x40040800, 0xdeadbeef, 0xdeadbeef, 64); ++ insert(6, a, 0x24046800, 0x40040800, 0xdeadbeef, 0xdeadbeef, 128); ++ insert(6, c, 0x24446800, 0x40e40800, 0xdeaebeef, 0xdefbeef, 128); ++ insert(6, b, 0x24446800, 0xf0e40800, 0xeeaebeef, 0, 98); ++ insert(4, g, 64, 15, 112, 0, 20); ++ /* maskself is required */ ++ insert(4, h, 64, 15, 123, 211, 25); ++ insert(4, a, 10, 0, 0, 0, 25); ++ insert(4, b, 10, 0, 0, 128, 25); ++ insert(4, a, 10, 1, 0, 0, 30); ++ insert(4, b, 10, 1, 0, 4, 30); ++ insert(4, c, 10, 1, 0, 8, 29); ++ insert(4, d, 10, 1, 0, 16, 29); ++ ++ if (IS_ENABLED(DEBUG_PRINT_TRIE_GRAPHVIZ)) { ++ print_tree(t.root4, 32); ++ print_tree(t.root6, 128); ++ } ++ ++ success = true; ++ ++ test(4, a, 192, 168, 4, 20); ++ test(4, a, 192, 168, 4, 0); ++ test(4, b, 192, 168, 4, 4); ++ test(4, c, 192, 168, 200, 182); ++ test(4, c, 192, 95, 5, 68); ++ test(4, e, 192, 95, 5, 96); ++ test(6, d, 0x26075300, 0x60006b00, 0, 0xc05f0543); ++ test(6, c, 0x26075300, 0x60006b00, 0, 0xc02e01ee); ++ test(6, f, 0x26075300, 0x60006b01, 0, 0); ++ test(6, g, 0x24046800, 0x40040806, 0, 0x1006); ++ test(6, g, 0x24046800, 0x40040806, 0x1234, 0x5678); ++ test(6, f, 0x240467ff, 0x40040806, 0x1234, 0x5678); ++ test(6, f, 0x24046801, 0x40040806, 0x1234, 0x5678); ++ test(6, h, 0x24046800, 0x40040800, 0x1234, 0x5678); ++ test(6, h, 0x24046800, 0x40040800, 0, 0); ++ test(6, h, 0x24046800, 0x40040800, 0x10101010, 0x10101010); ++ test(6, a, 0x24046800, 0x40040800, 0xdeadbeef, 0xdeadbeef); ++ test(4, g, 64, 15, 116, 26); ++ test(4, g, 64, 15, 127, 3); ++ test(4, g, 64, 15, 123, 1); ++ test(4, h, 64, 15, 123, 128); ++ test(4, h, 64, 15, 123, 129); ++ test(4, a, 10, 0, 0, 52); ++ test(4, b, 10, 0, 0, 220); ++ test(4, a, 10, 1, 0, 2); ++ test(4, b, 10, 1, 0, 6); ++ test(4, c, 10, 1, 0, 10); ++ test(4, d, 10, 1, 0, 20); ++ ++ insert(4, a, 1, 0, 0, 0, 32); ++ insert(4, a, 64, 0, 0, 0, 32); ++ insert(4, a, 128, 0, 0, 0, 32); ++ insert(4, a, 192, 0, 0, 0, 32); ++ insert(4, a, 255, 0, 0, 0, 32); ++ wg_allowedips_remove_by_peer(&t, a, &mutex); ++ test_negative(4, a, 1, 0, 
0, 0); ++ test_negative(4, a, 64, 0, 0, 0); ++ test_negative(4, a, 128, 0, 0, 0); ++ test_negative(4, a, 192, 0, 0, 0); ++ test_negative(4, a, 255, 0, 0, 0); ++ ++ wg_allowedips_free(&t, &mutex); ++ wg_allowedips_init(&t); ++ insert(4, a, 192, 168, 0, 0, 16); ++ insert(4, a, 192, 168, 0, 0, 24); ++ wg_allowedips_remove_by_peer(&t, a, &mutex); ++ test_negative(4, a, 192, 168, 0, 1); ++ ++ /* These will hit the WARN_ON(len >= 128) in free_node if something ++ * goes wrong. ++ */ ++ for (i = 0; i < 128; ++i) { ++ part = cpu_to_be64(~(1LLU << (i % 64))); ++ memset(&ip, 0xff, 16); ++ memcpy((u8 *)&ip + (i < 64) * 8, &part, 8); ++ wg_allowedips_insert_v6(&t, &ip, 128, a, &mutex); ++ } ++ ++ wg_allowedips_free(&t, &mutex); ++ ++ wg_allowedips_init(&t); ++ insert(4, a, 192, 95, 5, 93, 27); ++ insert(6, a, 0x26075300, 0x60006b00, 0, 0xc05f0543, 128); ++ insert(4, a, 10, 1, 0, 20, 29); ++ insert(6, a, 0x26075300, 0x6d8a6bf8, 0xdab1f1df, 0xc05f1523, 83); ++ insert(6, a, 0x26075300, 0x6d8a6bf8, 0xdab1f1df, 0xc05f1523, 21); ++ list_for_each_entry(iter_node, &a->allowedips_list, peer_list) { ++ u8 cidr, ip[16] __aligned(__alignof(u64)); ++ int family = wg_allowedips_read_node(iter_node, ip, &cidr); ++ ++ count++; ++ ++ if (cidr == 27 && family == AF_INET && ++ !memcmp(ip, ip4(192, 95, 5, 64), sizeof(struct in_addr))) ++ found_a = true; ++ else if (cidr == 128 && family == AF_INET6 && ++ !memcmp(ip, ip6(0x26075300, 0x60006b00, 0, 0xc05f0543), ++ sizeof(struct in6_addr))) ++ found_b = true; ++ else if (cidr == 29 && family == AF_INET && ++ !memcmp(ip, ip4(10, 1, 0, 16), sizeof(struct in_addr))) ++ found_c = true; ++ else if (cidr == 83 && family == AF_INET6 && ++ !memcmp(ip, ip6(0x26075300, 0x6d8a6bf8, 0xdab1e000, 0), ++ sizeof(struct in6_addr))) ++ found_d = true; ++ else if (cidr == 21 && family == AF_INET6 && ++ !memcmp(ip, ip6(0x26075000, 0, 0, 0), ++ sizeof(struct in6_addr))) ++ found_e = true; ++ else ++ found_other = true; ++ } ++ test_boolean(count == 5); ++ test_boolean(found_a); ++ test_boolean(found_b); ++ test_boolean(found_c); ++ test_boolean(found_d); ++ test_boolean(found_e); ++ test_boolean(!found_other); ++ ++ if (IS_ENABLED(DEBUG_RANDOM_TRIE) && success) ++ success = randomized_test(); ++ ++ if (success) ++ pr_info("allowedips self-tests: pass\n"); ++ ++free: ++ wg_allowedips_free(&t, &mutex); ++ kfree(a); ++ kfree(b); ++ kfree(c); ++ kfree(d); ++ kfree(e); ++ kfree(f); ++ kfree(g); ++ kfree(h); ++ mutex_unlock(&mutex); ++ ++ return success; ++} ++ ++#undef test_negative ++#undef test ++#undef remove ++#undef insert ++#undef init_peer ++ ++#endif +--- /dev/null ++++ b/drivers/net/wireguard/selftest/counter.c +@@ -0,0 +1,104 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
++ */ ++ ++#ifdef DEBUG ++bool __init wg_packet_counter_selftest(void) ++{ ++ unsigned int test_num = 0, i; ++ union noise_counter counter; ++ bool success = true; ++ ++#define T_INIT do { \ ++ memset(&counter, 0, sizeof(union noise_counter)); \ ++ spin_lock_init(&counter.receive.lock); \ ++ } while (0) ++#define T_LIM (COUNTER_WINDOW_SIZE + 1) ++#define T(n, v) do { \ ++ ++test_num; \ ++ if (counter_validate(&counter, n) != (v)) { \ ++ pr_err("nonce counter self-test %u: FAIL\n", \ ++ test_num); \ ++ success = false; \ ++ } \ ++ } while (0) ++ ++ T_INIT; ++ /* 1 */ T(0, true); ++ /* 2 */ T(1, true); ++ /* 3 */ T(1, false); ++ /* 4 */ T(9, true); ++ /* 5 */ T(8, true); ++ /* 6 */ T(7, true); ++ /* 7 */ T(7, false); ++ /* 8 */ T(T_LIM, true); ++ /* 9 */ T(T_LIM - 1, true); ++ /* 10 */ T(T_LIM - 1, false); ++ /* 11 */ T(T_LIM - 2, true); ++ /* 12 */ T(2, true); ++ /* 13 */ T(2, false); ++ /* 14 */ T(T_LIM + 16, true); ++ /* 15 */ T(3, false); ++ /* 16 */ T(T_LIM + 16, false); ++ /* 17 */ T(T_LIM * 4, true); ++ /* 18 */ T(T_LIM * 4 - (T_LIM - 1), true); ++ /* 19 */ T(10, false); ++ /* 20 */ T(T_LIM * 4 - T_LIM, false); ++ /* 21 */ T(T_LIM * 4 - (T_LIM + 1), false); ++ /* 22 */ T(T_LIM * 4 - (T_LIM - 2), true); ++ /* 23 */ T(T_LIM * 4 + 1 - T_LIM, false); ++ /* 24 */ T(0, false); ++ /* 25 */ T(REJECT_AFTER_MESSAGES, false); ++ /* 26 */ T(REJECT_AFTER_MESSAGES - 1, true); ++ /* 27 */ T(REJECT_AFTER_MESSAGES, false); ++ /* 28 */ T(REJECT_AFTER_MESSAGES - 1, false); ++ /* 29 */ T(REJECT_AFTER_MESSAGES - 2, true); ++ /* 30 */ T(REJECT_AFTER_MESSAGES + 1, false); ++ /* 31 */ T(REJECT_AFTER_MESSAGES + 2, false); ++ /* 32 */ T(REJECT_AFTER_MESSAGES - 2, false); ++ /* 33 */ T(REJECT_AFTER_MESSAGES - 3, true); ++ /* 34 */ T(0, false); ++ ++ T_INIT; ++ for (i = 1; i <= COUNTER_WINDOW_SIZE; ++i) ++ T(i, true); ++ T(0, true); ++ T(0, false); ++ ++ T_INIT; ++ for (i = 2; i <= COUNTER_WINDOW_SIZE + 1; ++i) ++ T(i, true); ++ T(1, true); ++ T(0, false); ++ ++ T_INIT; ++ for (i = COUNTER_WINDOW_SIZE + 1; i-- > 0;) ++ T(i, true); ++ ++ T_INIT; ++ for (i = COUNTER_WINDOW_SIZE + 2; i-- > 1;) ++ T(i, true); ++ T(0, false); ++ ++ T_INIT; ++ for (i = COUNTER_WINDOW_SIZE + 1; i-- > 1;) ++ T(i, true); ++ T(COUNTER_WINDOW_SIZE + 1, true); ++ T(0, false); ++ ++ T_INIT; ++ for (i = COUNTER_WINDOW_SIZE + 1; i-- > 1;) ++ T(i, true); ++ T(0, true); ++ T(COUNTER_WINDOW_SIZE + 1, true); ++ ++#undef T ++#undef T_LIM ++#undef T_INIT ++ ++ if (success) ++ pr_info("nonce counter self-tests: pass\n"); ++ return success; ++} ++#endif +--- /dev/null ++++ b/drivers/net/wireguard/selftest/ratelimiter.c +@@ -0,0 +1,226 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. ++ */ ++ ++#ifdef DEBUG ++ ++#include ++ ++static const struct { ++ bool result; ++ unsigned int msec_to_sleep_before; ++} expected_results[] __initconst = { ++ [0 ... 
PACKETS_BURSTABLE - 1] = { true, 0 }, ++ [PACKETS_BURSTABLE] = { false, 0 }, ++ [PACKETS_BURSTABLE + 1] = { true, MSEC_PER_SEC / PACKETS_PER_SECOND }, ++ [PACKETS_BURSTABLE + 2] = { false, 0 }, ++ [PACKETS_BURSTABLE + 3] = { true, (MSEC_PER_SEC / PACKETS_PER_SECOND) * 2 }, ++ [PACKETS_BURSTABLE + 4] = { true, 0 }, ++ [PACKETS_BURSTABLE + 5] = { false, 0 } ++}; ++ ++static __init unsigned int maximum_jiffies_at_index(int index) ++{ ++ unsigned int total_msecs = 2 * MSEC_PER_SEC / PACKETS_PER_SECOND / 3; ++ int i; ++ ++ for (i = 0; i <= index; ++i) ++ total_msecs += expected_results[i].msec_to_sleep_before; ++ return msecs_to_jiffies(total_msecs); ++} ++ ++static __init int timings_test(struct sk_buff *skb4, struct iphdr *hdr4, ++ struct sk_buff *skb6, struct ipv6hdr *hdr6, ++ int *test) ++{ ++ unsigned long loop_start_time; ++ int i; ++ ++ wg_ratelimiter_gc_entries(NULL); ++ rcu_barrier(); ++ loop_start_time = jiffies; ++ ++ for (i = 0; i < ARRAY_SIZE(expected_results); ++i) { ++ if (expected_results[i].msec_to_sleep_before) ++ msleep(expected_results[i].msec_to_sleep_before); ++ ++ if (time_is_before_jiffies(loop_start_time + ++ maximum_jiffies_at_index(i))) ++ return -ETIMEDOUT; ++ if (wg_ratelimiter_allow(skb4, &init_net) != ++ expected_results[i].result) ++ return -EXFULL; ++ ++(*test); ++ ++ hdr4->saddr = htonl(ntohl(hdr4->saddr) + i + 1); ++ if (time_is_before_jiffies(loop_start_time + ++ maximum_jiffies_at_index(i))) ++ return -ETIMEDOUT; ++ if (!wg_ratelimiter_allow(skb4, &init_net)) ++ return -EXFULL; ++ ++(*test); ++ ++ hdr4->saddr = htonl(ntohl(hdr4->saddr) - i - 1); ++ ++#if IS_ENABLED(CONFIG_IPV6) ++ hdr6->saddr.in6_u.u6_addr32[2] = htonl(i); ++ hdr6->saddr.in6_u.u6_addr32[3] = htonl(i); ++ if (time_is_before_jiffies(loop_start_time + ++ maximum_jiffies_at_index(i))) ++ return -ETIMEDOUT; ++ if (wg_ratelimiter_allow(skb6, &init_net) != ++ expected_results[i].result) ++ return -EXFULL; ++ ++(*test); ++ ++ hdr6->saddr.in6_u.u6_addr32[0] = ++ htonl(ntohl(hdr6->saddr.in6_u.u6_addr32[0]) + i + 1); ++ if (time_is_before_jiffies(loop_start_time + ++ maximum_jiffies_at_index(i))) ++ return -ETIMEDOUT; ++ if (!wg_ratelimiter_allow(skb6, &init_net)) ++ return -EXFULL; ++ ++(*test); ++ ++ hdr6->saddr.in6_u.u6_addr32[0] = ++ htonl(ntohl(hdr6->saddr.in6_u.u6_addr32[0]) - i - 1); ++ ++ if (time_is_before_jiffies(loop_start_time + ++ maximum_jiffies_at_index(i))) ++ return -ETIMEDOUT; ++#endif ++ } ++ return 0; ++} ++ ++static __init int capacity_test(struct sk_buff *skb4, struct iphdr *hdr4, ++ int *test) ++{ ++ int i; ++ ++ wg_ratelimiter_gc_entries(NULL); ++ rcu_barrier(); ++ ++ if (atomic_read(&total_entries)) ++ return -EXFULL; ++ ++(*test); ++ ++ for (i = 0; i <= max_entries; ++i) { ++ hdr4->saddr = htonl(i); ++ if (wg_ratelimiter_allow(skb4, &init_net) != (i != max_entries)) ++ return -EXFULL; ++ ++(*test); ++ } ++ return 0; ++} ++ ++bool __init wg_ratelimiter_selftest(void) ++{ ++ enum { TRIALS_BEFORE_GIVING_UP = 5000 }; ++ bool success = false; ++ int test = 0, trials; ++ struct sk_buff *skb4, *skb6; ++ struct iphdr *hdr4; ++ struct ipv6hdr *hdr6; ++ ++ if (IS_ENABLED(CONFIG_KASAN) || IS_ENABLED(CONFIG_UBSAN)) ++ return true; ++ ++ BUILD_BUG_ON(MSEC_PER_SEC % PACKETS_PER_SECOND != 0); ++ ++ if (wg_ratelimiter_init()) ++ goto out; ++ ++test; ++ if (wg_ratelimiter_init()) { ++ wg_ratelimiter_uninit(); ++ goto out; ++ } ++ ++test; ++ if (wg_ratelimiter_init()) { ++ wg_ratelimiter_uninit(); ++ wg_ratelimiter_uninit(); ++ goto out; ++ } ++ ++test; ++ ++ skb4 = alloc_skb(sizeof(struct 
iphdr), GFP_KERNEL); ++ if (unlikely(!skb4)) ++ goto err_nofree; ++ skb4->protocol = htons(ETH_P_IP); ++ hdr4 = (struct iphdr *)skb_put(skb4, sizeof(*hdr4)); ++ hdr4->saddr = htonl(8182); ++ skb_reset_network_header(skb4); ++ ++test; ++ ++#if IS_ENABLED(CONFIG_IPV6) ++ skb6 = alloc_skb(sizeof(struct ipv6hdr), GFP_KERNEL); ++ if (unlikely(!skb6)) { ++ kfree_skb(skb4); ++ goto err_nofree; ++ } ++ skb6->protocol = htons(ETH_P_IPV6); ++ hdr6 = (struct ipv6hdr *)skb_put(skb6, sizeof(*hdr6)); ++ hdr6->saddr.in6_u.u6_addr32[0] = htonl(1212); ++ hdr6->saddr.in6_u.u6_addr32[1] = htonl(289188); ++ skb_reset_network_header(skb6); ++ ++test; ++#endif ++ ++ for (trials = TRIALS_BEFORE_GIVING_UP;;) { ++ int test_count = 0, ret; ++ ++ ret = timings_test(skb4, hdr4, skb6, hdr6, &test_count); ++ if (ret == -ETIMEDOUT) { ++ if (!trials--) { ++ test += test_count; ++ goto err; ++ } ++ msleep(500); ++ continue; ++ } else if (ret < 0) { ++ test += test_count; ++ goto err; ++ } else { ++ test += test_count; ++ break; ++ } ++ } ++ ++ for (trials = TRIALS_BEFORE_GIVING_UP;;) { ++ int test_count = 0; ++ ++ if (capacity_test(skb4, hdr4, &test_count) < 0) { ++ if (!trials--) { ++ test += test_count; ++ goto err; ++ } ++ msleep(50); ++ continue; ++ } ++ test += test_count; ++ break; ++ } ++ ++ success = true; ++ ++err: ++ kfree_skb(skb4); ++#if IS_ENABLED(CONFIG_IPV6) ++ kfree_skb(skb6); ++#endif ++err_nofree: ++ wg_ratelimiter_uninit(); ++ wg_ratelimiter_uninit(); ++ wg_ratelimiter_uninit(); ++ /* Uninit one extra time to check underflow detection. */ ++ wg_ratelimiter_uninit(); ++out: ++ if (success) ++ pr_info("ratelimiter self-tests: pass\n"); ++ else ++ pr_err("ratelimiter self-test %d: FAIL\n", test); ++ ++ return success; ++} ++#endif +--- /dev/null ++++ b/drivers/net/wireguard/send.c +@@ -0,0 +1,413 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. ++ */ ++ ++#include "queueing.h" ++#include "timers.h" ++#include "device.h" ++#include "peer.h" ++#include "socket.h" ++#include "messages.h" ++#include "cookie.h" ++ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++static void wg_packet_send_handshake_initiation(struct wg_peer *peer) ++{ ++ struct message_handshake_initiation packet; ++ ++ if (!wg_birthdate_has_expired(atomic64_read(&peer->last_sent_handshake), ++ REKEY_TIMEOUT)) ++ return; /* This function is rate limited. 
*/ ++ ++ atomic64_set(&peer->last_sent_handshake, ktime_get_coarse_boottime_ns()); ++ net_dbg_ratelimited("%s: Sending handshake initiation to peer %llu (%pISpfsc)\n", ++ peer->device->dev->name, peer->internal_id, ++ &peer->endpoint.addr); ++ ++ if (wg_noise_handshake_create_initiation(&packet, &peer->handshake)) { ++ wg_cookie_add_mac_to_packet(&packet, sizeof(packet), peer); ++ wg_timers_any_authenticated_packet_traversal(peer); ++ wg_timers_any_authenticated_packet_sent(peer); ++ atomic64_set(&peer->last_sent_handshake, ++ ktime_get_coarse_boottime_ns()); ++ wg_socket_send_buffer_to_peer(peer, &packet, sizeof(packet), ++ HANDSHAKE_DSCP); ++ wg_timers_handshake_initiated(peer); ++ } ++} ++ ++void wg_packet_handshake_send_worker(struct work_struct *work) ++{ ++ struct wg_peer *peer = container_of(work, struct wg_peer, ++ transmit_handshake_work); ++ ++ wg_packet_send_handshake_initiation(peer); ++ wg_peer_put(peer); ++} ++ ++void wg_packet_send_queued_handshake_initiation(struct wg_peer *peer, ++ bool is_retry) ++{ ++ if (!is_retry) ++ peer->timer_handshake_attempts = 0; ++ ++ rcu_read_lock_bh(); ++ /* We check last_sent_handshake here in addition to the actual function ++ * we're queueing up, so that we don't queue things if not strictly ++ * necessary: ++ */ ++ if (!wg_birthdate_has_expired(atomic64_read(&peer->last_sent_handshake), ++ REKEY_TIMEOUT) || ++ unlikely(READ_ONCE(peer->is_dead))) ++ goto out; ++ ++ wg_peer_get(peer); ++ /* Queues up calling packet_send_queued_handshakes(peer), where we do a ++ * peer_put(peer) after: ++ */ ++ if (!queue_work(peer->device->handshake_send_wq, ++ &peer->transmit_handshake_work)) ++ /* If the work was already queued, we want to drop the ++ * extra reference: ++ */ ++ wg_peer_put(peer); ++out: ++ rcu_read_unlock_bh(); ++} ++ ++void wg_packet_send_handshake_response(struct wg_peer *peer) ++{ ++ struct message_handshake_response packet; ++ ++ atomic64_set(&peer->last_sent_handshake, ktime_get_coarse_boottime_ns()); ++ net_dbg_ratelimited("%s: Sending handshake response to peer %llu (%pISpfsc)\n", ++ peer->device->dev->name, peer->internal_id, ++ &peer->endpoint.addr); ++ ++ if (wg_noise_handshake_create_response(&packet, &peer->handshake)) { ++ wg_cookie_add_mac_to_packet(&packet, sizeof(packet), peer); ++ if (wg_noise_handshake_begin_session(&peer->handshake, ++ &peer->keypairs)) { ++ wg_timers_session_derived(peer); ++ wg_timers_any_authenticated_packet_traversal(peer); ++ wg_timers_any_authenticated_packet_sent(peer); ++ atomic64_set(&peer->last_sent_handshake, ++ ktime_get_coarse_boottime_ns()); ++ wg_socket_send_buffer_to_peer(peer, &packet, ++ sizeof(packet), ++ HANDSHAKE_DSCP); ++ } ++ } ++} ++ ++void wg_packet_send_handshake_cookie(struct wg_device *wg, ++ struct sk_buff *initiating_skb, ++ __le32 sender_index) ++{ ++ struct message_handshake_cookie packet; ++ ++ net_dbg_skb_ratelimited("%s: Sending cookie response for denied handshake message for %pISpfsc\n", ++ wg->dev->name, initiating_skb); ++ wg_cookie_message_create(&packet, initiating_skb, sender_index, ++ &wg->cookie_checker); ++ wg_socket_send_buffer_as_reply_to_skb(wg, initiating_skb, &packet, ++ sizeof(packet)); ++} ++ ++static void keep_key_fresh(struct wg_peer *peer) ++{ ++ struct noise_keypair *keypair; ++ bool send = false; ++ ++ rcu_read_lock_bh(); ++ keypair = rcu_dereference_bh(peer->keypairs.current_keypair); ++ if (likely(keypair && READ_ONCE(keypair->sending.is_valid)) && ++ (unlikely(atomic64_read(&keypair->sending.counter.counter) > ++ REKEY_AFTER_MESSAGES) || ++ 
(keypair->i_am_the_initiator && ++ unlikely(wg_birthdate_has_expired(keypair->sending.birthdate, ++ REKEY_AFTER_TIME))))) ++ send = true; ++ rcu_read_unlock_bh(); ++ ++ if (send) ++ wg_packet_send_queued_handshake_initiation(peer, false); ++} ++ ++static unsigned int calculate_skb_padding(struct sk_buff *skb) ++{ ++ /* We do this modulo business with the MTU, just in case the networking ++ * layer gives us a packet that's bigger than the MTU. In that case, we ++ * wouldn't want the final subtraction to overflow in the case of the ++ * padded_size being clamped. ++ */ ++ unsigned int last_unit = skb->len % PACKET_CB(skb)->mtu; ++ unsigned int padded_size = ALIGN(last_unit, MESSAGE_PADDING_MULTIPLE); ++ ++ if (padded_size > PACKET_CB(skb)->mtu) ++ padded_size = PACKET_CB(skb)->mtu; ++ return padded_size - last_unit; ++} ++ ++static bool encrypt_packet(struct sk_buff *skb, struct noise_keypair *keypair) ++{ ++ unsigned int padding_len, plaintext_len, trailer_len; ++ struct scatterlist sg[MAX_SKB_FRAGS + 8]; ++ struct message_data *header; ++ struct sk_buff *trailer; ++ int num_frags; ++ ++ /* Calculate lengths. */ ++ padding_len = calculate_skb_padding(skb); ++ trailer_len = padding_len + noise_encrypted_len(0); ++ plaintext_len = skb->len + padding_len; ++ ++ /* Expand data section to have room for padding and auth tag. */ ++ num_frags = skb_cow_data(skb, trailer_len, &trailer); ++ if (unlikely(num_frags < 0 || num_frags > ARRAY_SIZE(sg))) ++ return false; ++ ++ /* Set the padding to zeros, and make sure it and the auth tag are part ++ * of the skb. ++ */ ++ memset(skb_tail_pointer(trailer), 0, padding_len); ++ ++ /* Expand head section to have room for our header and the network ++ * stack's headers. ++ */ ++ if (unlikely(skb_cow_head(skb, DATA_PACKET_HEAD_ROOM) < 0)) ++ return false; ++ ++ /* Finalize checksum calculation for the inner packet, if required. */ ++ if (unlikely(skb->ip_summed == CHECKSUM_PARTIAL && ++ skb_checksum_help(skb))) ++ return false; ++ ++ /* Only after checksumming can we safely add on the padding at the end ++ * and the header. 
++ */ ++ skb_set_inner_network_header(skb, 0); ++ header = (struct message_data *)skb_push(skb, sizeof(*header)); ++ header->header.type = cpu_to_le32(MESSAGE_DATA); ++ header->key_idx = keypair->remote_index; ++ header->counter = cpu_to_le64(PACKET_CB(skb)->nonce); ++ pskb_put(skb, trailer, trailer_len); ++ ++ /* Now we can encrypt the scattergather segments */ ++ sg_init_table(sg, num_frags); ++ if (skb_to_sgvec(skb, sg, sizeof(struct message_data), ++ noise_encrypted_len(plaintext_len)) <= 0) ++ return false; ++ return chacha20poly1305_encrypt_sg_inplace(sg, plaintext_len, NULL, 0, ++ PACKET_CB(skb)->nonce, ++ keypair->sending.key); ++} ++ ++void wg_packet_send_keepalive(struct wg_peer *peer) ++{ ++ struct sk_buff *skb; ++ ++ if (skb_queue_empty(&peer->staged_packet_queue)) { ++ skb = alloc_skb(DATA_PACKET_HEAD_ROOM + MESSAGE_MINIMUM_LENGTH, ++ GFP_ATOMIC); ++ if (unlikely(!skb)) ++ return; ++ skb_reserve(skb, DATA_PACKET_HEAD_ROOM); ++ skb->dev = peer->device->dev; ++ PACKET_CB(skb)->mtu = skb->dev->mtu; ++ skb_queue_tail(&peer->staged_packet_queue, skb); ++ net_dbg_ratelimited("%s: Sending keepalive packet to peer %llu (%pISpfsc)\n", ++ peer->device->dev->name, peer->internal_id, ++ &peer->endpoint.addr); ++ } ++ ++ wg_packet_send_staged_packets(peer); ++} ++ ++static void wg_packet_create_data_done(struct sk_buff *first, ++ struct wg_peer *peer) ++{ ++ struct sk_buff *skb, *next; ++ bool is_keepalive, data_sent = false; ++ ++ wg_timers_any_authenticated_packet_traversal(peer); ++ wg_timers_any_authenticated_packet_sent(peer); ++ skb_list_walk_safe(first, skb, next) { ++ is_keepalive = skb->len == message_data_len(0); ++ if (likely(!wg_socket_send_skb_to_peer(peer, skb, ++ PACKET_CB(skb)->ds) && !is_keepalive)) ++ data_sent = true; ++ } ++ ++ if (likely(data_sent)) ++ wg_timers_data_sent(peer); ++ ++ keep_key_fresh(peer); ++} ++ ++void wg_packet_tx_worker(struct work_struct *work) ++{ ++ struct crypt_queue *queue = container_of(work, struct crypt_queue, ++ work); ++ struct noise_keypair *keypair; ++ enum packet_state state; ++ struct sk_buff *first; ++ struct wg_peer *peer; ++ ++ while ((first = __ptr_ring_peek(&queue->ring)) != NULL && ++ (state = atomic_read_acquire(&PACKET_CB(first)->state)) != ++ PACKET_STATE_UNCRYPTED) { ++ __ptr_ring_discard_one(&queue->ring); ++ peer = PACKET_PEER(first); ++ keypair = PACKET_CB(first)->keypair; ++ ++ if (likely(state == PACKET_STATE_CRYPTED)) ++ wg_packet_create_data_done(first, peer); ++ else ++ kfree_skb_list(first); ++ ++ wg_noise_keypair_put(keypair, false); ++ wg_peer_put(peer); ++ } ++} ++ ++void wg_packet_encrypt_worker(struct work_struct *work) ++{ ++ struct crypt_queue *queue = container_of(work, struct multicore_worker, ++ work)->ptr; ++ struct sk_buff *first, *skb, *next; ++ ++ while ((first = ptr_ring_consume_bh(&queue->ring)) != NULL) { ++ enum packet_state state = PACKET_STATE_CRYPTED; ++ ++ skb_list_walk_safe(first, skb, next) { ++ if (likely(encrypt_packet(skb, ++ PACKET_CB(first)->keypair))) { ++ wg_reset_packet(skb); ++ } else { ++ state = PACKET_STATE_DEAD; ++ break; ++ } ++ } ++ wg_queue_enqueue_per_peer(&PACKET_PEER(first)->tx_queue, first, ++ state); ++ ++ } ++} ++ ++static void wg_packet_create_data(struct sk_buff *first) ++{ ++ struct wg_peer *peer = PACKET_PEER(first); ++ struct wg_device *wg = peer->device; ++ int ret = -EINVAL; ++ ++ rcu_read_lock_bh(); ++ if (unlikely(READ_ONCE(peer->is_dead))) ++ goto err; ++ ++ ret = wg_queue_enqueue_per_device_and_peer(&wg->encrypt_queue, ++ &peer->tx_queue, first, ++ 
wg->packet_crypt_wq, ++ &wg->encrypt_queue.last_cpu); ++ if (unlikely(ret == -EPIPE)) ++ wg_queue_enqueue_per_peer(&peer->tx_queue, first, ++ PACKET_STATE_DEAD); ++err: ++ rcu_read_unlock_bh(); ++ if (likely(!ret || ret == -EPIPE)) ++ return; ++ wg_noise_keypair_put(PACKET_CB(first)->keypair, false); ++ wg_peer_put(peer); ++ kfree_skb_list(first); ++} ++ ++void wg_packet_purge_staged_packets(struct wg_peer *peer) ++{ ++ spin_lock_bh(&peer->staged_packet_queue.lock); ++ peer->device->dev->stats.tx_dropped += peer->staged_packet_queue.qlen; ++ __skb_queue_purge(&peer->staged_packet_queue); ++ spin_unlock_bh(&peer->staged_packet_queue.lock); ++} ++ ++void wg_packet_send_staged_packets(struct wg_peer *peer) ++{ ++ struct noise_symmetric_key *key; ++ struct noise_keypair *keypair; ++ struct sk_buff_head packets; ++ struct sk_buff *skb; ++ ++ /* Steal the current queue into our local one. */ ++ __skb_queue_head_init(&packets); ++ spin_lock_bh(&peer->staged_packet_queue.lock); ++ skb_queue_splice_init(&peer->staged_packet_queue, &packets); ++ spin_unlock_bh(&peer->staged_packet_queue.lock); ++ if (unlikely(skb_queue_empty(&packets))) ++ return; ++ ++ /* First we make sure we have a valid reference to a valid key. */ ++ rcu_read_lock_bh(); ++ keypair = wg_noise_keypair_get( ++ rcu_dereference_bh(peer->keypairs.current_keypair)); ++ rcu_read_unlock_bh(); ++ if (unlikely(!keypair)) ++ goto out_nokey; ++ key = &keypair->sending; ++ if (unlikely(!READ_ONCE(key->is_valid))) ++ goto out_nokey; ++ if (unlikely(wg_birthdate_has_expired(key->birthdate, ++ REJECT_AFTER_TIME))) ++ goto out_invalid; ++ ++ /* After we know we have a somewhat valid key, we now try to assign ++ * nonces to all of the packets in the queue. If we can't assign nonces ++ * for all of them, we just consider it a failure and wait for the next ++ * handshake. ++ */ ++ skb_queue_walk(&packets, skb) { ++ /* 0 for no outer TOS: no leak. TODO: at some later point, we ++ * might consider using flowi->tos as outer instead. ++ */ ++ PACKET_CB(skb)->ds = ip_tunnel_ecn_encap(0, ip_hdr(skb), skb); ++ PACKET_CB(skb)->nonce = ++ atomic64_inc_return(&key->counter.counter) - 1; ++ if (unlikely(PACKET_CB(skb)->nonce >= REJECT_AFTER_MESSAGES)) ++ goto out_invalid; ++ } ++ ++ packets.prev->next = NULL; ++ wg_peer_get(keypair->entry.peer); ++ PACKET_CB(packets.next)->keypair = keypair; ++ wg_packet_create_data(packets.next); ++ return; ++ ++out_invalid: ++ WRITE_ONCE(key->is_valid, false); ++out_nokey: ++ wg_noise_keypair_put(keypair, false); ++ ++ /* We orphan the packets if we're waiting on a handshake, so that they ++ * don't block a socket's pool. ++ */ ++ skb_queue_walk(&packets, skb) ++ skb_orphan(skb); ++ /* Then we put them back on the top of the queue. We're not too ++ * concerned about accidentally getting things a little out of order if ++ * packets are being added really fast, because this queue is for before ++ * packets can even be sent and it's small anyway. ++ */ ++ spin_lock_bh(&peer->staged_packet_queue.lock); ++ skb_queue_splice(&packets, &peer->staged_packet_queue); ++ spin_unlock_bh(&peer->staged_packet_queue.lock); ++ ++ /* If we're exiting because there's something wrong with the key, it ++ * means we should initiate a new handshake. ++ */ ++ wg_packet_send_queued_handshake_initiation(peer, false); ++} +--- /dev/null ++++ b/drivers/net/wireguard/socket.c +@@ -0,0 +1,437 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
++ */ ++ ++#include "device.h" ++#include "peer.h" ++#include "socket.h" ++#include "queueing.h" ++#include "messages.h" ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++static int send4(struct wg_device *wg, struct sk_buff *skb, ++ struct endpoint *endpoint, u8 ds, struct dst_cache *cache) ++{ ++ struct flowi4 fl = { ++ .saddr = endpoint->src4.s_addr, ++ .daddr = endpoint->addr4.sin_addr.s_addr, ++ .fl4_dport = endpoint->addr4.sin_port, ++ .flowi4_mark = wg->fwmark, ++ .flowi4_proto = IPPROTO_UDP ++ }; ++ struct rtable *rt = NULL; ++ struct sock *sock; ++ int ret = 0; ++ ++ skb_mark_not_on_list(skb); ++ skb->dev = wg->dev; ++ skb->mark = wg->fwmark; ++ ++ rcu_read_lock_bh(); ++ sock = rcu_dereference_bh(wg->sock4); ++ ++ if (unlikely(!sock)) { ++ ret = -ENONET; ++ goto err; ++ } ++ ++ fl.fl4_sport = inet_sk(sock)->inet_sport; ++ ++ if (cache) ++ rt = dst_cache_get_ip4(cache, &fl.saddr); ++ ++ if (!rt) { ++ security_sk_classify_flow(sock, flowi4_to_flowi(&fl)); ++ if (unlikely(!inet_confirm_addr(sock_net(sock), NULL, 0, ++ fl.saddr, RT_SCOPE_HOST))) { ++ endpoint->src4.s_addr = 0; ++ *(__force __be32 *)&endpoint->src_if4 = 0; ++ fl.saddr = 0; ++ if (cache) ++ dst_cache_reset(cache); ++ } ++ rt = ip_route_output_flow(sock_net(sock), &fl, sock); ++ if (unlikely(endpoint->src_if4 && ((IS_ERR(rt) && ++ PTR_ERR(rt) == -EINVAL) || (!IS_ERR(rt) && ++ rt->dst.dev->ifindex != endpoint->src_if4)))) { ++ endpoint->src4.s_addr = 0; ++ *(__force __be32 *)&endpoint->src_if4 = 0; ++ fl.saddr = 0; ++ if (cache) ++ dst_cache_reset(cache); ++ if (!IS_ERR(rt)) ++ ip_rt_put(rt); ++ rt = ip_route_output_flow(sock_net(sock), &fl, sock); ++ } ++ if (unlikely(IS_ERR(rt))) { ++ ret = PTR_ERR(rt); ++ net_dbg_ratelimited("%s: No route to %pISpfsc, error %d\n", ++ wg->dev->name, &endpoint->addr, ret); ++ goto err; ++ } else if (unlikely(rt->dst.dev == skb->dev)) { ++ ip_rt_put(rt); ++ ret = -ELOOP; ++ net_dbg_ratelimited("%s: Avoiding routing loop to %pISpfsc\n", ++ wg->dev->name, &endpoint->addr); ++ goto err; ++ } ++ if (cache) ++ dst_cache_set_ip4(cache, &rt->dst, fl.saddr); ++ } ++ ++ skb->ignore_df = 1; ++ udp_tunnel_xmit_skb(rt, sock, skb, fl.saddr, fl.daddr, ds, ++ ip4_dst_hoplimit(&rt->dst), 0, fl.fl4_sport, ++ fl.fl4_dport, false, false); ++ goto out; ++ ++err: ++ kfree_skb(skb); ++out: ++ rcu_read_unlock_bh(); ++ return ret; ++} ++ ++static int send6(struct wg_device *wg, struct sk_buff *skb, ++ struct endpoint *endpoint, u8 ds, struct dst_cache *cache) ++{ ++#if IS_ENABLED(CONFIG_IPV6) ++ struct flowi6 fl = { ++ .saddr = endpoint->src6, ++ .daddr = endpoint->addr6.sin6_addr, ++ .fl6_dport = endpoint->addr6.sin6_port, ++ .flowi6_mark = wg->fwmark, ++ .flowi6_oif = endpoint->addr6.sin6_scope_id, ++ .flowi6_proto = IPPROTO_UDP ++ /* TODO: addr->sin6_flowinfo */ ++ }; ++ struct dst_entry *dst = NULL; ++ struct sock *sock; ++ int ret = 0; ++ ++ skb_mark_not_on_list(skb); ++ skb->dev = wg->dev; ++ skb->mark = wg->fwmark; ++ ++ rcu_read_lock_bh(); ++ sock = rcu_dereference_bh(wg->sock6); ++ ++ if (unlikely(!sock)) { ++ ret = -ENONET; ++ goto err; ++ } ++ ++ fl.fl6_sport = inet_sk(sock)->inet_sport; ++ ++ if (cache) ++ dst = dst_cache_get_ip6(cache, &fl.saddr); ++ ++ if (!dst) { ++ security_sk_classify_flow(sock, flowi6_to_flowi(&fl)); ++ if (unlikely(!ipv6_addr_any(&fl.saddr) && ++ !ipv6_chk_addr(sock_net(sock), &fl.saddr, NULL, 0))) { ++ endpoint->src6 = fl.saddr = in6addr_any; ++ if (cache) ++ dst_cache_reset(cache); ++ } ++ dst = 
ipv6_stub->ipv6_dst_lookup_flow(sock_net(sock), sock, &fl, ++ NULL); ++ if (unlikely(IS_ERR(dst))) { ++ ret = PTR_ERR(dst); ++ net_dbg_ratelimited("%s: No route to %pISpfsc, error %d\n", ++ wg->dev->name, &endpoint->addr, ret); ++ goto err; ++ } else if (unlikely(dst->dev == skb->dev)) { ++ dst_release(dst); ++ ret = -ELOOP; ++ net_dbg_ratelimited("%s: Avoiding routing loop to %pISpfsc\n", ++ wg->dev->name, &endpoint->addr); ++ goto err; ++ } ++ if (cache) ++ dst_cache_set_ip6(cache, dst, &fl.saddr); ++ } ++ ++ skb->ignore_df = 1; ++ udp_tunnel6_xmit_skb(dst, sock, skb, skb->dev, &fl.saddr, &fl.daddr, ds, ++ ip6_dst_hoplimit(dst), 0, fl.fl6_sport, ++ fl.fl6_dport, false); ++ goto out; ++ ++err: ++ kfree_skb(skb); ++out: ++ rcu_read_unlock_bh(); ++ return ret; ++#else ++ return -EAFNOSUPPORT; ++#endif ++} ++ ++int wg_socket_send_skb_to_peer(struct wg_peer *peer, struct sk_buff *skb, u8 ds) ++{ ++ size_t skb_len = skb->len; ++ int ret = -EAFNOSUPPORT; ++ ++ read_lock_bh(&peer->endpoint_lock); ++ if (peer->endpoint.addr.sa_family == AF_INET) ++ ret = send4(peer->device, skb, &peer->endpoint, ds, ++ &peer->endpoint_cache); ++ else if (peer->endpoint.addr.sa_family == AF_INET6) ++ ret = send6(peer->device, skb, &peer->endpoint, ds, ++ &peer->endpoint_cache); ++ else ++ dev_kfree_skb(skb); ++ if (likely(!ret)) ++ peer->tx_bytes += skb_len; ++ read_unlock_bh(&peer->endpoint_lock); ++ ++ return ret; ++} ++ ++int wg_socket_send_buffer_to_peer(struct wg_peer *peer, void *buffer, ++ size_t len, u8 ds) ++{ ++ struct sk_buff *skb = alloc_skb(len + SKB_HEADER_LEN, GFP_ATOMIC); ++ ++ if (unlikely(!skb)) ++ return -ENOMEM; ++ ++ skb_reserve(skb, SKB_HEADER_LEN); ++ skb_set_inner_network_header(skb, 0); ++ skb_put_data(skb, buffer, len); ++ return wg_socket_send_skb_to_peer(peer, skb, ds); ++} ++ ++int wg_socket_send_buffer_as_reply_to_skb(struct wg_device *wg, ++ struct sk_buff *in_skb, void *buffer, ++ size_t len) ++{ ++ int ret = 0; ++ struct sk_buff *skb; ++ struct endpoint endpoint; ++ ++ if (unlikely(!in_skb)) ++ return -EINVAL; ++ ret = wg_socket_endpoint_from_skb(&endpoint, in_skb); ++ if (unlikely(ret < 0)) ++ return ret; ++ ++ skb = alloc_skb(len + SKB_HEADER_LEN, GFP_ATOMIC); ++ if (unlikely(!skb)) ++ return -ENOMEM; ++ skb_reserve(skb, SKB_HEADER_LEN); ++ skb_set_inner_network_header(skb, 0); ++ skb_put_data(skb, buffer, len); ++ ++ if (endpoint.addr.sa_family == AF_INET) ++ ret = send4(wg, skb, &endpoint, 0, NULL); ++ else if (endpoint.addr.sa_family == AF_INET6) ++ ret = send6(wg, skb, &endpoint, 0, NULL); ++ /* No other possibilities if the endpoint is valid, which it is, ++ * as we checked above. 
++ */ ++ ++ return ret; ++} ++ ++int wg_socket_endpoint_from_skb(struct endpoint *endpoint, ++ const struct sk_buff *skb) ++{ ++ memset(endpoint, 0, sizeof(*endpoint)); ++ if (skb->protocol == htons(ETH_P_IP)) { ++ endpoint->addr4.sin_family = AF_INET; ++ endpoint->addr4.sin_port = udp_hdr(skb)->source; ++ endpoint->addr4.sin_addr.s_addr = ip_hdr(skb)->saddr; ++ endpoint->src4.s_addr = ip_hdr(skb)->daddr; ++ endpoint->src_if4 = skb->skb_iif; ++ } else if (skb->protocol == htons(ETH_P_IPV6)) { ++ endpoint->addr6.sin6_family = AF_INET6; ++ endpoint->addr6.sin6_port = udp_hdr(skb)->source; ++ endpoint->addr6.sin6_addr = ipv6_hdr(skb)->saddr; ++ endpoint->addr6.sin6_scope_id = ipv6_iface_scope_id( ++ &ipv6_hdr(skb)->saddr, skb->skb_iif); ++ endpoint->src6 = ipv6_hdr(skb)->daddr; ++ } else { ++ return -EINVAL; ++ } ++ return 0; ++} ++ ++static bool endpoint_eq(const struct endpoint *a, const struct endpoint *b) ++{ ++ return (a->addr.sa_family == AF_INET && b->addr.sa_family == AF_INET && ++ a->addr4.sin_port == b->addr4.sin_port && ++ a->addr4.sin_addr.s_addr == b->addr4.sin_addr.s_addr && ++ a->src4.s_addr == b->src4.s_addr && a->src_if4 == b->src_if4) || ++ (a->addr.sa_family == AF_INET6 && ++ b->addr.sa_family == AF_INET6 && ++ a->addr6.sin6_port == b->addr6.sin6_port && ++ ipv6_addr_equal(&a->addr6.sin6_addr, &b->addr6.sin6_addr) && ++ a->addr6.sin6_scope_id == b->addr6.sin6_scope_id && ++ ipv6_addr_equal(&a->src6, &b->src6)) || ++ unlikely(!a->addr.sa_family && !b->addr.sa_family); ++} ++ ++void wg_socket_set_peer_endpoint(struct wg_peer *peer, ++ const struct endpoint *endpoint) ++{ ++ /* First we check unlocked, in order to optimize, since it's pretty rare ++ * that an endpoint will change. If we happen to be mid-write, and two ++ * CPUs wind up writing the same thing or something slightly different, ++ * it doesn't really matter much either. 
++ */ ++ if (endpoint_eq(endpoint, &peer->endpoint)) ++ return; ++ write_lock_bh(&peer->endpoint_lock); ++ if (endpoint->addr.sa_family == AF_INET) { ++ peer->endpoint.addr4 = endpoint->addr4; ++ peer->endpoint.src4 = endpoint->src4; ++ peer->endpoint.src_if4 = endpoint->src_if4; ++ } else if (endpoint->addr.sa_family == AF_INET6) { ++ peer->endpoint.addr6 = endpoint->addr6; ++ peer->endpoint.src6 = endpoint->src6; ++ } else { ++ goto out; ++ } ++ dst_cache_reset(&peer->endpoint_cache); ++out: ++ write_unlock_bh(&peer->endpoint_lock); ++} ++ ++void wg_socket_set_peer_endpoint_from_skb(struct wg_peer *peer, ++ const struct sk_buff *skb) ++{ ++ struct endpoint endpoint; ++ ++ if (!wg_socket_endpoint_from_skb(&endpoint, skb)) ++ wg_socket_set_peer_endpoint(peer, &endpoint); ++} ++ ++void wg_socket_clear_peer_endpoint_src(struct wg_peer *peer) ++{ ++ write_lock_bh(&peer->endpoint_lock); ++ memset(&peer->endpoint.src6, 0, sizeof(peer->endpoint.src6)); ++ dst_cache_reset(&peer->endpoint_cache); ++ write_unlock_bh(&peer->endpoint_lock); ++} ++ ++static int wg_receive(struct sock *sk, struct sk_buff *skb) ++{ ++ struct wg_device *wg; ++ ++ if (unlikely(!sk)) ++ goto err; ++ wg = sk->sk_user_data; ++ if (unlikely(!wg)) ++ goto err; ++ wg_packet_receive(wg, skb); ++ return 0; ++ ++err: ++ kfree_skb(skb); ++ return 0; ++} ++ ++static void sock_free(struct sock *sock) ++{ ++ if (unlikely(!sock)) ++ return; ++ sk_clear_memalloc(sock); ++ udp_tunnel_sock_release(sock->sk_socket); ++} ++ ++static void set_sock_opts(struct socket *sock) ++{ ++ sock->sk->sk_allocation = GFP_ATOMIC; ++ sock->sk->sk_sndbuf = INT_MAX; ++ sk_set_memalloc(sock->sk); ++} ++ ++int wg_socket_init(struct wg_device *wg, u16 port) ++{ ++ int ret; ++ struct udp_tunnel_sock_cfg cfg = { ++ .sk_user_data = wg, ++ .encap_type = 1, ++ .encap_rcv = wg_receive ++ }; ++ struct socket *new4 = NULL, *new6 = NULL; ++ struct udp_port_cfg port4 = { ++ .family = AF_INET, ++ .local_ip.s_addr = htonl(INADDR_ANY), ++ .local_udp_port = htons(port), ++ .use_udp_checksums = true ++ }; ++#if IS_ENABLED(CONFIG_IPV6) ++ int retries = 0; ++ struct udp_port_cfg port6 = { ++ .family = AF_INET6, ++ .local_ip6 = IN6ADDR_ANY_INIT, ++ .use_udp6_tx_checksums = true, ++ .use_udp6_rx_checksums = true, ++ .ipv6_v6only = true ++ }; ++#endif ++ ++#if IS_ENABLED(CONFIG_IPV6) ++retry: ++#endif ++ ++ ret = udp_sock_create(wg->creating_net, &port4, &new4); ++ if (ret < 0) { ++ pr_err("%s: Could not create IPv4 socket\n", wg->dev->name); ++ return ret; ++ } ++ set_sock_opts(new4); ++ setup_udp_tunnel_sock(wg->creating_net, new4, &cfg); ++ ++#if IS_ENABLED(CONFIG_IPV6) ++ if (ipv6_mod_enabled()) { ++ port6.local_udp_port = inet_sk(new4->sk)->inet_sport; ++ ret = udp_sock_create(wg->creating_net, &port6, &new6); ++ if (ret < 0) { ++ udp_tunnel_sock_release(new4); ++ if (ret == -EADDRINUSE && !port && retries++ < 100) ++ goto retry; ++ pr_err("%s: Could not create IPv6 socket\n", ++ wg->dev->name); ++ return ret; ++ } ++ set_sock_opts(new6); ++ setup_udp_tunnel_sock(wg->creating_net, new6, &cfg); ++ } ++#endif ++ ++ wg_socket_reinit(wg, new4->sk, new6 ? 
new6->sk : NULL); ++ return 0; ++} ++ ++void wg_socket_reinit(struct wg_device *wg, struct sock *new4, ++ struct sock *new6) ++{ ++ struct sock *old4, *old6; ++ ++ mutex_lock(&wg->socket_update_lock); ++ old4 = rcu_dereference_protected(wg->sock4, ++ lockdep_is_held(&wg->socket_update_lock)); ++ old6 = rcu_dereference_protected(wg->sock6, ++ lockdep_is_held(&wg->socket_update_lock)); ++ rcu_assign_pointer(wg->sock4, new4); ++ rcu_assign_pointer(wg->sock6, new6); ++ if (new4) ++ wg->incoming_port = ntohs(inet_sk(new4)->inet_sport); ++ mutex_unlock(&wg->socket_update_lock); ++ synchronize_rcu(); ++ synchronize_net(); ++ sock_free(old4); ++ sock_free(old6); ++} +--- /dev/null ++++ b/drivers/net/wireguard/socket.h +@@ -0,0 +1,44 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* ++ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. ++ */ ++ ++#ifndef _WG_SOCKET_H ++#define _WG_SOCKET_H ++ ++#include ++#include ++#include ++#include ++ ++int wg_socket_init(struct wg_device *wg, u16 port); ++void wg_socket_reinit(struct wg_device *wg, struct sock *new4, ++ struct sock *new6); ++int wg_socket_send_buffer_to_peer(struct wg_peer *peer, void *data, ++ size_t len, u8 ds); ++int wg_socket_send_skb_to_peer(struct wg_peer *peer, struct sk_buff *skb, ++ u8 ds); ++int wg_socket_send_buffer_as_reply_to_skb(struct wg_device *wg, ++ struct sk_buff *in_skb, ++ void *out_buffer, size_t len); ++ ++int wg_socket_endpoint_from_skb(struct endpoint *endpoint, ++ const struct sk_buff *skb); ++void wg_socket_set_peer_endpoint(struct wg_peer *peer, ++ const struct endpoint *endpoint); ++void wg_socket_set_peer_endpoint_from_skb(struct wg_peer *peer, ++ const struct sk_buff *skb); ++void wg_socket_clear_peer_endpoint_src(struct wg_peer *peer); ++ ++#if defined(CONFIG_DYNAMIC_DEBUG) || defined(DEBUG) ++#define net_dbg_skb_ratelimited(fmt, dev, skb, ...) do { \ ++ struct endpoint __endpoint; \ ++ wg_socket_endpoint_from_skb(&__endpoint, skb); \ ++ net_dbg_ratelimited(fmt, dev, &__endpoint.addr, \ ++ ##__VA_ARGS__); \ ++ } while (0) ++#else ++#define net_dbg_skb_ratelimited(fmt, skb, ...) ++#endif ++ ++#endif /* _WG_SOCKET_H */ +--- /dev/null ++++ b/drivers/net/wireguard/timers.c +@@ -0,0 +1,243 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. ++ */ ++ ++#include "timers.h" ++#include "device.h" ++#include "peer.h" ++#include "queueing.h" ++#include "socket.h" ++ ++/* ++ * - Timer for retransmitting the handshake if we don't hear back after ++ * `REKEY_TIMEOUT + jitter` ms. ++ * ++ * - Timer for sending empty packet if we have received a packet but after have ++ * not sent one for `KEEPALIVE_TIMEOUT` ms. ++ * ++ * - Timer for initiating new handshake if we have sent a packet but after have ++ * not received one (even empty) for `(KEEPALIVE_TIMEOUT + REKEY_TIMEOUT) + ++ * jitter` ms. ++ * ++ * - Timer for zeroing out all ephemeral keys after `(REJECT_AFTER_TIME * 3)` ms ++ * if no new keys have been received. ++ * ++ * - Timer for, if enabled, sending an empty authenticated packet every user- ++ * specified seconds. 
++ */ ++ ++static inline void mod_peer_timer(struct wg_peer *peer, ++ struct timer_list *timer, ++ unsigned long expires) ++{ ++ rcu_read_lock_bh(); ++ if (likely(netif_running(peer->device->dev) && ++ !READ_ONCE(peer->is_dead))) ++ mod_timer(timer, expires); ++ rcu_read_unlock_bh(); ++} ++ ++static void wg_expired_retransmit_handshake(struct timer_list *timer) ++{ ++ struct wg_peer *peer = from_timer(peer, timer, ++ timer_retransmit_handshake); ++ ++ if (peer->timer_handshake_attempts > MAX_TIMER_HANDSHAKES) { ++ pr_debug("%s: Handshake for peer %llu (%pISpfsc) did not complete after %d attempts, giving up\n", ++ peer->device->dev->name, peer->internal_id, ++ &peer->endpoint.addr, MAX_TIMER_HANDSHAKES + 2); ++ ++ del_timer(&peer->timer_send_keepalive); ++ /* We drop all packets without a keypair and don't try again, ++ * if we try unsuccessfully for too long to make a handshake. ++ */ ++ wg_packet_purge_staged_packets(peer); ++ ++ /* We set a timer for destroying any residue that might be left ++ * of a partial exchange. ++ */ ++ if (!timer_pending(&peer->timer_zero_key_material)) ++ mod_peer_timer(peer, &peer->timer_zero_key_material, ++ jiffies + REJECT_AFTER_TIME * 3 * HZ); ++ } else { ++ ++peer->timer_handshake_attempts; ++ pr_debug("%s: Handshake for peer %llu (%pISpfsc) did not complete after %d seconds, retrying (try %d)\n", ++ peer->device->dev->name, peer->internal_id, ++ &peer->endpoint.addr, REKEY_TIMEOUT, ++ peer->timer_handshake_attempts + 1); ++ ++ /* We clear the endpoint address src address, in case this is ++ * the cause of trouble. ++ */ ++ wg_socket_clear_peer_endpoint_src(peer); ++ ++ wg_packet_send_queued_handshake_initiation(peer, true); ++ } ++} ++ ++static void wg_expired_send_keepalive(struct timer_list *timer) ++{ ++ struct wg_peer *peer = from_timer(peer, timer, timer_send_keepalive); ++ ++ wg_packet_send_keepalive(peer); ++ if (peer->timer_need_another_keepalive) { ++ peer->timer_need_another_keepalive = false; ++ mod_peer_timer(peer, &peer->timer_send_keepalive, ++ jiffies + KEEPALIVE_TIMEOUT * HZ); ++ } ++} ++ ++static void wg_expired_new_handshake(struct timer_list *timer) ++{ ++ struct wg_peer *peer = from_timer(peer, timer, timer_new_handshake); ++ ++ pr_debug("%s: Retrying handshake with peer %llu (%pISpfsc) because we stopped hearing back after %d seconds\n", ++ peer->device->dev->name, peer->internal_id, ++ &peer->endpoint.addr, KEEPALIVE_TIMEOUT + REKEY_TIMEOUT); ++ /* We clear the endpoint address src address, in case this is the cause ++ * of trouble. ++ */ ++ wg_socket_clear_peer_endpoint_src(peer); ++ wg_packet_send_queued_handshake_initiation(peer, false); ++} ++ ++static void wg_expired_zero_key_material(struct timer_list *timer) ++{ ++ struct wg_peer *peer = from_timer(peer, timer, timer_zero_key_material); ++ ++ rcu_read_lock_bh(); ++ if (!READ_ONCE(peer->is_dead)) { ++ wg_peer_get(peer); ++ if (!queue_work(peer->device->handshake_send_wq, ++ &peer->clear_peer_work)) ++ /* If the work was already on the queue, we want to drop ++ * the extra reference. 
++ */ ++ wg_peer_put(peer); ++ } ++ rcu_read_unlock_bh(); ++} ++ ++static void wg_queued_expired_zero_key_material(struct work_struct *work) ++{ ++ struct wg_peer *peer = container_of(work, struct wg_peer, ++ clear_peer_work); ++ ++ pr_debug("%s: Zeroing out all keys for peer %llu (%pISpfsc), since we haven't received a new one in %d seconds\n", ++ peer->device->dev->name, peer->internal_id, ++ &peer->endpoint.addr, REJECT_AFTER_TIME * 3); ++ wg_noise_handshake_clear(&peer->handshake); ++ wg_noise_keypairs_clear(&peer->keypairs); ++ wg_peer_put(peer); ++} ++ ++static void wg_expired_send_persistent_keepalive(struct timer_list *timer) ++{ ++ struct wg_peer *peer = from_timer(peer, timer, ++ timer_persistent_keepalive); ++ ++ if (likely(peer->persistent_keepalive_interval)) ++ wg_packet_send_keepalive(peer); ++} ++ ++/* Should be called after an authenticated data packet is sent. */ ++void wg_timers_data_sent(struct wg_peer *peer) ++{ ++ if (!timer_pending(&peer->timer_new_handshake)) ++ mod_peer_timer(peer, &peer->timer_new_handshake, ++ jiffies + (KEEPALIVE_TIMEOUT + REKEY_TIMEOUT) * HZ + ++ prandom_u32_max(REKEY_TIMEOUT_JITTER_MAX_JIFFIES)); ++} ++ ++/* Should be called after an authenticated data packet is received. */ ++void wg_timers_data_received(struct wg_peer *peer) ++{ ++ if (likely(netif_running(peer->device->dev))) { ++ if (!timer_pending(&peer->timer_send_keepalive)) ++ mod_peer_timer(peer, &peer->timer_send_keepalive, ++ jiffies + KEEPALIVE_TIMEOUT * HZ); ++ else ++ peer->timer_need_another_keepalive = true; ++ } ++} ++ ++/* Should be called after any type of authenticated packet is sent, whether ++ * keepalive, data, or handshake. ++ */ ++void wg_timers_any_authenticated_packet_sent(struct wg_peer *peer) ++{ ++ del_timer(&peer->timer_send_keepalive); ++} ++ ++/* Should be called after any type of authenticated packet is received, whether ++ * keepalive, data, or handshake. ++ */ ++void wg_timers_any_authenticated_packet_received(struct wg_peer *peer) ++{ ++ del_timer(&peer->timer_new_handshake); ++} ++ ++/* Should be called after a handshake initiation message is sent. */ ++void wg_timers_handshake_initiated(struct wg_peer *peer) ++{ ++ mod_peer_timer(peer, &peer->timer_retransmit_handshake, ++ jiffies + REKEY_TIMEOUT * HZ + ++ prandom_u32_max(REKEY_TIMEOUT_JITTER_MAX_JIFFIES)); ++} ++ ++/* Should be called after a handshake response message is received and processed ++ * or when getting key confirmation via the first data message. ++ */ ++void wg_timers_handshake_complete(struct wg_peer *peer) ++{ ++ del_timer(&peer->timer_retransmit_handshake); ++ peer->timer_handshake_attempts = 0; ++ peer->sent_lastminute_handshake = false; ++ ktime_get_real_ts64(&peer->walltime_last_handshake); ++} ++ ++/* Should be called after an ephemeral key is created, which is before sending a ++ * handshake response or after receiving a handshake response. ++ */ ++void wg_timers_session_derived(struct wg_peer *peer) ++{ ++ mod_peer_timer(peer, &peer->timer_zero_key_material, ++ jiffies + REJECT_AFTER_TIME * 3 * HZ); ++} ++ ++/* Should be called before a packet with authentication, whether ++ * keepalive, data, or handshakem is sent, or after one is received. 
++ */ ++void wg_timers_any_authenticated_packet_traversal(struct wg_peer *peer) ++{ ++ if (peer->persistent_keepalive_interval) ++ mod_peer_timer(peer, &peer->timer_persistent_keepalive, ++ jiffies + peer->persistent_keepalive_interval * HZ); ++} ++ ++void wg_timers_init(struct wg_peer *peer) ++{ ++ timer_setup(&peer->timer_retransmit_handshake, ++ wg_expired_retransmit_handshake, 0); ++ timer_setup(&peer->timer_send_keepalive, wg_expired_send_keepalive, 0); ++ timer_setup(&peer->timer_new_handshake, wg_expired_new_handshake, 0); ++ timer_setup(&peer->timer_zero_key_material, ++ wg_expired_zero_key_material, 0); ++ timer_setup(&peer->timer_persistent_keepalive, ++ wg_expired_send_persistent_keepalive, 0); ++ INIT_WORK(&peer->clear_peer_work, wg_queued_expired_zero_key_material); ++ peer->timer_handshake_attempts = 0; ++ peer->sent_lastminute_handshake = false; ++ peer->timer_need_another_keepalive = false; ++} ++ ++void wg_timers_stop(struct wg_peer *peer) ++{ ++ del_timer_sync(&peer->timer_retransmit_handshake); ++ del_timer_sync(&peer->timer_send_keepalive); ++ del_timer_sync(&peer->timer_new_handshake); ++ del_timer_sync(&peer->timer_zero_key_material); ++ del_timer_sync(&peer->timer_persistent_keepalive); ++ flush_work(&peer->clear_peer_work); ++} +--- /dev/null ++++ b/drivers/net/wireguard/timers.h +@@ -0,0 +1,31 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* ++ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. ++ */ ++ ++#ifndef _WG_TIMERS_H ++#define _WG_TIMERS_H ++ ++#include ++ ++struct wg_peer; ++ ++void wg_timers_init(struct wg_peer *peer); ++void wg_timers_stop(struct wg_peer *peer); ++void wg_timers_data_sent(struct wg_peer *peer); ++void wg_timers_data_received(struct wg_peer *peer); ++void wg_timers_any_authenticated_packet_sent(struct wg_peer *peer); ++void wg_timers_any_authenticated_packet_received(struct wg_peer *peer); ++void wg_timers_handshake_initiated(struct wg_peer *peer); ++void wg_timers_handshake_complete(struct wg_peer *peer); ++void wg_timers_session_derived(struct wg_peer *peer); ++void wg_timers_any_authenticated_packet_traversal(struct wg_peer *peer); ++ ++static inline bool wg_birthdate_has_expired(u64 birthday_nanoseconds, ++ u64 expiration_seconds) ++{ ++ return (s64)(birthday_nanoseconds + expiration_seconds * NSEC_PER_SEC) ++ <= (s64)ktime_get_coarse_boottime_ns(); ++} ++ ++#endif /* _WG_TIMERS_H */ +--- /dev/null ++++ b/drivers/net/wireguard/version.h +@@ -0,0 +1 @@ ++#define WIREGUARD_VERSION "1.0.0" +--- /dev/null ++++ b/include/uapi/linux/wireguard.h +@@ -0,0 +1,196 @@ ++/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR MIT */ ++/* ++ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. ++ * ++ * Documentation ++ * ============= ++ * ++ * The below enums and macros are for interfacing with WireGuard, using generic ++ * netlink, with family WG_GENL_NAME and version WG_GENL_VERSION. It defines two ++ * methods: get and set. Note that while they share many common attributes, ++ * these two functions actually accept a slightly different set of inputs and ++ * outputs. ++ * ++ * WG_CMD_GET_DEVICE ++ * ----------------- ++ * ++ * May only be called via NLM_F_REQUEST | NLM_F_DUMP. 
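As a side note on the wg_birthdate_has_expired() helper introduced in timers.h above: the cast-to-signed comparison is what keeps the deadline check correct even if the nanosecond addition wraps. The following is a minimal userspace sketch of the same idea, purely for illustration; it substitutes CLOCK_MONOTONIC for the kernel's coarse boottime clock, and the 180-second lifetime is an arbitrary example value, not a constant from the patch.

/* Userspace sketch of the signed-difference expiry check from
 * wg_birthdate_has_expired().  CLOCK_MONOTONIC stands in for the kernel's
 * coarse boottime clock here -- an assumption made only for this example.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define NSEC_PER_SEC 1000000000ULL

static uint64_t now_ns(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * NSEC_PER_SEC + (uint64_t)ts.tv_nsec;
}

static bool birthdate_has_expired(uint64_t birthdate_ns, uint64_t expiration_s)
{
        /* Casting to signed keeps the comparison sane even if the
         * addition wraps around the unsigned 64-bit range.
         */
        return (int64_t)(birthdate_ns + expiration_s * NSEC_PER_SEC) <=
               (int64_t)now_ns();
}

int main(void)
{
        uint64_t birthdate = now_ns();

        /* A key minted "now" is not yet expired for a 180s lifetime... */
        printf("expired (180s): %d\n", birthdate_has_expired(birthdate, 180));
        /* ...but is immediately expired for a 0s lifetime. */
        printf("expired (0s):   %d\n", birthdate_has_expired(birthdate, 0));
        return 0;
}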
The command should contain ++ * one but not both of: ++ * ++ * WGDEVICE_A_IFINDEX: NLA_U32 ++ * WGDEVICE_A_IFNAME: NLA_NUL_STRING, maxlen IFNAMESIZ - 1 ++ * ++ * The kernel will then return several messages (NLM_F_MULTI) containing the ++ * following tree of nested items: ++ * ++ * WGDEVICE_A_IFINDEX: NLA_U32 ++ * WGDEVICE_A_IFNAME: NLA_NUL_STRING, maxlen IFNAMESIZ - 1 ++ * WGDEVICE_A_PRIVATE_KEY: NLA_EXACT_LEN, len WG_KEY_LEN ++ * WGDEVICE_A_PUBLIC_KEY: NLA_EXACT_LEN, len WG_KEY_LEN ++ * WGDEVICE_A_LISTEN_PORT: NLA_U16 ++ * WGDEVICE_A_FWMARK: NLA_U32 ++ * WGDEVICE_A_PEERS: NLA_NESTED ++ * 0: NLA_NESTED ++ * WGPEER_A_PUBLIC_KEY: NLA_EXACT_LEN, len WG_KEY_LEN ++ * WGPEER_A_PRESHARED_KEY: NLA_EXACT_LEN, len WG_KEY_LEN ++ * WGPEER_A_ENDPOINT: NLA_MIN_LEN(struct sockaddr), struct sockaddr_in or struct sockaddr_in6 ++ * WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL: NLA_U16 ++ * WGPEER_A_LAST_HANDSHAKE_TIME: NLA_EXACT_LEN, struct __kernel_timespec ++ * WGPEER_A_RX_BYTES: NLA_U64 ++ * WGPEER_A_TX_BYTES: NLA_U64 ++ * WGPEER_A_ALLOWEDIPS: NLA_NESTED ++ * 0: NLA_NESTED ++ * WGALLOWEDIP_A_FAMILY: NLA_U16 ++ * WGALLOWEDIP_A_IPADDR: NLA_MIN_LEN(struct in_addr), struct in_addr or struct in6_addr ++ * WGALLOWEDIP_A_CIDR_MASK: NLA_U8 ++ * 0: NLA_NESTED ++ * ... ++ * 0: NLA_NESTED ++ * ... ++ * ... ++ * WGPEER_A_PROTOCOL_VERSION: NLA_U32 ++ * 0: NLA_NESTED ++ * ... ++ * ... ++ * ++ * It is possible that all of the allowed IPs of a single peer will not ++ * fit within a single netlink message. In that case, the same peer will ++ * be written in the following message, except it will only contain ++ * WGPEER_A_PUBLIC_KEY and WGPEER_A_ALLOWEDIPS. This may occur several ++ * times in a row for the same peer. It is then up to the receiver to ++ * coalesce adjacent peers. Likewise, it is possible that all peers will ++ * not fit within a single message. So, subsequent peers will be sent ++ * in following messages, except those will only contain WGDEVICE_A_IFNAME ++ * and WGDEVICE_A_PEERS. It is then up to the receiver to coalesce these ++ * messages to form the complete list of peers. ++ * ++ * Since this is an NLA_F_DUMP command, the final message will always be ++ * NLMSG_DONE, even if an error occurs. However, this NLMSG_DONE message ++ * contains an integer error code. It is either zero or a negative error ++ * code corresponding to the errno. ++ * ++ * WG_CMD_SET_DEVICE ++ * ----------------- ++ * ++ * May only be called via NLM_F_REQUEST. The command should contain the ++ * following tree of nested items, containing one but not both of ++ * WGDEVICE_A_IFINDEX and WGDEVICE_A_IFNAME: ++ * ++ * WGDEVICE_A_IFINDEX: NLA_U32 ++ * WGDEVICE_A_IFNAME: NLA_NUL_STRING, maxlen IFNAMESIZ - 1 ++ * WGDEVICE_A_FLAGS: NLA_U32, 0 or WGDEVICE_F_REPLACE_PEERS if all current ++ * peers should be removed prior to adding the list below. ++ * WGDEVICE_A_PRIVATE_KEY: len WG_KEY_LEN, all zeros to remove ++ * WGDEVICE_A_LISTEN_PORT: NLA_U16, 0 to choose randomly ++ * WGDEVICE_A_FWMARK: NLA_U32, 0 to disable ++ * WGDEVICE_A_PEERS: NLA_NESTED ++ * 0: NLA_NESTED ++ * WGPEER_A_PUBLIC_KEY: len WG_KEY_LEN ++ * WGPEER_A_FLAGS: NLA_U32, 0 and/or WGPEER_F_REMOVE_ME if the ++ * specified peer should not exist at the end of the ++ * operation, rather than added/updated and/or ++ * WGPEER_F_REPLACE_ALLOWEDIPS if all current allowed ++ * IPs of this peer should be removed prior to adding ++ * the list below and/or WGPEER_F_UPDATE_ONLY if the ++ * peer should only be set if it already exists. 
++ * WGPEER_A_PRESHARED_KEY: len WG_KEY_LEN, all zeros to remove ++ * WGPEER_A_ENDPOINT: struct sockaddr_in or struct sockaddr_in6 ++ * WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL: NLA_U16, 0 to disable ++ * WGPEER_A_ALLOWEDIPS: NLA_NESTED ++ * 0: NLA_NESTED ++ * WGALLOWEDIP_A_FAMILY: NLA_U16 ++ * WGALLOWEDIP_A_IPADDR: struct in_addr or struct in6_addr ++ * WGALLOWEDIP_A_CIDR_MASK: NLA_U8 ++ * 0: NLA_NESTED ++ * ... ++ * 0: NLA_NESTED ++ * ... ++ * ... ++ * WGPEER_A_PROTOCOL_VERSION: NLA_U32, should not be set or used at ++ * all by most users of this API, as the ++ * most recent protocol will be used when ++ * this is unset. Otherwise, must be set ++ * to 1. ++ * 0: NLA_NESTED ++ * ... ++ * ... ++ * ++ * It is possible that the amount of configuration data exceeds that of ++ * the maximum message length accepted by the kernel. In that case, several ++ * messages should be sent one after another, with each successive one ++ * filling in information not contained in the prior. Note that if ++ * WGDEVICE_F_REPLACE_PEERS is specified in the first message, it probably ++ * should not be specified in fragments that come after, so that the list ++ * of peers is only cleared the first time but appened after. Likewise for ++ * peers, if WGPEER_F_REPLACE_ALLOWEDIPS is specified in the first message ++ * of a peer, it likely should not be specified in subsequent fragments. ++ * ++ * If an error occurs, NLMSG_ERROR will reply containing an errno. ++ */ ++ ++#ifndef _WG_UAPI_WIREGUARD_H ++#define _WG_UAPI_WIREGUARD_H ++ ++#define WG_GENL_NAME "wireguard" ++#define WG_GENL_VERSION 1 ++ ++#define WG_KEY_LEN 32 ++ ++enum wg_cmd { ++ WG_CMD_GET_DEVICE, ++ WG_CMD_SET_DEVICE, ++ __WG_CMD_MAX ++}; ++#define WG_CMD_MAX (__WG_CMD_MAX - 1) ++ ++enum wgdevice_flag { ++ WGDEVICE_F_REPLACE_PEERS = 1U << 0, ++ __WGDEVICE_F_ALL = WGDEVICE_F_REPLACE_PEERS ++}; ++enum wgdevice_attribute { ++ WGDEVICE_A_UNSPEC, ++ WGDEVICE_A_IFINDEX, ++ WGDEVICE_A_IFNAME, ++ WGDEVICE_A_PRIVATE_KEY, ++ WGDEVICE_A_PUBLIC_KEY, ++ WGDEVICE_A_FLAGS, ++ WGDEVICE_A_LISTEN_PORT, ++ WGDEVICE_A_FWMARK, ++ WGDEVICE_A_PEERS, ++ __WGDEVICE_A_LAST ++}; ++#define WGDEVICE_A_MAX (__WGDEVICE_A_LAST - 1) ++ ++enum wgpeer_flag { ++ WGPEER_F_REMOVE_ME = 1U << 0, ++ WGPEER_F_REPLACE_ALLOWEDIPS = 1U << 1, ++ WGPEER_F_UPDATE_ONLY = 1U << 2, ++ __WGPEER_F_ALL = WGPEER_F_REMOVE_ME | WGPEER_F_REPLACE_ALLOWEDIPS | ++ WGPEER_F_UPDATE_ONLY ++}; ++enum wgpeer_attribute { ++ WGPEER_A_UNSPEC, ++ WGPEER_A_PUBLIC_KEY, ++ WGPEER_A_PRESHARED_KEY, ++ WGPEER_A_FLAGS, ++ WGPEER_A_ENDPOINT, ++ WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL, ++ WGPEER_A_LAST_HANDSHAKE_TIME, ++ WGPEER_A_RX_BYTES, ++ WGPEER_A_TX_BYTES, ++ WGPEER_A_ALLOWEDIPS, ++ WGPEER_A_PROTOCOL_VERSION, ++ __WGPEER_A_LAST ++}; ++#define WGPEER_A_MAX (__WGPEER_A_LAST - 1) ++ ++enum wgallowedip_attribute { ++ WGALLOWEDIP_A_UNSPEC, ++ WGALLOWEDIP_A_FAMILY, ++ WGALLOWEDIP_A_IPADDR, ++ WGALLOWEDIP_A_CIDR_MASK, ++ __WGALLOWEDIP_A_LAST ++}; ++#define WGALLOWEDIP_A_MAX (__WGALLOWEDIP_A_LAST - 1) ++ ++#endif /* _WG_UAPI_WIREGUARD_H */ +--- /dev/null ++++ b/tools/testing/selftests/wireguard/netns.sh +@@ -0,0 +1,537 @@ ++#!/bin/bash ++# SPDX-License-Identifier: GPL-2.0 ++# ++# Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
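To make the generic-netlink documentation in the uapi header above a little more concrete, here is a minimal userspace sketch of issuing the WG_CMD_GET_DEVICE dump it describes. It assumes libnl-genl-3 is available and that the header from this patch is installed as <linux/wireguard.h>; the interface name "wg0" and the omission of error handling and of full attribute parsing are simplifications for illustration only, not part of the patch.

/* Sketch: resolve the "wireguard" genl family and dump one device.
 * Build (assumption): cc wg-get.c $(pkg-config --cflags --libs libnl-genl-3.0)
 */
#include <stdio.h>
#include <linux/wireguard.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>

static int dump_cb(struct nl_msg *msg, void *arg)
{
        struct nlattr *attrs[WGDEVICE_A_MAX + 1];

        /* Each NLM_F_MULTI part carries the nested device tree. */
        genlmsg_parse(nlmsg_hdr(msg), 0, attrs, WGDEVICE_A_MAX, NULL);
        if (attrs[WGDEVICE_A_IFNAME])
                printf("device: %s\n", nla_get_string(attrs[WGDEVICE_A_IFNAME]));
        if (attrs[WGDEVICE_A_LISTEN_PORT])
                printf("listen port: %u\n",
                       (unsigned)nla_get_u16(attrs[WGDEVICE_A_LISTEN_PORT]));
        return NL_OK;
}

int main(void)
{
        struct nl_sock *sk = nl_socket_alloc();
        struct nl_msg *msg = nlmsg_alloc();
        int family;

        genl_connect(sk);
        family = genl_ctrl_resolve(sk, WG_GENL_NAME);

        /* NLM_F_DUMP is mandatory for WG_CMD_GET_DEVICE, per the comment above. */
        genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, NLM_F_DUMP,
                    WG_CMD_GET_DEVICE, WG_GENL_VERSION);
        nla_put_string(msg, WGDEVICE_A_IFNAME, "wg0"); /* example name only */

        nl_socket_modify_cb(sk, NL_CB_VALID, NL_CB_CUSTOM, dump_cb, NULL);
        nl_send_auto(sk, msg);
        nl_recvmsgs_default(sk);

        nlmsg_free(msg);
        nl_socket_free(sk);
        return 0;
}

A real consumer would also walk WGDEVICE_A_PEERS and coalesce peers split across multiple dump messages, as the header's documentation explains.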
++# ++# This script tests the below topology: ++# ++# ┌─────────────────────┐ ┌──────────────────────────────────┐ ┌─────────────────────┐ ++# │ $ns1 namespace │ │ $ns0 namespace │ │ $ns2 namespace │ ++# │ │ │ │ │ │ ++# │┌────────┐ │ │ ┌────────┐ │ │ ┌────────┐│ ++# ││ wg0 │───────────┼───┼────────────│ lo │────────────┼───┼───────────│ wg0 ││ ++# │├────────┴──────────┐│ │ ┌───────┴────────┴────────┐ │ │┌──────────┴────────┤│ ++# ││192.168.241.1/24 ││ │ │(ns1) (ns2) │ │ ││192.168.241.2/24 ││ ++# ││fd00::1/24 ││ │ │127.0.0.1:1 127.0.0.1:2│ │ ││fd00::2/24 ││ ++# │└───────────────────┘│ │ │[::]:1 [::]:2 │ │ │└───────────────────┘│ ++# └─────────────────────┘ │ └─────────────────────────┘ │ └─────────────────────┘ ++# └──────────────────────────────────┘ ++# ++# After the topology is prepared we run a series of TCP/UDP iperf3 tests between the ++# wireguard peers in $ns1 and $ns2. Note that $ns0 is the endpoint for the wg0 ++# interfaces in $ns1 and $ns2. See https://www.wireguard.com/netns/ for further ++# details on how this is accomplished. ++set -e ++ ++exec 3>&1 ++export WG_HIDE_KEYS=never ++netns0="wg-test-$$-0" ++netns1="wg-test-$$-1" ++netns2="wg-test-$$-2" ++pretty() { echo -e "\x1b[32m\x1b[1m[+] ${1:+NS$1: }${2}\x1b[0m" >&3; } ++pp() { pretty "" "$*"; "$@"; } ++maybe_exec() { if [[ $BASHPID -eq $$ ]]; then "$@"; else exec "$@"; fi; } ++n0() { pretty 0 "$*"; maybe_exec ip netns exec $netns0 "$@"; } ++n1() { pretty 1 "$*"; maybe_exec ip netns exec $netns1 "$@"; } ++n2() { pretty 2 "$*"; maybe_exec ip netns exec $netns2 "$@"; } ++ip0() { pretty 0 "ip $*"; ip -n $netns0 "$@"; } ++ip1() { pretty 1 "ip $*"; ip -n $netns1 "$@"; } ++ip2() { pretty 2 "ip $*"; ip -n $netns2 "$@"; } ++sleep() { read -t "$1" -N 0 || true; } ++waitiperf() { pretty "${1//*-}" "wait for iperf:5201"; while [[ $(ss -N "$1" -tlp 'sport = 5201') != *iperf3* ]]; do sleep 0.1; done; } ++waitncatudp() { pretty "${1//*-}" "wait for udp:1111"; while [[ $(ss -N "$1" -ulp 'sport = 1111') != *ncat* ]]; do sleep 0.1; done; } ++waitncattcp() { pretty "${1//*-}" "wait for tcp:1111"; while [[ $(ss -N "$1" -tlp 'sport = 1111') != *ncat* ]]; do sleep 0.1; done; } ++waitiface() { pretty "${1//*-}" "wait for $2 to come up"; ip netns exec "$1" bash -c "while [[ \$(< \"/sys/class/net/$2/operstate\") != up ]]; do read -t .1 -N 0 || true; done;"; } ++ ++cleanup() { ++ set +e ++ exec 2>/dev/null ++ printf "$orig_message_cost" > /proc/sys/net/core/message_cost ++ ip0 link del dev wg0 ++ ip1 link del dev wg0 ++ ip2 link del dev wg0 ++ local to_kill="$(ip netns pids $netns0) $(ip netns pids $netns1) $(ip netns pids $netns2)" ++ [[ -n $to_kill ]] && kill $to_kill ++ pp ip netns del $netns1 ++ pp ip netns del $netns2 ++ pp ip netns del $netns0 ++ exit ++} ++ ++orig_message_cost="$(< /proc/sys/net/core/message_cost)" ++trap cleanup EXIT ++printf 0 > /proc/sys/net/core/message_cost ++ ++ip netns del $netns0 2>/dev/null || true ++ip netns del $netns1 2>/dev/null || true ++ip netns del $netns2 2>/dev/null || true ++pp ip netns add $netns0 ++pp ip netns add $netns1 ++pp ip netns add $netns2 ++ip0 link set up dev lo ++ ++ip0 link add dev wg0 type wireguard ++ip0 link set wg0 netns $netns1 ++ip0 link add dev wg0 type wireguard ++ip0 link set wg0 netns $netns2 ++key1="$(pp wg genkey)" ++key2="$(pp wg genkey)" ++key3="$(pp wg genkey)" ++pub1="$(pp wg pubkey <<<"$key1")" ++pub2="$(pp wg pubkey <<<"$key2")" ++pub3="$(pp wg pubkey <<<"$key3")" ++psk="$(pp wg genpsk)" ++[[ -n $key1 && -n $key2 && -n $psk ]] ++ ++configure_peers() { ++ ip1 addr add 
192.168.241.1/24 dev wg0 ++ ip1 addr add fd00::1/24 dev wg0 ++ ++ ip2 addr add 192.168.241.2/24 dev wg0 ++ ip2 addr add fd00::2/24 dev wg0 ++ ++ n1 wg set wg0 \ ++ private-key <(echo "$key1") \ ++ listen-port 1 \ ++ peer "$pub2" \ ++ preshared-key <(echo "$psk") \ ++ allowed-ips 192.168.241.2/32,fd00::2/128 ++ n2 wg set wg0 \ ++ private-key <(echo "$key2") \ ++ listen-port 2 \ ++ peer "$pub1" \ ++ preshared-key <(echo "$psk") \ ++ allowed-ips 192.168.241.1/32,fd00::1/128 ++ ++ ip1 link set up dev wg0 ++ ip2 link set up dev wg0 ++} ++configure_peers ++ ++tests() { ++ # Ping over IPv4 ++ n2 ping -c 10 -f -W 1 192.168.241.1 ++ n1 ping -c 10 -f -W 1 192.168.241.2 ++ ++ # Ping over IPv6 ++ n2 ping6 -c 10 -f -W 1 fd00::1 ++ n1 ping6 -c 10 -f -W 1 fd00::2 ++ ++ # TCP over IPv4 ++ n2 iperf3 -s -1 -B 192.168.241.2 & ++ waitiperf $netns2 ++ n1 iperf3 -Z -t 3 -c 192.168.241.2 ++ ++ # TCP over IPv6 ++ n1 iperf3 -s -1 -B fd00::1 & ++ waitiperf $netns1 ++ n2 iperf3 -Z -t 3 -c fd00::1 ++ ++ # UDP over IPv4 ++ n1 iperf3 -s -1 -B 192.168.241.1 & ++ waitiperf $netns1 ++ n2 iperf3 -Z -t 3 -b 0 -u -c 192.168.241.1 ++ ++ # UDP over IPv6 ++ n2 iperf3 -s -1 -B fd00::2 & ++ waitiperf $netns2 ++ n1 iperf3 -Z -t 3 -b 0 -u -c fd00::2 ++} ++ ++[[ $(ip1 link show dev wg0) =~ mtu\ ([0-9]+) ]] && orig_mtu="${BASH_REMATCH[1]}" ++big_mtu=$(( 34816 - 1500 + $orig_mtu )) ++ ++# Test using IPv4 as outer transport ++n1 wg set wg0 peer "$pub2" endpoint 127.0.0.1:2 ++n2 wg set wg0 peer "$pub1" endpoint 127.0.0.1:1 ++# Before calling tests, we first make sure that the stats counters and timestamper are working ++n2 ping -c 10 -f -W 1 192.168.241.1 ++{ read _; read _; read _; read rx_bytes _; read _; read tx_bytes _; } < <(ip2 -stats link show dev wg0) ++(( rx_bytes == 1372 && (tx_bytes == 1428 || tx_bytes == 1460) )) ++{ read _; read _; read _; read rx_bytes _; read _; read tx_bytes _; } < <(ip1 -stats link show dev wg0) ++(( tx_bytes == 1372 && (rx_bytes == 1428 || rx_bytes == 1460) )) ++read _ rx_bytes tx_bytes < <(n2 wg show wg0 transfer) ++(( rx_bytes == 1372 && (tx_bytes == 1428 || tx_bytes == 1460) )) ++read _ rx_bytes tx_bytes < <(n1 wg show wg0 transfer) ++(( tx_bytes == 1372 && (rx_bytes == 1428 || rx_bytes == 1460) )) ++read _ timestamp < <(n1 wg show wg0 latest-handshakes) ++(( timestamp != 0 )) ++ ++tests ++ip1 link set wg0 mtu $big_mtu ++ip2 link set wg0 mtu $big_mtu ++tests ++ ++ip1 link set wg0 mtu $orig_mtu ++ip2 link set wg0 mtu $orig_mtu ++ ++# Test using IPv6 as outer transport ++n1 wg set wg0 peer "$pub2" endpoint [::1]:2 ++n2 wg set wg0 peer "$pub1" endpoint [::1]:1 ++tests ++ip1 link set wg0 mtu $big_mtu ++ip2 link set wg0 mtu $big_mtu ++tests ++ ++# Test that route MTUs work with the padding ++ip1 link set wg0 mtu 1300 ++ip2 link set wg0 mtu 1300 ++n1 wg set wg0 peer "$pub2" endpoint 127.0.0.1:2 ++n2 wg set wg0 peer "$pub1" endpoint 127.0.0.1:1 ++n0 iptables -A INPUT -m length --length 1360 -j DROP ++n1 ip route add 192.168.241.2/32 dev wg0 mtu 1299 ++n2 ip route add 192.168.241.1/32 dev wg0 mtu 1299 ++n2 ping -c 1 -W 1 -s 1269 192.168.241.1 ++n2 ip route delete 192.168.241.1/32 dev wg0 mtu 1299 ++n1 ip route delete 192.168.241.2/32 dev wg0 mtu 1299 ++n0 iptables -F INPUT ++ ++ip1 link set wg0 mtu $orig_mtu ++ip2 link set wg0 mtu $orig_mtu ++ ++# Test using IPv4 that roaming works ++ip0 -4 addr del 127.0.0.1/8 dev lo ++ip0 -4 addr add 127.212.121.99/8 dev lo ++n1 wg set wg0 listen-port 9999 ++n1 wg set wg0 peer "$pub2" endpoint 127.0.0.1:2 ++n1 ping6 -W 1 -c 1 fd00::2 ++[[ $(n2 wg show wg0 endpoints) == 
"$pub1 127.212.121.99:9999" ]] ++ ++# Test using IPv6 that roaming works ++n1 wg set wg0 listen-port 9998 ++n1 wg set wg0 peer "$pub2" endpoint [::1]:2 ++n1 ping -W 1 -c 1 192.168.241.2 ++[[ $(n2 wg show wg0 endpoints) == "$pub1 [::1]:9998" ]] ++ ++# Test that crypto-RP filter works ++n1 wg set wg0 peer "$pub2" allowed-ips 192.168.241.0/24 ++exec 4< <(n1 ncat -l -u -p 1111) ++ncat_pid=$! ++waitncatudp $netns1 ++n2 ncat -u 192.168.241.1 1111 <<<"X" ++read -r -N 1 -t 1 out <&4 && [[ $out == "X" ]] ++kill $ncat_pid ++more_specific_key="$(pp wg genkey | pp wg pubkey)" ++n1 wg set wg0 peer "$more_specific_key" allowed-ips 192.168.241.2/32 ++n2 wg set wg0 listen-port 9997 ++exec 4< <(n1 ncat -l -u -p 1111) ++ncat_pid=$! ++waitncatudp $netns1 ++n2 ncat -u 192.168.241.1 1111 <<<"X" ++! read -r -N 1 -t 1 out <&4 || false ++kill $ncat_pid ++n1 wg set wg0 peer "$more_specific_key" remove ++[[ $(n1 wg show wg0 endpoints) == "$pub2 [::1]:9997" ]] ++ ++# Test that we can change private keys keys and immediately handshake ++n1 wg set wg0 private-key <(echo "$key1") peer "$pub2" preshared-key <(echo "$psk") allowed-ips 192.168.241.2/32 endpoint 127.0.0.1:2 ++n2 wg set wg0 private-key <(echo "$key2") listen-port 2 peer "$pub1" preshared-key <(echo "$psk") allowed-ips 192.168.241.1/32 ++n1 ping -W 1 -c 1 192.168.241.2 ++n1 wg set wg0 private-key <(echo "$key3") ++n2 wg set wg0 peer "$pub3" preshared-key <(echo "$psk") allowed-ips 192.168.241.1/32 peer "$pub1" remove ++n1 ping -W 1 -c 1 192.168.241.2 ++ ++ip1 link del wg0 ++ip2 link del wg0 ++ ++# Test using NAT. We now change the topology to this: ++# ┌────────────────────────────────────────┐ ┌────────────────────────────────────────────────┐ ┌────────────────────────────────────────┐ ++# │ $ns1 namespace │ │ $ns0 namespace │ │ $ns2 namespace │ ++# │ │ │ │ │ │ ++# │ ┌─────┐ ┌─────┐ │ │ ┌──────┐ ┌──────┐ │ │ ┌─────┐ ┌─────┐ │ ++# │ │ wg0 │─────────────│vethc│───────────┼────┼────│vethrc│ │vethrs│──────────────┼─────┼──│veths│────────────│ wg0 │ │ ++# │ ├─────┴──────────┐ ├─────┴──────────┐│ │ ├──────┴─────────┐ ├──────┴────────────┐ │ │ ├─────┴──────────┐ ├─────┴──────────┐ │ ++# │ │192.168.241.1/24│ │192.168.1.100/24││ │ │192.168.1.1/24 │ │10.0.0.1/24 │ │ │ │10.0.0.100/24 │ │192.168.241.2/24│ │ ++# │ │fd00::1/24 │ │ ││ │ │ │ │SNAT:192.168.1.0/24│ │ │ │ │ │fd00::2/24 │ │ ++# │ └────────────────┘ └────────────────┘│ │ └────────────────┘ └───────────────────┘ │ │ └────────────────┘ └────────────────┘ │ ++# └────────────────────────────────────────┘ └────────────────────────────────────────────────┘ └────────────────────────────────────────┘ ++ ++ip1 link add dev wg0 type wireguard ++ip2 link add dev wg0 type wireguard ++configure_peers ++ ++ip0 link add vethrc type veth peer name vethc ++ip0 link add vethrs type veth peer name veths ++ip0 link set vethc netns $netns1 ++ip0 link set veths netns $netns2 ++ip0 link set vethrc up ++ip0 link set vethrs up ++ip0 addr add 192.168.1.1/24 dev vethrc ++ip0 addr add 10.0.0.1/24 dev vethrs ++ip1 addr add 192.168.1.100/24 dev vethc ++ip1 link set vethc up ++ip1 route add default via 192.168.1.1 ++ip2 addr add 10.0.0.100/24 dev veths ++ip2 link set veths up ++waitiface $netns0 vethrc ++waitiface $netns0 vethrs ++waitiface $netns1 vethc ++waitiface $netns2 veths ++ ++n0 bash -c 'printf 1 > /proc/sys/net/ipv4/ip_forward' ++n0 bash -c 'printf 2 > /proc/sys/net/netfilter/nf_conntrack_udp_timeout' ++n0 bash -c 'printf 2 > /proc/sys/net/netfilter/nf_conntrack_udp_timeout_stream' ++n0 iptables -t nat -A POSTROUTING -s 
192.168.1.0/24 -d 10.0.0.0/24 -j SNAT --to 10.0.0.1 ++ ++n1 wg set wg0 peer "$pub2" endpoint 10.0.0.100:2 persistent-keepalive 1 ++n1 ping -W 1 -c 1 192.168.241.2 ++n2 ping -W 1 -c 1 192.168.241.1 ++[[ $(n2 wg show wg0 endpoints) == "$pub1 10.0.0.1:1" ]] ++# Demonstrate n2 can still send packets to n1, since persistent-keepalive will prevent connection tracking entry from expiring (to see entries: `n0 conntrack -L`). ++pp sleep 3 ++n2 ping -W 1 -c 1 192.168.241.1 ++n1 wg set wg0 peer "$pub2" persistent-keepalive 0 ++ ++# Do a wg-quick(8)-style policy routing for the default route, making sure vethc has a v6 address to tease out bugs. ++ip1 -6 addr add fc00::9/96 dev vethc ++ip1 -6 route add default via fc00::1 ++ip2 -4 addr add 192.168.99.7/32 dev wg0 ++ip2 -6 addr add abab::1111/128 dev wg0 ++n1 wg set wg0 fwmark 51820 peer "$pub2" allowed-ips 192.168.99.7,abab::1111 ++ip1 -6 route add default dev wg0 table 51820 ++ip1 -6 rule add not fwmark 51820 table 51820 ++ip1 -6 rule add table main suppress_prefixlength 0 ++ip1 -4 route add default dev wg0 table 51820 ++ip1 -4 rule add not fwmark 51820 table 51820 ++ip1 -4 rule add table main suppress_prefixlength 0 ++# suppress_prefixlength only got added in 3.12, and we want to support 3.10+. ++if [[ $(ip1 -4 rule show all) == *suppress_prefixlength* ]]; then ++ # Flood the pings instead of sending just one, to trigger routing table reference counting bugs. ++ n1 ping -W 1 -c 100 -f 192.168.99.7 ++ n1 ping -W 1 -c 100 -f abab::1111 ++fi ++ ++n0 iptables -t nat -F ++ip0 link del vethrc ++ip0 link del vethrs ++ip1 link del wg0 ++ip2 link del wg0 ++ ++# Test that saddr routing is sticky but not too sticky, changing to this topology: ++# ┌────────────────────────────────────────┐ ┌────────────────────────────────────────┐ ++# │ $ns1 namespace │ │ $ns2 namespace │ ++# │ │ │ │ ++# │ ┌─────┐ ┌─────┐ │ │ ┌─────┐ ┌─────┐ │ ++# │ │ wg0 │─────────────│veth1│───────────┼────┼──│veth2│────────────│ wg0 │ │ ++# │ ├─────┴──────────┐ ├─────┴──────────┐│ │ ├─────┴──────────┐ ├─────┴──────────┐ │ ++# │ │192.168.241.1/24│ │10.0.0.1/24 ││ │ │10.0.0.2/24 │ │192.168.241.2/24│ │ ++# │ │fd00::1/24 │ │fd00:aa::1/96 ││ │ │fd00:aa::2/96 │ │fd00::2/24 │ │ ++# │ └────────────────┘ └────────────────┘│ │ └────────────────┘ └────────────────┘ │ ++# └────────────────────────────────────────┘ └────────────────────────────────────────┘ ++ ++ip1 link add dev wg0 type wireguard ++ip2 link add dev wg0 type wireguard ++configure_peers ++ip1 link add veth1 type veth peer name veth2 ++ip1 link set veth2 netns $netns2 ++n1 bash -c 'printf 0 > /proc/sys/net/ipv6/conf/all/accept_dad' ++n2 bash -c 'printf 0 > /proc/sys/net/ipv6/conf/all/accept_dad' ++n1 bash -c 'printf 0 > /proc/sys/net/ipv6/conf/veth1/accept_dad' ++n2 bash -c 'printf 0 > /proc/sys/net/ipv6/conf/veth2/accept_dad' ++n1 bash -c 'printf 1 > /proc/sys/net/ipv4/conf/veth1/promote_secondaries' ++ ++# First we check that we aren't overly sticky and can fall over to new IPs when old ones are removed ++ip1 addr add 10.0.0.1/24 dev veth1 ++ip1 addr add fd00:aa::1/96 dev veth1 ++ip2 addr add 10.0.0.2/24 dev veth2 ++ip2 addr add fd00:aa::2/96 dev veth2 ++ip1 link set veth1 up ++ip2 link set veth2 up ++waitiface $netns1 veth1 ++waitiface $netns2 veth2 ++n1 wg set wg0 peer "$pub2" endpoint 10.0.0.2:2 ++n1 ping -W 1 -c 1 192.168.241.2 ++ip1 addr add 10.0.0.10/24 dev veth1 ++ip1 addr del 10.0.0.1/24 dev veth1 ++n1 ping -W 1 -c 1 192.168.241.2 ++n1 wg set wg0 peer "$pub2" endpoint [fd00:aa::2]:2 ++n1 ping -W 1 -c 1 192.168.241.2 ++ip1 addr 
add fd00:aa::10/96 dev veth1 ++ip1 addr del fd00:aa::1/96 dev veth1 ++n1 ping -W 1 -c 1 192.168.241.2 ++ ++# Now we show that we can successfully do reply to sender routing ++ip1 link set veth1 down ++ip2 link set veth2 down ++ip1 addr flush dev veth1 ++ip2 addr flush dev veth2 ++ip1 addr add 10.0.0.1/24 dev veth1 ++ip1 addr add 10.0.0.2/24 dev veth1 ++ip1 addr add fd00:aa::1/96 dev veth1 ++ip1 addr add fd00:aa::2/96 dev veth1 ++ip2 addr add 10.0.0.3/24 dev veth2 ++ip2 addr add fd00:aa::3/96 dev veth2 ++ip1 link set veth1 up ++ip2 link set veth2 up ++waitiface $netns1 veth1 ++waitiface $netns2 veth2 ++n2 wg set wg0 peer "$pub1" endpoint 10.0.0.1:1 ++n2 ping -W 1 -c 1 192.168.241.1 ++[[ $(n2 wg show wg0 endpoints) == "$pub1 10.0.0.1:1" ]] ++n2 wg set wg0 peer "$pub1" endpoint [fd00:aa::1]:1 ++n2 ping -W 1 -c 1 192.168.241.1 ++[[ $(n2 wg show wg0 endpoints) == "$pub1 [fd00:aa::1]:1" ]] ++n2 wg set wg0 peer "$pub1" endpoint 10.0.0.2:1 ++n2 ping -W 1 -c 1 192.168.241.1 ++[[ $(n2 wg show wg0 endpoints) == "$pub1 10.0.0.2:1" ]] ++n2 wg set wg0 peer "$pub1" endpoint [fd00:aa::2]:1 ++n2 ping -W 1 -c 1 192.168.241.1 ++[[ $(n2 wg show wg0 endpoints) == "$pub1 [fd00:aa::2]:1" ]] ++ ++# What happens if the inbound destination address belongs to a different interface as the default route? ++ip1 link add dummy0 type dummy ++ip1 addr add 10.50.0.1/24 dev dummy0 ++ip1 link set dummy0 up ++ip2 route add 10.50.0.0/24 dev veth2 ++n2 wg set wg0 peer "$pub1" endpoint 10.50.0.1:1 ++n2 ping -W 1 -c 1 192.168.241.1 ++[[ $(n2 wg show wg0 endpoints) == "$pub1 10.50.0.1:1" ]] ++ ++ip1 link del dummy0 ++ip1 addr flush dev veth1 ++ip2 addr flush dev veth2 ++ip1 route flush dev veth1 ++ip2 route flush dev veth2 ++ ++# Now we see what happens if another interface route takes precedence over an ongoing one ++ip1 link add veth3 type veth peer name veth4 ++ip1 link set veth4 netns $netns2 ++ip1 addr add 10.0.0.1/24 dev veth1 ++ip2 addr add 10.0.0.2/24 dev veth2 ++ip1 addr add 10.0.0.3/24 dev veth3 ++ip1 link set veth1 up ++ip2 link set veth2 up ++ip1 link set veth3 up ++ip2 link set veth4 up ++waitiface $netns1 veth1 ++waitiface $netns2 veth2 ++waitiface $netns1 veth3 ++waitiface $netns2 veth4 ++ip1 route flush dev veth1 ++ip1 route flush dev veth3 ++ip1 route add 10.0.0.0/24 dev veth1 src 10.0.0.1 metric 2 ++n1 wg set wg0 peer "$pub2" endpoint 10.0.0.2:2 ++n1 ping -W 1 -c 1 192.168.241.2 ++[[ $(n2 wg show wg0 endpoints) == "$pub1 10.0.0.1:1" ]] ++ip1 route add 10.0.0.0/24 dev veth3 src 10.0.0.3 metric 1 ++n1 bash -c 'printf 0 > /proc/sys/net/ipv4/conf/veth1/rp_filter' ++n2 bash -c 'printf 0 > /proc/sys/net/ipv4/conf/veth4/rp_filter' ++n1 bash -c 'printf 0 > /proc/sys/net/ipv4/conf/all/rp_filter' ++n2 bash -c 'printf 0 > /proc/sys/net/ipv4/conf/all/rp_filter' ++n1 ping -W 1 -c 1 192.168.241.2 ++[[ $(n2 wg show wg0 endpoints) == "$pub1 10.0.0.3:1" ]] ++ ++ip1 link del veth1 ++ip1 link del veth3 ++ip1 link del wg0 ++ip2 link del wg0 ++ ++# We test that Netlink/IPC is working properly by doing things that usually cause split responses ++ip0 link add dev wg0 type wireguard ++config=( "[Interface]" "PrivateKey=$(wg genkey)" "[Peer]" "PublicKey=$(wg genkey)" ) ++for a in {1..255}; do ++ for b in {0..255}; do ++ config+=( "AllowedIPs=$a.$b.0.0/16,$a::$b/128" ) ++ done ++done ++n0 wg setconf wg0 <(printf '%s\n' "${config[@]}") ++i=0 ++for ip in $(n0 wg show wg0 allowed-ips); do ++ ((++i)) ++done ++((i == 255*256*2+1)) ++ip0 link del wg0 ++ip0 link add dev wg0 type wireguard ++config=( "[Interface]" "PrivateKey=$(wg genkey)" ) 
++for a in {1..40}; do ++ config+=( "[Peer]" "PublicKey=$(wg genkey)" ) ++ for b in {1..52}; do ++ config+=( "AllowedIPs=$a.$b.0.0/16" ) ++ done ++done ++n0 wg setconf wg0 <(printf '%s\n' "${config[@]}") ++i=0 ++while read -r line; do ++ j=0 ++ for ip in $line; do ++ ((++j)) ++ done ++ ((j == 53)) ++ ((++i)) ++done < <(n0 wg show wg0 allowed-ips) ++((i == 40)) ++ip0 link del wg0 ++ip0 link add wg0 type wireguard ++config=( ) ++for i in {1..29}; do ++ config+=( "[Peer]" "PublicKey=$(wg genkey)" ) ++done ++config+=( "[Peer]" "PublicKey=$(wg genkey)" "AllowedIPs=255.2.3.4/32,abcd::255/128" ) ++n0 wg setconf wg0 <(printf '%s\n' "${config[@]}") ++n0 wg showconf wg0 > /dev/null ++ip0 link del wg0 ++ ++allowedips=( ) ++for i in {1..197}; do ++ allowedips+=( abcd::$i ) ++done ++saved_ifs="$IFS" ++IFS=, ++allowedips="${allowedips[*]}" ++IFS="$saved_ifs" ++ip0 link add wg0 type wireguard ++n0 wg set wg0 peer "$pub1" ++n0 wg set wg0 peer "$pub2" allowed-ips "$allowedips" ++{ ++ read -r pub allowedips ++ [[ $pub == "$pub1" && $allowedips == "(none)" ]] ++ read -r pub allowedips ++ [[ $pub == "$pub2" ]] ++ i=0 ++ for _ in $allowedips; do ++ ((++i)) ++ done ++ ((i == 197)) ++} < <(n0 wg show wg0 allowed-ips) ++ip0 link del wg0 ++ ++! n0 wg show doesnotexist || false ++ ++ip0 link add wg0 type wireguard ++n0 wg set wg0 private-key <(echo "$key1") peer "$pub2" preshared-key <(echo "$psk") ++[[ $(n0 wg show wg0 private-key) == "$key1" ]] ++[[ $(n0 wg show wg0 preshared-keys) == "$pub2 $psk" ]] ++n0 wg set wg0 private-key /dev/null peer "$pub2" preshared-key /dev/null ++[[ $(n0 wg show wg0 private-key) == "(none)" ]] ++[[ $(n0 wg show wg0 preshared-keys) == "$pub2 (none)" ]] ++n0 wg set wg0 peer "$pub2" ++n0 wg set wg0 private-key <(echo "$key2") ++[[ $(n0 wg show wg0 public-key) == "$pub2" ]] ++[[ -z $(n0 wg show wg0 peers) ]] ++n0 wg set wg0 peer "$pub2" ++[[ -z $(n0 wg show wg0 peers) ]] ++n0 wg set wg0 private-key <(echo "$key1") ++n0 wg set wg0 peer "$pub2" ++[[ $(n0 wg show wg0 peers) == "$pub2" ]] ++n0 wg set wg0 private-key <(echo "/${key1:1}") ++[[ $(n0 wg show wg0 private-key) == "+${key1:1}" ]] ++n0 wg set wg0 peer "$pub2" allowed-ips 0.0.0.0/0,10.0.0.0/8,100.0.0.0/10,172.16.0.0/12,192.168.0.0/16 ++n0 wg set wg0 peer "$pub2" allowed-ips 0.0.0.0/0 ++n0 wg set wg0 peer "$pub2" allowed-ips ::/0,1700::/111,5000::/4,e000::/37,9000::/75 ++n0 wg set wg0 peer "$pub2" allowed-ips ::/0 ++ip0 link del wg0 ++ ++declare -A objects ++while read -t 0.1 -r line 2>/dev/null || [[ $? -ne 142 ]]; do ++ [[ $line =~ .*(wg[0-9]+:\ [A-Z][a-z]+\ [0-9]+)\ .*(created|destroyed).* ]] || continue ++ objects["${BASH_REMATCH[1]}"]+="${BASH_REMATCH[2]}" ++done < /dev/kmsg ++alldeleted=1 ++for object in "${!objects[@]}"; do ++ if [[ ${objects["$object"]} != *createddestroyed ]]; then ++ echo "Error: $object: merely ${objects["$object"]}" >&3 ++ alldeleted=0 ++ fi ++done ++[[ $alldeleted -eq 1 ]] ++pretty "" "Objects that were created were also destroyed." diff --git a/target/linux/generic/backport-5.4/080-wireguard-0073-net-icmp-pass-zeroed-opts-from-icmp-v6-_ndo_send-bef.patch b/target/linux/generic/backport-5.4/080-wireguard-0073-net-icmp-pass-zeroed-opts-from-icmp-v6-_ndo_send-bef.patch deleted file mode 100644 index 2a5c356f14..0000000000 --- a/target/linux/generic/backport-5.4/080-wireguard-0073-net-icmp-pass-zeroed-opts-from-icmp-v6-_ndo_send-bef.patch +++ /dev/null @@ -1,299 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. 
Donenfeld" -Date: Tue, 23 Feb 2021 14:18:58 +0100 -Subject: [PATCH] net: icmp: pass zeroed opts from icmp{,v6}_ndo_send before - sending - -commit ee576c47db60432c37e54b1e2b43a8ca6d3a8dca upstream. - -The icmp{,v6}_send functions make all sorts of use of skb->cb, casting -it with IPCB or IP6CB, assuming the skb to have come directly from the -inet layer. But when the packet comes from the ndo layer, especially -when forwarded, there's no telling what might be in skb->cb at that -point. As a result, the icmp sending code risks reading bogus memory -contents, which can result in nasty stack overflows such as this one -reported by a user: - - panic+0x108/0x2ea - __stack_chk_fail+0x14/0x20 - __icmp_send+0x5bd/0x5c0 - icmp_ndo_send+0x148/0x160 - -In icmp_send, skb->cb is cast with IPCB and an ip_options struct is read -from it. The optlen parameter there is of particular note, as it can -induce writes beyond bounds. There are quite a few ways that can happen -in __ip_options_echo. For example: - - // sptr/skb are attacker-controlled skb bytes - sptr = skb_network_header(skb); - // dptr/dopt points to stack memory allocated by __icmp_send - dptr = dopt->__data; - // sopt is the corrupt skb->cb in question - if (sopt->rr) { - optlen = sptr[sopt->rr+1]; // corrupt skb->cb + skb->data - soffset = sptr[sopt->rr+2]; // corrupt skb->cb + skb->data - // this now writes potentially attacker-controlled data, over - // flowing the stack: - memcpy(dptr, sptr+sopt->rr, optlen); - } - -In the icmpv6_send case, the story is similar, but not as dire, as only -IP6CB(skb)->iif and IP6CB(skb)->dsthao are used. The dsthao case is -worse than the iif case, but it is passed to ipv6_find_tlv, which does -a bit of bounds checking on the value. - -This is easy to simulate by doing a `memset(skb->cb, 0x41, -sizeof(skb->cb));` before calling icmp{,v6}_ndo_send, and it's only by -good fortune and the rarity of icmp sending from that context that we've -avoided reports like this until now. For example, in KASAN: - - BUG: KASAN: stack-out-of-bounds in __ip_options_echo+0xa0e/0x12b0 - Write of size 38 at addr ffff888006f1f80e by task ping/89 - CPU: 2 PID: 89 Comm: ping Not tainted 5.10.0-rc7-debug+ #5 - Call Trace: - dump_stack+0x9a/0xcc - print_address_description.constprop.0+0x1a/0x160 - __kasan_report.cold+0x20/0x38 - kasan_report+0x32/0x40 - check_memory_region+0x145/0x1a0 - memcpy+0x39/0x60 - __ip_options_echo+0xa0e/0x12b0 - __icmp_send+0x744/0x1700 - -Actually, out of the 4 drivers that do this, only gtp zeroed the cb for -the v4 case, while the rest did not. So this commit actually removes the -gtp-specific zeroing, while putting the code where it belongs in the -shared infrastructure of icmp{,v6}_ndo_send. - -This commit fixes the issue by passing an empty IPCB or IP6CB along to -the functions that actually do the work. For the icmp_send, this was -already trivial, thanks to __icmp_send providing the plumbing function. -For icmpv6_send, this required a tiny bit of refactoring to make it -behave like the v4 case, after which it was straight forward. - -Fixes: a2b78e9b2cac ("sunvnet: generate ICMP PTMUD messages for smaller port MTUs") -Reported-by: SinYu -Reviewed-by: Willem de Bruijn -Link: https://lore.kernel.org/netdev/CAF=yD-LOF116aHub6RMe8vB8ZpnrrnoTdqhobEx+bvoA8AsP0w@mail.gmail.com/T/ -Signed-off-by: Jason A. 
Donenfeld -Link: https://lore.kernel.org/r/20210223131858.72082-1-Jason@zx2c4.com -Signed-off-by: Jakub Kicinski -[Jason: the gtp part didn't apply because it doesn't use icmp_ndo_send on 5.4] -Signed-off-by: Jason A. Donenfeld ---- - include/linux/icmpv6.h | 17 ++++++++++++++--- - include/linux/ipv6.h | 1 - - include/net/icmp.h | 6 +++++- - net/ipv4/icmp.c | 5 +++-- - net/ipv6/icmp.c | 16 ++++++++-------- - net/ipv6/ip6_icmp.c | 12 +++++++----- - 6 files changed, 37 insertions(+), 20 deletions(-) - ---- a/include/linux/icmpv6.h -+++ b/include/linux/icmpv6.h -@@ -3,6 +3,7 @@ - #define _LINUX_ICMPV6_H - - #include -+#include - #include - - static inline struct icmp6hdr *icmp6_hdr(const struct sk_buff *skb) -@@ -13,10 +14,16 @@ static inline struct icmp6hdr *icmp6_hdr - #include - - #if IS_ENABLED(CONFIG_IPV6) --extern void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info); -+extern void __icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, -+ const struct inet6_skb_parm *parm); - -+static inline void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info) -+{ -+ __icmpv6_send(skb, type, code, info, IP6CB(skb)); -+} - typedef void ip6_icmp_send_t(struct sk_buff *skb, u8 type, u8 code, __u32 info, -- const struct in6_addr *force_saddr); -+ const struct in6_addr *force_saddr, -+ const struct inet6_skb_parm *parm); - extern int inet6_register_icmp_sender(ip6_icmp_send_t *fn); - extern int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn); - int ip6_err_gen_icmpv6_unreach(struct sk_buff *skb, int nhs, int type, -@@ -25,7 +32,11 @@ int ip6_err_gen_icmpv6_unreach(struct sk - #if IS_ENABLED(CONFIG_NF_NAT) - void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info); - #else --#define icmpv6_ndo_send icmpv6_send -+static inline void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info) -+{ -+ struct inet6_skb_parm parm = { 0 }; -+ __icmpv6_send(skb_in, type, code, info, &parm); -+} - #endif - - #else ---- a/include/linux/ipv6.h -+++ b/include/linux/ipv6.h -@@ -83,7 +83,6 @@ struct ipv6_params { - __s32 autoconf; - }; - extern struct ipv6_params ipv6_defaults; --#include - #include - #include - ---- a/include/net/icmp.h -+++ b/include/net/icmp.h -@@ -46,7 +46,11 @@ static inline void icmp_send(struct sk_b - #if IS_ENABLED(CONFIG_NF_NAT) - void icmp_ndo_send(struct sk_buff *skb_in, int type, int code, __be32 info); - #else --#define icmp_ndo_send icmp_send -+static inline void icmp_ndo_send(struct sk_buff *skb_in, int type, int code, __be32 info) -+{ -+ struct ip_options opts = { 0 }; -+ __icmp_send(skb_in, type, code, info, &opts); -+} - #endif - - int icmp_rcv(struct sk_buff *skb); ---- a/net/ipv4/icmp.c -+++ b/net/ipv4/icmp.c -@@ -755,13 +755,14 @@ EXPORT_SYMBOL(__icmp_send); - void icmp_ndo_send(struct sk_buff *skb_in, int type, int code, __be32 info) - { - struct sk_buff *cloned_skb = NULL; -+ struct ip_options opts = { 0 }; - enum ip_conntrack_info ctinfo; - struct nf_conn *ct; - __be32 orig_ip; - - ct = nf_ct_get(skb_in, &ctinfo); - if (!ct || !(ct->status & IPS_SRC_NAT)) { -- icmp_send(skb_in, type, code, info); -+ __icmp_send(skb_in, type, code, info, &opts); - return; - } - -@@ -776,7 +777,7 @@ void icmp_ndo_send(struct sk_buff *skb_i - - orig_ip = ip_hdr(skb_in)->saddr; - ip_hdr(skb_in)->saddr = ct->tuplehash[0].tuple.src.u3.ip; -- icmp_send(skb_in, type, code, info); -+ __icmp_send(skb_in, type, code, info, &opts); - ip_hdr(skb_in)->saddr = orig_ip; - out: - consume_skb(cloned_skb); ---- a/net/ipv6/icmp.c -+++ b/net/ipv6/icmp.c 
-@@ -312,10 +312,9 @@ static int icmpv6_getfrag(void *from, ch - } - - #if IS_ENABLED(CONFIG_IPV6_MIP6) --static void mip6_addr_swap(struct sk_buff *skb) -+static void mip6_addr_swap(struct sk_buff *skb, const struct inet6_skb_parm *opt) - { - struct ipv6hdr *iph = ipv6_hdr(skb); -- struct inet6_skb_parm *opt = IP6CB(skb); - struct ipv6_destopt_hao *hao; - struct in6_addr tmp; - int off; -@@ -332,7 +331,7 @@ static void mip6_addr_swap(struct sk_buf - } - } - #else --static inline void mip6_addr_swap(struct sk_buff *skb) {} -+static inline void mip6_addr_swap(struct sk_buff *skb, const struct inet6_skb_parm *opt) {} - #endif - - static struct dst_entry *icmpv6_route_lookup(struct net *net, -@@ -427,7 +426,8 @@ static int icmp6_iif(const struct sk_buf - * Send an ICMP message in response to a packet in error - */ - static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, -- const struct in6_addr *force_saddr) -+ const struct in6_addr *force_saddr, -+ const struct inet6_skb_parm *parm) - { - struct inet6_dev *idev = NULL; - struct ipv6hdr *hdr = ipv6_hdr(skb); -@@ -520,7 +520,7 @@ static void icmp6_send(struct sk_buff *s - if (!(skb->dev->flags & IFF_LOOPBACK) && !icmpv6_global_allow(net, type)) - goto out_bh_enable; - -- mip6_addr_swap(skb); -+ mip6_addr_swap(skb, parm); - - memset(&fl6, 0, sizeof(fl6)); - fl6.flowi6_proto = IPPROTO_ICMPV6; -@@ -605,7 +605,7 @@ out_bh_enable: - */ - void icmpv6_param_prob(struct sk_buff *skb, u8 code, int pos) - { -- icmp6_send(skb, ICMPV6_PARAMPROB, code, pos, NULL); -+ icmp6_send(skb, ICMPV6_PARAMPROB, code, pos, NULL, IP6CB(skb)); - kfree_skb(skb); - } - -@@ -662,10 +662,10 @@ int ip6_err_gen_icmpv6_unreach(struct sk - } - if (type == ICMP_TIME_EXCEEDED) - icmp6_send(skb2, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, -- info, &temp_saddr); -+ info, &temp_saddr, IP6CB(skb2)); - else - icmp6_send(skb2, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, -- info, &temp_saddr); -+ info, &temp_saddr, IP6CB(skb2)); - if (rt) - ip6_rt_put(rt); - ---- a/net/ipv6/ip6_icmp.c -+++ b/net/ipv6/ip6_icmp.c -@@ -31,7 +31,8 @@ int inet6_unregister_icmp_sender(ip6_icm - } - EXPORT_SYMBOL(inet6_unregister_icmp_sender); - --void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info) -+void __icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, -+ const struct inet6_skb_parm *parm) - { - ip6_icmp_send_t *send; - -@@ -40,16 +41,17 @@ void icmpv6_send(struct sk_buff *skb, u8 - - if (!send) - goto out; -- send(skb, type, code, info, NULL); -+ send(skb, type, code, info, NULL, parm); - out: - rcu_read_unlock(); - } --EXPORT_SYMBOL(icmpv6_send); -+EXPORT_SYMBOL(__icmpv6_send); - - #if IS_ENABLED(CONFIG_NF_NAT) - #include - void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info) - { -+ struct inet6_skb_parm parm = { 0 }; - struct sk_buff *cloned_skb = NULL; - enum ip_conntrack_info ctinfo; - struct in6_addr orig_ip; -@@ -57,7 +59,7 @@ void icmpv6_ndo_send(struct sk_buff *skb - - ct = nf_ct_get(skb_in, &ctinfo); - if (!ct || !(ct->status & IPS_SRC_NAT)) { -- icmpv6_send(skb_in, type, code, info); -+ __icmpv6_send(skb_in, type, code, info, &parm); - return; - } - -@@ -72,7 +74,7 @@ void icmpv6_ndo_send(struct sk_buff *skb - - orig_ip = ipv6_hdr(skb_in)->saddr; - ipv6_hdr(skb_in)->saddr = ct->tuplehash[0].tuple.src.u3.in6; -- icmpv6_send(skb_in, type, code, info); -+ __icmpv6_send(skb_in, type, code, info, &parm); - ipv6_hdr(skb_in)->saddr = orig_ip; - out: - consume_skb(cloned_skb); diff --git 
a/target/linux/generic/backport-5.4/080-wireguard-0073-wireguard-selftests-import-harness-makefile-for-test.patch b/target/linux/generic/backport-5.4/080-wireguard-0073-wireguard-selftests-import-harness-makefile-for-test.patch new file mode 100644 index 0000000000..ca3853aa19 --- /dev/null +++ b/target/linux/generic/backport-5.4/080-wireguard-0073-wireguard-selftests-import-harness-makefile-for-test.patch @@ -0,0 +1,1078 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: "Jason A. Donenfeld" +Date: Sun, 15 Dec 2019 22:08:00 +0100 +Subject: [PATCH] wireguard: selftests: import harness makefile for test suite + +commit 65d88d04114bca7d85faebd5fed61069cb2b632c upstream. + +WireGuard has been using this on build.wireguard.com for the last +several years with considerable success. It allows for very quick and +iterative development cycles, and supports several platforms. + +To run the test suite on your current platform in QEMU: + + $ make -C tools/testing/selftests/wireguard/qemu -j$(nproc) + +To run it with KASAN and such turned on: + + $ DEBUG_KERNEL=yes make -C tools/testing/selftests/wireguard/qemu -j$(nproc) + +To run it emulated for another platform in QEMU: + + $ ARCH=arm make -C tools/testing/selftests/wireguard/qemu -j$(nproc) + +At the moment, we support aarch64_be, aarch64, arm, armeb, i686, m68k, +mips64, mips64el, mips, mipsel, powerpc64le, powerpc, and x86_64. + +The system supports incremental rebuilding, so it should be very fast to +change a single file and then test it out and have immediate feedback. + +This requires for the right toolchain and qemu to be installed prior. +I've had success with those from musl.cc. + +This is tailored for WireGuard at the moment, though later projects +might generalize it for other network testing. + +Signed-off-by: Jason A. Donenfeld +Signed-off-by: David S. Miller +Signed-off-by: Jason A. 
Donenfeld +--- + .../selftests/wireguard/qemu/.gitignore | 2 + + .../testing/selftests/wireguard/qemu/Makefile | 385 ++++++++++++++++++ + .../wireguard/qemu/arch/aarch64.config | 5 + + .../wireguard/qemu/arch/aarch64_be.config | 6 + + .../selftests/wireguard/qemu/arch/arm.config | 9 + + .../wireguard/qemu/arch/armeb.config | 10 + + .../selftests/wireguard/qemu/arch/i686.config | 5 + + .../selftests/wireguard/qemu/arch/m68k.config | 9 + + .../selftests/wireguard/qemu/arch/mips.config | 11 + + .../wireguard/qemu/arch/mips64.config | 14 + + .../wireguard/qemu/arch/mips64el.config | 15 + + .../wireguard/qemu/arch/mipsel.config | 12 + + .../wireguard/qemu/arch/powerpc.config | 10 + + .../wireguard/qemu/arch/powerpc64le.config | 12 + + .../wireguard/qemu/arch/x86_64.config | 5 + + .../selftests/wireguard/qemu/debug.config | 67 +++ + tools/testing/selftests/wireguard/qemu/init.c | 284 +++++++++++++ + .../selftests/wireguard/qemu/kernel.config | 86 ++++ + 18 files changed, 947 insertions(+) + create mode 100644 tools/testing/selftests/wireguard/qemu/.gitignore + create mode 100644 tools/testing/selftests/wireguard/qemu/Makefile + create mode 100644 tools/testing/selftests/wireguard/qemu/arch/aarch64.config + create mode 100644 tools/testing/selftests/wireguard/qemu/arch/aarch64_be.config + create mode 100644 tools/testing/selftests/wireguard/qemu/arch/arm.config + create mode 100644 tools/testing/selftests/wireguard/qemu/arch/armeb.config + create mode 100644 tools/testing/selftests/wireguard/qemu/arch/i686.config + create mode 100644 tools/testing/selftests/wireguard/qemu/arch/m68k.config + create mode 100644 tools/testing/selftests/wireguard/qemu/arch/mips.config + create mode 100644 tools/testing/selftests/wireguard/qemu/arch/mips64.config + create mode 100644 tools/testing/selftests/wireguard/qemu/arch/mips64el.config + create mode 100644 tools/testing/selftests/wireguard/qemu/arch/mipsel.config + create mode 100644 tools/testing/selftests/wireguard/qemu/arch/powerpc.config + create mode 100644 tools/testing/selftests/wireguard/qemu/arch/powerpc64le.config + create mode 100644 tools/testing/selftests/wireguard/qemu/arch/x86_64.config + create mode 100644 tools/testing/selftests/wireguard/qemu/debug.config + create mode 100644 tools/testing/selftests/wireguard/qemu/init.c + create mode 100644 tools/testing/selftests/wireguard/qemu/kernel.config + +--- /dev/null ++++ b/tools/testing/selftests/wireguard/qemu/.gitignore +@@ -0,0 +1,2 @@ ++build/ ++distfiles/ +--- /dev/null ++++ b/tools/testing/selftests/wireguard/qemu/Makefile +@@ -0,0 +1,385 @@ ++# SPDX-License-Identifier: GPL-2.0 ++# ++# Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. ++ ++PWD := $(shell pwd) ++ ++CHOST := $(shell gcc -dumpmachine) ++ifneq (,$(ARCH)) ++CBUILD := $(subst -gcc,,$(lastword $(subst /, ,$(firstword $(wildcard $(foreach bindir,$(subst :, ,$(PATH)),$(bindir)/$(ARCH)-*-gcc)))))) ++ifeq (,$(CBUILD)) ++$(error The toolchain for $(ARCH) is not installed) ++endif ++else ++CBUILD := $(CHOST) ++ARCH := $(firstword $(subst -, ,$(CBUILD))) ++endif ++ ++# Set these from the environment to override ++KERNEL_PATH ?= $(PWD)/../../../../.. 
++BUILD_PATH ?= $(PWD)/build/$(ARCH) ++DISTFILES_PATH ?= $(PWD)/distfiles ++NR_CPUS ?= 4 ++ ++MIRROR := https://download.wireguard.com/qemu-test/distfiles/ ++ ++default: qemu ++ ++# variable name, tarball project name, version, tarball extension, default URI base ++define tar_download = ++$(1)_VERSION := $(3) ++$(1)_NAME := $(2)-$$($(1)_VERSION) ++$(1)_TAR := $(DISTFILES_PATH)/$$($(1)_NAME)$(4) ++$(1)_PATH := $(BUILD_PATH)/$$($(1)_NAME) ++$(call file_download,$$($(1)_NAME)$(4),$(5),$(6)) ++endef ++ ++define file_download = ++$(DISTFILES_PATH)/$(1): ++ mkdir -p $(DISTFILES_PATH) ++ flock -x $$@.lock -c '[ -f $$@ ] && exit 0; wget -O $$@.tmp $(MIRROR)$(1) || wget -t inf --retry-on-http-error=404 -O $$@.tmp $(2)$(1) || rm -f $$@.tmp' ++ if echo "$(3) $$@.tmp" | sha256sum -c -; then mv $$@.tmp $$@; else rm -f $$@.tmp; exit 71; fi ++endef ++ ++$(eval $(call tar_download,MUSL,musl,1.1.20,.tar.gz,https://www.musl-libc.org/releases/,44be8771d0e6c6b5f82dd15662eb2957c9a3173a19a8b49966ac0542bbd40d61)) ++$(eval $(call tar_download,LIBMNL,libmnl,1.0.4,.tar.bz2,https://www.netfilter.org/projects/libmnl/files/,171f89699f286a5854b72b91d06e8f8e3683064c5901fb09d954a9ab6f551f81)) ++$(eval $(call tar_download,IPERF,iperf,3.1.7,.tar.gz,http://downloads.es.net/pub/iperf/,a4ef73406fe92250602b8da2ae89ec53211f805df97a1d1d629db5a14043734f)) ++$(eval $(call tar_download,BASH,bash,5.0,.tar.gz,https://ftp.gnu.org/gnu/bash/,b4a80f2ac66170b2913efbfb9f2594f1f76c7b1afd11f799e22035d63077fb4d)) ++$(eval $(call tar_download,IPROUTE2,iproute2,5.1.0,.tar.gz,https://www.kernel.org/pub/linux/utils/net/iproute2/,9b43707d6075ecdca14803ca8ce0c8553848c49fa1586d12fd508d66577243f2)) ++$(eval $(call tar_download,IPTABLES,iptables,1.6.1,.tar.bz2,https://www.netfilter.org/projects/iptables/files/,0fc2d7bd5d7be11311726466789d4c65fb4c8e096c9182b56ce97440864f0cf5)) ++$(eval $(call tar_download,NMAP,nmap,7.60,.tar.bz2,https://nmap.org/dist/,a8796ecc4fa6c38aad6139d9515dc8113023a82e9d787e5a5fb5fa1b05516f21)) ++$(eval $(call tar_download,IPUTILS,iputils,s20161105,.tar.gz,https://github.com/iputils/iputils/archive/s20161105.tar.gz/#,f813092f03d17294fd23544b129b95cdb87fe19f7970a51908a6b88509acad8a)) ++$(eval $(call tar_download,WIREGUARD_TOOLS,WireGuard,0.0.20191212,.tar.xz,https://git.zx2c4.com/WireGuard/snapshot/,b0d718380f7a8822b2f12d75e462fa4eafa3a77871002981f367cd4fe2a1b071)) ++ ++KERNEL_BUILD_PATH := $(BUILD_PATH)/kernel$(if $(findstring yes,$(DEBUG_KERNEL)),-debug) ++rwildcard=$(foreach d,$(wildcard $1*),$(call rwildcard,$d/,$2) $(filter $(subst *,%,$2),$d)) ++WIREGUARD_SOURCES := $(call rwildcard,$(KERNEL_PATH)/drivers/net/wireguard/,*) ++ ++export CFLAGS ?= -O3 -pipe ++export LDFLAGS ?= ++export CPPFLAGS := -I$(BUILD_PATH)/include ++ ++ifeq ($(CHOST),$(CBUILD)) ++CROSS_COMPILE_FLAG := --host=$(CHOST) ++NOPIE_GCC := gcc -fno-PIE ++CFLAGS += -march=native ++STRIP := strip ++else ++$(info Cross compilation: building for $(CBUILD) using $(CHOST)) ++CROSS_COMPILE_FLAG := --build=$(CBUILD) --host=$(CHOST) ++export CROSS_COMPILE=$(CBUILD)- ++NOPIE_GCC := $(CBUILD)-gcc -fno-PIE ++STRIP := $(CBUILD)-strip ++endif ++ifeq ($(ARCH),aarch64) ++QEMU_ARCH := aarch64 ++KERNEL_ARCH := arm64 ++KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/arm64/boot/Image ++ifeq ($(CHOST),$(CBUILD)) ++QEMU_MACHINE := -cpu host -machine virt,gic_version=host,accel=kvm ++else ++QEMU_MACHINE := -cpu cortex-a53 -machine virt ++CFLAGS += -march=armv8-a -mtune=cortex-a53 ++endif ++else ifeq ($(ARCH),aarch64_be) ++QEMU_ARCH := aarch64 ++KERNEL_ARCH := arm64 ++KERNEL_BZIMAGE := 
$(KERNEL_BUILD_PATH)/arch/arm64/boot/Image ++ifeq ($(CHOST),$(CBUILD)) ++QEMU_MACHINE := -cpu host -machine virt,gic_version=host,accel=kvm ++else ++QEMU_MACHINE := -cpu cortex-a53 -machine virt ++CFLAGS += -march=armv8-a -mtune=cortex-a53 ++endif ++else ifeq ($(ARCH),arm) ++QEMU_ARCH := arm ++KERNEL_ARCH := arm ++KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/arm/boot/zImage ++ifeq ($(CHOST),$(CBUILD)) ++QEMU_MACHINE := -cpu host -machine virt,gic_version=host,accel=kvm ++else ++QEMU_MACHINE := -cpu cortex-a15 -machine virt ++CFLAGS += -march=armv7-a -mtune=cortex-a15 -mabi=aapcs-linux ++endif ++else ifeq ($(ARCH),armeb) ++QEMU_ARCH := arm ++KERNEL_ARCH := arm ++KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/arm/boot/zImage ++ifeq ($(CHOST),$(CBUILD)) ++QEMU_MACHINE := -cpu host -machine virt,gic_version=host,accel=kvm ++else ++QEMU_MACHINE := -cpu cortex-a15 -machine virt ++CFLAGS += -march=armv7-a -mabi=aapcs-linux # We don't pass -mtune=cortex-a15 due to a compiler bug on big endian. ++LDFLAGS += -Wl,--be8 ++endif ++else ifeq ($(ARCH),x86_64) ++QEMU_ARCH := x86_64 ++KERNEL_ARCH := x86_64 ++KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/x86/boot/bzImage ++ifeq ($(CHOST),$(CBUILD)) ++QEMU_MACHINE := -cpu host -machine q35,accel=kvm ++else ++QEMU_MACHINE := -cpu Skylake-Server -machine q35 ++CFLAGS += -march=skylake-avx512 ++endif ++else ifeq ($(ARCH),i686) ++QEMU_ARCH := i386 ++KERNEL_ARCH := x86 ++KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/x86/boot/bzImage ++ifeq ($(subst i686,x86_64,$(CBUILD)),$(CHOST)) ++QEMU_MACHINE := -cpu host -machine q35,accel=kvm ++else ++QEMU_MACHINE := -cpu coreduo -machine q35 ++CFLAGS += -march=prescott ++endif ++else ifeq ($(ARCH),mips64) ++QEMU_ARCH := mips64 ++KERNEL_ARCH := mips ++KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/vmlinux ++ifeq ($(CHOST),$(CBUILD)) ++QEMU_MACHINE := -cpu host -machine malta,accel=kvm ++CFLAGS += -EB ++else ++QEMU_MACHINE := -cpu MIPS64R2-generic -machine malta -smp 1 ++CFLAGS += -march=mips64r2 -EB ++endif ++else ifeq ($(ARCH),mips64el) ++QEMU_ARCH := mips64el ++KERNEL_ARCH := mips ++KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/vmlinux ++ifeq ($(CHOST),$(CBUILD)) ++QEMU_MACHINE := -cpu host -machine malta,accel=kvm ++CFLAGS += -EL ++else ++QEMU_MACHINE := -cpu MIPS64R2-generic -machine malta -smp 1 ++CFLAGS += -march=mips64r2 -EL ++endif ++else ifeq ($(ARCH),mips) ++QEMU_ARCH := mips ++KERNEL_ARCH := mips ++KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/vmlinux ++ifeq ($(CHOST),$(CBUILD)) ++QEMU_MACHINE := -cpu host -machine malta,accel=kvm ++CFLAGS += -EB ++else ++QEMU_MACHINE := -cpu 24Kf -machine malta -smp 1 ++CFLAGS += -march=mips32r2 -EB ++endif ++else ifeq ($(ARCH),mipsel) ++QEMU_ARCH := mipsel ++KERNEL_ARCH := mips ++KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/vmlinux ++ifeq ($(CHOST),$(CBUILD)) ++QEMU_MACHINE := -cpu host -machine malta,accel=kvm ++CFLAGS += -EL ++else ++QEMU_MACHINE := -cpu 24Kf -machine malta -smp 1 ++CFLAGS += -march=mips32r2 -EL ++endif ++else ifeq ($(ARCH),powerpc64le) ++QEMU_ARCH := ppc64 ++KERNEL_ARCH := powerpc ++KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/vmlinux ++ifeq ($(CHOST),$(CBUILD)) ++QEMU_MACHINE := -cpu host,accel=kvm -machine pseries ++else ++QEMU_MACHINE := -machine pseries ++endif ++CFLAGS += -mcpu=powerpc64le -mlong-double-64 ++else ifeq ($(ARCH),powerpc) ++QEMU_ARCH := ppc ++KERNEL_ARCH := powerpc ++KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/powerpc/boot/uImage ++ifeq ($(CHOST),$(CBUILD)) ++QEMU_MACHINE := -cpu host,accel=kvm -machine ppce500 ++else ++QEMU_MACHINE := -machine ppce500 ++endif ++CFLAGS += 
-mcpu=powerpc -mlong-double-64 -msecure-plt ++else ifeq ($(ARCH),m68k) ++QEMU_ARCH := m68k ++KERNEL_ARCH := m68k ++KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/vmlinux ++ifeq ($(CHOST),$(CBUILD)) ++QEMU_MACHINE := -cpu host,accel=kvm -machine q800 ++else ++QEMU_MACHINE := -machine q800 ++endif ++else ++$(error I only build: x86_64, i686, arm, armeb, aarch64, aarch64_be, mips, mipsel, mips64, mips64el, powerpc64le, powerpc, m68k) ++endif ++ ++REAL_CC := $(CBUILD)-gcc ++MUSL_CC := $(BUILD_PATH)/musl-gcc ++export CC := $(MUSL_CC) ++USERSPACE_DEPS := $(MUSL_CC) $(BUILD_PATH)/include/.installed $(BUILD_PATH)/include/linux/.installed ++ ++build: $(KERNEL_BZIMAGE) ++qemu: $(KERNEL_BZIMAGE) ++ rm -f $(BUILD_PATH)/result ++ timeout --foreground 20m qemu-system-$(QEMU_ARCH) \ ++ -nodefaults \ ++ -nographic \ ++ -smp $(NR_CPUS) \ ++ $(QEMU_MACHINE) \ ++ -m $$(grep -q CONFIG_DEBUG_KMEMLEAK=y $(KERNEL_BUILD_PATH)/.config && echo 1G || echo 256M) \ ++ -serial stdio \ ++ -serial file:$(BUILD_PATH)/result \ ++ -no-reboot \ ++ -monitor none \ ++ -kernel $< ++ grep -Fq success $(BUILD_PATH)/result ++ ++$(BUILD_PATH)/init-cpio-spec.txt: ++ mkdir -p $(BUILD_PATH) ++ echo "file /init $(BUILD_PATH)/init 755 0 0" > $@ ++ echo "file /init.sh $(PWD)/../netns.sh 755 0 0" >> $@ ++ echo "dir /dev 755 0 0" >> $@ ++ echo "nod /dev/console 644 0 0 c 5 1" >> $@ ++ echo "dir /bin 755 0 0" >> $@ ++ echo "file /bin/iperf3 $(IPERF_PATH)/src/iperf3 755 0 0" >> $@ ++ echo "file /bin/wg $(WIREGUARD_TOOLS_PATH)/src/tools/wg 755 0 0" >> $@ ++ echo "file /bin/bash $(BASH_PATH)/bash 755 0 0" >> $@ ++ echo "file /bin/ip $(IPROUTE2_PATH)/ip/ip 755 0 0" >> $@ ++ echo "file /bin/ss $(IPROUTE2_PATH)/misc/ss 755 0 0" >> $@ ++ echo "file /bin/ping $(IPUTILS_PATH)/ping 755 0 0" >> $@ ++ echo "file /bin/ncat $(NMAP_PATH)/ncat/ncat 755 0 0" >> $@ ++ echo "file /bin/xtables-multi $(IPTABLES_PATH)/iptables/xtables-multi 755 0 0" >> $@ ++ echo "slink /bin/iptables xtables-multi 777 0 0" >> $@ ++ echo "slink /bin/ping6 ping 777 0 0" >> $@ ++ echo "dir /lib 755 0 0" >> $@ ++ echo "file /lib/libc.so $(MUSL_PATH)/lib/libc.so 755 0 0" >> $@ ++ echo "slink /lib/ld-linux.so.1 libc.so 777 0 0" >> $@ ++ ++$(KERNEL_BUILD_PATH)/.config: kernel.config arch/$(ARCH).config ++ mkdir -p $(KERNEL_BUILD_PATH) ++ cp kernel.config $(KERNEL_BUILD_PATH)/minimal.config ++ printf 'CONFIG_NR_CPUS=$(NR_CPUS)\nCONFIG_INITRAMFS_SOURCE="$(BUILD_PATH)/init-cpio-spec.txt"\n' >> $(KERNEL_BUILD_PATH)/minimal.config ++ cat arch/$(ARCH).config >> $(KERNEL_BUILD_PATH)/minimal.config ++ $(MAKE) -C $(KERNEL_PATH) O=$(KERNEL_BUILD_PATH) ARCH=$(KERNEL_ARCH) allnoconfig ++ cd $(KERNEL_BUILD_PATH) && ARCH=$(KERNEL_ARCH) $(KERNEL_PATH)/scripts/kconfig/merge_config.sh -n $(KERNEL_BUILD_PATH)/.config $(KERNEL_BUILD_PATH)/minimal.config ++ $(if $(findstring yes,$(DEBUG_KERNEL)),cp debug.config $(KERNEL_BUILD_PATH) && cd $(KERNEL_BUILD_PATH) && ARCH=$(KERNEL_ARCH) $(KERNEL_PATH)/scripts/kconfig/merge_config.sh -n $(KERNEL_BUILD_PATH)/.config debug.config,) ++ ++$(KERNEL_BZIMAGE): $(KERNEL_BUILD_PATH)/.config $(BUILD_PATH)/init-cpio-spec.txt $(MUSL_PATH)/lib/libc.so $(IPERF_PATH)/src/iperf3 $(IPUTILS_PATH)/ping $(BASH_PATH)/bash $(IPROUTE2_PATH)/misc/ss $(IPROUTE2_PATH)/ip/ip $(IPTABLES_PATH)/iptables/xtables-multi $(NMAP_PATH)/ncat/ncat $(WIREGUARD_TOOLS_PATH)/src/tools/wg $(BUILD_PATH)/init ../netns.sh $(WIREGUARD_SOURCES) ++ $(MAKE) -C $(KERNEL_PATH) O=$(KERNEL_BUILD_PATH) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(CROSS_COMPILE) CC="$(NOPIE_GCC)" ++ ++$(BUILD_PATH)/include/linux/.installed: | 
$(KERNEL_BUILD_PATH)/.config ++ $(MAKE) -C $(KERNEL_PATH) O=$(KERNEL_BUILD_PATH) INSTALL_HDR_PATH=$(BUILD_PATH) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(CROSS_COMPILE) headers_install ++ touch $@ ++ ++$(MUSL_PATH)/lib/libc.so: $(MUSL_TAR) ++ mkdir -p $(BUILD_PATH) ++ flock -s $<.lock tar -C $(BUILD_PATH) -xf $< ++ cd $(MUSL_PATH) && CC=$(REAL_CC) ./configure --prefix=/ --disable-static --build=$(CBUILD) ++ $(MAKE) -C $(MUSL_PATH) ++ $(STRIP) -s $@ ++ ++$(BUILD_PATH)/include/.installed: $(MUSL_PATH)/lib/libc.so ++ $(MAKE) -C $(MUSL_PATH) DESTDIR=$(BUILD_PATH) install-headers ++ touch $@ ++ ++$(MUSL_CC): $(MUSL_PATH)/lib/libc.so ++ sh $(MUSL_PATH)/tools/musl-gcc.specs.sh $(BUILD_PATH)/include $(MUSL_PATH)/lib /lib/ld-linux.so.1 > $(BUILD_PATH)/musl-gcc.specs ++ printf '#!/bin/sh\nexec "$(REAL_CC)" --specs="$(BUILD_PATH)/musl-gcc.specs" -fno-stack-protector -no-pie "$$@"\n' > $(BUILD_PATH)/musl-gcc ++ chmod +x $(BUILD_PATH)/musl-gcc ++ ++$(IPERF_PATH)/.installed: $(IPERF_TAR) ++ mkdir -p $(BUILD_PATH) ++ flock -s $<.lock tar -C $(BUILD_PATH) -xf $< ++ sed -i '1s/^/#include /' $(IPERF_PATH)/src/cjson.h $(IPERF_PATH)/src/timer.h ++ sed -i -r 's/-p?g//g' $(IPERF_PATH)/src/Makefile* ++ touch $@ ++ ++$(IPERF_PATH)/src/iperf3: | $(IPERF_PATH)/.installed $(USERSPACE_DEPS) ++ cd $(IPERF_PATH) && CFLAGS="$(CFLAGS) -D_GNU_SOURCE" ./configure --prefix=/ $(CROSS_COMPILE_FLAG) --enable-static --disable-shared ++ $(MAKE) -C $(IPERF_PATH) ++ $(STRIP) -s $@ ++ ++$(LIBMNL_PATH)/.installed: $(LIBMNL_TAR) ++ flock -s $<.lock tar -C $(BUILD_PATH) -xf $< ++ touch $@ ++ ++$(LIBMNL_PATH)/src/.libs/libmnl.a: | $(LIBMNL_PATH)/.installed $(USERSPACE_DEPS) ++ cd $(LIBMNL_PATH) && ./configure --prefix=/ $(CROSS_COMPILE_FLAG) --enable-static --disable-shared ++ $(MAKE) -C $(LIBMNL_PATH) ++ sed -i 's:prefix=.*:prefix=$(LIBMNL_PATH):' $(LIBMNL_PATH)/libmnl.pc ++ ++$(WIREGUARD_TOOLS_PATH)/.installed: $(WIREGUARD_TOOLS_TAR) ++ flock -s $<.lock tar -C $(BUILD_PATH) -xf $< ++ touch $@ ++ ++$(WIREGUARD_TOOLS_PATH)/src/tools/wg: | $(WIREGUARD_TOOLS_PATH)/.installed $(LIBMNL_PATH)/src/.libs/libmnl.a $(USERSPACE_DEPS) ++ LDFLAGS="$(LDFLAGS) -L$(LIBMNL_PATH)/src/.libs" $(MAKE) -C $(WIREGUARD_TOOLS_PATH)/src/tools LIBMNL_CFLAGS="-I$(LIBMNL_PATH)/include" LIBMNL_LDLIBS="-lmnl" wg ++ $(STRIP) -s $@ ++ ++$(BUILD_PATH)/init: init.c | $(USERSPACE_DEPS) ++ mkdir -p $(BUILD_PATH) ++ $(MUSL_CC) -o $@ $(CFLAGS) $(LDFLAGS) -std=gnu11 $< ++ $(STRIP) -s $@ ++ ++$(IPUTILS_PATH)/.installed: $(IPUTILS_TAR) ++ mkdir -p $(BUILD_PATH) ++ flock -s $<.lock tar -C $(BUILD_PATH) -xf $< ++ touch $@ ++ ++$(IPUTILS_PATH)/ping: | $(IPUTILS_PATH)/.installed $(USERSPACE_DEPS) ++ $(MAKE) -C $(IPUTILS_PATH) USE_CAP=no USE_IDN=no USE_NETTLE=no USE_CRYPTO=no ping ++ $(STRIP) -s $@ ++ ++$(BASH_PATH)/.installed: $(BASH_TAR) ++ mkdir -p $(BUILD_PATH) ++ flock -s $<.lock tar -C $(BUILD_PATH) -xf $< ++ touch $@ ++ ++$(BASH_PATH)/bash: | $(BASH_PATH)/.installed $(USERSPACE_DEPS) ++ cd $(BASH_PATH) && ./configure --prefix=/ $(CROSS_COMPILE_FLAG) --without-bash-malloc --disable-debugger --disable-help-builtin --disable-history --disable-multibyte --disable-progcomp --disable-readline --disable-mem-scramble ++ $(MAKE) -C $(BASH_PATH) ++ $(STRIP) -s $@ ++ ++$(IPROUTE2_PATH)/.installed: $(IPROUTE2_TAR) ++ mkdir -p $(BUILD_PATH) ++ flock -s $<.lock tar -C $(BUILD_PATH) -xf $< ++ printf 
'CC:=$(CC)\nPKG_CONFIG:=pkg-config\nTC_CONFIG_XT:=n\nTC_CONFIG_ATM:=n\nTC_CONFIG_IPSET:=n\nIP_CONFIG_SETNS:=y\nHAVE_ELF:=n\nHAVE_MNL:=y\nHAVE_BERKELEY_DB:=n\nHAVE_LATEX:=n\nHAVE_PDFLATEX:=n\nCFLAGS+=-DHAVE_SETNS -DHAVE_LIBMNL -I$(LIBMNL_PATH)/include\nLDLIBS+=-lmnl' > $(IPROUTE2_PATH)/config.mk ++ printf 'lib: snapshot\n\t$$(MAKE) -C lib\nip/ip: lib\n\t$$(MAKE) -C ip ip\nmisc/ss: lib\n\t$$(MAKE) -C misc ss\n' >> $(IPROUTE2_PATH)/Makefile ++ touch $@ ++ ++$(IPROUTE2_PATH)/ip/ip: | $(IPROUTE2_PATH)/.installed $(LIBMNL_PATH)/src/.libs/libmnl.a $(USERSPACE_DEPS) ++ LDFLAGS="$(LDFLAGS) -L$(LIBMNL_PATH)/src/.libs" PKG_CONFIG_LIBDIR="$(LIBMNL_PATH)" $(MAKE) -C $(IPROUTE2_PATH) PREFIX=/ ip/ip ++ $(STRIP) -s $(IPROUTE2_PATH)/ip/ip ++ ++$(IPROUTE2_PATH)/misc/ss: | $(IPROUTE2_PATH)/.installed $(LIBMNL_PATH)/src/.libs/libmnl.a $(USERSPACE_DEPS) ++ LDFLAGS="$(LDFLAGS) -L$(LIBMNL_PATH)/src/.libs" PKG_CONFIG_LIBDIR="$(LIBMNL_PATH)" $(MAKE) -C $(IPROUTE2_PATH) PREFIX=/ misc/ss ++ $(STRIP) -s $(IPROUTE2_PATH)/misc/ss ++ ++$(IPTABLES_PATH)/.installed: $(IPTABLES_TAR) ++ mkdir -p $(BUILD_PATH) ++ flock -s $<.lock tar -C $(BUILD_PATH) -xf $< ++ sed -i -e "/nfnetlink=[01]/s:=[01]:=0:" -e "/nfconntrack=[01]/s:=[01]:=0:" $(IPTABLES_PATH)/configure ++ touch $@ ++ ++$(IPTABLES_PATH)/iptables/xtables-multi: | $(IPTABLES_PATH)/.installed $(LIBMNL_PATH)/src/.libs/libmnl.a $(USERSPACE_DEPS) ++ cd $(IPTABLES_PATH) && PKG_CONFIG_LIBDIR="$(LIBMNL_PATH)" ./configure --prefix=/ $(CROSS_COMPILE_FLAG) --enable-static --disable-shared --disable-nftables --disable-bpf-compiler --disable-nfsynproxy --disable-libipq --with-kernel=$(BUILD_PATH)/include ++ $(MAKE) -C $(IPTABLES_PATH) ++ $(STRIP) -s $@ ++ ++$(NMAP_PATH)/.installed: $(NMAP_TAR) ++ mkdir -p $(BUILD_PATH) ++ flock -s $<.lock tar -C $(BUILD_PATH) -xf $< ++ touch $@ ++ ++$(NMAP_PATH)/ncat/ncat: | $(NMAP_PATH)/.installed $(USERSPACE_DEPS) ++ cd $(NMAP_PATH) && ./configure --prefix=/ $(CROSS_COMPILE_FLAG) --enable-static --disable-shared --without-ndiff --without-zenmap --without-nping --with-libpcap=included --with-libpcre=included --with-libdnet=included --without-liblua --with-liblinear=included --without-nmap-update --without-openssl --with-pcap=linux ++ $(MAKE) -C $(NMAP_PATH) build-ncat ++ $(STRIP) -s $@ ++ ++clean: ++ rm -rf $(BUILD_PATH) ++ ++distclean: clean ++ rm -rf $(DISTFILES_PATH) ++ ++menuconfig: $(KERNEL_BUILD_PATH)/.config ++ $(MAKE) -C $(KERNEL_PATH) O=$(KERNEL_BUILD_PATH) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(CROSS_COMPILE) CC="$(NOPIE_GCC)" menuconfig ++ ++.PHONY: qemu build clean distclean menuconfig ++.DELETE_ON_ERROR: +--- /dev/null ++++ b/tools/testing/selftests/wireguard/qemu/arch/aarch64.config +@@ -0,0 +1,5 @@ ++CONFIG_SERIAL_AMBA_PL011=y ++CONFIG_SERIAL_AMBA_PL011_CONSOLE=y ++CONFIG_CMDLINE_BOOL=y ++CONFIG_CMDLINE="console=ttyAMA0 wg.success=ttyAMA1" ++CONFIG_FRAME_WARN=1280 +--- /dev/null ++++ b/tools/testing/selftests/wireguard/qemu/arch/aarch64_be.config +@@ -0,0 +1,6 @@ ++CONFIG_CPU_BIG_ENDIAN=y ++CONFIG_SERIAL_AMBA_PL011=y ++CONFIG_SERIAL_AMBA_PL011_CONSOLE=y ++CONFIG_CMDLINE_BOOL=y ++CONFIG_CMDLINE="console=ttyAMA0 wg.success=ttyAMA1" ++CONFIG_FRAME_WARN=1280 +--- /dev/null ++++ b/tools/testing/selftests/wireguard/qemu/arch/arm.config +@@ -0,0 +1,9 @@ ++CONFIG_MMU=y ++CONFIG_ARCH_MULTI_V7=y ++CONFIG_ARCH_VIRT=y ++CONFIG_THUMB2_KERNEL=n ++CONFIG_SERIAL_AMBA_PL011=y ++CONFIG_SERIAL_AMBA_PL011_CONSOLE=y ++CONFIG_CMDLINE_BOOL=y ++CONFIG_CMDLINE="console=ttyAMA0 wg.success=ttyAMA1" ++CONFIG_FRAME_WARN=1024 +--- /dev/null ++++ 
b/tools/testing/selftests/wireguard/qemu/arch/armeb.config +@@ -0,0 +1,10 @@ ++CONFIG_MMU=y ++CONFIG_ARCH_MULTI_V7=y ++CONFIG_ARCH_VIRT=y ++CONFIG_THUMB2_KERNEL=n ++CONFIG_SERIAL_AMBA_PL011=y ++CONFIG_SERIAL_AMBA_PL011_CONSOLE=y ++CONFIG_CMDLINE_BOOL=y ++CONFIG_CMDLINE="console=ttyAMA0 wg.success=ttyAMA1" ++CONFIG_CPU_BIG_ENDIAN=y ++CONFIG_FRAME_WARN=1024 +--- /dev/null ++++ b/tools/testing/selftests/wireguard/qemu/arch/i686.config +@@ -0,0 +1,5 @@ ++CONFIG_SERIAL_8250=y ++CONFIG_SERIAL_8250_CONSOLE=y ++CONFIG_CMDLINE_BOOL=y ++CONFIG_CMDLINE="console=ttyS0 wg.success=ttyS1" ++CONFIG_FRAME_WARN=1024 +--- /dev/null ++++ b/tools/testing/selftests/wireguard/qemu/arch/m68k.config +@@ -0,0 +1,9 @@ ++CONFIG_MMU=y ++CONFIG_M68040=y ++CONFIG_MAC=y ++CONFIG_SERIAL_PMACZILOG=y ++CONFIG_SERIAL_PMACZILOG_TTYS=y ++CONFIG_SERIAL_PMACZILOG_CONSOLE=y ++CONFIG_CMDLINE_BOOL=y ++CONFIG_CMDLINE="console=ttyS0 wg.success=ttyS1" ++CONFIG_FRAME_WARN=1024 +--- /dev/null ++++ b/tools/testing/selftests/wireguard/qemu/arch/mips.config +@@ -0,0 +1,11 @@ ++CONFIG_CPU_MIPS32_R2=y ++CONFIG_MIPS_MALTA=y ++CONFIG_MIPS_CPS=y ++CONFIG_MIPS_FP_SUPPORT=y ++CONFIG_POWER_RESET=y ++CONFIG_POWER_RESET_SYSCON=y ++CONFIG_SERIAL_8250=y ++CONFIG_SERIAL_8250_CONSOLE=y ++CONFIG_CMDLINE_BOOL=y ++CONFIG_CMDLINE="console=ttyS0 wg.success=ttyS1" ++CONFIG_FRAME_WARN=1024 +--- /dev/null ++++ b/tools/testing/selftests/wireguard/qemu/arch/mips64.config +@@ -0,0 +1,14 @@ ++CONFIG_64BIT=y ++CONFIG_CPU_MIPS64_R2=y ++CONFIG_MIPS32_N32=y ++CONFIG_CPU_HAS_MSA=y ++CONFIG_MIPS_MALTA=y ++CONFIG_MIPS_CPS=y ++CONFIG_MIPS_FP_SUPPORT=y ++CONFIG_POWER_RESET=y ++CONFIG_POWER_RESET_SYSCON=y ++CONFIG_SERIAL_8250=y ++CONFIG_SERIAL_8250_CONSOLE=y ++CONFIG_CMDLINE_BOOL=y ++CONFIG_CMDLINE="console=ttyS0 wg.success=ttyS1" ++CONFIG_FRAME_WARN=1280 +--- /dev/null ++++ b/tools/testing/selftests/wireguard/qemu/arch/mips64el.config +@@ -0,0 +1,15 @@ ++CONFIG_64BIT=y ++CONFIG_CPU_MIPS64_R2=y ++CONFIG_MIPS32_N32=y ++CONFIG_CPU_HAS_MSA=y ++CONFIG_MIPS_MALTA=y ++CONFIG_CPU_LITTLE_ENDIAN=y ++CONFIG_MIPS_CPS=y ++CONFIG_MIPS_FP_SUPPORT=y ++CONFIG_POWER_RESET=y ++CONFIG_POWER_RESET_SYSCON=y ++CONFIG_SERIAL_8250=y ++CONFIG_SERIAL_8250_CONSOLE=y ++CONFIG_CMDLINE_BOOL=y ++CONFIG_CMDLINE="console=ttyS0 wg.success=ttyS1" ++CONFIG_FRAME_WARN=1280 +--- /dev/null ++++ b/tools/testing/selftests/wireguard/qemu/arch/mipsel.config +@@ -0,0 +1,12 @@ ++CONFIG_CPU_MIPS32_R2=y ++CONFIG_MIPS_MALTA=y ++CONFIG_CPU_LITTLE_ENDIAN=y ++CONFIG_MIPS_CPS=y ++CONFIG_MIPS_FP_SUPPORT=y ++CONFIG_POWER_RESET=y ++CONFIG_POWER_RESET_SYSCON=y ++CONFIG_SERIAL_8250=y ++CONFIG_SERIAL_8250_CONSOLE=y ++CONFIG_CMDLINE_BOOL=y ++CONFIG_CMDLINE="console=ttyS0 wg.success=ttyS1" ++CONFIG_FRAME_WARN=1024 +--- /dev/null ++++ b/tools/testing/selftests/wireguard/qemu/arch/powerpc.config +@@ -0,0 +1,10 @@ ++CONFIG_PPC_QEMU_E500=y ++CONFIG_FSL_SOC_BOOKE=y ++CONFIG_PPC_85xx=y ++CONFIG_PHYS_64BIT=y ++CONFIG_SERIAL_8250=y ++CONFIG_SERIAL_8250_CONSOLE=y ++CONFIG_MATH_EMULATION=y ++CONFIG_CMDLINE_BOOL=y ++CONFIG_CMDLINE="console=ttyS0 wg.success=ttyS1" ++CONFIG_FRAME_WARN=1024 +--- /dev/null ++++ b/tools/testing/selftests/wireguard/qemu/arch/powerpc64le.config +@@ -0,0 +1,12 @@ ++CONFIG_PPC64=y ++CONFIG_PPC_PSERIES=y ++CONFIG_ALTIVEC=y ++CONFIG_VSX=y ++CONFIG_PPC_OF_BOOT_TRAMPOLINE=y ++CONFIG_PPC_RADIX_MMU=y ++CONFIG_HVC_CONSOLE=y ++CONFIG_CPU_LITTLE_ENDIAN=y ++CONFIG_CMDLINE_BOOL=y ++CONFIG_CMDLINE="console=hvc0 wg.success=hvc1" ++CONFIG_SECTION_MISMATCH_WARN_ONLY=y ++CONFIG_FRAME_WARN=1280 +--- /dev/null ++++ 
b/tools/testing/selftests/wireguard/qemu/arch/x86_64.config +@@ -0,0 +1,5 @@ ++CONFIG_SERIAL_8250=y ++CONFIG_SERIAL_8250_CONSOLE=y ++CONFIG_CMDLINE_BOOL=y ++CONFIG_CMDLINE="console=ttyS0 wg.success=ttyS1" ++CONFIG_FRAME_WARN=1280 +--- /dev/null ++++ b/tools/testing/selftests/wireguard/qemu/debug.config +@@ -0,0 +1,67 @@ ++CONFIG_LOCALVERSION="-debug" ++CONFIG_ENABLE_WARN_DEPRECATED=y ++CONFIG_ENABLE_MUST_CHECK=y ++CONFIG_FRAME_POINTER=y ++CONFIG_STACK_VALIDATION=y ++CONFIG_DEBUG_KERNEL=y ++CONFIG_DEBUG_INFO=y ++CONFIG_DEBUG_INFO_DWARF4=y ++CONFIG_PAGE_EXTENSION=y ++CONFIG_PAGE_POISONING=y ++CONFIG_DEBUG_OBJECTS=y ++CONFIG_DEBUG_OBJECTS_FREE=y ++CONFIG_DEBUG_OBJECTS_TIMERS=y ++CONFIG_DEBUG_OBJECTS_WORK=y ++CONFIG_DEBUG_OBJECTS_RCU_HEAD=y ++CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y ++CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT=1 ++CONFIG_SLUB_DEBUG_ON=y ++CONFIG_DEBUG_VM=y ++CONFIG_DEBUG_MEMORY_INIT=y ++CONFIG_HAVE_DEBUG_STACKOVERFLOW=y ++CONFIG_DEBUG_STACKOVERFLOW=y ++CONFIG_HAVE_ARCH_KMEMCHECK=y ++CONFIG_HAVE_ARCH_KASAN=y ++CONFIG_KASAN=y ++CONFIG_KASAN_INLINE=y ++CONFIG_UBSAN=y ++CONFIG_UBSAN_SANITIZE_ALL=y ++CONFIG_UBSAN_NO_ALIGNMENT=y ++CONFIG_UBSAN_NULL=y ++CONFIG_DEBUG_KMEMLEAK=y ++CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE=8192 ++CONFIG_DEBUG_STACK_USAGE=y ++CONFIG_DEBUG_SHIRQ=y ++CONFIG_WQ_WATCHDOG=y ++CONFIG_SCHED_DEBUG=y ++CONFIG_SCHED_INFO=y ++CONFIG_SCHEDSTATS=y ++CONFIG_SCHED_STACK_END_CHECK=y ++CONFIG_DEBUG_TIMEKEEPING=y ++CONFIG_TIMER_STATS=y ++CONFIG_DEBUG_PREEMPT=y ++CONFIG_DEBUG_RT_MUTEXES=y ++CONFIG_DEBUG_SPINLOCK=y ++CONFIG_DEBUG_MUTEXES=y ++CONFIG_DEBUG_LOCK_ALLOC=y ++CONFIG_PROVE_LOCKING=y ++CONFIG_LOCKDEP=y ++CONFIG_DEBUG_ATOMIC_SLEEP=y ++CONFIG_TRACE_IRQFLAGS=y ++CONFIG_DEBUG_BUGVERBOSE=y ++CONFIG_DEBUG_LIST=y ++CONFIG_DEBUG_PI_LIST=y ++CONFIG_PROVE_RCU=y ++CONFIG_SPARSE_RCU_POINTER=y ++CONFIG_RCU_CPU_STALL_TIMEOUT=21 ++CONFIG_RCU_TRACE=y ++CONFIG_RCU_EQS_DEBUG=y ++CONFIG_USER_STACKTRACE_SUPPORT=y ++CONFIG_DEBUG_SG=y ++CONFIG_DEBUG_NOTIFIERS=y ++CONFIG_DOUBLEFAULT=y ++CONFIG_X86_DEBUG_FPU=y ++CONFIG_DEBUG_SECTION_MISMATCH=y ++CONFIG_DEBUG_PAGEALLOC=y ++CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT=y ++CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y +--- /dev/null ++++ b/tools/testing/selftests/wireguard/qemu/init.c +@@ -0,0 +1,284 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
++ */ ++ ++#define _GNU_SOURCE ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++__attribute__((noreturn)) static void poweroff(void) ++{ ++ fflush(stdout); ++ fflush(stderr); ++ reboot(RB_AUTOBOOT); ++ sleep(30); ++ fprintf(stderr, "\x1b[37m\x1b[41m\x1b[1mFailed to power off!!!\x1b[0m\n"); ++ exit(1); ++} ++ ++static void panic(const char *what) ++{ ++ fprintf(stderr, "\n\n\x1b[37m\x1b[41m\x1b[1mSOMETHING WENT HORRIBLY WRONG\x1b[0m\n\n \x1b[31m\x1b[1m%s: %s\x1b[0m\n\n\x1b[37m\x1b[44m\x1b[1mPower off...\x1b[0m\n\n", what, strerror(errno)); ++ poweroff(); ++} ++ ++#define pretty_message(msg) puts("\x1b[32m\x1b[1m" msg "\x1b[0m") ++ ++static void print_banner(void) ++{ ++ struct utsname utsname; ++ int len; ++ ++ if (uname(&utsname) < 0) ++ panic("uname"); ++ ++ len = strlen(" WireGuard Test Suite on ") + strlen(utsname.sysname) + strlen(utsname.release) + strlen(utsname.machine); ++ printf("\x1b[45m\x1b[33m\x1b[1m%*.s\x1b[0m\n\x1b[45m\x1b[33m\x1b[1m WireGuard Test Suite on %s %s %s \x1b[0m\n\x1b[45m\x1b[33m\x1b[1m%*.s\x1b[0m\n\n", len, "", utsname.sysname, utsname.release, utsname.machine, len, ""); ++} ++ ++static void seed_rng(void) ++{ ++ int fd; ++ struct { ++ int entropy_count; ++ int buffer_size; ++ unsigned char buffer[256]; ++ } entropy = { ++ .entropy_count = sizeof(entropy.buffer) * 8, ++ .buffer_size = sizeof(entropy.buffer), ++ .buffer = "Adding real entropy is not actually important for these tests. Don't try this at home, kids!" ++ }; ++ ++ if (mknod("/dev/urandom", S_IFCHR | 0644, makedev(1, 9))) ++ panic("mknod(/dev/urandom)"); ++ fd = open("/dev/urandom", O_WRONLY); ++ if (fd < 0) ++ panic("open(urandom)"); ++ for (int i = 0; i < 256; ++i) { ++ if (ioctl(fd, RNDADDENTROPY, &entropy) < 0) ++ panic("ioctl(urandom)"); ++ } ++ close(fd); ++} ++ ++static void mount_filesystems(void) ++{ ++ pretty_message("[+] Mounting filesystems..."); ++ mkdir("/dev", 0755); ++ mkdir("/proc", 0755); ++ mkdir("/sys", 0755); ++ mkdir("/tmp", 0755); ++ mkdir("/run", 0755); ++ mkdir("/var", 0755); ++ if (mount("none", "/dev", "devtmpfs", 0, NULL)) ++ panic("devtmpfs mount"); ++ if (mount("none", "/proc", "proc", 0, NULL)) ++ panic("procfs mount"); ++ if (mount("none", "/sys", "sysfs", 0, NULL)) ++ panic("sysfs mount"); ++ if (mount("none", "/tmp", "tmpfs", 0, NULL)) ++ panic("tmpfs mount"); ++ if (mount("none", "/run", "tmpfs", 0, NULL)) ++ panic("tmpfs mount"); ++ if (mount("none", "/sys/kernel/debug", "debugfs", 0, NULL)) ++ ; /* Not a problem if it fails.*/ ++ if (symlink("/run", "/var/run")) ++ panic("run symlink"); ++ if (symlink("/proc/self/fd", "/dev/fd")) ++ panic("fd symlink"); ++} ++ ++static void enable_logging(void) ++{ ++ int fd; ++ pretty_message("[+] Enabling logging..."); ++ fd = open("/proc/sys/kernel/printk", O_WRONLY); ++ if (fd >= 0) { ++ if (write(fd, "9\n", 2) != 2) ++ panic("write(printk)"); ++ close(fd); ++ } ++ fd = open("/proc/sys/debug/exception-trace", O_WRONLY); ++ if (fd >= 0) { ++ if (write(fd, "1\n", 2) != 2) ++ panic("write(exception-trace)"); ++ close(fd); ++ } ++ fd = open("/proc/sys/kernel/panic_on_warn", O_WRONLY); ++ if (fd >= 0) { ++ if (write(fd, "1\n", 2) != 2) ++ panic("write(panic_on_warn)"); ++ close(fd); ++ } ++} ++ ++static void kmod_selftests(void) ++{ ++ FILE *file; ++ char line[2048], *start, *pass; ++ bool success = true; ++ pretty_message("[+] Module self-tests:"); ++ file = 
fopen("/proc/kmsg", "r"); ++ if (!file) ++ panic("fopen(kmsg)"); ++ if (fcntl(fileno(file), F_SETFL, O_NONBLOCK) < 0) ++ panic("fcntl(kmsg, nonblock)"); ++ while (fgets(line, sizeof(line), file)) { ++ start = strstr(line, "wireguard: "); ++ if (!start) ++ continue; ++ start += 11; ++ *strchrnul(start, '\n') = '\0'; ++ if (strstr(start, "www.wireguard.com")) ++ break; ++ pass = strstr(start, ": pass"); ++ if (!pass || pass[6] != '\0') { ++ success = false; ++ printf(" \x1b[31m* %s\x1b[0m\n", start); ++ } else ++ printf(" \x1b[32m* %s\x1b[0m\n", start); ++ } ++ fclose(file); ++ if (!success) { ++ puts("\x1b[31m\x1b[1m[-] Tests failed! \u2639\x1b[0m"); ++ poweroff(); ++ } ++} ++ ++static void launch_tests(void) ++{ ++ char cmdline[4096], *success_dev; ++ int status, fd; ++ pid_t pid; ++ ++ pretty_message("[+] Launching tests..."); ++ pid = fork(); ++ if (pid == -1) ++ panic("fork"); ++ else if (pid == 0) { ++ execl("/init.sh", "init", NULL); ++ panic("exec"); ++ } ++ if (waitpid(pid, &status, 0) < 0) ++ panic("waitpid"); ++ if (WIFEXITED(status) && WEXITSTATUS(status) == 0) { ++ pretty_message("[+] Tests successful! :-)"); ++ fd = open("/proc/cmdline", O_RDONLY); ++ if (fd < 0) ++ panic("open(/proc/cmdline)"); ++ if (read(fd, cmdline, sizeof(cmdline) - 1) <= 0) ++ panic("read(/proc/cmdline)"); ++ cmdline[sizeof(cmdline) - 1] = '\0'; ++ for (success_dev = strtok(cmdline, " \n"); success_dev; success_dev = strtok(NULL, " \n")) { ++ if (strncmp(success_dev, "wg.success=", 11)) ++ continue; ++ memcpy(success_dev + 11 - 5, "/dev/", 5); ++ success_dev += 11 - 5; ++ break; ++ } ++ if (!success_dev || !strlen(success_dev)) ++ panic("Unable to find success device"); ++ ++ fd = open(success_dev, O_WRONLY); ++ if (fd < 0) ++ panic("open(success_dev)"); ++ if (write(fd, "success\n", 8) != 8) ++ panic("write(success_dev)"); ++ close(fd); ++ } else { ++ const char *why = "unknown cause"; ++ int what = -1; ++ ++ if (WIFEXITED(status)) { ++ why = "exit code"; ++ what = WEXITSTATUS(status); ++ } else if (WIFSIGNALED(status)) { ++ why = "signal"; ++ what = WTERMSIG(status); ++ } ++ printf("\x1b[31m\x1b[1m[-] Tests failed with %s %d! \u2639\x1b[0m\n", why, what); ++ } ++} ++ ++static void ensure_console(void) ++{ ++ for (unsigned int i = 0; i < 1000; ++i) { ++ int fd = open("/dev/console", O_RDWR); ++ if (fd < 0) { ++ usleep(50000); ++ continue; ++ } ++ dup2(fd, 0); ++ dup2(fd, 1); ++ dup2(fd, 2); ++ close(fd); ++ if (write(1, "\0\0\0\0\n", 5) == 5) ++ return; ++ } ++ panic("Unable to open console device"); ++} ++ ++static void clear_leaks(void) ++{ ++ int fd; ++ ++ fd = open("/sys/kernel/debug/kmemleak", O_WRONLY); ++ if (fd < 0) ++ return; ++ pretty_message("[+] Starting memory leak detection..."); ++ write(fd, "clear\n", 5); ++ close(fd); ++} ++ ++static void check_leaks(void) ++{ ++ int fd; ++ ++ fd = open("/sys/kernel/debug/kmemleak", O_WRONLY); ++ if (fd < 0) ++ return; ++ pretty_message("[+] Scanning for memory leaks..."); ++ sleep(2); /* Wait for any grace periods. 
*/ ++ write(fd, "scan\n", 5); ++ close(fd); ++ ++ fd = open("/sys/kernel/debug/kmemleak", O_RDONLY); ++ if (fd < 0) ++ return; ++ if (sendfile(1, fd, NULL, 0x7ffff000) > 0) ++ panic("Memory leaks encountered"); ++ close(fd); ++} ++ ++int main(int argc, char *argv[]) ++{ ++ seed_rng(); ++ ensure_console(); ++ print_banner(); ++ mount_filesystems(); ++ kmod_selftests(); ++ enable_logging(); ++ clear_leaks(); ++ launch_tests(); ++ check_leaks(); ++ poweroff(); ++ return 1; ++} +--- /dev/null ++++ b/tools/testing/selftests/wireguard/qemu/kernel.config +@@ -0,0 +1,86 @@ ++CONFIG_LOCALVERSION="" ++CONFIG_NET=y ++CONFIG_NETDEVICES=y ++CONFIG_NET_CORE=y ++CONFIG_NET_IPIP=y ++CONFIG_DUMMY=y ++CONFIG_VETH=y ++CONFIG_MULTIUSER=y ++CONFIG_NAMESPACES=y ++CONFIG_NET_NS=y ++CONFIG_UNIX=y ++CONFIG_INET=y ++CONFIG_IPV6=y ++CONFIG_NETFILTER=y ++CONFIG_NETFILTER_ADVANCED=y ++CONFIG_NF_CONNTRACK=y ++CONFIG_NF_NAT=y ++CONFIG_NETFILTER_XTABLES=y ++CONFIG_NETFILTER_XT_NAT=y ++CONFIG_NETFILTER_XT_MATCH_LENGTH=y ++CONFIG_NF_CONNTRACK_IPV4=y ++CONFIG_NF_NAT_IPV4=y ++CONFIG_IP_NF_IPTABLES=y ++CONFIG_IP_NF_FILTER=y ++CONFIG_IP_NF_NAT=y ++CONFIG_IP_ADVANCED_ROUTER=y ++CONFIG_IP_MULTIPLE_TABLES=y ++CONFIG_IPV6_MULTIPLE_TABLES=y ++CONFIG_TTY=y ++CONFIG_BINFMT_ELF=y ++CONFIG_BINFMT_SCRIPT=y ++CONFIG_VDSO=y ++CONFIG_VIRTUALIZATION=y ++CONFIG_HYPERVISOR_GUEST=y ++CONFIG_PARAVIRT=y ++CONFIG_KVM_GUEST=y ++CONFIG_PARAVIRT_SPINLOCKS=y ++CONFIG_PRINTK=y ++CONFIG_KALLSYMS=y ++CONFIG_BUG=y ++CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y ++CONFIG_EMBEDDED=n ++CONFIG_BASE_FULL=y ++CONFIG_FUTEX=y ++CONFIG_SHMEM=y ++CONFIG_SLUB=y ++CONFIG_SPARSEMEM_VMEMMAP=y ++CONFIG_SMP=y ++CONFIG_SCHED_SMT=y ++CONFIG_SCHED_MC=y ++CONFIG_NUMA=y ++CONFIG_PREEMPT=y ++CONFIG_NO_HZ=y ++CONFIG_NO_HZ_IDLE=y ++CONFIG_NO_HZ_FULL=n ++CONFIG_HZ_PERIODIC=n ++CONFIG_HIGH_RES_TIMERS=y ++CONFIG_ARCH_RANDOM=y ++CONFIG_FILE_LOCKING=y ++CONFIG_POSIX_TIMERS=y ++CONFIG_DEVTMPFS=y ++CONFIG_PROC_FS=y ++CONFIG_PROC_SYSCTL=y ++CONFIG_SYSFS=y ++CONFIG_TMPFS=y ++CONFIG_CONSOLE_LOGLEVEL_DEFAULT=15 ++CONFIG_PRINTK_TIME=y ++CONFIG_BLK_DEV_INITRD=y ++CONFIG_LEGACY_VSYSCALL_NONE=y ++CONFIG_KERNEL_GZIP=y ++CONFIG_PANIC_ON_OOPS=y ++CONFIG_BUG_ON_DATA_CORRUPTION=y ++CONFIG_LOCKUP_DETECTOR=y ++CONFIG_SOFTLOCKUP_DETECTOR=y ++CONFIG_HARDLOCKUP_DETECTOR=y ++CONFIG_WQ_WATCHDOG=y ++CONFIG_DETECT_HUNG_TASK=y ++CONFIG_BOOTPARAM_HARDLOCKUP_PANIC=y ++CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y ++CONFIG_BOOTPARAM_HUNG_TASK_PANIC=y ++CONFIG_PANIC_TIMEOUT=-1 ++CONFIG_STACKTRACE=y ++CONFIG_EARLY_PRINTK=y ++CONFIG_GDB_SCRIPTS=y ++CONFIG_WIREGUARD=y ++CONFIG_WIREGUARD_DEBUG=y diff --git a/target/linux/generic/backport-5.4/080-wireguard-0074-net-WireGuard-secure-network-tunnel.patch b/target/linux/generic/backport-5.4/080-wireguard-0074-net-WireGuard-secure-network-tunnel.patch deleted file mode 100644 index 9e37bbb60c..0000000000 --- a/target/linux/generic/backport-5.4/080-wireguard-0074-net-WireGuard-secure-network-tunnel.patch +++ /dev/null @@ -1,8071 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Mon, 9 Dec 2019 00:27:34 +0100 -Subject: [PATCH] net: WireGuard secure network tunnel - -commit e7096c131e5161fa3b8e52a650d7719d2857adfd upstream. - -WireGuard is a layer 3 secure networking tunnel made specifically for -the kernel, that aims to be much simpler and easier to audit than IPsec. 
-Extensive documentation and description of the protocol and -considerations, along with formal proofs of the cryptography, are -available at: - - * https://www.wireguard.com/ - * https://www.wireguard.com/papers/wireguard.pdf - -This commit implements WireGuard as a simple network device driver, -accessible in the usual RTNL way used by virtual network drivers. It -makes use of the udp_tunnel APIs, GRO, GSO, NAPI, and the usual set of -networking subsystem APIs. It has a somewhat novel multicore queueing -system designed for maximum throughput and minimal latency of encryption -operations, but it is implemented modestly using workqueues and NAPI. -Configuration is done via generic Netlink, and following a review from -the Netlink maintainer a year ago, several high profile userspace tools -have already implemented the API. - -This commit also comes with several different tests, both in-kernel -tests and out-of-kernel tests based on network namespaces, taking profit -of the fact that sockets used by WireGuard intentionally stay in the -namespace the WireGuard interface was originally created, exactly like -the semantics of userspace tun devices. See wireguard.com/netns/ for -pictures and examples. - -The source code is fairly short, but rather than combining everything -into a single file, WireGuard is developed as cleanly separable files, -making auditing and comprehension easier. Things are laid out as -follows: - - * noise.[ch], cookie.[ch], messages.h: These implement the bulk of the - cryptographic aspects of the protocol, and are mostly data-only in - nature, taking in buffers of bytes and spitting out buffers of - bytes. They also handle reference counting for their various shared - pieces of data, like keys and key lists. - - * ratelimiter.[ch]: Used as an integral part of cookie.[ch] for - ratelimiting certain types of cryptographic operations in accordance - with particular WireGuard semantics. - - * allowedips.[ch], peerlookup.[ch]: The main lookup structures of - WireGuard, the former being trie-like with particular semantics, an - integral part of the design of the protocol, and the latter just - being nice helper functions around the various hashtables we use. - - * device.[ch]: Implementation of functions for the netdevice and for - rtnl, responsible for maintaining the life of a given interface and - wiring it up to the rest of WireGuard. - - * peer.[ch]: Each interface has a list of peers, with helper functions - available here for creation, destruction, and reference counting. - - * socket.[ch]: Implementation of functions related to udp_socket and - the general set of kernel socket APIs, for sending and receiving - ciphertext UDP packets, and taking care of WireGuard-specific sticky - socket routing semantics for the automatic roaming. - - * netlink.[ch]: Userspace API entry point for configuring WireGuard - peers and devices. The API has been implemented by several userspace - tools and network management utility, and the WireGuard project - distributes the basic wg(8) tool. - - * queueing.[ch]: Shared function on the rx and tx path for handling - the various queues used in the multicore algorithms. - - * send.c: Handles encrypting outgoing packets in parallel on - multiple cores, before sending them in order on a single core, via - workqueues and ring buffers. Also handles sending handshake and cookie - messages as part of the protocol, in parallel. 
- - * receive.c: Handles decrypting incoming packets in parallel on - multiple cores, before passing them off in order to be ingested via - the rest of the networking subsystem with GRO via the typical NAPI - poll function. Also handles receiving handshake and cookie messages - as part of the protocol, in parallel. - - * timers.[ch]: Uses the timer wheel to implement protocol particular - event timeouts, and gives a set of very simple event-driven entry - point functions for callers. - - * main.c, version.h: Initialization and deinitialization of the module. - - * selftest/*.h: Runtime unit tests for some of the most security - sensitive functions. - - * tools/testing/selftests/wireguard/netns.sh: Aforementioned testing - script using network namespaces. - -This commit aims to be as self-contained as possible, implementing -WireGuard as a standalone module not needing much special handling or -coordination from the network subsystem. I expect for future -optimizations to the network stack to positively improve WireGuard, and -vice-versa, but for the time being, this exists as intentionally -standalone. - -We introduce a menu option for CONFIG_WIREGUARD, as well as providing a -verbose debug log and self-tests via CONFIG_WIREGUARD_DEBUG. - -Signed-off-by: Jason A. Donenfeld -Cc: David Miller -Cc: Greg KH -Cc: Linus Torvalds -Cc: Herbert Xu -Cc: linux-crypto@vger.kernel.org -Cc: linux-kernel@vger.kernel.org -Cc: netdev@vger.kernel.org -Signed-off-by: David S. Miller -[Jason: ported to 5.4 by doing the following: - - wg_get_device_start uses genl_family_attrbuf - - trival skb_redirect_reset change from 2c64605b590e is folded in - - skb_list_walk_safe was already backported prior] -Signed-off-by: Jason A. Donenfeld ---- - MAINTAINERS | 8 + - drivers/net/Kconfig | 41 + - drivers/net/Makefile | 1 + - drivers/net/wireguard/Makefile | 18 + - drivers/net/wireguard/allowedips.c | 381 +++++++++ - drivers/net/wireguard/allowedips.h | 59 ++ - drivers/net/wireguard/cookie.c | 236 ++++++ - drivers/net/wireguard/cookie.h | 59 ++ - drivers/net/wireguard/device.c | 458 ++++++++++ - drivers/net/wireguard/device.h | 65 ++ - drivers/net/wireguard/main.c | 64 ++ - drivers/net/wireguard/messages.h | 128 +++ - drivers/net/wireguard/netlink.c | 648 +++++++++++++++ - drivers/net/wireguard/netlink.h | 12 + - drivers/net/wireguard/noise.c | 828 +++++++++++++++++++ - drivers/net/wireguard/noise.h | 137 +++ - drivers/net/wireguard/peer.c | 240 ++++++ - drivers/net/wireguard/peer.h | 83 ++ - drivers/net/wireguard/peerlookup.c | 221 +++++ - drivers/net/wireguard/peerlookup.h | 64 ++ - drivers/net/wireguard/queueing.c | 53 ++ - drivers/net/wireguard/queueing.h | 197 +++++ - drivers/net/wireguard/ratelimiter.c | 223 +++++ - drivers/net/wireguard/ratelimiter.h | 19 + - drivers/net/wireguard/receive.c | 595 +++++++++++++ - drivers/net/wireguard/selftest/allowedips.c | 683 +++++++++++++++ - drivers/net/wireguard/selftest/counter.c | 104 +++ - drivers/net/wireguard/selftest/ratelimiter.c | 226 +++++ - drivers/net/wireguard/send.c | 413 +++++++++ - drivers/net/wireguard/socket.c | 437 ++++++++++ - drivers/net/wireguard/socket.h | 44 + - drivers/net/wireguard/timers.c | 243 ++++++ - drivers/net/wireguard/timers.h | 31 + - drivers/net/wireguard/version.h | 1 + - include/uapi/linux/wireguard.h | 196 +++++ - tools/testing/selftests/wireguard/netns.sh | 537 ++++++++++++ - 36 files changed, 7753 insertions(+) - create mode 100644 drivers/net/wireguard/Makefile - create mode 100644 drivers/net/wireguard/allowedips.c - create mode 
100644 drivers/net/wireguard/allowedips.h - create mode 100644 drivers/net/wireguard/cookie.c - create mode 100644 drivers/net/wireguard/cookie.h - create mode 100644 drivers/net/wireguard/device.c - create mode 100644 drivers/net/wireguard/device.h - create mode 100644 drivers/net/wireguard/main.c - create mode 100644 drivers/net/wireguard/messages.h - create mode 100644 drivers/net/wireguard/netlink.c - create mode 100644 drivers/net/wireguard/netlink.h - create mode 100644 drivers/net/wireguard/noise.c - create mode 100644 drivers/net/wireguard/noise.h - create mode 100644 drivers/net/wireguard/peer.c - create mode 100644 drivers/net/wireguard/peer.h - create mode 100644 drivers/net/wireguard/peerlookup.c - create mode 100644 drivers/net/wireguard/peerlookup.h - create mode 100644 drivers/net/wireguard/queueing.c - create mode 100644 drivers/net/wireguard/queueing.h - create mode 100644 drivers/net/wireguard/ratelimiter.c - create mode 100644 drivers/net/wireguard/ratelimiter.h - create mode 100644 drivers/net/wireguard/receive.c - create mode 100644 drivers/net/wireguard/selftest/allowedips.c - create mode 100644 drivers/net/wireguard/selftest/counter.c - create mode 100644 drivers/net/wireguard/selftest/ratelimiter.c - create mode 100644 drivers/net/wireguard/send.c - create mode 100644 drivers/net/wireguard/socket.c - create mode 100644 drivers/net/wireguard/socket.h - create mode 100644 drivers/net/wireguard/timers.c - create mode 100644 drivers/net/wireguard/timers.h - create mode 100644 drivers/net/wireguard/version.h - create mode 100644 include/uapi/linux/wireguard.h - create mode 100755 tools/testing/selftests/wireguard/netns.sh - ---- a/MAINTAINERS -+++ b/MAINTAINERS -@@ -17584,6 +17584,14 @@ L: linux-gpio@vger.kernel.org - S: Maintained - F: drivers/gpio/gpio-ws16c48.c - -+WIREGUARD SECURE NETWORK TUNNEL -+M: Jason A. Donenfeld -+S: Maintained -+F: drivers/net/wireguard/ -+F: tools/testing/selftests/wireguard/ -+L: wireguard@lists.zx2c4.com -+L: netdev@vger.kernel.org -+ - WISTRON LAPTOP BUTTON DRIVER - M: Miloslav Trmac - S: Maintained ---- a/drivers/net/Kconfig -+++ b/drivers/net/Kconfig -@@ -71,6 +71,47 @@ config DUMMY - To compile this driver as a module, choose M here: the module - will be called dummy. - -+config WIREGUARD -+ tristate "WireGuard secure network tunnel" -+ depends on NET && INET -+ depends on IPV6 || !IPV6 -+ select NET_UDP_TUNNEL -+ select DST_CACHE -+ select CRYPTO -+ select CRYPTO_LIB_CURVE25519 -+ select CRYPTO_LIB_CHACHA20POLY1305 -+ select CRYPTO_LIB_BLAKE2S -+ select CRYPTO_CHACHA20_X86_64 if X86 && 64BIT -+ select CRYPTO_POLY1305_X86_64 if X86 && 64BIT -+ select CRYPTO_BLAKE2S_X86 if X86 && 64BIT -+ select CRYPTO_CURVE25519_X86 if X86 && 64BIT -+ select CRYPTO_CHACHA20_NEON if (ARM || ARM64) && KERNEL_MODE_NEON -+ select CRYPTO_POLY1305_NEON if ARM64 && KERNEL_MODE_NEON -+ select CRYPTO_POLY1305_ARM if ARM -+ select CRYPTO_CURVE25519_NEON if ARM && KERNEL_MODE_NEON -+ select CRYPTO_CHACHA_MIPS if CPU_MIPS32_R2 -+ select CRYPTO_POLY1305_MIPS if CPU_MIPS32 || (CPU_MIPS64 && 64BIT) -+ help -+ WireGuard is a secure, fast, and easy to use replacement for IPSec -+ that uses modern cryptography and clever networking tricks. It's -+ designed to be fairly general purpose and abstract enough to fit most -+ use cases, while at the same time remaining extremely simple to -+ configure. See www.wireguard.com for more info. 
-+ -+ It's safe to say Y or M here, as the driver is very lightweight and -+ is only in use when an administrator chooses to add an interface. -+ -+config WIREGUARD_DEBUG -+ bool "Debugging checks and verbose messages" -+ depends on WIREGUARD -+ help -+ This will write log messages for handshake and other events -+ that occur for a WireGuard interface. It will also perform some -+ extra validation checks and unit tests at various points. This is -+ only useful for debugging. -+ -+ Say N here unless you know what you're doing. -+ - config EQUALIZER - tristate "EQL (serial line load balancing) support" - ---help--- ---- a/drivers/net/Makefile -+++ b/drivers/net/Makefile -@@ -10,6 +10,7 @@ obj-$(CONFIG_BONDING) += bonding/ - obj-$(CONFIG_IPVLAN) += ipvlan/ - obj-$(CONFIG_IPVTAP) += ipvlan/ - obj-$(CONFIG_DUMMY) += dummy.o -+obj-$(CONFIG_WIREGUARD) += wireguard/ - obj-$(CONFIG_EQUALIZER) += eql.o - obj-$(CONFIG_IFB) += ifb.o - obj-$(CONFIG_MACSEC) += macsec.o ---- /dev/null -+++ b/drivers/net/wireguard/Makefile -@@ -0,0 +1,18 @@ -+ccflags-y := -O3 -+ccflags-y += -D'pr_fmt(fmt)=KBUILD_MODNAME ": " fmt' -+ccflags-$(CONFIG_WIREGUARD_DEBUG) += -DDEBUG -+wireguard-y := main.o -+wireguard-y += noise.o -+wireguard-y += device.o -+wireguard-y += peer.o -+wireguard-y += timers.o -+wireguard-y += queueing.o -+wireguard-y += send.o -+wireguard-y += receive.o -+wireguard-y += socket.o -+wireguard-y += peerlookup.o -+wireguard-y += allowedips.o -+wireguard-y += ratelimiter.o -+wireguard-y += cookie.o -+wireguard-y += netlink.o -+obj-$(CONFIG_WIREGUARD) := wireguard.o ---- /dev/null -+++ b/drivers/net/wireguard/allowedips.c -@@ -0,0 +1,381 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. -+ */ -+ -+#include "allowedips.h" -+#include "peer.h" -+ -+static void swap_endian(u8 *dst, const u8 *src, u8 bits) -+{ -+ if (bits == 32) { -+ *(u32 *)dst = be32_to_cpu(*(const __be32 *)src); -+ } else if (bits == 128) { -+ ((u64 *)dst)[0] = be64_to_cpu(((const __be64 *)src)[0]); -+ ((u64 *)dst)[1] = be64_to_cpu(((const __be64 *)src)[1]); -+ } -+} -+ -+static void copy_and_assign_cidr(struct allowedips_node *node, const u8 *src, -+ u8 cidr, u8 bits) -+{ -+ node->cidr = cidr; -+ node->bit_at_a = cidr / 8U; -+#ifdef __LITTLE_ENDIAN -+ node->bit_at_a ^= (bits / 8U - 1U) % 8U; -+#endif -+ node->bit_at_b = 7U - (cidr % 8U); -+ node->bitlen = bits; -+ memcpy(node->bits, src, bits / 8U); -+} -+#define CHOOSE_NODE(parent, key) \ -+ parent->bit[(key[parent->bit_at_a] >> parent->bit_at_b) & 1] -+ -+static void node_free_rcu(struct rcu_head *rcu) -+{ -+ kfree(container_of(rcu, struct allowedips_node, rcu)); -+} -+ -+static void push_rcu(struct allowedips_node **stack, -+ struct allowedips_node __rcu *p, unsigned int *len) -+{ -+ if (rcu_access_pointer(p)) { -+ WARN_ON(IS_ENABLED(DEBUG) && *len >= 128); -+ stack[(*len)++] = rcu_dereference_raw(p); -+ } -+} -+ -+static void root_free_rcu(struct rcu_head *rcu) -+{ -+ struct allowedips_node *node, *stack[128] = { -+ container_of(rcu, struct allowedips_node, rcu) }; -+ unsigned int len = 1; -+ -+ while (len > 0 && (node = stack[--len])) { -+ push_rcu(stack, node->bit[0], &len); -+ push_rcu(stack, node->bit[1], &len); -+ kfree(node); -+ } -+} -+ -+static void root_remove_peer_lists(struct allowedips_node *root) -+{ -+ struct allowedips_node *node, *stack[128] = { root }; -+ unsigned int len = 1; -+ -+ while (len > 0 && (node = stack[--len])) { -+ push_rcu(stack, node->bit[0], &len); -+ push_rcu(stack, node->bit[1], &len); 
-+ if (rcu_access_pointer(node->peer)) -+ list_del(&node->peer_list); -+ } -+} -+ -+static void walk_remove_by_peer(struct allowedips_node __rcu **top, -+ struct wg_peer *peer, struct mutex *lock) -+{ -+#define REF(p) rcu_access_pointer(p) -+#define DEREF(p) rcu_dereference_protected(*(p), lockdep_is_held(lock)) -+#define PUSH(p) ({ \ -+ WARN_ON(IS_ENABLED(DEBUG) && len >= 128); \ -+ stack[len++] = p; \ -+ }) -+ -+ struct allowedips_node __rcu **stack[128], **nptr; -+ struct allowedips_node *node, *prev; -+ unsigned int len; -+ -+ if (unlikely(!peer || !REF(*top))) -+ return; -+ -+ for (prev = NULL, len = 0, PUSH(top); len > 0; prev = node) { -+ nptr = stack[len - 1]; -+ node = DEREF(nptr); -+ if (!node) { -+ --len; -+ continue; -+ } -+ if (!prev || REF(prev->bit[0]) == node || -+ REF(prev->bit[1]) == node) { -+ if (REF(node->bit[0])) -+ PUSH(&node->bit[0]); -+ else if (REF(node->bit[1])) -+ PUSH(&node->bit[1]); -+ } else if (REF(node->bit[0]) == prev) { -+ if (REF(node->bit[1])) -+ PUSH(&node->bit[1]); -+ } else { -+ if (rcu_dereference_protected(node->peer, -+ lockdep_is_held(lock)) == peer) { -+ RCU_INIT_POINTER(node->peer, NULL); -+ list_del_init(&node->peer_list); -+ if (!node->bit[0] || !node->bit[1]) { -+ rcu_assign_pointer(*nptr, DEREF( -+ &node->bit[!REF(node->bit[0])])); -+ call_rcu(&node->rcu, node_free_rcu); -+ node = DEREF(nptr); -+ } -+ } -+ --len; -+ } -+ } -+ -+#undef REF -+#undef DEREF -+#undef PUSH -+} -+ -+static unsigned int fls128(u64 a, u64 b) -+{ -+ return a ? fls64(a) + 64U : fls64(b); -+} -+ -+static u8 common_bits(const struct allowedips_node *node, const u8 *key, -+ u8 bits) -+{ -+ if (bits == 32) -+ return 32U - fls(*(const u32 *)node->bits ^ *(const u32 *)key); -+ else if (bits == 128) -+ return 128U - fls128( -+ *(const u64 *)&node->bits[0] ^ *(const u64 *)&key[0], -+ *(const u64 *)&node->bits[8] ^ *(const u64 *)&key[8]); -+ return 0; -+} -+ -+static bool prefix_matches(const struct allowedips_node *node, const u8 *key, -+ u8 bits) -+{ -+ /* This could be much faster if it actually just compared the common -+ * bits properly, by precomputing a mask bswap(~0 << (32 - cidr)), and -+ * the rest, but it turns out that common_bits is already super fast on -+ * modern processors, even taking into account the unfortunate bswap. -+ * So, we just inline it like this instead. 
-+ */ -+ return common_bits(node, key, bits) >= node->cidr; -+} -+ -+static struct allowedips_node *find_node(struct allowedips_node *trie, u8 bits, -+ const u8 *key) -+{ -+ struct allowedips_node *node = trie, *found = NULL; -+ -+ while (node && prefix_matches(node, key, bits)) { -+ if (rcu_access_pointer(node->peer)) -+ found = node; -+ if (node->cidr == bits) -+ break; -+ node = rcu_dereference_bh(CHOOSE_NODE(node, key)); -+ } -+ return found; -+} -+ -+/* Returns a strong reference to a peer */ -+static struct wg_peer *lookup(struct allowedips_node __rcu *root, u8 bits, -+ const void *be_ip) -+{ -+ /* Aligned so it can be passed to fls/fls64 */ -+ u8 ip[16] __aligned(__alignof(u64)); -+ struct allowedips_node *node; -+ struct wg_peer *peer = NULL; -+ -+ swap_endian(ip, be_ip, bits); -+ -+ rcu_read_lock_bh(); -+retry: -+ node = find_node(rcu_dereference_bh(root), bits, ip); -+ if (node) { -+ peer = wg_peer_get_maybe_zero(rcu_dereference_bh(node->peer)); -+ if (!peer) -+ goto retry; -+ } -+ rcu_read_unlock_bh(); -+ return peer; -+} -+ -+static bool node_placement(struct allowedips_node __rcu *trie, const u8 *key, -+ u8 cidr, u8 bits, struct allowedips_node **rnode, -+ struct mutex *lock) -+{ -+ struct allowedips_node *node = rcu_dereference_protected(trie, -+ lockdep_is_held(lock)); -+ struct allowedips_node *parent = NULL; -+ bool exact = false; -+ -+ while (node && node->cidr <= cidr && prefix_matches(node, key, bits)) { -+ parent = node; -+ if (parent->cidr == cidr) { -+ exact = true; -+ break; -+ } -+ node = rcu_dereference_protected(CHOOSE_NODE(parent, key), -+ lockdep_is_held(lock)); -+ } -+ *rnode = parent; -+ return exact; -+} -+ -+static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key, -+ u8 cidr, struct wg_peer *peer, struct mutex *lock) -+{ -+ struct allowedips_node *node, *parent, *down, *newnode; -+ -+ if (unlikely(cidr > bits || !peer)) -+ return -EINVAL; -+ -+ if (!rcu_access_pointer(*trie)) { -+ node = kzalloc(sizeof(*node), GFP_KERNEL); -+ if (unlikely(!node)) -+ return -ENOMEM; -+ RCU_INIT_POINTER(node->peer, peer); -+ list_add_tail(&node->peer_list, &peer->allowedips_list); -+ copy_and_assign_cidr(node, key, cidr, bits); -+ rcu_assign_pointer(*trie, node); -+ return 0; -+ } -+ if (node_placement(*trie, key, cidr, bits, &node, lock)) { -+ rcu_assign_pointer(node->peer, peer); -+ list_move_tail(&node->peer_list, &peer->allowedips_list); -+ return 0; -+ } -+ -+ newnode = kzalloc(sizeof(*newnode), GFP_KERNEL); -+ if (unlikely(!newnode)) -+ return -ENOMEM; -+ RCU_INIT_POINTER(newnode->peer, peer); -+ list_add_tail(&newnode->peer_list, &peer->allowedips_list); -+ copy_and_assign_cidr(newnode, key, cidr, bits); -+ -+ if (!node) { -+ down = rcu_dereference_protected(*trie, lockdep_is_held(lock)); -+ } else { -+ down = rcu_dereference_protected(CHOOSE_NODE(node, key), -+ lockdep_is_held(lock)); -+ if (!down) { -+ rcu_assign_pointer(CHOOSE_NODE(node, key), newnode); -+ return 0; -+ } -+ } -+ cidr = min(cidr, common_bits(down, key, bits)); -+ parent = node; -+ -+ if (newnode->cidr == cidr) { -+ rcu_assign_pointer(CHOOSE_NODE(newnode, down->bits), down); -+ if (!parent) -+ rcu_assign_pointer(*trie, newnode); -+ else -+ rcu_assign_pointer(CHOOSE_NODE(parent, newnode->bits), -+ newnode); -+ } else { -+ node = kzalloc(sizeof(*node), GFP_KERNEL); -+ if (unlikely(!node)) { -+ kfree(newnode); -+ return -ENOMEM; -+ } -+ INIT_LIST_HEAD(&node->peer_list); -+ copy_and_assign_cidr(node, newnode->bits, cidr, bits); -+ -+ rcu_assign_pointer(CHOOSE_NODE(node, down->bits), 
down); -+ rcu_assign_pointer(CHOOSE_NODE(node, newnode->bits), newnode); -+ if (!parent) -+ rcu_assign_pointer(*trie, node); -+ else -+ rcu_assign_pointer(CHOOSE_NODE(parent, node->bits), -+ node); -+ } -+ return 0; -+} -+ -+void wg_allowedips_init(struct allowedips *table) -+{ -+ table->root4 = table->root6 = NULL; -+ table->seq = 1; -+} -+ -+void wg_allowedips_free(struct allowedips *table, struct mutex *lock) -+{ -+ struct allowedips_node __rcu *old4 = table->root4, *old6 = table->root6; -+ -+ ++table->seq; -+ RCU_INIT_POINTER(table->root4, NULL); -+ RCU_INIT_POINTER(table->root6, NULL); -+ if (rcu_access_pointer(old4)) { -+ struct allowedips_node *node = rcu_dereference_protected(old4, -+ lockdep_is_held(lock)); -+ -+ root_remove_peer_lists(node); -+ call_rcu(&node->rcu, root_free_rcu); -+ } -+ if (rcu_access_pointer(old6)) { -+ struct allowedips_node *node = rcu_dereference_protected(old6, -+ lockdep_is_held(lock)); -+ -+ root_remove_peer_lists(node); -+ call_rcu(&node->rcu, root_free_rcu); -+ } -+} -+ -+int wg_allowedips_insert_v4(struct allowedips *table, const struct in_addr *ip, -+ u8 cidr, struct wg_peer *peer, struct mutex *lock) -+{ -+ /* Aligned so it can be passed to fls */ -+ u8 key[4] __aligned(__alignof(u32)); -+ -+ ++table->seq; -+ swap_endian(key, (const u8 *)ip, 32); -+ return add(&table->root4, 32, key, cidr, peer, lock); -+} -+ -+int wg_allowedips_insert_v6(struct allowedips *table, const struct in6_addr *ip, -+ u8 cidr, struct wg_peer *peer, struct mutex *lock) -+{ -+ /* Aligned so it can be passed to fls64 */ -+ u8 key[16] __aligned(__alignof(u64)); -+ -+ ++table->seq; -+ swap_endian(key, (const u8 *)ip, 128); -+ return add(&table->root6, 128, key, cidr, peer, lock); -+} -+ -+void wg_allowedips_remove_by_peer(struct allowedips *table, -+ struct wg_peer *peer, struct mutex *lock) -+{ -+ ++table->seq; -+ walk_remove_by_peer(&table->root4, peer, lock); -+ walk_remove_by_peer(&table->root6, peer, lock); -+} -+ -+int wg_allowedips_read_node(struct allowedips_node *node, u8 ip[16], u8 *cidr) -+{ -+ const unsigned int cidr_bytes = DIV_ROUND_UP(node->cidr, 8U); -+ swap_endian(ip, node->bits, node->bitlen); -+ memset(ip + cidr_bytes, 0, node->bitlen / 8U - cidr_bytes); -+ if (node->cidr) -+ ip[cidr_bytes - 1U] &= ~0U << (-node->cidr % 8U); -+ -+ *cidr = node->cidr; -+ return node->bitlen == 32 ? AF_INET : AF_INET6; -+} -+ -+/* Returns a strong reference to a peer */ -+struct wg_peer *wg_allowedips_lookup_dst(struct allowedips *table, -+ struct sk_buff *skb) -+{ -+ if (skb->protocol == htons(ETH_P_IP)) -+ return lookup(table->root4, 32, &ip_hdr(skb)->daddr); -+ else if (skb->protocol == htons(ETH_P_IPV6)) -+ return lookup(table->root6, 128, &ipv6_hdr(skb)->daddr); -+ return NULL; -+} -+ -+/* Returns a strong reference to a peer */ -+struct wg_peer *wg_allowedips_lookup_src(struct allowedips *table, -+ struct sk_buff *skb) -+{ -+ if (skb->protocol == htons(ETH_P_IP)) -+ return lookup(table->root4, 32, &ip_hdr(skb)->saddr); -+ else if (skb->protocol == htons(ETH_P_IPV6)) -+ return lookup(table->root6, 128, &ipv6_hdr(skb)->saddr); -+ return NULL; -+} -+ -+#include "selftest/allowedips.c" ---- /dev/null -+++ b/drivers/net/wireguard/allowedips.h -@@ -0,0 +1,59 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
-+ */ -+ -+#ifndef _WG_ALLOWEDIPS_H -+#define _WG_ALLOWEDIPS_H -+ -+#include -+#include -+#include -+ -+struct wg_peer; -+ -+struct allowedips_node { -+ struct wg_peer __rcu *peer; -+ struct allowedips_node __rcu *bit[2]; -+ /* While it may seem scandalous that we waste space for v4, -+ * we're alloc'ing to the nearest power of 2 anyway, so this -+ * doesn't actually make a difference. -+ */ -+ u8 bits[16] __aligned(__alignof(u64)); -+ u8 cidr, bit_at_a, bit_at_b, bitlen; -+ -+ /* Keep rarely used list at bottom to be beyond cache line. */ -+ union { -+ struct list_head peer_list; -+ struct rcu_head rcu; -+ }; -+}; -+ -+struct allowedips { -+ struct allowedips_node __rcu *root4; -+ struct allowedips_node __rcu *root6; -+ u64 seq; -+}; -+ -+void wg_allowedips_init(struct allowedips *table); -+void wg_allowedips_free(struct allowedips *table, struct mutex *mutex); -+int wg_allowedips_insert_v4(struct allowedips *table, const struct in_addr *ip, -+ u8 cidr, struct wg_peer *peer, struct mutex *lock); -+int wg_allowedips_insert_v6(struct allowedips *table, const struct in6_addr *ip, -+ u8 cidr, struct wg_peer *peer, struct mutex *lock); -+void wg_allowedips_remove_by_peer(struct allowedips *table, -+ struct wg_peer *peer, struct mutex *lock); -+/* The ip input pointer should be __aligned(__alignof(u64))) */ -+int wg_allowedips_read_node(struct allowedips_node *node, u8 ip[16], u8 *cidr); -+ -+/* These return a strong reference to a peer: */ -+struct wg_peer *wg_allowedips_lookup_dst(struct allowedips *table, -+ struct sk_buff *skb); -+struct wg_peer *wg_allowedips_lookup_src(struct allowedips *table, -+ struct sk_buff *skb); -+ -+#ifdef DEBUG -+bool wg_allowedips_selftest(void); -+#endif -+ -+#endif /* _WG_ALLOWEDIPS_H */ ---- /dev/null -+++ b/drivers/net/wireguard/cookie.c -@@ -0,0 +1,236 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
-+ */ -+ -+#include "cookie.h" -+#include "peer.h" -+#include "device.h" -+#include "messages.h" -+#include "ratelimiter.h" -+#include "timers.h" -+ -+#include -+#include -+ -+#include -+#include -+ -+void wg_cookie_checker_init(struct cookie_checker *checker, -+ struct wg_device *wg) -+{ -+ init_rwsem(&checker->secret_lock); -+ checker->secret_birthdate = ktime_get_coarse_boottime_ns(); -+ get_random_bytes(checker->secret, NOISE_HASH_LEN); -+ checker->device = wg; -+} -+ -+enum { COOKIE_KEY_LABEL_LEN = 8 }; -+static const u8 mac1_key_label[COOKIE_KEY_LABEL_LEN] = "mac1----"; -+static const u8 cookie_key_label[COOKIE_KEY_LABEL_LEN] = "cookie--"; -+ -+static void precompute_key(u8 key[NOISE_SYMMETRIC_KEY_LEN], -+ const u8 pubkey[NOISE_PUBLIC_KEY_LEN], -+ const u8 label[COOKIE_KEY_LABEL_LEN]) -+{ -+ struct blake2s_state blake; -+ -+ blake2s_init(&blake, NOISE_SYMMETRIC_KEY_LEN); -+ blake2s_update(&blake, label, COOKIE_KEY_LABEL_LEN); -+ blake2s_update(&blake, pubkey, NOISE_PUBLIC_KEY_LEN); -+ blake2s_final(&blake, key); -+} -+ -+/* Must hold peer->handshake.static_identity->lock */ -+void wg_cookie_checker_precompute_device_keys(struct cookie_checker *checker) -+{ -+ if (likely(checker->device->static_identity.has_identity)) { -+ precompute_key(checker->cookie_encryption_key, -+ checker->device->static_identity.static_public, -+ cookie_key_label); -+ precompute_key(checker->message_mac1_key, -+ checker->device->static_identity.static_public, -+ mac1_key_label); -+ } else { -+ memset(checker->cookie_encryption_key, 0, -+ NOISE_SYMMETRIC_KEY_LEN); -+ memset(checker->message_mac1_key, 0, NOISE_SYMMETRIC_KEY_LEN); -+ } -+} -+ -+void wg_cookie_checker_precompute_peer_keys(struct wg_peer *peer) -+{ -+ precompute_key(peer->latest_cookie.cookie_decryption_key, -+ peer->handshake.remote_static, cookie_key_label); -+ precompute_key(peer->latest_cookie.message_mac1_key, -+ peer->handshake.remote_static, mac1_key_label); -+} -+ -+void wg_cookie_init(struct cookie *cookie) -+{ -+ memset(cookie, 0, sizeof(*cookie)); -+ init_rwsem(&cookie->lock); -+} -+ -+static void compute_mac1(u8 mac1[COOKIE_LEN], const void *message, size_t len, -+ const u8 key[NOISE_SYMMETRIC_KEY_LEN]) -+{ -+ len = len - sizeof(struct message_macs) + -+ offsetof(struct message_macs, mac1); -+ blake2s(mac1, message, key, COOKIE_LEN, len, NOISE_SYMMETRIC_KEY_LEN); -+} -+ -+static void compute_mac2(u8 mac2[COOKIE_LEN], const void *message, size_t len, -+ const u8 cookie[COOKIE_LEN]) -+{ -+ len = len - sizeof(struct message_macs) + -+ offsetof(struct message_macs, mac2); -+ blake2s(mac2, message, cookie, COOKIE_LEN, len, COOKIE_LEN); -+} -+ -+static void make_cookie(u8 cookie[COOKIE_LEN], struct sk_buff *skb, -+ struct cookie_checker *checker) -+{ -+ struct blake2s_state state; -+ -+ if (wg_birthdate_has_expired(checker->secret_birthdate, -+ COOKIE_SECRET_MAX_AGE)) { -+ down_write(&checker->secret_lock); -+ checker->secret_birthdate = ktime_get_coarse_boottime_ns(); -+ get_random_bytes(checker->secret, NOISE_HASH_LEN); -+ up_write(&checker->secret_lock); -+ } -+ -+ down_read(&checker->secret_lock); -+ -+ blake2s_init_key(&state, COOKIE_LEN, checker->secret, NOISE_HASH_LEN); -+ if (skb->protocol == htons(ETH_P_IP)) -+ blake2s_update(&state, (u8 *)&ip_hdr(skb)->saddr, -+ sizeof(struct in_addr)); -+ else if (skb->protocol == htons(ETH_P_IPV6)) -+ blake2s_update(&state, (u8 *)&ipv6_hdr(skb)->saddr, -+ sizeof(struct in6_addr)); -+ blake2s_update(&state, (u8 *)&udp_hdr(skb)->source, sizeof(__be16)); -+ blake2s_final(&state, cookie); -+ -+ 
up_read(&checker->secret_lock); -+} -+ -+enum cookie_mac_state wg_cookie_validate_packet(struct cookie_checker *checker, -+ struct sk_buff *skb, -+ bool check_cookie) -+{ -+ struct message_macs *macs = (struct message_macs *) -+ (skb->data + skb->len - sizeof(*macs)); -+ enum cookie_mac_state ret; -+ u8 computed_mac[COOKIE_LEN]; -+ u8 cookie[COOKIE_LEN]; -+ -+ ret = INVALID_MAC; -+ compute_mac1(computed_mac, skb->data, skb->len, -+ checker->message_mac1_key); -+ if (crypto_memneq(computed_mac, macs->mac1, COOKIE_LEN)) -+ goto out; -+ -+ ret = VALID_MAC_BUT_NO_COOKIE; -+ -+ if (!check_cookie) -+ goto out; -+ -+ make_cookie(cookie, skb, checker); -+ -+ compute_mac2(computed_mac, skb->data, skb->len, cookie); -+ if (crypto_memneq(computed_mac, macs->mac2, COOKIE_LEN)) -+ goto out; -+ -+ ret = VALID_MAC_WITH_COOKIE_BUT_RATELIMITED; -+ if (!wg_ratelimiter_allow(skb, dev_net(checker->device->dev))) -+ goto out; -+ -+ ret = VALID_MAC_WITH_COOKIE; -+ -+out: -+ return ret; -+} -+ -+void wg_cookie_add_mac_to_packet(void *message, size_t len, -+ struct wg_peer *peer) -+{ -+ struct message_macs *macs = (struct message_macs *) -+ ((u8 *)message + len - sizeof(*macs)); -+ -+ down_write(&peer->latest_cookie.lock); -+ compute_mac1(macs->mac1, message, len, -+ peer->latest_cookie.message_mac1_key); -+ memcpy(peer->latest_cookie.last_mac1_sent, macs->mac1, COOKIE_LEN); -+ peer->latest_cookie.have_sent_mac1 = true; -+ up_write(&peer->latest_cookie.lock); -+ -+ down_read(&peer->latest_cookie.lock); -+ if (peer->latest_cookie.is_valid && -+ !wg_birthdate_has_expired(peer->latest_cookie.birthdate, -+ COOKIE_SECRET_MAX_AGE - COOKIE_SECRET_LATENCY)) -+ compute_mac2(macs->mac2, message, len, -+ peer->latest_cookie.cookie); -+ else -+ memset(macs->mac2, 0, COOKIE_LEN); -+ up_read(&peer->latest_cookie.lock); -+} -+ -+void wg_cookie_message_create(struct message_handshake_cookie *dst, -+ struct sk_buff *skb, __le32 index, -+ struct cookie_checker *checker) -+{ -+ struct message_macs *macs = (struct message_macs *) -+ ((u8 *)skb->data + skb->len - sizeof(*macs)); -+ u8 cookie[COOKIE_LEN]; -+ -+ dst->header.type = cpu_to_le32(MESSAGE_HANDSHAKE_COOKIE); -+ dst->receiver_index = index; -+ get_random_bytes_wait(dst->nonce, COOKIE_NONCE_LEN); -+ -+ make_cookie(cookie, skb, checker); -+ xchacha20poly1305_encrypt(dst->encrypted_cookie, cookie, COOKIE_LEN, -+ macs->mac1, COOKIE_LEN, dst->nonce, -+ checker->cookie_encryption_key); -+} -+ -+void wg_cookie_message_consume(struct message_handshake_cookie *src, -+ struct wg_device *wg) -+{ -+ struct wg_peer *peer = NULL; -+ u8 cookie[COOKIE_LEN]; -+ bool ret; -+ -+ if (unlikely(!wg_index_hashtable_lookup(wg->index_hashtable, -+ INDEX_HASHTABLE_HANDSHAKE | -+ INDEX_HASHTABLE_KEYPAIR, -+ src->receiver_index, &peer))) -+ return; -+ -+ down_read(&peer->latest_cookie.lock); -+ if (unlikely(!peer->latest_cookie.have_sent_mac1)) { -+ up_read(&peer->latest_cookie.lock); -+ goto out; -+ } -+ ret = xchacha20poly1305_decrypt( -+ cookie, src->encrypted_cookie, sizeof(src->encrypted_cookie), -+ peer->latest_cookie.last_mac1_sent, COOKIE_LEN, src->nonce, -+ peer->latest_cookie.cookie_decryption_key); -+ up_read(&peer->latest_cookie.lock); -+ -+ if (ret) { -+ down_write(&peer->latest_cookie.lock); -+ memcpy(peer->latest_cookie.cookie, cookie, COOKIE_LEN); -+ peer->latest_cookie.birthdate = ktime_get_coarse_boottime_ns(); -+ peer->latest_cookie.is_valid = true; -+ peer->latest_cookie.have_sent_mac1 = false; -+ up_write(&peer->latest_cookie.lock); -+ } else { -+ net_dbg_ratelimited("%s: Could not 
decrypt invalid cookie response\n", -+ wg->dev->name); -+ } -+ -+out: -+ wg_peer_put(peer); -+} ---- /dev/null -+++ b/drivers/net/wireguard/cookie.h -@@ -0,0 +1,59 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. -+ */ -+ -+#ifndef _WG_COOKIE_H -+#define _WG_COOKIE_H -+ -+#include "messages.h" -+#include -+ -+struct wg_peer; -+ -+struct cookie_checker { -+ u8 secret[NOISE_HASH_LEN]; -+ u8 cookie_encryption_key[NOISE_SYMMETRIC_KEY_LEN]; -+ u8 message_mac1_key[NOISE_SYMMETRIC_KEY_LEN]; -+ u64 secret_birthdate; -+ struct rw_semaphore secret_lock; -+ struct wg_device *device; -+}; -+ -+struct cookie { -+ u64 birthdate; -+ bool is_valid; -+ u8 cookie[COOKIE_LEN]; -+ bool have_sent_mac1; -+ u8 last_mac1_sent[COOKIE_LEN]; -+ u8 cookie_decryption_key[NOISE_SYMMETRIC_KEY_LEN]; -+ u8 message_mac1_key[NOISE_SYMMETRIC_KEY_LEN]; -+ struct rw_semaphore lock; -+}; -+ -+enum cookie_mac_state { -+ INVALID_MAC, -+ VALID_MAC_BUT_NO_COOKIE, -+ VALID_MAC_WITH_COOKIE_BUT_RATELIMITED, -+ VALID_MAC_WITH_COOKIE -+}; -+ -+void wg_cookie_checker_init(struct cookie_checker *checker, -+ struct wg_device *wg); -+void wg_cookie_checker_precompute_device_keys(struct cookie_checker *checker); -+void wg_cookie_checker_precompute_peer_keys(struct wg_peer *peer); -+void wg_cookie_init(struct cookie *cookie); -+ -+enum cookie_mac_state wg_cookie_validate_packet(struct cookie_checker *checker, -+ struct sk_buff *skb, -+ bool check_cookie); -+void wg_cookie_add_mac_to_packet(void *message, size_t len, -+ struct wg_peer *peer); -+ -+void wg_cookie_message_create(struct message_handshake_cookie *src, -+ struct sk_buff *skb, __le32 index, -+ struct cookie_checker *checker); -+void wg_cookie_message_consume(struct message_handshake_cookie *src, -+ struct wg_device *wg); -+ -+#endif /* _WG_COOKIE_H */ ---- /dev/null -+++ b/drivers/net/wireguard/device.c -@@ -0,0 +1,458 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. -+ */ -+ -+#include "queueing.h" -+#include "socket.h" -+#include "timers.h" -+#include "device.h" -+#include "ratelimiter.h" -+#include "peer.h" -+#include "messages.h" -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+static LIST_HEAD(device_list); -+ -+static int wg_open(struct net_device *dev) -+{ -+ struct in_device *dev_v4 = __in_dev_get_rtnl(dev); -+ struct inet6_dev *dev_v6 = __in6_dev_get(dev); -+ struct wg_device *wg = netdev_priv(dev); -+ struct wg_peer *peer; -+ int ret; -+ -+ if (dev_v4) { -+ /* At some point we might put this check near the ip_rt_send_ -+ * redirect call of ip_forward in net/ipv4/ip_forward.c, similar -+ * to the current secpath check. 
-+ */ -+ IN_DEV_CONF_SET(dev_v4, SEND_REDIRECTS, false); -+ IPV4_DEVCONF_ALL(dev_net(dev), SEND_REDIRECTS) = false; -+ } -+ if (dev_v6) -+ dev_v6->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_NONE; -+ -+ ret = wg_socket_init(wg, wg->incoming_port); -+ if (ret < 0) -+ return ret; -+ mutex_lock(&wg->device_update_lock); -+ list_for_each_entry(peer, &wg->peer_list, peer_list) { -+ wg_packet_send_staged_packets(peer); -+ if (peer->persistent_keepalive_interval) -+ wg_packet_send_keepalive(peer); -+ } -+ mutex_unlock(&wg->device_update_lock); -+ return 0; -+} -+ -+#ifdef CONFIG_PM_SLEEP -+static int wg_pm_notification(struct notifier_block *nb, unsigned long action, -+ void *data) -+{ -+ struct wg_device *wg; -+ struct wg_peer *peer; -+ -+ /* If the machine is constantly suspending and resuming, as part of -+ * its normal operation rather than as a somewhat rare event, then we -+ * don't actually want to clear keys. -+ */ -+ if (IS_ENABLED(CONFIG_PM_AUTOSLEEP) || IS_ENABLED(CONFIG_ANDROID)) -+ return 0; -+ -+ if (action != PM_HIBERNATION_PREPARE && action != PM_SUSPEND_PREPARE) -+ return 0; -+ -+ rtnl_lock(); -+ list_for_each_entry(wg, &device_list, device_list) { -+ mutex_lock(&wg->device_update_lock); -+ list_for_each_entry(peer, &wg->peer_list, peer_list) { -+ del_timer(&peer->timer_zero_key_material); -+ wg_noise_handshake_clear(&peer->handshake); -+ wg_noise_keypairs_clear(&peer->keypairs); -+ } -+ mutex_unlock(&wg->device_update_lock); -+ } -+ rtnl_unlock(); -+ rcu_barrier(); -+ return 0; -+} -+ -+static struct notifier_block pm_notifier = { .notifier_call = wg_pm_notification }; -+#endif -+ -+static int wg_stop(struct net_device *dev) -+{ -+ struct wg_device *wg = netdev_priv(dev); -+ struct wg_peer *peer; -+ -+ mutex_lock(&wg->device_update_lock); -+ list_for_each_entry(peer, &wg->peer_list, peer_list) { -+ wg_packet_purge_staged_packets(peer); -+ wg_timers_stop(peer); -+ wg_noise_handshake_clear(&peer->handshake); -+ wg_noise_keypairs_clear(&peer->keypairs); -+ wg_noise_reset_last_sent_handshake(&peer->last_sent_handshake); -+ } -+ mutex_unlock(&wg->device_update_lock); -+ skb_queue_purge(&wg->incoming_handshakes); -+ wg_socket_reinit(wg, NULL, NULL); -+ return 0; -+} -+ -+static netdev_tx_t wg_xmit(struct sk_buff *skb, struct net_device *dev) -+{ -+ struct wg_device *wg = netdev_priv(dev); -+ struct sk_buff_head packets; -+ struct wg_peer *peer; -+ struct sk_buff *next; -+ sa_family_t family; -+ u32 mtu; -+ int ret; -+ -+ if (unlikely(wg_skb_examine_untrusted_ip_hdr(skb) != skb->protocol)) { -+ ret = -EPROTONOSUPPORT; -+ net_dbg_ratelimited("%s: Invalid IP packet\n", dev->name); -+ goto err; -+ } -+ -+ peer = wg_allowedips_lookup_dst(&wg->peer_allowedips, skb); -+ if (unlikely(!peer)) { -+ ret = -ENOKEY; -+ if (skb->protocol == htons(ETH_P_IP)) -+ net_dbg_ratelimited("%s: No peer has allowed IPs matching %pI4\n", -+ dev->name, &ip_hdr(skb)->daddr); -+ else if (skb->protocol == htons(ETH_P_IPV6)) -+ net_dbg_ratelimited("%s: No peer has allowed IPs matching %pI6\n", -+ dev->name, &ipv6_hdr(skb)->daddr); -+ goto err; -+ } -+ -+ family = READ_ONCE(peer->endpoint.addr.sa_family); -+ if (unlikely(family != AF_INET && family != AF_INET6)) { -+ ret = -EDESTADDRREQ; -+ net_dbg_ratelimited("%s: No valid endpoint has been configured or discovered for peer %llu\n", -+ dev->name, peer->internal_id); -+ goto err_peer; -+ } -+ -+ mtu = skb_dst(skb) ? 
dst_mtu(skb_dst(skb)) : dev->mtu; -+ -+ __skb_queue_head_init(&packets); -+ if (!skb_is_gso(skb)) { -+ skb_mark_not_on_list(skb); -+ } else { -+ struct sk_buff *segs = skb_gso_segment(skb, 0); -+ -+ if (unlikely(IS_ERR(segs))) { -+ ret = PTR_ERR(segs); -+ goto err_peer; -+ } -+ dev_kfree_skb(skb); -+ skb = segs; -+ } -+ -+ skb_list_walk_safe(skb, skb, next) { -+ skb_mark_not_on_list(skb); -+ -+ skb = skb_share_check(skb, GFP_ATOMIC); -+ if (unlikely(!skb)) -+ continue; -+ -+ /* We only need to keep the original dst around for icmp, -+ * so at this point we're in a position to drop it. -+ */ -+ skb_dst_drop(skb); -+ -+ PACKET_CB(skb)->mtu = mtu; -+ -+ __skb_queue_tail(&packets, skb); -+ } -+ -+ spin_lock_bh(&peer->staged_packet_queue.lock); -+ /* If the queue is getting too big, we start removing the oldest packets -+ * until it's small again. We do this before adding the new packet, so -+ * we don't remove GSO segments that are in excess. -+ */ -+ while (skb_queue_len(&peer->staged_packet_queue) > MAX_STAGED_PACKETS) { -+ dev_kfree_skb(__skb_dequeue(&peer->staged_packet_queue)); -+ ++dev->stats.tx_dropped; -+ } -+ skb_queue_splice_tail(&packets, &peer->staged_packet_queue); -+ spin_unlock_bh(&peer->staged_packet_queue.lock); -+ -+ wg_packet_send_staged_packets(peer); -+ -+ wg_peer_put(peer); -+ return NETDEV_TX_OK; -+ -+err_peer: -+ wg_peer_put(peer); -+err: -+ ++dev->stats.tx_errors; -+ if (skb->protocol == htons(ETH_P_IP)) -+ icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0); -+ else if (skb->protocol == htons(ETH_P_IPV6)) -+ icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0); -+ kfree_skb(skb); -+ return ret; -+} -+ -+static const struct net_device_ops netdev_ops = { -+ .ndo_open = wg_open, -+ .ndo_stop = wg_stop, -+ .ndo_start_xmit = wg_xmit, -+ .ndo_get_stats64 = ip_tunnel_get_stats64 -+}; -+ -+static void wg_destruct(struct net_device *dev) -+{ -+ struct wg_device *wg = netdev_priv(dev); -+ -+ rtnl_lock(); -+ list_del(&wg->device_list); -+ rtnl_unlock(); -+ mutex_lock(&wg->device_update_lock); -+ wg->incoming_port = 0; -+ wg_socket_reinit(wg, NULL, NULL); -+ /* The final references are cleared in the below calls to destroy_workqueue. */ -+ wg_peer_remove_all(wg); -+ destroy_workqueue(wg->handshake_receive_wq); -+ destroy_workqueue(wg->handshake_send_wq); -+ destroy_workqueue(wg->packet_crypt_wq); -+ wg_packet_queue_free(&wg->decrypt_queue, true); -+ wg_packet_queue_free(&wg->encrypt_queue, true); -+ rcu_barrier(); /* Wait for all the peers to be actually freed. 
*/ -+ wg_ratelimiter_uninit(); -+ memzero_explicit(&wg->static_identity, sizeof(wg->static_identity)); -+ skb_queue_purge(&wg->incoming_handshakes); -+ free_percpu(dev->tstats); -+ free_percpu(wg->incoming_handshakes_worker); -+ if (wg->have_creating_net_ref) -+ put_net(wg->creating_net); -+ kvfree(wg->index_hashtable); -+ kvfree(wg->peer_hashtable); -+ mutex_unlock(&wg->device_update_lock); -+ -+ pr_debug("%s: Interface deleted\n", dev->name); -+ free_netdev(dev); -+} -+ -+static const struct device_type device_type = { .name = KBUILD_MODNAME }; -+ -+static void wg_setup(struct net_device *dev) -+{ -+ struct wg_device *wg = netdev_priv(dev); -+ enum { WG_NETDEV_FEATURES = NETIF_F_HW_CSUM | NETIF_F_RXCSUM | -+ NETIF_F_SG | NETIF_F_GSO | -+ NETIF_F_GSO_SOFTWARE | NETIF_F_HIGHDMA }; -+ -+ dev->netdev_ops = &netdev_ops; -+ dev->hard_header_len = 0; -+ dev->addr_len = 0; -+ dev->needed_headroom = DATA_PACKET_HEAD_ROOM; -+ dev->needed_tailroom = noise_encrypted_len(MESSAGE_PADDING_MULTIPLE); -+ dev->type = ARPHRD_NONE; -+ dev->flags = IFF_POINTOPOINT | IFF_NOARP; -+ dev->priv_flags |= IFF_NO_QUEUE; -+ dev->features |= NETIF_F_LLTX; -+ dev->features |= WG_NETDEV_FEATURES; -+ dev->hw_features |= WG_NETDEV_FEATURES; -+ dev->hw_enc_features |= WG_NETDEV_FEATURES; -+ dev->mtu = ETH_DATA_LEN - MESSAGE_MINIMUM_LENGTH - -+ sizeof(struct udphdr) - -+ max(sizeof(struct ipv6hdr), sizeof(struct iphdr)); -+ -+ SET_NETDEV_DEVTYPE(dev, &device_type); -+ -+ /* We need to keep the dst around in case of icmp replies. */ -+ netif_keep_dst(dev); -+ -+ memset(wg, 0, sizeof(*wg)); -+ wg->dev = dev; -+} -+ -+static int wg_newlink(struct net *src_net, struct net_device *dev, -+ struct nlattr *tb[], struct nlattr *data[], -+ struct netlink_ext_ack *extack) -+{ -+ struct wg_device *wg = netdev_priv(dev); -+ int ret = -ENOMEM; -+ -+ wg->creating_net = src_net; -+ init_rwsem(&wg->static_identity.lock); -+ mutex_init(&wg->socket_update_lock); -+ mutex_init(&wg->device_update_lock); -+ skb_queue_head_init(&wg->incoming_handshakes); -+ wg_allowedips_init(&wg->peer_allowedips); -+ wg_cookie_checker_init(&wg->cookie_checker, wg); -+ INIT_LIST_HEAD(&wg->peer_list); -+ wg->device_update_gen = 1; -+ -+ wg->peer_hashtable = wg_pubkey_hashtable_alloc(); -+ if (!wg->peer_hashtable) -+ return ret; -+ -+ wg->index_hashtable = wg_index_hashtable_alloc(); -+ if (!wg->index_hashtable) -+ goto err_free_peer_hashtable; -+ -+ dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); -+ if (!dev->tstats) -+ goto err_free_index_hashtable; -+ -+ wg->incoming_handshakes_worker = -+ wg_packet_percpu_multicore_worker_alloc( -+ wg_packet_handshake_receive_worker, wg); -+ if (!wg->incoming_handshakes_worker) -+ goto err_free_tstats; -+ -+ wg->handshake_receive_wq = alloc_workqueue("wg-kex-%s", -+ WQ_CPU_INTENSIVE | WQ_FREEZABLE, 0, dev->name); -+ if (!wg->handshake_receive_wq) -+ goto err_free_incoming_handshakes; -+ -+ wg->handshake_send_wq = alloc_workqueue("wg-kex-%s", -+ WQ_UNBOUND | WQ_FREEZABLE, 0, dev->name); -+ if (!wg->handshake_send_wq) -+ goto err_destroy_handshake_receive; -+ -+ wg->packet_crypt_wq = alloc_workqueue("wg-crypt-%s", -+ WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 0, dev->name); -+ if (!wg->packet_crypt_wq) -+ goto err_destroy_handshake_send; -+ -+ ret = wg_packet_queue_init(&wg->encrypt_queue, wg_packet_encrypt_worker, -+ true, MAX_QUEUED_PACKETS); -+ if (ret < 0) -+ goto err_destroy_packet_crypt; -+ -+ ret = wg_packet_queue_init(&wg->decrypt_queue, wg_packet_decrypt_worker, -+ true, MAX_QUEUED_PACKETS); -+ if (ret < 0) -+ 
goto err_free_encrypt_queue; -+ -+ ret = wg_ratelimiter_init(); -+ if (ret < 0) -+ goto err_free_decrypt_queue; -+ -+ ret = register_netdevice(dev); -+ if (ret < 0) -+ goto err_uninit_ratelimiter; -+ -+ list_add(&wg->device_list, &device_list); -+ -+ /* We wait until the end to assign priv_destructor, so that -+ * register_netdevice doesn't call it for us if it fails. -+ */ -+ dev->priv_destructor = wg_destruct; -+ -+ pr_debug("%s: Interface created\n", dev->name); -+ return ret; -+ -+err_uninit_ratelimiter: -+ wg_ratelimiter_uninit(); -+err_free_decrypt_queue: -+ wg_packet_queue_free(&wg->decrypt_queue, true); -+err_free_encrypt_queue: -+ wg_packet_queue_free(&wg->encrypt_queue, true); -+err_destroy_packet_crypt: -+ destroy_workqueue(wg->packet_crypt_wq); -+err_destroy_handshake_send: -+ destroy_workqueue(wg->handshake_send_wq); -+err_destroy_handshake_receive: -+ destroy_workqueue(wg->handshake_receive_wq); -+err_free_incoming_handshakes: -+ free_percpu(wg->incoming_handshakes_worker); -+err_free_tstats: -+ free_percpu(dev->tstats); -+err_free_index_hashtable: -+ kvfree(wg->index_hashtable); -+err_free_peer_hashtable: -+ kvfree(wg->peer_hashtable); -+ return ret; -+} -+ -+static struct rtnl_link_ops link_ops __read_mostly = { -+ .kind = KBUILD_MODNAME, -+ .priv_size = sizeof(struct wg_device), -+ .setup = wg_setup, -+ .newlink = wg_newlink, -+}; -+ -+static int wg_netdevice_notification(struct notifier_block *nb, -+ unsigned long action, void *data) -+{ -+ struct net_device *dev = ((struct netdev_notifier_info *)data)->dev; -+ struct wg_device *wg = netdev_priv(dev); -+ -+ ASSERT_RTNL(); -+ -+ if (action != NETDEV_REGISTER || dev->netdev_ops != &netdev_ops) -+ return 0; -+ -+ if (dev_net(dev) == wg->creating_net && wg->have_creating_net_ref) { -+ put_net(wg->creating_net); -+ wg->have_creating_net_ref = false; -+ } else if (dev_net(dev) != wg->creating_net && -+ !wg->have_creating_net_ref) { -+ wg->have_creating_net_ref = true; -+ get_net(wg->creating_net); -+ } -+ return 0; -+} -+ -+static struct notifier_block netdevice_notifier = { -+ .notifier_call = wg_netdevice_notification -+}; -+ -+int __init wg_device_init(void) -+{ -+ int ret; -+ -+#ifdef CONFIG_PM_SLEEP -+ ret = register_pm_notifier(&pm_notifier); -+ if (ret) -+ return ret; -+#endif -+ -+ ret = register_netdevice_notifier(&netdevice_notifier); -+ if (ret) -+ goto error_pm; -+ -+ ret = rtnl_link_register(&link_ops); -+ if (ret) -+ goto error_netdevice; -+ -+ return 0; -+ -+error_netdevice: -+ unregister_netdevice_notifier(&netdevice_notifier); -+error_pm: -+#ifdef CONFIG_PM_SLEEP -+ unregister_pm_notifier(&pm_notifier); -+#endif -+ return ret; -+} -+ -+void wg_device_uninit(void) -+{ -+ rtnl_link_unregister(&link_ops); -+ unregister_netdevice_notifier(&netdevice_notifier); -+#ifdef CONFIG_PM_SLEEP -+ unregister_pm_notifier(&pm_notifier); -+#endif -+ rcu_barrier(); -+} ---- /dev/null -+++ b/drivers/net/wireguard/device.h -@@ -0,0 +1,65 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
-+ */ -+ -+#ifndef _WG_DEVICE_H -+#define _WG_DEVICE_H -+ -+#include "noise.h" -+#include "allowedips.h" -+#include "peerlookup.h" -+#include "cookie.h" -+ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+struct wg_device; -+ -+struct multicore_worker { -+ void *ptr; -+ struct work_struct work; -+}; -+ -+struct crypt_queue { -+ struct ptr_ring ring; -+ union { -+ struct { -+ struct multicore_worker __percpu *worker; -+ int last_cpu; -+ }; -+ struct work_struct work; -+ }; -+}; -+ -+struct wg_device { -+ struct net_device *dev; -+ struct crypt_queue encrypt_queue, decrypt_queue; -+ struct sock __rcu *sock4, *sock6; -+ struct net *creating_net; -+ struct noise_static_identity static_identity; -+ struct workqueue_struct *handshake_receive_wq, *handshake_send_wq; -+ struct workqueue_struct *packet_crypt_wq; -+ struct sk_buff_head incoming_handshakes; -+ int incoming_handshake_cpu; -+ struct multicore_worker __percpu *incoming_handshakes_worker; -+ struct cookie_checker cookie_checker; -+ struct pubkey_hashtable *peer_hashtable; -+ struct index_hashtable *index_hashtable; -+ struct allowedips peer_allowedips; -+ struct mutex device_update_lock, socket_update_lock; -+ struct list_head device_list, peer_list; -+ unsigned int num_peers, device_update_gen; -+ u32 fwmark; -+ u16 incoming_port; -+ bool have_creating_net_ref; -+}; -+ -+int wg_device_init(void); -+void wg_device_uninit(void); -+ -+#endif /* _WG_DEVICE_H */ ---- /dev/null -+++ b/drivers/net/wireguard/main.c -@@ -0,0 +1,64 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. -+ */ -+ -+#include "version.h" -+#include "device.h" -+#include "noise.h" -+#include "queueing.h" -+#include "ratelimiter.h" -+#include "netlink.h" -+ -+#include -+ -+#include -+#include -+#include -+#include -+#include -+ -+static int __init mod_init(void) -+{ -+ int ret; -+ -+#ifdef DEBUG -+ if (!wg_allowedips_selftest() || !wg_packet_counter_selftest() || -+ !wg_ratelimiter_selftest()) -+ return -ENOTRECOVERABLE; -+#endif -+ wg_noise_init(); -+ -+ ret = wg_device_init(); -+ if (ret < 0) -+ goto err_device; -+ -+ ret = wg_genetlink_init(); -+ if (ret < 0) -+ goto err_netlink; -+ -+ pr_info("WireGuard " WIREGUARD_VERSION " loaded. See www.wireguard.com for information.\n"); -+ pr_info("Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved.\n"); -+ -+ return 0; -+ -+err_netlink: -+ wg_device_uninit(); -+err_device: -+ return ret; -+} -+ -+static void __exit mod_exit(void) -+{ -+ wg_genetlink_uninit(); -+ wg_device_uninit(); -+} -+ -+module_init(mod_init); -+module_exit(mod_exit); -+MODULE_LICENSE("GPL v2"); -+MODULE_DESCRIPTION("WireGuard secure network tunnel"); -+MODULE_AUTHOR("Jason A. Donenfeld "); -+MODULE_VERSION(WIREGUARD_VERSION); -+MODULE_ALIAS_RTNL_LINK(KBUILD_MODNAME); -+MODULE_ALIAS_GENL_FAMILY(WG_GENL_NAME); ---- /dev/null -+++ b/drivers/net/wireguard/messages.h -@@ -0,0 +1,128 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
-+ */ -+ -+#ifndef _WG_MESSAGES_H -+#define _WG_MESSAGES_H -+ -+#include -+#include -+#include -+ -+#include -+#include -+#include -+ -+enum noise_lengths { -+ NOISE_PUBLIC_KEY_LEN = CURVE25519_KEY_SIZE, -+ NOISE_SYMMETRIC_KEY_LEN = CHACHA20POLY1305_KEY_SIZE, -+ NOISE_TIMESTAMP_LEN = sizeof(u64) + sizeof(u32), -+ NOISE_AUTHTAG_LEN = CHACHA20POLY1305_AUTHTAG_SIZE, -+ NOISE_HASH_LEN = BLAKE2S_HASH_SIZE -+}; -+ -+#define noise_encrypted_len(plain_len) ((plain_len) + NOISE_AUTHTAG_LEN) -+ -+enum cookie_values { -+ COOKIE_SECRET_MAX_AGE = 2 * 60, -+ COOKIE_SECRET_LATENCY = 5, -+ COOKIE_NONCE_LEN = XCHACHA20POLY1305_NONCE_SIZE, -+ COOKIE_LEN = 16 -+}; -+ -+enum counter_values { -+ COUNTER_BITS_TOTAL = 2048, -+ COUNTER_REDUNDANT_BITS = BITS_PER_LONG, -+ COUNTER_WINDOW_SIZE = COUNTER_BITS_TOTAL - COUNTER_REDUNDANT_BITS -+}; -+ -+enum limits { -+ REKEY_AFTER_MESSAGES = 1ULL << 60, -+ REJECT_AFTER_MESSAGES = U64_MAX - COUNTER_WINDOW_SIZE - 1, -+ REKEY_TIMEOUT = 5, -+ REKEY_TIMEOUT_JITTER_MAX_JIFFIES = HZ / 3, -+ REKEY_AFTER_TIME = 120, -+ REJECT_AFTER_TIME = 180, -+ INITIATIONS_PER_SECOND = 50, -+ MAX_PEERS_PER_DEVICE = 1U << 20, -+ KEEPALIVE_TIMEOUT = 10, -+ MAX_TIMER_HANDSHAKES = 90 / REKEY_TIMEOUT, -+ MAX_QUEUED_INCOMING_HANDSHAKES = 4096, /* TODO: replace this with DQL */ -+ MAX_STAGED_PACKETS = 128, -+ MAX_QUEUED_PACKETS = 1024 /* TODO: replace this with DQL */ -+}; -+ -+enum message_type { -+ MESSAGE_INVALID = 0, -+ MESSAGE_HANDSHAKE_INITIATION = 1, -+ MESSAGE_HANDSHAKE_RESPONSE = 2, -+ MESSAGE_HANDSHAKE_COOKIE = 3, -+ MESSAGE_DATA = 4 -+}; -+ -+struct message_header { -+ /* The actual layout of this that we want is: -+ * u8 type -+ * u8 reserved_zero[3] -+ * -+ * But it turns out that by encoding this as little endian, -+ * we achieve the same thing, and it makes checking faster. -+ */ -+ __le32 type; -+}; -+ -+struct message_macs { -+ u8 mac1[COOKIE_LEN]; -+ u8 mac2[COOKIE_LEN]; -+}; -+ -+struct message_handshake_initiation { -+ struct message_header header; -+ __le32 sender_index; -+ u8 unencrypted_ephemeral[NOISE_PUBLIC_KEY_LEN]; -+ u8 encrypted_static[noise_encrypted_len(NOISE_PUBLIC_KEY_LEN)]; -+ u8 encrypted_timestamp[noise_encrypted_len(NOISE_TIMESTAMP_LEN)]; -+ struct message_macs macs; -+}; -+ -+struct message_handshake_response { -+ struct message_header header; -+ __le32 sender_index; -+ __le32 receiver_index; -+ u8 unencrypted_ephemeral[NOISE_PUBLIC_KEY_LEN]; -+ u8 encrypted_nothing[noise_encrypted_len(0)]; -+ struct message_macs macs; -+}; -+ -+struct message_handshake_cookie { -+ struct message_header header; -+ __le32 receiver_index; -+ u8 nonce[COOKIE_NONCE_LEN]; -+ u8 encrypted_cookie[noise_encrypted_len(COOKIE_LEN)]; -+}; -+ -+struct message_data { -+ struct message_header header; -+ __le32 key_idx; -+ __le64 counter; -+ u8 encrypted_data[]; -+}; -+ -+#define message_data_len(plain_len) \ -+ (noise_encrypted_len(plain_len) + sizeof(struct message_data)) -+ -+enum message_alignments { -+ MESSAGE_PADDING_MULTIPLE = 16, -+ MESSAGE_MINIMUM_LENGTH = message_data_len(0) -+}; -+ -+#define SKB_HEADER_LEN \ -+ (max(sizeof(struct iphdr), sizeof(struct ipv6hdr)) + \ -+ sizeof(struct udphdr) + NET_SKB_PAD) -+#define DATA_PACKET_HEAD_ROOM \ -+ ALIGN(sizeof(struct message_data) + SKB_HEADER_LEN, 4) -+ -+enum { HANDSHAKE_DSCP = 0x88 /* AF41, plus 00 ECN */ }; -+ -+#endif /* _WG_MESSAGES_H */ ---- /dev/null -+++ b/drivers/net/wireguard/netlink.c -@@ -0,0 +1,648 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
-+ */ -+ -+#include "netlink.h" -+#include "device.h" -+#include "peer.h" -+#include "socket.h" -+#include "queueing.h" -+#include "messages.h" -+ -+#include -+ -+#include -+#include -+#include -+#include -+ -+static struct genl_family genl_family; -+ -+static const struct nla_policy device_policy[WGDEVICE_A_MAX + 1] = { -+ [WGDEVICE_A_IFINDEX] = { .type = NLA_U32 }, -+ [WGDEVICE_A_IFNAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 }, -+ [WGDEVICE_A_PRIVATE_KEY] = { .type = NLA_EXACT_LEN, .len = NOISE_PUBLIC_KEY_LEN }, -+ [WGDEVICE_A_PUBLIC_KEY] = { .type = NLA_EXACT_LEN, .len = NOISE_PUBLIC_KEY_LEN }, -+ [WGDEVICE_A_FLAGS] = { .type = NLA_U32 }, -+ [WGDEVICE_A_LISTEN_PORT] = { .type = NLA_U16 }, -+ [WGDEVICE_A_FWMARK] = { .type = NLA_U32 }, -+ [WGDEVICE_A_PEERS] = { .type = NLA_NESTED } -+}; -+ -+static const struct nla_policy peer_policy[WGPEER_A_MAX + 1] = { -+ [WGPEER_A_PUBLIC_KEY] = { .type = NLA_EXACT_LEN, .len = NOISE_PUBLIC_KEY_LEN }, -+ [WGPEER_A_PRESHARED_KEY] = { .type = NLA_EXACT_LEN, .len = NOISE_SYMMETRIC_KEY_LEN }, -+ [WGPEER_A_FLAGS] = { .type = NLA_U32 }, -+ [WGPEER_A_ENDPOINT] = { .type = NLA_MIN_LEN, .len = sizeof(struct sockaddr) }, -+ [WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL] = { .type = NLA_U16 }, -+ [WGPEER_A_LAST_HANDSHAKE_TIME] = { .type = NLA_EXACT_LEN, .len = sizeof(struct __kernel_timespec) }, -+ [WGPEER_A_RX_BYTES] = { .type = NLA_U64 }, -+ [WGPEER_A_TX_BYTES] = { .type = NLA_U64 }, -+ [WGPEER_A_ALLOWEDIPS] = { .type = NLA_NESTED }, -+ [WGPEER_A_PROTOCOL_VERSION] = { .type = NLA_U32 } -+}; -+ -+static const struct nla_policy allowedip_policy[WGALLOWEDIP_A_MAX + 1] = { -+ [WGALLOWEDIP_A_FAMILY] = { .type = NLA_U16 }, -+ [WGALLOWEDIP_A_IPADDR] = { .type = NLA_MIN_LEN, .len = sizeof(struct in_addr) }, -+ [WGALLOWEDIP_A_CIDR_MASK] = { .type = NLA_U8 } -+}; -+ -+static struct wg_device *lookup_interface(struct nlattr **attrs, -+ struct sk_buff *skb) -+{ -+ struct net_device *dev = NULL; -+ -+ if (!attrs[WGDEVICE_A_IFINDEX] == !attrs[WGDEVICE_A_IFNAME]) -+ return ERR_PTR(-EBADR); -+ if (attrs[WGDEVICE_A_IFINDEX]) -+ dev = dev_get_by_index(sock_net(skb->sk), -+ nla_get_u32(attrs[WGDEVICE_A_IFINDEX])); -+ else if (attrs[WGDEVICE_A_IFNAME]) -+ dev = dev_get_by_name(sock_net(skb->sk), -+ nla_data(attrs[WGDEVICE_A_IFNAME])); -+ if (!dev) -+ return ERR_PTR(-ENODEV); -+ if (!dev->rtnl_link_ops || !dev->rtnl_link_ops->kind || -+ strcmp(dev->rtnl_link_ops->kind, KBUILD_MODNAME)) { -+ dev_put(dev); -+ return ERR_PTR(-EOPNOTSUPP); -+ } -+ return netdev_priv(dev); -+} -+ -+static int get_allowedips(struct sk_buff *skb, const u8 *ip, u8 cidr, -+ int family) -+{ -+ struct nlattr *allowedip_nest; -+ -+ allowedip_nest = nla_nest_start(skb, 0); -+ if (!allowedip_nest) -+ return -EMSGSIZE; -+ -+ if (nla_put_u8(skb, WGALLOWEDIP_A_CIDR_MASK, cidr) || -+ nla_put_u16(skb, WGALLOWEDIP_A_FAMILY, family) || -+ nla_put(skb, WGALLOWEDIP_A_IPADDR, family == AF_INET6 ? 
-+ sizeof(struct in6_addr) : sizeof(struct in_addr), ip)) { -+ nla_nest_cancel(skb, allowedip_nest); -+ return -EMSGSIZE; -+ } -+ -+ nla_nest_end(skb, allowedip_nest); -+ return 0; -+} -+ -+struct dump_ctx { -+ struct wg_device *wg; -+ struct wg_peer *next_peer; -+ u64 allowedips_seq; -+ struct allowedips_node *next_allowedip; -+}; -+ -+#define DUMP_CTX(cb) ((struct dump_ctx *)(cb)->args) -+ -+static int -+get_peer(struct wg_peer *peer, struct sk_buff *skb, struct dump_ctx *ctx) -+{ -+ -+ struct nlattr *allowedips_nest, *peer_nest = nla_nest_start(skb, 0); -+ struct allowedips_node *allowedips_node = ctx->next_allowedip; -+ bool fail; -+ -+ if (!peer_nest) -+ return -EMSGSIZE; -+ -+ down_read(&peer->handshake.lock); -+ fail = nla_put(skb, WGPEER_A_PUBLIC_KEY, NOISE_PUBLIC_KEY_LEN, -+ peer->handshake.remote_static); -+ up_read(&peer->handshake.lock); -+ if (fail) -+ goto err; -+ -+ if (!allowedips_node) { -+ const struct __kernel_timespec last_handshake = { -+ .tv_sec = peer->walltime_last_handshake.tv_sec, -+ .tv_nsec = peer->walltime_last_handshake.tv_nsec -+ }; -+ -+ down_read(&peer->handshake.lock); -+ fail = nla_put(skb, WGPEER_A_PRESHARED_KEY, -+ NOISE_SYMMETRIC_KEY_LEN, -+ peer->handshake.preshared_key); -+ up_read(&peer->handshake.lock); -+ if (fail) -+ goto err; -+ -+ if (nla_put(skb, WGPEER_A_LAST_HANDSHAKE_TIME, -+ sizeof(last_handshake), &last_handshake) || -+ nla_put_u16(skb, WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL, -+ peer->persistent_keepalive_interval) || -+ nla_put_u64_64bit(skb, WGPEER_A_TX_BYTES, peer->tx_bytes, -+ WGPEER_A_UNSPEC) || -+ nla_put_u64_64bit(skb, WGPEER_A_RX_BYTES, peer->rx_bytes, -+ WGPEER_A_UNSPEC) || -+ nla_put_u32(skb, WGPEER_A_PROTOCOL_VERSION, 1)) -+ goto err; -+ -+ read_lock_bh(&peer->endpoint_lock); -+ if (peer->endpoint.addr.sa_family == AF_INET) -+ fail = nla_put(skb, WGPEER_A_ENDPOINT, -+ sizeof(peer->endpoint.addr4), -+ &peer->endpoint.addr4); -+ else if (peer->endpoint.addr.sa_family == AF_INET6) -+ fail = nla_put(skb, WGPEER_A_ENDPOINT, -+ sizeof(peer->endpoint.addr6), -+ &peer->endpoint.addr6); -+ read_unlock_bh(&peer->endpoint_lock); -+ if (fail) -+ goto err; -+ allowedips_node = -+ list_first_entry_or_null(&peer->allowedips_list, -+ struct allowedips_node, peer_list); -+ } -+ if (!allowedips_node) -+ goto no_allowedips; -+ if (!ctx->allowedips_seq) -+ ctx->allowedips_seq = peer->device->peer_allowedips.seq; -+ else if (ctx->allowedips_seq != peer->device->peer_allowedips.seq) -+ goto no_allowedips; -+ -+ allowedips_nest = nla_nest_start(skb, WGPEER_A_ALLOWEDIPS); -+ if (!allowedips_nest) -+ goto err; -+ -+ list_for_each_entry_from(allowedips_node, &peer->allowedips_list, -+ peer_list) { -+ u8 cidr, ip[16] __aligned(__alignof(u64)); -+ int family; -+ -+ family = wg_allowedips_read_node(allowedips_node, ip, &cidr); -+ if (get_allowedips(skb, ip, cidr, family)) { -+ nla_nest_end(skb, allowedips_nest); -+ nla_nest_end(skb, peer_nest); -+ ctx->next_allowedip = allowedips_node; -+ return -EMSGSIZE; -+ } -+ } -+ nla_nest_end(skb, allowedips_nest); -+no_allowedips: -+ nla_nest_end(skb, peer_nest); -+ ctx->next_allowedip = NULL; -+ ctx->allowedips_seq = 0; -+ return 0; -+err: -+ nla_nest_cancel(skb, peer_nest); -+ return -EMSGSIZE; -+} -+ -+static int wg_get_device_start(struct netlink_callback *cb) -+{ -+ struct nlattr **attrs = genl_family_attrbuf(&genl_family); -+ struct wg_device *wg; -+ int ret; -+ -+ ret = nlmsg_parse(cb->nlh, GENL_HDRLEN + genl_family.hdrsize, attrs, -+ genl_family.maxattr, device_policy, NULL); -+ if (ret < 0) -+ return ret; 
-+ wg = lookup_interface(attrs, cb->skb); -+ if (IS_ERR(wg)) -+ return PTR_ERR(wg); -+ DUMP_CTX(cb)->wg = wg; -+ return 0; -+} -+ -+static int wg_get_device_dump(struct sk_buff *skb, struct netlink_callback *cb) -+{ -+ struct wg_peer *peer, *next_peer_cursor; -+ struct dump_ctx *ctx = DUMP_CTX(cb); -+ struct wg_device *wg = ctx->wg; -+ struct nlattr *peers_nest; -+ int ret = -EMSGSIZE; -+ bool done = true; -+ void *hdr; -+ -+ rtnl_lock(); -+ mutex_lock(&wg->device_update_lock); -+ cb->seq = wg->device_update_gen; -+ next_peer_cursor = ctx->next_peer; -+ -+ hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, -+ &genl_family, NLM_F_MULTI, WG_CMD_GET_DEVICE); -+ if (!hdr) -+ goto out; -+ genl_dump_check_consistent(cb, hdr); -+ -+ if (!ctx->next_peer) { -+ if (nla_put_u16(skb, WGDEVICE_A_LISTEN_PORT, -+ wg->incoming_port) || -+ nla_put_u32(skb, WGDEVICE_A_FWMARK, wg->fwmark) || -+ nla_put_u32(skb, WGDEVICE_A_IFINDEX, wg->dev->ifindex) || -+ nla_put_string(skb, WGDEVICE_A_IFNAME, wg->dev->name)) -+ goto out; -+ -+ down_read(&wg->static_identity.lock); -+ if (wg->static_identity.has_identity) { -+ if (nla_put(skb, WGDEVICE_A_PRIVATE_KEY, -+ NOISE_PUBLIC_KEY_LEN, -+ wg->static_identity.static_private) || -+ nla_put(skb, WGDEVICE_A_PUBLIC_KEY, -+ NOISE_PUBLIC_KEY_LEN, -+ wg->static_identity.static_public)) { -+ up_read(&wg->static_identity.lock); -+ goto out; -+ } -+ } -+ up_read(&wg->static_identity.lock); -+ } -+ -+ peers_nest = nla_nest_start(skb, WGDEVICE_A_PEERS); -+ if (!peers_nest) -+ goto out; -+ ret = 0; -+ /* If the last cursor was removed via list_del_init in peer_remove, then -+ * we just treat this the same as there being no more peers left. The -+ * reason is that seq_nr should indicate to userspace that this isn't a -+ * coherent dump anyway, so they'll try again. -+ */ -+ if (list_empty(&wg->peer_list) || -+ (ctx->next_peer && list_empty(&ctx->next_peer->peer_list))) { -+ nla_nest_cancel(skb, peers_nest); -+ goto out; -+ } -+ lockdep_assert_held(&wg->device_update_lock); -+ peer = list_prepare_entry(ctx->next_peer, &wg->peer_list, peer_list); -+ list_for_each_entry_continue(peer, &wg->peer_list, peer_list) { -+ if (get_peer(peer, skb, ctx)) { -+ done = false; -+ break; -+ } -+ next_peer_cursor = peer; -+ } -+ nla_nest_end(skb, peers_nest); -+ -+out: -+ if (!ret && !done && next_peer_cursor) -+ wg_peer_get(next_peer_cursor); -+ wg_peer_put(ctx->next_peer); -+ mutex_unlock(&wg->device_update_lock); -+ rtnl_unlock(); -+ -+ if (ret) { -+ genlmsg_cancel(skb, hdr); -+ return ret; -+ } -+ genlmsg_end(skb, hdr); -+ if (done) { -+ ctx->next_peer = NULL; -+ return 0; -+ } -+ ctx->next_peer = next_peer_cursor; -+ return skb->len; -+ -+ /* At this point, we can't really deal ourselves with safely zeroing out -+ * the private key material after usage. This will need an additional API -+ * in the kernel for marking skbs as zero_on_free. 
-+ */ -+} -+ -+static int wg_get_device_done(struct netlink_callback *cb) -+{ -+ struct dump_ctx *ctx = DUMP_CTX(cb); -+ -+ if (ctx->wg) -+ dev_put(ctx->wg->dev); -+ wg_peer_put(ctx->next_peer); -+ return 0; -+} -+ -+static int set_port(struct wg_device *wg, u16 port) -+{ -+ struct wg_peer *peer; -+ -+ if (wg->incoming_port == port) -+ return 0; -+ list_for_each_entry(peer, &wg->peer_list, peer_list) -+ wg_socket_clear_peer_endpoint_src(peer); -+ if (!netif_running(wg->dev)) { -+ wg->incoming_port = port; -+ return 0; -+ } -+ return wg_socket_init(wg, port); -+} -+ -+static int set_allowedip(struct wg_peer *peer, struct nlattr **attrs) -+{ -+ int ret = -EINVAL; -+ u16 family; -+ u8 cidr; -+ -+ if (!attrs[WGALLOWEDIP_A_FAMILY] || !attrs[WGALLOWEDIP_A_IPADDR] || -+ !attrs[WGALLOWEDIP_A_CIDR_MASK]) -+ return ret; -+ family = nla_get_u16(attrs[WGALLOWEDIP_A_FAMILY]); -+ cidr = nla_get_u8(attrs[WGALLOWEDIP_A_CIDR_MASK]); -+ -+ if (family == AF_INET && cidr <= 32 && -+ nla_len(attrs[WGALLOWEDIP_A_IPADDR]) == sizeof(struct in_addr)) -+ ret = wg_allowedips_insert_v4( -+ &peer->device->peer_allowedips, -+ nla_data(attrs[WGALLOWEDIP_A_IPADDR]), cidr, peer, -+ &peer->device->device_update_lock); -+ else if (family == AF_INET6 && cidr <= 128 && -+ nla_len(attrs[WGALLOWEDIP_A_IPADDR]) == sizeof(struct in6_addr)) -+ ret = wg_allowedips_insert_v6( -+ &peer->device->peer_allowedips, -+ nla_data(attrs[WGALLOWEDIP_A_IPADDR]), cidr, peer, -+ &peer->device->device_update_lock); -+ -+ return ret; -+} -+ -+static int set_peer(struct wg_device *wg, struct nlattr **attrs) -+{ -+ u8 *public_key = NULL, *preshared_key = NULL; -+ struct wg_peer *peer = NULL; -+ u32 flags = 0; -+ int ret; -+ -+ ret = -EINVAL; -+ if (attrs[WGPEER_A_PUBLIC_KEY] && -+ nla_len(attrs[WGPEER_A_PUBLIC_KEY]) == NOISE_PUBLIC_KEY_LEN) -+ public_key = nla_data(attrs[WGPEER_A_PUBLIC_KEY]); -+ else -+ goto out; -+ if (attrs[WGPEER_A_PRESHARED_KEY] && -+ nla_len(attrs[WGPEER_A_PRESHARED_KEY]) == NOISE_SYMMETRIC_KEY_LEN) -+ preshared_key = nla_data(attrs[WGPEER_A_PRESHARED_KEY]); -+ -+ if (attrs[WGPEER_A_FLAGS]) -+ flags = nla_get_u32(attrs[WGPEER_A_FLAGS]); -+ ret = -EOPNOTSUPP; -+ if (flags & ~__WGPEER_F_ALL) -+ goto out; -+ -+ ret = -EPFNOSUPPORT; -+ if (attrs[WGPEER_A_PROTOCOL_VERSION]) { -+ if (nla_get_u32(attrs[WGPEER_A_PROTOCOL_VERSION]) != 1) -+ goto out; -+ } -+ -+ peer = wg_pubkey_hashtable_lookup(wg->peer_hashtable, -+ nla_data(attrs[WGPEER_A_PUBLIC_KEY])); -+ ret = 0; -+ if (!peer) { /* Peer doesn't exist yet. Add a new one. */ -+ if (flags & (WGPEER_F_REMOVE_ME | WGPEER_F_UPDATE_ONLY)) -+ goto out; -+ -+ /* The peer is new, so there aren't allowed IPs to remove. */ -+ flags &= ~WGPEER_F_REPLACE_ALLOWEDIPS; -+ -+ down_read(&wg->static_identity.lock); -+ if (wg->static_identity.has_identity && -+ !memcmp(nla_data(attrs[WGPEER_A_PUBLIC_KEY]), -+ wg->static_identity.static_public, -+ NOISE_PUBLIC_KEY_LEN)) { -+ /* We silently ignore peers that have the same public -+ * key as the device. The reason we do it silently is -+ * that we'd like for people to be able to reuse the -+ * same set of API calls across peers. -+ */ -+ up_read(&wg->static_identity.lock); -+ ret = 0; -+ goto out; -+ } -+ up_read(&wg->static_identity.lock); -+ -+ peer = wg_peer_create(wg, public_key, preshared_key); -+ if (IS_ERR(peer)) { -+ /* Similar to the above, if the key is invalid, we skip -+ * it without fanfare, so that services don't need to -+ * worry about doing key validation themselves. -+ */ -+ ret = PTR_ERR(peer) == -EKEYREJECTED ? 
0 : PTR_ERR(peer); -+ peer = NULL; -+ goto out; -+ } -+ /* Take additional reference, as though we've just been -+ * looked up. -+ */ -+ wg_peer_get(peer); -+ } -+ -+ if (flags & WGPEER_F_REMOVE_ME) { -+ wg_peer_remove(peer); -+ goto out; -+ } -+ -+ if (preshared_key) { -+ down_write(&peer->handshake.lock); -+ memcpy(&peer->handshake.preshared_key, preshared_key, -+ NOISE_SYMMETRIC_KEY_LEN); -+ up_write(&peer->handshake.lock); -+ } -+ -+ if (attrs[WGPEER_A_ENDPOINT]) { -+ struct sockaddr *addr = nla_data(attrs[WGPEER_A_ENDPOINT]); -+ size_t len = nla_len(attrs[WGPEER_A_ENDPOINT]); -+ -+ if ((len == sizeof(struct sockaddr_in) && -+ addr->sa_family == AF_INET) || -+ (len == sizeof(struct sockaddr_in6) && -+ addr->sa_family == AF_INET6)) { -+ struct endpoint endpoint = { { { 0 } } }; -+ -+ memcpy(&endpoint.addr, addr, len); -+ wg_socket_set_peer_endpoint(peer, &endpoint); -+ } -+ } -+ -+ if (flags & WGPEER_F_REPLACE_ALLOWEDIPS) -+ wg_allowedips_remove_by_peer(&wg->peer_allowedips, peer, -+ &wg->device_update_lock); -+ -+ if (attrs[WGPEER_A_ALLOWEDIPS]) { -+ struct nlattr *attr, *allowedip[WGALLOWEDIP_A_MAX + 1]; -+ int rem; -+ -+ nla_for_each_nested(attr, attrs[WGPEER_A_ALLOWEDIPS], rem) { -+ ret = nla_parse_nested(allowedip, WGALLOWEDIP_A_MAX, -+ attr, allowedip_policy, NULL); -+ if (ret < 0) -+ goto out; -+ ret = set_allowedip(peer, allowedip); -+ if (ret < 0) -+ goto out; -+ } -+ } -+ -+ if (attrs[WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL]) { -+ const u16 persistent_keepalive_interval = nla_get_u16( -+ attrs[WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL]); -+ const bool send_keepalive = -+ !peer->persistent_keepalive_interval && -+ persistent_keepalive_interval && -+ netif_running(wg->dev); -+ -+ peer->persistent_keepalive_interval = persistent_keepalive_interval; -+ if (send_keepalive) -+ wg_packet_send_keepalive(peer); -+ } -+ -+ if (netif_running(wg->dev)) -+ wg_packet_send_staged_packets(peer); -+ -+out: -+ wg_peer_put(peer); -+ if (attrs[WGPEER_A_PRESHARED_KEY]) -+ memzero_explicit(nla_data(attrs[WGPEER_A_PRESHARED_KEY]), -+ nla_len(attrs[WGPEER_A_PRESHARED_KEY])); -+ return ret; -+} -+ -+static int wg_set_device(struct sk_buff *skb, struct genl_info *info) -+{ -+ struct wg_device *wg = lookup_interface(info->attrs, skb); -+ u32 flags = 0; -+ int ret; -+ -+ if (IS_ERR(wg)) { -+ ret = PTR_ERR(wg); -+ goto out_nodev; -+ } -+ -+ rtnl_lock(); -+ mutex_lock(&wg->device_update_lock); -+ -+ if (info->attrs[WGDEVICE_A_FLAGS]) -+ flags = nla_get_u32(info->attrs[WGDEVICE_A_FLAGS]); -+ ret = -EOPNOTSUPP; -+ if (flags & ~__WGDEVICE_F_ALL) -+ goto out; -+ -+ ret = -EPERM; -+ if ((info->attrs[WGDEVICE_A_LISTEN_PORT] || -+ info->attrs[WGDEVICE_A_FWMARK]) && -+ !ns_capable(wg->creating_net->user_ns, CAP_NET_ADMIN)) -+ goto out; -+ -+ ++wg->device_update_gen; -+ -+ if (info->attrs[WGDEVICE_A_FWMARK]) { -+ struct wg_peer *peer; -+ -+ wg->fwmark = nla_get_u32(info->attrs[WGDEVICE_A_FWMARK]); -+ list_for_each_entry(peer, &wg->peer_list, peer_list) -+ wg_socket_clear_peer_endpoint_src(peer); -+ } -+ -+ if (info->attrs[WGDEVICE_A_LISTEN_PORT]) { -+ ret = set_port(wg, -+ nla_get_u16(info->attrs[WGDEVICE_A_LISTEN_PORT])); -+ if (ret) -+ goto out; -+ } -+ -+ if (flags & WGDEVICE_F_REPLACE_PEERS) -+ wg_peer_remove_all(wg); -+ -+ if (info->attrs[WGDEVICE_A_PRIVATE_KEY] && -+ nla_len(info->attrs[WGDEVICE_A_PRIVATE_KEY]) == -+ NOISE_PUBLIC_KEY_LEN) { -+ u8 *private_key = nla_data(info->attrs[WGDEVICE_A_PRIVATE_KEY]); -+ u8 public_key[NOISE_PUBLIC_KEY_LEN]; -+ struct wg_peer *peer, *temp; -+ -+ if 
(!crypto_memneq(wg->static_identity.static_private, -+ private_key, NOISE_PUBLIC_KEY_LEN)) -+ goto skip_set_private_key; -+ -+ /* We remove before setting, to prevent race, which means doing -+ * two 25519-genpub ops. -+ */ -+ if (curve25519_generate_public(public_key, private_key)) { -+ peer = wg_pubkey_hashtable_lookup(wg->peer_hashtable, -+ public_key); -+ if (peer) { -+ wg_peer_put(peer); -+ wg_peer_remove(peer); -+ } -+ } -+ -+ down_write(&wg->static_identity.lock); -+ wg_noise_set_static_identity_private_key(&wg->static_identity, -+ private_key); -+ list_for_each_entry_safe(peer, temp, &wg->peer_list, -+ peer_list) { -+ if (wg_noise_precompute_static_static(peer)) -+ wg_noise_expire_current_peer_keypairs(peer); -+ else -+ wg_peer_remove(peer); -+ } -+ wg_cookie_checker_precompute_device_keys(&wg->cookie_checker); -+ up_write(&wg->static_identity.lock); -+ } -+skip_set_private_key: -+ -+ if (info->attrs[WGDEVICE_A_PEERS]) { -+ struct nlattr *attr, *peer[WGPEER_A_MAX + 1]; -+ int rem; -+ -+ nla_for_each_nested(attr, info->attrs[WGDEVICE_A_PEERS], rem) { -+ ret = nla_parse_nested(peer, WGPEER_A_MAX, attr, -+ peer_policy, NULL); -+ if (ret < 0) -+ goto out; -+ ret = set_peer(wg, peer); -+ if (ret < 0) -+ goto out; -+ } -+ } -+ ret = 0; -+ -+out: -+ mutex_unlock(&wg->device_update_lock); -+ rtnl_unlock(); -+ dev_put(wg->dev); -+out_nodev: -+ if (info->attrs[WGDEVICE_A_PRIVATE_KEY]) -+ memzero_explicit(nla_data(info->attrs[WGDEVICE_A_PRIVATE_KEY]), -+ nla_len(info->attrs[WGDEVICE_A_PRIVATE_KEY])); -+ return ret; -+} -+ -+static const struct genl_ops genl_ops[] = { -+ { -+ .cmd = WG_CMD_GET_DEVICE, -+ .start = wg_get_device_start, -+ .dumpit = wg_get_device_dump, -+ .done = wg_get_device_done, -+ .flags = GENL_UNS_ADMIN_PERM -+ }, { -+ .cmd = WG_CMD_SET_DEVICE, -+ .doit = wg_set_device, -+ .flags = GENL_UNS_ADMIN_PERM -+ } -+}; -+ -+static struct genl_family genl_family __ro_after_init = { -+ .ops = genl_ops, -+ .n_ops = ARRAY_SIZE(genl_ops), -+ .name = WG_GENL_NAME, -+ .version = WG_GENL_VERSION, -+ .maxattr = WGDEVICE_A_MAX, -+ .module = THIS_MODULE, -+ .policy = device_policy, -+ .netnsok = true -+}; -+ -+int __init wg_genetlink_init(void) -+{ -+ return genl_register_family(&genl_family); -+} -+ -+void __exit wg_genetlink_uninit(void) -+{ -+ genl_unregister_family(&genl_family); -+} ---- /dev/null -+++ b/drivers/net/wireguard/netlink.h -@@ -0,0 +1,12 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. -+ */ -+ -+#ifndef _WG_NETLINK_H -+#define _WG_NETLINK_H -+ -+int wg_genetlink_init(void); -+void wg_genetlink_uninit(void); -+ -+#endif /* _WG_NETLINK_H */ ---- /dev/null -+++ b/drivers/net/wireguard/noise.c -@@ -0,0 +1,828 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
-+ */ -+ -+#include "noise.h" -+#include "device.h" -+#include "peer.h" -+#include "messages.h" -+#include "queueing.h" -+#include "peerlookup.h" -+ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+/* This implements Noise_IKpsk2: -+ * -+ * <- s -+ * ****** -+ * -> e, es, s, ss, {t} -+ * <- e, ee, se, psk, {} -+ */ -+ -+static const u8 handshake_name[37] = "Noise_IKpsk2_25519_ChaChaPoly_BLAKE2s"; -+static const u8 identifier_name[34] = "WireGuard v1 zx2c4 Jason@zx2c4.com"; -+static u8 handshake_init_hash[NOISE_HASH_LEN] __ro_after_init; -+static u8 handshake_init_chaining_key[NOISE_HASH_LEN] __ro_after_init; -+static atomic64_t keypair_counter = ATOMIC64_INIT(0); -+ -+void __init wg_noise_init(void) -+{ -+ struct blake2s_state blake; -+ -+ blake2s(handshake_init_chaining_key, handshake_name, NULL, -+ NOISE_HASH_LEN, sizeof(handshake_name), 0); -+ blake2s_init(&blake, NOISE_HASH_LEN); -+ blake2s_update(&blake, handshake_init_chaining_key, NOISE_HASH_LEN); -+ blake2s_update(&blake, identifier_name, sizeof(identifier_name)); -+ blake2s_final(&blake, handshake_init_hash); -+} -+ -+/* Must hold peer->handshake.static_identity->lock */ -+bool wg_noise_precompute_static_static(struct wg_peer *peer) -+{ -+ bool ret = true; -+ -+ down_write(&peer->handshake.lock); -+ if (peer->handshake.static_identity->has_identity) -+ ret = curve25519( -+ peer->handshake.precomputed_static_static, -+ peer->handshake.static_identity->static_private, -+ peer->handshake.remote_static); -+ else -+ memset(peer->handshake.precomputed_static_static, 0, -+ NOISE_PUBLIC_KEY_LEN); -+ up_write(&peer->handshake.lock); -+ return ret; -+} -+ -+bool wg_noise_handshake_init(struct noise_handshake *handshake, -+ struct noise_static_identity *static_identity, -+ const u8 peer_public_key[NOISE_PUBLIC_KEY_LEN], -+ const u8 peer_preshared_key[NOISE_SYMMETRIC_KEY_LEN], -+ struct wg_peer *peer) -+{ -+ memset(handshake, 0, sizeof(*handshake)); -+ init_rwsem(&handshake->lock); -+ handshake->entry.type = INDEX_HASHTABLE_HANDSHAKE; -+ handshake->entry.peer = peer; -+ memcpy(handshake->remote_static, peer_public_key, NOISE_PUBLIC_KEY_LEN); -+ if (peer_preshared_key) -+ memcpy(handshake->preshared_key, peer_preshared_key, -+ NOISE_SYMMETRIC_KEY_LEN); -+ handshake->static_identity = static_identity; -+ handshake->state = HANDSHAKE_ZEROED; -+ return wg_noise_precompute_static_static(peer); -+} -+ -+static void handshake_zero(struct noise_handshake *handshake) -+{ -+ memset(&handshake->ephemeral_private, 0, NOISE_PUBLIC_KEY_LEN); -+ memset(&handshake->remote_ephemeral, 0, NOISE_PUBLIC_KEY_LEN); -+ memset(&handshake->hash, 0, NOISE_HASH_LEN); -+ memset(&handshake->chaining_key, 0, NOISE_HASH_LEN); -+ handshake->remote_index = 0; -+ handshake->state = HANDSHAKE_ZEROED; -+} -+ -+void wg_noise_handshake_clear(struct noise_handshake *handshake) -+{ -+ wg_index_hashtable_remove( -+ handshake->entry.peer->device->index_hashtable, -+ &handshake->entry); -+ down_write(&handshake->lock); -+ handshake_zero(handshake); -+ up_write(&handshake->lock); -+ wg_index_hashtable_remove( -+ handshake->entry.peer->device->index_hashtable, -+ &handshake->entry); -+} -+ -+static struct noise_keypair *keypair_create(struct wg_peer *peer) -+{ -+ struct noise_keypair *keypair = kzalloc(sizeof(*keypair), GFP_KERNEL); -+ -+ if (unlikely(!keypair)) -+ return NULL; -+ keypair->internal_id = atomic64_inc_return(&keypair_counter); -+ keypair->entry.type = INDEX_HASHTABLE_KEYPAIR; -+ keypair->entry.peer = peer; -+ kref_init(&keypair->refcount); -+ return 
keypair; -+} -+ -+static void keypair_free_rcu(struct rcu_head *rcu) -+{ -+ kzfree(container_of(rcu, struct noise_keypair, rcu)); -+} -+ -+static void keypair_free_kref(struct kref *kref) -+{ -+ struct noise_keypair *keypair = -+ container_of(kref, struct noise_keypair, refcount); -+ -+ net_dbg_ratelimited("%s: Keypair %llu destroyed for peer %llu\n", -+ keypair->entry.peer->device->dev->name, -+ keypair->internal_id, -+ keypair->entry.peer->internal_id); -+ wg_index_hashtable_remove(keypair->entry.peer->device->index_hashtable, -+ &keypair->entry); -+ call_rcu(&keypair->rcu, keypair_free_rcu); -+} -+ -+void wg_noise_keypair_put(struct noise_keypair *keypair, bool unreference_now) -+{ -+ if (unlikely(!keypair)) -+ return; -+ if (unlikely(unreference_now)) -+ wg_index_hashtable_remove( -+ keypair->entry.peer->device->index_hashtable, -+ &keypair->entry); -+ kref_put(&keypair->refcount, keypair_free_kref); -+} -+ -+struct noise_keypair *wg_noise_keypair_get(struct noise_keypair *keypair) -+{ -+ RCU_LOCKDEP_WARN(!rcu_read_lock_bh_held(), -+ "Taking noise keypair reference without holding the RCU BH read lock"); -+ if (unlikely(!keypair || !kref_get_unless_zero(&keypair->refcount))) -+ return NULL; -+ return keypair; -+} -+ -+void wg_noise_keypairs_clear(struct noise_keypairs *keypairs) -+{ -+ struct noise_keypair *old; -+ -+ spin_lock_bh(&keypairs->keypair_update_lock); -+ -+ /* We zero the next_keypair before zeroing the others, so that -+ * wg_noise_received_with_keypair returns early before subsequent ones -+ * are zeroed. -+ */ -+ old = rcu_dereference_protected(keypairs->next_keypair, -+ lockdep_is_held(&keypairs->keypair_update_lock)); -+ RCU_INIT_POINTER(keypairs->next_keypair, NULL); -+ wg_noise_keypair_put(old, true); -+ -+ old = rcu_dereference_protected(keypairs->previous_keypair, -+ lockdep_is_held(&keypairs->keypair_update_lock)); -+ RCU_INIT_POINTER(keypairs->previous_keypair, NULL); -+ wg_noise_keypair_put(old, true); -+ -+ old = rcu_dereference_protected(keypairs->current_keypair, -+ lockdep_is_held(&keypairs->keypair_update_lock)); -+ RCU_INIT_POINTER(keypairs->current_keypair, NULL); -+ wg_noise_keypair_put(old, true); -+ -+ spin_unlock_bh(&keypairs->keypair_update_lock); -+} -+ -+void wg_noise_expire_current_peer_keypairs(struct wg_peer *peer) -+{ -+ struct noise_keypair *keypair; -+ -+ wg_noise_handshake_clear(&peer->handshake); -+ wg_noise_reset_last_sent_handshake(&peer->last_sent_handshake); -+ -+ spin_lock_bh(&peer->keypairs.keypair_update_lock); -+ keypair = rcu_dereference_protected(peer->keypairs.next_keypair, -+ lockdep_is_held(&peer->keypairs.keypair_update_lock)); -+ if (keypair) -+ keypair->sending.is_valid = false; -+ keypair = rcu_dereference_protected(peer->keypairs.current_keypair, -+ lockdep_is_held(&peer->keypairs.keypair_update_lock)); -+ if (keypair) -+ keypair->sending.is_valid = false; -+ spin_unlock_bh(&peer->keypairs.keypair_update_lock); -+} -+ -+static void add_new_keypair(struct noise_keypairs *keypairs, -+ struct noise_keypair *new_keypair) -+{ -+ struct noise_keypair *previous_keypair, *next_keypair, *current_keypair; -+ -+ spin_lock_bh(&keypairs->keypair_update_lock); -+ previous_keypair = rcu_dereference_protected(keypairs->previous_keypair, -+ lockdep_is_held(&keypairs->keypair_update_lock)); -+ next_keypair = rcu_dereference_protected(keypairs->next_keypair, -+ lockdep_is_held(&keypairs->keypair_update_lock)); -+ current_keypair = rcu_dereference_protected(keypairs->current_keypair, -+ lockdep_is_held(&keypairs->keypair_update_lock)); -+ if 
(new_keypair->i_am_the_initiator) { -+ /* If we're the initiator, it means we've sent a handshake, and -+ * received a confirmation response, which means this new -+ * keypair can now be used. -+ */ -+ if (next_keypair) { -+ /* If there already was a next keypair pending, we -+ * demote it to be the previous keypair, and free the -+ * existing current. Note that this means KCI can result -+ * in this transition. It would perhaps be more sound to -+ * always just get rid of the unused next keypair -+ * instead of putting it in the previous slot, but this -+ * might be a bit less robust. Something to think about -+ * for the future. -+ */ -+ RCU_INIT_POINTER(keypairs->next_keypair, NULL); -+ rcu_assign_pointer(keypairs->previous_keypair, -+ next_keypair); -+ wg_noise_keypair_put(current_keypair, true); -+ } else /* If there wasn't an existing next keypair, we replace -+ * the previous with the current one. -+ */ -+ rcu_assign_pointer(keypairs->previous_keypair, -+ current_keypair); -+ /* At this point we can get rid of the old previous keypair, and -+ * set up the new keypair. -+ */ -+ wg_noise_keypair_put(previous_keypair, true); -+ rcu_assign_pointer(keypairs->current_keypair, new_keypair); -+ } else { -+ /* If we're the responder, it means we can't use the new keypair -+ * until we receive confirmation via the first data packet, so -+ * we get rid of the existing previous one, the possibly -+ * existing next one, and slide in the new next one. -+ */ -+ rcu_assign_pointer(keypairs->next_keypair, new_keypair); -+ wg_noise_keypair_put(next_keypair, true); -+ RCU_INIT_POINTER(keypairs->previous_keypair, NULL); -+ wg_noise_keypair_put(previous_keypair, true); -+ } -+ spin_unlock_bh(&keypairs->keypair_update_lock); -+} -+ -+bool wg_noise_received_with_keypair(struct noise_keypairs *keypairs, -+ struct noise_keypair *received_keypair) -+{ -+ struct noise_keypair *old_keypair; -+ bool key_is_new; -+ -+ /* We first check without taking the spinlock. */ -+ key_is_new = received_keypair == -+ rcu_access_pointer(keypairs->next_keypair); -+ if (likely(!key_is_new)) -+ return false; -+ -+ spin_lock_bh(&keypairs->keypair_update_lock); -+ /* After locking, we double check that things didn't change from -+ * beneath us. -+ */ -+ if (unlikely(received_keypair != -+ rcu_dereference_protected(keypairs->next_keypair, -+ lockdep_is_held(&keypairs->keypair_update_lock)))) { -+ spin_unlock_bh(&keypairs->keypair_update_lock); -+ return false; -+ } -+ -+ /* When we've finally received the confirmation, we slide the next -+ * into the current, the current into the previous, and get rid of -+ * the old previous. 
-+ */ -+ old_keypair = rcu_dereference_protected(keypairs->previous_keypair, -+ lockdep_is_held(&keypairs->keypair_update_lock)); -+ rcu_assign_pointer(keypairs->previous_keypair, -+ rcu_dereference_protected(keypairs->current_keypair, -+ lockdep_is_held(&keypairs->keypair_update_lock))); -+ wg_noise_keypair_put(old_keypair, true); -+ rcu_assign_pointer(keypairs->current_keypair, received_keypair); -+ RCU_INIT_POINTER(keypairs->next_keypair, NULL); -+ -+ spin_unlock_bh(&keypairs->keypair_update_lock); -+ return true; -+} -+ -+/* Must hold static_identity->lock */ -+void wg_noise_set_static_identity_private_key( -+ struct noise_static_identity *static_identity, -+ const u8 private_key[NOISE_PUBLIC_KEY_LEN]) -+{ -+ memcpy(static_identity->static_private, private_key, -+ NOISE_PUBLIC_KEY_LEN); -+ curve25519_clamp_secret(static_identity->static_private); -+ static_identity->has_identity = curve25519_generate_public( -+ static_identity->static_public, private_key); -+} -+ -+/* This is Hugo Krawczyk's HKDF: -+ * - https://eprint.iacr.org/2010/264.pdf -+ * - https://tools.ietf.org/html/rfc5869 -+ */ -+static void kdf(u8 *first_dst, u8 *second_dst, u8 *third_dst, const u8 *data, -+ size_t first_len, size_t second_len, size_t third_len, -+ size_t data_len, const u8 chaining_key[NOISE_HASH_LEN]) -+{ -+ u8 output[BLAKE2S_HASH_SIZE + 1]; -+ u8 secret[BLAKE2S_HASH_SIZE]; -+ -+ WARN_ON(IS_ENABLED(DEBUG) && -+ (first_len > BLAKE2S_HASH_SIZE || -+ second_len > BLAKE2S_HASH_SIZE || -+ third_len > BLAKE2S_HASH_SIZE || -+ ((second_len || second_dst || third_len || third_dst) && -+ (!first_len || !first_dst)) || -+ ((third_len || third_dst) && (!second_len || !second_dst)))); -+ -+ /* Extract entropy from data into secret */ -+ blake2s256_hmac(secret, data, chaining_key, data_len, NOISE_HASH_LEN); -+ -+ if (!first_dst || !first_len) -+ goto out; -+ -+ /* Expand first key: key = secret, data = 0x1 */ -+ output[0] = 1; -+ blake2s256_hmac(output, output, secret, 1, BLAKE2S_HASH_SIZE); -+ memcpy(first_dst, output, first_len); -+ -+ if (!second_dst || !second_len) -+ goto out; -+ -+ /* Expand second key: key = secret, data = first-key || 0x2 */ -+ output[BLAKE2S_HASH_SIZE] = 2; -+ blake2s256_hmac(output, output, secret, BLAKE2S_HASH_SIZE + 1, -+ BLAKE2S_HASH_SIZE); -+ memcpy(second_dst, output, second_len); -+ -+ if (!third_dst || !third_len) -+ goto out; -+ -+ /* Expand third key: key = secret, data = second-key || 0x3 */ -+ output[BLAKE2S_HASH_SIZE] = 3; -+ blake2s256_hmac(output, output, secret, BLAKE2S_HASH_SIZE + 1, -+ BLAKE2S_HASH_SIZE); -+ memcpy(third_dst, output, third_len); -+ -+out: -+ /* Clear sensitive data from stack */ -+ memzero_explicit(secret, BLAKE2S_HASH_SIZE); -+ memzero_explicit(output, BLAKE2S_HASH_SIZE + 1); -+} -+ -+static void symmetric_key_init(struct noise_symmetric_key *key) -+{ -+ spin_lock_init(&key->counter.receive.lock); -+ atomic64_set(&key->counter.counter, 0); -+ memset(key->counter.receive.backtrack, 0, -+ sizeof(key->counter.receive.backtrack)); -+ key->birthdate = ktime_get_coarse_boottime_ns(); -+ key->is_valid = true; -+} -+ -+static void derive_keys(struct noise_symmetric_key *first_dst, -+ struct noise_symmetric_key *second_dst, -+ const u8 chaining_key[NOISE_HASH_LEN]) -+{ -+ kdf(first_dst->key, second_dst->key, NULL, NULL, -+ NOISE_SYMMETRIC_KEY_LEN, NOISE_SYMMETRIC_KEY_LEN, 0, 0, -+ chaining_key); -+ symmetric_key_init(first_dst); -+ symmetric_key_init(second_dst); -+} -+ -+static bool __must_check mix_dh(u8 chaining_key[NOISE_HASH_LEN], -+ u8 
key[NOISE_SYMMETRIC_KEY_LEN], -+ const u8 private[NOISE_PUBLIC_KEY_LEN], -+ const u8 public[NOISE_PUBLIC_KEY_LEN]) -+{ -+ u8 dh_calculation[NOISE_PUBLIC_KEY_LEN]; -+ -+ if (unlikely(!curve25519(dh_calculation, private, public))) -+ return false; -+ kdf(chaining_key, key, NULL, dh_calculation, NOISE_HASH_LEN, -+ NOISE_SYMMETRIC_KEY_LEN, 0, NOISE_PUBLIC_KEY_LEN, chaining_key); -+ memzero_explicit(dh_calculation, NOISE_PUBLIC_KEY_LEN); -+ return true; -+} -+ -+static void mix_hash(u8 hash[NOISE_HASH_LEN], const u8 *src, size_t src_len) -+{ -+ struct blake2s_state blake; -+ -+ blake2s_init(&blake, NOISE_HASH_LEN); -+ blake2s_update(&blake, hash, NOISE_HASH_LEN); -+ blake2s_update(&blake, src, src_len); -+ blake2s_final(&blake, hash); -+} -+ -+static void mix_psk(u8 chaining_key[NOISE_HASH_LEN], u8 hash[NOISE_HASH_LEN], -+ u8 key[NOISE_SYMMETRIC_KEY_LEN], -+ const u8 psk[NOISE_SYMMETRIC_KEY_LEN]) -+{ -+ u8 temp_hash[NOISE_HASH_LEN]; -+ -+ kdf(chaining_key, temp_hash, key, psk, NOISE_HASH_LEN, NOISE_HASH_LEN, -+ NOISE_SYMMETRIC_KEY_LEN, NOISE_SYMMETRIC_KEY_LEN, chaining_key); -+ mix_hash(hash, temp_hash, NOISE_HASH_LEN); -+ memzero_explicit(temp_hash, NOISE_HASH_LEN); -+} -+ -+static void handshake_init(u8 chaining_key[NOISE_HASH_LEN], -+ u8 hash[NOISE_HASH_LEN], -+ const u8 remote_static[NOISE_PUBLIC_KEY_LEN]) -+{ -+ memcpy(hash, handshake_init_hash, NOISE_HASH_LEN); -+ memcpy(chaining_key, handshake_init_chaining_key, NOISE_HASH_LEN); -+ mix_hash(hash, remote_static, NOISE_PUBLIC_KEY_LEN); -+} -+ -+static void message_encrypt(u8 *dst_ciphertext, const u8 *src_plaintext, -+ size_t src_len, u8 key[NOISE_SYMMETRIC_KEY_LEN], -+ u8 hash[NOISE_HASH_LEN]) -+{ -+ chacha20poly1305_encrypt(dst_ciphertext, src_plaintext, src_len, hash, -+ NOISE_HASH_LEN, -+ 0 /* Always zero for Noise_IK */, key); -+ mix_hash(hash, dst_ciphertext, noise_encrypted_len(src_len)); -+} -+ -+static bool message_decrypt(u8 *dst_plaintext, const u8 *src_ciphertext, -+ size_t src_len, u8 key[NOISE_SYMMETRIC_KEY_LEN], -+ u8 hash[NOISE_HASH_LEN]) -+{ -+ if (!chacha20poly1305_decrypt(dst_plaintext, src_ciphertext, src_len, -+ hash, NOISE_HASH_LEN, -+ 0 /* Always zero for Noise_IK */, key)) -+ return false; -+ mix_hash(hash, src_ciphertext, src_len); -+ return true; -+} -+ -+static void message_ephemeral(u8 ephemeral_dst[NOISE_PUBLIC_KEY_LEN], -+ const u8 ephemeral_src[NOISE_PUBLIC_KEY_LEN], -+ u8 chaining_key[NOISE_HASH_LEN], -+ u8 hash[NOISE_HASH_LEN]) -+{ -+ if (ephemeral_dst != ephemeral_src) -+ memcpy(ephemeral_dst, ephemeral_src, NOISE_PUBLIC_KEY_LEN); -+ mix_hash(hash, ephemeral_src, NOISE_PUBLIC_KEY_LEN); -+ kdf(chaining_key, NULL, NULL, ephemeral_src, NOISE_HASH_LEN, 0, 0, -+ NOISE_PUBLIC_KEY_LEN, chaining_key); -+} -+ -+static void tai64n_now(u8 output[NOISE_TIMESTAMP_LEN]) -+{ -+ struct timespec64 now; -+ -+ ktime_get_real_ts64(&now); -+ -+ /* In order to prevent some sort of infoleak from precise timers, we -+ * round down the nanoseconds part to the closest rounded-down power of -+ * two to the maximum initiations per second allowed anyway by the -+ * implementation. 
-+ */ -+ now.tv_nsec = ALIGN_DOWN(now.tv_nsec, -+ rounddown_pow_of_two(NSEC_PER_SEC / INITIATIONS_PER_SECOND)); -+ -+ /* https://cr.yp.to/libtai/tai64.html */ -+ *(__be64 *)output = cpu_to_be64(0x400000000000000aULL + now.tv_sec); -+ *(__be32 *)(output + sizeof(__be64)) = cpu_to_be32(now.tv_nsec); -+} -+ -+bool -+wg_noise_handshake_create_initiation(struct message_handshake_initiation *dst, -+ struct noise_handshake *handshake) -+{ -+ u8 timestamp[NOISE_TIMESTAMP_LEN]; -+ u8 key[NOISE_SYMMETRIC_KEY_LEN]; -+ bool ret = false; -+ -+ /* We need to wait for crng _before_ taking any locks, since -+ * curve25519_generate_secret uses get_random_bytes_wait. -+ */ -+ wait_for_random_bytes(); -+ -+ down_read(&handshake->static_identity->lock); -+ down_write(&handshake->lock); -+ -+ if (unlikely(!handshake->static_identity->has_identity)) -+ goto out; -+ -+ dst->header.type = cpu_to_le32(MESSAGE_HANDSHAKE_INITIATION); -+ -+ handshake_init(handshake->chaining_key, handshake->hash, -+ handshake->remote_static); -+ -+ /* e */ -+ curve25519_generate_secret(handshake->ephemeral_private); -+ if (!curve25519_generate_public(dst->unencrypted_ephemeral, -+ handshake->ephemeral_private)) -+ goto out; -+ message_ephemeral(dst->unencrypted_ephemeral, -+ dst->unencrypted_ephemeral, handshake->chaining_key, -+ handshake->hash); -+ -+ /* es */ -+ if (!mix_dh(handshake->chaining_key, key, handshake->ephemeral_private, -+ handshake->remote_static)) -+ goto out; -+ -+ /* s */ -+ message_encrypt(dst->encrypted_static, -+ handshake->static_identity->static_public, -+ NOISE_PUBLIC_KEY_LEN, key, handshake->hash); -+ -+ /* ss */ -+ kdf(handshake->chaining_key, key, NULL, -+ handshake->precomputed_static_static, NOISE_HASH_LEN, -+ NOISE_SYMMETRIC_KEY_LEN, 0, NOISE_PUBLIC_KEY_LEN, -+ handshake->chaining_key); -+ -+ /* {t} */ -+ tai64n_now(timestamp); -+ message_encrypt(dst->encrypted_timestamp, timestamp, -+ NOISE_TIMESTAMP_LEN, key, handshake->hash); -+ -+ dst->sender_index = wg_index_hashtable_insert( -+ handshake->entry.peer->device->index_hashtable, -+ &handshake->entry); -+ -+ handshake->state = HANDSHAKE_CREATED_INITIATION; -+ ret = true; -+ -+out: -+ up_write(&handshake->lock); -+ up_read(&handshake->static_identity->lock); -+ memzero_explicit(key, NOISE_SYMMETRIC_KEY_LEN); -+ return ret; -+} -+ -+struct wg_peer * -+wg_noise_handshake_consume_initiation(struct message_handshake_initiation *src, -+ struct wg_device *wg) -+{ -+ struct wg_peer *peer = NULL, *ret_peer = NULL; -+ struct noise_handshake *handshake; -+ bool replay_attack, flood_attack; -+ u8 key[NOISE_SYMMETRIC_KEY_LEN]; -+ u8 chaining_key[NOISE_HASH_LEN]; -+ u8 hash[NOISE_HASH_LEN]; -+ u8 s[NOISE_PUBLIC_KEY_LEN]; -+ u8 e[NOISE_PUBLIC_KEY_LEN]; -+ u8 t[NOISE_TIMESTAMP_LEN]; -+ u64 initiation_consumption; -+ -+ down_read(&wg->static_identity.lock); -+ if (unlikely(!wg->static_identity.has_identity)) -+ goto out; -+ -+ handshake_init(chaining_key, hash, wg->static_identity.static_public); -+ -+ /* e */ -+ message_ephemeral(e, src->unencrypted_ephemeral, chaining_key, hash); -+ -+ /* es */ -+ if (!mix_dh(chaining_key, key, wg->static_identity.static_private, e)) -+ goto out; -+ -+ /* s */ -+ if (!message_decrypt(s, src->encrypted_static, -+ sizeof(src->encrypted_static), key, hash)) -+ goto out; -+ -+ /* Lookup which peer we're actually talking to */ -+ peer = wg_pubkey_hashtable_lookup(wg->peer_hashtable, s); -+ if (!peer) -+ goto out; -+ handshake = &peer->handshake; -+ -+ /* ss */ -+ kdf(chaining_key, key, NULL, handshake->precomputed_static_static, -+ 
NOISE_HASH_LEN, NOISE_SYMMETRIC_KEY_LEN, 0, NOISE_PUBLIC_KEY_LEN, -+ chaining_key); -+ -+ /* {t} */ -+ if (!message_decrypt(t, src->encrypted_timestamp, -+ sizeof(src->encrypted_timestamp), key, hash)) -+ goto out; -+ -+ down_read(&handshake->lock); -+ replay_attack = memcmp(t, handshake->latest_timestamp, -+ NOISE_TIMESTAMP_LEN) <= 0; -+ flood_attack = (s64)handshake->last_initiation_consumption + -+ NSEC_PER_SEC / INITIATIONS_PER_SECOND > -+ (s64)ktime_get_coarse_boottime_ns(); -+ up_read(&handshake->lock); -+ if (replay_attack || flood_attack) -+ goto out; -+ -+ /* Success! Copy everything to peer */ -+ down_write(&handshake->lock); -+ memcpy(handshake->remote_ephemeral, e, NOISE_PUBLIC_KEY_LEN); -+ if (memcmp(t, handshake->latest_timestamp, NOISE_TIMESTAMP_LEN) > 0) -+ memcpy(handshake->latest_timestamp, t, NOISE_TIMESTAMP_LEN); -+ memcpy(handshake->hash, hash, NOISE_HASH_LEN); -+ memcpy(handshake->chaining_key, chaining_key, NOISE_HASH_LEN); -+ handshake->remote_index = src->sender_index; -+ if ((s64)(handshake->last_initiation_consumption - -+ (initiation_consumption = ktime_get_coarse_boottime_ns())) < 0) -+ handshake->last_initiation_consumption = initiation_consumption; -+ handshake->state = HANDSHAKE_CONSUMED_INITIATION; -+ up_write(&handshake->lock); -+ ret_peer = peer; -+ -+out: -+ memzero_explicit(key, NOISE_SYMMETRIC_KEY_LEN); -+ memzero_explicit(hash, NOISE_HASH_LEN); -+ memzero_explicit(chaining_key, NOISE_HASH_LEN); -+ up_read(&wg->static_identity.lock); -+ if (!ret_peer) -+ wg_peer_put(peer); -+ return ret_peer; -+} -+ -+bool wg_noise_handshake_create_response(struct message_handshake_response *dst, -+ struct noise_handshake *handshake) -+{ -+ u8 key[NOISE_SYMMETRIC_KEY_LEN]; -+ bool ret = false; -+ -+ /* We need to wait for crng _before_ taking any locks, since -+ * curve25519_generate_secret uses get_random_bytes_wait. 
-+ */ -+ wait_for_random_bytes(); -+ -+ down_read(&handshake->static_identity->lock); -+ down_write(&handshake->lock); -+ -+ if (handshake->state != HANDSHAKE_CONSUMED_INITIATION) -+ goto out; -+ -+ dst->header.type = cpu_to_le32(MESSAGE_HANDSHAKE_RESPONSE); -+ dst->receiver_index = handshake->remote_index; -+ -+ /* e */ -+ curve25519_generate_secret(handshake->ephemeral_private); -+ if (!curve25519_generate_public(dst->unencrypted_ephemeral, -+ handshake->ephemeral_private)) -+ goto out; -+ message_ephemeral(dst->unencrypted_ephemeral, -+ dst->unencrypted_ephemeral, handshake->chaining_key, -+ handshake->hash); -+ -+ /* ee */ -+ if (!mix_dh(handshake->chaining_key, NULL, handshake->ephemeral_private, -+ handshake->remote_ephemeral)) -+ goto out; -+ -+ /* se */ -+ if (!mix_dh(handshake->chaining_key, NULL, handshake->ephemeral_private, -+ handshake->remote_static)) -+ goto out; -+ -+ /* psk */ -+ mix_psk(handshake->chaining_key, handshake->hash, key, -+ handshake->preshared_key); -+ -+ /* {} */ -+ message_encrypt(dst->encrypted_nothing, NULL, 0, key, handshake->hash); -+ -+ dst->sender_index = wg_index_hashtable_insert( -+ handshake->entry.peer->device->index_hashtable, -+ &handshake->entry); -+ -+ handshake->state = HANDSHAKE_CREATED_RESPONSE; -+ ret = true; -+ -+out: -+ up_write(&handshake->lock); -+ up_read(&handshake->static_identity->lock); -+ memzero_explicit(key, NOISE_SYMMETRIC_KEY_LEN); -+ return ret; -+} -+ -+struct wg_peer * -+wg_noise_handshake_consume_response(struct message_handshake_response *src, -+ struct wg_device *wg) -+{ -+ enum noise_handshake_state state = HANDSHAKE_ZEROED; -+ struct wg_peer *peer = NULL, *ret_peer = NULL; -+ struct noise_handshake *handshake; -+ u8 key[NOISE_SYMMETRIC_KEY_LEN]; -+ u8 hash[NOISE_HASH_LEN]; -+ u8 chaining_key[NOISE_HASH_LEN]; -+ u8 e[NOISE_PUBLIC_KEY_LEN]; -+ u8 ephemeral_private[NOISE_PUBLIC_KEY_LEN]; -+ u8 static_private[NOISE_PUBLIC_KEY_LEN]; -+ -+ down_read(&wg->static_identity.lock); -+ -+ if (unlikely(!wg->static_identity.has_identity)) -+ goto out; -+ -+ handshake = (struct noise_handshake *)wg_index_hashtable_lookup( -+ wg->index_hashtable, INDEX_HASHTABLE_HANDSHAKE, -+ src->receiver_index, &peer); -+ if (unlikely(!handshake)) -+ goto out; -+ -+ down_read(&handshake->lock); -+ state = handshake->state; -+ memcpy(hash, handshake->hash, NOISE_HASH_LEN); -+ memcpy(chaining_key, handshake->chaining_key, NOISE_HASH_LEN); -+ memcpy(ephemeral_private, handshake->ephemeral_private, -+ NOISE_PUBLIC_KEY_LEN); -+ up_read(&handshake->lock); -+ -+ if (state != HANDSHAKE_CREATED_INITIATION) -+ goto fail; -+ -+ /* e */ -+ message_ephemeral(e, src->unencrypted_ephemeral, chaining_key, hash); -+ -+ /* ee */ -+ if (!mix_dh(chaining_key, NULL, ephemeral_private, e)) -+ goto fail; -+ -+ /* se */ -+ if (!mix_dh(chaining_key, NULL, wg->static_identity.static_private, e)) -+ goto fail; -+ -+ /* psk */ -+ mix_psk(chaining_key, hash, key, handshake->preshared_key); -+ -+ /* {} */ -+ if (!message_decrypt(NULL, src->encrypted_nothing, -+ sizeof(src->encrypted_nothing), key, hash)) -+ goto fail; -+ -+ /* Success! Copy everything to peer */ -+ down_write(&handshake->lock); -+ /* It's important to check that the state is still the same, while we -+ * have an exclusive lock. 
-+ */ -+ if (handshake->state != state) { -+ up_write(&handshake->lock); -+ goto fail; -+ } -+ memcpy(handshake->remote_ephemeral, e, NOISE_PUBLIC_KEY_LEN); -+ memcpy(handshake->hash, hash, NOISE_HASH_LEN); -+ memcpy(handshake->chaining_key, chaining_key, NOISE_HASH_LEN); -+ handshake->remote_index = src->sender_index; -+ handshake->state = HANDSHAKE_CONSUMED_RESPONSE; -+ up_write(&handshake->lock); -+ ret_peer = peer; -+ goto out; -+ -+fail: -+ wg_peer_put(peer); -+out: -+ memzero_explicit(key, NOISE_SYMMETRIC_KEY_LEN); -+ memzero_explicit(hash, NOISE_HASH_LEN); -+ memzero_explicit(chaining_key, NOISE_HASH_LEN); -+ memzero_explicit(ephemeral_private, NOISE_PUBLIC_KEY_LEN); -+ memzero_explicit(static_private, NOISE_PUBLIC_KEY_LEN); -+ up_read(&wg->static_identity.lock); -+ return ret_peer; -+} -+ -+bool wg_noise_handshake_begin_session(struct noise_handshake *handshake, -+ struct noise_keypairs *keypairs) -+{ -+ struct noise_keypair *new_keypair; -+ bool ret = false; -+ -+ down_write(&handshake->lock); -+ if (handshake->state != HANDSHAKE_CREATED_RESPONSE && -+ handshake->state != HANDSHAKE_CONSUMED_RESPONSE) -+ goto out; -+ -+ new_keypair = keypair_create(handshake->entry.peer); -+ if (!new_keypair) -+ goto out; -+ new_keypair->i_am_the_initiator = handshake->state == -+ HANDSHAKE_CONSUMED_RESPONSE; -+ new_keypair->remote_index = handshake->remote_index; -+ -+ if (new_keypair->i_am_the_initiator) -+ derive_keys(&new_keypair->sending, &new_keypair->receiving, -+ handshake->chaining_key); -+ else -+ derive_keys(&new_keypair->receiving, &new_keypair->sending, -+ handshake->chaining_key); -+ -+ handshake_zero(handshake); -+ rcu_read_lock_bh(); -+ if (likely(!READ_ONCE(container_of(handshake, struct wg_peer, -+ handshake)->is_dead))) { -+ add_new_keypair(keypairs, new_keypair); -+ net_dbg_ratelimited("%s: Keypair %llu created for peer %llu\n", -+ handshake->entry.peer->device->dev->name, -+ new_keypair->internal_id, -+ handshake->entry.peer->internal_id); -+ ret = wg_index_hashtable_replace( -+ handshake->entry.peer->device->index_hashtable, -+ &handshake->entry, &new_keypair->entry); -+ } else { -+ kzfree(new_keypair); -+ } -+ rcu_read_unlock_bh(); -+ -+out: -+ up_write(&handshake->lock); -+ return ret; -+} ---- /dev/null -+++ b/drivers/net/wireguard/noise.h -@@ -0,0 +1,137 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
-+ */ -+#ifndef _WG_NOISE_H -+#define _WG_NOISE_H -+ -+#include "messages.h" -+#include "peerlookup.h" -+ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+union noise_counter { -+ struct { -+ u64 counter; -+ unsigned long backtrack[COUNTER_BITS_TOTAL / BITS_PER_LONG]; -+ spinlock_t lock; -+ } receive; -+ atomic64_t counter; -+}; -+ -+struct noise_symmetric_key { -+ u8 key[NOISE_SYMMETRIC_KEY_LEN]; -+ union noise_counter counter; -+ u64 birthdate; -+ bool is_valid; -+}; -+ -+struct noise_keypair { -+ struct index_hashtable_entry entry; -+ struct noise_symmetric_key sending; -+ struct noise_symmetric_key receiving; -+ __le32 remote_index; -+ bool i_am_the_initiator; -+ struct kref refcount; -+ struct rcu_head rcu; -+ u64 internal_id; -+}; -+ -+struct noise_keypairs { -+ struct noise_keypair __rcu *current_keypair; -+ struct noise_keypair __rcu *previous_keypair; -+ struct noise_keypair __rcu *next_keypair; -+ spinlock_t keypair_update_lock; -+}; -+ -+struct noise_static_identity { -+ u8 static_public[NOISE_PUBLIC_KEY_LEN]; -+ u8 static_private[NOISE_PUBLIC_KEY_LEN]; -+ struct rw_semaphore lock; -+ bool has_identity; -+}; -+ -+enum noise_handshake_state { -+ HANDSHAKE_ZEROED, -+ HANDSHAKE_CREATED_INITIATION, -+ HANDSHAKE_CONSUMED_INITIATION, -+ HANDSHAKE_CREATED_RESPONSE, -+ HANDSHAKE_CONSUMED_RESPONSE -+}; -+ -+struct noise_handshake { -+ struct index_hashtable_entry entry; -+ -+ enum noise_handshake_state state; -+ u64 last_initiation_consumption; -+ -+ struct noise_static_identity *static_identity; -+ -+ u8 ephemeral_private[NOISE_PUBLIC_KEY_LEN]; -+ u8 remote_static[NOISE_PUBLIC_KEY_LEN]; -+ u8 remote_ephemeral[NOISE_PUBLIC_KEY_LEN]; -+ u8 precomputed_static_static[NOISE_PUBLIC_KEY_LEN]; -+ -+ u8 preshared_key[NOISE_SYMMETRIC_KEY_LEN]; -+ -+ u8 hash[NOISE_HASH_LEN]; -+ u8 chaining_key[NOISE_HASH_LEN]; -+ -+ u8 latest_timestamp[NOISE_TIMESTAMP_LEN]; -+ __le32 remote_index; -+ -+ /* Protects all members except the immutable (after noise_handshake_ -+ * init): remote_static, precomputed_static_static, static_identity. 
-+ */ -+ struct rw_semaphore lock; -+}; -+ -+struct wg_device; -+ -+void wg_noise_init(void); -+bool wg_noise_handshake_init(struct noise_handshake *handshake, -+ struct noise_static_identity *static_identity, -+ const u8 peer_public_key[NOISE_PUBLIC_KEY_LEN], -+ const u8 peer_preshared_key[NOISE_SYMMETRIC_KEY_LEN], -+ struct wg_peer *peer); -+void wg_noise_handshake_clear(struct noise_handshake *handshake); -+static inline void wg_noise_reset_last_sent_handshake(atomic64_t *handshake_ns) -+{ -+ atomic64_set(handshake_ns, ktime_get_coarse_boottime_ns() - -+ (u64)(REKEY_TIMEOUT + 1) * NSEC_PER_SEC); -+} -+ -+void wg_noise_keypair_put(struct noise_keypair *keypair, bool unreference_now); -+struct noise_keypair *wg_noise_keypair_get(struct noise_keypair *keypair); -+void wg_noise_keypairs_clear(struct noise_keypairs *keypairs); -+bool wg_noise_received_with_keypair(struct noise_keypairs *keypairs, -+ struct noise_keypair *received_keypair); -+void wg_noise_expire_current_peer_keypairs(struct wg_peer *peer); -+ -+void wg_noise_set_static_identity_private_key( -+ struct noise_static_identity *static_identity, -+ const u8 private_key[NOISE_PUBLIC_KEY_LEN]); -+bool wg_noise_precompute_static_static(struct wg_peer *peer); -+ -+bool -+wg_noise_handshake_create_initiation(struct message_handshake_initiation *dst, -+ struct noise_handshake *handshake); -+struct wg_peer * -+wg_noise_handshake_consume_initiation(struct message_handshake_initiation *src, -+ struct wg_device *wg); -+ -+bool wg_noise_handshake_create_response(struct message_handshake_response *dst, -+ struct noise_handshake *handshake); -+struct wg_peer * -+wg_noise_handshake_consume_response(struct message_handshake_response *src, -+ struct wg_device *wg); -+ -+bool wg_noise_handshake_begin_session(struct noise_handshake *handshake, -+ struct noise_keypairs *keypairs); -+ -+#endif /* _WG_NOISE_H */ ---- /dev/null -+++ b/drivers/net/wireguard/peer.c -@@ -0,0 +1,240 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
-+ */ -+ -+#include "peer.h" -+#include "device.h" -+#include "queueing.h" -+#include "timers.h" -+#include "peerlookup.h" -+#include "noise.h" -+ -+#include -+#include -+#include -+#include -+ -+static atomic64_t peer_counter = ATOMIC64_INIT(0); -+ -+struct wg_peer *wg_peer_create(struct wg_device *wg, -+ const u8 public_key[NOISE_PUBLIC_KEY_LEN], -+ const u8 preshared_key[NOISE_SYMMETRIC_KEY_LEN]) -+{ -+ struct wg_peer *peer; -+ int ret = -ENOMEM; -+ -+ lockdep_assert_held(&wg->device_update_lock); -+ -+ if (wg->num_peers >= MAX_PEERS_PER_DEVICE) -+ return ERR_PTR(ret); -+ -+ peer = kzalloc(sizeof(*peer), GFP_KERNEL); -+ if (unlikely(!peer)) -+ return ERR_PTR(ret); -+ peer->device = wg; -+ -+ if (!wg_noise_handshake_init(&peer->handshake, &wg->static_identity, -+ public_key, preshared_key, peer)) { -+ ret = -EKEYREJECTED; -+ goto err_1; -+ } -+ if (dst_cache_init(&peer->endpoint_cache, GFP_KERNEL)) -+ goto err_1; -+ if (wg_packet_queue_init(&peer->tx_queue, wg_packet_tx_worker, false, -+ MAX_QUEUED_PACKETS)) -+ goto err_2; -+ if (wg_packet_queue_init(&peer->rx_queue, NULL, false, -+ MAX_QUEUED_PACKETS)) -+ goto err_3; -+ -+ peer->internal_id = atomic64_inc_return(&peer_counter); -+ peer->serial_work_cpu = nr_cpumask_bits; -+ wg_cookie_init(&peer->latest_cookie); -+ wg_timers_init(peer); -+ wg_cookie_checker_precompute_peer_keys(peer); -+ spin_lock_init(&peer->keypairs.keypair_update_lock); -+ INIT_WORK(&peer->transmit_handshake_work, -+ wg_packet_handshake_send_worker); -+ rwlock_init(&peer->endpoint_lock); -+ kref_init(&peer->refcount); -+ skb_queue_head_init(&peer->staged_packet_queue); -+ wg_noise_reset_last_sent_handshake(&peer->last_sent_handshake); -+ set_bit(NAPI_STATE_NO_BUSY_POLL, &peer->napi.state); -+ netif_napi_add(wg->dev, &peer->napi, wg_packet_rx_poll, -+ NAPI_POLL_WEIGHT); -+ napi_enable(&peer->napi); -+ list_add_tail(&peer->peer_list, &wg->peer_list); -+ INIT_LIST_HEAD(&peer->allowedips_list); -+ wg_pubkey_hashtable_add(wg->peer_hashtable, peer); -+ ++wg->num_peers; -+ pr_debug("%s: Peer %llu created\n", wg->dev->name, peer->internal_id); -+ return peer; -+ -+err_3: -+ wg_packet_queue_free(&peer->tx_queue, false); -+err_2: -+ dst_cache_destroy(&peer->endpoint_cache); -+err_1: -+ kfree(peer); -+ return ERR_PTR(ret); -+} -+ -+struct wg_peer *wg_peer_get_maybe_zero(struct wg_peer *peer) -+{ -+ RCU_LOCKDEP_WARN(!rcu_read_lock_bh_held(), -+ "Taking peer reference without holding the RCU read lock"); -+ if (unlikely(!peer || !kref_get_unless_zero(&peer->refcount))) -+ return NULL; -+ return peer; -+} -+ -+static void peer_make_dead(struct wg_peer *peer) -+{ -+ /* Remove from configuration-time lookup structures. */ -+ list_del_init(&peer->peer_list); -+ wg_allowedips_remove_by_peer(&peer->device->peer_allowedips, peer, -+ &peer->device->device_update_lock); -+ wg_pubkey_hashtable_remove(peer->device->peer_hashtable, peer); -+ -+ /* Mark as dead, so that we don't allow jumping contexts after. */ -+ WRITE_ONCE(peer->is_dead, true); -+ -+ /* The caller must now synchronize_rcu() for this to take effect. */ -+} -+ -+static void peer_remove_after_dead(struct wg_peer *peer) -+{ -+ WARN_ON(!peer->is_dead); -+ -+ /* No more keypairs can be created for this peer, since is_dead protects -+ * add_new_keypair, so we can now destroy existing ones. -+ */ -+ wg_noise_keypairs_clear(&peer->keypairs); -+ -+ /* Destroy all ongoing timers that were in-flight at the beginning of -+ * this function. 
-+ */ -+ wg_timers_stop(peer); -+ -+ /* The transition between packet encryption/decryption queues isn't -+ * guarded by is_dead, but each reference's life is strictly bounded by -+ * two generations: once for parallel crypto and once for serial -+ * ingestion, so we can simply flush twice, and be sure that we no -+ * longer have references inside these queues. -+ */ -+ -+ /* a) For encrypt/decrypt. */ -+ flush_workqueue(peer->device->packet_crypt_wq); -+ /* b.1) For send (but not receive, since that's napi). */ -+ flush_workqueue(peer->device->packet_crypt_wq); -+ /* b.2.1) For receive (but not send, since that's wq). */ -+ napi_disable(&peer->napi); -+ /* b.2.1) It's now safe to remove the napi struct, which must be done -+ * here from process context. -+ */ -+ netif_napi_del(&peer->napi); -+ -+ /* Ensure any workstructs we own (like transmit_handshake_work or -+ * clear_peer_work) no longer are in use. -+ */ -+ flush_workqueue(peer->device->handshake_send_wq); -+ -+ /* After the above flushes, a peer might still be active in a few -+ * different contexts: 1) from xmit(), before hitting is_dead and -+ * returning, 2) from wg_packet_consume_data(), before hitting is_dead -+ * and returning, 3) from wg_receive_handshake_packet() after a point -+ * where it has processed an incoming handshake packet, but where -+ * all calls to pass it off to timers fails because of is_dead. We won't -+ * have new references in (1) eventually, because we're removed from -+ * allowedips; we won't have new references in (2) eventually, because -+ * wg_index_hashtable_lookup will always return NULL, since we removed -+ * all existing keypairs and no more can be created; we won't have new -+ * references in (3) eventually, because we're removed from the pubkey -+ * hash table, which allows for a maximum of one handshake response, -+ * via the still-uncleared index hashtable entry, but not more than one, -+ * and in wg_cookie_message_consume, the lookup eventually gets a peer -+ * with a refcount of zero, so no new reference is taken. -+ */ -+ -+ --peer->device->num_peers; -+ wg_peer_put(peer); -+} -+ -+/* We have a separate "remove" function make sure that all active places where -+ * a peer is currently operating will eventually come to an end and not pass -+ * their reference onto another context. -+ */ -+void wg_peer_remove(struct wg_peer *peer) -+{ -+ if (unlikely(!peer)) -+ return; -+ lockdep_assert_held(&peer->device->device_update_lock); -+ -+ peer_make_dead(peer); -+ synchronize_rcu(); -+ peer_remove_after_dead(peer); -+} -+ -+void wg_peer_remove_all(struct wg_device *wg) -+{ -+ struct wg_peer *peer, *temp; -+ LIST_HEAD(dead_peers); -+ -+ lockdep_assert_held(&wg->device_update_lock); -+ -+ /* Avoid having to traverse individually for each one. */ -+ wg_allowedips_free(&wg->peer_allowedips, &wg->device_update_lock); -+ -+ list_for_each_entry_safe(peer, temp, &wg->peer_list, peer_list) { -+ peer_make_dead(peer); -+ list_add_tail(&peer->peer_list, &dead_peers); -+ } -+ synchronize_rcu(); -+ list_for_each_entry_safe(peer, temp, &dead_peers, peer_list) -+ peer_remove_after_dead(peer); -+} -+ -+static void rcu_release(struct rcu_head *rcu) -+{ -+ struct wg_peer *peer = container_of(rcu, struct wg_peer, rcu); -+ -+ dst_cache_destroy(&peer->endpoint_cache); -+ wg_packet_queue_free(&peer->rx_queue, false); -+ wg_packet_queue_free(&peer->tx_queue, false); -+ -+ /* The final zeroing takes care of clearing any remaining handshake key -+ * material and other potentially sensitive information. 
-+ */ -+ kzfree(peer); -+} -+ -+static void kref_release(struct kref *refcount) -+{ -+ struct wg_peer *peer = container_of(refcount, struct wg_peer, refcount); -+ -+ pr_debug("%s: Peer %llu (%pISpfsc) destroyed\n", -+ peer->device->dev->name, peer->internal_id, -+ &peer->endpoint.addr); -+ -+ /* Remove ourself from dynamic runtime lookup structures, now that the -+ * last reference is gone. -+ */ -+ wg_index_hashtable_remove(peer->device->index_hashtable, -+ &peer->handshake.entry); -+ -+ /* Remove any lingering packets that didn't have a chance to be -+ * transmitted. -+ */ -+ wg_packet_purge_staged_packets(peer); -+ -+ /* Free the memory used. */ -+ call_rcu(&peer->rcu, rcu_release); -+} -+ -+void wg_peer_put(struct wg_peer *peer) -+{ -+ if (unlikely(!peer)) -+ return; -+ kref_put(&peer->refcount, kref_release); -+} ---- /dev/null -+++ b/drivers/net/wireguard/peer.h -@@ -0,0 +1,83 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. -+ */ -+ -+#ifndef _WG_PEER_H -+#define _WG_PEER_H -+ -+#include "device.h" -+#include "noise.h" -+#include "cookie.h" -+ -+#include -+#include -+#include -+#include -+#include -+ -+struct wg_device; -+ -+struct endpoint { -+ union { -+ struct sockaddr addr; -+ struct sockaddr_in addr4; -+ struct sockaddr_in6 addr6; -+ }; -+ union { -+ struct { -+ struct in_addr src4; -+ /* Essentially the same as addr6->scope_id */ -+ int src_if4; -+ }; -+ struct in6_addr src6; -+ }; -+}; -+ -+struct wg_peer { -+ struct wg_device *device; -+ struct crypt_queue tx_queue, rx_queue; -+ struct sk_buff_head staged_packet_queue; -+ int serial_work_cpu; -+ struct noise_keypairs keypairs; -+ struct endpoint endpoint; -+ struct dst_cache endpoint_cache; -+ rwlock_t endpoint_lock; -+ struct noise_handshake handshake; -+ atomic64_t last_sent_handshake; -+ struct work_struct transmit_handshake_work, clear_peer_work; -+ struct cookie latest_cookie; -+ struct hlist_node pubkey_hash; -+ u64 rx_bytes, tx_bytes; -+ struct timer_list timer_retransmit_handshake, timer_send_keepalive; -+ struct timer_list timer_new_handshake, timer_zero_key_material; -+ struct timer_list timer_persistent_keepalive; -+ unsigned int timer_handshake_attempts; -+ u16 persistent_keepalive_interval; -+ bool timer_need_another_keepalive; -+ bool sent_lastminute_handshake; -+ struct timespec64 walltime_last_handshake; -+ struct kref refcount; -+ struct rcu_head rcu; -+ struct list_head peer_list; -+ struct list_head allowedips_list; -+ u64 internal_id; -+ struct napi_struct napi; -+ bool is_dead; -+}; -+ -+struct wg_peer *wg_peer_create(struct wg_device *wg, -+ const u8 public_key[NOISE_PUBLIC_KEY_LEN], -+ const u8 preshared_key[NOISE_SYMMETRIC_KEY_LEN]); -+ -+struct wg_peer *__must_check wg_peer_get_maybe_zero(struct wg_peer *peer); -+static inline struct wg_peer *wg_peer_get(struct wg_peer *peer) -+{ -+ kref_get(&peer->refcount); -+ return peer; -+} -+void wg_peer_put(struct wg_peer *peer); -+void wg_peer_remove(struct wg_peer *peer); -+void wg_peer_remove_all(struct wg_device *wg); -+ -+#endif /* _WG_PEER_H */ ---- /dev/null -+++ b/drivers/net/wireguard/peerlookup.c -@@ -0,0 +1,221 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
-+ */ -+ -+#include "peerlookup.h" -+#include "peer.h" -+#include "noise.h" -+ -+static struct hlist_head *pubkey_bucket(struct pubkey_hashtable *table, -+ const u8 pubkey[NOISE_PUBLIC_KEY_LEN]) -+{ -+ /* siphash gives us a secure 64bit number based on a random key. Since -+ * the bits are uniformly distributed, we can then mask off to get the -+ * bits we need. -+ */ -+ const u64 hash = siphash(pubkey, NOISE_PUBLIC_KEY_LEN, &table->key); -+ -+ return &table->hashtable[hash & (HASH_SIZE(table->hashtable) - 1)]; -+} -+ -+struct pubkey_hashtable *wg_pubkey_hashtable_alloc(void) -+{ -+ struct pubkey_hashtable *table = kvmalloc(sizeof(*table), GFP_KERNEL); -+ -+ if (!table) -+ return NULL; -+ -+ get_random_bytes(&table->key, sizeof(table->key)); -+ hash_init(table->hashtable); -+ mutex_init(&table->lock); -+ return table; -+} -+ -+void wg_pubkey_hashtable_add(struct pubkey_hashtable *table, -+ struct wg_peer *peer) -+{ -+ mutex_lock(&table->lock); -+ hlist_add_head_rcu(&peer->pubkey_hash, -+ pubkey_bucket(table, peer->handshake.remote_static)); -+ mutex_unlock(&table->lock); -+} -+ -+void wg_pubkey_hashtable_remove(struct pubkey_hashtable *table, -+ struct wg_peer *peer) -+{ -+ mutex_lock(&table->lock); -+ hlist_del_init_rcu(&peer->pubkey_hash); -+ mutex_unlock(&table->lock); -+} -+ -+/* Returns a strong reference to a peer */ -+struct wg_peer * -+wg_pubkey_hashtable_lookup(struct pubkey_hashtable *table, -+ const u8 pubkey[NOISE_PUBLIC_KEY_LEN]) -+{ -+ struct wg_peer *iter_peer, *peer = NULL; -+ -+ rcu_read_lock_bh(); -+ hlist_for_each_entry_rcu_bh(iter_peer, pubkey_bucket(table, pubkey), -+ pubkey_hash) { -+ if (!memcmp(pubkey, iter_peer->handshake.remote_static, -+ NOISE_PUBLIC_KEY_LEN)) { -+ peer = iter_peer; -+ break; -+ } -+ } -+ peer = wg_peer_get_maybe_zero(peer); -+ rcu_read_unlock_bh(); -+ return peer; -+} -+ -+static struct hlist_head *index_bucket(struct index_hashtable *table, -+ const __le32 index) -+{ -+ /* Since the indices are random and thus all bits are uniformly -+ * distributed, we can find its bucket simply by masking. -+ */ -+ return &table->hashtable[(__force u32)index & -+ (HASH_SIZE(table->hashtable) - 1)]; -+} -+ -+struct index_hashtable *wg_index_hashtable_alloc(void) -+{ -+ struct index_hashtable *table = kvmalloc(sizeof(*table), GFP_KERNEL); -+ -+ if (!table) -+ return NULL; -+ -+ hash_init(table->hashtable); -+ spin_lock_init(&table->lock); -+ return table; -+} -+ -+/* At the moment, we limit ourselves to 2^20 total peers, which generally might -+ * amount to 2^20*3 items in this hashtable. The algorithm below works by -+ * picking a random number and testing it. We can see that these limits mean we -+ * usually succeed pretty quickly: -+ * -+ * >>> def calculation(tries, size): -+ * ... return (size / 2**32)**(tries - 1) * (1 - (size / 2**32)) -+ * ... -+ * >>> calculation(1, 2**20 * 3) -+ * 0.999267578125 -+ * >>> calculation(2, 2**20 * 3) -+ * 0.0007318854331970215 -+ * >>> calculation(3, 2**20 * 3) -+ * 5.360489012673497e-07 -+ * >>> calculation(4, 2**20 * 3) -+ * 3.9261394135792216e-10 -+ * -+ * At the moment, we don't do any masking, so this algorithm isn't exactly -+ * constant time in either the random guessing or in the hash list lookup. We -+ * could require a minimum of 3 tries, which would successfully mask the -+ * guessing. this would not, however, help with the growing hash lengths, which -+ * is another thing to consider moving forward. 
-+ */ -+ -+__le32 wg_index_hashtable_insert(struct index_hashtable *table, -+ struct index_hashtable_entry *entry) -+{ -+ struct index_hashtable_entry *existing_entry; -+ -+ spin_lock_bh(&table->lock); -+ hlist_del_init_rcu(&entry->index_hash); -+ spin_unlock_bh(&table->lock); -+ -+ rcu_read_lock_bh(); -+ -+search_unused_slot: -+ /* First we try to find an unused slot, randomly, while unlocked. */ -+ entry->index = (__force __le32)get_random_u32(); -+ hlist_for_each_entry_rcu_bh(existing_entry, -+ index_bucket(table, entry->index), -+ index_hash) { -+ if (existing_entry->index == entry->index) -+ /* If it's already in use, we continue searching. */ -+ goto search_unused_slot; -+ } -+ -+ /* Once we've found an unused slot, we lock it, and then double-check -+ * that nobody else stole it from us. -+ */ -+ spin_lock_bh(&table->lock); -+ hlist_for_each_entry_rcu_bh(existing_entry, -+ index_bucket(table, entry->index), -+ index_hash) { -+ if (existing_entry->index == entry->index) { -+ spin_unlock_bh(&table->lock); -+ /* If it was stolen, we start over. */ -+ goto search_unused_slot; -+ } -+ } -+ /* Otherwise, we know we have it exclusively (since we're locked), -+ * so we insert. -+ */ -+ hlist_add_head_rcu(&entry->index_hash, -+ index_bucket(table, entry->index)); -+ spin_unlock_bh(&table->lock); -+ -+ rcu_read_unlock_bh(); -+ -+ return entry->index; -+} -+ -+bool wg_index_hashtable_replace(struct index_hashtable *table, -+ struct index_hashtable_entry *old, -+ struct index_hashtable_entry *new) -+{ -+ if (unlikely(hlist_unhashed(&old->index_hash))) -+ return false; -+ spin_lock_bh(&table->lock); -+ new->index = old->index; -+ hlist_replace_rcu(&old->index_hash, &new->index_hash); -+ -+ /* Calling init here NULLs out index_hash, and in fact after this -+ * function returns, it's theoretically possible for this to get -+ * reinserted elsewhere. That means the RCU lookup below might either -+ * terminate early or jump between buckets, in which case the packet -+ * simply gets dropped, which isn't terrible. -+ */ -+ INIT_HLIST_NODE(&old->index_hash); -+ spin_unlock_bh(&table->lock); -+ return true; -+} -+ -+void wg_index_hashtable_remove(struct index_hashtable *table, -+ struct index_hashtable_entry *entry) -+{ -+ spin_lock_bh(&table->lock); -+ hlist_del_init_rcu(&entry->index_hash); -+ spin_unlock_bh(&table->lock); -+} -+ -+/* Returns a strong reference to a entry->peer */ -+struct index_hashtable_entry * -+wg_index_hashtable_lookup(struct index_hashtable *table, -+ const enum index_hashtable_type type_mask, -+ const __le32 index, struct wg_peer **peer) -+{ -+ struct index_hashtable_entry *iter_entry, *entry = NULL; -+ -+ rcu_read_lock_bh(); -+ hlist_for_each_entry_rcu_bh(iter_entry, index_bucket(table, index), -+ index_hash) { -+ if (iter_entry->index == index) { -+ if (likely(iter_entry->type & type_mask)) -+ entry = iter_entry; -+ break; -+ } -+ } -+ if (likely(entry)) { -+ entry->peer = wg_peer_get_maybe_zero(entry->peer); -+ if (likely(entry->peer)) -+ *peer = entry->peer; -+ else -+ entry = NULL; -+ } -+ rcu_read_unlock_bh(); -+ return entry; -+} ---- /dev/null -+++ b/drivers/net/wireguard/peerlookup.h -@@ -0,0 +1,64 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
-+ */ -+ -+#ifndef _WG_PEERLOOKUP_H -+#define _WG_PEERLOOKUP_H -+ -+#include "messages.h" -+ -+#include -+#include -+#include -+ -+struct wg_peer; -+ -+struct pubkey_hashtable { -+ /* TODO: move to rhashtable */ -+ DECLARE_HASHTABLE(hashtable, 11); -+ siphash_key_t key; -+ struct mutex lock; -+}; -+ -+struct pubkey_hashtable *wg_pubkey_hashtable_alloc(void); -+void wg_pubkey_hashtable_add(struct pubkey_hashtable *table, -+ struct wg_peer *peer); -+void wg_pubkey_hashtable_remove(struct pubkey_hashtable *table, -+ struct wg_peer *peer); -+struct wg_peer * -+wg_pubkey_hashtable_lookup(struct pubkey_hashtable *table, -+ const u8 pubkey[NOISE_PUBLIC_KEY_LEN]); -+ -+struct index_hashtable { -+ /* TODO: move to rhashtable */ -+ DECLARE_HASHTABLE(hashtable, 13); -+ spinlock_t lock; -+}; -+ -+enum index_hashtable_type { -+ INDEX_HASHTABLE_HANDSHAKE = 1U << 0, -+ INDEX_HASHTABLE_KEYPAIR = 1U << 1 -+}; -+ -+struct index_hashtable_entry { -+ struct wg_peer *peer; -+ struct hlist_node index_hash; -+ enum index_hashtable_type type; -+ __le32 index; -+}; -+ -+struct index_hashtable *wg_index_hashtable_alloc(void); -+__le32 wg_index_hashtable_insert(struct index_hashtable *table, -+ struct index_hashtable_entry *entry); -+bool wg_index_hashtable_replace(struct index_hashtable *table, -+ struct index_hashtable_entry *old, -+ struct index_hashtable_entry *new); -+void wg_index_hashtable_remove(struct index_hashtable *table, -+ struct index_hashtable_entry *entry); -+struct index_hashtable_entry * -+wg_index_hashtable_lookup(struct index_hashtable *table, -+ const enum index_hashtable_type type_mask, -+ const __le32 index, struct wg_peer **peer); -+ -+#endif /* _WG_PEERLOOKUP_H */ ---- /dev/null -+++ b/drivers/net/wireguard/queueing.c -@@ -0,0 +1,53 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. -+ */ -+ -+#include "queueing.h" -+ -+struct multicore_worker __percpu * -+wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr) -+{ -+ int cpu; -+ struct multicore_worker __percpu *worker = -+ alloc_percpu(struct multicore_worker); -+ -+ if (!worker) -+ return NULL; -+ -+ for_each_possible_cpu(cpu) { -+ per_cpu_ptr(worker, cpu)->ptr = ptr; -+ INIT_WORK(&per_cpu_ptr(worker, cpu)->work, function); -+ } -+ return worker; -+} -+ -+int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function, -+ bool multicore, unsigned int len) -+{ -+ int ret; -+ -+ memset(queue, 0, sizeof(*queue)); -+ ret = ptr_ring_init(&queue->ring, len, GFP_KERNEL); -+ if (ret) -+ return ret; -+ if (function) { -+ if (multicore) { -+ queue->worker = wg_packet_percpu_multicore_worker_alloc( -+ function, queue); -+ if (!queue->worker) -+ return -ENOMEM; -+ } else { -+ INIT_WORK(&queue->work, function); -+ } -+ } -+ return 0; -+} -+ -+void wg_packet_queue_free(struct crypt_queue *queue, bool multicore) -+{ -+ if (multicore) -+ free_percpu(queue->worker); -+ WARN_ON(!__ptr_ring_empty(&queue->ring)); -+ ptr_ring_cleanup(&queue->ring, NULL); -+} ---- /dev/null -+++ b/drivers/net/wireguard/queueing.h -@@ -0,0 +1,197 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
-+ */ -+ -+#ifndef _WG_QUEUEING_H -+#define _WG_QUEUEING_H -+ -+#include "peer.h" -+#include -+#include -+#include -+#include -+ -+struct wg_device; -+struct wg_peer; -+struct multicore_worker; -+struct crypt_queue; -+struct sk_buff; -+ -+/* queueing.c APIs: */ -+int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function, -+ bool multicore, unsigned int len); -+void wg_packet_queue_free(struct crypt_queue *queue, bool multicore); -+struct multicore_worker __percpu * -+wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr); -+ -+/* receive.c APIs: */ -+void wg_packet_receive(struct wg_device *wg, struct sk_buff *skb); -+void wg_packet_handshake_receive_worker(struct work_struct *work); -+/* NAPI poll function: */ -+int wg_packet_rx_poll(struct napi_struct *napi, int budget); -+/* Workqueue worker: */ -+void wg_packet_decrypt_worker(struct work_struct *work); -+ -+/* send.c APIs: */ -+void wg_packet_send_queued_handshake_initiation(struct wg_peer *peer, -+ bool is_retry); -+void wg_packet_send_handshake_response(struct wg_peer *peer); -+void wg_packet_send_handshake_cookie(struct wg_device *wg, -+ struct sk_buff *initiating_skb, -+ __le32 sender_index); -+void wg_packet_send_keepalive(struct wg_peer *peer); -+void wg_packet_purge_staged_packets(struct wg_peer *peer); -+void wg_packet_send_staged_packets(struct wg_peer *peer); -+/* Workqueue workers: */ -+void wg_packet_handshake_send_worker(struct work_struct *work); -+void wg_packet_tx_worker(struct work_struct *work); -+void wg_packet_encrypt_worker(struct work_struct *work); -+ -+enum packet_state { -+ PACKET_STATE_UNCRYPTED, -+ PACKET_STATE_CRYPTED, -+ PACKET_STATE_DEAD -+}; -+ -+struct packet_cb { -+ u64 nonce; -+ struct noise_keypair *keypair; -+ atomic_t state; -+ u32 mtu; -+ u8 ds; -+}; -+ -+#define PACKET_CB(skb) ((struct packet_cb *)((skb)->cb)) -+#define PACKET_PEER(skb) (PACKET_CB(skb)->keypair->entry.peer) -+ -+/* Returns either the correct skb->protocol value, or 0 if invalid. 
*/ -+static inline __be16 wg_skb_examine_untrusted_ip_hdr(struct sk_buff *skb) -+{ -+ if (skb_network_header(skb) >= skb->head && -+ (skb_network_header(skb) + sizeof(struct iphdr)) <= -+ skb_tail_pointer(skb) && -+ ip_hdr(skb)->version == 4) -+ return htons(ETH_P_IP); -+ if (skb_network_header(skb) >= skb->head && -+ (skb_network_header(skb) + sizeof(struct ipv6hdr)) <= -+ skb_tail_pointer(skb) && -+ ipv6_hdr(skb)->version == 6) -+ return htons(ETH_P_IPV6); -+ return 0; -+} -+ -+static inline void wg_reset_packet(struct sk_buff *skb) -+{ -+ const int pfmemalloc = skb->pfmemalloc; -+ -+ skb_scrub_packet(skb, true); -+ memset(&skb->headers_start, 0, -+ offsetof(struct sk_buff, headers_end) - -+ offsetof(struct sk_buff, headers_start)); -+ skb->pfmemalloc = pfmemalloc; -+ skb->queue_mapping = 0; -+ skb->nohdr = 0; -+ skb->peeked = 0; -+ skb->mac_len = 0; -+ skb->dev = NULL; -+#ifdef CONFIG_NET_SCHED -+ skb->tc_index = 0; -+#endif -+ skb_reset_redirect(skb); -+ skb->hdr_len = skb_headroom(skb); -+ skb_reset_mac_header(skb); -+ skb_reset_network_header(skb); -+ skb_reset_transport_header(skb); -+ skb_probe_transport_header(skb); -+ skb_reset_inner_headers(skb); -+} -+ -+static inline int wg_cpumask_choose_online(int *stored_cpu, unsigned int id) -+{ -+ unsigned int cpu = *stored_cpu, cpu_index, i; -+ -+ if (unlikely(cpu == nr_cpumask_bits || -+ !cpumask_test_cpu(cpu, cpu_online_mask))) { -+ cpu_index = id % cpumask_weight(cpu_online_mask); -+ cpu = cpumask_first(cpu_online_mask); -+ for (i = 0; i < cpu_index; ++i) -+ cpu = cpumask_next(cpu, cpu_online_mask); -+ *stored_cpu = cpu; -+ } -+ return cpu; -+} -+ -+/* This function is racy, in the sense that next is unlocked, so it could return -+ * the same CPU twice. A race-free version of this would be to instead store an -+ * atomic sequence number, do an increment-and-return, and then iterate through -+ * every possible CPU until we get to that index -- choose_cpu. However that's -+ * a bit slower, and it doesn't seem like this potential race actually -+ * introduces any performance loss, so we live with it. -+ */ -+static inline int wg_cpumask_next_online(int *next) -+{ -+ int cpu = *next; -+ -+ while (unlikely(!cpumask_test_cpu(cpu, cpu_online_mask))) -+ cpu = cpumask_next(cpu, cpu_online_mask) % nr_cpumask_bits; -+ *next = cpumask_next(cpu, cpu_online_mask) % nr_cpumask_bits; -+ return cpu; -+} -+ -+static inline int wg_queue_enqueue_per_device_and_peer( -+ struct crypt_queue *device_queue, struct crypt_queue *peer_queue, -+ struct sk_buff *skb, struct workqueue_struct *wq, int *next_cpu) -+{ -+ int cpu; -+ -+ atomic_set_release(&PACKET_CB(skb)->state, PACKET_STATE_UNCRYPTED); -+ /* We first queue this up for the peer ingestion, but the consumer -+ * will wait for the state to change to CRYPTED or DEAD before. -+ */ -+ if (unlikely(ptr_ring_produce_bh(&peer_queue->ring, skb))) -+ return -ENOSPC; -+ /* Then we queue it up in the device queue, which consumes the -+ * packet as soon as it can. -+ */ -+ cpu = wg_cpumask_next_online(next_cpu); -+ if (unlikely(ptr_ring_produce_bh(&device_queue->ring, skb))) -+ return -EPIPE; -+ queue_work_on(cpu, wq, &per_cpu_ptr(device_queue->worker, cpu)->work); -+ return 0; -+} -+ -+static inline void wg_queue_enqueue_per_peer(struct crypt_queue *queue, -+ struct sk_buff *skb, -+ enum packet_state state) -+{ -+ /* We take a reference, because as soon as we call atomic_set, the -+ * peer can be freed from below us. 
-+ */ -+ struct wg_peer *peer = wg_peer_get(PACKET_PEER(skb)); -+ -+ atomic_set_release(&PACKET_CB(skb)->state, state); -+ queue_work_on(wg_cpumask_choose_online(&peer->serial_work_cpu, -+ peer->internal_id), -+ peer->device->packet_crypt_wq, &queue->work); -+ wg_peer_put(peer); -+} -+ -+static inline void wg_queue_enqueue_per_peer_napi(struct sk_buff *skb, -+ enum packet_state state) -+{ -+ /* We take a reference, because as soon as we call atomic_set, the -+ * peer can be freed from below us. -+ */ -+ struct wg_peer *peer = wg_peer_get(PACKET_PEER(skb)); -+ -+ atomic_set_release(&PACKET_CB(skb)->state, state); -+ napi_schedule(&peer->napi); -+ wg_peer_put(peer); -+} -+ -+#ifdef DEBUG -+bool wg_packet_counter_selftest(void); -+#endif -+ -+#endif /* _WG_QUEUEING_H */ ---- /dev/null -+++ b/drivers/net/wireguard/ratelimiter.c -@@ -0,0 +1,223 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. -+ */ -+ -+#include "ratelimiter.h" -+#include -+#include -+#include -+#include -+ -+static struct kmem_cache *entry_cache; -+static hsiphash_key_t key; -+static spinlock_t table_lock = __SPIN_LOCK_UNLOCKED("ratelimiter_table_lock"); -+static DEFINE_MUTEX(init_lock); -+static u64 init_refcnt; /* Protected by init_lock, hence not atomic. */ -+static atomic_t total_entries = ATOMIC_INIT(0); -+static unsigned int max_entries, table_size; -+static void wg_ratelimiter_gc_entries(struct work_struct *); -+static DECLARE_DEFERRABLE_WORK(gc_work, wg_ratelimiter_gc_entries); -+static struct hlist_head *table_v4; -+#if IS_ENABLED(CONFIG_IPV6) -+static struct hlist_head *table_v6; -+#endif -+ -+struct ratelimiter_entry { -+ u64 last_time_ns, tokens, ip; -+ void *net; -+ spinlock_t lock; -+ struct hlist_node hash; -+ struct rcu_head rcu; -+}; -+ -+enum { -+ PACKETS_PER_SECOND = 20, -+ PACKETS_BURSTABLE = 5, -+ PACKET_COST = NSEC_PER_SEC / PACKETS_PER_SECOND, -+ TOKEN_MAX = PACKET_COST * PACKETS_BURSTABLE -+}; -+ -+static void entry_free(struct rcu_head *rcu) -+{ -+ kmem_cache_free(entry_cache, -+ container_of(rcu, struct ratelimiter_entry, rcu)); -+ atomic_dec(&total_entries); -+} -+ -+static void entry_uninit(struct ratelimiter_entry *entry) -+{ -+ hlist_del_rcu(&entry->hash); -+ call_rcu(&entry->rcu, entry_free); -+} -+ -+/* Calling this function with a NULL work uninits all entries. */ -+static void wg_ratelimiter_gc_entries(struct work_struct *work) -+{ -+ const u64 now = ktime_get_coarse_boottime_ns(); -+ struct ratelimiter_entry *entry; -+ struct hlist_node *temp; -+ unsigned int i; -+ -+ for (i = 0; i < table_size; ++i) { -+ spin_lock(&table_lock); -+ hlist_for_each_entry_safe(entry, temp, &table_v4[i], hash) { -+ if (unlikely(!work) || -+ now - entry->last_time_ns > NSEC_PER_SEC) -+ entry_uninit(entry); -+ } -+#if IS_ENABLED(CONFIG_IPV6) -+ hlist_for_each_entry_safe(entry, temp, &table_v6[i], hash) { -+ if (unlikely(!work) || -+ now - entry->last_time_ns > NSEC_PER_SEC) -+ entry_uninit(entry); -+ } -+#endif -+ spin_unlock(&table_lock); -+ if (likely(work)) -+ cond_resched(); -+ } -+ if (likely(work)) -+ queue_delayed_work(system_power_efficient_wq, &gc_work, HZ); -+} -+ -+bool wg_ratelimiter_allow(struct sk_buff *skb, struct net *net) -+{ -+ /* We only take the bottom half of the net pointer, so that we can hash -+ * 3 words in the end. This way, siphash's len param fits into the final -+ * u32, and we don't incur an extra round. 
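Plugging numbers into the enum above makes the rate limiter's budget concrete: PACKET_COST = 10^9 / 20 = 50,000,000 ns, i.e. one packet per 50 ms on average, and TOKEN_MAX = 5 * PACKET_COST = 250 ms of stored credit, i.e. at most five packets back to back from an idle source. A small self-contained sketch of the same refill-then-spend update on nanosecond timestamps (hypothetical names, plain userspace C rather than the locked kernel version in wg_ratelimiter_allow() below):

#include <stdbool.h>
#include <stdint.h>

#define NSEC_PER_SEC       1000000000ULL
#define PACKETS_PER_SECOND 20
#define PACKETS_BURSTABLE  5
#define PACKET_COST (NSEC_PER_SEC / PACKETS_PER_SECOND) /* 50 ms */
#define TOKEN_MAX   (PACKET_COST * PACKETS_BURSTABLE)   /* 250 ms */

struct bucket {
	uint64_t tokens;       /* nanoseconds of accumulated credit */
	uint64_t last_time_ns;
};

/* Refill by the elapsed time (capped at TOKEN_MAX), then spend one
 * PACKET_COST if there is enough credit; the burst lives in the cap,
 * not in the rate, as the comment inside wg_ratelimiter_allow() notes.
 */
static bool bucket_allow(struct bucket *b, uint64_t now_ns)
{
	uint64_t tokens = b->tokens + (now_ns - b->last_time_ns);

	if (tokens > TOKEN_MAX)
		tokens = TOKEN_MAX;
	b->last_time_ns = now_ns;
	if (tokens < PACKET_COST) {
		b->tokens = tokens;
		return false;
	}
	b->tokens = tokens - PACKET_COST;
	return true;
}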
-+ */ -+ const u32 net_word = (unsigned long)net; -+ struct ratelimiter_entry *entry; -+ struct hlist_head *bucket; -+ u64 ip; -+ -+ if (skb->protocol == htons(ETH_P_IP)) { -+ ip = (u64 __force)ip_hdr(skb)->saddr; -+ bucket = &table_v4[hsiphash_2u32(net_word, ip, &key) & -+ (table_size - 1)]; -+ } -+#if IS_ENABLED(CONFIG_IPV6) -+ else if (skb->protocol == htons(ETH_P_IPV6)) { -+ /* Only use 64 bits, so as to ratelimit the whole /64. */ -+ memcpy(&ip, &ipv6_hdr(skb)->saddr, sizeof(ip)); -+ bucket = &table_v6[hsiphash_3u32(net_word, ip >> 32, ip, &key) & -+ (table_size - 1)]; -+ } -+#endif -+ else -+ return false; -+ rcu_read_lock(); -+ hlist_for_each_entry_rcu(entry, bucket, hash) { -+ if (entry->net == net && entry->ip == ip) { -+ u64 now, tokens; -+ bool ret; -+ /* Quasi-inspired by nft_limit.c, but this is actually a -+ * slightly different algorithm. Namely, we incorporate -+ * the burst as part of the maximum tokens, rather than -+ * as part of the rate. -+ */ -+ spin_lock(&entry->lock); -+ now = ktime_get_coarse_boottime_ns(); -+ tokens = min_t(u64, TOKEN_MAX, -+ entry->tokens + now - -+ entry->last_time_ns); -+ entry->last_time_ns = now; -+ ret = tokens >= PACKET_COST; -+ entry->tokens = ret ? tokens - PACKET_COST : tokens; -+ spin_unlock(&entry->lock); -+ rcu_read_unlock(); -+ return ret; -+ } -+ } -+ rcu_read_unlock(); -+ -+ if (atomic_inc_return(&total_entries) > max_entries) -+ goto err_oom; -+ -+ entry = kmem_cache_alloc(entry_cache, GFP_KERNEL); -+ if (unlikely(!entry)) -+ goto err_oom; -+ -+ entry->net = net; -+ entry->ip = ip; -+ INIT_HLIST_NODE(&entry->hash); -+ spin_lock_init(&entry->lock); -+ entry->last_time_ns = ktime_get_coarse_boottime_ns(); -+ entry->tokens = TOKEN_MAX - PACKET_COST; -+ spin_lock(&table_lock); -+ hlist_add_head_rcu(&entry->hash, bucket); -+ spin_unlock(&table_lock); -+ return true; -+ -+err_oom: -+ atomic_dec(&total_entries); -+ return false; -+} -+ -+int wg_ratelimiter_init(void) -+{ -+ mutex_lock(&init_lock); -+ if (++init_refcnt != 1) -+ goto out; -+ -+ entry_cache = KMEM_CACHE(ratelimiter_entry, 0); -+ if (!entry_cache) -+ goto err; -+ -+ /* xt_hashlimit.c uses a slightly different algorithm for ratelimiting, -+ * but what it shares in common is that it uses a massive hashtable. So, -+ * we borrow their wisdom about good table sizes on different systems -+ * dependent on RAM. This calculation here comes from there. -+ */ -+ table_size = (totalram_pages() > (1U << 30) / PAGE_SIZE) ? 
8192 : -+ max_t(unsigned long, 16, roundup_pow_of_two( -+ (totalram_pages() << PAGE_SHIFT) / -+ (1U << 14) / sizeof(struct hlist_head))); -+ max_entries = table_size * 8; -+ -+ table_v4 = kvzalloc(table_size * sizeof(*table_v4), GFP_KERNEL); -+ if (unlikely(!table_v4)) -+ goto err_kmemcache; -+ -+#if IS_ENABLED(CONFIG_IPV6) -+ table_v6 = kvzalloc(table_size * sizeof(*table_v6), GFP_KERNEL); -+ if (unlikely(!table_v6)) { -+ kvfree(table_v4); -+ goto err_kmemcache; -+ } -+#endif -+ -+ queue_delayed_work(system_power_efficient_wq, &gc_work, HZ); -+ get_random_bytes(&key, sizeof(key)); -+out: -+ mutex_unlock(&init_lock); -+ return 0; -+ -+err_kmemcache: -+ kmem_cache_destroy(entry_cache); -+err: -+ --init_refcnt; -+ mutex_unlock(&init_lock); -+ return -ENOMEM; -+} -+ -+void wg_ratelimiter_uninit(void) -+{ -+ mutex_lock(&init_lock); -+ if (!init_refcnt || --init_refcnt) -+ goto out; -+ -+ cancel_delayed_work_sync(&gc_work); -+ wg_ratelimiter_gc_entries(NULL); -+ rcu_barrier(); -+ kvfree(table_v4); -+#if IS_ENABLED(CONFIG_IPV6) -+ kvfree(table_v6); -+#endif -+ kmem_cache_destroy(entry_cache); -+out: -+ mutex_unlock(&init_lock); -+} -+ -+#include "selftest/ratelimiter.c" ---- /dev/null -+++ b/drivers/net/wireguard/ratelimiter.h -@@ -0,0 +1,19 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. -+ */ -+ -+#ifndef _WG_RATELIMITER_H -+#define _WG_RATELIMITER_H -+ -+#include -+ -+int wg_ratelimiter_init(void); -+void wg_ratelimiter_uninit(void); -+bool wg_ratelimiter_allow(struct sk_buff *skb, struct net *net); -+ -+#ifdef DEBUG -+bool wg_ratelimiter_selftest(void); -+#endif -+ -+#endif /* _WG_RATELIMITER_H */ ---- /dev/null -+++ b/drivers/net/wireguard/receive.c -@@ -0,0 +1,595 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. -+ */ -+ -+#include "queueing.h" -+#include "device.h" -+#include "peer.h" -+#include "timers.h" -+#include "messages.h" -+#include "cookie.h" -+#include "socket.h" -+ -+#include -+#include -+#include -+#include -+ -+/* Must be called with bh disabled. 
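wg_ratelimiter_init() and wg_ratelimiter_uninit() above share global hashtables between all WireGuard devices through a mutex-protected integer refcount: only the first init builds the tables and only the matching last uninit tears them down, and the !init_refcnt test makes a stray extra uninit harmless (the selftest later in this file calls uninit one time too many on purpose to prove that). A stripped-down sketch of the same pattern with hypothetical names:

#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/errno.h>

static DEFINE_MUTEX(example_init_lock);
static u64 example_refcnt;  /* protected by example_init_lock */
static void *example_state; /* shared state, built on first init */

static int example_subsystem_up(void)
{
	int ret = 0;

	mutex_lock(&example_init_lock);
	if (++example_refcnt == 1) {
		example_state = kzalloc(64, GFP_KERNEL);
		if (!example_state) {
			--example_refcnt;
			ret = -ENOMEM;
		}
	}
	mutex_unlock(&example_init_lock);
	return ret;
}

static void example_subsystem_down(void)
{
	mutex_lock(&example_init_lock);
	/* The !refcnt test makes one uninit too many a no-op. */
	if (example_refcnt && !--example_refcnt) {
		kfree(example_state);
		example_state = NULL;
	}
	mutex_unlock(&example_init_lock);
}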
*/ -+static void update_rx_stats(struct wg_peer *peer, size_t len) -+{ -+ struct pcpu_sw_netstats *tstats = -+ get_cpu_ptr(peer->device->dev->tstats); -+ -+ u64_stats_update_begin(&tstats->syncp); -+ ++tstats->rx_packets; -+ tstats->rx_bytes += len; -+ peer->rx_bytes += len; -+ u64_stats_update_end(&tstats->syncp); -+ put_cpu_ptr(tstats); -+} -+ -+#define SKB_TYPE_LE32(skb) (((struct message_header *)(skb)->data)->type) -+ -+static size_t validate_header_len(struct sk_buff *skb) -+{ -+ if (unlikely(skb->len < sizeof(struct message_header))) -+ return 0; -+ if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_DATA) && -+ skb->len >= MESSAGE_MINIMUM_LENGTH) -+ return sizeof(struct message_data); -+ if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_HANDSHAKE_INITIATION) && -+ skb->len == sizeof(struct message_handshake_initiation)) -+ return sizeof(struct message_handshake_initiation); -+ if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_HANDSHAKE_RESPONSE) && -+ skb->len == sizeof(struct message_handshake_response)) -+ return sizeof(struct message_handshake_response); -+ if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_HANDSHAKE_COOKIE) && -+ skb->len == sizeof(struct message_handshake_cookie)) -+ return sizeof(struct message_handshake_cookie); -+ return 0; -+} -+ -+static int prepare_skb_header(struct sk_buff *skb, struct wg_device *wg) -+{ -+ size_t data_offset, data_len, header_len; -+ struct udphdr *udp; -+ -+ if (unlikely(wg_skb_examine_untrusted_ip_hdr(skb) != skb->protocol || -+ skb_transport_header(skb) < skb->head || -+ (skb_transport_header(skb) + sizeof(struct udphdr)) > -+ skb_tail_pointer(skb))) -+ return -EINVAL; /* Bogus IP header */ -+ udp = udp_hdr(skb); -+ data_offset = (u8 *)udp - skb->data; -+ if (unlikely(data_offset > U16_MAX || -+ data_offset + sizeof(struct udphdr) > skb->len)) -+ /* Packet has offset at impossible location or isn't big enough -+ * to have UDP fields. -+ */ -+ return -EINVAL; -+ data_len = ntohs(udp->len); -+ if (unlikely(data_len < sizeof(struct udphdr) || -+ data_len > skb->len - data_offset)) -+ /* UDP packet is reporting too small of a size or lying about -+ * its size. -+ */ -+ return -EINVAL; -+ data_len -= sizeof(struct udphdr); -+ data_offset = (u8 *)udp + sizeof(struct udphdr) - skb->data; -+ if (unlikely(!pskb_may_pull(skb, -+ data_offset + sizeof(struct message_header)) || -+ pskb_trim(skb, data_len + data_offset) < 0)) -+ return -EINVAL; -+ skb_pull(skb, data_offset); -+ if (unlikely(skb->len != data_len)) -+ /* Final len does not agree with calculated len */ -+ return -EINVAL; -+ header_len = validate_header_len(skb); -+ if (unlikely(!header_len)) -+ return -EINVAL; -+ __skb_push(skb, data_offset); -+ if (unlikely(!pskb_may_pull(skb, data_offset + header_len))) -+ return -EINVAL; -+ __skb_pull(skb, data_offset); -+ return 0; -+} -+ -+static void wg_receive_handshake_packet(struct wg_device *wg, -+ struct sk_buff *skb) -+{ -+ enum cookie_mac_state mac_state; -+ struct wg_peer *peer = NULL; -+ /* This is global, so that our load calculation applies to the whole -+ * system. We don't care about races with it at all. 
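prepare_skb_header() above shows the usual discipline for untrusted ingress: never dereference a header until pskb_may_pull() has guaranteed that many bytes are present and linear, and cross-check every length field the packet carries (here udp->len) against what the skb actually holds before trimming to it. A minimal sketch of that pattern for a hypothetical fixed-size application header at the start of the payload:

#include <linux/skbuff.h>

struct example_hdr {
	__le32 type;
	__le32 len; /* claimed length of the data following the header */
};

/* Returns the validated header, or NULL if the packet is shorter than
 * sizeof(*hdr) or the claimed length does not fit in the skb.
 */
static struct example_hdr *example_pull_hdr(struct sk_buff *skb)
{
	struct example_hdr *hdr;

	if (!pskb_may_pull(skb, sizeof(*hdr)))
		return NULL;
	hdr = (struct example_hdr *)skb->data;
	if (le32_to_cpu(hdr->len) > skb->len - sizeof(*hdr))
		return NULL;
	return hdr;
}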
-+ */ -+ static u64 last_under_load; -+ bool packet_needs_cookie; -+ bool under_load; -+ -+ if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_HANDSHAKE_COOKIE)) { -+ net_dbg_skb_ratelimited("%s: Receiving cookie response from %pISpfsc\n", -+ wg->dev->name, skb); -+ wg_cookie_message_consume( -+ (struct message_handshake_cookie *)skb->data, wg); -+ return; -+ } -+ -+ under_load = skb_queue_len(&wg->incoming_handshakes) >= -+ MAX_QUEUED_INCOMING_HANDSHAKES / 8; -+ if (under_load) -+ last_under_load = ktime_get_coarse_boottime_ns(); -+ else if (last_under_load) -+ under_load = !wg_birthdate_has_expired(last_under_load, 1); -+ mac_state = wg_cookie_validate_packet(&wg->cookie_checker, skb, -+ under_load); -+ if ((under_load && mac_state == VALID_MAC_WITH_COOKIE) || -+ (!under_load && mac_state == VALID_MAC_BUT_NO_COOKIE)) { -+ packet_needs_cookie = false; -+ } else if (under_load && mac_state == VALID_MAC_BUT_NO_COOKIE) { -+ packet_needs_cookie = true; -+ } else { -+ net_dbg_skb_ratelimited("%s: Invalid MAC of handshake, dropping packet from %pISpfsc\n", -+ wg->dev->name, skb); -+ return; -+ } -+ -+ switch (SKB_TYPE_LE32(skb)) { -+ case cpu_to_le32(MESSAGE_HANDSHAKE_INITIATION): { -+ struct message_handshake_initiation *message = -+ (struct message_handshake_initiation *)skb->data; -+ -+ if (packet_needs_cookie) { -+ wg_packet_send_handshake_cookie(wg, skb, -+ message->sender_index); -+ return; -+ } -+ peer = wg_noise_handshake_consume_initiation(message, wg); -+ if (unlikely(!peer)) { -+ net_dbg_skb_ratelimited("%s: Invalid handshake initiation from %pISpfsc\n", -+ wg->dev->name, skb); -+ return; -+ } -+ wg_socket_set_peer_endpoint_from_skb(peer, skb); -+ net_dbg_ratelimited("%s: Receiving handshake initiation from peer %llu (%pISpfsc)\n", -+ wg->dev->name, peer->internal_id, -+ &peer->endpoint.addr); -+ wg_packet_send_handshake_response(peer); -+ break; -+ } -+ case cpu_to_le32(MESSAGE_HANDSHAKE_RESPONSE): { -+ struct message_handshake_response *message = -+ (struct message_handshake_response *)skb->data; -+ -+ if (packet_needs_cookie) { -+ wg_packet_send_handshake_cookie(wg, skb, -+ message->sender_index); -+ return; -+ } -+ peer = wg_noise_handshake_consume_response(message, wg); -+ if (unlikely(!peer)) { -+ net_dbg_skb_ratelimited("%s: Invalid handshake response from %pISpfsc\n", -+ wg->dev->name, skb); -+ return; -+ } -+ wg_socket_set_peer_endpoint_from_skb(peer, skb); -+ net_dbg_ratelimited("%s: Receiving handshake response from peer %llu (%pISpfsc)\n", -+ wg->dev->name, peer->internal_id, -+ &peer->endpoint.addr); -+ if (wg_noise_handshake_begin_session(&peer->handshake, -+ &peer->keypairs)) { -+ wg_timers_session_derived(peer); -+ wg_timers_handshake_complete(peer); -+ /* Calling this function will either send any existing -+ * packets in the queue and not send a keepalive, which -+ * is the best case, Or, if there's nothing in the -+ * queue, it will send a keepalive, in order to give -+ * immediate confirmation of the session. 
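The under-load decision above has one second of hysteresis: the node counts as loaded while the handshake queue is at least one eighth full (MAX_QUEUED_INCOMING_HANDSHAKES / 8), and it keeps counting as loaded for a further second after the queue drains, so cookie enforcement does not flap while an attacker pulses traffic. The same decision in isolation, as a sketch assuming a helper with wg_birthdate_has_expired()-like semantics:

#include <stdbool.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

/* True once the stored timestamp is more than `seconds` old. */
static bool example_has_expired(uint64_t birthdate_ns, uint64_t seconds,
				uint64_t now_ns)
{
	return now_ns - birthdate_ns > seconds * NSEC_PER_SEC;
}

static bool example_under_load(unsigned int queue_len, unsigned int queue_max,
			       uint64_t *last_under_load_ns, uint64_t now_ns)
{
	bool under_load = queue_len >= queue_max / 8;

	if (under_load)
		*last_under_load_ns = now_ns;   /* still loaded: refresh mark */
	else if (*last_under_load_ns)
		/* recently loaded: stay strict for one more second */
		under_load = !example_has_expired(*last_under_load_ns, 1, now_ns);
	return under_load;
}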
-+ */ -+ wg_packet_send_keepalive(peer); -+ } -+ break; -+ } -+ } -+ -+ if (unlikely(!peer)) { -+ WARN(1, "Somehow a wrong type of packet wound up in the handshake queue!\n"); -+ return; -+ } -+ -+ local_bh_disable(); -+ update_rx_stats(peer, skb->len); -+ local_bh_enable(); -+ -+ wg_timers_any_authenticated_packet_received(peer); -+ wg_timers_any_authenticated_packet_traversal(peer); -+ wg_peer_put(peer); -+} -+ -+void wg_packet_handshake_receive_worker(struct work_struct *work) -+{ -+ struct wg_device *wg = container_of(work, struct multicore_worker, -+ work)->ptr; -+ struct sk_buff *skb; -+ -+ while ((skb = skb_dequeue(&wg->incoming_handshakes)) != NULL) { -+ wg_receive_handshake_packet(wg, skb); -+ dev_kfree_skb(skb); -+ cond_resched(); -+ } -+} -+ -+static void keep_key_fresh(struct wg_peer *peer) -+{ -+ struct noise_keypair *keypair; -+ bool send = false; -+ -+ if (peer->sent_lastminute_handshake) -+ return; -+ -+ rcu_read_lock_bh(); -+ keypair = rcu_dereference_bh(peer->keypairs.current_keypair); -+ if (likely(keypair && READ_ONCE(keypair->sending.is_valid)) && -+ keypair->i_am_the_initiator && -+ unlikely(wg_birthdate_has_expired(keypair->sending.birthdate, -+ REJECT_AFTER_TIME - KEEPALIVE_TIMEOUT - REKEY_TIMEOUT))) -+ send = true; -+ rcu_read_unlock_bh(); -+ -+ if (send) { -+ peer->sent_lastminute_handshake = true; -+ wg_packet_send_queued_handshake_initiation(peer, false); -+ } -+} -+ -+static bool decrypt_packet(struct sk_buff *skb, struct noise_symmetric_key *key) -+{ -+ struct scatterlist sg[MAX_SKB_FRAGS + 8]; -+ struct sk_buff *trailer; -+ unsigned int offset; -+ int num_frags; -+ -+ if (unlikely(!key)) -+ return false; -+ -+ if (unlikely(!READ_ONCE(key->is_valid) || -+ wg_birthdate_has_expired(key->birthdate, REJECT_AFTER_TIME) || -+ key->counter.receive.counter >= REJECT_AFTER_MESSAGES)) { -+ WRITE_ONCE(key->is_valid, false); -+ return false; -+ } -+ -+ PACKET_CB(skb)->nonce = -+ le64_to_cpu(((struct message_data *)skb->data)->counter); -+ -+ /* We ensure that the network header is part of the packet before we -+ * call skb_cow_data, so that there's no chance that data is removed -+ * from the skb, so that later we can extract the original endpoint. -+ */ -+ offset = skb->data - skb_network_header(skb); -+ skb_push(skb, offset); -+ num_frags = skb_cow_data(skb, 0, &trailer); -+ offset += sizeof(struct message_data); -+ skb_pull(skb, offset); -+ if (unlikely(num_frags < 0 || num_frags > ARRAY_SIZE(sg))) -+ return false; -+ -+ sg_init_table(sg, num_frags); -+ if (skb_to_sgvec(skb, sg, 0, skb->len) <= 0) -+ return false; -+ -+ if (!chacha20poly1305_decrypt_sg_inplace(sg, skb->len, NULL, 0, -+ PACKET_CB(skb)->nonce, -+ key->key)) -+ return false; -+ -+ /* Another ugly situation of pushing and pulling the header so as to -+ * keep endpoint information intact. 
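keep_key_fresh() on the receive side above fires a handshake only for the original initiator, and only once the current keypair's age passes REJECT_AFTER_TIME - KEEPALIVE_TIMEOUT - REKEY_TIMEOUT. Assuming the protocol's usual timer values (REJECT_AFTER_TIME = 180 s, KEEPALIVE_TIMEOUT = 10 s, REKEY_TIMEOUT = 5 s; the authoritative definitions live in messages.h elsewhere in this patch, not in this excerpt), that threshold is 165 s: late enough that the send-side rekey has normally happened already, early enough to renegotiate before the 180 s hard rejection limit. The margin, spelled out:

/* Assumed values of the WireGuard timer constants; see messages.h for
 * the definitions the driver actually uses.
 */
enum {
	EXAMPLE_REJECT_AFTER_TIME = 180,
	EXAMPLE_KEEPALIVE_TIMEOUT = 10,
	EXAMPLE_REKEY_TIMEOUT     = 5,
	/* 180 - 10 - 5 = 165 seconds before the receiver nudges a rekey */
	EXAMPLE_RX_REKEY_AGE = EXAMPLE_REJECT_AFTER_TIME -
			       EXAMPLE_KEEPALIVE_TIMEOUT -
			       EXAMPLE_REKEY_TIMEOUT,
};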
-+ */ -+ skb_push(skb, offset); -+ if (pskb_trim(skb, skb->len - noise_encrypted_len(0))) -+ return false; -+ skb_pull(skb, offset); -+ -+ return true; -+} -+ -+/* This is RFC6479, a replay detection bitmap algorithm that avoids bitshifts */ -+static bool counter_validate(union noise_counter *counter, u64 their_counter) -+{ -+ unsigned long index, index_current, top, i; -+ bool ret = false; -+ -+ spin_lock_bh(&counter->receive.lock); -+ -+ if (unlikely(counter->receive.counter >= REJECT_AFTER_MESSAGES + 1 || -+ their_counter >= REJECT_AFTER_MESSAGES)) -+ goto out; -+ -+ ++their_counter; -+ -+ if (unlikely((COUNTER_WINDOW_SIZE + their_counter) < -+ counter->receive.counter)) -+ goto out; -+ -+ index = their_counter >> ilog2(BITS_PER_LONG); -+ -+ if (likely(their_counter > counter->receive.counter)) { -+ index_current = counter->receive.counter >> ilog2(BITS_PER_LONG); -+ top = min_t(unsigned long, index - index_current, -+ COUNTER_BITS_TOTAL / BITS_PER_LONG); -+ for (i = 1; i <= top; ++i) -+ counter->receive.backtrack[(i + index_current) & -+ ((COUNTER_BITS_TOTAL / BITS_PER_LONG) - 1)] = 0; -+ counter->receive.counter = their_counter; -+ } -+ -+ index &= (COUNTER_BITS_TOTAL / BITS_PER_LONG) - 1; -+ ret = !test_and_set_bit(their_counter & (BITS_PER_LONG - 1), -+ &counter->receive.backtrack[index]); -+ -+out: -+ spin_unlock_bh(&counter->receive.lock); -+ return ret; -+} -+ -+#include "selftest/counter.c" -+ -+static void wg_packet_consume_data_done(struct wg_peer *peer, -+ struct sk_buff *skb, -+ struct endpoint *endpoint) -+{ -+ struct net_device *dev = peer->device->dev; -+ unsigned int len, len_before_trim; -+ struct wg_peer *routed_peer; -+ -+ wg_socket_set_peer_endpoint(peer, endpoint); -+ -+ if (unlikely(wg_noise_received_with_keypair(&peer->keypairs, -+ PACKET_CB(skb)->keypair))) { -+ wg_timers_handshake_complete(peer); -+ wg_packet_send_staged_packets(peer); -+ } -+ -+ keep_key_fresh(peer); -+ -+ wg_timers_any_authenticated_packet_received(peer); -+ wg_timers_any_authenticated_packet_traversal(peer); -+ -+ /* A packet with length 0 is a keepalive packet */ -+ if (unlikely(!skb->len)) { -+ update_rx_stats(peer, message_data_len(0)); -+ net_dbg_ratelimited("%s: Receiving keepalive packet from peer %llu (%pISpfsc)\n", -+ dev->name, peer->internal_id, -+ &peer->endpoint.addr); -+ goto packet_processed; -+ } -+ -+ wg_timers_data_received(peer); -+ -+ if (unlikely(skb_network_header(skb) < skb->head)) -+ goto dishonest_packet_size; -+ if (unlikely(!(pskb_network_may_pull(skb, sizeof(struct iphdr)) && -+ (ip_hdr(skb)->version == 4 || -+ (ip_hdr(skb)->version == 6 && -+ pskb_network_may_pull(skb, sizeof(struct ipv6hdr))))))) -+ goto dishonest_packet_type; -+ -+ skb->dev = dev; -+ /* We've already verified the Poly1305 auth tag, which means this packet -+ * was not modified in transit. We can therefore tell the networking -+ * stack that all checksums of every layer of encapsulation have already -+ * been checked "by the hardware" and therefore is unneccessary to check -+ * again in software. 
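counter_validate() above is the RFC 6479 sliding-window replay check: the window is an array of unsigned longs treated as one long bitmap, an incoming counter picks its word with counter / BITS_PER_LONG (written as a shift by ilog2(BITS_PER_LONG)) and its bit with counter % BITS_PER_LONG, and advancing the window only clears the words that newly enter it instead of shifting the whole bitmap. A worked example of the indexing, assuming 64-bit longs: counter 300 lands in word 4 (300 / 64) at bit 44 (300 % 64); if the highest counter accepted so far sat in word 3 (say 200), exactly one word is zeroed on the way forward, and anything more than the window size behind the maximum is rejected without touching the bitmap at all.

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_LONG 64

int main(void)
{
	uint64_t counter = 300, prev_max = 200;

	printf("word %llu, bit %llu\n",
	       (unsigned long long)(counter / BITS_PER_LONG),  /* 4 */
	       (unsigned long long)(counter % BITS_PER_LONG)); /* 44 */
	printf("words cleared while advancing from %llu: %llu\n",
	       (unsigned long long)prev_max,
	       (unsigned long long)(counter / BITS_PER_LONG -
				    prev_max / BITS_PER_LONG)); /* 1 */
	return 0;
}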
-+ */ -+ skb->ip_summed = CHECKSUM_UNNECESSARY; -+ skb->csum_level = ~0; /* All levels */ -+ skb->protocol = wg_skb_examine_untrusted_ip_hdr(skb); -+ if (skb->protocol == htons(ETH_P_IP)) { -+ len = ntohs(ip_hdr(skb)->tot_len); -+ if (unlikely(len < sizeof(struct iphdr))) -+ goto dishonest_packet_size; -+ if (INET_ECN_is_ce(PACKET_CB(skb)->ds)) -+ IP_ECN_set_ce(ip_hdr(skb)); -+ } else if (skb->protocol == htons(ETH_P_IPV6)) { -+ len = ntohs(ipv6_hdr(skb)->payload_len) + -+ sizeof(struct ipv6hdr); -+ if (INET_ECN_is_ce(PACKET_CB(skb)->ds)) -+ IP6_ECN_set_ce(skb, ipv6_hdr(skb)); -+ } else { -+ goto dishonest_packet_type; -+ } -+ -+ if (unlikely(len > skb->len)) -+ goto dishonest_packet_size; -+ len_before_trim = skb->len; -+ if (unlikely(pskb_trim(skb, len))) -+ goto packet_processed; -+ -+ routed_peer = wg_allowedips_lookup_src(&peer->device->peer_allowedips, -+ skb); -+ wg_peer_put(routed_peer); /* We don't need the extra reference. */ -+ -+ if (unlikely(routed_peer != peer)) -+ goto dishonest_packet_peer; -+ -+ if (unlikely(napi_gro_receive(&peer->napi, skb) == GRO_DROP)) { -+ ++dev->stats.rx_dropped; -+ net_dbg_ratelimited("%s: Failed to give packet to userspace from peer %llu (%pISpfsc)\n", -+ dev->name, peer->internal_id, -+ &peer->endpoint.addr); -+ } else { -+ update_rx_stats(peer, message_data_len(len_before_trim)); -+ } -+ return; -+ -+dishonest_packet_peer: -+ net_dbg_skb_ratelimited("%s: Packet has unallowed src IP (%pISc) from peer %llu (%pISpfsc)\n", -+ dev->name, skb, peer->internal_id, -+ &peer->endpoint.addr); -+ ++dev->stats.rx_errors; -+ ++dev->stats.rx_frame_errors; -+ goto packet_processed; -+dishonest_packet_type: -+ net_dbg_ratelimited("%s: Packet is neither ipv4 nor ipv6 from peer %llu (%pISpfsc)\n", -+ dev->name, peer->internal_id, &peer->endpoint.addr); -+ ++dev->stats.rx_errors; -+ ++dev->stats.rx_frame_errors; -+ goto packet_processed; -+dishonest_packet_size: -+ net_dbg_ratelimited("%s: Packet has incorrect size from peer %llu (%pISpfsc)\n", -+ dev->name, peer->internal_id, &peer->endpoint.addr); -+ ++dev->stats.rx_errors; -+ ++dev->stats.rx_length_errors; -+ goto packet_processed; -+packet_processed: -+ dev_kfree_skb(skb); -+} -+ -+int wg_packet_rx_poll(struct napi_struct *napi, int budget) -+{ -+ struct wg_peer *peer = container_of(napi, struct wg_peer, napi); -+ struct crypt_queue *queue = &peer->rx_queue; -+ struct noise_keypair *keypair; -+ struct endpoint endpoint; -+ enum packet_state state; -+ struct sk_buff *skb; -+ int work_done = 0; -+ bool free; -+ -+ if (unlikely(budget <= 0)) -+ return 0; -+ -+ while ((skb = __ptr_ring_peek(&queue->ring)) != NULL && -+ (state = atomic_read_acquire(&PACKET_CB(skb)->state)) != -+ PACKET_STATE_UNCRYPTED) { -+ __ptr_ring_discard_one(&queue->ring); -+ peer = PACKET_PEER(skb); -+ keypair = PACKET_CB(skb)->keypair; -+ free = true; -+ -+ if (unlikely(state != PACKET_STATE_CRYPTED)) -+ goto next; -+ -+ if (unlikely(!counter_validate(&keypair->receiving.counter, -+ PACKET_CB(skb)->nonce))) { -+ net_dbg_ratelimited("%s: Packet has invalid nonce %llu (max %llu)\n", -+ peer->device->dev->name, -+ PACKET_CB(skb)->nonce, -+ keypair->receiving.counter.receive.counter); -+ goto next; -+ } -+ -+ if (unlikely(wg_socket_endpoint_from_skb(&endpoint, skb))) -+ goto next; -+ -+ wg_reset_packet(skb); -+ wg_packet_consume_data_done(peer, skb, &endpoint); -+ free = false; -+ -+next: -+ wg_noise_keypair_put(keypair, false); -+ wg_peer_put(peer); -+ if (unlikely(free)) -+ dev_kfree_skb(skb); -+ -+ if (++work_done >= budget) -+ break; -+ } -+ 
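The routed_peer comparison in wg_packet_consume_data_done() above is the receive-side half of cryptokey routing: after decryption, the inner packet's source address is looked up in the same allowedips structure used for egress routing, and the packet is dropped (dishonest_packet_peer) unless that lookup resolves to the very peer whose key just decrypted it. A toy, self-contained sketch of the rule, using a flat table where the driver uses its trie and hypothetical names throughout:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct example_peer { int id; };

struct example_allowedip {
	uint32_t prefix; /* pre-masked network */
	uint32_t mask;
	struct example_peer *peer;
};

/* First-match lookup; the real allowedips code is a longest-prefix trie,
 * but any lookup with the same contract illustrates the check.
 */
static struct example_peer *
example_lookup_src(const struct example_allowedip *tbl, int n, uint32_t src)
{
	int i;

	for (i = 0; i < n; ++i)
		if ((src & tbl[i].mask) == tbl[i].prefix)
			return tbl[i].peer;
	return NULL;
}

/* Receive-side cryptokey routing: only accept the inner packet if the
 * decrypting peer owns its source address.
 */
static bool example_source_allowed(const struct example_allowedip *tbl, int n,
				   uint32_t inner_src,
				   struct example_peer *decrypting_peer)
{
	return example_lookup_src(tbl, n, inner_src) == decrypting_peer;
}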
-+ if (work_done < budget) -+ napi_complete_done(napi, work_done); -+ -+ return work_done; -+} -+ -+void wg_packet_decrypt_worker(struct work_struct *work) -+{ -+ struct crypt_queue *queue = container_of(work, struct multicore_worker, -+ work)->ptr; -+ struct sk_buff *skb; -+ -+ while ((skb = ptr_ring_consume_bh(&queue->ring)) != NULL) { -+ enum packet_state state = likely(decrypt_packet(skb, -+ &PACKET_CB(skb)->keypair->receiving)) ? -+ PACKET_STATE_CRYPTED : PACKET_STATE_DEAD; -+ wg_queue_enqueue_per_peer_napi(skb, state); -+ } -+} -+ -+static void wg_packet_consume_data(struct wg_device *wg, struct sk_buff *skb) -+{ -+ __le32 idx = ((struct message_data *)skb->data)->key_idx; -+ struct wg_peer *peer = NULL; -+ int ret; -+ -+ rcu_read_lock_bh(); -+ PACKET_CB(skb)->keypair = -+ (struct noise_keypair *)wg_index_hashtable_lookup( -+ wg->index_hashtable, INDEX_HASHTABLE_KEYPAIR, idx, -+ &peer); -+ if (unlikely(!wg_noise_keypair_get(PACKET_CB(skb)->keypair))) -+ goto err_keypair; -+ -+ if (unlikely(READ_ONCE(peer->is_dead))) -+ goto err; -+ -+ ret = wg_queue_enqueue_per_device_and_peer(&wg->decrypt_queue, -+ &peer->rx_queue, skb, -+ wg->packet_crypt_wq, -+ &wg->decrypt_queue.last_cpu); -+ if (unlikely(ret == -EPIPE)) -+ wg_queue_enqueue_per_peer_napi(skb, PACKET_STATE_DEAD); -+ if (likely(!ret || ret == -EPIPE)) { -+ rcu_read_unlock_bh(); -+ return; -+ } -+err: -+ wg_noise_keypair_put(PACKET_CB(skb)->keypair, false); -+err_keypair: -+ rcu_read_unlock_bh(); -+ wg_peer_put(peer); -+ dev_kfree_skb(skb); -+} -+ -+void wg_packet_receive(struct wg_device *wg, struct sk_buff *skb) -+{ -+ if (unlikely(prepare_skb_header(skb, wg) < 0)) -+ goto err; -+ switch (SKB_TYPE_LE32(skb)) { -+ case cpu_to_le32(MESSAGE_HANDSHAKE_INITIATION): -+ case cpu_to_le32(MESSAGE_HANDSHAKE_RESPONSE): -+ case cpu_to_le32(MESSAGE_HANDSHAKE_COOKIE): { -+ int cpu; -+ -+ if (skb_queue_len(&wg->incoming_handshakes) > -+ MAX_QUEUED_INCOMING_HANDSHAKES || -+ unlikely(!rng_is_initialized())) { -+ net_dbg_skb_ratelimited("%s: Dropping handshake packet from %pISpfsc\n", -+ wg->dev->name, skb); -+ goto err; -+ } -+ skb_queue_tail(&wg->incoming_handshakes, skb); -+ /* Queues up a call to packet_process_queued_handshake_ -+ * packets(skb): -+ */ -+ cpu = wg_cpumask_next_online(&wg->incoming_handshake_cpu); -+ queue_work_on(cpu, wg->handshake_receive_wq, -+ &per_cpu_ptr(wg->incoming_handshakes_worker, cpu)->work); -+ break; -+ } -+ case cpu_to_le32(MESSAGE_DATA): -+ PACKET_CB(skb)->ds = ip_tunnel_get_dsfield(ip_hdr(skb), skb); -+ wg_packet_consume_data(wg, skb); -+ break; -+ default: -+ net_dbg_skb_ratelimited("%s: Invalid packet from %pISpfsc\n", -+ wg->dev->name, skb); -+ goto err; -+ } -+ return; -+ -+err: -+ dev_kfree_skb(skb); -+} ---- /dev/null -+++ b/drivers/net/wireguard/selftest/allowedips.c -@@ -0,0 +1,683 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. -+ * -+ * This contains some basic static unit tests for the allowedips data structure. -+ * It also has two additional modes that are disabled and meant to be used by -+ * folks directly playing with this file. If you define the macro -+ * DEBUG_PRINT_TRIE_GRAPHVIZ to be 1, then every time there's a full tree in -+ * memory, it will be printed out as KERN_DEBUG in a format that can be passed -+ * to graphviz (the dot command) to visualize it. 
If you define the macro -+ * DEBUG_RANDOM_TRIE to be 1, then there will be an extremely costly set of -+ * randomized tests done against a trivial implementation, which may take -+ * upwards of a half-hour to complete. There's no set of users who should be -+ * enabling these, and the only developers that should go anywhere near these -+ * nobs are the ones who are reading this comment. -+ */ -+ -+#ifdef DEBUG -+ -+#include -+ -+static __init void swap_endian_and_apply_cidr(u8 *dst, const u8 *src, u8 bits, -+ u8 cidr) -+{ -+ swap_endian(dst, src, bits); -+ memset(dst + (cidr + 7) / 8, 0, bits / 8 - (cidr + 7) / 8); -+ if (cidr) -+ dst[(cidr + 7) / 8 - 1] &= ~0U << ((8 - (cidr % 8)) % 8); -+} -+ -+static __init void print_node(struct allowedips_node *node, u8 bits) -+{ -+ char *fmt_connection = KERN_DEBUG "\t\"%p/%d\" -> \"%p/%d\";\n"; -+ char *fmt_declaration = KERN_DEBUG -+ "\t\"%p/%d\"[style=%s, color=\"#%06x\"];\n"; -+ char *style = "dotted"; -+ u8 ip1[16], ip2[16]; -+ u32 color = 0; -+ -+ if (bits == 32) { -+ fmt_connection = KERN_DEBUG "\t\"%pI4/%d\" -> \"%pI4/%d\";\n"; -+ fmt_declaration = KERN_DEBUG -+ "\t\"%pI4/%d\"[style=%s, color=\"#%06x\"];\n"; -+ } else if (bits == 128) { -+ fmt_connection = KERN_DEBUG "\t\"%pI6/%d\" -> \"%pI6/%d\";\n"; -+ fmt_declaration = KERN_DEBUG -+ "\t\"%pI6/%d\"[style=%s, color=\"#%06x\"];\n"; -+ } -+ if (node->peer) { -+ hsiphash_key_t key = { { 0 } }; -+ -+ memcpy(&key, &node->peer, sizeof(node->peer)); -+ color = hsiphash_1u32(0xdeadbeef, &key) % 200 << 16 | -+ hsiphash_1u32(0xbabecafe, &key) % 200 << 8 | -+ hsiphash_1u32(0xabad1dea, &key) % 200; -+ style = "bold"; -+ } -+ swap_endian_and_apply_cidr(ip1, node->bits, bits, node->cidr); -+ printk(fmt_declaration, ip1, node->cidr, style, color); -+ if (node->bit[0]) { -+ swap_endian_and_apply_cidr(ip2, -+ rcu_dereference_raw(node->bit[0])->bits, bits, -+ node->cidr); -+ printk(fmt_connection, ip1, node->cidr, ip2, -+ rcu_dereference_raw(node->bit[0])->cidr); -+ print_node(rcu_dereference_raw(node->bit[0]), bits); -+ } -+ if (node->bit[1]) { -+ swap_endian_and_apply_cidr(ip2, -+ rcu_dereference_raw(node->bit[1])->bits, -+ bits, node->cidr); -+ printk(fmt_connection, ip1, node->cidr, ip2, -+ rcu_dereference_raw(node->bit[1])->cidr); -+ print_node(rcu_dereference_raw(node->bit[1]), bits); -+ } -+} -+ -+static __init void print_tree(struct allowedips_node __rcu *top, u8 bits) -+{ -+ printk(KERN_DEBUG "digraph trie {\n"); -+ print_node(rcu_dereference_raw(top), bits); -+ printk(KERN_DEBUG "}\n"); -+} -+ -+enum { -+ NUM_PEERS = 2000, -+ NUM_RAND_ROUTES = 400, -+ NUM_MUTATED_ROUTES = 100, -+ NUM_QUERIES = NUM_RAND_ROUTES * NUM_MUTATED_ROUTES * 30 -+}; -+ -+struct horrible_allowedips { -+ struct hlist_head head; -+}; -+ -+struct horrible_allowedips_node { -+ struct hlist_node table; -+ union nf_inet_addr ip; -+ union nf_inet_addr mask; -+ u8 ip_version; -+ void *value; -+}; -+ -+static __init void horrible_allowedips_init(struct horrible_allowedips *table) -+{ -+ INIT_HLIST_HEAD(&table->head); -+} -+ -+static __init void horrible_allowedips_free(struct horrible_allowedips *table) -+{ -+ struct horrible_allowedips_node *node; -+ struct hlist_node *h; -+ -+ hlist_for_each_entry_safe(node, h, &table->head, table) { -+ hlist_del(&node->table); -+ kfree(node); -+ } -+} -+ -+static __init inline union nf_inet_addr horrible_cidr_to_mask(u8 cidr) -+{ -+ union nf_inet_addr mask; -+ -+ memset(&mask, 0x00, 128 / 8); -+ memset(&mask, 0xff, cidr / 8); -+ if (cidr % 32) -+ mask.all[cidr / 32] = (__force u32)htonl( -+ 
(0xFFFFFFFFUL << (32 - (cidr % 32))) & 0xFFFFFFFFUL); -+ return mask; -+} -+ -+static __init inline u8 horrible_mask_to_cidr(union nf_inet_addr subnet) -+{ -+ return hweight32(subnet.all[0]) + hweight32(subnet.all[1]) + -+ hweight32(subnet.all[2]) + hweight32(subnet.all[3]); -+} -+ -+static __init inline void -+horrible_mask_self(struct horrible_allowedips_node *node) -+{ -+ if (node->ip_version == 4) { -+ node->ip.ip &= node->mask.ip; -+ } else if (node->ip_version == 6) { -+ node->ip.ip6[0] &= node->mask.ip6[0]; -+ node->ip.ip6[1] &= node->mask.ip6[1]; -+ node->ip.ip6[2] &= node->mask.ip6[2]; -+ node->ip.ip6[3] &= node->mask.ip6[3]; -+ } -+} -+ -+static __init inline bool -+horrible_match_v4(const struct horrible_allowedips_node *node, -+ struct in_addr *ip) -+{ -+ return (ip->s_addr & node->mask.ip) == node->ip.ip; -+} -+ -+static __init inline bool -+horrible_match_v6(const struct horrible_allowedips_node *node, -+ struct in6_addr *ip) -+{ -+ return (ip->in6_u.u6_addr32[0] & node->mask.ip6[0]) == -+ node->ip.ip6[0] && -+ (ip->in6_u.u6_addr32[1] & node->mask.ip6[1]) == -+ node->ip.ip6[1] && -+ (ip->in6_u.u6_addr32[2] & node->mask.ip6[2]) == -+ node->ip.ip6[2] && -+ (ip->in6_u.u6_addr32[3] & node->mask.ip6[3]) == node->ip.ip6[3]; -+} -+ -+static __init void -+horrible_insert_ordered(struct horrible_allowedips *table, -+ struct horrible_allowedips_node *node) -+{ -+ struct horrible_allowedips_node *other = NULL, *where = NULL; -+ u8 my_cidr = horrible_mask_to_cidr(node->mask); -+ -+ hlist_for_each_entry(other, &table->head, table) { -+ if (!memcmp(&other->mask, &node->mask, -+ sizeof(union nf_inet_addr)) && -+ !memcmp(&other->ip, &node->ip, -+ sizeof(union nf_inet_addr)) && -+ other->ip_version == node->ip_version) { -+ other->value = node->value; -+ kfree(node); -+ return; -+ } -+ where = other; -+ if (horrible_mask_to_cidr(other->mask) <= my_cidr) -+ break; -+ } -+ if (!other && !where) -+ hlist_add_head(&node->table, &table->head); -+ else if (!other) -+ hlist_add_behind(&node->table, &where->table); -+ else -+ hlist_add_before(&node->table, &where->table); -+} -+ -+static __init int -+horrible_allowedips_insert_v4(struct horrible_allowedips *table, -+ struct in_addr *ip, u8 cidr, void *value) -+{ -+ struct horrible_allowedips_node *node = kzalloc(sizeof(*node), -+ GFP_KERNEL); -+ -+ if (unlikely(!node)) -+ return -ENOMEM; -+ node->ip.in = *ip; -+ node->mask = horrible_cidr_to_mask(cidr); -+ node->ip_version = 4; -+ node->value = value; -+ horrible_mask_self(node); -+ horrible_insert_ordered(table, node); -+ return 0; -+} -+ -+static __init int -+horrible_allowedips_insert_v6(struct horrible_allowedips *table, -+ struct in6_addr *ip, u8 cidr, void *value) -+{ -+ struct horrible_allowedips_node *node = kzalloc(sizeof(*node), -+ GFP_KERNEL); -+ -+ if (unlikely(!node)) -+ return -ENOMEM; -+ node->ip.in6 = *ip; -+ node->mask = horrible_cidr_to_mask(cidr); -+ node->ip_version = 6; -+ node->value = value; -+ horrible_mask_self(node); -+ horrible_insert_ordered(table, node); -+ return 0; -+} -+ -+static __init void * -+horrible_allowedips_lookup_v4(struct horrible_allowedips *table, -+ struct in_addr *ip) -+{ -+ struct horrible_allowedips_node *node; -+ void *ret = NULL; -+ -+ hlist_for_each_entry(node, &table->head, table) { -+ if (node->ip_version != 4) -+ continue; -+ if (horrible_match_v4(node, ip)) { -+ ret = node->value; -+ break; -+ } -+ } -+ return ret; -+} -+ -+static __init void * -+horrible_allowedips_lookup_v6(struct horrible_allowedips *table, -+ struct in6_addr *ip) -+{ -+ 
struct horrible_allowedips_node *node; -+ void *ret = NULL; -+ -+ hlist_for_each_entry(node, &table->head, table) { -+ if (node->ip_version != 6) -+ continue; -+ if (horrible_match_v6(node, ip)) { -+ ret = node->value; -+ break; -+ } -+ } -+ return ret; -+} -+ -+static __init bool randomized_test(void) -+{ -+ unsigned int i, j, k, mutate_amount, cidr; -+ u8 ip[16], mutate_mask[16], mutated[16]; -+ struct wg_peer **peers, *peer; -+ struct horrible_allowedips h; -+ DEFINE_MUTEX(mutex); -+ struct allowedips t; -+ bool ret = false; -+ -+ mutex_init(&mutex); -+ -+ wg_allowedips_init(&t); -+ horrible_allowedips_init(&h); -+ -+ peers = kcalloc(NUM_PEERS, sizeof(*peers), GFP_KERNEL); -+ if (unlikely(!peers)) { -+ pr_err("allowedips random self-test malloc: FAIL\n"); -+ goto free; -+ } -+ for (i = 0; i < NUM_PEERS; ++i) { -+ peers[i] = kzalloc(sizeof(*peers[i]), GFP_KERNEL); -+ if (unlikely(!peers[i])) { -+ pr_err("allowedips random self-test malloc: FAIL\n"); -+ goto free; -+ } -+ kref_init(&peers[i]->refcount); -+ } -+ -+ mutex_lock(&mutex); -+ -+ for (i = 0; i < NUM_RAND_ROUTES; ++i) { -+ prandom_bytes(ip, 4); -+ cidr = prandom_u32_max(32) + 1; -+ peer = peers[prandom_u32_max(NUM_PEERS)]; -+ if (wg_allowedips_insert_v4(&t, (struct in_addr *)ip, cidr, -+ peer, &mutex) < 0) { -+ pr_err("allowedips random self-test malloc: FAIL\n"); -+ goto free_locked; -+ } -+ if (horrible_allowedips_insert_v4(&h, (struct in_addr *)ip, -+ cidr, peer) < 0) { -+ pr_err("allowedips random self-test malloc: FAIL\n"); -+ goto free_locked; -+ } -+ for (j = 0; j < NUM_MUTATED_ROUTES; ++j) { -+ memcpy(mutated, ip, 4); -+ prandom_bytes(mutate_mask, 4); -+ mutate_amount = prandom_u32_max(32); -+ for (k = 0; k < mutate_amount / 8; ++k) -+ mutate_mask[k] = 0xff; -+ mutate_mask[k] = 0xff -+ << ((8 - (mutate_amount % 8)) % 8); -+ for (; k < 4; ++k) -+ mutate_mask[k] = 0; -+ for (k = 0; k < 4; ++k) -+ mutated[k] = (mutated[k] & mutate_mask[k]) | -+ (~mutate_mask[k] & -+ prandom_u32_max(256)); -+ cidr = prandom_u32_max(32) + 1; -+ peer = peers[prandom_u32_max(NUM_PEERS)]; -+ if (wg_allowedips_insert_v4(&t, -+ (struct in_addr *)mutated, -+ cidr, peer, &mutex) < 0) { -+ pr_err("allowedips random malloc: FAIL\n"); -+ goto free_locked; -+ } -+ if (horrible_allowedips_insert_v4(&h, -+ (struct in_addr *)mutated, cidr, peer)) { -+ pr_err("allowedips random self-test malloc: FAIL\n"); -+ goto free_locked; -+ } -+ } -+ } -+ -+ for (i = 0; i < NUM_RAND_ROUTES; ++i) { -+ prandom_bytes(ip, 16); -+ cidr = prandom_u32_max(128) + 1; -+ peer = peers[prandom_u32_max(NUM_PEERS)]; -+ if (wg_allowedips_insert_v6(&t, (struct in6_addr *)ip, cidr, -+ peer, &mutex) < 0) { -+ pr_err("allowedips random self-test malloc: FAIL\n"); -+ goto free_locked; -+ } -+ if (horrible_allowedips_insert_v6(&h, (struct in6_addr *)ip, -+ cidr, peer) < 0) { -+ pr_err("allowedips random self-test malloc: FAIL\n"); -+ goto free_locked; -+ } -+ for (j = 0; j < NUM_MUTATED_ROUTES; ++j) { -+ memcpy(mutated, ip, 16); -+ prandom_bytes(mutate_mask, 16); -+ mutate_amount = prandom_u32_max(128); -+ for (k = 0; k < mutate_amount / 8; ++k) -+ mutate_mask[k] = 0xff; -+ mutate_mask[k] = 0xff -+ << ((8 - (mutate_amount % 8)) % 8); -+ for (; k < 4; ++k) -+ mutate_mask[k] = 0; -+ for (k = 0; k < 4; ++k) -+ mutated[k] = (mutated[k] & mutate_mask[k]) | -+ (~mutate_mask[k] & -+ prandom_u32_max(256)); -+ cidr = prandom_u32_max(128) + 1; -+ peer = peers[prandom_u32_max(NUM_PEERS)]; -+ if (wg_allowedips_insert_v6(&t, -+ (struct in6_addr *)mutated, -+ cidr, peer, &mutex) < 0) { -+ 
pr_err("allowedips random self-test malloc: FAIL\n"); -+ goto free_locked; -+ } -+ if (horrible_allowedips_insert_v6( -+ &h, (struct in6_addr *)mutated, cidr, -+ peer)) { -+ pr_err("allowedips random self-test malloc: FAIL\n"); -+ goto free_locked; -+ } -+ } -+ } -+ -+ mutex_unlock(&mutex); -+ -+ if (IS_ENABLED(DEBUG_PRINT_TRIE_GRAPHVIZ)) { -+ print_tree(t.root4, 32); -+ print_tree(t.root6, 128); -+ } -+ -+ for (i = 0; i < NUM_QUERIES; ++i) { -+ prandom_bytes(ip, 4); -+ if (lookup(t.root4, 32, ip) != -+ horrible_allowedips_lookup_v4(&h, (struct in_addr *)ip)) { -+ pr_err("allowedips random self-test: FAIL\n"); -+ goto free; -+ } -+ } -+ -+ for (i = 0; i < NUM_QUERIES; ++i) { -+ prandom_bytes(ip, 16); -+ if (lookup(t.root6, 128, ip) != -+ horrible_allowedips_lookup_v6(&h, (struct in6_addr *)ip)) { -+ pr_err("allowedips random self-test: FAIL\n"); -+ goto free; -+ } -+ } -+ ret = true; -+ -+free: -+ mutex_lock(&mutex); -+free_locked: -+ wg_allowedips_free(&t, &mutex); -+ mutex_unlock(&mutex); -+ horrible_allowedips_free(&h); -+ if (peers) { -+ for (i = 0; i < NUM_PEERS; ++i) -+ kfree(peers[i]); -+ } -+ kfree(peers); -+ return ret; -+} -+ -+static __init inline struct in_addr *ip4(u8 a, u8 b, u8 c, u8 d) -+{ -+ static struct in_addr ip; -+ u8 *split = (u8 *)&ip; -+ -+ split[0] = a; -+ split[1] = b; -+ split[2] = c; -+ split[3] = d; -+ return &ip; -+} -+ -+static __init inline struct in6_addr *ip6(u32 a, u32 b, u32 c, u32 d) -+{ -+ static struct in6_addr ip; -+ __be32 *split = (__be32 *)&ip; -+ -+ split[0] = cpu_to_be32(a); -+ split[1] = cpu_to_be32(b); -+ split[2] = cpu_to_be32(c); -+ split[3] = cpu_to_be32(d); -+ return &ip; -+} -+ -+static __init struct wg_peer *init_peer(void) -+{ -+ struct wg_peer *peer = kzalloc(sizeof(*peer), GFP_KERNEL); -+ -+ if (!peer) -+ return NULL; -+ kref_init(&peer->refcount); -+ INIT_LIST_HEAD(&peer->allowedips_list); -+ return peer; -+} -+ -+#define insert(version, mem, ipa, ipb, ipc, ipd, cidr) \ -+ wg_allowedips_insert_v##version(&t, ip##version(ipa, ipb, ipc, ipd), \ -+ cidr, mem, &mutex) -+ -+#define maybe_fail() do { \ -+ ++i; \ -+ if (!_s) { \ -+ pr_info("allowedips self-test %zu: FAIL\n", i); \ -+ success = false; \ -+ } \ -+ } while (0) -+ -+#define test(version, mem, ipa, ipb, ipc, ipd) do { \ -+ bool _s = lookup(t.root##version, (version) == 4 ? 32 : 128, \ -+ ip##version(ipa, ipb, ipc, ipd)) == (mem); \ -+ maybe_fail(); \ -+ } while (0) -+ -+#define test_negative(version, mem, ipa, ipb, ipc, ipd) do { \ -+ bool _s = lookup(t.root##version, (version) == 4 ? 
32 : 128, \ -+ ip##version(ipa, ipb, ipc, ipd)) != (mem); \ -+ maybe_fail(); \ -+ } while (0) -+ -+#define test_boolean(cond) do { \ -+ bool _s = (cond); \ -+ maybe_fail(); \ -+ } while (0) -+ -+bool __init wg_allowedips_selftest(void) -+{ -+ bool found_a = false, found_b = false, found_c = false, found_d = false, -+ found_e = false, found_other = false; -+ struct wg_peer *a = init_peer(), *b = init_peer(), *c = init_peer(), -+ *d = init_peer(), *e = init_peer(), *f = init_peer(), -+ *g = init_peer(), *h = init_peer(); -+ struct allowedips_node *iter_node; -+ bool success = false; -+ struct allowedips t; -+ DEFINE_MUTEX(mutex); -+ struct in6_addr ip; -+ size_t i = 0, count = 0; -+ __be64 part; -+ -+ mutex_init(&mutex); -+ mutex_lock(&mutex); -+ wg_allowedips_init(&t); -+ -+ if (!a || !b || !c || !d || !e || !f || !g || !h) { -+ pr_err("allowedips self-test malloc: FAIL\n"); -+ goto free; -+ } -+ -+ insert(4, a, 192, 168, 4, 0, 24); -+ insert(4, b, 192, 168, 4, 4, 32); -+ insert(4, c, 192, 168, 0, 0, 16); -+ insert(4, d, 192, 95, 5, 64, 27); -+ /* replaces previous entry, and maskself is required */ -+ insert(4, c, 192, 95, 5, 65, 27); -+ insert(6, d, 0x26075300, 0x60006b00, 0, 0xc05f0543, 128); -+ insert(6, c, 0x26075300, 0x60006b00, 0, 0, 64); -+ insert(4, e, 0, 0, 0, 0, 0); -+ insert(6, e, 0, 0, 0, 0, 0); -+ /* replaces previous entry */ -+ insert(6, f, 0, 0, 0, 0, 0); -+ insert(6, g, 0x24046800, 0, 0, 0, 32); -+ /* maskself is required */ -+ insert(6, h, 0x24046800, 0x40040800, 0xdeadbeef, 0xdeadbeef, 64); -+ insert(6, a, 0x24046800, 0x40040800, 0xdeadbeef, 0xdeadbeef, 128); -+ insert(6, c, 0x24446800, 0x40e40800, 0xdeaebeef, 0xdefbeef, 128); -+ insert(6, b, 0x24446800, 0xf0e40800, 0xeeaebeef, 0, 98); -+ insert(4, g, 64, 15, 112, 0, 20); -+ /* maskself is required */ -+ insert(4, h, 64, 15, 123, 211, 25); -+ insert(4, a, 10, 0, 0, 0, 25); -+ insert(4, b, 10, 0, 0, 128, 25); -+ insert(4, a, 10, 1, 0, 0, 30); -+ insert(4, b, 10, 1, 0, 4, 30); -+ insert(4, c, 10, 1, 0, 8, 29); -+ insert(4, d, 10, 1, 0, 16, 29); -+ -+ if (IS_ENABLED(DEBUG_PRINT_TRIE_GRAPHVIZ)) { -+ print_tree(t.root4, 32); -+ print_tree(t.root6, 128); -+ } -+ -+ success = true; -+ -+ test(4, a, 192, 168, 4, 20); -+ test(4, a, 192, 168, 4, 0); -+ test(4, b, 192, 168, 4, 4); -+ test(4, c, 192, 168, 200, 182); -+ test(4, c, 192, 95, 5, 68); -+ test(4, e, 192, 95, 5, 96); -+ test(6, d, 0x26075300, 0x60006b00, 0, 0xc05f0543); -+ test(6, c, 0x26075300, 0x60006b00, 0, 0xc02e01ee); -+ test(6, f, 0x26075300, 0x60006b01, 0, 0); -+ test(6, g, 0x24046800, 0x40040806, 0, 0x1006); -+ test(6, g, 0x24046800, 0x40040806, 0x1234, 0x5678); -+ test(6, f, 0x240467ff, 0x40040806, 0x1234, 0x5678); -+ test(6, f, 0x24046801, 0x40040806, 0x1234, 0x5678); -+ test(6, h, 0x24046800, 0x40040800, 0x1234, 0x5678); -+ test(6, h, 0x24046800, 0x40040800, 0, 0); -+ test(6, h, 0x24046800, 0x40040800, 0x10101010, 0x10101010); -+ test(6, a, 0x24046800, 0x40040800, 0xdeadbeef, 0xdeadbeef); -+ test(4, g, 64, 15, 116, 26); -+ test(4, g, 64, 15, 127, 3); -+ test(4, g, 64, 15, 123, 1); -+ test(4, h, 64, 15, 123, 128); -+ test(4, h, 64, 15, 123, 129); -+ test(4, a, 10, 0, 0, 52); -+ test(4, b, 10, 0, 0, 220); -+ test(4, a, 10, 1, 0, 2); -+ test(4, b, 10, 1, 0, 6); -+ test(4, c, 10, 1, 0, 10); -+ test(4, d, 10, 1, 0, 20); -+ -+ insert(4, a, 1, 0, 0, 0, 32); -+ insert(4, a, 64, 0, 0, 0, 32); -+ insert(4, a, 128, 0, 0, 0, 32); -+ insert(4, a, 192, 0, 0, 0, 32); -+ insert(4, a, 255, 0, 0, 0, 32); -+ wg_allowedips_remove_by_peer(&t, a, &mutex); -+ test_negative(4, a, 1, 0, 
0, 0); -+ test_negative(4, a, 64, 0, 0, 0); -+ test_negative(4, a, 128, 0, 0, 0); -+ test_negative(4, a, 192, 0, 0, 0); -+ test_negative(4, a, 255, 0, 0, 0); -+ -+ wg_allowedips_free(&t, &mutex); -+ wg_allowedips_init(&t); -+ insert(4, a, 192, 168, 0, 0, 16); -+ insert(4, a, 192, 168, 0, 0, 24); -+ wg_allowedips_remove_by_peer(&t, a, &mutex); -+ test_negative(4, a, 192, 168, 0, 1); -+ -+ /* These will hit the WARN_ON(len >= 128) in free_node if something -+ * goes wrong. -+ */ -+ for (i = 0; i < 128; ++i) { -+ part = cpu_to_be64(~(1LLU << (i % 64))); -+ memset(&ip, 0xff, 16); -+ memcpy((u8 *)&ip + (i < 64) * 8, &part, 8); -+ wg_allowedips_insert_v6(&t, &ip, 128, a, &mutex); -+ } -+ -+ wg_allowedips_free(&t, &mutex); -+ -+ wg_allowedips_init(&t); -+ insert(4, a, 192, 95, 5, 93, 27); -+ insert(6, a, 0x26075300, 0x60006b00, 0, 0xc05f0543, 128); -+ insert(4, a, 10, 1, 0, 20, 29); -+ insert(6, a, 0x26075300, 0x6d8a6bf8, 0xdab1f1df, 0xc05f1523, 83); -+ insert(6, a, 0x26075300, 0x6d8a6bf8, 0xdab1f1df, 0xc05f1523, 21); -+ list_for_each_entry(iter_node, &a->allowedips_list, peer_list) { -+ u8 cidr, ip[16] __aligned(__alignof(u64)); -+ int family = wg_allowedips_read_node(iter_node, ip, &cidr); -+ -+ count++; -+ -+ if (cidr == 27 && family == AF_INET && -+ !memcmp(ip, ip4(192, 95, 5, 64), sizeof(struct in_addr))) -+ found_a = true; -+ else if (cidr == 128 && family == AF_INET6 && -+ !memcmp(ip, ip6(0x26075300, 0x60006b00, 0, 0xc05f0543), -+ sizeof(struct in6_addr))) -+ found_b = true; -+ else if (cidr == 29 && family == AF_INET && -+ !memcmp(ip, ip4(10, 1, 0, 16), sizeof(struct in_addr))) -+ found_c = true; -+ else if (cidr == 83 && family == AF_INET6 && -+ !memcmp(ip, ip6(0x26075300, 0x6d8a6bf8, 0xdab1e000, 0), -+ sizeof(struct in6_addr))) -+ found_d = true; -+ else if (cidr == 21 && family == AF_INET6 && -+ !memcmp(ip, ip6(0x26075000, 0, 0, 0), -+ sizeof(struct in6_addr))) -+ found_e = true; -+ else -+ found_other = true; -+ } -+ test_boolean(count == 5); -+ test_boolean(found_a); -+ test_boolean(found_b); -+ test_boolean(found_c); -+ test_boolean(found_d); -+ test_boolean(found_e); -+ test_boolean(!found_other); -+ -+ if (IS_ENABLED(DEBUG_RANDOM_TRIE) && success) -+ success = randomized_test(); -+ -+ if (success) -+ pr_info("allowedips self-tests: pass\n"); -+ -+free: -+ wg_allowedips_free(&t, &mutex); -+ kfree(a); -+ kfree(b); -+ kfree(c); -+ kfree(d); -+ kfree(e); -+ kfree(f); -+ kfree(g); -+ kfree(h); -+ mutex_unlock(&mutex); -+ -+ return success; -+} -+ -+#undef test_negative -+#undef test -+#undef remove -+#undef insert -+#undef init_peer -+ -+#endif ---- /dev/null -+++ b/drivers/net/wireguard/selftest/counter.c -@@ -0,0 +1,104 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
-+ */ -+ -+#ifdef DEBUG -+bool __init wg_packet_counter_selftest(void) -+{ -+ unsigned int test_num = 0, i; -+ union noise_counter counter; -+ bool success = true; -+ -+#define T_INIT do { \ -+ memset(&counter, 0, sizeof(union noise_counter)); \ -+ spin_lock_init(&counter.receive.lock); \ -+ } while (0) -+#define T_LIM (COUNTER_WINDOW_SIZE + 1) -+#define T(n, v) do { \ -+ ++test_num; \ -+ if (counter_validate(&counter, n) != (v)) { \ -+ pr_err("nonce counter self-test %u: FAIL\n", \ -+ test_num); \ -+ success = false; \ -+ } \ -+ } while (0) -+ -+ T_INIT; -+ /* 1 */ T(0, true); -+ /* 2 */ T(1, true); -+ /* 3 */ T(1, false); -+ /* 4 */ T(9, true); -+ /* 5 */ T(8, true); -+ /* 6 */ T(7, true); -+ /* 7 */ T(7, false); -+ /* 8 */ T(T_LIM, true); -+ /* 9 */ T(T_LIM - 1, true); -+ /* 10 */ T(T_LIM - 1, false); -+ /* 11 */ T(T_LIM - 2, true); -+ /* 12 */ T(2, true); -+ /* 13 */ T(2, false); -+ /* 14 */ T(T_LIM + 16, true); -+ /* 15 */ T(3, false); -+ /* 16 */ T(T_LIM + 16, false); -+ /* 17 */ T(T_LIM * 4, true); -+ /* 18 */ T(T_LIM * 4 - (T_LIM - 1), true); -+ /* 19 */ T(10, false); -+ /* 20 */ T(T_LIM * 4 - T_LIM, false); -+ /* 21 */ T(T_LIM * 4 - (T_LIM + 1), false); -+ /* 22 */ T(T_LIM * 4 - (T_LIM - 2), true); -+ /* 23 */ T(T_LIM * 4 + 1 - T_LIM, false); -+ /* 24 */ T(0, false); -+ /* 25 */ T(REJECT_AFTER_MESSAGES, false); -+ /* 26 */ T(REJECT_AFTER_MESSAGES - 1, true); -+ /* 27 */ T(REJECT_AFTER_MESSAGES, false); -+ /* 28 */ T(REJECT_AFTER_MESSAGES - 1, false); -+ /* 29 */ T(REJECT_AFTER_MESSAGES - 2, true); -+ /* 30 */ T(REJECT_AFTER_MESSAGES + 1, false); -+ /* 31 */ T(REJECT_AFTER_MESSAGES + 2, false); -+ /* 32 */ T(REJECT_AFTER_MESSAGES - 2, false); -+ /* 33 */ T(REJECT_AFTER_MESSAGES - 3, true); -+ /* 34 */ T(0, false); -+ -+ T_INIT; -+ for (i = 1; i <= COUNTER_WINDOW_SIZE; ++i) -+ T(i, true); -+ T(0, true); -+ T(0, false); -+ -+ T_INIT; -+ for (i = 2; i <= COUNTER_WINDOW_SIZE + 1; ++i) -+ T(i, true); -+ T(1, true); -+ T(0, false); -+ -+ T_INIT; -+ for (i = COUNTER_WINDOW_SIZE + 1; i-- > 0;) -+ T(i, true); -+ -+ T_INIT; -+ for (i = COUNTER_WINDOW_SIZE + 2; i-- > 1;) -+ T(i, true); -+ T(0, false); -+ -+ T_INIT; -+ for (i = COUNTER_WINDOW_SIZE + 1; i-- > 1;) -+ T(i, true); -+ T(COUNTER_WINDOW_SIZE + 1, true); -+ T(0, false); -+ -+ T_INIT; -+ for (i = COUNTER_WINDOW_SIZE + 1; i-- > 1;) -+ T(i, true); -+ T(0, true); -+ T(COUNTER_WINDOW_SIZE + 1, true); -+ -+#undef T -+#undef T_LIM -+#undef T_INIT -+ -+ if (success) -+ pr_info("nonce counter self-tests: pass\n"); -+ return success; -+} -+#endif ---- /dev/null -+++ b/drivers/net/wireguard/selftest/ratelimiter.c -@@ -0,0 +1,226 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. -+ */ -+ -+#ifdef DEBUG -+ -+#include -+ -+static const struct { -+ bool result; -+ unsigned int msec_to_sleep_before; -+} expected_results[] __initconst = { -+ [0 ... 
PACKETS_BURSTABLE - 1] = { true, 0 }, -+ [PACKETS_BURSTABLE] = { false, 0 }, -+ [PACKETS_BURSTABLE + 1] = { true, MSEC_PER_SEC / PACKETS_PER_SECOND }, -+ [PACKETS_BURSTABLE + 2] = { false, 0 }, -+ [PACKETS_BURSTABLE + 3] = { true, (MSEC_PER_SEC / PACKETS_PER_SECOND) * 2 }, -+ [PACKETS_BURSTABLE + 4] = { true, 0 }, -+ [PACKETS_BURSTABLE + 5] = { false, 0 } -+}; -+ -+static __init unsigned int maximum_jiffies_at_index(int index) -+{ -+ unsigned int total_msecs = 2 * MSEC_PER_SEC / PACKETS_PER_SECOND / 3; -+ int i; -+ -+ for (i = 0; i <= index; ++i) -+ total_msecs += expected_results[i].msec_to_sleep_before; -+ return msecs_to_jiffies(total_msecs); -+} -+ -+static __init int timings_test(struct sk_buff *skb4, struct iphdr *hdr4, -+ struct sk_buff *skb6, struct ipv6hdr *hdr6, -+ int *test) -+{ -+ unsigned long loop_start_time; -+ int i; -+ -+ wg_ratelimiter_gc_entries(NULL); -+ rcu_barrier(); -+ loop_start_time = jiffies; -+ -+ for (i = 0; i < ARRAY_SIZE(expected_results); ++i) { -+ if (expected_results[i].msec_to_sleep_before) -+ msleep(expected_results[i].msec_to_sleep_before); -+ -+ if (time_is_before_jiffies(loop_start_time + -+ maximum_jiffies_at_index(i))) -+ return -ETIMEDOUT; -+ if (wg_ratelimiter_allow(skb4, &init_net) != -+ expected_results[i].result) -+ return -EXFULL; -+ ++(*test); -+ -+ hdr4->saddr = htonl(ntohl(hdr4->saddr) + i + 1); -+ if (time_is_before_jiffies(loop_start_time + -+ maximum_jiffies_at_index(i))) -+ return -ETIMEDOUT; -+ if (!wg_ratelimiter_allow(skb4, &init_net)) -+ return -EXFULL; -+ ++(*test); -+ -+ hdr4->saddr = htonl(ntohl(hdr4->saddr) - i - 1); -+ -+#if IS_ENABLED(CONFIG_IPV6) -+ hdr6->saddr.in6_u.u6_addr32[2] = htonl(i); -+ hdr6->saddr.in6_u.u6_addr32[3] = htonl(i); -+ if (time_is_before_jiffies(loop_start_time + -+ maximum_jiffies_at_index(i))) -+ return -ETIMEDOUT; -+ if (wg_ratelimiter_allow(skb6, &init_net) != -+ expected_results[i].result) -+ return -EXFULL; -+ ++(*test); -+ -+ hdr6->saddr.in6_u.u6_addr32[0] = -+ htonl(ntohl(hdr6->saddr.in6_u.u6_addr32[0]) + i + 1); -+ if (time_is_before_jiffies(loop_start_time + -+ maximum_jiffies_at_index(i))) -+ return -ETIMEDOUT; -+ if (!wg_ratelimiter_allow(skb6, &init_net)) -+ return -EXFULL; -+ ++(*test); -+ -+ hdr6->saddr.in6_u.u6_addr32[0] = -+ htonl(ntohl(hdr6->saddr.in6_u.u6_addr32[0]) - i - 1); -+ -+ if (time_is_before_jiffies(loop_start_time + -+ maximum_jiffies_at_index(i))) -+ return -ETIMEDOUT; -+#endif -+ } -+ return 0; -+} -+ -+static __init int capacity_test(struct sk_buff *skb4, struct iphdr *hdr4, -+ int *test) -+{ -+ int i; -+ -+ wg_ratelimiter_gc_entries(NULL); -+ rcu_barrier(); -+ -+ if (atomic_read(&total_entries)) -+ return -EXFULL; -+ ++(*test); -+ -+ for (i = 0; i <= max_entries; ++i) { -+ hdr4->saddr = htonl(i); -+ if (wg_ratelimiter_allow(skb4, &init_net) != (i != max_entries)) -+ return -EXFULL; -+ ++(*test); -+ } -+ return 0; -+} -+ -+bool __init wg_ratelimiter_selftest(void) -+{ -+ enum { TRIALS_BEFORE_GIVING_UP = 5000 }; -+ bool success = false; -+ int test = 0, trials; -+ struct sk_buff *skb4, *skb6; -+ struct iphdr *hdr4; -+ struct ipv6hdr *hdr6; -+ -+ if (IS_ENABLED(CONFIG_KASAN) || IS_ENABLED(CONFIG_UBSAN)) -+ return true; -+ -+ BUILD_BUG_ON(MSEC_PER_SEC % PACKETS_PER_SECOND != 0); -+ -+ if (wg_ratelimiter_init()) -+ goto out; -+ ++test; -+ if (wg_ratelimiter_init()) { -+ wg_ratelimiter_uninit(); -+ goto out; -+ } -+ ++test; -+ if (wg_ratelimiter_init()) { -+ wg_ratelimiter_uninit(); -+ wg_ratelimiter_uninit(); -+ goto out; -+ } -+ ++test; -+ -+ skb4 = alloc_skb(sizeof(struct 
iphdr), GFP_KERNEL); -+ if (unlikely(!skb4)) -+ goto err_nofree; -+ skb4->protocol = htons(ETH_P_IP); -+ hdr4 = (struct iphdr *)skb_put(skb4, sizeof(*hdr4)); -+ hdr4->saddr = htonl(8182); -+ skb_reset_network_header(skb4); -+ ++test; -+ -+#if IS_ENABLED(CONFIG_IPV6) -+ skb6 = alloc_skb(sizeof(struct ipv6hdr), GFP_KERNEL); -+ if (unlikely(!skb6)) { -+ kfree_skb(skb4); -+ goto err_nofree; -+ } -+ skb6->protocol = htons(ETH_P_IPV6); -+ hdr6 = (struct ipv6hdr *)skb_put(skb6, sizeof(*hdr6)); -+ hdr6->saddr.in6_u.u6_addr32[0] = htonl(1212); -+ hdr6->saddr.in6_u.u6_addr32[1] = htonl(289188); -+ skb_reset_network_header(skb6); -+ ++test; -+#endif -+ -+ for (trials = TRIALS_BEFORE_GIVING_UP;;) { -+ int test_count = 0, ret; -+ -+ ret = timings_test(skb4, hdr4, skb6, hdr6, &test_count); -+ if (ret == -ETIMEDOUT) { -+ if (!trials--) { -+ test += test_count; -+ goto err; -+ } -+ msleep(500); -+ continue; -+ } else if (ret < 0) { -+ test += test_count; -+ goto err; -+ } else { -+ test += test_count; -+ break; -+ } -+ } -+ -+ for (trials = TRIALS_BEFORE_GIVING_UP;;) { -+ int test_count = 0; -+ -+ if (capacity_test(skb4, hdr4, &test_count) < 0) { -+ if (!trials--) { -+ test += test_count; -+ goto err; -+ } -+ msleep(50); -+ continue; -+ } -+ test += test_count; -+ break; -+ } -+ -+ success = true; -+ -+err: -+ kfree_skb(skb4); -+#if IS_ENABLED(CONFIG_IPV6) -+ kfree_skb(skb6); -+#endif -+err_nofree: -+ wg_ratelimiter_uninit(); -+ wg_ratelimiter_uninit(); -+ wg_ratelimiter_uninit(); -+ /* Uninit one extra time to check underflow detection. */ -+ wg_ratelimiter_uninit(); -+out: -+ if (success) -+ pr_info("ratelimiter self-tests: pass\n"); -+ else -+ pr_err("ratelimiter self-test %d: FAIL\n", test); -+ -+ return success; -+} -+#endif ---- /dev/null -+++ b/drivers/net/wireguard/send.c -@@ -0,0 +1,413 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. -+ */ -+ -+#include "queueing.h" -+#include "timers.h" -+#include "device.h" -+#include "peer.h" -+#include "socket.h" -+#include "messages.h" -+#include "cookie.h" -+ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+static void wg_packet_send_handshake_initiation(struct wg_peer *peer) -+{ -+ struct message_handshake_initiation packet; -+ -+ if (!wg_birthdate_has_expired(atomic64_read(&peer->last_sent_handshake), -+ REKEY_TIMEOUT)) -+ return; /* This function is rate limited. 
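Both the check above and the one in wg_packet_send_queued_handshake_initiation() below throttle handshakes to one per REKEY_TIMEOUT by comparing the peer's last_sent_handshake timestamp (an atomic64 holding a coarse boot-time value) against the current time. A minimal single-threaded sketch of that timestamp-based limiter, assuming a 5-second REKEY_TIMEOUT and ignoring the atomic64/SMP handling the driver needs:

#include <stdbool.h>
#include <stdint.h>

#define NSEC_PER_SEC          1000000000ULL
#define EXAMPLE_REKEY_TIMEOUT 5 /* seconds, assumed value */

/* Returns true (and records the attempt) at most once per timeout. */
static bool example_may_send_handshake(uint64_t *last_sent_ns, uint64_t now_ns)
{
	if (now_ns - *last_sent_ns < EXAMPLE_REKEY_TIMEOUT * NSEC_PER_SEC)
		return false;
	*last_sent_ns = now_ns;
	return true;
}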
*/ -+ -+ atomic64_set(&peer->last_sent_handshake, ktime_get_coarse_boottime_ns()); -+ net_dbg_ratelimited("%s: Sending handshake initiation to peer %llu (%pISpfsc)\n", -+ peer->device->dev->name, peer->internal_id, -+ &peer->endpoint.addr); -+ -+ if (wg_noise_handshake_create_initiation(&packet, &peer->handshake)) { -+ wg_cookie_add_mac_to_packet(&packet, sizeof(packet), peer); -+ wg_timers_any_authenticated_packet_traversal(peer); -+ wg_timers_any_authenticated_packet_sent(peer); -+ atomic64_set(&peer->last_sent_handshake, -+ ktime_get_coarse_boottime_ns()); -+ wg_socket_send_buffer_to_peer(peer, &packet, sizeof(packet), -+ HANDSHAKE_DSCP); -+ wg_timers_handshake_initiated(peer); -+ } -+} -+ -+void wg_packet_handshake_send_worker(struct work_struct *work) -+{ -+ struct wg_peer *peer = container_of(work, struct wg_peer, -+ transmit_handshake_work); -+ -+ wg_packet_send_handshake_initiation(peer); -+ wg_peer_put(peer); -+} -+ -+void wg_packet_send_queued_handshake_initiation(struct wg_peer *peer, -+ bool is_retry) -+{ -+ if (!is_retry) -+ peer->timer_handshake_attempts = 0; -+ -+ rcu_read_lock_bh(); -+ /* We check last_sent_handshake here in addition to the actual function -+ * we're queueing up, so that we don't queue things if not strictly -+ * necessary: -+ */ -+ if (!wg_birthdate_has_expired(atomic64_read(&peer->last_sent_handshake), -+ REKEY_TIMEOUT) || -+ unlikely(READ_ONCE(peer->is_dead))) -+ goto out; -+ -+ wg_peer_get(peer); -+ /* Queues up calling packet_send_queued_handshakes(peer), where we do a -+ * peer_put(peer) after: -+ */ -+ if (!queue_work(peer->device->handshake_send_wq, -+ &peer->transmit_handshake_work)) -+ /* If the work was already queued, we want to drop the -+ * extra reference: -+ */ -+ wg_peer_put(peer); -+out: -+ rcu_read_unlock_bh(); -+} -+ -+void wg_packet_send_handshake_response(struct wg_peer *peer) -+{ -+ struct message_handshake_response packet; -+ -+ atomic64_set(&peer->last_sent_handshake, ktime_get_coarse_boottime_ns()); -+ net_dbg_ratelimited("%s: Sending handshake response to peer %llu (%pISpfsc)\n", -+ peer->device->dev->name, peer->internal_id, -+ &peer->endpoint.addr); -+ -+ if (wg_noise_handshake_create_response(&packet, &peer->handshake)) { -+ wg_cookie_add_mac_to_packet(&packet, sizeof(packet), peer); -+ if (wg_noise_handshake_begin_session(&peer->handshake, -+ &peer->keypairs)) { -+ wg_timers_session_derived(peer); -+ wg_timers_any_authenticated_packet_traversal(peer); -+ wg_timers_any_authenticated_packet_sent(peer); -+ atomic64_set(&peer->last_sent_handshake, -+ ktime_get_coarse_boottime_ns()); -+ wg_socket_send_buffer_to_peer(peer, &packet, -+ sizeof(packet), -+ HANDSHAKE_DSCP); -+ } -+ } -+} -+ -+void wg_packet_send_handshake_cookie(struct wg_device *wg, -+ struct sk_buff *initiating_skb, -+ __le32 sender_index) -+{ -+ struct message_handshake_cookie packet; -+ -+ net_dbg_skb_ratelimited("%s: Sending cookie response for denied handshake message for %pISpfsc\n", -+ wg->dev->name, initiating_skb); -+ wg_cookie_message_create(&packet, initiating_skb, sender_index, -+ &wg->cookie_checker); -+ wg_socket_send_buffer_as_reply_to_skb(wg, initiating_skb, &packet, -+ sizeof(packet)); -+} -+ -+static void keep_key_fresh(struct wg_peer *peer) -+{ -+ struct noise_keypair *keypair; -+ bool send = false; -+ -+ rcu_read_lock_bh(); -+ keypair = rcu_dereference_bh(peer->keypairs.current_keypair); -+ if (likely(keypair && READ_ONCE(keypair->sending.is_valid)) && -+ (unlikely(atomic64_read(&keypair->sending.counter.counter) > -+ REKEY_AFTER_MESSAGES) || -+ 
(keypair->i_am_the_initiator && -+ unlikely(wg_birthdate_has_expired(keypair->sending.birthdate, -+ REKEY_AFTER_TIME))))) -+ send = true; -+ rcu_read_unlock_bh(); -+ -+ if (send) -+ wg_packet_send_queued_handshake_initiation(peer, false); -+} -+ -+static unsigned int calculate_skb_padding(struct sk_buff *skb) -+{ -+ /* We do this modulo business with the MTU, just in case the networking -+ * layer gives us a packet that's bigger than the MTU. In that case, we -+ * wouldn't want the final subtraction to overflow in the case of the -+ * padded_size being clamped. -+ */ -+ unsigned int last_unit = skb->len % PACKET_CB(skb)->mtu; -+ unsigned int padded_size = ALIGN(last_unit, MESSAGE_PADDING_MULTIPLE); -+ -+ if (padded_size > PACKET_CB(skb)->mtu) -+ padded_size = PACKET_CB(skb)->mtu; -+ return padded_size - last_unit; -+} -+ -+static bool encrypt_packet(struct sk_buff *skb, struct noise_keypair *keypair) -+{ -+ unsigned int padding_len, plaintext_len, trailer_len; -+ struct scatterlist sg[MAX_SKB_FRAGS + 8]; -+ struct message_data *header; -+ struct sk_buff *trailer; -+ int num_frags; -+ -+ /* Calculate lengths. */ -+ padding_len = calculate_skb_padding(skb); -+ trailer_len = padding_len + noise_encrypted_len(0); -+ plaintext_len = skb->len + padding_len; -+ -+ /* Expand data section to have room for padding and auth tag. */ -+ num_frags = skb_cow_data(skb, trailer_len, &trailer); -+ if (unlikely(num_frags < 0 || num_frags > ARRAY_SIZE(sg))) -+ return false; -+ -+ /* Set the padding to zeros, and make sure it and the auth tag are part -+ * of the skb. -+ */ -+ memset(skb_tail_pointer(trailer), 0, padding_len); -+ -+ /* Expand head section to have room for our header and the network -+ * stack's headers. -+ */ -+ if (unlikely(skb_cow_head(skb, DATA_PACKET_HEAD_ROOM) < 0)) -+ return false; -+ -+ /* Finalize checksum calculation for the inner packet, if required. */ -+ if (unlikely(skb->ip_summed == CHECKSUM_PARTIAL && -+ skb_checksum_help(skb))) -+ return false; -+ -+ /* Only after checksumming can we safely add on the padding at the end -+ * and the header. 
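The padding rule above rounds the tail of the packet up to the next multiple of MESSAGE_PADDING_MULTIPLE (16 bytes), clamped so the result never exceeds the packet's MTU. A rough shell sketch of the same arithmetic, assuming a 16-byte multiple and an example MTU of 1420:

    mtu=1420; multiple=16
    for len in 1 100 1419 1420 1421 4000; do
        last_unit=$(( len % mtu ))
        padded=$(( (last_unit + multiple - 1) / multiple * multiple ))
        (( padded > mtu )) && padded=$mtu
        echo "skb len $len -> $(( padded - last_unit )) bytes of padding"
    done

So a 1419-byte packet on a 1420-MTU device gets a single byte of padding rather than being rounded past the MTU, and a packet already at a 16-byte boundary gets none.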
-+ */ -+ skb_set_inner_network_header(skb, 0); -+ header = (struct message_data *)skb_push(skb, sizeof(*header)); -+ header->header.type = cpu_to_le32(MESSAGE_DATA); -+ header->key_idx = keypair->remote_index; -+ header->counter = cpu_to_le64(PACKET_CB(skb)->nonce); -+ pskb_put(skb, trailer, trailer_len); -+ -+ /* Now we can encrypt the scattergather segments */ -+ sg_init_table(sg, num_frags); -+ if (skb_to_sgvec(skb, sg, sizeof(struct message_data), -+ noise_encrypted_len(plaintext_len)) <= 0) -+ return false; -+ return chacha20poly1305_encrypt_sg_inplace(sg, plaintext_len, NULL, 0, -+ PACKET_CB(skb)->nonce, -+ keypair->sending.key); -+} -+ -+void wg_packet_send_keepalive(struct wg_peer *peer) -+{ -+ struct sk_buff *skb; -+ -+ if (skb_queue_empty(&peer->staged_packet_queue)) { -+ skb = alloc_skb(DATA_PACKET_HEAD_ROOM + MESSAGE_MINIMUM_LENGTH, -+ GFP_ATOMIC); -+ if (unlikely(!skb)) -+ return; -+ skb_reserve(skb, DATA_PACKET_HEAD_ROOM); -+ skb->dev = peer->device->dev; -+ PACKET_CB(skb)->mtu = skb->dev->mtu; -+ skb_queue_tail(&peer->staged_packet_queue, skb); -+ net_dbg_ratelimited("%s: Sending keepalive packet to peer %llu (%pISpfsc)\n", -+ peer->device->dev->name, peer->internal_id, -+ &peer->endpoint.addr); -+ } -+ -+ wg_packet_send_staged_packets(peer); -+} -+ -+static void wg_packet_create_data_done(struct sk_buff *first, -+ struct wg_peer *peer) -+{ -+ struct sk_buff *skb, *next; -+ bool is_keepalive, data_sent = false; -+ -+ wg_timers_any_authenticated_packet_traversal(peer); -+ wg_timers_any_authenticated_packet_sent(peer); -+ skb_list_walk_safe(first, skb, next) { -+ is_keepalive = skb->len == message_data_len(0); -+ if (likely(!wg_socket_send_skb_to_peer(peer, skb, -+ PACKET_CB(skb)->ds) && !is_keepalive)) -+ data_sent = true; -+ } -+ -+ if (likely(data_sent)) -+ wg_timers_data_sent(peer); -+ -+ keep_key_fresh(peer); -+} -+ -+void wg_packet_tx_worker(struct work_struct *work) -+{ -+ struct crypt_queue *queue = container_of(work, struct crypt_queue, -+ work); -+ struct noise_keypair *keypair; -+ enum packet_state state; -+ struct sk_buff *first; -+ struct wg_peer *peer; -+ -+ while ((first = __ptr_ring_peek(&queue->ring)) != NULL && -+ (state = atomic_read_acquire(&PACKET_CB(first)->state)) != -+ PACKET_STATE_UNCRYPTED) { -+ __ptr_ring_discard_one(&queue->ring); -+ peer = PACKET_PEER(first); -+ keypair = PACKET_CB(first)->keypair; -+ -+ if (likely(state == PACKET_STATE_CRYPTED)) -+ wg_packet_create_data_done(first, peer); -+ else -+ kfree_skb_list(first); -+ -+ wg_noise_keypair_put(keypair, false); -+ wg_peer_put(peer); -+ } -+} -+ -+void wg_packet_encrypt_worker(struct work_struct *work) -+{ -+ struct crypt_queue *queue = container_of(work, struct multicore_worker, -+ work)->ptr; -+ struct sk_buff *first, *skb, *next; -+ -+ while ((first = ptr_ring_consume_bh(&queue->ring)) != NULL) { -+ enum packet_state state = PACKET_STATE_CRYPTED; -+ -+ skb_list_walk_safe(first, skb, next) { -+ if (likely(encrypt_packet(skb, -+ PACKET_CB(first)->keypair))) { -+ wg_reset_packet(skb); -+ } else { -+ state = PACKET_STATE_DEAD; -+ break; -+ } -+ } -+ wg_queue_enqueue_per_peer(&PACKET_PEER(first)->tx_queue, first, -+ state); -+ -+ } -+} -+ -+static void wg_packet_create_data(struct sk_buff *first) -+{ -+ struct wg_peer *peer = PACKET_PEER(first); -+ struct wg_device *wg = peer->device; -+ int ret = -EINVAL; -+ -+ rcu_read_lock_bh(); -+ if (unlikely(READ_ONCE(peer->is_dead))) -+ goto err; -+ -+ ret = wg_queue_enqueue_per_device_and_peer(&wg->encrypt_queue, -+ &peer->tx_queue, first, -+ 
wg->packet_crypt_wq, -+ &wg->encrypt_queue.last_cpu); -+ if (unlikely(ret == -EPIPE)) -+ wg_queue_enqueue_per_peer(&peer->tx_queue, first, -+ PACKET_STATE_DEAD); -+err: -+ rcu_read_unlock_bh(); -+ if (likely(!ret || ret == -EPIPE)) -+ return; -+ wg_noise_keypair_put(PACKET_CB(first)->keypair, false); -+ wg_peer_put(peer); -+ kfree_skb_list(first); -+} -+ -+void wg_packet_purge_staged_packets(struct wg_peer *peer) -+{ -+ spin_lock_bh(&peer->staged_packet_queue.lock); -+ peer->device->dev->stats.tx_dropped += peer->staged_packet_queue.qlen; -+ __skb_queue_purge(&peer->staged_packet_queue); -+ spin_unlock_bh(&peer->staged_packet_queue.lock); -+} -+ -+void wg_packet_send_staged_packets(struct wg_peer *peer) -+{ -+ struct noise_symmetric_key *key; -+ struct noise_keypair *keypair; -+ struct sk_buff_head packets; -+ struct sk_buff *skb; -+ -+ /* Steal the current queue into our local one. */ -+ __skb_queue_head_init(&packets); -+ spin_lock_bh(&peer->staged_packet_queue.lock); -+ skb_queue_splice_init(&peer->staged_packet_queue, &packets); -+ spin_unlock_bh(&peer->staged_packet_queue.lock); -+ if (unlikely(skb_queue_empty(&packets))) -+ return; -+ -+ /* First we make sure we have a valid reference to a valid key. */ -+ rcu_read_lock_bh(); -+ keypair = wg_noise_keypair_get( -+ rcu_dereference_bh(peer->keypairs.current_keypair)); -+ rcu_read_unlock_bh(); -+ if (unlikely(!keypair)) -+ goto out_nokey; -+ key = &keypair->sending; -+ if (unlikely(!READ_ONCE(key->is_valid))) -+ goto out_nokey; -+ if (unlikely(wg_birthdate_has_expired(key->birthdate, -+ REJECT_AFTER_TIME))) -+ goto out_invalid; -+ -+ /* After we know we have a somewhat valid key, we now try to assign -+ * nonces to all of the packets in the queue. If we can't assign nonces -+ * for all of them, we just consider it a failure and wait for the next -+ * handshake. -+ */ -+ skb_queue_walk(&packets, skb) { -+ /* 0 for no outer TOS: no leak. TODO: at some later point, we -+ * might consider using flowi->tos as outer instead. -+ */ -+ PACKET_CB(skb)->ds = ip_tunnel_ecn_encap(0, ip_hdr(skb), skb); -+ PACKET_CB(skb)->nonce = -+ atomic64_inc_return(&key->counter.counter) - 1; -+ if (unlikely(PACKET_CB(skb)->nonce >= REJECT_AFTER_MESSAGES)) -+ goto out_invalid; -+ } -+ -+ packets.prev->next = NULL; -+ wg_peer_get(keypair->entry.peer); -+ PACKET_CB(packets.next)->keypair = keypair; -+ wg_packet_create_data(packets.next); -+ return; -+ -+out_invalid: -+ WRITE_ONCE(key->is_valid, false); -+out_nokey: -+ wg_noise_keypair_put(keypair, false); -+ -+ /* We orphan the packets if we're waiting on a handshake, so that they -+ * don't block a socket's pool. -+ */ -+ skb_queue_walk(&packets, skb) -+ skb_orphan(skb); -+ /* Then we put them back on the top of the queue. We're not too -+ * concerned about accidentally getting things a little out of order if -+ * packets are being added really fast, because this queue is for before -+ * packets can even be sent and it's small anyway. -+ */ -+ spin_lock_bh(&peer->staged_packet_queue.lock); -+ skb_queue_splice(&packets, &peer->staged_packet_queue); -+ spin_unlock_bh(&peer->staged_packet_queue.lock); -+ -+ /* If we're exiting because there's something wrong with the key, it -+ * means we should initiate a new handshake. -+ */ -+ wg_packet_send_queued_handshake_initiation(peer, false); -+} ---- /dev/null -+++ b/drivers/net/wireguard/socket.c -@@ -0,0 +1,437 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
-+ */ -+ -+#include "device.h" -+#include "peer.h" -+#include "socket.h" -+#include "queueing.h" -+#include "messages.h" -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+static int send4(struct wg_device *wg, struct sk_buff *skb, -+ struct endpoint *endpoint, u8 ds, struct dst_cache *cache) -+{ -+ struct flowi4 fl = { -+ .saddr = endpoint->src4.s_addr, -+ .daddr = endpoint->addr4.sin_addr.s_addr, -+ .fl4_dport = endpoint->addr4.sin_port, -+ .flowi4_mark = wg->fwmark, -+ .flowi4_proto = IPPROTO_UDP -+ }; -+ struct rtable *rt = NULL; -+ struct sock *sock; -+ int ret = 0; -+ -+ skb_mark_not_on_list(skb); -+ skb->dev = wg->dev; -+ skb->mark = wg->fwmark; -+ -+ rcu_read_lock_bh(); -+ sock = rcu_dereference_bh(wg->sock4); -+ -+ if (unlikely(!sock)) { -+ ret = -ENONET; -+ goto err; -+ } -+ -+ fl.fl4_sport = inet_sk(sock)->inet_sport; -+ -+ if (cache) -+ rt = dst_cache_get_ip4(cache, &fl.saddr); -+ -+ if (!rt) { -+ security_sk_classify_flow(sock, flowi4_to_flowi(&fl)); -+ if (unlikely(!inet_confirm_addr(sock_net(sock), NULL, 0, -+ fl.saddr, RT_SCOPE_HOST))) { -+ endpoint->src4.s_addr = 0; -+ *(__force __be32 *)&endpoint->src_if4 = 0; -+ fl.saddr = 0; -+ if (cache) -+ dst_cache_reset(cache); -+ } -+ rt = ip_route_output_flow(sock_net(sock), &fl, sock); -+ if (unlikely(endpoint->src_if4 && ((IS_ERR(rt) && -+ PTR_ERR(rt) == -EINVAL) || (!IS_ERR(rt) && -+ rt->dst.dev->ifindex != endpoint->src_if4)))) { -+ endpoint->src4.s_addr = 0; -+ *(__force __be32 *)&endpoint->src_if4 = 0; -+ fl.saddr = 0; -+ if (cache) -+ dst_cache_reset(cache); -+ if (!IS_ERR(rt)) -+ ip_rt_put(rt); -+ rt = ip_route_output_flow(sock_net(sock), &fl, sock); -+ } -+ if (unlikely(IS_ERR(rt))) { -+ ret = PTR_ERR(rt); -+ net_dbg_ratelimited("%s: No route to %pISpfsc, error %d\n", -+ wg->dev->name, &endpoint->addr, ret); -+ goto err; -+ } else if (unlikely(rt->dst.dev == skb->dev)) { -+ ip_rt_put(rt); -+ ret = -ELOOP; -+ net_dbg_ratelimited("%s: Avoiding routing loop to %pISpfsc\n", -+ wg->dev->name, &endpoint->addr); -+ goto err; -+ } -+ if (cache) -+ dst_cache_set_ip4(cache, &rt->dst, fl.saddr); -+ } -+ -+ skb->ignore_df = 1; -+ udp_tunnel_xmit_skb(rt, sock, skb, fl.saddr, fl.daddr, ds, -+ ip4_dst_hoplimit(&rt->dst), 0, fl.fl4_sport, -+ fl.fl4_dport, false, false); -+ goto out; -+ -+err: -+ kfree_skb(skb); -+out: -+ rcu_read_unlock_bh(); -+ return ret; -+} -+ -+static int send6(struct wg_device *wg, struct sk_buff *skb, -+ struct endpoint *endpoint, u8 ds, struct dst_cache *cache) -+{ -+#if IS_ENABLED(CONFIG_IPV6) -+ struct flowi6 fl = { -+ .saddr = endpoint->src6, -+ .daddr = endpoint->addr6.sin6_addr, -+ .fl6_dport = endpoint->addr6.sin6_port, -+ .flowi6_mark = wg->fwmark, -+ .flowi6_oif = endpoint->addr6.sin6_scope_id, -+ .flowi6_proto = IPPROTO_UDP -+ /* TODO: addr->sin6_flowinfo */ -+ }; -+ struct dst_entry *dst = NULL; -+ struct sock *sock; -+ int ret = 0; -+ -+ skb_mark_not_on_list(skb); -+ skb->dev = wg->dev; -+ skb->mark = wg->fwmark; -+ -+ rcu_read_lock_bh(); -+ sock = rcu_dereference_bh(wg->sock6); -+ -+ if (unlikely(!sock)) { -+ ret = -ENONET; -+ goto err; -+ } -+ -+ fl.fl6_sport = inet_sk(sock)->inet_sport; -+ -+ if (cache) -+ dst = dst_cache_get_ip6(cache, &fl.saddr); -+ -+ if (!dst) { -+ security_sk_classify_flow(sock, flowi6_to_flowi(&fl)); -+ if (unlikely(!ipv6_addr_any(&fl.saddr) && -+ !ipv6_chk_addr(sock_net(sock), &fl.saddr, NULL, 0))) { -+ endpoint->src6 = fl.saddr = in6addr_any; -+ if (cache) -+ dst_cache_reset(cache); -+ } -+ dst = 
ipv6_stub->ipv6_dst_lookup_flow(sock_net(sock), sock, &fl, -+ NULL); -+ if (unlikely(IS_ERR(dst))) { -+ ret = PTR_ERR(dst); -+ net_dbg_ratelimited("%s: No route to %pISpfsc, error %d\n", -+ wg->dev->name, &endpoint->addr, ret); -+ goto err; -+ } else if (unlikely(dst->dev == skb->dev)) { -+ dst_release(dst); -+ ret = -ELOOP; -+ net_dbg_ratelimited("%s: Avoiding routing loop to %pISpfsc\n", -+ wg->dev->name, &endpoint->addr); -+ goto err; -+ } -+ if (cache) -+ dst_cache_set_ip6(cache, dst, &fl.saddr); -+ } -+ -+ skb->ignore_df = 1; -+ udp_tunnel6_xmit_skb(dst, sock, skb, skb->dev, &fl.saddr, &fl.daddr, ds, -+ ip6_dst_hoplimit(dst), 0, fl.fl6_sport, -+ fl.fl6_dport, false); -+ goto out; -+ -+err: -+ kfree_skb(skb); -+out: -+ rcu_read_unlock_bh(); -+ return ret; -+#else -+ return -EAFNOSUPPORT; -+#endif -+} -+ -+int wg_socket_send_skb_to_peer(struct wg_peer *peer, struct sk_buff *skb, u8 ds) -+{ -+ size_t skb_len = skb->len; -+ int ret = -EAFNOSUPPORT; -+ -+ read_lock_bh(&peer->endpoint_lock); -+ if (peer->endpoint.addr.sa_family == AF_INET) -+ ret = send4(peer->device, skb, &peer->endpoint, ds, -+ &peer->endpoint_cache); -+ else if (peer->endpoint.addr.sa_family == AF_INET6) -+ ret = send6(peer->device, skb, &peer->endpoint, ds, -+ &peer->endpoint_cache); -+ else -+ dev_kfree_skb(skb); -+ if (likely(!ret)) -+ peer->tx_bytes += skb_len; -+ read_unlock_bh(&peer->endpoint_lock); -+ -+ return ret; -+} -+ -+int wg_socket_send_buffer_to_peer(struct wg_peer *peer, void *buffer, -+ size_t len, u8 ds) -+{ -+ struct sk_buff *skb = alloc_skb(len + SKB_HEADER_LEN, GFP_ATOMIC); -+ -+ if (unlikely(!skb)) -+ return -ENOMEM; -+ -+ skb_reserve(skb, SKB_HEADER_LEN); -+ skb_set_inner_network_header(skb, 0); -+ skb_put_data(skb, buffer, len); -+ return wg_socket_send_skb_to_peer(peer, skb, ds); -+} -+ -+int wg_socket_send_buffer_as_reply_to_skb(struct wg_device *wg, -+ struct sk_buff *in_skb, void *buffer, -+ size_t len) -+{ -+ int ret = 0; -+ struct sk_buff *skb; -+ struct endpoint endpoint; -+ -+ if (unlikely(!in_skb)) -+ return -EINVAL; -+ ret = wg_socket_endpoint_from_skb(&endpoint, in_skb); -+ if (unlikely(ret < 0)) -+ return ret; -+ -+ skb = alloc_skb(len + SKB_HEADER_LEN, GFP_ATOMIC); -+ if (unlikely(!skb)) -+ return -ENOMEM; -+ skb_reserve(skb, SKB_HEADER_LEN); -+ skb_set_inner_network_header(skb, 0); -+ skb_put_data(skb, buffer, len); -+ -+ if (endpoint.addr.sa_family == AF_INET) -+ ret = send4(wg, skb, &endpoint, 0, NULL); -+ else if (endpoint.addr.sa_family == AF_INET6) -+ ret = send6(wg, skb, &endpoint, 0, NULL); -+ /* No other possibilities if the endpoint is valid, which it is, -+ * as we checked above. 
-+ */ -+ -+ return ret; -+} -+ -+int wg_socket_endpoint_from_skb(struct endpoint *endpoint, -+ const struct sk_buff *skb) -+{ -+ memset(endpoint, 0, sizeof(*endpoint)); -+ if (skb->protocol == htons(ETH_P_IP)) { -+ endpoint->addr4.sin_family = AF_INET; -+ endpoint->addr4.sin_port = udp_hdr(skb)->source; -+ endpoint->addr4.sin_addr.s_addr = ip_hdr(skb)->saddr; -+ endpoint->src4.s_addr = ip_hdr(skb)->daddr; -+ endpoint->src_if4 = skb->skb_iif; -+ } else if (skb->protocol == htons(ETH_P_IPV6)) { -+ endpoint->addr6.sin6_family = AF_INET6; -+ endpoint->addr6.sin6_port = udp_hdr(skb)->source; -+ endpoint->addr6.sin6_addr = ipv6_hdr(skb)->saddr; -+ endpoint->addr6.sin6_scope_id = ipv6_iface_scope_id( -+ &ipv6_hdr(skb)->saddr, skb->skb_iif); -+ endpoint->src6 = ipv6_hdr(skb)->daddr; -+ } else { -+ return -EINVAL; -+ } -+ return 0; -+} -+ -+static bool endpoint_eq(const struct endpoint *a, const struct endpoint *b) -+{ -+ return (a->addr.sa_family == AF_INET && b->addr.sa_family == AF_INET && -+ a->addr4.sin_port == b->addr4.sin_port && -+ a->addr4.sin_addr.s_addr == b->addr4.sin_addr.s_addr && -+ a->src4.s_addr == b->src4.s_addr && a->src_if4 == b->src_if4) || -+ (a->addr.sa_family == AF_INET6 && -+ b->addr.sa_family == AF_INET6 && -+ a->addr6.sin6_port == b->addr6.sin6_port && -+ ipv6_addr_equal(&a->addr6.sin6_addr, &b->addr6.sin6_addr) && -+ a->addr6.sin6_scope_id == b->addr6.sin6_scope_id && -+ ipv6_addr_equal(&a->src6, &b->src6)) || -+ unlikely(!a->addr.sa_family && !b->addr.sa_family); -+} -+ -+void wg_socket_set_peer_endpoint(struct wg_peer *peer, -+ const struct endpoint *endpoint) -+{ -+ /* First we check unlocked, in order to optimize, since it's pretty rare -+ * that an endpoint will change. If we happen to be mid-write, and two -+ * CPUs wind up writing the same thing or something slightly different, -+ * it doesn't really matter much either. 
-+ */ -+ if (endpoint_eq(endpoint, &peer->endpoint)) -+ return; -+ write_lock_bh(&peer->endpoint_lock); -+ if (endpoint->addr.sa_family == AF_INET) { -+ peer->endpoint.addr4 = endpoint->addr4; -+ peer->endpoint.src4 = endpoint->src4; -+ peer->endpoint.src_if4 = endpoint->src_if4; -+ } else if (endpoint->addr.sa_family == AF_INET6) { -+ peer->endpoint.addr6 = endpoint->addr6; -+ peer->endpoint.src6 = endpoint->src6; -+ } else { -+ goto out; -+ } -+ dst_cache_reset(&peer->endpoint_cache); -+out: -+ write_unlock_bh(&peer->endpoint_lock); -+} -+ -+void wg_socket_set_peer_endpoint_from_skb(struct wg_peer *peer, -+ const struct sk_buff *skb) -+{ -+ struct endpoint endpoint; -+ -+ if (!wg_socket_endpoint_from_skb(&endpoint, skb)) -+ wg_socket_set_peer_endpoint(peer, &endpoint); -+} -+ -+void wg_socket_clear_peer_endpoint_src(struct wg_peer *peer) -+{ -+ write_lock_bh(&peer->endpoint_lock); -+ memset(&peer->endpoint.src6, 0, sizeof(peer->endpoint.src6)); -+ dst_cache_reset(&peer->endpoint_cache); -+ write_unlock_bh(&peer->endpoint_lock); -+} -+ -+static int wg_receive(struct sock *sk, struct sk_buff *skb) -+{ -+ struct wg_device *wg; -+ -+ if (unlikely(!sk)) -+ goto err; -+ wg = sk->sk_user_data; -+ if (unlikely(!wg)) -+ goto err; -+ wg_packet_receive(wg, skb); -+ return 0; -+ -+err: -+ kfree_skb(skb); -+ return 0; -+} -+ -+static void sock_free(struct sock *sock) -+{ -+ if (unlikely(!sock)) -+ return; -+ sk_clear_memalloc(sock); -+ udp_tunnel_sock_release(sock->sk_socket); -+} -+ -+static void set_sock_opts(struct socket *sock) -+{ -+ sock->sk->sk_allocation = GFP_ATOMIC; -+ sock->sk->sk_sndbuf = INT_MAX; -+ sk_set_memalloc(sock->sk); -+} -+ -+int wg_socket_init(struct wg_device *wg, u16 port) -+{ -+ int ret; -+ struct udp_tunnel_sock_cfg cfg = { -+ .sk_user_data = wg, -+ .encap_type = 1, -+ .encap_rcv = wg_receive -+ }; -+ struct socket *new4 = NULL, *new6 = NULL; -+ struct udp_port_cfg port4 = { -+ .family = AF_INET, -+ .local_ip.s_addr = htonl(INADDR_ANY), -+ .local_udp_port = htons(port), -+ .use_udp_checksums = true -+ }; -+#if IS_ENABLED(CONFIG_IPV6) -+ int retries = 0; -+ struct udp_port_cfg port6 = { -+ .family = AF_INET6, -+ .local_ip6 = IN6ADDR_ANY_INIT, -+ .use_udp6_tx_checksums = true, -+ .use_udp6_rx_checksums = true, -+ .ipv6_v6only = true -+ }; -+#endif -+ -+#if IS_ENABLED(CONFIG_IPV6) -+retry: -+#endif -+ -+ ret = udp_sock_create(wg->creating_net, &port4, &new4); -+ if (ret < 0) { -+ pr_err("%s: Could not create IPv4 socket\n", wg->dev->name); -+ return ret; -+ } -+ set_sock_opts(new4); -+ setup_udp_tunnel_sock(wg->creating_net, new4, &cfg); -+ -+#if IS_ENABLED(CONFIG_IPV6) -+ if (ipv6_mod_enabled()) { -+ port6.local_udp_port = inet_sk(new4->sk)->inet_sport; -+ ret = udp_sock_create(wg->creating_net, &port6, &new6); -+ if (ret < 0) { -+ udp_tunnel_sock_release(new4); -+ if (ret == -EADDRINUSE && !port && retries++ < 100) -+ goto retry; -+ pr_err("%s: Could not create IPv6 socket\n", -+ wg->dev->name); -+ return ret; -+ } -+ set_sock_opts(new6); -+ setup_udp_tunnel_sock(wg->creating_net, new6, &cfg); -+ } -+#endif -+ -+ wg_socket_reinit(wg, new4->sk, new6 ? 
new6->sk : NULL); -+ return 0; -+} -+ -+void wg_socket_reinit(struct wg_device *wg, struct sock *new4, -+ struct sock *new6) -+{ -+ struct sock *old4, *old6; -+ -+ mutex_lock(&wg->socket_update_lock); -+ old4 = rcu_dereference_protected(wg->sock4, -+ lockdep_is_held(&wg->socket_update_lock)); -+ old6 = rcu_dereference_protected(wg->sock6, -+ lockdep_is_held(&wg->socket_update_lock)); -+ rcu_assign_pointer(wg->sock4, new4); -+ rcu_assign_pointer(wg->sock6, new6); -+ if (new4) -+ wg->incoming_port = ntohs(inet_sk(new4)->inet_sport); -+ mutex_unlock(&wg->socket_update_lock); -+ synchronize_rcu(); -+ synchronize_net(); -+ sock_free(old4); -+ sock_free(old6); -+} ---- /dev/null -+++ b/drivers/net/wireguard/socket.h -@@ -0,0 +1,44 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. -+ */ -+ -+#ifndef _WG_SOCKET_H -+#define _WG_SOCKET_H -+ -+#include -+#include -+#include -+#include -+ -+int wg_socket_init(struct wg_device *wg, u16 port); -+void wg_socket_reinit(struct wg_device *wg, struct sock *new4, -+ struct sock *new6); -+int wg_socket_send_buffer_to_peer(struct wg_peer *peer, void *data, -+ size_t len, u8 ds); -+int wg_socket_send_skb_to_peer(struct wg_peer *peer, struct sk_buff *skb, -+ u8 ds); -+int wg_socket_send_buffer_as_reply_to_skb(struct wg_device *wg, -+ struct sk_buff *in_skb, -+ void *out_buffer, size_t len); -+ -+int wg_socket_endpoint_from_skb(struct endpoint *endpoint, -+ const struct sk_buff *skb); -+void wg_socket_set_peer_endpoint(struct wg_peer *peer, -+ const struct endpoint *endpoint); -+void wg_socket_set_peer_endpoint_from_skb(struct wg_peer *peer, -+ const struct sk_buff *skb); -+void wg_socket_clear_peer_endpoint_src(struct wg_peer *peer); -+ -+#if defined(CONFIG_DYNAMIC_DEBUG) || defined(DEBUG) -+#define net_dbg_skb_ratelimited(fmt, dev, skb, ...) do { \ -+ struct endpoint __endpoint; \ -+ wg_socket_endpoint_from_skb(&__endpoint, skb); \ -+ net_dbg_ratelimited(fmt, dev, &__endpoint.addr, \ -+ ##__VA_ARGS__); \ -+ } while (0) -+#else -+#define net_dbg_skb_ratelimited(fmt, skb, ...) -+#endif -+ -+#endif /* _WG_SOCKET_H */ ---- /dev/null -+++ b/drivers/net/wireguard/timers.c -@@ -0,0 +1,243 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. -+ */ -+ -+#include "timers.h" -+#include "device.h" -+#include "peer.h" -+#include "queueing.h" -+#include "socket.h" -+ -+/* -+ * - Timer for retransmitting the handshake if we don't hear back after -+ * `REKEY_TIMEOUT + jitter` ms. -+ * -+ * - Timer for sending empty packet if we have received a packet but after have -+ * not sent one for `KEEPALIVE_TIMEOUT` ms. -+ * -+ * - Timer for initiating new handshake if we have sent a packet but after have -+ * not received one (even empty) for `(KEEPALIVE_TIMEOUT + REKEY_TIMEOUT) + -+ * jitter` ms. -+ * -+ * - Timer for zeroing out all ephemeral keys after `(REJECT_AFTER_TIME * 3)` ms -+ * if no new keys have been received. -+ * -+ * - Timer for, if enabled, sending an empty authenticated packet every user- -+ * specified seconds. 
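In practice the interplay of these timers is easiest to watch from userspace: with a persistent keepalive configured and an unreachable endpoint, the retransmit-handshake timer fires roughly every REKEY_TIMEOUT (5 s) plus jitter, which shows up as slowly growing tx counters. A throwaway-interface sketch, where wgtest0, the generated keys and the TEST-NET endpoint are only placeholders:

    ip link add wgtest0 type wireguard
    peer_pub="$(wg genkey | wg pubkey)"
    wg set wgtest0 private-key <(wg genkey) peer "$peer_pub" \
        endpoint 192.0.2.1:51820 persistent-keepalive 5
    ip link set up dev wgtest0
    watch -n 1 'wg show wgtest0 latest-handshakes; wg show wgtest0 transfer'
    # Ctrl-C the watch when done, then:
    ip link del wgtest0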
-+ */ -+ -+static inline void mod_peer_timer(struct wg_peer *peer, -+ struct timer_list *timer, -+ unsigned long expires) -+{ -+ rcu_read_lock_bh(); -+ if (likely(netif_running(peer->device->dev) && -+ !READ_ONCE(peer->is_dead))) -+ mod_timer(timer, expires); -+ rcu_read_unlock_bh(); -+} -+ -+static void wg_expired_retransmit_handshake(struct timer_list *timer) -+{ -+ struct wg_peer *peer = from_timer(peer, timer, -+ timer_retransmit_handshake); -+ -+ if (peer->timer_handshake_attempts > MAX_TIMER_HANDSHAKES) { -+ pr_debug("%s: Handshake for peer %llu (%pISpfsc) did not complete after %d attempts, giving up\n", -+ peer->device->dev->name, peer->internal_id, -+ &peer->endpoint.addr, MAX_TIMER_HANDSHAKES + 2); -+ -+ del_timer(&peer->timer_send_keepalive); -+ /* We drop all packets without a keypair and don't try again, -+ * if we try unsuccessfully for too long to make a handshake. -+ */ -+ wg_packet_purge_staged_packets(peer); -+ -+ /* We set a timer for destroying any residue that might be left -+ * of a partial exchange. -+ */ -+ if (!timer_pending(&peer->timer_zero_key_material)) -+ mod_peer_timer(peer, &peer->timer_zero_key_material, -+ jiffies + REJECT_AFTER_TIME * 3 * HZ); -+ } else { -+ ++peer->timer_handshake_attempts; -+ pr_debug("%s: Handshake for peer %llu (%pISpfsc) did not complete after %d seconds, retrying (try %d)\n", -+ peer->device->dev->name, peer->internal_id, -+ &peer->endpoint.addr, REKEY_TIMEOUT, -+ peer->timer_handshake_attempts + 1); -+ -+ /* We clear the endpoint address src address, in case this is -+ * the cause of trouble. -+ */ -+ wg_socket_clear_peer_endpoint_src(peer); -+ -+ wg_packet_send_queued_handshake_initiation(peer, true); -+ } -+} -+ -+static void wg_expired_send_keepalive(struct timer_list *timer) -+{ -+ struct wg_peer *peer = from_timer(peer, timer, timer_send_keepalive); -+ -+ wg_packet_send_keepalive(peer); -+ if (peer->timer_need_another_keepalive) { -+ peer->timer_need_another_keepalive = false; -+ mod_peer_timer(peer, &peer->timer_send_keepalive, -+ jiffies + KEEPALIVE_TIMEOUT * HZ); -+ } -+} -+ -+static void wg_expired_new_handshake(struct timer_list *timer) -+{ -+ struct wg_peer *peer = from_timer(peer, timer, timer_new_handshake); -+ -+ pr_debug("%s: Retrying handshake with peer %llu (%pISpfsc) because we stopped hearing back after %d seconds\n", -+ peer->device->dev->name, peer->internal_id, -+ &peer->endpoint.addr, KEEPALIVE_TIMEOUT + REKEY_TIMEOUT); -+ /* We clear the endpoint address src address, in case this is the cause -+ * of trouble. -+ */ -+ wg_socket_clear_peer_endpoint_src(peer); -+ wg_packet_send_queued_handshake_initiation(peer, false); -+} -+ -+static void wg_expired_zero_key_material(struct timer_list *timer) -+{ -+ struct wg_peer *peer = from_timer(peer, timer, timer_zero_key_material); -+ -+ rcu_read_lock_bh(); -+ if (!READ_ONCE(peer->is_dead)) { -+ wg_peer_get(peer); -+ if (!queue_work(peer->device->handshake_send_wq, -+ &peer->clear_peer_work)) -+ /* If the work was already on the queue, we want to drop -+ * the extra reference. 
-+ */ -+ wg_peer_put(peer); -+ } -+ rcu_read_unlock_bh(); -+} -+ -+static void wg_queued_expired_zero_key_material(struct work_struct *work) -+{ -+ struct wg_peer *peer = container_of(work, struct wg_peer, -+ clear_peer_work); -+ -+ pr_debug("%s: Zeroing out all keys for peer %llu (%pISpfsc), since we haven't received a new one in %d seconds\n", -+ peer->device->dev->name, peer->internal_id, -+ &peer->endpoint.addr, REJECT_AFTER_TIME * 3); -+ wg_noise_handshake_clear(&peer->handshake); -+ wg_noise_keypairs_clear(&peer->keypairs); -+ wg_peer_put(peer); -+} -+ -+static void wg_expired_send_persistent_keepalive(struct timer_list *timer) -+{ -+ struct wg_peer *peer = from_timer(peer, timer, -+ timer_persistent_keepalive); -+ -+ if (likely(peer->persistent_keepalive_interval)) -+ wg_packet_send_keepalive(peer); -+} -+ -+/* Should be called after an authenticated data packet is sent. */ -+void wg_timers_data_sent(struct wg_peer *peer) -+{ -+ if (!timer_pending(&peer->timer_new_handshake)) -+ mod_peer_timer(peer, &peer->timer_new_handshake, -+ jiffies + (KEEPALIVE_TIMEOUT + REKEY_TIMEOUT) * HZ + -+ prandom_u32_max(REKEY_TIMEOUT_JITTER_MAX_JIFFIES)); -+} -+ -+/* Should be called after an authenticated data packet is received. */ -+void wg_timers_data_received(struct wg_peer *peer) -+{ -+ if (likely(netif_running(peer->device->dev))) { -+ if (!timer_pending(&peer->timer_send_keepalive)) -+ mod_peer_timer(peer, &peer->timer_send_keepalive, -+ jiffies + KEEPALIVE_TIMEOUT * HZ); -+ else -+ peer->timer_need_another_keepalive = true; -+ } -+} -+ -+/* Should be called after any type of authenticated packet is sent, whether -+ * keepalive, data, or handshake. -+ */ -+void wg_timers_any_authenticated_packet_sent(struct wg_peer *peer) -+{ -+ del_timer(&peer->timer_send_keepalive); -+} -+ -+/* Should be called after any type of authenticated packet is received, whether -+ * keepalive, data, or handshake. -+ */ -+void wg_timers_any_authenticated_packet_received(struct wg_peer *peer) -+{ -+ del_timer(&peer->timer_new_handshake); -+} -+ -+/* Should be called after a handshake initiation message is sent. */ -+void wg_timers_handshake_initiated(struct wg_peer *peer) -+{ -+ mod_peer_timer(peer, &peer->timer_retransmit_handshake, -+ jiffies + REKEY_TIMEOUT * HZ + -+ prandom_u32_max(REKEY_TIMEOUT_JITTER_MAX_JIFFIES)); -+} -+ -+/* Should be called after a handshake response message is received and processed -+ * or when getting key confirmation via the first data message. -+ */ -+void wg_timers_handshake_complete(struct wg_peer *peer) -+{ -+ del_timer(&peer->timer_retransmit_handshake); -+ peer->timer_handshake_attempts = 0; -+ peer->sent_lastminute_handshake = false; -+ ktime_get_real_ts64(&peer->walltime_last_handshake); -+} -+ -+/* Should be called after an ephemeral key is created, which is before sending a -+ * handshake response or after receiving a handshake response. -+ */ -+void wg_timers_session_derived(struct wg_peer *peer) -+{ -+ mod_peer_timer(peer, &peer->timer_zero_key_material, -+ jiffies + REJECT_AFTER_TIME * 3 * HZ); -+} -+ -+/* Should be called before a packet with authentication, whether -+ * keepalive, data, or handshakem is sent, or after one is received. 
-+ */ -+void wg_timers_any_authenticated_packet_traversal(struct wg_peer *peer) -+{ -+ if (peer->persistent_keepalive_interval) -+ mod_peer_timer(peer, &peer->timer_persistent_keepalive, -+ jiffies + peer->persistent_keepalive_interval * HZ); -+} -+ -+void wg_timers_init(struct wg_peer *peer) -+{ -+ timer_setup(&peer->timer_retransmit_handshake, -+ wg_expired_retransmit_handshake, 0); -+ timer_setup(&peer->timer_send_keepalive, wg_expired_send_keepalive, 0); -+ timer_setup(&peer->timer_new_handshake, wg_expired_new_handshake, 0); -+ timer_setup(&peer->timer_zero_key_material, -+ wg_expired_zero_key_material, 0); -+ timer_setup(&peer->timer_persistent_keepalive, -+ wg_expired_send_persistent_keepalive, 0); -+ INIT_WORK(&peer->clear_peer_work, wg_queued_expired_zero_key_material); -+ peer->timer_handshake_attempts = 0; -+ peer->sent_lastminute_handshake = false; -+ peer->timer_need_another_keepalive = false; -+} -+ -+void wg_timers_stop(struct wg_peer *peer) -+{ -+ del_timer_sync(&peer->timer_retransmit_handshake); -+ del_timer_sync(&peer->timer_send_keepalive); -+ del_timer_sync(&peer->timer_new_handshake); -+ del_timer_sync(&peer->timer_zero_key_material); -+ del_timer_sync(&peer->timer_persistent_keepalive); -+ flush_work(&peer->clear_peer_work); -+} ---- /dev/null -+++ b/drivers/net/wireguard/timers.h -@@ -0,0 +1,31 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. -+ */ -+ -+#ifndef _WG_TIMERS_H -+#define _WG_TIMERS_H -+ -+#include -+ -+struct wg_peer; -+ -+void wg_timers_init(struct wg_peer *peer); -+void wg_timers_stop(struct wg_peer *peer); -+void wg_timers_data_sent(struct wg_peer *peer); -+void wg_timers_data_received(struct wg_peer *peer); -+void wg_timers_any_authenticated_packet_sent(struct wg_peer *peer); -+void wg_timers_any_authenticated_packet_received(struct wg_peer *peer); -+void wg_timers_handshake_initiated(struct wg_peer *peer); -+void wg_timers_handshake_complete(struct wg_peer *peer); -+void wg_timers_session_derived(struct wg_peer *peer); -+void wg_timers_any_authenticated_packet_traversal(struct wg_peer *peer); -+ -+static inline bool wg_birthdate_has_expired(u64 birthday_nanoseconds, -+ u64 expiration_seconds) -+{ -+ return (s64)(birthday_nanoseconds + expiration_seconds * NSEC_PER_SEC) -+ <= (s64)ktime_get_coarse_boottime_ns(); -+} -+ -+#endif /* _WG_TIMERS_H */ ---- /dev/null -+++ b/drivers/net/wireguard/version.h -@@ -0,0 +1 @@ -+#define WIREGUARD_VERSION "1.0.0" ---- /dev/null -+++ b/include/uapi/linux/wireguard.h -@@ -0,0 +1,196 @@ -+/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR MIT */ -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. -+ * -+ * Documentation -+ * ============= -+ * -+ * The below enums and macros are for interfacing with WireGuard, using generic -+ * netlink, with family WG_GENL_NAME and version WG_GENL_VERSION. It defines two -+ * methods: get and set. Note that while they share many common attributes, -+ * these two functions actually accept a slightly different set of inputs and -+ * outputs. -+ * -+ * WG_CMD_GET_DEVICE -+ * ----------------- -+ * -+ * May only be called via NLM_F_REQUEST | NLM_F_DUMP. 
The command should contain -+ * one but not both of: -+ * -+ * WGDEVICE_A_IFINDEX: NLA_U32 -+ * WGDEVICE_A_IFNAME: NLA_NUL_STRING, maxlen IFNAMESIZ - 1 -+ * -+ * The kernel will then return several messages (NLM_F_MULTI) containing the -+ * following tree of nested items: -+ * -+ * WGDEVICE_A_IFINDEX: NLA_U32 -+ * WGDEVICE_A_IFNAME: NLA_NUL_STRING, maxlen IFNAMESIZ - 1 -+ * WGDEVICE_A_PRIVATE_KEY: NLA_EXACT_LEN, len WG_KEY_LEN -+ * WGDEVICE_A_PUBLIC_KEY: NLA_EXACT_LEN, len WG_KEY_LEN -+ * WGDEVICE_A_LISTEN_PORT: NLA_U16 -+ * WGDEVICE_A_FWMARK: NLA_U32 -+ * WGDEVICE_A_PEERS: NLA_NESTED -+ * 0: NLA_NESTED -+ * WGPEER_A_PUBLIC_KEY: NLA_EXACT_LEN, len WG_KEY_LEN -+ * WGPEER_A_PRESHARED_KEY: NLA_EXACT_LEN, len WG_KEY_LEN -+ * WGPEER_A_ENDPOINT: NLA_MIN_LEN(struct sockaddr), struct sockaddr_in or struct sockaddr_in6 -+ * WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL: NLA_U16 -+ * WGPEER_A_LAST_HANDSHAKE_TIME: NLA_EXACT_LEN, struct __kernel_timespec -+ * WGPEER_A_RX_BYTES: NLA_U64 -+ * WGPEER_A_TX_BYTES: NLA_U64 -+ * WGPEER_A_ALLOWEDIPS: NLA_NESTED -+ * 0: NLA_NESTED -+ * WGALLOWEDIP_A_FAMILY: NLA_U16 -+ * WGALLOWEDIP_A_IPADDR: NLA_MIN_LEN(struct in_addr), struct in_addr or struct in6_addr -+ * WGALLOWEDIP_A_CIDR_MASK: NLA_U8 -+ * 0: NLA_NESTED -+ * ... -+ * 0: NLA_NESTED -+ * ... -+ * ... -+ * WGPEER_A_PROTOCOL_VERSION: NLA_U32 -+ * 0: NLA_NESTED -+ * ... -+ * ... -+ * -+ * It is possible that all of the allowed IPs of a single peer will not -+ * fit within a single netlink message. In that case, the same peer will -+ * be written in the following message, except it will only contain -+ * WGPEER_A_PUBLIC_KEY and WGPEER_A_ALLOWEDIPS. This may occur several -+ * times in a row for the same peer. It is then up to the receiver to -+ * coalesce adjacent peers. Likewise, it is possible that all peers will -+ * not fit within a single message. So, subsequent peers will be sent -+ * in following messages, except those will only contain WGDEVICE_A_IFNAME -+ * and WGDEVICE_A_PEERS. It is then up to the receiver to coalesce these -+ * messages to form the complete list of peers. -+ * -+ * Since this is an NLA_F_DUMP command, the final message will always be -+ * NLMSG_DONE, even if an error occurs. However, this NLMSG_DONE message -+ * contains an integer error code. It is either zero or a negative error -+ * code corresponding to the errno. -+ * -+ * WG_CMD_SET_DEVICE -+ * ----------------- -+ * -+ * May only be called via NLM_F_REQUEST. The command should contain the -+ * following tree of nested items, containing one but not both of -+ * WGDEVICE_A_IFINDEX and WGDEVICE_A_IFNAME: -+ * -+ * WGDEVICE_A_IFINDEX: NLA_U32 -+ * WGDEVICE_A_IFNAME: NLA_NUL_STRING, maxlen IFNAMESIZ - 1 -+ * WGDEVICE_A_FLAGS: NLA_U32, 0 or WGDEVICE_F_REPLACE_PEERS if all current -+ * peers should be removed prior to adding the list below. -+ * WGDEVICE_A_PRIVATE_KEY: len WG_KEY_LEN, all zeros to remove -+ * WGDEVICE_A_LISTEN_PORT: NLA_U16, 0 to choose randomly -+ * WGDEVICE_A_FWMARK: NLA_U32, 0 to disable -+ * WGDEVICE_A_PEERS: NLA_NESTED -+ * 0: NLA_NESTED -+ * WGPEER_A_PUBLIC_KEY: len WG_KEY_LEN -+ * WGPEER_A_FLAGS: NLA_U32, 0 and/or WGPEER_F_REMOVE_ME if the -+ * specified peer should not exist at the end of the -+ * operation, rather than added/updated and/or -+ * WGPEER_F_REPLACE_ALLOWEDIPS if all current allowed -+ * IPs of this peer should be removed prior to adding -+ * the list below and/or WGPEER_F_UPDATE_ONLY if the -+ * peer should only be set if it already exists. 
-+ * WGPEER_A_PRESHARED_KEY: len WG_KEY_LEN, all zeros to remove -+ * WGPEER_A_ENDPOINT: struct sockaddr_in or struct sockaddr_in6 -+ * WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL: NLA_U16, 0 to disable -+ * WGPEER_A_ALLOWEDIPS: NLA_NESTED -+ * 0: NLA_NESTED -+ * WGALLOWEDIP_A_FAMILY: NLA_U16 -+ * WGALLOWEDIP_A_IPADDR: struct in_addr or struct in6_addr -+ * WGALLOWEDIP_A_CIDR_MASK: NLA_U8 -+ * 0: NLA_NESTED -+ * ... -+ * 0: NLA_NESTED -+ * ... -+ * ... -+ * WGPEER_A_PROTOCOL_VERSION: NLA_U32, should not be set or used at -+ * all by most users of this API, as the -+ * most recent protocol will be used when -+ * this is unset. Otherwise, must be set -+ * to 1. -+ * 0: NLA_NESTED -+ * ... -+ * ... -+ * -+ * It is possible that the amount of configuration data exceeds that of -+ * the maximum message length accepted by the kernel. In that case, several -+ * messages should be sent one after another, with each successive one -+ * filling in information not contained in the prior. Note that if -+ * WGDEVICE_F_REPLACE_PEERS is specified in the first message, it probably -+ * should not be specified in fragments that come after, so that the list -+ * of peers is only cleared the first time but appened after. Likewise for -+ * peers, if WGPEER_F_REPLACE_ALLOWEDIPS is specified in the first message -+ * of a peer, it likely should not be specified in subsequent fragments. -+ * -+ * If an error occurs, NLMSG_ERROR will reply containing an errno. -+ */ -+ -+#ifndef _WG_UAPI_WIREGUARD_H -+#define _WG_UAPI_WIREGUARD_H -+ -+#define WG_GENL_NAME "wireguard" -+#define WG_GENL_VERSION 1 -+ -+#define WG_KEY_LEN 32 -+ -+enum wg_cmd { -+ WG_CMD_GET_DEVICE, -+ WG_CMD_SET_DEVICE, -+ __WG_CMD_MAX -+}; -+#define WG_CMD_MAX (__WG_CMD_MAX - 1) -+ -+enum wgdevice_flag { -+ WGDEVICE_F_REPLACE_PEERS = 1U << 0, -+ __WGDEVICE_F_ALL = WGDEVICE_F_REPLACE_PEERS -+}; -+enum wgdevice_attribute { -+ WGDEVICE_A_UNSPEC, -+ WGDEVICE_A_IFINDEX, -+ WGDEVICE_A_IFNAME, -+ WGDEVICE_A_PRIVATE_KEY, -+ WGDEVICE_A_PUBLIC_KEY, -+ WGDEVICE_A_FLAGS, -+ WGDEVICE_A_LISTEN_PORT, -+ WGDEVICE_A_FWMARK, -+ WGDEVICE_A_PEERS, -+ __WGDEVICE_A_LAST -+}; -+#define WGDEVICE_A_MAX (__WGDEVICE_A_LAST - 1) -+ -+enum wgpeer_flag { -+ WGPEER_F_REMOVE_ME = 1U << 0, -+ WGPEER_F_REPLACE_ALLOWEDIPS = 1U << 1, -+ WGPEER_F_UPDATE_ONLY = 1U << 2, -+ __WGPEER_F_ALL = WGPEER_F_REMOVE_ME | WGPEER_F_REPLACE_ALLOWEDIPS | -+ WGPEER_F_UPDATE_ONLY -+}; -+enum wgpeer_attribute { -+ WGPEER_A_UNSPEC, -+ WGPEER_A_PUBLIC_KEY, -+ WGPEER_A_PRESHARED_KEY, -+ WGPEER_A_FLAGS, -+ WGPEER_A_ENDPOINT, -+ WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL, -+ WGPEER_A_LAST_HANDSHAKE_TIME, -+ WGPEER_A_RX_BYTES, -+ WGPEER_A_TX_BYTES, -+ WGPEER_A_ALLOWEDIPS, -+ WGPEER_A_PROTOCOL_VERSION, -+ __WGPEER_A_LAST -+}; -+#define WGPEER_A_MAX (__WGPEER_A_LAST - 1) -+ -+enum wgallowedip_attribute { -+ WGALLOWEDIP_A_UNSPEC, -+ WGALLOWEDIP_A_FAMILY, -+ WGALLOWEDIP_A_IPADDR, -+ WGALLOWEDIP_A_CIDR_MASK, -+ __WGALLOWEDIP_A_LAST -+}; -+#define WGALLOWEDIP_A_MAX (__WGALLOWEDIP_A_LAST - 1) -+ -+#endif /* _WG_UAPI_WIREGUARD_H */ ---- /dev/null -+++ b/tools/testing/selftests/wireguard/netns.sh -@@ -0,0 +1,537 @@ -+#!/bin/bash -+# SPDX-License-Identifier: GPL-2.0 -+# -+# Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
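The generic-netlink interface documented in wireguard.h above is what the wg(8) tool, and the netns.sh selftest that follows, speaks. A minimal round trip through both commands looks roughly like this, with wgtest0 and the generated keys serving only as examples:

    ip link add wgtest0 type wireguard
    wg set wgtest0 listen-port 51820 private-key <(wg genkey)        # WG_CMD_SET_DEVICE
    peer_pub="$(wg genkey | wg pubkey)"
    wg set wgtest0 peer "$peer_pub" allowed-ips 10.0.0.2/32,fd00::2/128
    wg show wgtest0                                                  # WG_CMD_GET_DEVICE, NLM_F_DUMP
    ip link del wgtest0

When the peer or allowed-ips list grows past a single netlink message, wg(8) coalesces the split dump responses described above transparently.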
-+# -+# This script tests the below topology: -+# -+# ┌─────────────────────┐ ┌──────────────────────────────────┐ ┌─────────────────────┐ -+# │ $ns1 namespace │ │ $ns0 namespace │ │ $ns2 namespace │ -+# │ │ │ │ │ │ -+# │┌────────┐ │ │ ┌────────┐ │ │ ┌────────┐│ -+# ││ wg0 │───────────┼───┼────────────│ lo │────────────┼───┼───────────│ wg0 ││ -+# │├────────┴──────────┐│ │ ┌───────┴────────┴────────┐ │ │┌──────────┴────────┤│ -+# ││192.168.241.1/24 ││ │ │(ns1) (ns2) │ │ ││192.168.241.2/24 ││ -+# ││fd00::1/24 ││ │ │127.0.0.1:1 127.0.0.1:2│ │ ││fd00::2/24 ││ -+# │└───────────────────┘│ │ │[::]:1 [::]:2 │ │ │└───────────────────┘│ -+# └─────────────────────┘ │ └─────────────────────────┘ │ └─────────────────────┘ -+# └──────────────────────────────────┘ -+# -+# After the topology is prepared we run a series of TCP/UDP iperf3 tests between the -+# wireguard peers in $ns1 and $ns2. Note that $ns0 is the endpoint for the wg0 -+# interfaces in $ns1 and $ns2. See https://www.wireguard.com/netns/ for further -+# details on how this is accomplished. -+set -e -+ -+exec 3>&1 -+export WG_HIDE_KEYS=never -+netns0="wg-test-$$-0" -+netns1="wg-test-$$-1" -+netns2="wg-test-$$-2" -+pretty() { echo -e "\x1b[32m\x1b[1m[+] ${1:+NS$1: }${2}\x1b[0m" >&3; } -+pp() { pretty "" "$*"; "$@"; } -+maybe_exec() { if [[ $BASHPID -eq $$ ]]; then "$@"; else exec "$@"; fi; } -+n0() { pretty 0 "$*"; maybe_exec ip netns exec $netns0 "$@"; } -+n1() { pretty 1 "$*"; maybe_exec ip netns exec $netns1 "$@"; } -+n2() { pretty 2 "$*"; maybe_exec ip netns exec $netns2 "$@"; } -+ip0() { pretty 0 "ip $*"; ip -n $netns0 "$@"; } -+ip1() { pretty 1 "ip $*"; ip -n $netns1 "$@"; } -+ip2() { pretty 2 "ip $*"; ip -n $netns2 "$@"; } -+sleep() { read -t "$1" -N 0 || true; } -+waitiperf() { pretty "${1//*-}" "wait for iperf:5201"; while [[ $(ss -N "$1" -tlp 'sport = 5201') != *iperf3* ]]; do sleep 0.1; done; } -+waitncatudp() { pretty "${1//*-}" "wait for udp:1111"; while [[ $(ss -N "$1" -ulp 'sport = 1111') != *ncat* ]]; do sleep 0.1; done; } -+waitncattcp() { pretty "${1//*-}" "wait for tcp:1111"; while [[ $(ss -N "$1" -tlp 'sport = 1111') != *ncat* ]]; do sleep 0.1; done; } -+waitiface() { pretty "${1//*-}" "wait for $2 to come up"; ip netns exec "$1" bash -c "while [[ \$(< \"/sys/class/net/$2/operstate\") != up ]]; do read -t .1 -N 0 || true; done;"; } -+ -+cleanup() { -+ set +e -+ exec 2>/dev/null -+ printf "$orig_message_cost" > /proc/sys/net/core/message_cost -+ ip0 link del dev wg0 -+ ip1 link del dev wg0 -+ ip2 link del dev wg0 -+ local to_kill="$(ip netns pids $netns0) $(ip netns pids $netns1) $(ip netns pids $netns2)" -+ [[ -n $to_kill ]] && kill $to_kill -+ pp ip netns del $netns1 -+ pp ip netns del $netns2 -+ pp ip netns del $netns0 -+ exit -+} -+ -+orig_message_cost="$(< /proc/sys/net/core/message_cost)" -+trap cleanup EXIT -+printf 0 > /proc/sys/net/core/message_cost -+ -+ip netns del $netns0 2>/dev/null || true -+ip netns del $netns1 2>/dev/null || true -+ip netns del $netns2 2>/dev/null || true -+pp ip netns add $netns0 -+pp ip netns add $netns1 -+pp ip netns add $netns2 -+ip0 link set up dev lo -+ -+ip0 link add dev wg0 type wireguard -+ip0 link set wg0 netns $netns1 -+ip0 link add dev wg0 type wireguard -+ip0 link set wg0 netns $netns2 -+key1="$(pp wg genkey)" -+key2="$(pp wg genkey)" -+key3="$(pp wg genkey)" -+pub1="$(pp wg pubkey <<<"$key1")" -+pub2="$(pp wg pubkey <<<"$key2")" -+pub3="$(pp wg pubkey <<<"$key3")" -+psk="$(pp wg genpsk)" -+[[ -n $key1 && -n $key2 && -n $psk ]] -+ -+configure_peers() { -+ ip1 addr add 
192.168.241.1/24 dev wg0 -+ ip1 addr add fd00::1/24 dev wg0 -+ -+ ip2 addr add 192.168.241.2/24 dev wg0 -+ ip2 addr add fd00::2/24 dev wg0 -+ -+ n1 wg set wg0 \ -+ private-key <(echo "$key1") \ -+ listen-port 1 \ -+ peer "$pub2" \ -+ preshared-key <(echo "$psk") \ -+ allowed-ips 192.168.241.2/32,fd00::2/128 -+ n2 wg set wg0 \ -+ private-key <(echo "$key2") \ -+ listen-port 2 \ -+ peer "$pub1" \ -+ preshared-key <(echo "$psk") \ -+ allowed-ips 192.168.241.1/32,fd00::1/128 -+ -+ ip1 link set up dev wg0 -+ ip2 link set up dev wg0 -+} -+configure_peers -+ -+tests() { -+ # Ping over IPv4 -+ n2 ping -c 10 -f -W 1 192.168.241.1 -+ n1 ping -c 10 -f -W 1 192.168.241.2 -+ -+ # Ping over IPv6 -+ n2 ping6 -c 10 -f -W 1 fd00::1 -+ n1 ping6 -c 10 -f -W 1 fd00::2 -+ -+ # TCP over IPv4 -+ n2 iperf3 -s -1 -B 192.168.241.2 & -+ waitiperf $netns2 -+ n1 iperf3 -Z -t 3 -c 192.168.241.2 -+ -+ # TCP over IPv6 -+ n1 iperf3 -s -1 -B fd00::1 & -+ waitiperf $netns1 -+ n2 iperf3 -Z -t 3 -c fd00::1 -+ -+ # UDP over IPv4 -+ n1 iperf3 -s -1 -B 192.168.241.1 & -+ waitiperf $netns1 -+ n2 iperf3 -Z -t 3 -b 0 -u -c 192.168.241.1 -+ -+ # UDP over IPv6 -+ n2 iperf3 -s -1 -B fd00::2 & -+ waitiperf $netns2 -+ n1 iperf3 -Z -t 3 -b 0 -u -c fd00::2 -+} -+ -+[[ $(ip1 link show dev wg0) =~ mtu\ ([0-9]+) ]] && orig_mtu="${BASH_REMATCH[1]}" -+big_mtu=$(( 34816 - 1500 + $orig_mtu )) -+ -+# Test using IPv4 as outer transport -+n1 wg set wg0 peer "$pub2" endpoint 127.0.0.1:2 -+n2 wg set wg0 peer "$pub1" endpoint 127.0.0.1:1 -+# Before calling tests, we first make sure that the stats counters and timestamper are working -+n2 ping -c 10 -f -W 1 192.168.241.1 -+{ read _; read _; read _; read rx_bytes _; read _; read tx_bytes _; } < <(ip2 -stats link show dev wg0) -+(( rx_bytes == 1372 && (tx_bytes == 1428 || tx_bytes == 1460) )) -+{ read _; read _; read _; read rx_bytes _; read _; read tx_bytes _; } < <(ip1 -stats link show dev wg0) -+(( tx_bytes == 1372 && (rx_bytes == 1428 || rx_bytes == 1460) )) -+read _ rx_bytes tx_bytes < <(n2 wg show wg0 transfer) -+(( rx_bytes == 1372 && (tx_bytes == 1428 || tx_bytes == 1460) )) -+read _ rx_bytes tx_bytes < <(n1 wg show wg0 transfer) -+(( tx_bytes == 1372 && (rx_bytes == 1428 || rx_bytes == 1460) )) -+read _ timestamp < <(n1 wg show wg0 latest-handshakes) -+(( timestamp != 0 )) -+ -+tests -+ip1 link set wg0 mtu $big_mtu -+ip2 link set wg0 mtu $big_mtu -+tests -+ -+ip1 link set wg0 mtu $orig_mtu -+ip2 link set wg0 mtu $orig_mtu -+ -+# Test using IPv6 as outer transport -+n1 wg set wg0 peer "$pub2" endpoint [::1]:2 -+n2 wg set wg0 peer "$pub1" endpoint [::1]:1 -+tests -+ip1 link set wg0 mtu $big_mtu -+ip2 link set wg0 mtu $big_mtu -+tests -+ -+# Test that route MTUs work with the padding -+ip1 link set wg0 mtu 1300 -+ip2 link set wg0 mtu 1300 -+n1 wg set wg0 peer "$pub2" endpoint 127.0.0.1:2 -+n2 wg set wg0 peer "$pub1" endpoint 127.0.0.1:1 -+n0 iptables -A INPUT -m length --length 1360 -j DROP -+n1 ip route add 192.168.241.2/32 dev wg0 mtu 1299 -+n2 ip route add 192.168.241.1/32 dev wg0 mtu 1299 -+n2 ping -c 1 -W 1 -s 1269 192.168.241.1 -+n2 ip route delete 192.168.241.1/32 dev wg0 mtu 1299 -+n1 ip route delete 192.168.241.2/32 dev wg0 mtu 1299 -+n0 iptables -F INPUT -+ -+ip1 link set wg0 mtu $orig_mtu -+ip2 link set wg0 mtu $orig_mtu -+ -+# Test using IPv4 that roaming works -+ip0 -4 addr del 127.0.0.1/8 dev lo -+ip0 -4 addr add 127.212.121.99/8 dev lo -+n1 wg set wg0 listen-port 9999 -+n1 wg set wg0 peer "$pub2" endpoint 127.0.0.1:2 -+n1 ping6 -W 1 -c 1 fd00::2 -+[[ $(n2 wg show wg0 endpoints) == 
"$pub1 127.212.121.99:9999" ]] -+ -+# Test using IPv6 that roaming works -+n1 wg set wg0 listen-port 9998 -+n1 wg set wg0 peer "$pub2" endpoint [::1]:2 -+n1 ping -W 1 -c 1 192.168.241.2 -+[[ $(n2 wg show wg0 endpoints) == "$pub1 [::1]:9998" ]] -+ -+# Test that crypto-RP filter works -+n1 wg set wg0 peer "$pub2" allowed-ips 192.168.241.0/24 -+exec 4< <(n1 ncat -l -u -p 1111) -+ncat_pid=$! -+waitncatudp $netns1 -+n2 ncat -u 192.168.241.1 1111 <<<"X" -+read -r -N 1 -t 1 out <&4 && [[ $out == "X" ]] -+kill $ncat_pid -+more_specific_key="$(pp wg genkey | pp wg pubkey)" -+n1 wg set wg0 peer "$more_specific_key" allowed-ips 192.168.241.2/32 -+n2 wg set wg0 listen-port 9997 -+exec 4< <(n1 ncat -l -u -p 1111) -+ncat_pid=$! -+waitncatudp $netns1 -+n2 ncat -u 192.168.241.1 1111 <<<"X" -+! read -r -N 1 -t 1 out <&4 || false -+kill $ncat_pid -+n1 wg set wg0 peer "$more_specific_key" remove -+[[ $(n1 wg show wg0 endpoints) == "$pub2 [::1]:9997" ]] -+ -+# Test that we can change private keys keys and immediately handshake -+n1 wg set wg0 private-key <(echo "$key1") peer "$pub2" preshared-key <(echo "$psk") allowed-ips 192.168.241.2/32 endpoint 127.0.0.1:2 -+n2 wg set wg0 private-key <(echo "$key2") listen-port 2 peer "$pub1" preshared-key <(echo "$psk") allowed-ips 192.168.241.1/32 -+n1 ping -W 1 -c 1 192.168.241.2 -+n1 wg set wg0 private-key <(echo "$key3") -+n2 wg set wg0 peer "$pub3" preshared-key <(echo "$psk") allowed-ips 192.168.241.1/32 peer "$pub1" remove -+n1 ping -W 1 -c 1 192.168.241.2 -+ -+ip1 link del wg0 -+ip2 link del wg0 -+ -+# Test using NAT. We now change the topology to this: -+# ┌────────────────────────────────────────┐ ┌────────────────────────────────────────────────┐ ┌────────────────────────────────────────┐ -+# │ $ns1 namespace │ │ $ns0 namespace │ │ $ns2 namespace │ -+# │ │ │ │ │ │ -+# │ ┌─────┐ ┌─────┐ │ │ ┌──────┐ ┌──────┐ │ │ ┌─────┐ ┌─────┐ │ -+# │ │ wg0 │─────────────│vethc│───────────┼────┼────│vethrc│ │vethrs│──────────────┼─────┼──│veths│────────────│ wg0 │ │ -+# │ ├─────┴──────────┐ ├─────┴──────────┐│ │ ├──────┴─────────┐ ├──────┴────────────┐ │ │ ├─────┴──────────┐ ├─────┴──────────┐ │ -+# │ │192.168.241.1/24│ │192.168.1.100/24││ │ │192.168.1.1/24 │ │10.0.0.1/24 │ │ │ │10.0.0.100/24 │ │192.168.241.2/24│ │ -+# │ │fd00::1/24 │ │ ││ │ │ │ │SNAT:192.168.1.0/24│ │ │ │ │ │fd00::2/24 │ │ -+# │ └────────────────┘ └────────────────┘│ │ └────────────────┘ └───────────────────┘ │ │ └────────────────┘ └────────────────┘ │ -+# └────────────────────────────────────────┘ └────────────────────────────────────────────────┘ └────────────────────────────────────────┘ -+ -+ip1 link add dev wg0 type wireguard -+ip2 link add dev wg0 type wireguard -+configure_peers -+ -+ip0 link add vethrc type veth peer name vethc -+ip0 link add vethrs type veth peer name veths -+ip0 link set vethc netns $netns1 -+ip0 link set veths netns $netns2 -+ip0 link set vethrc up -+ip0 link set vethrs up -+ip0 addr add 192.168.1.1/24 dev vethrc -+ip0 addr add 10.0.0.1/24 dev vethrs -+ip1 addr add 192.168.1.100/24 dev vethc -+ip1 link set vethc up -+ip1 route add default via 192.168.1.1 -+ip2 addr add 10.0.0.100/24 dev veths -+ip2 link set veths up -+waitiface $netns0 vethrc -+waitiface $netns0 vethrs -+waitiface $netns1 vethc -+waitiface $netns2 veths -+ -+n0 bash -c 'printf 1 > /proc/sys/net/ipv4/ip_forward' -+n0 bash -c 'printf 2 > /proc/sys/net/netfilter/nf_conntrack_udp_timeout' -+n0 bash -c 'printf 2 > /proc/sys/net/netfilter/nf_conntrack_udp_timeout_stream' -+n0 iptables -t nat -A POSTROUTING -s 
192.168.1.0/24 -d 10.0.0.0/24 -j SNAT --to 10.0.0.1 -+ -+n1 wg set wg0 peer "$pub2" endpoint 10.0.0.100:2 persistent-keepalive 1 -+n1 ping -W 1 -c 1 192.168.241.2 -+n2 ping -W 1 -c 1 192.168.241.1 -+[[ $(n2 wg show wg0 endpoints) == "$pub1 10.0.0.1:1" ]] -+# Demonstrate n2 can still send packets to n1, since persistent-keepalive will prevent connection tracking entry from expiring (to see entries: `n0 conntrack -L`). -+pp sleep 3 -+n2 ping -W 1 -c 1 192.168.241.1 -+n1 wg set wg0 peer "$pub2" persistent-keepalive 0 -+ -+# Do a wg-quick(8)-style policy routing for the default route, making sure vethc has a v6 address to tease out bugs. -+ip1 -6 addr add fc00::9/96 dev vethc -+ip1 -6 route add default via fc00::1 -+ip2 -4 addr add 192.168.99.7/32 dev wg0 -+ip2 -6 addr add abab::1111/128 dev wg0 -+n1 wg set wg0 fwmark 51820 peer "$pub2" allowed-ips 192.168.99.7,abab::1111 -+ip1 -6 route add default dev wg0 table 51820 -+ip1 -6 rule add not fwmark 51820 table 51820 -+ip1 -6 rule add table main suppress_prefixlength 0 -+ip1 -4 route add default dev wg0 table 51820 -+ip1 -4 rule add not fwmark 51820 table 51820 -+ip1 -4 rule add table main suppress_prefixlength 0 -+# suppress_prefixlength only got added in 3.12, and we want to support 3.10+. -+if [[ $(ip1 -4 rule show all) == *suppress_prefixlength* ]]; then -+ # Flood the pings instead of sending just one, to trigger routing table reference counting bugs. -+ n1 ping -W 1 -c 100 -f 192.168.99.7 -+ n1 ping -W 1 -c 100 -f abab::1111 -+fi -+ -+n0 iptables -t nat -F -+ip0 link del vethrc -+ip0 link del vethrs -+ip1 link del wg0 -+ip2 link del wg0 -+ -+# Test that saddr routing is sticky but not too sticky, changing to this topology: -+# ┌────────────────────────────────────────┐ ┌────────────────────────────────────────┐ -+# │ $ns1 namespace │ │ $ns2 namespace │ -+# │ │ │ │ -+# │ ┌─────┐ ┌─────┐ │ │ ┌─────┐ ┌─────┐ │ -+# │ │ wg0 │─────────────│veth1│───────────┼────┼──│veth2│────────────│ wg0 │ │ -+# │ ├─────┴──────────┐ ├─────┴──────────┐│ │ ├─────┴──────────┐ ├─────┴──────────┐ │ -+# │ │192.168.241.1/24│ │10.0.0.1/24 ││ │ │10.0.0.2/24 │ │192.168.241.2/24│ │ -+# │ │fd00::1/24 │ │fd00:aa::1/96 ││ │ │fd00:aa::2/96 │ │fd00::2/24 │ │ -+# │ └────────────────┘ └────────────────┘│ │ └────────────────┘ └────────────────┘ │ -+# └────────────────────────────────────────┘ └────────────────────────────────────────┘ -+ -+ip1 link add dev wg0 type wireguard -+ip2 link add dev wg0 type wireguard -+configure_peers -+ip1 link add veth1 type veth peer name veth2 -+ip1 link set veth2 netns $netns2 -+n1 bash -c 'printf 0 > /proc/sys/net/ipv6/conf/all/accept_dad' -+n2 bash -c 'printf 0 > /proc/sys/net/ipv6/conf/all/accept_dad' -+n1 bash -c 'printf 0 > /proc/sys/net/ipv6/conf/veth1/accept_dad' -+n2 bash -c 'printf 0 > /proc/sys/net/ipv6/conf/veth2/accept_dad' -+n1 bash -c 'printf 1 > /proc/sys/net/ipv4/conf/veth1/promote_secondaries' -+ -+# First we check that we aren't overly sticky and can fall over to new IPs when old ones are removed -+ip1 addr add 10.0.0.1/24 dev veth1 -+ip1 addr add fd00:aa::1/96 dev veth1 -+ip2 addr add 10.0.0.2/24 dev veth2 -+ip2 addr add fd00:aa::2/96 dev veth2 -+ip1 link set veth1 up -+ip2 link set veth2 up -+waitiface $netns1 veth1 -+waitiface $netns2 veth2 -+n1 wg set wg0 peer "$pub2" endpoint 10.0.0.2:2 -+n1 ping -W 1 -c 1 192.168.241.2 -+ip1 addr add 10.0.0.10/24 dev veth1 -+ip1 addr del 10.0.0.1/24 dev veth1 -+n1 ping -W 1 -c 1 192.168.241.2 -+n1 wg set wg0 peer "$pub2" endpoint [fd00:aa::2]:2 -+n1 ping -W 1 -c 1 192.168.241.2 -+ip1 addr 
add fd00:aa::10/96 dev veth1 -+ip1 addr del fd00:aa::1/96 dev veth1 -+n1 ping -W 1 -c 1 192.168.241.2 -+ -+# Now we show that we can successfully do reply to sender routing -+ip1 link set veth1 down -+ip2 link set veth2 down -+ip1 addr flush dev veth1 -+ip2 addr flush dev veth2 -+ip1 addr add 10.0.0.1/24 dev veth1 -+ip1 addr add 10.0.0.2/24 dev veth1 -+ip1 addr add fd00:aa::1/96 dev veth1 -+ip1 addr add fd00:aa::2/96 dev veth1 -+ip2 addr add 10.0.0.3/24 dev veth2 -+ip2 addr add fd00:aa::3/96 dev veth2 -+ip1 link set veth1 up -+ip2 link set veth2 up -+waitiface $netns1 veth1 -+waitiface $netns2 veth2 -+n2 wg set wg0 peer "$pub1" endpoint 10.0.0.1:1 -+n2 ping -W 1 -c 1 192.168.241.1 -+[[ $(n2 wg show wg0 endpoints) == "$pub1 10.0.0.1:1" ]] -+n2 wg set wg0 peer "$pub1" endpoint [fd00:aa::1]:1 -+n2 ping -W 1 -c 1 192.168.241.1 -+[[ $(n2 wg show wg0 endpoints) == "$pub1 [fd00:aa::1]:1" ]] -+n2 wg set wg0 peer "$pub1" endpoint 10.0.0.2:1 -+n2 ping -W 1 -c 1 192.168.241.1 -+[[ $(n2 wg show wg0 endpoints) == "$pub1 10.0.0.2:1" ]] -+n2 wg set wg0 peer "$pub1" endpoint [fd00:aa::2]:1 -+n2 ping -W 1 -c 1 192.168.241.1 -+[[ $(n2 wg show wg0 endpoints) == "$pub1 [fd00:aa::2]:1" ]] -+ -+# What happens if the inbound destination address belongs to a different interface as the default route? -+ip1 link add dummy0 type dummy -+ip1 addr add 10.50.0.1/24 dev dummy0 -+ip1 link set dummy0 up -+ip2 route add 10.50.0.0/24 dev veth2 -+n2 wg set wg0 peer "$pub1" endpoint 10.50.0.1:1 -+n2 ping -W 1 -c 1 192.168.241.1 -+[[ $(n2 wg show wg0 endpoints) == "$pub1 10.50.0.1:1" ]] -+ -+ip1 link del dummy0 -+ip1 addr flush dev veth1 -+ip2 addr flush dev veth2 -+ip1 route flush dev veth1 -+ip2 route flush dev veth2 -+ -+# Now we see what happens if another interface route takes precedence over an ongoing one -+ip1 link add veth3 type veth peer name veth4 -+ip1 link set veth4 netns $netns2 -+ip1 addr add 10.0.0.1/24 dev veth1 -+ip2 addr add 10.0.0.2/24 dev veth2 -+ip1 addr add 10.0.0.3/24 dev veth3 -+ip1 link set veth1 up -+ip2 link set veth2 up -+ip1 link set veth3 up -+ip2 link set veth4 up -+waitiface $netns1 veth1 -+waitiface $netns2 veth2 -+waitiface $netns1 veth3 -+waitiface $netns2 veth4 -+ip1 route flush dev veth1 -+ip1 route flush dev veth3 -+ip1 route add 10.0.0.0/24 dev veth1 src 10.0.0.1 metric 2 -+n1 wg set wg0 peer "$pub2" endpoint 10.0.0.2:2 -+n1 ping -W 1 -c 1 192.168.241.2 -+[[ $(n2 wg show wg0 endpoints) == "$pub1 10.0.0.1:1" ]] -+ip1 route add 10.0.0.0/24 dev veth3 src 10.0.0.3 metric 1 -+n1 bash -c 'printf 0 > /proc/sys/net/ipv4/conf/veth1/rp_filter' -+n2 bash -c 'printf 0 > /proc/sys/net/ipv4/conf/veth4/rp_filter' -+n1 bash -c 'printf 0 > /proc/sys/net/ipv4/conf/all/rp_filter' -+n2 bash -c 'printf 0 > /proc/sys/net/ipv4/conf/all/rp_filter' -+n1 ping -W 1 -c 1 192.168.241.2 -+[[ $(n2 wg show wg0 endpoints) == "$pub1 10.0.0.3:1" ]] -+ -+ip1 link del veth1 -+ip1 link del veth3 -+ip1 link del wg0 -+ip2 link del wg0 -+ -+# We test that Netlink/IPC is working properly by doing things that usually cause split responses -+ip0 link add dev wg0 type wireguard -+config=( "[Interface]" "PrivateKey=$(wg genkey)" "[Peer]" "PublicKey=$(wg genkey)" ) -+for a in {1..255}; do -+ for b in {0..255}; do -+ config+=( "AllowedIPs=$a.$b.0.0/16,$a::$b/128" ) -+ done -+done -+n0 wg setconf wg0 <(printf '%s\n' "${config[@]}") -+i=0 -+for ip in $(n0 wg show wg0 allowed-ips); do -+ ((++i)) -+done -+((i == 255*256*2+1)) -+ip0 link del wg0 -+ip0 link add dev wg0 type wireguard -+config=( "[Interface]" "PrivateKey=$(wg genkey)" ) 
-+for a in {1..40}; do -+ config+=( "[Peer]" "PublicKey=$(wg genkey)" ) -+ for b in {1..52}; do -+ config+=( "AllowedIPs=$a.$b.0.0/16" ) -+ done -+done -+n0 wg setconf wg0 <(printf '%s\n' "${config[@]}") -+i=0 -+while read -r line; do -+ j=0 -+ for ip in $line; do -+ ((++j)) -+ done -+ ((j == 53)) -+ ((++i)) -+done < <(n0 wg show wg0 allowed-ips) -+((i == 40)) -+ip0 link del wg0 -+ip0 link add wg0 type wireguard -+config=( ) -+for i in {1..29}; do -+ config+=( "[Peer]" "PublicKey=$(wg genkey)" ) -+done -+config+=( "[Peer]" "PublicKey=$(wg genkey)" "AllowedIPs=255.2.3.4/32,abcd::255/128" ) -+n0 wg setconf wg0 <(printf '%s\n' "${config[@]}") -+n0 wg showconf wg0 > /dev/null -+ip0 link del wg0 -+ -+allowedips=( ) -+for i in {1..197}; do -+ allowedips+=( abcd::$i ) -+done -+saved_ifs="$IFS" -+IFS=, -+allowedips="${allowedips[*]}" -+IFS="$saved_ifs" -+ip0 link add wg0 type wireguard -+n0 wg set wg0 peer "$pub1" -+n0 wg set wg0 peer "$pub2" allowed-ips "$allowedips" -+{ -+ read -r pub allowedips -+ [[ $pub == "$pub1" && $allowedips == "(none)" ]] -+ read -r pub allowedips -+ [[ $pub == "$pub2" ]] -+ i=0 -+ for _ in $allowedips; do -+ ((++i)) -+ done -+ ((i == 197)) -+} < <(n0 wg show wg0 allowed-ips) -+ip0 link del wg0 -+ -+! n0 wg show doesnotexist || false -+ -+ip0 link add wg0 type wireguard -+n0 wg set wg0 private-key <(echo "$key1") peer "$pub2" preshared-key <(echo "$psk") -+[[ $(n0 wg show wg0 private-key) == "$key1" ]] -+[[ $(n0 wg show wg0 preshared-keys) == "$pub2 $psk" ]] -+n0 wg set wg0 private-key /dev/null peer "$pub2" preshared-key /dev/null -+[[ $(n0 wg show wg0 private-key) == "(none)" ]] -+[[ $(n0 wg show wg0 preshared-keys) == "$pub2 (none)" ]] -+n0 wg set wg0 peer "$pub2" -+n0 wg set wg0 private-key <(echo "$key2") -+[[ $(n0 wg show wg0 public-key) == "$pub2" ]] -+[[ -z $(n0 wg show wg0 peers) ]] -+n0 wg set wg0 peer "$pub2" -+[[ -z $(n0 wg show wg0 peers) ]] -+n0 wg set wg0 private-key <(echo "$key1") -+n0 wg set wg0 peer "$pub2" -+[[ $(n0 wg show wg0 peers) == "$pub2" ]] -+n0 wg set wg0 private-key <(echo "/${key1:1}") -+[[ $(n0 wg show wg0 private-key) == "+${key1:1}" ]] -+n0 wg set wg0 peer "$pub2" allowed-ips 0.0.0.0/0,10.0.0.0/8,100.0.0.0/10,172.16.0.0/12,192.168.0.0/16 -+n0 wg set wg0 peer "$pub2" allowed-ips 0.0.0.0/0 -+n0 wg set wg0 peer "$pub2" allowed-ips ::/0,1700::/111,5000::/4,e000::/37,9000::/75 -+n0 wg set wg0 peer "$pub2" allowed-ips ::/0 -+ip0 link del wg0 -+ -+declare -A objects -+while read -t 0.1 -r line 2>/dev/null || [[ $? -ne 142 ]]; do -+ [[ $line =~ .*(wg[0-9]+:\ [A-Z][a-z]+\ [0-9]+)\ .*(created|destroyed).* ]] || continue -+ objects["${BASH_REMATCH[1]}"]+="${BASH_REMATCH[2]}" -+done < /dev/kmsg -+alldeleted=1 -+for object in "${!objects[@]}"; do -+ if [[ ${objects["$object"]} != *createddestroyed ]]; then -+ echo "Error: $object: merely ${objects["$object"]}" >&3 -+ alldeleted=0 -+ fi -+done -+[[ $alldeleted -eq 1 ]] -+pretty "" "Objects that were created were also destroyed." diff --git a/target/linux/generic/backport-5.4/080-wireguard-0074-wireguard-Kconfig-select-parent-dependency-for-crypt.patch b/target/linux/generic/backport-5.4/080-wireguard-0074-wireguard-Kconfig-select-parent-dependency-for-crypt.patch new file mode 100644 index 0000000000..c2f8f77f53 --- /dev/null +++ b/target/linux/generic/backport-5.4/080-wireguard-0074-wireguard-Kconfig-select-parent-dependency-for-crypt.patch @@ -0,0 +1,30 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: "Jason A. 
Donenfeld" +Date: Sun, 15 Dec 2019 22:08:01 +0100 +Subject: [PATCH] wireguard: Kconfig: select parent dependency for crypto + +commit d7c68a38bb4f9b7c1a2e4a772872c752ee5c44a6 upstream. + +This fixes the crypto selection submenu depenencies. Otherwise, we'd +wind up issuing warnings in which certain dependencies we also select +couldn't be satisfied. This condition was triggered by the addition of +the test suite autobuilder in the previous commit. + +Signed-off-by: Jason A. Donenfeld +Signed-off-by: David S. Miller +Signed-off-by: Jason A. Donenfeld +--- + drivers/net/Kconfig | 2 ++ + 1 file changed, 2 insertions(+) + +--- a/drivers/net/Kconfig ++++ b/drivers/net/Kconfig +@@ -85,6 +85,8 @@ config WIREGUARD + select CRYPTO_POLY1305_X86_64 if X86 && 64BIT + select CRYPTO_BLAKE2S_X86 if X86 && 64BIT + select CRYPTO_CURVE25519_X86 if X86 && 64BIT ++ select ARM_CRYPTO if ARM ++ select ARM64_CRYPTO if ARM64 + select CRYPTO_CHACHA20_NEON if (ARM || ARM64) && KERNEL_MODE_NEON + select CRYPTO_POLY1305_NEON if ARM64 && KERNEL_MODE_NEON + select CRYPTO_POLY1305_ARM if ARM diff --git a/target/linux/generic/backport-5.4/080-wireguard-0075-wireguard-global-fix-spelling-mistakes-in-comments.patch b/target/linux/generic/backport-5.4/080-wireguard-0075-wireguard-global-fix-spelling-mistakes-in-comments.patch new file mode 100644 index 0000000000..9b34e663a9 --- /dev/null +++ b/target/linux/generic/backport-5.4/080-wireguard-0075-wireguard-global-fix-spelling-mistakes-in-comments.patch @@ -0,0 +1,66 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Josh Soref +Date: Sun, 15 Dec 2019 22:08:02 +0100 +Subject: [PATCH] wireguard: global: fix spelling mistakes in comments + +commit a2ec8b5706944d228181c8b91d815f41d6dd8e7b upstream. + +This fixes two spelling errors in source code comments. + +Signed-off-by: Josh Soref +[Jason: rewrote commit message] +Signed-off-by: Jason A. Donenfeld +Signed-off-by: David S. Miller +Signed-off-by: Jason A. Donenfeld +--- + drivers/net/wireguard/receive.c | 2 +- + include/uapi/linux/wireguard.h | 8 ++++---- + 2 files changed, 5 insertions(+), 5 deletions(-) + +--- a/drivers/net/wireguard/receive.c ++++ b/drivers/net/wireguard/receive.c +@@ -380,7 +380,7 @@ static void wg_packet_consume_data_done( + /* We've already verified the Poly1305 auth tag, which means this packet + * was not modified in transit. We can therefore tell the networking + * stack that all checksums of every layer of encapsulation have already +- * been checked "by the hardware" and therefore is unneccessary to check ++ * been checked "by the hardware" and therefore is unnecessary to check + * again in software. 
+ */ + skb->ip_summed = CHECKSUM_UNNECESSARY; +--- a/include/uapi/linux/wireguard.h ++++ b/include/uapi/linux/wireguard.h +@@ -18,13 +18,13 @@ + * one but not both of: + * + * WGDEVICE_A_IFINDEX: NLA_U32 +- * WGDEVICE_A_IFNAME: NLA_NUL_STRING, maxlen IFNAMESIZ - 1 ++ * WGDEVICE_A_IFNAME: NLA_NUL_STRING, maxlen IFNAMSIZ - 1 + * + * The kernel will then return several messages (NLM_F_MULTI) containing the + * following tree of nested items: + * + * WGDEVICE_A_IFINDEX: NLA_U32 +- * WGDEVICE_A_IFNAME: NLA_NUL_STRING, maxlen IFNAMESIZ - 1 ++ * WGDEVICE_A_IFNAME: NLA_NUL_STRING, maxlen IFNAMSIZ - 1 + * WGDEVICE_A_PRIVATE_KEY: NLA_EXACT_LEN, len WG_KEY_LEN + * WGDEVICE_A_PUBLIC_KEY: NLA_EXACT_LEN, len WG_KEY_LEN + * WGDEVICE_A_LISTEN_PORT: NLA_U16 +@@ -77,7 +77,7 @@ + * WGDEVICE_A_IFINDEX and WGDEVICE_A_IFNAME: + * + * WGDEVICE_A_IFINDEX: NLA_U32 +- * WGDEVICE_A_IFNAME: NLA_NUL_STRING, maxlen IFNAMESIZ - 1 ++ * WGDEVICE_A_IFNAME: NLA_NUL_STRING, maxlen IFNAMSIZ - 1 + * WGDEVICE_A_FLAGS: NLA_U32, 0 or WGDEVICE_F_REPLACE_PEERS if all current + * peers should be removed prior to adding the list below. + * WGDEVICE_A_PRIVATE_KEY: len WG_KEY_LEN, all zeros to remove +@@ -121,7 +121,7 @@ + * filling in information not contained in the prior. Note that if + * WGDEVICE_F_REPLACE_PEERS is specified in the first message, it probably + * should not be specified in fragments that come after, so that the list +- * of peers is only cleared the first time but appened after. Likewise for ++ * of peers is only cleared the first time but appended after. Likewise for + * peers, if WGPEER_F_REPLACE_ALLOWEDIPS is specified in the first message + * of a peer, it likely should not be specified in subsequent fragments. + * diff --git a/target/linux/generic/backport-5.4/080-wireguard-0075-wireguard-selftests-import-harness-makefile-for-test.patch b/target/linux/generic/backport-5.4/080-wireguard-0075-wireguard-selftests-import-harness-makefile-for-test.patch deleted file mode 100644 index ca3853aa19..0000000000 --- a/target/linux/generic/backport-5.4/080-wireguard-0075-wireguard-selftests-import-harness-makefile-for-test.patch +++ /dev/null @@ -1,1078 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Sun, 15 Dec 2019 22:08:00 +0100 -Subject: [PATCH] wireguard: selftests: import harness makefile for test suite - -commit 65d88d04114bca7d85faebd5fed61069cb2b632c upstream. - -WireGuard has been using this on build.wireguard.com for the last -several years with considerable success. It allows for very quick and -iterative development cycles, and supports several platforms. - -To run the test suite on your current platform in QEMU: - - $ make -C tools/testing/selftests/wireguard/qemu -j$(nproc) - -To run it with KASAN and such turned on: - - $ DEBUG_KERNEL=yes make -C tools/testing/selftests/wireguard/qemu -j$(nproc) - -To run it emulated for another platform in QEMU: - - $ ARCH=arm make -C tools/testing/selftests/wireguard/qemu -j$(nproc) - -At the moment, we support aarch64_be, aarch64, arm, armeb, i686, m68k, -mips64, mips64el, mips, mipsel, powerpc64le, powerpc, and x86_64. - -The system supports incremental rebuilding, so it should be very fast to -change a single file and then test it out and have immediate feedback. - -This requires for the right toolchain and qemu to be installed prior. -I've had success with those from musl.cc. - -This is tailored for WireGuard at the moment, though later projects -might generalize it for other network testing. 
- -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. Donenfeld ---- - .../selftests/wireguard/qemu/.gitignore | 2 + - .../testing/selftests/wireguard/qemu/Makefile | 385 ++++++++++++++++++ - .../wireguard/qemu/arch/aarch64.config | 5 + - .../wireguard/qemu/arch/aarch64_be.config | 6 + - .../selftests/wireguard/qemu/arch/arm.config | 9 + - .../wireguard/qemu/arch/armeb.config | 10 + - .../selftests/wireguard/qemu/arch/i686.config | 5 + - .../selftests/wireguard/qemu/arch/m68k.config | 9 + - .../selftests/wireguard/qemu/arch/mips.config | 11 + - .../wireguard/qemu/arch/mips64.config | 14 + - .../wireguard/qemu/arch/mips64el.config | 15 + - .../wireguard/qemu/arch/mipsel.config | 12 + - .../wireguard/qemu/arch/powerpc.config | 10 + - .../wireguard/qemu/arch/powerpc64le.config | 12 + - .../wireguard/qemu/arch/x86_64.config | 5 + - .../selftests/wireguard/qemu/debug.config | 67 +++ - tools/testing/selftests/wireguard/qemu/init.c | 284 +++++++++++++ - .../selftests/wireguard/qemu/kernel.config | 86 ++++ - 18 files changed, 947 insertions(+) - create mode 100644 tools/testing/selftests/wireguard/qemu/.gitignore - create mode 100644 tools/testing/selftests/wireguard/qemu/Makefile - create mode 100644 tools/testing/selftests/wireguard/qemu/arch/aarch64.config - create mode 100644 tools/testing/selftests/wireguard/qemu/arch/aarch64_be.config - create mode 100644 tools/testing/selftests/wireguard/qemu/arch/arm.config - create mode 100644 tools/testing/selftests/wireguard/qemu/arch/armeb.config - create mode 100644 tools/testing/selftests/wireguard/qemu/arch/i686.config - create mode 100644 tools/testing/selftests/wireguard/qemu/arch/m68k.config - create mode 100644 tools/testing/selftests/wireguard/qemu/arch/mips.config - create mode 100644 tools/testing/selftests/wireguard/qemu/arch/mips64.config - create mode 100644 tools/testing/selftests/wireguard/qemu/arch/mips64el.config - create mode 100644 tools/testing/selftests/wireguard/qemu/arch/mipsel.config - create mode 100644 tools/testing/selftests/wireguard/qemu/arch/powerpc.config - create mode 100644 tools/testing/selftests/wireguard/qemu/arch/powerpc64le.config - create mode 100644 tools/testing/selftests/wireguard/qemu/arch/x86_64.config - create mode 100644 tools/testing/selftests/wireguard/qemu/debug.config - create mode 100644 tools/testing/selftests/wireguard/qemu/init.c - create mode 100644 tools/testing/selftests/wireguard/qemu/kernel.config - ---- /dev/null -+++ b/tools/testing/selftests/wireguard/qemu/.gitignore -@@ -0,0 +1,2 @@ -+build/ -+distfiles/ ---- /dev/null -+++ b/tools/testing/selftests/wireguard/qemu/Makefile -@@ -0,0 +1,385 @@ -+# SPDX-License-Identifier: GPL-2.0 -+# -+# Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. -+ -+PWD := $(shell pwd) -+ -+CHOST := $(shell gcc -dumpmachine) -+ifneq (,$(ARCH)) -+CBUILD := $(subst -gcc,,$(lastword $(subst /, ,$(firstword $(wildcard $(foreach bindir,$(subst :, ,$(PATH)),$(bindir)/$(ARCH)-*-gcc)))))) -+ifeq (,$(CBUILD)) -+$(error The toolchain for $(ARCH) is not installed) -+endif -+else -+CBUILD := $(CHOST) -+ARCH := $(firstword $(subst -, ,$(CBUILD))) -+endif -+ -+# Set these from the environment to override -+KERNEL_PATH ?= $(PWD)/../../../../.. 
-+BUILD_PATH ?= $(PWD)/build/$(ARCH) -+DISTFILES_PATH ?= $(PWD)/distfiles -+NR_CPUS ?= 4 -+ -+MIRROR := https://download.wireguard.com/qemu-test/distfiles/ -+ -+default: qemu -+ -+# variable name, tarball project name, version, tarball extension, default URI base -+define tar_download = -+$(1)_VERSION := $(3) -+$(1)_NAME := $(2)-$$($(1)_VERSION) -+$(1)_TAR := $(DISTFILES_PATH)/$$($(1)_NAME)$(4) -+$(1)_PATH := $(BUILD_PATH)/$$($(1)_NAME) -+$(call file_download,$$($(1)_NAME)$(4),$(5),$(6)) -+endef -+ -+define file_download = -+$(DISTFILES_PATH)/$(1): -+ mkdir -p $(DISTFILES_PATH) -+ flock -x $$@.lock -c '[ -f $$@ ] && exit 0; wget -O $$@.tmp $(MIRROR)$(1) || wget -t inf --retry-on-http-error=404 -O $$@.tmp $(2)$(1) || rm -f $$@.tmp' -+ if echo "$(3) $$@.tmp" | sha256sum -c -; then mv $$@.tmp $$@; else rm -f $$@.tmp; exit 71; fi -+endef -+ -+$(eval $(call tar_download,MUSL,musl,1.1.20,.tar.gz,https://www.musl-libc.org/releases/,44be8771d0e6c6b5f82dd15662eb2957c9a3173a19a8b49966ac0542bbd40d61)) -+$(eval $(call tar_download,LIBMNL,libmnl,1.0.4,.tar.bz2,https://www.netfilter.org/projects/libmnl/files/,171f89699f286a5854b72b91d06e8f8e3683064c5901fb09d954a9ab6f551f81)) -+$(eval $(call tar_download,IPERF,iperf,3.1.7,.tar.gz,http://downloads.es.net/pub/iperf/,a4ef73406fe92250602b8da2ae89ec53211f805df97a1d1d629db5a14043734f)) -+$(eval $(call tar_download,BASH,bash,5.0,.tar.gz,https://ftp.gnu.org/gnu/bash/,b4a80f2ac66170b2913efbfb9f2594f1f76c7b1afd11f799e22035d63077fb4d)) -+$(eval $(call tar_download,IPROUTE2,iproute2,5.1.0,.tar.gz,https://www.kernel.org/pub/linux/utils/net/iproute2/,9b43707d6075ecdca14803ca8ce0c8553848c49fa1586d12fd508d66577243f2)) -+$(eval $(call tar_download,IPTABLES,iptables,1.6.1,.tar.bz2,https://www.netfilter.org/projects/iptables/files/,0fc2d7bd5d7be11311726466789d4c65fb4c8e096c9182b56ce97440864f0cf5)) -+$(eval $(call tar_download,NMAP,nmap,7.60,.tar.bz2,https://nmap.org/dist/,a8796ecc4fa6c38aad6139d9515dc8113023a82e9d787e5a5fb5fa1b05516f21)) -+$(eval $(call tar_download,IPUTILS,iputils,s20161105,.tar.gz,https://github.com/iputils/iputils/archive/s20161105.tar.gz/#,f813092f03d17294fd23544b129b95cdb87fe19f7970a51908a6b88509acad8a)) -+$(eval $(call tar_download,WIREGUARD_TOOLS,WireGuard,0.0.20191212,.tar.xz,https://git.zx2c4.com/WireGuard/snapshot/,b0d718380f7a8822b2f12d75e462fa4eafa3a77871002981f367cd4fe2a1b071)) -+ -+KERNEL_BUILD_PATH := $(BUILD_PATH)/kernel$(if $(findstring yes,$(DEBUG_KERNEL)),-debug) -+rwildcard=$(foreach d,$(wildcard $1*),$(call rwildcard,$d/,$2) $(filter $(subst *,%,$2),$d)) -+WIREGUARD_SOURCES := $(call rwildcard,$(KERNEL_PATH)/drivers/net/wireguard/,*) -+ -+export CFLAGS ?= -O3 -pipe -+export LDFLAGS ?= -+export CPPFLAGS := -I$(BUILD_PATH)/include -+ -+ifeq ($(CHOST),$(CBUILD)) -+CROSS_COMPILE_FLAG := --host=$(CHOST) -+NOPIE_GCC := gcc -fno-PIE -+CFLAGS += -march=native -+STRIP := strip -+else -+$(info Cross compilation: building for $(CBUILD) using $(CHOST)) -+CROSS_COMPILE_FLAG := --build=$(CBUILD) --host=$(CHOST) -+export CROSS_COMPILE=$(CBUILD)- -+NOPIE_GCC := $(CBUILD)-gcc -fno-PIE -+STRIP := $(CBUILD)-strip -+endif -+ifeq ($(ARCH),aarch64) -+QEMU_ARCH := aarch64 -+KERNEL_ARCH := arm64 -+KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/arm64/boot/Image -+ifeq ($(CHOST),$(CBUILD)) -+QEMU_MACHINE := -cpu host -machine virt,gic_version=host,accel=kvm -+else -+QEMU_MACHINE := -cpu cortex-a53 -machine virt -+CFLAGS += -march=armv8-a -mtune=cortex-a53 -+endif -+else ifeq ($(ARCH),aarch64_be) -+QEMU_ARCH := aarch64 -+KERNEL_ARCH := arm64 -+KERNEL_BZIMAGE := 
$(KERNEL_BUILD_PATH)/arch/arm64/boot/Image -+ifeq ($(CHOST),$(CBUILD)) -+QEMU_MACHINE := -cpu host -machine virt,gic_version=host,accel=kvm -+else -+QEMU_MACHINE := -cpu cortex-a53 -machine virt -+CFLAGS += -march=armv8-a -mtune=cortex-a53 -+endif -+else ifeq ($(ARCH),arm) -+QEMU_ARCH := arm -+KERNEL_ARCH := arm -+KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/arm/boot/zImage -+ifeq ($(CHOST),$(CBUILD)) -+QEMU_MACHINE := -cpu host -machine virt,gic_version=host,accel=kvm -+else -+QEMU_MACHINE := -cpu cortex-a15 -machine virt -+CFLAGS += -march=armv7-a -mtune=cortex-a15 -mabi=aapcs-linux -+endif -+else ifeq ($(ARCH),armeb) -+QEMU_ARCH := arm -+KERNEL_ARCH := arm -+KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/arm/boot/zImage -+ifeq ($(CHOST),$(CBUILD)) -+QEMU_MACHINE := -cpu host -machine virt,gic_version=host,accel=kvm -+else -+QEMU_MACHINE := -cpu cortex-a15 -machine virt -+CFLAGS += -march=armv7-a -mabi=aapcs-linux # We don't pass -mtune=cortex-a15 due to a compiler bug on big endian. -+LDFLAGS += -Wl,--be8 -+endif -+else ifeq ($(ARCH),x86_64) -+QEMU_ARCH := x86_64 -+KERNEL_ARCH := x86_64 -+KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/x86/boot/bzImage -+ifeq ($(CHOST),$(CBUILD)) -+QEMU_MACHINE := -cpu host -machine q35,accel=kvm -+else -+QEMU_MACHINE := -cpu Skylake-Server -machine q35 -+CFLAGS += -march=skylake-avx512 -+endif -+else ifeq ($(ARCH),i686) -+QEMU_ARCH := i386 -+KERNEL_ARCH := x86 -+KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/x86/boot/bzImage -+ifeq ($(subst i686,x86_64,$(CBUILD)),$(CHOST)) -+QEMU_MACHINE := -cpu host -machine q35,accel=kvm -+else -+QEMU_MACHINE := -cpu coreduo -machine q35 -+CFLAGS += -march=prescott -+endif -+else ifeq ($(ARCH),mips64) -+QEMU_ARCH := mips64 -+KERNEL_ARCH := mips -+KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/vmlinux -+ifeq ($(CHOST),$(CBUILD)) -+QEMU_MACHINE := -cpu host -machine malta,accel=kvm -+CFLAGS += -EB -+else -+QEMU_MACHINE := -cpu MIPS64R2-generic -machine malta -smp 1 -+CFLAGS += -march=mips64r2 -EB -+endif -+else ifeq ($(ARCH),mips64el) -+QEMU_ARCH := mips64el -+KERNEL_ARCH := mips -+KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/vmlinux -+ifeq ($(CHOST),$(CBUILD)) -+QEMU_MACHINE := -cpu host -machine malta,accel=kvm -+CFLAGS += -EL -+else -+QEMU_MACHINE := -cpu MIPS64R2-generic -machine malta -smp 1 -+CFLAGS += -march=mips64r2 -EL -+endif -+else ifeq ($(ARCH),mips) -+QEMU_ARCH := mips -+KERNEL_ARCH := mips -+KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/vmlinux -+ifeq ($(CHOST),$(CBUILD)) -+QEMU_MACHINE := -cpu host -machine malta,accel=kvm -+CFLAGS += -EB -+else -+QEMU_MACHINE := -cpu 24Kf -machine malta -smp 1 -+CFLAGS += -march=mips32r2 -EB -+endif -+else ifeq ($(ARCH),mipsel) -+QEMU_ARCH := mipsel -+KERNEL_ARCH := mips -+KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/vmlinux -+ifeq ($(CHOST),$(CBUILD)) -+QEMU_MACHINE := -cpu host -machine malta,accel=kvm -+CFLAGS += -EL -+else -+QEMU_MACHINE := -cpu 24Kf -machine malta -smp 1 -+CFLAGS += -march=mips32r2 -EL -+endif -+else ifeq ($(ARCH),powerpc64le) -+QEMU_ARCH := ppc64 -+KERNEL_ARCH := powerpc -+KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/vmlinux -+ifeq ($(CHOST),$(CBUILD)) -+QEMU_MACHINE := -cpu host,accel=kvm -machine pseries -+else -+QEMU_MACHINE := -machine pseries -+endif -+CFLAGS += -mcpu=powerpc64le -mlong-double-64 -+else ifeq ($(ARCH),powerpc) -+QEMU_ARCH := ppc -+KERNEL_ARCH := powerpc -+KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/powerpc/boot/uImage -+ifeq ($(CHOST),$(CBUILD)) -+QEMU_MACHINE := -cpu host,accel=kvm -machine ppce500 -+else -+QEMU_MACHINE := -machine ppce500 -+endif -+CFLAGS += 
-mcpu=powerpc -mlong-double-64 -msecure-plt -+else ifeq ($(ARCH),m68k) -+QEMU_ARCH := m68k -+KERNEL_ARCH := m68k -+KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/vmlinux -+ifeq ($(CHOST),$(CBUILD)) -+QEMU_MACHINE := -cpu host,accel=kvm -machine q800 -+else -+QEMU_MACHINE := -machine q800 -+endif -+else -+$(error I only build: x86_64, i686, arm, armeb, aarch64, aarch64_be, mips, mipsel, mips64, mips64el, powerpc64le, powerpc, m68k) -+endif -+ -+REAL_CC := $(CBUILD)-gcc -+MUSL_CC := $(BUILD_PATH)/musl-gcc -+export CC := $(MUSL_CC) -+USERSPACE_DEPS := $(MUSL_CC) $(BUILD_PATH)/include/.installed $(BUILD_PATH)/include/linux/.installed -+ -+build: $(KERNEL_BZIMAGE) -+qemu: $(KERNEL_BZIMAGE) -+ rm -f $(BUILD_PATH)/result -+ timeout --foreground 20m qemu-system-$(QEMU_ARCH) \ -+ -nodefaults \ -+ -nographic \ -+ -smp $(NR_CPUS) \ -+ $(QEMU_MACHINE) \ -+ -m $$(grep -q CONFIG_DEBUG_KMEMLEAK=y $(KERNEL_BUILD_PATH)/.config && echo 1G || echo 256M) \ -+ -serial stdio \ -+ -serial file:$(BUILD_PATH)/result \ -+ -no-reboot \ -+ -monitor none \ -+ -kernel $< -+ grep -Fq success $(BUILD_PATH)/result -+ -+$(BUILD_PATH)/init-cpio-spec.txt: -+ mkdir -p $(BUILD_PATH) -+ echo "file /init $(BUILD_PATH)/init 755 0 0" > $@ -+ echo "file /init.sh $(PWD)/../netns.sh 755 0 0" >> $@ -+ echo "dir /dev 755 0 0" >> $@ -+ echo "nod /dev/console 644 0 0 c 5 1" >> $@ -+ echo "dir /bin 755 0 0" >> $@ -+ echo "file /bin/iperf3 $(IPERF_PATH)/src/iperf3 755 0 0" >> $@ -+ echo "file /bin/wg $(WIREGUARD_TOOLS_PATH)/src/tools/wg 755 0 0" >> $@ -+ echo "file /bin/bash $(BASH_PATH)/bash 755 0 0" >> $@ -+ echo "file /bin/ip $(IPROUTE2_PATH)/ip/ip 755 0 0" >> $@ -+ echo "file /bin/ss $(IPROUTE2_PATH)/misc/ss 755 0 0" >> $@ -+ echo "file /bin/ping $(IPUTILS_PATH)/ping 755 0 0" >> $@ -+ echo "file /bin/ncat $(NMAP_PATH)/ncat/ncat 755 0 0" >> $@ -+ echo "file /bin/xtables-multi $(IPTABLES_PATH)/iptables/xtables-multi 755 0 0" >> $@ -+ echo "slink /bin/iptables xtables-multi 777 0 0" >> $@ -+ echo "slink /bin/ping6 ping 777 0 0" >> $@ -+ echo "dir /lib 755 0 0" >> $@ -+ echo "file /lib/libc.so $(MUSL_PATH)/lib/libc.so 755 0 0" >> $@ -+ echo "slink /lib/ld-linux.so.1 libc.so 777 0 0" >> $@ -+ -+$(KERNEL_BUILD_PATH)/.config: kernel.config arch/$(ARCH).config -+ mkdir -p $(KERNEL_BUILD_PATH) -+ cp kernel.config $(KERNEL_BUILD_PATH)/minimal.config -+ printf 'CONFIG_NR_CPUS=$(NR_CPUS)\nCONFIG_INITRAMFS_SOURCE="$(BUILD_PATH)/init-cpio-spec.txt"\n' >> $(KERNEL_BUILD_PATH)/minimal.config -+ cat arch/$(ARCH).config >> $(KERNEL_BUILD_PATH)/minimal.config -+ $(MAKE) -C $(KERNEL_PATH) O=$(KERNEL_BUILD_PATH) ARCH=$(KERNEL_ARCH) allnoconfig -+ cd $(KERNEL_BUILD_PATH) && ARCH=$(KERNEL_ARCH) $(KERNEL_PATH)/scripts/kconfig/merge_config.sh -n $(KERNEL_BUILD_PATH)/.config $(KERNEL_BUILD_PATH)/minimal.config -+ $(if $(findstring yes,$(DEBUG_KERNEL)),cp debug.config $(KERNEL_BUILD_PATH) && cd $(KERNEL_BUILD_PATH) && ARCH=$(KERNEL_ARCH) $(KERNEL_PATH)/scripts/kconfig/merge_config.sh -n $(KERNEL_BUILD_PATH)/.config debug.config,) -+ -+$(KERNEL_BZIMAGE): $(KERNEL_BUILD_PATH)/.config $(BUILD_PATH)/init-cpio-spec.txt $(MUSL_PATH)/lib/libc.so $(IPERF_PATH)/src/iperf3 $(IPUTILS_PATH)/ping $(BASH_PATH)/bash $(IPROUTE2_PATH)/misc/ss $(IPROUTE2_PATH)/ip/ip $(IPTABLES_PATH)/iptables/xtables-multi $(NMAP_PATH)/ncat/ncat $(WIREGUARD_TOOLS_PATH)/src/tools/wg $(BUILD_PATH)/init ../netns.sh $(WIREGUARD_SOURCES) -+ $(MAKE) -C $(KERNEL_PATH) O=$(KERNEL_BUILD_PATH) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(CROSS_COMPILE) CC="$(NOPIE_GCC)" -+ -+$(BUILD_PATH)/include/linux/.installed: | 
$(KERNEL_BUILD_PATH)/.config -+ $(MAKE) -C $(KERNEL_PATH) O=$(KERNEL_BUILD_PATH) INSTALL_HDR_PATH=$(BUILD_PATH) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(CROSS_COMPILE) headers_install -+ touch $@ -+ -+$(MUSL_PATH)/lib/libc.so: $(MUSL_TAR) -+ mkdir -p $(BUILD_PATH) -+ flock -s $<.lock tar -C $(BUILD_PATH) -xf $< -+ cd $(MUSL_PATH) && CC=$(REAL_CC) ./configure --prefix=/ --disable-static --build=$(CBUILD) -+ $(MAKE) -C $(MUSL_PATH) -+ $(STRIP) -s $@ -+ -+$(BUILD_PATH)/include/.installed: $(MUSL_PATH)/lib/libc.so -+ $(MAKE) -C $(MUSL_PATH) DESTDIR=$(BUILD_PATH) install-headers -+ touch $@ -+ -+$(MUSL_CC): $(MUSL_PATH)/lib/libc.so -+ sh $(MUSL_PATH)/tools/musl-gcc.specs.sh $(BUILD_PATH)/include $(MUSL_PATH)/lib /lib/ld-linux.so.1 > $(BUILD_PATH)/musl-gcc.specs -+ printf '#!/bin/sh\nexec "$(REAL_CC)" --specs="$(BUILD_PATH)/musl-gcc.specs" -fno-stack-protector -no-pie "$$@"\n' > $(BUILD_PATH)/musl-gcc -+ chmod +x $(BUILD_PATH)/musl-gcc -+ -+$(IPERF_PATH)/.installed: $(IPERF_TAR) -+ mkdir -p $(BUILD_PATH) -+ flock -s $<.lock tar -C $(BUILD_PATH) -xf $< -+ sed -i '1s/^/#include /' $(IPERF_PATH)/src/cjson.h $(IPERF_PATH)/src/timer.h -+ sed -i -r 's/-p?g//g' $(IPERF_PATH)/src/Makefile* -+ touch $@ -+ -+$(IPERF_PATH)/src/iperf3: | $(IPERF_PATH)/.installed $(USERSPACE_DEPS) -+ cd $(IPERF_PATH) && CFLAGS="$(CFLAGS) -D_GNU_SOURCE" ./configure --prefix=/ $(CROSS_COMPILE_FLAG) --enable-static --disable-shared -+ $(MAKE) -C $(IPERF_PATH) -+ $(STRIP) -s $@ -+ -+$(LIBMNL_PATH)/.installed: $(LIBMNL_TAR) -+ flock -s $<.lock tar -C $(BUILD_PATH) -xf $< -+ touch $@ -+ -+$(LIBMNL_PATH)/src/.libs/libmnl.a: | $(LIBMNL_PATH)/.installed $(USERSPACE_DEPS) -+ cd $(LIBMNL_PATH) && ./configure --prefix=/ $(CROSS_COMPILE_FLAG) --enable-static --disable-shared -+ $(MAKE) -C $(LIBMNL_PATH) -+ sed -i 's:prefix=.*:prefix=$(LIBMNL_PATH):' $(LIBMNL_PATH)/libmnl.pc -+ -+$(WIREGUARD_TOOLS_PATH)/.installed: $(WIREGUARD_TOOLS_TAR) -+ flock -s $<.lock tar -C $(BUILD_PATH) -xf $< -+ touch $@ -+ -+$(WIREGUARD_TOOLS_PATH)/src/tools/wg: | $(WIREGUARD_TOOLS_PATH)/.installed $(LIBMNL_PATH)/src/.libs/libmnl.a $(USERSPACE_DEPS) -+ LDFLAGS="$(LDFLAGS) -L$(LIBMNL_PATH)/src/.libs" $(MAKE) -C $(WIREGUARD_TOOLS_PATH)/src/tools LIBMNL_CFLAGS="-I$(LIBMNL_PATH)/include" LIBMNL_LDLIBS="-lmnl" wg -+ $(STRIP) -s $@ -+ -+$(BUILD_PATH)/init: init.c | $(USERSPACE_DEPS) -+ mkdir -p $(BUILD_PATH) -+ $(MUSL_CC) -o $@ $(CFLAGS) $(LDFLAGS) -std=gnu11 $< -+ $(STRIP) -s $@ -+ -+$(IPUTILS_PATH)/.installed: $(IPUTILS_TAR) -+ mkdir -p $(BUILD_PATH) -+ flock -s $<.lock tar -C $(BUILD_PATH) -xf $< -+ touch $@ -+ -+$(IPUTILS_PATH)/ping: | $(IPUTILS_PATH)/.installed $(USERSPACE_DEPS) -+ $(MAKE) -C $(IPUTILS_PATH) USE_CAP=no USE_IDN=no USE_NETTLE=no USE_CRYPTO=no ping -+ $(STRIP) -s $@ -+ -+$(BASH_PATH)/.installed: $(BASH_TAR) -+ mkdir -p $(BUILD_PATH) -+ flock -s $<.lock tar -C $(BUILD_PATH) -xf $< -+ touch $@ -+ -+$(BASH_PATH)/bash: | $(BASH_PATH)/.installed $(USERSPACE_DEPS) -+ cd $(BASH_PATH) && ./configure --prefix=/ $(CROSS_COMPILE_FLAG) --without-bash-malloc --disable-debugger --disable-help-builtin --disable-history --disable-multibyte --disable-progcomp --disable-readline --disable-mem-scramble -+ $(MAKE) -C $(BASH_PATH) -+ $(STRIP) -s $@ -+ -+$(IPROUTE2_PATH)/.installed: $(IPROUTE2_TAR) -+ mkdir -p $(BUILD_PATH) -+ flock -s $<.lock tar -C $(BUILD_PATH) -xf $< -+ printf 
'CC:=$(CC)\nPKG_CONFIG:=pkg-config\nTC_CONFIG_XT:=n\nTC_CONFIG_ATM:=n\nTC_CONFIG_IPSET:=n\nIP_CONFIG_SETNS:=y\nHAVE_ELF:=n\nHAVE_MNL:=y\nHAVE_BERKELEY_DB:=n\nHAVE_LATEX:=n\nHAVE_PDFLATEX:=n\nCFLAGS+=-DHAVE_SETNS -DHAVE_LIBMNL -I$(LIBMNL_PATH)/include\nLDLIBS+=-lmnl' > $(IPROUTE2_PATH)/config.mk -+ printf 'lib: snapshot\n\t$$(MAKE) -C lib\nip/ip: lib\n\t$$(MAKE) -C ip ip\nmisc/ss: lib\n\t$$(MAKE) -C misc ss\n' >> $(IPROUTE2_PATH)/Makefile -+ touch $@ -+ -+$(IPROUTE2_PATH)/ip/ip: | $(IPROUTE2_PATH)/.installed $(LIBMNL_PATH)/src/.libs/libmnl.a $(USERSPACE_DEPS) -+ LDFLAGS="$(LDFLAGS) -L$(LIBMNL_PATH)/src/.libs" PKG_CONFIG_LIBDIR="$(LIBMNL_PATH)" $(MAKE) -C $(IPROUTE2_PATH) PREFIX=/ ip/ip -+ $(STRIP) -s $(IPROUTE2_PATH)/ip/ip -+ -+$(IPROUTE2_PATH)/misc/ss: | $(IPROUTE2_PATH)/.installed $(LIBMNL_PATH)/src/.libs/libmnl.a $(USERSPACE_DEPS) -+ LDFLAGS="$(LDFLAGS) -L$(LIBMNL_PATH)/src/.libs" PKG_CONFIG_LIBDIR="$(LIBMNL_PATH)" $(MAKE) -C $(IPROUTE2_PATH) PREFIX=/ misc/ss -+ $(STRIP) -s $(IPROUTE2_PATH)/misc/ss -+ -+$(IPTABLES_PATH)/.installed: $(IPTABLES_TAR) -+ mkdir -p $(BUILD_PATH) -+ flock -s $<.lock tar -C $(BUILD_PATH) -xf $< -+ sed -i -e "/nfnetlink=[01]/s:=[01]:=0:" -e "/nfconntrack=[01]/s:=[01]:=0:" $(IPTABLES_PATH)/configure -+ touch $@ -+ -+$(IPTABLES_PATH)/iptables/xtables-multi: | $(IPTABLES_PATH)/.installed $(LIBMNL_PATH)/src/.libs/libmnl.a $(USERSPACE_DEPS) -+ cd $(IPTABLES_PATH) && PKG_CONFIG_LIBDIR="$(LIBMNL_PATH)" ./configure --prefix=/ $(CROSS_COMPILE_FLAG) --enable-static --disable-shared --disable-nftables --disable-bpf-compiler --disable-nfsynproxy --disable-libipq --with-kernel=$(BUILD_PATH)/include -+ $(MAKE) -C $(IPTABLES_PATH) -+ $(STRIP) -s $@ -+ -+$(NMAP_PATH)/.installed: $(NMAP_TAR) -+ mkdir -p $(BUILD_PATH) -+ flock -s $<.lock tar -C $(BUILD_PATH) -xf $< -+ touch $@ -+ -+$(NMAP_PATH)/ncat/ncat: | $(NMAP_PATH)/.installed $(USERSPACE_DEPS) -+ cd $(NMAP_PATH) && ./configure --prefix=/ $(CROSS_COMPILE_FLAG) --enable-static --disable-shared --without-ndiff --without-zenmap --without-nping --with-libpcap=included --with-libpcre=included --with-libdnet=included --without-liblua --with-liblinear=included --without-nmap-update --without-openssl --with-pcap=linux -+ $(MAKE) -C $(NMAP_PATH) build-ncat -+ $(STRIP) -s $@ -+ -+clean: -+ rm -rf $(BUILD_PATH) -+ -+distclean: clean -+ rm -rf $(DISTFILES_PATH) -+ -+menuconfig: $(KERNEL_BUILD_PATH)/.config -+ $(MAKE) -C $(KERNEL_PATH) O=$(KERNEL_BUILD_PATH) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(CROSS_COMPILE) CC="$(NOPIE_GCC)" menuconfig -+ -+.PHONY: qemu build clean distclean menuconfig -+.DELETE_ON_ERROR: ---- /dev/null -+++ b/tools/testing/selftests/wireguard/qemu/arch/aarch64.config -@@ -0,0 +1,5 @@ -+CONFIG_SERIAL_AMBA_PL011=y -+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y -+CONFIG_CMDLINE_BOOL=y -+CONFIG_CMDLINE="console=ttyAMA0 wg.success=ttyAMA1" -+CONFIG_FRAME_WARN=1280 ---- /dev/null -+++ b/tools/testing/selftests/wireguard/qemu/arch/aarch64_be.config -@@ -0,0 +1,6 @@ -+CONFIG_CPU_BIG_ENDIAN=y -+CONFIG_SERIAL_AMBA_PL011=y -+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y -+CONFIG_CMDLINE_BOOL=y -+CONFIG_CMDLINE="console=ttyAMA0 wg.success=ttyAMA1" -+CONFIG_FRAME_WARN=1280 ---- /dev/null -+++ b/tools/testing/selftests/wireguard/qemu/arch/arm.config -@@ -0,0 +1,9 @@ -+CONFIG_MMU=y -+CONFIG_ARCH_MULTI_V7=y -+CONFIG_ARCH_VIRT=y -+CONFIG_THUMB2_KERNEL=n -+CONFIG_SERIAL_AMBA_PL011=y -+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y -+CONFIG_CMDLINE_BOOL=y -+CONFIG_CMDLINE="console=ttyAMA0 wg.success=ttyAMA1" -+CONFIG_FRAME_WARN=1024 ---- /dev/null -+++ 
b/tools/testing/selftests/wireguard/qemu/arch/armeb.config -@@ -0,0 +1,10 @@ -+CONFIG_MMU=y -+CONFIG_ARCH_MULTI_V7=y -+CONFIG_ARCH_VIRT=y -+CONFIG_THUMB2_KERNEL=n -+CONFIG_SERIAL_AMBA_PL011=y -+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y -+CONFIG_CMDLINE_BOOL=y -+CONFIG_CMDLINE="console=ttyAMA0 wg.success=ttyAMA1" -+CONFIG_CPU_BIG_ENDIAN=y -+CONFIG_FRAME_WARN=1024 ---- /dev/null -+++ b/tools/testing/selftests/wireguard/qemu/arch/i686.config -@@ -0,0 +1,5 @@ -+CONFIG_SERIAL_8250=y -+CONFIG_SERIAL_8250_CONSOLE=y -+CONFIG_CMDLINE_BOOL=y -+CONFIG_CMDLINE="console=ttyS0 wg.success=ttyS1" -+CONFIG_FRAME_WARN=1024 ---- /dev/null -+++ b/tools/testing/selftests/wireguard/qemu/arch/m68k.config -@@ -0,0 +1,9 @@ -+CONFIG_MMU=y -+CONFIG_M68040=y -+CONFIG_MAC=y -+CONFIG_SERIAL_PMACZILOG=y -+CONFIG_SERIAL_PMACZILOG_TTYS=y -+CONFIG_SERIAL_PMACZILOG_CONSOLE=y -+CONFIG_CMDLINE_BOOL=y -+CONFIG_CMDLINE="console=ttyS0 wg.success=ttyS1" -+CONFIG_FRAME_WARN=1024 ---- /dev/null -+++ b/tools/testing/selftests/wireguard/qemu/arch/mips.config -@@ -0,0 +1,11 @@ -+CONFIG_CPU_MIPS32_R2=y -+CONFIG_MIPS_MALTA=y -+CONFIG_MIPS_CPS=y -+CONFIG_MIPS_FP_SUPPORT=y -+CONFIG_POWER_RESET=y -+CONFIG_POWER_RESET_SYSCON=y -+CONFIG_SERIAL_8250=y -+CONFIG_SERIAL_8250_CONSOLE=y -+CONFIG_CMDLINE_BOOL=y -+CONFIG_CMDLINE="console=ttyS0 wg.success=ttyS1" -+CONFIG_FRAME_WARN=1024 ---- /dev/null -+++ b/tools/testing/selftests/wireguard/qemu/arch/mips64.config -@@ -0,0 +1,14 @@ -+CONFIG_64BIT=y -+CONFIG_CPU_MIPS64_R2=y -+CONFIG_MIPS32_N32=y -+CONFIG_CPU_HAS_MSA=y -+CONFIG_MIPS_MALTA=y -+CONFIG_MIPS_CPS=y -+CONFIG_MIPS_FP_SUPPORT=y -+CONFIG_POWER_RESET=y -+CONFIG_POWER_RESET_SYSCON=y -+CONFIG_SERIAL_8250=y -+CONFIG_SERIAL_8250_CONSOLE=y -+CONFIG_CMDLINE_BOOL=y -+CONFIG_CMDLINE="console=ttyS0 wg.success=ttyS1" -+CONFIG_FRAME_WARN=1280 ---- /dev/null -+++ b/tools/testing/selftests/wireguard/qemu/arch/mips64el.config -@@ -0,0 +1,15 @@ -+CONFIG_64BIT=y -+CONFIG_CPU_MIPS64_R2=y -+CONFIG_MIPS32_N32=y -+CONFIG_CPU_HAS_MSA=y -+CONFIG_MIPS_MALTA=y -+CONFIG_CPU_LITTLE_ENDIAN=y -+CONFIG_MIPS_CPS=y -+CONFIG_MIPS_FP_SUPPORT=y -+CONFIG_POWER_RESET=y -+CONFIG_POWER_RESET_SYSCON=y -+CONFIG_SERIAL_8250=y -+CONFIG_SERIAL_8250_CONSOLE=y -+CONFIG_CMDLINE_BOOL=y -+CONFIG_CMDLINE="console=ttyS0 wg.success=ttyS1" -+CONFIG_FRAME_WARN=1280 ---- /dev/null -+++ b/tools/testing/selftests/wireguard/qemu/arch/mipsel.config -@@ -0,0 +1,12 @@ -+CONFIG_CPU_MIPS32_R2=y -+CONFIG_MIPS_MALTA=y -+CONFIG_CPU_LITTLE_ENDIAN=y -+CONFIG_MIPS_CPS=y -+CONFIG_MIPS_FP_SUPPORT=y -+CONFIG_POWER_RESET=y -+CONFIG_POWER_RESET_SYSCON=y -+CONFIG_SERIAL_8250=y -+CONFIG_SERIAL_8250_CONSOLE=y -+CONFIG_CMDLINE_BOOL=y -+CONFIG_CMDLINE="console=ttyS0 wg.success=ttyS1" -+CONFIG_FRAME_WARN=1024 ---- /dev/null -+++ b/tools/testing/selftests/wireguard/qemu/arch/powerpc.config -@@ -0,0 +1,10 @@ -+CONFIG_PPC_QEMU_E500=y -+CONFIG_FSL_SOC_BOOKE=y -+CONFIG_PPC_85xx=y -+CONFIG_PHYS_64BIT=y -+CONFIG_SERIAL_8250=y -+CONFIG_SERIAL_8250_CONSOLE=y -+CONFIG_MATH_EMULATION=y -+CONFIG_CMDLINE_BOOL=y -+CONFIG_CMDLINE="console=ttyS0 wg.success=ttyS1" -+CONFIG_FRAME_WARN=1024 ---- /dev/null -+++ b/tools/testing/selftests/wireguard/qemu/arch/powerpc64le.config -@@ -0,0 +1,12 @@ -+CONFIG_PPC64=y -+CONFIG_PPC_PSERIES=y -+CONFIG_ALTIVEC=y -+CONFIG_VSX=y -+CONFIG_PPC_OF_BOOT_TRAMPOLINE=y -+CONFIG_PPC_RADIX_MMU=y -+CONFIG_HVC_CONSOLE=y -+CONFIG_CPU_LITTLE_ENDIAN=y -+CONFIG_CMDLINE_BOOL=y -+CONFIG_CMDLINE="console=hvc0 wg.success=hvc1" -+CONFIG_SECTION_MISMATCH_WARN_ONLY=y -+CONFIG_FRAME_WARN=1280 ---- /dev/null -+++ 
b/tools/testing/selftests/wireguard/qemu/arch/x86_64.config -@@ -0,0 +1,5 @@ -+CONFIG_SERIAL_8250=y -+CONFIG_SERIAL_8250_CONSOLE=y -+CONFIG_CMDLINE_BOOL=y -+CONFIG_CMDLINE="console=ttyS0 wg.success=ttyS1" -+CONFIG_FRAME_WARN=1280 ---- /dev/null -+++ b/tools/testing/selftests/wireguard/qemu/debug.config -@@ -0,0 +1,67 @@ -+CONFIG_LOCALVERSION="-debug" -+CONFIG_ENABLE_WARN_DEPRECATED=y -+CONFIG_ENABLE_MUST_CHECK=y -+CONFIG_FRAME_POINTER=y -+CONFIG_STACK_VALIDATION=y -+CONFIG_DEBUG_KERNEL=y -+CONFIG_DEBUG_INFO=y -+CONFIG_DEBUG_INFO_DWARF4=y -+CONFIG_PAGE_EXTENSION=y -+CONFIG_PAGE_POISONING=y -+CONFIG_DEBUG_OBJECTS=y -+CONFIG_DEBUG_OBJECTS_FREE=y -+CONFIG_DEBUG_OBJECTS_TIMERS=y -+CONFIG_DEBUG_OBJECTS_WORK=y -+CONFIG_DEBUG_OBJECTS_RCU_HEAD=y -+CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y -+CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT=1 -+CONFIG_SLUB_DEBUG_ON=y -+CONFIG_DEBUG_VM=y -+CONFIG_DEBUG_MEMORY_INIT=y -+CONFIG_HAVE_DEBUG_STACKOVERFLOW=y -+CONFIG_DEBUG_STACKOVERFLOW=y -+CONFIG_HAVE_ARCH_KMEMCHECK=y -+CONFIG_HAVE_ARCH_KASAN=y -+CONFIG_KASAN=y -+CONFIG_KASAN_INLINE=y -+CONFIG_UBSAN=y -+CONFIG_UBSAN_SANITIZE_ALL=y -+CONFIG_UBSAN_NO_ALIGNMENT=y -+CONFIG_UBSAN_NULL=y -+CONFIG_DEBUG_KMEMLEAK=y -+CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE=8192 -+CONFIG_DEBUG_STACK_USAGE=y -+CONFIG_DEBUG_SHIRQ=y -+CONFIG_WQ_WATCHDOG=y -+CONFIG_SCHED_DEBUG=y -+CONFIG_SCHED_INFO=y -+CONFIG_SCHEDSTATS=y -+CONFIG_SCHED_STACK_END_CHECK=y -+CONFIG_DEBUG_TIMEKEEPING=y -+CONFIG_TIMER_STATS=y -+CONFIG_DEBUG_PREEMPT=y -+CONFIG_DEBUG_RT_MUTEXES=y -+CONFIG_DEBUG_SPINLOCK=y -+CONFIG_DEBUG_MUTEXES=y -+CONFIG_DEBUG_LOCK_ALLOC=y -+CONFIG_PROVE_LOCKING=y -+CONFIG_LOCKDEP=y -+CONFIG_DEBUG_ATOMIC_SLEEP=y -+CONFIG_TRACE_IRQFLAGS=y -+CONFIG_DEBUG_BUGVERBOSE=y -+CONFIG_DEBUG_LIST=y -+CONFIG_DEBUG_PI_LIST=y -+CONFIG_PROVE_RCU=y -+CONFIG_SPARSE_RCU_POINTER=y -+CONFIG_RCU_CPU_STALL_TIMEOUT=21 -+CONFIG_RCU_TRACE=y -+CONFIG_RCU_EQS_DEBUG=y -+CONFIG_USER_STACKTRACE_SUPPORT=y -+CONFIG_DEBUG_SG=y -+CONFIG_DEBUG_NOTIFIERS=y -+CONFIG_DOUBLEFAULT=y -+CONFIG_X86_DEBUG_FPU=y -+CONFIG_DEBUG_SECTION_MISMATCH=y -+CONFIG_DEBUG_PAGEALLOC=y -+CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT=y -+CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y ---- /dev/null -+++ b/tools/testing/selftests/wireguard/qemu/init.c -@@ -0,0 +1,284 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
-+ */ -+ -+#define _GNU_SOURCE -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+__attribute__((noreturn)) static void poweroff(void) -+{ -+ fflush(stdout); -+ fflush(stderr); -+ reboot(RB_AUTOBOOT); -+ sleep(30); -+ fprintf(stderr, "\x1b[37m\x1b[41m\x1b[1mFailed to power off!!!\x1b[0m\n"); -+ exit(1); -+} -+ -+static void panic(const char *what) -+{ -+ fprintf(stderr, "\n\n\x1b[37m\x1b[41m\x1b[1mSOMETHING WENT HORRIBLY WRONG\x1b[0m\n\n \x1b[31m\x1b[1m%s: %s\x1b[0m\n\n\x1b[37m\x1b[44m\x1b[1mPower off...\x1b[0m\n\n", what, strerror(errno)); -+ poweroff(); -+} -+ -+#define pretty_message(msg) puts("\x1b[32m\x1b[1m" msg "\x1b[0m") -+ -+static void print_banner(void) -+{ -+ struct utsname utsname; -+ int len; -+ -+ if (uname(&utsname) < 0) -+ panic("uname"); -+ -+ len = strlen(" WireGuard Test Suite on ") + strlen(utsname.sysname) + strlen(utsname.release) + strlen(utsname.machine); -+ printf("\x1b[45m\x1b[33m\x1b[1m%*.s\x1b[0m\n\x1b[45m\x1b[33m\x1b[1m WireGuard Test Suite on %s %s %s \x1b[0m\n\x1b[45m\x1b[33m\x1b[1m%*.s\x1b[0m\n\n", len, "", utsname.sysname, utsname.release, utsname.machine, len, ""); -+} -+ -+static void seed_rng(void) -+{ -+ int fd; -+ struct { -+ int entropy_count; -+ int buffer_size; -+ unsigned char buffer[256]; -+ } entropy = { -+ .entropy_count = sizeof(entropy.buffer) * 8, -+ .buffer_size = sizeof(entropy.buffer), -+ .buffer = "Adding real entropy is not actually important for these tests. Don't try this at home, kids!" -+ }; -+ -+ if (mknod("/dev/urandom", S_IFCHR | 0644, makedev(1, 9))) -+ panic("mknod(/dev/urandom)"); -+ fd = open("/dev/urandom", O_WRONLY); -+ if (fd < 0) -+ panic("open(urandom)"); -+ for (int i = 0; i < 256; ++i) { -+ if (ioctl(fd, RNDADDENTROPY, &entropy) < 0) -+ panic("ioctl(urandom)"); -+ } -+ close(fd); -+} -+ -+static void mount_filesystems(void) -+{ -+ pretty_message("[+] Mounting filesystems..."); -+ mkdir("/dev", 0755); -+ mkdir("/proc", 0755); -+ mkdir("/sys", 0755); -+ mkdir("/tmp", 0755); -+ mkdir("/run", 0755); -+ mkdir("/var", 0755); -+ if (mount("none", "/dev", "devtmpfs", 0, NULL)) -+ panic("devtmpfs mount"); -+ if (mount("none", "/proc", "proc", 0, NULL)) -+ panic("procfs mount"); -+ if (mount("none", "/sys", "sysfs", 0, NULL)) -+ panic("sysfs mount"); -+ if (mount("none", "/tmp", "tmpfs", 0, NULL)) -+ panic("tmpfs mount"); -+ if (mount("none", "/run", "tmpfs", 0, NULL)) -+ panic("tmpfs mount"); -+ if (mount("none", "/sys/kernel/debug", "debugfs", 0, NULL)) -+ ; /* Not a problem if it fails.*/ -+ if (symlink("/run", "/var/run")) -+ panic("run symlink"); -+ if (symlink("/proc/self/fd", "/dev/fd")) -+ panic("fd symlink"); -+} -+ -+static void enable_logging(void) -+{ -+ int fd; -+ pretty_message("[+] Enabling logging..."); -+ fd = open("/proc/sys/kernel/printk", O_WRONLY); -+ if (fd >= 0) { -+ if (write(fd, "9\n", 2) != 2) -+ panic("write(printk)"); -+ close(fd); -+ } -+ fd = open("/proc/sys/debug/exception-trace", O_WRONLY); -+ if (fd >= 0) { -+ if (write(fd, "1\n", 2) != 2) -+ panic("write(exception-trace)"); -+ close(fd); -+ } -+ fd = open("/proc/sys/kernel/panic_on_warn", O_WRONLY); -+ if (fd >= 0) { -+ if (write(fd, "1\n", 2) != 2) -+ panic("write(panic_on_warn)"); -+ close(fd); -+ } -+} -+ -+static void kmod_selftests(void) -+{ -+ FILE *file; -+ char line[2048], *start, *pass; -+ bool success = true; -+ pretty_message("[+] Module self-tests:"); -+ file = 
fopen("/proc/kmsg", "r"); -+ if (!file) -+ panic("fopen(kmsg)"); -+ if (fcntl(fileno(file), F_SETFL, O_NONBLOCK) < 0) -+ panic("fcntl(kmsg, nonblock)"); -+ while (fgets(line, sizeof(line), file)) { -+ start = strstr(line, "wireguard: "); -+ if (!start) -+ continue; -+ start += 11; -+ *strchrnul(start, '\n') = '\0'; -+ if (strstr(start, "www.wireguard.com")) -+ break; -+ pass = strstr(start, ": pass"); -+ if (!pass || pass[6] != '\0') { -+ success = false; -+ printf(" \x1b[31m* %s\x1b[0m\n", start); -+ } else -+ printf(" \x1b[32m* %s\x1b[0m\n", start); -+ } -+ fclose(file); -+ if (!success) { -+ puts("\x1b[31m\x1b[1m[-] Tests failed! \u2639\x1b[0m"); -+ poweroff(); -+ } -+} -+ -+static void launch_tests(void) -+{ -+ char cmdline[4096], *success_dev; -+ int status, fd; -+ pid_t pid; -+ -+ pretty_message("[+] Launching tests..."); -+ pid = fork(); -+ if (pid == -1) -+ panic("fork"); -+ else if (pid == 0) { -+ execl("/init.sh", "init", NULL); -+ panic("exec"); -+ } -+ if (waitpid(pid, &status, 0) < 0) -+ panic("waitpid"); -+ if (WIFEXITED(status) && WEXITSTATUS(status) == 0) { -+ pretty_message("[+] Tests successful! :-)"); -+ fd = open("/proc/cmdline", O_RDONLY); -+ if (fd < 0) -+ panic("open(/proc/cmdline)"); -+ if (read(fd, cmdline, sizeof(cmdline) - 1) <= 0) -+ panic("read(/proc/cmdline)"); -+ cmdline[sizeof(cmdline) - 1] = '\0'; -+ for (success_dev = strtok(cmdline, " \n"); success_dev; success_dev = strtok(NULL, " \n")) { -+ if (strncmp(success_dev, "wg.success=", 11)) -+ continue; -+ memcpy(success_dev + 11 - 5, "/dev/", 5); -+ success_dev += 11 - 5; -+ break; -+ } -+ if (!success_dev || !strlen(success_dev)) -+ panic("Unable to find success device"); -+ -+ fd = open(success_dev, O_WRONLY); -+ if (fd < 0) -+ panic("open(success_dev)"); -+ if (write(fd, "success\n", 8) != 8) -+ panic("write(success_dev)"); -+ close(fd); -+ } else { -+ const char *why = "unknown cause"; -+ int what = -1; -+ -+ if (WIFEXITED(status)) { -+ why = "exit code"; -+ what = WEXITSTATUS(status); -+ } else if (WIFSIGNALED(status)) { -+ why = "signal"; -+ what = WTERMSIG(status); -+ } -+ printf("\x1b[31m\x1b[1m[-] Tests failed with %s %d! \u2639\x1b[0m\n", why, what); -+ } -+} -+ -+static void ensure_console(void) -+{ -+ for (unsigned int i = 0; i < 1000; ++i) { -+ int fd = open("/dev/console", O_RDWR); -+ if (fd < 0) { -+ usleep(50000); -+ continue; -+ } -+ dup2(fd, 0); -+ dup2(fd, 1); -+ dup2(fd, 2); -+ close(fd); -+ if (write(1, "\0\0\0\0\n", 5) == 5) -+ return; -+ } -+ panic("Unable to open console device"); -+} -+ -+static void clear_leaks(void) -+{ -+ int fd; -+ -+ fd = open("/sys/kernel/debug/kmemleak", O_WRONLY); -+ if (fd < 0) -+ return; -+ pretty_message("[+] Starting memory leak detection..."); -+ write(fd, "clear\n", 5); -+ close(fd); -+} -+ -+static void check_leaks(void) -+{ -+ int fd; -+ -+ fd = open("/sys/kernel/debug/kmemleak", O_WRONLY); -+ if (fd < 0) -+ return; -+ pretty_message("[+] Scanning for memory leaks..."); -+ sleep(2); /* Wait for any grace periods. 
*/ -+ write(fd, "scan\n", 5); -+ close(fd); -+ -+ fd = open("/sys/kernel/debug/kmemleak", O_RDONLY); -+ if (fd < 0) -+ return; -+ if (sendfile(1, fd, NULL, 0x7ffff000) > 0) -+ panic("Memory leaks encountered"); -+ close(fd); -+} -+ -+int main(int argc, char *argv[]) -+{ -+ seed_rng(); -+ ensure_console(); -+ print_banner(); -+ mount_filesystems(); -+ kmod_selftests(); -+ enable_logging(); -+ clear_leaks(); -+ launch_tests(); -+ check_leaks(); -+ poweroff(); -+ return 1; -+} ---- /dev/null -+++ b/tools/testing/selftests/wireguard/qemu/kernel.config -@@ -0,0 +1,86 @@ -+CONFIG_LOCALVERSION="" -+CONFIG_NET=y -+CONFIG_NETDEVICES=y -+CONFIG_NET_CORE=y -+CONFIG_NET_IPIP=y -+CONFIG_DUMMY=y -+CONFIG_VETH=y -+CONFIG_MULTIUSER=y -+CONFIG_NAMESPACES=y -+CONFIG_NET_NS=y -+CONFIG_UNIX=y -+CONFIG_INET=y -+CONFIG_IPV6=y -+CONFIG_NETFILTER=y -+CONFIG_NETFILTER_ADVANCED=y -+CONFIG_NF_CONNTRACK=y -+CONFIG_NF_NAT=y -+CONFIG_NETFILTER_XTABLES=y -+CONFIG_NETFILTER_XT_NAT=y -+CONFIG_NETFILTER_XT_MATCH_LENGTH=y -+CONFIG_NF_CONNTRACK_IPV4=y -+CONFIG_NF_NAT_IPV4=y -+CONFIG_IP_NF_IPTABLES=y -+CONFIG_IP_NF_FILTER=y -+CONFIG_IP_NF_NAT=y -+CONFIG_IP_ADVANCED_ROUTER=y -+CONFIG_IP_MULTIPLE_TABLES=y -+CONFIG_IPV6_MULTIPLE_TABLES=y -+CONFIG_TTY=y -+CONFIG_BINFMT_ELF=y -+CONFIG_BINFMT_SCRIPT=y -+CONFIG_VDSO=y -+CONFIG_VIRTUALIZATION=y -+CONFIG_HYPERVISOR_GUEST=y -+CONFIG_PARAVIRT=y -+CONFIG_KVM_GUEST=y -+CONFIG_PARAVIRT_SPINLOCKS=y -+CONFIG_PRINTK=y -+CONFIG_KALLSYMS=y -+CONFIG_BUG=y -+CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y -+CONFIG_EMBEDDED=n -+CONFIG_BASE_FULL=y -+CONFIG_FUTEX=y -+CONFIG_SHMEM=y -+CONFIG_SLUB=y -+CONFIG_SPARSEMEM_VMEMMAP=y -+CONFIG_SMP=y -+CONFIG_SCHED_SMT=y -+CONFIG_SCHED_MC=y -+CONFIG_NUMA=y -+CONFIG_PREEMPT=y -+CONFIG_NO_HZ=y -+CONFIG_NO_HZ_IDLE=y -+CONFIG_NO_HZ_FULL=n -+CONFIG_HZ_PERIODIC=n -+CONFIG_HIGH_RES_TIMERS=y -+CONFIG_ARCH_RANDOM=y -+CONFIG_FILE_LOCKING=y -+CONFIG_POSIX_TIMERS=y -+CONFIG_DEVTMPFS=y -+CONFIG_PROC_FS=y -+CONFIG_PROC_SYSCTL=y -+CONFIG_SYSFS=y -+CONFIG_TMPFS=y -+CONFIG_CONSOLE_LOGLEVEL_DEFAULT=15 -+CONFIG_PRINTK_TIME=y -+CONFIG_BLK_DEV_INITRD=y -+CONFIG_LEGACY_VSYSCALL_NONE=y -+CONFIG_KERNEL_GZIP=y -+CONFIG_PANIC_ON_OOPS=y -+CONFIG_BUG_ON_DATA_CORRUPTION=y -+CONFIG_LOCKUP_DETECTOR=y -+CONFIG_SOFTLOCKUP_DETECTOR=y -+CONFIG_HARDLOCKUP_DETECTOR=y -+CONFIG_WQ_WATCHDOG=y -+CONFIG_DETECT_HUNG_TASK=y -+CONFIG_BOOTPARAM_HARDLOCKUP_PANIC=y -+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y -+CONFIG_BOOTPARAM_HUNG_TASK_PANIC=y -+CONFIG_PANIC_TIMEOUT=-1 -+CONFIG_STACKTRACE=y -+CONFIG_EARLY_PRINTK=y -+CONFIG_GDB_SCRIPTS=y -+CONFIG_WIREGUARD=y -+CONFIG_WIREGUARD_DEBUG=y diff --git a/target/linux/generic/backport-5.4/080-wireguard-0076-wireguard-Kconfig-select-parent-dependency-for-crypt.patch b/target/linux/generic/backport-5.4/080-wireguard-0076-wireguard-Kconfig-select-parent-dependency-for-crypt.patch deleted file mode 100644 index c2f8f77f53..0000000000 --- a/target/linux/generic/backport-5.4/080-wireguard-0076-wireguard-Kconfig-select-parent-dependency-for-crypt.patch +++ /dev/null @@ -1,30 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Sun, 15 Dec 2019 22:08:01 +0100 -Subject: [PATCH] wireguard: Kconfig: select parent dependency for crypto - -commit d7c68a38bb4f9b7c1a2e4a772872c752ee5c44a6 upstream. - -This fixes the crypto selection submenu depenencies. Otherwise, we'd -wind up issuing warnings in which certain dependencies we also select -couldn't be satisfied. 
This condition was triggered by the addition of -the test suite autobuilder in the previous commit. - -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. Donenfeld ---- - drivers/net/Kconfig | 2 ++ - 1 file changed, 2 insertions(+) - ---- a/drivers/net/Kconfig -+++ b/drivers/net/Kconfig -@@ -85,6 +85,8 @@ config WIREGUARD - select CRYPTO_POLY1305_X86_64 if X86 && 64BIT - select CRYPTO_BLAKE2S_X86 if X86 && 64BIT - select CRYPTO_CURVE25519_X86 if X86 && 64BIT -+ select ARM_CRYPTO if ARM -+ select ARM64_CRYPTO if ARM64 - select CRYPTO_CHACHA20_NEON if (ARM || ARM64) && KERNEL_MODE_NEON - select CRYPTO_POLY1305_NEON if ARM64 && KERNEL_MODE_NEON - select CRYPTO_POLY1305_ARM if ARM diff --git a/target/linux/generic/backport-5.4/080-wireguard-0076-wireguard-main-remove-unused-include-linux-version.h.patch b/target/linux/generic/backport-5.4/080-wireguard-0076-wireguard-main-remove-unused-include-linux-version.h.patch new file mode 100644 index 0000000000..3cc0b56c3e --- /dev/null +++ b/target/linux/generic/backport-5.4/080-wireguard-0076-wireguard-main-remove-unused-include-linux-version.h.patch @@ -0,0 +1,28 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: YueHaibing +Date: Sun, 15 Dec 2019 22:08:03 +0100 +Subject: [PATCH] wireguard: main: remove unused include + +commit 43967b6ff91e53bcce5ae08c16a0588a475b53a1 upstream. + +Remove from the includes for main.c, which is unused. + +Signed-off-by: YueHaibing +[Jason: reworded commit message] +Signed-off-by: Jason A. Donenfeld +Signed-off-by: David S. Miller +Signed-off-by: Jason A. Donenfeld +--- + drivers/net/wireguard/main.c | 1 - + 1 file changed, 1 deletion(-) + +--- a/drivers/net/wireguard/main.c ++++ b/drivers/net/wireguard/main.c +@@ -12,7 +12,6 @@ + + #include + +-#include + #include + #include + #include diff --git a/target/linux/generic/backport-5.4/080-wireguard-0077-wireguard-allowedips-use-kfree_rcu-instead-of-call_r.patch b/target/linux/generic/backport-5.4/080-wireguard-0077-wireguard-allowedips-use-kfree_rcu-instead-of-call_r.patch new file mode 100644 index 0000000000..edd90484dd --- /dev/null +++ b/target/linux/generic/backport-5.4/080-wireguard-0077-wireguard-allowedips-use-kfree_rcu-instead-of-call_r.patch @@ -0,0 +1,41 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Wei Yongjun +Date: Sun, 15 Dec 2019 22:08:04 +0100 +Subject: [PATCH] wireguard: allowedips: use kfree_rcu() instead of call_rcu() + +commit d89ee7d5c73af15c1c6f12b016cdf469742b5726 upstream. + +The callback function of call_rcu() just calls a kfree(), so we +can use kfree_rcu() instead of call_rcu() + callback function. + +Signed-off-by: Wei Yongjun +Signed-off-by: Jason A. Donenfeld +Signed-off-by: David S. Miller +Signed-off-by: Jason A. 
Donenfeld +--- + drivers/net/wireguard/allowedips.c | 7 +------ + 1 file changed, 1 insertion(+), 6 deletions(-) + +--- a/drivers/net/wireguard/allowedips.c ++++ b/drivers/net/wireguard/allowedips.c +@@ -31,11 +31,6 @@ static void copy_and_assign_cidr(struct + #define CHOOSE_NODE(parent, key) \ + parent->bit[(key[parent->bit_at_a] >> parent->bit_at_b) & 1] + +-static void node_free_rcu(struct rcu_head *rcu) +-{ +- kfree(container_of(rcu, struct allowedips_node, rcu)); +-} +- + static void push_rcu(struct allowedips_node **stack, + struct allowedips_node __rcu *p, unsigned int *len) + { +@@ -112,7 +107,7 @@ static void walk_remove_by_peer(struct a + if (!node->bit[0] || !node->bit[1]) { + rcu_assign_pointer(*nptr, DEREF( + &node->bit[!REF(node->bit[0])])); +- call_rcu(&node->rcu, node_free_rcu); ++ kfree_rcu(node, rcu); + node = DEREF(nptr); + } + } diff --git a/target/linux/generic/backport-5.4/080-wireguard-0077-wireguard-global-fix-spelling-mistakes-in-comments.patch b/target/linux/generic/backport-5.4/080-wireguard-0077-wireguard-global-fix-spelling-mistakes-in-comments.patch deleted file mode 100644 index 9b34e663a9..0000000000 --- a/target/linux/generic/backport-5.4/080-wireguard-0077-wireguard-global-fix-spelling-mistakes-in-comments.patch +++ /dev/null @@ -1,66 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Josh Soref -Date: Sun, 15 Dec 2019 22:08:02 +0100 -Subject: [PATCH] wireguard: global: fix spelling mistakes in comments - -commit a2ec8b5706944d228181c8b91d815f41d6dd8e7b upstream. - -This fixes two spelling errors in source code comments. - -Signed-off-by: Josh Soref -[Jason: rewrote commit message] -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. Donenfeld ---- - drivers/net/wireguard/receive.c | 2 +- - include/uapi/linux/wireguard.h | 8 ++++---- - 2 files changed, 5 insertions(+), 5 deletions(-) - ---- a/drivers/net/wireguard/receive.c -+++ b/drivers/net/wireguard/receive.c -@@ -380,7 +380,7 @@ static void wg_packet_consume_data_done( - /* We've already verified the Poly1305 auth tag, which means this packet - * was not modified in transit. We can therefore tell the networking - * stack that all checksums of every layer of encapsulation have already -- * been checked "by the hardware" and therefore is unneccessary to check -+ * been checked "by the hardware" and therefore is unnecessary to check - * again in software. 
- */ - skb->ip_summed = CHECKSUM_UNNECESSARY; ---- a/include/uapi/linux/wireguard.h -+++ b/include/uapi/linux/wireguard.h -@@ -18,13 +18,13 @@ - * one but not both of: - * - * WGDEVICE_A_IFINDEX: NLA_U32 -- * WGDEVICE_A_IFNAME: NLA_NUL_STRING, maxlen IFNAMESIZ - 1 -+ * WGDEVICE_A_IFNAME: NLA_NUL_STRING, maxlen IFNAMSIZ - 1 - * - * The kernel will then return several messages (NLM_F_MULTI) containing the - * following tree of nested items: - * - * WGDEVICE_A_IFINDEX: NLA_U32 -- * WGDEVICE_A_IFNAME: NLA_NUL_STRING, maxlen IFNAMESIZ - 1 -+ * WGDEVICE_A_IFNAME: NLA_NUL_STRING, maxlen IFNAMSIZ - 1 - * WGDEVICE_A_PRIVATE_KEY: NLA_EXACT_LEN, len WG_KEY_LEN - * WGDEVICE_A_PUBLIC_KEY: NLA_EXACT_LEN, len WG_KEY_LEN - * WGDEVICE_A_LISTEN_PORT: NLA_U16 -@@ -77,7 +77,7 @@ - * WGDEVICE_A_IFINDEX and WGDEVICE_A_IFNAME: - * - * WGDEVICE_A_IFINDEX: NLA_U32 -- * WGDEVICE_A_IFNAME: NLA_NUL_STRING, maxlen IFNAMESIZ - 1 -+ * WGDEVICE_A_IFNAME: NLA_NUL_STRING, maxlen IFNAMSIZ - 1 - * WGDEVICE_A_FLAGS: NLA_U32, 0 or WGDEVICE_F_REPLACE_PEERS if all current - * peers should be removed prior to adding the list below. - * WGDEVICE_A_PRIVATE_KEY: len WG_KEY_LEN, all zeros to remove -@@ -121,7 +121,7 @@ - * filling in information not contained in the prior. Note that if - * WGDEVICE_F_REPLACE_PEERS is specified in the first message, it probably - * should not be specified in fragments that come after, so that the list -- * of peers is only cleared the first time but appened after. Likewise for -+ * of peers is only cleared the first time but appended after. Likewise for - * peers, if WGPEER_F_REPLACE_ALLOWEDIPS is specified in the first message - * of a peer, it likely should not be specified in subsequent fragments. - * diff --git a/target/linux/generic/backport-5.4/080-wireguard-0078-wireguard-main-remove-unused-include-linux-version.h.patch b/target/linux/generic/backport-5.4/080-wireguard-0078-wireguard-main-remove-unused-include-linux-version.h.patch deleted file mode 100644 index 3cc0b56c3e..0000000000 --- a/target/linux/generic/backport-5.4/080-wireguard-0078-wireguard-main-remove-unused-include-linux-version.h.patch +++ /dev/null @@ -1,28 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: YueHaibing -Date: Sun, 15 Dec 2019 22:08:03 +0100 -Subject: [PATCH] wireguard: main: remove unused include - -commit 43967b6ff91e53bcce5ae08c16a0588a475b53a1 upstream. - -Remove from the includes for main.c, which is unused. - -Signed-off-by: YueHaibing -[Jason: reworded commit message] -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. Donenfeld ---- - drivers/net/wireguard/main.c | 1 - - 1 file changed, 1 deletion(-) - ---- a/drivers/net/wireguard/main.c -+++ b/drivers/net/wireguard/main.c -@@ -12,7 +12,6 @@ - - #include - --#include - #include - #include - #include diff --git a/target/linux/generic/backport-5.4/080-wireguard-0078-wireguard-selftests-remove-ancient-kernel-compatibil.patch b/target/linux/generic/backport-5.4/080-wireguard-0078-wireguard-selftests-remove-ancient-kernel-compatibil.patch new file mode 100644 index 0000000000..6ff0dd9d10 --- /dev/null +++ b/target/linux/generic/backport-5.4/080-wireguard-0078-wireguard-selftests-remove-ancient-kernel-compatibil.patch @@ -0,0 +1,373 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: "Jason A. 
Donenfeld" +Date: Thu, 2 Jan 2020 17:47:49 +0100 +Subject: [PATCH] wireguard: selftests: remove ancient kernel compatibility + code + +commit 9a69a4c8802adf642bc4a13d471b5a86b44ed434 upstream. + +Quite a bit of the test suite was designed to work with ancient kernels. +Thankfully we no longer have to deal with this. This commit updates +things that we can finally update and removes things that we can finally +remove, to avoid the build-up of the last several years as a result of +having to support ancient kernels. We can finally rely on suppress_ +prefixlength being available. On the build side of things, the no-PIE +hack is no longer required, and we can bump some of the tools, repair +our m68k and i686-kvm support, and get better coverage of the static +branches used in the crypto lib and in udp_tunnel. + +Signed-off-by: Jason A. Donenfeld +Signed-off-by: David S. Miller +Signed-off-by: Jason A. Donenfeld +--- + tools/testing/selftests/wireguard/netns.sh | 11 +-- + .../testing/selftests/wireguard/qemu/Makefile | 82 ++++++++++--------- + .../selftests/wireguard/qemu/arch/m68k.config | 2 +- + tools/testing/selftests/wireguard/qemu/init.c | 1 + + .../selftests/wireguard/qemu/kernel.config | 2 + + 5 files changed, 50 insertions(+), 48 deletions(-) + +--- a/tools/testing/selftests/wireguard/netns.sh ++++ b/tools/testing/selftests/wireguard/netns.sh +@@ -37,7 +37,7 @@ n2() { pretty 2 "$*"; maybe_exec ip netn + ip0() { pretty 0 "ip $*"; ip -n $netns0 "$@"; } + ip1() { pretty 1 "ip $*"; ip -n $netns1 "$@"; } + ip2() { pretty 2 "ip $*"; ip -n $netns2 "$@"; } +-sleep() { read -t "$1" -N 0 || true; } ++sleep() { read -t "$1" -N 1 || true; } + waitiperf() { pretty "${1//*-}" "wait for iperf:5201"; while [[ $(ss -N "$1" -tlp 'sport = 5201') != *iperf3* ]]; do sleep 0.1; done; } + waitncatudp() { pretty "${1//*-}" "wait for udp:1111"; while [[ $(ss -N "$1" -ulp 'sport = 1111') != *ncat* ]]; do sleep 0.1; done; } + waitncattcp() { pretty "${1//*-}" "wait for tcp:1111"; while [[ $(ss -N "$1" -tlp 'sport = 1111') != *ncat* ]]; do sleep 0.1; done; } +@@ -294,12 +294,9 @@ ip1 -6 rule add table main suppress_pref + ip1 -4 route add default dev wg0 table 51820 + ip1 -4 rule add not fwmark 51820 table 51820 + ip1 -4 rule add table main suppress_prefixlength 0 +-# suppress_prefixlength only got added in 3.12, and we want to support 3.10+. +-if [[ $(ip1 -4 rule show all) == *suppress_prefixlength* ]]; then +- # Flood the pings instead of sending just one, to trigger routing table reference counting bugs. +- n1 ping -W 1 -c 100 -f 192.168.99.7 +- n1 ping -W 1 -c 100 -f abab::1111 +-fi ++# Flood the pings instead of sending just one, to trigger routing table reference counting bugs. 
++n1 ping -W 1 -c 100 -f 192.168.99.7 ++n1 ping -W 1 -c 100 -f abab::1111 + + n0 iptables -t nat -F + ip0 link del vethrc +--- a/tools/testing/selftests/wireguard/qemu/Makefile ++++ b/tools/testing/selftests/wireguard/qemu/Makefile +@@ -5,6 +5,7 @@ + PWD := $(shell pwd) + + CHOST := $(shell gcc -dumpmachine) ++HOST_ARCH := $(firstword $(subst -, ,$(CHOST))) + ifneq (,$(ARCH)) + CBUILD := $(subst -gcc,,$(lastword $(subst /, ,$(firstword $(wildcard $(foreach bindir,$(subst :, ,$(PATH)),$(bindir)/$(ARCH)-*-gcc)))))) + ifeq (,$(CBUILD)) +@@ -37,19 +38,19 @@ endef + define file_download = + $(DISTFILES_PATH)/$(1): + mkdir -p $(DISTFILES_PATH) +- flock -x $$@.lock -c '[ -f $$@ ] && exit 0; wget -O $$@.tmp $(MIRROR)$(1) || wget -t inf --retry-on-http-error=404 -O $$@.tmp $(2)$(1) || rm -f $$@.tmp' ++ flock -x $$@.lock -c '[ -f $$@ ] && exit 0; wget -O $$@.tmp $(MIRROR)$(1) || wget -O $$@.tmp $(2)$(1) || rm -f $$@.tmp' + if echo "$(3) $$@.tmp" | sha256sum -c -; then mv $$@.tmp $$@; else rm -f $$@.tmp; exit 71; fi + endef + +-$(eval $(call tar_download,MUSL,musl,1.1.20,.tar.gz,https://www.musl-libc.org/releases/,44be8771d0e6c6b5f82dd15662eb2957c9a3173a19a8b49966ac0542bbd40d61)) ++$(eval $(call tar_download,MUSL,musl,1.1.24,.tar.gz,https://www.musl-libc.org/releases/,1370c9a812b2cf2a7d92802510cca0058cc37e66a7bedd70051f0a34015022a3)) + $(eval $(call tar_download,LIBMNL,libmnl,1.0.4,.tar.bz2,https://www.netfilter.org/projects/libmnl/files/,171f89699f286a5854b72b91d06e8f8e3683064c5901fb09d954a9ab6f551f81)) +-$(eval $(call tar_download,IPERF,iperf,3.1.7,.tar.gz,http://downloads.es.net/pub/iperf/,a4ef73406fe92250602b8da2ae89ec53211f805df97a1d1d629db5a14043734f)) ++$(eval $(call tar_download,IPERF,iperf,3.7,.tar.gz,https://downloads.es.net/pub/iperf/,d846040224317caf2f75c843d309a950a7db23f9b44b94688ccbe557d6d1710c)) + $(eval $(call tar_download,BASH,bash,5.0,.tar.gz,https://ftp.gnu.org/gnu/bash/,b4a80f2ac66170b2913efbfb9f2594f1f76c7b1afd11f799e22035d63077fb4d)) +-$(eval $(call tar_download,IPROUTE2,iproute2,5.1.0,.tar.gz,https://www.kernel.org/pub/linux/utils/net/iproute2/,9b43707d6075ecdca14803ca8ce0c8553848c49fa1586d12fd508d66577243f2)) +-$(eval $(call tar_download,IPTABLES,iptables,1.6.1,.tar.bz2,https://www.netfilter.org/projects/iptables/files/,0fc2d7bd5d7be11311726466789d4c65fb4c8e096c9182b56ce97440864f0cf5)) +-$(eval $(call tar_download,NMAP,nmap,7.60,.tar.bz2,https://nmap.org/dist/,a8796ecc4fa6c38aad6139d9515dc8113023a82e9d787e5a5fb5fa1b05516f21)) +-$(eval $(call tar_download,IPUTILS,iputils,s20161105,.tar.gz,https://github.com/iputils/iputils/archive/s20161105.tar.gz/#,f813092f03d17294fd23544b129b95cdb87fe19f7970a51908a6b88509acad8a)) +-$(eval $(call tar_download,WIREGUARD_TOOLS,WireGuard,0.0.20191212,.tar.xz,https://git.zx2c4.com/WireGuard/snapshot/,b0d718380f7a8822b2f12d75e462fa4eafa3a77871002981f367cd4fe2a1b071)) ++$(eval $(call tar_download,IPROUTE2,iproute2,5.4.0,.tar.xz,https://www.kernel.org/pub/linux/utils/net/iproute2/,fe97aa60a0d4c5ac830be18937e18dc3400ca713a33a89ad896ff1e3d46086ae)) ++$(eval $(call tar_download,IPTABLES,iptables,1.8.4,.tar.bz2,https://www.netfilter.org/projects/iptables/files/,993a3a5490a544c2cbf2ef15cf7e7ed21af1845baf228318d5c36ef8827e157c)) ++$(eval $(call tar_download,NMAP,nmap,7.80,.tar.bz2,https://nmap.org/dist/,fcfa5a0e42099e12e4bf7a68ebe6fde05553383a682e816a7ec9256ab4773faa)) ++$(eval $(call 
tar_download,IPUTILS,iputils,s20190709,.tar.gz,https://github.com/iputils/iputils/archive/s20190709.tar.gz/#,a15720dd741d7538dd2645f9f516d193636ae4300ff7dbc8bfca757bf166490a)) ++$(eval $(call tar_download,WIREGUARD_TOOLS,wireguard-tools,1.0.20191226,.tar.xz,https://git.zx2c4.com/wireguard-tools/snapshot/,aa8af0fdc9872d369d8c890a84dbc2a2466b55795dccd5b47721b2d97644b04f)) + + KERNEL_BUILD_PATH := $(BUILD_PATH)/kernel$(if $(findstring yes,$(DEBUG_KERNEL)),-debug) + rwildcard=$(foreach d,$(wildcard $1*),$(call rwildcard,$d/,$2) $(filter $(subst *,%,$2),$d)) +@@ -59,23 +60,21 @@ export CFLAGS ?= -O3 -pipe + export LDFLAGS ?= + export CPPFLAGS := -I$(BUILD_PATH)/include + +-ifeq ($(CHOST),$(CBUILD)) ++ifeq ($(HOST_ARCH),$(ARCH)) + CROSS_COMPILE_FLAG := --host=$(CHOST) +-NOPIE_GCC := gcc -fno-PIE + CFLAGS += -march=native + STRIP := strip + else + $(info Cross compilation: building for $(CBUILD) using $(CHOST)) + CROSS_COMPILE_FLAG := --build=$(CBUILD) --host=$(CHOST) + export CROSS_COMPILE=$(CBUILD)- +-NOPIE_GCC := $(CBUILD)-gcc -fno-PIE + STRIP := $(CBUILD)-strip + endif + ifeq ($(ARCH),aarch64) + QEMU_ARCH := aarch64 + KERNEL_ARCH := arm64 + KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/arm64/boot/Image +-ifeq ($(CHOST),$(CBUILD)) ++ifeq ($(HOST_ARCH),$(ARCH)) + QEMU_MACHINE := -cpu host -machine virt,gic_version=host,accel=kvm + else + QEMU_MACHINE := -cpu cortex-a53 -machine virt +@@ -85,7 +84,7 @@ else ifeq ($(ARCH),aarch64_be) + QEMU_ARCH := aarch64 + KERNEL_ARCH := arm64 + KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/arm64/boot/Image +-ifeq ($(CHOST),$(CBUILD)) ++ifeq ($(HOST_ARCH),$(ARCH)) + QEMU_MACHINE := -cpu host -machine virt,gic_version=host,accel=kvm + else + QEMU_MACHINE := -cpu cortex-a53 -machine virt +@@ -95,7 +94,7 @@ else ifeq ($(ARCH),arm) + QEMU_ARCH := arm + KERNEL_ARCH := arm + KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/arm/boot/zImage +-ifeq ($(CHOST),$(CBUILD)) ++ifeq ($(HOST_ARCH),$(ARCH)) + QEMU_MACHINE := -cpu host -machine virt,gic_version=host,accel=kvm + else + QEMU_MACHINE := -cpu cortex-a15 -machine virt +@@ -105,7 +104,7 @@ else ifeq ($(ARCH),armeb) + QEMU_ARCH := arm + KERNEL_ARCH := arm + KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/arm/boot/zImage +-ifeq ($(CHOST),$(CBUILD)) ++ifeq ($(HOST_ARCH),$(ARCH)) + QEMU_MACHINE := -cpu host -machine virt,gic_version=host,accel=kvm + else + QEMU_MACHINE := -cpu cortex-a15 -machine virt +@@ -116,7 +115,7 @@ else ifeq ($(ARCH),x86_64) + QEMU_ARCH := x86_64 + KERNEL_ARCH := x86_64 + KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/x86/boot/bzImage +-ifeq ($(CHOST),$(CBUILD)) ++ifeq ($(HOST_ARCH),$(ARCH)) + QEMU_MACHINE := -cpu host -machine q35,accel=kvm + else + QEMU_MACHINE := -cpu Skylake-Server -machine q35 +@@ -126,7 +125,7 @@ else ifeq ($(ARCH),i686) + QEMU_ARCH := i386 + KERNEL_ARCH := x86 + KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/x86/boot/bzImage +-ifeq ($(subst i686,x86_64,$(CBUILD)),$(CHOST)) ++ifeq ($(subst x86_64,i686,$(HOST_ARCH)),$(ARCH)) + QEMU_MACHINE := -cpu host -machine q35,accel=kvm + else + QEMU_MACHINE := -cpu coreduo -machine q35 +@@ -136,7 +135,7 @@ else ifeq ($(ARCH),mips64) + QEMU_ARCH := mips64 + KERNEL_ARCH := mips + KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/vmlinux +-ifeq ($(CHOST),$(CBUILD)) ++ifeq ($(HOST_ARCH),$(ARCH)) + QEMU_MACHINE := -cpu host -machine malta,accel=kvm + CFLAGS += -EB + else +@@ -147,7 +146,7 @@ else ifeq ($(ARCH),mips64el) + QEMU_ARCH := mips64el + KERNEL_ARCH := mips + KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/vmlinux +-ifeq ($(CHOST),$(CBUILD)) ++ifeq ($(HOST_ARCH),$(ARCH)) + 
QEMU_MACHINE := -cpu host -machine malta,accel=kvm + CFLAGS += -EL + else +@@ -158,7 +157,7 @@ else ifeq ($(ARCH),mips) + QEMU_ARCH := mips + KERNEL_ARCH := mips + KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/vmlinux +-ifeq ($(CHOST),$(CBUILD)) ++ifeq ($(HOST_ARCH),$(ARCH)) + QEMU_MACHINE := -cpu host -machine malta,accel=kvm + CFLAGS += -EB + else +@@ -169,7 +168,7 @@ else ifeq ($(ARCH),mipsel) + QEMU_ARCH := mipsel + KERNEL_ARCH := mips + KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/vmlinux +-ifeq ($(CHOST),$(CBUILD)) ++ifeq ($(HOST_ARCH),$(ARCH)) + QEMU_MACHINE := -cpu host -machine malta,accel=kvm + CFLAGS += -EL + else +@@ -180,7 +179,7 @@ else ifeq ($(ARCH),powerpc64le) + QEMU_ARCH := ppc64 + KERNEL_ARCH := powerpc + KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/vmlinux +-ifeq ($(CHOST),$(CBUILD)) ++ifeq ($(HOST_ARCH),$(ARCH)) + QEMU_MACHINE := -cpu host,accel=kvm -machine pseries + else + QEMU_MACHINE := -machine pseries +@@ -190,7 +189,7 @@ else ifeq ($(ARCH),powerpc) + QEMU_ARCH := ppc + KERNEL_ARCH := powerpc + KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/powerpc/boot/uImage +-ifeq ($(CHOST),$(CBUILD)) ++ifeq ($(HOST_ARCH),$(ARCH)) + QEMU_MACHINE := -cpu host,accel=kvm -machine ppce500 + else + QEMU_MACHINE := -machine ppce500 +@@ -200,10 +199,11 @@ else ifeq ($(ARCH),m68k) + QEMU_ARCH := m68k + KERNEL_ARCH := m68k + KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/vmlinux +-ifeq ($(CHOST),$(CBUILD)) +-QEMU_MACHINE := -cpu host,accel=kvm -machine q800 ++KERNEL_CMDLINE := $(shell sed -n 's/CONFIG_CMDLINE=\(.*\)/\1/p' arch/m68k.config) ++ifeq ($(HOST_ARCH),$(ARCH)) ++QEMU_MACHINE := -cpu host,accel=kvm -machine q800 -smp 1 -append $(KERNEL_CMDLINE) + else +-QEMU_MACHINE := -machine q800 ++QEMU_MACHINE := -machine q800 -smp 1 -append $(KERNEL_CMDLINE) + endif + else + $(error I only build: x86_64, i686, arm, armeb, aarch64, aarch64_be, mips, mipsel, mips64, mips64el, powerpc64le, powerpc, m68k) +@@ -238,14 +238,14 @@ $(BUILD_PATH)/init-cpio-spec.txt: + echo "nod /dev/console 644 0 0 c 5 1" >> $@ + echo "dir /bin 755 0 0" >> $@ + echo "file /bin/iperf3 $(IPERF_PATH)/src/iperf3 755 0 0" >> $@ +- echo "file /bin/wg $(WIREGUARD_TOOLS_PATH)/src/tools/wg 755 0 0" >> $@ ++ echo "file /bin/wg $(WIREGUARD_TOOLS_PATH)/src/wg 755 0 0" >> $@ + echo "file /bin/bash $(BASH_PATH)/bash 755 0 0" >> $@ + echo "file /bin/ip $(IPROUTE2_PATH)/ip/ip 755 0 0" >> $@ + echo "file /bin/ss $(IPROUTE2_PATH)/misc/ss 755 0 0" >> $@ + echo "file /bin/ping $(IPUTILS_PATH)/ping 755 0 0" >> $@ + echo "file /bin/ncat $(NMAP_PATH)/ncat/ncat 755 0 0" >> $@ +- echo "file /bin/xtables-multi $(IPTABLES_PATH)/iptables/xtables-multi 755 0 0" >> $@ +- echo "slink /bin/iptables xtables-multi 777 0 0" >> $@ ++ echo "file /bin/xtables-legacy-multi $(IPTABLES_PATH)/iptables/xtables-legacy-multi 755 0 0" >> $@ ++ echo "slink /bin/iptables xtables-legacy-multi 777 0 0" >> $@ + echo "slink /bin/ping6 ping 777 0 0" >> $@ + echo "dir /lib 755 0 0" >> $@ + echo "file /lib/libc.so $(MUSL_PATH)/lib/libc.so 755 0 0" >> $@ +@@ -260,8 +260,8 @@ $(KERNEL_BUILD_PATH)/.config: kernel.con + cd $(KERNEL_BUILD_PATH) && ARCH=$(KERNEL_ARCH) $(KERNEL_PATH)/scripts/kconfig/merge_config.sh -n $(KERNEL_BUILD_PATH)/.config $(KERNEL_BUILD_PATH)/minimal.config + $(if $(findstring yes,$(DEBUG_KERNEL)),cp debug.config $(KERNEL_BUILD_PATH) && cd $(KERNEL_BUILD_PATH) && ARCH=$(KERNEL_ARCH) $(KERNEL_PATH)/scripts/kconfig/merge_config.sh -n $(KERNEL_BUILD_PATH)/.config debug.config,) + +-$(KERNEL_BZIMAGE): $(KERNEL_BUILD_PATH)/.config $(BUILD_PATH)/init-cpio-spec.txt 
$(MUSL_PATH)/lib/libc.so $(IPERF_PATH)/src/iperf3 $(IPUTILS_PATH)/ping $(BASH_PATH)/bash $(IPROUTE2_PATH)/misc/ss $(IPROUTE2_PATH)/ip/ip $(IPTABLES_PATH)/iptables/xtables-multi $(NMAP_PATH)/ncat/ncat $(WIREGUARD_TOOLS_PATH)/src/tools/wg $(BUILD_PATH)/init ../netns.sh $(WIREGUARD_SOURCES) +- $(MAKE) -C $(KERNEL_PATH) O=$(KERNEL_BUILD_PATH) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(CROSS_COMPILE) CC="$(NOPIE_GCC)" ++$(KERNEL_BZIMAGE): $(KERNEL_BUILD_PATH)/.config $(BUILD_PATH)/init-cpio-spec.txt $(MUSL_PATH)/lib/libc.so $(IPERF_PATH)/src/iperf3 $(IPUTILS_PATH)/ping $(BASH_PATH)/bash $(IPROUTE2_PATH)/misc/ss $(IPROUTE2_PATH)/ip/ip $(IPTABLES_PATH)/iptables/xtables-legacy-multi $(NMAP_PATH)/ncat/ncat $(WIREGUARD_TOOLS_PATH)/src/wg $(BUILD_PATH)/init ../netns.sh $(WIREGUARD_SOURCES) ++ $(MAKE) -C $(KERNEL_PATH) O=$(KERNEL_BUILD_PATH) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(CROSS_COMPILE) + + $(BUILD_PATH)/include/linux/.installed: | $(KERNEL_BUILD_PATH)/.config + $(MAKE) -C $(KERNEL_PATH) O=$(KERNEL_BUILD_PATH) INSTALL_HDR_PATH=$(BUILD_PATH) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(CROSS_COMPILE) headers_install +@@ -280,7 +280,7 @@ $(BUILD_PATH)/include/.installed: $(MUSL + + $(MUSL_CC): $(MUSL_PATH)/lib/libc.so + sh $(MUSL_PATH)/tools/musl-gcc.specs.sh $(BUILD_PATH)/include $(MUSL_PATH)/lib /lib/ld-linux.so.1 > $(BUILD_PATH)/musl-gcc.specs +- printf '#!/bin/sh\nexec "$(REAL_CC)" --specs="$(BUILD_PATH)/musl-gcc.specs" -fno-stack-protector -no-pie "$$@"\n' > $(BUILD_PATH)/musl-gcc ++ printf '#!/bin/sh\nexec "$(REAL_CC)" --specs="$(BUILD_PATH)/musl-gcc.specs" "$$@"\n' > $(BUILD_PATH)/musl-gcc + chmod +x $(BUILD_PATH)/musl-gcc + + $(IPERF_PATH)/.installed: $(IPERF_TAR) +@@ -291,7 +291,7 @@ $(IPERF_PATH)/.installed: $(IPERF_TAR) + touch $@ + + $(IPERF_PATH)/src/iperf3: | $(IPERF_PATH)/.installed $(USERSPACE_DEPS) +- cd $(IPERF_PATH) && CFLAGS="$(CFLAGS) -D_GNU_SOURCE" ./configure --prefix=/ $(CROSS_COMPILE_FLAG) --enable-static --disable-shared ++ cd $(IPERF_PATH) && CFLAGS="$(CFLAGS) -D_GNU_SOURCE" ./configure --prefix=/ $(CROSS_COMPILE_FLAG) --enable-static --disable-shared --with-openssl=no + $(MAKE) -C $(IPERF_PATH) + $(STRIP) -s $@ + +@@ -308,8 +308,8 @@ $(WIREGUARD_TOOLS_PATH)/.installed: $(WI + flock -s $<.lock tar -C $(BUILD_PATH) -xf $< + touch $@ + +-$(WIREGUARD_TOOLS_PATH)/src/tools/wg: | $(WIREGUARD_TOOLS_PATH)/.installed $(LIBMNL_PATH)/src/.libs/libmnl.a $(USERSPACE_DEPS) +- LDFLAGS="$(LDFLAGS) -L$(LIBMNL_PATH)/src/.libs" $(MAKE) -C $(WIREGUARD_TOOLS_PATH)/src/tools LIBMNL_CFLAGS="-I$(LIBMNL_PATH)/include" LIBMNL_LDLIBS="-lmnl" wg ++$(WIREGUARD_TOOLS_PATH)/src/wg: | $(WIREGUARD_TOOLS_PATH)/.installed $(LIBMNL_PATH)/src/.libs/libmnl.a $(USERSPACE_DEPS) ++ LDFLAGS="$(LDFLAGS) -L$(LIBMNL_PATH)/src/.libs" $(MAKE) -C $(WIREGUARD_TOOLS_PATH)/src LIBMNL_CFLAGS="-I$(LIBMNL_PATH)/include" LIBMNL_LDLIBS="-lmnl" wg + $(STRIP) -s $@ + + $(BUILD_PATH)/init: init.c | $(USERSPACE_DEPS) +@@ -323,7 +323,8 @@ $(IPUTILS_PATH)/.installed: $(IPUTILS_TA + touch $@ + + $(IPUTILS_PATH)/ping: | $(IPUTILS_PATH)/.installed $(USERSPACE_DEPS) +- $(MAKE) -C $(IPUTILS_PATH) USE_CAP=no USE_IDN=no USE_NETTLE=no USE_CRYPTO=no ping ++ sed -i /atexit/d $(IPUTILS_PATH)/ping.c ++ cd $(IPUTILS_PATH) && $(CC) $(CFLAGS) -std=c99 -o $@ ping.c ping_common.c ping6_common.c iputils_common.c -D_GNU_SOURCE -D'IPUTILS_VERSION(f)=f' -lresolv $(LDFLAGS) + $(STRIP) -s $@ + + $(BASH_PATH)/.installed: $(BASH_TAR) +@@ -357,7 +358,7 @@ $(IPTABLES_PATH)/.installed: $(IPTABLES_ + sed -i -e "/nfnetlink=[01]/s:=[01]:=0:" -e "/nfconntrack=[01]/s:=[01]:=0:" 
$(IPTABLES_PATH)/configure + touch $@ + +-$(IPTABLES_PATH)/iptables/xtables-multi: | $(IPTABLES_PATH)/.installed $(LIBMNL_PATH)/src/.libs/libmnl.a $(USERSPACE_DEPS) ++$(IPTABLES_PATH)/iptables/xtables-legacy-multi: | $(IPTABLES_PATH)/.installed $(LIBMNL_PATH)/src/.libs/libmnl.a $(USERSPACE_DEPS) + cd $(IPTABLES_PATH) && PKG_CONFIG_LIBDIR="$(LIBMNL_PATH)" ./configure --prefix=/ $(CROSS_COMPILE_FLAG) --enable-static --disable-shared --disable-nftables --disable-bpf-compiler --disable-nfsynproxy --disable-libipq --with-kernel=$(BUILD_PATH)/include + $(MAKE) -C $(IPTABLES_PATH) + $(STRIP) -s $@ +@@ -368,8 +369,9 @@ $(NMAP_PATH)/.installed: $(NMAP_TAR) + touch $@ + + $(NMAP_PATH)/ncat/ncat: | $(NMAP_PATH)/.installed $(USERSPACE_DEPS) +- cd $(NMAP_PATH) && ./configure --prefix=/ $(CROSS_COMPILE_FLAG) --enable-static --disable-shared --without-ndiff --without-zenmap --without-nping --with-libpcap=included --with-libpcre=included --with-libdnet=included --without-liblua --with-liblinear=included --without-nmap-update --without-openssl --with-pcap=linux +- $(MAKE) -C $(NMAP_PATH) build-ncat ++ cd $(NMAP_PATH) && ./configure --prefix=/ $(CROSS_COMPILE_FLAG) --enable-static --disable-shared --without-ndiff --without-zenmap --without-nping --with-libpcap=included --with-libpcre=included --with-libdnet=included --without-liblua --with-liblinear=included --without-nmap-update --without-openssl --with-pcap=linux --without-libssh ++ $(MAKE) -C $(NMAP_PATH)/libpcap ++ $(MAKE) -C $(NMAP_PATH)/ncat + $(STRIP) -s $@ + + clean: +@@ -379,7 +381,7 @@ distclean: clean + rm -rf $(DISTFILES_PATH) + + menuconfig: $(KERNEL_BUILD_PATH)/.config +- $(MAKE) -C $(KERNEL_PATH) O=$(KERNEL_BUILD_PATH) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(CROSS_COMPILE) CC="$(NOPIE_GCC)" menuconfig ++ $(MAKE) -C $(KERNEL_PATH) O=$(KERNEL_BUILD_PATH) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(CROSS_COMPILE) menuconfig + + .PHONY: qemu build clean distclean menuconfig + .DELETE_ON_ERROR: +--- a/tools/testing/selftests/wireguard/qemu/arch/m68k.config ++++ b/tools/testing/selftests/wireguard/qemu/arch/m68k.config +@@ -1,9 +1,9 @@ + CONFIG_MMU=y ++CONFIG_M68KCLASSIC=y + CONFIG_M68040=y + CONFIG_MAC=y + CONFIG_SERIAL_PMACZILOG=y + CONFIG_SERIAL_PMACZILOG_TTYS=y + CONFIG_SERIAL_PMACZILOG_CONSOLE=y +-CONFIG_CMDLINE_BOOL=y + CONFIG_CMDLINE="console=ttyS0 wg.success=ttyS1" + CONFIG_FRAME_WARN=1024 +--- a/tools/testing/selftests/wireguard/qemu/init.c ++++ b/tools/testing/selftests/wireguard/qemu/init.c +@@ -21,6 +21,7 @@ + #include + #include + #include ++#include + #include + #include + +--- a/tools/testing/selftests/wireguard/qemu/kernel.config ++++ b/tools/testing/selftests/wireguard/qemu/kernel.config +@@ -39,6 +39,7 @@ CONFIG_PRINTK=y + CONFIG_KALLSYMS=y + CONFIG_BUG=y + CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y ++CONFIG_JUMP_LABEL=y + CONFIG_EMBEDDED=n + CONFIG_BASE_FULL=y + CONFIG_FUTEX=y +@@ -55,6 +56,7 @@ CONFIG_NO_HZ_IDLE=y + CONFIG_NO_HZ_FULL=n + CONFIG_HZ_PERIODIC=n + CONFIG_HIGH_RES_TIMERS=y ++CONFIG_COMPAT_32BIT_TIME=y + CONFIG_ARCH_RANDOM=y + CONFIG_FILE_LOCKING=y + CONFIG_POSIX_TIMERS=y diff --git a/target/linux/generic/backport-5.4/080-wireguard-0079-wireguard-allowedips-use-kfree_rcu-instead-of-call_r.patch b/target/linux/generic/backport-5.4/080-wireguard-0079-wireguard-allowedips-use-kfree_rcu-instead-of-call_r.patch deleted file mode 100644 index edd90484dd..0000000000 --- a/target/linux/generic/backport-5.4/080-wireguard-0079-wireguard-allowedips-use-kfree_rcu-instead-of-call_r.patch +++ /dev/null @@ -1,41 +0,0 @@ -From 
0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Wei Yongjun -Date: Sun, 15 Dec 2019 22:08:04 +0100 -Subject: [PATCH] wireguard: allowedips: use kfree_rcu() instead of call_rcu() - -commit d89ee7d5c73af15c1c6f12b016cdf469742b5726 upstream. - -The callback function of call_rcu() just calls a kfree(), so we -can use kfree_rcu() instead of call_rcu() + callback function. - -Signed-off-by: Wei Yongjun -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. Donenfeld ---- - drivers/net/wireguard/allowedips.c | 7 +------ - 1 file changed, 1 insertion(+), 6 deletions(-) - ---- a/drivers/net/wireguard/allowedips.c -+++ b/drivers/net/wireguard/allowedips.c -@@ -31,11 +31,6 @@ static void copy_and_assign_cidr(struct - #define CHOOSE_NODE(parent, key) \ - parent->bit[(key[parent->bit_at_a] >> parent->bit_at_b) & 1] - --static void node_free_rcu(struct rcu_head *rcu) --{ -- kfree(container_of(rcu, struct allowedips_node, rcu)); --} -- - static void push_rcu(struct allowedips_node **stack, - struct allowedips_node __rcu *p, unsigned int *len) - { -@@ -112,7 +107,7 @@ static void walk_remove_by_peer(struct a - if (!node->bit[0] || !node->bit[1]) { - rcu_assign_pointer(*nptr, DEREF( - &node->bit[!REF(node->bit[0])])); -- call_rcu(&node->rcu, node_free_rcu); -+ kfree_rcu(node, rcu); - node = DEREF(nptr); - } - } diff --git a/target/linux/generic/backport-5.4/080-wireguard-0079-wireguard-queueing-do-not-account-for-pfmemalloc-whe.patch b/target/linux/generic/backport-5.4/080-wireguard-0079-wireguard-queueing-do-not-account-for-pfmemalloc-whe.patch new file mode 100644 index 0000000000..fb03b1b1a6 --- /dev/null +++ b/target/linux/generic/backport-5.4/080-wireguard-0079-wireguard-queueing-do-not-account-for-pfmemalloc-whe.patch @@ -0,0 +1,39 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: "Jason A. Donenfeld" +Date: Thu, 2 Jan 2020 17:47:50 +0100 +Subject: [PATCH] wireguard: queueing: do not account for pfmemalloc when + clearing skb header + +commit 04d2ea92a18417619182cbb79063f154892b0150 upstream. + +Before 8b7008620b84 ("net: Don't copy pfmemalloc flag in __copy_skb_ +header()"), the pfmemalloc flag used to be between headers_start and +headers_end, which is a region we clear when preparing the packet for +encryption/decryption. This is a parameter we certainly want to +preserve, which is why 8b7008620b84 moved it out of there. The code here +was written in a world before 8b7008620b84, though, where we had to +manually account for it. This commit brings things up to speed. + +Signed-off-by: Jason A. Donenfeld +Signed-off-by: David S. Miller +Signed-off-by: Jason A. 
Donenfeld +--- + drivers/net/wireguard/queueing.h | 3 --- + 1 file changed, 3 deletions(-) + +--- a/drivers/net/wireguard/queueing.h ++++ b/drivers/net/wireguard/queueing.h +@@ -83,13 +83,10 @@ static inline __be16 wg_skb_examine_untr + + static inline void wg_reset_packet(struct sk_buff *skb) + { +- const int pfmemalloc = skb->pfmemalloc; +- + skb_scrub_packet(skb, true); + memset(&skb->headers_start, 0, + offsetof(struct sk_buff, headers_end) - + offsetof(struct sk_buff, headers_start)); +- skb->pfmemalloc = pfmemalloc; + skb->queue_mapping = 0; + skb->nohdr = 0; + skb->peeked = 0; diff --git a/target/linux/generic/backport-5.4/080-wireguard-0080-wireguard-selftests-remove-ancient-kernel-compatibil.patch b/target/linux/generic/backport-5.4/080-wireguard-0080-wireguard-selftests-remove-ancient-kernel-compatibil.patch deleted file mode 100644 index 6ff0dd9d10..0000000000 --- a/target/linux/generic/backport-5.4/080-wireguard-0080-wireguard-selftests-remove-ancient-kernel-compatibil.patch +++ /dev/null @@ -1,373 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Thu, 2 Jan 2020 17:47:49 +0100 -Subject: [PATCH] wireguard: selftests: remove ancient kernel compatibility - code - -commit 9a69a4c8802adf642bc4a13d471b5a86b44ed434 upstream. - -Quite a bit of the test suite was designed to work with ancient kernels. -Thankfully we no longer have to deal with this. This commit updates -things that we can finally update and removes things that we can finally -remove, to avoid the build-up of the last several years as a result of -having to support ancient kernels. We can finally rely on suppress_ -prefixlength being available. On the build side of things, the no-PIE -hack is no longer required, and we can bump some of the tools, repair -our m68k and i686-kvm support, and get better coverage of the static -branches used in the crypto lib and in udp_tunnel. - -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. Donenfeld ---- - tools/testing/selftests/wireguard/netns.sh | 11 +-- - .../testing/selftests/wireguard/qemu/Makefile | 82 ++++++++++--------- - .../selftests/wireguard/qemu/arch/m68k.config | 2 +- - tools/testing/selftests/wireguard/qemu/init.c | 1 + - .../selftests/wireguard/qemu/kernel.config | 2 + - 5 files changed, 50 insertions(+), 48 deletions(-) - ---- a/tools/testing/selftests/wireguard/netns.sh -+++ b/tools/testing/selftests/wireguard/netns.sh -@@ -37,7 +37,7 @@ n2() { pretty 2 "$*"; maybe_exec ip netn - ip0() { pretty 0 "ip $*"; ip -n $netns0 "$@"; } - ip1() { pretty 1 "ip $*"; ip -n $netns1 "$@"; } - ip2() { pretty 2 "ip $*"; ip -n $netns2 "$@"; } --sleep() { read -t "$1" -N 0 || true; } -+sleep() { read -t "$1" -N 1 || true; } - waitiperf() { pretty "${1//*-}" "wait for iperf:5201"; while [[ $(ss -N "$1" -tlp 'sport = 5201') != *iperf3* ]]; do sleep 0.1; done; } - waitncatudp() { pretty "${1//*-}" "wait for udp:1111"; while [[ $(ss -N "$1" -ulp 'sport = 1111') != *ncat* ]]; do sleep 0.1; done; } - waitncattcp() { pretty "${1//*-}" "wait for tcp:1111"; while [[ $(ss -N "$1" -tlp 'sport = 1111') != *ncat* ]]; do sleep 0.1; done; } -@@ -294,12 +294,9 @@ ip1 -6 rule add table main suppress_pref - ip1 -4 route add default dev wg0 table 51820 - ip1 -4 rule add not fwmark 51820 table 51820 - ip1 -4 rule add table main suppress_prefixlength 0 --# suppress_prefixlength only got added in 3.12, and we want to support 3.10+. 
--if [[ $(ip1 -4 rule show all) == *suppress_prefixlength* ]]; then -- # Flood the pings instead of sending just one, to trigger routing table reference counting bugs. -- n1 ping -W 1 -c 100 -f 192.168.99.7 -- n1 ping -W 1 -c 100 -f abab::1111 --fi -+# Flood the pings instead of sending just one, to trigger routing table reference counting bugs. -+n1 ping -W 1 -c 100 -f 192.168.99.7 -+n1 ping -W 1 -c 100 -f abab::1111 - - n0 iptables -t nat -F - ip0 link del vethrc ---- a/tools/testing/selftests/wireguard/qemu/Makefile -+++ b/tools/testing/selftests/wireguard/qemu/Makefile -@@ -5,6 +5,7 @@ - PWD := $(shell pwd) - - CHOST := $(shell gcc -dumpmachine) -+HOST_ARCH := $(firstword $(subst -, ,$(CHOST))) - ifneq (,$(ARCH)) - CBUILD := $(subst -gcc,,$(lastword $(subst /, ,$(firstword $(wildcard $(foreach bindir,$(subst :, ,$(PATH)),$(bindir)/$(ARCH)-*-gcc)))))) - ifeq (,$(CBUILD)) -@@ -37,19 +38,19 @@ endef - define file_download = - $(DISTFILES_PATH)/$(1): - mkdir -p $(DISTFILES_PATH) -- flock -x $$@.lock -c '[ -f $$@ ] && exit 0; wget -O $$@.tmp $(MIRROR)$(1) || wget -t inf --retry-on-http-error=404 -O $$@.tmp $(2)$(1) || rm -f $$@.tmp' -+ flock -x $$@.lock -c '[ -f $$@ ] && exit 0; wget -O $$@.tmp $(MIRROR)$(1) || wget -O $$@.tmp $(2)$(1) || rm -f $$@.tmp' - if echo "$(3) $$@.tmp" | sha256sum -c -; then mv $$@.tmp $$@; else rm -f $$@.tmp; exit 71; fi - endef - --$(eval $(call tar_download,MUSL,musl,1.1.20,.tar.gz,https://www.musl-libc.org/releases/,44be8771d0e6c6b5f82dd15662eb2957c9a3173a19a8b49966ac0542bbd40d61)) -+$(eval $(call tar_download,MUSL,musl,1.1.24,.tar.gz,https://www.musl-libc.org/releases/,1370c9a812b2cf2a7d92802510cca0058cc37e66a7bedd70051f0a34015022a3)) - $(eval $(call tar_download,LIBMNL,libmnl,1.0.4,.tar.bz2,https://www.netfilter.org/projects/libmnl/files/,171f89699f286a5854b72b91d06e8f8e3683064c5901fb09d954a9ab6f551f81)) --$(eval $(call tar_download,IPERF,iperf,3.1.7,.tar.gz,http://downloads.es.net/pub/iperf/,a4ef73406fe92250602b8da2ae89ec53211f805df97a1d1d629db5a14043734f)) -+$(eval $(call tar_download,IPERF,iperf,3.7,.tar.gz,https://downloads.es.net/pub/iperf/,d846040224317caf2f75c843d309a950a7db23f9b44b94688ccbe557d6d1710c)) - $(eval $(call tar_download,BASH,bash,5.0,.tar.gz,https://ftp.gnu.org/gnu/bash/,b4a80f2ac66170b2913efbfb9f2594f1f76c7b1afd11f799e22035d63077fb4d)) --$(eval $(call tar_download,IPROUTE2,iproute2,5.1.0,.tar.gz,https://www.kernel.org/pub/linux/utils/net/iproute2/,9b43707d6075ecdca14803ca8ce0c8553848c49fa1586d12fd508d66577243f2)) --$(eval $(call tar_download,IPTABLES,iptables,1.6.1,.tar.bz2,https://www.netfilter.org/projects/iptables/files/,0fc2d7bd5d7be11311726466789d4c65fb4c8e096c9182b56ce97440864f0cf5)) --$(eval $(call tar_download,NMAP,nmap,7.60,.tar.bz2,https://nmap.org/dist/,a8796ecc4fa6c38aad6139d9515dc8113023a82e9d787e5a5fb5fa1b05516f21)) --$(eval $(call tar_download,IPUTILS,iputils,s20161105,.tar.gz,https://github.com/iputils/iputils/archive/s20161105.tar.gz/#,f813092f03d17294fd23544b129b95cdb87fe19f7970a51908a6b88509acad8a)) --$(eval $(call tar_download,WIREGUARD_TOOLS,WireGuard,0.0.20191212,.tar.xz,https://git.zx2c4.com/WireGuard/snapshot/,b0d718380f7a8822b2f12d75e462fa4eafa3a77871002981f367cd4fe2a1b071)) -+$(eval $(call tar_download,IPROUTE2,iproute2,5.4.0,.tar.xz,https://www.kernel.org/pub/linux/utils/net/iproute2/,fe97aa60a0d4c5ac830be18937e18dc3400ca713a33a89ad896ff1e3d46086ae)) -+$(eval $(call 
tar_download,IPTABLES,iptables,1.8.4,.tar.bz2,https://www.netfilter.org/projects/iptables/files/,993a3a5490a544c2cbf2ef15cf7e7ed21af1845baf228318d5c36ef8827e157c)) -+$(eval $(call tar_download,NMAP,nmap,7.80,.tar.bz2,https://nmap.org/dist/,fcfa5a0e42099e12e4bf7a68ebe6fde05553383a682e816a7ec9256ab4773faa)) -+$(eval $(call tar_download,IPUTILS,iputils,s20190709,.tar.gz,https://github.com/iputils/iputils/archive/s20190709.tar.gz/#,a15720dd741d7538dd2645f9f516d193636ae4300ff7dbc8bfca757bf166490a)) -+$(eval $(call tar_download,WIREGUARD_TOOLS,wireguard-tools,1.0.20191226,.tar.xz,https://git.zx2c4.com/wireguard-tools/snapshot/,aa8af0fdc9872d369d8c890a84dbc2a2466b55795dccd5b47721b2d97644b04f)) - - KERNEL_BUILD_PATH := $(BUILD_PATH)/kernel$(if $(findstring yes,$(DEBUG_KERNEL)),-debug) - rwildcard=$(foreach d,$(wildcard $1*),$(call rwildcard,$d/,$2) $(filter $(subst *,%,$2),$d)) -@@ -59,23 +60,21 @@ export CFLAGS ?= -O3 -pipe - export LDFLAGS ?= - export CPPFLAGS := -I$(BUILD_PATH)/include - --ifeq ($(CHOST),$(CBUILD)) -+ifeq ($(HOST_ARCH),$(ARCH)) - CROSS_COMPILE_FLAG := --host=$(CHOST) --NOPIE_GCC := gcc -fno-PIE - CFLAGS += -march=native - STRIP := strip - else - $(info Cross compilation: building for $(CBUILD) using $(CHOST)) - CROSS_COMPILE_FLAG := --build=$(CBUILD) --host=$(CHOST) - export CROSS_COMPILE=$(CBUILD)- --NOPIE_GCC := $(CBUILD)-gcc -fno-PIE - STRIP := $(CBUILD)-strip - endif - ifeq ($(ARCH),aarch64) - QEMU_ARCH := aarch64 - KERNEL_ARCH := arm64 - KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/arm64/boot/Image --ifeq ($(CHOST),$(CBUILD)) -+ifeq ($(HOST_ARCH),$(ARCH)) - QEMU_MACHINE := -cpu host -machine virt,gic_version=host,accel=kvm - else - QEMU_MACHINE := -cpu cortex-a53 -machine virt -@@ -85,7 +84,7 @@ else ifeq ($(ARCH),aarch64_be) - QEMU_ARCH := aarch64 - KERNEL_ARCH := arm64 - KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/arm64/boot/Image --ifeq ($(CHOST),$(CBUILD)) -+ifeq ($(HOST_ARCH),$(ARCH)) - QEMU_MACHINE := -cpu host -machine virt,gic_version=host,accel=kvm - else - QEMU_MACHINE := -cpu cortex-a53 -machine virt -@@ -95,7 +94,7 @@ else ifeq ($(ARCH),arm) - QEMU_ARCH := arm - KERNEL_ARCH := arm - KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/arm/boot/zImage --ifeq ($(CHOST),$(CBUILD)) -+ifeq ($(HOST_ARCH),$(ARCH)) - QEMU_MACHINE := -cpu host -machine virt,gic_version=host,accel=kvm - else - QEMU_MACHINE := -cpu cortex-a15 -machine virt -@@ -105,7 +104,7 @@ else ifeq ($(ARCH),armeb) - QEMU_ARCH := arm - KERNEL_ARCH := arm - KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/arm/boot/zImage --ifeq ($(CHOST),$(CBUILD)) -+ifeq ($(HOST_ARCH),$(ARCH)) - QEMU_MACHINE := -cpu host -machine virt,gic_version=host,accel=kvm - else - QEMU_MACHINE := -cpu cortex-a15 -machine virt -@@ -116,7 +115,7 @@ else ifeq ($(ARCH),x86_64) - QEMU_ARCH := x86_64 - KERNEL_ARCH := x86_64 - KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/x86/boot/bzImage --ifeq ($(CHOST),$(CBUILD)) -+ifeq ($(HOST_ARCH),$(ARCH)) - QEMU_MACHINE := -cpu host -machine q35,accel=kvm - else - QEMU_MACHINE := -cpu Skylake-Server -machine q35 -@@ -126,7 +125,7 @@ else ifeq ($(ARCH),i686) - QEMU_ARCH := i386 - KERNEL_ARCH := x86 - KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/x86/boot/bzImage --ifeq ($(subst i686,x86_64,$(CBUILD)),$(CHOST)) -+ifeq ($(subst x86_64,i686,$(HOST_ARCH)),$(ARCH)) - QEMU_MACHINE := -cpu host -machine q35,accel=kvm - else - QEMU_MACHINE := -cpu coreduo -machine q35 -@@ -136,7 +135,7 @@ else ifeq ($(ARCH),mips64) - QEMU_ARCH := mips64 - KERNEL_ARCH := mips - KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/vmlinux --ifeq 
($(CHOST),$(CBUILD)) -+ifeq ($(HOST_ARCH),$(ARCH)) - QEMU_MACHINE := -cpu host -machine malta,accel=kvm - CFLAGS += -EB - else -@@ -147,7 +146,7 @@ else ifeq ($(ARCH),mips64el) - QEMU_ARCH := mips64el - KERNEL_ARCH := mips - KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/vmlinux --ifeq ($(CHOST),$(CBUILD)) -+ifeq ($(HOST_ARCH),$(ARCH)) - QEMU_MACHINE := -cpu host -machine malta,accel=kvm - CFLAGS += -EL - else -@@ -158,7 +157,7 @@ else ifeq ($(ARCH),mips) - QEMU_ARCH := mips - KERNEL_ARCH := mips - KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/vmlinux --ifeq ($(CHOST),$(CBUILD)) -+ifeq ($(HOST_ARCH),$(ARCH)) - QEMU_MACHINE := -cpu host -machine malta,accel=kvm - CFLAGS += -EB - else -@@ -169,7 +168,7 @@ else ifeq ($(ARCH),mipsel) - QEMU_ARCH := mipsel - KERNEL_ARCH := mips - KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/vmlinux --ifeq ($(CHOST),$(CBUILD)) -+ifeq ($(HOST_ARCH),$(ARCH)) - QEMU_MACHINE := -cpu host -machine malta,accel=kvm - CFLAGS += -EL - else -@@ -180,7 +179,7 @@ else ifeq ($(ARCH),powerpc64le) - QEMU_ARCH := ppc64 - KERNEL_ARCH := powerpc - KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/vmlinux --ifeq ($(CHOST),$(CBUILD)) -+ifeq ($(HOST_ARCH),$(ARCH)) - QEMU_MACHINE := -cpu host,accel=kvm -machine pseries - else - QEMU_MACHINE := -machine pseries -@@ -190,7 +189,7 @@ else ifeq ($(ARCH),powerpc) - QEMU_ARCH := ppc - KERNEL_ARCH := powerpc - KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/powerpc/boot/uImage --ifeq ($(CHOST),$(CBUILD)) -+ifeq ($(HOST_ARCH),$(ARCH)) - QEMU_MACHINE := -cpu host,accel=kvm -machine ppce500 - else - QEMU_MACHINE := -machine ppce500 -@@ -200,10 +199,11 @@ else ifeq ($(ARCH),m68k) - QEMU_ARCH := m68k - KERNEL_ARCH := m68k - KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/vmlinux --ifeq ($(CHOST),$(CBUILD)) --QEMU_MACHINE := -cpu host,accel=kvm -machine q800 -+KERNEL_CMDLINE := $(shell sed -n 's/CONFIG_CMDLINE=\(.*\)/\1/p' arch/m68k.config) -+ifeq ($(HOST_ARCH),$(ARCH)) -+QEMU_MACHINE := -cpu host,accel=kvm -machine q800 -smp 1 -append $(KERNEL_CMDLINE) - else --QEMU_MACHINE := -machine q800 -+QEMU_MACHINE := -machine q800 -smp 1 -append $(KERNEL_CMDLINE) - endif - else - $(error I only build: x86_64, i686, arm, armeb, aarch64, aarch64_be, mips, mipsel, mips64, mips64el, powerpc64le, powerpc, m68k) -@@ -238,14 +238,14 @@ $(BUILD_PATH)/init-cpio-spec.txt: - echo "nod /dev/console 644 0 0 c 5 1" >> $@ - echo "dir /bin 755 0 0" >> $@ - echo "file /bin/iperf3 $(IPERF_PATH)/src/iperf3 755 0 0" >> $@ -- echo "file /bin/wg $(WIREGUARD_TOOLS_PATH)/src/tools/wg 755 0 0" >> $@ -+ echo "file /bin/wg $(WIREGUARD_TOOLS_PATH)/src/wg 755 0 0" >> $@ - echo "file /bin/bash $(BASH_PATH)/bash 755 0 0" >> $@ - echo "file /bin/ip $(IPROUTE2_PATH)/ip/ip 755 0 0" >> $@ - echo "file /bin/ss $(IPROUTE2_PATH)/misc/ss 755 0 0" >> $@ - echo "file /bin/ping $(IPUTILS_PATH)/ping 755 0 0" >> $@ - echo "file /bin/ncat $(NMAP_PATH)/ncat/ncat 755 0 0" >> $@ -- echo "file /bin/xtables-multi $(IPTABLES_PATH)/iptables/xtables-multi 755 0 0" >> $@ -- echo "slink /bin/iptables xtables-multi 777 0 0" >> $@ -+ echo "file /bin/xtables-legacy-multi $(IPTABLES_PATH)/iptables/xtables-legacy-multi 755 0 0" >> $@ -+ echo "slink /bin/iptables xtables-legacy-multi 777 0 0" >> $@ - echo "slink /bin/ping6 ping 777 0 0" >> $@ - echo "dir /lib 755 0 0" >> $@ - echo "file /lib/libc.so $(MUSL_PATH)/lib/libc.so 755 0 0" >> $@ -@@ -260,8 +260,8 @@ $(KERNEL_BUILD_PATH)/.config: kernel.con - cd $(KERNEL_BUILD_PATH) && ARCH=$(KERNEL_ARCH) $(KERNEL_PATH)/scripts/kconfig/merge_config.sh -n $(KERNEL_BUILD_PATH)/.config 
$(KERNEL_BUILD_PATH)/minimal.config - $(if $(findstring yes,$(DEBUG_KERNEL)),cp debug.config $(KERNEL_BUILD_PATH) && cd $(KERNEL_BUILD_PATH) && ARCH=$(KERNEL_ARCH) $(KERNEL_PATH)/scripts/kconfig/merge_config.sh -n $(KERNEL_BUILD_PATH)/.config debug.config,) - --$(KERNEL_BZIMAGE): $(KERNEL_BUILD_PATH)/.config $(BUILD_PATH)/init-cpio-spec.txt $(MUSL_PATH)/lib/libc.so $(IPERF_PATH)/src/iperf3 $(IPUTILS_PATH)/ping $(BASH_PATH)/bash $(IPROUTE2_PATH)/misc/ss $(IPROUTE2_PATH)/ip/ip $(IPTABLES_PATH)/iptables/xtables-multi $(NMAP_PATH)/ncat/ncat $(WIREGUARD_TOOLS_PATH)/src/tools/wg $(BUILD_PATH)/init ../netns.sh $(WIREGUARD_SOURCES) -- $(MAKE) -C $(KERNEL_PATH) O=$(KERNEL_BUILD_PATH) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(CROSS_COMPILE) CC="$(NOPIE_GCC)" -+$(KERNEL_BZIMAGE): $(KERNEL_BUILD_PATH)/.config $(BUILD_PATH)/init-cpio-spec.txt $(MUSL_PATH)/lib/libc.so $(IPERF_PATH)/src/iperf3 $(IPUTILS_PATH)/ping $(BASH_PATH)/bash $(IPROUTE2_PATH)/misc/ss $(IPROUTE2_PATH)/ip/ip $(IPTABLES_PATH)/iptables/xtables-legacy-multi $(NMAP_PATH)/ncat/ncat $(WIREGUARD_TOOLS_PATH)/src/wg $(BUILD_PATH)/init ../netns.sh $(WIREGUARD_SOURCES) -+ $(MAKE) -C $(KERNEL_PATH) O=$(KERNEL_BUILD_PATH) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(CROSS_COMPILE) - - $(BUILD_PATH)/include/linux/.installed: | $(KERNEL_BUILD_PATH)/.config - $(MAKE) -C $(KERNEL_PATH) O=$(KERNEL_BUILD_PATH) INSTALL_HDR_PATH=$(BUILD_PATH) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(CROSS_COMPILE) headers_install -@@ -280,7 +280,7 @@ $(BUILD_PATH)/include/.installed: $(MUSL - - $(MUSL_CC): $(MUSL_PATH)/lib/libc.so - sh $(MUSL_PATH)/tools/musl-gcc.specs.sh $(BUILD_PATH)/include $(MUSL_PATH)/lib /lib/ld-linux.so.1 > $(BUILD_PATH)/musl-gcc.specs -- printf '#!/bin/sh\nexec "$(REAL_CC)" --specs="$(BUILD_PATH)/musl-gcc.specs" -fno-stack-protector -no-pie "$$@"\n' > $(BUILD_PATH)/musl-gcc -+ printf '#!/bin/sh\nexec "$(REAL_CC)" --specs="$(BUILD_PATH)/musl-gcc.specs" "$$@"\n' > $(BUILD_PATH)/musl-gcc - chmod +x $(BUILD_PATH)/musl-gcc - - $(IPERF_PATH)/.installed: $(IPERF_TAR) -@@ -291,7 +291,7 @@ $(IPERF_PATH)/.installed: $(IPERF_TAR) - touch $@ - - $(IPERF_PATH)/src/iperf3: | $(IPERF_PATH)/.installed $(USERSPACE_DEPS) -- cd $(IPERF_PATH) && CFLAGS="$(CFLAGS) -D_GNU_SOURCE" ./configure --prefix=/ $(CROSS_COMPILE_FLAG) --enable-static --disable-shared -+ cd $(IPERF_PATH) && CFLAGS="$(CFLAGS) -D_GNU_SOURCE" ./configure --prefix=/ $(CROSS_COMPILE_FLAG) --enable-static --disable-shared --with-openssl=no - $(MAKE) -C $(IPERF_PATH) - $(STRIP) -s $@ - -@@ -308,8 +308,8 @@ $(WIREGUARD_TOOLS_PATH)/.installed: $(WI - flock -s $<.lock tar -C $(BUILD_PATH) -xf $< - touch $@ - --$(WIREGUARD_TOOLS_PATH)/src/tools/wg: | $(WIREGUARD_TOOLS_PATH)/.installed $(LIBMNL_PATH)/src/.libs/libmnl.a $(USERSPACE_DEPS) -- LDFLAGS="$(LDFLAGS) -L$(LIBMNL_PATH)/src/.libs" $(MAKE) -C $(WIREGUARD_TOOLS_PATH)/src/tools LIBMNL_CFLAGS="-I$(LIBMNL_PATH)/include" LIBMNL_LDLIBS="-lmnl" wg -+$(WIREGUARD_TOOLS_PATH)/src/wg: | $(WIREGUARD_TOOLS_PATH)/.installed $(LIBMNL_PATH)/src/.libs/libmnl.a $(USERSPACE_DEPS) -+ LDFLAGS="$(LDFLAGS) -L$(LIBMNL_PATH)/src/.libs" $(MAKE) -C $(WIREGUARD_TOOLS_PATH)/src LIBMNL_CFLAGS="-I$(LIBMNL_PATH)/include" LIBMNL_LDLIBS="-lmnl" wg - $(STRIP) -s $@ - - $(BUILD_PATH)/init: init.c | $(USERSPACE_DEPS) -@@ -323,7 +323,8 @@ $(IPUTILS_PATH)/.installed: $(IPUTILS_TA - touch $@ - - $(IPUTILS_PATH)/ping: | $(IPUTILS_PATH)/.installed $(USERSPACE_DEPS) -- $(MAKE) -C $(IPUTILS_PATH) USE_CAP=no USE_IDN=no USE_NETTLE=no USE_CRYPTO=no ping -+ sed -i /atexit/d $(IPUTILS_PATH)/ping.c -+ cd $(IPUTILS_PATH) 
&& $(CC) $(CFLAGS) -std=c99 -o $@ ping.c ping_common.c ping6_common.c iputils_common.c -D_GNU_SOURCE -D'IPUTILS_VERSION(f)=f' -lresolv $(LDFLAGS) - $(STRIP) -s $@ - - $(BASH_PATH)/.installed: $(BASH_TAR) -@@ -357,7 +358,7 @@ $(IPTABLES_PATH)/.installed: $(IPTABLES_ - sed -i -e "/nfnetlink=[01]/s:=[01]:=0:" -e "/nfconntrack=[01]/s:=[01]:=0:" $(IPTABLES_PATH)/configure - touch $@ - --$(IPTABLES_PATH)/iptables/xtables-multi: | $(IPTABLES_PATH)/.installed $(LIBMNL_PATH)/src/.libs/libmnl.a $(USERSPACE_DEPS) -+$(IPTABLES_PATH)/iptables/xtables-legacy-multi: | $(IPTABLES_PATH)/.installed $(LIBMNL_PATH)/src/.libs/libmnl.a $(USERSPACE_DEPS) - cd $(IPTABLES_PATH) && PKG_CONFIG_LIBDIR="$(LIBMNL_PATH)" ./configure --prefix=/ $(CROSS_COMPILE_FLAG) --enable-static --disable-shared --disable-nftables --disable-bpf-compiler --disable-nfsynproxy --disable-libipq --with-kernel=$(BUILD_PATH)/include - $(MAKE) -C $(IPTABLES_PATH) - $(STRIP) -s $@ -@@ -368,8 +369,9 @@ $(NMAP_PATH)/.installed: $(NMAP_TAR) - touch $@ - - $(NMAP_PATH)/ncat/ncat: | $(NMAP_PATH)/.installed $(USERSPACE_DEPS) -- cd $(NMAP_PATH) && ./configure --prefix=/ $(CROSS_COMPILE_FLAG) --enable-static --disable-shared --without-ndiff --without-zenmap --without-nping --with-libpcap=included --with-libpcre=included --with-libdnet=included --without-liblua --with-liblinear=included --without-nmap-update --without-openssl --with-pcap=linux -- $(MAKE) -C $(NMAP_PATH) build-ncat -+ cd $(NMAP_PATH) && ./configure --prefix=/ $(CROSS_COMPILE_FLAG) --enable-static --disable-shared --without-ndiff --without-zenmap --without-nping --with-libpcap=included --with-libpcre=included --with-libdnet=included --without-liblua --with-liblinear=included --without-nmap-update --without-openssl --with-pcap=linux --without-libssh -+ $(MAKE) -C $(NMAP_PATH)/libpcap -+ $(MAKE) -C $(NMAP_PATH)/ncat - $(STRIP) -s $@ - - clean: -@@ -379,7 +381,7 @@ distclean: clean - rm -rf $(DISTFILES_PATH) - - menuconfig: $(KERNEL_BUILD_PATH)/.config -- $(MAKE) -C $(KERNEL_PATH) O=$(KERNEL_BUILD_PATH) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(CROSS_COMPILE) CC="$(NOPIE_GCC)" menuconfig -+ $(MAKE) -C $(KERNEL_PATH) O=$(KERNEL_BUILD_PATH) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(CROSS_COMPILE) menuconfig - - .PHONY: qemu build clean distclean menuconfig - .DELETE_ON_ERROR: ---- a/tools/testing/selftests/wireguard/qemu/arch/m68k.config -+++ b/tools/testing/selftests/wireguard/qemu/arch/m68k.config -@@ -1,9 +1,9 @@ - CONFIG_MMU=y -+CONFIG_M68KCLASSIC=y - CONFIG_M68040=y - CONFIG_MAC=y - CONFIG_SERIAL_PMACZILOG=y - CONFIG_SERIAL_PMACZILOG_TTYS=y - CONFIG_SERIAL_PMACZILOG_CONSOLE=y --CONFIG_CMDLINE_BOOL=y - CONFIG_CMDLINE="console=ttyS0 wg.success=ttyS1" - CONFIG_FRAME_WARN=1024 ---- a/tools/testing/selftests/wireguard/qemu/init.c -+++ b/tools/testing/selftests/wireguard/qemu/init.c -@@ -21,6 +21,7 @@ - #include - #include - #include -+#include - #include - #include - ---- a/tools/testing/selftests/wireguard/qemu/kernel.config -+++ b/tools/testing/selftests/wireguard/qemu/kernel.config -@@ -39,6 +39,7 @@ CONFIG_PRINTK=y - CONFIG_KALLSYMS=y - CONFIG_BUG=y - CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y -+CONFIG_JUMP_LABEL=y - CONFIG_EMBEDDED=n - CONFIG_BASE_FULL=y - CONFIG_FUTEX=y -@@ -55,6 +56,7 @@ CONFIG_NO_HZ_IDLE=y - CONFIG_NO_HZ_FULL=n - CONFIG_HZ_PERIODIC=n - CONFIG_HIGH_RES_TIMERS=y -+CONFIG_COMPAT_32BIT_TIME=y - CONFIG_ARCH_RANDOM=y - CONFIG_FILE_LOCKING=y - CONFIG_POSIX_TIMERS=y diff --git a/target/linux/generic/backport-5.4/080-wireguard-0080-wireguard-socket-mark-skbs-as-not-on-list-when-recei.patch 
b/target/linux/generic/backport-5.4/080-wireguard-0080-wireguard-socket-mark-skbs-as-not-on-list-when-recei.patch new file mode 100644 index 0000000000..779491c8db --- /dev/null +++ b/target/linux/generic/backport-5.4/080-wireguard-0080-wireguard-socket-mark-skbs-as-not-on-list-when-recei.patch @@ -0,0 +1,34 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: "Jason A. Donenfeld" +Date: Thu, 2 Jan 2020 17:47:51 +0100 +Subject: [PATCH] wireguard: socket: mark skbs as not on list when receiving + via gro + +commit 736775d06bac60d7a353e405398b48b2bd8b1e54 upstream. + +Certain drivers will pass gro skbs to udp, at which point the udp driver +simply iterates through them and passes them off to encap_rcv, which is +where we pick up. At the moment, we're not attempting to coalesce these +into bundles, but we also don't want to wind up having cascaded lists of +skbs treated separately. The right behavior here, then, is to just mark +each incoming one as not on a list. This can be seen in practice, for +example, with Qualcomm's rmnet_perf driver. + +Signed-off-by: Jason A. Donenfeld +Tested-by: Yaroslav Furman +Signed-off-by: David S. Miller +Signed-off-by: Jason A. Donenfeld +--- + drivers/net/wireguard/socket.c | 1 + + 1 file changed, 1 insertion(+) + +--- a/drivers/net/wireguard/socket.c ++++ b/drivers/net/wireguard/socket.c +@@ -333,6 +333,7 @@ static int wg_receive(struct sock *sk, s + wg = sk->sk_user_data; + if (unlikely(!wg)) + goto err; ++ skb_mark_not_on_list(skb); + wg_packet_receive(wg, skb); + return 0; + diff --git a/target/linux/generic/backport-5.4/080-wireguard-0081-wireguard-allowedips-fix-use-after-free-in-root_remo.patch b/target/linux/generic/backport-5.4/080-wireguard-0081-wireguard-allowedips-fix-use-after-free-in-root_remo.patch new file mode 100644 index 0000000000..e77ab5834a --- /dev/null +++ b/target/linux/generic/backport-5.4/080-wireguard-0081-wireguard-allowedips-fix-use-after-free-in-root_remo.patch @@ -0,0 +1,164 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Eric Dumazet +Date: Tue, 4 Feb 2020 22:17:25 +0100 +Subject: [PATCH] wireguard: allowedips: fix use-after-free in + root_remove_peer_lists + +commit 9981159fc3b677b357f84e069a11de5a5ec8a2a8 upstream. + +In the unlikely case a new node could not be allocated, we need to +remove @newnode from @peer->allowedips_list before freeing it. 
+ +syzbot reported: + +BUG: KASAN: use-after-free in __list_del_entry_valid+0xdc/0xf5 lib/list_debug.c:54 +Read of size 8 at addr ffff88809881a538 by task syz-executor.4/30133 + +CPU: 0 PID: 30133 Comm: syz-executor.4 Not tainted 5.5.0-syzkaller #0 +Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 +Call Trace: + __dump_stack lib/dump_stack.c:77 [inline] + dump_stack+0x197/0x210 lib/dump_stack.c:118 + print_address_description.constprop.0.cold+0xd4/0x30b mm/kasan/report.c:374 + __kasan_report.cold+0x1b/0x32 mm/kasan/report.c:506 + kasan_report+0x12/0x20 mm/kasan/common.c:639 + __asan_report_load8_noabort+0x14/0x20 mm/kasan/generic_report.c:135 + __list_del_entry_valid+0xdc/0xf5 lib/list_debug.c:54 + __list_del_entry include/linux/list.h:132 [inline] + list_del include/linux/list.h:146 [inline] + root_remove_peer_lists+0x24f/0x4b0 drivers/net/wireguard/allowedips.c:65 + wg_allowedips_free+0x232/0x390 drivers/net/wireguard/allowedips.c:300 + wg_peer_remove_all+0xd5/0x620 drivers/net/wireguard/peer.c:187 + wg_set_device+0xd01/0x1350 drivers/net/wireguard/netlink.c:542 + genl_family_rcv_msg_doit net/netlink/genetlink.c:672 [inline] + genl_family_rcv_msg net/netlink/genetlink.c:717 [inline] + genl_rcv_msg+0x67d/0xea0 net/netlink/genetlink.c:734 + netlink_rcv_skb+0x177/0x450 net/netlink/af_netlink.c:2477 + genl_rcv+0x29/0x40 net/netlink/genetlink.c:745 + netlink_unicast_kernel net/netlink/af_netlink.c:1302 [inline] + netlink_unicast+0x59e/0x7e0 net/netlink/af_netlink.c:1328 + netlink_sendmsg+0x91c/0xea0 net/netlink/af_netlink.c:1917 + sock_sendmsg_nosec net/socket.c:652 [inline] + sock_sendmsg+0xd7/0x130 net/socket.c:672 + ____sys_sendmsg+0x753/0x880 net/socket.c:2343 + ___sys_sendmsg+0x100/0x170 net/socket.c:2397 + __sys_sendmsg+0x105/0x1d0 net/socket.c:2430 + __do_sys_sendmsg net/socket.c:2439 [inline] + __se_sys_sendmsg net/socket.c:2437 [inline] + __x64_sys_sendmsg+0x78/0xb0 net/socket.c:2437 + do_syscall_64+0xfa/0x790 arch/x86/entry/common.c:294 + entry_SYSCALL_64_after_hwframe+0x49/0xbe +RIP: 0033:0x45b399 +Code: ad b6 fb ff c3 66 2e 0f 1f 84 00 00 00 00 00 66 90 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 0f 83 7b b6 fb ff c3 66 2e 0f 1f 84 00 00 00 00 +RSP: 002b:00007f99a9bcdc78 EFLAGS: 00000246 ORIG_RAX: 000000000000002e +RAX: ffffffffffffffda RBX: 00007f99a9bce6d4 RCX: 000000000045b399 +RDX: 0000000000000000 RSI: 0000000020001340 RDI: 0000000000000003 +RBP: 000000000075bf20 R08: 0000000000000000 R09: 0000000000000000 +R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000004 +R13: 00000000000009ba R14: 00000000004cb2b8 R15: 0000000000000009 + +Allocated by task 30103: + save_stack+0x23/0x90 mm/kasan/common.c:72 + set_track mm/kasan/common.c:80 [inline] + __kasan_kmalloc mm/kasan/common.c:513 [inline] + __kasan_kmalloc.constprop.0+0xcf/0xe0 mm/kasan/common.c:486 + kasan_kmalloc+0x9/0x10 mm/kasan/common.c:527 + kmem_cache_alloc_trace+0x158/0x790 mm/slab.c:3551 + kmalloc include/linux/slab.h:556 [inline] + kzalloc include/linux/slab.h:670 [inline] + add+0x70a/0x1970 drivers/net/wireguard/allowedips.c:236 + wg_allowedips_insert_v4+0xf6/0x160 drivers/net/wireguard/allowedips.c:320 + set_allowedip drivers/net/wireguard/netlink.c:343 [inline] + set_peer+0xfb9/0x1150 drivers/net/wireguard/netlink.c:468 + wg_set_device+0xbd4/0x1350 drivers/net/wireguard/netlink.c:591 + genl_family_rcv_msg_doit net/netlink/genetlink.c:672 [inline] + genl_family_rcv_msg net/netlink/genetlink.c:717 [inline] + 
genl_rcv_msg+0x67d/0xea0 net/netlink/genetlink.c:734 + netlink_rcv_skb+0x177/0x450 net/netlink/af_netlink.c:2477 + genl_rcv+0x29/0x40 net/netlink/genetlink.c:745 + netlink_unicast_kernel net/netlink/af_netlink.c:1302 [inline] + netlink_unicast+0x59e/0x7e0 net/netlink/af_netlink.c:1328 + netlink_sendmsg+0x91c/0xea0 net/netlink/af_netlink.c:1917 + sock_sendmsg_nosec net/socket.c:652 [inline] + sock_sendmsg+0xd7/0x130 net/socket.c:672 + ____sys_sendmsg+0x753/0x880 net/socket.c:2343 + ___sys_sendmsg+0x100/0x170 net/socket.c:2397 + __sys_sendmsg+0x105/0x1d0 net/socket.c:2430 + __do_sys_sendmsg net/socket.c:2439 [inline] + __se_sys_sendmsg net/socket.c:2437 [inline] + __x64_sys_sendmsg+0x78/0xb0 net/socket.c:2437 + do_syscall_64+0xfa/0x790 arch/x86/entry/common.c:294 + entry_SYSCALL_64_after_hwframe+0x49/0xbe + +Freed by task 30103: + save_stack+0x23/0x90 mm/kasan/common.c:72 + set_track mm/kasan/common.c:80 [inline] + kasan_set_free_info mm/kasan/common.c:335 [inline] + __kasan_slab_free+0x102/0x150 mm/kasan/common.c:474 + kasan_slab_free+0xe/0x10 mm/kasan/common.c:483 + __cache_free mm/slab.c:3426 [inline] + kfree+0x10a/0x2c0 mm/slab.c:3757 + add+0x12d2/0x1970 drivers/net/wireguard/allowedips.c:266 + wg_allowedips_insert_v4+0xf6/0x160 drivers/net/wireguard/allowedips.c:320 + set_allowedip drivers/net/wireguard/netlink.c:343 [inline] + set_peer+0xfb9/0x1150 drivers/net/wireguard/netlink.c:468 + wg_set_device+0xbd4/0x1350 drivers/net/wireguard/netlink.c:591 + genl_family_rcv_msg_doit net/netlink/genetlink.c:672 [inline] + genl_family_rcv_msg net/netlink/genetlink.c:717 [inline] + genl_rcv_msg+0x67d/0xea0 net/netlink/genetlink.c:734 + netlink_rcv_skb+0x177/0x450 net/netlink/af_netlink.c:2477 + genl_rcv+0x29/0x40 net/netlink/genetlink.c:745 + netlink_unicast_kernel net/netlink/af_netlink.c:1302 [inline] + netlink_unicast+0x59e/0x7e0 net/netlink/af_netlink.c:1328 + netlink_sendmsg+0x91c/0xea0 net/netlink/af_netlink.c:1917 + sock_sendmsg_nosec net/socket.c:652 [inline] + sock_sendmsg+0xd7/0x130 net/socket.c:672 + ____sys_sendmsg+0x753/0x880 net/socket.c:2343 + ___sys_sendmsg+0x100/0x170 net/socket.c:2397 + __sys_sendmsg+0x105/0x1d0 net/socket.c:2430 + __do_sys_sendmsg net/socket.c:2439 [inline] + __se_sys_sendmsg net/socket.c:2437 [inline] + __x64_sys_sendmsg+0x78/0xb0 net/socket.c:2437 + do_syscall_64+0xfa/0x790 arch/x86/entry/common.c:294 + entry_SYSCALL_64_after_hwframe+0x49/0xbe + +The buggy address belongs to the object at ffff88809881a500 + which belongs to the cache kmalloc-64 of size 64 +The buggy address is located 56 bytes inside of + 64-byte region [ffff88809881a500, ffff88809881a540) +The buggy address belongs to the page: +page:ffffea0002620680 refcount:1 mapcount:0 mapping:ffff8880aa400380 index:0x0 +raw: 00fffe0000000200 ffffea000250b748 ffffea000254bac8 ffff8880aa400380 +raw: 0000000000000000 ffff88809881a000 0000000100000020 0000000000000000 +page dumped because: kasan: bad access detected + +Memory state around the buggy address: + ffff88809881a400: fb fb fb fb fb fb fb fb fc fc fc fc fc fc fc fc + ffff88809881a480: 00 00 00 00 00 fc fc fc fc fc fc fc fc fc fc fc +>ffff88809881a500: fb fb fb fb fb fb fb fb fc fc fc fc fc fc fc fc + ^ + ffff88809881a580: fb fb fb fb fb fb fb fb fc fc fc fc fc fc fc fc + ffff88809881a600: 00 00 00 00 00 00 fc fc fc fc fc fc fc fc fc fc + +Fixes: e7096c131e51 ("net: WireGuard secure network tunnel") +Signed-off-by: Eric Dumazet +Reported-by: syzbot +Cc: Jason A. Donenfeld +Cc: wireguard@lists.zx2c4.com +Signed-off-by: Jason A. 
Donenfeld +Signed-off-by: David S. Miller +Signed-off-by: Jason A. Donenfeld +--- + drivers/net/wireguard/allowedips.c | 1 + + 1 file changed, 1 insertion(+) + +--- a/drivers/net/wireguard/allowedips.c ++++ b/drivers/net/wireguard/allowedips.c +@@ -263,6 +263,7 @@ static int add(struct allowedips_node __ + } else { + node = kzalloc(sizeof(*node), GFP_KERNEL); + if (unlikely(!node)) { ++ list_del(&newnode->peer_list); + kfree(newnode); + return -ENOMEM; + } diff --git a/target/linux/generic/backport-5.4/080-wireguard-0081-wireguard-queueing-do-not-account-for-pfmemalloc-whe.patch b/target/linux/generic/backport-5.4/080-wireguard-0081-wireguard-queueing-do-not-account-for-pfmemalloc-whe.patch deleted file mode 100644 index fb03b1b1a6..0000000000 --- a/target/linux/generic/backport-5.4/080-wireguard-0081-wireguard-queueing-do-not-account-for-pfmemalloc-whe.patch +++ /dev/null @@ -1,39 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Thu, 2 Jan 2020 17:47:50 +0100 -Subject: [PATCH] wireguard: queueing: do not account for pfmemalloc when - clearing skb header - -commit 04d2ea92a18417619182cbb79063f154892b0150 upstream. - -Before 8b7008620b84 ("net: Don't copy pfmemalloc flag in __copy_skb_ -header()"), the pfmemalloc flag used to be between headers_start and -headers_end, which is a region we clear when preparing the packet for -encryption/decryption. This is a parameter we certainly want to -preserve, which is why 8b7008620b84 moved it out of there. The code here -was written in a world before 8b7008620b84, though, where we had to -manually account for it. This commit brings things up to speed. - -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. Donenfeld ---- - drivers/net/wireguard/queueing.h | 3 --- - 1 file changed, 3 deletions(-) - ---- a/drivers/net/wireguard/queueing.h -+++ b/drivers/net/wireguard/queueing.h -@@ -83,13 +83,10 @@ static inline __be16 wg_skb_examine_untr - - static inline void wg_reset_packet(struct sk_buff *skb) - { -- const int pfmemalloc = skb->pfmemalloc; -- - skb_scrub_packet(skb, true); - memset(&skb->headers_start, 0, - offsetof(struct sk_buff, headers_end) - - offsetof(struct sk_buff, headers_start)); -- skb->pfmemalloc = pfmemalloc; - skb->queue_mapping = 0; - skb->nohdr = 0; - skb->peeked = 0; diff --git a/target/linux/generic/backport-5.4/080-wireguard-0082-wireguard-noise-reject-peers-with-low-order-public-k.patch b/target/linux/generic/backport-5.4/080-wireguard-0082-wireguard-noise-reject-peers-with-low-order-public-k.patch new file mode 100644 index 0000000000..55bb276118 --- /dev/null +++ b/target/linux/generic/backport-5.4/080-wireguard-0082-wireguard-noise-reject-peers-with-low-order-public-k.patch @@ -0,0 +1,233 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: "Jason A. Donenfeld" +Date: Tue, 4 Feb 2020 22:17:26 +0100 +Subject: [PATCH] wireguard: noise: reject peers with low order public keys + +commit ec31c2676a10e064878927b243fada8c2fb0c03c upstream. + +Our static-static calculation returns a failure if the public key is of +low order. We check for this when peers are added, and don't allow them +to be added if they're low order, except in the case where we haven't +yet been given a private key. In that case, we would defer the removal +of the peer until we're given a private key, since at that point we're +doing new static-static calculations which incur failures we can act on. 
+This meant, however, that we wound up removing peers rather late in the +configuration flow. + +Syzkaller points out that peer_remove calls flush_workqueue, which in +turn might then wait for sending a handshake initiation to complete. +Since handshake initiation needs the static identity lock, holding the +static identity lock while calling peer_remove can result in a rare +deadlock. We have precisely this case in this situation of late-stage +peer removal based on an invalid public key. We can't drop the lock when +removing, because then incoming handshakes might interact with a bogus +static-static calculation. + +While the band-aid patch for this would involve breaking up the peer +removal into two steps like wg_peer_remove_all does, in order to solve +the locking issue, there's actually a much more elegant way of fixing +this: + +If the static-static calculation succeeds with one private key, it +*must* succeed with all others, because all 32-byte strings map to valid +private keys, thanks to clamping. That means we can get rid of this +silly dance and locking headaches of removing peers late in the +configuration flow, and instead just reject them early on, regardless of +whether the device has yet been assigned a private key. For the case +where the device doesn't yet have a private key, we safely use zeros +just for the purposes of checking for low order points by way of +checking the output of the calculation. + +The following PoC will trigger the deadlock: + +ip link add wg0 type wireguard +ip addr add 10.0.0.1/24 dev wg0 +ip link set wg0 up +ping -f 10.0.0.2 & +while true; do + wg set wg0 private-key /dev/null peer AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= allowed-ips 10.0.0.0/24 endpoint 10.0.0.3:1234 + wg set wg0 private-key <(echo AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=) +done + +[ 0.949105] ====================================================== +[ 0.949550] WARNING: possible circular locking dependency detected +[ 0.950143] 5.5.0-debug+ #18 Not tainted +[ 0.950431] ------------------------------------------------------ +[ 0.950959] wg/89 is trying to acquire lock: +[ 0.951252] ffff8880333e2128 ((wq_completion)wg-kex-wg0){+.+.}, at: flush_workqueue+0xe3/0x12f0 +[ 0.951865] +[ 0.951865] but task is already holding lock: +[ 0.952280] ffff888032819bc0 (&wg->static_identity.lock){++++}, at: wg_set_device+0x95d/0xcc0 +[ 0.953011] +[ 0.953011] which lock already depends on the new lock. 
+[ 0.953011] +[ 0.953651] +[ 0.953651] the existing dependency chain (in reverse order) is: +[ 0.954292] +[ 0.954292] -> #2 (&wg->static_identity.lock){++++}: +[ 0.954804] lock_acquire+0x127/0x350 +[ 0.955133] down_read+0x83/0x410 +[ 0.955428] wg_noise_handshake_create_initiation+0x97/0x700 +[ 0.955885] wg_packet_send_handshake_initiation+0x13a/0x280 +[ 0.956401] wg_packet_handshake_send_worker+0x10/0x20 +[ 0.956841] process_one_work+0x806/0x1500 +[ 0.957167] worker_thread+0x8c/0xcb0 +[ 0.957549] kthread+0x2ee/0x3b0 +[ 0.957792] ret_from_fork+0x24/0x30 +[ 0.958234] +[ 0.958234] -> #1 ((work_completion)(&peer->transmit_handshake_work)){+.+.}: +[ 0.958808] lock_acquire+0x127/0x350 +[ 0.959075] process_one_work+0x7ab/0x1500 +[ 0.959369] worker_thread+0x8c/0xcb0 +[ 0.959639] kthread+0x2ee/0x3b0 +[ 0.959896] ret_from_fork+0x24/0x30 +[ 0.960346] +[ 0.960346] -> #0 ((wq_completion)wg-kex-wg0){+.+.}: +[ 0.960945] check_prev_add+0x167/0x1e20 +[ 0.961351] __lock_acquire+0x2012/0x3170 +[ 0.961725] lock_acquire+0x127/0x350 +[ 0.961990] flush_workqueue+0x106/0x12f0 +[ 0.962280] peer_remove_after_dead+0x160/0x220 +[ 0.962600] wg_set_device+0xa24/0xcc0 +[ 0.962994] genl_rcv_msg+0x52f/0xe90 +[ 0.963298] netlink_rcv_skb+0x111/0x320 +[ 0.963618] genl_rcv+0x1f/0x30 +[ 0.963853] netlink_unicast+0x3f6/0x610 +[ 0.964245] netlink_sendmsg+0x700/0xb80 +[ 0.964586] __sys_sendto+0x1dd/0x2c0 +[ 0.964854] __x64_sys_sendto+0xd8/0x1b0 +[ 0.965141] do_syscall_64+0x90/0xd9a +[ 0.965408] entry_SYSCALL_64_after_hwframe+0x49/0xbe +[ 0.965769] +[ 0.965769] other info that might help us debug this: +[ 0.965769] +[ 0.966337] Chain exists of: +[ 0.966337] (wq_completion)wg-kex-wg0 --> (work_completion)(&peer->transmit_handshake_work) --> &wg->static_identity.lock +[ 0.966337] +[ 0.967417] Possible unsafe locking scenario: +[ 0.967417] +[ 0.967836] CPU0 CPU1 +[ 0.968155] ---- ---- +[ 0.968497] lock(&wg->static_identity.lock); +[ 0.968779] lock((work_completion)(&peer->transmit_handshake_work)); +[ 0.969345] lock(&wg->static_identity.lock); +[ 0.969809] lock((wq_completion)wg-kex-wg0); +[ 0.970146] +[ 0.970146] *** DEADLOCK *** +[ 0.970146] +[ 0.970531] 5 locks held by wg/89: +[ 0.970908] #0: ffffffff827433c8 (cb_lock){++++}, at: genl_rcv+0x10/0x30 +[ 0.971400] #1: ffffffff82743480 (genl_mutex){+.+.}, at: genl_rcv_msg+0x642/0xe90 +[ 0.971924] #2: ffffffff827160c0 (rtnl_mutex){+.+.}, at: wg_set_device+0x9f/0xcc0 +[ 0.972488] #3: ffff888032819de0 (&wg->device_update_lock){+.+.}, at: wg_set_device+0xb0/0xcc0 +[ 0.973095] #4: ffff888032819bc0 (&wg->static_identity.lock){++++}, at: wg_set_device+0x95d/0xcc0 +[ 0.973653] +[ 0.973653] stack backtrace: +[ 0.973932] CPU: 1 PID: 89 Comm: wg Not tainted 5.5.0-debug+ #18 +[ 0.974476] Call Trace: +[ 0.974638] dump_stack+0x97/0xe0 +[ 0.974869] check_noncircular+0x312/0x3e0 +[ 0.975132] ? print_circular_bug+0x1f0/0x1f0 +[ 0.975410] ? __kernel_text_address+0x9/0x30 +[ 0.975727] ? unwind_get_return_address+0x51/0x90 +[ 0.976024] check_prev_add+0x167/0x1e20 +[ 0.976367] ? graph_lock+0x70/0x160 +[ 0.976682] __lock_acquire+0x2012/0x3170 +[ 0.976998] ? register_lock_class+0x1140/0x1140 +[ 0.977323] lock_acquire+0x127/0x350 +[ 0.977627] ? flush_workqueue+0xe3/0x12f0 +[ 0.977890] flush_workqueue+0x106/0x12f0 +[ 0.978147] ? flush_workqueue+0xe3/0x12f0 +[ 0.978410] ? find_held_lock+0x2c/0x110 +[ 0.978662] ? lock_downgrade+0x6e0/0x6e0 +[ 0.978919] ? queue_rcu_work+0x60/0x60 +[ 0.979166] ? netif_napi_del+0x151/0x3b0 +[ 0.979501] ? 
peer_remove_after_dead+0x160/0x220 +[ 0.979871] peer_remove_after_dead+0x160/0x220 +[ 0.980232] wg_set_device+0xa24/0xcc0 +[ 0.980516] ? deref_stack_reg+0x8e/0xc0 +[ 0.980801] ? set_peer+0xe10/0xe10 +[ 0.981040] ? __ww_mutex_check_waiters+0x150/0x150 +[ 0.981430] ? __nla_validate_parse+0x163/0x270 +[ 0.981719] ? genl_family_rcv_msg_attrs_parse+0x13f/0x310 +[ 0.982078] genl_rcv_msg+0x52f/0xe90 +[ 0.982348] ? genl_family_rcv_msg_attrs_parse+0x310/0x310 +[ 0.982690] ? register_lock_class+0x1140/0x1140 +[ 0.983049] netlink_rcv_skb+0x111/0x320 +[ 0.983298] ? genl_family_rcv_msg_attrs_parse+0x310/0x310 +[ 0.983645] ? netlink_ack+0x880/0x880 +[ 0.983888] genl_rcv+0x1f/0x30 +[ 0.984168] netlink_unicast+0x3f6/0x610 +[ 0.984443] ? netlink_detachskb+0x60/0x60 +[ 0.984729] ? find_held_lock+0x2c/0x110 +[ 0.984976] netlink_sendmsg+0x700/0xb80 +[ 0.985220] ? netlink_broadcast_filtered+0xa60/0xa60 +[ 0.985533] __sys_sendto+0x1dd/0x2c0 +[ 0.985763] ? __x64_sys_getpeername+0xb0/0xb0 +[ 0.986039] ? sockfd_lookup_light+0x17/0x160 +[ 0.986397] ? __sys_recvmsg+0x8c/0xf0 +[ 0.986711] ? __sys_recvmsg_sock+0xd0/0xd0 +[ 0.987018] __x64_sys_sendto+0xd8/0x1b0 +[ 0.987283] ? lockdep_hardirqs_on+0x39b/0x5a0 +[ 0.987666] do_syscall_64+0x90/0xd9a +[ 0.987903] entry_SYSCALL_64_after_hwframe+0x49/0xbe +[ 0.988223] RIP: 0033:0x7fe77c12003e +[ 0.988508] Code: c3 8b 07 85 c0 75 24 49 89 fb 48 89 f0 48 89 d7 48 89 ce 4c 89 c2 4d 89 ca 4c 8b 44 24 08 4c 8b 4c 24 10 4c 4 +[ 0.989666] RSP: 002b:00007fffada2ed58 EFLAGS: 00000246 ORIG_RAX: 000000000000002c +[ 0.990137] RAX: ffffffffffffffda RBX: 00007fe77c159d48 RCX: 00007fe77c12003e +[ 0.990583] RDX: 0000000000000040 RSI: 000055fd1d38e020 RDI: 0000000000000004 +[ 0.991091] RBP: 000055fd1d38e020 R08: 000055fd1cb63358 R09: 000000000000000c +[ 0.991568] R10: 0000000000000000 R11: 0000000000000246 R12: 000000000000002c +[ 0.992014] R13: 0000000000000004 R14: 000055fd1d38e020 R15: 0000000000000001 + +Signed-off-by: Jason A. Donenfeld +Reported-by: syzbot +Signed-off-by: David S. Miller +Signed-off-by: Jason A. 
Donenfeld +--- + drivers/net/wireguard/netlink.c | 6 ++---- + drivers/net/wireguard/noise.c | 10 +++++++--- + 2 files changed, 9 insertions(+), 7 deletions(-) + +--- a/drivers/net/wireguard/netlink.c ++++ b/drivers/net/wireguard/netlink.c +@@ -575,10 +575,8 @@ static int wg_set_device(struct sk_buff + private_key); + list_for_each_entry_safe(peer, temp, &wg->peer_list, + peer_list) { +- if (wg_noise_precompute_static_static(peer)) +- wg_noise_expire_current_peer_keypairs(peer); +- else +- wg_peer_remove(peer); ++ BUG_ON(!wg_noise_precompute_static_static(peer)); ++ wg_noise_expire_current_peer_keypairs(peer); + } + wg_cookie_checker_precompute_device_keys(&wg->cookie_checker); + up_write(&wg->static_identity.lock); +--- a/drivers/net/wireguard/noise.c ++++ b/drivers/net/wireguard/noise.c +@@ -46,17 +46,21 @@ void __init wg_noise_init(void) + /* Must hold peer->handshake.static_identity->lock */ + bool wg_noise_precompute_static_static(struct wg_peer *peer) + { +- bool ret = true; ++ bool ret; + + down_write(&peer->handshake.lock); +- if (peer->handshake.static_identity->has_identity) ++ if (peer->handshake.static_identity->has_identity) { + ret = curve25519( + peer->handshake.precomputed_static_static, + peer->handshake.static_identity->static_private, + peer->handshake.remote_static); +- else ++ } else { ++ u8 empty[NOISE_PUBLIC_KEY_LEN] = { 0 }; ++ ++ ret = curve25519(empty, empty, peer->handshake.remote_static); + memset(peer->handshake.precomputed_static_static, 0, + NOISE_PUBLIC_KEY_LEN); ++ } + up_write(&peer->handshake.lock); + return ret; + } diff --git a/target/linux/generic/backport-5.4/080-wireguard-0082-wireguard-socket-mark-skbs-as-not-on-list-when-recei.patch b/target/linux/generic/backport-5.4/080-wireguard-0082-wireguard-socket-mark-skbs-as-not-on-list-when-recei.patch deleted file mode 100644 index 779491c8db..0000000000 --- a/target/linux/generic/backport-5.4/080-wireguard-0082-wireguard-socket-mark-skbs-as-not-on-list-when-recei.patch +++ /dev/null @@ -1,34 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Thu, 2 Jan 2020 17:47:51 +0100 -Subject: [PATCH] wireguard: socket: mark skbs as not on list when receiving - via gro - -commit 736775d06bac60d7a353e405398b48b2bd8b1e54 upstream. - -Certain drivers will pass gro skbs to udp, at which point the udp driver -simply iterates through them and passes them off to encap_rcv, which is -where we pick up. At the moment, we're not attempting to coalesce these -into bundles, but we also don't want to wind up having cascaded lists of -skbs treated separately. The right behavior here, then, is to just mark -each incoming one as not on a list. This can be seen in practice, for -example, with Qualcomm's rmnet_perf driver. - -Signed-off-by: Jason A. Donenfeld -Tested-by: Yaroslav Furman -Signed-off-by: David S. Miller -Signed-off-by: Jason A. 
Donenfeld ---- - drivers/net/wireguard/socket.c | 1 + - 1 file changed, 1 insertion(+) - ---- a/drivers/net/wireguard/socket.c -+++ b/drivers/net/wireguard/socket.c -@@ -333,6 +333,7 @@ static int wg_receive(struct sock *sk, s - wg = sk->sk_user_data; - if (unlikely(!wg)) - goto err; -+ skb_mark_not_on_list(skb); - wg_packet_receive(wg, skb); - return 0; - diff --git a/target/linux/generic/backport-5.4/080-wireguard-0083-wireguard-allowedips-fix-use-after-free-in-root_remo.patch b/target/linux/generic/backport-5.4/080-wireguard-0083-wireguard-allowedips-fix-use-after-free-in-root_remo.patch deleted file mode 100644 index e77ab5834a..0000000000 --- a/target/linux/generic/backport-5.4/080-wireguard-0083-wireguard-allowedips-fix-use-after-free-in-root_remo.patch +++ /dev/null @@ -1,164 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Eric Dumazet -Date: Tue, 4 Feb 2020 22:17:25 +0100 -Subject: [PATCH] wireguard: allowedips: fix use-after-free in - root_remove_peer_lists - -commit 9981159fc3b677b357f84e069a11de5a5ec8a2a8 upstream. - -In the unlikely case a new node could not be allocated, we need to -remove @newnode from @peer->allowedips_list before freeing it. - -syzbot reported: - -BUG: KASAN: use-after-free in __list_del_entry_valid+0xdc/0xf5 lib/list_debug.c:54 -Read of size 8 at addr ffff88809881a538 by task syz-executor.4/30133 - -CPU: 0 PID: 30133 Comm: syz-executor.4 Not tainted 5.5.0-syzkaller #0 -Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 -Call Trace: - __dump_stack lib/dump_stack.c:77 [inline] - dump_stack+0x197/0x210 lib/dump_stack.c:118 - print_address_description.constprop.0.cold+0xd4/0x30b mm/kasan/report.c:374 - __kasan_report.cold+0x1b/0x32 mm/kasan/report.c:506 - kasan_report+0x12/0x20 mm/kasan/common.c:639 - __asan_report_load8_noabort+0x14/0x20 mm/kasan/generic_report.c:135 - __list_del_entry_valid+0xdc/0xf5 lib/list_debug.c:54 - __list_del_entry include/linux/list.h:132 [inline] - list_del include/linux/list.h:146 [inline] - root_remove_peer_lists+0x24f/0x4b0 drivers/net/wireguard/allowedips.c:65 - wg_allowedips_free+0x232/0x390 drivers/net/wireguard/allowedips.c:300 - wg_peer_remove_all+0xd5/0x620 drivers/net/wireguard/peer.c:187 - wg_set_device+0xd01/0x1350 drivers/net/wireguard/netlink.c:542 - genl_family_rcv_msg_doit net/netlink/genetlink.c:672 [inline] - genl_family_rcv_msg net/netlink/genetlink.c:717 [inline] - genl_rcv_msg+0x67d/0xea0 net/netlink/genetlink.c:734 - netlink_rcv_skb+0x177/0x450 net/netlink/af_netlink.c:2477 - genl_rcv+0x29/0x40 net/netlink/genetlink.c:745 - netlink_unicast_kernel net/netlink/af_netlink.c:1302 [inline] - netlink_unicast+0x59e/0x7e0 net/netlink/af_netlink.c:1328 - netlink_sendmsg+0x91c/0xea0 net/netlink/af_netlink.c:1917 - sock_sendmsg_nosec net/socket.c:652 [inline] - sock_sendmsg+0xd7/0x130 net/socket.c:672 - ____sys_sendmsg+0x753/0x880 net/socket.c:2343 - ___sys_sendmsg+0x100/0x170 net/socket.c:2397 - __sys_sendmsg+0x105/0x1d0 net/socket.c:2430 - __do_sys_sendmsg net/socket.c:2439 [inline] - __se_sys_sendmsg net/socket.c:2437 [inline] - __x64_sys_sendmsg+0x78/0xb0 net/socket.c:2437 - do_syscall_64+0xfa/0x790 arch/x86/entry/common.c:294 - entry_SYSCALL_64_after_hwframe+0x49/0xbe -RIP: 0033:0x45b399 -Code: ad b6 fb ff c3 66 2e 0f 1f 84 00 00 00 00 00 66 90 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 0f 83 7b b6 fb ff c3 66 2e 0f 1f 84 00 00 00 00 -RSP: 002b:00007f99a9bcdc78 EFLAGS: 00000246 ORIG_RAX: 
000000000000002e -RAX: ffffffffffffffda RBX: 00007f99a9bce6d4 RCX: 000000000045b399 -RDX: 0000000000000000 RSI: 0000000020001340 RDI: 0000000000000003 -RBP: 000000000075bf20 R08: 0000000000000000 R09: 0000000000000000 -R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000004 -R13: 00000000000009ba R14: 00000000004cb2b8 R15: 0000000000000009 - -Allocated by task 30103: - save_stack+0x23/0x90 mm/kasan/common.c:72 - set_track mm/kasan/common.c:80 [inline] - __kasan_kmalloc mm/kasan/common.c:513 [inline] - __kasan_kmalloc.constprop.0+0xcf/0xe0 mm/kasan/common.c:486 - kasan_kmalloc+0x9/0x10 mm/kasan/common.c:527 - kmem_cache_alloc_trace+0x158/0x790 mm/slab.c:3551 - kmalloc include/linux/slab.h:556 [inline] - kzalloc include/linux/slab.h:670 [inline] - add+0x70a/0x1970 drivers/net/wireguard/allowedips.c:236 - wg_allowedips_insert_v4+0xf6/0x160 drivers/net/wireguard/allowedips.c:320 - set_allowedip drivers/net/wireguard/netlink.c:343 [inline] - set_peer+0xfb9/0x1150 drivers/net/wireguard/netlink.c:468 - wg_set_device+0xbd4/0x1350 drivers/net/wireguard/netlink.c:591 - genl_family_rcv_msg_doit net/netlink/genetlink.c:672 [inline] - genl_family_rcv_msg net/netlink/genetlink.c:717 [inline] - genl_rcv_msg+0x67d/0xea0 net/netlink/genetlink.c:734 - netlink_rcv_skb+0x177/0x450 net/netlink/af_netlink.c:2477 - genl_rcv+0x29/0x40 net/netlink/genetlink.c:745 - netlink_unicast_kernel net/netlink/af_netlink.c:1302 [inline] - netlink_unicast+0x59e/0x7e0 net/netlink/af_netlink.c:1328 - netlink_sendmsg+0x91c/0xea0 net/netlink/af_netlink.c:1917 - sock_sendmsg_nosec net/socket.c:652 [inline] - sock_sendmsg+0xd7/0x130 net/socket.c:672 - ____sys_sendmsg+0x753/0x880 net/socket.c:2343 - ___sys_sendmsg+0x100/0x170 net/socket.c:2397 - __sys_sendmsg+0x105/0x1d0 net/socket.c:2430 - __do_sys_sendmsg net/socket.c:2439 [inline] - __se_sys_sendmsg net/socket.c:2437 [inline] - __x64_sys_sendmsg+0x78/0xb0 net/socket.c:2437 - do_syscall_64+0xfa/0x790 arch/x86/entry/common.c:294 - entry_SYSCALL_64_after_hwframe+0x49/0xbe - -Freed by task 30103: - save_stack+0x23/0x90 mm/kasan/common.c:72 - set_track mm/kasan/common.c:80 [inline] - kasan_set_free_info mm/kasan/common.c:335 [inline] - __kasan_slab_free+0x102/0x150 mm/kasan/common.c:474 - kasan_slab_free+0xe/0x10 mm/kasan/common.c:483 - __cache_free mm/slab.c:3426 [inline] - kfree+0x10a/0x2c0 mm/slab.c:3757 - add+0x12d2/0x1970 drivers/net/wireguard/allowedips.c:266 - wg_allowedips_insert_v4+0xf6/0x160 drivers/net/wireguard/allowedips.c:320 - set_allowedip drivers/net/wireguard/netlink.c:343 [inline] - set_peer+0xfb9/0x1150 drivers/net/wireguard/netlink.c:468 - wg_set_device+0xbd4/0x1350 drivers/net/wireguard/netlink.c:591 - genl_family_rcv_msg_doit net/netlink/genetlink.c:672 [inline] - genl_family_rcv_msg net/netlink/genetlink.c:717 [inline] - genl_rcv_msg+0x67d/0xea0 net/netlink/genetlink.c:734 - netlink_rcv_skb+0x177/0x450 net/netlink/af_netlink.c:2477 - genl_rcv+0x29/0x40 net/netlink/genetlink.c:745 - netlink_unicast_kernel net/netlink/af_netlink.c:1302 [inline] - netlink_unicast+0x59e/0x7e0 net/netlink/af_netlink.c:1328 - netlink_sendmsg+0x91c/0xea0 net/netlink/af_netlink.c:1917 - sock_sendmsg_nosec net/socket.c:652 [inline] - sock_sendmsg+0xd7/0x130 net/socket.c:672 - ____sys_sendmsg+0x753/0x880 net/socket.c:2343 - ___sys_sendmsg+0x100/0x170 net/socket.c:2397 - __sys_sendmsg+0x105/0x1d0 net/socket.c:2430 - __do_sys_sendmsg net/socket.c:2439 [inline] - __se_sys_sendmsg net/socket.c:2437 [inline] - __x64_sys_sendmsg+0x78/0xb0 net/socket.c:2437 - do_syscall_64+0xfa/0x790 
arch/x86/entry/common.c:294 - entry_SYSCALL_64_after_hwframe+0x49/0xbe - -The buggy address belongs to the object at ffff88809881a500 - which belongs to the cache kmalloc-64 of size 64 -The buggy address is located 56 bytes inside of - 64-byte region [ffff88809881a500, ffff88809881a540) -The buggy address belongs to the page: -page:ffffea0002620680 refcount:1 mapcount:0 mapping:ffff8880aa400380 index:0x0 -raw: 00fffe0000000200 ffffea000250b748 ffffea000254bac8 ffff8880aa400380 -raw: 0000000000000000 ffff88809881a000 0000000100000020 0000000000000000 -page dumped because: kasan: bad access detected - -Memory state around the buggy address: - ffff88809881a400: fb fb fb fb fb fb fb fb fc fc fc fc fc fc fc fc - ffff88809881a480: 00 00 00 00 00 fc fc fc fc fc fc fc fc fc fc fc ->ffff88809881a500: fb fb fb fb fb fb fb fb fc fc fc fc fc fc fc fc - ^ - ffff88809881a580: fb fb fb fb fb fb fb fb fc fc fc fc fc fc fc fc - ffff88809881a600: 00 00 00 00 00 00 fc fc fc fc fc fc fc fc fc fc - -Fixes: e7096c131e51 ("net: WireGuard secure network tunnel") -Signed-off-by: Eric Dumazet -Reported-by: syzbot -Cc: Jason A. Donenfeld -Cc: wireguard@lists.zx2c4.com -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. Donenfeld ---- - drivers/net/wireguard/allowedips.c | 1 + - 1 file changed, 1 insertion(+) - ---- a/drivers/net/wireguard/allowedips.c -+++ b/drivers/net/wireguard/allowedips.c -@@ -263,6 +263,7 @@ static int add(struct allowedips_node __ - } else { - node = kzalloc(sizeof(*node), GFP_KERNEL); - if (unlikely(!node)) { -+ list_del(&newnode->peer_list); - kfree(newnode); - return -ENOMEM; - } diff --git a/target/linux/generic/backport-5.4/080-wireguard-0083-wireguard-selftests-ensure-non-addition-of-peers-wit.patch b/target/linux/generic/backport-5.4/080-wireguard-0083-wireguard-selftests-ensure-non-addition-of-peers-wit.patch new file mode 100644 index 0000000000..86877a6590 --- /dev/null +++ b/target/linux/generic/backport-5.4/080-wireguard-0083-wireguard-selftests-ensure-non-addition-of-peers-wit.patch @@ -0,0 +1,34 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: "Jason A. Donenfeld" +Date: Tue, 4 Feb 2020 22:17:27 +0100 +Subject: [PATCH] wireguard: selftests: ensure non-addition of peers with + failed precomputation + +commit f9398acba6a4ae9cb98bfe4d56414d376eff8d57 upstream. + +Ensure that peers with low order points are ignored, both in the case +where we already have a device private key and in the case where we do +not. This adds points that naturally give a zero output. + +Signed-off-by: Jason A. Donenfeld +Signed-off-by: David S. Miller +Signed-off-by: Jason A. 
Donenfeld +--- + tools/testing/selftests/wireguard/netns.sh | 6 ++++++ + 1 file changed, 6 insertions(+) + +--- a/tools/testing/selftests/wireguard/netns.sh ++++ b/tools/testing/selftests/wireguard/netns.sh +@@ -516,6 +516,12 @@ n0 wg set wg0 peer "$pub2" allowed-ips 0 + n0 wg set wg0 peer "$pub2" allowed-ips 0.0.0.0/0 + n0 wg set wg0 peer "$pub2" allowed-ips ::/0,1700::/111,5000::/4,e000::/37,9000::/75 + n0 wg set wg0 peer "$pub2" allowed-ips ::/0 ++n0 wg set wg0 peer "$pub2" remove ++low_order_points=( AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= AQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= 4Ot6fDtBuK4WVuP68Z/EatoJjeucMrH9hmIFFl9JuAA= X5yVvKNQjCSx0LFVnIPvWwREXMRYHI6G2CJO3dCfEVc= 7P///////////////////////////////////////38= 7f///////////////////////////////////////38= 7v///////////////////////////////////////38= ) ++n0 wg set wg0 private-key /dev/null ${low_order_points[@]/#/peer } ++[[ -z $(n0 wg show wg0 peers) ]] ++n0 wg set wg0 private-key <(echo "$key1") ${low_order_points[@]/#/peer } ++[[ -z $(n0 wg show wg0 peers) ]] + ip0 link del wg0 + + declare -A objects diff --git a/target/linux/generic/backport-5.4/080-wireguard-0084-wireguard-noise-reject-peers-with-low-order-public-k.patch b/target/linux/generic/backport-5.4/080-wireguard-0084-wireguard-noise-reject-peers-with-low-order-public-k.patch deleted file mode 100644 index 55bb276118..0000000000 --- a/target/linux/generic/backport-5.4/080-wireguard-0084-wireguard-noise-reject-peers-with-low-order-public-k.patch +++ /dev/null @@ -1,233 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Tue, 4 Feb 2020 22:17:26 +0100 -Subject: [PATCH] wireguard: noise: reject peers with low order public keys - -commit ec31c2676a10e064878927b243fada8c2fb0c03c upstream. - -Our static-static calculation returns a failure if the public key is of -low order. We check for this when peers are added, and don't allow them -to be added if they're low order, except in the case where we haven't -yet been given a private key. In that case, we would defer the removal -of the peer until we're given a private key, since at that point we're -doing new static-static calculations which incur failures we can act on. -This meant, however, that we wound up removing peers rather late in the -configuration flow. - -Syzkaller points out that peer_remove calls flush_workqueue, which in -turn might then wait for sending a handshake initiation to complete. -Since handshake initiation needs the static identity lock, holding the -static identity lock while calling peer_remove can result in a rare -deadlock. We have precisely this case in this situation of late-stage -peer removal based on an invalid public key. We can't drop the lock when -removing, because then incoming handshakes might interact with a bogus -static-static calculation. - -While the band-aid patch for this would involve breaking up the peer -removal into two steps like wg_peer_remove_all does, in order to solve -the locking issue, there's actually a much more elegant way of fixing -this: - -If the static-static calculation succeeds with one private key, it -*must* succeed with all others, because all 32-byte strings map to valid -private keys, thanks to clamping. That means we can get rid of this -silly dance and locking headaches of removing peers late in the -configuration flow, and instead just reject them early on, regardless of -whether the device has yet been assigned a private key. 
For the case -where the device doesn't yet have a private key, we safely use zeros -just for the purposes of checking for low order points by way of -checking the output of the calculation. - -The following PoC will trigger the deadlock: - -ip link add wg0 type wireguard -ip addr add 10.0.0.1/24 dev wg0 -ip link set wg0 up -ping -f 10.0.0.2 & -while true; do - wg set wg0 private-key /dev/null peer AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= allowed-ips 10.0.0.0/24 endpoint 10.0.0.3:1234 - wg set wg0 private-key <(echo AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=) -done - -[ 0.949105] ====================================================== -[ 0.949550] WARNING: possible circular locking dependency detected -[ 0.950143] 5.5.0-debug+ #18 Not tainted -[ 0.950431] ------------------------------------------------------ -[ 0.950959] wg/89 is trying to acquire lock: -[ 0.951252] ffff8880333e2128 ((wq_completion)wg-kex-wg0){+.+.}, at: flush_workqueue+0xe3/0x12f0 -[ 0.951865] -[ 0.951865] but task is already holding lock: -[ 0.952280] ffff888032819bc0 (&wg->static_identity.lock){++++}, at: wg_set_device+0x95d/0xcc0 -[ 0.953011] -[ 0.953011] which lock already depends on the new lock. -[ 0.953011] -[ 0.953651] -[ 0.953651] the existing dependency chain (in reverse order) is: -[ 0.954292] -[ 0.954292] -> #2 (&wg->static_identity.lock){++++}: -[ 0.954804] lock_acquire+0x127/0x350 -[ 0.955133] down_read+0x83/0x410 -[ 0.955428] wg_noise_handshake_create_initiation+0x97/0x700 -[ 0.955885] wg_packet_send_handshake_initiation+0x13a/0x280 -[ 0.956401] wg_packet_handshake_send_worker+0x10/0x20 -[ 0.956841] process_one_work+0x806/0x1500 -[ 0.957167] worker_thread+0x8c/0xcb0 -[ 0.957549] kthread+0x2ee/0x3b0 -[ 0.957792] ret_from_fork+0x24/0x30 -[ 0.958234] -[ 0.958234] -> #1 ((work_completion)(&peer->transmit_handshake_work)){+.+.}: -[ 0.958808] lock_acquire+0x127/0x350 -[ 0.959075] process_one_work+0x7ab/0x1500 -[ 0.959369] worker_thread+0x8c/0xcb0 -[ 0.959639] kthread+0x2ee/0x3b0 -[ 0.959896] ret_from_fork+0x24/0x30 -[ 0.960346] -[ 0.960346] -> #0 ((wq_completion)wg-kex-wg0){+.+.}: -[ 0.960945] check_prev_add+0x167/0x1e20 -[ 0.961351] __lock_acquire+0x2012/0x3170 -[ 0.961725] lock_acquire+0x127/0x350 -[ 0.961990] flush_workqueue+0x106/0x12f0 -[ 0.962280] peer_remove_after_dead+0x160/0x220 -[ 0.962600] wg_set_device+0xa24/0xcc0 -[ 0.962994] genl_rcv_msg+0x52f/0xe90 -[ 0.963298] netlink_rcv_skb+0x111/0x320 -[ 0.963618] genl_rcv+0x1f/0x30 -[ 0.963853] netlink_unicast+0x3f6/0x610 -[ 0.964245] netlink_sendmsg+0x700/0xb80 -[ 0.964586] __sys_sendto+0x1dd/0x2c0 -[ 0.964854] __x64_sys_sendto+0xd8/0x1b0 -[ 0.965141] do_syscall_64+0x90/0xd9a -[ 0.965408] entry_SYSCALL_64_after_hwframe+0x49/0xbe -[ 0.965769] -[ 0.965769] other info that might help us debug this: -[ 0.965769] -[ 0.966337] Chain exists of: -[ 0.966337] (wq_completion)wg-kex-wg0 --> (work_completion)(&peer->transmit_handshake_work) --> &wg->static_identity.lock -[ 0.966337] -[ 0.967417] Possible unsafe locking scenario: -[ 0.967417] -[ 0.967836] CPU0 CPU1 -[ 0.968155] ---- ---- -[ 0.968497] lock(&wg->static_identity.lock); -[ 0.968779] lock((work_completion)(&peer->transmit_handshake_work)); -[ 0.969345] lock(&wg->static_identity.lock); -[ 0.969809] lock((wq_completion)wg-kex-wg0); -[ 0.970146] -[ 0.970146] *** DEADLOCK *** -[ 0.970146] -[ 0.970531] 5 locks held by wg/89: -[ 0.970908] #0: ffffffff827433c8 (cb_lock){++++}, at: genl_rcv+0x10/0x30 -[ 0.971400] #1: ffffffff82743480 (genl_mutex){+.+.}, at: genl_rcv_msg+0x642/0xe90 -[ 0.971924] #2: 
ffffffff827160c0 (rtnl_mutex){+.+.}, at: wg_set_device+0x9f/0xcc0 -[ 0.972488] #3: ffff888032819de0 (&wg->device_update_lock){+.+.}, at: wg_set_device+0xb0/0xcc0 -[ 0.973095] #4: ffff888032819bc0 (&wg->static_identity.lock){++++}, at: wg_set_device+0x95d/0xcc0 -[ 0.973653] -[ 0.973653] stack backtrace: -[ 0.973932] CPU: 1 PID: 89 Comm: wg Not tainted 5.5.0-debug+ #18 -[ 0.974476] Call Trace: -[ 0.974638] dump_stack+0x97/0xe0 -[ 0.974869] check_noncircular+0x312/0x3e0 -[ 0.975132] ? print_circular_bug+0x1f0/0x1f0 -[ 0.975410] ? __kernel_text_address+0x9/0x30 -[ 0.975727] ? unwind_get_return_address+0x51/0x90 -[ 0.976024] check_prev_add+0x167/0x1e20 -[ 0.976367] ? graph_lock+0x70/0x160 -[ 0.976682] __lock_acquire+0x2012/0x3170 -[ 0.976998] ? register_lock_class+0x1140/0x1140 -[ 0.977323] lock_acquire+0x127/0x350 -[ 0.977627] ? flush_workqueue+0xe3/0x12f0 -[ 0.977890] flush_workqueue+0x106/0x12f0 -[ 0.978147] ? flush_workqueue+0xe3/0x12f0 -[ 0.978410] ? find_held_lock+0x2c/0x110 -[ 0.978662] ? lock_downgrade+0x6e0/0x6e0 -[ 0.978919] ? queue_rcu_work+0x60/0x60 -[ 0.979166] ? netif_napi_del+0x151/0x3b0 -[ 0.979501] ? peer_remove_after_dead+0x160/0x220 -[ 0.979871] peer_remove_after_dead+0x160/0x220 -[ 0.980232] wg_set_device+0xa24/0xcc0 -[ 0.980516] ? deref_stack_reg+0x8e/0xc0 -[ 0.980801] ? set_peer+0xe10/0xe10 -[ 0.981040] ? __ww_mutex_check_waiters+0x150/0x150 -[ 0.981430] ? __nla_validate_parse+0x163/0x270 -[ 0.981719] ? genl_family_rcv_msg_attrs_parse+0x13f/0x310 -[ 0.982078] genl_rcv_msg+0x52f/0xe90 -[ 0.982348] ? genl_family_rcv_msg_attrs_parse+0x310/0x310 -[ 0.982690] ? register_lock_class+0x1140/0x1140 -[ 0.983049] netlink_rcv_skb+0x111/0x320 -[ 0.983298] ? genl_family_rcv_msg_attrs_parse+0x310/0x310 -[ 0.983645] ? netlink_ack+0x880/0x880 -[ 0.983888] genl_rcv+0x1f/0x30 -[ 0.984168] netlink_unicast+0x3f6/0x610 -[ 0.984443] ? netlink_detachskb+0x60/0x60 -[ 0.984729] ? find_held_lock+0x2c/0x110 -[ 0.984976] netlink_sendmsg+0x700/0xb80 -[ 0.985220] ? netlink_broadcast_filtered+0xa60/0xa60 -[ 0.985533] __sys_sendto+0x1dd/0x2c0 -[ 0.985763] ? __x64_sys_getpeername+0xb0/0xb0 -[ 0.986039] ? sockfd_lookup_light+0x17/0x160 -[ 0.986397] ? __sys_recvmsg+0x8c/0xf0 -[ 0.986711] ? __sys_recvmsg_sock+0xd0/0xd0 -[ 0.987018] __x64_sys_sendto+0xd8/0x1b0 -[ 0.987283] ? lockdep_hardirqs_on+0x39b/0x5a0 -[ 0.987666] do_syscall_64+0x90/0xd9a -[ 0.987903] entry_SYSCALL_64_after_hwframe+0x49/0xbe -[ 0.988223] RIP: 0033:0x7fe77c12003e -[ 0.988508] Code: c3 8b 07 85 c0 75 24 49 89 fb 48 89 f0 48 89 d7 48 89 ce 4c 89 c2 4d 89 ca 4c 8b 44 24 08 4c 8b 4c 24 10 4c 4 -[ 0.989666] RSP: 002b:00007fffada2ed58 EFLAGS: 00000246 ORIG_RAX: 000000000000002c -[ 0.990137] RAX: ffffffffffffffda RBX: 00007fe77c159d48 RCX: 00007fe77c12003e -[ 0.990583] RDX: 0000000000000040 RSI: 000055fd1d38e020 RDI: 0000000000000004 -[ 0.991091] RBP: 000055fd1d38e020 R08: 000055fd1cb63358 R09: 000000000000000c -[ 0.991568] R10: 0000000000000000 R11: 0000000000000246 R12: 000000000000002c -[ 0.992014] R13: 0000000000000004 R14: 000055fd1d38e020 R15: 0000000000000001 - -Signed-off-by: Jason A. Donenfeld -Reported-by: syzbot -Signed-off-by: David S. Miller -Signed-off-by: Jason A. 
Donenfeld ---- - drivers/net/wireguard/netlink.c | 6 ++---- - drivers/net/wireguard/noise.c | 10 +++++++--- - 2 files changed, 9 insertions(+), 7 deletions(-) - ---- a/drivers/net/wireguard/netlink.c -+++ b/drivers/net/wireguard/netlink.c -@@ -575,10 +575,8 @@ static int wg_set_device(struct sk_buff - private_key); - list_for_each_entry_safe(peer, temp, &wg->peer_list, - peer_list) { -- if (wg_noise_precompute_static_static(peer)) -- wg_noise_expire_current_peer_keypairs(peer); -- else -- wg_peer_remove(peer); -+ BUG_ON(!wg_noise_precompute_static_static(peer)); -+ wg_noise_expire_current_peer_keypairs(peer); - } - wg_cookie_checker_precompute_device_keys(&wg->cookie_checker); - up_write(&wg->static_identity.lock); ---- a/drivers/net/wireguard/noise.c -+++ b/drivers/net/wireguard/noise.c -@@ -46,17 +46,21 @@ void __init wg_noise_init(void) - /* Must hold peer->handshake.static_identity->lock */ - bool wg_noise_precompute_static_static(struct wg_peer *peer) - { -- bool ret = true; -+ bool ret; - - down_write(&peer->handshake.lock); -- if (peer->handshake.static_identity->has_identity) -+ if (peer->handshake.static_identity->has_identity) { - ret = curve25519( - peer->handshake.precomputed_static_static, - peer->handshake.static_identity->static_private, - peer->handshake.remote_static); -- else -+ } else { -+ u8 empty[NOISE_PUBLIC_KEY_LEN] = { 0 }; -+ -+ ret = curve25519(empty, empty, peer->handshake.remote_static); - memset(peer->handshake.precomputed_static_static, 0, - NOISE_PUBLIC_KEY_LEN); -+ } - up_write(&peer->handshake.lock); - return ret; - } diff --git a/target/linux/generic/backport-5.4/080-wireguard-0084-wireguard-selftests-tie-socket-waiting-to-target-pid.patch b/target/linux/generic/backport-5.4/080-wireguard-0084-wireguard-selftests-tie-socket-waiting-to-target-pid.patch new file mode 100644 index 0000000000..4530f0f49a --- /dev/null +++ b/target/linux/generic/backport-5.4/080-wireguard-0084-wireguard-selftests-tie-socket-waiting-to-target-pid.patch @@ -0,0 +1,77 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: "Jason A. Donenfeld" +Date: Tue, 4 Feb 2020 22:17:29 +0100 +Subject: [PATCH] wireguard: selftests: tie socket waiting to target pid + +commit 88f404a9b1d75388225b1c67b6dd327cb2182777 upstream. + +Without this, we wind up proceeding too early sometimes when the +previous process has just used the same listening port. So, we tie the +listening socket query to the specific pid we're interested in. + +Signed-off-by: Jason A. Donenfeld +Signed-off-by: David S. Miller +Signed-off-by: Jason A. 
Donenfeld +--- + tools/testing/selftests/wireguard/netns.sh | 17 ++++++++--------- + 1 file changed, 8 insertions(+), 9 deletions(-) + +--- a/tools/testing/selftests/wireguard/netns.sh ++++ b/tools/testing/selftests/wireguard/netns.sh +@@ -38,9 +38,8 @@ ip0() { pretty 0 "ip $*"; ip -n $netns0 + ip1() { pretty 1 "ip $*"; ip -n $netns1 "$@"; } + ip2() { pretty 2 "ip $*"; ip -n $netns2 "$@"; } + sleep() { read -t "$1" -N 1 || true; } +-waitiperf() { pretty "${1//*-}" "wait for iperf:5201"; while [[ $(ss -N "$1" -tlp 'sport = 5201') != *iperf3* ]]; do sleep 0.1; done; } +-waitncatudp() { pretty "${1//*-}" "wait for udp:1111"; while [[ $(ss -N "$1" -ulp 'sport = 1111') != *ncat* ]]; do sleep 0.1; done; } +-waitncattcp() { pretty "${1//*-}" "wait for tcp:1111"; while [[ $(ss -N "$1" -tlp 'sport = 1111') != *ncat* ]]; do sleep 0.1; done; } ++waitiperf() { pretty "${1//*-}" "wait for iperf:5201 pid $2"; while [[ $(ss -N "$1" -tlpH 'sport = 5201') != *\"iperf3\",pid=$2,fd=* ]]; do sleep 0.1; done; } ++waitncatudp() { pretty "${1//*-}" "wait for udp:1111 pid $2"; while [[ $(ss -N "$1" -ulpH 'sport = 1111') != *\"ncat\",pid=$2,fd=* ]]; do sleep 0.1; done; } + waitiface() { pretty "${1//*-}" "wait for $2 to come up"; ip netns exec "$1" bash -c "while [[ \$(< \"/sys/class/net/$2/operstate\") != up ]]; do read -t .1 -N 0 || true; done;"; } + + cleanup() { +@@ -119,22 +118,22 @@ tests() { + + # TCP over IPv4 + n2 iperf3 -s -1 -B 192.168.241.2 & +- waitiperf $netns2 ++ waitiperf $netns2 $! + n1 iperf3 -Z -t 3 -c 192.168.241.2 + + # TCP over IPv6 + n1 iperf3 -s -1 -B fd00::1 & +- waitiperf $netns1 ++ waitiperf $netns1 $! + n2 iperf3 -Z -t 3 -c fd00::1 + + # UDP over IPv4 + n1 iperf3 -s -1 -B 192.168.241.1 & +- waitiperf $netns1 ++ waitiperf $netns1 $! + n2 iperf3 -Z -t 3 -b 0 -u -c 192.168.241.1 + + # UDP over IPv6 + n2 iperf3 -s -1 -B fd00::2 & +- waitiperf $netns2 ++ waitiperf $netns2 $! + n1 iperf3 -Z -t 3 -b 0 -u -c fd00::2 + } + +@@ -207,7 +206,7 @@ n1 ping -W 1 -c 1 192.168.241.2 + n1 wg set wg0 peer "$pub2" allowed-ips 192.168.241.0/24 + exec 4< <(n1 ncat -l -u -p 1111) + ncat_pid=$! +-waitncatudp $netns1 ++waitncatudp $netns1 $ncat_pid + n2 ncat -u 192.168.241.1 1111 <<<"X" + read -r -N 1 -t 1 out <&4 && [[ $out == "X" ]] + kill $ncat_pid +@@ -216,7 +215,7 @@ n1 wg set wg0 peer "$more_specific_key" + n2 wg set wg0 listen-port 9997 + exec 4< <(n1 ncat -l -u -p 1111) + ncat_pid=$! +-waitncatudp $netns1 ++waitncatudp $netns1 $ncat_pid + n2 ncat -u 192.168.241.1 1111 <<<"X" + ! read -r -N 1 -t 1 out <&4 || false + kill $ncat_pid diff --git a/target/linux/generic/backport-5.4/080-wireguard-0085-wireguard-device-use-icmp_ndo_send-helper.patch b/target/linux/generic/backport-5.4/080-wireguard-0085-wireguard-device-use-icmp_ndo_send-helper.patch new file mode 100644 index 0000000000..321db189e1 --- /dev/null +++ b/target/linux/generic/backport-5.4/080-wireguard-0085-wireguard-device-use-icmp_ndo_send-helper.patch @@ -0,0 +1,64 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: "Jason A. Donenfeld" +Date: Tue, 11 Feb 2020 20:47:08 +0100 +Subject: [PATCH] wireguard: device: use icmp_ndo_send helper + +commit a12d7f3cbdc72c7625881c8dc2660fc2c979fdf2 upstream. + +Because wireguard is calling icmp from network device context, it should +use the ndo helper so that the rate limiting applies correctly. This +commit adds a small test to the wireguard test suite to ensure that the +new functions continue doing the right thing in the context of +wireguard. 
It does this by setting up a condition that will definately +evoke an icmp error message from the driver, but along a nat'd path. + +Signed-off-by: Jason A. Donenfeld +Signed-off-by: David S. Miller +Signed-off-by: Jason A. Donenfeld +--- + drivers/net/wireguard/device.c | 4 ++-- + tools/testing/selftests/wireguard/netns.sh | 11 +++++++++++ + 2 files changed, 13 insertions(+), 2 deletions(-) + +--- a/drivers/net/wireguard/device.c ++++ b/drivers/net/wireguard/device.c +@@ -203,9 +203,9 @@ err_peer: + err: + ++dev->stats.tx_errors; + if (skb->protocol == htons(ETH_P_IP)) +- icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0); ++ icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0); + else if (skb->protocol == htons(ETH_P_IPV6)) +- icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0); ++ icmpv6_ndo_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0); + kfree_skb(skb); + return ret; + } +--- a/tools/testing/selftests/wireguard/netns.sh ++++ b/tools/testing/selftests/wireguard/netns.sh +@@ -24,6 +24,7 @@ + set -e + + exec 3>&1 ++export LANG=C + export WG_HIDE_KEYS=never + netns0="wg-test-$$-0" + netns1="wg-test-$$-1" +@@ -297,7 +298,17 @@ ip1 -4 rule add table main suppress_pref + n1 ping -W 1 -c 100 -f 192.168.99.7 + n1 ping -W 1 -c 100 -f abab::1111 + ++# Have ns2 NAT into wg0 packets from ns0, but return an icmp error along the right route. ++n2 iptables -t nat -A POSTROUTING -s 10.0.0.0/24 -d 192.168.241.0/24 -j SNAT --to 192.168.241.2 ++n0 iptables -t filter -A INPUT \! -s 10.0.0.0/24 -i vethrs -j DROP # Manual rpfilter just to be explicit. ++n2 bash -c 'printf 1 > /proc/sys/net/ipv4/ip_forward' ++ip0 -4 route add 192.168.241.1 via 10.0.0.100 ++n2 wg set wg0 peer "$pub1" remove ++[[ $(! n0 ping -W 1 -c 1 192.168.241.1 || false) == *"From 10.0.0.100 icmp_seq=1 Destination Host Unreachable"* ]] ++ + n0 iptables -t nat -F ++n0 iptables -t filter -F ++n2 iptables -t nat -F + ip0 link del vethrc + ip0 link del vethrs + ip1 link del wg0 diff --git a/target/linux/generic/backport-5.4/080-wireguard-0085-wireguard-selftests-ensure-non-addition-of-peers-wit.patch b/target/linux/generic/backport-5.4/080-wireguard-0085-wireguard-selftests-ensure-non-addition-of-peers-wit.patch deleted file mode 100644 index 86877a6590..0000000000 --- a/target/linux/generic/backport-5.4/080-wireguard-0085-wireguard-selftests-ensure-non-addition-of-peers-wit.patch +++ /dev/null @@ -1,34 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Tue, 4 Feb 2020 22:17:27 +0100 -Subject: [PATCH] wireguard: selftests: ensure non-addition of peers with - failed precomputation - -commit f9398acba6a4ae9cb98bfe4d56414d376eff8d57 upstream. - -Ensure that peers with low order points are ignored, both in the case -where we already have a device private key and in the case where we do -not. This adds points that naturally give a zero output. - -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. 
Donenfeld ---- - tools/testing/selftests/wireguard/netns.sh | 6 ++++++ - 1 file changed, 6 insertions(+) - ---- a/tools/testing/selftests/wireguard/netns.sh -+++ b/tools/testing/selftests/wireguard/netns.sh -@@ -516,6 +516,12 @@ n0 wg set wg0 peer "$pub2" allowed-ips 0 - n0 wg set wg0 peer "$pub2" allowed-ips 0.0.0.0/0 - n0 wg set wg0 peer "$pub2" allowed-ips ::/0,1700::/111,5000::/4,e000::/37,9000::/75 - n0 wg set wg0 peer "$pub2" allowed-ips ::/0 -+n0 wg set wg0 peer "$pub2" remove -+low_order_points=( AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= AQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= 4Ot6fDtBuK4WVuP68Z/EatoJjeucMrH9hmIFFl9JuAA= X5yVvKNQjCSx0LFVnIPvWwREXMRYHI6G2CJO3dCfEVc= 7P///////////////////////////////////////38= 7f///////////////////////////////////////38= 7v///////////////////////////////////////38= ) -+n0 wg set wg0 private-key /dev/null ${low_order_points[@]/#/peer } -+[[ -z $(n0 wg show wg0 peers) ]] -+n0 wg set wg0 private-key <(echo "$key1") ${low_order_points[@]/#/peer } -+[[ -z $(n0 wg show wg0 peers) ]] - ip0 link del wg0 - - declare -A objects diff --git a/target/linux/generic/backport-5.4/080-wireguard-0086-wireguard-selftests-reduce-complexity-and-fix-make-r.patch b/target/linux/generic/backport-5.4/080-wireguard-0086-wireguard-selftests-reduce-complexity-and-fix-make-r.patch new file mode 100644 index 0000000000..ac292a8682 --- /dev/null +++ b/target/linux/generic/backport-5.4/080-wireguard-0086-wireguard-selftests-reduce-complexity-and-fix-make-r.patch @@ -0,0 +1,104 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: "Jason A. Donenfeld" +Date: Fri, 14 Feb 2020 23:57:20 +0100 +Subject: [PATCH] wireguard: selftests: reduce complexity and fix make races + +commit 04ddf1208f03e1dbc39a4619c40eba640051b950 upstream. + +This gives us fewer dependencies and shortens build time, fixes up some +hash checking race conditions, and also fixes missing directory creation +that caused issues on massively parallel builds. + +Signed-off-by: Jason A. Donenfeld +Signed-off-by: David S. Miller +Signed-off-by: Jason A. 
Donenfeld +--- + .../testing/selftests/wireguard/qemu/Makefile | 38 +++++++------------ + 1 file changed, 14 insertions(+), 24 deletions(-) + +--- a/tools/testing/selftests/wireguard/qemu/Makefile ++++ b/tools/testing/selftests/wireguard/qemu/Makefile +@@ -38,19 +38,17 @@ endef + define file_download = + $(DISTFILES_PATH)/$(1): + mkdir -p $(DISTFILES_PATH) +- flock -x $$@.lock -c '[ -f $$@ ] && exit 0; wget -O $$@.tmp $(MIRROR)$(1) || wget -O $$@.tmp $(2)$(1) || rm -f $$@.tmp' +- if echo "$(3) $$@.tmp" | sha256sum -c -; then mv $$@.tmp $$@; else rm -f $$@.tmp; exit 71; fi ++ flock -x $$@.lock -c '[ -f $$@ ] && exit 0; wget -O $$@.tmp $(MIRROR)$(1) || wget -O $$@.tmp $(2)$(1) || rm -f $$@.tmp; [ -f $$@.tmp ] || exit 1; if echo "$(3) $$@.tmp" | sha256sum -c -; then mv $$@.tmp $$@; else rm -f $$@.tmp; exit 71; fi' + endef + + $(eval $(call tar_download,MUSL,musl,1.1.24,.tar.gz,https://www.musl-libc.org/releases/,1370c9a812b2cf2a7d92802510cca0058cc37e66a7bedd70051f0a34015022a3)) +-$(eval $(call tar_download,LIBMNL,libmnl,1.0.4,.tar.bz2,https://www.netfilter.org/projects/libmnl/files/,171f89699f286a5854b72b91d06e8f8e3683064c5901fb09d954a9ab6f551f81)) + $(eval $(call tar_download,IPERF,iperf,3.7,.tar.gz,https://downloads.es.net/pub/iperf/,d846040224317caf2f75c843d309a950a7db23f9b44b94688ccbe557d6d1710c)) + $(eval $(call tar_download,BASH,bash,5.0,.tar.gz,https://ftp.gnu.org/gnu/bash/,b4a80f2ac66170b2913efbfb9f2594f1f76c7b1afd11f799e22035d63077fb4d)) + $(eval $(call tar_download,IPROUTE2,iproute2,5.4.0,.tar.xz,https://www.kernel.org/pub/linux/utils/net/iproute2/,fe97aa60a0d4c5ac830be18937e18dc3400ca713a33a89ad896ff1e3d46086ae)) + $(eval $(call tar_download,IPTABLES,iptables,1.8.4,.tar.bz2,https://www.netfilter.org/projects/iptables/files/,993a3a5490a544c2cbf2ef15cf7e7ed21af1845baf228318d5c36ef8827e157c)) + $(eval $(call tar_download,NMAP,nmap,7.80,.tar.bz2,https://nmap.org/dist/,fcfa5a0e42099e12e4bf7a68ebe6fde05553383a682e816a7ec9256ab4773faa)) + $(eval $(call tar_download,IPUTILS,iputils,s20190709,.tar.gz,https://github.com/iputils/iputils/archive/s20190709.tar.gz/#,a15720dd741d7538dd2645f9f516d193636ae4300ff7dbc8bfca757bf166490a)) +-$(eval $(call tar_download,WIREGUARD_TOOLS,wireguard-tools,1.0.20191226,.tar.xz,https://git.zx2c4.com/wireguard-tools/snapshot/,aa8af0fdc9872d369d8c890a84dbc2a2466b55795dccd5b47721b2d97644b04f)) ++$(eval $(call tar_download,WIREGUARD_TOOLS,wireguard-tools,1.0.20200206,.tar.xz,https://git.zx2c4.com/wireguard-tools/snapshot/,f5207248c6a3c3e3bfc9ab30b91c1897b00802ed861e1f9faaed873366078c64)) + + KERNEL_BUILD_PATH := $(BUILD_PATH)/kernel$(if $(findstring yes,$(DEBUG_KERNEL)),-debug) + rwildcard=$(foreach d,$(wildcard $1*),$(call rwildcard,$d/,$2) $(filter $(subst *,%,$2),$d)) +@@ -295,21 +293,13 @@ $(IPERF_PATH)/src/iperf3: | $(IPERF_PATH + $(MAKE) -C $(IPERF_PATH) + $(STRIP) -s $@ + +-$(LIBMNL_PATH)/.installed: $(LIBMNL_TAR) +- flock -s $<.lock tar -C $(BUILD_PATH) -xf $< +- touch $@ +- +-$(LIBMNL_PATH)/src/.libs/libmnl.a: | $(LIBMNL_PATH)/.installed $(USERSPACE_DEPS) +- cd $(LIBMNL_PATH) && ./configure --prefix=/ $(CROSS_COMPILE_FLAG) --enable-static --disable-shared +- $(MAKE) -C $(LIBMNL_PATH) +- sed -i 's:prefix=.*:prefix=$(LIBMNL_PATH):' $(LIBMNL_PATH)/libmnl.pc +- + $(WIREGUARD_TOOLS_PATH)/.installed: $(WIREGUARD_TOOLS_TAR) ++ mkdir -p $(BUILD_PATH) + flock -s $<.lock tar -C $(BUILD_PATH) -xf $< + touch $@ + +-$(WIREGUARD_TOOLS_PATH)/src/wg: | $(WIREGUARD_TOOLS_PATH)/.installed $(LIBMNL_PATH)/src/.libs/libmnl.a $(USERSPACE_DEPS) +- LDFLAGS="$(LDFLAGS) 
-L$(LIBMNL_PATH)/src/.libs" $(MAKE) -C $(WIREGUARD_TOOLS_PATH)/src LIBMNL_CFLAGS="-I$(LIBMNL_PATH)/include" LIBMNL_LDLIBS="-lmnl" wg ++$(WIREGUARD_TOOLS_PATH)/src/wg: | $(WIREGUARD_TOOLS_PATH)/.installed $(USERSPACE_DEPS) ++ $(MAKE) -C $(WIREGUARD_TOOLS_PATH)/src wg + $(STRIP) -s $@ + + $(BUILD_PATH)/init: init.c | $(USERSPACE_DEPS) +@@ -340,17 +330,17 @@ $(BASH_PATH)/bash: | $(BASH_PATH)/.insta + $(IPROUTE2_PATH)/.installed: $(IPROUTE2_TAR) + mkdir -p $(BUILD_PATH) + flock -s $<.lock tar -C $(BUILD_PATH) -xf $< +- printf 'CC:=$(CC)\nPKG_CONFIG:=pkg-config\nTC_CONFIG_XT:=n\nTC_CONFIG_ATM:=n\nTC_CONFIG_IPSET:=n\nIP_CONFIG_SETNS:=y\nHAVE_ELF:=n\nHAVE_MNL:=y\nHAVE_BERKELEY_DB:=n\nHAVE_LATEX:=n\nHAVE_PDFLATEX:=n\nCFLAGS+=-DHAVE_SETNS -DHAVE_LIBMNL -I$(LIBMNL_PATH)/include\nLDLIBS+=-lmnl' > $(IPROUTE2_PATH)/config.mk ++ printf 'CC:=$(CC)\nPKG_CONFIG:=pkg-config\nTC_CONFIG_XT:=n\nTC_CONFIG_ATM:=n\nTC_CONFIG_IPSET:=n\nIP_CONFIG_SETNS:=y\nHAVE_ELF:=n\nHAVE_MNL:=n\nHAVE_BERKELEY_DB:=n\nHAVE_LATEX:=n\nHAVE_PDFLATEX:=n\nCFLAGS+=-DHAVE_SETNS\n' > $(IPROUTE2_PATH)/config.mk + printf 'lib: snapshot\n\t$$(MAKE) -C lib\nip/ip: lib\n\t$$(MAKE) -C ip ip\nmisc/ss: lib\n\t$$(MAKE) -C misc ss\n' >> $(IPROUTE2_PATH)/Makefile + touch $@ + +-$(IPROUTE2_PATH)/ip/ip: | $(IPROUTE2_PATH)/.installed $(LIBMNL_PATH)/src/.libs/libmnl.a $(USERSPACE_DEPS) +- LDFLAGS="$(LDFLAGS) -L$(LIBMNL_PATH)/src/.libs" PKG_CONFIG_LIBDIR="$(LIBMNL_PATH)" $(MAKE) -C $(IPROUTE2_PATH) PREFIX=/ ip/ip +- $(STRIP) -s $(IPROUTE2_PATH)/ip/ip +- +-$(IPROUTE2_PATH)/misc/ss: | $(IPROUTE2_PATH)/.installed $(LIBMNL_PATH)/src/.libs/libmnl.a $(USERSPACE_DEPS) +- LDFLAGS="$(LDFLAGS) -L$(LIBMNL_PATH)/src/.libs" PKG_CONFIG_LIBDIR="$(LIBMNL_PATH)" $(MAKE) -C $(IPROUTE2_PATH) PREFIX=/ misc/ss +- $(STRIP) -s $(IPROUTE2_PATH)/misc/ss ++$(IPROUTE2_PATH)/ip/ip: | $(IPROUTE2_PATH)/.installed $(USERSPACE_DEPS) ++ $(MAKE) -C $(IPROUTE2_PATH) PREFIX=/ ip/ip ++ $(STRIP) -s $@ ++ ++$(IPROUTE2_PATH)/misc/ss: | $(IPROUTE2_PATH)/.installed $(USERSPACE_DEPS) ++ $(MAKE) -C $(IPROUTE2_PATH) PREFIX=/ misc/ss ++ $(STRIP) -s $@ + + $(IPTABLES_PATH)/.installed: $(IPTABLES_TAR) + mkdir -p $(BUILD_PATH) +@@ -358,8 +348,8 @@ $(IPTABLES_PATH)/.installed: $(IPTABLES_ + sed -i -e "/nfnetlink=[01]/s:=[01]:=0:" -e "/nfconntrack=[01]/s:=[01]:=0:" $(IPTABLES_PATH)/configure + touch $@ + +-$(IPTABLES_PATH)/iptables/xtables-legacy-multi: | $(IPTABLES_PATH)/.installed $(LIBMNL_PATH)/src/.libs/libmnl.a $(USERSPACE_DEPS) +- cd $(IPTABLES_PATH) && PKG_CONFIG_LIBDIR="$(LIBMNL_PATH)" ./configure --prefix=/ $(CROSS_COMPILE_FLAG) --enable-static --disable-shared --disable-nftables --disable-bpf-compiler --disable-nfsynproxy --disable-libipq --with-kernel=$(BUILD_PATH)/include ++$(IPTABLES_PATH)/iptables/xtables-legacy-multi: | $(IPTABLES_PATH)/.installed $(USERSPACE_DEPS) ++ cd $(IPTABLES_PATH) && ./configure --prefix=/ $(CROSS_COMPILE_FLAG) --enable-static --disable-shared --disable-nftables --disable-bpf-compiler --disable-nfsynproxy --disable-libipq --disable-connlabel --with-kernel=$(BUILD_PATH)/include + $(MAKE) -C $(IPTABLES_PATH) + $(STRIP) -s $@ + diff --git a/target/linux/generic/backport-5.4/080-wireguard-0086-wireguard-selftests-tie-socket-waiting-to-target-pid.patch b/target/linux/generic/backport-5.4/080-wireguard-0086-wireguard-selftests-tie-socket-waiting-to-target-pid.patch deleted file mode 100644 index 4530f0f49a..0000000000 --- a/target/linux/generic/backport-5.4/080-wireguard-0086-wireguard-selftests-tie-socket-waiting-to-target-pid.patch +++ /dev/null @@ -1,77 +0,0 @@ -From 
0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Tue, 4 Feb 2020 22:17:29 +0100 -Subject: [PATCH] wireguard: selftests: tie socket waiting to target pid - -commit 88f404a9b1d75388225b1c67b6dd327cb2182777 upstream. - -Without this, we wind up proceeding too early sometimes when the -previous process has just used the same listening port. So, we tie the -listening socket query to the specific pid we're interested in. - -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. Donenfeld ---- - tools/testing/selftests/wireguard/netns.sh | 17 ++++++++--------- - 1 file changed, 8 insertions(+), 9 deletions(-) - ---- a/tools/testing/selftests/wireguard/netns.sh -+++ b/tools/testing/selftests/wireguard/netns.sh -@@ -38,9 +38,8 @@ ip0() { pretty 0 "ip $*"; ip -n $netns0 - ip1() { pretty 1 "ip $*"; ip -n $netns1 "$@"; } - ip2() { pretty 2 "ip $*"; ip -n $netns2 "$@"; } - sleep() { read -t "$1" -N 1 || true; } --waitiperf() { pretty "${1//*-}" "wait for iperf:5201"; while [[ $(ss -N "$1" -tlp 'sport = 5201') != *iperf3* ]]; do sleep 0.1; done; } --waitncatudp() { pretty "${1//*-}" "wait for udp:1111"; while [[ $(ss -N "$1" -ulp 'sport = 1111') != *ncat* ]]; do sleep 0.1; done; } --waitncattcp() { pretty "${1//*-}" "wait for tcp:1111"; while [[ $(ss -N "$1" -tlp 'sport = 1111') != *ncat* ]]; do sleep 0.1; done; } -+waitiperf() { pretty "${1//*-}" "wait for iperf:5201 pid $2"; while [[ $(ss -N "$1" -tlpH 'sport = 5201') != *\"iperf3\",pid=$2,fd=* ]]; do sleep 0.1; done; } -+waitncatudp() { pretty "${1//*-}" "wait for udp:1111 pid $2"; while [[ $(ss -N "$1" -ulpH 'sport = 1111') != *\"ncat\",pid=$2,fd=* ]]; do sleep 0.1; done; } - waitiface() { pretty "${1//*-}" "wait for $2 to come up"; ip netns exec "$1" bash -c "while [[ \$(< \"/sys/class/net/$2/operstate\") != up ]]; do read -t .1 -N 0 || true; done;"; } - - cleanup() { -@@ -119,22 +118,22 @@ tests() { - - # TCP over IPv4 - n2 iperf3 -s -1 -B 192.168.241.2 & -- waitiperf $netns2 -+ waitiperf $netns2 $! - n1 iperf3 -Z -t 3 -c 192.168.241.2 - - # TCP over IPv6 - n1 iperf3 -s -1 -B fd00::1 & -- waitiperf $netns1 -+ waitiperf $netns1 $! - n2 iperf3 -Z -t 3 -c fd00::1 - - # UDP over IPv4 - n1 iperf3 -s -1 -B 192.168.241.1 & -- waitiperf $netns1 -+ waitiperf $netns1 $! - n2 iperf3 -Z -t 3 -b 0 -u -c 192.168.241.1 - - # UDP over IPv6 - n2 iperf3 -s -1 -B fd00::2 & -- waitiperf $netns2 -+ waitiperf $netns2 $! - n1 iperf3 -Z -t 3 -b 0 -u -c fd00::2 - } - -@@ -207,7 +206,7 @@ n1 ping -W 1 -c 1 192.168.241.2 - n1 wg set wg0 peer "$pub2" allowed-ips 192.168.241.0/24 - exec 4< <(n1 ncat -l -u -p 1111) - ncat_pid=$! --waitncatudp $netns1 -+waitncatudp $netns1 $ncat_pid - n2 ncat -u 192.168.241.1 1111 <<<"X" - read -r -N 1 -t 1 out <&4 && [[ $out == "X" ]] - kill $ncat_pid -@@ -216,7 +215,7 @@ n1 wg set wg0 peer "$more_specific_key" - n2 wg set wg0 listen-port 9997 - exec 4< <(n1 ncat -l -u -p 1111) - ncat_pid=$! --waitncatudp $netns1 -+waitncatudp $netns1 $ncat_pid - n2 ncat -u 192.168.241.1 1111 <<<"X" - ! 
read -r -N 1 -t 1 out <&4 || false - kill $ncat_pid diff --git a/target/linux/generic/backport-5.4/080-wireguard-0087-wireguard-device-use-icmp_ndo_send-helper.patch b/target/linux/generic/backport-5.4/080-wireguard-0087-wireguard-device-use-icmp_ndo_send-helper.patch deleted file mode 100644 index 321db189e1..0000000000 --- a/target/linux/generic/backport-5.4/080-wireguard-0087-wireguard-device-use-icmp_ndo_send-helper.patch +++ /dev/null @@ -1,64 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Tue, 11 Feb 2020 20:47:08 +0100 -Subject: [PATCH] wireguard: device: use icmp_ndo_send helper - -commit a12d7f3cbdc72c7625881c8dc2660fc2c979fdf2 upstream. - -Because wireguard is calling icmp from network device context, it should -use the ndo helper so that the rate limiting applies correctly. This -commit adds a small test to the wireguard test suite to ensure that the -new functions continue doing the right thing in the context of -wireguard. It does this by setting up a condition that will definately -evoke an icmp error message from the driver, but along a nat'd path. - -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. Donenfeld ---- - drivers/net/wireguard/device.c | 4 ++-- - tools/testing/selftests/wireguard/netns.sh | 11 +++++++++++ - 2 files changed, 13 insertions(+), 2 deletions(-) - ---- a/drivers/net/wireguard/device.c -+++ b/drivers/net/wireguard/device.c -@@ -203,9 +203,9 @@ err_peer: - err: - ++dev->stats.tx_errors; - if (skb->protocol == htons(ETH_P_IP)) -- icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0); -+ icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0); - else if (skb->protocol == htons(ETH_P_IPV6)) -- icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0); -+ icmpv6_ndo_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0); - kfree_skb(skb); - return ret; - } ---- a/tools/testing/selftests/wireguard/netns.sh -+++ b/tools/testing/selftests/wireguard/netns.sh -@@ -24,6 +24,7 @@ - set -e - - exec 3>&1 -+export LANG=C - export WG_HIDE_KEYS=never - netns0="wg-test-$$-0" - netns1="wg-test-$$-1" -@@ -297,7 +298,17 @@ ip1 -4 rule add table main suppress_pref - n1 ping -W 1 -c 100 -f 192.168.99.7 - n1 ping -W 1 -c 100 -f abab::1111 - -+# Have ns2 NAT into wg0 packets from ns0, but return an icmp error along the right route. -+n2 iptables -t nat -A POSTROUTING -s 10.0.0.0/24 -d 192.168.241.0/24 -j SNAT --to 192.168.241.2 -+n0 iptables -t filter -A INPUT \! -s 10.0.0.0/24 -i vethrs -j DROP # Manual rpfilter just to be explicit. -+n2 bash -c 'printf 1 > /proc/sys/net/ipv4/ip_forward' -+ip0 -4 route add 192.168.241.1 via 10.0.0.100 -+n2 wg set wg0 peer "$pub1" remove -+[[ $(! n0 ping -W 1 -c 1 192.168.241.1 || false) == *"From 10.0.0.100 icmp_seq=1 Destination Host Unreachable"* ]] -+ - n0 iptables -t nat -F -+n0 iptables -t filter -F -+n2 iptables -t nat -F - ip0 link del vethrc - ip0 link del vethrs - ip1 link del wg0 diff --git a/target/linux/generic/backport-5.4/080-wireguard-0087-wireguard-receive-reset-last_under_load-to-zero.patch b/target/linux/generic/backport-5.4/080-wireguard-0087-wireguard-receive-reset-last_under_load-to-zero.patch new file mode 100644 index 0000000000..193d28a83f --- /dev/null +++ b/target/linux/generic/backport-5.4/080-wireguard-0087-wireguard-receive-reset-last_under_load-to-zero.patch @@ -0,0 +1,38 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: "Jason A. 
Donenfeld" +Date: Fri, 14 Feb 2020 23:57:21 +0100 +Subject: [PATCH] wireguard: receive: reset last_under_load to zero + +commit 2a8a4df36462aa85b0db87b7c5ea145ba67e34a8 upstream. + +This is a small optimization that prevents more expensive comparisons +from happening when they are no longer necessary, by clearing the +last_under_load variable whenever we wind up in a state where we were +under load but we no longer are. + +Signed-off-by: Jason A. Donenfeld +Suggested-by: Matt Dunwoodie +Signed-off-by: David S. Miller +Signed-off-by: Jason A. Donenfeld +--- + drivers/net/wireguard/receive.c | 7 +++++-- + 1 file changed, 5 insertions(+), 2 deletions(-) + +--- a/drivers/net/wireguard/receive.c ++++ b/drivers/net/wireguard/receive.c +@@ -118,10 +118,13 @@ static void wg_receive_handshake_packet( + + under_load = skb_queue_len(&wg->incoming_handshakes) >= + MAX_QUEUED_INCOMING_HANDSHAKES / 8; +- if (under_load) ++ if (under_load) { + last_under_load = ktime_get_coarse_boottime_ns(); +- else if (last_under_load) ++ } else if (last_under_load) { + under_load = !wg_birthdate_has_expired(last_under_load, 1); ++ if (!under_load) ++ last_under_load = 0; ++ } + mac_state = wg_cookie_validate_packet(&wg->cookie_checker, skb, + under_load); + if ((under_load && mac_state == VALID_MAC_WITH_COOKIE) || diff --git a/target/linux/generic/backport-5.4/080-wireguard-0088-wireguard-selftests-reduce-complexity-and-fix-make-r.patch b/target/linux/generic/backport-5.4/080-wireguard-0088-wireguard-selftests-reduce-complexity-and-fix-make-r.patch deleted file mode 100644 index ac292a8682..0000000000 --- a/target/linux/generic/backport-5.4/080-wireguard-0088-wireguard-selftests-reduce-complexity-and-fix-make-r.patch +++ /dev/null @@ -1,104 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Fri, 14 Feb 2020 23:57:20 +0100 -Subject: [PATCH] wireguard: selftests: reduce complexity and fix make races - -commit 04ddf1208f03e1dbc39a4619c40eba640051b950 upstream. - -This gives us fewer dependencies and shortens build time, fixes up some -hash checking race conditions, and also fixes missing directory creation -that caused issues on massively parallel builds. - -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. 
Donenfeld ---- - .../testing/selftests/wireguard/qemu/Makefile | 38 +++++++------------ - 1 file changed, 14 insertions(+), 24 deletions(-) - ---- a/tools/testing/selftests/wireguard/qemu/Makefile -+++ b/tools/testing/selftests/wireguard/qemu/Makefile -@@ -38,19 +38,17 @@ endef - define file_download = - $(DISTFILES_PATH)/$(1): - mkdir -p $(DISTFILES_PATH) -- flock -x $$@.lock -c '[ -f $$@ ] && exit 0; wget -O $$@.tmp $(MIRROR)$(1) || wget -O $$@.tmp $(2)$(1) || rm -f $$@.tmp' -- if echo "$(3) $$@.tmp" | sha256sum -c -; then mv $$@.tmp $$@; else rm -f $$@.tmp; exit 71; fi -+ flock -x $$@.lock -c '[ -f $$@ ] && exit 0; wget -O $$@.tmp $(MIRROR)$(1) || wget -O $$@.tmp $(2)$(1) || rm -f $$@.tmp; [ -f $$@.tmp ] || exit 1; if echo "$(3) $$@.tmp" | sha256sum -c -; then mv $$@.tmp $$@; else rm -f $$@.tmp; exit 71; fi' - endef - - $(eval $(call tar_download,MUSL,musl,1.1.24,.tar.gz,https://www.musl-libc.org/releases/,1370c9a812b2cf2a7d92802510cca0058cc37e66a7bedd70051f0a34015022a3)) --$(eval $(call tar_download,LIBMNL,libmnl,1.0.4,.tar.bz2,https://www.netfilter.org/projects/libmnl/files/,171f89699f286a5854b72b91d06e8f8e3683064c5901fb09d954a9ab6f551f81)) - $(eval $(call tar_download,IPERF,iperf,3.7,.tar.gz,https://downloads.es.net/pub/iperf/,d846040224317caf2f75c843d309a950a7db23f9b44b94688ccbe557d6d1710c)) - $(eval $(call tar_download,BASH,bash,5.0,.tar.gz,https://ftp.gnu.org/gnu/bash/,b4a80f2ac66170b2913efbfb9f2594f1f76c7b1afd11f799e22035d63077fb4d)) - $(eval $(call tar_download,IPROUTE2,iproute2,5.4.0,.tar.xz,https://www.kernel.org/pub/linux/utils/net/iproute2/,fe97aa60a0d4c5ac830be18937e18dc3400ca713a33a89ad896ff1e3d46086ae)) - $(eval $(call tar_download,IPTABLES,iptables,1.8.4,.tar.bz2,https://www.netfilter.org/projects/iptables/files/,993a3a5490a544c2cbf2ef15cf7e7ed21af1845baf228318d5c36ef8827e157c)) - $(eval $(call tar_download,NMAP,nmap,7.80,.tar.bz2,https://nmap.org/dist/,fcfa5a0e42099e12e4bf7a68ebe6fde05553383a682e816a7ec9256ab4773faa)) - $(eval $(call tar_download,IPUTILS,iputils,s20190709,.tar.gz,https://github.com/iputils/iputils/archive/s20190709.tar.gz/#,a15720dd741d7538dd2645f9f516d193636ae4300ff7dbc8bfca757bf166490a)) --$(eval $(call tar_download,WIREGUARD_TOOLS,wireguard-tools,1.0.20191226,.tar.xz,https://git.zx2c4.com/wireguard-tools/snapshot/,aa8af0fdc9872d369d8c890a84dbc2a2466b55795dccd5b47721b2d97644b04f)) -+$(eval $(call tar_download,WIREGUARD_TOOLS,wireguard-tools,1.0.20200206,.tar.xz,https://git.zx2c4.com/wireguard-tools/snapshot/,f5207248c6a3c3e3bfc9ab30b91c1897b00802ed861e1f9faaed873366078c64)) - - KERNEL_BUILD_PATH := $(BUILD_PATH)/kernel$(if $(findstring yes,$(DEBUG_KERNEL)),-debug) - rwildcard=$(foreach d,$(wildcard $1*),$(call rwildcard,$d/,$2) $(filter $(subst *,%,$2),$d)) -@@ -295,21 +293,13 @@ $(IPERF_PATH)/src/iperf3: | $(IPERF_PATH - $(MAKE) -C $(IPERF_PATH) - $(STRIP) -s $@ - --$(LIBMNL_PATH)/.installed: $(LIBMNL_TAR) -- flock -s $<.lock tar -C $(BUILD_PATH) -xf $< -- touch $@ -- --$(LIBMNL_PATH)/src/.libs/libmnl.a: | $(LIBMNL_PATH)/.installed $(USERSPACE_DEPS) -- cd $(LIBMNL_PATH) && ./configure --prefix=/ $(CROSS_COMPILE_FLAG) --enable-static --disable-shared -- $(MAKE) -C $(LIBMNL_PATH) -- sed -i 's:prefix=.*:prefix=$(LIBMNL_PATH):' $(LIBMNL_PATH)/libmnl.pc -- - $(WIREGUARD_TOOLS_PATH)/.installed: $(WIREGUARD_TOOLS_TAR) -+ mkdir -p $(BUILD_PATH) - flock -s $<.lock tar -C $(BUILD_PATH) -xf $< - touch $@ - --$(WIREGUARD_TOOLS_PATH)/src/wg: | $(WIREGUARD_TOOLS_PATH)/.installed $(LIBMNL_PATH)/src/.libs/libmnl.a $(USERSPACE_DEPS) -- LDFLAGS="$(LDFLAGS) 
-L$(LIBMNL_PATH)/src/.libs" $(MAKE) -C $(WIREGUARD_TOOLS_PATH)/src LIBMNL_CFLAGS="-I$(LIBMNL_PATH)/include" LIBMNL_LDLIBS="-lmnl" wg -+$(WIREGUARD_TOOLS_PATH)/src/wg: | $(WIREGUARD_TOOLS_PATH)/.installed $(USERSPACE_DEPS) -+ $(MAKE) -C $(WIREGUARD_TOOLS_PATH)/src wg - $(STRIP) -s $@ - - $(BUILD_PATH)/init: init.c | $(USERSPACE_DEPS) -@@ -340,17 +330,17 @@ $(BASH_PATH)/bash: | $(BASH_PATH)/.insta - $(IPROUTE2_PATH)/.installed: $(IPROUTE2_TAR) - mkdir -p $(BUILD_PATH) - flock -s $<.lock tar -C $(BUILD_PATH) -xf $< -- printf 'CC:=$(CC)\nPKG_CONFIG:=pkg-config\nTC_CONFIG_XT:=n\nTC_CONFIG_ATM:=n\nTC_CONFIG_IPSET:=n\nIP_CONFIG_SETNS:=y\nHAVE_ELF:=n\nHAVE_MNL:=y\nHAVE_BERKELEY_DB:=n\nHAVE_LATEX:=n\nHAVE_PDFLATEX:=n\nCFLAGS+=-DHAVE_SETNS -DHAVE_LIBMNL -I$(LIBMNL_PATH)/include\nLDLIBS+=-lmnl' > $(IPROUTE2_PATH)/config.mk -+ printf 'CC:=$(CC)\nPKG_CONFIG:=pkg-config\nTC_CONFIG_XT:=n\nTC_CONFIG_ATM:=n\nTC_CONFIG_IPSET:=n\nIP_CONFIG_SETNS:=y\nHAVE_ELF:=n\nHAVE_MNL:=n\nHAVE_BERKELEY_DB:=n\nHAVE_LATEX:=n\nHAVE_PDFLATEX:=n\nCFLAGS+=-DHAVE_SETNS\n' > $(IPROUTE2_PATH)/config.mk - printf 'lib: snapshot\n\t$$(MAKE) -C lib\nip/ip: lib\n\t$$(MAKE) -C ip ip\nmisc/ss: lib\n\t$$(MAKE) -C misc ss\n' >> $(IPROUTE2_PATH)/Makefile - touch $@ - --$(IPROUTE2_PATH)/ip/ip: | $(IPROUTE2_PATH)/.installed $(LIBMNL_PATH)/src/.libs/libmnl.a $(USERSPACE_DEPS) -- LDFLAGS="$(LDFLAGS) -L$(LIBMNL_PATH)/src/.libs" PKG_CONFIG_LIBDIR="$(LIBMNL_PATH)" $(MAKE) -C $(IPROUTE2_PATH) PREFIX=/ ip/ip -- $(STRIP) -s $(IPROUTE2_PATH)/ip/ip -- --$(IPROUTE2_PATH)/misc/ss: | $(IPROUTE2_PATH)/.installed $(LIBMNL_PATH)/src/.libs/libmnl.a $(USERSPACE_DEPS) -- LDFLAGS="$(LDFLAGS) -L$(LIBMNL_PATH)/src/.libs" PKG_CONFIG_LIBDIR="$(LIBMNL_PATH)" $(MAKE) -C $(IPROUTE2_PATH) PREFIX=/ misc/ss -- $(STRIP) -s $(IPROUTE2_PATH)/misc/ss -+$(IPROUTE2_PATH)/ip/ip: | $(IPROUTE2_PATH)/.installed $(USERSPACE_DEPS) -+ $(MAKE) -C $(IPROUTE2_PATH) PREFIX=/ ip/ip -+ $(STRIP) -s $@ -+ -+$(IPROUTE2_PATH)/misc/ss: | $(IPROUTE2_PATH)/.installed $(USERSPACE_DEPS) -+ $(MAKE) -C $(IPROUTE2_PATH) PREFIX=/ misc/ss -+ $(STRIP) -s $@ - - $(IPTABLES_PATH)/.installed: $(IPTABLES_TAR) - mkdir -p $(BUILD_PATH) -@@ -358,8 +348,8 @@ $(IPTABLES_PATH)/.installed: $(IPTABLES_ - sed -i -e "/nfnetlink=[01]/s:=[01]:=0:" -e "/nfconntrack=[01]/s:=[01]:=0:" $(IPTABLES_PATH)/configure - touch $@ - --$(IPTABLES_PATH)/iptables/xtables-legacy-multi: | $(IPTABLES_PATH)/.installed $(LIBMNL_PATH)/src/.libs/libmnl.a $(USERSPACE_DEPS) -- cd $(IPTABLES_PATH) && PKG_CONFIG_LIBDIR="$(LIBMNL_PATH)" ./configure --prefix=/ $(CROSS_COMPILE_FLAG) --enable-static --disable-shared --disable-nftables --disable-bpf-compiler --disable-nfsynproxy --disable-libipq --with-kernel=$(BUILD_PATH)/include -+$(IPTABLES_PATH)/iptables/xtables-legacy-multi: | $(IPTABLES_PATH)/.installed $(USERSPACE_DEPS) -+ cd $(IPTABLES_PATH) && ./configure --prefix=/ $(CROSS_COMPILE_FLAG) --enable-static --disable-shared --disable-nftables --disable-bpf-compiler --disable-nfsynproxy --disable-libipq --disable-connlabel --with-kernel=$(BUILD_PATH)/include - $(MAKE) -C $(IPTABLES_PATH) - $(STRIP) -s $@ - diff --git a/target/linux/generic/backport-5.4/080-wireguard-0088-wireguard-send-account-for-mtu-0-devices.patch b/target/linux/generic/backport-5.4/080-wireguard-0088-wireguard-send-account-for-mtu-0-devices.patch new file mode 100644 index 0000000000..d84efe20f0 --- /dev/null +++ b/target/linux/generic/backport-5.4/080-wireguard-0088-wireguard-send-account-for-mtu-0-devices.patch @@ -0,0 +1,95 @@ +From 0000000000000000000000000000000000000000 
Mon Sep 17 00:00:00 2001 +From: "Jason A. Donenfeld" +Date: Fri, 14 Feb 2020 23:57:22 +0100 +Subject: [PATCH] wireguard: send: account for mtu=0 devices + +commit 175f1ca9a9ed8689d2028da1a7c624bb4fb4ff7e upstream. + +It turns out there's an easy way to get packets queued up while still +having an MTU of zero, and that's via persistent keep alive. This commit +makes sure that in whatever condition, we don't wind up dividing by +zero. Note that an MTU of zero for a wireguard interface is something +quasi-valid, so I don't think the correct fix is to limit it via +min_mtu. This can be reproduced easily with: + +ip link add wg0 type wireguard +ip link add wg1 type wireguard +ip link set wg0 up mtu 0 +ip link set wg1 up +wg set wg0 private-key <(wg genkey) +wg set wg1 listen-port 1 private-key <(wg genkey) peer $(wg show wg0 public-key) +wg set wg0 peer $(wg show wg1 public-key) persistent-keepalive 1 endpoint 127.0.0.1:1 + +However, while min_mtu=0 seems fine, it makes sense to restrict the +max_mtu. This commit also restricts the maximum MTU to the greatest +number for which rounding up to the padding multiple won't overflow a +signed integer. Packets this large were always rejected anyway +eventually, due to checks deeper in, but it seems more sound not to even +let the administrator configure something that won't work anyway. + +We use this opportunity to clean up this function a bit so that it's +clear which paths we're expecting. + +Signed-off-by: Jason A. Donenfeld +Cc: Eric Dumazet +Reviewed-by: Eric Dumazet +Signed-off-by: David S. Miller +Signed-off-by: Jason A. Donenfeld +--- + drivers/net/wireguard/device.c | 7 ++++--- + drivers/net/wireguard/send.c | 16 +++++++++++----- + 2 files changed, 15 insertions(+), 8 deletions(-) + +--- a/drivers/net/wireguard/device.c ++++ b/drivers/net/wireguard/device.c +@@ -258,6 +258,8 @@ static void wg_setup(struct net_device * + enum { WG_NETDEV_FEATURES = NETIF_F_HW_CSUM | NETIF_F_RXCSUM | + NETIF_F_SG | NETIF_F_GSO | + NETIF_F_GSO_SOFTWARE | NETIF_F_HIGHDMA }; ++ const int overhead = MESSAGE_MINIMUM_LENGTH + sizeof(struct udphdr) + ++ max(sizeof(struct ipv6hdr), sizeof(struct iphdr)); + + dev->netdev_ops = &netdev_ops; + dev->hard_header_len = 0; +@@ -271,9 +273,8 @@ static void wg_setup(struct net_device * + dev->features |= WG_NETDEV_FEATURES; + dev->hw_features |= WG_NETDEV_FEATURES; + dev->hw_enc_features |= WG_NETDEV_FEATURES; +- dev->mtu = ETH_DATA_LEN - MESSAGE_MINIMUM_LENGTH - +- sizeof(struct udphdr) - +- max(sizeof(struct ipv6hdr), sizeof(struct iphdr)); ++ dev->mtu = ETH_DATA_LEN - overhead; ++ dev->max_mtu = round_down(INT_MAX, MESSAGE_PADDING_MULTIPLE) - overhead; + + SET_NETDEV_DEVTYPE(dev, &device_type); + +--- a/drivers/net/wireguard/send.c ++++ b/drivers/net/wireguard/send.c +@@ -143,16 +143,22 @@ static void keep_key_fresh(struct wg_pee + + static unsigned int calculate_skb_padding(struct sk_buff *skb) + { ++ unsigned int padded_size, last_unit = skb->len; ++ ++ if (unlikely(!PACKET_CB(skb)->mtu)) ++ return ALIGN(last_unit, MESSAGE_PADDING_MULTIPLE) - last_unit; ++ + /* We do this modulo business with the MTU, just in case the networking + * layer gives us a packet that's bigger than the MTU. In that case, we + * wouldn't want the final subtraction to overflow in the case of the +- * padded_size being clamped. ++ * padded_size being clamped. Fortunately, that's very rarely the case, ++ * so we optimize for that not happening. 
+ */ +- unsigned int last_unit = skb->len % PACKET_CB(skb)->mtu; +- unsigned int padded_size = ALIGN(last_unit, MESSAGE_PADDING_MULTIPLE); ++ if (unlikely(last_unit > PACKET_CB(skb)->mtu)) ++ last_unit %= PACKET_CB(skb)->mtu; + +- if (padded_size > PACKET_CB(skb)->mtu) +- padded_size = PACKET_CB(skb)->mtu; ++ padded_size = min(PACKET_CB(skb)->mtu, ++ ALIGN(last_unit, MESSAGE_PADDING_MULTIPLE)); + return padded_size - last_unit; + } + diff --git a/target/linux/generic/backport-5.4/080-wireguard-0089-wireguard-receive-reset-last_under_load-to-zero.patch b/target/linux/generic/backport-5.4/080-wireguard-0089-wireguard-receive-reset-last_under_load-to-zero.patch deleted file mode 100644 index 193d28a83f..0000000000 --- a/target/linux/generic/backport-5.4/080-wireguard-0089-wireguard-receive-reset-last_under_load-to-zero.patch +++ /dev/null @@ -1,38 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Fri, 14 Feb 2020 23:57:21 +0100 -Subject: [PATCH] wireguard: receive: reset last_under_load to zero - -commit 2a8a4df36462aa85b0db87b7c5ea145ba67e34a8 upstream. - -This is a small optimization that prevents more expensive comparisons -from happening when they are no longer necessary, by clearing the -last_under_load variable whenever we wind up in a state where we were -under load but we no longer are. - -Signed-off-by: Jason A. Donenfeld -Suggested-by: Matt Dunwoodie -Signed-off-by: David S. Miller -Signed-off-by: Jason A. Donenfeld ---- - drivers/net/wireguard/receive.c | 7 +++++-- - 1 file changed, 5 insertions(+), 2 deletions(-) - ---- a/drivers/net/wireguard/receive.c -+++ b/drivers/net/wireguard/receive.c -@@ -118,10 +118,13 @@ static void wg_receive_handshake_packet( - - under_load = skb_queue_len(&wg->incoming_handshakes) >= - MAX_QUEUED_INCOMING_HANDSHAKES / 8; -- if (under_load) -+ if (under_load) { - last_under_load = ktime_get_coarse_boottime_ns(); -- else if (last_under_load) -+ } else if (last_under_load) { - under_load = !wg_birthdate_has_expired(last_under_load, 1); -+ if (!under_load) -+ last_under_load = 0; -+ } - mac_state = wg_cookie_validate_packet(&wg->cookie_checker, skb, - under_load); - if ((under_load && mac_state == VALID_MAC_WITH_COOKIE) || diff --git a/target/linux/generic/backport-5.4/080-wireguard-0089-wireguard-socket-remove-extra-call-to-synchronize_ne.patch b/target/linux/generic/backport-5.4/080-wireguard-0089-wireguard-socket-remove-extra-call-to-synchronize_ne.patch new file mode 100644 index 0000000000..458e9d51e5 --- /dev/null +++ b/target/linux/generic/backport-5.4/080-wireguard-0089-wireguard-socket-remove-extra-call-to-synchronize_ne.patch @@ -0,0 +1,32 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: "Jason A. Donenfeld" +Date: Fri, 14 Feb 2020 23:57:23 +0100 +Subject: [PATCH] wireguard: socket: remove extra call to synchronize_net + +commit 1fbc33b0a7feb6ca72bf7dc8a05d81485ee8ee2e upstream. + +synchronize_net() is a wrapper around synchronize_rcu(), so there's no +point in having synchronize_net and synchronize_rcu back to back, +despite the documentation comment suggesting maybe it's somewhat useful, +"Wait for packets currently being received to be done." This commit +removes the extra call. + +Signed-off-by: Jason A. Donenfeld +Suggested-by: Eric Dumazet +Reviewed-by: Eric Dumazet +Signed-off-by: David S. Miller +Signed-off-by: Jason A. 
Donenfeld +--- + drivers/net/wireguard/socket.c | 1 - + 1 file changed, 1 deletion(-) + +--- a/drivers/net/wireguard/socket.c ++++ b/drivers/net/wireguard/socket.c +@@ -432,7 +432,6 @@ void wg_socket_reinit(struct wg_device * + wg->incoming_port = ntohs(inet_sk(new4)->inet_sport); + mutex_unlock(&wg->socket_update_lock); + synchronize_rcu(); +- synchronize_net(); + sock_free(old4); + sock_free(old6); + } diff --git a/target/linux/generic/backport-5.4/080-wireguard-0090-wireguard-selftests-remove-duplicated-include-sys-ty.patch b/target/linux/generic/backport-5.4/080-wireguard-0090-wireguard-selftests-remove-duplicated-include-sys-ty.patch new file mode 100644 index 0000000000..93545e6760 --- /dev/null +++ b/target/linux/generic/backport-5.4/080-wireguard-0090-wireguard-selftests-remove-duplicated-include-sys-ty.patch @@ -0,0 +1,27 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: YueHaibing +Date: Wed, 18 Mar 2020 18:30:43 -0600 +Subject: [PATCH] wireguard: selftests: remove duplicated include + +commit 166391159c5deb84795d2ff46e95f276177fa5fb upstream. + +This commit removes a duplicated include. + +Signed-off-by: YueHaibing +Signed-off-by: Jason A. Donenfeld +Signed-off-by: David S. Miller +Signed-off-by: Jason A. Donenfeld +--- + tools/testing/selftests/wireguard/qemu/init.c | 1 - + 1 file changed, 1 deletion(-) + +--- a/tools/testing/selftests/wireguard/qemu/init.c ++++ b/tools/testing/selftests/wireguard/qemu/init.c +@@ -13,7 +13,6 @@ + #include + #include + #include +-#include + #include + #include + #include diff --git a/target/linux/generic/backport-5.4/080-wireguard-0090-wireguard-send-account-for-mtu-0-devices.patch b/target/linux/generic/backport-5.4/080-wireguard-0090-wireguard-send-account-for-mtu-0-devices.patch deleted file mode 100644 index d84efe20f0..0000000000 --- a/target/linux/generic/backport-5.4/080-wireguard-0090-wireguard-send-account-for-mtu-0-devices.patch +++ /dev/null @@ -1,95 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Fri, 14 Feb 2020 23:57:22 +0100 -Subject: [PATCH] wireguard: send: account for mtu=0 devices - -commit 175f1ca9a9ed8689d2028da1a7c624bb4fb4ff7e upstream. - -It turns out there's an easy way to get packets queued up while still -having an MTU of zero, and that's via persistent keep alive. This commit -makes sure that in whatever condition, we don't wind up dividing by -zero. Note that an MTU of zero for a wireguard interface is something -quasi-valid, so I don't think the correct fix is to limit it via -min_mtu. This can be reproduced easily with: - -ip link add wg0 type wireguard -ip link add wg1 type wireguard -ip link set wg0 up mtu 0 -ip link set wg1 up -wg set wg0 private-key <(wg genkey) -wg set wg1 listen-port 1 private-key <(wg genkey) peer $(wg show wg0 public-key) -wg set wg0 peer $(wg show wg1 public-key) persistent-keepalive 1 endpoint 127.0.0.1:1 - -However, while min_mtu=0 seems fine, it makes sense to restrict the -max_mtu. This commit also restricts the maximum MTU to the greatest -number for which rounding up to the padding multiple won't overflow a -signed integer. Packets this large were always rejected anyway -eventually, due to checks deeper in, but it seems more sound not to even -let the administrator configure something that won't work anyway. - -We use this opportunity to clean up this function a bit so that it's -clear which paths we're expecting. - -Signed-off-by: Jason A. 
Donenfeld -Cc: Eric Dumazet -Reviewed-by: Eric Dumazet -Signed-off-by: David S. Miller -Signed-off-by: Jason A. Donenfeld ---- - drivers/net/wireguard/device.c | 7 ++++--- - drivers/net/wireguard/send.c | 16 +++++++++++----- - 2 files changed, 15 insertions(+), 8 deletions(-) - ---- a/drivers/net/wireguard/device.c -+++ b/drivers/net/wireguard/device.c -@@ -258,6 +258,8 @@ static void wg_setup(struct net_device * - enum { WG_NETDEV_FEATURES = NETIF_F_HW_CSUM | NETIF_F_RXCSUM | - NETIF_F_SG | NETIF_F_GSO | - NETIF_F_GSO_SOFTWARE | NETIF_F_HIGHDMA }; -+ const int overhead = MESSAGE_MINIMUM_LENGTH + sizeof(struct udphdr) + -+ max(sizeof(struct ipv6hdr), sizeof(struct iphdr)); - - dev->netdev_ops = &netdev_ops; - dev->hard_header_len = 0; -@@ -271,9 +273,8 @@ static void wg_setup(struct net_device * - dev->features |= WG_NETDEV_FEATURES; - dev->hw_features |= WG_NETDEV_FEATURES; - dev->hw_enc_features |= WG_NETDEV_FEATURES; -- dev->mtu = ETH_DATA_LEN - MESSAGE_MINIMUM_LENGTH - -- sizeof(struct udphdr) - -- max(sizeof(struct ipv6hdr), sizeof(struct iphdr)); -+ dev->mtu = ETH_DATA_LEN - overhead; -+ dev->max_mtu = round_down(INT_MAX, MESSAGE_PADDING_MULTIPLE) - overhead; - - SET_NETDEV_DEVTYPE(dev, &device_type); - ---- a/drivers/net/wireguard/send.c -+++ b/drivers/net/wireguard/send.c -@@ -143,16 +143,22 @@ static void keep_key_fresh(struct wg_pee - - static unsigned int calculate_skb_padding(struct sk_buff *skb) - { -+ unsigned int padded_size, last_unit = skb->len; -+ -+ if (unlikely(!PACKET_CB(skb)->mtu)) -+ return ALIGN(last_unit, MESSAGE_PADDING_MULTIPLE) - last_unit; -+ - /* We do this modulo business with the MTU, just in case the networking - * layer gives us a packet that's bigger than the MTU. In that case, we - * wouldn't want the final subtraction to overflow in the case of the -- * padded_size being clamped. -+ * padded_size being clamped. Fortunately, that's very rarely the case, -+ * so we optimize for that not happening. - */ -- unsigned int last_unit = skb->len % PACKET_CB(skb)->mtu; -- unsigned int padded_size = ALIGN(last_unit, MESSAGE_PADDING_MULTIPLE); -+ if (unlikely(last_unit > PACKET_CB(skb)->mtu)) -+ last_unit %= PACKET_CB(skb)->mtu; - -- if (padded_size > PACKET_CB(skb)->mtu) -- padded_size = PACKET_CB(skb)->mtu; -+ padded_size = min(PACKET_CB(skb)->mtu, -+ ALIGN(last_unit, MESSAGE_PADDING_MULTIPLE)); - return padded_size - last_unit; - } - diff --git a/target/linux/generic/backport-5.4/080-wireguard-0091-wireguard-queueing-account-for-skb-protocol-0.patch b/target/linux/generic/backport-5.4/080-wireguard-0091-wireguard-queueing-account-for-skb-protocol-0.patch new file mode 100644 index 0000000000..a9ca655e74 --- /dev/null +++ b/target/linux/generic/backport-5.4/080-wireguard-0091-wireguard-queueing-account-for-skb-protocol-0.patch @@ -0,0 +1,100 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: "Jason A. Donenfeld" +Date: Wed, 18 Mar 2020 18:30:45 -0600 +Subject: [PATCH] wireguard: queueing: account for skb->protocol==0 + +commit a5588604af448664e796daf3c1d5a4523c60667b upstream. 
+ +We carry out checks to the effect of: + + if (skb->protocol != wg_examine_packet_protocol(skb)) + goto err; + +By having wg_skb_examine_untrusted_ip_hdr return 0 on failure, this +means that the check above still passes in the case where skb->protocol +is zero, which is possible to hit with AF_PACKET: + + struct sockaddr_pkt saddr = { .spkt_device = "wg0" }; + unsigned char buffer[5] = { 0 }; + sendto(socket(AF_PACKET, SOCK_PACKET, /* skb->protocol = */ 0), + buffer, sizeof(buffer), 0, (const struct sockaddr *)&saddr, sizeof(saddr)); + +Additional checks mean that this isn't actually a problem in the code +base, but I could imagine it becoming a problem later if the function is +used more liberally. + +I would prefer to fix this by having wg_examine_packet_protocol return a +32-bit ~0 value on failure, which will never match any value of +skb->protocol, which would simply change the generated code from a mov +to a movzx. However, sparse complains, and adding __force casts doesn't +seem like a good idea, so instead we just add a simple helper function +to check for the zero return value. Since wg_examine_packet_protocol +itself gets inlined, this winds up not adding an additional branch to +the generated code, since the 0 return value already happens in a +mergable branch. + +Reported-by: Fabian Freyer +Signed-off-by: Jason A. Donenfeld +Signed-off-by: David S. Miller +Signed-off-by: Jason A. Donenfeld +--- + drivers/net/wireguard/device.c | 2 +- + drivers/net/wireguard/queueing.h | 8 +++++++- + drivers/net/wireguard/receive.c | 4 ++-- + 3 files changed, 10 insertions(+), 4 deletions(-) + +--- a/drivers/net/wireguard/device.c ++++ b/drivers/net/wireguard/device.c +@@ -122,7 +122,7 @@ static netdev_tx_t wg_xmit(struct sk_buf + u32 mtu; + int ret; + +- if (unlikely(wg_skb_examine_untrusted_ip_hdr(skb) != skb->protocol)) { ++ if (unlikely(!wg_check_packet_protocol(skb))) { + ret = -EPROTONOSUPPORT; + net_dbg_ratelimited("%s: Invalid IP packet\n", dev->name); + goto err; +--- a/drivers/net/wireguard/queueing.h ++++ b/drivers/net/wireguard/queueing.h +@@ -66,7 +66,7 @@ struct packet_cb { + #define PACKET_PEER(skb) (PACKET_CB(skb)->keypair->entry.peer) + + /* Returns either the correct skb->protocol value, or 0 if invalid. 
*/ +-static inline __be16 wg_skb_examine_untrusted_ip_hdr(struct sk_buff *skb) ++static inline __be16 wg_examine_packet_protocol(struct sk_buff *skb) + { + if (skb_network_header(skb) >= skb->head && + (skb_network_header(skb) + sizeof(struct iphdr)) <= +@@ -81,6 +81,12 @@ static inline __be16 wg_skb_examine_untr + return 0; + } + ++static inline bool wg_check_packet_protocol(struct sk_buff *skb) ++{ ++ __be16 real_protocol = wg_examine_packet_protocol(skb); ++ return real_protocol && skb->protocol == real_protocol; ++} ++ + static inline void wg_reset_packet(struct sk_buff *skb) + { + skb_scrub_packet(skb, true); +--- a/drivers/net/wireguard/receive.c ++++ b/drivers/net/wireguard/receive.c +@@ -56,7 +56,7 @@ static int prepare_skb_header(struct sk_ + size_t data_offset, data_len, header_len; + struct udphdr *udp; + +- if (unlikely(wg_skb_examine_untrusted_ip_hdr(skb) != skb->protocol || ++ if (unlikely(!wg_check_packet_protocol(skb) || + skb_transport_header(skb) < skb->head || + (skb_transport_header(skb) + sizeof(struct udphdr)) > + skb_tail_pointer(skb))) +@@ -388,7 +388,7 @@ static void wg_packet_consume_data_done( + */ + skb->ip_summed = CHECKSUM_UNNECESSARY; + skb->csum_level = ~0; /* All levels */ +- skb->protocol = wg_skb_examine_untrusted_ip_hdr(skb); ++ skb->protocol = wg_examine_packet_protocol(skb); + if (skb->protocol == htons(ETH_P_IP)) { + len = ntohs(ip_hdr(skb)->tot_len); + if (unlikely(len < sizeof(struct iphdr))) diff --git a/target/linux/generic/backport-5.4/080-wireguard-0091-wireguard-socket-remove-extra-call-to-synchronize_ne.patch b/target/linux/generic/backport-5.4/080-wireguard-0091-wireguard-socket-remove-extra-call-to-synchronize_ne.patch deleted file mode 100644 index 458e9d51e5..0000000000 --- a/target/linux/generic/backport-5.4/080-wireguard-0091-wireguard-socket-remove-extra-call-to-synchronize_ne.patch +++ /dev/null @@ -1,32 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Fri, 14 Feb 2020 23:57:23 +0100 -Subject: [PATCH] wireguard: socket: remove extra call to synchronize_net - -commit 1fbc33b0a7feb6ca72bf7dc8a05d81485ee8ee2e upstream. - -synchronize_net() is a wrapper around synchronize_rcu(), so there's no -point in having synchronize_net and synchronize_rcu back to back, -despite the documentation comment suggesting maybe it's somewhat useful, -"Wait for packets currently being received to be done." This commit -removes the extra call. - -Signed-off-by: Jason A. Donenfeld -Suggested-by: Eric Dumazet -Reviewed-by: Eric Dumazet -Signed-off-by: David S. Miller -Signed-off-by: Jason A. 
Donenfeld ---- - drivers/net/wireguard/socket.c | 1 - - 1 file changed, 1 deletion(-) - ---- a/drivers/net/wireguard/socket.c -+++ b/drivers/net/wireguard/socket.c -@@ -432,7 +432,6 @@ void wg_socket_reinit(struct wg_device * - wg->incoming_port = ntohs(inet_sk(new4)->inet_sport); - mutex_unlock(&wg->socket_update_lock); - synchronize_rcu(); -- synchronize_net(); - sock_free(old4); - sock_free(old6); - } diff --git a/target/linux/generic/backport-5.4/080-wireguard-0092-wireguard-receive-remove-dead-code-from-default-pack.patch b/target/linux/generic/backport-5.4/080-wireguard-0092-wireguard-receive-remove-dead-code-from-default-pack.patch new file mode 100644 index 0000000000..bcd4fbfbc1 --- /dev/null +++ b/target/linux/generic/backport-5.4/080-wireguard-0092-wireguard-receive-remove-dead-code-from-default-pack.patch @@ -0,0 +1,35 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: "Jason A. Donenfeld" +Date: Wed, 18 Mar 2020 18:30:46 -0600 +Subject: [PATCH] wireguard: receive: remove dead code from default packet type + case + +commit 2b8765c52db24c0fbcc81bac9b5e8390f2c7d3c8 upstream. + +The situation in which we wind up hitting the default case here +indicates a major bug in earlier parsing code. It is not a usual thing +that should ever happen, which means a "friendly" message for it doesn't +make sense. Rather, replace this with a WARN_ON, just like we do earlier +in the file for a similar situation, so that somebody sends us a bug +report and we can fix it. + +Reported-by: Fabian Freyer +Signed-off-by: Jason A. Donenfeld +Signed-off-by: David S. Miller +Signed-off-by: Jason A. Donenfeld +--- + drivers/net/wireguard/receive.c | 3 +-- + 1 file changed, 1 insertion(+), 2 deletions(-) + +--- a/drivers/net/wireguard/receive.c ++++ b/drivers/net/wireguard/receive.c +@@ -587,8 +587,7 @@ void wg_packet_receive(struct wg_device + wg_packet_consume_data(wg, skb); + break; + default: +- net_dbg_skb_ratelimited("%s: Invalid packet from %pISpfsc\n", +- wg->dev->name, skb); ++ WARN(1, "Non-exhaustive parsing of packet header lead to unknown packet type!\n"); + goto err; + } + return; diff --git a/target/linux/generic/backport-5.4/080-wireguard-0092-wireguard-selftests-remove-duplicated-include-sys-ty.patch b/target/linux/generic/backport-5.4/080-wireguard-0092-wireguard-selftests-remove-duplicated-include-sys-ty.patch deleted file mode 100644 index 93545e6760..0000000000 --- a/target/linux/generic/backport-5.4/080-wireguard-0092-wireguard-selftests-remove-duplicated-include-sys-ty.patch +++ /dev/null @@ -1,27 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: YueHaibing -Date: Wed, 18 Mar 2020 18:30:43 -0600 -Subject: [PATCH] wireguard: selftests: remove duplicated include - -commit 166391159c5deb84795d2ff46e95f276177fa5fb upstream. - -This commit removes a duplicated include. - -Signed-off-by: YueHaibing -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. 
Donenfeld ---- - tools/testing/selftests/wireguard/qemu/init.c | 1 - - 1 file changed, 1 deletion(-) - ---- a/tools/testing/selftests/wireguard/qemu/init.c -+++ b/tools/testing/selftests/wireguard/qemu/init.c -@@ -13,7 +13,6 @@ - #include - #include - #include --#include - #include - #include - #include diff --git a/target/linux/generic/backport-5.4/080-wireguard-0093-wireguard-noise-error-out-precomputed-DH-during-hand.patch b/target/linux/generic/backport-5.4/080-wireguard-0093-wireguard-noise-error-out-precomputed-DH-during-hand.patch new file mode 100644 index 0000000000..dac3046e47 --- /dev/null +++ b/target/linux/generic/backport-5.4/080-wireguard-0093-wireguard-noise-error-out-precomputed-DH-during-hand.patch @@ -0,0 +1,224 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: "Jason A. Donenfeld" +Date: Wed, 18 Mar 2020 18:30:47 -0600 +Subject: [PATCH] wireguard: noise: error out precomputed DH during handshake + rather than config + +commit 11a7686aa99c7fe4b3f80f6dcccd54129817984d upstream. + +We precompute the static-static ECDH during configuration time, in order +to save an expensive computation later when receiving network packets. +However, not all ECDH computations yield a contributory result. Prior, +we were just not letting those peers be added to the interface. However, +this creates a strange inconsistency, since it was still possible to add +other weird points, like a valid public key plus a low-order point, and, +like points that result in zeros, a handshake would not complete. In +order to make the behavior more uniform and less surprising, simply +allow all peers to be added. Then, we'll error out later when doing the +crypto if there's an issue. This also adds more separation between the +crypto layer and the configuration layer. + +Discussed-with: Mathias Hall-Andersen +Signed-off-by: Jason A. Donenfeld +Signed-off-by: David S. Miller +Signed-off-by: Jason A. Donenfeld +--- + drivers/net/wireguard/netlink.c | 8 +--- + drivers/net/wireguard/noise.c | 55 ++++++++++++---------- + drivers/net/wireguard/noise.h | 12 ++--- + drivers/net/wireguard/peer.c | 7 +-- + tools/testing/selftests/wireguard/netns.sh | 15 ++++-- + 5 files changed, 49 insertions(+), 48 deletions(-) + +--- a/drivers/net/wireguard/netlink.c ++++ b/drivers/net/wireguard/netlink.c +@@ -417,11 +417,7 @@ static int set_peer(struct wg_device *wg + + peer = wg_peer_create(wg, public_key, preshared_key); + if (IS_ERR(peer)) { +- /* Similar to the above, if the key is invalid, we skip +- * it without fanfare, so that services don't need to +- * worry about doing key validation themselves. +- */ +- ret = PTR_ERR(peer) == -EKEYREJECTED ? 
0 : PTR_ERR(peer); ++ ret = PTR_ERR(peer); + peer = NULL; + goto out; + } +@@ -575,7 +571,7 @@ static int wg_set_device(struct sk_buff + private_key); + list_for_each_entry_safe(peer, temp, &wg->peer_list, + peer_list) { +- BUG_ON(!wg_noise_precompute_static_static(peer)); ++ wg_noise_precompute_static_static(peer); + wg_noise_expire_current_peer_keypairs(peer); + } + wg_cookie_checker_precompute_device_keys(&wg->cookie_checker); +--- a/drivers/net/wireguard/noise.c ++++ b/drivers/net/wireguard/noise.c +@@ -44,32 +44,23 @@ void __init wg_noise_init(void) + } + + /* Must hold peer->handshake.static_identity->lock */ +-bool wg_noise_precompute_static_static(struct wg_peer *peer) ++void wg_noise_precompute_static_static(struct wg_peer *peer) + { +- bool ret; +- + down_write(&peer->handshake.lock); +- if (peer->handshake.static_identity->has_identity) { +- ret = curve25519( +- peer->handshake.precomputed_static_static, ++ if (!peer->handshake.static_identity->has_identity || ++ !curve25519(peer->handshake.precomputed_static_static, + peer->handshake.static_identity->static_private, +- peer->handshake.remote_static); +- } else { +- u8 empty[NOISE_PUBLIC_KEY_LEN] = { 0 }; +- +- ret = curve25519(empty, empty, peer->handshake.remote_static); ++ peer->handshake.remote_static)) + memset(peer->handshake.precomputed_static_static, 0, + NOISE_PUBLIC_KEY_LEN); +- } + up_write(&peer->handshake.lock); +- return ret; + } + +-bool wg_noise_handshake_init(struct noise_handshake *handshake, +- struct noise_static_identity *static_identity, +- const u8 peer_public_key[NOISE_PUBLIC_KEY_LEN], +- const u8 peer_preshared_key[NOISE_SYMMETRIC_KEY_LEN], +- struct wg_peer *peer) ++void wg_noise_handshake_init(struct noise_handshake *handshake, ++ struct noise_static_identity *static_identity, ++ const u8 peer_public_key[NOISE_PUBLIC_KEY_LEN], ++ const u8 peer_preshared_key[NOISE_SYMMETRIC_KEY_LEN], ++ struct wg_peer *peer) + { + memset(handshake, 0, sizeof(*handshake)); + init_rwsem(&handshake->lock); +@@ -81,7 +72,7 @@ bool wg_noise_handshake_init(struct nois + NOISE_SYMMETRIC_KEY_LEN); + handshake->static_identity = static_identity; + handshake->state = HANDSHAKE_ZEROED; +- return wg_noise_precompute_static_static(peer); ++ wg_noise_precompute_static_static(peer); + } + + static void handshake_zero(struct noise_handshake *handshake) +@@ -403,6 +394,19 @@ static bool __must_check mix_dh(u8 chain + return true; + } + ++static bool __must_check mix_precomputed_dh(u8 chaining_key[NOISE_HASH_LEN], ++ u8 key[NOISE_SYMMETRIC_KEY_LEN], ++ const u8 precomputed[NOISE_PUBLIC_KEY_LEN]) ++{ ++ static u8 zero_point[NOISE_PUBLIC_KEY_LEN]; ++ if (unlikely(!crypto_memneq(precomputed, zero_point, NOISE_PUBLIC_KEY_LEN))) ++ return false; ++ kdf(chaining_key, key, NULL, precomputed, NOISE_HASH_LEN, ++ NOISE_SYMMETRIC_KEY_LEN, 0, NOISE_PUBLIC_KEY_LEN, ++ chaining_key); ++ return true; ++} ++ + static void mix_hash(u8 hash[NOISE_HASH_LEN], const u8 *src, size_t src_len) + { + struct blake2s_state blake; +@@ -531,10 +535,9 @@ wg_noise_handshake_create_initiation(str + NOISE_PUBLIC_KEY_LEN, key, handshake->hash); + + /* ss */ +- kdf(handshake->chaining_key, key, NULL, +- handshake->precomputed_static_static, NOISE_HASH_LEN, +- NOISE_SYMMETRIC_KEY_LEN, 0, NOISE_PUBLIC_KEY_LEN, +- handshake->chaining_key); ++ if (!mix_precomputed_dh(handshake->chaining_key, key, ++ handshake->precomputed_static_static)) ++ goto out; + + /* {t} */ + tai64n_now(timestamp); +@@ -595,9 +598,9 @@ wg_noise_handshake_consume_initiation(st + handshake = 
&peer->handshake; + + /* ss */ +- kdf(chaining_key, key, NULL, handshake->precomputed_static_static, +- NOISE_HASH_LEN, NOISE_SYMMETRIC_KEY_LEN, 0, NOISE_PUBLIC_KEY_LEN, +- chaining_key); ++ if (!mix_precomputed_dh(chaining_key, key, ++ handshake->precomputed_static_static)) ++ goto out; + + /* {t} */ + if (!message_decrypt(t, src->encrypted_timestamp, +--- a/drivers/net/wireguard/noise.h ++++ b/drivers/net/wireguard/noise.h +@@ -94,11 +94,11 @@ struct noise_handshake { + struct wg_device; + + void wg_noise_init(void); +-bool wg_noise_handshake_init(struct noise_handshake *handshake, +- struct noise_static_identity *static_identity, +- const u8 peer_public_key[NOISE_PUBLIC_KEY_LEN], +- const u8 peer_preshared_key[NOISE_SYMMETRIC_KEY_LEN], +- struct wg_peer *peer); ++void wg_noise_handshake_init(struct noise_handshake *handshake, ++ struct noise_static_identity *static_identity, ++ const u8 peer_public_key[NOISE_PUBLIC_KEY_LEN], ++ const u8 peer_preshared_key[NOISE_SYMMETRIC_KEY_LEN], ++ struct wg_peer *peer); + void wg_noise_handshake_clear(struct noise_handshake *handshake); + static inline void wg_noise_reset_last_sent_handshake(atomic64_t *handshake_ns) + { +@@ -116,7 +116,7 @@ void wg_noise_expire_current_peer_keypai + void wg_noise_set_static_identity_private_key( + struct noise_static_identity *static_identity, + const u8 private_key[NOISE_PUBLIC_KEY_LEN]); +-bool wg_noise_precompute_static_static(struct wg_peer *peer); ++void wg_noise_precompute_static_static(struct wg_peer *peer); + + bool + wg_noise_handshake_create_initiation(struct message_handshake_initiation *dst, +--- a/drivers/net/wireguard/peer.c ++++ b/drivers/net/wireguard/peer.c +@@ -34,11 +34,8 @@ struct wg_peer *wg_peer_create(struct wg + return ERR_PTR(ret); + peer->device = wg; + +- if (!wg_noise_handshake_init(&peer->handshake, &wg->static_identity, +- public_key, preshared_key, peer)) { +- ret = -EKEYREJECTED; +- goto err_1; +- } ++ wg_noise_handshake_init(&peer->handshake, &wg->static_identity, ++ public_key, preshared_key, peer); + if (dst_cache_init(&peer->endpoint_cache, GFP_KERNEL)) + goto err_1; + if (wg_packet_queue_init(&peer->tx_queue, wg_packet_tx_worker, false, +--- a/tools/testing/selftests/wireguard/netns.sh ++++ b/tools/testing/selftests/wireguard/netns.sh +@@ -527,11 +527,16 @@ n0 wg set wg0 peer "$pub2" allowed-ips 0 + n0 wg set wg0 peer "$pub2" allowed-ips ::/0,1700::/111,5000::/4,e000::/37,9000::/75 + n0 wg set wg0 peer "$pub2" allowed-ips ::/0 + n0 wg set wg0 peer "$pub2" remove +-low_order_points=( AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= AQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= 4Ot6fDtBuK4WVuP68Z/EatoJjeucMrH9hmIFFl9JuAA= X5yVvKNQjCSx0LFVnIPvWwREXMRYHI6G2CJO3dCfEVc= 7P///////////////////////////////////////38= 7f///////////////////////////////////////38= 7v///////////////////////////////////////38= ) +-n0 wg set wg0 private-key /dev/null ${low_order_points[@]/#/peer } +-[[ -z $(n0 wg show wg0 peers) ]] +-n0 wg set wg0 private-key <(echo "$key1") ${low_order_points[@]/#/peer } +-[[ -z $(n0 wg show wg0 peers) ]] ++for low_order_point in AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= AQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= 4Ot6fDtBuK4WVuP68Z/EatoJjeucMrH9hmIFFl9JuAA= X5yVvKNQjCSx0LFVnIPvWwREXMRYHI6G2CJO3dCfEVc= 7P///////////////////////////////////////38= 7f///////////////////////////////////////38= 7v///////////////////////////////////////38=; do ++ n0 wg set wg0 peer "$low_order_point" persistent-keepalive 1 endpoint 127.0.0.1:1111 ++done ++[[ -n $(n0 wg show wg0 peers) ]] ++exec 4< 
<(n0 ncat -l -u -p 1111) ++ncat_pid=$! ++waitncatudp $netns0 $ncat_pid ++ip0 link set wg0 up ++! read -r -n 1 -t 2 <&4 || false ++kill $ncat_pid + ip0 link del wg0 + + declare -A objects diff --git a/target/linux/generic/backport-5.4/080-wireguard-0093-wireguard-queueing-account-for-skb-protocol-0.patch b/target/linux/generic/backport-5.4/080-wireguard-0093-wireguard-queueing-account-for-skb-protocol-0.patch deleted file mode 100644 index a9ca655e74..0000000000 --- a/target/linux/generic/backport-5.4/080-wireguard-0093-wireguard-queueing-account-for-skb-protocol-0.patch +++ /dev/null @@ -1,100 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Wed, 18 Mar 2020 18:30:45 -0600 -Subject: [PATCH] wireguard: queueing: account for skb->protocol==0 - -commit a5588604af448664e796daf3c1d5a4523c60667b upstream. - -We carry out checks to the effect of: - - if (skb->protocol != wg_examine_packet_protocol(skb)) - goto err; - -By having wg_skb_examine_untrusted_ip_hdr return 0 on failure, this -means that the check above still passes in the case where skb->protocol -is zero, which is possible to hit with AF_PACKET: - - struct sockaddr_pkt saddr = { .spkt_device = "wg0" }; - unsigned char buffer[5] = { 0 }; - sendto(socket(AF_PACKET, SOCK_PACKET, /* skb->protocol = */ 0), - buffer, sizeof(buffer), 0, (const struct sockaddr *)&saddr, sizeof(saddr)); - -Additional checks mean that this isn't actually a problem in the code -base, but I could imagine it becoming a problem later if the function is -used more liberally. - -I would prefer to fix this by having wg_examine_packet_protocol return a -32-bit ~0 value on failure, which will never match any value of -skb->protocol, which would simply change the generated code from a mov -to a movzx. However, sparse complains, and adding __force casts doesn't -seem like a good idea, so instead we just add a simple helper function -to check for the zero return value. Since wg_examine_packet_protocol -itself gets inlined, this winds up not adding an additional branch to -the generated code, since the 0 return value already happens in a -mergable branch. - -Reported-by: Fabian Freyer -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. Donenfeld ---- - drivers/net/wireguard/device.c | 2 +- - drivers/net/wireguard/queueing.h | 8 +++++++- - drivers/net/wireguard/receive.c | 4 ++-- - 3 files changed, 10 insertions(+), 4 deletions(-) - ---- a/drivers/net/wireguard/device.c -+++ b/drivers/net/wireguard/device.c -@@ -122,7 +122,7 @@ static netdev_tx_t wg_xmit(struct sk_buf - u32 mtu; - int ret; - -- if (unlikely(wg_skb_examine_untrusted_ip_hdr(skb) != skb->protocol)) { -+ if (unlikely(!wg_check_packet_protocol(skb))) { - ret = -EPROTONOSUPPORT; - net_dbg_ratelimited("%s: Invalid IP packet\n", dev->name); - goto err; ---- a/drivers/net/wireguard/queueing.h -+++ b/drivers/net/wireguard/queueing.h -@@ -66,7 +66,7 @@ struct packet_cb { - #define PACKET_PEER(skb) (PACKET_CB(skb)->keypair->entry.peer) - - /* Returns either the correct skb->protocol value, or 0 if invalid. 
*/ --static inline __be16 wg_skb_examine_untrusted_ip_hdr(struct sk_buff *skb) -+static inline __be16 wg_examine_packet_protocol(struct sk_buff *skb) - { - if (skb_network_header(skb) >= skb->head && - (skb_network_header(skb) + sizeof(struct iphdr)) <= -@@ -81,6 +81,12 @@ static inline __be16 wg_skb_examine_untr - return 0; - } - -+static inline bool wg_check_packet_protocol(struct sk_buff *skb) -+{ -+ __be16 real_protocol = wg_examine_packet_protocol(skb); -+ return real_protocol && skb->protocol == real_protocol; -+} -+ - static inline void wg_reset_packet(struct sk_buff *skb) - { - skb_scrub_packet(skb, true); ---- a/drivers/net/wireguard/receive.c -+++ b/drivers/net/wireguard/receive.c -@@ -56,7 +56,7 @@ static int prepare_skb_header(struct sk_ - size_t data_offset, data_len, header_len; - struct udphdr *udp; - -- if (unlikely(wg_skb_examine_untrusted_ip_hdr(skb) != skb->protocol || -+ if (unlikely(!wg_check_packet_protocol(skb) || - skb_transport_header(skb) < skb->head || - (skb_transport_header(skb) + sizeof(struct udphdr)) > - skb_tail_pointer(skb))) -@@ -388,7 +388,7 @@ static void wg_packet_consume_data_done( - */ - skb->ip_summed = CHECKSUM_UNNECESSARY; - skb->csum_level = ~0; /* All levels */ -- skb->protocol = wg_skb_examine_untrusted_ip_hdr(skb); -+ skb->protocol = wg_examine_packet_protocol(skb); - if (skb->protocol == htons(ETH_P_IP)) { - len = ntohs(ip_hdr(skb)->tot_len); - if (unlikely(len < sizeof(struct iphdr))) diff --git a/target/linux/generic/backport-5.4/080-wireguard-0094-wireguard-receive-remove-dead-code-from-default-pack.patch b/target/linux/generic/backport-5.4/080-wireguard-0094-wireguard-receive-remove-dead-code-from-default-pack.patch deleted file mode 100644 index bcd4fbfbc1..0000000000 --- a/target/linux/generic/backport-5.4/080-wireguard-0094-wireguard-receive-remove-dead-code-from-default-pack.patch +++ /dev/null @@ -1,35 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Wed, 18 Mar 2020 18:30:46 -0600 -Subject: [PATCH] wireguard: receive: remove dead code from default packet type - case - -commit 2b8765c52db24c0fbcc81bac9b5e8390f2c7d3c8 upstream. - -The situation in which we wind up hitting the default case here -indicates a major bug in earlier parsing code. It is not a usual thing -that should ever happen, which means a "friendly" message for it doesn't -make sense. Rather, replace this with a WARN_ON, just like we do earlier -in the file for a similar situation, so that somebody sends us a bug -report and we can fix it. - -Reported-by: Fabian Freyer -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. 
Donenfeld ---- - drivers/net/wireguard/receive.c | 3 +-- - 1 file changed, 1 insertion(+), 2 deletions(-) - ---- a/drivers/net/wireguard/receive.c -+++ b/drivers/net/wireguard/receive.c -@@ -587,8 +587,7 @@ void wg_packet_receive(struct wg_device - wg_packet_consume_data(wg, skb); - break; - default: -- net_dbg_skb_ratelimited("%s: Invalid packet from %pISpfsc\n", -- wg->dev->name, skb); -+ WARN(1, "Non-exhaustive parsing of packet header lead to unknown packet type!\n"); - goto err; - } - return; diff --git a/target/linux/generic/backport-5.4/080-wireguard-0094-wireguard-send-remove-errant-newline-from-packet_enc.patch b/target/linux/generic/backport-5.4/080-wireguard-0094-wireguard-send-remove-errant-newline-from-packet_enc.patch new file mode 100644 index 0000000000..c92b6a784a --- /dev/null +++ b/target/linux/generic/backport-5.4/080-wireguard-0094-wireguard-send-remove-errant-newline-from-packet_enc.patch @@ -0,0 +1,29 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Sultan Alsawaf +Date: Wed, 29 Apr 2020 14:59:20 -0600 +Subject: [PATCH] wireguard: send: remove errant newline from + packet_encrypt_worker + +commit d6833e42786e050e7522d6a91a9361e54085897d upstream. + +This commit removes a useless newline at the end of a scope, which +doesn't add anything in the way of organization or readability. + +Signed-off-by: Sultan Alsawaf +Signed-off-by: Jason A. Donenfeld +Signed-off-by: David S. Miller +Signed-off-by: Jason A. Donenfeld +--- + drivers/net/wireguard/send.c | 1 - + 1 file changed, 1 deletion(-) + +--- a/drivers/net/wireguard/send.c ++++ b/drivers/net/wireguard/send.c +@@ -304,7 +304,6 @@ void wg_packet_encrypt_worker(struct wor + } + wg_queue_enqueue_per_peer(&PACKET_PEER(first)->tx_queue, first, + state); +- + } + } + diff --git a/target/linux/generic/backport-5.4/080-wireguard-0095-wireguard-noise-error-out-precomputed-DH-during-hand.patch b/target/linux/generic/backport-5.4/080-wireguard-0095-wireguard-noise-error-out-precomputed-DH-during-hand.patch deleted file mode 100644 index dac3046e47..0000000000 --- a/target/linux/generic/backport-5.4/080-wireguard-0095-wireguard-noise-error-out-precomputed-DH-during-hand.patch +++ /dev/null @@ -1,224 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Wed, 18 Mar 2020 18:30:47 -0600 -Subject: [PATCH] wireguard: noise: error out precomputed DH during handshake - rather than config - -commit 11a7686aa99c7fe4b3f80f6dcccd54129817984d upstream. - -We precompute the static-static ECDH during configuration time, in order -to save an expensive computation later when receiving network packets. -However, not all ECDH computations yield a contributory result. Prior, -we were just not letting those peers be added to the interface. However, -this creates a strange inconsistency, since it was still possible to add -other weird points, like a valid public key plus a low-order point, and, -like points that result in zeros, a handshake would not complete. In -order to make the behavior more uniform and less surprising, simply -allow all peers to be added. Then, we'll error out later when doing the -crypto if there's an issue. This also adds more separation between the -crypto layer and the configuration layer. - -Discussed-with: Mathias Hall-Andersen -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. 
Donenfeld ---- - drivers/net/wireguard/netlink.c | 8 +--- - drivers/net/wireguard/noise.c | 55 ++++++++++++---------- - drivers/net/wireguard/noise.h | 12 ++--- - drivers/net/wireguard/peer.c | 7 +-- - tools/testing/selftests/wireguard/netns.sh | 15 ++++-- - 5 files changed, 49 insertions(+), 48 deletions(-) - ---- a/drivers/net/wireguard/netlink.c -+++ b/drivers/net/wireguard/netlink.c -@@ -417,11 +417,7 @@ static int set_peer(struct wg_device *wg - - peer = wg_peer_create(wg, public_key, preshared_key); - if (IS_ERR(peer)) { -- /* Similar to the above, if the key is invalid, we skip -- * it without fanfare, so that services don't need to -- * worry about doing key validation themselves. -- */ -- ret = PTR_ERR(peer) == -EKEYREJECTED ? 0 : PTR_ERR(peer); -+ ret = PTR_ERR(peer); - peer = NULL; - goto out; - } -@@ -575,7 +571,7 @@ static int wg_set_device(struct sk_buff - private_key); - list_for_each_entry_safe(peer, temp, &wg->peer_list, - peer_list) { -- BUG_ON(!wg_noise_precompute_static_static(peer)); -+ wg_noise_precompute_static_static(peer); - wg_noise_expire_current_peer_keypairs(peer); - } - wg_cookie_checker_precompute_device_keys(&wg->cookie_checker); ---- a/drivers/net/wireguard/noise.c -+++ b/drivers/net/wireguard/noise.c -@@ -44,32 +44,23 @@ void __init wg_noise_init(void) - } - - /* Must hold peer->handshake.static_identity->lock */ --bool wg_noise_precompute_static_static(struct wg_peer *peer) -+void wg_noise_precompute_static_static(struct wg_peer *peer) - { -- bool ret; -- - down_write(&peer->handshake.lock); -- if (peer->handshake.static_identity->has_identity) { -- ret = curve25519( -- peer->handshake.precomputed_static_static, -+ if (!peer->handshake.static_identity->has_identity || -+ !curve25519(peer->handshake.precomputed_static_static, - peer->handshake.static_identity->static_private, -- peer->handshake.remote_static); -- } else { -- u8 empty[NOISE_PUBLIC_KEY_LEN] = { 0 }; -- -- ret = curve25519(empty, empty, peer->handshake.remote_static); -+ peer->handshake.remote_static)) - memset(peer->handshake.precomputed_static_static, 0, - NOISE_PUBLIC_KEY_LEN); -- } - up_write(&peer->handshake.lock); -- return ret; - } - --bool wg_noise_handshake_init(struct noise_handshake *handshake, -- struct noise_static_identity *static_identity, -- const u8 peer_public_key[NOISE_PUBLIC_KEY_LEN], -- const u8 peer_preshared_key[NOISE_SYMMETRIC_KEY_LEN], -- struct wg_peer *peer) -+void wg_noise_handshake_init(struct noise_handshake *handshake, -+ struct noise_static_identity *static_identity, -+ const u8 peer_public_key[NOISE_PUBLIC_KEY_LEN], -+ const u8 peer_preshared_key[NOISE_SYMMETRIC_KEY_LEN], -+ struct wg_peer *peer) - { - memset(handshake, 0, sizeof(*handshake)); - init_rwsem(&handshake->lock); -@@ -81,7 +72,7 @@ bool wg_noise_handshake_init(struct nois - NOISE_SYMMETRIC_KEY_LEN); - handshake->static_identity = static_identity; - handshake->state = HANDSHAKE_ZEROED; -- return wg_noise_precompute_static_static(peer); -+ wg_noise_precompute_static_static(peer); - } - - static void handshake_zero(struct noise_handshake *handshake) -@@ -403,6 +394,19 @@ static bool __must_check mix_dh(u8 chain - return true; - } - -+static bool __must_check mix_precomputed_dh(u8 chaining_key[NOISE_HASH_LEN], -+ u8 key[NOISE_SYMMETRIC_KEY_LEN], -+ const u8 precomputed[NOISE_PUBLIC_KEY_LEN]) -+{ -+ static u8 zero_point[NOISE_PUBLIC_KEY_LEN]; -+ if (unlikely(!crypto_memneq(precomputed, zero_point, NOISE_PUBLIC_KEY_LEN))) -+ return false; -+ kdf(chaining_key, key, NULL, precomputed, NOISE_HASH_LEN, -+ 
NOISE_SYMMETRIC_KEY_LEN, 0, NOISE_PUBLIC_KEY_LEN, -+ chaining_key); -+ return true; -+} -+ - static void mix_hash(u8 hash[NOISE_HASH_LEN], const u8 *src, size_t src_len) - { - struct blake2s_state blake; -@@ -531,10 +535,9 @@ wg_noise_handshake_create_initiation(str - NOISE_PUBLIC_KEY_LEN, key, handshake->hash); - - /* ss */ -- kdf(handshake->chaining_key, key, NULL, -- handshake->precomputed_static_static, NOISE_HASH_LEN, -- NOISE_SYMMETRIC_KEY_LEN, 0, NOISE_PUBLIC_KEY_LEN, -- handshake->chaining_key); -+ if (!mix_precomputed_dh(handshake->chaining_key, key, -+ handshake->precomputed_static_static)) -+ goto out; - - /* {t} */ - tai64n_now(timestamp); -@@ -595,9 +598,9 @@ wg_noise_handshake_consume_initiation(st - handshake = &peer->handshake; - - /* ss */ -- kdf(chaining_key, key, NULL, handshake->precomputed_static_static, -- NOISE_HASH_LEN, NOISE_SYMMETRIC_KEY_LEN, 0, NOISE_PUBLIC_KEY_LEN, -- chaining_key); -+ if (!mix_precomputed_dh(chaining_key, key, -+ handshake->precomputed_static_static)) -+ goto out; - - /* {t} */ - if (!message_decrypt(t, src->encrypted_timestamp, ---- a/drivers/net/wireguard/noise.h -+++ b/drivers/net/wireguard/noise.h -@@ -94,11 +94,11 @@ struct noise_handshake { - struct wg_device; - - void wg_noise_init(void); --bool wg_noise_handshake_init(struct noise_handshake *handshake, -- struct noise_static_identity *static_identity, -- const u8 peer_public_key[NOISE_PUBLIC_KEY_LEN], -- const u8 peer_preshared_key[NOISE_SYMMETRIC_KEY_LEN], -- struct wg_peer *peer); -+void wg_noise_handshake_init(struct noise_handshake *handshake, -+ struct noise_static_identity *static_identity, -+ const u8 peer_public_key[NOISE_PUBLIC_KEY_LEN], -+ const u8 peer_preshared_key[NOISE_SYMMETRIC_KEY_LEN], -+ struct wg_peer *peer); - void wg_noise_handshake_clear(struct noise_handshake *handshake); - static inline void wg_noise_reset_last_sent_handshake(atomic64_t *handshake_ns) - { -@@ -116,7 +116,7 @@ void wg_noise_expire_current_peer_keypai - void wg_noise_set_static_identity_private_key( - struct noise_static_identity *static_identity, - const u8 private_key[NOISE_PUBLIC_KEY_LEN]); --bool wg_noise_precompute_static_static(struct wg_peer *peer); -+void wg_noise_precompute_static_static(struct wg_peer *peer); - - bool - wg_noise_handshake_create_initiation(struct message_handshake_initiation *dst, ---- a/drivers/net/wireguard/peer.c -+++ b/drivers/net/wireguard/peer.c -@@ -34,11 +34,8 @@ struct wg_peer *wg_peer_create(struct wg - return ERR_PTR(ret); - peer->device = wg; - -- if (!wg_noise_handshake_init(&peer->handshake, &wg->static_identity, -- public_key, preshared_key, peer)) { -- ret = -EKEYREJECTED; -- goto err_1; -- } -+ wg_noise_handshake_init(&peer->handshake, &wg->static_identity, -+ public_key, preshared_key, peer); - if (dst_cache_init(&peer->endpoint_cache, GFP_KERNEL)) - goto err_1; - if (wg_packet_queue_init(&peer->tx_queue, wg_packet_tx_worker, false, ---- a/tools/testing/selftests/wireguard/netns.sh -+++ b/tools/testing/selftests/wireguard/netns.sh -@@ -527,11 +527,16 @@ n0 wg set wg0 peer "$pub2" allowed-ips 0 - n0 wg set wg0 peer "$pub2" allowed-ips ::/0,1700::/111,5000::/4,e000::/37,9000::/75 - n0 wg set wg0 peer "$pub2" allowed-ips ::/0 - n0 wg set wg0 peer "$pub2" remove --low_order_points=( AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= AQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= 4Ot6fDtBuK4WVuP68Z/EatoJjeucMrH9hmIFFl9JuAA= X5yVvKNQjCSx0LFVnIPvWwREXMRYHI6G2CJO3dCfEVc= 7P///////////////////////////////////////38= 7f///////////////////////////////////////38= 
7v///////////////////////////////////////38= ) --n0 wg set wg0 private-key /dev/null ${low_order_points[@]/#/peer } --[[ -z $(n0 wg show wg0 peers) ]] --n0 wg set wg0 private-key <(echo "$key1") ${low_order_points[@]/#/peer } --[[ -z $(n0 wg show wg0 peers) ]] -+for low_order_point in AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= AQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= 4Ot6fDtBuK4WVuP68Z/EatoJjeucMrH9hmIFFl9JuAA= X5yVvKNQjCSx0LFVnIPvWwREXMRYHI6G2CJO3dCfEVc= 7P///////////////////////////////////////38= 7f///////////////////////////////////////38= 7v///////////////////////////////////////38=; do -+ n0 wg set wg0 peer "$low_order_point" persistent-keepalive 1 endpoint 127.0.0.1:1111 -+done -+[[ -n $(n0 wg show wg0 peers) ]] -+exec 4< <(n0 ncat -l -u -p 1111) -+ncat_pid=$! -+waitncatudp $netns0 $ncat_pid -+ip0 link set wg0 up -+! read -r -n 1 -t 2 <&4 || false -+kill $ncat_pid - ip0 link del wg0 - - declare -A objects diff --git a/target/linux/generic/backport-5.4/080-wireguard-0095-wireguard-queueing-cleanup-ptr_ring-in-error-path-of.patch b/target/linux/generic/backport-5.4/080-wireguard-0095-wireguard-queueing-cleanup-ptr_ring-in-error-path-of.patch new file mode 100644 index 0000000000..a72c509894 --- /dev/null +++ b/target/linux/generic/backport-5.4/080-wireguard-0095-wireguard-queueing-cleanup-ptr_ring-in-error-path-of.patch @@ -0,0 +1,35 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: "Jason A. Donenfeld" +Date: Wed, 29 Apr 2020 14:59:21 -0600 +Subject: [PATCH] wireguard: queueing: cleanup ptr_ring in error path of + packet_queue_init + +commit 130c58606171326c81841a49cc913cd354113dd9 upstream. + +Prior, if the alloc_percpu of packet_percpu_multicore_worker_alloc +failed, the previously allocated ptr_ring wouldn't be freed. This commit +adds the missing call to ptr_ring_cleanup in the error case. + +Reported-by: Sultan Alsawaf +Fixes: e7096c131e51 ("net: WireGuard secure network tunnel") +Signed-off-by: Jason A. Donenfeld +Signed-off-by: David S. Miller +Signed-off-by: Jason A. Donenfeld +--- + drivers/net/wireguard/queueing.c | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +--- a/drivers/net/wireguard/queueing.c ++++ b/drivers/net/wireguard/queueing.c +@@ -35,8 +35,10 @@ int wg_packet_queue_init(struct crypt_qu + if (multicore) { + queue->worker = wg_packet_percpu_multicore_worker_alloc( + function, queue); +- if (!queue->worker) ++ if (!queue->worker) { ++ ptr_ring_cleanup(&queue->ring, NULL); + return -ENOMEM; ++ } + } else { + INIT_WORK(&queue->work, function); + } diff --git a/target/linux/generic/backport-5.4/080-wireguard-0096-wireguard-receive-use-tunnel-helpers-for-decapsulati.patch b/target/linux/generic/backport-5.4/080-wireguard-0096-wireguard-receive-use-tunnel-helpers-for-decapsulati.patch new file mode 100644 index 0000000000..a72358c302 --- /dev/null +++ b/target/linux/generic/backport-5.4/080-wireguard-0096-wireguard-receive-use-tunnel-helpers-for-decapsulati.patch @@ -0,0 +1,50 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Toke=20H=C3=B8iland-J=C3=B8rgensen?= +Date: Wed, 29 Apr 2020 14:59:22 -0600 +Subject: [PATCH] wireguard: receive: use tunnel helpers for decapsulating ECN + markings +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +commit eebabcb26ea1e3295704477c6cd4e772c96a9559 upstream. + +WireGuard currently only propagates ECN markings on tunnel decap according +to the old RFC3168 specification. 
However, the spec has since been updated +in RFC6040 to recommend slightly different decapsulation semantics. This +was implemented in the kernel as a set of common helpers for ECN +decapsulation, so let's just switch over WireGuard to using those, so it +can benefit from this enhancement and any future tweaks. We do not drop +packets with invalid ECN marking combinations, because WireGuard is +frequently used to work around broken ISPs, which could be doing that. + +Fixes: e7096c131e51 ("net: WireGuard secure network tunnel") +Reported-by: Olivier Tilmans +Cc: Dave Taht +Cc: Rodney W. Grimes +Signed-off-by: Toke Høiland-Jørgensen +Signed-off-by: Jason A. Donenfeld +Signed-off-by: David S. Miller +Signed-off-by: Jason A. Donenfeld +--- + drivers/net/wireguard/receive.c | 6 ++---- + 1 file changed, 2 insertions(+), 4 deletions(-) + +--- a/drivers/net/wireguard/receive.c ++++ b/drivers/net/wireguard/receive.c +@@ -393,13 +393,11 @@ static void wg_packet_consume_data_done( + len = ntohs(ip_hdr(skb)->tot_len); + if (unlikely(len < sizeof(struct iphdr))) + goto dishonest_packet_size; +- if (INET_ECN_is_ce(PACKET_CB(skb)->ds)) +- IP_ECN_set_ce(ip_hdr(skb)); ++ INET_ECN_decapsulate(skb, PACKET_CB(skb)->ds, ip_hdr(skb)->tos); + } else if (skb->protocol == htons(ETH_P_IPV6)) { + len = ntohs(ipv6_hdr(skb)->payload_len) + + sizeof(struct ipv6hdr); +- if (INET_ECN_is_ce(PACKET_CB(skb)->ds)) +- IP6_ECN_set_ce(skb, ipv6_hdr(skb)); ++ INET_ECN_decapsulate(skb, PACKET_CB(skb)->ds, ipv6_get_dsfield(ipv6_hdr(skb))); + } else { + goto dishonest_packet_type; + } diff --git a/target/linux/generic/backport-5.4/080-wireguard-0096-wireguard-send-remove-errant-newline-from-packet_enc.patch b/target/linux/generic/backport-5.4/080-wireguard-0096-wireguard-send-remove-errant-newline-from-packet_enc.patch deleted file mode 100644 index c92b6a784a..0000000000 --- a/target/linux/generic/backport-5.4/080-wireguard-0096-wireguard-send-remove-errant-newline-from-packet_enc.patch +++ /dev/null @@ -1,29 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Sultan Alsawaf -Date: Wed, 29 Apr 2020 14:59:20 -0600 -Subject: [PATCH] wireguard: send: remove errant newline from - packet_encrypt_worker - -commit d6833e42786e050e7522d6a91a9361e54085897d upstream. - -This commit removes a useless newline at the end of a scope, which -doesn't add anything in the way of organization or readability. - -Signed-off-by: Sultan Alsawaf -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. Donenfeld ---- - drivers/net/wireguard/send.c | 1 - - 1 file changed, 1 deletion(-) - ---- a/drivers/net/wireguard/send.c -+++ b/drivers/net/wireguard/send.c -@@ -304,7 +304,6 @@ void wg_packet_encrypt_worker(struct wor - } - wg_queue_enqueue_per_peer(&PACKET_PEER(first)->tx_queue, first, - state); -- - } - } - diff --git a/target/linux/generic/backport-5.4/080-wireguard-0097-wireguard-queueing-cleanup-ptr_ring-in-error-path-of.patch b/target/linux/generic/backport-5.4/080-wireguard-0097-wireguard-queueing-cleanup-ptr_ring-in-error-path-of.patch deleted file mode 100644 index a72c509894..0000000000 --- a/target/linux/generic/backport-5.4/080-wireguard-0097-wireguard-queueing-cleanup-ptr_ring-in-error-path-of.patch +++ /dev/null @@ -1,35 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. 
Donenfeld" -Date: Wed, 29 Apr 2020 14:59:21 -0600 -Subject: [PATCH] wireguard: queueing: cleanup ptr_ring in error path of - packet_queue_init - -commit 130c58606171326c81841a49cc913cd354113dd9 upstream. - -Prior, if the alloc_percpu of packet_percpu_multicore_worker_alloc -failed, the previously allocated ptr_ring wouldn't be freed. This commit -adds the missing call to ptr_ring_cleanup in the error case. - -Reported-by: Sultan Alsawaf -Fixes: e7096c131e51 ("net: WireGuard secure network tunnel") -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. Donenfeld ---- - drivers/net/wireguard/queueing.c | 4 +++- - 1 file changed, 3 insertions(+), 1 deletion(-) - ---- a/drivers/net/wireguard/queueing.c -+++ b/drivers/net/wireguard/queueing.c -@@ -35,8 +35,10 @@ int wg_packet_queue_init(struct crypt_qu - if (multicore) { - queue->worker = wg_packet_percpu_multicore_worker_alloc( - function, queue); -- if (!queue->worker) -+ if (!queue->worker) { -+ ptr_ring_cleanup(&queue->ring, NULL); - return -ENOMEM; -+ } - } else { - INIT_WORK(&queue->work, function); - } diff --git a/target/linux/generic/backport-5.4/080-wireguard-0097-wireguard-selftests-use-normal-kernel-stack-size-on-.patch b/target/linux/generic/backport-5.4/080-wireguard-0097-wireguard-selftests-use-normal-kernel-stack-size-on-.patch new file mode 100644 index 0000000000..f4543d2568 --- /dev/null +++ b/target/linux/generic/backport-5.4/080-wireguard-0097-wireguard-selftests-use-normal-kernel-stack-size-on-.patch @@ -0,0 +1,28 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: "Jason A. Donenfeld" +Date: Wed, 6 May 2020 15:33:02 -0600 +Subject: [PATCH] wireguard: selftests: use normal kernel stack size on ppc64 + +commit a0fd7cc87a018df1a17f9d3f0bd994c1f22c6b34 upstream. + +While at some point it might have made sense to be running these tests +on ppc64 with 4k stacks, the kernel hasn't actually used 4k stacks on +64-bit powerpc in a long time, and more interesting things that we test +don't really work when we deviate from the default (16k). So, we stop +pushing our luck in this commit, and return to the default instead of +the minimum. + +Signed-off-by: Jason A. Donenfeld +Signed-off-by: David S. Miller +Signed-off-by: Jason A. 
Donenfeld +--- + tools/testing/selftests/wireguard/qemu/arch/powerpc64le.config | 1 + + 1 file changed, 1 insertion(+) + +--- a/tools/testing/selftests/wireguard/qemu/arch/powerpc64le.config ++++ b/tools/testing/selftests/wireguard/qemu/arch/powerpc64le.config +@@ -10,3 +10,4 @@ CONFIG_CMDLINE_BOOL=y + CONFIG_CMDLINE="console=hvc0 wg.success=hvc1" + CONFIG_SECTION_MISMATCH_WARN_ONLY=y + CONFIG_FRAME_WARN=1280 ++CONFIG_THREAD_SHIFT=14 diff --git a/target/linux/generic/backport-5.4/080-wireguard-0098-wireguard-receive-use-tunnel-helpers-for-decapsulati.patch b/target/linux/generic/backport-5.4/080-wireguard-0098-wireguard-receive-use-tunnel-helpers-for-decapsulati.patch deleted file mode 100644 index a72358c302..0000000000 --- a/target/linux/generic/backport-5.4/080-wireguard-0098-wireguard-receive-use-tunnel-helpers-for-decapsulati.patch +++ /dev/null @@ -1,50 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Toke=20H=C3=B8iland-J=C3=B8rgensen?= -Date: Wed, 29 Apr 2020 14:59:22 -0600 -Subject: [PATCH] wireguard: receive: use tunnel helpers for decapsulating ECN - markings -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -commit eebabcb26ea1e3295704477c6cd4e772c96a9559 upstream. - -WireGuard currently only propagates ECN markings on tunnel decap according -to the old RFC3168 specification. However, the spec has since been updated -in RFC6040 to recommend slightly different decapsulation semantics. This -was implemented in the kernel as a set of common helpers for ECN -decapsulation, so let's just switch over WireGuard to using those, so it -can benefit from this enhancement and any future tweaks. We do not drop -packets with invalid ECN marking combinations, because WireGuard is -frequently used to work around broken ISPs, which could be doing that. - -Fixes: e7096c131e51 ("net: WireGuard secure network tunnel") -Reported-by: Olivier Tilmans -Cc: Dave Taht -Cc: Rodney W. Grimes -Signed-off-by: Toke Høiland-Jørgensen -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. Donenfeld ---- - drivers/net/wireguard/receive.c | 6 ++---- - 1 file changed, 2 insertions(+), 4 deletions(-) - ---- a/drivers/net/wireguard/receive.c -+++ b/drivers/net/wireguard/receive.c -@@ -393,13 +393,11 @@ static void wg_packet_consume_data_done( - len = ntohs(ip_hdr(skb)->tot_len); - if (unlikely(len < sizeof(struct iphdr))) - goto dishonest_packet_size; -- if (INET_ECN_is_ce(PACKET_CB(skb)->ds)) -- IP_ECN_set_ce(ip_hdr(skb)); -+ INET_ECN_decapsulate(skb, PACKET_CB(skb)->ds, ip_hdr(skb)->tos); - } else if (skb->protocol == htons(ETH_P_IPV6)) { - len = ntohs(ipv6_hdr(skb)->payload_len) + - sizeof(struct ipv6hdr); -- if (INET_ECN_is_ce(PACKET_CB(skb)->ds)) -- IP6_ECN_set_ce(skb, ipv6_hdr(skb)); -+ INET_ECN_decapsulate(skb, PACKET_CB(skb)->ds, ipv6_get_dsfield(ipv6_hdr(skb))); - } else { - goto dishonest_packet_type; - } diff --git a/target/linux/generic/backport-5.4/080-wireguard-0098-wireguard-socket-remove-errant-restriction-on-loopin.patch b/target/linux/generic/backport-5.4/080-wireguard-0098-wireguard-socket-remove-errant-restriction-on-loopin.patch new file mode 100644 index 0000000000..6dafa4781b --- /dev/null +++ b/target/linux/generic/backport-5.4/080-wireguard-0098-wireguard-socket-remove-errant-restriction-on-loopin.patch @@ -0,0 +1,162 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: "Jason A. 
Donenfeld" +Date: Wed, 6 May 2020 15:33:03 -0600 +Subject: [PATCH] wireguard: socket: remove errant restriction on looping to + self + +commit b673e24aad36981f327a6570412ffa7754de8911 upstream. + +It's already possible to create two different interfaces and loop +packets between them. This has always been possible with tunnels in the +kernel, and isn't specific to wireguard. Therefore, the networking stack +already needs to deal with that. At the very least, the packet winds up +exceeding the MTU and is discarded at that point. So, since this is +already something that happens, there's no need to forbid the not very +exceptional case of routing a packet back to the same interface; this +loop is no different than others, and we shouldn't special case it, but +rather rely on generic handling of loops in general. This also makes it +easier to do interesting things with wireguard such as onion routing. + +At the same time, we add a selftest for this, ensuring that both onion +routing works and infinite routing loops do not crash the kernel. We +also add a test case for wireguard interfaces nesting packets and +sending traffic between each other, as well as the loop in this case +too. We make sure to send some throughput-heavy traffic for this use +case, to stress out any possible recursion issues with the locks around +workqueues. + +Fixes: e7096c131e51 ("net: WireGuard secure network tunnel") +Signed-off-by: Jason A. Donenfeld +Signed-off-by: David S. Miller +Signed-off-by: Jason A. Donenfeld +--- + drivers/net/wireguard/socket.c | 12 ----- + tools/testing/selftests/wireguard/netns.sh | 54 ++++++++++++++++++++-- + 2 files changed, 51 insertions(+), 15 deletions(-) + +--- a/drivers/net/wireguard/socket.c ++++ b/drivers/net/wireguard/socket.c +@@ -76,12 +76,6 @@ static int send4(struct wg_device *wg, s + net_dbg_ratelimited("%s: No route to %pISpfsc, error %d\n", + wg->dev->name, &endpoint->addr, ret); + goto err; +- } else if (unlikely(rt->dst.dev == skb->dev)) { +- ip_rt_put(rt); +- ret = -ELOOP; +- net_dbg_ratelimited("%s: Avoiding routing loop to %pISpfsc\n", +- wg->dev->name, &endpoint->addr); +- goto err; + } + if (cache) + dst_cache_set_ip4(cache, &rt->dst, fl.saddr); +@@ -149,12 +143,6 @@ static int send6(struct wg_device *wg, s + net_dbg_ratelimited("%s: No route to %pISpfsc, error %d\n", + wg->dev->name, &endpoint->addr, ret); + goto err; +- } else if (unlikely(dst->dev == skb->dev)) { +- dst_release(dst); +- ret = -ELOOP; +- net_dbg_ratelimited("%s: Avoiding routing loop to %pISpfsc\n", +- wg->dev->name, &endpoint->addr); +- goto err; + } + if (cache) + dst_cache_set_ip6(cache, dst, &fl.saddr); +--- a/tools/testing/selftests/wireguard/netns.sh ++++ b/tools/testing/selftests/wireguard/netns.sh +@@ -48,8 +48,11 @@ cleanup() { + exec 2>/dev/null + printf "$orig_message_cost" > /proc/sys/net/core/message_cost + ip0 link del dev wg0 ++ ip0 link del dev wg1 + ip1 link del dev wg0 ++ ip1 link del dev wg1 + ip2 link del dev wg0 ++ ip2 link del dev wg1 + local to_kill="$(ip netns pids $netns0) $(ip netns pids $netns1) $(ip netns pids $netns2)" + [[ -n $to_kill ]] && kill $to_kill + pp ip netns del $netns1 +@@ -77,18 +80,20 @@ ip0 link set wg0 netns $netns2 + key1="$(pp wg genkey)" + key2="$(pp wg genkey)" + key3="$(pp wg genkey)" ++key4="$(pp wg genkey)" + pub1="$(pp wg pubkey <<<"$key1")" + pub2="$(pp wg pubkey <<<"$key2")" + pub3="$(pp wg pubkey <<<"$key3")" ++pub4="$(pp wg pubkey <<<"$key4")" + psk="$(pp wg genpsk)" + [[ -n $key1 && -n $key2 && -n $psk ]] + + configure_peers() { + ip1 addr 
add 192.168.241.1/24 dev wg0 +- ip1 addr add fd00::1/24 dev wg0 ++ ip1 addr add fd00::1/112 dev wg0 + + ip2 addr add 192.168.241.2/24 dev wg0 +- ip2 addr add fd00::2/24 dev wg0 ++ ip2 addr add fd00::2/112 dev wg0 + + n1 wg set wg0 \ + private-key <(echo "$key1") \ +@@ -230,9 +235,38 @@ n1 ping -W 1 -c 1 192.168.241.2 + n1 wg set wg0 private-key <(echo "$key3") + n2 wg set wg0 peer "$pub3" preshared-key <(echo "$psk") allowed-ips 192.168.241.1/32 peer "$pub1" remove + n1 ping -W 1 -c 1 192.168.241.2 ++n2 wg set wg0 peer "$pub3" remove + +-ip1 link del wg0 ++# Test that we can route wg through wg ++ip1 addr flush dev wg0 ++ip2 addr flush dev wg0 ++ip1 addr add fd00::5:1/112 dev wg0 ++ip2 addr add fd00::5:2/112 dev wg0 ++n1 wg set wg0 private-key <(echo "$key1") peer "$pub2" preshared-key <(echo "$psk") allowed-ips fd00::5:2/128 endpoint 127.0.0.1:2 ++n2 wg set wg0 private-key <(echo "$key2") listen-port 2 peer "$pub1" preshared-key <(echo "$psk") allowed-ips fd00::5:1/128 endpoint 127.212.121.99:9998 ++ip1 link add wg1 type wireguard ++ip2 link add wg1 type wireguard ++ip1 addr add 192.168.241.1/24 dev wg1 ++ip1 addr add fd00::1/112 dev wg1 ++ip2 addr add 192.168.241.2/24 dev wg1 ++ip2 addr add fd00::2/112 dev wg1 ++ip1 link set mtu 1340 up dev wg1 ++ip2 link set mtu 1340 up dev wg1 ++n1 wg set wg1 listen-port 5 private-key <(echo "$key3") peer "$pub4" allowed-ips 192.168.241.2/32,fd00::2/128 endpoint [fd00::5:2]:5 ++n2 wg set wg1 listen-port 5 private-key <(echo "$key4") peer "$pub3" allowed-ips 192.168.241.1/32,fd00::1/128 endpoint [fd00::5:1]:5 ++tests ++# Try to set up a routing loop between the two namespaces ++ip1 link set netns $netns0 dev wg1 ++ip0 addr add 192.168.241.1/24 dev wg1 ++ip0 link set up dev wg1 ++n0 ping -W 1 -c 1 192.168.241.2 ++n1 wg set wg0 peer "$pub2" endpoint 192.168.241.2:7 + ip2 link del wg0 ++ip2 link del wg1 ++! n0 ping -W 1 -c 10 -f 192.168.241.2 || false # Should not crash kernel ++ ++ip0 link del wg1 ++ip1 link del wg0 + + # Test using NAT. We now change the topology to this: + # ┌────────────────────────────────────────┐ ┌────────────────────────────────────────────────┐ ┌────────────────────────────────────────┐ +@@ -282,6 +316,20 @@ pp sleep 3 + n2 ping -W 1 -c 1 192.168.241.1 + n1 wg set wg0 peer "$pub2" persistent-keepalive 0 + ++# Test that onion routing works, even when it loops ++n1 wg set wg0 peer "$pub3" allowed-ips 192.168.242.2/32 endpoint 192.168.241.2:5 ++ip1 addr add 192.168.242.1/24 dev wg0 ++ip2 link add wg1 type wireguard ++ip2 addr add 192.168.242.2/24 dev wg1 ++n2 wg set wg1 private-key <(echo "$key3") listen-port 5 peer "$pub1" allowed-ips 192.168.242.1/32 ++ip2 link set wg1 up ++n1 ping -W 1 -c 1 192.168.242.2 ++ip2 link del wg1 ++n1 wg set wg0 peer "$pub3" endpoint 192.168.242.2:5 ++! n1 ping -W 1 -c 1 192.168.242.2 || false # Should not crash kernel ++n1 wg set wg0 peer "$pub3" remove ++ip1 addr del 192.168.242.1/24 dev wg0 ++ + # Do a wg-quick(8)-style policy routing for the default route, making sure vethc has a v6 address to tease out bugs. 
+ ip1 -6 addr add fc00::9/96 dev vethc + ip1 -6 route add default via fc00::1 diff --git a/target/linux/generic/backport-5.4/080-wireguard-0099-wireguard-selftests-use-normal-kernel-stack-size-on-.patch b/target/linux/generic/backport-5.4/080-wireguard-0099-wireguard-selftests-use-normal-kernel-stack-size-on-.patch deleted file mode 100644 index f4543d2568..0000000000 --- a/target/linux/generic/backport-5.4/080-wireguard-0099-wireguard-selftests-use-normal-kernel-stack-size-on-.patch +++ /dev/null @@ -1,28 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Wed, 6 May 2020 15:33:02 -0600 -Subject: [PATCH] wireguard: selftests: use normal kernel stack size on ppc64 - -commit a0fd7cc87a018df1a17f9d3f0bd994c1f22c6b34 upstream. - -While at some point it might have made sense to be running these tests -on ppc64 with 4k stacks, the kernel hasn't actually used 4k stacks on -64-bit powerpc in a long time, and more interesting things that we test -don't really work when we deviate from the default (16k). So, we stop -pushing our luck in this commit, and return to the default instead of -the minimum. - -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. Donenfeld ---- - tools/testing/selftests/wireguard/qemu/arch/powerpc64le.config | 1 + - 1 file changed, 1 insertion(+) - ---- a/tools/testing/selftests/wireguard/qemu/arch/powerpc64le.config -+++ b/tools/testing/selftests/wireguard/qemu/arch/powerpc64le.config -@@ -10,3 +10,4 @@ CONFIG_CMDLINE_BOOL=y - CONFIG_CMDLINE="console=hvc0 wg.success=hvc1" - CONFIG_SECTION_MISMATCH_WARN_ONLY=y - CONFIG_FRAME_WARN=1280 -+CONFIG_THREAD_SHIFT=14 diff --git a/target/linux/generic/backport-5.4/080-wireguard-0099-wireguard-send-receive-cond_resched-when-processing-.patch b/target/linux/generic/backport-5.4/080-wireguard-0099-wireguard-send-receive-cond_resched-when-processing-.patch new file mode 100644 index 0000000000..499b36bc5f --- /dev/null +++ b/target/linux/generic/backport-5.4/080-wireguard-0099-wireguard-send-receive-cond_resched-when-processing-.patch @@ -0,0 +1,58 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: "Jason A. Donenfeld" +Date: Wed, 6 May 2020 15:33:04 -0600 +Subject: [PATCH] wireguard: send/receive: cond_resched() when processing + worker ringbuffers + +commit 4005f5c3c9d006157ba716594e0d70c88a235c5e upstream. + +Users with pathological hardware reported CPU stalls on CONFIG_ +PREEMPT_VOLUNTARY=y, because the ringbuffers would stay full, meaning +these workers would never terminate. That turned out not to be okay on +systems without forced preemption, which Sultan observed. This commit +adds a cond_resched() to the bottom of each loop iteration, so that +these workers don't hog the core. Note that we don't need this on the +napi poll worker, since that terminates after its budget is expended. + +Suggested-by: Sultan Alsawaf +Reported-by: Wang Jian +Fixes: e7096c131e51 ("net: WireGuard secure network tunnel") +Signed-off-by: Jason A. Donenfeld +Signed-off-by: David S. Miller +Signed-off-by: Jason A. Donenfeld +--- + drivers/net/wireguard/receive.c | 2 ++ + drivers/net/wireguard/send.c | 4 ++++ + 2 files changed, 6 insertions(+) + +--- a/drivers/net/wireguard/receive.c ++++ b/drivers/net/wireguard/receive.c +@@ -516,6 +516,8 @@ void wg_packet_decrypt_worker(struct wor + &PACKET_CB(skb)->keypair->receiving)) ? 
+ PACKET_STATE_CRYPTED : PACKET_STATE_DEAD; + wg_queue_enqueue_per_peer_napi(skb, state); ++ if (need_resched()) ++ cond_resched(); + } + } + +--- a/drivers/net/wireguard/send.c ++++ b/drivers/net/wireguard/send.c +@@ -281,6 +281,8 @@ void wg_packet_tx_worker(struct work_str + + wg_noise_keypair_put(keypair, false); + wg_peer_put(peer); ++ if (need_resched()) ++ cond_resched(); + } + } + +@@ -304,6 +306,8 @@ void wg_packet_encrypt_worker(struct wor + } + wg_queue_enqueue_per_peer(&PACKET_PEER(first)->tx_queue, first, + state); ++ if (need_resched()) ++ cond_resched(); + } + } + diff --git a/target/linux/generic/backport-5.4/080-wireguard-0100-wireguard-selftests-initalize-ipv6-members-to-NULL-t.patch b/target/linux/generic/backport-5.4/080-wireguard-0100-wireguard-selftests-initalize-ipv6-members-to-NULL-t.patch new file mode 100644 index 0000000000..c1124be5ca --- /dev/null +++ b/target/linux/generic/backport-5.4/080-wireguard-0100-wireguard-selftests-initalize-ipv6-members-to-NULL-t.patch @@ -0,0 +1,51 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: "Jason A. Donenfeld" +Date: Wed, 6 May 2020 15:33:05 -0600 +Subject: [PATCH] wireguard: selftests: initalize ipv6 members to NULL to + squelch clang warning + +commit 4fed818ef54b08d4b29200e416cce65546ad5312 upstream. + +Without setting these to NULL, clang complains in certain +configurations that have CONFIG_IPV6=n: + +In file included from drivers/net/wireguard/ratelimiter.c:223: +drivers/net/wireguard/selftest/ratelimiter.c:173:34: error: variable 'skb6' is uninitialized when used here [-Werror,-Wuninitialized] + ret = timings_test(skb4, hdr4, skb6, hdr6, &test_count); + ^~~~ +drivers/net/wireguard/selftest/ratelimiter.c:123:29: note: initialize the variable 'skb6' to silence this warning + struct sk_buff *skb4, *skb6; + ^ + = NULL +drivers/net/wireguard/selftest/ratelimiter.c:173:40: error: variable 'hdr6' is uninitialized when used here [-Werror,-Wuninitialized] + ret = timings_test(skb4, hdr4, skb6, hdr6, &test_count); + ^~~~ +drivers/net/wireguard/selftest/ratelimiter.c:125:22: note: initialize the variable 'hdr6' to silence this warning + struct ipv6hdr *hdr6; + ^ + +We silence this warning by setting the variables to NULL as the warning +suggests. + +Reported-by: Arnd Bergmann +Signed-off-by: Jason A. Donenfeld +Signed-off-by: David S. Miller +Signed-off-by: Jason A. Donenfeld +--- + drivers/net/wireguard/selftest/ratelimiter.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +--- a/drivers/net/wireguard/selftest/ratelimiter.c ++++ b/drivers/net/wireguard/selftest/ratelimiter.c +@@ -120,9 +120,9 @@ bool __init wg_ratelimiter_selftest(void + enum { TRIALS_BEFORE_GIVING_UP = 5000 }; + bool success = false; + int test = 0, trials; +- struct sk_buff *skb4, *skb6; ++ struct sk_buff *skb4, *skb6 = NULL; + struct iphdr *hdr4; +- struct ipv6hdr *hdr6; ++ struct ipv6hdr *hdr6 = NULL; + + if (IS_ENABLED(CONFIG_KASAN) || IS_ENABLED(CONFIG_UBSAN)) + return true; diff --git a/target/linux/generic/backport-5.4/080-wireguard-0100-wireguard-socket-remove-errant-restriction-on-loopin.patch b/target/linux/generic/backport-5.4/080-wireguard-0100-wireguard-socket-remove-errant-restriction-on-loopin.patch deleted file mode 100644 index 6dafa4781b..0000000000 --- a/target/linux/generic/backport-5.4/080-wireguard-0100-wireguard-socket-remove-errant-restriction-on-loopin.patch +++ /dev/null @@ -1,162 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. 
Donenfeld" -Date: Wed, 6 May 2020 15:33:03 -0600 -Subject: [PATCH] wireguard: socket: remove errant restriction on looping to - self - -commit b673e24aad36981f327a6570412ffa7754de8911 upstream. - -It's already possible to create two different interfaces and loop -packets between them. This has always been possible with tunnels in the -kernel, and isn't specific to wireguard. Therefore, the networking stack -already needs to deal with that. At the very least, the packet winds up -exceeding the MTU and is discarded at that point. So, since this is -already something that happens, there's no need to forbid the not very -exceptional case of routing a packet back to the same interface; this -loop is no different than others, and we shouldn't special case it, but -rather rely on generic handling of loops in general. This also makes it -easier to do interesting things with wireguard such as onion routing. - -At the same time, we add a selftest for this, ensuring that both onion -routing works and infinite routing loops do not crash the kernel. We -also add a test case for wireguard interfaces nesting packets and -sending traffic between each other, as well as the loop in this case -too. We make sure to send some throughput-heavy traffic for this use -case, to stress out any possible recursion issues with the locks around -workqueues. - -Fixes: e7096c131e51 ("net: WireGuard secure network tunnel") -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. Donenfeld ---- - drivers/net/wireguard/socket.c | 12 ----- - tools/testing/selftests/wireguard/netns.sh | 54 ++++++++++++++++++++-- - 2 files changed, 51 insertions(+), 15 deletions(-) - ---- a/drivers/net/wireguard/socket.c -+++ b/drivers/net/wireguard/socket.c -@@ -76,12 +76,6 @@ static int send4(struct wg_device *wg, s - net_dbg_ratelimited("%s: No route to %pISpfsc, error %d\n", - wg->dev->name, &endpoint->addr, ret); - goto err; -- } else if (unlikely(rt->dst.dev == skb->dev)) { -- ip_rt_put(rt); -- ret = -ELOOP; -- net_dbg_ratelimited("%s: Avoiding routing loop to %pISpfsc\n", -- wg->dev->name, &endpoint->addr); -- goto err; - } - if (cache) - dst_cache_set_ip4(cache, &rt->dst, fl.saddr); -@@ -149,12 +143,6 @@ static int send6(struct wg_device *wg, s - net_dbg_ratelimited("%s: No route to %pISpfsc, error %d\n", - wg->dev->name, &endpoint->addr, ret); - goto err; -- } else if (unlikely(dst->dev == skb->dev)) { -- dst_release(dst); -- ret = -ELOOP; -- net_dbg_ratelimited("%s: Avoiding routing loop to %pISpfsc\n", -- wg->dev->name, &endpoint->addr); -- goto err; - } - if (cache) - dst_cache_set_ip6(cache, dst, &fl.saddr); ---- a/tools/testing/selftests/wireguard/netns.sh -+++ b/tools/testing/selftests/wireguard/netns.sh -@@ -48,8 +48,11 @@ cleanup() { - exec 2>/dev/null - printf "$orig_message_cost" > /proc/sys/net/core/message_cost - ip0 link del dev wg0 -+ ip0 link del dev wg1 - ip1 link del dev wg0 -+ ip1 link del dev wg1 - ip2 link del dev wg0 -+ ip2 link del dev wg1 - local to_kill="$(ip netns pids $netns0) $(ip netns pids $netns1) $(ip netns pids $netns2)" - [[ -n $to_kill ]] && kill $to_kill - pp ip netns del $netns1 -@@ -77,18 +80,20 @@ ip0 link set wg0 netns $netns2 - key1="$(pp wg genkey)" - key2="$(pp wg genkey)" - key3="$(pp wg genkey)" -+key4="$(pp wg genkey)" - pub1="$(pp wg pubkey <<<"$key1")" - pub2="$(pp wg pubkey <<<"$key2")" - pub3="$(pp wg pubkey <<<"$key3")" -+pub4="$(pp wg pubkey <<<"$key4")" - psk="$(pp wg genpsk)" - [[ -n $key1 && -n $key2 && -n $psk ]] - - configure_peers() { - ip1 addr 
add 192.168.241.1/24 dev wg0 -- ip1 addr add fd00::1/24 dev wg0 -+ ip1 addr add fd00::1/112 dev wg0 - - ip2 addr add 192.168.241.2/24 dev wg0 -- ip2 addr add fd00::2/24 dev wg0 -+ ip2 addr add fd00::2/112 dev wg0 - - n1 wg set wg0 \ - private-key <(echo "$key1") \ -@@ -230,9 +235,38 @@ n1 ping -W 1 -c 1 192.168.241.2 - n1 wg set wg0 private-key <(echo "$key3") - n2 wg set wg0 peer "$pub3" preshared-key <(echo "$psk") allowed-ips 192.168.241.1/32 peer "$pub1" remove - n1 ping -W 1 -c 1 192.168.241.2 -+n2 wg set wg0 peer "$pub3" remove - --ip1 link del wg0 -+# Test that we can route wg through wg -+ip1 addr flush dev wg0 -+ip2 addr flush dev wg0 -+ip1 addr add fd00::5:1/112 dev wg0 -+ip2 addr add fd00::5:2/112 dev wg0 -+n1 wg set wg0 private-key <(echo "$key1") peer "$pub2" preshared-key <(echo "$psk") allowed-ips fd00::5:2/128 endpoint 127.0.0.1:2 -+n2 wg set wg0 private-key <(echo "$key2") listen-port 2 peer "$pub1" preshared-key <(echo "$psk") allowed-ips fd00::5:1/128 endpoint 127.212.121.99:9998 -+ip1 link add wg1 type wireguard -+ip2 link add wg1 type wireguard -+ip1 addr add 192.168.241.1/24 dev wg1 -+ip1 addr add fd00::1/112 dev wg1 -+ip2 addr add 192.168.241.2/24 dev wg1 -+ip2 addr add fd00::2/112 dev wg1 -+ip1 link set mtu 1340 up dev wg1 -+ip2 link set mtu 1340 up dev wg1 -+n1 wg set wg1 listen-port 5 private-key <(echo "$key3") peer "$pub4" allowed-ips 192.168.241.2/32,fd00::2/128 endpoint [fd00::5:2]:5 -+n2 wg set wg1 listen-port 5 private-key <(echo "$key4") peer "$pub3" allowed-ips 192.168.241.1/32,fd00::1/128 endpoint [fd00::5:1]:5 -+tests -+# Try to set up a routing loop between the two namespaces -+ip1 link set netns $netns0 dev wg1 -+ip0 addr add 192.168.241.1/24 dev wg1 -+ip0 link set up dev wg1 -+n0 ping -W 1 -c 1 192.168.241.2 -+n1 wg set wg0 peer "$pub2" endpoint 192.168.241.2:7 - ip2 link del wg0 -+ip2 link del wg1 -+! n0 ping -W 1 -c 10 -f 192.168.241.2 || false # Should not crash kernel -+ -+ip0 link del wg1 -+ip1 link del wg0 - - # Test using NAT. We now change the topology to this: - # ┌────────────────────────────────────────┐ ┌────────────────────────────────────────────────┐ ┌────────────────────────────────────────┐ -@@ -282,6 +316,20 @@ pp sleep 3 - n2 ping -W 1 -c 1 192.168.241.1 - n1 wg set wg0 peer "$pub2" persistent-keepalive 0 - -+# Test that onion routing works, even when it loops -+n1 wg set wg0 peer "$pub3" allowed-ips 192.168.242.2/32 endpoint 192.168.241.2:5 -+ip1 addr add 192.168.242.1/24 dev wg0 -+ip2 link add wg1 type wireguard -+ip2 addr add 192.168.242.2/24 dev wg1 -+n2 wg set wg1 private-key <(echo "$key3") listen-port 5 peer "$pub1" allowed-ips 192.168.242.1/32 -+ip2 link set wg1 up -+n1 ping -W 1 -c 1 192.168.242.2 -+ip2 link del wg1 -+n1 wg set wg0 peer "$pub3" endpoint 192.168.242.2:5 -+! n1 ping -W 1 -c 1 192.168.242.2 || false # Should not crash kernel -+n1 wg set wg0 peer "$pub3" remove -+ip1 addr del 192.168.242.1/24 dev wg0 -+ - # Do a wg-quick(8)-style policy routing for the default route, making sure vethc has a v6 address to tease out bugs. 
- ip1 -6 addr add fc00::9/96 dev vethc - ip1 -6 route add default via fc00::1 diff --git a/target/linux/generic/backport-5.4/080-wireguard-0101-wireguard-send-receive-cond_resched-when-processing-.patch b/target/linux/generic/backport-5.4/080-wireguard-0101-wireguard-send-receive-cond_resched-when-processing-.patch deleted file mode 100644 index 499b36bc5f..0000000000 --- a/target/linux/generic/backport-5.4/080-wireguard-0101-wireguard-send-receive-cond_resched-when-processing-.patch +++ /dev/null @@ -1,58 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Wed, 6 May 2020 15:33:04 -0600 -Subject: [PATCH] wireguard: send/receive: cond_resched() when processing - worker ringbuffers - -commit 4005f5c3c9d006157ba716594e0d70c88a235c5e upstream. - -Users with pathological hardware reported CPU stalls on CONFIG_ -PREEMPT_VOLUNTARY=y, because the ringbuffers would stay full, meaning -these workers would never terminate. That turned out not to be okay on -systems without forced preemption, which Sultan observed. This commit -adds a cond_resched() to the bottom of each loop iteration, so that -these workers don't hog the core. Note that we don't need this on the -napi poll worker, since that terminates after its budget is expended. - -Suggested-by: Sultan Alsawaf -Reported-by: Wang Jian -Fixes: e7096c131e51 ("net: WireGuard secure network tunnel") -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. Donenfeld ---- - drivers/net/wireguard/receive.c | 2 ++ - drivers/net/wireguard/send.c | 4 ++++ - 2 files changed, 6 insertions(+) - ---- a/drivers/net/wireguard/receive.c -+++ b/drivers/net/wireguard/receive.c -@@ -516,6 +516,8 @@ void wg_packet_decrypt_worker(struct wor - &PACKET_CB(skb)->keypair->receiving)) ? - PACKET_STATE_CRYPTED : PACKET_STATE_DEAD; - wg_queue_enqueue_per_peer_napi(skb, state); -+ if (need_resched()) -+ cond_resched(); - } - } - ---- a/drivers/net/wireguard/send.c -+++ b/drivers/net/wireguard/send.c -@@ -281,6 +281,8 @@ void wg_packet_tx_worker(struct work_str - - wg_noise_keypair_put(keypair, false); - wg_peer_put(peer); -+ if (need_resched()) -+ cond_resched(); - } - } - -@@ -304,6 +306,8 @@ void wg_packet_encrypt_worker(struct wor - } - wg_queue_enqueue_per_peer(&PACKET_PEER(first)->tx_queue, first, - state); -+ if (need_resched()) -+ cond_resched(); - } - } - diff --git a/target/linux/generic/backport-5.4/080-wireguard-0101-wireguard-send-receive-use-explicit-unlikely-branch-.patch b/target/linux/generic/backport-5.4/080-wireguard-0101-wireguard-send-receive-use-explicit-unlikely-branch-.patch new file mode 100644 index 0000000000..900e2f2350 --- /dev/null +++ b/target/linux/generic/backport-5.4/080-wireguard-0101-wireguard-send-receive-use-explicit-unlikely-branch-.patch @@ -0,0 +1,88 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: "Jason A. Donenfeld" +Date: Wed, 6 May 2020 15:33:06 -0600 +Subject: [PATCH] wireguard: send/receive: use explicit unlikely branch instead + of implicit coalescing + +commit 243f2148937adc72bcaaa590d482d599c936efde upstream. + +It's very unlikely that send will become true. It's nearly always false +between 0 and 120 seconds of a session, and in most cases becomes true +only between 120 and 121 seconds before becoming false again. So, +unlikely(send) is clearly the right option here. + +What happened before was that we had this complex boolean expression +with multiple likely and unlikely clauses nested. 
Since this is +evaluated left-to-right anyway, the whole thing got converted to +unlikely. So, we can clean this up to better represent what's going on. + +The generated code is the same. + +Suggested-by: Sultan Alsawaf +Signed-off-by: Jason A. Donenfeld +Signed-off-by: David S. Miller +Signed-off-by: Jason A. Donenfeld +--- + drivers/net/wireguard/receive.c | 13 ++++++------- + drivers/net/wireguard/send.c | 15 ++++++--------- + 2 files changed, 12 insertions(+), 16 deletions(-) + +--- a/drivers/net/wireguard/receive.c ++++ b/drivers/net/wireguard/receive.c +@@ -226,21 +226,20 @@ void wg_packet_handshake_receive_worker( + static void keep_key_fresh(struct wg_peer *peer) + { + struct noise_keypair *keypair; +- bool send = false; ++ bool send; + + if (peer->sent_lastminute_handshake) + return; + + rcu_read_lock_bh(); + keypair = rcu_dereference_bh(peer->keypairs.current_keypair); +- if (likely(keypair && READ_ONCE(keypair->sending.is_valid)) && +- keypair->i_am_the_initiator && +- unlikely(wg_birthdate_has_expired(keypair->sending.birthdate, +- REJECT_AFTER_TIME - KEEPALIVE_TIMEOUT - REKEY_TIMEOUT))) +- send = true; ++ send = keypair && READ_ONCE(keypair->sending.is_valid) && ++ keypair->i_am_the_initiator && ++ wg_birthdate_has_expired(keypair->sending.birthdate, ++ REJECT_AFTER_TIME - KEEPALIVE_TIMEOUT - REKEY_TIMEOUT); + rcu_read_unlock_bh(); + +- if (send) { ++ if (unlikely(send)) { + peer->sent_lastminute_handshake = true; + wg_packet_send_queued_handshake_initiation(peer, false); + } +--- a/drivers/net/wireguard/send.c ++++ b/drivers/net/wireguard/send.c +@@ -124,20 +124,17 @@ void wg_packet_send_handshake_cookie(str + static void keep_key_fresh(struct wg_peer *peer) + { + struct noise_keypair *keypair; +- bool send = false; ++ bool send; + + rcu_read_lock_bh(); + keypair = rcu_dereference_bh(peer->keypairs.current_keypair); +- if (likely(keypair && READ_ONCE(keypair->sending.is_valid)) && +- (unlikely(atomic64_read(&keypair->sending.counter.counter) > +- REKEY_AFTER_MESSAGES) || +- (keypair->i_am_the_initiator && +- unlikely(wg_birthdate_has_expired(keypair->sending.birthdate, +- REKEY_AFTER_TIME))))) +- send = true; ++ send = keypair && READ_ONCE(keypair->sending.is_valid) && ++ (atomic64_read(&keypair->sending.counter.counter) > REKEY_AFTER_MESSAGES || ++ (keypair->i_am_the_initiator && ++ wg_birthdate_has_expired(keypair->sending.birthdate, REKEY_AFTER_TIME))); + rcu_read_unlock_bh(); + +- if (send) ++ if (unlikely(send)) + wg_packet_send_queued_handshake_initiation(peer, false); + } + diff --git a/target/linux/generic/backport-5.4/080-wireguard-0102-wireguard-selftests-initalize-ipv6-members-to-NULL-t.patch b/target/linux/generic/backport-5.4/080-wireguard-0102-wireguard-selftests-initalize-ipv6-members-to-NULL-t.patch deleted file mode 100644 index c1124be5ca..0000000000 --- a/target/linux/generic/backport-5.4/080-wireguard-0102-wireguard-selftests-initalize-ipv6-members-to-NULL-t.patch +++ /dev/null @@ -1,51 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Wed, 6 May 2020 15:33:05 -0600 -Subject: [PATCH] wireguard: selftests: initalize ipv6 members to NULL to - squelch clang warning - -commit 4fed818ef54b08d4b29200e416cce65546ad5312 upstream. 
- -Without setting these to NULL, clang complains in certain -configurations that have CONFIG_IPV6=n: - -In file included from drivers/net/wireguard/ratelimiter.c:223: -drivers/net/wireguard/selftest/ratelimiter.c:173:34: error: variable 'skb6' is uninitialized when used here [-Werror,-Wuninitialized] - ret = timings_test(skb4, hdr4, skb6, hdr6, &test_count); - ^~~~ -drivers/net/wireguard/selftest/ratelimiter.c:123:29: note: initialize the variable 'skb6' to silence this warning - struct sk_buff *skb4, *skb6; - ^ - = NULL -drivers/net/wireguard/selftest/ratelimiter.c:173:40: error: variable 'hdr6' is uninitialized when used here [-Werror,-Wuninitialized] - ret = timings_test(skb4, hdr4, skb6, hdr6, &test_count); - ^~~~ -drivers/net/wireguard/selftest/ratelimiter.c:125:22: note: initialize the variable 'hdr6' to silence this warning - struct ipv6hdr *hdr6; - ^ - -We silence this warning by setting the variables to NULL as the warning -suggests. - -Reported-by: Arnd Bergmann -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. Donenfeld ---- - drivers/net/wireguard/selftest/ratelimiter.c | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - ---- a/drivers/net/wireguard/selftest/ratelimiter.c -+++ b/drivers/net/wireguard/selftest/ratelimiter.c -@@ -120,9 +120,9 @@ bool __init wg_ratelimiter_selftest(void - enum { TRIALS_BEFORE_GIVING_UP = 5000 }; - bool success = false; - int test = 0, trials; -- struct sk_buff *skb4, *skb6; -+ struct sk_buff *skb4, *skb6 = NULL; - struct iphdr *hdr4; -- struct ipv6hdr *hdr6; -+ struct ipv6hdr *hdr6 = NULL; - - if (IS_ENABLED(CONFIG_KASAN) || IS_ENABLED(CONFIG_UBSAN)) - return true; diff --git a/target/linux/generic/backport-5.4/080-wireguard-0102-wireguard-selftests-use-newer-iproute2-for-gcc-10.patch b/target/linux/generic/backport-5.4/080-wireguard-0102-wireguard-selftests-use-newer-iproute2-for-gcc-10.patch new file mode 100644 index 0000000000..d4efe37a49 --- /dev/null +++ b/target/linux/generic/backport-5.4/080-wireguard-0102-wireguard-selftests-use-newer-iproute2-for-gcc-10.patch @@ -0,0 +1,31 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: "Jason A. Donenfeld" +Date: Tue, 19 May 2020 22:49:27 -0600 +Subject: [PATCH] wireguard: selftests: use newer iproute2 for gcc-10 + +commit ee3c1aa3f34b7842c1557cfe5d8c3f7b8c692de8 upstream. + +gcc-10 switched to defaulting to -fno-common, which broke iproute2-5.4. +This was fixed in iproute-5.6, so switch to that. Because we're after a +stable testing surface, we generally don't like to bump these +unnecessarily, but in this case, being able to actually build is a basic +necessity. + +Signed-off-by: Jason A. Donenfeld +Signed-off-by: David S. Miller +Signed-off-by: Jason A. 
Donenfeld +--- + tools/testing/selftests/wireguard/qemu/Makefile | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/tools/testing/selftests/wireguard/qemu/Makefile ++++ b/tools/testing/selftests/wireguard/qemu/Makefile +@@ -44,7 +44,7 @@ endef + $(eval $(call tar_download,MUSL,musl,1.1.24,.tar.gz,https://www.musl-libc.org/releases/,1370c9a812b2cf2a7d92802510cca0058cc37e66a7bedd70051f0a34015022a3)) + $(eval $(call tar_download,IPERF,iperf,3.7,.tar.gz,https://downloads.es.net/pub/iperf/,d846040224317caf2f75c843d309a950a7db23f9b44b94688ccbe557d6d1710c)) + $(eval $(call tar_download,BASH,bash,5.0,.tar.gz,https://ftp.gnu.org/gnu/bash/,b4a80f2ac66170b2913efbfb9f2594f1f76c7b1afd11f799e22035d63077fb4d)) +-$(eval $(call tar_download,IPROUTE2,iproute2,5.4.0,.tar.xz,https://www.kernel.org/pub/linux/utils/net/iproute2/,fe97aa60a0d4c5ac830be18937e18dc3400ca713a33a89ad896ff1e3d46086ae)) ++$(eval $(call tar_download,IPROUTE2,iproute2,5.6.0,.tar.xz,https://www.kernel.org/pub/linux/utils/net/iproute2/,1b5b0e25ce6e23da7526ea1da044e814ad85ba761b10dd29c2b027c056b04692)) + $(eval $(call tar_download,IPTABLES,iptables,1.8.4,.tar.bz2,https://www.netfilter.org/projects/iptables/files/,993a3a5490a544c2cbf2ef15cf7e7ed21af1845baf228318d5c36ef8827e157c)) + $(eval $(call tar_download,NMAP,nmap,7.80,.tar.bz2,https://nmap.org/dist/,fcfa5a0e42099e12e4bf7a68ebe6fde05553383a682e816a7ec9256ab4773faa)) + $(eval $(call tar_download,IPUTILS,iputils,s20190709,.tar.gz,https://github.com/iputils/iputils/archive/s20190709.tar.gz/#,a15720dd741d7538dd2645f9f516d193636ae4300ff7dbc8bfca757bf166490a)) diff --git a/target/linux/generic/backport-5.4/080-wireguard-0103-wireguard-noise-read-preshared-key-while-taking-lock.patch b/target/linux/generic/backport-5.4/080-wireguard-0103-wireguard-noise-read-preshared-key-while-taking-lock.patch new file mode 100644 index 0000000000..2dac4b7064 --- /dev/null +++ b/target/linux/generic/backport-5.4/080-wireguard-0103-wireguard-noise-read-preshared-key-while-taking-lock.patch @@ -0,0 +1,61 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: "Jason A. Donenfeld" +Date: Tue, 19 May 2020 22:49:28 -0600 +Subject: [PATCH] wireguard: noise: read preshared key while taking lock + +commit bc67d371256f5c47d824e2eec51e46c8d62d022e upstream. + +Prior we read the preshared key after dropping the handshake lock, which +isn't an actual crypto issue if it races, but it's still not quite +correct. So copy that part of the state into a temporary like we do with +the rest of the handshake state variables. Then we can release the lock, +operate on the temporary, and zero it out at the end of the function. In +performance tests, the impact of this was entirely unnoticable, probably +because those bytes are coming from the same cacheline as other things +that are being copied out in the same manner. + +Reported-by: Matt Dunwoodie +Fixes: e7096c131e51 ("net: WireGuard secure network tunnel") +Signed-off-by: Jason A. Donenfeld +Signed-off-by: David S. Miller +Signed-off-by: Jason A. 
Donenfeld +--- + drivers/net/wireguard/noise.c | 6 +++++- + 1 file changed, 5 insertions(+), 1 deletion(-) + +--- a/drivers/net/wireguard/noise.c ++++ b/drivers/net/wireguard/noise.c +@@ -715,6 +715,7 @@ wg_noise_handshake_consume_response(stru + u8 e[NOISE_PUBLIC_KEY_LEN]; + u8 ephemeral_private[NOISE_PUBLIC_KEY_LEN]; + u8 static_private[NOISE_PUBLIC_KEY_LEN]; ++ u8 preshared_key[NOISE_SYMMETRIC_KEY_LEN]; + + down_read(&wg->static_identity.lock); + +@@ -733,6 +734,8 @@ wg_noise_handshake_consume_response(stru + memcpy(chaining_key, handshake->chaining_key, NOISE_HASH_LEN); + memcpy(ephemeral_private, handshake->ephemeral_private, + NOISE_PUBLIC_KEY_LEN); ++ memcpy(preshared_key, handshake->preshared_key, ++ NOISE_SYMMETRIC_KEY_LEN); + up_read(&handshake->lock); + + if (state != HANDSHAKE_CREATED_INITIATION) +@@ -750,7 +753,7 @@ wg_noise_handshake_consume_response(stru + goto fail; + + /* psk */ +- mix_psk(chaining_key, hash, key, handshake->preshared_key); ++ mix_psk(chaining_key, hash, key, preshared_key); + + /* {} */ + if (!message_decrypt(NULL, src->encrypted_nothing, +@@ -783,6 +786,7 @@ out: + memzero_explicit(chaining_key, NOISE_HASH_LEN); + memzero_explicit(ephemeral_private, NOISE_PUBLIC_KEY_LEN); + memzero_explicit(static_private, NOISE_PUBLIC_KEY_LEN); ++ memzero_explicit(preshared_key, NOISE_SYMMETRIC_KEY_LEN); + up_read(&wg->static_identity.lock); + return ret_peer; + } diff --git a/target/linux/generic/backport-5.4/080-wireguard-0103-wireguard-send-receive-use-explicit-unlikely-branch-.patch b/target/linux/generic/backport-5.4/080-wireguard-0103-wireguard-send-receive-use-explicit-unlikely-branch-.patch deleted file mode 100644 index 900e2f2350..0000000000 --- a/target/linux/generic/backport-5.4/080-wireguard-0103-wireguard-send-receive-use-explicit-unlikely-branch-.patch +++ /dev/null @@ -1,88 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Wed, 6 May 2020 15:33:06 -0600 -Subject: [PATCH] wireguard: send/receive: use explicit unlikely branch instead - of implicit coalescing - -commit 243f2148937adc72bcaaa590d482d599c936efde upstream. - -It's very unlikely that send will become true. It's nearly always false -between 0 and 120 seconds of a session, and in most cases becomes true -only between 120 and 121 seconds before becoming false again. So, -unlikely(send) is clearly the right option here. - -What happened before was that we had this complex boolean expression -with multiple likely and unlikely clauses nested. Since this is -evaluated left-to-right anyway, the whole thing got converted to -unlikely. So, we can clean this up to better represent what's going on. - -The generated code is the same. - -Suggested-by: Sultan Alsawaf -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. 
Donenfeld ---- - drivers/net/wireguard/receive.c | 13 ++++++------- - drivers/net/wireguard/send.c | 15 ++++++--------- - 2 files changed, 12 insertions(+), 16 deletions(-) - ---- a/drivers/net/wireguard/receive.c -+++ b/drivers/net/wireguard/receive.c -@@ -226,21 +226,20 @@ void wg_packet_handshake_receive_worker( - static void keep_key_fresh(struct wg_peer *peer) - { - struct noise_keypair *keypair; -- bool send = false; -+ bool send; - - if (peer->sent_lastminute_handshake) - return; - - rcu_read_lock_bh(); - keypair = rcu_dereference_bh(peer->keypairs.current_keypair); -- if (likely(keypair && READ_ONCE(keypair->sending.is_valid)) && -- keypair->i_am_the_initiator && -- unlikely(wg_birthdate_has_expired(keypair->sending.birthdate, -- REJECT_AFTER_TIME - KEEPALIVE_TIMEOUT - REKEY_TIMEOUT))) -- send = true; -+ send = keypair && READ_ONCE(keypair->sending.is_valid) && -+ keypair->i_am_the_initiator && -+ wg_birthdate_has_expired(keypair->sending.birthdate, -+ REJECT_AFTER_TIME - KEEPALIVE_TIMEOUT - REKEY_TIMEOUT); - rcu_read_unlock_bh(); - -- if (send) { -+ if (unlikely(send)) { - peer->sent_lastminute_handshake = true; - wg_packet_send_queued_handshake_initiation(peer, false); - } ---- a/drivers/net/wireguard/send.c -+++ b/drivers/net/wireguard/send.c -@@ -124,20 +124,17 @@ void wg_packet_send_handshake_cookie(str - static void keep_key_fresh(struct wg_peer *peer) - { - struct noise_keypair *keypair; -- bool send = false; -+ bool send; - - rcu_read_lock_bh(); - keypair = rcu_dereference_bh(peer->keypairs.current_keypair); -- if (likely(keypair && READ_ONCE(keypair->sending.is_valid)) && -- (unlikely(atomic64_read(&keypair->sending.counter.counter) > -- REKEY_AFTER_MESSAGES) || -- (keypair->i_am_the_initiator && -- unlikely(wg_birthdate_has_expired(keypair->sending.birthdate, -- REKEY_AFTER_TIME))))) -- send = true; -+ send = keypair && READ_ONCE(keypair->sending.is_valid) && -+ (atomic64_read(&keypair->sending.counter.counter) > REKEY_AFTER_MESSAGES || -+ (keypair->i_am_the_initiator && -+ wg_birthdate_has_expired(keypair->sending.birthdate, REKEY_AFTER_TIME))); - rcu_read_unlock_bh(); - -- if (send) -+ if (unlikely(send)) - wg_packet_send_queued_handshake_initiation(peer, false); - } - diff --git a/target/linux/generic/backport-5.4/080-wireguard-0104-wireguard-queueing-preserve-flow-hash-across-packet-.patch b/target/linux/generic/backport-5.4/080-wireguard-0104-wireguard-queueing-preserve-flow-hash-across-packet-.patch new file mode 100644 index 0000000000..31deadbfc1 --- /dev/null +++ b/target/linux/generic/backport-5.4/080-wireguard-0104-wireguard-queueing-preserve-flow-hash-across-packet-.patch @@ -0,0 +1,116 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: "Jason A. Donenfeld" +Date: Tue, 19 May 2020 22:49:29 -0600 +Subject: [PATCH] wireguard: queueing: preserve flow hash across packet + scrubbing +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +commit c78a0b4a78839d572d8a80f6a62221c0d7843135 upstream. + +It's important that we clear most header fields during encapsulation and +decapsulation, because the packet is substantially changed, and we don't +want any info leak or logic bug due to an accidental correlation. But, +for encapsulation, it's wrong to clear skb->hash, since it's used by +fq_codel and flow dissection in general. Without it, classification does +not proceed as usual. 
This change might make it easier to estimate the +number of innerflows by examining clustering of out of order packets, +but this shouldn't open up anything that can't already be inferred +otherwise (e.g. syn packet size inference), and fq_codel can be disabled +anyway. + +Furthermore, it might be the case that the hash isn't used or queried at +all until after wireguard transmits the encrypted UDP packet, which +means skb->hash might still be zero at this point, and thus no hash +taken over the inner packet data. In order to address this situation, we +force a calculation of skb->hash before encrypting packet data. + +Of course this means that fq_codel might transmit packets slightly more +out of order than usual. Toke did some testing on beefy machines with +high quantities of parallel flows and found that increasing the +reply-attack counter to 8192 takes care of the most pathological cases +pretty well. + +Reported-by: Dave Taht +Reviewed-and-tested-by: Toke Høiland-Jørgensen +Fixes: e7096c131e51 ("net: WireGuard secure network tunnel") +Signed-off-by: Jason A. Donenfeld +Signed-off-by: David S. Miller +Signed-off-by: Jason A. Donenfeld +--- + drivers/net/wireguard/messages.h | 2 +- + drivers/net/wireguard/queueing.h | 10 +++++++++- + drivers/net/wireguard/receive.c | 2 +- + drivers/net/wireguard/send.c | 7 ++++++- + 4 files changed, 17 insertions(+), 4 deletions(-) + +--- a/drivers/net/wireguard/messages.h ++++ b/drivers/net/wireguard/messages.h +@@ -32,7 +32,7 @@ enum cookie_values { + }; + + enum counter_values { +- COUNTER_BITS_TOTAL = 2048, ++ COUNTER_BITS_TOTAL = 8192, + COUNTER_REDUNDANT_BITS = BITS_PER_LONG, + COUNTER_WINDOW_SIZE = COUNTER_BITS_TOTAL - COUNTER_REDUNDANT_BITS + }; +--- a/drivers/net/wireguard/queueing.h ++++ b/drivers/net/wireguard/queueing.h +@@ -87,12 +87,20 @@ static inline bool wg_check_packet_proto + return real_protocol && skb->protocol == real_protocol; + } + +-static inline void wg_reset_packet(struct sk_buff *skb) ++static inline void wg_reset_packet(struct sk_buff *skb, bool encapsulating) + { ++ u8 l4_hash = skb->l4_hash; ++ u8 sw_hash = skb->sw_hash; ++ u32 hash = skb->hash; + skb_scrub_packet(skb, true); + memset(&skb->headers_start, 0, + offsetof(struct sk_buff, headers_end) - + offsetof(struct sk_buff, headers_start)); ++ if (encapsulating) { ++ skb->l4_hash = l4_hash; ++ skb->sw_hash = sw_hash; ++ skb->hash = hash; ++ } + skb->queue_mapping = 0; + skb->nohdr = 0; + skb->peeked = 0; +--- a/drivers/net/wireguard/receive.c ++++ b/drivers/net/wireguard/receive.c +@@ -484,7 +484,7 @@ int wg_packet_rx_poll(struct napi_struct + if (unlikely(wg_socket_endpoint_from_skb(&endpoint, skb))) + goto next; + +- wg_reset_packet(skb); ++ wg_reset_packet(skb, false); + wg_packet_consume_data_done(peer, skb, &endpoint); + free = false; + +--- a/drivers/net/wireguard/send.c ++++ b/drivers/net/wireguard/send.c +@@ -167,6 +167,11 @@ static bool encrypt_packet(struct sk_buf + struct sk_buff *trailer; + int num_frags; + ++ /* Force hash calculation before encryption so that flow analysis is ++ * consistent over the inner packet. ++ */ ++ skb_get_hash(skb); ++ + /* Calculate lengths. 
*/ + padding_len = calculate_skb_padding(skb); + trailer_len = padding_len + noise_encrypted_len(0); +@@ -295,7 +300,7 @@ void wg_packet_encrypt_worker(struct wor + skb_list_walk_safe(first, skb, next) { + if (likely(encrypt_packet(skb, + PACKET_CB(first)->keypair))) { +- wg_reset_packet(skb); ++ wg_reset_packet(skb, true); + } else { + state = PACKET_STATE_DEAD; + break; diff --git a/target/linux/generic/backport-5.4/080-wireguard-0104-wireguard-selftests-use-newer-iproute2-for-gcc-10.patch b/target/linux/generic/backport-5.4/080-wireguard-0104-wireguard-selftests-use-newer-iproute2-for-gcc-10.patch deleted file mode 100644 index d4efe37a49..0000000000 --- a/target/linux/generic/backport-5.4/080-wireguard-0104-wireguard-selftests-use-newer-iproute2-for-gcc-10.patch +++ /dev/null @@ -1,31 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Tue, 19 May 2020 22:49:27 -0600 -Subject: [PATCH] wireguard: selftests: use newer iproute2 for gcc-10 - -commit ee3c1aa3f34b7842c1557cfe5d8c3f7b8c692de8 upstream. - -gcc-10 switched to defaulting to -fno-common, which broke iproute2-5.4. -This was fixed in iproute-5.6, so switch to that. Because we're after a -stable testing surface, we generally don't like to bump these -unnecessarily, but in this case, being able to actually build is a basic -necessity. - -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. Donenfeld ---- - tools/testing/selftests/wireguard/qemu/Makefile | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/tools/testing/selftests/wireguard/qemu/Makefile -+++ b/tools/testing/selftests/wireguard/qemu/Makefile -@@ -44,7 +44,7 @@ endef - $(eval $(call tar_download,MUSL,musl,1.1.24,.tar.gz,https://www.musl-libc.org/releases/,1370c9a812b2cf2a7d92802510cca0058cc37e66a7bedd70051f0a34015022a3)) - $(eval $(call tar_download,IPERF,iperf,3.7,.tar.gz,https://downloads.es.net/pub/iperf/,d846040224317caf2f75c843d309a950a7db23f9b44b94688ccbe557d6d1710c)) - $(eval $(call tar_download,BASH,bash,5.0,.tar.gz,https://ftp.gnu.org/gnu/bash/,b4a80f2ac66170b2913efbfb9f2594f1f76c7b1afd11f799e22035d63077fb4d)) --$(eval $(call tar_download,IPROUTE2,iproute2,5.4.0,.tar.xz,https://www.kernel.org/pub/linux/utils/net/iproute2/,fe97aa60a0d4c5ac830be18937e18dc3400ca713a33a89ad896ff1e3d46086ae)) -+$(eval $(call tar_download,IPROUTE2,iproute2,5.6.0,.tar.xz,https://www.kernel.org/pub/linux/utils/net/iproute2/,1b5b0e25ce6e23da7526ea1da044e814ad85ba761b10dd29c2b027c056b04692)) - $(eval $(call tar_download,IPTABLES,iptables,1.8.4,.tar.bz2,https://www.netfilter.org/projects/iptables/files/,993a3a5490a544c2cbf2ef15cf7e7ed21af1845baf228318d5c36ef8827e157c)) - $(eval $(call tar_download,NMAP,nmap,7.80,.tar.bz2,https://nmap.org/dist/,fcfa5a0e42099e12e4bf7a68ebe6fde05553383a682e816a7ec9256ab4773faa)) - $(eval $(call tar_download,IPUTILS,iputils,s20190709,.tar.gz,https://github.com/iputils/iputils/archive/s20190709.tar.gz/#,a15720dd741d7538dd2645f9f516d193636ae4300ff7dbc8bfca757bf166490a)) diff --git a/target/linux/generic/backport-5.4/080-wireguard-0105-wireguard-noise-read-preshared-key-while-taking-lock.patch b/target/linux/generic/backport-5.4/080-wireguard-0105-wireguard-noise-read-preshared-key-while-taking-lock.patch deleted file mode 100644 index 2dac4b7064..0000000000 --- a/target/linux/generic/backport-5.4/080-wireguard-0105-wireguard-noise-read-preshared-key-while-taking-lock.patch +++ /dev/null @@ -1,61 +0,0 @@ -From 0000000000000000000000000000000000000000 
Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Tue, 19 May 2020 22:49:28 -0600 -Subject: [PATCH] wireguard: noise: read preshared key while taking lock - -commit bc67d371256f5c47d824e2eec51e46c8d62d022e upstream. - -Prior we read the preshared key after dropping the handshake lock, which -isn't an actual crypto issue if it races, but it's still not quite -correct. So copy that part of the state into a temporary like we do with -the rest of the handshake state variables. Then we can release the lock, -operate on the temporary, and zero it out at the end of the function. In -performance tests, the impact of this was entirely unnoticable, probably -because those bytes are coming from the same cacheline as other things -that are being copied out in the same manner. - -Reported-by: Matt Dunwoodie -Fixes: e7096c131e51 ("net: WireGuard secure network tunnel") -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. Donenfeld ---- - drivers/net/wireguard/noise.c | 6 +++++- - 1 file changed, 5 insertions(+), 1 deletion(-) - ---- a/drivers/net/wireguard/noise.c -+++ b/drivers/net/wireguard/noise.c -@@ -715,6 +715,7 @@ wg_noise_handshake_consume_response(stru - u8 e[NOISE_PUBLIC_KEY_LEN]; - u8 ephemeral_private[NOISE_PUBLIC_KEY_LEN]; - u8 static_private[NOISE_PUBLIC_KEY_LEN]; -+ u8 preshared_key[NOISE_SYMMETRIC_KEY_LEN]; - - down_read(&wg->static_identity.lock); - -@@ -733,6 +734,8 @@ wg_noise_handshake_consume_response(stru - memcpy(chaining_key, handshake->chaining_key, NOISE_HASH_LEN); - memcpy(ephemeral_private, handshake->ephemeral_private, - NOISE_PUBLIC_KEY_LEN); -+ memcpy(preshared_key, handshake->preshared_key, -+ NOISE_SYMMETRIC_KEY_LEN); - up_read(&handshake->lock); - - if (state != HANDSHAKE_CREATED_INITIATION) -@@ -750,7 +753,7 @@ wg_noise_handshake_consume_response(stru - goto fail; - - /* psk */ -- mix_psk(chaining_key, hash, key, handshake->preshared_key); -+ mix_psk(chaining_key, hash, key, preshared_key); - - /* {} */ - if (!message_decrypt(NULL, src->encrypted_nothing, -@@ -783,6 +786,7 @@ out: - memzero_explicit(chaining_key, NOISE_HASH_LEN); - memzero_explicit(ephemeral_private, NOISE_PUBLIC_KEY_LEN); - memzero_explicit(static_private, NOISE_PUBLIC_KEY_LEN); -+ memzero_explicit(preshared_key, NOISE_SYMMETRIC_KEY_LEN); - up_read(&wg->static_identity.lock); - return ret_peer; - } diff --git a/target/linux/generic/backport-5.4/080-wireguard-0105-wireguard-noise-separate-receive-counter-from-send-c.patch b/target/linux/generic/backport-5.4/080-wireguard-0105-wireguard-noise-separate-receive-counter-from-send-c.patch new file mode 100644 index 0000000000..87d38d36fe --- /dev/null +++ b/target/linux/generic/backport-5.4/080-wireguard-0105-wireguard-noise-separate-receive-counter-from-send-c.patch @@ -0,0 +1,330 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: "Jason A. Donenfeld" +Date: Tue, 19 May 2020 22:49:30 -0600 +Subject: [PATCH] wireguard: noise: separate receive counter from send counter + +commit a9e90d9931f3a474f04bab782ccd9d77904941e9 upstream. + +In "wireguard: queueing: preserve flow hash across packet scrubbing", we +were required to slightly increase the size of the receive replay +counter to something still fairly small, but an increase nonetheless. +It turns out that we can recoup some of the additional memory overhead +by splitting up the prior union type into two distinct types. 
Before, we +used the same "noise_counter" union for both sending and receiving, with +sending just using a simple atomic64_t, while receiving used the full +replay counter checker. This meant that most of the memory being +allocated for the sending counter was being wasted. Since the old +"noise_counter" type increased in size in the prior commit, now is a +good time to split up that union type into a distinct "noise_replay_ +counter" for receiving and a boring atomic64_t for sending, each using +neither more nor less memory than required. + +Also, since sometimes the replay counter is accessed without +necessitating additional accesses to the bitmap, we can reduce cache +misses by hoisting the always-necessary lock above the bitmap in the +struct layout. We also change a "noise_replay_counter" stack allocation +to kmalloc in a -DDEBUG selftest so that KASAN doesn't trigger a stack +frame warning. + +All and all, removing a bit of abstraction in this commit makes the code +simpler and smaller, in addition to the motivating memory usage +recuperation. For example, passing around raw "noise_symmetric_key" +structs is something that really only makes sense within noise.c, in the +one place where the sending and receiving keys can safely be thought of +as the same type of object; subsequent to that, it's important that we +uniformly access these through keypair->{sending,receiving}, where their +distinct roles are always made explicit. So this patch allows us to draw +that distinction clearly as well. + +Fixes: e7096c131e51 ("net: WireGuard secure network tunnel") +Signed-off-by: Jason A. Donenfeld +Signed-off-by: David S. Miller +Signed-off-by: Jason A. Donenfeld +--- + drivers/net/wireguard/noise.c | 16 +++------ + drivers/net/wireguard/noise.h | 14 ++++---- + drivers/net/wireguard/receive.c | 42 ++++++++++++------------ + drivers/net/wireguard/selftest/counter.c | 17 +++++++--- + drivers/net/wireguard/send.c | 12 +++---- + 5 files changed, 48 insertions(+), 53 deletions(-) + +--- a/drivers/net/wireguard/noise.c ++++ b/drivers/net/wireguard/noise.c +@@ -104,6 +104,7 @@ static struct noise_keypair *keypair_cre + + if (unlikely(!keypair)) + return NULL; ++ spin_lock_init(&keypair->receiving_counter.lock); + keypair->internal_id = atomic64_inc_return(&keypair_counter); + keypair->entry.type = INDEX_HASHTABLE_KEYPAIR; + keypair->entry.peer = peer; +@@ -358,25 +359,16 @@ out: + memzero_explicit(output, BLAKE2S_HASH_SIZE + 1); + } + +-static void symmetric_key_init(struct noise_symmetric_key *key) +-{ +- spin_lock_init(&key->counter.receive.lock); +- atomic64_set(&key->counter.counter, 0); +- memset(key->counter.receive.backtrack, 0, +- sizeof(key->counter.receive.backtrack)); +- key->birthdate = ktime_get_coarse_boottime_ns(); +- key->is_valid = true; +-} +- + static void derive_keys(struct noise_symmetric_key *first_dst, + struct noise_symmetric_key *second_dst, + const u8 chaining_key[NOISE_HASH_LEN]) + { ++ u64 birthdate = ktime_get_coarse_boottime_ns(); + kdf(first_dst->key, second_dst->key, NULL, NULL, + NOISE_SYMMETRIC_KEY_LEN, NOISE_SYMMETRIC_KEY_LEN, 0, 0, + chaining_key); +- symmetric_key_init(first_dst); +- symmetric_key_init(second_dst); ++ first_dst->birthdate = second_dst->birthdate = birthdate; ++ first_dst->is_valid = second_dst->is_valid = true; + } + + static bool __must_check mix_dh(u8 chaining_key[NOISE_HASH_LEN], +--- a/drivers/net/wireguard/noise.h ++++ b/drivers/net/wireguard/noise.h +@@ -15,18 +15,14 @@ + #include + #include + +-union noise_counter { +- struct { +- u64 
counter; +- unsigned long backtrack[COUNTER_BITS_TOTAL / BITS_PER_LONG]; +- spinlock_t lock; +- } receive; +- atomic64_t counter; ++struct noise_replay_counter { ++ u64 counter; ++ spinlock_t lock; ++ unsigned long backtrack[COUNTER_BITS_TOTAL / BITS_PER_LONG]; + }; + + struct noise_symmetric_key { + u8 key[NOISE_SYMMETRIC_KEY_LEN]; +- union noise_counter counter; + u64 birthdate; + bool is_valid; + }; +@@ -34,7 +30,9 @@ struct noise_symmetric_key { + struct noise_keypair { + struct index_hashtable_entry entry; + struct noise_symmetric_key sending; ++ atomic64_t sending_counter; + struct noise_symmetric_key receiving; ++ struct noise_replay_counter receiving_counter; + __le32 remote_index; + bool i_am_the_initiator; + struct kref refcount; +--- a/drivers/net/wireguard/receive.c ++++ b/drivers/net/wireguard/receive.c +@@ -245,20 +245,20 @@ static void keep_key_fresh(struct wg_pee + } + } + +-static bool decrypt_packet(struct sk_buff *skb, struct noise_symmetric_key *key) ++static bool decrypt_packet(struct sk_buff *skb, struct noise_keypair *keypair) + { + struct scatterlist sg[MAX_SKB_FRAGS + 8]; + struct sk_buff *trailer; + unsigned int offset; + int num_frags; + +- if (unlikely(!key)) ++ if (unlikely(!keypair)) + return false; + +- if (unlikely(!READ_ONCE(key->is_valid) || +- wg_birthdate_has_expired(key->birthdate, REJECT_AFTER_TIME) || +- key->counter.receive.counter >= REJECT_AFTER_MESSAGES)) { +- WRITE_ONCE(key->is_valid, false); ++ if (unlikely(!READ_ONCE(keypair->receiving.is_valid) || ++ wg_birthdate_has_expired(keypair->receiving.birthdate, REJECT_AFTER_TIME) || ++ keypair->receiving_counter.counter >= REJECT_AFTER_MESSAGES)) { ++ WRITE_ONCE(keypair->receiving.is_valid, false); + return false; + } + +@@ -283,7 +283,7 @@ static bool decrypt_packet(struct sk_buf + + if (!chacha20poly1305_decrypt_sg_inplace(sg, skb->len, NULL, 0, + PACKET_CB(skb)->nonce, +- key->key)) ++ keypair->receiving.key)) + return false; + + /* Another ugly situation of pushing and pulling the header so as to +@@ -298,41 +298,41 @@ static bool decrypt_packet(struct sk_buf + } + + /* This is RFC6479, a replay detection bitmap algorithm that avoids bitshifts */ +-static bool counter_validate(union noise_counter *counter, u64 their_counter) ++static bool counter_validate(struct noise_replay_counter *counter, u64 their_counter) + { + unsigned long index, index_current, top, i; + bool ret = false; + +- spin_lock_bh(&counter->receive.lock); ++ spin_lock_bh(&counter->lock); + +- if (unlikely(counter->receive.counter >= REJECT_AFTER_MESSAGES + 1 || ++ if (unlikely(counter->counter >= REJECT_AFTER_MESSAGES + 1 || + their_counter >= REJECT_AFTER_MESSAGES)) + goto out; + + ++their_counter; + + if (unlikely((COUNTER_WINDOW_SIZE + their_counter) < +- counter->receive.counter)) ++ counter->counter)) + goto out; + + index = their_counter >> ilog2(BITS_PER_LONG); + +- if (likely(their_counter > counter->receive.counter)) { +- index_current = counter->receive.counter >> ilog2(BITS_PER_LONG); ++ if (likely(their_counter > counter->counter)) { ++ index_current = counter->counter >> ilog2(BITS_PER_LONG); + top = min_t(unsigned long, index - index_current, + COUNTER_BITS_TOTAL / BITS_PER_LONG); + for (i = 1; i <= top; ++i) +- counter->receive.backtrack[(i + index_current) & ++ counter->backtrack[(i + index_current) & + ((COUNTER_BITS_TOTAL / BITS_PER_LONG) - 1)] = 0; +- counter->receive.counter = their_counter; ++ counter->counter = their_counter; + } + + index &= (COUNTER_BITS_TOTAL / BITS_PER_LONG) - 1; + ret = 
!test_and_set_bit(their_counter & (BITS_PER_LONG - 1), +- &counter->receive.backtrack[index]); ++ &counter->backtrack[index]); + + out: +- spin_unlock_bh(&counter->receive.lock); ++ spin_unlock_bh(&counter->lock); + return ret; + } + +@@ -472,12 +472,12 @@ int wg_packet_rx_poll(struct napi_struct + if (unlikely(state != PACKET_STATE_CRYPTED)) + goto next; + +- if (unlikely(!counter_validate(&keypair->receiving.counter, ++ if (unlikely(!counter_validate(&keypair->receiving_counter, + PACKET_CB(skb)->nonce))) { + net_dbg_ratelimited("%s: Packet has invalid nonce %llu (max %llu)\n", + peer->device->dev->name, + PACKET_CB(skb)->nonce, +- keypair->receiving.counter.receive.counter); ++ keypair->receiving_counter.counter); + goto next; + } + +@@ -511,8 +511,8 @@ void wg_packet_decrypt_worker(struct wor + struct sk_buff *skb; + + while ((skb = ptr_ring_consume_bh(&queue->ring)) != NULL) { +- enum packet_state state = likely(decrypt_packet(skb, +- &PACKET_CB(skb)->keypair->receiving)) ? ++ enum packet_state state = ++ likely(decrypt_packet(skb, PACKET_CB(skb)->keypair)) ? + PACKET_STATE_CRYPTED : PACKET_STATE_DEAD; + wg_queue_enqueue_per_peer_napi(skb, state); + if (need_resched()) +--- a/drivers/net/wireguard/selftest/counter.c ++++ b/drivers/net/wireguard/selftest/counter.c +@@ -6,18 +6,24 @@ + #ifdef DEBUG + bool __init wg_packet_counter_selftest(void) + { ++ struct noise_replay_counter *counter; + unsigned int test_num = 0, i; +- union noise_counter counter; + bool success = true; + +-#define T_INIT do { \ +- memset(&counter, 0, sizeof(union noise_counter)); \ +- spin_lock_init(&counter.receive.lock); \ ++ counter = kmalloc(sizeof(*counter), GFP_KERNEL); ++ if (unlikely(!counter)) { ++ pr_err("nonce counter self-test malloc: FAIL\n"); ++ return false; ++ } ++ ++#define T_INIT do { \ ++ memset(counter, 0, sizeof(*counter)); \ ++ spin_lock_init(&counter->lock); \ + } while (0) + #define T_LIM (COUNTER_WINDOW_SIZE + 1) + #define T(n, v) do { \ + ++test_num; \ +- if (counter_validate(&counter, n) != (v)) { \ ++ if (counter_validate(counter, n) != (v)) { \ + pr_err("nonce counter self-test %u: FAIL\n", \ + test_num); \ + success = false; \ +@@ -99,6 +105,7 @@ bool __init wg_packet_counter_selftest(v + + if (success) + pr_info("nonce counter self-tests: pass\n"); ++ kfree(counter); + return success; + } + #endif +--- a/drivers/net/wireguard/send.c ++++ b/drivers/net/wireguard/send.c +@@ -129,7 +129,7 @@ static void keep_key_fresh(struct wg_pee + rcu_read_lock_bh(); + keypair = rcu_dereference_bh(peer->keypairs.current_keypair); + send = keypair && READ_ONCE(keypair->sending.is_valid) && +- (atomic64_read(&keypair->sending.counter.counter) > REKEY_AFTER_MESSAGES || ++ (atomic64_read(&keypair->sending_counter) > REKEY_AFTER_MESSAGES || + (keypair->i_am_the_initiator && + wg_birthdate_has_expired(keypair->sending.birthdate, REKEY_AFTER_TIME))); + rcu_read_unlock_bh(); +@@ -349,7 +349,6 @@ void wg_packet_purge_staged_packets(stru + + void wg_packet_send_staged_packets(struct wg_peer *peer) + { +- struct noise_symmetric_key *key; + struct noise_keypair *keypair; + struct sk_buff_head packets; + struct sk_buff *skb; +@@ -369,10 +368,9 @@ void wg_packet_send_staged_packets(struc + rcu_read_unlock_bh(); + if (unlikely(!keypair)) + goto out_nokey; +- key = &keypair->sending; +- if (unlikely(!READ_ONCE(key->is_valid))) ++ if (unlikely(!READ_ONCE(keypair->sending.is_valid))) + goto out_nokey; +- if (unlikely(wg_birthdate_has_expired(key->birthdate, ++ if 
(unlikely(wg_birthdate_has_expired(keypair->sending.birthdate, + REJECT_AFTER_TIME))) + goto out_invalid; + +@@ -387,7 +385,7 @@ void wg_packet_send_staged_packets(struc + */ + PACKET_CB(skb)->ds = ip_tunnel_ecn_encap(0, ip_hdr(skb), skb); + PACKET_CB(skb)->nonce = +- atomic64_inc_return(&key->counter.counter) - 1; ++ atomic64_inc_return(&keypair->sending_counter) - 1; + if (unlikely(PACKET_CB(skb)->nonce >= REJECT_AFTER_MESSAGES)) + goto out_invalid; + } +@@ -399,7 +397,7 @@ void wg_packet_send_staged_packets(struc + return; + + out_invalid: +- WRITE_ONCE(key->is_valid, false); ++ WRITE_ONCE(keypair->sending.is_valid, false); + out_nokey: + wg_noise_keypair_put(keypair, false); + diff --git a/target/linux/generic/backport-5.4/080-wireguard-0106-wireguard-noise-do-not-assign-initiation-time-in-if-.patch b/target/linux/generic/backport-5.4/080-wireguard-0106-wireguard-noise-do-not-assign-initiation-time-in-if-.patch new file mode 100644 index 0000000000..a53c764708 --- /dev/null +++ b/target/linux/generic/backport-5.4/080-wireguard-0106-wireguard-noise-do-not-assign-initiation-time-in-if-.patch @@ -0,0 +1,33 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Frank Werner-Krippendorf +Date: Tue, 23 Jun 2020 03:59:44 -0600 +Subject: [PATCH] wireguard: noise: do not assign initiation time in if + condition + +commit 558b353c9c2a717509f291c066c6bd8f5f5e21be upstream. + +Fixes an error condition reported by checkpatch.pl which caused by +assigning a variable in an if condition in wg_noise_handshake_consume_ +initiation(). + +Signed-off-by: Frank Werner-Krippendorf +Signed-off-by: Jason A. Donenfeld +Signed-off-by: David S. Miller +Signed-off-by: Jason A. Donenfeld +--- + drivers/net/wireguard/noise.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +--- a/drivers/net/wireguard/noise.c ++++ b/drivers/net/wireguard/noise.c +@@ -617,8 +617,8 @@ wg_noise_handshake_consume_initiation(st + memcpy(handshake->hash, hash, NOISE_HASH_LEN); + memcpy(handshake->chaining_key, chaining_key, NOISE_HASH_LEN); + handshake->remote_index = src->sender_index; +- if ((s64)(handshake->last_initiation_consumption - +- (initiation_consumption = ktime_get_coarse_boottime_ns())) < 0) ++ initiation_consumption = ktime_get_coarse_boottime_ns(); ++ if ((s64)(handshake->last_initiation_consumption - initiation_consumption) < 0) + handshake->last_initiation_consumption = initiation_consumption; + handshake->state = HANDSHAKE_CONSUMED_INITIATION; + up_write(&handshake->lock); diff --git a/target/linux/generic/backport-5.4/080-wireguard-0106-wireguard-queueing-preserve-flow-hash-across-packet-.patch b/target/linux/generic/backport-5.4/080-wireguard-0106-wireguard-queueing-preserve-flow-hash-across-packet-.patch deleted file mode 100644 index 31deadbfc1..0000000000 --- a/target/linux/generic/backport-5.4/080-wireguard-0106-wireguard-queueing-preserve-flow-hash-across-packet-.patch +++ /dev/null @@ -1,116 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Tue, 19 May 2020 22:49:29 -0600 -Subject: [PATCH] wireguard: queueing: preserve flow hash across packet - scrubbing -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -commit c78a0b4a78839d572d8a80f6a62221c0d7843135 upstream. 
- -It's important that we clear most header fields during encapsulation and -decapsulation, because the packet is substantially changed, and we don't -want any info leak or logic bug due to an accidental correlation. But, -for encapsulation, it's wrong to clear skb->hash, since it's used by -fq_codel and flow dissection in general. Without it, classification does -not proceed as usual. This change might make it easier to estimate the -number of innerflows by examining clustering of out of order packets, -but this shouldn't open up anything that can't already be inferred -otherwise (e.g. syn packet size inference), and fq_codel can be disabled -anyway. - -Furthermore, it might be the case that the hash isn't used or queried at -all until after wireguard transmits the encrypted UDP packet, which -means skb->hash might still be zero at this point, and thus no hash -taken over the inner packet data. In order to address this situation, we -force a calculation of skb->hash before encrypting packet data. - -Of course this means that fq_codel might transmit packets slightly more -out of order than usual. Toke did some testing on beefy machines with -high quantities of parallel flows and found that increasing the -reply-attack counter to 8192 takes care of the most pathological cases -pretty well. - -Reported-by: Dave Taht -Reviewed-and-tested-by: Toke Høiland-Jørgensen -Fixes: e7096c131e51 ("net: WireGuard secure network tunnel") -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. Donenfeld ---- - drivers/net/wireguard/messages.h | 2 +- - drivers/net/wireguard/queueing.h | 10 +++++++++- - drivers/net/wireguard/receive.c | 2 +- - drivers/net/wireguard/send.c | 7 ++++++- - 4 files changed, 17 insertions(+), 4 deletions(-) - ---- a/drivers/net/wireguard/messages.h -+++ b/drivers/net/wireguard/messages.h -@@ -32,7 +32,7 @@ enum cookie_values { - }; - - enum counter_values { -- COUNTER_BITS_TOTAL = 2048, -+ COUNTER_BITS_TOTAL = 8192, - COUNTER_REDUNDANT_BITS = BITS_PER_LONG, - COUNTER_WINDOW_SIZE = COUNTER_BITS_TOTAL - COUNTER_REDUNDANT_BITS - }; ---- a/drivers/net/wireguard/queueing.h -+++ b/drivers/net/wireguard/queueing.h -@@ -87,12 +87,20 @@ static inline bool wg_check_packet_proto - return real_protocol && skb->protocol == real_protocol; - } - --static inline void wg_reset_packet(struct sk_buff *skb) -+static inline void wg_reset_packet(struct sk_buff *skb, bool encapsulating) - { -+ u8 l4_hash = skb->l4_hash; -+ u8 sw_hash = skb->sw_hash; -+ u32 hash = skb->hash; - skb_scrub_packet(skb, true); - memset(&skb->headers_start, 0, - offsetof(struct sk_buff, headers_end) - - offsetof(struct sk_buff, headers_start)); -+ if (encapsulating) { -+ skb->l4_hash = l4_hash; -+ skb->sw_hash = sw_hash; -+ skb->hash = hash; -+ } - skb->queue_mapping = 0; - skb->nohdr = 0; - skb->peeked = 0; ---- a/drivers/net/wireguard/receive.c -+++ b/drivers/net/wireguard/receive.c -@@ -484,7 +484,7 @@ int wg_packet_rx_poll(struct napi_struct - if (unlikely(wg_socket_endpoint_from_skb(&endpoint, skb))) - goto next; - -- wg_reset_packet(skb); -+ wg_reset_packet(skb, false); - wg_packet_consume_data_done(peer, skb, &endpoint); - free = false; - ---- a/drivers/net/wireguard/send.c -+++ b/drivers/net/wireguard/send.c -@@ -167,6 +167,11 @@ static bool encrypt_packet(struct sk_buf - struct sk_buff *trailer; - int num_frags; - -+ /* Force hash calculation before encryption so that flow analysis is -+ * consistent over the inner packet. -+ */ -+ skb_get_hash(skb); -+ - /* Calculate lengths. 
*/ - padding_len = calculate_skb_padding(skb); - trailer_len = padding_len + noise_encrypted_len(0); -@@ -295,7 +300,7 @@ void wg_packet_encrypt_worker(struct wor - skb_list_walk_safe(first, skb, next) { - if (likely(encrypt_packet(skb, - PACKET_CB(first)->keypair))) { -- wg_reset_packet(skb); -+ wg_reset_packet(skb, true); - } else { - state = PACKET_STATE_DEAD; - break; diff --git a/target/linux/generic/backport-5.4/080-wireguard-0107-wireguard-device-avoid-circular-netns-references.patch b/target/linux/generic/backport-5.4/080-wireguard-0107-wireguard-device-avoid-circular-netns-references.patch new file mode 100644 index 0000000000..013023a3e2 --- /dev/null +++ b/target/linux/generic/backport-5.4/080-wireguard-0107-wireguard-device-avoid-circular-netns-references.patch @@ -0,0 +1,296 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: "Jason A. Donenfeld" +Date: Tue, 23 Jun 2020 03:59:45 -0600 +Subject: [PATCH] wireguard: device: avoid circular netns references + +commit 900575aa33a3eaaef802b31de187a85c4a4b4bd0 upstream. + +Before, we took a reference to the creating netns if the new netns was +different. This caused issues with circular references, with two +wireguard interfaces swapping namespaces. The solution is to rather not +take any extra references at all, but instead simply invalidate the +creating netns pointer when that netns is deleted. + +In order to prevent this from happening again, this commit improves the +rough object leak tracking by allowing it to account for created and +destroyed interfaces, aside from just peers and keys. That then makes it +possible to check for the object leak when having two interfaces take a +reference to each others' namespaces. + +Fixes: e7096c131e51 ("net: WireGuard secure network tunnel") +Signed-off-by: Jason A. Donenfeld +Signed-off-by: David S. Miller +Signed-off-by: Jason A. Donenfeld +--- + drivers/net/wireguard/device.c | 58 ++++++++++------------ + drivers/net/wireguard/device.h | 3 +- + drivers/net/wireguard/netlink.c | 14 ++++-- + drivers/net/wireguard/socket.c | 25 +++++++--- + tools/testing/selftests/wireguard/netns.sh | 13 ++++- + 5 files changed, 67 insertions(+), 46 deletions(-) + +--- a/drivers/net/wireguard/device.c ++++ b/drivers/net/wireguard/device.c +@@ -45,17 +45,18 @@ static int wg_open(struct net_device *de + if (dev_v6) + dev_v6->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_NONE; + ++ mutex_lock(&wg->device_update_lock); + ret = wg_socket_init(wg, wg->incoming_port); + if (ret < 0) +- return ret; +- mutex_lock(&wg->device_update_lock); ++ goto out; + list_for_each_entry(peer, &wg->peer_list, peer_list) { + wg_packet_send_staged_packets(peer); + if (peer->persistent_keepalive_interval) + wg_packet_send_keepalive(peer); + } ++out: + mutex_unlock(&wg->device_update_lock); +- return 0; ++ return ret; + } + + #ifdef CONFIG_PM_SLEEP +@@ -225,6 +226,7 @@ static void wg_destruct(struct net_devic + list_del(&wg->device_list); + rtnl_unlock(); + mutex_lock(&wg->device_update_lock); ++ rcu_assign_pointer(wg->creating_net, NULL); + wg->incoming_port = 0; + wg_socket_reinit(wg, NULL, NULL); + /* The final references are cleared in the below calls to destroy_workqueue. 
*/ +@@ -240,13 +242,11 @@ static void wg_destruct(struct net_devic + skb_queue_purge(&wg->incoming_handshakes); + free_percpu(dev->tstats); + free_percpu(wg->incoming_handshakes_worker); +- if (wg->have_creating_net_ref) +- put_net(wg->creating_net); + kvfree(wg->index_hashtable); + kvfree(wg->peer_hashtable); + mutex_unlock(&wg->device_update_lock); + +- pr_debug("%s: Interface deleted\n", dev->name); ++ pr_debug("%s: Interface destroyed\n", dev->name); + free_netdev(dev); + } + +@@ -292,7 +292,7 @@ static int wg_newlink(struct net *src_ne + struct wg_device *wg = netdev_priv(dev); + int ret = -ENOMEM; + +- wg->creating_net = src_net; ++ rcu_assign_pointer(wg->creating_net, src_net); + init_rwsem(&wg->static_identity.lock); + mutex_init(&wg->socket_update_lock); + mutex_init(&wg->device_update_lock); +@@ -393,30 +393,26 @@ static struct rtnl_link_ops link_ops __r + .newlink = wg_newlink, + }; + +-static int wg_netdevice_notification(struct notifier_block *nb, +- unsigned long action, void *data) ++static void wg_netns_pre_exit(struct net *net) + { +- struct net_device *dev = ((struct netdev_notifier_info *)data)->dev; +- struct wg_device *wg = netdev_priv(dev); +- +- ASSERT_RTNL(); +- +- if (action != NETDEV_REGISTER || dev->netdev_ops != &netdev_ops) +- return 0; ++ struct wg_device *wg; + +- if (dev_net(dev) == wg->creating_net && wg->have_creating_net_ref) { +- put_net(wg->creating_net); +- wg->have_creating_net_ref = false; +- } else if (dev_net(dev) != wg->creating_net && +- !wg->have_creating_net_ref) { +- wg->have_creating_net_ref = true; +- get_net(wg->creating_net); ++ rtnl_lock(); ++ list_for_each_entry(wg, &device_list, device_list) { ++ if (rcu_access_pointer(wg->creating_net) == net) { ++ pr_debug("%s: Creating namespace exiting\n", wg->dev->name); ++ netif_carrier_off(wg->dev); ++ mutex_lock(&wg->device_update_lock); ++ rcu_assign_pointer(wg->creating_net, NULL); ++ wg_socket_reinit(wg, NULL, NULL); ++ mutex_unlock(&wg->device_update_lock); ++ } + } +- return 0; ++ rtnl_unlock(); + } + +-static struct notifier_block netdevice_notifier = { +- .notifier_call = wg_netdevice_notification ++static struct pernet_operations pernet_ops = { ++ .pre_exit = wg_netns_pre_exit + }; + + int __init wg_device_init(void) +@@ -429,18 +425,18 @@ int __init wg_device_init(void) + return ret; + #endif + +- ret = register_netdevice_notifier(&netdevice_notifier); ++ ret = register_pernet_device(&pernet_ops); + if (ret) + goto error_pm; + + ret = rtnl_link_register(&link_ops); + if (ret) +- goto error_netdevice; ++ goto error_pernet; + + return 0; + +-error_netdevice: +- unregister_netdevice_notifier(&netdevice_notifier); ++error_pernet: ++ unregister_pernet_device(&pernet_ops); + error_pm: + #ifdef CONFIG_PM_SLEEP + unregister_pm_notifier(&pm_notifier); +@@ -451,7 +447,7 @@ error_pm: + void wg_device_uninit(void) + { + rtnl_link_unregister(&link_ops); +- unregister_netdevice_notifier(&netdevice_notifier); ++ unregister_pernet_device(&pernet_ops); + #ifdef CONFIG_PM_SLEEP + unregister_pm_notifier(&pm_notifier); + #endif +--- a/drivers/net/wireguard/device.h ++++ b/drivers/net/wireguard/device.h +@@ -40,7 +40,7 @@ struct wg_device { + struct net_device *dev; + struct crypt_queue encrypt_queue, decrypt_queue; + struct sock __rcu *sock4, *sock6; +- struct net *creating_net; ++ struct net __rcu *creating_net; + struct noise_static_identity static_identity; + struct workqueue_struct *handshake_receive_wq, *handshake_send_wq; + struct workqueue_struct *packet_crypt_wq; +@@ -56,7 +56,6 @@ struct wg_device 
{ + unsigned int num_peers, device_update_gen; + u32 fwmark; + u16 incoming_port; +- bool have_creating_net_ref; + }; + + int wg_device_init(void); +--- a/drivers/net/wireguard/netlink.c ++++ b/drivers/net/wireguard/netlink.c +@@ -517,11 +517,15 @@ static int wg_set_device(struct sk_buff + if (flags & ~__WGDEVICE_F_ALL) + goto out; + +- ret = -EPERM; +- if ((info->attrs[WGDEVICE_A_LISTEN_PORT] || +- info->attrs[WGDEVICE_A_FWMARK]) && +- !ns_capable(wg->creating_net->user_ns, CAP_NET_ADMIN)) +- goto out; ++ if (info->attrs[WGDEVICE_A_LISTEN_PORT] || info->attrs[WGDEVICE_A_FWMARK]) { ++ struct net *net; ++ rcu_read_lock(); ++ net = rcu_dereference(wg->creating_net); ++ ret = !net || !ns_capable(net->user_ns, CAP_NET_ADMIN) ? -EPERM : 0; ++ rcu_read_unlock(); ++ if (ret) ++ goto out; ++ } + + ++wg->device_update_gen; + +--- a/drivers/net/wireguard/socket.c ++++ b/drivers/net/wireguard/socket.c +@@ -347,6 +347,7 @@ static void set_sock_opts(struct socket + + int wg_socket_init(struct wg_device *wg, u16 port) + { ++ struct net *net; + int ret; + struct udp_tunnel_sock_cfg cfg = { + .sk_user_data = wg, +@@ -371,37 +372,47 @@ int wg_socket_init(struct wg_device *wg, + }; + #endif + ++ rcu_read_lock(); ++ net = rcu_dereference(wg->creating_net); ++ net = net ? maybe_get_net(net) : NULL; ++ rcu_read_unlock(); ++ if (unlikely(!net)) ++ return -ENONET; ++ + #if IS_ENABLED(CONFIG_IPV6) + retry: + #endif + +- ret = udp_sock_create(wg->creating_net, &port4, &new4); ++ ret = udp_sock_create(net, &port4, &new4); + if (ret < 0) { + pr_err("%s: Could not create IPv4 socket\n", wg->dev->name); +- return ret; ++ goto out; + } + set_sock_opts(new4); +- setup_udp_tunnel_sock(wg->creating_net, new4, &cfg); ++ setup_udp_tunnel_sock(net, new4, &cfg); + + #if IS_ENABLED(CONFIG_IPV6) + if (ipv6_mod_enabled()) { + port6.local_udp_port = inet_sk(new4->sk)->inet_sport; +- ret = udp_sock_create(wg->creating_net, &port6, &new6); ++ ret = udp_sock_create(net, &port6, &new6); + if (ret < 0) { + udp_tunnel_sock_release(new4); + if (ret == -EADDRINUSE && !port && retries++ < 100) + goto retry; + pr_err("%s: Could not create IPv6 socket\n", + wg->dev->name); +- return ret; ++ goto out; + } + set_sock_opts(new6); +- setup_udp_tunnel_sock(wg->creating_net, new6, &cfg); ++ setup_udp_tunnel_sock(net, new6, &cfg); + } + #endif + + wg_socket_reinit(wg, new4->sk, new6 ? new6->sk : NULL); +- return 0; ++ ret = 0; ++out: ++ put_net(net); ++ return ret; + } + + void wg_socket_reinit(struct wg_device *wg, struct sock *new4, +--- a/tools/testing/selftests/wireguard/netns.sh ++++ b/tools/testing/selftests/wireguard/netns.sh +@@ -587,9 +587,20 @@ ip0 link set wg0 up + kill $ncat_pid + ip0 link del wg0 + ++# Ensure there aren't circular reference loops ++ip1 link add wg1 type wireguard ++ip2 link add wg2 type wireguard ++ip1 link set wg1 netns $netns2 ++ip2 link set wg2 netns $netns1 ++pp ip netns delete $netns1 ++pp ip netns delete $netns2 ++pp ip netns add $netns1 ++pp ip netns add $netns2 ++ ++sleep 2 # Wait for cleanup and grace periods + declare -A objects + while read -t 0.1 -r line 2>/dev/null || [[ $? 
-ne 142 ]]; do +- [[ $line =~ .*(wg[0-9]+:\ [A-Z][a-z]+\ [0-9]+)\ .*(created|destroyed).* ]] || continue ++ [[ $line =~ .*(wg[0-9]+:\ [A-Z][a-z]+\ ?[0-9]*)\ .*(created|destroyed).* ]] || continue + objects["${BASH_REMATCH[1]}"]+="${BASH_REMATCH[2]}" + done < /dev/kmsg + alldeleted=1 diff --git a/target/linux/generic/backport-5.4/080-wireguard-0107-wireguard-noise-separate-receive-counter-from-send-c.patch b/target/linux/generic/backport-5.4/080-wireguard-0107-wireguard-noise-separate-receive-counter-from-send-c.patch deleted file mode 100644 index 87d38d36fe..0000000000 --- a/target/linux/generic/backport-5.4/080-wireguard-0107-wireguard-noise-separate-receive-counter-from-send-c.patch +++ /dev/null @@ -1,330 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Tue, 19 May 2020 22:49:30 -0600 -Subject: [PATCH] wireguard: noise: separate receive counter from send counter - -commit a9e90d9931f3a474f04bab782ccd9d77904941e9 upstream. - -In "wireguard: queueing: preserve flow hash across packet scrubbing", we -were required to slightly increase the size of the receive replay -counter to something still fairly small, but an increase nonetheless. -It turns out that we can recoup some of the additional memory overhead -by splitting up the prior union type into two distinct types. Before, we -used the same "noise_counter" union for both sending and receiving, with -sending just using a simple atomic64_t, while receiving used the full -replay counter checker. This meant that most of the memory being -allocated for the sending counter was being wasted. Since the old -"noise_counter" type increased in size in the prior commit, now is a -good time to split up that union type into a distinct "noise_replay_ -counter" for receiving and a boring atomic64_t for sending, each using -neither more nor less memory than required. - -Also, since sometimes the replay counter is accessed without -necessitating additional accesses to the bitmap, we can reduce cache -misses by hoisting the always-necessary lock above the bitmap in the -struct layout. We also change a "noise_replay_counter" stack allocation -to kmalloc in a -DDEBUG selftest so that KASAN doesn't trigger a stack -frame warning. - -All and all, removing a bit of abstraction in this commit makes the code -simpler and smaller, in addition to the motivating memory usage -recuperation. For example, passing around raw "noise_symmetric_key" -structs is something that really only makes sense within noise.c, in the -one place where the sending and receiving keys can safely be thought of -as the same type of object; subsequent to that, it's important that we -uniformly access these through keypair->{sending,receiving}, where their -distinct roles are always made explicit. So this patch allows us to draw -that distinction clearly as well. - -Fixes: e7096c131e51 ("net: WireGuard secure network tunnel") -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. 
Donenfeld ---- - drivers/net/wireguard/noise.c | 16 +++------ - drivers/net/wireguard/noise.h | 14 ++++---- - drivers/net/wireguard/receive.c | 42 ++++++++++++------------ - drivers/net/wireguard/selftest/counter.c | 17 +++++++--- - drivers/net/wireguard/send.c | 12 +++---- - 5 files changed, 48 insertions(+), 53 deletions(-) - ---- a/drivers/net/wireguard/noise.c -+++ b/drivers/net/wireguard/noise.c -@@ -104,6 +104,7 @@ static struct noise_keypair *keypair_cre - - if (unlikely(!keypair)) - return NULL; -+ spin_lock_init(&keypair->receiving_counter.lock); - keypair->internal_id = atomic64_inc_return(&keypair_counter); - keypair->entry.type = INDEX_HASHTABLE_KEYPAIR; - keypair->entry.peer = peer; -@@ -358,25 +359,16 @@ out: - memzero_explicit(output, BLAKE2S_HASH_SIZE + 1); - } - --static void symmetric_key_init(struct noise_symmetric_key *key) --{ -- spin_lock_init(&key->counter.receive.lock); -- atomic64_set(&key->counter.counter, 0); -- memset(key->counter.receive.backtrack, 0, -- sizeof(key->counter.receive.backtrack)); -- key->birthdate = ktime_get_coarse_boottime_ns(); -- key->is_valid = true; --} -- - static void derive_keys(struct noise_symmetric_key *first_dst, - struct noise_symmetric_key *second_dst, - const u8 chaining_key[NOISE_HASH_LEN]) - { -+ u64 birthdate = ktime_get_coarse_boottime_ns(); - kdf(first_dst->key, second_dst->key, NULL, NULL, - NOISE_SYMMETRIC_KEY_LEN, NOISE_SYMMETRIC_KEY_LEN, 0, 0, - chaining_key); -- symmetric_key_init(first_dst); -- symmetric_key_init(second_dst); -+ first_dst->birthdate = second_dst->birthdate = birthdate; -+ first_dst->is_valid = second_dst->is_valid = true; - } - - static bool __must_check mix_dh(u8 chaining_key[NOISE_HASH_LEN], ---- a/drivers/net/wireguard/noise.h -+++ b/drivers/net/wireguard/noise.h -@@ -15,18 +15,14 @@ - #include - #include - --union noise_counter { -- struct { -- u64 counter; -- unsigned long backtrack[COUNTER_BITS_TOTAL / BITS_PER_LONG]; -- spinlock_t lock; -- } receive; -- atomic64_t counter; -+struct noise_replay_counter { -+ u64 counter; -+ spinlock_t lock; -+ unsigned long backtrack[COUNTER_BITS_TOTAL / BITS_PER_LONG]; - }; - - struct noise_symmetric_key { - u8 key[NOISE_SYMMETRIC_KEY_LEN]; -- union noise_counter counter; - u64 birthdate; - bool is_valid; - }; -@@ -34,7 +30,9 @@ struct noise_symmetric_key { - struct noise_keypair { - struct index_hashtable_entry entry; - struct noise_symmetric_key sending; -+ atomic64_t sending_counter; - struct noise_symmetric_key receiving; -+ struct noise_replay_counter receiving_counter; - __le32 remote_index; - bool i_am_the_initiator; - struct kref refcount; ---- a/drivers/net/wireguard/receive.c -+++ b/drivers/net/wireguard/receive.c -@@ -245,20 +245,20 @@ static void keep_key_fresh(struct wg_pee - } - } - --static bool decrypt_packet(struct sk_buff *skb, struct noise_symmetric_key *key) -+static bool decrypt_packet(struct sk_buff *skb, struct noise_keypair *keypair) - { - struct scatterlist sg[MAX_SKB_FRAGS + 8]; - struct sk_buff *trailer; - unsigned int offset; - int num_frags; - -- if (unlikely(!key)) -+ if (unlikely(!keypair)) - return false; - -- if (unlikely(!READ_ONCE(key->is_valid) || -- wg_birthdate_has_expired(key->birthdate, REJECT_AFTER_TIME) || -- key->counter.receive.counter >= REJECT_AFTER_MESSAGES)) { -- WRITE_ONCE(key->is_valid, false); -+ if (unlikely(!READ_ONCE(keypair->receiving.is_valid) || -+ wg_birthdate_has_expired(keypair->receiving.birthdate, REJECT_AFTER_TIME) || -+ keypair->receiving_counter.counter >= REJECT_AFTER_MESSAGES)) { -+ 
WRITE_ONCE(keypair->receiving.is_valid, false); - return false; - } - -@@ -283,7 +283,7 @@ static bool decrypt_packet(struct sk_buf - - if (!chacha20poly1305_decrypt_sg_inplace(sg, skb->len, NULL, 0, - PACKET_CB(skb)->nonce, -- key->key)) -+ keypair->receiving.key)) - return false; - - /* Another ugly situation of pushing and pulling the header so as to -@@ -298,41 +298,41 @@ static bool decrypt_packet(struct sk_buf - } - - /* This is RFC6479, a replay detection bitmap algorithm that avoids bitshifts */ --static bool counter_validate(union noise_counter *counter, u64 their_counter) -+static bool counter_validate(struct noise_replay_counter *counter, u64 their_counter) - { - unsigned long index, index_current, top, i; - bool ret = false; - -- spin_lock_bh(&counter->receive.lock); -+ spin_lock_bh(&counter->lock); - -- if (unlikely(counter->receive.counter >= REJECT_AFTER_MESSAGES + 1 || -+ if (unlikely(counter->counter >= REJECT_AFTER_MESSAGES + 1 || - their_counter >= REJECT_AFTER_MESSAGES)) - goto out; - - ++their_counter; - - if (unlikely((COUNTER_WINDOW_SIZE + their_counter) < -- counter->receive.counter)) -+ counter->counter)) - goto out; - - index = their_counter >> ilog2(BITS_PER_LONG); - -- if (likely(their_counter > counter->receive.counter)) { -- index_current = counter->receive.counter >> ilog2(BITS_PER_LONG); -+ if (likely(their_counter > counter->counter)) { -+ index_current = counter->counter >> ilog2(BITS_PER_LONG); - top = min_t(unsigned long, index - index_current, - COUNTER_BITS_TOTAL / BITS_PER_LONG); - for (i = 1; i <= top; ++i) -- counter->receive.backtrack[(i + index_current) & -+ counter->backtrack[(i + index_current) & - ((COUNTER_BITS_TOTAL / BITS_PER_LONG) - 1)] = 0; -- counter->receive.counter = their_counter; -+ counter->counter = their_counter; - } - - index &= (COUNTER_BITS_TOTAL / BITS_PER_LONG) - 1; - ret = !test_and_set_bit(their_counter & (BITS_PER_LONG - 1), -- &counter->receive.backtrack[index]); -+ &counter->backtrack[index]); - - out: -- spin_unlock_bh(&counter->receive.lock); -+ spin_unlock_bh(&counter->lock); - return ret; - } - -@@ -472,12 +472,12 @@ int wg_packet_rx_poll(struct napi_struct - if (unlikely(state != PACKET_STATE_CRYPTED)) - goto next; - -- if (unlikely(!counter_validate(&keypair->receiving.counter, -+ if (unlikely(!counter_validate(&keypair->receiving_counter, - PACKET_CB(skb)->nonce))) { - net_dbg_ratelimited("%s: Packet has invalid nonce %llu (max %llu)\n", - peer->device->dev->name, - PACKET_CB(skb)->nonce, -- keypair->receiving.counter.receive.counter); -+ keypair->receiving_counter.counter); - goto next; - } - -@@ -511,8 +511,8 @@ void wg_packet_decrypt_worker(struct wor - struct sk_buff *skb; - - while ((skb = ptr_ring_consume_bh(&queue->ring)) != NULL) { -- enum packet_state state = likely(decrypt_packet(skb, -- &PACKET_CB(skb)->keypair->receiving)) ? -+ enum packet_state state = -+ likely(decrypt_packet(skb, PACKET_CB(skb)->keypair)) ? 
- PACKET_STATE_CRYPTED : PACKET_STATE_DEAD; - wg_queue_enqueue_per_peer_napi(skb, state); - if (need_resched()) ---- a/drivers/net/wireguard/selftest/counter.c -+++ b/drivers/net/wireguard/selftest/counter.c -@@ -6,18 +6,24 @@ - #ifdef DEBUG - bool __init wg_packet_counter_selftest(void) - { -+ struct noise_replay_counter *counter; - unsigned int test_num = 0, i; -- union noise_counter counter; - bool success = true; - --#define T_INIT do { \ -- memset(&counter, 0, sizeof(union noise_counter)); \ -- spin_lock_init(&counter.receive.lock); \ -+ counter = kmalloc(sizeof(*counter), GFP_KERNEL); -+ if (unlikely(!counter)) { -+ pr_err("nonce counter self-test malloc: FAIL\n"); -+ return false; -+ } -+ -+#define T_INIT do { \ -+ memset(counter, 0, sizeof(*counter)); \ -+ spin_lock_init(&counter->lock); \ - } while (0) - #define T_LIM (COUNTER_WINDOW_SIZE + 1) - #define T(n, v) do { \ - ++test_num; \ -- if (counter_validate(&counter, n) != (v)) { \ -+ if (counter_validate(counter, n) != (v)) { \ - pr_err("nonce counter self-test %u: FAIL\n", \ - test_num); \ - success = false; \ -@@ -99,6 +105,7 @@ bool __init wg_packet_counter_selftest(v - - if (success) - pr_info("nonce counter self-tests: pass\n"); -+ kfree(counter); - return success; - } - #endif ---- a/drivers/net/wireguard/send.c -+++ b/drivers/net/wireguard/send.c -@@ -129,7 +129,7 @@ static void keep_key_fresh(struct wg_pee - rcu_read_lock_bh(); - keypair = rcu_dereference_bh(peer->keypairs.current_keypair); - send = keypair && READ_ONCE(keypair->sending.is_valid) && -- (atomic64_read(&keypair->sending.counter.counter) > REKEY_AFTER_MESSAGES || -+ (atomic64_read(&keypair->sending_counter) > REKEY_AFTER_MESSAGES || - (keypair->i_am_the_initiator && - wg_birthdate_has_expired(keypair->sending.birthdate, REKEY_AFTER_TIME))); - rcu_read_unlock_bh(); -@@ -349,7 +349,6 @@ void wg_packet_purge_staged_packets(stru - - void wg_packet_send_staged_packets(struct wg_peer *peer) - { -- struct noise_symmetric_key *key; - struct noise_keypair *keypair; - struct sk_buff_head packets; - struct sk_buff *skb; -@@ -369,10 +368,9 @@ void wg_packet_send_staged_packets(struc - rcu_read_unlock_bh(); - if (unlikely(!keypair)) - goto out_nokey; -- key = &keypair->sending; -- if (unlikely(!READ_ONCE(key->is_valid))) -+ if (unlikely(!READ_ONCE(keypair->sending.is_valid))) - goto out_nokey; -- if (unlikely(wg_birthdate_has_expired(key->birthdate, -+ if (unlikely(wg_birthdate_has_expired(keypair->sending.birthdate, - REJECT_AFTER_TIME))) - goto out_invalid; - -@@ -387,7 +385,7 @@ void wg_packet_send_staged_packets(struc - */ - PACKET_CB(skb)->ds = ip_tunnel_ecn_encap(0, ip_hdr(skb), skb); - PACKET_CB(skb)->nonce = -- atomic64_inc_return(&key->counter.counter) - 1; -+ atomic64_inc_return(&keypair->sending_counter) - 1; - if (unlikely(PACKET_CB(skb)->nonce >= REJECT_AFTER_MESSAGES)) - goto out_invalid; - } -@@ -399,7 +397,7 @@ void wg_packet_send_staged_packets(struc - return; - - out_invalid: -- WRITE_ONCE(key->is_valid, false); -+ WRITE_ONCE(keypair->sending.is_valid, false); - out_nokey: - wg_noise_keypair_put(keypair, false); - diff --git a/target/linux/generic/backport-5.4/080-wireguard-0108-wireguard-noise-do-not-assign-initiation-time-in-if-.patch b/target/linux/generic/backport-5.4/080-wireguard-0108-wireguard-noise-do-not-assign-initiation-time-in-if-.patch deleted file mode 100644 index a53c764708..0000000000 --- a/target/linux/generic/backport-5.4/080-wireguard-0108-wireguard-noise-do-not-assign-initiation-time-in-if-.patch +++ /dev/null @@ -1,33 +0,0 @@ -From 
0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Frank Werner-Krippendorf -Date: Tue, 23 Jun 2020 03:59:44 -0600 -Subject: [PATCH] wireguard: noise: do not assign initiation time in if - condition - -commit 558b353c9c2a717509f291c066c6bd8f5f5e21be upstream. - -Fixes an error condition reported by checkpatch.pl which caused by -assigning a variable in an if condition in wg_noise_handshake_consume_ -initiation(). - -Signed-off-by: Frank Werner-Krippendorf -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. Donenfeld ---- - drivers/net/wireguard/noise.c | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - ---- a/drivers/net/wireguard/noise.c -+++ b/drivers/net/wireguard/noise.c -@@ -617,8 +617,8 @@ wg_noise_handshake_consume_initiation(st - memcpy(handshake->hash, hash, NOISE_HASH_LEN); - memcpy(handshake->chaining_key, chaining_key, NOISE_HASH_LEN); - handshake->remote_index = src->sender_index; -- if ((s64)(handshake->last_initiation_consumption - -- (initiation_consumption = ktime_get_coarse_boottime_ns())) < 0) -+ initiation_consumption = ktime_get_coarse_boottime_ns(); -+ if ((s64)(handshake->last_initiation_consumption - initiation_consumption) < 0) - handshake->last_initiation_consumption = initiation_consumption; - handshake->state = HANDSHAKE_CONSUMED_INITIATION; - up_write(&handshake->lock); diff --git a/target/linux/generic/backport-5.4/080-wireguard-0108-wireguard-receive-account-for-napi_gro_receive-never.patch b/target/linux/generic/backport-5.4/080-wireguard-0108-wireguard-receive-account-for-napi_gro_receive-never.patch new file mode 100644 index 0000000000..eceb0b9255 --- /dev/null +++ b/target/linux/generic/backport-5.4/080-wireguard-0108-wireguard-receive-account-for-napi_gro_receive-never.patch @@ -0,0 +1,42 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: "Jason A. Donenfeld" +Date: Wed, 24 Jun 2020 16:06:03 -0600 +Subject: [PATCH] wireguard: receive: account for napi_gro_receive never + returning GRO_DROP + +commit df08126e3833e9dca19e2407db5f5860a7c194fb upstream. + +The napi_gro_receive function no longer returns GRO_DROP ever, making +handling GRO_DROP dead code. This commit removes that dead code. +Further, it's not even clear that device drivers have any business in +taking action after passing off received packets; that's arguably out of +their hands. + +Fixes: e7096c131e51 ("net: WireGuard secure network tunnel") +Fixes: 6570bc79c0df ("net: core: use listified Rx for GRO_NORMAL in napi_gro_receive()") +Signed-off-by: Jason A. Donenfeld +Signed-off-by: David S. Miller +Signed-off-by: Jason A. 
Donenfeld +--- + drivers/net/wireguard/receive.c | 10 ++-------- + 1 file changed, 2 insertions(+), 8 deletions(-) + +--- a/drivers/net/wireguard/receive.c ++++ b/drivers/net/wireguard/receive.c +@@ -414,14 +414,8 @@ static void wg_packet_consume_data_done( + if (unlikely(routed_peer != peer)) + goto dishonest_packet_peer; + +- if (unlikely(napi_gro_receive(&peer->napi, skb) == GRO_DROP)) { +- ++dev->stats.rx_dropped; +- net_dbg_ratelimited("%s: Failed to give packet to userspace from peer %llu (%pISpfsc)\n", +- dev->name, peer->internal_id, +- &peer->endpoint.addr); +- } else { +- update_rx_stats(peer, message_data_len(len_before_trim)); +- } ++ napi_gro_receive(&peer->napi, skb); ++ update_rx_stats(peer, message_data_len(len_before_trim)); + return; + + dishonest_packet_peer: diff --git a/target/linux/generic/backport-5.4/080-wireguard-0109-net-ip_tunnel-add-header_ops-for-layer-3-devices.patch b/target/linux/generic/backport-5.4/080-wireguard-0109-net-ip_tunnel-add-header_ops-for-layer-3-devices.patch new file mode 100644 index 0000000000..cfd6b1457c --- /dev/null +++ b/target/linux/generic/backport-5.4/080-wireguard-0109-net-ip_tunnel-add-header_ops-for-layer-3-devices.patch @@ -0,0 +1,58 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: "Jason A. Donenfeld" +Date: Mon, 29 Jun 2020 19:06:18 -0600 +Subject: [PATCH] net: ip_tunnel: add header_ops for layer 3 devices + +commit 2606aff916854b61234bf85001be9777bab2d5f8 upstream. + +Some devices that take straight up layer 3 packets benefit from having a +shared header_ops so that AF_PACKET sockets can inject packets that are +recognized. This shared infrastructure will be used by other drivers +that currently can't inject packets using AF_PACKET. It also exposes the +parser function, as it is useful in standalone form too. + +Signed-off-by: Jason A. Donenfeld +Acked-by: Willem de Bruijn +Signed-off-by: David S. Miller +Signed-off-by: Jason A. Donenfeld +--- + include/net/ip_tunnels.h | 3 +++ + net/ipv4/ip_tunnel_core.c | 18 ++++++++++++++++++ + 2 files changed, 21 insertions(+) + +--- a/include/net/ip_tunnels.h ++++ b/include/net/ip_tunnels.h +@@ -289,6 +289,9 @@ int ip_tunnel_newlink(struct net_device + struct ip_tunnel_parm *p, __u32 fwmark); + void ip_tunnel_setup(struct net_device *dev, unsigned int net_id); + ++extern const struct header_ops ip_tunnel_header_ops; ++__be16 ip_tunnel_parse_protocol(const struct sk_buff *skb); ++ + struct ip_tunnel_encap_ops { + size_t (*encap_hlen)(struct ip_tunnel_encap *e); + int (*build_header)(struct sk_buff *skb, struct ip_tunnel_encap *e, +--- a/net/ipv4/ip_tunnel_core.c ++++ b/net/ipv4/ip_tunnel_core.c +@@ -446,3 +446,21 @@ void ip_tunnel_unneed_metadata(void) + static_branch_dec(&ip_tunnel_metadata_cnt); + } + EXPORT_SYMBOL_GPL(ip_tunnel_unneed_metadata); ++ ++/* Returns either the correct skb->protocol value, or 0 if invalid. 
*/ ++__be16 ip_tunnel_parse_protocol(const struct sk_buff *skb) ++{ ++ if (skb_network_header(skb) >= skb->head && ++ (skb_network_header(skb) + sizeof(struct iphdr)) <= skb_tail_pointer(skb) && ++ ip_hdr(skb)->version == 4) ++ return htons(ETH_P_IP); ++ if (skb_network_header(skb) >= skb->head && ++ (skb_network_header(skb) + sizeof(struct ipv6hdr)) <= skb_tail_pointer(skb) && ++ ipv6_hdr(skb)->version == 6) ++ return htons(ETH_P_IPV6); ++ return 0; ++} ++EXPORT_SYMBOL(ip_tunnel_parse_protocol); ++ ++const struct header_ops ip_tunnel_header_ops = { .parse_protocol = ip_tunnel_parse_protocol }; ++EXPORT_SYMBOL(ip_tunnel_header_ops); diff --git a/target/linux/generic/backport-5.4/080-wireguard-0109-wireguard-device-avoid-circular-netns-references.patch b/target/linux/generic/backport-5.4/080-wireguard-0109-wireguard-device-avoid-circular-netns-references.patch deleted file mode 100644 index 013023a3e2..0000000000 --- a/target/linux/generic/backport-5.4/080-wireguard-0109-wireguard-device-avoid-circular-netns-references.patch +++ /dev/null @@ -1,296 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Tue, 23 Jun 2020 03:59:45 -0600 -Subject: [PATCH] wireguard: device: avoid circular netns references - -commit 900575aa33a3eaaef802b31de187a85c4a4b4bd0 upstream. - -Before, we took a reference to the creating netns if the new netns was -different. This caused issues with circular references, with two -wireguard interfaces swapping namespaces. The solution is to rather not -take any extra references at all, but instead simply invalidate the -creating netns pointer when that netns is deleted. - -In order to prevent this from happening again, this commit improves the -rough object leak tracking by allowing it to account for created and -destroyed interfaces, aside from just peers and keys. That then makes it -possible to check for the object leak when having two interfaces take a -reference to each others' namespaces. - -Fixes: e7096c131e51 ("net: WireGuard secure network tunnel") -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. Donenfeld ---- - drivers/net/wireguard/device.c | 58 ++++++++++------------ - drivers/net/wireguard/device.h | 3 +- - drivers/net/wireguard/netlink.c | 14 ++++-- - drivers/net/wireguard/socket.c | 25 +++++++--- - tools/testing/selftests/wireguard/netns.sh | 13 ++++- - 5 files changed, 67 insertions(+), 46 deletions(-) - ---- a/drivers/net/wireguard/device.c -+++ b/drivers/net/wireguard/device.c -@@ -45,17 +45,18 @@ static int wg_open(struct net_device *de - if (dev_v6) - dev_v6->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_NONE; - -+ mutex_lock(&wg->device_update_lock); - ret = wg_socket_init(wg, wg->incoming_port); - if (ret < 0) -- return ret; -- mutex_lock(&wg->device_update_lock); -+ goto out; - list_for_each_entry(peer, &wg->peer_list, peer_list) { - wg_packet_send_staged_packets(peer); - if (peer->persistent_keepalive_interval) - wg_packet_send_keepalive(peer); - } -+out: - mutex_unlock(&wg->device_update_lock); -- return 0; -+ return ret; - } - - #ifdef CONFIG_PM_SLEEP -@@ -225,6 +226,7 @@ static void wg_destruct(struct net_devic - list_del(&wg->device_list); - rtnl_unlock(); - mutex_lock(&wg->device_update_lock); -+ rcu_assign_pointer(wg->creating_net, NULL); - wg->incoming_port = 0; - wg_socket_reinit(wg, NULL, NULL); - /* The final references are cleared in the below calls to destroy_workqueue. 
*/ -@@ -240,13 +242,11 @@ static void wg_destruct(struct net_devic - skb_queue_purge(&wg->incoming_handshakes); - free_percpu(dev->tstats); - free_percpu(wg->incoming_handshakes_worker); -- if (wg->have_creating_net_ref) -- put_net(wg->creating_net); - kvfree(wg->index_hashtable); - kvfree(wg->peer_hashtable); - mutex_unlock(&wg->device_update_lock); - -- pr_debug("%s: Interface deleted\n", dev->name); -+ pr_debug("%s: Interface destroyed\n", dev->name); - free_netdev(dev); - } - -@@ -292,7 +292,7 @@ static int wg_newlink(struct net *src_ne - struct wg_device *wg = netdev_priv(dev); - int ret = -ENOMEM; - -- wg->creating_net = src_net; -+ rcu_assign_pointer(wg->creating_net, src_net); - init_rwsem(&wg->static_identity.lock); - mutex_init(&wg->socket_update_lock); - mutex_init(&wg->device_update_lock); -@@ -393,30 +393,26 @@ static struct rtnl_link_ops link_ops __r - .newlink = wg_newlink, - }; - --static int wg_netdevice_notification(struct notifier_block *nb, -- unsigned long action, void *data) -+static void wg_netns_pre_exit(struct net *net) - { -- struct net_device *dev = ((struct netdev_notifier_info *)data)->dev; -- struct wg_device *wg = netdev_priv(dev); -- -- ASSERT_RTNL(); -- -- if (action != NETDEV_REGISTER || dev->netdev_ops != &netdev_ops) -- return 0; -+ struct wg_device *wg; - -- if (dev_net(dev) == wg->creating_net && wg->have_creating_net_ref) { -- put_net(wg->creating_net); -- wg->have_creating_net_ref = false; -- } else if (dev_net(dev) != wg->creating_net && -- !wg->have_creating_net_ref) { -- wg->have_creating_net_ref = true; -- get_net(wg->creating_net); -+ rtnl_lock(); -+ list_for_each_entry(wg, &device_list, device_list) { -+ if (rcu_access_pointer(wg->creating_net) == net) { -+ pr_debug("%s: Creating namespace exiting\n", wg->dev->name); -+ netif_carrier_off(wg->dev); -+ mutex_lock(&wg->device_update_lock); -+ rcu_assign_pointer(wg->creating_net, NULL); -+ wg_socket_reinit(wg, NULL, NULL); -+ mutex_unlock(&wg->device_update_lock); -+ } - } -- return 0; -+ rtnl_unlock(); - } - --static struct notifier_block netdevice_notifier = { -- .notifier_call = wg_netdevice_notification -+static struct pernet_operations pernet_ops = { -+ .pre_exit = wg_netns_pre_exit - }; - - int __init wg_device_init(void) -@@ -429,18 +425,18 @@ int __init wg_device_init(void) - return ret; - #endif - -- ret = register_netdevice_notifier(&netdevice_notifier); -+ ret = register_pernet_device(&pernet_ops); - if (ret) - goto error_pm; - - ret = rtnl_link_register(&link_ops); - if (ret) -- goto error_netdevice; -+ goto error_pernet; - - return 0; - --error_netdevice: -- unregister_netdevice_notifier(&netdevice_notifier); -+error_pernet: -+ unregister_pernet_device(&pernet_ops); - error_pm: - #ifdef CONFIG_PM_SLEEP - unregister_pm_notifier(&pm_notifier); -@@ -451,7 +447,7 @@ error_pm: - void wg_device_uninit(void) - { - rtnl_link_unregister(&link_ops); -- unregister_netdevice_notifier(&netdevice_notifier); -+ unregister_pernet_device(&pernet_ops); - #ifdef CONFIG_PM_SLEEP - unregister_pm_notifier(&pm_notifier); - #endif ---- a/drivers/net/wireguard/device.h -+++ b/drivers/net/wireguard/device.h -@@ -40,7 +40,7 @@ struct wg_device { - struct net_device *dev; - struct crypt_queue encrypt_queue, decrypt_queue; - struct sock __rcu *sock4, *sock6; -- struct net *creating_net; -+ struct net __rcu *creating_net; - struct noise_static_identity static_identity; - struct workqueue_struct *handshake_receive_wq, *handshake_send_wq; - struct workqueue_struct *packet_crypt_wq; -@@ -56,7 +56,6 @@ struct wg_device 
{ - unsigned int num_peers, device_update_gen; - u32 fwmark; - u16 incoming_port; -- bool have_creating_net_ref; - }; - - int wg_device_init(void); ---- a/drivers/net/wireguard/netlink.c -+++ b/drivers/net/wireguard/netlink.c -@@ -517,11 +517,15 @@ static int wg_set_device(struct sk_buff - if (flags & ~__WGDEVICE_F_ALL) - goto out; - -- ret = -EPERM; -- if ((info->attrs[WGDEVICE_A_LISTEN_PORT] || -- info->attrs[WGDEVICE_A_FWMARK]) && -- !ns_capable(wg->creating_net->user_ns, CAP_NET_ADMIN)) -- goto out; -+ if (info->attrs[WGDEVICE_A_LISTEN_PORT] || info->attrs[WGDEVICE_A_FWMARK]) { -+ struct net *net; -+ rcu_read_lock(); -+ net = rcu_dereference(wg->creating_net); -+ ret = !net || !ns_capable(net->user_ns, CAP_NET_ADMIN) ? -EPERM : 0; -+ rcu_read_unlock(); -+ if (ret) -+ goto out; -+ } - - ++wg->device_update_gen; - ---- a/drivers/net/wireguard/socket.c -+++ b/drivers/net/wireguard/socket.c -@@ -347,6 +347,7 @@ static void set_sock_opts(struct socket - - int wg_socket_init(struct wg_device *wg, u16 port) - { -+ struct net *net; - int ret; - struct udp_tunnel_sock_cfg cfg = { - .sk_user_data = wg, -@@ -371,37 +372,47 @@ int wg_socket_init(struct wg_device *wg, - }; - #endif - -+ rcu_read_lock(); -+ net = rcu_dereference(wg->creating_net); -+ net = net ? maybe_get_net(net) : NULL; -+ rcu_read_unlock(); -+ if (unlikely(!net)) -+ return -ENONET; -+ - #if IS_ENABLED(CONFIG_IPV6) - retry: - #endif - -- ret = udp_sock_create(wg->creating_net, &port4, &new4); -+ ret = udp_sock_create(net, &port4, &new4); - if (ret < 0) { - pr_err("%s: Could not create IPv4 socket\n", wg->dev->name); -- return ret; -+ goto out; - } - set_sock_opts(new4); -- setup_udp_tunnel_sock(wg->creating_net, new4, &cfg); -+ setup_udp_tunnel_sock(net, new4, &cfg); - - #if IS_ENABLED(CONFIG_IPV6) - if (ipv6_mod_enabled()) { - port6.local_udp_port = inet_sk(new4->sk)->inet_sport; -- ret = udp_sock_create(wg->creating_net, &port6, &new6); -+ ret = udp_sock_create(net, &port6, &new6); - if (ret < 0) { - udp_tunnel_sock_release(new4); - if (ret == -EADDRINUSE && !port && retries++ < 100) - goto retry; - pr_err("%s: Could not create IPv6 socket\n", - wg->dev->name); -- return ret; -+ goto out; - } - set_sock_opts(new6); -- setup_udp_tunnel_sock(wg->creating_net, new6, &cfg); -+ setup_udp_tunnel_sock(net, new6, &cfg); - } - #endif - - wg_socket_reinit(wg, new4->sk, new6 ? new6->sk : NULL); -- return 0; -+ ret = 0; -+out: -+ put_net(net); -+ return ret; - } - - void wg_socket_reinit(struct wg_device *wg, struct sock *new4, ---- a/tools/testing/selftests/wireguard/netns.sh -+++ b/tools/testing/selftests/wireguard/netns.sh -@@ -587,9 +587,20 @@ ip0 link set wg0 up - kill $ncat_pid - ip0 link del wg0 - -+# Ensure there aren't circular reference loops -+ip1 link add wg1 type wireguard -+ip2 link add wg2 type wireguard -+ip1 link set wg1 netns $netns2 -+ip2 link set wg2 netns $netns1 -+pp ip netns delete $netns1 -+pp ip netns delete $netns2 -+pp ip netns add $netns1 -+pp ip netns add $netns2 -+ -+sleep 2 # Wait for cleanup and grace periods - declare -A objects - while read -t 0.1 -r line 2>/dev/null || [[ $? 
-ne 142 ]]; do -- [[ $line =~ .*(wg[0-9]+:\ [A-Z][a-z]+\ [0-9]+)\ .*(created|destroyed).* ]] || continue -+ [[ $line =~ .*(wg[0-9]+:\ [A-Z][a-z]+\ ?[0-9]*)\ .*(created|destroyed).* ]] || continue - objects["${BASH_REMATCH[1]}"]+="${BASH_REMATCH[2]}" - done < /dev/kmsg - alldeleted=1 diff --git a/target/linux/generic/backport-5.4/080-wireguard-0110-wireguard-implement-header_ops-parse_protocol-for-AF.patch b/target/linux/generic/backport-5.4/080-wireguard-0110-wireguard-implement-header_ops-parse_protocol-for-AF.patch new file mode 100644 index 0000000000..415ecffeef --- /dev/null +++ b/target/linux/generic/backport-5.4/080-wireguard-0110-wireguard-implement-header_ops-parse_protocol-for-AF.patch @@ -0,0 +1,36 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: "Jason A. Donenfeld" +Date: Mon, 29 Jun 2020 19:06:20 -0600 +Subject: [PATCH] wireguard: implement header_ops->parse_protocol for AF_PACKET + +commit 01a4967c71c004f8ecad4ab57021348636502fa9 upstream. + +WireGuard uses skb->protocol to determine packet type, and bails out if +it's not set or set to something it's not expecting. For AF_PACKET +injection, we need to support its call chain of: + + packet_sendmsg -> packet_snd -> packet_parse_headers -> + dev_parse_header_protocol -> parse_protocol + +Without a valid parse_protocol, this returns zero, and wireguard then +rejects the skb. So, this wires up the ip_tunnel handler for layer 3 +packets for that case. + +Reported-by: Hans Wippel +Signed-off-by: Jason A. Donenfeld +Signed-off-by: David S. Miller +Signed-off-by: Jason A. Donenfeld +--- + drivers/net/wireguard/device.c | 1 + + 1 file changed, 1 insertion(+) + +--- a/drivers/net/wireguard/device.c ++++ b/drivers/net/wireguard/device.c +@@ -262,6 +262,7 @@ static void wg_setup(struct net_device * + max(sizeof(struct ipv6hdr), sizeof(struct iphdr)); + + dev->netdev_ops = &netdev_ops; ++ dev->header_ops = &ip_tunnel_header_ops; + dev->hard_header_len = 0; + dev->addr_len = 0; + dev->needed_headroom = DATA_PACKET_HEAD_ROOM; diff --git a/target/linux/generic/backport-5.4/080-wireguard-0110-wireguard-receive-account-for-napi_gro_receive-never.patch b/target/linux/generic/backport-5.4/080-wireguard-0110-wireguard-receive-account-for-napi_gro_receive-never.patch deleted file mode 100644 index eceb0b9255..0000000000 --- a/target/linux/generic/backport-5.4/080-wireguard-0110-wireguard-receive-account-for-napi_gro_receive-never.patch +++ /dev/null @@ -1,42 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Wed, 24 Jun 2020 16:06:03 -0600 -Subject: [PATCH] wireguard: receive: account for napi_gro_receive never - returning GRO_DROP - -commit df08126e3833e9dca19e2407db5f5860a7c194fb upstream. - -The napi_gro_receive function no longer returns GRO_DROP ever, making -handling GRO_DROP dead code. This commit removes that dead code. -Further, it's not even clear that device drivers have any business in -taking action after passing off received packets; that's arguably out of -their hands. - -Fixes: e7096c131e51 ("net: WireGuard secure network tunnel") -Fixes: 6570bc79c0df ("net: core: use listified Rx for GRO_NORMAL in napi_gro_receive()") -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. 
Donenfeld ---- - drivers/net/wireguard/receive.c | 10 ++-------- - 1 file changed, 2 insertions(+), 8 deletions(-) - ---- a/drivers/net/wireguard/receive.c -+++ b/drivers/net/wireguard/receive.c -@@ -414,14 +414,8 @@ static void wg_packet_consume_data_done( - if (unlikely(routed_peer != peer)) - goto dishonest_packet_peer; - -- if (unlikely(napi_gro_receive(&peer->napi, skb) == GRO_DROP)) { -- ++dev->stats.rx_dropped; -- net_dbg_ratelimited("%s: Failed to give packet to userspace from peer %llu (%pISpfsc)\n", -- dev->name, peer->internal_id, -- &peer->endpoint.addr); -- } else { -- update_rx_stats(peer, message_data_len(len_before_trim)); -- } -+ napi_gro_receive(&peer->napi, skb); -+ update_rx_stats(peer, message_data_len(len_before_trim)); - return; - - dishonest_packet_peer: diff --git a/target/linux/generic/backport-5.4/080-wireguard-0111-net-ip_tunnel-add-header_ops-for-layer-3-devices.patch b/target/linux/generic/backport-5.4/080-wireguard-0111-net-ip_tunnel-add-header_ops-for-layer-3-devices.patch deleted file mode 100644 index cfd6b1457c..0000000000 --- a/target/linux/generic/backport-5.4/080-wireguard-0111-net-ip_tunnel-add-header_ops-for-layer-3-devices.patch +++ /dev/null @@ -1,58 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Mon, 29 Jun 2020 19:06:18 -0600 -Subject: [PATCH] net: ip_tunnel: add header_ops for layer 3 devices - -commit 2606aff916854b61234bf85001be9777bab2d5f8 upstream. - -Some devices that take straight up layer 3 packets benefit from having a -shared header_ops so that AF_PACKET sockets can inject packets that are -recognized. This shared infrastructure will be used by other drivers -that currently can't inject packets using AF_PACKET. It also exposes the -parser function, as it is useful in standalone form too. - -Signed-off-by: Jason A. Donenfeld -Acked-by: Willem de Bruijn -Signed-off-by: David S. Miller -Signed-off-by: Jason A. Donenfeld ---- - include/net/ip_tunnels.h | 3 +++ - net/ipv4/ip_tunnel_core.c | 18 ++++++++++++++++++ - 2 files changed, 21 insertions(+) - ---- a/include/net/ip_tunnels.h -+++ b/include/net/ip_tunnels.h -@@ -289,6 +289,9 @@ int ip_tunnel_newlink(struct net_device - struct ip_tunnel_parm *p, __u32 fwmark); - void ip_tunnel_setup(struct net_device *dev, unsigned int net_id); - -+extern const struct header_ops ip_tunnel_header_ops; -+__be16 ip_tunnel_parse_protocol(const struct sk_buff *skb); -+ - struct ip_tunnel_encap_ops { - size_t (*encap_hlen)(struct ip_tunnel_encap *e); - int (*build_header)(struct sk_buff *skb, struct ip_tunnel_encap *e, ---- a/net/ipv4/ip_tunnel_core.c -+++ b/net/ipv4/ip_tunnel_core.c -@@ -446,3 +446,21 @@ void ip_tunnel_unneed_metadata(void) - static_branch_dec(&ip_tunnel_metadata_cnt); - } - EXPORT_SYMBOL_GPL(ip_tunnel_unneed_metadata); -+ -+/* Returns either the correct skb->protocol value, or 0 if invalid. 
*/ -+__be16 ip_tunnel_parse_protocol(const struct sk_buff *skb) -+{ -+ if (skb_network_header(skb) >= skb->head && -+ (skb_network_header(skb) + sizeof(struct iphdr)) <= skb_tail_pointer(skb) && -+ ip_hdr(skb)->version == 4) -+ return htons(ETH_P_IP); -+ if (skb_network_header(skb) >= skb->head && -+ (skb_network_header(skb) + sizeof(struct ipv6hdr)) <= skb_tail_pointer(skb) && -+ ipv6_hdr(skb)->version == 6) -+ return htons(ETH_P_IPV6); -+ return 0; -+} -+EXPORT_SYMBOL(ip_tunnel_parse_protocol); -+ -+const struct header_ops ip_tunnel_header_ops = { .parse_protocol = ip_tunnel_parse_protocol }; -+EXPORT_SYMBOL(ip_tunnel_header_ops); diff --git a/target/linux/generic/backport-5.4/080-wireguard-0111-wireguard-queueing-make-use-of-ip_tunnel_parse_proto.patch b/target/linux/generic/backport-5.4/080-wireguard-0111-wireguard-queueing-make-use-of-ip_tunnel_parse_proto.patch new file mode 100644 index 0000000000..a777732ce7 --- /dev/null +++ b/target/linux/generic/backport-5.4/080-wireguard-0111-wireguard-queueing-make-use-of-ip_tunnel_parse_proto.patch @@ -0,0 +1,68 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: "Jason A. Donenfeld" +Date: Mon, 29 Jun 2020 19:06:21 -0600 +Subject: [PATCH] wireguard: queueing: make use of ip_tunnel_parse_protocol + +commit 1a574074ae7d1d745c16f7710655f38a53174c27 upstream. + +Now that wg_examine_packet_protocol has been added for general +consumption as ip_tunnel_parse_protocol, it's possible to remove +wg_examine_packet_protocol and simply use the new +ip_tunnel_parse_protocol function directly. + +Signed-off-by: Jason A. Donenfeld +Signed-off-by: David S. Miller +Signed-off-by: Jason A. Donenfeld +--- + drivers/net/wireguard/queueing.h | 19 ++----------------- + drivers/net/wireguard/receive.c | 2 +- + 2 files changed, 3 insertions(+), 18 deletions(-) + +--- a/drivers/net/wireguard/queueing.h ++++ b/drivers/net/wireguard/queueing.h +@@ -11,6 +11,7 @@ + #include + #include + #include ++#include + + struct wg_device; + struct wg_peer; +@@ -65,25 +66,9 @@ struct packet_cb { + #define PACKET_CB(skb) ((struct packet_cb *)((skb)->cb)) + #define PACKET_PEER(skb) (PACKET_CB(skb)->keypair->entry.peer) + +-/* Returns either the correct skb->protocol value, or 0 if invalid. 
*/ +-static inline __be16 wg_examine_packet_protocol(struct sk_buff *skb) +-{ +- if (skb_network_header(skb) >= skb->head && +- (skb_network_header(skb) + sizeof(struct iphdr)) <= +- skb_tail_pointer(skb) && +- ip_hdr(skb)->version == 4) +- return htons(ETH_P_IP); +- if (skb_network_header(skb) >= skb->head && +- (skb_network_header(skb) + sizeof(struct ipv6hdr)) <= +- skb_tail_pointer(skb) && +- ipv6_hdr(skb)->version == 6) +- return htons(ETH_P_IPV6); +- return 0; +-} +- + static inline bool wg_check_packet_protocol(struct sk_buff *skb) + { +- __be16 real_protocol = wg_examine_packet_protocol(skb); ++ __be16 real_protocol = ip_tunnel_parse_protocol(skb); + return real_protocol && skb->protocol == real_protocol; + } + +--- a/drivers/net/wireguard/receive.c ++++ b/drivers/net/wireguard/receive.c +@@ -387,7 +387,7 @@ static void wg_packet_consume_data_done( + */ + skb->ip_summed = CHECKSUM_UNNECESSARY; + skb->csum_level = ~0; /* All levels */ +- skb->protocol = wg_examine_packet_protocol(skb); ++ skb->protocol = ip_tunnel_parse_protocol(skb); + if (skb->protocol == htons(ETH_P_IP)) { + len = ntohs(ip_hdr(skb)->tot_len); + if (unlikely(len < sizeof(struct iphdr))) diff --git a/target/linux/generic/backport-5.4/080-wireguard-0112-netlink-consistently-use-NLA_POLICY_EXACT_LEN.patch b/target/linux/generic/backport-5.4/080-wireguard-0112-netlink-consistently-use-NLA_POLICY_EXACT_LEN.patch new file mode 100644 index 0000000000..4b2712bb2d --- /dev/null +++ b/target/linux/generic/backport-5.4/080-wireguard-0112-netlink-consistently-use-NLA_POLICY_EXACT_LEN.patch @@ -0,0 +1,49 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Johannes Berg +Date: Tue, 18 Aug 2020 10:17:31 +0200 +Subject: [PATCH] netlink: consistently use NLA_POLICY_EXACT_LEN() + +commit 8140860c817f3e9f78bcd1e420b9777ddcbaa629 upstream. + +Change places that open-code NLA_POLICY_EXACT_LEN() to +use the macro instead, giving us flexibility in how we +handle the details of the macro. + +Signed-off-by: Johannes Berg +Acked-by: Matthieu Baerts +Signed-off-by: David S. Miller +[Jason: only picked the drivers/net/wireguard/* part] +Signed-off-by: Jason A. 
Donenfeld +--- + drivers/net/wireguard/netlink.c | 10 +++++----- + 1 file changed, 5 insertions(+), 5 deletions(-) + +--- a/drivers/net/wireguard/netlink.c ++++ b/drivers/net/wireguard/netlink.c +@@ -22,8 +22,8 @@ static struct genl_family genl_family; + static const struct nla_policy device_policy[WGDEVICE_A_MAX + 1] = { + [WGDEVICE_A_IFINDEX] = { .type = NLA_U32 }, + [WGDEVICE_A_IFNAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 }, +- [WGDEVICE_A_PRIVATE_KEY] = { .type = NLA_EXACT_LEN, .len = NOISE_PUBLIC_KEY_LEN }, +- [WGDEVICE_A_PUBLIC_KEY] = { .type = NLA_EXACT_LEN, .len = NOISE_PUBLIC_KEY_LEN }, ++ [WGDEVICE_A_PRIVATE_KEY] = NLA_POLICY_EXACT_LEN(NOISE_PUBLIC_KEY_LEN), ++ [WGDEVICE_A_PUBLIC_KEY] = NLA_POLICY_EXACT_LEN(NOISE_PUBLIC_KEY_LEN), + [WGDEVICE_A_FLAGS] = { .type = NLA_U32 }, + [WGDEVICE_A_LISTEN_PORT] = { .type = NLA_U16 }, + [WGDEVICE_A_FWMARK] = { .type = NLA_U32 }, +@@ -31,12 +31,12 @@ static const struct nla_policy device_po + }; + + static const struct nla_policy peer_policy[WGPEER_A_MAX + 1] = { +- [WGPEER_A_PUBLIC_KEY] = { .type = NLA_EXACT_LEN, .len = NOISE_PUBLIC_KEY_LEN }, +- [WGPEER_A_PRESHARED_KEY] = { .type = NLA_EXACT_LEN, .len = NOISE_SYMMETRIC_KEY_LEN }, ++ [WGPEER_A_PUBLIC_KEY] = NLA_POLICY_EXACT_LEN(NOISE_PUBLIC_KEY_LEN), ++ [WGPEER_A_PRESHARED_KEY] = NLA_POLICY_EXACT_LEN(NOISE_SYMMETRIC_KEY_LEN), + [WGPEER_A_FLAGS] = { .type = NLA_U32 }, + [WGPEER_A_ENDPOINT] = { .type = NLA_MIN_LEN, .len = sizeof(struct sockaddr) }, + [WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL] = { .type = NLA_U16 }, +- [WGPEER_A_LAST_HANDSHAKE_TIME] = { .type = NLA_EXACT_LEN, .len = sizeof(struct __kernel_timespec) }, ++ [WGPEER_A_LAST_HANDSHAKE_TIME] = NLA_POLICY_EXACT_LEN(sizeof(struct __kernel_timespec)), + [WGPEER_A_RX_BYTES] = { .type = NLA_U64 }, + [WGPEER_A_TX_BYTES] = { .type = NLA_U64 }, + [WGPEER_A_ALLOWEDIPS] = { .type = NLA_NESTED }, diff --git a/target/linux/generic/backport-5.4/080-wireguard-0112-wireguard-implement-header_ops-parse_protocol-for-AF.patch b/target/linux/generic/backport-5.4/080-wireguard-0112-wireguard-implement-header_ops-parse_protocol-for-AF.patch deleted file mode 100644 index 415ecffeef..0000000000 --- a/target/linux/generic/backport-5.4/080-wireguard-0112-wireguard-implement-header_ops-parse_protocol-for-AF.patch +++ /dev/null @@ -1,36 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Mon, 29 Jun 2020 19:06:20 -0600 -Subject: [PATCH] wireguard: implement header_ops->parse_protocol for AF_PACKET - -commit 01a4967c71c004f8ecad4ab57021348636502fa9 upstream. - -WireGuard uses skb->protocol to determine packet type, and bails out if -it's not set or set to something it's not expecting. For AF_PACKET -injection, we need to support its call chain of: - - packet_sendmsg -> packet_snd -> packet_parse_headers -> - dev_parse_header_protocol -> parse_protocol - -Without a valid parse_protocol, this returns zero, and wireguard then -rejects the skb. So, this wires up the ip_tunnel handler for layer 3 -packets for that case. - -Reported-by: Hans Wippel -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. 
Donenfeld ---- - drivers/net/wireguard/device.c | 1 + - 1 file changed, 1 insertion(+) - ---- a/drivers/net/wireguard/device.c -+++ b/drivers/net/wireguard/device.c -@@ -262,6 +262,7 @@ static void wg_setup(struct net_device * - max(sizeof(struct ipv6hdr), sizeof(struct iphdr)); - - dev->netdev_ops = &netdev_ops; -+ dev->header_ops = &ip_tunnel_header_ops; - dev->hard_header_len = 0; - dev->addr_len = 0; - dev->needed_headroom = DATA_PACKET_HEAD_ROOM; diff --git a/target/linux/generic/backport-5.4/080-wireguard-0113-netlink-consistently-use-NLA_POLICY_MIN_LEN.patch b/target/linux/generic/backport-5.4/080-wireguard-0113-netlink-consistently-use-NLA_POLICY_MIN_LEN.patch new file mode 100644 index 0000000000..4b414bc309 --- /dev/null +++ b/target/linux/generic/backport-5.4/080-wireguard-0113-netlink-consistently-use-NLA_POLICY_MIN_LEN.patch @@ -0,0 +1,39 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Johannes Berg +Date: Tue, 18 Aug 2020 10:17:32 +0200 +Subject: [PATCH] netlink: consistently use NLA_POLICY_MIN_LEN() + +commit bc0435855041d7fff0b83dd992fc4be34aa11afb upstream. + +Change places that open-code NLA_POLICY_MIN_LEN() to +use the macro instead, giving us flexibility in how we +handle the details of the macro. + +Signed-off-by: Johannes Berg +Signed-off-by: David S. Miller +[Jason: only picked the drivers/net/wireguard/* part] +Signed-off-by: Jason A. Donenfeld +--- + drivers/net/wireguard/netlink.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +--- a/drivers/net/wireguard/netlink.c ++++ b/drivers/net/wireguard/netlink.c +@@ -34,7 +34,7 @@ static const struct nla_policy peer_poli + [WGPEER_A_PUBLIC_KEY] = NLA_POLICY_EXACT_LEN(NOISE_PUBLIC_KEY_LEN), + [WGPEER_A_PRESHARED_KEY] = NLA_POLICY_EXACT_LEN(NOISE_SYMMETRIC_KEY_LEN), + [WGPEER_A_FLAGS] = { .type = NLA_U32 }, +- [WGPEER_A_ENDPOINT] = { .type = NLA_MIN_LEN, .len = sizeof(struct sockaddr) }, ++ [WGPEER_A_ENDPOINT] = NLA_POLICY_MIN_LEN(sizeof(struct sockaddr)), + [WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL] = { .type = NLA_U16 }, + [WGPEER_A_LAST_HANDSHAKE_TIME] = NLA_POLICY_EXACT_LEN(sizeof(struct __kernel_timespec)), + [WGPEER_A_RX_BYTES] = { .type = NLA_U64 }, +@@ -45,7 +45,7 @@ static const struct nla_policy peer_poli + + static const struct nla_policy allowedip_policy[WGALLOWEDIP_A_MAX + 1] = { + [WGALLOWEDIP_A_FAMILY] = { .type = NLA_U16 }, +- [WGALLOWEDIP_A_IPADDR] = { .type = NLA_MIN_LEN, .len = sizeof(struct in_addr) }, ++ [WGALLOWEDIP_A_IPADDR] = NLA_POLICY_MIN_LEN(sizeof(struct in_addr)), + [WGALLOWEDIP_A_CIDR_MASK] = { .type = NLA_U8 } + }; + diff --git a/target/linux/generic/backport-5.4/080-wireguard-0113-wireguard-queueing-make-use-of-ip_tunnel_parse_proto.patch b/target/linux/generic/backport-5.4/080-wireguard-0113-wireguard-queueing-make-use-of-ip_tunnel_parse_proto.patch deleted file mode 100644 index a777732ce7..0000000000 --- a/target/linux/generic/backport-5.4/080-wireguard-0113-wireguard-queueing-make-use-of-ip_tunnel_parse_proto.patch +++ /dev/null @@ -1,68 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Mon, 29 Jun 2020 19:06:21 -0600 -Subject: [PATCH] wireguard: queueing: make use of ip_tunnel_parse_protocol - -commit 1a574074ae7d1d745c16f7710655f38a53174c27 upstream. - -Now that wg_examine_packet_protocol has been added for general -consumption as ip_tunnel_parse_protocol, it's possible to remove -wg_examine_packet_protocol and simply use the new -ip_tunnel_parse_protocol function directly. 
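
Both the helper removed here and its ip_tunnel_parse_protocol replacement classify a raw layer-3 packet essentially by its leading IP version nibble, after checking that a full IPv4/IPv6 header fits in the buffer. A minimal userspace sketch of that classification, with illustrative names and without the kernel's skb bounds checks:

    #include <arpa/inet.h>   /* htons() */
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define ETH_P_IP   0x0800  /* local copies, to stay self-contained */
    #define ETH_P_IPV6 0x86DD

    /* Classify a buffer that starts at the network header, the way the
     * kernel helpers do with skb data; returns 0 for non-IP payloads. */
    static uint16_t parse_l3_protocol(const uint8_t *pkt, size_t len)
    {
        if (len < 1)
            return 0;
        switch (pkt[0] >> 4) {          /* IP version nibble */
        case 4:  return htons(ETH_P_IP);
        case 6:  return htons(ETH_P_IPV6);
        default: return 0;
        }
    }

    int main(void)
    {
        const uint8_t v4[] = { 0x45 };  /* version 4, IHL 5 */
        const uint8_t v6[] = { 0x60 };  /* version 6 */

        printf("%s %s\n",
               parse_l3_protocol(v4, sizeof(v4)) == htons(ETH_P_IP)   ? "v4" : "?",
               parse_l3_protocol(v6, sizeof(v6)) == htons(ETH_P_IPV6) ? "v6" : "?");
        return 0;
    }
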
- -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. Donenfeld ---- - drivers/net/wireguard/queueing.h | 19 ++----------------- - drivers/net/wireguard/receive.c | 2 +- - 2 files changed, 3 insertions(+), 18 deletions(-) - ---- a/drivers/net/wireguard/queueing.h -+++ b/drivers/net/wireguard/queueing.h -@@ -11,6 +11,7 @@ - #include - #include - #include -+#include - - struct wg_device; - struct wg_peer; -@@ -65,25 +66,9 @@ struct packet_cb { - #define PACKET_CB(skb) ((struct packet_cb *)((skb)->cb)) - #define PACKET_PEER(skb) (PACKET_CB(skb)->keypair->entry.peer) - --/* Returns either the correct skb->protocol value, or 0 if invalid. */ --static inline __be16 wg_examine_packet_protocol(struct sk_buff *skb) --{ -- if (skb_network_header(skb) >= skb->head && -- (skb_network_header(skb) + sizeof(struct iphdr)) <= -- skb_tail_pointer(skb) && -- ip_hdr(skb)->version == 4) -- return htons(ETH_P_IP); -- if (skb_network_header(skb) >= skb->head && -- (skb_network_header(skb) + sizeof(struct ipv6hdr)) <= -- skb_tail_pointer(skb) && -- ipv6_hdr(skb)->version == 6) -- return htons(ETH_P_IPV6); -- return 0; --} -- - static inline bool wg_check_packet_protocol(struct sk_buff *skb) - { -- __be16 real_protocol = wg_examine_packet_protocol(skb); -+ __be16 real_protocol = ip_tunnel_parse_protocol(skb); - return real_protocol && skb->protocol == real_protocol; - } - ---- a/drivers/net/wireguard/receive.c -+++ b/drivers/net/wireguard/receive.c -@@ -387,7 +387,7 @@ static void wg_packet_consume_data_done( - */ - skb->ip_summed = CHECKSUM_UNNECESSARY; - skb->csum_level = ~0; /* All levels */ -- skb->protocol = wg_examine_packet_protocol(skb); -+ skb->protocol = ip_tunnel_parse_protocol(skb); - if (skb->protocol == htons(ETH_P_IP)) { - len = ntohs(ip_hdr(skb)->tot_len); - if (unlikely(len < sizeof(struct iphdr))) diff --git a/target/linux/generic/backport-5.4/080-wireguard-0114-netlink-consistently-use-NLA_POLICY_EXACT_LEN.patch b/target/linux/generic/backport-5.4/080-wireguard-0114-netlink-consistently-use-NLA_POLICY_EXACT_LEN.patch deleted file mode 100644 index 4b2712bb2d..0000000000 --- a/target/linux/generic/backport-5.4/080-wireguard-0114-netlink-consistently-use-NLA_POLICY_EXACT_LEN.patch +++ /dev/null @@ -1,49 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Johannes Berg -Date: Tue, 18 Aug 2020 10:17:31 +0200 -Subject: [PATCH] netlink: consistently use NLA_POLICY_EXACT_LEN() - -commit 8140860c817f3e9f78bcd1e420b9777ddcbaa629 upstream. - -Change places that open-code NLA_POLICY_EXACT_LEN() to -use the macro instead, giving us flexibility in how we -handle the details of the macro. - -Signed-off-by: Johannes Berg -Acked-by: Matthieu Baerts -Signed-off-by: David S. Miller -[Jason: only picked the drivers/net/wireguard/* part] -Signed-off-by: Jason A. 
Donenfeld ---- - drivers/net/wireguard/netlink.c | 10 +++++----- - 1 file changed, 5 insertions(+), 5 deletions(-) - ---- a/drivers/net/wireguard/netlink.c -+++ b/drivers/net/wireguard/netlink.c -@@ -22,8 +22,8 @@ static struct genl_family genl_family; - static const struct nla_policy device_policy[WGDEVICE_A_MAX + 1] = { - [WGDEVICE_A_IFINDEX] = { .type = NLA_U32 }, - [WGDEVICE_A_IFNAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 }, -- [WGDEVICE_A_PRIVATE_KEY] = { .type = NLA_EXACT_LEN, .len = NOISE_PUBLIC_KEY_LEN }, -- [WGDEVICE_A_PUBLIC_KEY] = { .type = NLA_EXACT_LEN, .len = NOISE_PUBLIC_KEY_LEN }, -+ [WGDEVICE_A_PRIVATE_KEY] = NLA_POLICY_EXACT_LEN(NOISE_PUBLIC_KEY_LEN), -+ [WGDEVICE_A_PUBLIC_KEY] = NLA_POLICY_EXACT_LEN(NOISE_PUBLIC_KEY_LEN), - [WGDEVICE_A_FLAGS] = { .type = NLA_U32 }, - [WGDEVICE_A_LISTEN_PORT] = { .type = NLA_U16 }, - [WGDEVICE_A_FWMARK] = { .type = NLA_U32 }, -@@ -31,12 +31,12 @@ static const struct nla_policy device_po - }; - - static const struct nla_policy peer_policy[WGPEER_A_MAX + 1] = { -- [WGPEER_A_PUBLIC_KEY] = { .type = NLA_EXACT_LEN, .len = NOISE_PUBLIC_KEY_LEN }, -- [WGPEER_A_PRESHARED_KEY] = { .type = NLA_EXACT_LEN, .len = NOISE_SYMMETRIC_KEY_LEN }, -+ [WGPEER_A_PUBLIC_KEY] = NLA_POLICY_EXACT_LEN(NOISE_PUBLIC_KEY_LEN), -+ [WGPEER_A_PRESHARED_KEY] = NLA_POLICY_EXACT_LEN(NOISE_SYMMETRIC_KEY_LEN), - [WGPEER_A_FLAGS] = { .type = NLA_U32 }, - [WGPEER_A_ENDPOINT] = { .type = NLA_MIN_LEN, .len = sizeof(struct sockaddr) }, - [WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL] = { .type = NLA_U16 }, -- [WGPEER_A_LAST_HANDSHAKE_TIME] = { .type = NLA_EXACT_LEN, .len = sizeof(struct __kernel_timespec) }, -+ [WGPEER_A_LAST_HANDSHAKE_TIME] = NLA_POLICY_EXACT_LEN(sizeof(struct __kernel_timespec)), - [WGPEER_A_RX_BYTES] = { .type = NLA_U64 }, - [WGPEER_A_TX_BYTES] = { .type = NLA_U64 }, - [WGPEER_A_ALLOWEDIPS] = { .type = NLA_NESTED }, diff --git a/target/linux/generic/backport-5.4/080-wireguard-0114-wireguard-noise-take-lock-when-removing-handshake-en.patch b/target/linux/generic/backport-5.4/080-wireguard-0114-wireguard-noise-take-lock-when-removing-handshake-en.patch new file mode 100644 index 0000000000..e80528c91b --- /dev/null +++ b/target/linux/generic/backport-5.4/080-wireguard-0114-wireguard-noise-take-lock-when-removing-handshake-en.patch @@ -0,0 +1,127 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: "Jason A. Donenfeld" +Date: Wed, 9 Sep 2020 13:58:14 +0200 +Subject: [PATCH] wireguard: noise: take lock when removing handshake entry + from table + +commit 9179ba31367bcf481c3c79b5f028c94faad9f30a upstream. + +Eric reported that syzkaller found a race of this variety: + +CPU 1 CPU 2 +-------------------------------------------|--------------------------------------- +wg_index_hashtable_replace(old, ...) | + if (hlist_unhashed(&old->index_hash)) | + | wg_index_hashtable_remove(old) + | hlist_del_init_rcu(&old->index_hash) + | old->index_hash.pprev = NULL + hlist_replace_rcu(&old->index_hash, ...) | + *old->index_hash.pprev | + +Syzbot wasn't actually able to reproduce this more than once or create a +reproducer, because the race window between checking "hlist_unhashed" and +calling "hlist_replace_rcu" is just so small. 
Adding an mdelay(5) or +similar there helps make this demonstrable using this simple script: + + #!/bin/bash + set -ex + trap 'kill $pid1; kill $pid2; ip link del wg0; ip link del wg1' EXIT + ip link add wg0 type wireguard + ip link add wg1 type wireguard + wg set wg0 private-key <(wg genkey) listen-port 9999 + wg set wg1 private-key <(wg genkey) peer $(wg show wg0 public-key) endpoint 127.0.0.1:9999 persistent-keepalive 1 + wg set wg0 peer $(wg show wg1 public-key) + ip link set wg0 up + yes link set wg1 up | ip -force -batch - & + pid1=$! + yes link set wg1 down | ip -force -batch - & + pid2=$! + wait + +The fundumental underlying problem is that we permit calls to wg_index_ +hashtable_remove(handshake.entry) without requiring the caller to take +the handshake mutex that is intended to protect members of handshake +during mutations. This is consistently the case with calls to wg_index_ +hashtable_insert(handshake.entry) and wg_index_hashtable_replace( +handshake.entry), but it's missing from a pertinent callsite of wg_ +index_hashtable_remove(handshake.entry). So, this patch makes sure that +mutex is taken. + +The original code was a little bit funky though, in the form of: + + remove(handshake.entry) + lock(), memzero(handshake.some_members), unlock() + remove(handshake.entry) + +The original intention of that double removal pattern outside the lock +appears to be some attempt to prevent insertions that might happen while +locks are dropped during expensive crypto operations, but actually, all +callers of wg_index_hashtable_insert(handshake.entry) take the write +lock and then explicitly check handshake.state, as they should, which +the aforementioned memzero clears, which means an insertion should +already be impossible. And regardless, the original intention was +necessarily racy, since it wasn't guaranteed that something else would +run after the unlock() instead of after the remove(). So, from a +soundness perspective, it seems positive to remove what looks like a +hack at best. 
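
The locking rationale above reduces to the usual rule that a check and the mutation it guards against must happen under the same lock: this patch takes the handshake lock before the removal, and the follow-up peerlookup patch moves the hlist_unhashed() test under the table lock. As a rough userspace analogy only, with pthreads and made-up names standing in for the kernel's locks and hash table:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative analogy, not kernel code: "hashed" is mutated under
     * entry->lock, so testing it is only meaningful under that same lock. */
    struct entry {
        pthread_mutex_t lock;
        bool hashed;
    };

    /* Racy shape (what the old code resembled): test outside the lock,
     * then act on a fact that may already be stale. */
    static bool replace_racy(struct entry *e)
    {
        if (!e->hashed)                 /* may flip right after this test */
            return false;
        pthread_mutex_lock(&e->lock);
        /* ... replace, assuming e is still hashed ... */
        pthread_mutex_unlock(&e->lock);
        return true;
    }

    /* Fixed shape: take the lock first, then test and act atomically with
     * respect to the removal path, which also runs under e->lock. */
    static bool replace_locked(struct entry *e)
    {
        bool ret;

        pthread_mutex_lock(&e->lock);
        ret = e->hashed;
        if (ret) {
            /* ... safe to replace ... */
        }
        pthread_mutex_unlock(&e->lock);
        return ret;
    }

    int main(void)
    {
        struct entry e = { .lock = PTHREAD_MUTEX_INITIALIZER, .hashed = true };

        printf("racy=%d locked=%d\n", replace_racy(&e), replace_locked(&e));
        return 0;
    }

The hunk below does the kernel-side equivalent: it moves wg_index_hashtable_remove() under down_write(&handshake->lock) and drops the second, now-redundant removal.
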
+ +The crash from both syzbot and from the script above is as follows: + + general protection fault, probably for non-canonical address 0xdffffc0000000000: 0000 [#1] PREEMPT SMP KASAN + KASAN: null-ptr-deref in range [0x0000000000000000-0x0000000000000007] + CPU: 0 PID: 7395 Comm: kworker/0:3 Not tainted 5.9.0-rc4-syzkaller #0 + Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 + Workqueue: wg-kex-wg1 wg_packet_handshake_receive_worker + RIP: 0010:hlist_replace_rcu include/linux/rculist.h:505 [inline] + RIP: 0010:wg_index_hashtable_replace+0x176/0x330 drivers/net/wireguard/peerlookup.c:174 + Code: 00 fc ff df 48 89 f9 48 c1 e9 03 80 3c 01 00 0f 85 44 01 00 00 48 b9 00 00 00 00 00 fc ff df 48 8b 45 10 48 89 c6 48 c1 ee 03 <80> 3c 0e 00 0f 85 06 01 00 00 48 85 d2 4c 89 28 74 47 e8 a3 4f b5 + RSP: 0018:ffffc90006a97bf8 EFLAGS: 00010246 + RAX: 0000000000000000 RBX: ffff888050ffc4f8 RCX: dffffc0000000000 + RDX: 0000000000000000 RSI: 0000000000000000 RDI: ffff88808e04e010 + RBP: ffff88808e04e000 R08: 0000000000000001 R09: ffff8880543d0000 + R10: ffffed100a87a000 R11: 000000000000016e R12: ffff8880543d0000 + R13: ffff88808e04e008 R14: ffff888050ffc508 R15: ffff888050ffc500 + FS: 0000000000000000(0000) GS:ffff8880ae600000(0000) knlGS:0000000000000000 + CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 + CR2: 00000000f5505db0 CR3: 0000000097cf7000 CR4: 00000000001526f0 + DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 + DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 + Call Trace: + wg_noise_handshake_begin_session+0x752/0xc9a drivers/net/wireguard/noise.c:820 + wg_receive_handshake_packet drivers/net/wireguard/receive.c:183 [inline] + wg_packet_handshake_receive_worker+0x33b/0x730 drivers/net/wireguard/receive.c:220 + process_one_work+0x94c/0x1670 kernel/workqueue.c:2269 + worker_thread+0x64c/0x1120 kernel/workqueue.c:2415 + kthread+0x3b5/0x4a0 kernel/kthread.c:292 + ret_from_fork+0x1f/0x30 arch/x86/entry/entry_64.S:294 + +Reported-by: syzbot +Reported-by: Eric Dumazet +Link: https://lore.kernel.org/wireguard/20200908145911.4090480-1-edumazet@google.com/ +Fixes: e7096c131e51 ("net: WireGuard secure network tunnel") +Signed-off-by: Jason A. Donenfeld +Signed-off-by: David S. Miller +Signed-off-by: Jason A. 
Donenfeld +--- + drivers/net/wireguard/noise.c | 5 +---- + 1 file changed, 1 insertion(+), 4 deletions(-) + +--- a/drivers/net/wireguard/noise.c ++++ b/drivers/net/wireguard/noise.c +@@ -87,15 +87,12 @@ static void handshake_zero(struct noise_ + + void wg_noise_handshake_clear(struct noise_handshake *handshake) + { ++ down_write(&handshake->lock); + wg_index_hashtable_remove( + handshake->entry.peer->device->index_hashtable, + &handshake->entry); +- down_write(&handshake->lock); + handshake_zero(handshake); + up_write(&handshake->lock); +- wg_index_hashtable_remove( +- handshake->entry.peer->device->index_hashtable, +- &handshake->entry); + } + + static struct noise_keypair *keypair_create(struct wg_peer *peer) diff --git a/target/linux/generic/backport-5.4/080-wireguard-0115-netlink-consistently-use-NLA_POLICY_MIN_LEN.patch b/target/linux/generic/backport-5.4/080-wireguard-0115-netlink-consistently-use-NLA_POLICY_MIN_LEN.patch deleted file mode 100644 index 4b414bc309..0000000000 --- a/target/linux/generic/backport-5.4/080-wireguard-0115-netlink-consistently-use-NLA_POLICY_MIN_LEN.patch +++ /dev/null @@ -1,39 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Johannes Berg -Date: Tue, 18 Aug 2020 10:17:32 +0200 -Subject: [PATCH] netlink: consistently use NLA_POLICY_MIN_LEN() - -commit bc0435855041d7fff0b83dd992fc4be34aa11afb upstream. - -Change places that open-code NLA_POLICY_MIN_LEN() to -use the macro instead, giving us flexibility in how we -handle the details of the macro. - -Signed-off-by: Johannes Berg -Signed-off-by: David S. Miller -[Jason: only picked the drivers/net/wireguard/* part] -Signed-off-by: Jason A. Donenfeld ---- - drivers/net/wireguard/netlink.c | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - ---- a/drivers/net/wireguard/netlink.c -+++ b/drivers/net/wireguard/netlink.c -@@ -34,7 +34,7 @@ static const struct nla_policy peer_poli - [WGPEER_A_PUBLIC_KEY] = NLA_POLICY_EXACT_LEN(NOISE_PUBLIC_KEY_LEN), - [WGPEER_A_PRESHARED_KEY] = NLA_POLICY_EXACT_LEN(NOISE_SYMMETRIC_KEY_LEN), - [WGPEER_A_FLAGS] = { .type = NLA_U32 }, -- [WGPEER_A_ENDPOINT] = { .type = NLA_MIN_LEN, .len = sizeof(struct sockaddr) }, -+ [WGPEER_A_ENDPOINT] = NLA_POLICY_MIN_LEN(sizeof(struct sockaddr)), - [WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL] = { .type = NLA_U16 }, - [WGPEER_A_LAST_HANDSHAKE_TIME] = NLA_POLICY_EXACT_LEN(sizeof(struct __kernel_timespec)), - [WGPEER_A_RX_BYTES] = { .type = NLA_U64 }, -@@ -45,7 +45,7 @@ static const struct nla_policy peer_poli - - static const struct nla_policy allowedip_policy[WGALLOWEDIP_A_MAX + 1] = { - [WGALLOWEDIP_A_FAMILY] = { .type = NLA_U16 }, -- [WGALLOWEDIP_A_IPADDR] = { .type = NLA_MIN_LEN, .len = sizeof(struct in_addr) }, -+ [WGALLOWEDIP_A_IPADDR] = NLA_POLICY_MIN_LEN(sizeof(struct in_addr)), - [WGALLOWEDIP_A_CIDR_MASK] = { .type = NLA_U8 } - }; - diff --git a/target/linux/generic/backport-5.4/080-wireguard-0115-wireguard-peerlookup-take-lock-before-checking-hash-.patch b/target/linux/generic/backport-5.4/080-wireguard-0115-wireguard-peerlookup-take-lock-before-checking-hash-.patch new file mode 100644 index 0000000000..e7f46ddf9c --- /dev/null +++ b/target/linux/generic/backport-5.4/080-wireguard-0115-wireguard-peerlookup-take-lock-before-checking-hash-.patch @@ -0,0 +1,62 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: "Jason A. 
Donenfeld" +Date: Wed, 9 Sep 2020 13:58:15 +0200 +Subject: [PATCH] wireguard: peerlookup: take lock before checking hash in + replace operation + +commit 6147f7b1e90ff09bd52afc8b9206a7fcd133daf7 upstream. + +Eric's suggested fix for the previous commit's mentioned race condition +was to simply take the table->lock in wg_index_hashtable_replace(). The +table->lock of the hash table is supposed to protect the bucket heads, +not the entires, but actually, since all the mutator functions are +already taking it, it makes sense to take it too for the test to +hlist_unhashed, as a defense in depth measure, so that it no longer +races with deletions, regardless of what other locks are protecting +individual entries. This is sensible from a performance perspective +because, as Eric pointed out, the case of being unhashed is already the +unlikely case, so this won't add common contention. And comparing +instructions, this basically doesn't make much of a difference other +than pushing and popping %r13, used by the new `bool ret`. More +generally, I like the idea of locking consistency across table mutator +functions, and this might let me rest slightly easier at night. + +Suggested-by: Eric Dumazet +Link: https://lore.kernel.org/wireguard/20200908145911.4090480-1-edumazet@google.com/ +Fixes: e7096c131e51 ("net: WireGuard secure network tunnel") +Signed-off-by: Jason A. Donenfeld +Signed-off-by: David S. Miller +Signed-off-by: Jason A. Donenfeld +--- + drivers/net/wireguard/peerlookup.c | 11 ++++++++--- + 1 file changed, 8 insertions(+), 3 deletions(-) + +--- a/drivers/net/wireguard/peerlookup.c ++++ b/drivers/net/wireguard/peerlookup.c +@@ -167,9 +167,13 @@ bool wg_index_hashtable_replace(struct i + struct index_hashtable_entry *old, + struct index_hashtable_entry *new) + { +- if (unlikely(hlist_unhashed(&old->index_hash))) +- return false; ++ bool ret; ++ + spin_lock_bh(&table->lock); ++ ret = !hlist_unhashed(&old->index_hash); ++ if (unlikely(!ret)) ++ goto out; ++ + new->index = old->index; + hlist_replace_rcu(&old->index_hash, &new->index_hash); + +@@ -180,8 +184,9 @@ bool wg_index_hashtable_replace(struct i + * simply gets dropped, which isn't terrible. + */ + INIT_HLIST_NODE(&old->index_hash); ++out: + spin_unlock_bh(&table->lock); +- return true; ++ return ret; + } + + void wg_index_hashtable_remove(struct index_hashtable *table, diff --git a/target/linux/generic/backport-5.4/080-wireguard-0116-wireguard-noise-take-lock-when-removing-handshake-en.patch b/target/linux/generic/backport-5.4/080-wireguard-0116-wireguard-noise-take-lock-when-removing-handshake-en.patch deleted file mode 100644 index e80528c91b..0000000000 --- a/target/linux/generic/backport-5.4/080-wireguard-0116-wireguard-noise-take-lock-when-removing-handshake-en.patch +++ /dev/null @@ -1,127 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Wed, 9 Sep 2020 13:58:14 +0200 -Subject: [PATCH] wireguard: noise: take lock when removing handshake entry - from table - -commit 9179ba31367bcf481c3c79b5f028c94faad9f30a upstream. - -Eric reported that syzkaller found a race of this variety: - -CPU 1 CPU 2 --------------------------------------------|--------------------------------------- -wg_index_hashtable_replace(old, ...) | - if (hlist_unhashed(&old->index_hash)) | - | wg_index_hashtable_remove(old) - | hlist_del_init_rcu(&old->index_hash) - | old->index_hash.pprev = NULL - hlist_replace_rcu(&old->index_hash, ...) 
| - *old->index_hash.pprev | - -Syzbot wasn't actually able to reproduce this more than once or create a -reproducer, because the race window between checking "hlist_unhashed" and -calling "hlist_replace_rcu" is just so small. Adding an mdelay(5) or -similar there helps make this demonstrable using this simple script: - - #!/bin/bash - set -ex - trap 'kill $pid1; kill $pid2; ip link del wg0; ip link del wg1' EXIT - ip link add wg0 type wireguard - ip link add wg1 type wireguard - wg set wg0 private-key <(wg genkey) listen-port 9999 - wg set wg1 private-key <(wg genkey) peer $(wg show wg0 public-key) endpoint 127.0.0.1:9999 persistent-keepalive 1 - wg set wg0 peer $(wg show wg1 public-key) - ip link set wg0 up - yes link set wg1 up | ip -force -batch - & - pid1=$! - yes link set wg1 down | ip -force -batch - & - pid2=$! - wait - -The fundumental underlying problem is that we permit calls to wg_index_ -hashtable_remove(handshake.entry) without requiring the caller to take -the handshake mutex that is intended to protect members of handshake -during mutations. This is consistently the case with calls to wg_index_ -hashtable_insert(handshake.entry) and wg_index_hashtable_replace( -handshake.entry), but it's missing from a pertinent callsite of wg_ -index_hashtable_remove(handshake.entry). So, this patch makes sure that -mutex is taken. - -The original code was a little bit funky though, in the form of: - - remove(handshake.entry) - lock(), memzero(handshake.some_members), unlock() - remove(handshake.entry) - -The original intention of that double removal pattern outside the lock -appears to be some attempt to prevent insertions that might happen while -locks are dropped during expensive crypto operations, but actually, all -callers of wg_index_hashtable_insert(handshake.entry) take the write -lock and then explicitly check handshake.state, as they should, which -the aforementioned memzero clears, which means an insertion should -already be impossible. And regardless, the original intention was -necessarily racy, since it wasn't guaranteed that something else would -run after the unlock() instead of after the remove(). So, from a -soundness perspective, it seems positive to remove what looks like a -hack at best. 
- -The crash from both syzbot and from the script above is as follows: - - general protection fault, probably for non-canonical address 0xdffffc0000000000: 0000 [#1] PREEMPT SMP KASAN - KASAN: null-ptr-deref in range [0x0000000000000000-0x0000000000000007] - CPU: 0 PID: 7395 Comm: kworker/0:3 Not tainted 5.9.0-rc4-syzkaller #0 - Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 - Workqueue: wg-kex-wg1 wg_packet_handshake_receive_worker - RIP: 0010:hlist_replace_rcu include/linux/rculist.h:505 [inline] - RIP: 0010:wg_index_hashtable_replace+0x176/0x330 drivers/net/wireguard/peerlookup.c:174 - Code: 00 fc ff df 48 89 f9 48 c1 e9 03 80 3c 01 00 0f 85 44 01 00 00 48 b9 00 00 00 00 00 fc ff df 48 8b 45 10 48 89 c6 48 c1 ee 03 <80> 3c 0e 00 0f 85 06 01 00 00 48 85 d2 4c 89 28 74 47 e8 a3 4f b5 - RSP: 0018:ffffc90006a97bf8 EFLAGS: 00010246 - RAX: 0000000000000000 RBX: ffff888050ffc4f8 RCX: dffffc0000000000 - RDX: 0000000000000000 RSI: 0000000000000000 RDI: ffff88808e04e010 - RBP: ffff88808e04e000 R08: 0000000000000001 R09: ffff8880543d0000 - R10: ffffed100a87a000 R11: 000000000000016e R12: ffff8880543d0000 - R13: ffff88808e04e008 R14: ffff888050ffc508 R15: ffff888050ffc500 - FS: 0000000000000000(0000) GS:ffff8880ae600000(0000) knlGS:0000000000000000 - CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 - CR2: 00000000f5505db0 CR3: 0000000097cf7000 CR4: 00000000001526f0 - DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 - DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 - Call Trace: - wg_noise_handshake_begin_session+0x752/0xc9a drivers/net/wireguard/noise.c:820 - wg_receive_handshake_packet drivers/net/wireguard/receive.c:183 [inline] - wg_packet_handshake_receive_worker+0x33b/0x730 drivers/net/wireguard/receive.c:220 - process_one_work+0x94c/0x1670 kernel/workqueue.c:2269 - worker_thread+0x64c/0x1120 kernel/workqueue.c:2415 - kthread+0x3b5/0x4a0 kernel/kthread.c:292 - ret_from_fork+0x1f/0x30 arch/x86/entry/entry_64.S:294 - -Reported-by: syzbot -Reported-by: Eric Dumazet -Link: https://lore.kernel.org/wireguard/20200908145911.4090480-1-edumazet@google.com/ -Fixes: e7096c131e51 ("net: WireGuard secure network tunnel") -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. 
Donenfeld ---- - drivers/net/wireguard/noise.c | 5 +---- - 1 file changed, 1 insertion(+), 4 deletions(-) - ---- a/drivers/net/wireguard/noise.c -+++ b/drivers/net/wireguard/noise.c -@@ -87,15 +87,12 @@ static void handshake_zero(struct noise_ - - void wg_noise_handshake_clear(struct noise_handshake *handshake) - { -+ down_write(&handshake->lock); - wg_index_hashtable_remove( - handshake->entry.peer->device->index_hashtable, - &handshake->entry); -- down_write(&handshake->lock); - handshake_zero(handshake); - up_write(&handshake->lock); -- wg_index_hashtable_remove( -- handshake->entry.peer->device->index_hashtable, -- &handshake->entry); - } - - static struct noise_keypair *keypair_create(struct wg_peer *peer) diff --git a/target/linux/generic/backport-5.4/080-wireguard-0116-wireguard-selftests-check-that-route_me_harder-packe.patch b/target/linux/generic/backport-5.4/080-wireguard-0116-wireguard-selftests-check-that-route_me_harder-packe.patch new file mode 100644 index 0000000000..09c1b0b8f8 --- /dev/null +++ b/target/linux/generic/backport-5.4/080-wireguard-0116-wireguard-selftests-check-that-route_me_harder-packe.patch @@ -0,0 +1,56 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: "Jason A. Donenfeld" +Date: Thu, 29 Oct 2020 03:56:05 +0100 +Subject: [PATCH] wireguard: selftests: check that route_me_harder packets use + the right sk + +commit af8afcf1fdd5f365f70e2386c2d8c7a1abd853d7 upstream. + +If netfilter changes the packet mark, the packet is rerouted. The +ip_route_me_harder family of functions fails to use the right sk, opting +to instead use skb->sk, resulting in a routing loop when used with +tunnels. With the next change fixing this issue in netfilter, test for +the relevant condition inside our test suite, since wireguard was where +the bug was discovered. + +Reported-by: Chen Minqiang +Signed-off-by: Jason A. Donenfeld +Signed-off-by: Pablo Neira Ayuso +Signed-off-by: Jason A. Donenfeld +--- + tools/testing/selftests/wireguard/netns.sh | 8 ++++++++ + tools/testing/selftests/wireguard/qemu/kernel.config | 2 ++ + 2 files changed, 10 insertions(+) + +--- a/tools/testing/selftests/wireguard/netns.sh ++++ b/tools/testing/selftests/wireguard/netns.sh +@@ -316,6 +316,14 @@ pp sleep 3 + n2 ping -W 1 -c 1 192.168.241.1 + n1 wg set wg0 peer "$pub2" persistent-keepalive 0 + ++# Test that sk_bound_dev_if works ++n1 ping -I wg0 -c 1 -W 1 192.168.241.2 ++# What about when the mark changes and the packet must be rerouted? 
++n1 iptables -t mangle -I OUTPUT -j MARK --set-xmark 1 ++n1 ping -c 1 -W 1 192.168.241.2 # First the boring case ++n1 ping -I wg0 -c 1 -W 1 192.168.241.2 # Then the sk_bound_dev_if case ++n1 iptables -t mangle -D OUTPUT -j MARK --set-xmark 1 ++ + # Test that onion routing works, even when it loops + n1 wg set wg0 peer "$pub3" allowed-ips 192.168.242.2/32 endpoint 192.168.241.2:5 + ip1 addr add 192.168.242.1/24 dev wg0 +--- a/tools/testing/selftests/wireguard/qemu/kernel.config ++++ b/tools/testing/selftests/wireguard/qemu/kernel.config +@@ -18,10 +18,12 @@ CONFIG_NF_NAT=y + CONFIG_NETFILTER_XTABLES=y + CONFIG_NETFILTER_XT_NAT=y + CONFIG_NETFILTER_XT_MATCH_LENGTH=y ++CONFIG_NETFILTER_XT_MARK=y + CONFIG_NF_CONNTRACK_IPV4=y + CONFIG_NF_NAT_IPV4=y + CONFIG_IP_NF_IPTABLES=y + CONFIG_IP_NF_FILTER=y ++CONFIG_IP_NF_MANGLE=y + CONFIG_IP_NF_NAT=y + CONFIG_IP_ADVANCED_ROUTER=y + CONFIG_IP_MULTIPLE_TABLES=y diff --git a/target/linux/generic/backport-5.4/080-wireguard-0117-wireguard-avoid-double-unlikely-notation-when-using-.patch b/target/linux/generic/backport-5.4/080-wireguard-0117-wireguard-avoid-double-unlikely-notation-when-using-.patch new file mode 100644 index 0000000000..7dfc1bb919 --- /dev/null +++ b/target/linux/generic/backport-5.4/080-wireguard-0117-wireguard-avoid-double-unlikely-notation-when-using-.patch @@ -0,0 +1,55 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Antonio Quartulli +Date: Mon, 22 Feb 2021 17:25:43 +0100 +Subject: [PATCH] wireguard: avoid double unlikely() notation when using + IS_ERR() + +commit 30ac4e2f54ec067b7b9ca0db27e75681581378d6 upstream. + +The definition of IS_ERR() already applies the unlikely() notation +when checking the error status of the passed pointer. For this +reason there is no need to have the same notation outside of +IS_ERR() itself. + +Clean up code by removing redundant notation. + +Signed-off-by: Antonio Quartulli +Signed-off-by: Jason A. Donenfeld +Signed-off-by: Jakub Kicinski +Signed-off-by: Jason A. 
Donenfeld +--- + drivers/net/wireguard/device.c | 2 +- + drivers/net/wireguard/socket.c | 4 ++-- + 2 files changed, 3 insertions(+), 3 deletions(-) + +--- a/drivers/net/wireguard/device.c ++++ b/drivers/net/wireguard/device.c +@@ -157,7 +157,7 @@ static netdev_tx_t wg_xmit(struct sk_buf + } else { + struct sk_buff *segs = skb_gso_segment(skb, 0); + +- if (unlikely(IS_ERR(segs))) { ++ if (IS_ERR(segs)) { + ret = PTR_ERR(segs); + goto err_peer; + } +--- a/drivers/net/wireguard/socket.c ++++ b/drivers/net/wireguard/socket.c +@@ -71,7 +71,7 @@ static int send4(struct wg_device *wg, s + ip_rt_put(rt); + rt = ip_route_output_flow(sock_net(sock), &fl, sock); + } +- if (unlikely(IS_ERR(rt))) { ++ if (IS_ERR(rt)) { + ret = PTR_ERR(rt); + net_dbg_ratelimited("%s: No route to %pISpfsc, error %d\n", + wg->dev->name, &endpoint->addr, ret); +@@ -138,7 +138,7 @@ static int send6(struct wg_device *wg, s + } + dst = ipv6_stub->ipv6_dst_lookup_flow(sock_net(sock), sock, &fl, + NULL); +- if (unlikely(IS_ERR(dst))) { ++ if (IS_ERR(dst)) { + ret = PTR_ERR(dst); + net_dbg_ratelimited("%s: No route to %pISpfsc, error %d\n", + wg->dev->name, &endpoint->addr, ret); diff --git a/target/linux/generic/backport-5.4/080-wireguard-0117-wireguard-peerlookup-take-lock-before-checking-hash-.patch b/target/linux/generic/backport-5.4/080-wireguard-0117-wireguard-peerlookup-take-lock-before-checking-hash-.patch deleted file mode 100644 index e7f46ddf9c..0000000000 --- a/target/linux/generic/backport-5.4/080-wireguard-0117-wireguard-peerlookup-take-lock-before-checking-hash-.patch +++ /dev/null @@ -1,62 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Wed, 9 Sep 2020 13:58:15 +0200 -Subject: [PATCH] wireguard: peerlookup: take lock before checking hash in - replace operation - -commit 6147f7b1e90ff09bd52afc8b9206a7fcd133daf7 upstream. - -Eric's suggested fix for the previous commit's mentioned race condition -was to simply take the table->lock in wg_index_hashtable_replace(). The -table->lock of the hash table is supposed to protect the bucket heads, -not the entires, but actually, since all the mutator functions are -already taking it, it makes sense to take it too for the test to -hlist_unhashed, as a defense in depth measure, so that it no longer -races with deletions, regardless of what other locks are protecting -individual entries. This is sensible from a performance perspective -because, as Eric pointed out, the case of being unhashed is already the -unlikely case, so this won't add common contention. And comparing -instructions, this basically doesn't make much of a difference other -than pushing and popping %r13, used by the new `bool ret`. More -generally, I like the idea of locking consistency across table mutator -functions, and this might let me rest slightly easier at night. - -Suggested-by: Eric Dumazet -Link: https://lore.kernel.org/wireguard/20200908145911.4090480-1-edumazet@google.com/ -Fixes: e7096c131e51 ("net: WireGuard secure network tunnel") -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. 
Donenfeld ---- - drivers/net/wireguard/peerlookup.c | 11 ++++++++--- - 1 file changed, 8 insertions(+), 3 deletions(-) - ---- a/drivers/net/wireguard/peerlookup.c -+++ b/drivers/net/wireguard/peerlookup.c -@@ -167,9 +167,13 @@ bool wg_index_hashtable_replace(struct i - struct index_hashtable_entry *old, - struct index_hashtable_entry *new) - { -- if (unlikely(hlist_unhashed(&old->index_hash))) -- return false; -+ bool ret; -+ - spin_lock_bh(&table->lock); -+ ret = !hlist_unhashed(&old->index_hash); -+ if (unlikely(!ret)) -+ goto out; -+ - new->index = old->index; - hlist_replace_rcu(&old->index_hash, &new->index_hash); - -@@ -180,8 +184,9 @@ bool wg_index_hashtable_replace(struct i - * simply gets dropped, which isn't terrible. - */ - INIT_HLIST_NODE(&old->index_hash); -+out: - spin_unlock_bh(&table->lock); -- return true; -+ return ret; - } - - void wg_index_hashtable_remove(struct index_hashtable *table, diff --git a/target/linux/generic/backport-5.4/080-wireguard-0118-wireguard-selftests-check-that-route_me_harder-packe.patch b/target/linux/generic/backport-5.4/080-wireguard-0118-wireguard-selftests-check-that-route_me_harder-packe.patch deleted file mode 100644 index 09c1b0b8f8..0000000000 --- a/target/linux/generic/backport-5.4/080-wireguard-0118-wireguard-selftests-check-that-route_me_harder-packe.patch +++ /dev/null @@ -1,56 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Thu, 29 Oct 2020 03:56:05 +0100 -Subject: [PATCH] wireguard: selftests: check that route_me_harder packets use - the right sk - -commit af8afcf1fdd5f365f70e2386c2d8c7a1abd853d7 upstream. - -If netfilter changes the packet mark, the packet is rerouted. The -ip_route_me_harder family of functions fails to use the right sk, opting -to instead use skb->sk, resulting in a routing loop when used with -tunnels. With the next change fixing this issue in netfilter, test for -the relevant condition inside our test suite, since wireguard was where -the bug was discovered. - -Reported-by: Chen Minqiang -Signed-off-by: Jason A. Donenfeld -Signed-off-by: Pablo Neira Ayuso -Signed-off-by: Jason A. Donenfeld ---- - tools/testing/selftests/wireguard/netns.sh | 8 ++++++++ - tools/testing/selftests/wireguard/qemu/kernel.config | 2 ++ - 2 files changed, 10 insertions(+) - ---- a/tools/testing/selftests/wireguard/netns.sh -+++ b/tools/testing/selftests/wireguard/netns.sh -@@ -316,6 +316,14 @@ pp sleep 3 - n2 ping -W 1 -c 1 192.168.241.1 - n1 wg set wg0 peer "$pub2" persistent-keepalive 0 - -+# Test that sk_bound_dev_if works -+n1 ping -I wg0 -c 1 -W 1 192.168.241.2 -+# What about when the mark changes and the packet must be rerouted? 
-+n1 iptables -t mangle -I OUTPUT -j MARK --set-xmark 1 -+n1 ping -c 1 -W 1 192.168.241.2 # First the boring case -+n1 ping -I wg0 -c 1 -W 1 192.168.241.2 # Then the sk_bound_dev_if case -+n1 iptables -t mangle -D OUTPUT -j MARK --set-xmark 1 -+ - # Test that onion routing works, even when it loops - n1 wg set wg0 peer "$pub3" allowed-ips 192.168.242.2/32 endpoint 192.168.241.2:5 - ip1 addr add 192.168.242.1/24 dev wg0 ---- a/tools/testing/selftests/wireguard/qemu/kernel.config -+++ b/tools/testing/selftests/wireguard/qemu/kernel.config -@@ -18,10 +18,12 @@ CONFIG_NF_NAT=y - CONFIG_NETFILTER_XTABLES=y - CONFIG_NETFILTER_XT_NAT=y - CONFIG_NETFILTER_XT_MATCH_LENGTH=y -+CONFIG_NETFILTER_XT_MARK=y - CONFIG_NF_CONNTRACK_IPV4=y - CONFIG_NF_NAT_IPV4=y - CONFIG_IP_NF_IPTABLES=y - CONFIG_IP_NF_FILTER=y -+CONFIG_IP_NF_MANGLE=y - CONFIG_IP_NF_NAT=y - CONFIG_IP_ADVANCED_ROUTER=y - CONFIG_IP_MULTIPLE_TABLES=y diff --git a/target/linux/generic/backport-5.4/080-wireguard-0118-wireguard-socket-remove-bogus-__be32-annotation.patch b/target/linux/generic/backport-5.4/080-wireguard-0118-wireguard-socket-remove-bogus-__be32-annotation.patch new file mode 100644 index 0000000000..1796f54de9 --- /dev/null +++ b/target/linux/generic/backport-5.4/080-wireguard-0118-wireguard-socket-remove-bogus-__be32-annotation.patch @@ -0,0 +1,52 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: Jann Horn +Date: Mon, 22 Feb 2021 17:25:44 +0100 +Subject: [PATCH] wireguard: socket: remove bogus __be32 annotation + +commit 7f57bd8dc22de35ddd895294aa554003e4f19a72 upstream. + +The endpoint->src_if4 has nothing to do with fixed-endian numbers; remove +the bogus annotation. + +This was introduced in +https://git.zx2c4.com/wireguard-monolithic-historical/commit?id=14e7d0a499a676ec55176c0de2f9fcbd34074a82 +in the historical WireGuard repo because the old code used to +zero-initialize multiple members as follows: + + endpoint->src4.s_addr = endpoint->src_if4 = fl.saddr = 0; + +Because fl.saddr is fixed-endian and an assignment returns a value with the +type of its left operand, this meant that sparse detected an assignment +between values of different endianness. + +Since then, this assignment was already split up into separate statements; +just the cast survived. + +Signed-off-by: Jann Horn +Signed-off-by: Jason A. Donenfeld +Signed-off-by: Jakub Kicinski +Signed-off-by: Jason A. 
Donenfeld +--- + drivers/net/wireguard/socket.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +--- a/drivers/net/wireguard/socket.c ++++ b/drivers/net/wireguard/socket.c +@@ -53,7 +53,7 @@ static int send4(struct wg_device *wg, s + if (unlikely(!inet_confirm_addr(sock_net(sock), NULL, 0, + fl.saddr, RT_SCOPE_HOST))) { + endpoint->src4.s_addr = 0; +- *(__force __be32 *)&endpoint->src_if4 = 0; ++ endpoint->src_if4 = 0; + fl.saddr = 0; + if (cache) + dst_cache_reset(cache); +@@ -63,7 +63,7 @@ static int send4(struct wg_device *wg, s + PTR_ERR(rt) == -EINVAL) || (!IS_ERR(rt) && + rt->dst.dev->ifindex != endpoint->src_if4)))) { + endpoint->src4.s_addr = 0; +- *(__force __be32 *)&endpoint->src_if4 = 0; ++ endpoint->src_if4 = 0; + fl.saddr = 0; + if (cache) + dst_cache_reset(cache); diff --git a/target/linux/generic/backport-5.4/080-wireguard-0119-wireguard-avoid-double-unlikely-notation-when-using-.patch b/target/linux/generic/backport-5.4/080-wireguard-0119-wireguard-avoid-double-unlikely-notation-when-using-.patch deleted file mode 100644 index 7dfc1bb919..0000000000 --- a/target/linux/generic/backport-5.4/080-wireguard-0119-wireguard-avoid-double-unlikely-notation-when-using-.patch +++ /dev/null @@ -1,55 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Antonio Quartulli -Date: Mon, 22 Feb 2021 17:25:43 +0100 -Subject: [PATCH] wireguard: avoid double unlikely() notation when using - IS_ERR() - -commit 30ac4e2f54ec067b7b9ca0db27e75681581378d6 upstream. - -The definition of IS_ERR() already applies the unlikely() notation -when checking the error status of the passed pointer. For this -reason there is no need to have the same notation outside of -IS_ERR() itself. - -Clean up code by removing redundant notation. - -Signed-off-by: Antonio Quartulli -Signed-off-by: Jason A. Donenfeld -Signed-off-by: Jakub Kicinski -Signed-off-by: Jason A. 
Donenfeld ---- - drivers/net/wireguard/device.c | 2 +- - drivers/net/wireguard/socket.c | 4 ++-- - 2 files changed, 3 insertions(+), 3 deletions(-) - ---- a/drivers/net/wireguard/device.c -+++ b/drivers/net/wireguard/device.c -@@ -157,7 +157,7 @@ static netdev_tx_t wg_xmit(struct sk_buf - } else { - struct sk_buff *segs = skb_gso_segment(skb, 0); - -- if (unlikely(IS_ERR(segs))) { -+ if (IS_ERR(segs)) { - ret = PTR_ERR(segs); - goto err_peer; - } ---- a/drivers/net/wireguard/socket.c -+++ b/drivers/net/wireguard/socket.c -@@ -71,7 +71,7 @@ static int send4(struct wg_device *wg, s - ip_rt_put(rt); - rt = ip_route_output_flow(sock_net(sock), &fl, sock); - } -- if (unlikely(IS_ERR(rt))) { -+ if (IS_ERR(rt)) { - ret = PTR_ERR(rt); - net_dbg_ratelimited("%s: No route to %pISpfsc, error %d\n", - wg->dev->name, &endpoint->addr, ret); -@@ -138,7 +138,7 @@ static int send6(struct wg_device *wg, s - } - dst = ipv6_stub->ipv6_dst_lookup_flow(sock_net(sock), sock, &fl, - NULL); -- if (unlikely(IS_ERR(dst))) { -+ if (IS_ERR(dst)) { - ret = PTR_ERR(dst); - net_dbg_ratelimited("%s: No route to %pISpfsc, error %d\n", - wg->dev->name, &endpoint->addr, ret); diff --git a/target/linux/generic/backport-5.4/080-wireguard-0119-wireguard-selftests-test-multiple-parallel-streams.patch b/target/linux/generic/backport-5.4/080-wireguard-0119-wireguard-selftests-test-multiple-parallel-streams.patch new file mode 100644 index 0000000000..3093de45f7 --- /dev/null +++ b/target/linux/generic/backport-5.4/080-wireguard-0119-wireguard-selftests-test-multiple-parallel-streams.patch @@ -0,0 +1,52 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: "Jason A. Donenfeld" +Date: Mon, 22 Feb 2021 17:25:45 +0100 +Subject: [PATCH] wireguard: selftests: test multiple parallel streams + +commit d5a49aa6c3e264a93a7d08485d66e346be0969dd upstream. + +In order to test ndo_start_xmit being called in parallel, explicitly add +separate tests, which should all run on different cores. This should +help tease out bugs associated with queueing up packets from different +cores in parallel. Currently, it hasn't found those types of bugs, but +given future planned work, this is a useful regression to avoid. + +Fixes: e7096c131e51 ("net: WireGuard secure network tunnel") +Signed-off-by: Jason A. Donenfeld +Signed-off-by: Jakub Kicinski +Signed-off-by: Jason A. 
Donenfeld +--- + tools/testing/selftests/wireguard/netns.sh | 15 ++++++++++++++- + 1 file changed, 14 insertions(+), 1 deletion(-) + +--- a/tools/testing/selftests/wireguard/netns.sh ++++ b/tools/testing/selftests/wireguard/netns.sh +@@ -39,7 +39,7 @@ ip0() { pretty 0 "ip $*"; ip -n $netns0 + ip1() { pretty 1 "ip $*"; ip -n $netns1 "$@"; } + ip2() { pretty 2 "ip $*"; ip -n $netns2 "$@"; } + sleep() { read -t "$1" -N 1 || true; } +-waitiperf() { pretty "${1//*-}" "wait for iperf:5201 pid $2"; while [[ $(ss -N "$1" -tlpH 'sport = 5201') != *\"iperf3\",pid=$2,fd=* ]]; do sleep 0.1; done; } ++waitiperf() { pretty "${1//*-}" "wait for iperf:${3:-5201} pid $2"; while [[ $(ss -N "$1" -tlpH "sport = ${3:-5201}") != *\"iperf3\",pid=$2,fd=* ]]; do sleep 0.1; done; } + waitncatudp() { pretty "${1//*-}" "wait for udp:1111 pid $2"; while [[ $(ss -N "$1" -ulpH 'sport = 1111') != *\"ncat\",pid=$2,fd=* ]]; do sleep 0.1; done; } + waitiface() { pretty "${1//*-}" "wait for $2 to come up"; ip netns exec "$1" bash -c "while [[ \$(< \"/sys/class/net/$2/operstate\") != up ]]; do read -t .1 -N 0 || true; done;"; } + +@@ -141,6 +141,19 @@ tests() { + n2 iperf3 -s -1 -B fd00::2 & + waitiperf $netns2 $! + n1 iperf3 -Z -t 3 -b 0 -u -c fd00::2 ++ ++ # TCP over IPv4, in parallel ++ for max in 4 5 50; do ++ local pids=( ) ++ for ((i=0; i < max; ++i)) do ++ n2 iperf3 -p $(( 5200 + i )) -s -1 -B 192.168.241.2 & ++ pids+=( $! ); waitiperf $netns2 $! $(( 5200 + i )) ++ done ++ for ((i=0; i < max; ++i)) do ++ n1 iperf3 -Z -t 3 -p $(( 5200 + i )) -c 192.168.241.2 & ++ done ++ wait "${pids[@]}" ++ done + } + + [[ $(ip1 link show dev wg0) =~ mtu\ ([0-9]+) ]] && orig_mtu="${BASH_REMATCH[1]}" diff --git a/target/linux/generic/backport-5.4/080-wireguard-0120-wireguard-peer-put-frequently-used-members-above-cac.patch b/target/linux/generic/backport-5.4/080-wireguard-0120-wireguard-peer-put-frequently-used-members-above-cac.patch new file mode 100644 index 0000000000..69e76b96e3 --- /dev/null +++ b/target/linux/generic/backport-5.4/080-wireguard-0120-wireguard-peer-put-frequently-used-members-above-cac.patch @@ -0,0 +1,42 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: "Jason A. Donenfeld" +Date: Mon, 22 Feb 2021 17:25:46 +0100 +Subject: [PATCH] wireguard: peer: put frequently used members above cache + lines + +commit 5a0598695634a6bb4126818902dd9140cd9df8b6 upstream. + +The is_dead boolean is checked for every single packet, while the +internal_id member is used basically only for pr_debug messages. So it +makes sense to hoist up is_dead into some space formerly unused by a +struct hole, while demoting internal_api to below the lowest struct +cache line. + +Signed-off-by: Jason A. Donenfeld +Signed-off-by: Jakub Kicinski +Signed-off-by: Jason A. 
Donenfeld +--- + drivers/net/wireguard/peer.h | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +--- a/drivers/net/wireguard/peer.h ++++ b/drivers/net/wireguard/peer.h +@@ -39,6 +39,7 @@ struct wg_peer { + struct crypt_queue tx_queue, rx_queue; + struct sk_buff_head staged_packet_queue; + int serial_work_cpu; ++ bool is_dead; + struct noise_keypairs keypairs; + struct endpoint endpoint; + struct dst_cache endpoint_cache; +@@ -61,9 +62,8 @@ struct wg_peer { + struct rcu_head rcu; + struct list_head peer_list; + struct list_head allowedips_list; +- u64 internal_id; + struct napi_struct napi; +- bool is_dead; ++ u64 internal_id; + }; + + struct wg_peer *wg_peer_create(struct wg_device *wg, diff --git a/target/linux/generic/backport-5.4/080-wireguard-0120-wireguard-socket-remove-bogus-__be32-annotation.patch b/target/linux/generic/backport-5.4/080-wireguard-0120-wireguard-socket-remove-bogus-__be32-annotation.patch deleted file mode 100644 index 1796f54de9..0000000000 --- a/target/linux/generic/backport-5.4/080-wireguard-0120-wireguard-socket-remove-bogus-__be32-annotation.patch +++ /dev/null @@ -1,52 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Jann Horn -Date: Mon, 22 Feb 2021 17:25:44 +0100 -Subject: [PATCH] wireguard: socket: remove bogus __be32 annotation - -commit 7f57bd8dc22de35ddd895294aa554003e4f19a72 upstream. - -The endpoint->src_if4 has nothing to do with fixed-endian numbers; remove -the bogus annotation. - -This was introduced in -https://git.zx2c4.com/wireguard-monolithic-historical/commit?id=14e7d0a499a676ec55176c0de2f9fcbd34074a82 -in the historical WireGuard repo because the old code used to -zero-initialize multiple members as follows: - - endpoint->src4.s_addr = endpoint->src_if4 = fl.saddr = 0; - -Because fl.saddr is fixed-endian and an assignment returns a value with the -type of its left operand, this meant that sparse detected an assignment -between values of different endianness. - -Since then, this assignment was already split up into separate statements; -just the cast survived. - -Signed-off-by: Jann Horn -Signed-off-by: Jason A. Donenfeld -Signed-off-by: Jakub Kicinski -Signed-off-by: Jason A. Donenfeld ---- - drivers/net/wireguard/socket.c | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - ---- a/drivers/net/wireguard/socket.c -+++ b/drivers/net/wireguard/socket.c -@@ -53,7 +53,7 @@ static int send4(struct wg_device *wg, s - if (unlikely(!inet_confirm_addr(sock_net(sock), NULL, 0, - fl.saddr, RT_SCOPE_HOST))) { - endpoint->src4.s_addr = 0; -- *(__force __be32 *)&endpoint->src_if4 = 0; -+ endpoint->src_if4 = 0; - fl.saddr = 0; - if (cache) - dst_cache_reset(cache); -@@ -63,7 +63,7 @@ static int send4(struct wg_device *wg, s - PTR_ERR(rt) == -EINVAL) || (!IS_ERR(rt) && - rt->dst.dev->ifindex != endpoint->src_if4)))) { - endpoint->src4.s_addr = 0; -- *(__force __be32 *)&endpoint->src_if4 = 0; -+ endpoint->src_if4 = 0; - fl.saddr = 0; - if (cache) - dst_cache_reset(cache); diff --git a/target/linux/generic/backport-5.4/080-wireguard-0121-wireguard-device-do-not-generate-ICMP-for-non-IP-pac.patch b/target/linux/generic/backport-5.4/080-wireguard-0121-wireguard-device-do-not-generate-ICMP-for-non-IP-pac.patch new file mode 100644 index 0000000000..073ee9b0d5 --- /dev/null +++ b/target/linux/generic/backport-5.4/080-wireguard-0121-wireguard-device-do-not-generate-ICMP-for-non-IP-pac.patch @@ -0,0 +1,47 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: "Jason A. 
Donenfeld" +Date: Mon, 22 Feb 2021 17:25:47 +0100 +Subject: [PATCH] wireguard: device: do not generate ICMP for non-IP packets + +commit 99fff5264e7ab06f45b0ad60243475be0a8d0559 upstream. + +If skb->protocol doesn't match the actual skb->data header, it's +probably not a good idea to pass it off to icmp{,v6}_ndo_send, which is +expecting to reply to a valid IP packet. So this commit has that early +mismatch case jump to a later error label. + +Fixes: e7096c131e51 ("net: WireGuard secure network tunnel") +Signed-off-by: Jason A. Donenfeld +Signed-off-by: Jakub Kicinski +Signed-off-by: Jason A. Donenfeld +--- + drivers/net/wireguard/device.c | 7 ++++--- + 1 file changed, 4 insertions(+), 3 deletions(-) + +--- a/drivers/net/wireguard/device.c ++++ b/drivers/net/wireguard/device.c +@@ -138,7 +138,7 @@ static netdev_tx_t wg_xmit(struct sk_buf + else if (skb->protocol == htons(ETH_P_IPV6)) + net_dbg_ratelimited("%s: No peer has allowed IPs matching %pI6\n", + dev->name, &ipv6_hdr(skb)->daddr); +- goto err; ++ goto err_icmp; + } + + family = READ_ONCE(peer->endpoint.addr.sa_family); +@@ -201,12 +201,13 @@ static netdev_tx_t wg_xmit(struct sk_buf + + err_peer: + wg_peer_put(peer); +-err: +- ++dev->stats.tx_errors; ++err_icmp: + if (skb->protocol == htons(ETH_P_IP)) + icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0); + else if (skb->protocol == htons(ETH_P_IPV6)) + icmpv6_ndo_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0); ++err: ++ ++dev->stats.tx_errors; + kfree_skb(skb); + return ret; + } diff --git a/target/linux/generic/backport-5.4/080-wireguard-0121-wireguard-selftests-test-multiple-parallel-streams.patch b/target/linux/generic/backport-5.4/080-wireguard-0121-wireguard-selftests-test-multiple-parallel-streams.patch deleted file mode 100644 index 3093de45f7..0000000000 --- a/target/linux/generic/backport-5.4/080-wireguard-0121-wireguard-selftests-test-multiple-parallel-streams.patch +++ /dev/null @@ -1,52 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Mon, 22 Feb 2021 17:25:45 +0100 -Subject: [PATCH] wireguard: selftests: test multiple parallel streams - -commit d5a49aa6c3e264a93a7d08485d66e346be0969dd upstream. - -In order to test ndo_start_xmit being called in parallel, explicitly add -separate tests, which should all run on different cores. This should -help tease out bugs associated with queueing up packets from different -cores in parallel. Currently, it hasn't found those types of bugs, but -given future planned work, this is a useful regression to avoid. - -Fixes: e7096c131e51 ("net: WireGuard secure network tunnel") -Signed-off-by: Jason A. Donenfeld -Signed-off-by: Jakub Kicinski -Signed-off-by: Jason A. 
Donenfeld ---- - tools/testing/selftests/wireguard/netns.sh | 15 ++++++++++++++- - 1 file changed, 14 insertions(+), 1 deletion(-) - ---- a/tools/testing/selftests/wireguard/netns.sh -+++ b/tools/testing/selftests/wireguard/netns.sh -@@ -39,7 +39,7 @@ ip0() { pretty 0 "ip $*"; ip -n $netns0 - ip1() { pretty 1 "ip $*"; ip -n $netns1 "$@"; } - ip2() { pretty 2 "ip $*"; ip -n $netns2 "$@"; } - sleep() { read -t "$1" -N 1 || true; } --waitiperf() { pretty "${1//*-}" "wait for iperf:5201 pid $2"; while [[ $(ss -N "$1" -tlpH 'sport = 5201') != *\"iperf3\",pid=$2,fd=* ]]; do sleep 0.1; done; } -+waitiperf() { pretty "${1//*-}" "wait for iperf:${3:-5201} pid $2"; while [[ $(ss -N "$1" -tlpH "sport = ${3:-5201}") != *\"iperf3\",pid=$2,fd=* ]]; do sleep 0.1; done; } - waitncatudp() { pretty "${1//*-}" "wait for udp:1111 pid $2"; while [[ $(ss -N "$1" -ulpH 'sport = 1111') != *\"ncat\",pid=$2,fd=* ]]; do sleep 0.1; done; } - waitiface() { pretty "${1//*-}" "wait for $2 to come up"; ip netns exec "$1" bash -c "while [[ \$(< \"/sys/class/net/$2/operstate\") != up ]]; do read -t .1 -N 0 || true; done;"; } - -@@ -141,6 +141,19 @@ tests() { - n2 iperf3 -s -1 -B fd00::2 & - waitiperf $netns2 $! - n1 iperf3 -Z -t 3 -b 0 -u -c fd00::2 -+ -+ # TCP over IPv4, in parallel -+ for max in 4 5 50; do -+ local pids=( ) -+ for ((i=0; i < max; ++i)) do -+ n2 iperf3 -p $(( 5200 + i )) -s -1 -B 192.168.241.2 & -+ pids+=( $! ); waitiperf $netns2 $! $(( 5200 + i )) -+ done -+ for ((i=0; i < max; ++i)) do -+ n1 iperf3 -Z -t 3 -p $(( 5200 + i )) -c 192.168.241.2 & -+ done -+ wait "${pids[@]}" -+ done - } - - [[ $(ip1 link show dev wg0) =~ mtu\ ([0-9]+) ]] && orig_mtu="${BASH_REMATCH[1]}" diff --git a/target/linux/generic/backport-5.4/080-wireguard-0122-wireguard-peer-put-frequently-used-members-above-cac.patch b/target/linux/generic/backport-5.4/080-wireguard-0122-wireguard-peer-put-frequently-used-members-above-cac.patch deleted file mode 100644 index 69e76b96e3..0000000000 --- a/target/linux/generic/backport-5.4/080-wireguard-0122-wireguard-peer-put-frequently-used-members-above-cac.patch +++ /dev/null @@ -1,42 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Mon, 22 Feb 2021 17:25:46 +0100 -Subject: [PATCH] wireguard: peer: put frequently used members above cache - lines - -commit 5a0598695634a6bb4126818902dd9140cd9df8b6 upstream. - -The is_dead boolean is checked for every single packet, while the -internal_id member is used basically only for pr_debug messages. So it -makes sense to hoist up is_dead into some space formerly unused by a -struct hole, while demoting internal_api to below the lowest struct -cache line. - -Signed-off-by: Jason A. Donenfeld -Signed-off-by: Jakub Kicinski -Signed-off-by: Jason A. 
Donenfeld ---- - drivers/net/wireguard/peer.h | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - ---- a/drivers/net/wireguard/peer.h -+++ b/drivers/net/wireguard/peer.h -@@ -39,6 +39,7 @@ struct wg_peer { - struct crypt_queue tx_queue, rx_queue; - struct sk_buff_head staged_packet_queue; - int serial_work_cpu; -+ bool is_dead; - struct noise_keypairs keypairs; - struct endpoint endpoint; - struct dst_cache endpoint_cache; -@@ -61,9 +62,8 @@ struct wg_peer { - struct rcu_head rcu; - struct list_head peer_list; - struct list_head allowedips_list; -- u64 internal_id; - struct napi_struct napi; -- bool is_dead; -+ u64 internal_id; - }; - - struct wg_peer *wg_peer_create(struct wg_device *wg, diff --git a/target/linux/generic/backport-5.4/080-wireguard-0122-wireguard-queueing-get-rid-of-per-peer-ring-buffers.patch b/target/linux/generic/backport-5.4/080-wireguard-0122-wireguard-queueing-get-rid-of-per-peer-ring-buffers.patch new file mode 100644 index 0000000000..9dc7ddae7f --- /dev/null +++ b/target/linux/generic/backport-5.4/080-wireguard-0122-wireguard-queueing-get-rid-of-per-peer-ring-buffers.patch @@ -0,0 +1,560 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: "Jason A. Donenfeld" +Date: Mon, 22 Feb 2021 17:25:48 +0100 +Subject: [PATCH] wireguard: queueing: get rid of per-peer ring buffers +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +commit 8b5553ace83cced775eefd0f3f18b5c6214ccf7a upstream. + +Having two ring buffers per-peer means that every peer results in two +massive ring allocations. On an 8-core x86_64 machine, this commit +reduces the per-peer allocation from 18,688 bytes to 1,856 bytes, which +is an 90% reduction. Ninety percent! With some single-machine +deployments approaching 500,000 peers, we're talking about a reduction +from 7 gigs of memory down to 700 megs of memory. + +In order to get rid of these per-peer allocations, this commit switches +to using a list-based queueing approach. Currently GSO fragments are +chained together using the skb->next pointer (the skb_list_* singly +linked list approach), so we form the per-peer queue around the unused +skb->prev pointer (which sort of makes sense because the links are +pointing backwards). Use of skb_queue_* is not possible here, because +that is based on doubly linked lists and spinlocks. Multiple cores can +write into the queue at any given time, because its writes occur in the +start_xmit path or in the udp_recv path. But reads happen in a single +workqueue item per-peer, amounting to a multi-producer, single-consumer +paradigm. + +The MPSC queue is implemented locklessly and never blocks. However, it +is not linearizable (though it is serializable), with a very tight and +unlikely race on writes, which, when hit (some tiny fraction of the +0.15% of partial adds on a fully loaded 16-core x86_64 system), causes +the queue reader to terminate early. However, because every packet sent +queues up the same workqueue item after it is fully added, the worker +resumes again, and stopping early isn't actually a problem, since at +that point the packet wouldn't have yet been added to the encryption +queue. These properties allow us to avoid disabling interrupts or +spinning. The design is based on Dmitry Vyukov's algorithm [1]. + +Performance-wise, ordinarily list-based queues aren't preferable to +ringbuffers, because of cache misses when following pointers around. 
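
For background, the commit message references Dmitry Vyukov's intrusive MPSC queue [1]: push is one atomic exchange on the head plus one pointer store, and the single consumer never blocks, at the cost of occasionally observing a producer mid-push and retrying later. A rough C11 sketch of that algorithm (illustrative only; the in-tree wg_prev_queue chains sk_buffs through their unused prev pointers and adds length accounting on top):

    #include <stdatomic.h>
    #include <stddef.h>
    #include <stdio.h>

    struct node {
        _Atomic(struct node *) next;
        int value;
    };

    struct mpsc_queue {
        _Atomic(struct node *) head;    /* producers push here */
        struct node *tail;              /* single consumer pops here */
        struct node stub;               /* sentinel; queue is never empty */
    };

    static void mpsc_init(struct mpsc_queue *q)
    {
        atomic_store(&q->stub.next, NULL);
        atomic_store(&q->head, &q->stub);
        q->tail = &q->stub;
    }

    /* Multi-producer push: between the exchange and the final store the
     * chain is momentarily broken -- the tiny "partial add" window the
     * commit message describes. */
    static void mpsc_push(struct mpsc_queue *q, struct node *n)
    {
        struct node *prev;

        atomic_store(&n->next, NULL);
        prev = atomic_exchange(&q->head, n);
        atomic_store(&prev->next, n);
    }

    /* Single-consumer pop: returns NULL when empty or when a producer is
     * inside the push window; the consumer simply tries again later. */
    static struct node *mpsc_pop(struct mpsc_queue *q)
    {
        struct node *tail = q->tail, *next = atomic_load(&tail->next);

        if (tail == &q->stub) {         /* skip over the sentinel */
            if (!next)
                return NULL;
            q->tail = next;
            tail = next;
            next = atomic_load(&tail->next);
        }
        if (next) {
            q->tail = next;
            return tail;
        }
        if (tail != atomic_load(&q->head))
            return NULL;                /* producer mid-push */
        mpsc_push(q, &q->stub);         /* re-insert the sentinel */
        next = atomic_load(&tail->next);
        if (!next)
            return NULL;
        q->tail = next;
        return tail;
    }

    int main(void)
    {
        struct mpsc_queue q;
        struct node a = { .value = 1 }, b = { .value = 2 }, *n;

        mpsc_init(&q);
        mpsc_push(&q, &a);
        mpsc_push(&q, &b);
        while ((n = mpsc_pop(&q)))
            printf("%d\n", n->value);
        return 0;
    }
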
+However, we *already* have to follow the adjacent pointers when working +through fragments, so there shouldn't actually be any change there. A +potential downside is that dequeueing is a bit more complicated, but the +ptr_ring structure used prior had a spinlock when dequeueing, so all and +all the difference appears to be a wash. + +Actually, from profiling, the biggest performance hit, by far, of this +commit winds up being atomic_add_unless(count, 1, max) and atomic_ +dec(count), which account for the majority of CPU time, according to +perf. In that sense, the previous ring buffer was superior in that it +could check if it was full by head==tail, which the list-based approach +cannot do. + +But all and all, this enables us to get massive memory savings, allowing +WireGuard to scale for real world deployments, without taking much of a +performance hit. + +[1] http://www.1024cores.net/home/lock-free-algorithms/queues/intrusive-mpsc-node-based-queue + +Reviewed-by: Dmitry Vyukov +Reviewed-by: Toke Høiland-Jørgensen +Fixes: e7096c131e51 ("net: WireGuard secure network tunnel") +Signed-off-by: Jason A. Donenfeld +Signed-off-by: Jakub Kicinski +Signed-off-by: Jason A. Donenfeld +--- + drivers/net/wireguard/device.c | 12 ++--- + drivers/net/wireguard/device.h | 15 +++--- + drivers/net/wireguard/peer.c | 28 ++++------- + drivers/net/wireguard/peer.h | 4 +- + drivers/net/wireguard/queueing.c | 86 +++++++++++++++++++++++++------- + drivers/net/wireguard/queueing.h | 45 ++++++++++++----- + drivers/net/wireguard/receive.c | 16 +++--- + drivers/net/wireguard/send.c | 31 ++++-------- + 8 files changed, 144 insertions(+), 93 deletions(-) + +--- a/drivers/net/wireguard/device.c ++++ b/drivers/net/wireguard/device.c +@@ -235,8 +235,8 @@ static void wg_destruct(struct net_devic + destroy_workqueue(wg->handshake_receive_wq); + destroy_workqueue(wg->handshake_send_wq); + destroy_workqueue(wg->packet_crypt_wq); +- wg_packet_queue_free(&wg->decrypt_queue, true); +- wg_packet_queue_free(&wg->encrypt_queue, true); ++ wg_packet_queue_free(&wg->decrypt_queue); ++ wg_packet_queue_free(&wg->encrypt_queue); + rcu_barrier(); /* Wait for all the peers to be actually freed. 
*/ + wg_ratelimiter_uninit(); + memzero_explicit(&wg->static_identity, sizeof(wg->static_identity)); +@@ -338,12 +338,12 @@ static int wg_newlink(struct net *src_ne + goto err_destroy_handshake_send; + + ret = wg_packet_queue_init(&wg->encrypt_queue, wg_packet_encrypt_worker, +- true, MAX_QUEUED_PACKETS); ++ MAX_QUEUED_PACKETS); + if (ret < 0) + goto err_destroy_packet_crypt; + + ret = wg_packet_queue_init(&wg->decrypt_queue, wg_packet_decrypt_worker, +- true, MAX_QUEUED_PACKETS); ++ MAX_QUEUED_PACKETS); + if (ret < 0) + goto err_free_encrypt_queue; + +@@ -368,9 +368,9 @@ static int wg_newlink(struct net *src_ne + err_uninit_ratelimiter: + wg_ratelimiter_uninit(); + err_free_decrypt_queue: +- wg_packet_queue_free(&wg->decrypt_queue, true); ++ wg_packet_queue_free(&wg->decrypt_queue); + err_free_encrypt_queue: +- wg_packet_queue_free(&wg->encrypt_queue, true); ++ wg_packet_queue_free(&wg->encrypt_queue); + err_destroy_packet_crypt: + destroy_workqueue(wg->packet_crypt_wq); + err_destroy_handshake_send: +--- a/drivers/net/wireguard/device.h ++++ b/drivers/net/wireguard/device.h +@@ -27,13 +27,14 @@ struct multicore_worker { + + struct crypt_queue { + struct ptr_ring ring; +- union { +- struct { +- struct multicore_worker __percpu *worker; +- int last_cpu; +- }; +- struct work_struct work; +- }; ++ struct multicore_worker __percpu *worker; ++ int last_cpu; ++}; ++ ++struct prev_queue { ++ struct sk_buff *head, *tail, *peeked; ++ struct { struct sk_buff *next, *prev; } empty; // Match first 2 members of struct sk_buff. ++ atomic_t count; + }; + + struct wg_device { +--- a/drivers/net/wireguard/peer.c ++++ b/drivers/net/wireguard/peer.c +@@ -32,27 +32,22 @@ struct wg_peer *wg_peer_create(struct wg + peer = kzalloc(sizeof(*peer), GFP_KERNEL); + if (unlikely(!peer)) + return ERR_PTR(ret); +- peer->device = wg; ++ if (dst_cache_init(&peer->endpoint_cache, GFP_KERNEL)) ++ goto err; + ++ peer->device = wg; + wg_noise_handshake_init(&peer->handshake, &wg->static_identity, + public_key, preshared_key, peer); +- if (dst_cache_init(&peer->endpoint_cache, GFP_KERNEL)) +- goto err_1; +- if (wg_packet_queue_init(&peer->tx_queue, wg_packet_tx_worker, false, +- MAX_QUEUED_PACKETS)) +- goto err_2; +- if (wg_packet_queue_init(&peer->rx_queue, NULL, false, +- MAX_QUEUED_PACKETS)) +- goto err_3; +- + peer->internal_id = atomic64_inc_return(&peer_counter); + peer->serial_work_cpu = nr_cpumask_bits; + wg_cookie_init(&peer->latest_cookie); + wg_timers_init(peer); + wg_cookie_checker_precompute_peer_keys(peer); + spin_lock_init(&peer->keypairs.keypair_update_lock); +- INIT_WORK(&peer->transmit_handshake_work, +- wg_packet_handshake_send_worker); ++ INIT_WORK(&peer->transmit_handshake_work, wg_packet_handshake_send_worker); ++ INIT_WORK(&peer->transmit_packet_work, wg_packet_tx_worker); ++ wg_prev_queue_init(&peer->tx_queue); ++ wg_prev_queue_init(&peer->rx_queue); + rwlock_init(&peer->endpoint_lock); + kref_init(&peer->refcount); + skb_queue_head_init(&peer->staged_packet_queue); +@@ -68,11 +63,7 @@ struct wg_peer *wg_peer_create(struct wg + pr_debug("%s: Peer %llu created\n", wg->dev->name, peer->internal_id); + return peer; + +-err_3: +- wg_packet_queue_free(&peer->tx_queue, false); +-err_2: +- dst_cache_destroy(&peer->endpoint_cache); +-err_1: ++err: + kfree(peer); + return ERR_PTR(ret); + } +@@ -197,8 +188,7 @@ static void rcu_release(struct rcu_head + struct wg_peer *peer = container_of(rcu, struct wg_peer, rcu); + + dst_cache_destroy(&peer->endpoint_cache); +- wg_packet_queue_free(&peer->rx_queue, false); +- 
wg_packet_queue_free(&peer->tx_queue, false); ++ WARN_ON(wg_prev_queue_peek(&peer->tx_queue) || wg_prev_queue_peek(&peer->rx_queue)); + + /* The final zeroing takes care of clearing any remaining handshake key + * material and other potentially sensitive information. +--- a/drivers/net/wireguard/peer.h ++++ b/drivers/net/wireguard/peer.h +@@ -36,7 +36,7 @@ struct endpoint { + + struct wg_peer { + struct wg_device *device; +- struct crypt_queue tx_queue, rx_queue; ++ struct prev_queue tx_queue, rx_queue; + struct sk_buff_head staged_packet_queue; + int serial_work_cpu; + bool is_dead; +@@ -46,7 +46,7 @@ struct wg_peer { + rwlock_t endpoint_lock; + struct noise_handshake handshake; + atomic64_t last_sent_handshake; +- struct work_struct transmit_handshake_work, clear_peer_work; ++ struct work_struct transmit_handshake_work, clear_peer_work, transmit_packet_work; + struct cookie latest_cookie; + struct hlist_node pubkey_hash; + u64 rx_bytes, tx_bytes; +--- a/drivers/net/wireguard/queueing.c ++++ b/drivers/net/wireguard/queueing.c +@@ -9,8 +9,7 @@ struct multicore_worker __percpu * + wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr) + { + int cpu; +- struct multicore_worker __percpu *worker = +- alloc_percpu(struct multicore_worker); ++ struct multicore_worker __percpu *worker = alloc_percpu(struct multicore_worker); + + if (!worker) + return NULL; +@@ -23,7 +22,7 @@ wg_packet_percpu_multicore_worker_alloc( + } + + int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function, +- bool multicore, unsigned int len) ++ unsigned int len) + { + int ret; + +@@ -31,25 +30,78 @@ int wg_packet_queue_init(struct crypt_qu + ret = ptr_ring_init(&queue->ring, len, GFP_KERNEL); + if (ret) + return ret; +- if (function) { +- if (multicore) { +- queue->worker = wg_packet_percpu_multicore_worker_alloc( +- function, queue); +- if (!queue->worker) { +- ptr_ring_cleanup(&queue->ring, NULL); +- return -ENOMEM; +- } +- } else { +- INIT_WORK(&queue->work, function); +- } ++ queue->worker = wg_packet_percpu_multicore_worker_alloc(function, queue); ++ if (!queue->worker) { ++ ptr_ring_cleanup(&queue->ring, NULL); ++ return -ENOMEM; + } + return 0; + } + +-void wg_packet_queue_free(struct crypt_queue *queue, bool multicore) ++void wg_packet_queue_free(struct crypt_queue *queue) + { +- if (multicore) +- free_percpu(queue->worker); ++ free_percpu(queue->worker); + WARN_ON(!__ptr_ring_empty(&queue->ring)); + ptr_ring_cleanup(&queue->ring, NULL); + } ++ ++#define NEXT(skb) ((skb)->prev) ++#define STUB(queue) ((struct sk_buff *)&queue->empty) ++ ++void wg_prev_queue_init(struct prev_queue *queue) ++{ ++ NEXT(STUB(queue)) = NULL; ++ queue->head = queue->tail = STUB(queue); ++ queue->peeked = NULL; ++ atomic_set(&queue->count, 0); ++ BUILD_BUG_ON( ++ offsetof(struct sk_buff, next) != offsetof(struct prev_queue, empty.next) - ++ offsetof(struct prev_queue, empty) || ++ offsetof(struct sk_buff, prev) != offsetof(struct prev_queue, empty.prev) - ++ offsetof(struct prev_queue, empty)); ++} ++ ++static void __wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb) ++{ ++ WRITE_ONCE(NEXT(skb), NULL); ++ WRITE_ONCE(NEXT(xchg_release(&queue->head, skb)), skb); ++} ++ ++bool wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb) ++{ ++ if (!atomic_add_unless(&queue->count, 1, MAX_QUEUED_PACKETS)) ++ return false; ++ __wg_prev_queue_enqueue(queue, skb); ++ return true; ++} ++ ++struct sk_buff *wg_prev_queue_dequeue(struct prev_queue *queue) ++{ ++ struct sk_buff *tail = 
queue->tail, *next = smp_load_acquire(&NEXT(tail)); ++ ++ if (tail == STUB(queue)) { ++ if (!next) ++ return NULL; ++ queue->tail = next; ++ tail = next; ++ next = smp_load_acquire(&NEXT(next)); ++ } ++ if (next) { ++ queue->tail = next; ++ atomic_dec(&queue->count); ++ return tail; ++ } ++ if (tail != READ_ONCE(queue->head)) ++ return NULL; ++ __wg_prev_queue_enqueue(queue, STUB(queue)); ++ next = smp_load_acquire(&NEXT(tail)); ++ if (next) { ++ queue->tail = next; ++ atomic_dec(&queue->count); ++ return tail; ++ } ++ return NULL; ++} ++ ++#undef NEXT ++#undef STUB +--- a/drivers/net/wireguard/queueing.h ++++ b/drivers/net/wireguard/queueing.h +@@ -17,12 +17,13 @@ struct wg_device; + struct wg_peer; + struct multicore_worker; + struct crypt_queue; ++struct prev_queue; + struct sk_buff; + + /* queueing.c APIs: */ + int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function, +- bool multicore, unsigned int len); +-void wg_packet_queue_free(struct crypt_queue *queue, bool multicore); ++ unsigned int len); ++void wg_packet_queue_free(struct crypt_queue *queue); + struct multicore_worker __percpu * + wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr); + +@@ -135,8 +136,31 @@ static inline int wg_cpumask_next_online + return cpu; + } + ++void wg_prev_queue_init(struct prev_queue *queue); ++ ++/* Multi producer */ ++bool wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb); ++ ++/* Single consumer */ ++struct sk_buff *wg_prev_queue_dequeue(struct prev_queue *queue); ++ ++/* Single consumer */ ++static inline struct sk_buff *wg_prev_queue_peek(struct prev_queue *queue) ++{ ++ if (queue->peeked) ++ return queue->peeked; ++ queue->peeked = wg_prev_queue_dequeue(queue); ++ return queue->peeked; ++} ++ ++/* Single consumer */ ++static inline void wg_prev_queue_drop_peeked(struct prev_queue *queue) ++{ ++ queue->peeked = NULL; ++} ++ + static inline int wg_queue_enqueue_per_device_and_peer( +- struct crypt_queue *device_queue, struct crypt_queue *peer_queue, ++ struct crypt_queue *device_queue, struct prev_queue *peer_queue, + struct sk_buff *skb, struct workqueue_struct *wq, int *next_cpu) + { + int cpu; +@@ -145,8 +169,9 @@ static inline int wg_queue_enqueue_per_d + /* We first queue this up for the peer ingestion, but the consumer + * will wait for the state to change to CRYPTED or DEAD before. + */ +- if (unlikely(ptr_ring_produce_bh(&peer_queue->ring, skb))) ++ if (unlikely(!wg_prev_queue_enqueue(peer_queue, skb))) + return -ENOSPC; ++ + /* Then we queue it up in the device queue, which consumes the + * packet as soon as it can. + */ +@@ -157,9 +182,7 @@ static inline int wg_queue_enqueue_per_d + return 0; + } + +-static inline void wg_queue_enqueue_per_peer(struct crypt_queue *queue, +- struct sk_buff *skb, +- enum packet_state state) ++static inline void wg_queue_enqueue_per_peer_tx(struct sk_buff *skb, enum packet_state state) + { + /* We take a reference, because as soon as we call atomic_set, the + * peer can be freed from below us. 
+@@ -167,14 +190,12 @@ static inline void wg_queue_enqueue_per_ + struct wg_peer *peer = wg_peer_get(PACKET_PEER(skb)); + + atomic_set_release(&PACKET_CB(skb)->state, state); +- queue_work_on(wg_cpumask_choose_online(&peer->serial_work_cpu, +- peer->internal_id), +- peer->device->packet_crypt_wq, &queue->work); ++ queue_work_on(wg_cpumask_choose_online(&peer->serial_work_cpu, peer->internal_id), ++ peer->device->packet_crypt_wq, &peer->transmit_packet_work); + wg_peer_put(peer); + } + +-static inline void wg_queue_enqueue_per_peer_napi(struct sk_buff *skb, +- enum packet_state state) ++static inline void wg_queue_enqueue_per_peer_rx(struct sk_buff *skb, enum packet_state state) + { + /* We take a reference, because as soon as we call atomic_set, the + * peer can be freed from below us. +--- a/drivers/net/wireguard/receive.c ++++ b/drivers/net/wireguard/receive.c +@@ -444,7 +444,6 @@ packet_processed: + int wg_packet_rx_poll(struct napi_struct *napi, int budget) + { + struct wg_peer *peer = container_of(napi, struct wg_peer, napi); +- struct crypt_queue *queue = &peer->rx_queue; + struct noise_keypair *keypair; + struct endpoint endpoint; + enum packet_state state; +@@ -455,11 +454,10 @@ int wg_packet_rx_poll(struct napi_struct + if (unlikely(budget <= 0)) + return 0; + +- while ((skb = __ptr_ring_peek(&queue->ring)) != NULL && ++ while ((skb = wg_prev_queue_peek(&peer->rx_queue)) != NULL && + (state = atomic_read_acquire(&PACKET_CB(skb)->state)) != + PACKET_STATE_UNCRYPTED) { +- __ptr_ring_discard_one(&queue->ring); +- peer = PACKET_PEER(skb); ++ wg_prev_queue_drop_peeked(&peer->rx_queue); + keypair = PACKET_CB(skb)->keypair; + free = true; + +@@ -508,7 +506,7 @@ void wg_packet_decrypt_worker(struct wor + enum packet_state state = + likely(decrypt_packet(skb, PACKET_CB(skb)->keypair)) ? 
+ PACKET_STATE_CRYPTED : PACKET_STATE_DEAD; +- wg_queue_enqueue_per_peer_napi(skb, state); ++ wg_queue_enqueue_per_peer_rx(skb, state); + if (need_resched()) + cond_resched(); + } +@@ -531,12 +529,10 @@ static void wg_packet_consume_data(struc + if (unlikely(READ_ONCE(peer->is_dead))) + goto err; + +- ret = wg_queue_enqueue_per_device_and_peer(&wg->decrypt_queue, +- &peer->rx_queue, skb, +- wg->packet_crypt_wq, +- &wg->decrypt_queue.last_cpu); ++ ret = wg_queue_enqueue_per_device_and_peer(&wg->decrypt_queue, &peer->rx_queue, skb, ++ wg->packet_crypt_wq, &wg->decrypt_queue.last_cpu); + if (unlikely(ret == -EPIPE)) +- wg_queue_enqueue_per_peer_napi(skb, PACKET_STATE_DEAD); ++ wg_queue_enqueue_per_peer_rx(skb, PACKET_STATE_DEAD); + if (likely(!ret || ret == -EPIPE)) { + rcu_read_unlock_bh(); + return; +--- a/drivers/net/wireguard/send.c ++++ b/drivers/net/wireguard/send.c +@@ -239,8 +239,7 @@ void wg_packet_send_keepalive(struct wg_ + wg_packet_send_staged_packets(peer); + } + +-static void wg_packet_create_data_done(struct sk_buff *first, +- struct wg_peer *peer) ++static void wg_packet_create_data_done(struct wg_peer *peer, struct sk_buff *first) + { + struct sk_buff *skb, *next; + bool is_keepalive, data_sent = false; +@@ -262,22 +261,19 @@ static void wg_packet_create_data_done(s + + void wg_packet_tx_worker(struct work_struct *work) + { +- struct crypt_queue *queue = container_of(work, struct crypt_queue, +- work); ++ struct wg_peer *peer = container_of(work, struct wg_peer, transmit_packet_work); + struct noise_keypair *keypair; + enum packet_state state; + struct sk_buff *first; +- struct wg_peer *peer; + +- while ((first = __ptr_ring_peek(&queue->ring)) != NULL && ++ while ((first = wg_prev_queue_peek(&peer->tx_queue)) != NULL && + (state = atomic_read_acquire(&PACKET_CB(first)->state)) != + PACKET_STATE_UNCRYPTED) { +- __ptr_ring_discard_one(&queue->ring); +- peer = PACKET_PEER(first); ++ wg_prev_queue_drop_peeked(&peer->tx_queue); + keypair = PACKET_CB(first)->keypair; + + if (likely(state == PACKET_STATE_CRYPTED)) +- wg_packet_create_data_done(first, peer); ++ wg_packet_create_data_done(peer, first); + else + kfree_skb_list(first); + +@@ -306,16 +302,14 @@ void wg_packet_encrypt_worker(struct wor + break; + } + } +- wg_queue_enqueue_per_peer(&PACKET_PEER(first)->tx_queue, first, +- state); ++ wg_queue_enqueue_per_peer_tx(first, state); + if (need_resched()) + cond_resched(); + } + } + +-static void wg_packet_create_data(struct sk_buff *first) ++static void wg_packet_create_data(struct wg_peer *peer, struct sk_buff *first) + { +- struct wg_peer *peer = PACKET_PEER(first); + struct wg_device *wg = peer->device; + int ret = -EINVAL; + +@@ -323,13 +317,10 @@ static void wg_packet_create_data(struct + if (unlikely(READ_ONCE(peer->is_dead))) + goto err; + +- ret = wg_queue_enqueue_per_device_and_peer(&wg->encrypt_queue, +- &peer->tx_queue, first, +- wg->packet_crypt_wq, +- &wg->encrypt_queue.last_cpu); ++ ret = wg_queue_enqueue_per_device_and_peer(&wg->encrypt_queue, &peer->tx_queue, first, ++ wg->packet_crypt_wq, &wg->encrypt_queue.last_cpu); + if (unlikely(ret == -EPIPE)) +- wg_queue_enqueue_per_peer(&peer->tx_queue, first, +- PACKET_STATE_DEAD); ++ wg_queue_enqueue_per_peer_tx(first, PACKET_STATE_DEAD); + err: + rcu_read_unlock_bh(); + if (likely(!ret || ret == -EPIPE)) +@@ -393,7 +384,7 @@ void wg_packet_send_staged_packets(struc + packets.prev->next = NULL; + wg_peer_get(keypair->entry.peer); + PACKET_CB(packets.next)->keypair = keypair; +- wg_packet_create_data(packets.next); ++ 
wg_packet_create_data(peer, packets.next); + return; + + out_invalid: diff --git a/target/linux/generic/backport-5.4/080-wireguard-0123-wireguard-device-do-not-generate-ICMP-for-non-IP-pac.patch b/target/linux/generic/backport-5.4/080-wireguard-0123-wireguard-device-do-not-generate-ICMP-for-non-IP-pac.patch deleted file mode 100644 index 073ee9b0d5..0000000000 --- a/target/linux/generic/backport-5.4/080-wireguard-0123-wireguard-device-do-not-generate-ICMP-for-non-IP-pac.patch +++ /dev/null @@ -1,47 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Mon, 22 Feb 2021 17:25:47 +0100 -Subject: [PATCH] wireguard: device: do not generate ICMP for non-IP packets - -commit 99fff5264e7ab06f45b0ad60243475be0a8d0559 upstream. - -If skb->protocol doesn't match the actual skb->data header, it's -probably not a good idea to pass it off to icmp{,v6}_ndo_send, which is -expecting to reply to a valid IP packet. So this commit has that early -mismatch case jump to a later error label. - -Fixes: e7096c131e51 ("net: WireGuard secure network tunnel") -Signed-off-by: Jason A. Donenfeld -Signed-off-by: Jakub Kicinski -Signed-off-by: Jason A. Donenfeld ---- - drivers/net/wireguard/device.c | 7 ++++--- - 1 file changed, 4 insertions(+), 3 deletions(-) - ---- a/drivers/net/wireguard/device.c -+++ b/drivers/net/wireguard/device.c -@@ -138,7 +138,7 @@ static netdev_tx_t wg_xmit(struct sk_buf - else if (skb->protocol == htons(ETH_P_IPV6)) - net_dbg_ratelimited("%s: No peer has allowed IPs matching %pI6\n", - dev->name, &ipv6_hdr(skb)->daddr); -- goto err; -+ goto err_icmp; - } - - family = READ_ONCE(peer->endpoint.addr.sa_family); -@@ -201,12 +201,13 @@ static netdev_tx_t wg_xmit(struct sk_buf - - err_peer: - wg_peer_put(peer); --err: -- ++dev->stats.tx_errors; -+err_icmp: - if (skb->protocol == htons(ETH_P_IP)) - icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0); - else if (skb->protocol == htons(ETH_P_IPV6)) - icmpv6_ndo_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0); -+err: -+ ++dev->stats.tx_errors; - kfree_skb(skb); - return ret; - } diff --git a/target/linux/generic/backport-5.4/080-wireguard-0123-wireguard-kconfig-use-arm-chacha-even-with-no-neon.patch b/target/linux/generic/backport-5.4/080-wireguard-0123-wireguard-kconfig-use-arm-chacha-even-with-no-neon.patch new file mode 100644 index 0000000000..9a251492c2 --- /dev/null +++ b/target/linux/generic/backport-5.4/080-wireguard-0123-wireguard-kconfig-use-arm-chacha-even-with-no-neon.patch @@ -0,0 +1,30 @@ +From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 +From: "Jason A. Donenfeld" +Date: Mon, 22 Feb 2021 17:25:49 +0100 +Subject: [PATCH] wireguard: kconfig: use arm chacha even with no neon + +commit bce2473927af8de12ad131a743f55d69d358c0b9 upstream. + +The condition here was incorrect: a non-neon fallback implementation is +available on arm32 when NEON is not supported. + +Reported-by: Ilya Lipnitskiy +Fixes: e7096c131e51 ("net: WireGuard secure network tunnel") +Signed-off-by: Jason A. Donenfeld +Signed-off-by: Jakub Kicinski +Signed-off-by: Jason A. 
Donenfeld +--- + drivers/net/Kconfig | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/drivers/net/Kconfig ++++ b/drivers/net/Kconfig +@@ -87,7 +87,7 @@ config WIREGUARD + select CRYPTO_CURVE25519_X86 if X86 && 64BIT + select ARM_CRYPTO if ARM + select ARM64_CRYPTO if ARM64 +- select CRYPTO_CHACHA20_NEON if (ARM || ARM64) && KERNEL_MODE_NEON ++ select CRYPTO_CHACHA20_NEON if ARM || (ARM64 && KERNEL_MODE_NEON) + select CRYPTO_POLY1305_NEON if ARM64 && KERNEL_MODE_NEON + select CRYPTO_POLY1305_ARM if ARM + select CRYPTO_CURVE25519_NEON if ARM && KERNEL_MODE_NEON diff --git a/target/linux/generic/backport-5.4/080-wireguard-0124-wireguard-queueing-get-rid-of-per-peer-ring-buffers.patch b/target/linux/generic/backport-5.4/080-wireguard-0124-wireguard-queueing-get-rid-of-per-peer-ring-buffers.patch deleted file mode 100644 index 9dc7ddae7f..0000000000 --- a/target/linux/generic/backport-5.4/080-wireguard-0124-wireguard-queueing-get-rid-of-per-peer-ring-buffers.patch +++ /dev/null @@ -1,560 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Mon, 22 Feb 2021 17:25:48 +0100 -Subject: [PATCH] wireguard: queueing: get rid of per-peer ring buffers -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -commit 8b5553ace83cced775eefd0f3f18b5c6214ccf7a upstream. - -Having two ring buffers per-peer means that every peer results in two -massive ring allocations. On an 8-core x86_64 machine, this commit -reduces the per-peer allocation from 18,688 bytes to 1,856 bytes, which -is an 90% reduction. Ninety percent! With some single-machine -deployments approaching 500,000 peers, we're talking about a reduction -from 7 gigs of memory down to 700 megs of memory. - -In order to get rid of these per-peer allocations, this commit switches -to using a list-based queueing approach. Currently GSO fragments are -chained together using the skb->next pointer (the skb_list_* singly -linked list approach), so we form the per-peer queue around the unused -skb->prev pointer (which sort of makes sense because the links are -pointing backwards). Use of skb_queue_* is not possible here, because -that is based on doubly linked lists and spinlocks. Multiple cores can -write into the queue at any given time, because its writes occur in the -start_xmit path or in the udp_recv path. But reads happen in a single -workqueue item per-peer, amounting to a multi-producer, single-consumer -paradigm. - -The MPSC queue is implemented locklessly and never blocks. However, it -is not linearizable (though it is serializable), with a very tight and -unlikely race on writes, which, when hit (some tiny fraction of the -0.15% of partial adds on a fully loaded 16-core x86_64 system), causes -the queue reader to terminate early. However, because every packet sent -queues up the same workqueue item after it is fully added, the worker -resumes again, and stopping early isn't actually a problem, since at -that point the packet wouldn't have yet been added to the encryption -queue. These properties allow us to avoid disabling interrupts or -spinning. The design is based on Dmitry Vyukov's algorithm [1]. - -Performance-wise, ordinarily list-based queues aren't preferable to -ringbuffers, because of cache misses when following pointers around. -However, we *already* have to follow the adjacent pointers when working -through fragments, so there shouldn't actually be any change there. 
A -potential downside is that dequeueing is a bit more complicated, but the -ptr_ring structure used prior had a spinlock when dequeueing, so all and -all the difference appears to be a wash. - -Actually, from profiling, the biggest performance hit, by far, of this -commit winds up being atomic_add_unless(count, 1, max) and atomic_ -dec(count), which account for the majority of CPU time, according to -perf. In that sense, the previous ring buffer was superior in that it -could check if it was full by head==tail, which the list-based approach -cannot do. - -But all and all, this enables us to get massive memory savings, allowing -WireGuard to scale for real world deployments, without taking much of a -performance hit. - -[1] http://www.1024cores.net/home/lock-free-algorithms/queues/intrusive-mpsc-node-based-queue - -Reviewed-by: Dmitry Vyukov -Reviewed-by: Toke Høiland-Jørgensen -Fixes: e7096c131e51 ("net: WireGuard secure network tunnel") -Signed-off-by: Jason A. Donenfeld -Signed-off-by: Jakub Kicinski -Signed-off-by: Jason A. Donenfeld ---- - drivers/net/wireguard/device.c | 12 ++--- - drivers/net/wireguard/device.h | 15 +++--- - drivers/net/wireguard/peer.c | 28 ++++------- - drivers/net/wireguard/peer.h | 4 +- - drivers/net/wireguard/queueing.c | 86 +++++++++++++++++++++++++------- - drivers/net/wireguard/queueing.h | 45 ++++++++++++----- - drivers/net/wireguard/receive.c | 16 +++--- - drivers/net/wireguard/send.c | 31 ++++-------- - 8 files changed, 144 insertions(+), 93 deletions(-) - ---- a/drivers/net/wireguard/device.c -+++ b/drivers/net/wireguard/device.c -@@ -235,8 +235,8 @@ static void wg_destruct(struct net_devic - destroy_workqueue(wg->handshake_receive_wq); - destroy_workqueue(wg->handshake_send_wq); - destroy_workqueue(wg->packet_crypt_wq); -- wg_packet_queue_free(&wg->decrypt_queue, true); -- wg_packet_queue_free(&wg->encrypt_queue, true); -+ wg_packet_queue_free(&wg->decrypt_queue); -+ wg_packet_queue_free(&wg->encrypt_queue); - rcu_barrier(); /* Wait for all the peers to be actually freed. 
*/ - wg_ratelimiter_uninit(); - memzero_explicit(&wg->static_identity, sizeof(wg->static_identity)); -@@ -338,12 +338,12 @@ static int wg_newlink(struct net *src_ne - goto err_destroy_handshake_send; - - ret = wg_packet_queue_init(&wg->encrypt_queue, wg_packet_encrypt_worker, -- true, MAX_QUEUED_PACKETS); -+ MAX_QUEUED_PACKETS); - if (ret < 0) - goto err_destroy_packet_crypt; - - ret = wg_packet_queue_init(&wg->decrypt_queue, wg_packet_decrypt_worker, -- true, MAX_QUEUED_PACKETS); -+ MAX_QUEUED_PACKETS); - if (ret < 0) - goto err_free_encrypt_queue; - -@@ -368,9 +368,9 @@ static int wg_newlink(struct net *src_ne - err_uninit_ratelimiter: - wg_ratelimiter_uninit(); - err_free_decrypt_queue: -- wg_packet_queue_free(&wg->decrypt_queue, true); -+ wg_packet_queue_free(&wg->decrypt_queue); - err_free_encrypt_queue: -- wg_packet_queue_free(&wg->encrypt_queue, true); -+ wg_packet_queue_free(&wg->encrypt_queue); - err_destroy_packet_crypt: - destroy_workqueue(wg->packet_crypt_wq); - err_destroy_handshake_send: ---- a/drivers/net/wireguard/device.h -+++ b/drivers/net/wireguard/device.h -@@ -27,13 +27,14 @@ struct multicore_worker { - - struct crypt_queue { - struct ptr_ring ring; -- union { -- struct { -- struct multicore_worker __percpu *worker; -- int last_cpu; -- }; -- struct work_struct work; -- }; -+ struct multicore_worker __percpu *worker; -+ int last_cpu; -+}; -+ -+struct prev_queue { -+ struct sk_buff *head, *tail, *peeked; -+ struct { struct sk_buff *next, *prev; } empty; // Match first 2 members of struct sk_buff. -+ atomic_t count; - }; - - struct wg_device { ---- a/drivers/net/wireguard/peer.c -+++ b/drivers/net/wireguard/peer.c -@@ -32,27 +32,22 @@ struct wg_peer *wg_peer_create(struct wg - peer = kzalloc(sizeof(*peer), GFP_KERNEL); - if (unlikely(!peer)) - return ERR_PTR(ret); -- peer->device = wg; -+ if (dst_cache_init(&peer->endpoint_cache, GFP_KERNEL)) -+ goto err; - -+ peer->device = wg; - wg_noise_handshake_init(&peer->handshake, &wg->static_identity, - public_key, preshared_key, peer); -- if (dst_cache_init(&peer->endpoint_cache, GFP_KERNEL)) -- goto err_1; -- if (wg_packet_queue_init(&peer->tx_queue, wg_packet_tx_worker, false, -- MAX_QUEUED_PACKETS)) -- goto err_2; -- if (wg_packet_queue_init(&peer->rx_queue, NULL, false, -- MAX_QUEUED_PACKETS)) -- goto err_3; -- - peer->internal_id = atomic64_inc_return(&peer_counter); - peer->serial_work_cpu = nr_cpumask_bits; - wg_cookie_init(&peer->latest_cookie); - wg_timers_init(peer); - wg_cookie_checker_precompute_peer_keys(peer); - spin_lock_init(&peer->keypairs.keypair_update_lock); -- INIT_WORK(&peer->transmit_handshake_work, -- wg_packet_handshake_send_worker); -+ INIT_WORK(&peer->transmit_handshake_work, wg_packet_handshake_send_worker); -+ INIT_WORK(&peer->transmit_packet_work, wg_packet_tx_worker); -+ wg_prev_queue_init(&peer->tx_queue); -+ wg_prev_queue_init(&peer->rx_queue); - rwlock_init(&peer->endpoint_lock); - kref_init(&peer->refcount); - skb_queue_head_init(&peer->staged_packet_queue); -@@ -68,11 +63,7 @@ struct wg_peer *wg_peer_create(struct wg - pr_debug("%s: Peer %llu created\n", wg->dev->name, peer->internal_id); - return peer; - --err_3: -- wg_packet_queue_free(&peer->tx_queue, false); --err_2: -- dst_cache_destroy(&peer->endpoint_cache); --err_1: -+err: - kfree(peer); - return ERR_PTR(ret); - } -@@ -197,8 +188,7 @@ static void rcu_release(struct rcu_head - struct wg_peer *peer = container_of(rcu, struct wg_peer, rcu); - - dst_cache_destroy(&peer->endpoint_cache); -- wg_packet_queue_free(&peer->rx_queue, false); -- 
wg_packet_queue_free(&peer->tx_queue, false); -+ WARN_ON(wg_prev_queue_peek(&peer->tx_queue) || wg_prev_queue_peek(&peer->rx_queue)); - - /* The final zeroing takes care of clearing any remaining handshake key - * material and other potentially sensitive information. ---- a/drivers/net/wireguard/peer.h -+++ b/drivers/net/wireguard/peer.h -@@ -36,7 +36,7 @@ struct endpoint { - - struct wg_peer { - struct wg_device *device; -- struct crypt_queue tx_queue, rx_queue; -+ struct prev_queue tx_queue, rx_queue; - struct sk_buff_head staged_packet_queue; - int serial_work_cpu; - bool is_dead; -@@ -46,7 +46,7 @@ struct wg_peer { - rwlock_t endpoint_lock; - struct noise_handshake handshake; - atomic64_t last_sent_handshake; -- struct work_struct transmit_handshake_work, clear_peer_work; -+ struct work_struct transmit_handshake_work, clear_peer_work, transmit_packet_work; - struct cookie latest_cookie; - struct hlist_node pubkey_hash; - u64 rx_bytes, tx_bytes; ---- a/drivers/net/wireguard/queueing.c -+++ b/drivers/net/wireguard/queueing.c -@@ -9,8 +9,7 @@ struct multicore_worker __percpu * - wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr) - { - int cpu; -- struct multicore_worker __percpu *worker = -- alloc_percpu(struct multicore_worker); -+ struct multicore_worker __percpu *worker = alloc_percpu(struct multicore_worker); - - if (!worker) - return NULL; -@@ -23,7 +22,7 @@ wg_packet_percpu_multicore_worker_alloc( - } - - int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function, -- bool multicore, unsigned int len) -+ unsigned int len) - { - int ret; - -@@ -31,25 +30,78 @@ int wg_packet_queue_init(struct crypt_qu - ret = ptr_ring_init(&queue->ring, len, GFP_KERNEL); - if (ret) - return ret; -- if (function) { -- if (multicore) { -- queue->worker = wg_packet_percpu_multicore_worker_alloc( -- function, queue); -- if (!queue->worker) { -- ptr_ring_cleanup(&queue->ring, NULL); -- return -ENOMEM; -- } -- } else { -- INIT_WORK(&queue->work, function); -- } -+ queue->worker = wg_packet_percpu_multicore_worker_alloc(function, queue); -+ if (!queue->worker) { -+ ptr_ring_cleanup(&queue->ring, NULL); -+ return -ENOMEM; - } - return 0; - } - --void wg_packet_queue_free(struct crypt_queue *queue, bool multicore) -+void wg_packet_queue_free(struct crypt_queue *queue) - { -- if (multicore) -- free_percpu(queue->worker); -+ free_percpu(queue->worker); - WARN_ON(!__ptr_ring_empty(&queue->ring)); - ptr_ring_cleanup(&queue->ring, NULL); - } -+ -+#define NEXT(skb) ((skb)->prev) -+#define STUB(queue) ((struct sk_buff *)&queue->empty) -+ -+void wg_prev_queue_init(struct prev_queue *queue) -+{ -+ NEXT(STUB(queue)) = NULL; -+ queue->head = queue->tail = STUB(queue); -+ queue->peeked = NULL; -+ atomic_set(&queue->count, 0); -+ BUILD_BUG_ON( -+ offsetof(struct sk_buff, next) != offsetof(struct prev_queue, empty.next) - -+ offsetof(struct prev_queue, empty) || -+ offsetof(struct sk_buff, prev) != offsetof(struct prev_queue, empty.prev) - -+ offsetof(struct prev_queue, empty)); -+} -+ -+static void __wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb) -+{ -+ WRITE_ONCE(NEXT(skb), NULL); -+ WRITE_ONCE(NEXT(xchg_release(&queue->head, skb)), skb); -+} -+ -+bool wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb) -+{ -+ if (!atomic_add_unless(&queue->count, 1, MAX_QUEUED_PACKETS)) -+ return false; -+ __wg_prev_queue_enqueue(queue, skb); -+ return true; -+} -+ -+struct sk_buff *wg_prev_queue_dequeue(struct prev_queue *queue) -+{ -+ struct sk_buff *tail = 
queue->tail, *next = smp_load_acquire(&NEXT(tail)); -+ -+ if (tail == STUB(queue)) { -+ if (!next) -+ return NULL; -+ queue->tail = next; -+ tail = next; -+ next = smp_load_acquire(&NEXT(next)); -+ } -+ if (next) { -+ queue->tail = next; -+ atomic_dec(&queue->count); -+ return tail; -+ } -+ if (tail != READ_ONCE(queue->head)) -+ return NULL; -+ __wg_prev_queue_enqueue(queue, STUB(queue)); -+ next = smp_load_acquire(&NEXT(tail)); -+ if (next) { -+ queue->tail = next; -+ atomic_dec(&queue->count); -+ return tail; -+ } -+ return NULL; -+} -+ -+#undef NEXT -+#undef STUB ---- a/drivers/net/wireguard/queueing.h -+++ b/drivers/net/wireguard/queueing.h -@@ -17,12 +17,13 @@ struct wg_device; - struct wg_peer; - struct multicore_worker; - struct crypt_queue; -+struct prev_queue; - struct sk_buff; - - /* queueing.c APIs: */ - int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function, -- bool multicore, unsigned int len); --void wg_packet_queue_free(struct crypt_queue *queue, bool multicore); -+ unsigned int len); -+void wg_packet_queue_free(struct crypt_queue *queue); - struct multicore_worker __percpu * - wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr); - -@@ -135,8 +136,31 @@ static inline int wg_cpumask_next_online - return cpu; - } - -+void wg_prev_queue_init(struct prev_queue *queue); -+ -+/* Multi producer */ -+bool wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb); -+ -+/* Single consumer */ -+struct sk_buff *wg_prev_queue_dequeue(struct prev_queue *queue); -+ -+/* Single consumer */ -+static inline struct sk_buff *wg_prev_queue_peek(struct prev_queue *queue) -+{ -+ if (queue->peeked) -+ return queue->peeked; -+ queue->peeked = wg_prev_queue_dequeue(queue); -+ return queue->peeked; -+} -+ -+/* Single consumer */ -+static inline void wg_prev_queue_drop_peeked(struct prev_queue *queue) -+{ -+ queue->peeked = NULL; -+} -+ - static inline int wg_queue_enqueue_per_device_and_peer( -- struct crypt_queue *device_queue, struct crypt_queue *peer_queue, -+ struct crypt_queue *device_queue, struct prev_queue *peer_queue, - struct sk_buff *skb, struct workqueue_struct *wq, int *next_cpu) - { - int cpu; -@@ -145,8 +169,9 @@ static inline int wg_queue_enqueue_per_d - /* We first queue this up for the peer ingestion, but the consumer - * will wait for the state to change to CRYPTED or DEAD before. - */ -- if (unlikely(ptr_ring_produce_bh(&peer_queue->ring, skb))) -+ if (unlikely(!wg_prev_queue_enqueue(peer_queue, skb))) - return -ENOSPC; -+ - /* Then we queue it up in the device queue, which consumes the - * packet as soon as it can. - */ -@@ -157,9 +182,7 @@ static inline int wg_queue_enqueue_per_d - return 0; - } - --static inline void wg_queue_enqueue_per_peer(struct crypt_queue *queue, -- struct sk_buff *skb, -- enum packet_state state) -+static inline void wg_queue_enqueue_per_peer_tx(struct sk_buff *skb, enum packet_state state) - { - /* We take a reference, because as soon as we call atomic_set, the - * peer can be freed from below us. 
-@@ -167,14 +190,12 @@ static inline void wg_queue_enqueue_per_ - struct wg_peer *peer = wg_peer_get(PACKET_PEER(skb)); - - atomic_set_release(&PACKET_CB(skb)->state, state); -- queue_work_on(wg_cpumask_choose_online(&peer->serial_work_cpu, -- peer->internal_id), -- peer->device->packet_crypt_wq, &queue->work); -+ queue_work_on(wg_cpumask_choose_online(&peer->serial_work_cpu, peer->internal_id), -+ peer->device->packet_crypt_wq, &peer->transmit_packet_work); - wg_peer_put(peer); - } - --static inline void wg_queue_enqueue_per_peer_napi(struct sk_buff *skb, -- enum packet_state state) -+static inline void wg_queue_enqueue_per_peer_rx(struct sk_buff *skb, enum packet_state state) - { - /* We take a reference, because as soon as we call atomic_set, the - * peer can be freed from below us. ---- a/drivers/net/wireguard/receive.c -+++ b/drivers/net/wireguard/receive.c -@@ -444,7 +444,6 @@ packet_processed: - int wg_packet_rx_poll(struct napi_struct *napi, int budget) - { - struct wg_peer *peer = container_of(napi, struct wg_peer, napi); -- struct crypt_queue *queue = &peer->rx_queue; - struct noise_keypair *keypair; - struct endpoint endpoint; - enum packet_state state; -@@ -455,11 +454,10 @@ int wg_packet_rx_poll(struct napi_struct - if (unlikely(budget <= 0)) - return 0; - -- while ((skb = __ptr_ring_peek(&queue->ring)) != NULL && -+ while ((skb = wg_prev_queue_peek(&peer->rx_queue)) != NULL && - (state = atomic_read_acquire(&PACKET_CB(skb)->state)) != - PACKET_STATE_UNCRYPTED) { -- __ptr_ring_discard_one(&queue->ring); -- peer = PACKET_PEER(skb); -+ wg_prev_queue_drop_peeked(&peer->rx_queue); - keypair = PACKET_CB(skb)->keypair; - free = true; - -@@ -508,7 +506,7 @@ void wg_packet_decrypt_worker(struct wor - enum packet_state state = - likely(decrypt_packet(skb, PACKET_CB(skb)->keypair)) ? 
- PACKET_STATE_CRYPTED : PACKET_STATE_DEAD; -- wg_queue_enqueue_per_peer_napi(skb, state); -+ wg_queue_enqueue_per_peer_rx(skb, state); - if (need_resched()) - cond_resched(); - } -@@ -531,12 +529,10 @@ static void wg_packet_consume_data(struc - if (unlikely(READ_ONCE(peer->is_dead))) - goto err; - -- ret = wg_queue_enqueue_per_device_and_peer(&wg->decrypt_queue, -- &peer->rx_queue, skb, -- wg->packet_crypt_wq, -- &wg->decrypt_queue.last_cpu); -+ ret = wg_queue_enqueue_per_device_and_peer(&wg->decrypt_queue, &peer->rx_queue, skb, -+ wg->packet_crypt_wq, &wg->decrypt_queue.last_cpu); - if (unlikely(ret == -EPIPE)) -- wg_queue_enqueue_per_peer_napi(skb, PACKET_STATE_DEAD); -+ wg_queue_enqueue_per_peer_rx(skb, PACKET_STATE_DEAD); - if (likely(!ret || ret == -EPIPE)) { - rcu_read_unlock_bh(); - return; ---- a/drivers/net/wireguard/send.c -+++ b/drivers/net/wireguard/send.c -@@ -239,8 +239,7 @@ void wg_packet_send_keepalive(struct wg_ - wg_packet_send_staged_packets(peer); - } - --static void wg_packet_create_data_done(struct sk_buff *first, -- struct wg_peer *peer) -+static void wg_packet_create_data_done(struct wg_peer *peer, struct sk_buff *first) - { - struct sk_buff *skb, *next; - bool is_keepalive, data_sent = false; -@@ -262,22 +261,19 @@ static void wg_packet_create_data_done(s - - void wg_packet_tx_worker(struct work_struct *work) - { -- struct crypt_queue *queue = container_of(work, struct crypt_queue, -- work); -+ struct wg_peer *peer = container_of(work, struct wg_peer, transmit_packet_work); - struct noise_keypair *keypair; - enum packet_state state; - struct sk_buff *first; -- struct wg_peer *peer; - -- while ((first = __ptr_ring_peek(&queue->ring)) != NULL && -+ while ((first = wg_prev_queue_peek(&peer->tx_queue)) != NULL && - (state = atomic_read_acquire(&PACKET_CB(first)->state)) != - PACKET_STATE_UNCRYPTED) { -- __ptr_ring_discard_one(&queue->ring); -- peer = PACKET_PEER(first); -+ wg_prev_queue_drop_peeked(&peer->tx_queue); - keypair = PACKET_CB(first)->keypair; - - if (likely(state == PACKET_STATE_CRYPTED)) -- wg_packet_create_data_done(first, peer); -+ wg_packet_create_data_done(peer, first); - else - kfree_skb_list(first); - -@@ -306,16 +302,14 @@ void wg_packet_encrypt_worker(struct wor - break; - } - } -- wg_queue_enqueue_per_peer(&PACKET_PEER(first)->tx_queue, first, -- state); -+ wg_queue_enqueue_per_peer_tx(first, state); - if (need_resched()) - cond_resched(); - } - } - --static void wg_packet_create_data(struct sk_buff *first) -+static void wg_packet_create_data(struct wg_peer *peer, struct sk_buff *first) - { -- struct wg_peer *peer = PACKET_PEER(first); - struct wg_device *wg = peer->device; - int ret = -EINVAL; - -@@ -323,13 +317,10 @@ static void wg_packet_create_data(struct - if (unlikely(READ_ONCE(peer->is_dead))) - goto err; - -- ret = wg_queue_enqueue_per_device_and_peer(&wg->encrypt_queue, -- &peer->tx_queue, first, -- wg->packet_crypt_wq, -- &wg->encrypt_queue.last_cpu); -+ ret = wg_queue_enqueue_per_device_and_peer(&wg->encrypt_queue, &peer->tx_queue, first, -+ wg->packet_crypt_wq, &wg->encrypt_queue.last_cpu); - if (unlikely(ret == -EPIPE)) -- wg_queue_enqueue_per_peer(&peer->tx_queue, first, -- PACKET_STATE_DEAD); -+ wg_queue_enqueue_per_peer_tx(first, PACKET_STATE_DEAD); - err: - rcu_read_unlock_bh(); - if (likely(!ret || ret == -EPIPE)) -@@ -393,7 +384,7 @@ void wg_packet_send_staged_packets(struc - packets.prev->next = NULL; - wg_peer_get(keypair->entry.peer); - PACKET_CB(packets.next)->keypair = keypair; -- wg_packet_create_data(packets.next); -+ 
wg_packet_create_data(peer, packets.next); - return; - - out_invalid: diff --git a/target/linux/generic/backport-5.4/080-wireguard-0125-wireguard-kconfig-use-arm-chacha-even-with-no-neon.patch b/target/linux/generic/backport-5.4/080-wireguard-0125-wireguard-kconfig-use-arm-chacha-even-with-no-neon.patch deleted file mode 100644 index 9a251492c2..0000000000 --- a/target/linux/generic/backport-5.4/080-wireguard-0125-wireguard-kconfig-use-arm-chacha-even-with-no-neon.patch +++ /dev/null @@ -1,30 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Mon, 22 Feb 2021 17:25:49 +0100 -Subject: [PATCH] wireguard: kconfig: use arm chacha even with no neon - -commit bce2473927af8de12ad131a743f55d69d358c0b9 upstream. - -The condition here was incorrect: a non-neon fallback implementation is -available on arm32 when NEON is not supported. - -Reported-by: Ilya Lipnitskiy -Fixes: e7096c131e51 ("net: WireGuard secure network tunnel") -Signed-off-by: Jason A. Donenfeld -Signed-off-by: Jakub Kicinski -Signed-off-by: Jason A. Donenfeld ---- - drivers/net/Kconfig | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/drivers/net/Kconfig -+++ b/drivers/net/Kconfig -@@ -87,7 +87,7 @@ config WIREGUARD - select CRYPTO_CURVE25519_X86 if X86 && 64BIT - select ARM_CRYPTO if ARM - select ARM64_CRYPTO if ARM64 -- select CRYPTO_CHACHA20_NEON if (ARM || ARM64) && KERNEL_MODE_NEON -+ select CRYPTO_CHACHA20_NEON if ARM || (ARM64 && KERNEL_MODE_NEON) - select CRYPTO_POLY1305_NEON if ARM64 && KERNEL_MODE_NEON - select CRYPTO_POLY1305_ARM if ARM - select CRYPTO_CURVE25519_NEON if ARM && KERNEL_MODE_NEON diff --git a/target/linux/generic/backport-5.4/830-v5.12-0002-usb-serial-option-update-interface-mapping-for-ZTE-P685M.patch b/target/linux/generic/backport-5.4/830-v5.12-0002-usb-serial-option-update-interface-mapping-for-ZTE-P685M.patch deleted file mode 100644 index 87eed649ec..0000000000 --- a/target/linux/generic/backport-5.4/830-v5.12-0002-usb-serial-option-update-interface-mapping-for-ZTE-P685M.patch +++ /dev/null @@ -1,73 +0,0 @@ -From 6420a569504e212d618d4a4736e2c59ed80a8478 Mon Sep 17 00:00:00 2001 -From: Lech Perczak -Date: Sun, 7 Feb 2021 01:54:43 +0100 -Subject: USB: serial: option: update interface mapping for ZTE P685M -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -This patch prepares for qmi_wwan driver support for the device. -Previously "option" driver mapped itself to interfaces 0 and 3 (matching -ff/ff/ff), while interface 3 is in fact a QMI port. -Interfaces 1 and 2 (matching ff/00/00) expose AT commands, -and weren't supported previously at all. -Without this patch, a possible conflict would exist if device ID was -added to qmi_wwan driver for interface 3. - -Update and simplify device ID to match interfaces 0-2 directly, -to expose QCDM (0), PCUI (1), and modem (2) ports and avoid conflict -with QMI (3), and ADB (4). - -The modem is used inside ZTE MF283+ router and carriers identify it as -such. -Interface mapping is: -0: QCDM, 1: AT (PCUI), 2: AT (Modem), 3: QMI, 4: ADB - -T: Bus=02 Lev=02 Prnt=02 Port=05 Cnt=01 Dev#= 3 Spd=480 MxCh= 0 -D: Ver= 2.01 Cls=00(>ifc ) Sub=00 Prot=00 MxPS=64 #Cfgs= 1 -P: Vendor=19d2 ProdID=1275 Rev=f0.00 -S: Manufacturer=ZTE,Incorporated -S: Product=ZTE Technologies MSM -S: SerialNumber=P685M510ZTED0000CP&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&0 -C:* #Ifs= 5 Cfg#= 1 Atr=a0 MxPwr=500mA -I:* If#= 0 Alt= 0 #EPs= 2 Cls=ff(vend.) 
Sub=ff Prot=ff Driver=option -E: Ad=81(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms -E: Ad=01(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms -I:* If#= 1 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=00 Prot=00 Driver=option -E: Ad=83(I) Atr=03(Int.) MxPS= 10 Ivl=32ms -E: Ad=82(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms -E: Ad=02(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms -I:* If#= 2 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=00 Prot=00 Driver=option -E: Ad=85(I) Atr=03(Int.) MxPS= 10 Ivl=32ms -E: Ad=84(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms -E: Ad=03(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms -I:* If#= 3 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=ff Prot=ff Driver=qmi_wwan -E: Ad=87(I) Atr=03(Int.) MxPS= 8 Ivl=32ms -E: Ad=86(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms -E: Ad=04(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms -I:* If#= 4 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=42 Prot=01 Driver=(none) -E: Ad=88(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms -E: Ad=05(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms - -Cc: Johan Hovold -Cc: Bjørn Mork -Signed-off-by: Lech Perczak -Link: https://lore.kernel.org/r/20210207005443.12936-1-lech.perczak@gmail.com -Cc: stable@vger.kernel.org -Signed-off-by: Johan Hovold ---- - drivers/usb/serial/option.c | 3 ++- - 1 file changed, 2 insertions(+), 1 deletion(-) - ---- a/drivers/usb/serial/option.c -+++ b/drivers/usb/serial/option.c -@@ -1569,7 +1569,8 @@ static const struct usb_device_id option - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1272, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1273, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1274, 0xff, 0xff, 0xff) }, -- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1275, 0xff, 0xff, 0xff) }, -+ { USB_DEVICE(ZTE_VENDOR_ID, 0x1275), /* ZTE P685M */ -+ .driver_info = RSVD(3) | RSVD(4) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1276, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1277, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1278, 0xff, 0xff, 0xff) }, diff --git a/target/linux/generic/hack-5.4/204-module_strip.patch b/target/linux/generic/hack-5.4/204-module_strip.patch index 2b4435f93e..ca0c228e10 100644 --- a/target/linux/generic/hack-5.4/204-module_strip.patch +++ b/target/linux/generic/hack-5.4/204-module_strip.patch @@ -112,7 +112,7 @@ Signed-off-by: Felix Fietkau config MODULES_TREE_LOOKUP --- a/kernel/module.c +++ b/kernel/module.c -@@ -3125,9 +3125,11 @@ static int setup_load_info(struct load_i +@@ -3142,9 +3142,11 @@ static int setup_load_info(struct load_i static int check_modinfo(struct module *mod, struct load_info *info, int flags) { @@ -125,7 +125,7 @@ Signed-off-by: Felix Fietkau if (flags & MODULE_INIT_IGNORE_VERMAGIC) modmagic = NULL; -@@ -3148,6 +3150,7 @@ static int check_modinfo(struct module * +@@ -3165,6 +3167,7 @@ static int check_modinfo(struct module * mod->name); add_taint_module(mod, TAINT_OOT_MODULE, LOCKDEP_STILL_OK); } diff --git a/target/linux/generic/hack-5.4/221-module_exports.patch b/target/linux/generic/hack-5.4/221-module_exports.patch index c56ec9909b..09855b04d1 100644 --- a/target/linux/generic/hack-5.4/221-module_exports.patch +++ b/target/linux/generic/hack-5.4/221-module_exports.patch @@ -56,7 +56,7 @@ Signed-off-by: Felix Fietkau } \ \ /* __*init sections */ \ -@@ -898,6 +908,8 @@ +@@ -903,6 +913,8 @@ EXIT_TEXT \ EXIT_DATA \ EXIT_CALL \ diff --git a/target/linux/generic/hack-5.4/400-unlock_mx25l6406e_with_4bit_block_protect.patch b/target/linux/generic/hack-5.4/400-unlock_mx25l6406e_with_4bit_block_protect.patch index e25fdcd858..af0a14948c 100644 --- 
a/target/linux/generic/hack-5.4/400-unlock_mx25l6406e_with_4bit_block_protect.patch +++ b/target/linux/generic/hack-5.4/400-unlock_mx25l6406e_with_4bit_block_protect.patch @@ -20,7 +20,7 @@ /* Part specific fixup hooks. */ const struct spi_nor_fixups *fixups; -@@ -1983,6 +1987,9 @@ static int spi_nor_clear_sr_bp(struct sp +@@ -1985,6 +1989,9 @@ static int spi_nor_clear_sr_bp(struct sp int ret; u8 mask = SR_BP2 | SR_BP1 | SR_BP0; @@ -30,7 +30,7 @@ ret = read_sr(nor); if (ret < 0) { dev_err(nor->dev, "error while reading status register\n"); -@@ -2335,7 +2342,7 @@ static const struct flash_info spi_nor_i +@@ -2337,7 +2344,7 @@ static const struct flash_info spi_nor_i { "mx25l1606e", INFO(0xc22015, 0, 64 * 1024, 32, SECT_4K) }, { "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64, SECT_4K) }, { "mx25l3255e", INFO(0xc29e16, 0, 64 * 1024, 64, SECT_4K) }, @@ -39,7 +39,7 @@ { "mx25u2033e", INFO(0xc22532, 0, 64 * 1024, 4, SECT_4K) }, { "mx25u3235f", INFO(0xc22536, 0, 64 * 1024, 64, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, -@@ -5024,6 +5031,9 @@ int spi_nor_scan(struct spi_nor *nor, co +@@ -5025,6 +5032,9 @@ int spi_nor_scan(struct spi_nor *nor, co if (info->flags & USE_CLSR) nor->flags |= SNOR_F_USE_CLSR; diff --git a/target/linux/generic/pending-5.4/450-mtd-spi-nor-allow-NOR-driver-to-write-fewer-bytes-th.patch b/target/linux/generic/pending-5.4/450-mtd-spi-nor-allow-NOR-driver-to-write-fewer-bytes-th.patch index a84af43a29..fe2d7a6b23 100644 --- a/target/linux/generic/pending-5.4/450-mtd-spi-nor-allow-NOR-driver-to-write-fewer-bytes-th.patch +++ b/target/linux/generic/pending-5.4/450-mtd-spi-nor-allow-NOR-driver-to-write-fewer-bytes-th.patch @@ -11,7 +11,7 @@ Signed-off-by: Felix Fietkau --- a/drivers/mtd/spi-nor/spi-nor.c +++ b/drivers/mtd/spi-nor/spi-nor.c -@@ -2704,7 +2704,7 @@ static int spi_nor_write(struct mtd_info +@@ -2706,7 +2706,7 @@ static int spi_nor_write(struct mtd_info write_enable(nor); ret = spi_nor_write_data(nor, addr, page_remain, buf + i); diff --git a/target/linux/generic/pending-5.4/465-m25p80-mx-disable-software-protection.patch b/target/linux/generic/pending-5.4/465-m25p80-mx-disable-software-protection.patch index 7c11ad356b..24d2d4567d 100644 --- a/target/linux/generic/pending-5.4/465-m25p80-mx-disable-software-protection.patch +++ b/target/linux/generic/pending-5.4/465-m25p80-mx-disable-software-protection.patch @@ -8,7 +8,7 @@ Signed-off-by: Felix Fietkau --- a/drivers/mtd/spi-nor/spi-nor.c +++ b/drivers/mtd/spi-nor/spi-nor.c -@@ -4883,6 +4883,7 @@ int spi_nor_scan(struct spi_nor *nor, co +@@ -4884,6 +4884,7 @@ int spi_nor_scan(struct spi_nor *nor, co */ if (JEDEC_MFR(nor->info) == SNOR_MFR_ATMEL || JEDEC_MFR(nor->info) == SNOR_MFR_INTEL || diff --git a/target/linux/generic/pending-5.4/466-Revert-mtd-spi-nor-fix-Spansion-regressions-aliased-.patch b/target/linux/generic/pending-5.4/466-Revert-mtd-spi-nor-fix-Spansion-regressions-aliased-.patch index 77b9b72b37..70f1e9f059 100644 --- a/target/linux/generic/pending-5.4/466-Revert-mtd-spi-nor-fix-Spansion-regressions-aliased-.patch +++ b/target/linux/generic/pending-5.4/466-Revert-mtd-spi-nor-fix-Spansion-regressions-aliased-.patch @@ -17,7 +17,7 @@ Signed-off-by: Matthias Schiffer --- a/drivers/mtd/spi-nor/spi-nor.c +++ b/drivers/mtd/spi-nor/spi-nor.c -@@ -4397,6 +4397,7 @@ static void st_micron_set_default_init(s +@@ -4398,6 +4398,7 @@ static void st_micron_set_default_init(s static void winbond_set_default_init(struct spi_nor *nor) { @@ -25,7 +25,7 @@ Signed-off-by: Matthias Schiffer nor->params.set_4byte = 
winbond_set_4byte; } -@@ -4885,6 +4886,7 @@ int spi_nor_scan(struct spi_nor *nor, co +@@ -4886,6 +4887,7 @@ int spi_nor_scan(struct spi_nor *nor, co JEDEC_MFR(nor->info) == SNOR_MFR_INTEL || JEDEC_MFR(nor->info) == SNOR_MFR_MACRONIX || JEDEC_MFR(nor->info) == SNOR_MFR_SST || diff --git a/target/linux/generic/pending-5.4/470-mtd-spi-nor-support-limiting-4K-sectors-support-base.patch b/target/linux/generic/pending-5.4/470-mtd-spi-nor-support-limiting-4K-sectors-support-base.patch index 7773a8ec05..d3e587ff97 100644 --- a/target/linux/generic/pending-5.4/470-mtd-spi-nor-support-limiting-4K-sectors-support-base.patch +++ b/target/linux/generic/pending-5.4/470-mtd-spi-nor-support-limiting-4K-sectors-support-base.patch @@ -39,7 +39,7 @@ Signed-off-by: Felix Fietkau depends on OF && (ARM || ARM64 || COMPILE_TEST) --- a/drivers/mtd/spi-nor/spi-nor.c +++ b/drivers/mtd/spi-nor/spi-nor.c -@@ -4463,6 +4463,7 @@ static void spi_nor_info_init_params(str +@@ -4464,6 +4464,7 @@ static void spi_nor_info_init_params(str struct spi_nor_erase_map *map = ¶ms->erase_map; const struct flash_info *info = nor->info; struct device_node *np = spi_nor_get_flash_node(nor); @@ -47,7 +47,7 @@ Signed-off-by: Felix Fietkau u8 i, erase_mask; /* Initialize legacy flash parameters and settings. */ -@@ -4526,6 +4527,21 @@ static void spi_nor_info_init_params(str +@@ -4527,6 +4528,21 @@ static void spi_nor_info_init_params(str */ erase_mask = 0; i = 0; @@ -69,7 +69,7 @@ Signed-off-by: Felix Fietkau if (info->flags & SECT_4K_PMC) { erase_mask |= BIT(i); spi_nor_set_erase_type(&map->erase_type[i], 4096u, -@@ -4537,6 +4553,7 @@ static void spi_nor_info_init_params(str +@@ -4538,6 +4554,7 @@ static void spi_nor_info_init_params(str SPINOR_OP_BE_4K); i++; } diff --git a/target/linux/generic/pending-5.4/476-mtd-spi-nor-add-eon-en25q128.patch b/target/linux/generic/pending-5.4/476-mtd-spi-nor-add-eon-en25q128.patch index d35530492a..b62dae536b 100644 --- a/target/linux/generic/pending-5.4/476-mtd-spi-nor-add-eon-en25q128.patch +++ b/target/linux/generic/pending-5.4/476-mtd-spi-nor-add-eon-en25q128.patch @@ -8,7 +8,7 @@ Signed-off-by: Piotr Dymacz --- a/drivers/mtd/spi-nor/spi-nor.c +++ b/drivers/mtd/spi-nor/spi-nor.c -@@ -2177,6 +2177,7 @@ static const struct flash_info spi_nor_i +@@ -2179,6 +2179,7 @@ static const struct flash_info spi_nor_i { "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64, 0) }, { "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) }, { "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128, SECT_4K) }, diff --git a/target/linux/generic/pending-5.4/479-mtd-spi-nor-add-xtx-xt25f128b.patch b/target/linux/generic/pending-5.4/479-mtd-spi-nor-add-xtx-xt25f128b.patch index 0c58f29d5b..39e02604fe 100644 --- a/target/linux/generic/pending-5.4/479-mtd-spi-nor-add-xtx-xt25f128b.patch +++ b/target/linux/generic/pending-5.4/479-mtd-spi-nor-add-xtx-xt25f128b.patch @@ -30,7 +30,7 @@ Signed-off-by: Daniel Golle --- a/drivers/mtd/spi-nor/spi-nor.c +++ b/drivers/mtd/spi-nor/spi-nor.c -@@ -2504,6 +2504,9 @@ static const struct flash_info spi_nor_i +@@ -2506,6 +2506,9 @@ static const struct flash_info spi_nor_i /* XMC (Wuhan Xinxin Semiconductor Manufacturing Corp.) 
*/ { "XM25QH64A", INFO(0x207017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, { "XM25QH128A", INFO(0x207018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, diff --git a/target/linux/generic/pending-5.4/481-mtd-spi-nor-rework-broken-flash-reset-support.patch b/target/linux/generic/pending-5.4/481-mtd-spi-nor-rework-broken-flash-reset-support.patch index 8add1f7795..81b4f190d4 100644 --- a/target/linux/generic/pending-5.4/481-mtd-spi-nor-rework-broken-flash-reset-support.patch +++ b/target/linux/generic/pending-5.4/481-mtd-spi-nor-rework-broken-flash-reset-support.patch @@ -42,7 +42,7 @@ Signed-off-by: Chuanhong Guo static int macronix_set_4byte(struct spi_nor *nor, bool enable) { if (nor->spimem) { -@@ -1259,6 +1275,10 @@ static int spi_nor_erase(struct mtd_info +@@ -1261,6 +1277,10 @@ static int spi_nor_erase(struct mtd_info if (ret) return ret; @@ -53,7 +53,7 @@ Signed-off-by: Chuanhong Guo /* whole-chip erase? */ if (len == mtd->size && !(nor->flags & SNOR_F_NO_OP_CHIP_ERASE)) { unsigned long timeout; -@@ -1315,6 +1335,7 @@ static int spi_nor_erase(struct mtd_info +@@ -1317,6 +1337,7 @@ static int spi_nor_erase(struct mtd_info write_disable(nor); erase_err: @@ -61,7 +61,7 @@ Signed-off-by: Chuanhong Guo spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_ERASE); return ret; -@@ -1621,7 +1642,9 @@ static int spi_nor_lock(struct mtd_info +@@ -1623,7 +1644,9 @@ static int spi_nor_lock(struct mtd_info if (ret) return ret; @@ -71,7 +71,7 @@ Signed-off-by: Chuanhong Guo spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_UNLOCK); return ret; -@@ -1636,7 +1659,9 @@ static int spi_nor_unlock(struct mtd_inf +@@ -1638,7 +1661,9 @@ static int spi_nor_unlock(struct mtd_inf if (ret) return ret; @@ -81,7 +81,7 @@ Signed-off-by: Chuanhong Guo spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_LOCK); return ret; -@@ -1651,7 +1676,9 @@ static int spi_nor_is_locked(struct mtd_ +@@ -1653,7 +1678,9 @@ static int spi_nor_is_locked(struct mtd_ if (ret) return ret; @@ -91,7 +91,7 @@ Signed-off-by: Chuanhong Guo spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_LOCK); return ret; -@@ -2557,6 +2584,10 @@ static int spi_nor_read(struct mtd_info +@@ -2559,6 +2586,10 @@ static int spi_nor_read(struct mtd_info if (ret) return ret; @@ -102,7 +102,7 @@ Signed-off-by: Chuanhong Guo while (len) { loff_t addr = from; -@@ -2580,6 +2611,7 @@ static int spi_nor_read(struct mtd_info +@@ -2582,6 +2613,7 @@ static int spi_nor_read(struct mtd_info ret = 0; read_err: @@ -110,7 +110,7 @@ Signed-off-by: Chuanhong Guo spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_READ); return ret; } -@@ -2597,6 +2629,10 @@ static int sst_write(struct mtd_info *mt +@@ -2599,6 +2631,10 @@ static int sst_write(struct mtd_info *mt if (ret) return ret; @@ -121,7 +121,7 @@ Signed-off-by: Chuanhong Guo write_enable(nor); nor->sst_write_second = false; -@@ -2659,6 +2695,7 @@ static int sst_write(struct mtd_info *mt +@@ -2661,6 +2697,7 @@ static int sst_write(struct mtd_info *mt } sst_write_err: *retlen += actual; @@ -129,7 +129,7 @@ Signed-off-by: Chuanhong Guo spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE); return ret; } -@@ -2681,6 +2718,10 @@ static int spi_nor_write(struct mtd_info +@@ -2683,6 +2720,10 @@ static int spi_nor_write(struct mtd_info if (ret) return ret; @@ -140,7 +140,7 @@ Signed-off-by: Chuanhong Guo for (i = 0; i < len; ) { ssize_t written; loff_t addr = to + i; -@@ -2720,6 +2761,7 @@ static int spi_nor_write(struct mtd_info +@@ -2722,6 +2763,7 @@ static int spi_nor_write(struct mtd_info } write_err: @@ -148,7 +148,7 @@ 
Signed-off-by: Chuanhong Guo spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE); return ret; } -@@ -4725,9 +4767,13 @@ static int spi_nor_init(struct spi_nor * +@@ -4726,9 +4768,13 @@ static int spi_nor_init(struct spi_nor * * reboots (e.g., crashes). Warn the user (or hopefully, system * designer) that this is bad. */ diff --git a/target/linux/generic/pending-5.4/482-mtd-spi-nor-add-support-for-Gigadevice-GD25D05.patch b/target/linux/generic/pending-5.4/482-mtd-spi-nor-add-support-for-Gigadevice-GD25D05.patch index e28fcc9d5c..3a22133230 100644 --- a/target/linux/generic/pending-5.4/482-mtd-spi-nor-add-support-for-Gigadevice-GD25D05.patch +++ b/target/linux/generic/pending-5.4/482-mtd-spi-nor-add-support-for-Gigadevice-GD25D05.patch @@ -10,7 +10,7 @@ Signed-off-by: Koen Vandeputte --- a/drivers/mtd/spi-nor/spi-nor.c +++ b/drivers/mtd/spi-nor/spi-nor.c -@@ -2230,6 +2230,11 @@ static const struct flash_info spi_nor_i +@@ -2232,6 +2232,11 @@ static const struct flash_info spi_nor_i /* GigaDevice */ { diff --git a/target/linux/generic/pending-5.4/482-mtd-spi-nor-fix-4-byte-opcode-support-for-w25q256.patch b/target/linux/generic/pending-5.4/482-mtd-spi-nor-fix-4-byte-opcode-support-for-w25q256.patch index 1d88adf14c..63366e6032 100644 --- a/target/linux/generic/pending-5.4/482-mtd-spi-nor-fix-4-byte-opcode-support-for-w25q256.patch +++ b/target/linux/generic/pending-5.4/482-mtd-spi-nor-fix-4-byte-opcode-support-for-w25q256.patch @@ -15,7 +15,7 @@ Signed-off-by: Mantas Pucka --- a/drivers/mtd/spi-nor/spi-nor.c +++ b/drivers/mtd/spi-nor/spi-nor.c -@@ -2170,6 +2170,32 @@ static struct spi_nor_fixups gd25q256_fi +@@ -2172,6 +2172,32 @@ static struct spi_nor_fixups gd25q256_fi .default_init = gd25q256_default_init, }; @@ -48,7 +48,7 @@ Signed-off-by: Mantas Pucka /* NOTE: double check command sets and memory organization when you add * more nor chips. This current list focusses on newer chips, which * have been converging on command sets which including JEDEC ID. 
-@@ -2513,7 +2539,8 @@ static const struct flash_info spi_nor_i +@@ -2515,7 +2541,8 @@ static const struct flash_info spi_nor_i { "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) }, { "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) }, { "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) }, diff --git a/target/linux/ipq40xx/patches-5.4/0019-v5.6-mtd-spi-nor-Add-support-for-mx25r3235f.patch b/target/linux/ipq40xx/patches-5.4/0019-v5.6-mtd-spi-nor-Add-support-for-mx25r3235f.patch index 60fe8f90c6..f1be01c8e1 100644 --- a/target/linux/ipq40xx/patches-5.4/0019-v5.6-mtd-spi-nor-Add-support-for-mx25r3235f.patch +++ b/target/linux/ipq40xx/patches-5.4/0019-v5.6-mtd-spi-nor-Add-support-for-mx25r3235f.patch @@ -18,7 +18,7 @@ Signed-off-by: Tudor Ambarus --- a/drivers/mtd/spi-nor/spi-nor.c +++ b/drivers/mtd/spi-nor/spi-nor.c -@@ -2351,6 +2351,8 @@ static const struct flash_info spi_nor_i +@@ -2353,6 +2353,8 @@ static const struct flash_info spi_nor_i { "mx25u6435f", INFO(0xc22537, 0, 64 * 1024, 128, SECT_4K) }, { "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) }, { "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) }, diff --git a/target/linux/ipq806x/patches-5.4/088-v5.8-watchdog-qcom-wdt-disable-pretimeout-on-timer-platfo.patch b/target/linux/ipq806x/patches-5.4/088-v5.8-watchdog-qcom-wdt-disable-pretimeout-on-timer-platfo.patch index 605eb7d07b..1bd44fd5a0 100644 --- a/target/linux/ipq806x/patches-5.4/088-v5.8-watchdog-qcom-wdt-disable-pretimeout-on-timer-platfo.patch +++ b/target/linux/ipq806x/patches-5.4/088-v5.8-watchdog-qcom-wdt-disable-pretimeout-on-timer-platfo.patch @@ -20,7 +20,7 @@ Signed-off-by: Wim Van Sebroeck --- a/drivers/watchdog/qcom-wdt.c +++ b/drivers/watchdog/qcom-wdt.c -@@ -40,6 +40,11 @@ static const u32 reg_offset_data_kpss[] +@@ -39,6 +39,11 @@ static const u32 reg_offset_data_kpss[] [WDT_BITE_TIME] = 0x14, }; @@ -32,7 +32,7 @@ Signed-off-by: Wim Van Sebroeck struct qcom_wdt { struct watchdog_device wdd; unsigned long rate; -@@ -179,19 +184,29 @@ static void qcom_clk_disable_unprepare(v +@@ -168,19 +173,29 @@ static void qcom_clk_disable_unprepare(v clk_disable_unprepare(data); } @@ -65,7 +65,7 @@ Signed-off-by: Wim Van Sebroeck dev_err(dev, "Unsupported QCOM WDT module\n"); return -ENODEV; } -@@ -247,7 +262,7 @@ static int qcom_wdt_probe(struct platfor +@@ -236,7 +251,7 @@ static int qcom_wdt_probe(struct platfor /* check if there is pretimeout support */ irq = platform_get_irq_optional(pdev, 0); @@ -74,7 +74,7 @@ Signed-off-by: Wim Van Sebroeck ret = devm_request_irq(dev, irq, qcom_wdt_isr, IRQF_TRIGGER_RISING, "wdt_bark", &wdt->wdd); -@@ -267,7 +282,7 @@ static int qcom_wdt_probe(struct platfor +@@ -256,7 +271,7 @@ static int qcom_wdt_probe(struct platfor wdt->wdd.min_timeout = 1; wdt->wdd.max_timeout = 0x10000000U / wdt->rate; wdt->wdd.parent = dev; @@ -83,7 +83,7 @@ Signed-off-by: Wim Van Sebroeck if (readl(wdt_addr(wdt, WDT_STS)) & 1) wdt->wdd.bootstatus = WDIOF_CARDRESET; -@@ -311,9 +326,9 @@ static int __maybe_unused qcom_wdt_resum +@@ -300,9 +315,9 @@ static int __maybe_unused qcom_wdt_resum static SIMPLE_DEV_PM_OPS(qcom_wdt_pm_ops, qcom_wdt_suspend, qcom_wdt_resume); static const struct of_device_id qcom_wdt_of_table[] = { diff --git a/target/linux/ipq806x/patches-5.4/093-4-v5.8-ipq806x-PCI-qcom-Use-bulk-clk-api-and-assert-on-error.patch b/target/linux/ipq806x/patches-5.4/093-4-v5.8-ipq806x-PCI-qcom-Use-bulk-clk-api-and-assert-on-error.patch index 95531508b6..ecfcd74693 100644 --- 
a/target/linux/ipq806x/patches-5.4/093-4-v5.8-ipq806x-PCI-qcom-Use-bulk-clk-api-and-assert-on-error.patch +++ b/target/linux/ipq806x/patches-5.4/093-4-v5.8-ipq806x-PCI-qcom-Use-bulk-clk-api-and-assert-on-error.patch @@ -161,7 +161,7 @@ Acked-by: Stanimir Varbanov /* enable PCIe clocks and resets */ val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); -@@ -406,36 +393,6 @@ static int qcom_pcie_init_2_1_0(struct q +@@ -408,36 +395,6 @@ static int qcom_pcie_init_2_1_0(struct q val |= PHY_REFCLK_SSP_EN; writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK); @@ -198,7 +198,7 @@ Acked-by: Stanimir Varbanov /* wait for clock acquisition */ usleep_range(1000, 1500); -@@ -448,15 +405,19 @@ static int qcom_pcie_init_2_1_0(struct q +@@ -450,15 +407,19 @@ static int qcom_pcie_init_2_1_0(struct q return 0; diff --git a/target/linux/ipq806x/patches-5.4/093-7-v5.8-ipq806x-PCI-qcom-Add-ipq8064-rev2-variant.patch b/target/linux/ipq806x/patches-5.4/093-7-v5.8-ipq806x-PCI-qcom-Add-ipq8064-rev2-variant.patch index c3d61f164c..be334d72e9 100644 --- a/target/linux/ipq806x/patches-5.4/093-7-v5.8-ipq806x-PCI-qcom-Add-ipq8064-rev2-variant.patch +++ b/target/linux/ipq806x/patches-5.4/093-7-v5.8-ipq806x-PCI-qcom-Add-ipq8064-rev2-variant.patch @@ -26,7 +26,7 @@ Acked-by: Stanimir Varbanov writel(PCS_DEEMPH_TX_DEEMPH_GEN1(24) | PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(24) | PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(34), -@@ -1328,6 +1329,7 @@ err_pm_runtime_put: +@@ -1330,6 +1331,7 @@ err_pm_runtime_put: static const struct of_device_id qcom_pcie_match[] = { { .compatible = "qcom,pcie-apq8084", .data = &ops_1_0_0 }, { .compatible = "qcom,pcie-ipq8064", .data = &ops_2_1_0 }, diff --git a/target/linux/ipq806x/patches-5.4/093-8-v5.8-ipq806x-PCI-qcom-Support-pci-speed-set-for-ipq806x.patch b/target/linux/ipq806x/patches-5.4/093-8-v5.8-ipq806x-PCI-qcom-Support-pci-speed-set-for-ipq806x.patch index 02ac7460e8..5440b2f7f8 100644 --- a/target/linux/ipq806x/patches-5.4/093-8-v5.8-ipq806x-PCI-qcom-Support-pci-speed-set-for-ipq806x.patch +++ b/target/linux/ipq806x/patches-5.4/093-8-v5.8-ipq806x-PCI-qcom-Support-pci-speed-set-for-ipq806x.patch @@ -49,7 +49,7 @@ Backported with light changes: }; #define to_qcom_pcie(x) dev_get_drvdata((x)->dev) -@@ -397,6 +401,11 @@ static int qcom_pcie_init_2_1_0(struct q +@@ -399,6 +403,11 @@ static int qcom_pcie_init_2_1_0(struct q /* wait for clock acquisition */ usleep_range(1000, 1500); @@ -61,7 +61,7 @@ Backported with light changes: /* Set the Max TLP size to 2K, instead of using default of 4K */ writel(CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K, -@@ -1261,6 +1270,10 @@ static int qcom_pcie_probe(struct platfo +@@ -1263,6 +1272,10 @@ static int qcom_pcie_probe(struct platfo goto err_pm_runtime_put; } diff --git a/target/linux/lantiq/patches-5.4/0152-lantiq-VPE.patch b/target/linux/lantiq/patches-5.4/0152-lantiq-VPE.patch index 822d905246..f8c2343e19 100644 --- a/target/linux/lantiq/patches-5.4/0152-lantiq-VPE.patch +++ b/target/linux/lantiq/patches-5.4/0152-lantiq-VPE.patch @@ -1,6 +1,6 @@ --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig -@@ -2377,6 +2377,12 @@ config MIPS_VPE_LOADER +@@ -2378,6 +2378,12 @@ config MIPS_VPE_LOADER Includes a loader for loading an elf relocatable object onto another VPE and running it. 
diff --git a/target/linux/layerscape/patches-5.4/804-crypto-0001-crypto-add-support-for-TLS-1.0-record-encryption.patch b/target/linux/layerscape/patches-5.4/804-crypto-0001-crypto-add-support-for-TLS-1.0-record-encryption.patch index ad557fa823..c73baa42ea 100644 --- a/target/linux/layerscape/patches-5.4/804-crypto-0001-crypto-add-support-for-TLS-1.0-record-encryption.patch +++ b/target/linux/layerscape/patches-5.4/804-crypto-0001-crypto-add-support-for-TLS-1.0-record-encryption.patch @@ -28,7 +28,7 @@ Signed-off-by: Horia Geantă --- a/crypto/Kconfig +++ b/crypto/Kconfig -@@ -342,6 +342,26 @@ config CRYPTO_ECHAINIV +@@ -349,6 +349,26 @@ config CRYPTO_ECHAINIV a sequence number xored with a salt. This is the default algorithm for CBC. @@ -57,7 +57,7 @@ Signed-off-by: Horia Geantă config CRYPTO_CBC --- a/crypto/Makefile +++ b/crypto/Makefile -@@ -143,6 +143,7 @@ obj-$(CONFIG_CRYPTO_CRC32) += crc32_gene +@@ -144,6 +144,7 @@ obj-$(CONFIG_CRYPTO_CRC32) += crc32_gene obj-$(CONFIG_CRYPTO_CRCT10DIF) += crct10dif_common.o crct10dif_generic.o obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o obj-$(CONFIG_CRYPTO_LZO) += lzo.o lzo-rle.o @@ -329,7 +329,7 @@ Signed-off-by: Horia Geantă static int alg_test_aead(const struct alg_test_desc *desc, const char *driver, u32 type, u32 mask) { -@@ -5012,6 +5241,15 @@ static const struct alg_test_desc alg_te +@@ -5042,6 +5271,15 @@ static const struct alg_test_desc alg_te .hash = __VECS(tgr192_tv_template) } }, { diff --git a/target/linux/layerscape/patches-5.4/809-jailhouse-0001-ivshmem-net-virtual-network-device-for-Jailhouse.patch b/target/linux/layerscape/patches-5.4/809-jailhouse-0001-ivshmem-net-virtual-network-device-for-Jailhouse.patch index 617e937107..ede28ba23e 100644 --- a/target/linux/layerscape/patches-5.4/809-jailhouse-0001-ivshmem-net-virtual-network-device-for-Jailhouse.patch +++ b/target/linux/layerscape/patches-5.4/809-jailhouse-0001-ivshmem-net-virtual-network-device-for-Jailhouse.patch @@ -15,7 +15,7 @@ Work in progress. --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig -@@ -528,4 +528,8 @@ config NET_FAILOVER +@@ -571,4 +571,8 @@ config NET_FAILOVER a VM with direct attached VF by failing over to the paravirtual datapath when the VF is unplugged. @@ -26,7 +26,7 @@ Work in progress. 
endif # NETDEVICES --- a/drivers/net/Makefile +++ b/drivers/net/Makefile -@@ -79,3 +79,5 @@ thunderbolt-net-y += thunderbolt.o +@@ -80,3 +80,5 @@ thunderbolt-net-y += thunderbolt.o obj-$(CONFIG_THUNDERBOLT_NET) += thunderbolt-net.o obj-$(CONFIG_NETDEVSIM) += netdevsim/ obj-$(CONFIG_NET_FAILOVER) += net_failover.o diff --git a/target/linux/layerscape/patches-5.4/817-spi-0006-LF-20-2-mtd-spi-nor-Use-1-bit-mode-of-spansion-s25fs.patch b/target/linux/layerscape/patches-5.4/817-spi-0006-LF-20-2-mtd-spi-nor-Use-1-bit-mode-of-spansion-s25fs.patch index 067bf025a3..9c83b4c7f7 100644 --- a/target/linux/layerscape/patches-5.4/817-spi-0006-LF-20-2-mtd-spi-nor-Use-1-bit-mode-of-spansion-s25fs.patch +++ b/target/linux/layerscape/patches-5.4/817-spi-0006-LF-20-2-mtd-spi-nor-Use-1-bit-mode-of-spansion-s25fs.patch @@ -11,7 +11,7 @@ Signed-off-by: Kuldeep Singh --- a/drivers/mtd/spi-nor/spi-nor.c +++ b/drivers/mtd/spi-nor/spi-nor.c -@@ -2417,7 +2417,7 @@ static const struct flash_info spi_nor_i +@@ -2419,7 +2419,7 @@ static const struct flash_info spi_nor_i { "s25fl512s", INFO6(0x010220, 0x4d0080, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK | USE_CLSR) }, diff --git a/target/linux/layerscape/patches-5.4/820-usb-0005-usb-dwc3-add-otg-properties-update.patch b/target/linux/layerscape/patches-5.4/820-usb-0005-usb-dwc3-add-otg-properties-update.patch index 22d8f2bace..848be9b5fe 100644 --- a/target/linux/layerscape/patches-5.4/820-usb-0005-usb-dwc3-add-otg-properties-update.patch +++ b/target/linux/layerscape/patches-5.4/820-usb-0005-usb-dwc3-add-otg-properties-update.patch @@ -54,7 +54,7 @@ Signed-off-by: Peter Chen * All 3.1 IP version constants are greater than the 3.0 IP --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c -@@ -3543,6 +3543,10 @@ int dwc3_gadget_init(struct dwc3 *dwc) +@@ -3558,6 +3558,10 @@ int dwc3_gadget_init(struct dwc3 *dwc) dwc->gadget.sg_supported = true; dwc->gadget.name = "dwc3-gadget"; dwc->gadget.lpm_capable = true; diff --git a/target/linux/mediatek/patches-5.4/0310-dts-add-wmac-support-for-mt7622-rfb1.patch b/target/linux/mediatek/patches-5.4/0310-dts-add-wmac-support-for-mt7622-rfb1.patch index 92296060c3..84aed89752 100644 --- a/target/linux/mediatek/patches-5.4/0310-dts-add-wmac-support-for-mt7622-rfb1.patch +++ b/target/linux/mediatek/patches-5.4/0310-dts-add-wmac-support-for-mt7622-rfb1.patch @@ -1,6 +1,6 @@ --- a/arch/arm64/boot/dts/mediatek/mt7622.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt7622.dtsi -@@ -714,6 +714,17 @@ +@@ -716,6 +716,17 @@ status = "disabled"; }; diff --git a/target/linux/mediatek/patches-5.4/0600-net-phylink-propagate-resolved-link-config-via-mac_l.patch b/target/linux/mediatek/patches-5.4/0600-net-phylink-propagate-resolved-link-config-via-mac_l.patch index 3bf1d0c135..bd854613b7 100644 --- a/target/linux/mediatek/patches-5.4/0600-net-phylink-propagate-resolved-link-config-via-mac_l.patch +++ b/target/linux/mediatek/patches-5.4/0600-net-phylink-propagate-resolved-link-config-via-mac_l.patch @@ -52,7 +52,7 @@ Signed-off-by: David S. Miller when making changes to the MAC configuration. 
This means the --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c -@@ -3653,9 +3653,11 @@ static void mvneta_mac_link_down(struct +@@ -3655,9 +3655,11 @@ static void mvneta_mac_link_down(struct mvneta_set_eee(pp, false); } diff --git a/target/linux/mediatek/patches-5.4/0993-arm64-dts-mediatek-Split-PCIe-node-for-MT2712-MT7622.patch b/target/linux/mediatek/patches-5.4/0993-arm64-dts-mediatek-Split-PCIe-node-for-MT2712-MT7622.patch index cea83710b7..3c5558b606 100644 --- a/target/linux/mediatek/patches-5.4/0993-arm64-dts-mediatek-Split-PCIe-node-for-MT2712-MT7622.patch +++ b/target/linux/mediatek/patches-5.4/0993-arm64-dts-mediatek-Split-PCIe-node-for-MT2712-MT7622.patch @@ -281,7 +281,7 @@ Signed-off-by: chuanjia.liu &pio { --- a/arch/arm64/boot/dts/mediatek/mt7622.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt7622.dtsi -@@ -792,45 +792,41 @@ +@@ -794,45 +794,41 @@ #reset-cells = <1>; }; @@ -344,7 +344,7 @@ Signed-off-by: chuanjia.liu interrupt-map-mask = <0 0 0 7>; interrupt-map = <0 0 0 1 &pcie_intc0 0>, <0 0 0 2 &pcie_intc0 1>, -@@ -842,15 +838,39 @@ +@@ -844,15 +840,39 @@ #interrupt-cells = <1>; }; }; diff --git a/target/linux/mediatek/patches-5.4/1011-net-ethernet-mtk_eth_soc-add-support-for-coherent-DM.patch b/target/linux/mediatek/patches-5.4/1011-net-ethernet-mtk_eth_soc-add-support-for-coherent-DM.patch index efb30f2e50..98886767a2 100644 --- a/target/linux/mediatek/patches-5.4/1011-net-ethernet-mtk_eth_soc-add-support-for-coherent-DM.patch +++ b/target/linux/mediatek/patches-5.4/1011-net-ethernet-mtk_eth_soc-add-support-for-coherent-DM.patch @@ -18,7 +18,7 @@ Signed-off-by: Felix Fietkau interface-type = "ace"; reg = <0x5000 0x1000>; }; -@@ -967,6 +967,8 @@ +@@ -969,6 +969,8 @@ power-domains = <&scpsys MT7622_POWER_DOMAIN_ETHSYS>; mediatek,ethsys = <ðsys>; mediatek,sgmiisys = <&sgmiisys>; diff --git a/target/linux/mediatek/patches-5.4/1012-pci-pcie-mediatek-add-support-for-coherent-DMA.patch b/target/linux/mediatek/patches-5.4/1012-pci-pcie-mediatek-add-support-for-coherent-DMA.patch index c24126284b..20a67676e3 100644 --- a/target/linux/mediatek/patches-5.4/1012-pci-pcie-mediatek-add-support-for-coherent-DMA.patch +++ b/target/linux/mediatek/patches-5.4/1012-pci-pcie-mediatek-add-support-for-coherent-DMA.patch @@ -10,7 +10,7 @@ Signed-off-by: Felix Fietkau --- a/arch/arm64/boot/dts/mediatek/mt7622.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt7622.dtsi -@@ -803,6 +803,8 @@ +@@ -805,6 +805,8 @@ reg = <0 0x1a143000 0 0x1000>; reg-names = "port0"; mediatek,pcie-cfg = <&pciecfg>; @@ -19,7 +19,7 @@ Signed-off-by: Felix Fietkau #address-cells = <3>; #size-cells = <2>; interrupts = ; -@@ -820,6 +822,7 @@ +@@ -822,6 +824,7 @@ bus-range = <0x00 0xff>; ranges = <0x82000000 0 0x20000000 0x0 0x20000000 0 0x8000000>; status = "disabled"; @@ -27,7 +27,7 @@ Signed-off-by: Felix Fietkau slot0: pcie@0,0 { reg = <0x0000 0 0 0 0>; -@@ -846,6 +849,8 @@ +@@ -848,6 +851,8 @@ reg = <0 0x1a145000 0 0x1000>; reg-names = "port1"; mediatek,pcie-cfg = <&pciecfg>; @@ -36,7 +36,7 @@ Signed-off-by: Felix Fietkau #address-cells = <3>; #size-cells = <2>; interrupts = ; -@@ -864,6 +869,7 @@ +@@ -866,6 +871,7 @@ bus-range = <0x00 0xff>; ranges = <0x82000000 0 0x28000000 0x0 0x28000000 0 0x8000000>; status = "disabled"; @@ -44,7 +44,7 @@ Signed-off-by: Felix Fietkau slot1: pcie@1,0 { reg = <0x0800 0 0 0 0>; -@@ -923,6 +929,11 @@ +@@ -925,6 +931,11 @@ }; }; diff --git a/target/linux/mediatek/patches-5.4/1020-spi-nor-w25q512jv.patch 
b/target/linux/mediatek/patches-5.4/1020-spi-nor-w25q512jv.patch index 1a16574c2b..a234555556 100644 --- a/target/linux/mediatek/patches-5.4/1020-spi-nor-w25q512jv.patch +++ b/target/linux/mediatek/patches-5.4/1020-spi-nor-w25q512jv.patch @@ -13,7 +13,7 @@ Ref: https://patchwork.ozlabs.org/project/linux-mtd/patch/20210213151047.11700-1 --- a/drivers/mtd/spi-nor/spi-nor.c +++ b/drivers/mtd/spi-nor/spi-nor.c -@@ -2550,6 +2550,9 @@ static const struct flash_info spi_nor_i +@@ -2552,6 +2552,9 @@ static const struct flash_info spi_nor_i .fixups = &w25q256_fixups }, { "w25q256jvm", INFO(0xef7019, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, diff --git a/target/linux/mvebu/patches-5.4/005-net-mvneta-rely-on-build_skb-in-mvneta_rx_swbm-poll-.patch b/target/linux/mvebu/patches-5.4/005-net-mvneta-rely-on-build_skb-in-mvneta_rx_swbm-poll-.patch index 9038ca8ca8..4d9539adbb 100644 --- a/target/linux/mvebu/patches-5.4/005-net-mvneta-rely-on-build_skb-in-mvneta_rx_swbm-poll-.patch +++ b/target/linux/mvebu/patches-5.4/005-net-mvneta-rely-on-build_skb-in-mvneta_rx_swbm-poll-.patch @@ -292,7 +292,7 @@ Signed-off-by: David S. Miller MVNETA_RX_BUF_SIZE(pp->pkt_size)); mvneta_rxq_bm_disable(pp, rxq); mvneta_rxq_fill(pp, rxq, rxq->size); -@@ -4708,7 +4713,7 @@ static int mvneta_probe(struct platform_ +@@ -4715,7 +4720,7 @@ static int mvneta_probe(struct platform_ SET_NETDEV_DEV(dev, &pdev->dev); pp->id = global_port_id++; diff --git a/target/linux/mvebu/patches-5.4/006-net-mvneta-add-basic-XDP-support.patch b/target/linux/mvebu/patches-5.4/006-net-mvneta-add-basic-XDP-support.patch index 163632df51..4e42bf9307 100644 --- a/target/linux/mvebu/patches-5.4/006-net-mvneta-add-basic-XDP-support.patch +++ b/target/linux/mvebu/patches-5.4/006-net-mvneta-add-basic-XDP-support.patch @@ -232,7 +232,7 @@ Signed-off-by: David S. Miller }; int err; -@@ -3364,6 +3447,11 @@ static int mvneta_change_mtu(struct net_ +@@ -3366,6 +3449,11 @@ static int mvneta_change_mtu(struct net_ mtu = ALIGN(MVNETA_RX_PKT_SIZE(mtu), 8); } @@ -244,7 +244,7 @@ Signed-off-by: David S. Miller dev->mtu = mtu; if (!netif_running(dev)) { -@@ -4029,6 +4117,47 @@ static int mvneta_ioctl(struct net_devic +@@ -4036,6 +4124,47 @@ static int mvneta_ioctl(struct net_devic return phylink_mii_ioctl(pp->phylink, ifr, cmd); } @@ -292,7 +292,7 @@ Signed-off-by: David S. Miller /* Ethtool methods */ /* Set link ksettings (phy address, speed) for ethtools */ -@@ -4425,6 +4554,7 @@ static const struct net_device_ops mvnet +@@ -4432,6 +4561,7 @@ static const struct net_device_ops mvnet .ndo_fix_features = mvneta_fix_features, .ndo_get_stats64 = mvneta_get_stats64, .ndo_do_ioctl = mvneta_ioctl, @@ -300,7 +300,7 @@ Signed-off-by: David S. Miller }; static const struct ethtool_ops mvneta_eth_tool_ops = { -@@ -4713,7 +4843,7 @@ static int mvneta_probe(struct platform_ +@@ -4720,7 +4850,7 @@ static int mvneta_probe(struct platform_ SET_NETDEV_DEV(dev, &pdev->dev); pp->id = global_port_id++; diff --git a/target/linux/mvebu/patches-5.4/008-net-mvneta-make-tx-buffer-array-agnostic.patch b/target/linux/mvebu/patches-5.4/008-net-mvneta-make-tx-buffer-array-agnostic.patch index e79aa816b8..4698c23be4 100644 --- a/target/linux/mvebu/patches-5.4/008-net-mvneta-make-tx-buffer-array-agnostic.patch +++ b/target/linux/mvebu/patches-5.4/008-net-mvneta-make-tx-buffer-array-agnostic.patch @@ -199,7 +199,7 @@ Signed-off-by: David S. 
Miller dma_free_coherent(pp->dev->dev.parent, txq->size * MVNETA_DESC_ALIGNED_SIZE, txq->descs, txq->descs_phys); -@@ -3205,7 +3225,7 @@ static void mvneta_txq_sw_deinit(struct +@@ -3207,7 +3227,7 @@ static void mvneta_txq_sw_deinit(struct { struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id); diff --git a/target/linux/mvebu/patches-5.4/009-net-mvneta-add-XDP_TX-support.patch b/target/linux/mvebu/patches-5.4/009-net-mvneta-add-XDP_TX-support.patch index c971030a1e..8d9b775f7b 100644 --- a/target/linux/mvebu/patches-5.4/009-net-mvneta-add-XDP_TX-support.patch +++ b/target/linux/mvebu/patches-5.4/009-net-mvneta-add-XDP_TX-support.patch @@ -165,7 +165,7 @@ Signed-off-by: David S. Miller default: bpf_warn_invalid_xdp_action(act); /* fall through */ -@@ -4574,6 +4687,7 @@ static const struct net_device_ops mvnet +@@ -4581,6 +4694,7 @@ static const struct net_device_ops mvnet .ndo_get_stats64 = mvneta_get_stats64, .ndo_do_ioctl = mvneta_ioctl, .ndo_bpf = mvneta_xdp, diff --git a/target/linux/mvebu/patches-5.4/014-mvneta-driver-disallow-XDP-program-on-hardware-buffe.patch b/target/linux/mvebu/patches-5.4/014-mvneta-driver-disallow-XDP-program-on-hardware-buffe.patch index e8a41d9eb2..6558c6a6e2 100644 --- a/target/linux/mvebu/patches-5.4/014-mvneta-driver-disallow-XDP-program-on-hardware-buffe.patch +++ b/target/linux/mvebu/patches-5.4/014-mvneta-driver-disallow-XDP-program-on-hardware-buffe.patch @@ -38,7 +38,7 @@ Signed-off-by: David S. Miller --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c -@@ -4263,6 +4263,12 @@ static int mvneta_xdp_setup(struct net_d +@@ -4270,6 +4270,12 @@ static int mvneta_xdp_setup(struct net_d return -EOPNOTSUPP; } diff --git a/target/linux/mvebu/patches-5.4/015-net-mvneta-fix-XDP-support-if-sw-bm-is-used-as-fallb.patch b/target/linux/mvebu/patches-5.4/015-net-mvneta-fix-XDP-support-if-sw-bm-is-used-as-fallb.patch index 86bffa538a..61a588db70 100644 --- a/target/linux/mvebu/patches-5.4/015-net-mvneta-fix-XDP-support-if-sw-bm-is-used-as-fallb.patch +++ b/target/linux/mvebu/patches-5.4/015-net-mvneta-fix-XDP-support-if-sw-bm-is-used-as-fallb.patch @@ -38,7 +38,7 @@ Signed-off-by: David S. Miller mvreg_write(pp, MVNETA_ACC_MODE, MVNETA_ACC_MODE_EXT1); netdev_info(pp->dev, "fail to update MTU, fall back to software BM\n"); } -@@ -4984,7 +4984,6 @@ static int mvneta_probe(struct platform_ +@@ -4991,7 +4991,6 @@ static int mvneta_probe(struct platform_ SET_NETDEV_DEV(dev, &pdev->dev); pp->id = global_port_id++; @@ -46,7 +46,7 @@ Signed-off-by: David S. Miller /* Obtain access to BM resources if enabled and already initialized */ bm_node = of_parse_phandle(dn, "buffer-manager", 0); -@@ -5009,6 +5008,10 @@ static int mvneta_probe(struct platform_ +@@ -5016,6 +5015,10 @@ static int mvneta_probe(struct platform_ } of_node_put(bm_node); @@ -57,7 +57,7 @@ Signed-off-by: David S. 
Miller err = mvneta_init(&pdev->dev, pp); if (err < 0) goto err_netdev; -@@ -5166,6 +5169,7 @@ static int mvneta_resume(struct device * +@@ -5173,6 +5176,7 @@ static int mvneta_resume(struct device * err = mvneta_bm_port_init(pdev, pp); if (err < 0) { dev_info(&pdev->dev, "use SW buffer management\n"); diff --git a/target/linux/mvebu/patches-5.4/312-helios4-dts-status-led-alias.patch b/target/linux/mvebu/patches-5.4/312-helios4-dts-status-led-alias.patch index 4c4fbec764..b837989d48 100644 --- a/target/linux/mvebu/patches-5.4/312-helios4-dts-status-led-alias.patch +++ b/target/linux/mvebu/patches-5.4/312-helios4-dts-status-led-alias.patch @@ -1,23 +1,20 @@ --- a/arch/arm/boot/dts/armada-388-helios4.dts +++ b/arch/arm/boot/dts/armada-388-helios4.dts -@@ -15,6 +15,13 @@ - model = "Helios4"; - compatible = "kobol,helios4", "marvell,armada388", - "marvell,armada385", "marvell,armada380"; -+ -+ aliases { +@@ -24,6 +24,10 @@ + aliases { + /* So that mvebu u-boot can update the MAC addresses */ + ethernet1 = ð0; + led-boot = &led_status; + led-failsafe = &led_status; + led-running = &led_status; + led-upgrade = &led_status; -+ }; + }; - memory { - device_type = "memory"; -@@ -70,10 +77,9 @@ + chosen { +@@ -73,10 +77,9 @@ + pinctrl-names = "default"; + pinctrl-0 = <&helios_system_led_pins>; - system-leds { - compatible = "gpio-leds"; - status-led { + led_status: status-led { label = "helios4:green:status"; diff --git a/target/linux/mvebu/patches-5.4/700-mvneta-tx-queue-workaround.patch b/target/linux/mvebu/patches-5.4/700-mvneta-tx-queue-workaround.patch index a82a4c0d62..a83d891ff8 100644 --- a/target/linux/mvebu/patches-5.4/700-mvneta-tx-queue-workaround.patch +++ b/target/linux/mvebu/patches-5.4/700-mvneta-tx-queue-workaround.patch @@ -9,7 +9,7 @@ Signed-off-by: Felix Fietkau --- --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c -@@ -4684,6 +4684,14 @@ static int mvneta_ethtool_set_eee(struct +@@ -4691,6 +4691,14 @@ static int mvneta_ethtool_set_eee(struct return phylink_ethtool_set_eee(pp->phylink, eee); } @@ -24,7 +24,7 @@ Signed-off-by: Felix Fietkau static const struct net_device_ops mvneta_netdev_ops = { .ndo_open = mvneta_open, .ndo_stop = mvneta_stop, -@@ -4694,6 +4702,7 @@ static const struct net_device_ops mvnet +@@ -4701,6 +4709,7 @@ static const struct net_device_ops mvnet .ndo_fix_features = mvneta_fix_features, .ndo_get_stats64 = mvneta_get_stats64, .ndo_do_ioctl = mvneta_ioctl, diff --git a/target/linux/pistachio/patches-5.4/401-mtd-nor-support-mtd-name-from-device-tree.patch b/target/linux/pistachio/patches-5.4/401-mtd-nor-support-mtd-name-from-device-tree.patch index a02b632e8c..2d0b1cc634 100644 --- a/target/linux/pistachio/patches-5.4/401-mtd-nor-support-mtd-name-from-device-tree.patch +++ b/target/linux/pistachio/patches-5.4/401-mtd-nor-support-mtd-name-from-device-tree.patch @@ -10,7 +10,7 @@ Signed-off-by: Abhimanyu Vishwakarma --- a/drivers/mtd/spi-nor/spi-nor.c +++ b/drivers/mtd/spi-nor/spi-nor.c -@@ -4937,6 +4937,7 @@ int spi_nor_scan(struct spi_nor *nor, co +@@ -4938,6 +4938,7 @@ int spi_nor_scan(struct spi_nor *nor, co struct mtd_info *mtd = &nor->mtd; struct device_node *np = spi_nor_get_flash_node(nor); struct spi_nor_flash_parameter *params = &nor->params; @@ -18,7 +18,7 @@ Signed-off-by: Abhimanyu Vishwakarma int ret; int i; -@@ -4999,7 +5000,12 @@ int spi_nor_scan(struct spi_nor *nor, co +@@ -5000,7 +5001,12 @@ int spi_nor_scan(struct spi_nor *nor, co /* Init flash parameters based on flash_info struct and SFDP */ 
spi_nor_init_params(nor); diff --git a/target/linux/ramips/patches-5.4/302-spi-nor-add-gd25q512.patch b/target/linux/ramips/patches-5.4/302-spi-nor-add-gd25q512.patch index 7fcf0c54b6..3fbb0bf323 100644 --- a/target/linux/ramips/patches-5.4/302-spi-nor-add-gd25q512.patch +++ b/target/linux/ramips/patches-5.4/302-spi-nor-add-gd25q512.patch @@ -1,6 +1,6 @@ --- a/drivers/mtd/spi-nor/spi-nor.c +++ b/drivers/mtd/spi-nor/spi-nor.c -@@ -2303,6 +2303,11 @@ static const struct flash_info spi_nor_i +@@ -2305,6 +2305,11 @@ static const struct flash_info spi_nor_i SPI_NOR_4B_OPCODES | SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) .fixups = &gd25q256_fixups, }, diff --git a/target/linux/realtek/patches-5.4/302-clocksource-add-rtl9300-driver.patch b/target/linux/realtek/patches-5.4/302-clocksource-add-rtl9300-driver.patch index 8359dfdcbc..1c41db75b2 100644 --- a/target/linux/realtek/patches-5.4/302-clocksource-add-rtl9300-driver.patch +++ b/target/linux/realtek/patches-5.4/302-clocksource-add-rtl9300-driver.patch @@ -1,6 +1,6 @@ --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig -@@ -126,6 +126,15 @@ config RDA_TIMER +@@ -127,6 +127,15 @@ config RDA_TIMER help Enables the support for the RDA Micro timer driver. @@ -16,7 +16,7 @@ config SUN4I_TIMER bool "Sun4i timer driver" if COMPILE_TEST depends on HAS_IOMEM -@@ -695,5 +704,4 @@ config INGENIC_TIMER +@@ -696,5 +705,4 @@ config INGENIC_TIMER select IRQ_DOMAIN help Support for the timer/counter unit of the Ingenic JZ SoCs. diff --git a/target/linux/sunxi/patches-5.4/400-arm64-allwinner-a64-sopine-Add-Sopine-flash-partitio.patch b/target/linux/sunxi/patches-5.4/400-arm64-allwinner-a64-sopine-Add-Sopine-flash-partitio.patch index 7c729ad4f7..1cf94c0b53 100644 --- a/target/linux/sunxi/patches-5.4/400-arm64-allwinner-a64-sopine-Add-Sopine-flash-partitio.patch +++ b/target/linux/sunxi/patches-5.4/400-arm64-allwinner-a64-sopine-Add-Sopine-flash-partitio.patch @@ -15,7 +15,7 @@ Signed-off-by: Oskari Lemmela --- a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi -@@ -82,6 +82,28 @@ +@@ -81,6 +81,28 @@ compatible = "jedec,spi-nor"; reg = <0>; spi-max-frequency = <40000000>; -- cgit v1.2.3
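
Several of the refreshed patches above (GD25D05, w25q256/w25q512jv, mx25r3235f, gd25q512) only shift hunk offsets around entries in the flash_info table (spi_nor_ids[]) of drivers/mtd/spi-nor/spi-nor.c. As a reading aid, here is a minimal sketch of what such an entry looks like on 5.4, following the same INFO() pattern visible in the hunks above; the chip name, JEDEC ID, and geometry are placeholders for illustration, not values taken from this commit.

    /*
     * Hypothetical flash_info entry, same shape as the entries touched by the
     * patches above. All values below are placeholders, not a real chip.
     */
    { "example-nor", INFO(0x001122,             /* 3-byte JEDEC ID (placeholder) */
                          0,                    /* extended ID, unused here      */
                          64 * 1024,            /* sector size: 64 KiB           */
                          256,                  /* 256 sectors -> 16 MiB device  */
                          SECT_4K |             /* supports 4 KiB sector erase   */
                          SPI_NOR_DUAL_READ |   /* dual-output fast read         */
                          SPI_NOR_QUAD_READ) }, /* quad-output fast read         */

An entry like this is enough for the generic spi-nor core to match the chip by JEDEC ID and pick read/erase opcodes; chips that need extra quirks (as gd25q256 does above) additionally attach a spi_nor_fixups structure via .fixups.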