diff --git a/build.sh b/build.sh index 882aacbf..00d03d1c 100755 --- a/build.sh +++ b/build.sh @@ -26,10 +26,12 @@ OMR_REPO=${OMR_REPO:-http://$OMR_HOST:$OMR_PORT/release/$OMR_KERNEL} OMR_KEEPBIN=${OMR_KEEPBIN:-no} OMR_IMG=${OMR_IMG:-yes} #OMR_UEFI=${OMR_UEFI:-yes} +OMR_PACKAGES=${OMR_PACKAGES:-full} OMR_ALL_PACKAGES=${OMR_ALL_PACKAGES:-no} OMR_TARGET=${OMR_TARGET:-x86_64} OMR_TARGET_CONFIG="config-$OMR_TARGET" OMR_KERNEL=${OMR_KERNEL:-5.4} +OMR_RELEASE=${OMR_RELEASE:-$(git describe --tags `git rev-list --tags --max-count=1` | sed 's/[^0-9.]//g')} OMR_FEED_URL="${OMR_FEED_URL:-https://github.com/ysurac/openmptcprouter-feeds}" OMR_FEED_SRC="${OMR_FEED_SRC:-develop}" @@ -61,9 +63,9 @@ fi #_get_repo source https://github.com/ysurac/openmptcprouter-source "master" if [ "$OMR_OPENWRT" = "default" ]; then - _get_repo "$OMR_TARGET/source" https://github.com/openwrt/openwrt "df27e949fbbf13e1e2ab4db49f608165ef0ba9fe" - _get_repo feeds/packages https://github.com/openwrt/packages "a4bb706918c58c7f8718e5de1de2e719eecabbd2" - _get_repo feeds/luci https://github.com/openwrt/luci "d0518a11e124e124bfaa02551bc2d028fad2d69d" + _get_repo "$OMR_TARGET/source" https://github.com/openwrt/openwrt "e5aa498acb847320a382034ba0b9cfc55e6f13ca" + _get_repo feeds/packages https://github.com/openwrt/packages "69fd6ab319e170dd690a6495e8c1a7abe79f3960" + _get_repo feeds/luci https://github.com/openwrt/luci "16f443bf4caf6e7dd85efd1ce111b45779acdf5e" elif [ "$OMR_OPENWRT" = "master" ]; then _get_repo "$OMR_TARGET/source" https://github.com/openwrt/openwrt "master" _get_repo feeds/packages https://github.com/openwrt/packages "master" @@ -107,13 +109,23 @@ src-link luci $(readlink -f feeds/luci) src-link openmptcprouter $(readlink -f "$OMR_FEED") EOF -cat > "$OMR_TARGET/source/package/system/opkg/files/customfeeds.conf" < "$OMR_TARGET/source/package/system/opkg/files/customfeeds.conf" <<-EOF + src/gz openwrt_luci http://packages.openmptcprouter.com/${OMR_RELEASE}/${OMR_REAL_TARGET}/luci + src/gz openwrt_packages http://packages.openmptcprouter.com/${OMR_RELEASE}/${OMR_REAL_TARGET}/packages + src/gz openwrt_base http://packages.openmptcprouter.com/${OMR_RELEASE}/${OMR_REAL_TARGET}/base + src/gz openwrt_routing http://packages.openmptcprouter.com/${OMR_RELEASE}/${OMR_REAL_TARGET}/routing + src/gz openwrt_telephony http://packages.openmptcprouter.com/${OMR_RELEASE}/${OMR_REAL_TARGET}/telephony + EOF +else + cat > "$OMR_TARGET/source/package/system/opkg/files/customfeeds.conf" <<-EOF + src/gz openwrt_luci http://downloads.openwrt.org/snapshots/packages/${OMR_REAL_TARGET}/luci + src/gz openwrt_packages http://downloads.openwrt.org/snapshots/packages/${OMR_REAL_TARGET}/packages + src/gz openwrt_base http://downloads.openwrt.org/snapshots/packages/${OMR_REAL_TARGET}/base + src/gz openwrt_routing http://downloads.openwrt.org/snapshots/packages/${OMR_REAL_TARGET}/routing + src/gz openwrt_telephony http://downloads.openwrt.org/snapshots/packages/${OMR_REAL_TARGET}/telephony + EOF +fi #cat > "$OMR_TARGET/source/package/system/opkg/files/customfeeds.conf" <> "$OMR_TARGET/source/.config" + echo 'CONFIG_ALL_NONSHARED=y' >> "$OMR_TARGET/source/.config" fi if [ "$OMR_IMG" = "yes" ] && [ "$OMR_TARGET" = "x86_64" ]; then echo 'CONFIG_VDI_IMAGES=y' >> "$OMR_TARGET/source/.config" @@ -150,6 +163,13 @@ if [ "$OMR_IMG" = "yes" ] && [ "$OMR_TARGET" = "x86_64" ]; then echo 'CONFIG_VHDX_IMAGES=y' >> "$OMR_TARGET/source/.config" fi +if [ "$OMR_PACKAGES" = "full" ]; then + echo 'CONFIG_PACKAGE_${OMR_DIST}-full=y' >> "$OMR_TARGET/source/.config" +fi 
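Note on the two build.sh additions above: OMR_RELEASE now defaults to the newest tag in the repository, reduced to digits and dots, and OMR_PACKAGES chooses which OpenMPTCProuter metapackage is forced into the OpenWrt .config (the matching "mini" branch follows). A short sketch of both, with the tag value purely illustrative:

    # Release string: describe the newest tag, keep only digits and dots
    # (e.g. a "v0.55.4" tag becomes "0.55.4").
    latest_tag=$(git rev-list --tags --max-count=1)
    git describe --tags "$latest_tag" | sed 's/[^0-9.]//g'

    # Metapackage selection: with the single quotes used in the hunk above,
    # ${OMR_DIST} is written literally into .config; double quotes are needed
    # if the shell is meant to expand it at build time, e.g.:
    echo "CONFIG_PACKAGE_${OMR_DIST}-full=y" >> "$OMR_TARGET/source/.config"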
+if [ "$OMR_PACKAGES" = "mini" ]; then + echo 'CONFIG_PACKAGE_${OMR_DIST}-mini=y' >> "$OMR_TARGET/source/.config" +fi + cd "$OMR_TARGET/source" #if [ "$OMR_UEFI" = "yes" ] && [ "$OMR_TARGET" = "x86_64" ]; then @@ -214,7 +234,7 @@ if ! patch -Rf -N -p1 -s --dry-run < ../../patches/package-too-long.patch; then fi echo "Done" -echo "Downlaod via IPv4" +echo "Download via IPv4" if ! patch -Rf -N -p1 -s --dry-run < ../../patches/download-ipv4.patch; then patch -N -p1 -s < ../../patches/download-ipv4.patch fi @@ -252,7 +272,11 @@ fi # Remove patch that can make BPI-R2 slow rm -rf target/linux/mediatek/patches-4.14/0027-*.patch +rm -rf feeds/packages/libs/libwebp + echo "Update feeds index" +rm -rf feeds/luci/modules/luci-mod-network + cp .config .config.keep scripts/feeds clean scripts/feeds update -a @@ -267,8 +291,8 @@ scripts/feeds update -a #cd "$OMR_TARGET/source" if [ "$OMR_ALL_PACKAGES" = "yes" ]; then - scripts/feeds install -a -p packages - scripts/feeds install -a -p luci + scripts/feeds install -a -d m -p packages + scripts/feeds install -a -d m -p luci fi scripts/feeds install -a -d y -f -p openmptcprouter cp .config.keep .config diff --git a/config b/config index 59064a66..971e38db 100644 --- a/config +++ b/config @@ -194,17 +194,18 @@ CONFIG_KERNEL_TCP_CONG_BALIA=y CONFIG_KERNEL_MPTCP_FULLMESH=y CONFIG_KERNEL_DEFAULT_FULLMESH=y CONFIG_KERNEL_MPTCP_NDIFFPORTS=y -CONFIG_KERNEL_DEFAULT_NDIFFPORTS=n +# CONFIG_KERNEL_DEFAULT_NDIFFPORTS is not set CONFIG_KERNEL_MPTCP_BINDER=y CONFIG_KERNEL_MPTCP_ECF=y -CONFIG_KERNEL_DEFAULT_BINDER=n -CONFIG_KERNEL_DEFAULT_DUMMY=n +# CONFIG_KERNEL_DEFAULT_BINDER is not set +# CONFIG_KERNEL_DEFAULT_DUMMY is not set CONFIG_KERNEL_MPTCP_ROUNDROBIN=y -CONFIG_KERNEL_DEFAULT_ROUNDROBIN=n +# CONFIG_KERNEL_DEFAULT_ROUNDROBIN is not set CONFIG_KERNEL_MPTCP_REDUNDANT=y -CONFIG_KERNEL_DEFAULT_REDUNDANT=n +# CONFIG_KERNEL_DEFAULT_REDUNDANT is not set CONFIG_KERNEL_DEFAULT_SCHEDULER=y CONFIG_KERNEL_MPTCP=y +CONFIG_KERNEL_CRYPTO_SHA256=y CONFIG_LUCI_LANG_hu=y CONFIG_LUCI_LANG_pt=y CONFIG_LUCI_LANG_sk=y diff --git a/config-rpi2 b/config-rpi2 index de4667ba..e79e72d2 100644 --- a/config-rpi2 +++ b/config-rpi2 @@ -4,3 +4,4 @@ CONFIG_TARGET_bcm27xx_bcm2709_DEVICE_rpi-2=y CONFIG_PACKAGE_kmod-ath10k-ct=n CONFIG_PACKAGE_kmod-ath9k=y CONFIG_PACKAGE_bcm27xx-eeprom=y +CONFIG_PACKAGE_bcm27xx-userland=y diff --git a/config-rpi4 b/config-rpi4 index c5c07f5c..7fb408aa 100644 --- a/config-rpi4 +++ b/config-rpi4 @@ -4,3 +4,4 @@ CONFIG_TARGET_bcm27xx_bcm2711_DEVICE_rpi-4=y CONFIG_PACKAGE_kmod-ath10k-ct=n CONFIG_PACKAGE_kmod-ath9k=y CONFIG_PACKAGE_bcm27xx-eeprom=y +CONFIG_PACKAGE_bcm27xx-userland=y diff --git a/root/target/linux/generic/config-5.4 b/root/target/linux/generic/config-5.4 index 91d8268c..3b71ac91 100644 --- a/root/target/linux/generic/config-5.4 +++ b/root/target/linux/generic/config-5.4 @@ -81,24 +81,17 @@ CONFIG_64BIT_TIME=y # CONFIG_AD9832 is not set # CONFIG_AD9834 is not set # CONFIG_ADAPTEC_STARFIRE is not set -# CONFIG_ADE7753 is not set -# CONFIG_ADE7754 is not set -# CONFIG_ADE7758 is not set -# CONFIG_ADE7759 is not set # CONFIG_ADE7854 is not set # CONFIG_ADF4350 is not set # CONFIG_ADF4371 is not set # CONFIG_ADFS_FS is not set # CONFIG_ADIN_PHY is not set -# CONFIG_ADIS16060 is not set # CONFIG_ADIS16080 is not set # CONFIG_ADIS16130 is not set # CONFIG_ADIS16136 is not set # CONFIG_ADIS16201 is not set # CONFIG_ADIS16203 is not set -# CONFIG_ADIS16204 is not set # CONFIG_ADIS16209 is not set -# CONFIG_ADIS16220 is not set # CONFIG_ADIS16240 is not set # 
CONFIG_ADIS16260 is not set # CONFIG_ADIS16400 is not set @@ -118,6 +111,7 @@ CONFIG_AEABI=y # CONFIG_AFE4403 is not set # CONFIG_AFE4404 is not set # CONFIG_AFFS_FS is not set +# CONFIG_AFS_DEBUG_CURSOR is not set # CONFIG_AFS_FS is not set # CONFIG_AF_KCM is not set # CONFIG_AF_RXRPC is not set @@ -161,7 +155,6 @@ CONFIG_ANON_INODES=y # CONFIG_APDS9960 is not set # CONFIG_APM8018X is not set # CONFIG_APM_EMULATION is not set -# CONFIG_ENERGY_MODEL is not set # CONFIG_APPLE_GMUX is not set # CONFIG_APPLE_PROPERTIES is not set # CONFIG_APPLICOM is not set @@ -172,10 +165,12 @@ CONFIG_ANON_INODES=y # CONFIG_AR8216_PHY is not set # CONFIG_AR8216_PHY_LEDS is not set # CONFIG_ARCH_ACTIONS is not set +# CONFIG_ARCH_AGILEX is not set # CONFIG_ARCH_ALPINE is not set # CONFIG_ARCH_ARTPEC is not set # CONFIG_ARCH_ASPEED is not set # CONFIG_ARCH_AT91 is not set +# CONFIG_ARCH_AXXIA is not set # CONFIG_ARCH_BCM is not set # CONFIG_ARCH_BCM2835 is not set # CONFIG_ARCH_BCM_21664 is not set @@ -188,6 +183,7 @@ CONFIG_ANON_INODES=y # CONFIG_ARCH_BCM_IPROC is not set # CONFIG_ARCH_BCM_NSP is not set # CONFIG_ARCH_BERLIN is not set +# CONFIG_ARCH_BITMAIN is not set # CONFIG_ARCH_BRCMSTB is not set # CONFIG_ARCH_CLPS711X is not set # CONFIG_ARCH_CNS3XXX is not set @@ -248,7 +244,6 @@ CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=8 # CONFIG_ARCH_OMAP4 is not set # CONFIG_ARCH_ORION5X is not set # CONFIG_ARCH_OXNAS is not set -# CONFIG_ARCH_PHYS_ADDR_T_64BIT is not set # CONFIG_ARCH_PICOXCELL is not set # CONFIG_ARCH_PRIMA2 is not set # CONFIG_ARCH_PXA is not set @@ -265,7 +260,6 @@ CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=8 # CONFIG_ARCH_SA1100 is not set # CONFIG_ARCH_SEATTLE is not set # CONFIG_ARCH_SHMOBILE is not set -# CONFIG_ARCH_SHMOBILE_MULTI is not set # CONFIG_ARCH_SIRF is not set # CONFIG_ARCH_SOCFPGA is not set # CONFIG_ARCH_SPRD is not set @@ -288,7 +282,6 @@ CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=8 # CONFIG_ARCH_VULCAN is not set # CONFIG_ARCH_W90X900 is not set # CONFIG_ARCH_WANTS_THP_SWAP is not set -# CONFIG_ARCH_WANTS_UBSAN_NO_NULL is not set # CONFIG_ARCH_WM8505 is not set # CONFIG_ARCH_WM8750 is not set # CONFIG_ARCH_WM8850 is not set @@ -299,6 +292,9 @@ CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=8 # CONFIG_ARCNET is not set # CONFIG_ARC_EMAC is not set # CONFIG_ARC_IRQ_NO_AUTOSAVE is not set +# CONFIG_ARM64_16K_PAGES is not set +# CONFIG_ARM64_64K_PAGES is not set +# CONFIG_ARM64_CRYPTO is not set # CONFIG_ARM64_ERRATUM_1024718 is not set # CONFIG_ARM64_ERRATUM_1463225 is not set # CONFIG_ARM64_ERRATUM_819472 is not set @@ -310,11 +306,23 @@ CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=8 # CONFIG_ARM64_ERRATUM_843419 is not set # CONFIG_ARM64_ERRATUM_845719 is not set # CONFIG_ARM64_ERRATUM_858921 is not set +# CONFIG_ARM64_HW_AFDBM is not set +# CONFIG_ARM64_LSE_ATOMICS is not set +# CONFIG_ARM64_MODULE_PLTS is not set +# CONFIG_ARM64_PAN is not set +# CONFIG_ARM64_PMEM is not set +# CONFIG_ARM64_PSEUDO_NMI is not set +# CONFIG_ARM64_PTDUMP_DEBUGFS is not set +# CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET is not set # CONFIG_ARM64_RAS_EXTN is not set # CONFIG_ARM64_RELOC_TEST is not set CONFIG_ARM64_SW_TTBR0_PAN=y +# CONFIG_ARM64_UAO is not set +# CONFIG_ARM64_VA_BITS_48 is not set +# CONFIG_ARM64_VHE is not set # CONFIG_ARM_APPENDED_DTB is not set # CONFIG_ARM_ARCH_TIMER is not set +# CONFIG_ARM_ATAG_DTB_COMPAT_CMDLINE_EXTEND is not set # CONFIG_ARM_BIG_LITTLE_CPUFREQ is not set # CONFIG_ARM_CCI is not set # CONFIG_ARM_CCI400_PMU is not set @@ -344,29 +352,30 @@ CONFIG_ARM_DMA_MEM_BUFFERABLE=y # CONFIG_ARM_ERRATA_773022 
is not set # CONFIG_ARM_ERRATA_775420 is not set # CONFIG_ARM_ERRATA_798181 is not set +# CONFIG_ARM_ERRATA_814220 is not set # CONFIG_ARM_ERRATA_818325_852422 is not set # CONFIG_ARM_ERRATA_821420 is not set # CONFIG_ARM_ERRATA_825619 is not set -# CONFIG_ARM_ERRATA_857271 is not set # CONFIG_ARM_ERRATA_852421 is not set # CONFIG_ARM_ERRATA_852423 is not set +# CONFIG_ARM_ERRATA_857271 is not set # CONFIG_ARM_ERRATA_857272 is not set -# CONFIG_ARM_ERRATA_814220 is not set CONFIG_ARM_GIC_MAX_NR=1 -# CONFIG_ARM_KERNMEM_PERMS is not set # CONFIG_ARM_KIRKWOOD_CPUFREQ is not set # CONFIG_ARM_KPROBES_TEST is not set +# CONFIG_ARM_LPAE is not set # CONFIG_ARM_MHU is not set # CONFIG_ARM_MODULE_PLTS is not set # CONFIG_ARM_PATCH_PHYS_VIRT is not set # CONFIG_ARM_PSCI is not set # CONFIG_ARM_PSCI_CHECKER is not set -# CONFIG_ARM_PTDUMP is not set # CONFIG_ARM_PTDUMP_DEBUGFS is not set # CONFIG_ARM_SBSA_WATCHDOG is not set # CONFIG_ARM_SCPI_PROTOCOL is not set # CONFIG_ARM_SDE_INTERFACE is not set +# CONFIG_ARM_SP805_WATCHDOG is not set # CONFIG_ARM_SPE_PMU is not set +# CONFIG_ARM_THUMBEE is not set # CONFIG_ARM_TIMER_SP804 is not set # CONFIG_ARM_UNWIND is not set # CONFIG_ARM_VIRT_EXT is not set @@ -528,8 +537,6 @@ CONFIG_BITREVERSE=y # CONFIG_BLK_CGROUP_IOCOST is not set # CONFIG_BLK_CGROUP_IOLATENCY is not set # CONFIG_BLK_CMDLINE_PARSER is not set -# CONFIG_BLK_CPQ_CISS_DA is not set -# CONFIG_BLK_CPQ_DA is not set # CONFIG_BLK_DEBUG_FS is not set CONFIG_BLK_DEV=y # CONFIG_BLK_DEV_3W_XXXX_RAID is not set @@ -556,7 +563,6 @@ CONFIG_BLK_DEV=y # CONFIG_BLK_DEV_DTC2278 is not set # CONFIG_BLK_DEV_FD is not set # CONFIG_BLK_DEV_GENERIC is not set -# CONFIG_BLK_DEV_HD is not set # CONFIG_BLK_DEV_HPT366 is not set # CONFIG_BLK_DEV_HT6560B is not set # CONFIG_BLK_DEV_IDEACPI is not set @@ -625,9 +631,6 @@ CONFIG_BLOCK=y # CONFIG_BMI160_I2C is not set # CONFIG_BMI160_SPI is not set # CONFIG_BMIPS_GENERIC is not set -# CONFIG_BMP085 is not set -# CONFIG_BMP085_I2C is not set -# CONFIG_BMP085_SPI is not set # CONFIG_BMP280 is not set # CONFIG_BNA is not set # CONFIG_BNX2 is not set @@ -700,7 +703,6 @@ CONFIG_BT_BNEP_PROTO_FILTER=y # CONFIG_BT_HCIBPA10X is not set # CONFIG_BT_HCIBT3C is not set # CONFIG_BT_HCIBTSDIO is not set -# CONFIG_BT_HCIBTUART is not set # CONFIG_BT_HCIBTUSB is not set # CONFIG_BT_HCIBTUSB_AUTOSUSPEND is not set # CONFIG_BT_HCIBTUSB_MTK is not set @@ -790,6 +792,7 @@ CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y # CONFIG_CGROUPS is not set # CONFIG_CGROUP_BPF is not set # CONFIG_CGROUP_DEBUG is not set +# CONFIG_CGROUP_HUGETLB is not set # CONFIG_CGROUP_NET_CLASSID is not set # CONFIG_CGROUP_NET_PRIO is not set # CONFIG_CGROUP_RDMA is not set @@ -903,6 +906,8 @@ CONFIG_CONSTRUCTORS=y # CONFIG_CORTINA_PHY is not set # CONFIG_COUNTER is not set # CONFIG_CPA_DEBUG is not set +# CONFIG_CPU_BIG_ENDIAN is not set +# CONFIG_CPU_BPREDICT_DISABLE is not set # CONFIG_CPU_DCACHE_DISABLE is not set # CONFIG_CPU_FREQ is not set # CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set @@ -912,8 +917,12 @@ CONFIG_CONSTRUCTORS=y # CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set # CONFIG_CPU_FREQ_GOV_SCHEDUTIL is not set # CONFIG_CPU_FREQ_STAT_DETAILS is not set +# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set +# CONFIG_CPU_ICACHE_DISABLE is not set +# CONFIG_CPU_ICACHE_MISMATCH_WORKAROUND is not set # CONFIG_CPU_IDLE is not set # CONFIG_CPU_IDLE_GOV_MENU is not set +# CONFIG_CPU_IDLE_GOV_TEO is not set # CONFIG_CPU_IDLE_MULTIPLE_DRIVERS is not set # CONFIG_CPU_ISOLATION is not set # CONFIG_CPU_NO_EFFICIENT_FFS 
is not set @@ -999,6 +1008,7 @@ CONFIG_CRYPTO_BLKCIPHER2=y # CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC is not set # CONFIG_CRYPTO_DEV_HIFN_795X is not set # CONFIG_CRYPTO_DEV_HISI_SEC is not set +# CONFIG_CRYPTO_DEV_HISI_ZIP is not set # CONFIG_CRYPTO_DEV_IMGTEC_HASH is not set # CONFIG_CRYPTO_DEV_MARVELL_CESA is not set # CONFIG_CRYPTO_DEV_MV_CESA is not set @@ -1186,6 +1196,7 @@ CONFIG_DEBUG_KERNEL=y # CONFIG_DEBUG_TIMEKEEPING is not set # CONFIG_DEBUG_UART_8250_PALMCHIP is not set # CONFIG_DEBUG_UART_BCM63XX is not set +# CONFIG_DEBUG_USER is not set # CONFIG_DEBUG_VIRTUAL is not set # CONFIG_DEBUG_VM is not set # CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set @@ -1326,6 +1337,7 @@ CONFIG_DQL=y # CONFIG_DRM_LOAD_EDID_FIRMWARE is not set # CONFIG_DRM_LVDS_ENCODER is not set # CONFIG_DRM_MALI_DISPLAY is not set +# CONFIG_DRM_MCDE is not set # CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW is not set # CONFIG_DRM_MGAG200 is not set # CONFIG_DRM_MXSFB is not set @@ -1341,6 +1353,8 @@ CONFIG_DQL=y # CONFIG_DRM_PANEL_LG_LG4573 is not set # CONFIG_DRM_PANEL_LVDS is not set # CONFIG_DRM_PANEL_NEC_NL8048HL11 is not set +# CONFIG_DRM_PANEL_NOVATEK_NT39016 is not set +# CONFIG_DRM_PANEL_OLIMEX_LCD_OLINUXINO is not set # CONFIG_DRM_PANEL_ORISETECH_OTM8009A is not set # CONFIG_DRM_PANEL_PANASONIC_VVX10F034N00 is not set # CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN is not set @@ -1348,16 +1362,18 @@ CONFIG_DQL=y # CONFIG_DRM_PANEL_SAMSUNG_LD9040 is not set # CONFIG_DRM_PANEL_SAMSUNG_S6E3HA2 is not set # CONFIG_DRM_PANEL_SAMSUNG_S6E63J0X03 is not set +# CONFIG_DRM_PANEL_SAMSUNG_S6E63M0 is not set # CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0 is not set # CONFIG_DRM_PANEL_SEIKO_43WVF1G is not set # CONFIG_DRM_PANEL_SHARP_LQ101R1SX01 is not set # CONFIG_DRM_PANEL_SHARP_LS037V7DW01 is not set # CONFIG_DRM_PANEL_SHARP_LS043T1LE01 is not set # CONFIG_DRM_PANEL_SITRONIX_ST7789V is not set +# CONFIG_DRM_PANEL_SONY_ACX565AKM is not set # CONFIG_DRM_PANEL_TPO_TD043MTEA1 is not set +# CONFIG_DRM_PANEL_TPO_TD028TTEC1 is not set +# CONFIG_DRM_PANEL_TPO_TPG110 is not set # CONFIG_DRM_PANFROST is not set -# CONFIG_DRM_TOSHIBA_TC358764 is not set -# CONFIG_DRM_TI_SN65DSI86 is not set # CONFIG_DRM_PARADE_PS8622 is not set # CONFIG_DRM_PL111 is not set # CONFIG_DRM_QXL is not set @@ -1374,7 +1390,9 @@ CONFIG_DQL=y # CONFIG_DRM_THINE_THC63LVD1024 is not set # CONFIG_DRM_TILCDC is not set # CONFIG_DRM_TINYDRM is not set +# CONFIG_DRM_TI_SN65DSI86 is not set # CONFIG_DRM_TI_TFP410 is not set +# CONFIG_DRM_TOSHIBA_TC358764 is not set # CONFIG_DRM_TOSHIBA_TC358767 is not set # CONFIG_DRM_UDL is not set # CONFIG_DRM_VBOXVIDEO is not set @@ -1398,6 +1416,7 @@ CONFIG_DUMMY_CONSOLE_ROWS=25 # CONFIG_DVB_TUNER_DIB0070 is not set # CONFIG_DVB_TUNER_DIB0090 is not set # CONFIG_DWC_XLGMAC is not set +# CONFIG_DWMAC_DWC_QOS_ETH is not set # CONFIG_DWMAC_IPQ806X is not set # CONFIG_DWMAC_LPC18XX is not set # CONFIG_DWMAC_MESON is not set @@ -1446,6 +1465,7 @@ CONFIG_ENABLE_WARN_DEPRECATED=y # CONFIG_ENCLOSURE_SERVICES is not set # CONFIG_ENCRYPTED_KEYS is not set # CONFIG_ENCX24J600 is not set +# CONFIG_ENERGY_MODEL is not set # CONFIG_ENIC is not set # CONFIG_ENVELOPE_DETECTOR is not set # CONFIG_EPAPR_PARAVIRT is not set @@ -1489,10 +1509,12 @@ CONFIG_EXTRA_TARGETS="" # CONFIG_EXYNOS_VIDEO is not set # CONFIG_EZCHIP_NPS_MANAGEMENT_ENET is not set # CONFIG_EZX_PCAP is not set +# CONFIG_F2FS_CHECK_FS is not set # CONFIG_F2FS_FAULT_INJECTION is not set # CONFIG_F2FS_FS is not set # CONFIG_F2FS_FS_ENCRYPTION is not set # CONFIG_F2FS_FS_POSIX_ACL is not set 
+# CONFIG_F2FS_FS_SECURITY is not set # CONFIG_F2FS_IO_TRACE is not set # CONFIG_FAILOVER is not set # CONFIG_FAIR_GROUP_SCHED is not set @@ -1631,13 +1653,13 @@ CONFIG_FILE_LOCKING=y # CONFIG_FIRMWARE_EDID is not set # CONFIG_FIRMWARE_IN_KERNEL is not set # CONFIG_FIRMWARE_MEMMAP is not set -# CONFIG_TRUSTED_FOUNDATIONS is not set # CONFIG_FIXED_PHY is not set CONFIG_FLATMEM=y CONFIG_FLATMEM_MANUAL=y CONFIG_FLAT_NODE_MEM_MAP=y # CONFIG_FM10K is not set # CONFIG_FMC is not set +# CONFIG_FONTS is not set # CONFIG_FONT_TER16x32 is not set # CONFIG_FORCEDETH is not set CONFIG_FORCE_MAX_ZONEORDER=11 @@ -1652,10 +1674,10 @@ CONFIG_FRAME_WARN=1024 # CONFIG_FSCACHE is not set # CONFIG_FSI is not set # CONFIG_FSL_EDMA is not set -# CONFIG_FSL_QDMA is not set # CONFIG_FSL_ERRATUM_A008585 is not set # CONFIG_FSL_MC_BUS is not set # CONFIG_FSL_PQ_MDIO is not set +# CONFIG_FSL_QDMA is not set # CONFIG_FSL_XGMAC_MDIO is not set CONFIG_FSNOTIFY=y # CONFIG_FS_DAX is not set @@ -1686,6 +1708,8 @@ CONFIG_FW_LOADER=y CONFIG_FW_LOADER_USER_HELPER=y CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y # CONFIG_FXAS21002C is not set +# CONFIG_FXOS8700_I2C is not set +# CONFIG_FXOS8700_SPI is not set CONFIG_GACT_PROB=y # CONFIG_GADGET_UAC1 is not set # CONFIG_GAMEPORT is not set @@ -1969,6 +1993,7 @@ CONFIG_HPET_MMAP_DEFAULT=y # CONFIG_HTC_PASIC3 is not set # CONFIG_HTS221 is not set # CONFIG_HTU21 is not set +# CONFIG_HUGETLBFS is not set # CONFIG_HUGETLB_PAGE is not set # CONFIG_HVC_DCC is not set # CONFIG_HVC_UDBG is not set @@ -1998,8 +2023,8 @@ CONFIG_HW_RANDOM_TPM=y # CONFIG_HYPERV is not set # CONFIG_HYPERV_TSCPAGE is not set # CONFIG_HYSDN is not set -CONFIG_HZ=100 -CONFIG_HZ_100=y +# CONFIG_HZ is not set +# CONFIG_HZ_100 is not set # CONFIG_HZ_1000 is not set # CONFIG_HZ_1024 is not set # CONFIG_HZ_128 is not set @@ -2127,6 +2152,7 @@ CONFIG_HZ_100=y # CONFIG_IGBVF is not set # CONFIG_IGC is not set # CONFIG_IIO is not set +# CONFIG_IIO_BUFFER is not set # CONFIG_IIO_BUFFER_CB is not set # CONFIG_IIO_BUFFER_HW_CONSUMER is not set # CONFIG_IIO_CONFIGFS is not set @@ -2146,6 +2172,7 @@ CONFIG_IIO_CONSUMERS_PER_TRIGGER=2 # CONFIG_IIO_SW_DEVICE is not set # CONFIG_IIO_SW_TRIGGER is not set # CONFIG_IIO_SYSFS_TRIGGER is not set +# CONFIG_IIO_TRIGGER is not set # CONFIG_IKCONFIG is not set # CONFIG_IKCONFIG_PROC is not set # CONFIG_IKHEADERS is not set @@ -2819,6 +2846,7 @@ CONFIG_MAY_USE_DEVLINK=y CONFIG_MEMBARRIER=y # CONFIG_MEMORY is not set # CONFIG_MEMORY_FAILURE is not set +# CONFIG_MEMORY_HOTPLUG is not set # CONFIG_MEMSTICK is not set # CONFIG_MEMTEST is not set # CONFIG_MEN_A21_WDT is not set @@ -2828,6 +2856,7 @@ CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 # CONFIG_MFD_88PM805 is not set # CONFIG_MFD_88PM860X is not set # CONFIG_MFD_AAT2870_CORE is not set +# CONFIG_MFD_AC100 is not set # CONFIG_MFD_ACT8945A is not set # CONFIG_MFD_ARIZONA_I2C is not set # CONFIG_MFD_ARIZONA_SPI is not set @@ -2888,8 +2917,6 @@ CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 # CONFIG_MFD_RN5T618 is not set # CONFIG_MFD_ROHM_BD70528 is not set # CONFIG_MFD_ROHM_BD718XX is not set -# CONFIG_MFD_ROHM_BD70528 is not set -# CONFIG_MFD_STPMIC1 is not set # CONFIG_MFD_RT5033 is not set # CONFIG_MFD_RTSX_PCI is not set # CONFIG_MFD_RTSX_USB is not set @@ -2926,7 +2953,6 @@ CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 # CONFIG_MFD_TQMX86 is not set # CONFIG_MFD_VIPERBOARD is not set # CONFIG_MFD_VX855 is not set -# CONFIG_MFD_LOCHNAGAR is not set # CONFIG_MFD_WL1273_CORE is not set # CONFIG_MFD_WM831X is not set # CONFIG_MFD_WM831X_I2C is not set @@ -2989,7 +3015,6 @@ 
CONFIG_MISC_FILESYSTEMS=y # CONFIG_MMC is not set # CONFIG_MMC35240 is not set # CONFIG_MMC_ARMMMCI is not set -# CONFIG_MMC_STM32_SDMMC is not set # CONFIG_MMC_AU1X is not set # CONFIG_MMC_BLOCK is not set CONFIG_MMC_BLOCK_BOUNCE=y @@ -3024,6 +3049,7 @@ CONFIG_MMC_BLOCK_MINORS=8 # CONFIG_MMC_SDHCI_XENON is not set # CONFIG_MMC_SDRICOH_CS is not set # CONFIG_MMC_SPI is not set +# CONFIG_MMC_STM32_SDMMC is not set # CONFIG_MMC_TEST is not set # CONFIG_MMC_TOSHIBA_PCI is not set # CONFIG_MMC_USDHI6ROL0 is not set @@ -3096,6 +3122,7 @@ CONFIG_MTD_COMPLEX_MAPPINGS=y CONFIG_MTD_GEN_PROBE=y # CONFIG_MTD_GPIO_ADDR is not set # CONFIG_MTD_HYPERBUS is not set +# CONFIG_MTD_IMPA7 is not set # CONFIG_MTD_INTEL_VR_NOR is not set # CONFIG_MTD_JEDECPROBE is not set # CONFIG_MTD_LATCH_ADDR is not set @@ -3187,7 +3214,6 @@ CONFIG_MTD_PHYSMAP_OF=y # CONFIG_MTD_PMC551 is not set # CONFIG_MTD_RAM is not set # CONFIG_MTD_RAW_NAND is not set -# CONFIG_MTD_NAND_ECC_SW_BCH is not set CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-1 # CONFIG_MTD_REDBOOT_PARTS is not set # CONFIG_MTD_REDBOOT_PARTS_READONLY is not set @@ -3512,7 +3538,6 @@ CONFIG_NET_VENDOR_FARADAY=y CONFIG_NET_VENDOR_FREESCALE=y CONFIG_NET_VENDOR_FUJITSU=y CONFIG_NET_VENDOR_GOOGLE=y -# CONFIG_GVE is not set CONFIG_NET_VENDOR_HISILICON=y CONFIG_NET_VENDOR_HP=y CONFIG_NET_VENDOR_HUAWEI=y @@ -3738,10 +3763,12 @@ CONFIG_NMI_LOG_BUF_SHIFT=13 # CONFIG_NTFS_RW is not set # CONFIG_NTP_PPS is not set # CONFIG_NULL_TTY is not set +# CONFIG_NUMA is not set # CONFIG_NVM is not set # CONFIG_NVMEM is not set # CONFIG_NVMEM_BCM_OCOTP is not set # CONFIG_NVMEM_IMX_OCOTP is not set +# CONFIG_NVMEM_REBOOT_MODE is not set # CONFIG_NVMEM_SYSFS is not set # CONFIG_NVME_FC is not set # CONFIG_NVME_TARGET is not set @@ -3983,6 +4010,10 @@ CONFIG_PINCTRL_SINGLE=y # CONFIG_PINCTRL_SX150X is not set CONFIG_PINMUX=y # CONFIG_PKCS7_MESSAGE_PARSER is not set +# CONFIG_PL310_ERRATA_588369 is not set +# CONFIG_PL310_ERRATA_727915 is not set +# CONFIG_PL310_ERRATA_753970 is not set +# CONFIG_PL310_ERRATA_769419 is not set # CONFIG_PL320_MBOX is not set # CONFIG_PL330_DMA is not set # CONFIG_PLATFORM_MHU is not set @@ -3995,7 +4026,9 @@ CONFIG_PLUGIN_HOSTCC="" # CONFIG_PMC_MSP is not set # CONFIG_PMIC_ADP5520 is not set # CONFIG_PMIC_DA903X is not set +# CONFIG_PMS7003 is not set # CONFIG_PM_AUTOSLEEP is not set +# CONFIG_PM_DEBUG is not set # CONFIG_PM_DEVFREQ is not set # CONFIG_PM_WAKELOCKS is not set # CONFIG_POSIX_MQUEUE is not set @@ -4137,12 +4170,14 @@ CONFIG_PWRSEQ_SIMPLE=y # CONFIG_RAID_ATTRS is not set # CONFIG_RALINK is not set # CONFIG_RANDOM32_SELFTEST is not set +# CONFIG_RANDOMIZE_BASE is not set # CONFIG_RANDOM_TRUST_BOOTLOADER is not set # CONFIG_RANDOM_TRUST_CPU is not set # CONFIG_RAPIDIO is not set # CONFIG_RAS is not set # CONFIG_RAW_DRIVER is not set # CONFIG_RBTREE_TEST is not set +# CONFIG_RCU_BOOST is not set CONFIG_RCU_CPU_STALL_TIMEOUT=60 # CONFIG_RCU_EQS_DEBUG is not set # CONFIG_RCU_EXPEDITE_BOOT is not set @@ -4277,7 +4312,7 @@ CONFIG_RFKILL=y # CONFIG_RTC_DRV_BQ32K is not set # CONFIG_RTC_DRV_BQ4802 is not set # CONFIG_RTC_DRV_CADENCE is not set -# CONFIG_RTC_DRV_CMOS is not set +CONFIG_RTC_DRV_CMOS=y # CONFIG_RTC_DRV_DS1286 is not set # CONFIG_RTC_DRV_DS1302 is not set # CONFIG_RTC_DRV_DS1305 is not set @@ -4634,6 +4669,7 @@ CONFIG_SELECT_MEMORY_MODEL=y # CONFIG_SENSORS_LTC4245 is not set # CONFIG_SENSORS_LTC4260 is not set # CONFIG_SENSORS_LTC4261 is not set +# CONFIG_SENSORS_LTQ_CPUTEMP is not set # CONFIG_SENSORS_MAX1111 is not set # 
CONFIG_SENSORS_MAX16064 is not set # CONFIG_SENSORS_MAX16065 is not set @@ -4657,7 +4693,6 @@ CONFIG_SELECT_MEMORY_MODEL=y # CONFIG_SENSORS_NCT7802 is not set # CONFIG_SENSORS_NCT7904 is not set # CONFIG_SENSORS_NPCM7XX is not set -# CONFIG_SENSORS_OCC_P8_I2C is not set # CONFIG_SENSORS_NSA320 is not set # CONFIG_SENSORS_NTC_THERMISTOR is not set # CONFIG_SENSORS_OCC_P8_I2C is not set @@ -4744,6 +4779,7 @@ CONFIG_SERIAL_8250_RUNTIME_UARTS=2 # CONFIG_SERIAL_ALTERA_JTAGUART is not set # CONFIG_SERIAL_ALTERA_UART is not set # CONFIG_SERIAL_AMBA_PL010 is not set +# CONFIG_SERIAL_AMBA_PL011 is not set # CONFIG_SERIAL_ARC is not set # CONFIG_SERIAL_BCM63XX is not set # CONFIG_SERIAL_CONEXANT_DIGICOLOR is not set @@ -5039,6 +5075,7 @@ CONFIG_SND_PROC_FS=y # CONFIG_SND_SOC_CS53L30 is not set # CONFIG_SND_SOC_CX2072X is not set # CONFIG_SND_SOC_DIO2125 is not set +# CONFIG_SND_SOC_DMIC is not set # CONFIG_SND_SOC_ES7134 is not set # CONFIG_SND_SOC_ES7241 is not set # CONFIG_SND_SOC_ES8316 is not set @@ -5246,6 +5283,7 @@ CONFIG_SND_X86=y # CONFIG_SOUND is not set # CONFIG_SOUNDWIRE is not set # CONFIG_SOUND_OSS_CORE is not set +# CONFIG_SOUND_OSS_CORE_PRECLAIM is not set # CONFIG_SOUND_PRIME is not set # CONFIG_SP5100_TCO is not set # CONFIG_SPARSEMEM_MANUAL is not set @@ -5289,8 +5327,8 @@ CONFIG_SND_X86=y # CONFIG_SPI_PPC4xx is not set # CONFIG_SPI_PXA2XX is not set # CONFIG_SPI_PXA2XX_PCI is not set -# CONFIG_SPI_ROCKCHIP is not set # CONFIG_SPI_QCOM_QSPI is not set +# CONFIG_SPI_ROCKCHIP is not set # CONFIG_SPI_S3C64XX is not set # CONFIG_SPI_SC18IS602 is not set # CONFIG_SPI_SIFIVE is not set @@ -5398,7 +5436,6 @@ CONFIG_SWAP=y # CONFIG_SYNOPSYS_DWC_ETH_QOS is not set CONFIG_SYN_COOKIES=y # CONFIG_SYSCON_REBOOT_MODE is not set -# CONFIG_NVMEM_REBOOT_MODE is not set CONFIG_SYSCTL=y # CONFIG_SYSCTL_SYSCALL is not set CONFIG_SYSFS=y @@ -5513,6 +5550,7 @@ CONFIG_TEXTSEARCH=y # CONFIG_THINKPAD_ACPI is not set CONFIG_THIN_ARCHIVES=y # CONFIG_THRUSTMASTER_FF is not set +# CONFIG_THUMB2_KERNEL is not set # CONFIG_THUNDERBOLT is not set # CONFIG_THUNDER_NIC_BGX is not set # CONFIG_THUNDER_NIC_PF is not set @@ -5712,6 +5750,7 @@ CONFIG_TRAD_SIGNALS=y # CONFIG_TREE_RCU is not set # CONFIG_TREE_RCU_TRACE is not set # CONFIG_TRIM_UNUSED_KSYMS is not set +# CONFIG_TRUSTED_FOUNDATIONS is not set # CONFIG_TRUSTED_KEYS is not set # CONFIG_TSL2583 is not set # CONFIG_TSL2772 is not set @@ -5762,6 +5801,7 @@ CONFIG_UNIX98_PTYS=y # CONFIG_UNIX_DIAG is not set CONFIG_UNIX_SCM=y # CONFIG_UNUSED_SYMBOLS is not set +# CONFIG_UNWINDER_FRAME_POINTER is not set # CONFIG_UPROBES is not set # CONFIG_UPROBE_EVENTS is not set # CONFIG_US5182D is not set @@ -5803,6 +5843,7 @@ CONFIG_USB_DEFAULT_PERSIST=y # CONFIG_USB_DWC2_DUAL_ROLE is not set # CONFIG_USB_DWC2_HOST is not set # CONFIG_USB_DWC2_PERIPHERAL is not set +# CONFIG_USB_DWC2_TRACK_MISSED_SOFS is not set # CONFIG_USB_DWC3 is not set # CONFIG_USB_DWC3_EXYNOS is not set # CONFIG_USB_DWC3_HAPS is not set @@ -5814,6 +5855,7 @@ CONFIG_USB_DEFAULT_PERSIST=y # CONFIG_USB_DYNAMIC_MINORS is not set # CONFIG_USB_EG20T is not set # CONFIG_USB_EHCI_ATH79 is not set +# CONFIG_USB_EHCI_FSL is not set # CONFIG_USB_EHCI_HCD_AT91 is not set # CONFIG_USB_EHCI_HCD_OMAP is not set # CONFIG_USB_EHCI_HCD_PPC_OF is not set @@ -6284,7 +6326,6 @@ CONFIG_VMSPLIT_3G=y # CONFIG_W1_MASTER_GPIO is not set # CONFIG_W1_MASTER_MATROX is not set # CONFIG_W1_MASTER_SGI is not set -# CONFIG_W1_SLAVE_BQ27000 is not set # CONFIG_W1_SLAVE_DS2405 is not set # CONFIG_W1_SLAVE_DS2406 is not set # 
CONFIG_W1_SLAVE_DS2408 is not set @@ -6294,7 +6335,6 @@ CONFIG_VMSPLIT_3G=y # CONFIG_W1_SLAVE_DS2433 is not set # CONFIG_W1_SLAVE_DS2438 is not set # CONFIG_W1_SLAVE_DS250X is not set -# CONFIG_W1_SLAVE_DS2760 is not set # CONFIG_W1_SLAVE_DS2780 is not set # CONFIG_W1_SLAVE_DS2781 is not set # CONFIG_W1_SLAVE_DS2805 is not set @@ -6324,7 +6364,6 @@ CONFIG_WEXT_PROC=y CONFIG_WEXT_SPY=y CONFIG_WILINK_PLATFORM_DATA=y # CONFIG_WIMAX is not set -# CONFIG_WIMAX_GDM72XX is not set CONFIG_WIRELESS=y CONFIG_WIRELESS_EXT=y # CONFIG_WIRELESS_WDS is not set @@ -6351,14 +6390,11 @@ CONFIG_WLAN=y # CONFIG_WLAN_VENDOR_TI is not set # CONFIG_WLAN_VENDOR_ZYDAS is not set # CONFIG_WLCORE is not set -# CONFIG_WL_MEDIATEK is not set -CONFIG_WL_TI=y CONFIG_WQ_POWER_EFFICIENT_DEFAULT=y # CONFIG_WQ_WATCHDOG is not set # CONFIG_WW_MUTEX_SELFTEST is not set # CONFIG_X25 is not set # CONFIG_X509_CERTIFICATE_PARSER is not set -# CONFIG_X86_DEBUG_STATIC_CPU_HAS is not set # CONFIG_X86_PKG_TEMP_THERMAL is not set CONFIG_X86_SYSFB=y # CONFIG_XDP_SOCKETS is not set @@ -6462,3 +6498,4 @@ CONFIG_DEFAULT_SCHEDULER=y # CONFIG_DEFAULT_BLEST is not set # CONFIG_DEFAULT_REDUNDANT is not set CONFIG_NF_CONNTRACK_CUSTOM=2 +CONFIG_CRYPTO_SHA256=y \ No newline at end of file diff --git a/root/target/linux/generic/hack-5.4/690-mptcp_trunk.patch b/root/target/linux/generic/hack-5.4/690-mptcp_trunk.patch index d429ab22..af8944c8 100644 --- a/root/target/linux/generic/hack-5.4/690-mptcp_trunk.patch +++ b/root/target/linux/generic/hack-5.4/690-mptcp_trunk.patch @@ -1,6 +1,20 @@ +diff -aurN linux-5.4/Documentation/admin-guide/kernel-parameters.txt mptcp-mptcp_trunk/Documentation/admin-guide/kernel-parameters.txt +--- linux-5.4/Documentation/admin-guide/kernel-parameters.txt 2019-11-25 01:32:01.000000000 +0100 ++++ mptcp-mptcp_trunk/Documentation/admin-guide/kernel-parameters.txt 2020-06-09 17:33:41.000000000 +0200 +@@ -2724,6 +2724,10 @@ + allocations which rules out almost all kernel + allocations. Use with caution! + ++ mptcp_htable_entries= ++ [KNL,NET] Set number of hash buckets for MPTCP token ++ hashtables. 
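Note: the mptcp_htable_entries= entry documented just above is a boot-time setting, so it is passed on the kernel command line rather than through sysctl. A minimal usage sketch, with the value only an illustration (the accepted range is defined elsewhere in the patch, not shown here):

    # append to the kernel command line in the bootloader configuration:
    #   ... mptcp_htable_entries=8192
    # then confirm after reboot that it was picked up:
    grep -o 'mptcp_htable_entries=[0-9]*' /proc/cmdline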
++ + MTD_Partition= [MTD] + Format: ,,, + diff -aurN linux-5.4/Documentation/networking/ip-sysctl.txt mptcp-mptcp_trunk/Documentation/networking/ip-sysctl.txt --- linux-5.4/Documentation/networking/ip-sysctl.txt 2019-11-25 01:32:01.000000000 +0100 -+++ mptcp-mptcp_trunk/Documentation/networking/ip-sysctl.txt 2020-02-20 18:07:47.000000000 +0100 ++++ mptcp-mptcp_trunk/Documentation/networking/ip-sysctl.txt 2020-06-09 17:33:41.000000000 +0200 @@ -818,6 +818,18 @@ Default: 0 (disabled) @@ -22,7 +36,7 @@ diff -aurN linux-5.4/Documentation/networking/ip-sysctl.txt mptcp-mptcp_trunk/Do udp_l3mdev_accept - BOOLEAN diff -aurN linux-5.4/drivers/infiniband/hw/cxgb4/cm.c mptcp-mptcp_trunk/drivers/infiniband/hw/cxgb4/cm.c --- linux-5.4/drivers/infiniband/hw/cxgb4/cm.c 2019-11-25 01:32:01.000000000 +0100 -+++ mptcp-mptcp_trunk/drivers/infiniband/hw/cxgb4/cm.c 2020-02-20 18:07:47.000000000 +0100 ++++ mptcp-mptcp_trunk/drivers/infiniband/hw/cxgb4/cm.c 2020-06-09 17:33:41.000000000 +0200 @@ -3946,7 +3946,7 @@ */ memset(&tmp_opt, 0, sizeof(tmp_opt)); @@ -34,7 +48,7 @@ diff -aurN linux-5.4/drivers/infiniband/hw/cxgb4/cm.c mptcp-mptcp_trunk/drivers/ memset(req, 0, sizeof(*req)); diff -aurN linux-5.4/include/linux/skbuff.h mptcp-mptcp_trunk/include/linux/skbuff.h --- linux-5.4/include/linux/skbuff.h 2019-11-25 01:32:01.000000000 +0100 -+++ mptcp-mptcp_trunk/include/linux/skbuff.h 2020-02-20 18:07:47.000000000 +0100 ++++ mptcp-mptcp_trunk/include/linux/skbuff.h 2020-06-09 17:33:41.000000000 +0200 @@ -717,7 +717,7 @@ * want to keep them across layers you have to do a skb_clone() * first. This is owned by whoever has the skb queued ATM. @@ -46,7 +60,7 @@ diff -aurN linux-5.4/include/linux/skbuff.h mptcp-mptcp_trunk/include/linux/skbu struct { diff -aurN linux-5.4/include/linux/tcp.h mptcp-mptcp_trunk/include/linux/tcp.h --- linux-5.4/include/linux/tcp.h 2019-11-25 01:32:01.000000000 +0100 -+++ mptcp-mptcp_trunk/include/linux/tcp.h 2020-02-20 18:07:47.000000000 +0100 ++++ mptcp-mptcp_trunk/include/linux/tcp.h 2020-06-09 17:33:41.000000000 +0200 @@ -54,7 +54,7 @@ /* TCP Fast Open */ #define TCP_FASTOPEN_COOKIE_MIN 4 /* Min Fast Open Cookie size in bytes */ @@ -205,7 +219,7 @@ diff -aurN linux-5.4/include/linux/tcp.h mptcp-mptcp_trunk/include/linux/tcp.h static inline struct tcp_timewait_sock *tcp_twsk(const struct sock *sk) diff -aurN linux-5.4/include/net/inet_common.h mptcp-mptcp_trunk/include/net/inet_common.h --- linux-5.4/include/net/inet_common.h 2019-11-25 01:32:01.000000000 +0100 -+++ mptcp-mptcp_trunk/include/net/inet_common.h 2020-02-20 18:07:47.000000000 +0100 ++++ mptcp-mptcp_trunk/include/net/inet_common.h 2020-06-09 17:33:41.000000000 +0200 @@ -2,6 +2,7 @@ #ifndef _INET_COMMON_H #define _INET_COMMON_H @@ -225,7 +239,7 @@ diff -aurN linux-5.4/include/net/inet_common.h mptcp-mptcp_trunk/include/net/ine int addr_len, int flags); diff -aurN linux-5.4/include/net/inet_connection_sock.h mptcp-mptcp_trunk/include/net/inet_connection_sock.h --- linux-5.4/include/net/inet_connection_sock.h 2019-11-25 01:32:01.000000000 +0100 -+++ mptcp-mptcp_trunk/include/net/inet_connection_sock.h 2020-02-20 18:07:47.000000000 +0100 ++++ mptcp-mptcp_trunk/include/net/inet_connection_sock.h 2020-06-09 17:33:41.000000000 +0200 @@ -25,6 +25,7 @@ struct inet_bind_bucket; @@ -236,7 +250,7 @@ diff -aurN linux-5.4/include/net/inet_connection_sock.h mptcp-mptcp_trunk/includ * Pointers to address related TCP functions diff -aurN linux-5.4/include/net/inet_sock.h mptcp-mptcp_trunk/include/net/inet_sock.h --- 
linux-5.4/include/net/inet_sock.h 2019-11-25 01:32:01.000000000 +0100 -+++ mptcp-mptcp_trunk/include/net/inet_sock.h 2020-02-20 18:07:47.000000000 +0100 ++++ mptcp-mptcp_trunk/include/net/inet_sock.h 2020-06-09 17:33:41.000000000 +0200 @@ -79,7 +79,7 @@ #define ireq_state req.__req_common.skc_state #define ireq_family req.__req_common.skc_family @@ -257,8 +271,8 @@ diff -aurN linux-5.4/include/net/inet_sock.h mptcp-mptcp_trunk/include/net/inet_ union { diff -aurN linux-5.4/include/net/mptcp.h mptcp-mptcp_trunk/include/net/mptcp.h --- linux-5.4/include/net/mptcp.h 1970-01-01 01:00:00.000000000 +0100 -+++ mptcp-mptcp_trunk/include/net/mptcp.h 2020-02-20 18:07:47.000000000 +0100 -@@ -0,0 +1,1497 @@ ++++ mptcp-mptcp_trunk/include/net/mptcp.h 2020-06-09 17:33:41.000000000 +0200 +@@ -0,0 +1,1545 @@ +/* + * MPTCP implementation + * @@ -305,6 +319,7 @@ diff -aurN linux-5.4/include/net/mptcp.h mptcp-mptcp_trunk/include/net/mptcp.h +#include +#include +#include ++#include +#include + +#if defined(__LITTLE_ENDIAN_BITFIELD) @@ -363,7 +378,8 @@ diff -aurN linux-5.4/include/net/mptcp.h mptcp-mptcp_trunk/include/net/mptcp.h + + u8 loc_id; + u8 rem_id; /* Address-id in the MP_JOIN */ -+ u8 dss_csum:1, ++ u16 dss_csum:1, ++ rem_key_set:1, + is_sub:1, /* Is this a new subflow? */ + low_prio:1, /* Interface set to low-prio? */ + rcv_low_prio:1, @@ -462,7 +478,7 @@ diff -aurN linux-5.4/include/net/mptcp.h mptcp-mptcp_trunk/include/net/mptcp.h + struct timer_list mptcp_ack_timer; + + /* HMAC of the third ack */ -+ char sender_mac[20]; ++ char sender_mac[SHA256_DIGEST_SIZE]; +}; + +struct mptcp_tw { @@ -501,7 +517,6 @@ diff -aurN linux-5.4/include/net/mptcp.h mptcp-mptcp_trunk/include/net/mptcp.h + struct module *owner; +}; + -+#define MPTCP_SCHED_NAME_MAX 16 +struct mptcp_sched_ops { + struct list_head list; + @@ -533,6 +548,8 @@ diff -aurN linux-5.4/include/net/mptcp.h mptcp-mptcp_trunk/include/net/mptcp.h + u32 rcv_high_order[2]; + + u16 send_infinite_mapping:1, ++ send_mptcpv1_mpcapable:1, ++ rem_key_set:1, + in_time_wait:1, + list_rcvd:1, /* XXX TO REMOVE */ + addr_signal:1, /* Path-manager wants us to call addr_signal */ @@ -615,6 +632,16 @@ diff -aurN linux-5.4/include/net/mptcp.h mptcp-mptcp_trunk/include/net/mptcp.h +#define MPTCP_SUB_LEN_CAPABLE_ACK 20 +#define MPTCP_SUB_LEN_CAPABLE_ACK_ALIGN 20 + ++#define MPTCPV1_SUB_LEN_CAPABLE_SYN 4 ++#define MPTCPV1_SUB_LEN_CAPABLE_SYN_ALIGN 4 ++#define MPTCPV1_SUB_LEN_CAPABLE_SYNACK 12 ++#define MPTCPV1_SUB_LEN_CAPABLE_SYNACK_ALIGN 12 ++#define MPTCPV1_SUB_LEN_CAPABLE_ACK 20 ++#define MPTCPV1_SUB_LEN_CAPABLE_ACK_ALIGN 20 ++#define MPTCPV1_SUB_LEN_CAPABLE_DATA 22 ++#define MPTCPV1_SUB_LEN_CAPABLE_DATA_CSUM 22 ++#define MPTCPV1_SUB_LEN_CAPABLE_DATA_ALIGN 24 ++ +#define MPTCP_SUB_JOIN 1 +#define MPTCP_SUB_LEN_JOIN_SYN 12 +#define MPTCP_SUB_LEN_JOIN_SYN_ALIGN 12 @@ -711,14 +738,15 @@ diff -aurN linux-5.4/include/net/mptcp.h mptcp-mptcp_trunk/include/net/mptcp.h +#define MPTCPHDR_SEQ 0x01 /* DSS.M option is present */ +#define MPTCPHDR_FIN 0x02 /* DSS.F option is present */ +#define MPTCPHDR_SEQ64_INDEX 0x04 /* index of seq in mpcb->snd_high_order */ ++#define MPTCPHDR_MPC_DATA 0x08 +/* MPTCP flags: RX only */ -+#define MPTCPHDR_ACK 0x08 -+#define MPTCPHDR_SEQ64_SET 0x10 /* Did we received a 64-bit seq number? */ -+#define MPTCPHDR_SEQ64_OFO 0x20 /* Is it not in our circular array? */ -+#define MPTCPHDR_DSS_CSUM 0x40 ++#define MPTCPHDR_ACK 0x10 ++#define MPTCPHDR_SEQ64_SET 0x20 /* Did we received a 64-bit seq number? 
*/ ++#define MPTCPHDR_SEQ64_OFO 0x40 /* Is it not in our circular array? */ ++#define MPTCPHDR_DSS_CSUM 0x80 +/* MPTCP flags: TX only */ -+#define MPTCPHDR_INF 0x08 -+#define MPTCP_REINJECT 0x10 /* Did we reinject this segment? */ ++#define MPTCPHDR_INF 0x10 ++#define MPTCP_REINJECT 0x20 /* Did we reinject this segment? */ + +struct mptcp_option { + __u8 kind; @@ -818,11 +846,29 @@ diff -aurN linux-5.4/include/net/mptcp.h mptcp-mptcp_trunk/include/net/mptcp.h + __u8 kind; + __u8 len; +#if defined(__LITTLE_ENDIAN_BITFIELD) -+ __u8 ipver:4, -+ sub:4; ++ union { ++ struct { ++ __u8 ipver:4, ++ sub:4; ++ } v0; ++ struct { ++ __u8 echo:1, ++ rsv:3, ++ sub:4; ++ } v1; ++ } u_bit; +#elif defined(__BIG_ENDIAN_BITFIELD) -+ __u8 sub:4, -+ ipver:4; ++ union { ++ struct { ++ __u8 sub:4, ++ ipver:4; ++ } v0; ++ struct { ++ __u8 sub:4, ++ rsv:3, ++ echo:1; ++ } v1; ++ } u_bit; +#else +#error "Adjust your defines" +#endif @@ -908,6 +954,11 @@ diff -aurN linux-5.4/include/net/mptcp.h mptcp-mptcp_trunk/include/net/mptcp.h + __u8 addr_id; +} __attribute__((__packed__)); + ++struct mptcp_hashtable { ++ struct hlist_nulls_head *hashtable; ++ unsigned int mask; ++}; ++ +static inline int mptcp_sub_len_dss(const struct mp_dss *m, const int csum) +{ + return 4 + m->A * (4 + m->a * 4) + m->M * (10 + m->m * 4 + csum * 2); @@ -1014,9 +1065,7 @@ diff -aurN linux-5.4/include/net/mptcp.h mptcp-mptcp_trunk/include/net/mptcp.h + */ +extern u32 mptcp_seed; + -+#define MPTCP_HASH_SIZE 1024 -+ -+extern struct hlist_nulls_head tk_hashtable[MPTCP_HASH_SIZE]; ++extern struct mptcp_hashtable mptcp_tk_htable; + +/* Request-sockets can be hashed in the tk_htb for collision-detection or in + * the regular htb for join-connections. We need to define different NULLS @@ -1061,10 +1110,11 @@ diff -aurN linux-5.4/include/net/mptcp.h mptcp-mptcp_trunk/include/net/mptcp.h +void mptcp_close(struct sock *meta_sk, long timeout); +bool mptcp_doit(struct sock *sk); +int mptcp_create_master_sk(struct sock *meta_sk, __u64 remote_key, -+ __u8 mptcp_ver, u32 window); ++ int rem_key_set, __u8 mptcp_ver, u32 window); +int mptcp_check_req_fastopen(struct sock *child, struct request_sock *req); +int mptcp_check_req_master(struct sock *sk, struct sock *child, + struct request_sock *req, const struct sk_buff *skb, ++ const struct mptcp_options_received *mopt, + int drop, u32 tsoff); +struct sock *mptcp_check_req_child(struct sock *meta_sk, + struct sock *child, @@ -1077,8 +1127,8 @@ diff -aurN linux-5.4/include/net/mptcp.h mptcp-mptcp_trunk/include/net/mptcp.h + int wscale_ok, __u8 *rcv_wscale, + __u32 init_rcv_wnd); +unsigned int mptcp_current_mss(struct sock *meta_sk); -+void mptcp_hmac_sha1(const u8 *key_1, const u8 *key_2, u32 *hash_out, -+ int arg_num, ...); ++void mptcp_hmac(u8 ver, const u8 *key_1, const u8 *key_2, u8 *hash_out, ++ int arg_num, ...); +void mptcp_clean_rtx_infinite(const struct sk_buff *skb, struct sock *sk); +void mptcp_fin(struct sock *meta_sk); +void mptcp_meta_retransmit_timer(struct sock *meta_sk); @@ -1088,6 +1138,8 @@ diff -aurN linux-5.4/include/net/mptcp.h mptcp-mptcp_trunk/include/net/mptcp.h +void mptcp_sub_close(struct sock *sk, unsigned long delay); +struct sock *mptcp_select_ack_sock(const struct sock *meta_sk); +void mptcp_prepare_for_backlog(struct sock *sk, struct sk_buff *skb); ++void mptcp_initialize_recv_vars(struct tcp_sock *meta_tp, struct mptcp_cb *mpcb, ++ __u64 remote_key); +int mptcp_backlog_rcv(struct sock *meta_sk, struct sk_buff *skb); +void mptcp_ack_handler(struct timer_list *t); +bool 
mptcp_check_rtt(const struct tcp_sock *tp, int time); @@ -1163,6 +1215,10 @@ diff -aurN linux-5.4/include/net/mptcp.h mptcp-mptcp_trunk/include/net/mptcp.h +bool subflow_is_backup(const struct tcp_sock *tp); +struct sock *get_available_subflow(struct sock *meta_sk, struct sk_buff *skb, + bool zero_wnd_test); ++struct sk_buff *mptcp_next_segment(struct sock *meta_sk, ++ int *reinject, ++ struct sock **subsk, ++ unsigned int *limit); +extern struct mptcp_sched_ops mptcp_sched_default; + +/* Initializes function-pointers and MPTCP-flags */ @@ -1243,6 +1299,11 @@ diff -aurN linux-5.4/include/net/mptcp.h mptcp-mptcp_trunk/include/net/mptcp.h + } +} + ++static inline bool mptcp_is_data_mpcapable(const struct sk_buff *skb) ++{ ++ return TCP_SKB_CB(skb)->mptcp_flags & MPTCPHDR_MPC_DATA; ++} ++ +static inline bool mptcp_is_data_seq(const struct sk_buff *skb) +{ + return TCP_SKB_CB(skb)->mptcp_flags & MPTCPHDR_SEQ; @@ -1660,6 +1721,7 @@ diff -aurN linux-5.4/include/net/mptcp.h mptcp-mptcp_trunk/include/net/mptcp.h + const struct sock *child, + const struct request_sock *req, + const struct sk_buff *skb, ++ const struct mptcp_options_received *mopt, + int drop, + u32 tsoff) +{ @@ -1758,7 +1820,7 @@ diff -aurN linux-5.4/include/net/mptcp.h mptcp-mptcp_trunk/include/net/mptcp.h +#endif /* _MPTCP_H */ diff -aurN linux-5.4/include/net/mptcp_v4.h mptcp-mptcp_trunk/include/net/mptcp_v4.h --- linux-5.4/include/net/mptcp_v4.h 1970-01-01 01:00:00.000000000 +0100 -+++ mptcp-mptcp_trunk/include/net/mptcp_v4.h 2020-02-20 18:07:47.000000000 +0100 ++++ mptcp-mptcp_trunk/include/net/mptcp_v4.h 2020-06-09 17:33:41.000000000 +0200 @@ -0,0 +1,76 @@ +/* + * MPTCP implementation @@ -1838,7 +1900,7 @@ diff -aurN linux-5.4/include/net/mptcp_v4.h mptcp-mptcp_trunk/include/net/mptcp_ +#endif /* MPTCP_V4_H_ */ diff -aurN linux-5.4/include/net/mptcp_v6.h mptcp-mptcp_trunk/include/net/mptcp_v6.h --- linux-5.4/include/net/mptcp_v6.h 1970-01-01 01:00:00.000000000 +0100 -+++ mptcp-mptcp_trunk/include/net/mptcp_v6.h 2020-02-20 18:07:47.000000000 +0100 ++++ mptcp-mptcp_trunk/include/net/mptcp_v6.h 2020-06-09 17:33:41.000000000 +0200 @@ -0,0 +1,77 @@ +/* + * MPTCP implementation @@ -1919,7 +1981,7 @@ diff -aurN linux-5.4/include/net/mptcp_v6.h mptcp-mptcp_trunk/include/net/mptcp_ +#endif /* _MPTCP_V6_H */ diff -aurN linux-5.4/include/net/net_namespace.h mptcp-mptcp_trunk/include/net/net_namespace.h --- linux-5.4/include/net/net_namespace.h 2019-11-25 01:32:01.000000000 +0100 -+++ mptcp-mptcp_trunk/include/net/net_namespace.h 2020-02-20 18:07:47.000000000 +0100 ++++ mptcp-mptcp_trunk/include/net/net_namespace.h 2020-06-09 17:33:41.000000000 +0200 @@ -19,6 +19,7 @@ #include #include @@ -1940,7 +2002,7 @@ diff -aurN linux-5.4/include/net/net_namespace.h mptcp-mptcp_trunk/include/net/n #endif diff -aurN linux-5.4/include/net/netns/mptcp.h mptcp-mptcp_trunk/include/net/netns/mptcp.h --- linux-5.4/include/net/netns/mptcp.h 1970-01-01 01:00:00.000000000 +0100 -+++ mptcp-mptcp_trunk/include/net/netns/mptcp.h 2020-02-20 18:07:47.000000000 +0100 ++++ mptcp-mptcp_trunk/include/net/netns/mptcp.h 2020-06-09 17:33:41.000000000 +0200 @@ -0,0 +1,52 @@ +/* + * MPTCP implementation - MPTCP namespace @@ -1996,7 +2058,7 @@ diff -aurN linux-5.4/include/net/netns/mptcp.h mptcp-mptcp_trunk/include/net/net +#endif /* __NETNS_MPTCP_H__ */ diff -aurN linux-5.4/include/net/snmp.h mptcp-mptcp_trunk/include/net/snmp.h --- linux-5.4/include/net/snmp.h 2019-11-25 01:32:01.000000000 +0100 -+++ mptcp-mptcp_trunk/include/net/snmp.h 2020-02-20 18:07:47.000000000 
+0100 ++++ mptcp-mptcp_trunk/include/net/snmp.h 2020-06-09 17:33:41.000000000 +0200 @@ -86,7 +86,6 @@ atomic_long_t mibs[ICMP6MSG_MIB_MAX]; }; @@ -2007,7 +2069,7 @@ diff -aurN linux-5.4/include/net/snmp.h mptcp-mptcp_trunk/include/net/snmp.h struct tcp_mib { diff -aurN linux-5.4/include/net/sock.h mptcp-mptcp_trunk/include/net/sock.h --- linux-5.4/include/net/sock.h 2019-11-25 01:32:01.000000000 +0100 -+++ mptcp-mptcp_trunk/include/net/sock.h 2020-02-20 18:07:47.000000000 +0100 ++++ mptcp-mptcp_trunk/include/net/sock.h 2020-06-09 17:33:41.000000000 +0200 @@ -814,6 +814,7 @@ SOCK_TXTIME, SOCK_XDP, /* XDP is attached */ @@ -2026,7 +2088,7 @@ diff -aurN linux-5.4/include/net/sock.h mptcp-mptcp_trunk/include/net/sock.h #ifdef CONFIG_PROC_FS diff -aurN linux-5.4/include/net/tcp.h mptcp-mptcp_trunk/include/net/tcp.h --- linux-5.4/include/net/tcp.h 2019-11-25 01:32:01.000000000 +0100 -+++ mptcp-mptcp_trunk/include/net/tcp.h 2020-02-20 18:07:47.000000000 +0100 ++++ mptcp-mptcp_trunk/include/net/tcp.h 2020-06-09 17:33:41.000000000 +0200 @@ -182,6 +182,7 @@ #define TCPOPT_SACK 5 /* SACK Block */ #define TCPOPT_TIMESTAMP 8 /* Better RTT estimations/PAWS */ @@ -2067,7 +2129,7 @@ diff -aurN linux-5.4/include/net/tcp.h mptcp-mptcp_trunk/include/net/tcp.h /* sysctl variables for tcp */ extern int sysctl_tcp_max_orphans; -@@ -310,6 +336,97 @@ +@@ -310,6 +336,96 @@ #define TCP_DEC_STATS(net, field) SNMP_DEC_STATS((net)->mib.tcp_statistics, field) #define TCP_ADD_STATS(net, field, val) SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val) @@ -2078,7 +2140,6 @@ diff -aurN linux-5.4/include/net/tcp.h mptcp-mptcp_trunk/include/net/tcp.h +struct mptcp_options_received; + +void tcp_cleanup_rbuf(struct sock *sk, int copied); -+void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited); +int tcp_close_state(struct sock *sk); +void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss_now, + const struct sk_buff *skb); @@ -2165,7 +2226,7 @@ diff -aurN linux-5.4/include/net/tcp.h mptcp-mptcp_trunk/include/net/tcp.h void tcp_tasklet_init(void); int tcp_v4_err(struct sk_buff *skb, u32); -@@ -411,7 +528,9 @@ +@@ -411,7 +527,9 @@ #endif void tcp_parse_options(const struct net *net, const struct sk_buff *skb, struct tcp_options_received *opt_rx, @@ -2176,7 +2237,7 @@ diff -aurN linux-5.4/include/net/tcp.h mptcp-mptcp_trunk/include/net/tcp.h const u8 *tcp_parse_md5sig_option(const struct tcphdr *th); /* -@@ -430,6 +549,7 @@ +@@ -430,6 +548,7 @@ void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb); void tcp_v4_mtu_reduced(struct sock *sk); @@ -2184,7 +2245,7 @@ diff -aurN linux-5.4/include/net/tcp.h mptcp-mptcp_trunk/include/net/tcp.h void tcp_req_err(struct sock *sk, u32 seq, bool abort); int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb); struct sock *tcp_create_openreq_child(const struct sock *sk, -@@ -453,6 +573,7 @@ +@@ -453,6 +572,7 @@ struct request_sock *req, struct tcp_fastopen_cookie *foc, enum tcp_synack_type synack_type); @@ -2192,6 +2253,14 @@ diff -aurN linux-5.4/include/net/tcp.h mptcp-mptcp_trunk/include/net/tcp.h int tcp_disconnect(struct sock *sk, int flags); void tcp_finish_connect(struct sock *sk, struct sk_buff *skb); +@@ -462,6 +582,7 @@ + /* From syncookies.c */ + struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb, + struct request_sock *req, ++ const struct mptcp_options_received *mopt, + struct dst_entry *dst, u32 tsoff); + int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th, + u32 cookie); @@ -536,7 +657,8 @@ u32 
__cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th, @@ -2295,7 +2364,7 @@ diff -aurN linux-5.4/include/net/tcp.h mptcp-mptcp_trunk/include/net/tcp.h /* Note: caller must be prepared to deal with negative returns */ static inline int tcp_space(const struct sock *sk) { -@@ -1949,6 +2107,31 @@ +@@ -1949,6 +2107,30 @@ #endif }; @@ -2318,7 +2387,6 @@ diff -aurN linux-5.4/include/net/tcp.h mptcp-mptcp_trunk/include/net/tcp.h + void (*retransmit_timer)(struct sock *sk); + void (*time_wait)(struct sock *sk, int state, int timeo); + void (*cleanup_rbuf)(struct sock *sk, int copied); -+ void (*cwnd_validate)(struct sock *sk, bool is_cwnd_limited); + int (*set_cong_ctrl)(struct sock *sk, const char *name, bool load, + bool reinit, bool cap_net_admin); +}; @@ -2327,7 +2395,7 @@ diff -aurN linux-5.4/include/net/tcp.h mptcp-mptcp_trunk/include/net/tcp.h struct tcp_request_sock_ops { u16 mss_clamp; #ifdef CONFIG_TCP_MD5SIG -@@ -1959,12 +2142,13 @@ +@@ -1959,12 +2141,13 @@ const struct sock *sk, const struct sk_buff *skb); #endif @@ -2346,7 +2414,7 @@ diff -aurN linux-5.4/include/net/tcp.h mptcp-mptcp_trunk/include/net/tcp.h #endif struct dst_entry *(*route_req)(const struct sock *sk, struct flowi *fl, const struct request_sock *req); -@@ -1978,15 +2162,17 @@ +@@ -1978,15 +2161,17 @@ #ifdef CONFIG_SYN_COOKIES static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops, @@ -2367,7 +2435,7 @@ diff -aurN linux-5.4/include/net/tcp.h mptcp-mptcp_trunk/include/net/tcp.h { diff -aurN linux-5.4/include/net/tcp_states.h mptcp-mptcp_trunk/include/net/tcp_states.h --- linux-5.4/include/net/tcp_states.h 2019-11-25 01:32:01.000000000 +0100 -+++ mptcp-mptcp_trunk/include/net/tcp_states.h 2020-02-20 18:07:47.000000000 +0100 ++++ mptcp-mptcp_trunk/include/net/tcp_states.h 2020-06-09 17:33:41.000000000 +0200 @@ -22,6 +22,7 @@ TCP_LISTEN, TCP_CLOSING, /* Now a valid state */ @@ -2386,7 +2454,7 @@ diff -aurN linux-5.4/include/net/tcp_states.h mptcp-mptcp_trunk/include/net/tcp_ #endif /* _LINUX_TCP_STATES_H */ diff -aurN linux-5.4/include/net/transp_v6.h mptcp-mptcp_trunk/include/net/transp_v6.h --- linux-5.4/include/net/transp_v6.h 2019-11-25 01:32:01.000000000 +0100 -+++ mptcp-mptcp_trunk/include/net/transp_v6.h 2020-02-20 18:07:47.000000000 +0100 ++++ mptcp-mptcp_trunk/include/net/transp_v6.h 2020-06-09 17:33:41.000000000 +0200 @@ -58,6 +58,8 @@ /* address family specific functions */ @@ -2398,7 +2466,7 @@ diff -aurN linux-5.4/include/net/transp_v6.h mptcp-mptcp_trunk/include/net/trans diff -aurN linux-5.4/include/trace/events/tcp.h mptcp-mptcp_trunk/include/trace/events/tcp.h --- linux-5.4/include/trace/events/tcp.h 2019-11-25 01:32:01.000000000 +0100 -+++ mptcp-mptcp_trunk/include/trace/events/tcp.h 2020-02-20 18:07:47.000000000 +0100 ++++ mptcp-mptcp_trunk/include/trace/events/tcp.h 2020-06-09 17:33:41.000000000 +0200 @@ -10,6 +10,7 @@ #include #include @@ -2449,7 +2517,7 @@ diff -aurN linux-5.4/include/trace/events/tcp.h mptcp-mptcp_trunk/include/trace/ #endif /* _TRACE_TCP_H */ diff -aurN linux-5.4/include/uapi/linux/bpf.h mptcp-mptcp_trunk/include/uapi/linux/bpf.h --- linux-5.4/include/uapi/linux/bpf.h 2019-11-25 01:32:01.000000000 +0100 -+++ mptcp-mptcp_trunk/include/uapi/linux/bpf.h 2020-02-20 18:07:47.000000000 +0100 ++++ mptcp-mptcp_trunk/include/uapi/linux/bpf.h 2020-06-09 17:33:41.000000000 +0200 @@ -3438,6 +3438,7 @@ BPF_TCP_LISTEN, BPF_TCP_CLOSING, /* Now a valid state */ @@ -2460,7 +2528,7 @@ diff -aurN linux-5.4/include/uapi/linux/bpf.h 
mptcp-mptcp_trunk/include/uapi/lin }; diff -aurN linux-5.4/include/uapi/linux/if.h mptcp-mptcp_trunk/include/uapi/linux/if.h --- linux-5.4/include/uapi/linux/if.h 2019-11-25 01:32:01.000000000 +0100 -+++ mptcp-mptcp_trunk/include/uapi/linux/if.h 2020-02-20 18:07:47.000000000 +0100 ++++ mptcp-mptcp_trunk/include/uapi/linux/if.h 2020-06-09 17:33:41.000000000 +0200 @@ -132,6 +132,9 @@ #define IFF_ECHO IFF_ECHO #endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO */ @@ -2473,7 +2541,7 @@ diff -aurN linux-5.4/include/uapi/linux/if.h mptcp-mptcp_trunk/include/uapi/linu diff -aurN linux-5.4/include/uapi/linux/mptcp.h mptcp-mptcp_trunk/include/uapi/linux/mptcp.h --- linux-5.4/include/uapi/linux/mptcp.h 1970-01-01 01:00:00.000000000 +0100 -+++ mptcp-mptcp_trunk/include/uapi/linux/mptcp.h 2020-02-20 18:07:47.000000000 +0100 ++++ mptcp-mptcp_trunk/include/uapi/linux/mptcp.h 2020-06-09 17:33:41.000000000 +0200 @@ -0,0 +1,149 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* @@ -2626,7 +2694,7 @@ diff -aurN linux-5.4/include/uapi/linux/mptcp.h mptcp-mptcp_trunk/include/uapi/l +#endif /* _LINUX_MPTCP_H */ diff -aurN linux-5.4/include/uapi/linux/tcp.h mptcp-mptcp_trunk/include/uapi/linux/tcp.h --- linux-5.4/include/uapi/linux/tcp.h 2019-11-25 01:32:01.000000000 +0100 -+++ mptcp-mptcp_trunk/include/uapi/linux/tcp.h 2020-02-20 18:07:47.000000000 +0100 ++++ mptcp-mptcp_trunk/include/uapi/linux/tcp.h 2020-06-09 17:33:41.000000000 +0200 @@ -18,9 +18,15 @@ #ifndef _UAPI_LINUX_TCP_H #define _UAPI_LINUX_TCP_H @@ -2714,7 +2782,7 @@ diff -aurN linux-5.4/include/uapi/linux/tcp.h mptcp-mptcp_trunk/include/uapi/lin diff -aurN linux-5.4/net/core/dev.c mptcp-mptcp_trunk/net/core/dev.c --- linux-5.4/net/core/dev.c 2019-11-25 01:32:01.000000000 +0100 -+++ mptcp-mptcp_trunk/net/core/dev.c 2020-02-20 18:07:47.000000000 +0100 ++++ mptcp-mptcp_trunk/net/core/dev.c 2020-06-09 17:33:41.000000000 +0200 @@ -7855,7 +7855,7 @@ dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP | @@ -2726,7 +2794,7 @@ diff -aurN linux-5.4/net/core/dev.c mptcp-mptcp_trunk/net/core/dev.c diff -aurN linux-5.4/net/core/net-traces.c mptcp-mptcp_trunk/net/core/net-traces.c --- linux-5.4/net/core/net-traces.c 2019-11-25 01:32:01.000000000 +0100 -+++ mptcp-mptcp_trunk/net/core/net-traces.c 2020-02-20 18:07:47.000000000 +0100 ++++ mptcp-mptcp_trunk/net/core/net-traces.c 2020-06-09 17:33:41.000000000 +0200 @@ -60,3 +60,5 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(napi_poll); @@ -2735,7 +2803,7 @@ diff -aurN linux-5.4/net/core/net-traces.c mptcp-mptcp_trunk/net/core/net-traces +EXPORT_TRACEPOINT_SYMBOL_GPL(mptcp_retransmit); diff -aurN linux-5.4/net/core/skbuff.c mptcp-mptcp_trunk/net/core/skbuff.c --- linux-5.4/net/core/skbuff.c 2019-11-25 01:32:01.000000000 +0100 -+++ mptcp-mptcp_trunk/net/core/skbuff.c 2020-02-20 18:07:47.000000000 +0100 ++++ mptcp-mptcp_trunk/net/core/skbuff.c 2020-06-09 17:33:41.000000000 +0200 @@ -573,7 +573,7 @@ skb_drop_list(&skb_shinfo(skb)->frag_list); } @@ -2747,7 +2815,7 @@ diff -aurN linux-5.4/net/core/skbuff.c mptcp-mptcp_trunk/net/core/skbuff.c diff -aurN linux-5.4/net/core/sock.c mptcp-mptcp_trunk/net/core/sock.c --- linux-5.4/net/core/sock.c 2019-11-25 01:32:01.000000000 +0100 -+++ mptcp-mptcp_trunk/net/core/sock.c 2020-02-20 18:07:47.000000000 +0100 ++++ mptcp-mptcp_trunk/net/core/sock.c 2020-06-09 17:33:41.000000000 +0200 @@ -135,6 +135,11 @@ #include @@ -2799,20 +2867,17 @@ diff -aurN linux-5.4/net/core/sock.c mptcp-mptcp_trunk/net/core/sock.c } else sk = kmalloc(prot->obj_size, priority); 
-@@ -1832,9 +1858,10 @@ +@@ -1832,6 +1858,7 @@ atomic_set(&newsk->sk_zckey, 0); sock_reset_flag(newsk, SOCK_DONE); + sock_reset_flag(newsk, SOCK_MPTCP); /* sk->sk_memcg will be populated at accept() time */ - newsk->sk_memcg = NULL; - - cgroup_sk_alloc(&newsk->sk_cgrp_data); diff -aurN linux-5.4/net/ipv4/af_inet.c mptcp-mptcp_trunk/net/ipv4/af_inet.c --- linux-5.4/net/ipv4/af_inet.c 2019-11-25 01:32:01.000000000 +0100 -+++ mptcp-mptcp_trunk/net/ipv4/af_inet.c 2020-02-20 18:07:47.000000000 +0100 ++++ mptcp-mptcp_trunk/net/ipv4/af_inet.c 2020-06-09 17:33:41.000000000 +0200 @@ -100,6 +100,7 @@ #include #include @@ -2878,7 +2943,7 @@ diff -aurN linux-5.4/net/ipv4/af_inet.c mptcp-mptcp_trunk/net/ipv4/af_inet.c diff -aurN linux-5.4/net/ipv4/inet_connection_sock.c mptcp-mptcp_trunk/net/ipv4/inet_connection_sock.c --- linux-5.4/net/ipv4/inet_connection_sock.c 2019-11-25 01:32:01.000000000 +0100 -+++ mptcp-mptcp_trunk/net/ipv4/inet_connection_sock.c 2020-02-20 18:07:47.000000000 +0100 ++++ mptcp-mptcp_trunk/net/ipv4/inet_connection_sock.c 2020-06-09 17:33:41.000000000 +0200 @@ -19,6 +19,7 @@ #include #include @@ -2938,7 +3003,7 @@ diff -aurN linux-5.4/net/ipv4/inet_connection_sock.c mptcp-mptcp_trunk/net/ipv4/ cond_resched(); diff -aurN linux-5.4/net/ipv4/ip_sockglue.c mptcp-mptcp_trunk/net/ipv4/ip_sockglue.c --- linux-5.4/net/ipv4/ip_sockglue.c 2019-11-25 01:32:01.000000000 +0100 -+++ mptcp-mptcp_trunk/net/ipv4/ip_sockglue.c 2020-02-20 18:07:47.000000000 +0100 ++++ mptcp-mptcp_trunk/net/ipv4/ip_sockglue.c 2020-06-09 17:33:41.000000000 +0200 @@ -44,6 +44,8 @@ #endif #include @@ -2980,7 +3045,7 @@ diff -aurN linux-5.4/net/ipv4/ip_sockglue.c mptcp-mptcp_trunk/net/ipv4/ip_sockgl case IP_TTL: diff -aurN linux-5.4/net/ipv4/Kconfig mptcp-mptcp_trunk/net/ipv4/Kconfig --- linux-5.4/net/ipv4/Kconfig 2019-11-25 01:32:01.000000000 +0100 -+++ mptcp-mptcp_trunk/net/ipv4/Kconfig 2020-02-20 18:07:47.000000000 +0100 ++++ mptcp-mptcp_trunk/net/ipv4/Kconfig 2020-06-09 17:33:41.000000000 +0200 @@ -654,6 +654,51 @@ bufferbloat, policers, or AQM schemes that do not provide a delay signal. It requires the fq ("Fair Queue") pacing packet scheduler. 
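Note: the net/ipv4/Kconfig hunk above grows the block right after the BBR help text from 6 to 51 lines. The added entries are not shown in this excerpt, but they are presumably the MPTCP coupled congestion-control options that the top-level config in this diff already enables (CONFIG_KERNEL_TCP_CONG_BALIA=y, for instance). On the built image these are tuned at runtime; net.ipv4.tcp_congestion_control is standard Linux, while the net.mptcp.* names and the module names below are assumed from the multipath-tcp.org kernel and are not visible in this excerpt:

    # illustrative runtime tuning on the target (names assumed, see above):
    sysctl -w net.ipv4.tcp_congestion_control=balia
    sysctl -w net.mptcp.mptcp_path_manager=fullmesh
    sysctl -w net.mptcp.mptcp_scheduler=default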
@@ -3068,7 +3133,7 @@ diff -aurN linux-5.4/net/ipv4/Kconfig mptcp-mptcp_trunk/net/ipv4/Kconfig default "cdg" if DEFAULT_CDG diff -aurN linux-5.4/net/ipv4/syncookies.c mptcp-mptcp_trunk/net/ipv4/syncookies.c --- linux-5.4/net/ipv4/syncookies.c 2019-11-25 01:32:01.000000000 +0100 -+++ mptcp-mptcp_trunk/net/ipv4/syncookies.c 2020-02-20 18:07:47.000000000 +0100 ++++ mptcp-mptcp_trunk/net/ipv4/syncookies.c 2020-06-09 17:33:41.000000000 +0200 @@ -12,6 +12,8 @@ #include #include @@ -3088,7 +3153,13 @@ diff -aurN linux-5.4/net/ipv4/syncookies.c mptcp-mptcp_trunk/net/ipv4/syncookies { const struct iphdr *iph = ip_hdr(skb); const struct tcphdr *th = tcp_hdr(skb); -@@ -205,9 +208,27 @@ +@@ -200,14 +203,33 @@ + + struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb, + struct request_sock *req, ++ const struct mptcp_options_received *mopt, + struct dst_entry *dst, u32 tsoff) + { struct inet_connection_sock *icsk = inet_csk(sk); struct sock *child; bool own_req; @@ -3103,7 +3174,7 @@ diff -aurN linux-5.4/net/ipv4/syncookies.c mptcp-mptcp_trunk/net/ipv4/syncookies + if (!child) + goto listen_overflow; + -+ ret = mptcp_check_req_master(sk, child, req, skb, 0, tsoff); ++ ret = mptcp_check_req_master(sk, child, req, skb, mopt, 0, tsoff); + if (ret < 0) + return NULL; + @@ -3116,7 +3187,7 @@ diff -aurN linux-5.4/net/ipv4/syncookies.c mptcp-mptcp_trunk/net/ipv4/syncookies if (child) { refcount_set(&req->rsk_refcnt, 1); tcp_sk(child)->tsoffset = tsoff; -@@ -284,6 +305,7 @@ +@@ -284,6 +306,7 @@ { struct ip_options *opt = &TCP_SKB_CB(skb)->header.h4.opt; struct tcp_options_received tcp_opt; @@ -3124,7 +3195,7 @@ diff -aurN linux-5.4/net/ipv4/syncookies.c mptcp-mptcp_trunk/net/ipv4/syncookies struct inet_request_sock *ireq; struct tcp_request_sock *treq; struct tcp_sock *tp = tcp_sk(sk); -@@ -313,7 +335,8 @@ +@@ -313,7 +336,8 @@ /* check for timestamp cookie support */ memset(&tcp_opt, 0, sizeof(tcp_opt)); @@ -3134,7 +3205,7 @@ diff -aurN linux-5.4/net/ipv4/syncookies.c mptcp-mptcp_trunk/net/ipv4/syncookies if (tcp_opt.saw_tstamp && tcp_opt.rcv_tsecr) { tsoff = secure_tcp_ts_off(sock_net(sk), -@@ -326,7 +349,12 @@ +@@ -326,7 +350,12 @@ goto out; ret = NULL; @@ -3148,7 +3219,7 @@ diff -aurN linux-5.4/net/ipv4/syncookies.c mptcp-mptcp_trunk/net/ipv4/syncookies if (!req) goto out; -@@ -346,6 +374,8 @@ +@@ -346,6 +375,8 @@ ireq->sack_ok = tcp_opt.sack_ok; ireq->wscale_ok = tcp_opt.wscale_ok; ireq->tstamp_ok = tcp_opt.saw_tstamp; @@ -3157,7 +3228,7 @@ diff -aurN linux-5.4/net/ipv4/syncookies.c mptcp-mptcp_trunk/net/ipv4/syncookies req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0; treq->snt_synack = 0; treq->tfo_listener = false; -@@ -354,6 +384,9 @@ +@@ -354,6 +385,9 @@ ireq->ir_iif = inet_request_bound_dev_if(sk, skb); @@ -3167,7 +3238,7 @@ diff -aurN linux-5.4/net/ipv4/syncookies.c mptcp-mptcp_trunk/net/ipv4/syncookies /* We throwed the options of the initial SYN away, so we hope * the ACK carries the same options again (see RFC1122 4.2.3.8) */ -@@ -387,10 +420,10 @@ +@@ -387,15 +421,15 @@ /* Try to redo what tcp_v4_send_synack did. */ req->rsk_window_clamp = tp->window_clamp ? 
:dst_metric(&rt->dst, RTAX_WINDOW); @@ -3182,9 +3253,15 @@ diff -aurN linux-5.4/net/ipv4/syncookies.c mptcp-mptcp_trunk/net/ipv4/syncookies ireq->rcv_wscale = rcv_wscale; ireq->ecn_ok = cookie_ecn_ok(&tcp_opt, sock_net(sk), &rt->dst); + +- ret = tcp_get_cookie_sock(sk, skb, req, &rt->dst, tsoff); ++ ret = tcp_get_cookie_sock(sk, skb, req, &mopt, &rt->dst, tsoff); + /* ip_queue_xmit() depends on our flow being setup + * Normal sockets get it right from inet_csk_route_child_sock() + */ diff -aurN linux-5.4/net/ipv4/tcp.c mptcp-mptcp_trunk/net/ipv4/tcp.c --- linux-5.4/net/ipv4/tcp.c 2019-11-25 01:32:01.000000000 +0100 -+++ mptcp-mptcp_trunk/net/ipv4/tcp.c 2020-02-20 18:07:47.000000000 +0100 ++++ mptcp-mptcp_trunk/net/ipv4/tcp.c 2020-06-09 17:33:41.000000000 +0200 @@ -270,6 +270,7 @@ #include @@ -3193,7 +3270,7 @@ diff -aurN linux-5.4/net/ipv4/tcp.c mptcp-mptcp_trunk/net/ipv4/tcp.c #include #include #include -@@ -400,6 +401,24 @@ +@@ -400,6 +401,23 @@ return rate64; } @@ -3211,14 +3288,13 @@ diff -aurN linux-5.4/net/ipv4/tcp.c mptcp-mptcp_trunk/net/ipv4/tcp.c + .retransmit_timer = tcp_retransmit_timer, + .time_wait = tcp_time_wait, + .cleanup_rbuf = tcp_cleanup_rbuf, -+ .cwnd_validate = tcp_cwnd_validate, + .set_cong_ctrl = __tcp_set_congestion_control, +}; + /* Address-family independent initialization for a tcp_sock. * * NOTE: A lot of things set to zero explicitly by call to -@@ -453,6 +472,11 @@ +@@ -453,6 +471,11 @@ WRITE_ONCE(sk->sk_sndbuf, sock_net(sk)->ipv4.sysctl_tcp_wmem[1]); WRITE_ONCE(sk->sk_rcvbuf, sock_net(sk)->ipv4.sysctl_tcp_rmem[1]); @@ -3230,7 +3306,7 @@ diff -aurN linux-5.4/net/ipv4/tcp.c mptcp-mptcp_trunk/net/ipv4/tcp.c sk_sockets_allocated_inc(sk); sk->sk_route_forced_caps = NETIF_F_GSO; } -@@ -777,6 +801,7 @@ +@@ -777,6 +800,7 @@ int ret; sock_rps_record_flow(sk); @@ -3238,7 +3314,7 @@ diff -aurN linux-5.4/net/ipv4/tcp.c mptcp-mptcp_trunk/net/ipv4/tcp.c /* * We can't seek on a socket input */ -@@ -787,6 +812,16 @@ +@@ -787,6 +811,16 @@ lock_sock(sk); @@ -3255,7 +3331,7 @@ diff -aurN linux-5.4/net/ipv4/tcp.c mptcp-mptcp_trunk/net/ipv4/tcp.c timeo = sock_rcvtimeo(sk, sock->file->f_flags & O_NONBLOCK); while (tss.len) { ret = __tcp_splice_read(sk, &tss); -@@ -902,8 +937,7 @@ +@@ -902,8 +936,7 @@ return NULL; } @@ -3265,7 +3341,7 @@ diff -aurN linux-5.4/net/ipv4/tcp.c mptcp-mptcp_trunk/net/ipv4/tcp.c { struct tcp_sock *tp = tcp_sk(sk); u32 new_size_goal, size_goal; -@@ -931,8 +965,13 @@ +@@ -931,8 +964,13 @@ { int mss_now; @@ -3281,7 +3357,7 @@ diff -aurN linux-5.4/net/ipv4/tcp.c mptcp-mptcp_trunk/net/ipv4/tcp.c return mss_now; } -@@ -971,12 +1010,34 @@ +@@ -971,12 +1009,34 @@ * is fully established. */ if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) && @@ -3317,7 +3393,7 @@ diff -aurN linux-5.4/net/ipv4/tcp.c mptcp-mptcp_trunk/net/ipv4/tcp.c sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); mss_now = tcp_send_mss(sk, &size_goal, flags); -@@ -1099,7 +1160,8 @@ +@@ -1099,7 +1159,8 @@ int tcp_sendpage_locked(struct sock *sk, struct page *page, int offset, size_t size, int flags) { @@ -3327,7 +3403,7 @@ diff -aurN linux-5.4/net/ipv4/tcp.c mptcp-mptcp_trunk/net/ipv4/tcp.c return sock_no_sendpage_locked(sk, page, offset, size, flags); tcp_rate_check_app_limited(sk); /* is sending application-limited? */ -@@ -1221,12 +1283,21 @@ +@@ -1221,12 +1282,21 @@ * is fully established. 
*/ if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) && @@ -3350,7 +3426,7 @@ diff -aurN linux-5.4/net/ipv4/tcp.c mptcp-mptcp_trunk/net/ipv4/tcp.c if (unlikely(tp->repair)) { if (tp->repair_queue == TCP_RECV_QUEUE) { copied = tcp_send_rcvq(sk, msg, size); -@@ -1520,7 +1591,7 @@ +@@ -1520,7 +1590,7 @@ * calculation of whether or not we must ACK for the sake of * a window update. */ @@ -3359,7 +3435,7 @@ diff -aurN linux-5.4/net/ipv4/tcp.c mptcp-mptcp_trunk/net/ipv4/tcp.c { struct tcp_sock *tp = tcp_sk(sk); bool time_to_ack = false; -@@ -1563,7 +1634,7 @@ +@@ -1563,7 +1633,7 @@ /* Optimize, __tcp_select_window() is not cheap. */ if (2*rcv_window_now <= tp->window_clamp) { @@ -3368,7 +3444,7 @@ diff -aurN linux-5.4/net/ipv4/tcp.c mptcp-mptcp_trunk/net/ipv4/tcp.c /* Send ACK now, if this read freed lots of space * in our buffer. Certainly, new_window is new window. -@@ -1679,7 +1750,7 @@ +@@ -1679,7 +1749,7 @@ /* Clean up data we have read: This will do ACK frames. */ if (copied > 0) { tcp_recv_skb(sk, seq, &offset); @@ -3377,7 +3453,7 @@ diff -aurN linux-5.4/net/ipv4/tcp.c mptcp-mptcp_trunk/net/ipv4/tcp.c } return copied; } -@@ -1970,6 +2041,16 @@ +@@ -1970,6 +2040,16 @@ lock_sock(sk); @@ -3394,7 +3470,7 @@ diff -aurN linux-5.4/net/ipv4/tcp.c mptcp-mptcp_trunk/net/ipv4/tcp.c err = -ENOTCONN; if (sk->sk_state == TCP_LISTEN) goto out; -@@ -2088,7 +2169,7 @@ +@@ -2088,7 +2168,7 @@ } } @@ -3403,7 +3479,7 @@ diff -aurN linux-5.4/net/ipv4/tcp.c mptcp-mptcp_trunk/net/ipv4/tcp.c if (copied >= target) { /* Do not sleep, just process backlog. */ -@@ -2179,7 +2260,7 @@ +@@ -2179,7 +2259,7 @@ */ /* Clean up data we have read: This will do ACK frames. */ @@ -3412,7 +3488,7 @@ diff -aurN linux-5.4/net/ipv4/tcp.c mptcp-mptcp_trunk/net/ipv4/tcp.c release_sock(sk); -@@ -2287,7 +2368,7 @@ +@@ -2287,7 +2367,7 @@ [TCP_NEW_SYN_RECV] = TCP_CLOSE, /* should not happen ! */ }; @@ -3421,7 +3497,7 @@ diff -aurN linux-5.4/net/ipv4/tcp.c mptcp-mptcp_trunk/net/ipv4/tcp.c { int next = (int)new_state[sk->sk_state]; int ns = next & TCP_STATE_MASK; -@@ -2317,7 +2398,7 @@ +@@ -2317,7 +2397,7 @@ TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) { /* Clear out any half completed packets. FIN if needed. */ if (tcp_close_state(sk)) @@ -3430,7 +3506,7 @@ diff -aurN linux-5.4/net/ipv4/tcp.c mptcp-mptcp_trunk/net/ipv4/tcp.c } } EXPORT_SYMBOL(tcp_shutdown); -@@ -2342,6 +2423,17 @@ +@@ -2342,6 +2422,17 @@ int data_was_unread = 0; int state; @@ -3448,7 +3524,7 @@ diff -aurN linux-5.4/net/ipv4/tcp.c mptcp-mptcp_trunk/net/ipv4/tcp.c lock_sock(sk); sk->sk_shutdown = SHUTDOWN_MASK; -@@ -2386,7 +2478,7 @@ +@@ -2386,7 +2477,7 @@ /* Unread data was tossed, zap the connection. */ NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE); tcp_set_state(sk, TCP_CLOSE); @@ -3457,7 +3533,7 @@ diff -aurN linux-5.4/net/ipv4/tcp.c mptcp-mptcp_trunk/net/ipv4/tcp.c } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) { /* Check zero linger _after_ checking for unread data. 
*/ sk->sk_prot->disconnect(sk, 0); -@@ -2460,7 +2552,7 @@ +@@ -2460,7 +2551,7 @@ struct tcp_sock *tp = tcp_sk(sk); if (tp->linger2 < 0) { tcp_set_state(sk, TCP_CLOSE); @@ -3466,7 +3542,7 @@ diff -aurN linux-5.4/net/ipv4/tcp.c mptcp-mptcp_trunk/net/ipv4/tcp.c __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONLINGER); } else { -@@ -2470,7 +2562,8 @@ +@@ -2470,7 +2561,8 @@ inet_csk_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN); } else { @@ -3476,7 +3552,7 @@ diff -aurN linux-5.4/net/ipv4/tcp.c mptcp-mptcp_trunk/net/ipv4/tcp.c goto out; } } -@@ -2479,7 +2572,7 @@ +@@ -2479,7 +2571,7 @@ sk_mem_reclaim(sk); if (tcp_check_oom(sk, 0)) { tcp_set_state(sk, TCP_CLOSE); @@ -3485,7 +3561,7 @@ diff -aurN linux-5.4/net/ipv4/tcp.c mptcp-mptcp_trunk/net/ipv4/tcp.c __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY); } else if (!check_net(sock_net(sk))) { -@@ -2511,15 +2604,6 @@ +@@ -2511,15 +2603,6 @@ } EXPORT_SYMBOL(tcp_close); @@ -3501,7 +3577,7 @@ diff -aurN linux-5.4/net/ipv4/tcp.c mptcp-mptcp_trunk/net/ipv4/tcp.c static void tcp_rtx_queue_purge(struct sock *sk) { struct rb_node *p = rb_first(&sk->tcp_rtx_queue); -@@ -2540,6 +2624,10 @@ +@@ -2540,6 +2623,10 @@ { struct sk_buff *skb; @@ -3512,7 +3588,7 @@ diff -aurN linux-5.4/net/ipv4/tcp.c mptcp-mptcp_trunk/net/ipv4/tcp.c tcp_chrono_stop(sk, TCP_CHRONO_BUSY); while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) { tcp_skb_tsorted_anchor_cleanup(skb); -@@ -2558,6 +2646,29 @@ +@@ -2558,6 +2645,29 @@ inet_csk(sk)->icsk_backoff = 0; } @@ -3542,7 +3618,7 @@ diff -aurN linux-5.4/net/ipv4/tcp.c mptcp-mptcp_trunk/net/ipv4/tcp.c int tcp_disconnect(struct sock *sk, int flags) { struct inet_sock *inet = inet_sk(sk); -@@ -2580,7 +2691,7 @@ +@@ -2580,7 +2690,7 @@ /* The last check adjusts for discrepancy of Linux wrt. 
RFC * states */ @@ -3551,7 +3627,7 @@ diff -aurN linux-5.4/net/ipv4/tcp.c mptcp-mptcp_trunk/net/ipv4/tcp.c sk->sk_err = ECONNRESET; } else if (old_state == TCP_SYN_SENT) sk->sk_err = ECONNRESET; -@@ -2602,11 +2713,15 @@ +@@ -2602,11 +2712,15 @@ if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) inet_reset_saddr(sk); @@ -3570,7 +3646,7 @@ diff -aurN linux-5.4/net/ipv4/tcp.c mptcp-mptcp_trunk/net/ipv4/tcp.c seq = tp->write_seq + tp->max_window + 2; if (!seq) -@@ -2616,17 +2731,11 @@ +@@ -2616,17 +2730,11 @@ icsk->icsk_backoff = 0; tp->snd_cwnd = 2; icsk->icsk_probes_out = 0; @@ -3591,7 +3667,7 @@ diff -aurN linux-5.4/net/ipv4/tcp.c mptcp-mptcp_trunk/net/ipv4/tcp.c inet_csk_delack_init(sk); /* Initialize rcv_mss to TCP_MIN_MSS to avoid division by 0 * issue in __tcp_select_window() -@@ -2636,14 +2747,10 @@ +@@ -2636,14 +2746,10 @@ sk->sk_rx_dst = NULL; tcp_saved_syn_free(tp); tp->compressed_ack = 0; @@ -3601,12 +3677,12 @@ diff -aurN linux-5.4/net/ipv4/tcp.c mptcp-mptcp_trunk/net/ipv4/tcp.c - tp->bytes_acked = 0; - tp->bytes_received = 0; - tp->bytes_retrans = 0; - tp->data_segs_in = 0; - tp->data_segs_out = 0; + tp->data_segs_in = 0; + tp->data_segs_out = 0; tp->duplicate_sack[0].start_seq = 0; tp->duplicate_sack[0].end_seq = 0; tp->dsack_dups = 0; -@@ -2648,8 +2755,6 @@ +@@ -2648,8 +2754,6 @@ tp->sacked_out = 0; tp->tlp_high_seq = 0; tp->last_oow_ack_time = 0; @@ -3615,7 +3691,7 @@ diff -aurN linux-5.4/net/ipv4/tcp.c mptcp-mptcp_trunk/net/ipv4/tcp.c tp->rack.mstamp = 0; tp->rack.advanced = 0; tp->rack.reo_wnd_steps = 1; -@@ -2683,7 +2788,7 @@ +@@ -2683,7 +2787,7 @@ static inline bool tcp_can_repair_sock(const struct sock *sk) { return ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN) && @@ -3624,7 +3700,7 @@ diff -aurN linux-5.4/net/ipv4/tcp.c mptcp-mptcp_trunk/net/ipv4/tcp.c } static int tcp_repair_set_window(struct tcp_sock *tp, char __user *optbuf, int len) -@@ -2852,6 +2957,61 @@ +@@ -2852,6 +2956,61 @@ return tcp_fastopen_reset_cipher(net, sk, key, backup_key); } @@ -3686,7 +3762,7 @@ diff -aurN linux-5.4/net/ipv4/tcp.c mptcp-mptcp_trunk/net/ipv4/tcp.c default: /* fallthru */ break; -@@ -3032,6 +3192,12 @@ +@@ -3032,6 +3191,12 @@ break; case TCP_DEFER_ACCEPT: @@ -3699,7 +3775,7 @@ diff -aurN linux-5.4/net/ipv4/tcp.c mptcp-mptcp_trunk/net/ipv4/tcp.c /* Translate value in seconds to number of retransmits */ icsk->icsk_accept_queue.rskq_defer_accept = secs_to_retrans(val, TCP_TIMEOUT_INIT / HZ, -@@ -3059,7 +3225,7 @@ +@@ -3059,7 +3224,7 @@ (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) && inet_csk_ack_scheduled(sk)) { icsk->icsk_ack.pending |= ICSK_ACK_PUSHED; @@ -3708,7 +3784,7 @@ diff -aurN linux-5.4/net/ipv4/tcp.c mptcp-mptcp_trunk/net/ipv4/tcp.c if (!(val & 1)) inet_csk_enter_pingpong_mode(sk); } -@@ -3069,7 +3235,7 @@ +@@ -3069,7 +3234,7 @@ #ifdef CONFIG_TCP_MD5SIG case TCP_MD5SIG: case TCP_MD5SIG_EXT: @@ -3717,7 +3793,7 @@ diff -aurN linux-5.4/net/ipv4/tcp.c mptcp-mptcp_trunk/net/ipv4/tcp.c err = tp->af_specific->md5_parse(sk, optname, optval, optlen); else err = -EINVAL; -@@ -3128,6 +3294,32 @@ +@@ -3128,6 +3293,32 @@ tp->notsent_lowat = val; sk->sk_write_space(sk); break; @@ -3750,7 +3826,7 @@ diff -aurN linux-5.4/net/ipv4/tcp.c mptcp-mptcp_trunk/net/ipv4/tcp.c case TCP_INQ: if (val > 1 || val < 0) err = -EINVAL; -@@ -3192,7 +3384,7 @@ +@@ -3192,7 +3383,7 @@ } /* Return information about state of tcp endpoint in API format. 
*/ @@ -3759,7 +3835,7 @@ diff -aurN linux-5.4/net/ipv4/tcp.c mptcp-mptcp_trunk/net/ipv4/tcp.c { const struct tcp_sock *tp = tcp_sk(sk); /* iff sk_type == SOCK_STREAM */ const struct inet_connection_sock *icsk = inet_csk(sk); -@@ -3229,7 +3421,8 @@ +@@ -3229,7 +3420,8 @@ return; } @@ -3769,7 +3845,7 @@ diff -aurN linux-5.4/net/ipv4/tcp.c mptcp-mptcp_trunk/net/ipv4/tcp.c info->tcpi_ca_state = icsk->icsk_ca_state; info->tcpi_retransmits = icsk->icsk_retransmits; -@@ -3305,7 +3498,9 @@ +@@ -3305,7 +3497,9 @@ info->tcpi_reord_seen = tp->reord_seen; info->tcpi_rcv_ooopack = tp->rcv_ooopack; info->tcpi_snd_wnd = tp->snd_wnd; @@ -3780,7 +3856,7 @@ diff -aurN linux-5.4/net/ipv4/tcp.c mptcp-mptcp_trunk/net/ipv4/tcp.c } EXPORT_SYMBOL_GPL(tcp_get_info); -@@ -3452,7 +3647,7 @@ +@@ -3452,7 +3646,7 @@ if (get_user(len, optlen)) return -EFAULT; @@ -3789,7 +3865,7 @@ diff -aurN linux-5.4/net/ipv4/tcp.c mptcp-mptcp_trunk/net/ipv4/tcp.c len = min_t(unsigned int, len, sizeof(info)); if (put_user(len, optlen)) -@@ -3649,6 +3844,87 @@ +@@ -3649,6 +3843,87 @@ } return 0; } @@ -3877,7 +3953,7 @@ diff -aurN linux-5.4/net/ipv4/tcp.c mptcp-mptcp_trunk/net/ipv4/tcp.c #ifdef CONFIG_MMU case TCP_ZEROCOPY_RECEIVE: { struct tcp_zerocopy_receive zc; -@@ -3851,7 +4127,9 @@ +@@ -3851,7 +4126,9 @@ if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV) TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS); @@ -3887,7 +3963,7 @@ diff -aurN linux-5.4/net/ipv4/tcp.c mptcp-mptcp_trunk/net/ipv4/tcp.c tcp_clear_xmit_timers(sk); if (req) reqsk_fastopen_remove(sk, req, false); -@@ -3867,6 +4145,8 @@ +@@ -3867,6 +4144,8 @@ int tcp_abort(struct sock *sk, int err) { @@ -3896,7 +3972,7 @@ diff -aurN linux-5.4/net/ipv4/tcp.c mptcp-mptcp_trunk/net/ipv4/tcp.c if (!sk_fullsock(sk)) { if (sk->sk_state == TCP_NEW_SYN_RECV) { struct request_sock *req = inet_reqsk(sk); -@@ -3880,7 +4160,7 @@ +@@ -3880,7 +4159,7 @@ } /* Don't race with userspace socket closes such as tcp_close. */ @@ -3905,7 +3981,7 @@ diff -aurN linux-5.4/net/ipv4/tcp.c mptcp-mptcp_trunk/net/ipv4/tcp.c if (sk->sk_state == TCP_LISTEN) { tcp_set_state(sk, TCP_CLOSE); -@@ -3889,7 +4169,7 @@ +@@ -3889,7 +4168,7 @@ /* Don't race with BH socket closes such as inet_csk_listen_stop. 
*/ local_bh_disable(); @@ -3914,7 +3990,7 @@ diff -aurN linux-5.4/net/ipv4/tcp.c mptcp-mptcp_trunk/net/ipv4/tcp.c if (!sock_flag(sk, SOCK_DEAD)) { sk->sk_err = err; -@@ -3897,14 +4177,14 @@ +@@ -3897,14 +4176,14 @@ smp_wmb(); sk->sk_error_report(sk); if (tcp_need_reset(sk->sk_state)) @@ -3934,7 +4010,7 @@ diff -aurN linux-5.4/net/ipv4/tcp.c mptcp-mptcp_trunk/net/ipv4/tcp.c EXPORT_SYMBOL_GPL(tcp_abort); diff -aurN linux-5.4/net/ipv4/tcp_cong.c mptcp-mptcp_trunk/net/ipv4/tcp_cong.c --- linux-5.4/net/ipv4/tcp_cong.c 2019-11-25 01:32:01.000000000 +0100 -+++ mptcp-mptcp_trunk/net/ipv4/tcp_cong.c 2020-02-20 18:07:47.000000000 +0100 ++++ mptcp-mptcp_trunk/net/ipv4/tcp_cong.c 2020-06-09 17:33:41.000000000 +0200 @@ -328,13 +328,19 @@ return ret; } @@ -3959,7 +4035,7 @@ diff -aurN linux-5.4/net/ipv4/tcp_cong.c mptcp-mptcp_trunk/net/ipv4/tcp_cong.c const struct tcp_congestion_ops *ca; diff -aurN linux-5.4/net/ipv4/tcp_diag.c mptcp-mptcp_trunk/net/ipv4/tcp_diag.c --- linux-5.4/net/ipv4/tcp_diag.c 2019-11-25 01:32:01.000000000 +0100 -+++ mptcp-mptcp_trunk/net/ipv4/tcp_diag.c 2020-02-20 18:07:47.000000000 +0100 ++++ mptcp-mptcp_trunk/net/ipv4/tcp_diag.c 2020-06-09 17:33:41.000000000 +0200 @@ -31,7 +31,7 @@ r->idiag_wqueue = READ_ONCE(tp->write_seq) - tp->snd_una; } @@ -3971,7 +4047,7 @@ diff -aurN linux-5.4/net/ipv4/tcp_diag.c mptcp-mptcp_trunk/net/ipv4/tcp_diag.c #ifdef CONFIG_TCP_MD5SIG diff -aurN linux-5.4/net/ipv4/tcp_fastopen.c mptcp-mptcp_trunk/net/ipv4/tcp_fastopen.c --- linux-5.4/net/ipv4/tcp_fastopen.c 2019-11-25 01:32:01.000000000 +0100 -+++ mptcp-mptcp_trunk/net/ipv4/tcp_fastopen.c 2020-02-20 18:07:47.000000000 +0100 ++++ mptcp-mptcp_trunk/net/ipv4/tcp_fastopen.c 2020-06-09 17:33:41.000000000 +0200 @@ -9,6 +9,7 @@ #include #include @@ -4032,7 +4108,7 @@ diff -aurN linux-5.4/net/ipv4/tcp_fastopen.c mptcp-mptcp_trunk/net/ipv4/tcp_fast */ diff -aurN linux-5.4/net/ipv4/tcp_input.c mptcp-mptcp_trunk/net/ipv4/tcp_input.c --- linux-5.4/net/ipv4/tcp_input.c 2019-11-25 01:32:01.000000000 +0100 -+++ mptcp-mptcp_trunk/net/ipv4/tcp_input.c 2020-02-20 18:07:47.000000000 +0100 ++++ mptcp-mptcp_trunk/net/ipv4/tcp_input.c 2020-06-09 17:33:41.000000000 +0200 @@ -76,35 +76,15 @@ #include #include @@ -4431,12 +4507,14 @@ diff -aurN linux-5.4/net/ipv4/tcp_input.c mptcp-mptcp_trunk/net/ipv4/tcp_input.c { int eaten; struct sk_buff *tail = skb_peek_tail(&sk->sk_receive_queue); -@@ -4746,7 +4811,7 @@ +@@ -4746,8 +4811,8 @@ const struct tcp_sock *tp = tcp_sk(sk); int avail = tp->rcv_nxt - tp->copied_seq; -- if (avail < sk->sk_rcvlowat && !sock_flag(sk, SOCK_DONE)) -+ if (avail < sk->sk_rcvlowat && !sock_flag(sk, SOCK_DONE) && !mptcp(tp)) +- if (avail < sk->sk_rcvlowat && !tcp_rmem_pressure(sk) && +- !sock_flag(sk, SOCK_DONE)) ++ if (avail < sk->sk_rcvlowat && !tcp_rmem_pressure(sk) && !mptcp(tp) && ++ !sock_flag(sk, SOCK_DONE)) return; sk->sk_data_ready(sk); @@ -4916,7 +4994,7 @@ diff -aurN linux-5.4/net/ipv4/tcp_input.c mptcp-mptcp_trunk/net/ipv4/tcp_input.c tcp_rsk(req)->tfo_listener = false; diff -aurN linux-5.4/net/ipv4/tcp_ipv4.c mptcp-mptcp_trunk/net/ipv4/tcp_ipv4.c --- linux-5.4/net/ipv4/tcp_ipv4.c 2019-11-25 01:32:01.000000000 +0100 -+++ mptcp-mptcp_trunk/net/ipv4/tcp_ipv4.c 2020-02-20 18:07:47.000000000 +0100 ++++ mptcp-mptcp_trunk/net/ipv4/tcp_ipv4.c 2020-06-09 17:33:41.000000000 +0200 @@ -62,6 +62,8 @@ #include #include @@ -5381,7 +5459,7 @@ diff -aurN linux-5.4/net/ipv4/tcp_ipv4.c mptcp-mptcp_trunk/net/ipv4/tcp_ipv4.c diff -aurN linux-5.4/net/ipv4/tcp_minisocks.c mptcp-mptcp_trunk/net/ipv4/tcp_minisocks.c 
--- linux-5.4/net/ipv4/tcp_minisocks.c 2019-11-25 01:32:01.000000000 +0100 -+++ mptcp-mptcp_trunk/net/ipv4/tcp_minisocks.c 2020-02-20 18:07:47.000000000 +0100 ++++ mptcp-mptcp_trunk/net/ipv4/tcp_minisocks.c 2020-06-09 17:33:41.000000000 +0200 @@ -19,11 +19,13 @@ * Jorge Cwik, */ @@ -5564,7 +5642,7 @@ diff -aurN linux-5.4/net/ipv4/tcp_minisocks.c mptcp-mptcp_trunk/net/ipv4/tcp_min goto listen_overflow; + if (own_req && !is_meta_sk(sk)) { -+ int ret = mptcp_check_req_master(sk, child, req, skb, 1, 0); ++ int ret = mptcp_check_req_master(sk, child, req, skb, &mopt, 1, 0); + if (ret < 0) + goto listen_overflow; + @@ -5623,7 +5701,7 @@ diff -aurN linux-5.4/net/ipv4/tcp_minisocks.c mptcp-mptcp_trunk/net/ipv4/tcp_min } diff -aurN linux-5.4/net/ipv4/tcp_output.c mptcp-mptcp_trunk/net/ipv4/tcp_output.c --- linux-5.4/net/ipv4/tcp_output.c 2019-11-25 01:32:01.000000000 +0100 -+++ mptcp-mptcp_trunk/net/ipv4/tcp_output.c 2020-02-20 18:07:47.000000000 +0100 ++++ mptcp-mptcp_trunk/net/ipv4/tcp_output.c 2020-06-09 17:33:41.000000000 +0200 @@ -37,6 +37,12 @@ #define pr_fmt(fmt) "TCP: " fmt @@ -5819,8 +5897,8 @@ diff -aurN linux-5.4/net/ipv4/tcp_output.c mptcp-mptcp_trunk/net/ipv4/tcp_output + if (mptcp(tp)) + tcp_tsq_write(meta_sk); + } else { -+ if (!test_and_set_bit(TCP_TSQ_DEFERRED, &meta_sk->sk_tsq_flags)) -+ sock_hold(meta_sk); ++ if (!test_and_set_bit(TCP_TSQ_DEFERRED, &sk->sk_tsq_flags)) ++ sock_hold(sk); + + if ((mptcp(tp)) && (sk->sk_state != TCP_CLOSE)) + mptcp_tsq_flags(sk); @@ -5944,15 +6022,6 @@ diff -aurN linux-5.4/net/ipv4/tcp_output.c mptcp-mptcp_trunk/net/ipv4/tcp_output /* RFC2861, slow part. Adjust cwnd, after it was not full during one rto. * As additional protections, we do not touch cwnd in retransmission phases, -@@ -1635,7 +1672,7 @@ - tp->snd_cwnd_stamp = tcp_jiffies32; - } - --static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited) -+void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited) - { - const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops; - struct tcp_sock *tp = tcp_sk(sk); @@ -1693,8 +1730,8 @@ * But we can avoid doing the divide again given we already have * skb_pcount = skb->len / mss_now @@ -6088,17 +6157,7 @@ diff -aurN linux-5.4/net/ipv4/tcp_output.c mptcp-mptcp_trunk/net/ipv4/tcp_output /* Do MTU probing. 
*/ result = tcp_mtu_probe(sk); if (!result) { -@@ -2466,7 +2512,8 @@ - if (push_one != 2) - tcp_schedule_loss_probe(sk, false); - is_cwnd_limited |= (tcp_packets_in_flight(tp) >= tp->snd_cwnd); -- tcp_cwnd_validate(sk, is_cwnd_limited); -+ if (tp->ops->cwnd_validate) -+ tp->ops->cwnd_validate(sk, is_cwnd_limited); - return false; - } - return !tp->packets_out && !tcp_write_queue_empty(sk); -@@ -2549,7 +2596,7 @@ +@@ -2549,7 +2595,7 @@ skb = tcp_send_head(sk); if (skb && tcp_snd_wnd_test(tp, skb, mss)) { pcount = tp->packets_out; @@ -6107,7 +6166,7 @@ diff -aurN linux-5.4/net/ipv4/tcp_output.c mptcp-mptcp_trunk/net/ipv4/tcp_output if (tp->packets_out > pcount) goto probe_sent; goto rearm_timer; -@@ -2613,8 +2660,8 @@ +@@ -2613,8 +2659,8 @@ if (unlikely(sk->sk_state == TCP_CLOSE)) return; @@ -6118,7 +6177,7 @@ diff -aurN linux-5.4/net/ipv4/tcp_output.c mptcp-mptcp_trunk/net/ipv4/tcp_output tcp_check_probe_timer(sk); } -@@ -2627,7 +2674,8 @@ +@@ -2627,7 +2673,8 @@ BUG_ON(!skb || skb->len < mss_now); @@ -6128,7 +6187,7 @@ diff -aurN linux-5.4/net/ipv4/tcp_output.c mptcp-mptcp_trunk/net/ipv4/tcp_output } /* This function returns the amount that we can raise the -@@ -2849,6 +2897,10 @@ +@@ -2849,6 +2896,10 @@ if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN) return; @@ -6139,7 +6198,7 @@ diff -aurN linux-5.4/net/ipv4/tcp_output.c mptcp-mptcp_trunk/net/ipv4/tcp_output skb_rbtree_walk_from_safe(skb, tmp) { if (!tcp_can_collapse(sk, skb)) break; -@@ -3325,7 +3377,7 @@ +@@ -3325,7 +3376,7 @@ /* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */ th->window = htons(min(req->rsk_rcv_wnd, 65535U)); @@ -6148,7 +6207,7 @@ diff -aurN linux-5.4/net/ipv4/tcp_output.c mptcp-mptcp_trunk/net/ipv4/tcp_output th->doff = (tcp_header_size >> 2); __TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS); -@@ -3407,13 +3459,13 @@ +@@ -3407,13 +3458,13 @@ if (rcv_wnd == 0) rcv_wnd = dst_metric(dst, RTAX_INITRWND); @@ -6169,7 +6228,7 @@ diff -aurN linux-5.4/net/ipv4/tcp_output.c mptcp-mptcp_trunk/net/ipv4/tcp_output tp->rx_opt.rcv_wscale = rcv_wscale; tp->rcv_ssthresh = tp->rcv_wnd; -@@ -3438,6 +3490,36 @@ +@@ -3438,6 +3489,36 @@ inet_csk(sk)->icsk_rto = tcp_timeout_init(sk); inet_csk(sk)->icsk_retransmits = 0; tcp_clear_retrans(tp); @@ -6206,7 +6265,7 @@ diff -aurN linux-5.4/net/ipv4/tcp_output.c mptcp-mptcp_trunk/net/ipv4/tcp_output } static void tcp_connect_queue_skb(struct sock *sk, struct sk_buff *skb) -@@ -3701,6 +3783,7 @@ +@@ -3701,6 +3782,7 @@ { __tcp_send_ack(sk, tcp_sk(sk)->rcv_nxt); } @@ -6214,7 +6273,7 @@ diff -aurN linux-5.4/net/ipv4/tcp_output.c mptcp-mptcp_trunk/net/ipv4/tcp_output /* This routine sends a packet with an out of date sequence * number. It assumes the other end will try to ack it. -@@ -3713,7 +3796,7 @@ +@@ -3713,7 +3795,7 @@ * one is with SEG.SEQ=SND.UNA to deliver urgent pointer, another is * out-of-date with SND.UNA-1 to probe window. */ @@ -6223,7 +6282,7 @@ diff -aurN linux-5.4/net/ipv4/tcp_output.c mptcp-mptcp_trunk/net/ipv4/tcp_output { struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; -@@ -3800,7 +3883,7 @@ +@@ -3800,7 +3882,7 @@ unsigned long timeout; int err; @@ -6234,7 +6293,7 @@ diff -aurN linux-5.4/net/ipv4/tcp_output.c mptcp-mptcp_trunk/net/ipv4/tcp_output /* Cancel probe timer, if it is not required. 
*/ diff -aurN linux-5.4/net/ipv4/tcp_timer.c mptcp-mptcp_trunk/net/ipv4/tcp_timer.c --- linux-5.4/net/ipv4/tcp_timer.c 2019-11-25 01:32:01.000000000 +0100 -+++ mptcp-mptcp_trunk/net/ipv4/tcp_timer.c 2020-02-20 18:07:47.000000000 +0100 ++++ mptcp-mptcp_trunk/net/ipv4/tcp_timer.c 2020-06-09 17:33:41.000000000 +0200 @@ -21,6 +21,7 @@ #include @@ -6448,7 +6507,7 @@ diff -aurN linux-5.4/net/ipv4/tcp_timer.c mptcp-mptcp_trunk/net/ipv4/tcp_timer.c diff -aurN linux-5.4/net/ipv6/addrconf.c mptcp-mptcp_trunk/net/ipv6/addrconf.c --- linux-5.4/net/ipv6/addrconf.c 2019-11-25 01:32:01.000000000 +0100 -+++ mptcp-mptcp_trunk/net/ipv6/addrconf.c 2020-02-20 18:07:47.000000000 +0100 ++++ mptcp-mptcp_trunk/net/ipv6/addrconf.c 2020-06-09 17:33:41.000000000 +0200 @@ -967,6 +967,7 @@ kfree_rcu(ifp, rcu); @@ -6459,7 +6518,7 @@ diff -aurN linux-5.4/net/ipv6/addrconf.c mptcp-mptcp_trunk/net/ipv6/addrconf.c ipv6_link_dev_addr(struct inet6_dev *idev, struct inet6_ifaddr *ifp) diff -aurN linux-5.4/net/ipv6/af_inet6.c mptcp-mptcp_trunk/net/ipv6/af_inet6.c --- linux-5.4/net/ipv6/af_inet6.c 2019-11-25 01:32:01.000000000 +0100 -+++ mptcp-mptcp_trunk/net/ipv6/af_inet6.c 2020-02-20 18:07:47.000000000 +0100 ++++ mptcp-mptcp_trunk/net/ipv6/af_inet6.c 2020-06-09 17:33:41.000000000 +0200 @@ -104,8 +104,7 @@ return (struct ipv6_pinfo *)(((u8 *)sk) + offset); } @@ -6472,7 +6531,7 @@ diff -aurN linux-5.4/net/ipv6/af_inet6.c mptcp-mptcp_trunk/net/ipv6/af_inet6.c struct ipv6_pinfo *np; diff -aurN linux-5.4/net/ipv6/ipv6_sockglue.c mptcp-mptcp_trunk/net/ipv6/ipv6_sockglue.c --- linux-5.4/net/ipv6/ipv6_sockglue.c 2019-11-25 01:32:01.000000000 +0100 -+++ mptcp-mptcp_trunk/net/ipv6/ipv6_sockglue.c 2020-02-20 18:07:47.000000000 +0100 ++++ mptcp-mptcp_trunk/net/ipv6/ipv6_sockglue.c 2020-06-09 17:33:41.000000000 +0200 @@ -44,6 +44,8 @@ #include #include @@ -6498,7 +6557,7 @@ diff -aurN linux-5.4/net/ipv6/ipv6_sockglue.c mptcp-mptcp_trunk/net/ipv6/ipv6_so tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); diff -aurN linux-5.4/net/ipv6/syncookies.c mptcp-mptcp_trunk/net/ipv6/syncookies.c --- linux-5.4/net/ipv6/syncookies.c 2019-11-25 01:32:01.000000000 +0100 -+++ mptcp-mptcp_trunk/net/ipv6/syncookies.c 2020-02-20 18:07:47.000000000 +0100 ++++ mptcp-mptcp_trunk/net/ipv6/syncookies.c 2020-06-09 17:33:41.000000000 +0200 @@ -15,6 +15,8 @@ #include #include @@ -6565,7 +6624,7 @@ diff -aurN linux-5.4/net/ipv6/syncookies.c mptcp-mptcp_trunk/net/ipv6/syncookies if (security_inet_conn_request(sk, skb, req)) goto out_free; -@@ -241,10 +259,10 @@ +@@ -241,15 +259,15 @@ } req->rsk_window_clamp = tp->window_clamp ? :dst_metric(dst, RTAX_WINDOW); @@ -6580,9 +6639,15 @@ diff -aurN linux-5.4/net/ipv6/syncookies.c mptcp-mptcp_trunk/net/ipv6/syncookies ireq->rcv_wscale = rcv_wscale; ireq->ecn_ok = cookie_ecn_ok(&tcp_opt, sock_net(sk), dst); + +- ret = tcp_get_cookie_sock(sk, skb, req, dst, tsoff); ++ ret = tcp_get_cookie_sock(sk, skb, req, &mopt, dst, tsoff); + out: + return ret; + out_free: diff -aurN linux-5.4/net/ipv6/tcp_ipv6.c mptcp-mptcp_trunk/net/ipv6/tcp_ipv6.c --- linux-5.4/net/ipv6/tcp_ipv6.c 2019-11-25 01:32:01.000000000 +0100 -+++ mptcp-mptcp_trunk/net/ipv6/tcp_ipv6.c 2020-02-20 18:07:47.000000000 +0100 ++++ mptcp-mptcp_trunk/net/ipv6/tcp_ipv6.c 2020-06-09 17:33:41.000000000 +0200 @@ -58,6 +58,8 @@ #include #include @@ -7176,7 +7241,7 @@ diff -aurN linux-5.4/net/ipv6/tcp_ipv6.c mptcp-mptcp_trunk/net/ipv6/tcp_ipv6.c /* thinking of making this const? Don't. 
diff -aurN linux-5.4/net/Kconfig mptcp-mptcp_trunk/net/Kconfig --- linux-5.4/net/Kconfig 2019-11-25 01:32:01.000000000 +0100 -+++ mptcp-mptcp_trunk/net/Kconfig 2020-02-20 18:07:47.000000000 +0100 ++++ mptcp-mptcp_trunk/net/Kconfig 2020-06-09 17:33:41.000000000 +0200 @@ -91,6 +91,7 @@ source "net/ipv4/Kconfig" source "net/ipv6/Kconfig" @@ -7187,7 +7252,7 @@ diff -aurN linux-5.4/net/Kconfig mptcp-mptcp_trunk/net/Kconfig diff -aurN linux-5.4/net/Makefile mptcp-mptcp_trunk/net/Makefile --- linux-5.4/net/Makefile 2019-11-25 01:32:01.000000000 +0100 -+++ mptcp-mptcp_trunk/net/Makefile 2020-02-20 18:07:47.000000000 +0100 ++++ mptcp-mptcp_trunk/net/Makefile 2020-06-09 17:33:41.000000000 +0200 @@ -20,6 +20,7 @@ obj-$(CONFIG_XFRM) += xfrm/ obj-$(CONFIG_UNIX_SCM) += unix/ @@ -7198,8 +7263,8 @@ diff -aurN linux-5.4/net/Makefile mptcp-mptcp_trunk/net/Makefile obj-$(CONFIG_NET_KEY) += key/ diff -aurN linux-5.4/net/mptcp/Kconfig mptcp-mptcp_trunk/net/mptcp/Kconfig --- linux-5.4/net/mptcp/Kconfig 1970-01-01 01:00:00.000000000 +0100 -+++ mptcp-mptcp_trunk/net/mptcp/Kconfig 2020-02-20 18:07:47.000000000 +0100 -@@ -0,0 +1,146 @@ ++++ mptcp-mptcp_trunk/net/mptcp/Kconfig 2020-06-09 17:33:41.000000000 +0200 +@@ -0,0 +1,152 @@ +# +# MPTCP configuration +# @@ -7311,6 +7376,12 @@ diff -aurN linux-5.4/net/mptcp/Kconfig mptcp-mptcp_trunk/net/mptcp/Kconfig + This scheduler sends all packets redundantly over all subflows to decreases + latency and jitter on the cost of lower throughput. + ++config MPTCP_ECF ++ tristate "MPTCP ECF" ++ depends on (MPTCP=y) ++ ---help--- ++ This is an experimental Earliest Completion First (ECF) scheduler. ++ +choice + prompt "Default MPTCP Scheduler" + default DEFAULT_SCHEDULER @@ -7348,8 +7419,8 @@ diff -aurN linux-5.4/net/mptcp/Kconfig mptcp-mptcp_trunk/net/mptcp/Kconfig + diff -aurN linux-5.4/net/mptcp/Makefile mptcp-mptcp_trunk/net/mptcp/Makefile --- linux-5.4/net/mptcp/Makefile 1970-01-01 01:00:00.000000000 +0100 -+++ mptcp-mptcp_trunk/net/mptcp/Makefile 2020-02-20 18:07:47.000000000 +0100 -@@ -0,0 +1,24 @@ ++++ mptcp-mptcp_trunk/net/mptcp/Makefile 2020-06-09 17:33:41.000000000 +0200 +@@ -0,0 +1,25 @@ +# +## Makefile for MultiPath TCP support code. 
+# @@ -7372,11 +7443,12 @@ diff -aurN linux-5.4/net/mptcp/Makefile mptcp-mptcp_trunk/net/mptcp/Makefile +obj-$(CONFIG_MPTCP_ROUNDROBIN) += mptcp_rr.o +obj-$(CONFIG_MPTCP_REDUNDANT) += mptcp_redundant.o +obj-$(CONFIG_MPTCP_BLEST) += mptcp_blest.o ++obj-$(CONFIG_MPTCP_ECF) += mptcp_ecf.o + +mptcp-$(subst m,y,$(CONFIG_IPV6)) += mptcp_ipv6.o diff -aurN linux-5.4/net/mptcp/mctcp_desync.c mptcp-mptcp_trunk/net/mptcp/mctcp_desync.c --- linux-5.4/net/mptcp/mctcp_desync.c 1970-01-01 01:00:00.000000000 +0100 -+++ mptcp-mptcp_trunk/net/mptcp/mctcp_desync.c 2020-02-20 18:07:47.000000000 +0100 ++++ mptcp-mptcp_trunk/net/mptcp/mctcp_desync.c 2020-06-09 17:33:41.000000000 +0200 @@ -0,0 +1,193 @@ +/* + * Desynchronized Multi-Channel TCP Congestion Control Algorithm @@ -7573,7 +7645,7 @@ diff -aurN linux-5.4/net/mptcp/mctcp_desync.c mptcp-mptcp_trunk/net/mptcp/mctcp_ +MODULE_VERSION("1.0"); diff -aurN linux-5.4/net/mptcp/mptcp_balia.c mptcp-mptcp_trunk/net/mptcp/mptcp_balia.c --- linux-5.4/net/mptcp/mptcp_balia.c 1970-01-01 01:00:00.000000000 +0100 -+++ mptcp-mptcp_trunk/net/mptcp/mptcp_balia.c 2020-02-20 18:07:47.000000000 +0100 ++++ mptcp-mptcp_trunk/net/mptcp/mptcp_balia.c 2020-06-09 17:33:41.000000000 +0200 @@ -0,0 +1,261 @@ +/* + * MPTCP implementation - Balia Congestion Control @@ -7838,7 +7910,7 @@ diff -aurN linux-5.4/net/mptcp/mptcp_balia.c mptcp-mptcp_trunk/net/mptcp/mptcp_b +MODULE_VERSION("0.1"); diff -aurN linux-5.4/net/mptcp/mptcp_binder.c mptcp-mptcp_trunk/net/mptcp/mptcp_binder.c --- linux-5.4/net/mptcp/mptcp_binder.c 1970-01-01 01:00:00.000000000 +0100 -+++ mptcp-mptcp_trunk/net/mptcp/mptcp_binder.c 2020-02-20 18:07:47.000000000 +0100 ++++ mptcp-mptcp_trunk/net/mptcp/mptcp_binder.c 2020-06-09 17:33:41.000000000 +0200 @@ -0,0 +1,494 @@ +#include + @@ -8336,8 +8408,8 @@ diff -aurN linux-5.4/net/mptcp/mptcp_binder.c mptcp-mptcp_trunk/net/mptcp/mptcp_ +MODULE_VERSION("0.1"); diff -aurN linux-5.4/net/mptcp/mptcp_blest.c mptcp-mptcp_trunk/net/mptcp/mptcp_blest.c --- linux-5.4/net/mptcp/mptcp_blest.c 1970-01-01 01:00:00.000000000 +0100 -+++ mptcp-mptcp_trunk/net/mptcp/mptcp_blest.c 2020-02-20 18:07:47.000000000 +0100 -@@ -0,0 +1,481 @@ ++++ mptcp-mptcp_trunk/net/mptcp/mptcp_blest.c 2020-06-09 17:33:41.000000000 +0200 +@@ -0,0 +1,285 @@ +// SPDX-License-Identifier: GPL-2.0 +/* MPTCP Scheduler to reduce HoL-blocking and spurious retransmissions. 
+ * @@ -8361,7 +8433,6 @@ diff -aurN linux-5.4/net/mptcp/mptcp_blest.c mptcp-mptcp_trunk/net/mptcp/mptcp_b + +#include +#include -+#include + +static unsigned char lambda __read_mostly = 12; +module_param(lambda, byte, 0644); @@ -8390,7 +8461,6 @@ diff -aurN linux-5.4/net/mptcp/mptcp_blest.c mptcp-mptcp_trunk/net/mptcp/mptcp_b +}; + +struct blestsched_cb { -+ bool retrans_flag; + s16 lambda_1000; /* values range from min_lambda * 100 to max_lambda * 100 */ + u32 last_lambda_update; +}; @@ -8417,14 +8487,13 @@ diff -aurN linux-5.4/net/mptcp/mptcp_blest.c mptcp-mptcp_trunk/net/mptcp/mptcp_b + * during the slow flows last RTT => increase lambda + * otherwise decrease + */ -+ if (blest_cb->retrans_flag) { ++ if (tcp_sk(meta_sk)->retrans_stamp) { + /* need to slow down on the slow flow */ + blest_cb->lambda_1000 += dyn_lambda_bad; + } else { + /* use the slow flow more */ + blest_cb->lambda_1000 -= dyn_lambda_good; + } -+ blest_cb->retrans_flag = false; + + /* cap lambda_1000 to its value range */ + blest_cb->lambda_1000 = min_t(s16, blest_cb->lambda_1000, max_lambda * 100); @@ -8580,199 +8649,6 @@ diff -aurN linux-5.4/net/mptcp/mptcp_blest.c mptcp-mptcp_trunk/net/mptcp/mptcp_b + return bestsk; +} + -+/* copy from mptcp_sched.c: mptcp_rcv_buf_optimization */ -+static struct sk_buff *mptcp_blest_rcv_buf_optimization(struct sock *sk, int penal) -+{ -+ struct sock *meta_sk; -+ const struct tcp_sock *tp = tcp_sk(sk); -+ struct mptcp_tcp_sock *mptcp; -+ struct sk_buff *skb_head; -+ struct blestsched_priv *blest_p = blestsched_get_priv(tp); -+ struct blestsched_cb *blest_cb; -+ -+ meta_sk = mptcp_meta_sk(sk); -+ skb_head = tcp_rtx_queue_head(meta_sk); -+ -+ if (!skb_head) -+ return NULL; -+ -+ /* If penalization is optional (coming from mptcp_next_segment() and -+ * We are not send-buffer-limited we do not penalize. The retransmission -+ * is just an optimization to fix the idle-time due to the delay before -+ * we wake up the application. -+ */ -+ if (!penal && sk_stream_memory_free(meta_sk)) -+ goto retrans; -+ -+ /* Record the occurrence of a retransmission to update the lambda value */ -+ blest_cb = blestsched_get_cb(tcp_sk(meta_sk)); -+ blest_cb->retrans_flag = true; -+ -+ /* Only penalize again after an RTT has elapsed */ -+ if (tcp_jiffies32 - blest_p->last_rbuf_opti < usecs_to_jiffies(tp->srtt_us >> 3)) -+ goto retrans; -+ -+ /* Half the cwnd of the slow flows */ -+ mptcp_for_each_sub(tp->mpcb, mptcp) { -+ struct tcp_sock *tp_it = mptcp->tp; -+ -+ if (tp_it != tp && -+ TCP_SKB_CB(skb_head)->path_mask & mptcp_pi_to_flag(tp_it->mptcp->path_index)) { -+ if (tp->srtt_us < tp_it->srtt_us && inet_csk((struct sock *)tp_it)->icsk_ca_state == TCP_CA_Open) { -+ u32 prior_cwnd = tp_it->snd_cwnd; -+ -+ tp_it->snd_cwnd = max(tp_it->snd_cwnd >> 1U, 1U); -+ -+ /* If in slow start, do not reduce the ssthresh */ -+ if (prior_cwnd >= tp_it->snd_ssthresh) -+ tp_it->snd_ssthresh = max(tp_it->snd_ssthresh >> 1U, 2U); -+ -+ blest_p->last_rbuf_opti = tcp_jiffies32; -+ } -+ } -+ } -+ -+retrans: -+ -+ /* Segment not yet injected into this path? Take it!!! 
*/ -+ if (!(TCP_SKB_CB(skb_head)->path_mask & mptcp_pi_to_flag(tp->mptcp->path_index))) { -+ bool do_retrans = false; -+ mptcp_for_each_sub(tp->mpcb, mptcp) { -+ struct tcp_sock *tp_it = mptcp->tp; -+ -+ if (tp_it != tp && -+ TCP_SKB_CB(skb_head)->path_mask & mptcp_pi_to_flag(tp_it->mptcp->path_index)) { -+ if (tp_it->snd_cwnd <= 4) { -+ do_retrans = true; -+ break; -+ } -+ -+ if (4 * tp->srtt_us >= tp_it->srtt_us) { -+ do_retrans = false; -+ break; -+ } else { -+ do_retrans = true; -+ } -+ } -+ } -+ -+ if (do_retrans && mptcp_is_available(sk, skb_head, false)) { -+ trace_mptcp_retransmit(sk, skb_head); -+ return skb_head; -+ } -+ } -+ return NULL; -+} -+ -+/* copy from mptcp_sched.c: __mptcp_next_segment */ -+/* Returns the next segment to be sent from the mptcp meta-queue. -+ * (chooses the reinject queue if any segment is waiting in it, otherwise, -+ * chooses the normal write queue). -+ * Sets *@reinject to 1 if the returned segment comes from the -+ * reinject queue. Sets it to 0 if it is the regular send-head of the meta-sk, -+ * and sets it to -1 if it is a meta-level retransmission to optimize the -+ * receive-buffer. -+ */ -+static struct sk_buff *__mptcp_blest_next_segment(struct sock *meta_sk, int *reinject) -+{ -+ const struct mptcp_cb *mpcb = tcp_sk(meta_sk)->mpcb; -+ struct sk_buff *skb = NULL; -+ -+ *reinject = 0; -+ -+ /* If we are in fallback-mode, just take from the meta-send-queue */ -+ if (mpcb->infinite_mapping_snd || mpcb->send_infinite_mapping) -+ return tcp_send_head(meta_sk); -+ -+ skb = skb_peek(&mpcb->reinject_queue); -+ -+ if (skb) { -+ *reinject = 1; -+ } else { -+ skb = tcp_send_head(meta_sk); -+ -+ if (!skb && meta_sk->sk_socket && -+ test_bit(SOCK_NOSPACE, &meta_sk->sk_socket->flags) && -+ sk_stream_wspace(meta_sk) < sk_stream_min_wspace(meta_sk)) { -+ struct sock *subsk = blest_get_available_subflow(meta_sk, NULL, -+ false); -+ if (!subsk) -+ return NULL; -+ -+ skb = mptcp_blest_rcv_buf_optimization(subsk, 0); -+ if (skb) -+ *reinject = -1; -+ } -+ } -+ return skb; -+} -+ -+/* copy from mptcp_sched.c: mptcp_next_segment */ -+static struct sk_buff *mptcp_blest_next_segment(struct sock *meta_sk, -+ int *reinject, -+ struct sock **subsk, -+ unsigned int *limit) -+{ -+ struct sk_buff *skb = __mptcp_blest_next_segment(meta_sk, reinject); -+ unsigned int mss_now; -+ struct tcp_sock *subtp; -+ u16 gso_max_segs; -+ u32 max_len, max_segs, window, needed; -+ -+ /* As we set it, we have to reset it as well. */ -+ *limit = 0; -+ -+ if (!skb) -+ return NULL; -+ -+ *subsk = blest_get_available_subflow(meta_sk, skb, false); -+ if (!*subsk) -+ return NULL; -+ -+ subtp = tcp_sk(*subsk); -+ mss_now = tcp_current_mss(*subsk); -+ -+ if (!*reinject && unlikely(!tcp_snd_wnd_test(tcp_sk(meta_sk), skb, mss_now))) { -+ skb = mptcp_blest_rcv_buf_optimization(*subsk, 1); -+ if (skb) -+ *reinject = -1; -+ else -+ return NULL; -+ } -+ -+ /* No splitting required, as we will only send one single segment */ -+ if (skb->len <= mss_now) -+ return skb; -+ -+ /* The following is similar to tcp_mss_split_point, but -+ * we do not care about nagle, because we will anyways -+ * use TCP_NAGLE_PUSH, which overrides this. -+ * -+ * So, we first limit according to the cwnd/gso-size and then according -+ * to the subflow's window. 
-+ */ -+ -+ gso_max_segs = (*subsk)->sk_gso_max_segs; -+ if (!gso_max_segs) /* No gso supported on the subflow's NIC */ -+ gso_max_segs = 1; -+ max_segs = min_t(unsigned int, tcp_cwnd_test(subtp, skb), gso_max_segs); -+ if (!max_segs) -+ return NULL; -+ -+ max_len = mss_now * max_segs; -+ window = tcp_wnd_end(subtp) - subtp->write_seq; -+ -+ needed = min(skb->len, window); -+ if (max_len <= skb->len) -+ /* Take max_win, which is actually the cwnd/gso-size */ -+ *limit = max_len; -+ else -+ /* Or, take the window */ -+ *limit = needed; -+ -+ return skb; -+} -+ +static void blestsched_init(struct sock *sk) +{ + struct blestsched_priv *blest_p = blestsched_get_priv(tcp_sk(sk)); @@ -8790,7 +8666,7 @@ diff -aurN linux-5.4/net/mptcp/mptcp_blest.c mptcp-mptcp_trunk/net/mptcp/mptcp_b + +static struct mptcp_sched_ops mptcp_sched_blest = { + .get_subflow = blest_get_available_subflow, -+ .next_segment = mptcp_blest_next_segment, ++ .next_segment = mptcp_next_segment, + .init = blestsched_init, + .name = "blest", + .owner = THIS_MODULE, @@ -8821,7 +8697,7 @@ diff -aurN linux-5.4/net/mptcp/mptcp_blest.c mptcp-mptcp_trunk/net/mptcp/mptcp_b +MODULE_VERSION("0.95"); diff -aurN linux-5.4/net/mptcp/mptcp_coupled.c mptcp-mptcp_trunk/net/mptcp/mptcp_coupled.c --- linux-5.4/net/mptcp/mptcp_coupled.c 1970-01-01 01:00:00.000000000 +0100 -+++ mptcp-mptcp_trunk/net/mptcp/mptcp_coupled.c 2020-02-20 18:07:47.000000000 +0100 ++++ mptcp-mptcp_trunk/net/mptcp/mptcp_coupled.c 2020-06-09 17:33:41.000000000 +0200 @@ -0,0 +1,262 @@ +/* + * MPTCP implementation - Linked Increase congestion control Algorithm (LIA) @@ -9087,8 +8963,8 @@ diff -aurN linux-5.4/net/mptcp/mptcp_coupled.c mptcp-mptcp_trunk/net/mptcp/mptcp +MODULE_VERSION("0.1"); diff -aurN linux-5.4/net/mptcp/mptcp_ctrl.c mptcp-mptcp_trunk/net/mptcp/mptcp_ctrl.c --- linux-5.4/net/mptcp/mptcp_ctrl.c 1970-01-01 01:00:00.000000000 +0100 -+++ mptcp-mptcp_trunk/net/mptcp/mptcp_ctrl.c 2020-02-20 18:07:47.000000000 +0100 -@@ -0,0 +1,3142 @@ ++++ mptcp-mptcp_trunk/net/mptcp/mptcp_ctrl.c 2020-06-09 17:33:41.000000000 +0200 +@@ -0,0 +1,3299 @@ +/* + * MPTCP implementation - MPTCP-control + * @@ -9118,6 +8994,8 @@ diff -aurN linux-5.4/net/mptcp/mptcp_ctrl.c mptcp-mptcp_trunk/net/mptcp/mptcp_ct + * 2 of the License, or (at your option) any later version. 
+ */ + ++#include ++ +#include +#include +#include @@ -9134,6 +9012,7 @@ diff -aurN linux-5.4/net/mptcp/mptcp_ctrl.c mptcp-mptcp_trunk/net/mptcp/mptcp_ct +#include +#include + ++#include +#include +#include +#include @@ -9168,7 +9047,7 @@ diff -aurN linux-5.4/net/mptcp/mptcp_ctrl.c mptcp-mptcp_trunk/net/mptcp/mptcp_ct +struct static_key mptcp_static_key = STATIC_KEY_INIT_FALSE; +EXPORT_SYMBOL(mptcp_static_key); + -+static void mptcp_key_sha1(u64 key, u32 *token, u64 *idsn); ++static void mptcp_key_hash(u8 version, u64 key, u32 *token, u64 *idsn); + +static int proc_mptcp_path_manager(struct ctl_table *ctl, int write, + void __user *buffer, size_t *lenp, @@ -9261,16 +9140,16 @@ diff -aurN linux-5.4/net/mptcp/mptcp_ctrl.c mptcp-mptcp_trunk/net/mptcp/mptcp_ct + { } +}; + -+static inline u32 mptcp_hash_tk(u32 token) ++static inline u32 mptcp_hash_tk(u32 token, struct mptcp_hashtable *htable) +{ -+ return token % MPTCP_HASH_SIZE; ++ return token & htable->mask; +} + -+struct hlist_nulls_head tk_hashtable[MPTCP_HASH_SIZE]; -+EXPORT_SYMBOL(tk_hashtable); ++struct mptcp_hashtable mptcp_tk_htable; ++EXPORT_SYMBOL(mptcp_tk_htable); + +/* The following hash table is used to avoid collision of token */ -+static struct hlist_nulls_head mptcp_reqsk_tk_htb[MPTCP_HASH_SIZE]; ++static struct mptcp_hashtable mptcp_reqsk_tk_htb; + +/* Lock, protecting the two hash-tables that hold the token. Namely, + * mptcp_reqsk_tk_htb and tk_hashtable @@ -9279,13 +9158,14 @@ diff -aurN linux-5.4/net/mptcp/mptcp_ctrl.c mptcp-mptcp_trunk/net/mptcp/mptcp_ct + +static bool mptcp_reqsk_find_tk(const u32 token) +{ -+ const u32 hash = mptcp_hash_tk(token); ++ const u32 hash = mptcp_hash_tk(token, &mptcp_reqsk_tk_htb); + const struct mptcp_request_sock *mtreqsk; + const struct hlist_nulls_node *node; + +begin: + hlist_nulls_for_each_entry_rcu(mtreqsk, node, -+ &mptcp_reqsk_tk_htb[hash], hash_entry) { ++ &mptcp_reqsk_tk_htb.hashtable[hash], ++ hash_entry) { + if (token == mtreqsk->mptcp_loc_token) + return true; + } @@ -9302,10 +9182,10 @@ diff -aurN linux-5.4/net/mptcp/mptcp_ctrl.c mptcp-mptcp_trunk/net/mptcp/mptcp_ct + +static void mptcp_reqsk_insert_tk(struct request_sock *reqsk, const u32 token) +{ -+ u32 hash = mptcp_hash_tk(token); ++ u32 hash = mptcp_hash_tk(token, &mptcp_reqsk_tk_htb); + + hlist_nulls_add_head_rcu(&mptcp_rsk(reqsk)->hash_entry, -+ &mptcp_reqsk_tk_htb[hash]); ++ &mptcp_reqsk_tk_htb.hashtable[hash]); +} + +static void mptcp_reqsk_remove_tk(const struct request_sock *reqsk) @@ -9327,19 +9207,23 @@ diff -aurN linux-5.4/net/mptcp/mptcp_ctrl.c mptcp-mptcp_trunk/net/mptcp/mptcp_ct + +static void __mptcp_hash_insert(struct tcp_sock *meta_tp, const u32 token) +{ -+ u32 hash = mptcp_hash_tk(token); -+ hlist_nulls_add_head_rcu(&meta_tp->tk_table, &tk_hashtable[hash]); ++ u32 hash = mptcp_hash_tk(token, &mptcp_tk_htable); ++ ++ hlist_nulls_add_head_rcu(&meta_tp->tk_table, ++ &mptcp_tk_htable.hashtable[hash]); + meta_tp->inside_tk_table = 1; +} + +static bool mptcp_find_token(u32 token) +{ -+ const u32 hash = mptcp_hash_tk(token); ++ const u32 hash = mptcp_hash_tk(token, &mptcp_tk_htable); + const struct tcp_sock *meta_tp; + const struct hlist_nulls_node *node; + +begin: -+ hlist_nulls_for_each_entry_rcu(meta_tp, node, &tk_hashtable[hash], tk_table) { ++ hlist_nulls_for_each_entry_rcu(meta_tp, node, ++ &mptcp_tk_htable.hashtable[hash], ++ tk_table) { + if (token == meta_tp->mptcp_loc_token) + return true; + } @@ -9377,7 +9261,7 @@ diff -aurN linux-5.4/net/mptcp/mptcp_ctrl.c mptcp-mptcp_trunk/net/mptcp/mptcp_ct +#endif 
+ } + -+ mptcp_key_sha1(mtreq->mptcp_loc_key, &mtreq->mptcp_loc_token, NULL); ++ mptcp_key_hash(mtreq->mptcp_ver, mtreq->mptcp_loc_key, &mtreq->mptcp_loc_token, NULL); +} + +/* New MPTCP-connection request, prepare a new token for the meta-socket that @@ -9410,7 +9294,11 @@ diff -aurN linux-5.4/net/mptcp/mptcp_ctrl.c mptcp-mptcp_trunk/net/mptcp/mptcp_ct + spin_unlock(&mptcp_tk_hashlock); + local_bh_enable(); + rcu_read_unlock(); -+ mtreq->mptcp_rem_key = mopt->mptcp_sender_key; ++ ++ if (mtreq->mptcp_ver == MPTCP_VERSION_0) { ++ mtreq->mptcp_rem_key = mopt->mptcp_sender_key; ++ mtreq->rem_key_set = 1; ++ } +} + +static int mptcp_reqsk_new_cookie(struct request_sock *req, @@ -9446,7 +9334,10 @@ diff -aurN linux-5.4/net/mptcp/mptcp_ctrl.c mptcp-mptcp_trunk/net/mptcp/mptcp_ct + local_bh_enable(); + rcu_read_unlock(); + -+ mtreq->mptcp_rem_key = mopt->mptcp_sender_key; ++ if (mtreq->mptcp_ver == MPTCP_VERSION_0) { ++ mtreq->mptcp_rem_key = mopt->mptcp_sender_key; ++ mtreq->rem_key_set = 1; ++ } + + return true; +} @@ -9471,8 +9362,7 @@ diff -aurN linux-5.4/net/mptcp/mptcp_ctrl.c mptcp-mptcp_trunk/net/mptcp/mptcp_ct + mptcp_seed++); +#endif + -+ mptcp_key_sha1(tp->mptcp_loc_key, -+ &tp->mptcp_loc_token, NULL); ++ mptcp_key_hash(tp->mptcp_ver, tp->mptcp_loc_key, &tp->mptcp_loc_token, NULL); +} + +#ifdef CONFIG_JUMP_LABEL @@ -9612,7 +9502,7 @@ diff -aurN linux-5.4/net/mptcp/mptcp_ctrl.c mptcp-mptcp_trunk/net/mptcp/mptcp_ct + */ +struct sock *mptcp_hash_find(const struct net *net, const u32 token) +{ -+ const u32 hash = mptcp_hash_tk(token); ++ const u32 hash = mptcp_hash_tk(token, &mptcp_tk_htable); + const struct tcp_sock *meta_tp; + struct sock *meta_sk = NULL; + const struct hlist_nulls_node *node; @@ -9620,7 +9510,8 @@ diff -aurN linux-5.4/net/mptcp/mptcp_ctrl.c mptcp-mptcp_trunk/net/mptcp/mptcp_ct + rcu_read_lock(); + local_bh_disable(); +begin: -+ hlist_nulls_for_each_entry_rcu(meta_tp, node, &tk_hashtable[hash], ++ hlist_nulls_for_each_entry_rcu(meta_tp, node, ++ &mptcp_tk_htable.hashtable[hash], + tk_table) { + meta_sk = (struct sock *)meta_tp; + if (token == meta_tp->mptcp_loc_token && @@ -9926,6 +9817,67 @@ diff -aurN linux-5.4/net/mptcp/mptcp_ctrl.c mptcp-mptcp_trunk/net/mptcp/mptcp_ct +siphash_key_t mptcp_secret __read_mostly; +u32 mptcp_seed = 0; + ++#define SHA256_DIGEST_WORDS (SHA256_DIGEST_SIZE / 4) ++ ++static void mptcp_key_sha256(const u64 key, u32 *token, u64 *idsn) ++{ ++ u32 mptcp_hashed_key[SHA256_DIGEST_WORDS]; ++ struct sha256_state state; ++ ++ sha256_init(&state); ++ sha256_update(&state, (const u8 *)&key, sizeof(key)); ++ sha256_final(&state, (u8 *)mptcp_hashed_key); ++ ++ if (token) ++ *token = mptcp_hashed_key[0]; ++ if (idsn) ++ *idsn = ntohll(*((__be64 *)&mptcp_hashed_key[6])); ++} ++ ++static void mptcp_hmac_sha256(const u8 *key_1, const u8 *key_2, u8 *hash_out, ++ int arg_num, va_list list) ++{ ++ u8 input[SHA256_BLOCK_SIZE + SHA256_DIGEST_SIZE]; ++ struct sha256_state state; ++ int index, msg_length; ++ int length = 0; ++ u8 *msg; ++ int i; ++ ++ /* Generate key xored with ipad */ ++ memset(input, 0x36, SHA256_BLOCK_SIZE); ++ for (i = 0; i < 8; i++) ++ input[i] ^= key_1[i]; ++ for (i = 0; i < 8; i++) ++ input[i + 8] ^= key_2[i]; ++ ++ index = SHA256_BLOCK_SIZE; ++ msg_length = 0; ++ for (i = 0; i < arg_num; i++) { ++ length = va_arg(list, int); ++ msg = va_arg(list, u8 *); ++ BUG_ON(index + length >= sizeof(input)); /* Message is too long */ ++ memcpy(&input[index], msg, length); ++ index += length; ++ msg_length += length; ++ } ++ ++ sha256_init(&state); ++ 
sha256_update(&state, input, SHA256_BLOCK_SIZE + msg_length); ++ sha256_final(&state, &input[SHA256_BLOCK_SIZE]); ++ ++ /* Prepare second part of hmac */ ++ memset(input, 0x5C, SHA256_BLOCK_SIZE); ++ for (i = 0; i < 8; i++) ++ input[i] ^= key_1[i]; ++ for (i = 0; i < 8; i++) ++ input[i + 8] ^= key_2[i]; ++ ++ sha256_init(&state); ++ sha256_update(&state, input, sizeof(input)); ++ sha256_final(&state, hash_out); ++} ++ +static void mptcp_key_sha1(u64 key, u32 *token, u64 *idsn) +{ + u32 workspace[SHA_WORKSPACE_WORDS]; @@ -9955,8 +9907,16 @@ diff -aurN linux-5.4/net/mptcp/mptcp_ctrl.c mptcp-mptcp_trunk/net/mptcp/mptcp_ct + *idsn = ntohll(*((__be64 *)&mptcp_hashed_key[3])); +} + -+void mptcp_hmac_sha1(const u8 *key_1, const u8 *key_2, u32 *hash_out, -+ int arg_num, ...) ++static void mptcp_key_hash(u8 version, u64 key, u32 *token, u64 *idsn) ++{ ++ if (version == MPTCP_VERSION_0) ++ mptcp_key_sha1(key, token, idsn); ++ else if (version >= MPTCP_VERSION_1) ++ mptcp_key_sha256(key, token, idsn); ++} ++ ++static void mptcp_hmac_sha1(const u8 *key_1, const u8 *key_2, u32 *hash_out, ++ int arg_num, va_list list) +{ + u32 workspace[SHA_WORKSPACE_WORDS]; + u8 input[128]; /* 2 512-bit blocks */ @@ -9964,7 +9924,6 @@ diff -aurN linux-5.4/net/mptcp/mptcp_ctrl.c mptcp-mptcp_trunk/net/mptcp/mptcp_ct + int index; + int length; + u8 *msg; -+ va_list list; + + memset(workspace, 0, sizeof(workspace)); + @@ -9975,7 +9934,6 @@ diff -aurN linux-5.4/net/mptcp/mptcp_ctrl.c mptcp-mptcp_trunk/net/mptcp/mptcp_ct + for (i = 0; i < 8; i++) + input[i + 8] ^= key_2[i]; + -+ va_start(list, arg_num); + index = 64; + for (i = 0; i < arg_num; i++) { + length = va_arg(list, int); @@ -9984,7 +9942,6 @@ diff -aurN linux-5.4/net/mptcp/mptcp_ctrl.c mptcp-mptcp_trunk/net/mptcp/mptcp_ct + memcpy(&input[index], msg, length); + index += length; + } -+ va_end(list); + + input[index] = 0x80; /* Padding: First bit after message = 1 */ + memset(&input[index + 1], 0, (126 - index)); @@ -10027,7 +9984,20 @@ diff -aurN linux-5.4/net/mptcp/mptcp_ctrl.c mptcp-mptcp_trunk/net/mptcp/mptcp_ct + for (i = 0; i < 5; i++) + hash_out[i] = (__force u32)cpu_to_be32(hash_out[i]); +} -+EXPORT_SYMBOL(mptcp_hmac_sha1); ++ ++void mptcp_hmac(u8 ver, const u8 *key_1, const u8 *key_2, u8 *hash_out, ++ int arg_num, ...) 
++{ ++ va_list args; ++ ++ va_start(args, arg_num); ++ if (ver == MPTCP_VERSION_0) ++ mptcp_hmac_sha1(key_1, key_2, (u32 *)hash_out, arg_num, args); ++ else if (ver >= MPTCP_VERSION_1) ++ mptcp_hmac_sha256(key_1, key_2, hash_out, arg_num, args); ++ va_end(args); ++} ++EXPORT_SYMBOL(mptcp_hmac); + +static void mptcp_mpcb_inherit_sockopts(struct sock *meta_sk, struct sock *master_sk) +{ @@ -10260,14 +10230,33 @@ diff -aurN linux-5.4/net/mptcp/mptcp_ctrl.c mptcp-mptcp_trunk/net/mptcp/mptcp_ct + .set_cong_ctrl = __tcp_set_congestion_control, +}; + ++void mptcp_initialize_recv_vars(struct tcp_sock *meta_tp, struct mptcp_cb *mpcb, ++ __u64 remote_key) ++{ ++ u64 idsn; ++ ++ mpcb->mptcp_rem_key = remote_key; ++ mpcb->rem_key_set = 1; ++ mptcp_key_hash(mpcb->mptcp_ver, mpcb->mptcp_rem_key, &mpcb->mptcp_rem_token, &idsn); ++ ++ idsn++; ++ mpcb->rcv_high_order[0] = idsn >> 32; ++ mpcb->rcv_high_order[1] = mpcb->rcv_high_order[0] + 1; ++ meta_tp->copied_seq = (u32)idsn; ++ meta_tp->rcv_nxt = (u32)idsn; ++ meta_tp->rcv_wup = (u32)idsn; ++ ++ meta_tp->snd_wl1 = meta_tp->rcv_nxt - 1; ++} ++ +static int mptcp_alloc_mpcb(struct sock *meta_sk, __u64 remote_key, -+ __u8 mptcp_ver, u32 window) ++ int rem_key_set, __u8 mptcp_ver, u32 window) +{ + struct mptcp_cb *mpcb; + struct sock *master_sk; + struct inet_connection_sock *meta_icsk = inet_csk(meta_sk); + struct tcp_sock *master_tp, *meta_tp = tcp_sk(meta_sk); -+ u64 snd_idsn, rcv_idsn; ++ u64 snd_idsn; + + dst_release(meta_sk->sk_rx_dst); + meta_sk->sk_rx_dst = NULL; @@ -10295,17 +10284,11 @@ diff -aurN linux-5.4/net/mptcp/mptcp_ctrl.c mptcp-mptcp_trunk/net/mptcp/mptcp_ct + mpcb->mptcp_loc_token = meta_tp->mptcp_loc_token; + + /* Generate Initial data-sequence-numbers */ -+ mptcp_key_sha1(mpcb->mptcp_loc_key, NULL, &snd_idsn); ++ mptcp_key_hash(mpcb->mptcp_ver, mpcb->mptcp_loc_key, NULL, &snd_idsn); + snd_idsn++; + mpcb->snd_high_order[0] = snd_idsn >> 32; + mpcb->snd_high_order[1] = mpcb->snd_high_order[0] - 1; + -+ mpcb->mptcp_rem_key = remote_key; -+ mptcp_key_sha1(mpcb->mptcp_rem_key, &mpcb->mptcp_rem_token, &rcv_idsn); -+ rcv_idsn++; -+ mpcb->rcv_high_order[0] = rcv_idsn >> 32; -+ mpcb->rcv_high_order[1] = mpcb->rcv_high_order[0] + 1; -+ + mpcb->meta_sk = meta_sk; + mpcb->master_sk = master_sk; + @@ -10417,11 +10400,9 @@ diff -aurN linux-5.4/net/mptcp/mptcp_ctrl.c mptcp-mptcp_trunk/net/mptcp/mptcp_ct + meta_tp->pushed_seq = meta_tp->write_seq; + meta_tp->snd_up = meta_tp->write_seq; + -+ meta_tp->copied_seq = (u32)rcv_idsn; -+ meta_tp->rcv_nxt = (u32)rcv_idsn; -+ meta_tp->rcv_wup = (u32)rcv_idsn; ++ if (rem_key_set) ++ mptcp_initialize_recv_vars(meta_tp, mpcb, remote_key); + -+ meta_tp->snd_wl1 = meta_tp->rcv_nxt - 1; + meta_tp->snd_wnd = window; + meta_tp->retrans_stamp = 0; /* Set in tcp_connect() */ + @@ -11168,12 +11149,12 @@ diff -aurN linux-5.4/net/mptcp/mptcp_ctrl.c mptcp-mptcp_trunk/net/mptcp/mptcp_ct +} + +int mptcp_create_master_sk(struct sock *meta_sk, __u64 remote_key, -+ __u8 mptcp_ver, u32 window) ++ int rem_key_set, __u8 mptcp_ver, u32 window) +{ + struct tcp_sock *master_tp; + struct sock *master_sk; + -+ if (mptcp_alloc_mpcb(meta_sk, remote_key, mptcp_ver, window)) ++ if (mptcp_alloc_mpcb(meta_sk, remote_key, rem_key_set, mptcp_ver, window)) + goto err_alloc_mpcb; + + master_sk = tcp_sk(meta_sk)->mpcb->master_sk; @@ -11201,6 +11182,7 @@ diff -aurN linux-5.4/net/mptcp/mptcp_ctrl.c mptcp-mptcp_trunk/net/mptcp/mptcp_ct +} + +static int __mptcp_check_req_master(struct sock *child, ++ const struct mptcp_options_received *mopt, + struct 
request_sock *req) +{ + struct tcp_sock *child_tp = tcp_sk(child); @@ -11212,6 +11194,8 @@ diff -aurN linux-5.4/net/mptcp/mptcp_ctrl.c mptcp-mptcp_trunk/net/mptcp/mptcp_ct + if (!inet_rsk(req)->mptcp_rqsk) + return 1; + ++ mtreq = mptcp_rsk(req); ++ + if (!inet_rsk(req)->saw_mpc) { + /* Fallback to regular TCP, because we saw one SYN without + * MP_CAPABLE. In tcp_check_req we continue the regular path. @@ -11223,15 +11207,21 @@ diff -aurN linux-5.4/net/mptcp/mptcp_ctrl.c mptcp-mptcp_trunk/net/mptcp/mptcp_ct + return 1; + } + ++ /* mopt can be NULL when coming from FAST-OPEN */ ++ if (mopt && mopt->saw_mpc && mtreq->mptcp_ver == MPTCP_VERSION_1) { ++ mtreq->mptcp_rem_key = mopt->mptcp_sender_key; ++ mtreq->rem_key_set = 1; ++ } ++ + MPTCP_INC_STATS(sock_net(meta_sk), MPTCP_MIB_MPCAPABLEPASSIVEACK); + + /* Just set this values to pass them to mptcp_alloc_mpcb */ -+ mtreq = mptcp_rsk(req); + child_tp->mptcp_loc_key = mtreq->mptcp_loc_key; + child_tp->mptcp_loc_token = mtreq->mptcp_loc_token; + + if (mptcp_create_master_sk(meta_sk, mtreq->mptcp_rem_key, -+ mtreq->mptcp_ver, child_tp->snd_wnd)) { ++ mtreq->rem_key_set, mtreq->mptcp_ver, ++ child_tp->snd_wnd)) { + inet_csk_prepare_forced_close(meta_sk); + tcp_done(meta_sk); + @@ -11266,7 +11256,7 @@ diff -aurN linux-5.4/net/mptcp/mptcp_ctrl.c mptcp-mptcp_trunk/net/mptcp/mptcp_ct + u32 new_mapping; + int ret; + -+ ret = __mptcp_check_req_master(child, req); ++ ret = __mptcp_check_req_master(child, NULL, req); + if (ret) + return ret; + @@ -11309,12 +11299,13 @@ diff -aurN linux-5.4/net/mptcp/mptcp_ctrl.c mptcp-mptcp_trunk/net/mptcp/mptcp_ct + +int mptcp_check_req_master(struct sock *sk, struct sock *child, + struct request_sock *req, const struct sk_buff *skb, ++ const struct mptcp_options_received *mopt, + int drop, u32 tsoff) +{ + struct sock *meta_sk = child; + int ret; + -+ ret = __mptcp_check_req_master(child, req); ++ ret = __mptcp_check_req_master(child, mopt, req); + if (ret) + return ret; + child = tcp_sk(child)->mpcb->master_sk; @@ -11365,18 +11356,17 @@ diff -aurN linux-5.4/net/mptcp/mptcp_ctrl.c mptcp-mptcp_trunk/net/mptcp/mptcp_ct + const struct mptcp_cb *mpcb = tcp_sk(meta_sk)->mpcb; + struct mptcp_request_sock *mtreq = mptcp_rsk(req); + struct tcp_sock *child_tp = tcp_sk(child); -+ u8 hash_mac_check[20]; ++ u8 hash_mac_check[SHA256_DIGEST_SIZE]; + + if (!mopt->join_ack) { + MPTCP_INC_STATS(sock_net(meta_sk), MPTCP_MIB_JOINACKFAIL); + goto teardown; + } + -+ mptcp_hmac_sha1((u8 *)&mpcb->mptcp_rem_key, -+ (u8 *)&mpcb->mptcp_loc_key, -+ (u32 *)hash_mac_check, 2, -+ 4, (u8 *)&mtreq->mptcp_rem_nonce, -+ 4, (u8 *)&mtreq->mptcp_loc_nonce); ++ mptcp_hmac(mpcb->mptcp_ver, (u8 *)&mpcb->mptcp_rem_key, ++ (u8 *)&mpcb->mptcp_loc_key, hash_mac_check, 2, ++ 4, (u8 *)&mtreq->mptcp_rem_nonce, ++ 4, (u8 *)&mtreq->mptcp_loc_nonce); + + if (memcmp(hash_mac_check, (char *)&mopt->mptcp_recv_mac, 20)) { + MPTCP_INC_STATS(sock_net(meta_sk), MPTCP_MIB_JOINACKMAC); @@ -11627,8 +11617,8 @@ diff -aurN linux-5.4/net/mptcp/mptcp_ctrl.c mptcp-mptcp_trunk/net/mptcp/mptcp_ct + struct sk_buff *skb) +{ + struct mptcp_request_sock *mtreq = mptcp_rsk(req); ++ u8 mptcp_hash_mac[SHA256_DIGEST_SIZE]; + struct mptcp_options_received mopt; -+ u8 mptcp_hash_mac[20]; + + mptcp_init_mp_opt(&mopt); + tcp_parse_mptcp_options(skb, &mopt); @@ -11638,11 +11628,10 @@ diff -aurN linux-5.4/net/mptcp/mptcp_ctrl.c mptcp-mptcp_trunk/net/mptcp/mptcp_ct + + mtreq->mptcp_rem_nonce = mopt.mptcp_recv_nonce; + -+ mptcp_hmac_sha1((u8 *)&mpcb->mptcp_loc_key, -+ (u8 *)&mpcb->mptcp_rem_key, -+ 
(u32 *)mptcp_hash_mac, 2, -+ 4, (u8 *)&mtreq->mptcp_loc_nonce, -+ 4, (u8 *)&mtreq->mptcp_rem_nonce); ++ mptcp_hmac(mpcb->mptcp_ver, (u8 *)&mpcb->mptcp_loc_key, ++ (u8 *)&mpcb->mptcp_rem_key, mptcp_hash_mac, 2, ++ 4, (u8 *)&mtreq->mptcp_loc_nonce, ++ 4, (u8 *)&mtreq->mptcp_rem_nonce); + mtreq->mptcp_hash_tmac = *(u64 *)mptcp_hash_mac; + + mtreq->rem_id = mopt.rem_id; @@ -11682,11 +11671,13 @@ diff -aurN linux-5.4/net/mptcp/mptcp_ctrl.c mptcp-mptcp_trunk/net/mptcp/mptcp_ct + /* Absolutely need to always initialize this. */ + mtreq->hash_entry.pprev = NULL; + ++ mtreq->mptcp_ver = mopt->mptcp_ver; + mtreq->mptcp_rem_key = mopt->mptcp_sender_key; + mtreq->mptcp_loc_key = mopt->mptcp_receiver_key; ++ mtreq->rem_key_set = 1; + + /* Generate the token */ -+ mptcp_key_sha1(mtreq->mptcp_loc_key, &mtreq->mptcp_loc_token, NULL); ++ mptcp_key_hash(mtreq->mptcp_ver, mtreq->mptcp_loc_key, &mtreq->mptcp_loc_token, NULL); + + rcu_read_lock(); + local_bh_disable(); @@ -12020,17 +12011,18 @@ diff -aurN linux-5.4/net/mptcp/mptcp_ctrl.c mptcp-mptcp_trunk/net/mptcp/mptcp_ct +{ + struct tcp_sock *meta_tp; + const struct net *net = seq->private; -+ int i, n = 0; ++ unsigned int i, n = 0; + + seq_printf(seq, " sl loc_tok rem_tok v6 local_address remote_address st ns tx_queue rx_queue inode"); + seq_putc(seq, '\n'); + -+ for (i = 0; i < MPTCP_HASH_SIZE; i++) { ++ for (i = 0; i <= mptcp_tk_htable.mask; i++) { + struct hlist_nulls_node *node; + rcu_read_lock(); + local_bh_disable(); + hlist_nulls_for_each_entry_rcu(meta_tp, node, -+ &tk_hashtable[i], tk_table) { ++ &mptcp_tk_htable.hashtable[i], ++ tk_table) { + struct sock *meta_sk = (struct sock *)meta_tp; + struct inet_sock *isk = inet_sk(meta_sk); + struct mptcp_cb *mpcb = meta_tp->mpcb; @@ -12144,10 +12136,27 @@ diff -aurN linux-5.4/net/mptcp/mptcp_ctrl.c mptcp-mptcp_trunk/net/mptcp/mptcp_ct + .exit = mptcp_pm_exit_net, +}; + ++static unsigned long mptcp_htable_entries __initdata; ++ ++static int __init set_mptcp_htable_entries(char *str) ++{ ++ ssize_t ret; ++ ++ if (!str) ++ return 0; ++ ++ ret = kstrtoul(str, 0, &mptcp_htable_entries); ++ if (ret) ++ return 0; ++ ++ return 1; ++} ++__setup("mptcp_htable_entries=", set_mptcp_htable_entries); ++ +/* General initialization of mptcp */ +void __init mptcp_init(void) +{ -+ int i; ++ unsigned int i; + struct ctl_table_header *mptcp_sysctl; + + mptcp_sock_cache = kmem_cache_create("mptcp_sock", @@ -12175,10 +12184,34 @@ diff -aurN linux-5.4/net/mptcp/mptcp_ctrl.c mptcp-mptcp_trunk/net/mptcp/mptcp_ct + if (!mptcp_wq) + goto alloc_workqueue_failed; + -+ for (i = 0; i < MPTCP_HASH_SIZE; i++) { -+ INIT_HLIST_NULLS_HEAD(&tk_hashtable[i], i); -+ INIT_HLIST_NULLS_HEAD(&mptcp_reqsk_tk_htb[i], i); -+ } ++ mptcp_tk_htable.hashtable = ++ alloc_large_system_hash("MPTCP tokens", ++ sizeof(mptcp_tk_htable.hashtable[0]), ++ mptcp_htable_entries, ++ 18, /* one slot per 256KB of memory */ ++ 0, ++ NULL, ++ &mptcp_tk_htable.mask, ++ 1024, ++ mptcp_htable_entries ? 0 : 1024 * 1024); ++ ++ for (i = 0; i <= mptcp_tk_htable.mask; i++) ++ INIT_HLIST_NULLS_HEAD(&mptcp_tk_htable.hashtable[i], i); ++ ++ mptcp_reqsk_tk_htb.hashtable = ++ alloc_large_system_hash("MPTCP request tokens", ++ sizeof(mptcp_reqsk_tk_htb.hashtable[0]), ++ mptcp_htable_entries, ++ 18, /* one slot per 256KB of memory */ ++ 0, ++ NULL, ++ &mptcp_reqsk_tk_htb.mask, ++ 1024, ++ mptcp_htable_entries ? 
0 : 1024 * 1024); ++ ++ for (i = 0; i <= mptcp_reqsk_tk_htb.mask; i++) ++ INIT_HLIST_NULLS_HEAD(&mptcp_reqsk_tk_htb.hashtable[i], i); ++ + + spin_lock_init(&mptcp_tk_hashlock); + @@ -12231,10 +12264,209 @@ diff -aurN linux-5.4/net/mptcp/mptcp_ctrl.c mptcp-mptcp_trunk/net/mptcp/mptcp_ct +mptcp_sock_cache_failed: + mptcp_init_failed = true; +} +diff -aurN linux-5.4/net/mptcp/mptcp_ecf.c mptcp-mptcp_trunk/net/mptcp/mptcp_ecf.c +--- linux-5.4/net/mptcp/mptcp_ecf.c 1970-01-01 01:00:00.000000000 +0100 ++++ mptcp-mptcp_trunk/net/mptcp/mptcp_ecf.c 2020-06-09 17:33:41.000000000 +0200 +@@ -0,0 +1,195 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* MPTCP ECF Scheduler ++ * ++ * Algorithm Design: ++ * Yeon-sup Lim ++ * Don Towsley ++ * Erich M. Nahum ++ * Richard J. Gibbens ++ * ++ * Initial Implementation: ++ * Yeon-sup Lim ++ * ++ * Additional Authors: ++ * Daniel Weber ++ * ++ * This program is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License ++ * as published by the Free Software Foundation; either version ++ * 2 of the License, or (at your option) any later version. ++ */ ++ ++#include ++#include ++ ++static unsigned int mptcp_ecf_r_beta __read_mostly = 4; /* beta = 1/r_beta = 0.25 */ ++module_param(mptcp_ecf_r_beta, int, 0644); ++MODULE_PARM_DESC(mptcp_ecf_r_beta, "beta for ECF"); ++ ++struct ecfsched_priv { ++ u32 last_rbuf_opti; ++}; ++ ++struct ecfsched_cb { ++ u32 switching_margin; /* this is "waiting" in algorithm description */ ++}; ++ ++static struct ecfsched_priv *ecfsched_get_priv(const struct tcp_sock *tp) ++{ ++ return (struct ecfsched_priv *)&tp->mptcp->mptcp_sched[0]; ++} ++ ++static struct ecfsched_cb *ecfsched_get_cb(const struct tcp_sock *tp) ++{ ++ return (struct ecfsched_cb *)&tp->mpcb->mptcp_sched[0]; ++} ++ ++/* This is the ECF scheduler. This function decides on which flow to send ++ * a given MSS. If all subflows are found to be busy or the currently best ++ * subflow is estimated to be slower than waiting for minsk, NULL is returned. ++ */ ++static struct sock *ecf_get_available_subflow(struct sock *meta_sk, ++ struct sk_buff *skb, ++ bool zero_wnd_test) ++{ ++ struct mptcp_cb *mpcb = tcp_sk(meta_sk)->mpcb; ++ struct sock *bestsk, *minsk = NULL; ++ struct tcp_sock *besttp; ++ struct mptcp_tcp_sock *mptcp; ++ struct ecfsched_cb *ecf_cb = ecfsched_get_cb(tcp_sk(meta_sk)); ++ u32 min_srtt = U32_MAX; ++ u32 sub_sndbuf = 0; ++ u32 sub_packets_out = 0; ++ ++ /* Answer data_fin on same subflow!!! */ ++ if (meta_sk->sk_shutdown & RCV_SHUTDOWN && ++ skb && mptcp_is_data_fin(skb)) { ++ mptcp_for_each_sub(mpcb, mptcp) { ++ bestsk = mptcp_to_sock(mptcp); ++ ++ if (tcp_sk(bestsk)->mptcp->path_index == mpcb->dfin_path_index && ++ mptcp_is_available(bestsk, skb, zero_wnd_test)) ++ return bestsk; ++ } ++ } ++ ++ /* First, find the overall best (fastest) subflow */ ++ mptcp_for_each_sub(mpcb, mptcp) { ++ bestsk = mptcp_to_sock(mptcp); ++ besttp = tcp_sk(bestsk); ++ ++ /* Set of states for which we are allowed to send data */ ++ if (!mptcp_sk_can_send(bestsk)) ++ continue; ++ ++ /* We do not send data on this subflow unless it is ++ * fully established, i.e. the 4th ack has been received. 
++ */ ++ if (besttp->mptcp->pre_established) ++ continue; ++ ++ sub_sndbuf += bestsk->sk_wmem_queued; ++ sub_packets_out += besttp->packets_out; ++ ++ /* record minimal rtt */ ++ if (besttp->srtt_us < min_srtt) { ++ min_srtt = besttp->srtt_us; ++ minsk = bestsk; ++ } ++ } ++ ++ /* find the current best subflow according to the default scheduler */ ++ bestsk = get_available_subflow(meta_sk, skb, zero_wnd_test); ++ ++ /* if we decided to use a slower flow, we have the option of not using it at all */ ++ if (bestsk && minsk && bestsk != minsk) { ++ u32 mss = tcp_current_mss(bestsk); /* assuming equal MSS */ ++ u32 sndbuf_meta = meta_sk->sk_wmem_queued; ++ u32 sndbuf_minus = sub_sndbuf; ++ u32 sndbuf = 0; ++ ++ u32 cwnd_f = tcp_sk(minsk)->snd_cwnd; ++ u32 srtt_f = tcp_sk(minsk)->srtt_us >> 3; ++ u32 rttvar_f = tcp_sk(minsk)->rttvar_us >> 1; ++ ++ u32 cwnd_s = tcp_sk(bestsk)->snd_cwnd; ++ u32 srtt_s = tcp_sk(bestsk)->srtt_us >> 3; ++ u32 rttvar_s = tcp_sk(bestsk)->rttvar_us >> 1; ++ ++ u32 delta = max(rttvar_f, rttvar_s); ++ ++ u32 x_f; ++ u64 lhs, rhs; /* to avoid overflow, using u64 */ ++ ++ if (tcp_sk(meta_sk)->packets_out > sub_packets_out) ++ sndbuf_minus += (tcp_sk(meta_sk)->packets_out - sub_packets_out) * mss; ++ ++ if (sndbuf_meta > sndbuf_minus) ++ sndbuf = sndbuf_meta - sndbuf_minus; ++ ++ /* we have something to send. ++ * at least one time tx over fastest subflow is required ++ */ ++ x_f = sndbuf > cwnd_f * mss ? sndbuf : cwnd_f * mss; ++ lhs = srtt_f * (x_f + cwnd_f * mss); ++ rhs = cwnd_f * mss * (srtt_s + delta); ++ ++ if (mptcp_ecf_r_beta * lhs < mptcp_ecf_r_beta * rhs + ecf_cb->switching_margin * rhs) { ++ u32 x_s = sndbuf > cwnd_s * mss ? sndbuf : cwnd_s * mss; ++ u64 lhs_s = srtt_s * x_s; ++ u64 rhs_s = cwnd_s * mss * (2 * srtt_f + delta); ++ ++ if (lhs_s >= rhs_s) { ++ /* too slower than fastest */ ++ ecf_cb->switching_margin = 1; ++ return NULL; ++ } ++ } else { ++ /* use slower one */ ++ ecf_cb->switching_margin = 0; ++ } ++ } ++ ++ return bestsk; ++} ++ ++static void ecfsched_init(struct sock *sk) ++{ ++ struct ecfsched_priv *ecf_p = ecfsched_get_priv(tcp_sk(sk)); ++ struct ecfsched_cb *ecf_cb = ecfsched_get_cb(tcp_sk(mptcp_meta_sk(sk))); ++ ++ ecf_p->last_rbuf_opti = tcp_jiffies32; ++ ecf_cb->switching_margin = 0; ++} ++ ++struct mptcp_sched_ops mptcp_sched_ecf = { ++ .get_subflow = ecf_get_available_subflow, ++ .next_segment = mptcp_next_segment, ++ .init = ecfsched_init, ++ .name = "ecf", ++ .owner = THIS_MODULE, ++}; ++ ++static int __init ecf_register(void) ++{ ++ BUILD_BUG_ON(sizeof(struct ecfsched_priv) > MPTCP_SCHED_SIZE); ++ BUILD_BUG_ON(sizeof(struct ecfsched_cb) > MPTCP_SCHED_DATA_SIZE); ++ ++ if (mptcp_register_scheduler(&mptcp_sched_ecf)) ++ return -1; ++ ++ return 0; ++} ++ ++static void ecf_unregister(void) ++{ ++ mptcp_unregister_scheduler(&mptcp_sched_ecf); ++} ++ ++module_init(ecf_register); ++module_exit(ecf_unregister); ++ ++MODULE_AUTHOR("Yeon-sup Lim, Daniel Weber"); ++MODULE_LICENSE("GPL"); ++MODULE_DESCRIPTION("ECF (Earliest Completion First) scheduler for MPTCP, based on default minimum RTT scheduler"); ++MODULE_VERSION("0.95"); diff -aurN linux-5.4/net/mptcp/mptcp_fullmesh.c mptcp-mptcp_trunk/net/mptcp/mptcp_fullmesh.c --- linux-5.4/net/mptcp/mptcp_fullmesh.c 1970-01-01 01:00:00.000000000 +0100 -+++ mptcp-mptcp_trunk/net/mptcp/mptcp_fullmesh.c 2020-02-20 18:07:47.000000000 +0100 -@@ -0,0 +1,1943 @@ ++++ mptcp-mptcp_trunk/net/mptcp/mptcp_fullmesh.c 2020-06-09 17:33:41.000000000 +0200 +@@ -0,0 +1,1938 @@ +#include +#include + @@ -13082,12 
+13314,13 @@ diff -aurN linux-5.4/net/mptcp/mptcp_fullmesh.c mptcp-mptcp_trunk/net/mptcp/mptc + goto next_event; + + /* Now we iterate over the MPTCP-sockets and apply the event. */ -+ for (i = 0; i < MPTCP_HASH_SIZE; i++) { ++ for (i = 0; i <= mptcp_tk_htable.mask; i++) { + const struct hlist_nulls_node *node; + struct tcp_sock *meta_tp; + + rcu_read_lock_bh(); -+ hlist_nulls_for_each_entry_rcu(meta_tp, node, &tk_hashtable[i], ++ hlist_nulls_for_each_entry_rcu(meta_tp, node, ++ &mptcp_tk_htable.hashtable[i], + tk_table) { + struct sock *meta_sk = (struct sock *)meta_tp, *sk; + bool meta_v4 = meta_sk->sk_family == AF_INET; @@ -13829,16 +14062,13 @@ diff -aurN linux-5.4/net/mptcp/mptcp_fullmesh.c mptcp-mptcp_trunk/net/mptcp/mptc + opts->add_addr4.addr = mptcp_local->locaddr4[ind].addr; + opts->add_addr_v4 = 1; + if (mpcb->mptcp_ver >= MPTCP_VERSION_1) { -+ u8 mptcp_hash_mac[20]; -+ u8 no_key[8]; ++ u8 mptcp_hash_mac[SHA256_DIGEST_SIZE]; + -+ *(u64 *)no_key = 0; -+ mptcp_hmac_sha1((u8 *)&mpcb->mptcp_loc_key, -+ (u8 *)no_key, -+ (u32 *)mptcp_hash_mac, 2, -+ 1, (u8 *)&mptcp_local->locaddr4[ind].loc4_id, -+ 4, (u8 *)&opts->add_addr4.addr.s_addr); -+ opts->add_addr4.trunc_mac = *(u64 *)mptcp_hash_mac; ++ mptcp_hmac(mpcb->mptcp_ver, (u8 *)&mpcb->mptcp_loc_key, ++ (u8 *)&mpcb->mptcp_rem_key, mptcp_hash_mac, 2, ++ 1, (u8 *)&mptcp_local->locaddr4[ind].loc4_id, ++ 4, (u8 *)&opts->add_addr4.addr.s_addr); ++ opts->add_addr4.trunc_mac = *(u64 *)&mptcp_hash_mac[SHA256_DIGEST_SIZE - sizeof(u64)]; + } + + if (skb) { @@ -13872,16 +14102,13 @@ diff -aurN linux-5.4/net/mptcp/mptcp_fullmesh.c mptcp-mptcp_trunk/net/mptcp/mptc + opts->add_addr6.addr = mptcp_local->locaddr6[ind].addr; + opts->add_addr_v6 = 1; + if (mpcb->mptcp_ver >= MPTCP_VERSION_1) { -+ u8 mptcp_hash_mac[20]; -+ u8 no_key[8]; ++ u8 mptcp_hash_mac[SHA256_DIGEST_SIZE]; + -+ *(u64 *)no_key = 0; -+ mptcp_hmac_sha1((u8 *)&mpcb->mptcp_loc_key, -+ (u8 *)no_key, -+ (u32 *)mptcp_hash_mac, 2, -+ 1, (u8 *)&mptcp_local->locaddr6[ind].loc6_id, -+ 16, (u8 *)&opts->add_addr6.addr.s6_addr); -+ opts->add_addr6.trunc_mac = *(u64 *)mptcp_hash_mac; ++ mptcp_hmac(mpcb->mptcp_ver, (u8 *)&mpcb->mptcp_loc_key, ++ (u8 *)&mpcb->mptcp_rem_key, mptcp_hash_mac, 2, ++ 1, (u8 *)&mptcp_local->locaddr6[ind].loc6_id, ++ 16, (u8 *)&opts->add_addr6.addr.s6_addr); ++ opts->add_addr6.trunc_mac = *(u64 *)&mptcp_hash_mac[SHA256_DIGEST_SIZE - sizeof(u64)]; + } + + if (skb) { @@ -14180,8 +14407,8 @@ diff -aurN linux-5.4/net/mptcp/mptcp_fullmesh.c mptcp-mptcp_trunk/net/mptcp/mptc +MODULE_VERSION("0.88"); diff -aurN linux-5.4/net/mptcp/mptcp_input.c mptcp-mptcp_trunk/net/mptcp/mptcp_input.c --- linux-5.4/net/mptcp/mptcp_input.c 1970-01-01 01:00:00.000000000 +0100 -+++ mptcp-mptcp_trunk/net/mptcp/mptcp_input.c 2020-02-20 18:07:47.000000000 +0100 -@@ -0,0 +1,2436 @@ ++++ mptcp-mptcp_trunk/net/mptcp/mptcp_input.c 2020-06-09 17:33:41.000000000 +0200 +@@ -0,0 +1,2529 @@ +/* + * MPTCP implementation - Sending side + * @@ -14360,6 +14587,10 @@ diff -aurN linux-5.4/net/mptcp/mptcp_input.c mptcp-mptcp_trunk/net/mptcp/mptcp_i +} + +/* Inspired by tcp_rcv_state_process */ ++/* Returns 0 if processing the packet can continue ++ * -1 if connection was closed with an active reset ++ * 1 if connection was closed and processing should stop. 
++ */ +static int mptcp_rcv_state_process(struct sock *meta_sk, struct sock *sk, + const struct sk_buff *skb, u32 data_seq, + u16 data_len) @@ -14400,7 +14631,7 @@ diff -aurN linux-5.4/net/mptcp/mptcp_input.c mptcp-mptcp_trunk/net/mptcp/mptcp_i + mptcp_send_active_reset(meta_sk, GFP_ATOMIC); + tcp_done(meta_sk); + __NET_INC_STATS(sock_net(meta_sk), LINUX_MIB_TCPABORTONDATA); -+ return 1; ++ return -1; + } + + tmo = tcp_fin_time(meta_sk); @@ -14443,7 +14674,7 @@ diff -aurN linux-5.4/net/mptcp/mptcp_input.c mptcp-mptcp_trunk/net/mptcp/mptcp_i + __NET_INC_STATS(sock_net(meta_sk), LINUX_MIB_TCPABORTONDATA); + mptcp_send_active_reset(meta_sk, GFP_ATOMIC); + tcp_reset(meta_sk); -+ return 1; ++ return -1; + } + } + break; @@ -14528,6 +14759,17 @@ diff -aurN linux-5.4/net/mptcp/mptcp_input.c mptcp-mptcp_trunk/net/mptcp/mptcp_i + sizeof(data_seq), csum_tcp); + + dss_csum_added = 1; /* Just do it once */ ++ } else if (mptcp_is_data_mpcapable(tmp) && !dss_csum_added) { ++ u32 offset = skb_transport_offset(tmp) + TCP_SKB_CB(tmp)->dss_off; ++ __be64 data_seq = htonll(tp->mptcp->map_data_seq); ++ __be32 rel_seq = htonl(tp->mptcp->map_subseq - tp->mptcp->rcv_isn); ++ ++ csum_tcp = csum_partial(&data_seq, sizeof(data_seq), csum_tcp); ++ csum_tcp = csum_partial(&rel_seq, sizeof(rel_seq), csum_tcp); ++ ++ csum_tcp = skb_checksum(tmp, offset, 4, csum_tcp); ++ ++ dss_csum_added = 1; + } + last = tmp; + iter++; @@ -14738,11 +14980,12 @@ diff -aurN linux-5.4/net/mptcp/mptcp_input.c mptcp-mptcp_trunk/net/mptcp/mptcp_i + * this segment, this path has to fallback to infinite or be torn down. + */ + if (!tp->mptcp->fully_established && !mptcp_is_data_seq(skb) && ++ !mptcp_is_data_mpcapable(skb) && + !tp->mptcp->mapping_present && !mpcb->infinite_mapping_rcv) { -+ pr_debug("%s %#x will fallback - pi %d from %pS, seq %u\n", ++ pr_debug("%s %#x will fallback - pi %d from %pS, seq %u mptcp-flags %#x\n", + __func__, mpcb->mptcp_loc_token, + tp->mptcp->path_index, __builtin_return_address(0), -+ TCP_SKB_CB(skb)->seq); ++ TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->mptcp_flags); + + if (!is_master_tp(tp)) { + MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_FBDATASUB); @@ -14850,25 +15093,36 @@ diff -aurN linux-5.4/net/mptcp/mptcp_input.c mptcp-mptcp_trunk/net/mptcp/mptcp_i + return 0; + } + -+ /* No mapping here? Exit - it is either already set or still on its way */ -+ if (!mptcp_is_data_seq(skb)) { -+ /* Too many packets without a mapping - this subflow is broken */ ++ if (!tp->mptcp->mapping_present && mptcp_is_data_mpcapable(skb)) { ++ __u32 *ptr = (__u32 *)(skb_transport_header(skb) + TCP_SKB_CB(skb)->dss_off); ++ ++ sub_seq = 1 + tp->mptcp->rcv_isn; ++ data_seq = meta_tp->rcv_nxt; ++ data_len = get_unaligned_be16(ptr); ++ } else if (!mptcp_is_data_seq(skb)) { ++ /* No mapping here? ++ * Exit - it is either already set or still on its way ++ */ + if (!tp->mptcp->mapping_present && + tp->rcv_nxt - tp->copied_seq > 65536) { ++ /* Too many packets without a mapping, ++ * this subflow is broken ++ */ + MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_NODSSWINDOW); + mptcp_send_reset(sk); + return 1; + } + + return 0; ++ } else { ++ /* Well, then the DSS-mapping is there. So, read it! 
*/ ++ ptr = mptcp_skb_set_data_seq(skb, &data_seq, mpcb); ++ ptr++; ++ sub_seq = get_unaligned_be32(ptr) + tp->mptcp->rcv_isn; ++ ptr++; ++ data_len = get_unaligned_be16(ptr); + } + -+ ptr = mptcp_skb_set_data_seq(skb, &data_seq, mpcb); -+ ptr++; -+ sub_seq = get_unaligned_be32(ptr) + tp->mptcp->rcv_isn; -+ ptr++; -+ data_len = get_unaligned_be16(ptr); -+ + /* If it's an empty skb with DATA_FIN, sub_seq must get fixed. + * The draft sets it to 0, but we really would like to have the + * real value, to have an easy handling afterwards here in this @@ -15581,7 +15835,7 @@ diff -aurN linux-5.4/net/mptcp/mptcp_input.c mptcp-mptcp_trunk/net/mptcp/mptcp_i +} + +/* Handle the DATA_ACK */ -+static void mptcp_data_ack(struct sock *sk, const struct sk_buff *skb) ++static bool mptcp_process_data_ack(struct sock *sk, const struct sk_buff *skb) +{ + struct sock *meta_sk = mptcp_meta_sk(sk); + struct tcp_sock *meta_tp = tcp_sk(meta_sk), *tp = tcp_sk(sk); @@ -15609,7 +15863,7 @@ diff -aurN linux-5.4/net/mptcp/mptcp_input.c mptcp-mptcp_trunk/net/mptcp/mptcp_i + * set by mptcp_clean_rtx_infinite. + */ + if (!(tcb->mptcp_flags & MPTCPHDR_ACK) && !tp->mpcb->infinite_mapping_snd) -+ return; ++ return false; + + if (unlikely(!tp->mptcp->fully_established) && + tp->mptcp->snt_isn + 1 != TCP_SKB_CB(skb)->ack_seq) @@ -15623,7 +15877,7 @@ diff -aurN linux-5.4/net/mptcp/mptcp_input.c mptcp-mptcp_trunk/net/mptcp/mptcp_i + * processing. + */ + if (meta_sk->sk_state == TCP_CLOSE) -+ return; ++ return false; + + /* Get the data_seq */ + if (mptcp_is_data_seq(skb)) { @@ -15647,6 +15901,9 @@ diff -aurN linux-5.4/net/mptcp/mptcp_input.c mptcp-mptcp_trunk/net/mptcp/mptcp_i + if (after(data_ack, meta_tp->snd_nxt)) + goto exit; + ++ /* First valid DATA_ACK, we can stop sending the special MP_CAPABLE */ ++ tp->mpcb->send_mptcpv1_mpcapable = 0; ++ + /*** Now, update the window - inspired by tcp_ack_update_window ***/ + nwin = ntohs(tcp_hdr(skb)->window); + @@ -15704,14 +15961,19 @@ diff -aurN linux-5.4/net/mptcp/mptcp_input.c mptcp-mptcp_trunk/net/mptcp/mptcp_i + meta_sk->sk_write_space(meta_sk); + } + -+ if (meta_sk->sk_state != TCP_ESTABLISHED && -+ mptcp_rcv_state_process(meta_sk, sk, skb, data_seq, data_len)) -+ return; ++ if (meta_sk->sk_state != TCP_ESTABLISHED) { ++ int ret = mptcp_rcv_state_process(meta_sk, sk, skb, data_seq, data_len); ++ ++ if (ret < 0) ++ return true; ++ else if (ret > 0) ++ return false; ++ } + +exit: + mptcp_push_pending_frames(meta_sk); + -+ return; ++ return false; + +no_queue: + if (tcp_send_head(meta_sk)) @@ -15719,7 +15981,7 @@ diff -aurN linux-5.4/net/mptcp/mptcp_input.c mptcp-mptcp_trunk/net/mptcp/mptcp_i + + mptcp_push_pending_frames(meta_sk); + -+ return; ++ return false; +} + +void mptcp_clean_rtx_infinite(const struct sk_buff *skb, struct sock *sk) @@ -15738,7 +16000,7 @@ diff -aurN linux-5.4/net/mptcp/mptcp_input.c mptcp-mptcp_trunk/net/mptcp/mptcp_i + tp->mptcp->rx_opt.data_ack = meta_tp->snd_nxt - tp->snd_nxt + + tp->snd_una; + -+ mptcp_data_ack(sk, skb); ++ mptcp_process_data_ack(sk, skb); +} + +/**** static functions used by mptcp_parse_options */ @@ -15763,19 +16025,21 @@ diff -aurN linux-5.4/net/mptcp/mptcp_input.c mptcp-mptcp_trunk/net/mptcp/mptcp_i + int opsize) +{ +#if IS_ENABLED(CONFIG_IPV6) -+ if (mptcp_ver < MPTCP_VERSION_1 && mpadd->ipver == 6) { ++ if (mptcp_ver < MPTCP_VERSION_1 && mpadd->u_bit.v0.ipver == 6) { + return opsize == MPTCP_SUB_LEN_ADD_ADDR6 || + opsize == MPTCP_SUB_LEN_ADD_ADDR6 + 2; + } -+ if (mptcp_ver >= MPTCP_VERSION_1 && mpadd->ipver == 6) ++ if 
(mptcp_ver >= MPTCP_VERSION_1) + return opsize == MPTCP_SUB_LEN_ADD_ADDR6_VER1 || -+ opsize == MPTCP_SUB_LEN_ADD_ADDR6_VER1 + 2; ++ opsize == MPTCP_SUB_LEN_ADD_ADDR6_VER1 + 2 || ++ opsize == MPTCP_SUB_LEN_ADD_ADDR4_VER1 || ++ opsize == MPTCP_SUB_LEN_ADD_ADDR4_VER1 + 2; +#endif -+ if (mptcp_ver < MPTCP_VERSION_1 && mpadd->ipver == 4) { ++ if (mptcp_ver < MPTCP_VERSION_1 && mpadd->u_bit.v0.ipver == 4) { + return opsize == MPTCP_SUB_LEN_ADD_ADDR4 || + opsize == MPTCP_SUB_LEN_ADD_ADDR4 + 2; + } -+ if (mptcp_ver >= MPTCP_VERSION_1 && mpadd->ipver == 4) { ++ if (mptcp_ver >= MPTCP_VERSION_1) { + return opsize == MPTCP_SUB_LEN_ADD_ADDR4_VER1 || + opsize == MPTCP_SUB_LEN_ADD_ADDR4_VER1 + 2; + } @@ -15788,6 +16052,7 @@ diff -aurN linux-5.4/net/mptcp/mptcp_input.c mptcp-mptcp_trunk/net/mptcp/mptcp_i + struct tcp_sock *tp) +{ + const struct mptcp_option *mp_opt = (struct mptcp_option *)ptr; ++ const struct tcphdr *th = tcp_hdr(skb); + + /* If the socket is mp-capable we would have a mopt. */ + if (!mopt) @@ -15798,9 +16063,21 @@ diff -aurN linux-5.4/net/mptcp/mptcp_input.c mptcp-mptcp_trunk/net/mptcp/mptcp_i + { + const struct mp_capable *mpcapable = (struct mp_capable *)ptr; + -+ if (opsize != MPTCP_SUB_LEN_CAPABLE_SYN && -+ opsize != MPTCP_SUB_LEN_CAPABLE_ACK) { -+ mptcp_debug("%s: mp_capable: bad option size %d\n", ++ if (mpcapable->ver == MPTCP_VERSION_0 && ++ ((th->syn && opsize != MPTCP_SUB_LEN_CAPABLE_SYN) || ++ (!th->syn && th->ack && opsize != MPTCP_SUB_LEN_CAPABLE_ACK))) { ++ mptcp_debug("%s: mp_capable v0: bad option size %d\n", ++ __func__, opsize); ++ break; ++ } ++ ++ if (mpcapable->ver == MPTCP_VERSION_1 && ++ ((th->syn && !th->ack && opsize != MPTCPV1_SUB_LEN_CAPABLE_SYN) || ++ (th->syn && th->ack && opsize != MPTCPV1_SUB_LEN_CAPABLE_SYNACK) || ++ (!th->syn && th->ack && opsize != MPTCPV1_SUB_LEN_CAPABLE_ACK && ++ opsize != MPTCPV1_SUB_LEN_CAPABLE_DATA && ++ opsize != MPTCPV1_SUB_LEN_CAPABLE_DATA_CSUM))) { ++ mptcp_debug("%s: mp_capable v1: bad option size %d\n", + __func__, opsize); + break; + } @@ -15824,10 +16101,38 @@ diff -aurN linux-5.4/net/mptcp/mptcp_input.c mptcp-mptcp_trunk/net/mptcp/mptcp_i + mopt->saw_mpc = 1; + mopt->dss_csum = sysctl_mptcp_checksum || mpcapable->a; + -+ if (opsize >= MPTCP_SUB_LEN_CAPABLE_SYN) -+ mopt->mptcp_sender_key = mpcapable->sender_key; -+ if (opsize == MPTCP_SUB_LEN_CAPABLE_ACK) -+ mopt->mptcp_receiver_key = mpcapable->receiver_key; ++ if (mpcapable->ver == MPTCP_VERSION_0) { ++ if (opsize == MPTCP_SUB_LEN_CAPABLE_SYN) ++ mopt->mptcp_sender_key = mpcapable->sender_key; ++ ++ if (opsize == MPTCP_SUB_LEN_CAPABLE_ACK) { ++ mopt->mptcp_sender_key = mpcapable->sender_key; ++ mopt->mptcp_receiver_key = mpcapable->receiver_key; ++ } ++ } else if (mpcapable->ver == MPTCP_VERSION_1) { ++ if (opsize == MPTCPV1_SUB_LEN_CAPABLE_SYNACK) ++ mopt->mptcp_sender_key = mpcapable->sender_key; ++ ++ if (opsize == MPTCPV1_SUB_LEN_CAPABLE_ACK) { ++ mopt->mptcp_sender_key = mpcapable->sender_key; ++ mopt->mptcp_receiver_key = mpcapable->receiver_key; ++ } ++ ++ if (opsize == MPTCPV1_SUB_LEN_CAPABLE_DATA || ++ opsize == MPTCPV1_SUB_LEN_CAPABLE_DATA_CSUM) { ++ mopt->mptcp_sender_key = mpcapable->sender_key; ++ mopt->mptcp_receiver_key = mpcapable->receiver_key; ++ ++ TCP_SKB_CB(skb)->mptcp_flags |= MPTCPHDR_MPC_DATA; ++ ++ ptr += sizeof(struct mp_capable); ++ TCP_SKB_CB(skb)->dss_off = (ptr - skb_transport_header(skb)); ++ ++ /* Is a check-sum present? 
*/ ++ if (opsize == MPTCPV1_SUB_LEN_CAPABLE_DATA_CSUM) ++ TCP_SKB_CB(skb)->mptcp_flags |= MPTCPHDR_DSS_CSUM; ++ } ++ } + + mopt->mptcp_ver = mpcapable->ver; + break; @@ -16080,34 +16385,42 @@ diff -aurN linux-5.4/net/mptcp/mptcp_input.c mptcp-mptcp_trunk/net/mptcp/mptcp_i +{ + struct mp_add_addr *mpadd = (struct mp_add_addr *)ptr; + struct mptcp_cb *mpcb = tcp_sk(sk)->mpcb; -+ __be16 port = 0; + union inet_addr addr; + sa_family_t family; ++ __be16 port = 0; ++ bool is_v4; + -+ if (mpadd->ipver == 4) { ++ if (mpcb->mptcp_ver < MPTCP_VERSION_1) { ++ is_v4 = mpadd->u_bit.v0.ipver == 4; ++ } else { ++ is_v4 = mpadd->len == MPTCP_SUB_LEN_ADD_ADDR4_VER1 || ++ mpadd->len == MPTCP_SUB_LEN_ADD_ADDR4_VER1 + 2; ++ ++ /* TODO: support ADD_ADDRv1 retransmissions */ ++ if (mpadd->u_bit.v1.echo) ++ return; ++ } ++ ++ if (is_v4) { ++ u8 hash_mac_check[SHA256_DIGEST_SIZE]; ++ __be16 hmacport = 0; + char *recv_hmac; -+ u8 hash_mac_check[20]; -+ u8 no_key[8]; -+ int msg_parts = 0; + + if (mpcb->mptcp_ver < MPTCP_VERSION_1) + goto skip_hmac_v4; + -+ *(u64 *)no_key = 0; + recv_hmac = (char *)mpadd->u.v4.mac; + if (mpadd->len == MPTCP_SUB_LEN_ADD_ADDR4_VER1) { + recv_hmac -= sizeof(mpadd->u.v4.port); -+ msg_parts = 2; + } else if (mpadd->len == MPTCP_SUB_LEN_ADD_ADDR4_VER1 + 2) { -+ msg_parts = 3; ++ hmacport = mpadd->u.v4.port; + } -+ mptcp_hmac_sha1((u8 *)&mpcb->mptcp_rem_key, -+ (u8 *)no_key, -+ (u32 *)hash_mac_check, msg_parts, -+ 1, (u8 *)&mpadd->addr_id, -+ 4, (u8 *)&mpadd->u.v4.addr.s_addr, -+ 2, (u8 *)&mpadd->u.v4.port); -+ if (memcmp(hash_mac_check, recv_hmac, 8) != 0) ++ mptcp_hmac(mpcb->mptcp_ver, (u8 *)&mpcb->mptcp_rem_key, ++ (u8 *)&mpcb->mptcp_loc_key, hash_mac_check, 3, ++ 1, (u8 *)&mpadd->addr_id, ++ 4, (u8 *)&mpadd->u.v4.addr.s_addr, ++ 2, (u8 *)&hmacport); ++ if (memcmp(&hash_mac_check[SHA256_DIGEST_SIZE - sizeof(u64)], recv_hmac, 8) != 0) + /* ADD_ADDR2 discarded */ + return; +skip_hmac_v4: @@ -16119,30 +16432,26 @@ diff -aurN linux-5.4/net/mptcp/mptcp_input.c mptcp-mptcp_trunk/net/mptcp/mptcp_i + family = AF_INET; + addr.in = mpadd->u.v4.addr; +#if IS_ENABLED(CONFIG_IPV6) -+ } else if (mpadd->ipver == 6) { ++ } else { ++ u8 hash_mac_check[SHA256_DIGEST_SIZE]; ++ __be16 hmacport = 0; + char *recv_hmac; -+ u8 hash_mac_check[20]; -+ u8 no_key[8]; -+ int msg_parts = 0; + + if (mpcb->mptcp_ver < MPTCP_VERSION_1) + goto skip_hmac_v6; + -+ *(u64 *)no_key = 0; + recv_hmac = (char *)mpadd->u.v6.mac; + if (mpadd->len == MPTCP_SUB_LEN_ADD_ADDR6_VER1) { + recv_hmac -= sizeof(mpadd->u.v6.port); -+ msg_parts = 2; + } else if (mpadd->len == MPTCP_SUB_LEN_ADD_ADDR6_VER1 + 2) { -+ msg_parts = 3; ++ hmacport = mpadd->u.v6.port; + } -+ mptcp_hmac_sha1((u8 *)&mpcb->mptcp_rem_key, -+ (u8 *)no_key, -+ (u32 *)hash_mac_check, msg_parts, -+ 1, (u8 *)&mpadd->addr_id, -+ 16, (u8 *)&mpadd->u.v6.addr.s6_addr, -+ 2, (u8 *)&mpadd->u.v6.port); -+ if (memcmp(hash_mac_check, recv_hmac, 8) != 0) ++ mptcp_hmac(mpcb->mptcp_ver, (u8 *)&mpcb->mptcp_rem_key, ++ (u8 *)&mpcb->mptcp_loc_key, hash_mac_check, 3, ++ 1, (u8 *)&mpadd->addr_id, ++ 16, (u8 *)&mpadd->u.v6.addr.s6_addr, ++ 2, (u8 *)&hmacport); ++ if (memcmp(&hash_mac_check[SHA256_DIGEST_SIZE - sizeof(u64)], recv_hmac, 8) != 0) + /* ADD_ADDR2 discarded */ + return; +skip_hmac_v6: @@ -16154,8 +16463,6 @@ diff -aurN linux-5.4/net/mptcp/mptcp_input.c mptcp-mptcp_trunk/net/mptcp/mptcp_i + family = AF_INET6; + addr.in6 = mpadd->u.v6.addr; +#endif /* CONFIG_IPV6 */ -+ } else { -+ return; + } + + if (mpcb->pm_ops->add_raddr) @@ -16299,6 +16606,10 @@ diff -aurN 
linux-5.4/net/mptcp/mptcp_input.c mptcp-mptcp_trunk/net/mptcp/mptcp_i + if (sk->sk_state == TCP_RST_WAIT && !th->rst) + return true; + ++ if (mopt->saw_mpc && !tp->mpcb->rem_key_set) ++ mptcp_initialize_recv_vars(mptcp_meta_tp(tp), tp->mpcb, ++ mopt->mptcp_sender_key); ++ + if (unlikely(mopt->mp_fail)) + mptcp_mp_fail_rcvd(sk, th); + @@ -16306,7 +16617,8 @@ diff -aurN linux-5.4/net/mptcp/mptcp_input.c mptcp-mptcp_trunk/net/mptcp/mptcp_i + * If a checksum is not present when its use has been negotiated, the + * receiver MUST close the subflow with a RST as it is considered broken. + */ -+ if (mptcp_is_data_seq(skb) && tp->mpcb->dss_csum && ++ if ((mptcp_is_data_seq(skb) || mptcp_is_data_mpcapable(skb)) && ++ tp->mpcb->dss_csum && + !(TCP_SKB_CB(skb)->mptcp_flags & MPTCPHDR_DSS_CSUM)) { + mptcp_send_reset(sk); + return true; @@ -16355,7 +16667,8 @@ diff -aurN linux-5.4/net/mptcp/mptcp_input.c mptcp-mptcp_trunk/net/mptcp/mptcp_i + mopt->saw_low_prio = 0; + } + -+ mptcp_data_ack(sk, skb); ++ if (mptcp_process_data_ack(sk, skb)) ++ return true; + + mptcp_path_array_check(mptcp_meta_sk(sk)); + /* Socket may have been mp_killed by a REMOVE_ADDR */ @@ -16478,14 +16791,13 @@ diff -aurN linux-5.4/net/mptcp/mptcp_input.c mptcp-mptcp_trunk/net/mptcp/mptcp_i + struct tcp_sock *tp = tcp_sk(sk); + + if (mptcp(tp)) { -+ u8 hash_mac_check[20]; ++ u8 hash_mac_check[SHA256_DIGEST_SIZE]; + struct mptcp_cb *mpcb = tp->mpcb; + -+ mptcp_hmac_sha1((u8 *)&mpcb->mptcp_rem_key, -+ (u8 *)&mpcb->mptcp_loc_key, -+ (u32 *)hash_mac_check, 2, -+ 4, (u8 *)&tp->mptcp->rx_opt.mptcp_recv_nonce, -+ 4, (u8 *)&tp->mptcp->mptcp_loc_nonce); ++ mptcp_hmac(mpcb->mptcp_ver, (u8 *)&mpcb->mptcp_rem_key, ++ (u8 *)&mpcb->mptcp_loc_key, hash_mac_check, 2, ++ 4, (u8 *)&tp->mptcp->rx_opt.mptcp_recv_nonce, ++ 4, (u8 *)&tp->mptcp->mptcp_loc_nonce); + if (memcmp(hash_mac_check, + (char *)&tp->mptcp->rx_opt.mptcp_recv_tmac, 8)) { + MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKMAC); @@ -16499,11 +16811,11 @@ diff -aurN linux-5.4/net/mptcp/mptcp_input.c mptcp-mptcp_trunk/net/mptcp/mptcp_i + tp->mptcp->pre_established = 1; + tp->mptcp->rcv_low_prio = tp->mptcp->rx_opt.low_prio; + -+ mptcp_hmac_sha1((u8 *)&mpcb->mptcp_loc_key, -+ (u8 *)&mpcb->mptcp_rem_key, -+ (u32 *)&tp->mptcp->sender_mac[0], 2, -+ 4, (u8 *)&tp->mptcp->mptcp_loc_nonce, -+ 4, (u8 *)&tp->mptcp->rx_opt.mptcp_recv_nonce); ++ mptcp_hmac(mpcb->mptcp_ver, (u8 *)&mpcb->mptcp_loc_key, ++ (u8 *)&mpcb->mptcp_rem_key, ++ tp->mptcp->sender_mac, 2, ++ 4, (u8 *)&tp->mptcp->mptcp_loc_nonce, ++ 4, (u8 *)&tp->mptcp->rx_opt.mptcp_recv_nonce); + + MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_JOINSYNACKRX); + } else if (mopt->saw_mpc) { @@ -16513,8 +16825,13 @@ diff -aurN linux-5.4/net/mptcp/mptcp_input.c mptcp-mptcp_trunk/net/mptcp/mptcp_i + if (mopt->mptcp_ver > tcp_sk(sk)->mptcp_ver) + /* TODO Consider adding new MPTCP_INC_STATS entry */ + goto fallback; ++ if (tcp_sk(sk)->mptcp_ver == MPTCP_VERSION_1 && ++ mopt->mptcp_ver < MPTCP_VERSION_1) ++ /* TODO Consider adding new MPTCP_INC_STATS entry */ ++ /* TODO - record this in the cache - use v0 next time */ ++ goto fallback; + -+ if (mptcp_create_master_sk(sk, mopt->mptcp_sender_key, ++ if (mptcp_create_master_sk(sk, mopt->mptcp_sender_key, 1, + mopt->mptcp_ver, + ntohs(tcp_hdr(skb)->window))) + return 2; @@ -16542,6 +16859,9 @@ diff -aurN linux-5.4/net/mptcp/mptcp_input.c mptcp-mptcp_trunk/net/mptcp/mptcp_i + if (tp->mpcb->dss_csum) + MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_CSUMENABLED); + ++ if (tp->mpcb->mptcp_ver >= MPTCP_VERSION_1) ++ 
tp->mpcb->send_mptcpv1_mpcapable = 1; ++ + tp->mptcp->include_mpc = 1; + + /* Ensure that fastopen is handled at the meta-level. */ @@ -16620,8 +16940,8 @@ diff -aurN linux-5.4/net/mptcp/mptcp_input.c mptcp-mptcp_trunk/net/mptcp/mptcp_i +} diff -aurN linux-5.4/net/mptcp/mptcp_ipv4.c mptcp-mptcp_trunk/net/mptcp/mptcp_ipv4.c --- linux-5.4/net/mptcp/mptcp_ipv4.c 1970-01-01 01:00:00.000000000 +0100 -+++ mptcp-mptcp_trunk/net/mptcp/mptcp_ipv4.c 2020-02-20 18:07:47.000000000 +0100 -@@ -0,0 +1,427 @@ ++++ mptcp-mptcp_trunk/net/mptcp/mptcp_ipv4.c 2020-06-09 17:33:41.000000000 +0200 +@@ -0,0 +1,430 @@ +/* + * MPTCP implementation - IPv4-specific functions + * @@ -16730,6 +17050,9 @@ diff -aurN linux-5.4/net/mptcp/mptcp_ipv4.c mptcp-mptcp_trunk/net/mptcp/mptcp_ip + int loc_id; + bool low_prio = false; + ++ if (!mpcb->rem_key_set) ++ return -1; ++ + /* We need to do this as early as possible. Because, if we fail later + * (e.g., get_local_id), then reqsk_free tries to remove the + * request-socket from the htb in mptcp_hash_request_remove as pprev @@ -17051,8 +17374,8 @@ diff -aurN linux-5.4/net/mptcp/mptcp_ipv4.c mptcp-mptcp_trunk/net/mptcp/mptcp_ip +} diff -aurN linux-5.4/net/mptcp/mptcp_ipv6.c mptcp-mptcp_trunk/net/mptcp/mptcp_ipv6.c --- linux-5.4/net/mptcp/mptcp_ipv6.c 1970-01-01 01:00:00.000000000 +0100 -+++ mptcp-mptcp_trunk/net/mptcp/mptcp_ipv6.c 2020-02-20 18:07:47.000000000 +0100 -@@ -0,0 +1,475 @@ ++++ mptcp-mptcp_trunk/net/mptcp/mptcp_ipv6.c 2020-06-09 17:33:41.000000000 +0200 +@@ -0,0 +1,478 @@ +/* + * MPTCP implementation - IPv6-specific functions + * @@ -17190,6 +17513,9 @@ diff -aurN linux-5.4/net/mptcp/mptcp_ipv6.c mptcp-mptcp_trunk/net/mptcp/mptcp_ip + int loc_id; + bool low_prio = false; + ++ if (!mpcb->rem_key_set) ++ return -1; ++ + /* We need to do this as early as possible. 
Because, if we fail later + * (e.g., get_local_id), then reqsk_free tries to remove the + * request-socket from the htb in mptcp_hash_request_remove as pprev @@ -17530,7 +17856,7 @@ diff -aurN linux-5.4/net/mptcp/mptcp_ipv6.c mptcp-mptcp_trunk/net/mptcp/mptcp_ip +} diff -aurN linux-5.4/net/mptcp/mptcp_ndiffports.c mptcp-mptcp_trunk/net/mptcp/mptcp_ndiffports.c --- linux-5.4/net/mptcp/mptcp_ndiffports.c 1970-01-01 01:00:00.000000000 +0100 -+++ mptcp-mptcp_trunk/net/mptcp/mptcp_ndiffports.c 2020-02-20 18:07:47.000000000 +0100 ++++ mptcp-mptcp_trunk/net/mptcp/mptcp_ndiffports.c 2020-06-09 17:33:41.000000000 +0200 @@ -0,0 +1,174 @@ +#include + @@ -17708,7 +18034,7 @@ diff -aurN linux-5.4/net/mptcp/mptcp_ndiffports.c mptcp-mptcp_trunk/net/mptcp/mp +MODULE_VERSION("0.88"); diff -aurN linux-5.4/net/mptcp/mptcp_netlink.c mptcp-mptcp_trunk/net/mptcp/mptcp_netlink.c --- linux-5.4/net/mptcp/mptcp_netlink.c 1970-01-01 01:00:00.000000000 +0100 -+++ mptcp-mptcp_trunk/net/mptcp/mptcp_netlink.c 2020-02-20 18:07:47.000000000 +0100 ++++ mptcp-mptcp_trunk/net/mptcp/mptcp_netlink.c 2020-06-09 17:33:41.000000000 +0200 @@ -0,0 +1,1271 @@ +// SPDX-License-Identifier: GPL-2.0 +/* MPTCP implementation - Netlink Path Manager @@ -18983,7 +19309,7 @@ diff -aurN linux-5.4/net/mptcp/mptcp_netlink.c mptcp-mptcp_trunk/net/mptcp/mptcp +MODULE_ALIAS_GENL_FAMILY(MPTCP_GENL_NAME); diff -aurN linux-5.4/net/mptcp/mptcp_olia.c mptcp-mptcp_trunk/net/mptcp/mptcp_olia.c --- linux-5.4/net/mptcp/mptcp_olia.c 1970-01-01 01:00:00.000000000 +0100 -+++ mptcp-mptcp_trunk/net/mptcp/mptcp_olia.c 2020-02-20 18:07:47.000000000 +0100 ++++ mptcp-mptcp_trunk/net/mptcp/mptcp_olia.c 2020-06-09 17:33:41.000000000 +0200 @@ -0,0 +1,318 @@ +/* + * MPTCP implementation - OPPORTUNISTIC LINKED INCREASES CONGESTION CONTROL: @@ -19305,8 +19631,8 @@ diff -aurN linux-5.4/net/mptcp/mptcp_olia.c mptcp-mptcp_trunk/net/mptcp/mptcp_ol +MODULE_VERSION("0.1"); diff -aurN linux-5.4/net/mptcp/mptcp_output.c mptcp-mptcp_trunk/net/mptcp/mptcp_output.c --- linux-5.4/net/mptcp/mptcp_output.c 1970-01-01 01:00:00.000000000 +0100 -+++ mptcp-mptcp_trunk/net/mptcp/mptcp_output.c 2020-02-20 18:07:47.000000000 +0100 -@@ -0,0 +1,1900 @@ ++++ mptcp-mptcp_trunk/net/mptcp/mptcp_output.c 2020-06-09 17:33:41.000000000 +0200 +@@ -0,0 +1,1994 @@ +/* + * MPTCP implementation - Sending side + * @@ -19788,30 +20114,78 @@ diff -aurN linux-5.4/net/mptcp/mptcp_output.c mptcp-mptcp_trunk/net/mptcp/mptcp_ + ptr += mptcp_write_dss_mapping(tp, skb, ptr); +} + ++/* Write the MP_CAPABLE with data-option */ ++static int mptcp_write_mpcapable_data(const struct tcp_sock *tp, ++ struct sk_buff *skb, ++ __be32 *ptr) ++{ ++ struct mp_capable *mpc = (struct mp_capable *)ptr; ++ u8 length; ++ ++ if (tp->mpcb->dss_csum) ++ length = MPTCPV1_SUB_LEN_CAPABLE_DATA_CSUM; ++ else ++ length = MPTCPV1_SUB_LEN_CAPABLE_DATA; ++ ++ mpc->kind = TCPOPT_MPTCP; ++ mpc->len = length; ++ mpc->sub = MPTCP_SUB_CAPABLE; ++ mpc->ver = MPTCP_VERSION_1; ++ mpc->a = tp->mpcb->dss_csum; ++ mpc->b = 0; ++ mpc->rsv = 0; ++ mpc->h = 1; ++ ++ ptr++; ++ memcpy(ptr, TCP_SKB_CB(skb)->dss, mptcp_dss_len); ++ ++ mpc->sender_key = tp->mpcb->mptcp_loc_key; ++ mpc->receiver_key = tp->mpcb->mptcp_rem_key; ++ ++ /* dss is in a union with inet_skb_parm and ++ * the IP layer expects zeroed IPCB fields. 
++ */ ++ memset(TCP_SKB_CB(skb)->dss, 0, mptcp_dss_len); ++ ++ return MPTCPV1_SUB_LEN_CAPABLE_DATA_ALIGN / sizeof(*ptr); ++} ++ +/* Write the saved DSS mapping to the header */ +static int mptcp_write_dss_data_seq(const struct tcp_sock *tp, struct sk_buff *skb, + __be32 *ptr) +{ ++ int length; + __be32 *start = ptr; + -+ memcpy(ptr, TCP_SKB_CB(skb)->dss, mptcp_dss_len); ++ if (tp->mpcb->rem_key_set) { ++ memcpy(ptr, TCP_SKB_CB(skb)->dss, mptcp_dss_len); + -+ /* update the data_ack */ -+ start[1] = htonl(mptcp_meta_tp(tp)->rcv_nxt); ++ /* update the data_ack */ ++ start[1] = htonl(mptcp_meta_tp(tp)->rcv_nxt); ++ ++ length = mptcp_dss_len / sizeof(*ptr); ++ } else { ++ memcpy(ptr, TCP_SKB_CB(skb)->dss, MPTCP_SUB_LEN_DSS_ALIGN); ++ ++ ptr++; ++ memcpy(ptr, TCP_SKB_CB(skb)->dss + 2, MPTCP_SUB_LEN_SEQ_ALIGN); ++ ++ length = (MPTCP_SUB_LEN_DSS_ALIGN + MPTCP_SUB_LEN_SEQ_ALIGN) / sizeof(*ptr); ++ } + + /* dss is in a union with inet_skb_parm and + * the IP layer expects zeroed IPCB fields. + */ + memset(TCP_SKB_CB(skb)->dss, 0 , mptcp_dss_len); + -+ return mptcp_dss_len/sizeof(*ptr); ++ return length; +} + +static bool mptcp_skb_entail(struct sock *sk, struct sk_buff *skb, int reinject) +{ + struct tcp_sock *tp = tcp_sk(sk); + const struct sock *meta_sk = mptcp_meta_sk(sk); -+ const struct mptcp_cb *mpcb = tp->mpcb; ++ struct mptcp_cb *mpcb = tp->mpcb; + struct tcp_skb_cb *tcb; + struct sk_buff *subskb = NULL; + @@ -19853,6 +20227,11 @@ diff -aurN linux-5.4/net/mptcp/mptcp_output.c mptcp-mptcp_trunk/net/mptcp/mptcp_ + + mptcp_save_dss_data_seq(tp, subskb); + ++ if (mpcb->send_mptcpv1_mpcapable) { ++ TCP_SKB_CB(subskb)->mptcp_flags |= MPTCPHDR_MPC_DATA; ++ mpcb->send_mptcpv1_mpcapable = 0; ++ } ++ + tcb->seq = tp->write_seq; + + /* Take into account seg len */ @@ -20160,10 +20539,7 @@ diff -aurN linux-5.4/net/mptcp/mptcp_output.c mptcp-mptcp_trunk/net/mptcp/mptcp_ + + if (!mptcp_skb_entail(subsk, skb, reinject)) + break; -+ /* Nagle is handled at the MPTCP-layer, so -+ * always push on the subflow -+ */ -+ __tcp_push_pending_frames(subsk, mss_now, TCP_NAGLE_PUSH); ++ + if (reinject <= 0) + tcp_update_skb_after_send(meta_sk, skb, meta_tp->tcp_wstamp_ns); + meta_tp->lsndtime = tcp_jiffies32; @@ -20195,14 +20571,12 @@ diff -aurN linux-5.4/net/mptcp/mptcp_output.c mptcp-mptcp_trunk/net/mptcp/mptcp_ + if (!(path_mask & mptcp_pi_to_flag(subtp->mptcp->path_index))) + continue; + -+ /* We have pushed data on this subflow. We ignore the call to -+ * cwnd_validate in tcp_write_xmit as is_cwnd_limited will never -+ * be true (we never push more than what the cwnd can accept). -+ * We need to ensure that we call tcp_cwnd_validate with -+ * is_cwnd_limited set to true if we have filled the cwnd. 
++ mss_now = tcp_current_mss(subsk); ++ ++ /* Nagle is handled at the MPTCP-layer, so ++ * always push on the subflow + */ -+ tcp_cwnd_validate(subsk, tcp_packets_in_flight(subtp) >= -+ subtp->snd_cwnd); ++ __tcp_push_pending_frames(subsk, mss_now, TCP_NAGLE_PUSH); + } + + return !meta_tp->packets_out && tcp_send_head(meta_sk); @@ -20297,8 +20671,13 @@ diff -aurN linux-5.4/net/mptcp/mptcp_output.c mptcp-mptcp_trunk/net/mptcp/mptcp_ + opts->options |= OPTION_MPTCP; + if (is_master_tp(tp)) { + opts->mptcp_options |= OPTION_MP_CAPABLE | OPTION_TYPE_SYN; -+ opts->mptcp_ver = tcp_sk(sk)->mptcp_ver; -+ *remaining -= MPTCP_SUB_LEN_CAPABLE_SYN_ALIGN; ++ opts->mptcp_ver = tp->mptcp_ver; ++ ++ if (tp->mptcp_ver >= MPTCP_VERSION_1) ++ *remaining -= MPTCPV1_SUB_LEN_CAPABLE_SYN_ALIGN; ++ else ++ *remaining -= MPTCP_SUB_LEN_CAPABLE_SYN_ALIGN; ++ + opts->mp_capable.sender_key = tp->mptcp_loc_key; + opts->dss_csum = !!sysctl_mptcp_checksum; + } else { @@ -20326,7 +20705,11 @@ diff -aurN linux-5.4/net/mptcp/mptcp_output.c mptcp-mptcp_trunk/net/mptcp/mptcp_ + opts->mptcp_ver = mtreq->mptcp_ver; + opts->mp_capable.sender_key = mtreq->mptcp_loc_key; + opts->dss_csum = !!sysctl_mptcp_checksum || mtreq->dss_csum; -+ *remaining -= MPTCP_SUB_LEN_CAPABLE_SYN_ALIGN; ++ if (mtreq->mptcp_ver >= MPTCP_VERSION_1) { ++ *remaining -= MPTCPV1_SUB_LEN_CAPABLE_SYNACK_ALIGN; ++ } else { ++ *remaining -= MPTCP_SUB_LEN_CAPABLE_SYN_ALIGN; ++ } + } else { + opts->mptcp_options |= OPTION_MP_JOIN | OPTION_TYPE_SYNACK; + opts->mp_join_syns.sender_truncated_mac = @@ -20389,7 +20772,12 @@ diff -aurN linux-5.4/net/mptcp/mptcp_output.c mptcp-mptcp_trunk/net/mptcp/mptcp_ + opts->options |= OPTION_MPTCP; + opts->mptcp_options |= OPTION_MP_CAPABLE | + OPTION_TYPE_ACK; -+ *size += MPTCP_SUB_LEN_CAPABLE_ACK_ALIGN; ++ ++ if (mpcb->mptcp_ver >= MPTCP_VERSION_1) ++ *size += MPTCPV1_SUB_LEN_CAPABLE_ACK_ALIGN; ++ else ++ *size += MPTCP_SUB_LEN_CAPABLE_ACK_ALIGN; ++ + opts->mptcp_ver = mpcb->mptcp_ver; + opts->mp_capable.sender_key = mpcb->mptcp_loc_key; + opts->mp_capable.receiver_key = mpcb->mptcp_rem_key; @@ -20420,14 +20808,20 @@ diff -aurN linux-5.4/net/mptcp/mptcp_output.c mptcp-mptcp_trunk/net/mptcp/mptcp_ + /* If !skb, we come from tcp_current_mss and thus we always + * assume that the DSS-option will be set for the data-packet. + */ -+ if (skb && !mptcp_is_data_seq(skb)) { ++ if (skb && !mptcp_is_data_seq(skb) && mpcb->rem_key_set) { + *size += MPTCP_SUB_LEN_ACK_ALIGN; ++ } else if ((skb && mptcp_is_data_mpcapable(skb)) || ++ (!skb && tp->mpcb->send_mptcpv1_mpcapable)) { ++ *size += MPTCPV1_SUB_LEN_CAPABLE_DATA_ALIGN; + } else { + /* Doesn't matter, if csum included or not. 
It will be + * either 10 or 12, and thus aligned = 12 + */ -+ *size += MPTCP_SUB_LEN_ACK_ALIGN + -+ MPTCP_SUB_LEN_SEQ_ALIGN; ++ if (mpcb->rem_key_set) ++ *size += MPTCP_SUB_LEN_ACK_ALIGN + ++ MPTCP_SUB_LEN_SEQ_ALIGN; ++ else ++ *size += MPTCP_SUB_LEN_SEQ_ALIGN; + } + + *size += MPTCP_SUB_LEN_DSS_ALIGN; @@ -20480,18 +20874,36 @@ diff -aurN linux-5.4/net/mptcp/mptcp_output.c mptcp-mptcp_trunk/net/mptcp/mptcp_ + + mpc->kind = TCPOPT_MPTCP; + -+ if ((OPTION_TYPE_SYN & opts->mptcp_options) || -+ (OPTION_TYPE_SYNACK & opts->mptcp_options)) { -+ mpc->sender_key = opts->mp_capable.sender_key; -+ mpc->len = MPTCP_SUB_LEN_CAPABLE_SYN; ++ if (OPTION_TYPE_SYN & opts->mptcp_options) { + mpc->ver = opts->mptcp_ver; -+ ptr += MPTCP_SUB_LEN_CAPABLE_SYN_ALIGN >> 2; -+ } else if (OPTION_TYPE_ACK & opts->mptcp_options) { ++ ++ if (mpc->ver >= MPTCP_VERSION_1) { ++ mpc->len = MPTCPV1_SUB_LEN_CAPABLE_SYN; ++ ptr += MPTCPV1_SUB_LEN_CAPABLE_SYN_ALIGN >> 2; ++ } else { ++ mpc->sender_key = opts->mp_capable.sender_key; ++ mpc->len = MPTCP_SUB_LEN_CAPABLE_SYN; ++ ptr += MPTCP_SUB_LEN_CAPABLE_SYN_ALIGN >> 2; ++ } ++ } else if (OPTION_TYPE_SYNACK & opts->mptcp_options) { ++ mpc->ver = opts->mptcp_ver; ++ ++ if (mpc->ver >= MPTCP_VERSION_1) { ++ mpc->len = MPTCPV1_SUB_LEN_CAPABLE_SYNACK; ++ ptr += MPTCPV1_SUB_LEN_CAPABLE_SYNACK_ALIGN >> 2; ++ } else { ++ mpc->len = MPTCP_SUB_LEN_CAPABLE_SYN; ++ ptr += MPTCP_SUB_LEN_CAPABLE_SYN_ALIGN >> 2; ++ } ++ + mpc->sender_key = opts->mp_capable.sender_key; -+ mpc->receiver_key = opts->mp_capable.receiver_key; ++ } else if (OPTION_TYPE_ACK & opts->mptcp_options) { + mpc->len = MPTCP_SUB_LEN_CAPABLE_ACK; + mpc->ver = opts->mptcp_ver; + ptr += MPTCP_SUB_LEN_CAPABLE_ACK_ALIGN >> 2; ++ ++ mpc->sender_key = opts->mp_capable.sender_key; ++ mpc->receiver_key = opts->mp_capable.receiver_key; + } + + mpc->sub = MPTCP_SUB_CAPABLE; @@ -20535,29 +20947,35 @@ diff -aurN linux-5.4/net/mptcp/mptcp_output.c mptcp-mptcp_trunk/net/mptcp/mptcp_ + + mpadd->kind = TCPOPT_MPTCP; + if (opts->add_addr_v4) { -+ mpadd->sub = MPTCP_SUB_ADD_ADDR; -+ mpadd->ipver = 4; + mpadd->addr_id = opts->add_addr4.addr_id; + mpadd->u.v4.addr = opts->add_addr4.addr; + if (mpcb->mptcp_ver < MPTCP_VERSION_1) { ++ mpadd->u_bit.v0.sub = MPTCP_SUB_ADD_ADDR; ++ mpadd->u_bit.v0.ipver = 4; + mpadd->len = MPTCP_SUB_LEN_ADD_ADDR4; + ptr += MPTCP_SUB_LEN_ADD_ADDR4_ALIGN >> 2; + } else { ++ mpadd->u_bit.v1.sub = MPTCP_SUB_ADD_ADDR; ++ mpadd->u_bit.v1.rsv = 0; ++ mpadd->u_bit.v1.echo = 0; + memcpy((char *)mpadd->u.v4.mac - 2, + (char *)&opts->add_addr4.trunc_mac, 8); + mpadd->len = MPTCP_SUB_LEN_ADD_ADDR4_VER1; + ptr += MPTCP_SUB_LEN_ADD_ADDR4_ALIGN_VER1 >> 2; + } + } else if (opts->add_addr_v6) { -+ mpadd->sub = MPTCP_SUB_ADD_ADDR; -+ mpadd->ipver = 6; + mpadd->addr_id = opts->add_addr6.addr_id; + memcpy(&mpadd->u.v6.addr, &opts->add_addr6.addr, + sizeof(mpadd->u.v6.addr)); + if (mpcb->mptcp_ver < MPTCP_VERSION_1) { ++ mpadd->u_bit.v0.sub = MPTCP_SUB_ADD_ADDR; ++ mpadd->u_bit.v0.ipver = 6; + mpadd->len = MPTCP_SUB_LEN_ADD_ADDR6; + ptr += MPTCP_SUB_LEN_ADD_ADDR6_ALIGN >> 2; + } else { ++ mpadd->u_bit.v1.sub = MPTCP_SUB_ADD_ADDR; ++ mpadd->u_bit.v1.rsv = 0; ++ mpadd->u_bit.v1.echo = 0; + memcpy((char *)mpadd->u.v6.mac - 2, + (char *)&opts->add_addr6.trunc_mac, 8); + mpadd->len = MPTCP_SUB_LEN_ADD_ADDR6_VER1; @@ -20621,8 +21039,10 @@ diff -aurN linux-5.4/net/mptcp/mptcp_output.c mptcp-mptcp_trunk/net/mptcp/mptcp_ + } + + if (OPTION_DATA_ACK & opts->mptcp_options) { -+ if (!mptcp_is_data_seq(skb)) ++ if (!mptcp_is_data_seq(skb) && 
tp->mpcb->rem_key_set) + ptr += mptcp_write_dss_data_ack(tp, skb, ptr); ++ else if (mptcp_is_data_mpcapable(skb)) ++ ptr += mptcp_write_mpcapable_data(tp, skb, ptr); + else + ptr += mptcp_write_dss_data_seq(tp, skb, ptr); + } @@ -21209,7 +21629,7 @@ diff -aurN linux-5.4/net/mptcp/mptcp_output.c mptcp-mptcp_trunk/net/mptcp/mptcp_ + diff -aurN linux-5.4/net/mptcp/mptcp_pm.c mptcp-mptcp_trunk/net/mptcp/mptcp_pm.c --- linux-5.4/net/mptcp/mptcp_pm.c 1970-01-01 01:00:00.000000000 +0100 -+++ mptcp-mptcp_trunk/net/mptcp/mptcp_pm.c 2020-02-20 18:07:47.000000000 +0100 ++++ mptcp-mptcp_trunk/net/mptcp/mptcp_pm.c 2020-06-09 17:33:41.000000000 +0200 @@ -0,0 +1,226 @@ +/* + * MPTCP implementation - MPTCP-subflow-management @@ -21439,8 +21859,8 @@ diff -aurN linux-5.4/net/mptcp/mptcp_pm.c mptcp-mptcp_trunk/net/mptcp/mptcp_pm.c +late_initcall(mptcp_path_manager_default); diff -aurN linux-5.4/net/mptcp/mptcp_redundant.c mptcp-mptcp_trunk/net/mptcp/mptcp_redundant.c --- linux-5.4/net/mptcp/mptcp_redundant.c 1970-01-01 01:00:00.000000000 +0100 -+++ mptcp-mptcp_trunk/net/mptcp/mptcp_redundant.c 2020-02-20 18:07:47.000000000 +0100 -@@ -0,0 +1,389 @@ ++++ mptcp-mptcp_trunk/net/mptcp/mptcp_redundant.c 2020-06-09 17:33:41.000000000 +0200 +@@ -0,0 +1,392 @@ +/* + * MPTCP Scheduler to reduce latency and jitter. + * @@ -21630,7 +22050,9 @@ diff -aurN linux-5.4/net/mptcp/mptcp_redundant.c mptcp-mptcp_trunk/net/mptcp/mpt +{ + struct tcp_sock *meta_tp = tcp_sk(meta_sk); + -+ if (red_p->skb && !after(red_p->skb_end_seq, meta_tp->snd_una)) ++ if (red_p->skb && ++ (!after(red_p->skb_end_seq, meta_tp->snd_una) || ++ after(red_p->skb_end_seq, meta_tp->snd_nxt))) + red_p->skb = NULL; +} + @@ -21642,7 +22064,7 @@ diff -aurN linux-5.4/net/mptcp/mptcp_redundant.c mptcp-mptcp_trunk/net/mptcp/mpt + struct sk_buff *skb; + + if (!previous) -+ return skb_peek(queue); ++ return tcp_rtx_queue_head(meta_sk) ? : skb_peek(queue); + + /* sk_data->skb stores the last scheduled packet for this subflow. + * If sk_data->skb was scheduled but not sent (e.g., due to nagle), @@ -21689,7 +22111,8 @@ diff -aurN linux-5.4/net/mptcp/mptcp_redundant.c mptcp-mptcp_trunk/net/mptcp/mpt + *limit = 0; + + if (skb_queue_empty(&mpcb->reinject_queue) && -+ skb_queue_empty(&meta_sk->sk_write_queue)) ++ skb_queue_empty(&meta_sk->sk_write_queue) && ++ tcp_rtx_queue_empty(meta_sk)) + /* Nothing to send */ + return NULL; + @@ -21832,7 +22255,7 @@ diff -aurN linux-5.4/net/mptcp/mptcp_redundant.c mptcp-mptcp_trunk/net/mptcp/mpt +MODULE_VERSION("0.90"); diff -aurN linux-5.4/net/mptcp/mptcp_rr.c mptcp-mptcp_trunk/net/mptcp/mptcp_rr.c --- linux-5.4/net/mptcp/mptcp_rr.c 1970-01-01 01:00:00.000000000 +0100 -+++ mptcp-mptcp_trunk/net/mptcp/mptcp_rr.c 2020-02-20 18:07:47.000000000 +0100 ++++ mptcp-mptcp_trunk/net/mptcp/mptcp_rr.c 2020-06-09 17:33:41.000000000 +0200 @@ -0,0 +1,309 @@ +/* MPTCP Scheduler module selector. Highly inspired by tcp_cong.c */ + @@ -22145,8 +22568,8 @@ diff -aurN linux-5.4/net/mptcp/mptcp_rr.c mptcp-mptcp_trunk/net/mptcp/mptcp_rr.c +MODULE_VERSION("0.89"); diff -aurN linux-5.4/net/mptcp/mptcp_sched.c mptcp-mptcp_trunk/net/mptcp/mptcp_sched.c --- linux-5.4/net/mptcp/mptcp_sched.c 1970-01-01 01:00:00.000000000 +0100 -+++ mptcp-mptcp_trunk/net/mptcp/mptcp_sched.c 2020-02-20 18:07:47.000000000 +0100 -@@ -0,0 +1,634 @@ ++++ mptcp-mptcp_trunk/net/mptcp/mptcp_sched.c 2020-06-09 17:33:41.000000000 +0200 +@@ -0,0 +1,647 @@ +/* MPTCP Scheduler module selector. 
Highly inspired by tcp_cong.c */ + +#include @@ -22225,7 +22648,7 @@ diff -aurN linux-5.4/net/mptcp/mptcp_sched.c mptcp-mptcp_trunk/net/mptcp/mptcp_s + */ + space = (tp->snd_cwnd - in_flight) * tp->mss_cache; + -+ if (tp->write_seq - tp->snd_nxt > space) ++ if (tp->write_seq - tp->snd_nxt >= space) + return true; + + if (zero_wnd_test && !before(tp->write_seq, tcp_wnd_end(tp))) @@ -22521,8 +22944,8 @@ diff -aurN linux-5.4/net/mptcp/mptcp_sched.c mptcp-mptcp_trunk/net/mptcp/mptcp_s + if (!skb && meta_sk->sk_socket && + test_bit(SOCK_NOSPACE, &meta_sk->sk_socket->flags) && + sk_stream_wspace(meta_sk) < sk_stream_min_wspace(meta_sk)) { -+ struct sock *subsk = get_available_subflow(meta_sk, NULL, -+ false); ++ struct sock *subsk = mpcb->sched_ops->get_subflow(meta_sk, NULL, ++ false); + if (!subsk) + return NULL; + @@ -22534,16 +22957,17 @@ diff -aurN linux-5.4/net/mptcp/mptcp_sched.c mptcp-mptcp_trunk/net/mptcp/mptcp_s + return skb; +} + -+static struct sk_buff *mptcp_next_segment(struct sock *meta_sk, ++struct sk_buff *mptcp_next_segment(struct sock *meta_sk, + int *reinject, + struct sock **subsk, + unsigned int *limit) +{ + struct sk_buff *skb = __mptcp_next_segment(meta_sk, reinject); -+ unsigned int mss_now; ++ unsigned int mss_now, in_flight_space; ++ int remaining_in_flight_space; ++ u32 max_len, max_segs, window; + struct tcp_sock *subtp; + u16 gso_max_segs; -+ u32 max_len, max_segs, window, needed; + + /* As we set it, we have to reset it as well. */ + *limit = 0; @@ -22551,7 +22975,7 @@ diff -aurN linux-5.4/net/mptcp/mptcp_sched.c mptcp-mptcp_trunk/net/mptcp/mptcp_s + if (!skb) + return NULL; + -+ *subsk = get_available_subflow(meta_sk, skb, false); ++ *subsk = tcp_sk(meta_sk)->mpcb->sched_ops->get_subflow(meta_sk, skb, false); + if (!*subsk) + return NULL; + @@ -22573,9 +22997,6 @@ diff -aurN linux-5.4/net/mptcp/mptcp_sched.c mptcp-mptcp_trunk/net/mptcp/mptcp_s + /* The following is similar to tcp_mss_split_point, but + * we do not care about nagle, because we will anyways + * use TCP_NAGLE_PUSH, which overrides this. -+ * -+ * So, we first limit according to the cwnd/gso-size and then according -+ * to the subflow's window. + */ + + gso_max_segs = (*subsk)->sk_gso_max_segs; @@ -22585,19 +23006,34 @@ diff -aurN linux-5.4/net/mptcp/mptcp_sched.c mptcp-mptcp_trunk/net/mptcp/mptcp_s + if (!max_segs) + return NULL; + -+ max_len = mss_now * max_segs; ++ /* max_len is what would fit in the cwnd (respecting the 2GSO-limit of ++ * tcp_cwnd_test), but ignoring whatever was already queued. ++ */ ++ max_len = min(mss_now * max_segs, skb->len); ++ ++ in_flight_space = (subtp->snd_cwnd - tcp_packets_in_flight(subtp)) * mss_now; ++ remaining_in_flight_space = (int)in_flight_space - (subtp->write_seq - subtp->snd_nxt); ++ ++ if (remaining_in_flight_space <= 0) ++ WARN_ONCE(1, "in_flight %u cwnd %u wseq %u snxt %u mss_now %u cache %u", ++ tcp_packets_in_flight(subtp), subtp->snd_cwnd, ++ subtp->write_seq, subtp->snd_nxt, mss_now, subtp->mss_cache); ++ else ++ /* max_len now fits exactly in the write-queue, taking into ++ * account what was already queued. 
++ */ ++ max_len = min_t(u32, max_len, remaining_in_flight_space); ++ + window = tcp_wnd_end(subtp) - subtp->write_seq; + -+ needed = min(skb->len, window); -+ if (max_len <= skb->len) -+ /* Take max_win, which is actually the cwnd/gso-size */ -+ *limit = max_len; -+ else -+ /* Or, take the window */ -+ *limit = needed; ++ /* max_len now also respects the announced receive-window */ ++ max_len = min(max_len, window); ++ ++ *limit = max_len; + + return skb; +} ++EXPORT_SYMBOL_GPL(mptcp_next_segment); + +static void defsched_init(struct sock *sk) +{ @@ -22783,7 +23219,7 @@ diff -aurN linux-5.4/net/mptcp/mptcp_sched.c mptcp-mptcp_trunk/net/mptcp/mptcp_s +late_initcall(mptcp_scheduler_default); diff -aurN linux-5.4/net/mptcp/mptcp_wvegas.c mptcp-mptcp_trunk/net/mptcp/mptcp_wvegas.c --- linux-5.4/net/mptcp/mptcp_wvegas.c 1970-01-01 01:00:00.000000000 +0100 -+++ mptcp-mptcp_trunk/net/mptcp/mptcp_wvegas.c 2020-02-20 18:07:47.000000000 +0100 ++++ mptcp-mptcp_trunk/net/mptcp/mptcp_wvegas.c 2020-06-09 17:33:41.000000000 +0200 @@ -0,0 +1,271 @@ +/* + * MPTCP implementation - WEIGHTED VEGAS @@ -23058,7 +23494,7 @@ diff -aurN linux-5.4/net/mptcp/mptcp_wvegas.c mptcp-mptcp_trunk/net/mptcp/mptcp_ +MODULE_VERSION("0.1"); diff -aurN linux-5.4/tools/include/uapi/linux/bpf.h mptcp-mptcp_trunk/tools/include/uapi/linux/bpf.h --- linux-5.4/tools/include/uapi/linux/bpf.h 2019-11-25 01:32:01.000000000 +0100 -+++ mptcp-mptcp_trunk/tools/include/uapi/linux/bpf.h 2020-02-20 18:07:47.000000000 +0100 ++++ mptcp-mptcp_trunk/tools/include/uapi/linux/bpf.h 2020-06-09 17:33:41.000000000 +0200 @@ -3438,6 +3438,7 @@ BPF_TCP_LISTEN, BPF_TCP_CLOSING, /* Now a valid state */ diff --git a/root/target/linux/generic/hack-5.4/691-mptcp_ecf.patch b/root/target/linux/generic/hack-5.4/691-mptcp_ecf.patch deleted file mode 100644 index 4925612c..00000000 --- a/root/target/linux/generic/hack-5.4/691-mptcp_ecf.patch +++ /dev/null @@ -1,988 +0,0 @@ -From 025619486cf04c0beb9f395609d7711726fd63c6 Mon Sep 17 00:00:00 2001 -From: Daniel Weber -Date: Mon, 5 Aug 2019 14:02:30 +0200 -Subject: [PATCH 1/3] mptcp: Earliest Completion First (ECF) Scheduler - -This scheduler works much like the default MPTCP scheduler. It always -prefers the subflow with the smallest round-trip-time that is available. - -Signed-off-by: Daniel Weber ---- - net/mptcp/Kconfig | 6 + - net/mptcp/Makefile | 1 + - net/mptcp/mptcp_ecf.c | 384 ++++++++++++++++++++++++++++++++++++++++++ - 3 files changed, 391 insertions(+) - create mode 100644 net/mptcp/mptcp_ecf.c - -diff --git a/net/mptcp/Kconfig b/net/mptcp/Kconfig -index 37f3af3db2a6..829ea084cf70 100644 ---- a/net/mptcp/Kconfig -+++ b/net/mptcp/Kconfig -@@ -109,6 +109,12 @@ config MPTCP_REDUNDANT - This scheduler sends all packets redundantly over all subflows to decreases - latency and jitter on the cost of lower throughput. - -+config MPTCP_ECF -+ tristate "MPTCP ECF" -+ depends on (MPTCP=y) -+ ---help--- -+ This is an experimental Earliest Completion First (ECF) scheduler. 
-+ - choice - prompt "Default MPTCP Scheduler" - default DEFAULT_SCHEDULER -diff --git a/net/mptcp/Makefile b/net/mptcp/Makefile -index 82a2d4d945ae..369248a2f68e 100644 ---- a/net/mptcp/Makefile -+++ b/net/mptcp/Makefile -@@ -20,5 +20,6 @@ obj-$(CONFIG_MPTCP_NETLINK) += mptcp_netlink.o - obj-$(CONFIG_MPTCP_ROUNDROBIN) += mptcp_rr.o - obj-$(CONFIG_MPTCP_REDUNDANT) += mptcp_redundant.o - obj-$(CONFIG_MPTCP_BLEST) += mptcp_blest.o -+obj-$(CONFIG_MPTCP_ECF) += mptcp_ecf.o - - mptcp-$(subst m,y,$(CONFIG_IPV6)) += mptcp_ipv6.o -diff --git a/net/mptcp/mptcp_ecf.c b/net/mptcp/mptcp_ecf.c -new file mode 100644 -index 000000000000..e0bd430a8943 ---- /dev/null -+++ b/net/mptcp/mptcp_ecf.c -@@ -0,0 +1,384 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* MPTCP ECF Scheduler -+ * -+ * Algorithm Design: -+ * Yeon-sup Lim -+ * Don Towsley -+ * Erich M. Nahum -+ * Richard J. Gibbens -+ * -+ * Initial Implementation: -+ * Yeon-sup Lim -+ * -+ * Additional Authors: -+ * Daniel Weber -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * as published by the Free Software Foundation; either version -+ * 2 of the License, or (at your option) any later version. -+ */ -+ -+#include -+#include -+#include -+ -+static unsigned int mptcp_ecf_r_beta __read_mostly = 4; /* beta = 1/r_beta = 0.25 */ -+module_param(mptcp_ecf_r_beta, int, 0644); -+MODULE_PARM_DESC(mptcp_ecf_r_beta, "beta for ECF"); -+ -+struct ecfsched_priv { -+ u32 last_rbuf_opti; -+}; -+ -+struct ecfsched_cb { -+ u32 switching_margin; /* this is "waiting" in algorithm description */ -+}; -+ -+static struct ecfsched_priv *ecfsched_get_priv(const struct tcp_sock *tp) -+{ -+ return (struct ecfsched_priv *)&tp->mptcp->mptcp_sched[0]; -+} -+ -+static struct ecfsched_cb *ecfsched_get_cb(const struct tcp_sock *tp) -+{ -+ return (struct ecfsched_cb *)&tp->mpcb->mptcp_sched[0]; -+} -+ -+/* This is the ECF scheduler. This function decides on which flow to send -+ * a given MSS. If all subflows are found to be busy or the currently best -+ * subflow is estimated to be slower than waiting for minsk, NULL is returned. -+ */ -+static struct sock *ecf_get_available_subflow(struct sock *meta_sk, -+ struct sk_buff *skb, -+ bool zero_wnd_test) -+{ -+ struct mptcp_cb *mpcb = tcp_sk(meta_sk)->mpcb; -+ struct sock *bestsk, *minsk = NULL; -+ struct tcp_sock *besttp; -+ struct mptcp_tcp_sock *mptcp; -+ struct ecfsched_cb *ecf_cb = ecfsched_get_cb(tcp_sk(meta_sk)); -+ u32 min_srtt = U32_MAX; -+ u32 sub_sndbuf = 0; -+ u32 sub_packets_out = 0; -+ -+ /* Answer data_fin on same subflow!!! */ -+ if (meta_sk->sk_shutdown & RCV_SHUTDOWN && -+ skb && mptcp_is_data_fin(skb)) { -+ mptcp_for_each_sub(mpcb, mptcp) { -+ bestsk = mptcp_to_sock(mptcp); -+ -+ if (tcp_sk(bestsk)->mptcp->path_index == mpcb->dfin_path_index && -+ mptcp_is_available(bestsk, skb, zero_wnd_test)) -+ return bestsk; -+ } -+ } -+ -+ /* First, find the overall best (fastest) subflow */ -+ mptcp_for_each_sub(mpcb, mptcp) { -+ bestsk = mptcp_to_sock(mptcp); -+ besttp = tcp_sk(bestsk); -+ -+ /* Set of states for which we are allowed to send data */ -+ if (!mptcp_sk_can_send(bestsk)) -+ continue; -+ -+ /* We do not send data on this subflow unless it is -+ * fully established, i.e. the 4th ack has been received. 
-+ */ -+ if (besttp->mptcp->pre_established) -+ continue; -+ -+ sub_sndbuf += bestsk->sk_wmem_queued; -+ sub_packets_out += besttp->packets_out; -+ -+ /* record minimal rtt */ -+ if (besttp->srtt_us < min_srtt) { -+ min_srtt = besttp->srtt_us; -+ minsk = bestsk; -+ } -+ } -+ -+ /* find the current best subflow according to the default scheduler */ -+ bestsk = get_available_subflow(meta_sk, skb, zero_wnd_test); -+ -+ /* if we decided to use a slower flow, we have the option of not using it at all */ -+ if (bestsk && minsk && bestsk != minsk) { -+ u32 mss = tcp_current_mss(bestsk); /* assuming equal MSS */ -+ u32 sndbuf_meta = meta_sk->sk_wmem_queued; -+ u32 sndbuf_minus = sub_sndbuf; -+ u32 sndbuf = 0; -+ -+ u32 cwnd_f = tcp_sk(minsk)->snd_cwnd; -+ u32 srtt_f = tcp_sk(minsk)->srtt_us >> 3; -+ u32 rttvar_f = tcp_sk(minsk)->rttvar_us >> 1; -+ -+ u32 cwnd_s = tcp_sk(bestsk)->snd_cwnd; -+ u32 srtt_s = tcp_sk(bestsk)->srtt_us >> 3; -+ u32 rttvar_s = tcp_sk(bestsk)->rttvar_us >> 1; -+ -+ u32 delta = max(rttvar_f, rttvar_s); -+ -+ u32 x_f; -+ u64 lhs, rhs; /* to avoid overflow, using u64 */ -+ -+ if (tcp_sk(meta_sk)->packets_out > sub_packets_out) -+ sndbuf_minus += (tcp_sk(meta_sk)->packets_out - sub_packets_out) * mss; -+ -+ if (sndbuf_meta > sndbuf_minus) -+ sndbuf = sndbuf_meta - sndbuf_minus; -+ -+ /* we have something to send. -+ * at least one time tx over fastest subflow is required -+ */ -+ x_f = sndbuf > cwnd_f * mss ? sndbuf : cwnd_f * mss; -+ lhs = srtt_f * (x_f + cwnd_f * mss); -+ rhs = cwnd_f * mss * (srtt_s + delta); -+ -+ if (mptcp_ecf_r_beta * lhs < mptcp_ecf_r_beta * rhs + ecf_cb->switching_margin * rhs) { -+ u32 x_s = sndbuf > cwnd_s * mss ? sndbuf : cwnd_s * mss; -+ u64 lhs_s = srtt_s * x_s; -+ u64 rhs_s = cwnd_s * mss * (2 * srtt_f + delta); -+ -+ if (lhs_s >= rhs_s) { -+ /* too slower than fastest */ -+ ecf_cb->switching_margin = 1; -+ return NULL; -+ } -+ } else { -+ /* use slower one */ -+ ecf_cb->switching_margin = 0; -+ } -+ } -+ -+ return bestsk; -+} -+ -+/* copy from mptcp_sched.c: mptcp_rcv_buf_optimization */ -+static struct sk_buff *mptcp_ecf_rcv_buf_optimization(struct sock *sk, int penal) -+{ -+ struct sock *meta_sk; -+ const struct tcp_sock *tp = tcp_sk(sk); -+ struct mptcp_tcp_sock *mptcp; -+ struct sk_buff *skb_head; -+ struct ecfsched_priv *ecf_p = ecfsched_get_priv(tp); -+ -+ meta_sk = mptcp_meta_sk(sk); -+ skb_head = tcp_rtx_queue_head(meta_sk); -+ -+ if (!skb_head) -+ return NULL; -+ -+ /* If penalization is optional (coming from mptcp_next_segment() and -+ * We are not send-buffer-limited we do not penalize. The retransmission -+ * is just an optimization to fix the idle-time due to the delay before -+ * we wake up the application. 
-+ */ -+ if (!penal && sk_stream_memory_free(meta_sk)) -+ goto retrans; -+ -+ /* Only penalize again after an RTT has elapsed */ -+ if (tcp_jiffies32 - ecf_p->last_rbuf_opti < usecs_to_jiffies(tp->srtt_us >> 3)) -+ goto retrans; -+ -+ /* Half the cwnd of the slow flows */ -+ mptcp_for_each_sub(tp->mpcb, mptcp) { -+ struct tcp_sock *tp_it = mptcp->tp; -+ -+ if (tp_it != tp && -+ TCP_SKB_CB(skb_head)->path_mask & mptcp_pi_to_flag(tp_it->mptcp->path_index)) { -+ if (tp->srtt_us < tp_it->srtt_us && inet_csk((struct sock *)tp_it)->icsk_ca_state == TCP_CA_Open) { -+ u32 prior_cwnd = tp_it->snd_cwnd; -+ -+ tp_it->snd_cwnd = max(tp_it->snd_cwnd >> 1U, 1U); -+ -+ /* If in slow start, do not reduce the ssthresh */ -+ if (prior_cwnd >= tp_it->snd_ssthresh) -+ tp_it->snd_ssthresh = max(tp_it->snd_ssthresh >> 1U, 2U); -+ -+ ecf_p->last_rbuf_opti = tcp_jiffies32; -+ } -+ } -+ } -+ -+retrans: -+ -+ /* Segment not yet injected into this path? Take it!!! */ -+ if (!(TCP_SKB_CB(skb_head)->path_mask & mptcp_pi_to_flag(tp->mptcp->path_index))) { -+ bool do_retrans = false; -+ mptcp_for_each_sub(tp->mpcb, mptcp) { -+ struct tcp_sock *tp_it = mptcp->tp; -+ -+ if (tp_it != tp && -+ TCP_SKB_CB(skb_head)->path_mask & mptcp_pi_to_flag(tp_it->mptcp->path_index)) { -+ if (tp_it->snd_cwnd <= 4) { -+ do_retrans = true; -+ break; -+ } -+ -+ if (4 * tp->srtt_us >= tp_it->srtt_us) { -+ do_retrans = false; -+ break; -+ } else { -+ do_retrans = true; -+ } -+ } -+ } -+ -+ if (do_retrans && mptcp_is_available(sk, skb_head, false)) { -+ trace_mptcp_retransmit(sk, skb_head); -+ return skb_head; -+ } -+ } -+ return NULL; -+} -+ -+/* copy from mptcp_sched.c: __mptcp_next_segment */ -+/* Returns the next segment to be sent from the mptcp meta-queue. -+ * (chooses the reinject queue if any segment is waiting in it, otherwise, -+ * chooses the normal write queue). -+ * Sets *@reinject to 1 if the returned segment comes from the -+ * reinject queue. Sets it to 0 if it is the regular send-head of the meta-sk, -+ * and sets it to -1 if it is a meta-level retransmission to optimize the -+ * receive-buffer. -+ */ -+static struct sk_buff *__mptcp_ecf_next_segment(struct sock *meta_sk, int *reinject) -+{ -+ const struct mptcp_cb *mpcb = tcp_sk(meta_sk)->mpcb; -+ struct sk_buff *skb = NULL; -+ -+ *reinject = 0; -+ -+ /* If we are in fallback-mode, just take from the meta-send-queue */ -+ if (mpcb->infinite_mapping_snd || mpcb->send_infinite_mapping) -+ return tcp_send_head(meta_sk); -+ -+ skb = skb_peek(&mpcb->reinject_queue); -+ -+ if (skb) { -+ *reinject = 1; -+ } else { -+ skb = tcp_send_head(meta_sk); -+ -+ if (!skb && meta_sk->sk_socket && -+ test_bit(SOCK_NOSPACE, &meta_sk->sk_socket->flags) && -+ sk_stream_wspace(meta_sk) < sk_stream_min_wspace(meta_sk)) { -+ struct sock *subsk = ecf_get_available_subflow(meta_sk, NULL, -+ false); -+ if (!subsk) -+ return NULL; -+ -+ skb = mptcp_ecf_rcv_buf_optimization(subsk, 0); -+ if (skb) -+ *reinject = -1; -+ } -+ } -+ return skb; -+} -+ -+/* copy from mptcp_sched.c: mptcp_next_segment */ -+static struct sk_buff *mptcp_ecf_next_segment(struct sock *meta_sk, -+ int *reinject, -+ struct sock **subsk, -+ unsigned int *limit) -+{ -+ struct sk_buff *skb = __mptcp_ecf_next_segment(meta_sk, reinject); -+ unsigned int mss_now; -+ struct tcp_sock *subtp; -+ u16 gso_max_segs; -+ u32 max_len, max_segs, window, needed; -+ -+ /* As we set it, we have to reset it as well. 
*/ -+ *limit = 0; -+ -+ if (!skb) -+ return NULL; -+ -+ *subsk = ecf_get_available_subflow(meta_sk, skb, false); -+ if (!*subsk) -+ return NULL; -+ -+ subtp = tcp_sk(*subsk); -+ mss_now = tcp_current_mss(*subsk); -+ -+ if (!*reinject && unlikely(!tcp_snd_wnd_test(tcp_sk(meta_sk), skb, mss_now))) { -+ skb = mptcp_ecf_rcv_buf_optimization(*subsk, 1); -+ if (skb) -+ *reinject = -1; -+ else -+ return NULL; -+ } -+ -+ /* No splitting required, as we will only send one single segment */ -+ if (skb->len <= mss_now) -+ return skb; -+ -+ /* The following is similar to tcp_mss_split_point, but -+ * we do not care about nagle, because we will anyways -+ * use TCP_NAGLE_PUSH, which overrides this. -+ * -+ * So, we first limit according to the cwnd/gso-size and then according -+ * to the subflow's window. -+ */ -+ -+ gso_max_segs = (*subsk)->sk_gso_max_segs; -+ if (!gso_max_segs) /* No gso supported on the subflow's NIC */ -+ gso_max_segs = 1; -+ max_segs = min_t(unsigned int, tcp_cwnd_test(subtp, skb), gso_max_segs); -+ if (!max_segs) -+ return NULL; -+ -+ max_len = mss_now * max_segs; -+ window = tcp_wnd_end(subtp) - subtp->write_seq; -+ -+ needed = min(skb->len, window); -+ if (max_len <= skb->len) -+ /* Take max_win, which is actually the cwnd/gso-size */ -+ *limit = max_len; -+ else -+ /* Or, take the window */ -+ *limit = needed; -+ -+ return skb; -+} -+ -+static void ecfsched_init(struct sock *sk) -+{ -+ struct ecfsched_priv *ecf_p = ecfsched_get_priv(tcp_sk(sk)); -+ struct ecfsched_cb *ecf_cb = ecfsched_get_cb(tcp_sk(mptcp_meta_sk(sk))); -+ -+ ecf_p->last_rbuf_opti = tcp_jiffies32; -+ ecf_cb->switching_margin = 0; -+} -+ -+struct mptcp_sched_ops mptcp_sched_ecf = { -+ .get_subflow = ecf_get_available_subflow, -+ .next_segment = mptcp_ecf_next_segment, -+ .init = ecfsched_init, -+ .name = "ecf", -+ .owner = THIS_MODULE, -+}; -+ -+static int __init ecf_register(void) -+{ -+ BUILD_BUG_ON(sizeof(struct ecfsched_priv) > MPTCP_SCHED_SIZE); -+ BUILD_BUG_ON(sizeof(struct ecfsched_cb) > MPTCP_SCHED_DATA_SIZE); -+ -+ if (mptcp_register_scheduler(&mptcp_sched_ecf)) -+ return -1; -+ -+ return 0; -+} -+ -+static void ecf_unregister(void) -+{ -+ mptcp_unregister_scheduler(&mptcp_sched_ecf); -+} -+ -+module_init(ecf_register); -+module_exit(ecf_unregister); -+ -+MODULE_AUTHOR("Yeon-sup Lim, Daniel Weber"); -+MODULE_LICENSE("GPL"); -+MODULE_DESCRIPTION("ECF (Earliest Completion First) scheduler for MPTCP, based on default minimum RTT scheduler"); -+MODULE_VERSION("0.95"); - -From 5a9641c84cbb5a49749d7533c20035631985dbe7 Mon Sep 17 00:00:00 2001 -From: Daniel Weber -Date: Mon, 9 Mar 2020 11:00:23 +0100 -Subject: [PATCH 2/3] mptcp: Reduce code-duplication for other schedulers - -'mptcp_next_segment' now honors the function pointer to the actual part -that makes the scheduling decision in 'sched_ops->get_subflow'. This -allows for a better reuse by other schedulers. - -The BLEST scheduler needs to adapt the direction of lambda value change -depending on the occurrence of a retransmission. In order to remove the -copied 'mptcp_rcv_buf_optimization' as well the scheduler now checks the -tcp 'retrans_stamp' of the meta socket. 
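[Editor's illustration, not part of the patch.] The lambda adaptation described above is small enough to sketch in isolation. The +=/-= direction and the capping mirror the BLEST hunks below; retrans_seen is a hypothetical stand-in for the retrans_stamp check on the meta socket, and the bound parameters stand in for min_lambda/max_lambda scaled by 100.

/* Illustrative sketch only.  retrans_seen and the *_1000 bounds are
 * hypothetical stand-ins; the update direction and the clamping follow
 * the blestsched_update_lambda() hunks in the patch below.
 */
static int blest_adapt_lambda(int lambda_1000, int retrans_seen,
                              int dyn_lambda_bad, int dyn_lambda_good,
                              int min_lambda_1000, int max_lambda_1000)
{
        if (retrans_seen)
                lambda_1000 += dyn_lambda_bad;   /* slow down on the slow flow */
        else
                lambda_1000 -= dyn_lambda_good;  /* use the slow flow more */

        /* cap lambda_1000 to its value range */
        if (lambda_1000 > max_lambda_1000)
                lambda_1000 = max_lambda_1000;
        if (lambda_1000 < min_lambda_1000)
                lambda_1000 = min_lambda_1000;

        return lambda_1000;
}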
- -Signed-off-by: Daniel Weber ---- - include/net/mptcp.h | 4 + - net/mptcp/mptcp_blest.c | 200 +--------------------------------------- - net/mptcp/mptcp_sched.c | 9 +- - 3 files changed, 11 insertions(+), 202 deletions(-) - -diff --git a/include/net/mptcp.h b/include/net/mptcp.h -index 02312c9ea3a3..82f66ce206cc 100644 ---- a/include/net/mptcp.h -+++ b/include/net/mptcp.h -@@ -902,6 +902,10 @@ bool subflow_is_active(const struct tcp_sock *tp); - bool subflow_is_backup(const struct tcp_sock *tp); - struct sock *get_available_subflow(struct sock *meta_sk, struct sk_buff *skb, - bool zero_wnd_test); -+struct sk_buff *mptcp_next_segment(struct sock *meta_sk, -+ int *reinject, -+ struct sock **subsk, -+ unsigned int *limit); - extern struct mptcp_sched_ops mptcp_sched_default; - - /* Initializes function-pointers and MPTCP-flags */ -diff --git a/net/mptcp/mptcp_blest.c b/net/mptcp/mptcp_blest.c -index 40905a0d1fe5..22e25dd0d44e 100644 ---- a/net/mptcp/mptcp_blest.c -+++ b/net/mptcp/mptcp_blest.c -@@ -21,7 +21,6 @@ - - #include - #include --#include - - static unsigned char lambda __read_mostly = 12; - module_param(lambda, byte, 0644); -@@ -50,7 +49,6 @@ struct blestsched_priv { - }; - - struct blestsched_cb { -- bool retrans_flag; - s16 lambda_1000; /* values range from min_lambda * 100 to max_lambda * 100 */ - u32 last_lambda_update; - }; -@@ -77,14 +75,13 @@ static void blestsched_update_lambda(struct sock *meta_sk, struct sock *sk) - * during the slow flows last RTT => increase lambda - * otherwise decrease - */ -- if (blest_cb->retrans_flag) { -+ if (tcp_sk(meta_sk)->retrans_stamp) { - /* need to slow down on the slow flow */ - blest_cb->lambda_1000 += dyn_lambda_bad; - } else { - /* use the slow flow more */ - blest_cb->lambda_1000 -= dyn_lambda_good; - } -- blest_cb->retrans_flag = false; - - /* cap lambda_1000 to its value range */ - blest_cb->lambda_1000 = min_t(s16, blest_cb->lambda_1000, max_lambda * 100); -@@ -240,199 +237,6 @@ struct sock *blest_get_available_subflow(struct sock *meta_sk, struct sk_buff *s - return bestsk; - } - --/* copy from mptcp_sched.c: mptcp_rcv_buf_optimization */ --static struct sk_buff *mptcp_blest_rcv_buf_optimization(struct sock *sk, int penal) --{ -- struct sock *meta_sk; -- const struct tcp_sock *tp = tcp_sk(sk); -- struct mptcp_tcp_sock *mptcp; -- struct sk_buff *skb_head; -- struct blestsched_priv *blest_p = blestsched_get_priv(tp); -- struct blestsched_cb *blest_cb; -- -- meta_sk = mptcp_meta_sk(sk); -- skb_head = tcp_rtx_queue_head(meta_sk); -- -- if (!skb_head) -- return NULL; -- -- /* If penalization is optional (coming from mptcp_next_segment() and -- * We are not send-buffer-limited we do not penalize. The retransmission -- * is just an optimization to fix the idle-time due to the delay before -- * we wake up the application. 
-- */ -- if (!penal && sk_stream_memory_free(meta_sk)) -- goto retrans; -- -- /* Record the occurrence of a retransmission to update the lambda value */ -- blest_cb = blestsched_get_cb(tcp_sk(meta_sk)); -- blest_cb->retrans_flag = true; -- -- /* Only penalize again after an RTT has elapsed */ -- if (tcp_jiffies32 - blest_p->last_rbuf_opti < usecs_to_jiffies(tp->srtt_us >> 3)) -- goto retrans; -- -- /* Half the cwnd of the slow flows */ -- mptcp_for_each_sub(tp->mpcb, mptcp) { -- struct tcp_sock *tp_it = mptcp->tp; -- -- if (tp_it != tp && -- TCP_SKB_CB(skb_head)->path_mask & mptcp_pi_to_flag(tp_it->mptcp->path_index)) { -- if (tp->srtt_us < tp_it->srtt_us && inet_csk((struct sock *)tp_it)->icsk_ca_state == TCP_CA_Open) { -- u32 prior_cwnd = tp_it->snd_cwnd; -- -- tp_it->snd_cwnd = max(tp_it->snd_cwnd >> 1U, 1U); -- -- /* If in slow start, do not reduce the ssthresh */ -- if (prior_cwnd >= tp_it->snd_ssthresh) -- tp_it->snd_ssthresh = max(tp_it->snd_ssthresh >> 1U, 2U); -- -- blest_p->last_rbuf_opti = tcp_jiffies32; -- } -- } -- } -- --retrans: -- -- /* Segment not yet injected into this path? Take it!!! */ -- if (!(TCP_SKB_CB(skb_head)->path_mask & mptcp_pi_to_flag(tp->mptcp->path_index))) { -- bool do_retrans = false; -- mptcp_for_each_sub(tp->mpcb, mptcp) { -- struct tcp_sock *tp_it = mptcp->tp; -- -- if (tp_it != tp && -- TCP_SKB_CB(skb_head)->path_mask & mptcp_pi_to_flag(tp_it->mptcp->path_index)) { -- if (tp_it->snd_cwnd <= 4) { -- do_retrans = true; -- break; -- } -- -- if (4 * tp->srtt_us >= tp_it->srtt_us) { -- do_retrans = false; -- break; -- } else { -- do_retrans = true; -- } -- } -- } -- -- if (do_retrans && mptcp_is_available(sk, skb_head, false)) { -- trace_mptcp_retransmit(sk, skb_head); -- return skb_head; -- } -- } -- return NULL; --} -- --/* copy from mptcp_sched.c: __mptcp_next_segment */ --/* Returns the next segment to be sent from the mptcp meta-queue. -- * (chooses the reinject queue if any segment is waiting in it, otherwise, -- * chooses the normal write queue). -- * Sets *@reinject to 1 if the returned segment comes from the -- * reinject queue. Sets it to 0 if it is the regular send-head of the meta-sk, -- * and sets it to -1 if it is a meta-level retransmission to optimize the -- * receive-buffer. 
-- */ --static struct sk_buff *__mptcp_blest_next_segment(struct sock *meta_sk, int *reinject) --{ -- const struct mptcp_cb *mpcb = tcp_sk(meta_sk)->mpcb; -- struct sk_buff *skb = NULL; -- -- *reinject = 0; -- -- /* If we are in fallback-mode, just take from the meta-send-queue */ -- if (mpcb->infinite_mapping_snd || mpcb->send_infinite_mapping) -- return tcp_send_head(meta_sk); -- -- skb = skb_peek(&mpcb->reinject_queue); -- -- if (skb) { -- *reinject = 1; -- } else { -- skb = tcp_send_head(meta_sk); -- -- if (!skb && meta_sk->sk_socket && -- test_bit(SOCK_NOSPACE, &meta_sk->sk_socket->flags) && -- sk_stream_wspace(meta_sk) < sk_stream_min_wspace(meta_sk)) { -- struct sock *subsk = blest_get_available_subflow(meta_sk, NULL, -- false); -- if (!subsk) -- return NULL; -- -- skb = mptcp_blest_rcv_buf_optimization(subsk, 0); -- if (skb) -- *reinject = -1; -- } -- } -- return skb; --} -- --/* copy from mptcp_sched.c: mptcp_next_segment */ --static struct sk_buff *mptcp_blest_next_segment(struct sock *meta_sk, -- int *reinject, -- struct sock **subsk, -- unsigned int *limit) --{ -- struct sk_buff *skb = __mptcp_blest_next_segment(meta_sk, reinject); -- unsigned int mss_now; -- struct tcp_sock *subtp; -- u16 gso_max_segs; -- u32 max_len, max_segs, window, needed; -- -- /* As we set it, we have to reset it as well. */ -- *limit = 0; -- -- if (!skb) -- return NULL; -- -- *subsk = blest_get_available_subflow(meta_sk, skb, false); -- if (!*subsk) -- return NULL; -- -- subtp = tcp_sk(*subsk); -- mss_now = tcp_current_mss(*subsk); -- -- if (!*reinject && unlikely(!tcp_snd_wnd_test(tcp_sk(meta_sk), skb, mss_now))) { -- skb = mptcp_blest_rcv_buf_optimization(*subsk, 1); -- if (skb) -- *reinject = -1; -- else -- return NULL; -- } -- -- /* No splitting required, as we will only send one single segment */ -- if (skb->len <= mss_now) -- return skb; -- -- /* The following is similar to tcp_mss_split_point, but -- * we do not care about nagle, because we will anyways -- * use TCP_NAGLE_PUSH, which overrides this. -- * -- * So, we first limit according to the cwnd/gso-size and then according -- * to the subflow's window. 
-- */ -- -- gso_max_segs = (*subsk)->sk_gso_max_segs; -- if (!gso_max_segs) /* No gso supported on the subflow's NIC */ -- gso_max_segs = 1; -- max_segs = min_t(unsigned int, tcp_cwnd_test(subtp, skb), gso_max_segs); -- if (!max_segs) -- return NULL; -- -- max_len = mss_now * max_segs; -- window = tcp_wnd_end(subtp) - subtp->write_seq; -- -- needed = min(skb->len, window); -- if (max_len <= skb->len) -- /* Take max_win, which is actually the cwnd/gso-size */ -- *limit = max_len; -- else -- /* Or, take the window */ -- *limit = needed; -- -- return skb; --} -- - static void blestsched_init(struct sock *sk) - { - struct blestsched_priv *blest_p = blestsched_get_priv(tcp_sk(sk)); -@@ -450,7 +254,7 @@ static void blestsched_init(struct sock *sk) - - static struct mptcp_sched_ops mptcp_sched_blest = { - .get_subflow = blest_get_available_subflow, -- .next_segment = mptcp_blest_next_segment, -+ .next_segment = mptcp_next_segment, - .init = blestsched_init, - .name = "blest", - .owner = THIS_MODULE, -diff --git a/net/mptcp/mptcp_sched.c b/net/mptcp/mptcp_sched.c -index 18c3559b0d48..5bf2946a5caf 100644 ---- a/net/mptcp/mptcp_sched.c -+++ b/net/mptcp/mptcp_sched.c -@@ -372,8 +372,8 @@ static struct sk_buff *__mptcp_next_segment(struct sock *meta_sk, int *reinject) - if (!skb && meta_sk->sk_socket && - test_bit(SOCK_NOSPACE, &meta_sk->sk_socket->flags) && - sk_stream_wspace(meta_sk) < sk_stream_min_wspace(meta_sk)) { -- struct sock *subsk = get_available_subflow(meta_sk, NULL, -- false); -+ struct sock *subsk = mpcb->sched_ops->get_subflow(meta_sk, NULL, -+ false); - if (!subsk) - return NULL; - -@@ -385,7 +385,7 @@ static struct sk_buff *__mptcp_next_segment(struct sock *meta_sk, int *reinject) - return skb; - } - --static struct sk_buff *mptcp_next_segment(struct sock *meta_sk, -+struct sk_buff *mptcp_next_segment(struct sock *meta_sk, - int *reinject, - struct sock **subsk, - unsigned int *limit) -@@ -402,7 +402,7 @@ static struct sk_buff *mptcp_next_segment(struct sock *meta_sk, - if (!skb) - return NULL; - -- *subsk = get_available_subflow(meta_sk, skb, false); -+ *subsk = tcp_sk(meta_sk)->mpcb->sched_ops->get_subflow(meta_sk, skb, false); - if (!*subsk) - return NULL; - -@@ -449,6 +449,7 @@ static struct sk_buff *mptcp_next_segment(struct sock *meta_sk, - - return skb; - } -+EXPORT_SYMBOL_GPL(mptcp_next_segment); - - static void defsched_init(struct sock *sk) - { - -From 5e8425e43b38e7e0fe566ffd50e197c07807ebdf Mon Sep 17 00:00:00 2001 -From: Daniel Weber -Date: Mon, 9 Mar 2020 11:09:27 +0100 -Subject: [PATCH 3/3] mptcp: Remove code-duplication from ECF scheduler - -The ECF scheduler relies on large parts of the default scheduler. This -commit removes the copied blocks and reuses 'mptcp_next_segment' and -'mptcp_rcv_buf_optimization' directly from it via function pointers. 
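[Editor's illustration, not part of the patch.] After this refactor a scheduler keeps only its own subflow selection and wires the shared, now-exported mptcp_next_segment() into its mptcp_sched_ops, as the hunks below do for ECF. A brief sketch of that shape follows; the mysched_* names are made-up placeholders, while the field layout follows struct mptcp_sched_ops as used in these patches.

/* Illustrative sketch only.  A scheduler provides its own get_subflow()
 * and init() and reuses the shared segment splitter; mysched_* are
 * hypothetical placeholders.
 */
static struct mptcp_sched_ops mptcp_sched_mysched = {
        .get_subflow  = mysched_get_available_subflow, /* scheduler-specific   */
        .next_segment = mptcp_next_segment,            /* shared, now exported */
        .init         = mysched_init,
        .name         = "mysched",
        .owner        = THIS_MODULE,
};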
- -Signed-off-by: Daniel Weber ---- - net/mptcp/mptcp_ecf.c | 191 +----------------------------------------- - 1 file changed, 1 insertion(+), 190 deletions(-) - -diff --git a/net/mptcp/mptcp_ecf.c b/net/mptcp/mptcp_ecf.c -index e0bd430a8943..6b976b2b0c72 100644 ---- a/net/mptcp/mptcp_ecf.c -+++ b/net/mptcp/mptcp_ecf.c -@@ -21,7 +21,6 @@ - - #include - #include --#include - - static unsigned int mptcp_ecf_r_beta __read_mostly = 4; /* beta = 1/r_beta = 0.25 */ - module_param(mptcp_ecf_r_beta, int, 0644); -@@ -154,194 +153,6 @@ static struct sock *ecf_get_available_subflow(struct sock *meta_sk, - return bestsk; - } - --/* copy from mptcp_sched.c: mptcp_rcv_buf_optimization */ --static struct sk_buff *mptcp_ecf_rcv_buf_optimization(struct sock *sk, int penal) --{ -- struct sock *meta_sk; -- const struct tcp_sock *tp = tcp_sk(sk); -- struct mptcp_tcp_sock *mptcp; -- struct sk_buff *skb_head; -- struct ecfsched_priv *ecf_p = ecfsched_get_priv(tp); -- -- meta_sk = mptcp_meta_sk(sk); -- skb_head = tcp_rtx_queue_head(meta_sk); -- -- if (!skb_head) -- return NULL; -- -- /* If penalization is optional (coming from mptcp_next_segment() and -- * We are not send-buffer-limited we do not penalize. The retransmission -- * is just an optimization to fix the idle-time due to the delay before -- * we wake up the application. -- */ -- if (!penal && sk_stream_memory_free(meta_sk)) -- goto retrans; -- -- /* Only penalize again after an RTT has elapsed */ -- if (tcp_jiffies32 - ecf_p->last_rbuf_opti < usecs_to_jiffies(tp->srtt_us >> 3)) -- goto retrans; -- -- /* Half the cwnd of the slow flows */ -- mptcp_for_each_sub(tp->mpcb, mptcp) { -- struct tcp_sock *tp_it = mptcp->tp; -- -- if (tp_it != tp && -- TCP_SKB_CB(skb_head)->path_mask & mptcp_pi_to_flag(tp_it->mptcp->path_index)) { -- if (tp->srtt_us < tp_it->srtt_us && inet_csk((struct sock *)tp_it)->icsk_ca_state == TCP_CA_Open) { -- u32 prior_cwnd = tp_it->snd_cwnd; -- -- tp_it->snd_cwnd = max(tp_it->snd_cwnd >> 1U, 1U); -- -- /* If in slow start, do not reduce the ssthresh */ -- if (prior_cwnd >= tp_it->snd_ssthresh) -- tp_it->snd_ssthresh = max(tp_it->snd_ssthresh >> 1U, 2U); -- -- ecf_p->last_rbuf_opti = tcp_jiffies32; -- } -- } -- } -- --retrans: -- -- /* Segment not yet injected into this path? Take it!!! */ -- if (!(TCP_SKB_CB(skb_head)->path_mask & mptcp_pi_to_flag(tp->mptcp->path_index))) { -- bool do_retrans = false; -- mptcp_for_each_sub(tp->mpcb, mptcp) { -- struct tcp_sock *tp_it = mptcp->tp; -- -- if (tp_it != tp && -- TCP_SKB_CB(skb_head)->path_mask & mptcp_pi_to_flag(tp_it->mptcp->path_index)) { -- if (tp_it->snd_cwnd <= 4) { -- do_retrans = true; -- break; -- } -- -- if (4 * tp->srtt_us >= tp_it->srtt_us) { -- do_retrans = false; -- break; -- } else { -- do_retrans = true; -- } -- } -- } -- -- if (do_retrans && mptcp_is_available(sk, skb_head, false)) { -- trace_mptcp_retransmit(sk, skb_head); -- return skb_head; -- } -- } -- return NULL; --} -- --/* copy from mptcp_sched.c: __mptcp_next_segment */ --/* Returns the next segment to be sent from the mptcp meta-queue. -- * (chooses the reinject queue if any segment is waiting in it, otherwise, -- * chooses the normal write queue). -- * Sets *@reinject to 1 if the returned segment comes from the -- * reinject queue. Sets it to 0 if it is the regular send-head of the meta-sk, -- * and sets it to -1 if it is a meta-level retransmission to optimize the -- * receive-buffer. 
-- */ --static struct sk_buff *__mptcp_ecf_next_segment(struct sock *meta_sk, int *reinject) --{ -- const struct mptcp_cb *mpcb = tcp_sk(meta_sk)->mpcb; -- struct sk_buff *skb = NULL; -- -- *reinject = 0; -- -- /* If we are in fallback-mode, just take from the meta-send-queue */ -- if (mpcb->infinite_mapping_snd || mpcb->send_infinite_mapping) -- return tcp_send_head(meta_sk); -- -- skb = skb_peek(&mpcb->reinject_queue); -- -- if (skb) { -- *reinject = 1; -- } else { -- skb = tcp_send_head(meta_sk); -- -- if (!skb && meta_sk->sk_socket && -- test_bit(SOCK_NOSPACE, &meta_sk->sk_socket->flags) && -- sk_stream_wspace(meta_sk) < sk_stream_min_wspace(meta_sk)) { -- struct sock *subsk = ecf_get_available_subflow(meta_sk, NULL, -- false); -- if (!subsk) -- return NULL; -- -- skb = mptcp_ecf_rcv_buf_optimization(subsk, 0); -- if (skb) -- *reinject = -1; -- } -- } -- return skb; --} -- --/* copy from mptcp_sched.c: mptcp_next_segment */ --static struct sk_buff *mptcp_ecf_next_segment(struct sock *meta_sk, -- int *reinject, -- struct sock **subsk, -- unsigned int *limit) --{ -- struct sk_buff *skb = __mptcp_ecf_next_segment(meta_sk, reinject); -- unsigned int mss_now; -- struct tcp_sock *subtp; -- u16 gso_max_segs; -- u32 max_len, max_segs, window, needed; -- -- /* As we set it, we have to reset it as well. */ -- *limit = 0; -- -- if (!skb) -- return NULL; -- -- *subsk = ecf_get_available_subflow(meta_sk, skb, false); -- if (!*subsk) -- return NULL; -- -- subtp = tcp_sk(*subsk); -- mss_now = tcp_current_mss(*subsk); -- -- if (!*reinject && unlikely(!tcp_snd_wnd_test(tcp_sk(meta_sk), skb, mss_now))) { -- skb = mptcp_ecf_rcv_buf_optimization(*subsk, 1); -- if (skb) -- *reinject = -1; -- else -- return NULL; -- } -- -- /* No splitting required, as we will only send one single segment */ -- if (skb->len <= mss_now) -- return skb; -- -- /* The following is similar to tcp_mss_split_point, but -- * we do not care about nagle, because we will anyways -- * use TCP_NAGLE_PUSH, which overrides this. -- * -- * So, we first limit according to the cwnd/gso-size and then according -- * to the subflow's window. 
-- */ -- -- gso_max_segs = (*subsk)->sk_gso_max_segs; -- if (!gso_max_segs) /* No gso supported on the subflow's NIC */ -- gso_max_segs = 1; -- max_segs = min_t(unsigned int, tcp_cwnd_test(subtp, skb), gso_max_segs); -- if (!max_segs) -- return NULL; -- -- max_len = mss_now * max_segs; -- window = tcp_wnd_end(subtp) - subtp->write_seq; -- -- needed = min(skb->len, window); -- if (max_len <= skb->len) -- /* Take max_win, which is actually the cwnd/gso-size */ -- *limit = max_len; -- else -- /* Or, take the window */ -- *limit = needed; -- -- return skb; --} -- - static void ecfsched_init(struct sock *sk) - { - struct ecfsched_priv *ecf_p = ecfsched_get_priv(tcp_sk(sk)); -@@ -353,7 +164,7 @@ static void ecfsched_init(struct sock *sk) - - struct mptcp_sched_ops mptcp_sched_ecf = { - .get_subflow = ecf_get_available_subflow, -- .next_segment = mptcp_ecf_next_segment, -+ .next_segment = mptcp_next_segment, - .init = ecfsched_init, - .name = "ecf", - .owner = THIS_MODULE, diff --git a/root/target/linux/mediatek/patches-5.4/0101-dsa-enable-vlan.patch b/root/target/linux/mediatek/patches-5.4/0101-dsa-enable-vlan.patch deleted file mode 100644 index 065fda3f..00000000 --- a/root/target/linux/mediatek/patches-5.4/0101-dsa-enable-vlan.patch +++ /dev/null @@ -1,43 +0,0 @@ -From 35082b70e998d5b419e351010005494e7a5b9412 Mon Sep 17 00:00:00 2001 -From: Landen Chao -Date: Tue, 31 Dec 2019 11:48:41 +0100 -Subject: [PATCH] net: dsa: enable vlan without bridge on dsa user port - ---- - drivers/net/dsa/mt7530.c | 14 ++++++++++++-- - 1 file changed, 12 insertions(+), 2 deletions(-) - -diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c -index 1d8d36de4d20..7e285aa9bd7c 100644 ---- a/drivers/net/dsa/mt7530.c -+++ b/drivers/net/dsa/mt7530.c -@@ -1165,8 +1165,13 @@ mt7530_port_vlan_add(struct dsa_switch *ds, int port, - /* The port is kept as VLAN-unaware if bridge with vlan_filtering not - * being set. - */ -- if (!dsa_port_is_vlan_filtering(&ds->ports[port])) -+ if (!dsa_port_is_vlan_filtering(&ds->ports[port])){ -+ /* Enable VLAN tagged in port-based vlan setting. */ -+ if ((vlan->vid_begin != 0) && (vlan->vid_end != 0)) -+ mt7530_rmw(priv, MT7530_PCR_P(port), EG_TAG(3), -+ EG_TAG(2)); - return; -+ } - - mutex_lock(&priv->reg_mutex); - -@@ -1196,8 +1201,13 @@ mt7530_port_vlan_del(struct dsa_switch *ds, int port, - /* The port is kept as VLAN-unaware if bridge with vlan_filtering not - * being set. - */ -- if (!dsa_port_is_vlan_filtering(&ds->ports[port])) -+ if (!dsa_port_is_vlan_filtering(&ds->ports[port])) { -+ /* Disable VLAN tagged in port-based vlan setting. */ -+ if ((vlan->vid_begin != 0) && (vlan->vid_end != 0)) -+ mt7530_rmw(priv, MT7530_PCR_P(port), EG_TAG(3), -+ EG_TAG(0)); - return 0; -+ } - - mutex_lock(&priv->reg_mutex); -