From 59527a92ffaad7339385cf5f7f72e85a28b539b8 Mon Sep 17 00:00:00 2001 From: "175338101@qq.com" <175338101@qq.com> Date: Tue, 2 Nov 2021 14:53:29 +0800 Subject: [PATCH] fixx --- build.sh | 9 +- ...ptcp_v0.96.patch => 690-mptcp_trunk.patch} | 2042 +++++++++-------- 2 files changed, 1117 insertions(+), 934 deletions(-) rename root/target/linux/generic/hack-5.4/{690-mptcp_v0.96.patch => 690-mptcp_trunk.patch} (94%) diff --git a/build.sh b/build.sh index 50834967..3ac83273 100755 --- a/build.sh +++ b/build.sh @@ -92,9 +92,15 @@ fi if [ "$OMR_OPENWRT" = "default" ]; then if [ "$OMR_KERNEL" = "5.4" ]; then # Use OpenWrt 21.02 for 5.4 kernel +<<<<<<< HEAD _get_repo "$OMR_TARGET/source" https://github.com/openwrt/openwrt "f441be3921c769b732f0148f005d4f1bbace0508" _get_repo feeds/packages https://github.com/openwrt/packages "3aa30ceee4fcf7b131bdc0f98658391069573e12" _get_repo feeds/luci https://github.com/openwrt/luci "f28aaa35cd5c0cbbe59d8cc6a67de88ceeac382e" +======= + _get_repo "$OMR_TARGET/source" https://github.com/openwrt/openwrt "76d90a5eaf3b7fc5bb1a1b8626db0e4e2487e876" + _get_repo feeds/packages https://github.com/openwrt/packages "dc5faddacba4a2d8c18ad65614a34ae9c9f24d52" + _get_repo feeds/luci https://github.com/openwrt/luci "b39d9bf4bb88acd3120098c3a087e47331d1d757" +>>>>>>> parent of 3a551b9 (Merge branch 'test' into develop) else _get_repo "$OMR_TARGET/source" https://github.com/openwrt/openwrt "585cef5f1a9c1c3aecd7d231364618e96d03ab65" _get_repo feeds/packages https://github.com/openwrt/packages "e2055b5433da245e6ff8fb060d018d036499cf38" @@ -554,9 +560,6 @@ if [ "$OMR_KERNEL" = "5.14" ]; then echo "Set to kernel 5.14 for ramips" find target/linux/ramips -type f -name Makefile -exec sed -i 's%KERNEL_PATCHVER:=5.4%KERNEL_PATCHVER:=5.14%g' {} \; echo "Done" - echo "Set to kernel 5.14 for ramips" - find target/linux/ipq806x -type f -name Makefile -exec sed -i 's%KERNEL_PATCHVER:=5.10%KERNEL_PATCHVER:=5.14%g' {} \; - echo "Done" #rm -rf target/linux/generic/files/drivers/net/phy/b53 rm -f target/linux/bcm27xx/modules/sound.mk echo "CONFIG_DEVEL=y" >> ".config" diff --git a/root/target/linux/generic/hack-5.4/690-mptcp_v0.96.patch b/root/target/linux/generic/hack-5.4/690-mptcp_trunk.patch similarity index 94% rename from root/target/linux/generic/hack-5.4/690-mptcp_v0.96.patch rename to root/target/linux/generic/hack-5.4/690-mptcp_trunk.patch index 297808af..927ccca4 100644 --- a/root/target/linux/generic/hack-5.4/690-mptcp_v0.96.patch +++ b/root/target/linux/generic/hack-5.4/690-mptcp_trunk.patch @@ -1,7 +1,7 @@ -diff -aurN linux-5.4.155/Documentation/admin-guide/kernel-parameters.txt mptcp-mptcp_v0.96/Documentation/admin-guide/kernel-parameters.txt ---- linux-5.4.155/Documentation/admin-guide/kernel-parameters.txt 2021-10-20 11:40:18.000000000 +0200 -+++ mptcp-mptcp_v0.96/Documentation/admin-guide/kernel-parameters.txt 2021-10-25 10:05:18.000000000 +0200 -@@ -2742,6 +2742,10 @@ +diff -aurN linux-5.4.64/Documentation/admin-guide/kernel-parameters.txt linux-5.4.64.mptcp/Documentation/admin-guide/kernel-parameters.txt +--- linux-5.4.64/Documentation/admin-guide/kernel-parameters.txt 2020-09-09 19:12:37.000000000 +0200 ++++ linux-5.4.64.mptcp/Documentation/admin-guide/kernel-parameters.txt 2020-09-10 19:25:10.375223065 +0200 +@@ -2734,6 +2734,10 @@ allocations which rules out almost all kernel allocations. Use with caution! 
@@ -12,9 +12,9 @@ diff -aurN linux-5.4.155/Documentation/admin-guide/kernel-parameters.txt mptcp-m MTD_Partition= [MTD] Format: ,,, -diff -aurN linux-5.4.155/Documentation/networking/ip-sysctl.txt mptcp-mptcp_v0.96/Documentation/networking/ip-sysctl.txt ---- linux-5.4.155/Documentation/networking/ip-sysctl.txt 2021-10-20 11:40:18.000000000 +0200 -+++ mptcp-mptcp_v0.96/Documentation/networking/ip-sysctl.txt 2021-10-25 10:05:18.000000000 +0200 +diff -aurN linux-5.4.64/Documentation/networking/ip-sysctl.txt linux-5.4.64.mptcp/Documentation/networking/ip-sysctl.txt +--- linux-5.4.64/Documentation/networking/ip-sysctl.txt 2020-09-09 19:12:37.000000000 +0200 ++++ linux-5.4.64.mptcp/Documentation/networking/ip-sysctl.txt 2020-09-10 19:25:10.375223065 +0200 @@ -818,6 +818,18 @@ Default: 0 (disabled) @@ -34,10 +34,10 @@ diff -aurN linux-5.4.155/Documentation/networking/ip-sysctl.txt mptcp-mptcp_v0.9 UDP variables: udp_l3mdev_accept - BOOLEAN -diff -aurN linux-5.4.155/drivers/infiniband/hw/cxgb4/cm.c mptcp-mptcp_v0.96/drivers/infiniband/hw/cxgb4/cm.c ---- linux-5.4.155/drivers/infiniband/hw/cxgb4/cm.c 2021-10-20 11:40:18.000000000 +0200 -+++ mptcp-mptcp_v0.96/drivers/infiniband/hw/cxgb4/cm.c 2021-10-25 10:05:18.000000000 +0200 -@@ -3950,7 +3950,7 @@ +diff -aurN linux-5.4.64/drivers/infiniband/hw/cxgb4/cm.c linux-5.4.64.mptcp/drivers/infiniband/hw/cxgb4/cm.c +--- linux-5.4.64/drivers/infiniband/hw/cxgb4/cm.c 2020-09-09 19:12:37.000000000 +0200 ++++ linux-5.4.64.mptcp/drivers/infiniband/hw/cxgb4/cm.c 2020-09-10 19:25:10.439222000 +0200 +@@ -3949,7 +3949,7 @@ */ memset(&tmp_opt, 0, sizeof(tmp_opt)); tcp_clear_options(&tmp_opt); @@ -46,9 +46,9 @@ diff -aurN linux-5.4.155/drivers/infiniband/hw/cxgb4/cm.c mptcp-mptcp_v0.96/driv req = __skb_push(skb, sizeof(*req)); memset(req, 0, sizeof(*req)); -diff -aurN linux-5.4.155/include/linux/skbuff.h mptcp-mptcp_v0.96/include/linux/skbuff.h ---- linux-5.4.155/include/linux/skbuff.h 2021-10-20 11:40:18.000000000 +0200 -+++ mptcp-mptcp_v0.96/include/linux/skbuff.h 2021-10-25 10:05:18.000000000 +0200 +diff -aurN linux-5.4.64/include/linux/skbuff.h linux-5.4.64.mptcp/include/linux/skbuff.h +--- linux-5.4.64/include/linux/skbuff.h 2020-09-09 19:12:37.000000000 +0200 ++++ linux-5.4.64.mptcp/include/linux/skbuff.h 2020-09-10 19:25:10.439222000 +0200 @@ -717,7 +717,7 @@ * want to keep them across layers you have to do a skb_clone() * first. This is owned by whoever has the skb queued ATM. 
@@ -58,9 +58,9 @@ diff -aurN linux-5.4.155/include/linux/skbuff.h mptcp-mptcp_v0.96/include/linux/ union { struct { -diff -aurN linux-5.4.155/include/linux/tcp.h mptcp-mptcp_v0.96/include/linux/tcp.h ---- linux-5.4.155/include/linux/tcp.h 2021-10-20 11:40:18.000000000 +0200 -+++ mptcp-mptcp_v0.96/include/linux/tcp.h 2021-10-25 10:05:18.000000000 +0200 +diff -aurN linux-5.4.64/include/linux/tcp.h linux-5.4.64.mptcp/include/linux/tcp.h +--- linux-5.4.64/include/linux/tcp.h 2020-09-09 19:12:37.000000000 +0200 ++++ linux-5.4.64.mptcp/include/linux/tcp.h 2020-09-10 19:25:10.499221003 +0200 @@ -54,7 +54,7 @@ /* TCP Fast Open */ #define TCP_FASTOPEN_COOKIE_MIN 4 /* Min Fast Open Cookie size in bytes */ @@ -146,15 +146,7 @@ diff -aurN linux-5.4.155/include/linux/tcp.h mptcp-mptcp_v0.96/include/linux/tcp struct tcp_sock { /* inet_connection_sock has to be the first member of tcp_sock */ struct inet_connection_sock inet_conn; -@@ -295,6 +350,7 @@ - u32 rate_interval_us; /* saved rate sample: time elapsed */ - - u32 rcv_wnd; /* Current receiver window */ -+ u32 rcv_right_edge; /* Highest announced right edge */ - u32 write_seq; /* Tail(+1) of data held in tcp send buffer */ - u32 notsent_lowat; /* TCP_NOTSENT_LOWAT */ - u32 pushed_seq; /* Last pushed seq, required to talk to windows */ -@@ -397,6 +453,44 @@ +@@ -397,6 +452,44 @@ */ struct request_sock __rcu *fastopen_rsk; u32 *saved_syn; @@ -199,7 +191,7 @@ diff -aurN linux-5.4.155/include/linux/tcp.h mptcp-mptcp_v0.96/include/linux/tcp }; enum tsq_enum { -@@ -408,6 +502,8 @@ +@@ -408,6 +501,8 @@ TCP_MTU_REDUCED_DEFERRED, /* tcp_v{4|6}_err() could not call * tcp_v{4|6}_mtu_reduced() */ @@ -208,7 +200,7 @@ diff -aurN linux-5.4.155/include/linux/tcp.h mptcp-mptcp_v0.96/include/linux/tcp }; enum tsq_flags { -@@ -417,6 +513,8 @@ +@@ -417,6 +512,8 @@ TCPF_WRITE_TIMER_DEFERRED = (1UL << TCP_WRITE_TIMER_DEFERRED), TCPF_DELACK_TIMER_DEFERRED = (1UL << TCP_DELACK_TIMER_DEFERRED), TCPF_MTU_REDUCED_DEFERRED = (1UL << TCP_MTU_REDUCED_DEFERRED), @@ -217,7 +209,7 @@ diff -aurN linux-5.4.155/include/linux/tcp.h mptcp-mptcp_v0.96/include/linux/tcp }; static inline struct tcp_sock *tcp_sk(const struct sock *sk) -@@ -440,6 +538,7 @@ +@@ -440,6 +537,7 @@ #ifdef CONFIG_TCP_MD5SIG struct tcp_md5sig_key *tw_md5_key; #endif @@ -225,9 +217,9 @@ diff -aurN linux-5.4.155/include/linux/tcp.h mptcp-mptcp_v0.96/include/linux/tcp }; static inline struct tcp_timewait_sock *tcp_twsk(const struct sock *sk) -diff -aurN linux-5.4.155/include/net/inet_common.h mptcp-mptcp_v0.96/include/net/inet_common.h ---- linux-5.4.155/include/net/inet_common.h 2021-10-20 11:40:18.000000000 +0200 -+++ mptcp-mptcp_v0.96/include/net/inet_common.h 2021-10-25 10:05:18.000000000 +0200 +diff -aurN linux-5.4.64/include/net/inet_common.h linux-5.4.64.mptcp/include/net/inet_common.h +--- linux-5.4.64/include/net/inet_common.h 2020-09-09 19:12:37.000000000 +0200 ++++ linux-5.4.64.mptcp/include/net/inet_common.h 2020-09-10 19:25:10.499221003 +0200 @@ -2,6 +2,7 @@ #ifndef _INET_COMMON_H #define _INET_COMMON_H @@ -245,9 +237,9 @@ diff -aurN linux-5.4.155/include/net/inet_common.h mptcp-mptcp_v0.96/include/net int inet_release(struct socket *sock); int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags); -diff -aurN linux-5.4.155/include/net/inet_connection_sock.h mptcp-mptcp_v0.96/include/net/inet_connection_sock.h ---- linux-5.4.155/include/net/inet_connection_sock.h 2021-10-20 11:40:18.000000000 +0200 -+++ mptcp-mptcp_v0.96/include/net/inet_connection_sock.h 2021-10-25 
10:05:18.000000000 +0200 +diff -aurN linux-5.4.64/include/net/inet_connection_sock.h linux-5.4.64.mptcp/include/net/inet_connection_sock.h +--- linux-5.4.64/include/net/inet_connection_sock.h 2020-09-09 19:12:37.000000000 +0200 ++++ linux-5.4.64.mptcp/include/net/inet_connection_sock.h 2020-09-10 19:25:10.499221003 +0200 @@ -25,6 +25,7 @@ struct inet_bind_bucket; @@ -256,9 +248,9 @@ diff -aurN linux-5.4.155/include/net/inet_connection_sock.h mptcp-mptcp_v0.96/in /* * Pointers to address related TCP functions -diff -aurN linux-5.4.155/include/net/inet_sock.h mptcp-mptcp_v0.96/include/net/inet_sock.h ---- linux-5.4.155/include/net/inet_sock.h 2021-10-20 11:40:18.000000000 +0200 -+++ mptcp-mptcp_v0.96/include/net/inet_sock.h 2021-10-25 10:05:18.000000000 +0200 +diff -aurN linux-5.4.64/include/net/inet_sock.h linux-5.4.64.mptcp/include/net/inet_sock.h +--- linux-5.4.64/include/net/inet_sock.h 2020-09-09 19:12:37.000000000 +0200 ++++ linux-5.4.64.mptcp/include/net/inet_sock.h 2020-09-10 19:25:10.499221003 +0200 @@ -79,7 +79,7 @@ #define ireq_state req.__req_common.skc_state #define ireq_family req.__req_common.skc_family @@ -277,10 +269,10 @@ diff -aurN linux-5.4.155/include/net/inet_sock.h mptcp-mptcp_v0.96/include/net/i smc_ok : 1; u32 ir_mark; union { -diff -aurN linux-5.4.155/include/net/mptcp.h mptcp-mptcp_v0.96/include/net/mptcp.h ---- linux-5.4.155/include/net/mptcp.h 1970-01-01 01:00:00.000000000 +0100 -+++ mptcp-mptcp_v0.96/include/net/mptcp.h 2021-10-25 10:05:18.000000000 +0200 -@@ -0,0 +1,1573 @@ +diff -aurN linux-5.4.64/include/net/mptcp.h linux-5.4.64.mptcp/include/net/mptcp.h +--- linux-5.4.64/include/net/mptcp.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-5.4.64.mptcp/include/net/mptcp.h 2020-09-10 19:25:10.499221003 +0200 +@@ -0,0 +1,1571 @@ +/* + * MPTCP implementation + * @@ -647,7 +639,7 @@ diff -aurN linux-5.4.155/include/net/mptcp.h mptcp-mptcp_v0.96/include/net/mptcp +#define MPTCPV1_SUB_LEN_CAPABLE_ACK 20 +#define MPTCPV1_SUB_LEN_CAPABLE_ACK_ALIGN 20 +#define MPTCPV1_SUB_LEN_CAPABLE_DATA 22 -+#define MPTCPV1_SUB_LEN_CAPABLE_DATA_CSUM 24 ++#define MPTCPV1_SUB_LEN_CAPABLE_DATA_CSUM 22 +#define MPTCPV1_SUB_LEN_CAPABLE_DATA_ALIGN 24 + +#define MPTCP_SUB_JOIN 1 @@ -1012,6 +1004,7 @@ diff -aurN linux-5.4.155/include/net/mptcp.h mptcp-mptcp_v0.96/include/net/mptcp + +#define MPTCP_INC_STATS(net, field) SNMP_INC_STATS((net)->mptcp.mptcp_statistics, field) +#define MPTCP_DEC_STATS(net, field) SNMP_DEC_STATS((net)->mptcp.mptcp_statistics, field) ++#define MPTCP_INC_STATS_BH(net, field) __SNMP_INC_STATS((net)->mptcp.mptcp_statistics, field) + +enum +{ @@ -1245,9 +1238,6 @@ diff -aurN linux-5.4.155/include/net/mptcp.h mptcp-mptcp_v0.96/include/net/mptcp +{ + if (!mptcp_init_failed && + sk->sk_type == SOCK_STREAM && sk->sk_protocol == IPPROTO_TCP && -+#ifdef CONFIG_TCP_MD5SIG -+ !rcu_access_pointer(tcp_sk(sk)->md5sig_info) && -+#endif + sysctl_mptcp_enabled & MPTCP_ENABLE && + !(sysctl_mptcp_enabled & MPTCP_SERVER_DISABLE)) + mptcp_enable_sock(sk); @@ -1257,9 +1247,6 @@ diff -aurN linux-5.4.155/include/net/mptcp.h mptcp-mptcp_v0.96/include/net/mptcp +{ + if (!mptcp_init_failed && + sk->sk_type == SOCK_STREAM && sk->sk_protocol == IPPROTO_TCP && -+#ifdef CONFIG_TCP_MD5SIG -+ !rcu_access_pointer(tcp_sk(sk)->md5sig_info) && -+#endif + sysctl_mptcp_enabled & MPTCP_ENABLE && + !(sysctl_mptcp_enabled & MPTCP_CLIENT_DISABLE)) + mptcp_enable_sock(sk); @@ -1673,6 +1660,9 @@ diff -aurN linux-5.4.155/include/net/mptcp.h mptcp-mptcp_v0.96/include/net/mptcp +u16 mptcp_select_window(struct sock 
*sk); +void mptcp_tcp_set_rto(struct sock *sk); + ++/* TCP and MPTCP flag-depending functions */ ++bool mptcp_prune_ofo_queue(struct sock *sk); ++ +#else /* CONFIG_MPTCP */ +#define mptcp_debug(fmt, args...) \ + do { \ @@ -1854,9 +1844,9 @@ diff -aurN linux-5.4.155/include/net/mptcp.h mptcp-mptcp_v0.96/include/net/mptcp +#endif /* CONFIG_MPTCP */ + +#endif /* _MPTCP_H */ -diff -aurN linux-5.4.155/include/net/mptcp_v4.h mptcp-mptcp_v0.96/include/net/mptcp_v4.h ---- linux-5.4.155/include/net/mptcp_v4.h 1970-01-01 01:00:00.000000000 +0100 -+++ mptcp-mptcp_v0.96/include/net/mptcp_v4.h 2021-10-25 10:05:18.000000000 +0200 +diff -aurN linux-5.4.64/include/net/mptcp_v4.h linux-5.4.64.mptcp/include/net/mptcp_v4.h +--- linux-5.4.64/include/net/mptcp_v4.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-5.4.64.mptcp/include/net/mptcp_v4.h 2020-09-10 19:25:10.499221003 +0200 @@ -0,0 +1,76 @@ +/* + * MPTCP implementation @@ -1934,9 +1924,9 @@ diff -aurN linux-5.4.155/include/net/mptcp_v4.h mptcp-mptcp_v0.96/include/net/mp +#endif /* CONFIG_MPTCP */ + +#endif /* MPTCP_V4_H_ */ -diff -aurN linux-5.4.155/include/net/mptcp_v6.h mptcp-mptcp_v0.96/include/net/mptcp_v6.h ---- linux-5.4.155/include/net/mptcp_v6.h 1970-01-01 01:00:00.000000000 +0100 -+++ mptcp-mptcp_v0.96/include/net/mptcp_v6.h 2021-10-25 10:05:18.000000000 +0200 +diff -aurN linux-5.4.64/include/net/mptcp_v6.h linux-5.4.64.mptcp/include/net/mptcp_v6.h +--- linux-5.4.64/include/net/mptcp_v6.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-5.4.64.mptcp/include/net/mptcp_v6.h 2020-09-10 19:25:10.499221003 +0200 @@ -0,0 +1,77 @@ +/* + * MPTCP implementation @@ -2015,9 +2005,9 @@ diff -aurN linux-5.4.155/include/net/mptcp_v6.h mptcp-mptcp_v0.96/include/net/mp +#endif /* CONFIG_MPTCP */ + +#endif /* _MPTCP_V6_H */ -diff -aurN linux-5.4.155/include/net/net_namespace.h mptcp-mptcp_v0.96/include/net/net_namespace.h ---- linux-5.4.155/include/net/net_namespace.h 2021-10-20 11:40:18.000000000 +0200 -+++ mptcp-mptcp_v0.96/include/net/net_namespace.h 2021-10-25 10:05:18.000000000 +0200 +diff -aurN linux-5.4.64/include/net/net_namespace.h linux-5.4.64.mptcp/include/net/net_namespace.h +--- linux-5.4.64/include/net/net_namespace.h 2020-09-09 19:12:37.000000000 +0200 ++++ linux-5.4.64.mptcp/include/net/net_namespace.h 2020-09-10 19:25:10.499221003 +0200 @@ -19,6 +19,7 @@ #include #include @@ -2036,9 +2026,9 @@ diff -aurN linux-5.4.155/include/net/net_namespace.h mptcp-mptcp_v0.96/include/n #if IS_ENABLED(CONFIG_IEEE802154_6LOWPAN) struct netns_ieee802154_lowpan ieee802154_lowpan; #endif -diff -aurN linux-5.4.155/include/net/netns/mptcp.h mptcp-mptcp_v0.96/include/net/netns/mptcp.h ---- linux-5.4.155/include/net/netns/mptcp.h 1970-01-01 01:00:00.000000000 +0100 -+++ mptcp-mptcp_v0.96/include/net/netns/mptcp.h 2021-10-25 10:05:18.000000000 +0200 +diff -aurN linux-5.4.64/include/net/netns/mptcp.h linux-5.4.64.mptcp/include/net/netns/mptcp.h +--- linux-5.4.64/include/net/netns/mptcp.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-5.4.64.mptcp/include/net/netns/mptcp.h 2020-09-10 19:25:10.499221003 +0200 @@ -0,0 +1,52 @@ +/* + * MPTCP implementation - MPTCP namespace @@ -2092,9 +2082,9 @@ diff -aurN linux-5.4.155/include/net/netns/mptcp.h mptcp-mptcp_v0.96/include/net +}; + +#endif /* __NETNS_MPTCP_H__ */ -diff -aurN linux-5.4.155/include/net/snmp.h mptcp-mptcp_v0.96/include/net/snmp.h ---- linux-5.4.155/include/net/snmp.h 2021-10-20 11:40:18.000000000 +0200 -+++ mptcp-mptcp_v0.96/include/net/snmp.h 2021-10-25 10:05:18.000000000 +0200 +diff -aurN 
linux-5.4.64/include/net/snmp.h linux-5.4.64.mptcp/include/net/snmp.h +--- linux-5.4.64/include/net/snmp.h 2020-09-09 19:12:37.000000000 +0200 ++++ linux-5.4.64.mptcp/include/net/snmp.h 2020-09-10 19:25:10.499221003 +0200 @@ -86,7 +86,6 @@ atomic_long_t mibs[ICMP6MSG_MIB_MAX]; }; @@ -2103,10 +2093,10 @@ diff -aurN linux-5.4.155/include/net/snmp.h mptcp-mptcp_v0.96/include/net/snmp.h /* TCP */ #define TCP_MIB_MAX __TCP_MIB_MAX struct tcp_mib { -diff -aurN linux-5.4.155/include/net/sock.h mptcp-mptcp_v0.96/include/net/sock.h ---- linux-5.4.155/include/net/sock.h 2021-10-20 11:40:18.000000000 +0200 -+++ mptcp-mptcp_v0.96/include/net/sock.h 2021-10-25 10:05:18.000000000 +0200 -@@ -821,6 +821,7 @@ +diff -aurN linux-5.4.64/include/net/sock.h linux-5.4.64.mptcp/include/net/sock.h +--- linux-5.4.64/include/net/sock.h 2020-09-09 19:12:37.000000000 +0200 ++++ linux-5.4.64.mptcp/include/net/sock.h 2020-09-10 19:25:10.499221003 +0200 +@@ -819,6 +819,7 @@ SOCK_TXTIME, SOCK_XDP, /* XDP is attached */ SOCK_TSTAMP_NEW, /* Indicates 64 bit timestamps always */ @@ -2114,7 +2104,7 @@ diff -aurN linux-5.4.155/include/net/sock.h mptcp-mptcp_v0.96/include/net/sock.h }; #define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE)) -@@ -1133,6 +1134,7 @@ +@@ -1131,6 +1132,7 @@ void (*unhash)(struct sock *sk); void (*rehash)(struct sock *sk); int (*get_port)(struct sock *sk, unsigned short snum); @@ -2122,9 +2112,9 @@ diff -aurN linux-5.4.155/include/net/sock.h mptcp-mptcp_v0.96/include/net/sock.h /* Keeping track of sockets in use */ #ifdef CONFIG_PROC_FS -diff -aurN linux-5.4.155/include/net/tcp.h mptcp-mptcp_v0.96/include/net/tcp.h ---- linux-5.4.155/include/net/tcp.h 2021-10-20 11:40:18.000000000 +0200 -+++ mptcp-mptcp_v0.96/include/net/tcp.h 2021-10-25 10:05:18.000000000 +0200 +diff -aurN linux-5.4.64/include/net/tcp.h linux-5.4.64.mptcp/include/net/tcp.h +--- linux-5.4.64/include/net/tcp.h 2020-09-09 19:12:37.000000000 +0200 ++++ linux-5.4.64.mptcp/include/net/tcp.h 2020-09-10 19:25:10.499221003 +0200 @@ -182,6 +182,7 @@ #define TCPOPT_SACK 5 /* SACK Block */ #define TCPOPT_TIMESTAMP 8 /* Better RTT estimations/PAWS */ @@ -2165,7 +2155,7 @@ diff -aurN linux-5.4.155/include/net/tcp.h mptcp-mptcp_v0.96/include/net/tcp.h /* sysctl variables for tcp */ extern int sysctl_tcp_max_orphans; -@@ -310,6 +336,98 @@ +@@ -310,6 +336,97 @@ #define TCP_DEC_STATS(net, field) SNMP_DEC_STATS((net)->mib.tcp_statistics, field) #define TCP_ADD_STATS(net, field, val) SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val) @@ -2183,7 +2173,6 @@ diff -aurN linux-5.4.155/include/net/tcp.h mptcp-mptcp_v0.96/include/net/tcp.h +void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb); +int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, + gfp_t gfp_mask); -+u32 tcp_tso_segs(struct sock *sk, unsigned int mss_now); +unsigned int tcp_mss_split_point(const struct sock *sk, + const struct sk_buff *skb, + unsigned int mss_now, @@ -2264,7 +2253,7 @@ diff -aurN linux-5.4.155/include/net/tcp.h mptcp-mptcp_v0.96/include/net/tcp.h void tcp_tasklet_init(void); int tcp_v4_err(struct sk_buff *skb, u32); -@@ -411,7 +529,9 @@ +@@ -411,7 +528,9 @@ #endif void tcp_parse_options(const struct net *net, const struct sk_buff *skb, struct tcp_options_received *opt_rx, @@ -2275,7 +2264,7 @@ diff -aurN linux-5.4.155/include/net/tcp.h mptcp-mptcp_v0.96/include/net/tcp.h const u8 *tcp_parse_md5sig_option(const struct tcphdr *th); /* -@@ -430,6 +550,7 @@ +@@ -430,6 +549,7 @@ void 
tcp_v4_send_check(struct sock *sk, struct sk_buff *skb); void tcp_v4_mtu_reduced(struct sock *sk); @@ -2283,7 +2272,7 @@ diff -aurN linux-5.4.155/include/net/tcp.h mptcp-mptcp_v0.96/include/net/tcp.h void tcp_req_err(struct sock *sk, u32 seq, bool abort); int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb); struct sock *tcp_create_openreq_child(const struct sock *sk, -@@ -453,6 +574,7 @@ +@@ -453,6 +573,7 @@ struct request_sock *req, struct tcp_fastopen_cookie *foc, enum tcp_synack_type synack_type); @@ -2291,7 +2280,7 @@ diff -aurN linux-5.4.155/include/net/tcp.h mptcp-mptcp_v0.96/include/net/tcp.h int tcp_disconnect(struct sock *sk, int flags); void tcp_finish_connect(struct sock *sk, struct sk_buff *skb); -@@ -462,6 +584,7 @@ +@@ -462,6 +583,7 @@ /* From syncookies.c */ struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb, struct request_sock *req, @@ -2299,7 +2288,7 @@ diff -aurN linux-5.4.155/include/net/tcp.h mptcp-mptcp_v0.96/include/net/tcp.h struct dst_entry *dst, u32 tsoff); int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th, u32 cookie); -@@ -547,7 +670,8 @@ +@@ -547,7 +669,8 @@ u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th, u16 *mssp); @@ -2309,7 +2298,7 @@ diff -aurN linux-5.4.155/include/net/tcp.h mptcp-mptcp_v0.96/include/net/tcp.h u64 cookie_init_timestamp(struct request_sock *req); bool cookie_timestamp_decode(const struct net *net, struct tcp_options_received *opt); -@@ -561,7 +685,8 @@ +@@ -561,7 +684,8 @@ u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph, const struct tcphdr *th, u16 *mssp); @@ -2319,7 +2308,7 @@ diff -aurN linux-5.4.155/include/net/tcp.h mptcp-mptcp_v0.96/include/net/tcp.h #endif /* tcp_output.c */ -@@ -597,10 +722,16 @@ +@@ -597,10 +721,16 @@ void tcp_skb_collapse_tstamp(struct sk_buff *skb, const struct sk_buff *next_skb); @@ -2336,7 +2325,7 @@ diff -aurN linux-5.4.155/include/net/tcp.h mptcp-mptcp_v0.96/include/net/tcp.h void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb); void tcp_fin(struct sock *sk); -@@ -645,7 +776,7 @@ +@@ -644,7 +774,7 @@ } /* tcp.c */ @@ -2345,49 +2334,7 @@ diff -aurN linux-5.4.155/include/net/tcp.h mptcp-mptcp_v0.96/include/net/tcp.h /* Read 'sendfile()'-style from a TCP socket */ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, -@@ -723,7 +854,7 @@ - * Rcv_nxt can be after the window if our peer push more data - * than the offered window. - */ --static inline u32 tcp_receive_window(const struct tcp_sock *tp) -+static inline u32 tcp_receive_window_now(const struct tcp_sock *tp) - { - s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt; - -@@ -732,6 +863,32 @@ - return (u32) win; - } - -+/* right edge only moves forward, even if window shrinks due -+ * to mptcp meta -+ */ -+static inline void tcp_update_rcv_right_edge(struct tcp_sock *tp) -+{ -+ if (after(tp->rcv_wup + tp->rcv_wnd, tp->rcv_right_edge)) -+ tp->rcv_right_edge = tp->rcv_wup + tp->rcv_wnd; -+} -+ -+/* Compute receive window which will never shrink. The way MPTCP handles -+ * the receive window can cause the effective right edge to shrink, -+ * causing valid segments to become out of window. -+ * This function should be used when checking if a segment is valid for -+ * the max right edge announced. 
-+ */ -+static inline u32 tcp_receive_window_no_shrink(const struct tcp_sock *tp) -+{ -+ s32 win = tp->rcv_right_edge - tp->rcv_nxt; -+ -+ win = max_t(s32, win, tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt); -+ -+ if (unlikely(win < 0)) -+ win = 0; -+ return (u32) win; -+} -+ - /* Choose a new window, without checks for shrinking, and without - * scaling applied to the result. The caller does these things - * if necessary. This is a "raw" window selection. -@@ -829,6 +986,12 @@ +@@ -828,6 +958,12 @@ u16 tcp_gso_size; }; }; @@ -2400,7 +2347,7 @@ diff -aurN linux-5.4.155/include/net/tcp.h mptcp-mptcp_v0.96/include/net/tcp.h __u8 tcp_flags; /* TCP header flags. (tcp[13]) */ __u8 sacked; /* State flags for SACK. */ -@@ -847,6 +1010,14 @@ +@@ -846,6 +982,14 @@ has_rxtstamp:1, /* SKB has a RX timestamp */ unused:5; __u32 ack_seq; /* Sequence number ACK'd */ @@ -2415,7 +2362,7 @@ diff -aurN linux-5.4.155/include/net/tcp.h mptcp-mptcp_v0.96/include/net/tcp.h union { struct { /* There is space for up to 24 bytes */ -@@ -1088,6 +1259,8 @@ +@@ -1087,6 +1231,8 @@ int tcp_set_allowed_congestion_control(char *allowed); int tcp_set_congestion_control(struct sock *sk, const char *name, bool load, bool reinit, bool cap_net_admin); @@ -2424,7 +2371,7 @@ diff -aurN linux-5.4.155/include/net/tcp.h mptcp-mptcp_v0.96/include/net/tcp.h u32 tcp_slow_start(struct tcp_sock *tp, u32 acked); void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked); -@@ -1389,6 +1562,19 @@ +@@ -1388,6 +1534,19 @@ space - (space>>tcp_adv_win_scale); } @@ -2444,7 +2391,7 @@ diff -aurN linux-5.4.155/include/net/tcp.h mptcp-mptcp_v0.96/include/net/tcp.h /* Note: caller must be prepared to deal with negative returns */ static inline int tcp_space(const struct sock *sk) { -@@ -1981,6 +2167,30 @@ +@@ -1975,6 +2134,30 @@ #endif }; @@ -2475,7 +2422,7 @@ diff -aurN linux-5.4.155/include/net/tcp.h mptcp-mptcp_v0.96/include/net/tcp.h struct tcp_request_sock_ops { u16 mss_clamp; #ifdef CONFIG_TCP_MD5SIG -@@ -1991,12 +2201,13 @@ +@@ -1985,12 +2168,13 @@ const struct sock *sk, const struct sk_buff *skb); #endif @@ -2494,7 +2441,7 @@ diff -aurN linux-5.4.155/include/net/tcp.h mptcp-mptcp_v0.96/include/net/tcp.h #endif struct dst_entry *(*route_req)(const struct sock *sk, struct flowi *fl, const struct request_sock *req); -@@ -2010,15 +2221,17 @@ +@@ -2004,15 +2188,17 @@ #ifdef CONFIG_SYN_COOKIES static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops, @@ -2513,9 +2460,9 @@ diff -aurN linux-5.4.155/include/net/tcp.h mptcp-mptcp_v0.96/include/net/tcp.h const struct sock *sk, struct sk_buff *skb, __u16 *mss) { -diff -aurN linux-5.4.155/include/net/tcp_states.h mptcp-mptcp_v0.96/include/net/tcp_states.h ---- linux-5.4.155/include/net/tcp_states.h 2021-10-20 11:40:18.000000000 +0200 -+++ mptcp-mptcp_v0.96/include/net/tcp_states.h 2021-10-25 10:05:18.000000000 +0200 +diff -aurN linux-5.4.64/include/net/tcp_states.h linux-5.4.64.mptcp/include/net/tcp_states.h +--- linux-5.4.64/include/net/tcp_states.h 2020-09-09 19:12:37.000000000 +0200 ++++ linux-5.4.64.mptcp/include/net/tcp_states.h 2020-09-10 19:25:10.499221003 +0200 @@ -22,6 +22,7 @@ TCP_LISTEN, TCP_CLOSING, /* Now a valid state */ @@ -2532,9 +2479,9 @@ diff -aurN linux-5.4.155/include/net/tcp_states.h mptcp-mptcp_v0.96/include/net/ }; #endif /* _LINUX_TCP_STATES_H */ -diff -aurN linux-5.4.155/include/net/transp_v6.h mptcp-mptcp_v0.96/include/net/transp_v6.h ---- linux-5.4.155/include/net/transp_v6.h 2021-10-20 11:40:18.000000000 +0200 -+++ 
mptcp-mptcp_v0.96/include/net/transp_v6.h 2021-10-25 10:05:18.000000000 +0200 +diff -aurN linux-5.4.64/include/net/transp_v6.h linux-5.4.64.mptcp/include/net/transp_v6.h +--- linux-5.4.64/include/net/transp_v6.h 2020-09-09 19:12:37.000000000 +0200 ++++ linux-5.4.64.mptcp/include/net/transp_v6.h 2020-09-10 19:25:10.499221003 +0200 @@ -58,6 +58,8 @@ /* address family specific functions */ @@ -2544,9 +2491,9 @@ diff -aurN linux-5.4.155/include/net/transp_v6.h mptcp-mptcp_v0.96/include/net/t void inet6_destroy_sock(struct sock *sk); -diff -aurN linux-5.4.155/include/trace/events/tcp.h mptcp-mptcp_v0.96/include/trace/events/tcp.h ---- linux-5.4.155/include/trace/events/tcp.h 2021-10-20 11:40:18.000000000 +0200 -+++ mptcp-mptcp_v0.96/include/trace/events/tcp.h 2021-10-25 10:05:18.000000000 +0200 +diff -aurN linux-5.4.64/include/trace/events/tcp.h linux-5.4.64.mptcp/include/trace/events/tcp.h +--- linux-5.4.64/include/trace/events/tcp.h 2020-09-09 19:12:37.000000000 +0200 ++++ linux-5.4.64.mptcp/include/trace/events/tcp.h 2020-09-10 19:25:10.499221003 +0200 @@ -10,6 +10,7 @@ #include #include @@ -2595,9 +2542,9 @@ diff -aurN linux-5.4.155/include/trace/events/tcp.h mptcp-mptcp_v0.96/include/tr ); #endif /* _TRACE_TCP_H */ -diff -aurN linux-5.4.155/include/uapi/linux/bpf.h mptcp-mptcp_v0.96/include/uapi/linux/bpf.h ---- linux-5.4.155/include/uapi/linux/bpf.h 2021-10-20 11:40:18.000000000 +0200 -+++ mptcp-mptcp_v0.96/include/uapi/linux/bpf.h 2021-10-25 10:05:18.000000000 +0200 +diff -aurN linux-5.4.64/include/uapi/linux/bpf.h linux-5.4.64.mptcp/include/uapi/linux/bpf.h +--- linux-5.4.64/include/uapi/linux/bpf.h 2020-09-09 19:12:37.000000000 +0200 ++++ linux-5.4.64.mptcp/include/uapi/linux/bpf.h 2020-09-10 19:25:10.499221003 +0200 @@ -3438,6 +3438,7 @@ BPF_TCP_LISTEN, BPF_TCP_CLOSING, /* Now a valid state */ @@ -2606,9 +2553,9 @@ diff -aurN linux-5.4.155/include/uapi/linux/bpf.h mptcp-mptcp_v0.96/include/uapi BPF_TCP_MAX_STATES /* Leave at the end! 
*/ }; -diff -aurN linux-5.4.155/include/uapi/linux/if.h mptcp-mptcp_v0.96/include/uapi/linux/if.h ---- linux-5.4.155/include/uapi/linux/if.h 2021-10-20 11:40:18.000000000 +0200 -+++ mptcp-mptcp_v0.96/include/uapi/linux/if.h 2021-10-25 10:05:18.000000000 +0200 +diff -aurN linux-5.4.64/include/uapi/linux/if.h linux-5.4.64.mptcp/include/uapi/linux/if.h +--- linux-5.4.64/include/uapi/linux/if.h 2020-09-09 19:12:37.000000000 +0200 ++++ linux-5.4.64.mptcp/include/uapi/linux/if.h 2020-09-10 19:25:10.499221003 +0200 @@ -132,6 +132,9 @@ #define IFF_ECHO IFF_ECHO #endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO */ @@ -2619,9 +2566,9 @@ diff -aurN linux-5.4.155/include/uapi/linux/if.h mptcp-mptcp_v0.96/include/uapi/ #define IFF_VOLATILE (IFF_LOOPBACK|IFF_POINTOPOINT|IFF_BROADCAST|IFF_ECHO|\ IFF_MASTER|IFF_SLAVE|IFF_RUNNING|IFF_LOWER_UP|IFF_DORMANT) -diff -aurN linux-5.4.155/include/uapi/linux/in.h mptcp-mptcp_v0.96/include/uapi/linux/in.h ---- linux-5.4.155/include/uapi/linux/in.h 2021-10-20 11:40:18.000000000 +0200 -+++ mptcp-mptcp_v0.96/include/uapi/linux/in.h 2021-10-25 10:05:18.000000000 +0200 +diff -aurN linux-5.4.64/include/uapi/linux/in.h linux-5.4.64.mptcp/include/uapi/linux/in.h +--- linux-5.4.64/include/uapi/linux/in.h 2020-09-09 19:12:37.000000000 +0200 ++++ linux-5.4.64.mptcp/include/uapi/linux/in.h 2020-09-10 19:25:10.499221003 +0200 @@ -76,6 +76,8 @@ #define IPPROTO_MPLS IPPROTO_MPLS IPPROTO_RAW = 255, /* Raw IP packets */ @@ -2631,9 +2578,9 @@ diff -aurN linux-5.4.155/include/uapi/linux/in.h mptcp-mptcp_v0.96/include/uapi/ IPPROTO_MAX }; #endif -diff -aurN linux-5.4.155/include/uapi/linux/mptcp.h mptcp-mptcp_v0.96/include/uapi/linux/mptcp.h ---- linux-5.4.155/include/uapi/linux/mptcp.h 1970-01-01 01:00:00.000000000 +0100 -+++ mptcp-mptcp_v0.96/include/uapi/linux/mptcp.h 2021-10-25 10:05:18.000000000 +0200 +diff -aurN linux-5.4.64/include/uapi/linux/mptcp.h linux-5.4.64.mptcp/include/uapi/linux/mptcp.h +--- linux-5.4.64/include/uapi/linux/mptcp.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-5.4.64.mptcp/include/uapi/linux/mptcp.h 2020-09-10 19:25:10.499221003 +0200 @@ -0,0 +1,149 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* @@ -2724,8 +2671,8 @@ diff -aurN linux-5.4.155/include/uapi/linux/mptcp.h mptcp-mptcp_v0.96/include/ua + * - MPTCP_CMD_REMOVE: token, loc_id + * Announce that an address has been lost to the peer. + * -+ * - MPTCP_CMD_SUB_CREATE: token, family, loc_id, rem_id, daddr4 | daddr6, -+ * dport [, saddr4 | saddr6, sport, backup, if_idx] ++ * - MPTCP_CMD_SUB_CREATE: token, family, loc_id, rem_id, [saddr4 | saddr6, ++ * daddr4 | daddr6, dport [, sport, backup, if_idx]] + * Create a new subflow. 
+ * + * - MPTCP_CMD_SUB_DESTROY: token, family, saddr4 | saddr6, daddr4 | daddr6, @@ -2784,9 +2731,9 @@ diff -aurN linux-5.4.155/include/uapi/linux/mptcp.h mptcp-mptcp_v0.96/include/ua +}; + +#endif /* _LINUX_MPTCP_H */ -diff -aurN linux-5.4.155/include/uapi/linux/tcp.h mptcp-mptcp_v0.96/include/uapi/linux/tcp.h ---- linux-5.4.155/include/uapi/linux/tcp.h 2021-10-20 11:40:18.000000000 +0200 -+++ mptcp-mptcp_v0.96/include/uapi/linux/tcp.h 2021-10-25 10:05:18.000000000 +0200 +diff -aurN linux-5.4.64/include/uapi/linux/tcp.h linux-5.4.64.mptcp/include/uapi/linux/tcp.h +--- linux-5.4.64/include/uapi/linux/tcp.h 2020-09-09 19:12:37.000000000 +0200 ++++ linux-5.4.64.mptcp/include/uapi/linux/tcp.h 2020-09-10 19:25:10.499221003 +0200 @@ -18,9 +18,15 @@ #ifndef _UAPI_LINUX_TCP_H #define _UAPI_LINUX_TCP_H @@ -2872,10 +2819,10 @@ diff -aurN linux-5.4.155/include/uapi/linux/tcp.h mptcp-mptcp_v0.96/include/uapi /* for TCP_MD5SIG socket option */ #define TCP_MD5SIG_MAXKEYLEN 80 -diff -aurN linux-5.4.155/net/core/dev.c mptcp-mptcp_v0.96/net/core/dev.c ---- linux-5.4.155/net/core/dev.c 2021-10-20 11:40:18.000000000 +0200 -+++ mptcp-mptcp_v0.96/net/core/dev.c 2021-10-25 10:05:18.000000000 +0200 -@@ -7880,7 +7880,7 @@ +diff -aurN linux-5.4.64/net/core/dev.c linux-5.4.64.mptcp/net/core/dev.c +--- linux-5.4.64/net/core/dev.c 2020-09-09 19:12:37.000000000 +0200 ++++ linux-5.4.64.mptcp/net/core/dev.c 2020-09-10 19:25:10.503220935 +0200 +@@ -7851,7 +7851,7 @@ dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP | IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL | @@ -2884,83 +2831,19 @@ diff -aurN linux-5.4.155/net/core/dev.c mptcp-mptcp_v0.96/net/core/dev.c (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC | IFF_ALLMULTI)); -diff -aurN linux-5.4.155/net/core/filter.c mptcp-mptcp_v0.96/net/core/filter.c ---- linux-5.4.155/net/core/filter.c 2021-10-20 11:40:18.000000000 +0200 -+++ mptcp-mptcp_v0.96/net/core/filter.c 2021-10-25 10:05:18.000000000 +0200 -@@ -73,6 +73,7 @@ - #include - #include - #include -+#include - - /** - * sk_filter_trim_cap - run a packet through a socket filter -@@ -4280,6 +4281,19 @@ - if (sk->sk_mark != val) { - sk->sk_mark = val; - sk_dst_reset(sk); -+ -+ if (is_meta_sk(sk)) { -+ struct mptcp_tcp_sock *mptcp; -+ -+ mptcp_for_each_sub(tcp_sk(sk)->mpcb, mptcp) { -+ struct sock *sk_it = mptcp_to_sock(mptcp); -+ -+ if (val != sk_it->sk_mark) { -+ sk_it->sk_mark = val; -+ sk_dst_reset(sk_it); -+ } -+ } -+ } - } - break; - default: -@@ -4302,6 +4316,14 @@ - if (val == -1) - val = 0; - inet->tos = val; -+ -+ /* Update TOS on mptcp subflow */ -+ if (is_meta_sk(sk)) { -+ struct mptcp_tcp_sock *mptcp; -+ -+ mptcp_for_each_sub(tcp_sk(sk)->mpcb, mptcp) -+ inet_sk(mptcp_to_sock(mptcp))->tos = val; -+ } - } - break; - default: -@@ -4324,6 +4346,17 @@ - if (val == -1) - val = 0; - np->tclass = val; -+ -+ if (is_meta_sk(sk)) { -+ struct mptcp_tcp_sock *mptcp; -+ -+ mptcp_for_each_sub(tcp_sk(sk)->mpcb, mptcp) { -+ struct sock *sk_it = mptcp_to_sock(mptcp); -+ -+ if (sk_it->sk_family == AF_INET6) -+ inet6_sk(sk_it)->tclass = val; -+ } -+ } - } - break; - default: -diff -aurN linux-5.4.155/net/core/net-traces.c mptcp-mptcp_v0.96/net/core/net-traces.c ---- linux-5.4.155/net/core/net-traces.c 2021-10-20 11:40:18.000000000 +0200 -+++ mptcp-mptcp_v0.96/net/core/net-traces.c 2021-10-25 10:05:18.000000000 +0200 +diff -aurN linux-5.4.64/net/core/net-traces.c linux-5.4.64.mptcp/net/core/net-traces.c +--- linux-5.4.64/net/core/net-traces.c 2020-09-09 19:12:37.000000000 +0200 ++++ 
linux-5.4.64.mptcp/net/core/net-traces.c 2020-09-10 19:25:10.503220935 +0200 @@ -60,3 +60,5 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(napi_poll); EXPORT_TRACEPOINT_SYMBOL_GPL(tcp_send_reset); + +EXPORT_TRACEPOINT_SYMBOL_GPL(mptcp_retransmit); -diff -aurN linux-5.4.155/net/core/skbuff.c mptcp-mptcp_v0.96/net/core/skbuff.c ---- linux-5.4.155/net/core/skbuff.c 2021-10-20 11:40:18.000000000 +0200 -+++ mptcp-mptcp_v0.96/net/core/skbuff.c 2021-10-25 10:05:18.000000000 +0200 -@@ -582,7 +582,7 @@ +diff -aurN linux-5.4.64/net/core/skbuff.c linux-5.4.64.mptcp/net/core/skbuff.c +--- linux-5.4.64/net/core/skbuff.c 2020-09-09 19:12:37.000000000 +0200 ++++ linux-5.4.64.mptcp/net/core/skbuff.c 2020-09-10 19:25:10.503220935 +0200 +@@ -573,7 +573,7 @@ skb_drop_list(&skb_shinfo(skb)->frag_list); } @@ -2969,9 +2852,9 @@ diff -aurN linux-5.4.155/net/core/skbuff.c mptcp-mptcp_v0.96/net/core/skbuff.c { struct sk_buff *list; -diff -aurN linux-5.4.155/net/core/sock.c mptcp-mptcp_v0.96/net/core/sock.c ---- linux-5.4.155/net/core/sock.c 2021-10-20 11:40:18.000000000 +0200 -+++ mptcp-mptcp_v0.96/net/core/sock.c 2021-10-25 10:05:18.000000000 +0200 +diff -aurN linux-5.4.64/net/core/sock.c linux-5.4.64.mptcp/net/core/sock.c +--- linux-5.4.64/net/core/sock.c 2020-09-09 19:12:37.000000000 +0200 ++++ linux-5.4.64.mptcp/net/core/sock.c 2020-09-10 19:26:53.689504155 +0200 @@ -135,6 +135,11 @@ #include @@ -2984,27 +2867,7 @@ diff -aurN linux-5.4.155/net/core/sock.c mptcp-mptcp_v0.96/net/core/sock.c #include #include -@@ -1063,6 +1068,19 @@ - } else if (val != sk->sk_mark) { - sk->sk_mark = val; - sk_dst_reset(sk); -+ -+ if (is_meta_sk(sk)) { -+ struct mptcp_tcp_sock *mptcp; -+ -+ mptcp_for_each_sub(tcp_sk(sk)->mpcb, mptcp) { -+ struct sock *sk_it = mptcp_to_sock(mptcp); -+ -+ if (val != sk_it->sk_mark) { -+ sk_it->sk_mark = val; -+ sk_dst_reset(sk_it); -+ } -+ } -+ } - } - break; - -@@ -1563,6 +1581,23 @@ +@@ -1551,6 +1556,23 @@ */ static inline void sock_lock_init(struct sock *sk) { @@ -3028,7 +2891,7 @@ diff -aurN linux-5.4.155/net/core/sock.c mptcp-mptcp_v0.96/net/core/sock.c if (sk->sk_kern_sock) sock_lock_init_class_and_name( sk, -@@ -1611,8 +1646,12 @@ +@@ -1599,8 +1621,12 @@ sk = kmem_cache_alloc(slab, priority & ~__GFP_ZERO); if (!sk) return sk; @@ -3043,17 +2906,18 @@ diff -aurN linux-5.4.155/net/core/sock.c mptcp-mptcp_v0.96/net/core/sock.c } else sk = kmalloc(prot->obj_size, priority); -@@ -1846,6 +1885,7 @@ +@@ -1832,7 +1858,7 @@ + newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK; atomic_set(&newsk->sk_zckey, 0); - sock_reset_flag(newsk, SOCK_DONE); +- sock_reset_flag(newsk, SOCK_DONE); + sock_reset_flag(newsk, SOCK_MPTCP); /* sk->sk_memcg will be populated at accept() time */ newsk->sk_memcg = NULL; -diff -aurN linux-5.4.155/net/ipv4/af_inet.c mptcp-mptcp_v0.96/net/ipv4/af_inet.c ---- linux-5.4.155/net/ipv4/af_inet.c 2021-10-20 11:40:18.000000000 +0200 -+++ mptcp-mptcp_v0.96/net/ipv4/af_inet.c 2021-10-25 10:05:18.000000000 +0200 +diff -aurN linux-5.4.64/net/ipv4/af_inet.c linux-5.4.64.mptcp/net/ipv4/af_inet.c +--- linux-5.4.64/net/ipv4/af_inet.c 2020-09-09 19:12:37.000000000 +0200 ++++ linux-5.4.64.mptcp/net/ipv4/af_inet.c 2020-09-10 19:25:10.503220935 +0200 @@ -100,6 +100,7 @@ #include #include @@ -3126,9 +2990,9 @@ diff -aurN linux-5.4.155/net/ipv4/af_inet.c mptcp-mptcp_v0.96/net/ipv4/af_inet.c /* Setup TCP slab cache for open requests. 
*/ tcp_init(); -diff -aurN linux-5.4.155/net/ipv4/inet_connection_sock.c mptcp-mptcp_v0.96/net/ipv4/inet_connection_sock.c ---- linux-5.4.155/net/ipv4/inet_connection_sock.c 2021-10-20 11:40:18.000000000 +0200 -+++ mptcp-mptcp_v0.96/net/ipv4/inet_connection_sock.c 2021-10-25 10:05:18.000000000 +0200 +diff -aurN linux-5.4.64/net/ipv4/inet_connection_sock.c linux-5.4.64.mptcp/net/ipv4/inet_connection_sock.c +--- linux-5.4.64/net/ipv4/inet_connection_sock.c 2020-09-09 19:12:37.000000000 +0200 ++++ linux-5.4.64.mptcp/net/ipv4/inet_connection_sock.c 2020-09-10 19:25:10.503220935 +0200 @@ -19,6 +19,7 @@ #include #include @@ -3137,7 +3001,7 @@ diff -aurN linux-5.4.155/net/ipv4/inet_connection_sock.c mptcp-mptcp_v0.96/net/i #include #include #include -@@ -730,7 +731,10 @@ +@@ -727,7 +728,10 @@ int max_retries, thresh; u8 defer_accept; @@ -3149,7 +3013,7 @@ diff -aurN linux-5.4.155/net/ipv4/inet_connection_sock.c mptcp-mptcp_v0.96/net/i goto drop; max_retries = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_synack_retries; -@@ -819,7 +823,9 @@ +@@ -816,7 +820,9 @@ const struct request_sock *req, const gfp_t priority) { @@ -3160,7 +3024,7 @@ diff -aurN linux-5.4.155/net/ipv4/inet_connection_sock.c mptcp-mptcp_v0.96/net/i if (newsk) { struct inet_connection_sock *newicsk = inet_csk(newsk); -@@ -1019,7 +1025,14 @@ +@@ -1015,7 +1021,14 @@ */ while ((req = reqsk_queue_remove(queue, sk)) != NULL) { struct sock *child = req->sk; @@ -3175,7 +3039,7 @@ diff -aurN linux-5.4.155/net/ipv4/inet_connection_sock.c mptcp-mptcp_v0.96/net/i local_bh_disable(); bh_lock_sock(child); WARN_ON(sock_owned_by_user(child)); -@@ -1029,6 +1042,10 @@ +@@ -1025,6 +1038,10 @@ reqsk_put(req); bh_unlock_sock(child); local_bh_enable(); @@ -3186,9 +3050,9 @@ diff -aurN linux-5.4.155/net/ipv4/inet_connection_sock.c mptcp-mptcp_v0.96/net/i sock_put(child); cond_resched(); -diff -aurN linux-5.4.155/net/ipv4/ip_sockglue.c mptcp-mptcp_v0.96/net/ipv4/ip_sockglue.c ---- linux-5.4.155/net/ipv4/ip_sockglue.c 2021-10-20 11:40:18.000000000 +0200 -+++ mptcp-mptcp_v0.96/net/ipv4/ip_sockglue.c 2021-10-25 10:05:18.000000000 +0200 +diff -aurN linux-5.4.64/net/ipv4/ip_sockglue.c linux-5.4.64.mptcp/net/ipv4/ip_sockglue.c +--- linux-5.4.64/net/ipv4/ip_sockglue.c 2020-09-09 19:12:37.000000000 +0200 ++++ linux-5.4.64.mptcp/net/ipv4/ip_sockglue.c 2020-09-10 19:25:10.503220935 +0200 @@ -44,6 +44,8 @@ #endif #include @@ -3228,9 +3092,9 @@ diff -aurN linux-5.4.155/net/ipv4/ip_sockglue.c mptcp-mptcp_v0.96/net/ipv4/ip_so } break; case IP_TTL: -diff -aurN linux-5.4.155/net/ipv4/Kconfig mptcp-mptcp_v0.96/net/ipv4/Kconfig ---- linux-5.4.155/net/ipv4/Kconfig 2021-10-20 11:40:18.000000000 +0200 -+++ mptcp-mptcp_v0.96/net/ipv4/Kconfig 2021-10-25 10:05:18.000000000 +0200 +diff -aurN linux-5.4.64/net/ipv4/Kconfig linux-5.4.64.mptcp/net/ipv4/Kconfig +--- linux-5.4.64/net/ipv4/Kconfig 2020-09-09 19:12:37.000000000 +0200 ++++ linux-5.4.64.mptcp/net/ipv4/Kconfig 2020-09-10 19:25:10.503220935 +0200 @@ -655,6 +655,51 @@ bufferbloat, policers, or AQM schemes that do not provide a delay signal. It requires the fq ("Fair Queue") pacing packet scheduler. 
@@ -3316,9 +3180,9 @@ diff -aurN linux-5.4.155/net/ipv4/Kconfig mptcp-mptcp_v0.96/net/ipv4/Kconfig default "reno" if DEFAULT_RENO default "dctcp" if DEFAULT_DCTCP default "cdg" if DEFAULT_CDG -diff -aurN linux-5.4.155/net/ipv4/syncookies.c mptcp-mptcp_v0.96/net/ipv4/syncookies.c ---- linux-5.4.155/net/ipv4/syncookies.c 2021-10-20 11:40:18.000000000 +0200 -+++ mptcp-mptcp_v0.96/net/ipv4/syncookies.c 2021-10-25 10:05:18.000000000 +0200 +diff -aurN linux-5.4.64/net/ipv4/syncookies.c linux-5.4.64.mptcp/net/ipv4/syncookies.c +--- linux-5.4.64/net/ipv4/syncookies.c 2020-09-09 19:12:37.000000000 +0200 ++++ linux-5.4.64.mptcp/net/ipv4/syncookies.c 2020-09-10 19:25:10.503220935 +0200 @@ -12,6 +12,8 @@ #include #include @@ -3423,7 +3287,7 @@ diff -aurN linux-5.4.155/net/ipv4/syncookies.c mptcp-mptcp_v0.96/net/ipv4/syncoo /* We throwed the options of the initial SYN away, so we hope * the ACK carries the same options again (see RFC1122 4.2.3.8) */ -@@ -392,15 +426,15 @@ +@@ -387,15 +421,15 @@ (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0)) req->rsk_window_clamp = full_space; @@ -3444,9 +3308,9 @@ diff -aurN linux-5.4.155/net/ipv4/syncookies.c mptcp-mptcp_v0.96/net/ipv4/syncoo /* ip_queue_xmit() depends on our flow being setup * Normal sockets get it right from inet_csk_route_child_sock() */ -diff -aurN linux-5.4.155/net/ipv4/tcp.c mptcp-mptcp_v0.96/net/ipv4/tcp.c ---- linux-5.4.155/net/ipv4/tcp.c 2021-10-20 11:40:18.000000000 +0200 -+++ mptcp-mptcp_v0.96/net/ipv4/tcp.c 2021-10-25 10:05:18.000000000 +0200 +diff -aurN linux-5.4.64/net/ipv4/tcp.c linux-5.4.64.mptcp/net/ipv4/tcp.c +--- linux-5.4.64/net/ipv4/tcp.c 2020-09-09 19:12:37.000000000 +0200 ++++ linux-5.4.64.mptcp/net/ipv4/tcp.c 2020-09-10 19:44:12.204220735 +0200 @@ -270,6 +270,7 @@ #include @@ -3491,16 +3355,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp.c mptcp-mptcp_v0.96/net/ipv4/tcp.c sk_sockets_allocated_inc(sk); sk->sk_route_forced_caps = NETIF_F_GSO; } -@@ -484,7 +507,7 @@ - return true; - if (tcp_rmem_pressure(sk)) - return true; -- if (tcp_receive_window(tp) <= inet_csk(sk)->icsk_ack.rcv_mss) -+ if (tcp_receive_window_now(tp) <= inet_csk(sk)->icsk_ack.rcv_mss) - return true; - } - if (sk->sk_prot->stream_memory_read) -@@ -787,6 +810,7 @@ +@@ -785,6 +808,7 @@ int ret; sock_rps_record_flow(sk); @@ -3508,7 +3363,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp.c mptcp-mptcp_v0.96/net/ipv4/tcp.c /* * We can't seek on a socket input */ -@@ -797,6 +821,16 @@ +@@ -795,6 +819,16 @@ lock_sock(sk); @@ -3525,7 +3380,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp.c mptcp-mptcp_v0.96/net/ipv4/tcp.c timeo = sock_rcvtimeo(sk, sock->file->f_flags & O_NONBLOCK); while (tss.len) { ret = __tcp_splice_read(sk, &tss); -@@ -912,8 +946,7 @@ +@@ -910,8 +944,7 @@ return NULL; } @@ -3535,7 +3390,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp.c mptcp-mptcp_v0.96/net/ipv4/tcp.c { struct tcp_sock *tp = tcp_sk(sk); u32 new_size_goal, size_goal; -@@ -941,8 +974,13 @@ +@@ -939,8 +972,13 @@ { int mss_now; @@ -3551,7 +3406,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp.c mptcp-mptcp_v0.96/net/ipv4/tcp.c return mss_now; } -@@ -982,12 +1020,34 @@ +@@ -979,12 +1017,34 @@ * is fully established. 
*/ if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) && @@ -3587,7 +3442,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp.c mptcp-mptcp_v0.96/net/ipv4/tcp.c sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); mss_now = tcp_send_mss(sk, &size_goal, flags); -@@ -1109,7 +1169,8 @@ +@@ -1106,7 +1166,8 @@ int tcp_sendpage_locked(struct sock *sk, struct page *page, int offset, size_t size, int flags) { @@ -3597,7 +3452,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp.c mptcp-mptcp_v0.96/net/ipv4/tcp.c return sock_no_sendpage_locked(sk, page, offset, size, flags); tcp_rate_check_app_limited(sk); /* is sending application-limited? */ -@@ -1231,12 +1292,21 @@ +@@ -1228,12 +1289,21 @@ * is fully established. */ if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) && @@ -3620,7 +3475,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp.c mptcp-mptcp_v0.96/net/ipv4/tcp.c if (unlikely(tp->repair)) { if (tp->repair_queue == TCP_RECV_QUEUE) { copied = tcp_send_rcvq(sk, msg, size); -@@ -1529,7 +1599,7 @@ +@@ -1526,7 +1596,7 @@ * calculation of whether or not we must ACK for the sake of * a window update. */ @@ -3629,12 +3484,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp.c mptcp-mptcp_v0.96/net/ipv4/tcp.c { struct tcp_sock *tp = tcp_sk(sk); bool time_to_ack = false; -@@ -1568,11 +1638,11 @@ - * in states, where we will not receive more. It is useless. - */ - if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) { -- __u32 rcv_window_now = tcp_receive_window(tp); -+ __u32 rcv_window_now = tcp_receive_window_now(tp); +@@ -1569,7 +1639,7 @@ /* Optimize, __tcp_select_window() is not cheap. */ if (2*rcv_window_now <= tp->window_clamp) { @@ -3643,7 +3493,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp.c mptcp-mptcp_v0.96/net/ipv4/tcp.c /* Send ACK now, if this read freed lots of space * in our buffer. Certainly, new_window is new window. -@@ -1688,7 +1758,7 @@ +@@ -1685,7 +1755,7 @@ /* Clean up data we have read: This will do ACK frames. */ if (copied > 0) { tcp_recv_skb(sk, seq, &offset); @@ -3652,7 +3502,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp.c mptcp-mptcp_v0.96/net/ipv4/tcp.c } return copied; } -@@ -1979,6 +2049,16 @@ +@@ -1976,6 +2046,16 @@ lock_sock(sk); @@ -3669,7 +3519,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp.c mptcp-mptcp_v0.96/net/ipv4/tcp.c err = -ENOTCONN; if (sk->sk_state == TCP_LISTEN) goto out; -@@ -2097,7 +2177,7 @@ +@@ -2094,7 +2174,7 @@ } } @@ -3678,7 +3528,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp.c mptcp-mptcp_v0.96/net/ipv4/tcp.c if (copied >= target) { /* Do not sleep, just process backlog. */ -@@ -2189,7 +2269,7 @@ +@@ -2186,7 +2266,7 @@ */ /* Clean up data we have read: This will do ACK frames. */ @@ -3687,7 +3537,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp.c mptcp-mptcp_v0.96/net/ipv4/tcp.c release_sock(sk); -@@ -2248,8 +2328,11 @@ +@@ -2245,8 +2325,11 @@ switch (state) { case TCP_ESTABLISHED: @@ -3700,7 +3550,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp.c mptcp-mptcp_v0.96/net/ipv4/tcp.c break; case TCP_CLOSE: -@@ -2262,8 +2345,11 @@ +@@ -2259,8 +2342,11 @@ inet_put_port(sk); /* fall through */ default: @@ -3713,7 +3563,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp.c mptcp-mptcp_v0.96/net/ipv4/tcp.c } /* Change state AFTER socket is unhashed to avoid closed -@@ -2297,7 +2383,7 @@ +@@ -2294,7 +2380,7 @@ [TCP_NEW_SYN_RECV] = TCP_CLOSE, /* should not happen ! 
*/ }; @@ -3722,7 +3572,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp.c mptcp-mptcp_v0.96/net/ipv4/tcp.c { int next = (int)new_state[sk->sk_state]; int ns = next & TCP_STATE_MASK; -@@ -2327,7 +2413,7 @@ +@@ -2324,7 +2410,7 @@ TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) { /* Clear out any half completed packets. FIN if needed. */ if (tcp_close_state(sk)) @@ -3731,7 +3581,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp.c mptcp-mptcp_v0.96/net/ipv4/tcp.c } } EXPORT_SYMBOL(tcp_shutdown); -@@ -2352,6 +2438,17 @@ +@@ -2349,6 +2435,17 @@ int data_was_unread = 0; int state; @@ -3749,7 +3599,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp.c mptcp-mptcp_v0.96/net/ipv4/tcp.c lock_sock(sk); sk->sk_shutdown = SHUTDOWN_MASK; -@@ -2396,7 +2493,7 @@ +@@ -2393,7 +2490,7 @@ /* Unread data was tossed, zap the connection. */ NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE); tcp_set_state(sk, TCP_CLOSE); @@ -3758,7 +3608,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp.c mptcp-mptcp_v0.96/net/ipv4/tcp.c } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) { /* Check zero linger _after_ checking for unread data. */ sk->sk_prot->disconnect(sk, 0); -@@ -2470,7 +2567,7 @@ +@@ -2467,7 +2564,7 @@ struct tcp_sock *tp = tcp_sk(sk); if (tp->linger2 < 0) { tcp_set_state(sk, TCP_CLOSE); @@ -3767,7 +3617,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp.c mptcp-mptcp_v0.96/net/ipv4/tcp.c __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONLINGER); } else { -@@ -2480,7 +2577,8 @@ +@@ -2477,7 +2574,8 @@ inet_csk_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN); } else { @@ -3777,7 +3627,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp.c mptcp-mptcp_v0.96/net/ipv4/tcp.c goto out; } } -@@ -2489,7 +2587,7 @@ +@@ -2486,7 +2584,7 @@ sk_mem_reclaim(sk); if (tcp_check_oom(sk, 0)) { tcp_set_state(sk, TCP_CLOSE); @@ -3786,7 +3636,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp.c mptcp-mptcp_v0.96/net/ipv4/tcp.c __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY); } else if (!check_net(sock_net(sk))) { -@@ -2521,15 +2619,6 @@ +@@ -2518,15 +2616,6 @@ } EXPORT_SYMBOL(tcp_close); @@ -3802,7 +3652,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp.c mptcp-mptcp_v0.96/net/ipv4/tcp.c static void tcp_rtx_queue_purge(struct sock *sk) { struct rb_node *p = rb_first(&sk->tcp_rtx_queue); -@@ -2551,6 +2640,10 @@ +@@ -2548,6 +2637,10 @@ { struct sk_buff *skb; @@ -3813,7 +3663,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp.c mptcp-mptcp_v0.96/net/ipv4/tcp.c tcp_chrono_stop(sk, TCP_CHRONO_BUSY); while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) { tcp_skb_tsorted_anchor_cleanup(skb); -@@ -2569,6 +2662,36 @@ +@@ -2566,6 +2659,35 @@ inet_csk(sk)->icsk_backoff = 0; } @@ -3825,7 +3675,6 @@ diff -aurN linux-5.4.155/net/ipv4/tcp.c mptcp-mptcp_v0.96/net/ipv4/tcp.c + tp->srtt_us = 0; + tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT); + tp->rcv_rtt_last_tsecr = 0; -+ icsk->icsk_probes_tstamp = 0; + icsk->icsk_rto = TCP_TIMEOUT_INIT; + tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; + tp->snd_cwnd = TCP_INIT_CWND; @@ -3834,13 +3683,13 @@ diff -aurN linux-5.4.155/net/ipv4/tcp.c mptcp-mptcp_v0.96/net/ipv4/tcp.c + tp->delivered_ce = 0; + tp->is_sack_reneg = 0; + tcp_clear_retrans(tp); -+ tp->segs_in = 0; -+ tp->segs_out = 0; + tp->bytes_sent = 0; + tp->bytes_acked = 0; + tp->bytes_received = 0; + tp->bytes_retrans = 0; + tp->total_retrans = 0; ++ tp->segs_in = 0; ++ tp->segs_out = 0; + tp->data_segs_in = 0; + tp->data_segs_out = 0; + /* There's a bubble in the pipe until at least the first ACK. 
*/ @@ -3850,7 +3699,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp.c mptcp-mptcp_v0.96/net/ipv4/tcp.c int tcp_disconnect(struct sock *sk, int flags) { struct inet_sock *inet = inet_sk(sk); -@@ -2591,7 +2714,7 @@ +@@ -2588,7 +2710,7 @@ /* The last check adjusts for discrepancy of Linux wrt. RFC * states */ @@ -3859,7 +3708,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp.c mptcp-mptcp_v0.96/net/ipv4/tcp.c sk->sk_err = ECONNRESET; } else if (old_state == TCP_SYN_SENT) sk->sk_err = ECONNRESET; -@@ -2613,11 +2736,15 @@ +@@ -2610,11 +2732,15 @@ if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) inet_reset_saddr(sk); @@ -3878,11 +3727,11 @@ diff -aurN linux-5.4.155/net/ipv4/tcp.c mptcp-mptcp_v0.96/net/ipv4/tcp.c seq = tp->write_seq + tp->max_window + 2; if (!seq) -@@ -2627,21 +2754,14 @@ +@@ -2624,21 +2750,15 @@ icsk->icsk_backoff = 0; tp->snd_cwnd = 2; icsk->icsk_probes_out = 0; -- icsk->icsk_probes_tstamp = 0; + icsk->icsk_probes_tstamp = 0; - icsk->icsk_rto = TCP_TIMEOUT_INIT; - tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; - tp->snd_cwnd = TCP_INIT_CWND; @@ -3903,7 +3752,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp.c mptcp-mptcp_v0.96/net/ipv4/tcp.c inet_csk_delack_init(sk); /* Initialize rcv_mss to TCP_MIN_MSS to avoid division by 0 * issue in __tcp_select_window() -@@ -2653,14 +2773,6 @@ +@@ -2649,14 +2769,6 @@ sk->sk_rx_dst = NULL; tcp_saved_syn_free(tp); tp->compressed_ack = 0; @@ -3918,7 +3767,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp.c mptcp-mptcp_v0.96/net/ipv4/tcp.c tp->duplicate_sack[0].start_seq = 0; tp->duplicate_sack[0].end_seq = 0; tp->dsack_dups = 0; -@@ -2669,8 +2781,6 @@ +@@ -2665,8 +2777,6 @@ tp->sacked_out = 0; tp->tlp_high_seq = 0; tp->last_oow_ack_time = 0; @@ -3927,7 +3776,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp.c mptcp-mptcp_v0.96/net/ipv4/tcp.c tp->rack.mstamp = 0; tp->rack.advanced = 0; tp->rack.reo_wnd_steps = 1; -@@ -2704,7 +2814,7 @@ +@@ -2700,7 +2810,7 @@ static inline bool tcp_can_repair_sock(const struct sock *sk) { return ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN) && @@ -3936,15 +3785,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp.c mptcp-mptcp_v0.96/net/ipv4/tcp.c } static int tcp_repair_set_window(struct tcp_sock *tp, char __user *optbuf, int len) -@@ -2735,6 +2845,7 @@ - - tp->rcv_wnd = opt.rcv_wnd; - tp->rcv_wup = opt.rcv_wup; -+ tp->rcv_right_edge = tp->rcv_wup + tp->rcv_wnd; - - return 0; - } -@@ -2873,6 +2984,61 @@ +@@ -2869,6 +2979,61 @@ return tcp_fastopen_reset_cipher(net, sk, key, backup_key); } @@ -4006,7 +3847,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp.c mptcp-mptcp_v0.96/net/ipv4/tcp.c default: /* fallthru */ break; -@@ -3062,6 +3228,12 @@ +@@ -3051,6 +3216,12 @@ break; case TCP_DEFER_ACCEPT: @@ -4019,7 +3860,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp.c mptcp-mptcp_v0.96/net/ipv4/tcp.c /* Translate value in seconds to number of retransmits */ icsk->icsk_accept_queue.rskq_defer_accept = secs_to_retrans(val, TCP_TIMEOUT_INIT / HZ, -@@ -3089,7 +3261,7 @@ +@@ -3078,7 +3249,7 @@ (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) && inet_csk_ack_scheduled(sk)) { icsk->icsk_ack.pending |= ICSK_ACK_PUSHED; @@ -4028,19 +3869,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp.c mptcp-mptcp_v0.96/net/ipv4/tcp.c if (!(val & 1)) inet_csk_enter_pingpong_mode(sk); } -@@ -3099,7 +3271,10 @@ - #ifdef CONFIG_TCP_MD5SIG - case TCP_MD5SIG: - case TCP_MD5SIG_EXT: -- err = tp->af_specific->md5_parse(sk, optname, optval, optlen); -+ if (!sock_flag(sk, SOCK_MPTCP)) -+ err = tp->af_specific->md5_parse(sk, optname, optval, optlen); -+ else -+ err = -EINVAL; - break; - #endif - case TCP_USER_TIMEOUT: -@@ 
-3155,6 +3330,32 @@ +@@ -3144,6 +3315,32 @@ tp->notsent_lowat = val; sk->sk_write_space(sk); break; @@ -4049,7 +3878,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp.c mptcp-mptcp_v0.96/net/ipv4/tcp.c + if (mptcp_init_failed || !sysctl_mptcp_enabled || + sk->sk_state != TCP_CLOSE +#ifdef CONFIG_TCP_MD5SIG -+ || rcu_access_pointer(tp->md5sig_info) ++ || tp->md5sig_info +#endif + ) { + err = -EPERM; @@ -4073,7 +3902,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp.c mptcp-mptcp_v0.96/net/ipv4/tcp.c case TCP_INQ: if (val > 1 || val < 0) err = -EINVAL; -@@ -3219,7 +3420,7 @@ +@@ -3208,7 +3405,7 @@ } /* Return information about state of tcp endpoint in API format. */ @@ -4082,7 +3911,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp.c mptcp-mptcp_v0.96/net/ipv4/tcp.c { const struct tcp_sock *tp = tcp_sk(sk); /* iff sk_type == SOCK_STREAM */ const struct inet_connection_sock *icsk = inet_csk(sk); -@@ -3256,7 +3457,8 @@ +@@ -3245,7 +3442,8 @@ return; } @@ -4092,7 +3921,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp.c mptcp-mptcp_v0.96/net/ipv4/tcp.c info->tcpi_ca_state = icsk->icsk_ca_state; info->tcpi_retransmits = icsk->icsk_retransmits; -@@ -3332,7 +3534,9 @@ +@@ -3321,7 +3519,9 @@ info->tcpi_reord_seen = tp->reord_seen; info->tcpi_rcv_ooopack = tp->rcv_ooopack; info->tcpi_snd_wnd = tp->snd_wnd; @@ -4103,7 +3932,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp.c mptcp-mptcp_v0.96/net/ipv4/tcp.c } EXPORT_SYMBOL_GPL(tcp_get_info); -@@ -3479,7 +3683,7 @@ +@@ -3468,7 +3668,7 @@ if (get_user(len, optlen)) return -EFAULT; @@ -4112,7 +3941,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp.c mptcp-mptcp_v0.96/net/ipv4/tcp.c len = min_t(unsigned int, len, sizeof(info)); if (put_user(len, optlen)) -@@ -3668,6 +3872,87 @@ +@@ -3657,6 +3857,87 @@ } return 0; } @@ -4200,17 +4029,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp.c mptcp-mptcp_v0.96/net/ipv4/tcp.c #ifdef CONFIG_MMU case TCP_ZEROCOPY_RECEIVE: { struct tcp_zerocopy_receive zc; -@@ -3873,7 +4158,9 @@ - if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV) - TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS); - -+ WARN_ON(sk->sk_state == TCP_CLOSE); - tcp_set_state(sk, TCP_CLOSE); -+ - tcp_clear_xmit_timers(sk); - if (req) - reqsk_fastopen_remove(sk, req, false); -@@ -3889,6 +4176,8 @@ +@@ -3878,6 +4161,8 @@ int tcp_abort(struct sock *sk, int err) { @@ -4219,7 +4038,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp.c mptcp-mptcp_v0.96/net/ipv4/tcp.c if (!sk_fullsock(sk)) { if (sk->sk_state == TCP_NEW_SYN_RECV) { struct request_sock *req = inet_reqsk(sk); -@@ -3902,7 +4191,7 @@ +@@ -3891,7 +4176,7 @@ } /* Don't race with userspace socket closes such as tcp_close. */ @@ -4228,7 +4047,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp.c mptcp-mptcp_v0.96/net/ipv4/tcp.c if (sk->sk_state == TCP_LISTEN) { tcp_set_state(sk, TCP_CLOSE); -@@ -3911,7 +4200,7 @@ +@@ -3900,7 +4185,7 @@ /* Don't race with BH socket closes such as inet_csk_listen_stop. 
*/ local_bh_disable(); @@ -4237,7 +4056,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp.c mptcp-mptcp_v0.96/net/ipv4/tcp.c if (!sock_flag(sk, SOCK_DEAD)) { sk->sk_err = err; -@@ -3919,14 +4208,14 @@ +@@ -3908,14 +4193,14 @@ smp_wmb(); sk->sk_error_report(sk); if (tcp_need_reset(sk->sk_state)) @@ -4255,10 +4074,10 @@ diff -aurN linux-5.4.155/net/ipv4/tcp.c mptcp-mptcp_v0.96/net/ipv4/tcp.c return 0; } EXPORT_SYMBOL_GPL(tcp_abort); -diff -aurN linux-5.4.155/net/ipv4/tcp_cong.c mptcp-mptcp_v0.96/net/ipv4/tcp_cong.c ---- linux-5.4.155/net/ipv4/tcp_cong.c 2021-10-20 11:40:18.000000000 +0200 -+++ mptcp-mptcp_v0.96/net/ipv4/tcp_cong.c 2021-10-25 10:05:18.000000000 +0200 -@@ -337,13 +337,19 @@ +diff -aurN linux-5.4.64/net/ipv4/tcp_cong.c linux-5.4.64.mptcp/net/ipv4/tcp_cong.c +--- linux-5.4.64/net/ipv4/tcp_cong.c 2020-09-09 19:12:37.000000000 +0200 ++++ linux-5.4.64.mptcp/net/ipv4/tcp_cong.c 2020-09-10 19:25:10.503220935 +0200 +@@ -328,13 +328,19 @@ return ret; } @@ -4280,9 +4099,9 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_cong.c mptcp-mptcp_v0.96/net/ipv4/tcp_cong { struct inet_connection_sock *icsk = inet_csk(sk); const struct tcp_congestion_ops *ca; -diff -aurN linux-5.4.155/net/ipv4/tcp_diag.c mptcp-mptcp_v0.96/net/ipv4/tcp_diag.c ---- linux-5.4.155/net/ipv4/tcp_diag.c 2021-10-20 11:40:18.000000000 +0200 -+++ mptcp-mptcp_v0.96/net/ipv4/tcp_diag.c 2021-10-25 10:05:18.000000000 +0200 +diff -aurN linux-5.4.64/net/ipv4/tcp_diag.c linux-5.4.64.mptcp/net/ipv4/tcp_diag.c +--- linux-5.4.64/net/ipv4/tcp_diag.c 2020-09-09 19:12:37.000000000 +0200 ++++ linux-5.4.64.mptcp/net/ipv4/tcp_diag.c 2020-09-10 19:25:10.503220935 +0200 @@ -31,7 +31,7 @@ r->idiag_wqueue = READ_ONCE(tp->write_seq) - tp->snd_una; } @@ -4292,9 +4111,9 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_diag.c mptcp-mptcp_v0.96/net/ipv4/tcp_diag } #ifdef CONFIG_TCP_MD5SIG -diff -aurN linux-5.4.155/net/ipv4/tcp_fastopen.c mptcp-mptcp_v0.96/net/ipv4/tcp_fastopen.c ---- linux-5.4.155/net/ipv4/tcp_fastopen.c 2021-10-20 11:40:18.000000000 +0200 -+++ mptcp-mptcp_v0.96/net/ipv4/tcp_fastopen.c 2021-10-25 10:05:18.000000000 +0200 +diff -aurN linux-5.4.64/net/ipv4/tcp_fastopen.c linux-5.4.64.mptcp/net/ipv4/tcp_fastopen.c +--- linux-5.4.64/net/ipv4/tcp_fastopen.c 2020-09-09 19:12:37.000000000 +0200 ++++ linux-5.4.64.mptcp/net/ipv4/tcp_fastopen.c 2020-09-10 19:25:10.503220935 +0200 @@ -9,6 +9,7 @@ #include #include @@ -4323,7 +4142,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_fastopen.c mptcp-mptcp_v0.96/net/ipv4/tcp_ child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL, NULL, &own_req); -@@ -294,15 +294,27 @@ +@@ -294,15 +294,26 @@ refcount_set(&req->rsk_refcnt, 2); @@ -4336,7 +4155,6 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_fastopen.c mptcp-mptcp_v0.96/net/ipv4/tcp_ tcp_rsk(req)->rcv_nxt = tp->rcv_nxt; tp->rcv_wup = tp->rcv_nxt; -+ tp->rcv_right_edge = tp->rcv_wup + tp->rcv_wnd; + + meta_sk = child; + ret = mptcp_check_req_fastopen(meta_sk, req); @@ -4354,9 +4172,9 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_fastopen.c mptcp-mptcp_v0.96/net/ipv4/tcp_ /* tcp_conn_request() is sending the SYNACK, * and queues the child into listener accept queue. 
*/ -diff -aurN linux-5.4.155/net/ipv4/tcp_input.c mptcp-mptcp_v0.96/net/ipv4/tcp_input.c ---- linux-5.4.155/net/ipv4/tcp_input.c 2021-10-20 11:40:18.000000000 +0200 -+++ mptcp-mptcp_v0.96/net/ipv4/tcp_input.c 2021-10-25 10:05:18.000000000 +0200 +diff -aurN linux-5.4.64/net/ipv4/tcp_input.c linux-5.4.64.mptcp/net/ipv4/tcp_input.c +--- linux-5.4.64/net/ipv4/tcp_input.c 2020-09-09 19:12:37.000000000 +0200 ++++ linux-5.4.64.mptcp/net/ipv4/tcp_input.c 2020-09-10 19:32:43.267687285 +0200 @@ -76,35 +76,15 @@ #include #include @@ -4463,7 +4281,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_input.c mptcp-mptcp_v0.96/net/ipv4/tcp_inp inet_csk(sk)->icsk_ack.quick |= 1; } } -@@ -612,7 +609,10 @@ +@@ -611,7 +608,10 @@ tcp_mstamp_refresh(tp); time = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcvq_space.time); @@ -4475,7 +4293,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_input.c mptcp-mptcp_v0.96/net/ipv4/tcp_inp return; /* Number of bytes copied to user in last RTT */ -@@ -835,7 +835,7 @@ +@@ -834,7 +834,7 @@ /* Calculate rto without backoff. This is the second half of Van Jacobson's * routine referred to above. */ @@ -4484,7 +4302,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_input.c mptcp-mptcp_v0.96/net/ipv4/tcp_inp { const struct tcp_sock *tp = tcp_sk(sk); /* Old crap is replaced with new one. 8) -@@ -1407,6 +1407,13 @@ +@@ -1406,6 +1406,13 @@ int len; int in_sack; @@ -4498,7 +4316,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_input.c mptcp-mptcp_v0.96/net/ipv4/tcp_inp /* Normally R but no L won't result in plain S */ if (!dup_sack && (TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_RETRANS)) == TCPCB_SACKED_RETRANS) -@@ -2962,7 +2969,7 @@ +@@ -2960,7 +2967,7 @@ */ tcp_update_rtt_min(sk, ca_rtt_us, flag); tcp_rtt_estimator(sk, seq_rtt_us); @@ -4507,7 +4325,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_input.c mptcp-mptcp_v0.96/net/ipv4/tcp_inp /* RFC6298: only reset backoff on valid RTT measurement. */ inet_csk(sk)->icsk_backoff = 0; -@@ -3030,7 +3037,7 @@ +@@ -3028,7 +3035,7 @@ } /* If we get here, the whole TSO packet has not been acked. */ @@ -4516,7 +4334,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_input.c mptcp-mptcp_v0.96/net/ipv4/tcp_inp { struct tcp_sock *tp = tcp_sk(sk); u32 packets_acked; -@@ -3050,8 +3057,7 @@ +@@ -3048,8 +3055,7 @@ return packets_acked; } @@ -4526,7 +4344,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_input.c mptcp-mptcp_v0.96/net/ipv4/tcp_inp { const struct skb_shared_info *shinfo; -@@ -3156,6 +3162,8 @@ +@@ -3154,6 +3160,8 @@ */ if (likely(!(scb->tcp_flags & TCPHDR_SYN))) { flag |= FLAG_DATA_ACKED; @@ -4535,7 +4353,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_input.c mptcp-mptcp_v0.96/net/ipv4/tcp_inp } else { flag |= FLAG_SYN_ACKED; tp->retrans_stamp = 0; -@@ -3276,7 +3284,7 @@ +@@ -3274,7 +3282,7 @@ return flag; } @@ -4544,7 +4362,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_input.c mptcp-mptcp_v0.96/net/ipv4/tcp_inp { struct inet_connection_sock *icsk = inet_csk(sk); struct sk_buff *head = tcp_send_head(sk); -@@ -3350,9 +3358,8 @@ +@@ -3346,9 +3354,8 @@ /* Check that window update is acceptable. * The function assumes that snd_una<=ack<=snd_next. */ @@ -4556,7 +4374,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_input.c mptcp-mptcp_v0.96/net/ipv4/tcp_inp { return after(ack, tp->snd_una) || after(ack_seq, tp->snd_wl1) || -@@ -3590,7 +3597,7 @@ +@@ -3586,7 +3593,7 @@ } /* This routine deals with incoming acks, but not outgoing ones. 
*/ @@ -4565,7 +4383,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_input.c mptcp-mptcp_v0.96/net/ipv4/tcp_inp { struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); -@@ -3713,6 +3720,16 @@ +@@ -3709,6 +3716,16 @@ tcp_rack_update_reo_wnd(sk, &rs); @@ -4581,8 +4399,8 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_input.c mptcp-mptcp_v0.96/net/ipv4/tcp_inp + if (tp->tlp_high_seq) tcp_process_tlp_ack(sk, ack, flag); - -@@ -3856,8 +3873,10 @@ + /* If needed, reset TLP/RTO timer; RACK may later override this. */ +@@ -3851,8 +3868,10 @@ */ void tcp_parse_options(const struct net *net, const struct sk_buff *skb, @@ -4595,7 +4413,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_input.c mptcp-mptcp_v0.96/net/ipv4/tcp_inp { const unsigned char *ptr; const struct tcphdr *th = tcp_hdr(skb); -@@ -3943,6 +3962,10 @@ +@@ -3938,6 +3957,10 @@ */ break; #endif @@ -4606,7 +4424,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_input.c mptcp-mptcp_v0.96/net/ipv4/tcp_inp case TCPOPT_FASTOPEN: tcp_parse_fastopen_option( opsize - TCPOLEN_FASTOPEN_BASE, -@@ -4010,7 +4033,9 @@ +@@ -4005,7 +4028,9 @@ return true; } @@ -4617,16 +4435,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_input.c mptcp-mptcp_v0.96/net/ipv4/tcp_inp if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr) tp->rx_opt.rcv_tsecr -= tp->tsoffset; -@@ -4120,7 +4145,7 @@ - static inline bool tcp_sequence(const struct tcp_sock *tp, u32 seq, u32 end_seq) - { - return !before(end_seq, tp->rcv_wup) && -- !after(seq, tp->rcv_nxt + tcp_receive_window(tp)); -+ !after(seq, tp->rcv_nxt + tcp_receive_window_no_shrink(tp)); - } - - /* When we get a reset we do this. */ -@@ -4169,6 +4194,11 @@ +@@ -4164,6 +4189,11 @@ { struct tcp_sock *tp = tcp_sk(sk); @@ -4638,7 +4447,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_input.c mptcp-mptcp_v0.96/net/ipv4/tcp_inp inet_csk_schedule_ack(sk); sk->sk_shutdown |= RCV_SHUTDOWN; -@@ -4179,6 +4209,10 @@ +@@ -4174,6 +4204,10 @@ case TCP_ESTABLISHED: /* Move to CLOSE_WAIT */ tcp_set_state(sk, TCP_CLOSE_WAIT); @@ -4649,7 +4458,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_input.c mptcp-mptcp_v0.96/net/ipv4/tcp_inp inet_csk_enter_pingpong_mode(sk); break; -@@ -4201,9 +4235,16 @@ +@@ -4196,9 +4230,16 @@ tcp_set_state(sk, TCP_CLOSING); break; case TCP_FIN_WAIT2: @@ -4667,7 +4476,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_input.c mptcp-mptcp_v0.96/net/ipv4/tcp_inp break; default: /* Only TCP_LISTEN and TCP_CLOSE are left, in these -@@ -4225,6 +4266,10 @@ +@@ -4220,6 +4261,10 @@ if (!sock_flag(sk, SOCK_DEAD)) { sk->sk_state_change(sk); @@ -4678,7 +4487,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_input.c mptcp-mptcp_v0.96/net/ipv4/tcp_inp /* Do not send POLL_HUP for half duplex close. */ if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE) -@@ -4439,6 +4484,9 @@ +@@ -4434,6 +4479,9 @@ *fragstolen = false; @@ -4688,7 +4497,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_input.c mptcp-mptcp_v0.96/net/ipv4/tcp_inp /* Its possible this segment overlaps with prior segment in queue */ if (TCP_SKB_CB(from)->seq != TCP_SKB_CB(to)->end_seq) return false; -@@ -4493,7 +4541,7 @@ +@@ -4488,7 +4536,7 @@ /* This one checks to see if we can put data from the * out_of_order queue into the receive_queue. 
*/ @@ -4697,7 +4506,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_input.c mptcp-mptcp_v0.96/net/ipv4/tcp_inp { struct tcp_sock *tp = tcp_sk(sk); __u32 dsack_high = tp->rcv_nxt; -@@ -4516,7 +4564,14 @@ +@@ -4511,7 +4559,14 @@ p = rb_next(p); rb_erase(&skb->rbnode, &tp->out_of_order_queue); @@ -4713,7 +4522,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_input.c mptcp-mptcp_v0.96/net/ipv4/tcp_inp tcp_drop(sk, skb); continue; } -@@ -4546,6 +4601,9 @@ +@@ -4541,6 +4596,9 @@ static int tcp_try_rmem_schedule(struct sock *sk, struct sk_buff *skb, unsigned int size) { @@ -4723,7 +4532,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_input.c mptcp-mptcp_v0.96/net/ipv4/tcp_inp if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf || !sk_rmem_schedule(sk, skb, size)) { -@@ -4560,7 +4618,7 @@ +@@ -4555,7 +4613,7 @@ return 0; } @@ -4732,7 +4541,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_input.c mptcp-mptcp_v0.96/net/ipv4/tcp_inp { struct tcp_sock *tp = tcp_sk(sk); struct rb_node **p, *parent; -@@ -4632,7 +4690,8 @@ +@@ -4627,7 +4685,8 @@ continue; } if (before(seq, TCP_SKB_CB(skb1)->end_seq)) { @@ -4742,7 +4551,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_input.c mptcp-mptcp_v0.96/net/ipv4/tcp_inp /* All the bits are present. Drop. */ NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOMERGE); -@@ -4679,6 +4738,11 @@ +@@ -4674,6 +4733,11 @@ end_seq); break; } @@ -4754,7 +4563,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_input.c mptcp-mptcp_v0.96/net/ipv4/tcp_inp rb_erase(&skb1->rbnode, &tp->out_of_order_queue); tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq, TCP_SKB_CB(skb1)->end_seq); -@@ -4690,7 +4754,7 @@ +@@ -4685,7 +4749,7 @@ tp->ooo_last_skb = skb; add_sack: @@ -4763,7 +4572,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_input.c mptcp-mptcp_v0.96/net/ipv4/tcp_inp tcp_sack_new_ofo_skb(sk, seq, end_seq); end: if (skb) { -@@ -4704,8 +4768,8 @@ +@@ -4699,8 +4763,8 @@ } } @@ -4774,17 +4583,17 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_input.c mptcp-mptcp_v0.96/net/ipv4/tcp_inp { int eaten; struct sk_buff *tail = skb_peek_tail(&sk->sk_receive_queue); -@@ -4780,7 +4844,8 @@ +@@ -4774,8 +4838,8 @@ + int avail = tp->rcv_nxt - tp->copied_seq; if (avail < sk->sk_rcvlowat && !tcp_rmem_pressure(sk) && - !sock_flag(sk, SOCK_DONE) && -- tcp_receive_window(tp) > inet_csk(sk)->icsk_ack.rcv_mss) -+ tcp_receive_window_now(tp) > inet_csk(sk)->icsk_ack.rcv_mss && -+ !mptcp(tp)) +- !sock_flag(sk, SOCK_DONE) && ++ !sock_flag(sk, SOCK_DONE) && !mptcp(tp) && + tcp_receive_window(tp) > inet_csk(sk)->icsk_ack.rcv_mss) return; sk->sk_data_ready(sk); -@@ -4792,10 +4857,14 @@ +@@ -4786,10 +4850,14 @@ bool fragstolen; int eaten; @@ -4800,16 +4609,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_input.c mptcp-mptcp_v0.96/net/ipv4/tcp_inp skb_dst_drop(skb); __skb_pull(skb, tcp_hdr(skb)->doff * 4); -@@ -4806,7 +4875,7 @@ - * Out of sequence packets to the out_of_order_queue. 
- */ - if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt) { -- if (tcp_receive_window(tp) == 0) { -+ if (tcp_receive_window_no_shrink(tp) == 0) { - NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPZEROWINDOWDROP); - goto out_of_window; - } -@@ -4822,7 +4891,7 @@ +@@ -4816,7 +4884,7 @@ } eaten = tcp_queue_rcv(sk, skb, &fragstolen); @@ -4818,7 +4618,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_input.c mptcp-mptcp_v0.96/net/ipv4/tcp_inp tcp_event_data_recv(sk, skb); if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) tcp_fin(sk); -@@ -4844,7 +4913,11 @@ +@@ -4838,7 +4906,11 @@ if (eaten > 0) kfree_skb_partial(skb, fragstolen); @@ -4831,26 +4631,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_input.c mptcp-mptcp_v0.96/net/ipv4/tcp_inp tcp_data_ready(sk); return; } -@@ -4864,7 +4937,8 @@ - } - - /* Out of window. F.e. zero window probe. */ -- if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt + tcp_receive_window(tp))) -+ if (!before(TCP_SKB_CB(skb)->seq, -+ tp->rcv_nxt + tcp_receive_window_no_shrink(tp))) - goto out_of_window; - - if (before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { -@@ -4874,7 +4948,7 @@ - /* If window is closed, drop tail of packet. But after - * remembering D-SACK for its head made in previous line. - */ -- if (!tcp_receive_window(tp)) { -+ if (!tcp_receive_window_no_shrink(tp)) { - NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPZEROWINDOWDROP); - goto out_of_window; - } -@@ -5187,7 +5261,7 @@ +@@ -5181,7 +5253,7 @@ return -1; } @@ -4859,7 +4640,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_input.c mptcp-mptcp_v0.96/net/ipv4/tcp_inp { const struct tcp_sock *tp = tcp_sk(sk); -@@ -5222,7 +5296,7 @@ +@@ -5216,7 +5288,7 @@ { struct tcp_sock *tp = tcp_sk(sk); @@ -4868,7 +4649,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_input.c mptcp-mptcp_v0.96/net/ipv4/tcp_inp tcp_sndbuf_expand(sk); tp->snd_cwnd_stamp = tcp_jiffies32; } -@@ -5236,10 +5310,11 @@ +@@ -5230,10 +5302,11 @@ sock_reset_flag(sk, SOCK_QUEUE_SHRUNK); /* pairs with tcp_poll() */ smp_mb(); @@ -4883,7 +4664,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_input.c mptcp-mptcp_v0.96/net/ipv4/tcp_inp tcp_chrono_stop(sk, TCP_CHRONO_SNDBUF_LIMITED); } } -@@ -5258,6 +5333,8 @@ +@@ -5252,6 +5325,8 @@ { struct tcp_sock *tp = tcp_sk(sk); unsigned long rtt, delay; @@ -4892,7 +4673,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_input.c mptcp-mptcp_v0.96/net/ipv4/tcp_inp /* More than one full frame received... */ if (((tp->rcv_nxt - tp->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss && -@@ -5266,8 +5343,8 @@ +@@ -5260,8 +5335,8 @@ * If application uses SO_RCVLOWAT, we want send ack now if * we have not received enough bytes to satisfy the condition. */ @@ -4903,7 +4684,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_input.c mptcp-mptcp_v0.96/net/ipv4/tcp_inp /* We ACK each frame or... */ tcp_in_quickack_mode(sk) || /* Protocol state mandates a one-time immediate ACK */ -@@ -5402,6 +5479,10 @@ +@@ -5396,6 +5471,10 @@ { struct tcp_sock *tp = tcp_sk(sk); @@ -4914,7 +4695,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_input.c mptcp-mptcp_v0.96/net/ipv4/tcp_inp /* Check if we get a new urgent pointer - normally not. 
*/ if (th->urg) tcp_check_urg(sk, th); -@@ -5544,9 +5625,15 @@ +@@ -5538,9 +5617,15 @@ goto discard; } @@ -4930,7 +4711,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_input.c mptcp-mptcp_v0.96/net/ipv4/tcp_inp tcp_drop(sk, skb); return false; } -@@ -5603,6 +5690,10 @@ +@@ -5597,6 +5682,10 @@ tp->rx_opt.saw_tstamp = 0; @@ -4941,7 +4722,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_input.c mptcp-mptcp_v0.96/net/ipv4/tcp_inp /* pred_flags is 0xS?10 << 16 + snd_wnd * if header_prediction is to be made * 'S' will always be tp->tcp_header_len >> 2 -@@ -5777,7 +5868,7 @@ +@@ -5769,7 +5858,7 @@ tcp_call_bpf(sk, bpf_op, 0, NULL); tcp_init_congestion_control(sk); @@ -4950,7 +4731,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_input.c mptcp-mptcp_v0.96/net/ipv4/tcp_inp } void tcp_finish_connect(struct sock *sk, struct sk_buff *skb) -@@ -5814,17 +5905,24 @@ +@@ -5806,17 +5895,24 @@ struct tcp_fastopen_cookie *cookie) { struct tcp_sock *tp = tcp_sk(sk); @@ -4977,7 +4758,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_input.c mptcp-mptcp_v0.96/net/ipv4/tcp_inp mss = opt.mss_clamp; } -@@ -5848,7 +5946,11 @@ +@@ -5840,7 +5936,11 @@ tcp_fastopen_cache_set(sk, mss, cookie, syn_drop, try_exp); @@ -4990,7 +4771,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_input.c mptcp-mptcp_v0.96/net/ipv4/tcp_inp skb_rbtree_walk_from(data) { if (__tcp_retransmit_skb(sk, data, 1)) break; -@@ -5903,9 +6005,13 @@ +@@ -5895,9 +5995,13 @@ struct tcp_sock *tp = tcp_sk(sk); struct tcp_fastopen_cookie foc = { .len = -1 }; int saved_clamp = tp->rx_opt.mss_clamp; @@ -5005,7 +4786,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_input.c mptcp-mptcp_v0.96/net/ipv4/tcp_inp if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr) tp->rx_opt.rcv_tsecr -= tp->tsoffset; -@@ -5966,11 +6072,41 @@ +@@ -5958,6 +6062,35 @@ tcp_try_undo_spurious_syn(sk); tcp_ack(sk, skb, FLAG_SLOWPATH); @@ -5041,13 +4822,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_input.c mptcp-mptcp_v0.96/net/ipv4/tcp_inp /* Ok.. it's good. Set up sequence numbers and * move to established. */ - WRITE_ONCE(tp->rcv_nxt, TCP_SKB_CB(skb)->seq + 1); - tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1; -+ tcp_update_rcv_right_edge(tp); - - /* RFC1323: The window in SYN & SYN/ACK segments is - * never scaled. -@@ -5992,6 +6128,11 @@ +@@ -5984,6 +6117,11 @@ tp->tcp_header_len = sizeof(struct tcphdr); } @@ -5059,7 +4834,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_input.c mptcp-mptcp_v0.96/net/ipv4/tcp_inp tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); tcp_initialize_rcv_mss(sk); -@@ -6015,9 +6156,12 @@ +@@ -6007,9 +6145,12 @@ } if (fastopen_fail) return -1; @@ -5074,7 +4849,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_input.c mptcp-mptcp_v0.96/net/ipv4/tcp_inp /* Save one ACK. Data will be ready after * several ticks, if write_pending is set. * -@@ -6056,6 +6200,7 @@ +@@ -6048,6 +6189,7 @@ tcp_paws_reject(&tp->rx_opt, 0)) goto discard_and_undo; @@ -5082,7 +4857,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_input.c mptcp-mptcp_v0.96/net/ipv4/tcp_inp if (th->syn) { /* We see SYN without ACK. It is attempt of * simultaneous connect with crossed SYNs. -@@ -6072,9 +6217,15 @@ +@@ -6064,6 +6206,11 @@ tp->tcp_header_len = sizeof(struct tcphdr); } @@ -5094,11 +4869,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_input.c mptcp-mptcp_v0.96/net/ipv4/tcp_inp WRITE_ONCE(tp->rcv_nxt, TCP_SKB_CB(skb)->seq + 1); WRITE_ONCE(tp->copied_seq, tp->rcv_nxt); tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1; -+ tcp_update_rcv_right_edge(tp); - - /* RFC1323: The window in SYN & SYN/ACK segments is - * never scaled. 
-@@ -6162,6 +6313,7 @@ +@@ -6154,6 +6301,7 @@ */ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb) @@ -5106,7 +4877,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_input.c mptcp-mptcp_v0.96/net/ipv4/tcp_inp { struct tcp_sock *tp = tcp_sk(sk); struct inet_connection_sock *icsk = inet_csk(sk); -@@ -6204,6 +6356,16 @@ +@@ -6196,6 +6344,16 @@ tp->rx_opt.saw_tstamp = 0; tcp_mstamp_refresh(tp); queued = tcp_rcv_synsent_state_process(sk, skb, th); @@ -5123,7 +4894,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_input.c mptcp-mptcp_v0.96/net/ipv4/tcp_inp if (queued >= 0) return queued; -@@ -6276,6 +6438,8 @@ +@@ -6268,6 +6426,8 @@ if (tp->rx_opt.tstamp_ok) tp->advmss -= TCPOLEN_TSTAMP_ALIGNED; @@ -5132,7 +4903,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_input.c mptcp-mptcp_v0.96/net/ipv4/tcp_inp if (!inet_csk(sk)->icsk_ca_ops->cong_control) tcp_update_pacing_rate(sk); -@@ -6285,6 +6449,30 @@ +@@ -6277,6 +6437,30 @@ tcp_initialize_rcv_mss(sk); tcp_fast_path_on(tp); @@ -5163,7 +4934,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_input.c mptcp-mptcp_v0.96/net/ipv4/tcp_inp break; case TCP_FIN_WAIT1: { -@@ -6325,7 +6513,8 @@ +@@ -6317,7 +6501,8 @@ tmo = tcp_fin_time(sk); if (tmo > TCP_TIMEWAIT_LEN) { inet_csk_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN); @@ -5173,7 +4944,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_input.c mptcp-mptcp_v0.96/net/ipv4/tcp_inp /* Bad case. We could lose such FIN otherwise. * It is not a big problem, but it looks confusing * and not so rare event. We still can lose it now, -@@ -6334,7 +6523,7 @@ +@@ -6326,7 +6511,7 @@ */ inet_csk_reset_keepalive_timer(sk, tmo); } else { @@ -5182,7 +4953,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_input.c mptcp-mptcp_v0.96/net/ipv4/tcp_inp goto discard; } break; -@@ -6342,7 +6531,7 @@ +@@ -6334,7 +6519,7 @@ case TCP_CLOSING: if (tp->snd_una == tp->write_seq) { @@ -5191,7 +4962,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_input.c mptcp-mptcp_v0.96/net/ipv4/tcp_inp goto discard; } break; -@@ -6354,6 +6543,9 @@ +@@ -6346,6 +6531,9 @@ goto discard; } break; @@ -5201,7 +4972,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_input.c mptcp-mptcp_v0.96/net/ipv4/tcp_inp } /* step 6: check the URG bit */ -@@ -6375,7 +6567,8 @@ +@@ -6367,7 +6555,8 @@ */ if (sk->sk_shutdown & RCV_SHUTDOWN) { if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && @@ -5211,7 +4982,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_input.c mptcp-mptcp_v0.96/net/ipv4/tcp_inp NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA); tcp_reset(sk); return 1; -@@ -6477,6 +6670,8 @@ +@@ -6469,6 +6658,8 @@ ireq->wscale_ok = rx_opt->wscale_ok; ireq->acked = 0; ireq->ecn_ok = 0; @@ -5220,7 +4991,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_input.c mptcp-mptcp_v0.96/net/ipv4/tcp_inp ireq->ir_rmt_port = tcp_hdr(skb)->source; ireq->ir_num = ntohs(tcp_hdr(skb)->dest); ireq->ir_mark = inet_request_mark(sk, skb); -@@ -6602,12 +6797,17 @@ +@@ -6594,12 +6785,17 @@ /* TW buckets are converted to open requests without * limitations, they conserve resources and peer is * evidently real one. 
@@ -5239,7 +5010,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_input.c mptcp-mptcp_v0.96/net/ipv4/tcp_inp } if (sk_acceptq_is_full(sk)) { -@@ -6625,8 +6825,8 @@ +@@ -6617,8 +6813,8 @@ tcp_clear_options(&tmp_opt); tmp_opt.mss_clamp = af_ops->mss_clamp; tmp_opt.user_mss = tp->rx_opt.user_mss; @@ -5250,7 +5021,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_input.c mptcp-mptcp_v0.96/net/ipv4/tcp_inp if (want_cookie && !tmp_opt.saw_tstamp) tcp_clear_options(&tmp_opt); -@@ -6641,7 +6841,8 @@ +@@ -6633,7 +6829,8 @@ /* Note: tcp_v6_init_req() might override ir_iif for link locals */ inet_rsk(req)->ir_iif = inet_request_bound_dev_if(sk, skb); @@ -5260,7 +5031,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_input.c mptcp-mptcp_v0.96/net/ipv4/tcp_inp if (security_inet_conn_request(sk, skb, req)) goto drop_and_free; -@@ -6677,7 +6878,7 @@ +@@ -6669,7 +6866,7 @@ tcp_ecn_create_request(req, skb, sk, dst); if (want_cookie) { @@ -5269,7 +5040,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_input.c mptcp-mptcp_v0.96/net/ipv4/tcp_inp req->cookie_ts = tmp_opt.tstamp_ok; if (!tmp_opt.tstamp_ok) inet_rsk(req)->ecn_ok = 0; -@@ -6692,17 +6893,25 @@ +@@ -6684,17 +6881,25 @@ fastopen_sk = tcp_try_fastopen(sk, skb, req, &foc, dst); } if (fastopen_sk) { @@ -5296,9 +5067,9 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_input.c mptcp-mptcp_v0.96/net/ipv4/tcp_inp sock_put(fastopen_sk); } else { tcp_rsk(req)->tfo_listener = false; -diff -aurN linux-5.4.155/net/ipv4/tcp_ipv4.c mptcp-mptcp_v0.96/net/ipv4/tcp_ipv4.c ---- linux-5.4.155/net/ipv4/tcp_ipv4.c 2021-10-20 11:40:18.000000000 +0200 -+++ mptcp-mptcp_v0.96/net/ipv4/tcp_ipv4.c 2021-10-25 10:05:18.000000000 +0200 +diff -aurN linux-5.4.64/net/ipv4/tcp_ipv4.c linux-5.4.64.mptcp/net/ipv4/tcp_ipv4.c +--- linux-5.4.64/net/ipv4/tcp_ipv4.c 2020-09-09 19:12:37.000000000 +0200 ++++ linux-5.4.64.mptcp/net/ipv4/tcp_ipv4.c 2020-09-10 19:25:10.503220935 +0200 @@ -62,6 +62,8 @@ #include #include @@ -5608,7 +5379,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_ipv4.c mptcp-mptcp_v0.96/net/ipv4/tcp_ipv4 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */ struct dst_entry *dst = sk->sk_rx_dst; -@@ -1803,6 +1847,10 @@ +@@ -1802,6 +1846,10 @@ TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin + skb->len - th->doff * 4); TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq); @@ -5619,7 +5390,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_ipv4.c mptcp-mptcp_v0.96/net/ipv4/tcp_ipv4 TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th); TCP_SKB_CB(skb)->tcp_tw_isn = 0; TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph); -@@ -1822,8 +1870,8 @@ +@@ -1821,8 +1869,8 @@ int sdif = inet_sdif(skb); const struct iphdr *iph; const struct tcphdr *th; @@ -5629,7 +5400,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_ipv4.c mptcp-mptcp_v0.96/net/ipv4/tcp_ipv4 int ret; if (skb->pkt_type != PACKET_HOST) -@@ -1877,7 +1925,11 @@ +@@ -1876,7 +1924,11 @@ reqsk_put(req); goto csum_error; } @@ -5642,7 +5413,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_ipv4.c mptcp-mptcp_v0.96/net/ipv4/tcp_ipv4 inet_csk_reqsk_queue_drop_and_put(sk, req); goto lookup; } -@@ -1886,6 +1938,7 @@ +@@ -1885,6 +1937,7 @@ */ sock_hold(sk); refcounted = true; @@ -5650,7 +5421,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_ipv4.c mptcp-mptcp_v0.96/net/ipv4/tcp_ipv4 nsk = NULL; if (!tcp_filter(sk, skb)) { th = (const struct tcphdr *)skb->data; -@@ -1946,19 +1999,28 @@ +@@ -1945,19 +1998,28 @@ sk_incoming_cpu_update(sk); @@ -5683,7 +5454,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_ipv4.c mptcp-mptcp_v0.96/net/ipv4/tcp_ipv4 if (skb_to_free) __kfree_skb(skb_to_free); -@@ -1974,6 
+2036,19 @@ +@@ -1973,6 +2035,19 @@ tcp_v4_fill_cb(skb, iph, th); @@ -5703,7 +5474,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_ipv4.c mptcp-mptcp_v0.96/net/ipv4/tcp_ipv4 if (tcp_checksum_complete(skb)) { csum_error: __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS); -@@ -2022,6 +2097,18 @@ +@@ -2021,6 +2096,18 @@ refcounted = false; goto process; } @@ -5722,7 +5493,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_ipv4.c mptcp-mptcp_v0.96/net/ipv4/tcp_ipv4 } /* to ACK */ /* fall through */ -@@ -2091,7 +2178,12 @@ +@@ -2090,7 +2177,12 @@ tcp_init_sock(sk); @@ -5736,7 +5507,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_ipv4.c mptcp-mptcp_v0.96/net/ipv4/tcp_ipv4 #ifdef CONFIG_TCP_MD5SIG tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific; -@@ -2110,6 +2202,11 @@ +@@ -2109,6 +2201,11 @@ tcp_cleanup_congestion_control(sk); @@ -5748,7 +5519,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_ipv4.c mptcp-mptcp_v0.96/net/ipv4/tcp_ipv4 tcp_cleanup_ulp(sk); /* Cleanup up the write buffer. */ -@@ -2615,6 +2712,11 @@ +@@ -2613,6 +2710,11 @@ .sysctl_rmem_offset = offsetof(struct net, ipv4.sysctl_tcp_rmem), .max_header = MAX_TCP_HEADER, .obj_size = sizeof(struct tcp_sock), @@ -5760,7 +5531,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_ipv4.c mptcp-mptcp_v0.96/net/ipv4/tcp_ipv4 .slab_flags = SLAB_TYPESAFE_BY_RCU, .twsk_prot = &tcp_timewait_sock_ops, .rsk_prot = &tcp_request_sock_ops, -@@ -2625,6 +2727,9 @@ +@@ -2623,6 +2725,9 @@ .compat_getsockopt = compat_tcp_getsockopt, #endif .diag_destroy = tcp_abort, @@ -5770,9 +5541,9 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_ipv4.c mptcp-mptcp_v0.96/net/ipv4/tcp_ipv4 }; EXPORT_SYMBOL(tcp_prot); -diff -aurN linux-5.4.155/net/ipv4/tcp_minisocks.c mptcp-mptcp_v0.96/net/ipv4/tcp_minisocks.c ---- linux-5.4.155/net/ipv4/tcp_minisocks.c 2021-10-20 11:40:18.000000000 +0200 -+++ mptcp-mptcp_v0.96/net/ipv4/tcp_minisocks.c 2021-10-25 10:05:18.000000000 +0200 +diff -aurN linux-5.4.64/net/ipv4/tcp_minisocks.c linux-5.4.64.mptcp/net/ipv4/tcp_minisocks.c +--- linux-5.4.64/net/ipv4/tcp_minisocks.c 2020-09-09 19:12:37.000000000 +0200 ++++ linux-5.4.64.mptcp/net/ipv4/tcp_minisocks.c 2020-09-10 19:25:10.503220935 +0200 @@ -19,11 +19,13 @@ * Jorge Cwik, */ @@ -5833,17 +5604,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_minisocks.c mptcp-mptcp_v0.96/net/ipv4/tcp inet_twsk_put(tw); return TCP_TW_SUCCESS; } -@@ -270,11 +291,25 @@ - tw->tw_rcv_wscale = tp->rx_opt.rcv_wscale; - tcptw->tw_rcv_nxt = tp->rcv_nxt; - tcptw->tw_snd_nxt = tp->snd_nxt; -- tcptw->tw_rcv_wnd = tcp_receive_window(tp); -+ /* no need to keep track of the right-most right edge -+ * when in time wait, can directly use the currently -+ * advertised window. 
-+ */ -+ tcptw->tw_rcv_wnd = tcp_receive_window_now(tp); - tcptw->tw_ts_recent = tp->rx_opt.ts_recent; +@@ -275,6 +296,16 @@ tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp; tcptw->tw_ts_offset = tp->tsoffset; tcptw->tw_last_oow_ack_time = 0; @@ -5860,7 +5621,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_minisocks.c mptcp-mptcp_v0.96/net/ipv4/tcp tcptw->tw_tx_delay = tp->tcp_tx_delay; #if IS_ENABLED(CONFIG_IPV6) if (tw->tw_family == PF_INET6) { -@@ -336,6 +371,7 @@ +@@ -336,6 +367,7 @@ NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEWAITOVERFLOW); } @@ -5868,7 +5629,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_minisocks.c mptcp-mptcp_v0.96/net/ipv4/tcp tcp_update_metrics(sk); tcp_done(sk); } -@@ -343,6 +379,10 @@ +@@ -343,6 +375,10 @@ void tcp_twsk_destructor(struct sock *sk) { @@ -5879,7 +5640,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_minisocks.c mptcp-mptcp_v0.96/net/ipv4/tcp #ifdef CONFIG_TCP_MD5SIG if (static_branch_unlikely(&tcp_md5_needed)) { struct tcp_timewait_sock *twsk = tcp_twsk(sk); -@@ -386,8 +426,9 @@ +@@ -386,8 +422,9 @@ full_space = rcv_wnd * mss; /* tcp_full_space because it is guaranteed to be the first packet */ @@ -5891,7 +5652,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_minisocks.c mptcp-mptcp_v0.96/net/ipv4/tcp &req->rsk_rcv_wnd, &req->rsk_window_clamp, ireq->wscale_ok, -@@ -487,6 +528,8 @@ +@@ -487,6 +524,8 @@ WRITE_ONCE(newtp->snd_nxt, seq); newtp->snd_up = seq; @@ -5900,15 +5661,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_minisocks.c mptcp-mptcp_v0.96/net/ipv4/tcp INIT_LIST_HEAD(&newtp->tsq_node); INIT_LIST_HEAD(&newtp->tsorted_sent_queue); -@@ -511,6 +554,7 @@ - newtp->window_clamp = req->rsk_window_clamp; - newtp->rcv_ssthresh = req->rsk_rcv_wnd; - newtp->rcv_wnd = req->rsk_rcv_wnd; -+ newtp->rcv_right_edge = newtp->rcv_wnd + newtp->rcv_wup; - newtp->rx_opt.wscale_ok = ireq->wscale_ok; - if (newtp->rx_opt.wscale_ok) { - newtp->rx_opt.snd_wscale = ireq->snd_wscale; -@@ -530,6 +574,8 @@ +@@ -530,6 +569,8 @@ newtp->rx_opt.ts_recent_stamp = 0; newtp->tcp_header_len = sizeof(struct tcphdr); } @@ -5917,7 +5670,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_minisocks.c mptcp-mptcp_v0.96/net/ipv4/tcp if (req->num_timeout) { newtp->undo_marker = treq->snt_isn; newtp->retrans_stamp = div_u64(treq->snt_synack, -@@ -547,6 +593,7 @@ +@@ -547,6 +588,7 @@ tcp_ecn_openreq_child(newtp, req); newtp->fastopen_req = NULL; RCU_INIT_POINTER(newtp->fastopen_rsk, NULL); @@ -5925,7 +5678,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_minisocks.c mptcp-mptcp_v0.96/net/ipv4/tcp __TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS); -@@ -570,15 +617,20 @@ +@@ -570,15 +612,20 @@ bool fastopen, bool *req_stolen) { struct tcp_options_received tmp_opt; @@ -5947,7 +5700,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_minisocks.c mptcp-mptcp_v0.96/net/ipv4/tcp if (tmp_opt.saw_tstamp) { tmp_opt.ts_recent = req->ts_recent; -@@ -619,7 +671,14 @@ +@@ -619,7 +666,14 @@ * * Reset timer after retransmitting SYNACK, similar to * the idea of fast retransmit in recovery. @@ -5962,7 +5715,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_minisocks.c mptcp-mptcp_v0.96/net/ipv4/tcp if (!tcp_oow_rate_limited(sock_net(sk), skb, LINUX_MIB_TCPACKSKIPPEDSYNRECV, &tcp_rsk(req)->last_oow_ack_time) && -@@ -767,17 +826,40 @@ +@@ -767,17 +821,40 @@ * ESTABLISHED STATE. If it will be dropped after * socket is created, wait for troubles. 
*/ @@ -6003,7 +5756,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_minisocks.c mptcp-mptcp_v0.96/net/ipv4/tcp if (!sock_net(sk)->ipv4.sysctl_tcp_abort_on_overflow) { inet_rsk(req)->acked = 1; return NULL; -@@ -823,12 +905,13 @@ +@@ -820,12 +897,13 @@ { int ret = 0; int state = child->sk_state; @@ -6018,7 +5771,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_minisocks.c mptcp-mptcp_v0.96/net/ipv4/tcp ret = tcp_rcv_state_process(child, skb); /* Wakeup parent, send SIGIO */ if (state == TCP_SYN_RECV && child->sk_state != state) -@@ -838,10 +921,14 @@ +@@ -835,10 +913,14 @@ * in main socket hash table and lock on listening * socket does not protect us more. */ @@ -6034,9 +5787,9 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_minisocks.c mptcp-mptcp_v0.96/net/ipv4/tcp sock_put(child); return ret; } -diff -aurN linux-5.4.155/net/ipv4/tcp_output.c mptcp-mptcp_v0.96/net/ipv4/tcp_output.c ---- linux-5.4.155/net/ipv4/tcp_output.c 2021-10-20 11:40:18.000000000 +0200 -+++ mptcp-mptcp_v0.96/net/ipv4/tcp_output.c 2021-10-25 10:05:18.000000000 +0200 +diff -aurN linux-5.4.64/net/ipv4/tcp_output.c linux-5.4.64.mptcp/net/ipv4/tcp_output.c +--- linux-5.4.64/net/ipv4/tcp_output.c 2020-09-09 19:12:37.000000000 +0200 ++++ linux-5.4.64.mptcp/net/ipv4/tcp_output.c 2020-09-10 19:34:56.261474044 +0200 @@ -37,6 +37,12 @@ #define pr_fmt(fmt) "TCP: " fmt @@ -6078,23 +5831,20 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_output.c mptcp-mptcp_v0.96/net/ipv4/tcp_ou + * have to allow this. Otherwise we may announce a window too large + * for the current meta-level sk_rcvbuf. + */ -+ u32 cur_win = tcp_receive_window_now(mptcp(tp) ? tcp_sk(mptcp_meta_sk(sk)) : tp); ++ u32 cur_win = tcp_receive_window(mptcp(tp) ? tcp_sk(mptcp_meta_sk(sk)) : tp); + u32 new_win = tp->ops->__select_window(sk); /* Never shrink the offered window */ if (new_win < cur_win) { -@@ -276,8 +283,10 @@ +@@ -276,6 +283,7 @@ LINUX_MIB_TCPWANTZEROWINDOWADV); new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale); } + tp->rcv_wnd = new_win; tp->rcv_wup = tp->rcv_nxt; -+ tcp_update_rcv_right_edge(tp); - /* Make sure we do not exceed the maximum possible - * scaled window. -@@ -388,7 +397,7 @@ +@@ -388,7 +396,7 @@ /* Constructs common control bits of non-data skb. If SYN/FIN is present, * auto increment end seqno. */ @@ -6103,7 +5853,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_output.c mptcp-mptcp_v0.96/net/ipv4/tcp_ou { skb->ip_summed = CHECKSUM_PARTIAL; -@@ -403,7 +412,7 @@ +@@ -403,7 +411,7 @@ TCP_SKB_CB(skb)->end_seq = seq; } @@ -6112,7 +5862,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_output.c mptcp-mptcp_v0.96/net/ipv4/tcp_ou { return tp->snd_una != tp->snd_up; } -@@ -414,6 +423,7 @@ +@@ -414,6 +422,7 @@ #define OPTION_WSCALE (1 << 3) #define OPTION_FAST_OPEN_COOKIE (1 << 8) #define OPTION_SMC (1 << 9) @@ -6120,7 +5870,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_output.c mptcp-mptcp_v0.96/net/ipv4/tcp_ou static void smc_options_write(__be32 *ptr, u16 *options) { -@@ -430,17 +440,6 @@ +@@ -430,17 +439,6 @@ #endif } @@ -6138,7 +5888,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_output.c mptcp-mptcp_v0.96/net/ipv4/tcp_ou /* Write previously computed TCP options to the packet. * * Beware: Something in the Internet is very sensitive to the ordering of -@@ -455,7 +454,7 @@ +@@ -455,7 +453,7 @@ * (but it may well be that other scenarios fail similarly). 
*/ static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp, @@ -6147,7 +5897,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_output.c mptcp-mptcp_v0.96/net/ipv4/tcp_ou { u16 options = opts->options; /* mungable copy */ -@@ -549,6 +548,9 @@ +@@ -549,6 +547,9 @@ } smc_options_write(ptr, &options); @@ -6157,7 +5907,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_output.c mptcp-mptcp_v0.96/net/ipv4/tcp_ou } static void smc_set_option(const struct tcp_sock *tp, -@@ -635,6 +637,8 @@ +@@ -635,6 +636,8 @@ if (unlikely(!(OPTION_TS & opts->options))) remaining -= TCPOLEN_SACKPERM_ALIGNED; } @@ -6166,7 +5916,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_output.c mptcp-mptcp_v0.96/net/ipv4/tcp_ou if (fastopen && fastopen->cookie.len >= 0) { u32 need = fastopen->cookie.len; -@@ -718,6 +722,9 @@ +@@ -718,6 +721,9 @@ smc_set_option_cond(tcp_sk(sk), ireq, opts, &remaining); @@ -6176,7 +5926,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_output.c mptcp-mptcp_v0.96/net/ipv4/tcp_ou return MAX_TCP_OPTION_SPACE - remaining; } -@@ -752,14 +759,19 @@ +@@ -752,14 +758,19 @@ opts->tsecr = tp->rx_opt.ts_recent; size += TCPOLEN_TSTAMP_ALIGNED; } @@ -6185,23 +5935,22 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_output.c mptcp-mptcp_v0.96/net/ipv4/tcp_ou eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack; if (unlikely(eff_sacks)) { -- const unsigned int remaining = MAX_TCP_OPTION_SPACE - size; + const unsigned int remaining = MAX_TCP_OPTION_SPACE - size; - opts->num_sack_blocks = - min_t(unsigned int, eff_sacks, - (remaining - TCPOLEN_SACK_BASE_ALIGNED) / - TCPOLEN_SACK_PERBLOCK); -+ const unsigned remaining = MAX_TCP_OPTION_SPACE - size; + if (remaining < TCPOLEN_SACK_BASE_ALIGNED) + opts->num_sack_blocks = 0; + else + opts->num_sack_blocks = -+ min_t(unsigned int, eff_sacks, -+ (remaining - TCPOLEN_SACK_BASE_ALIGNED) / -+ TCPOLEN_SACK_PERBLOCK); ++ min_t(unsigned int, eff_sacks, ++ (remaining - TCPOLEN_SACK_BASE_ALIGNED) / ++ TCPOLEN_SACK_PERBLOCK); if (likely(opts->num_sack_blocks)) size += TCPOLEN_SACK_BASE_ALIGNED + opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK; -@@ -802,19 +814,31 @@ +@@ -802,19 +813,31 @@ tcp_xmit_retransmit_queue(sk); } @@ -6240,7 +5989,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_output.c mptcp-mptcp_v0.96/net/ipv4/tcp_ou } /* * One tasklet per cpu tries to send more skbs. 
-@@ -851,7 +875,9 @@ +@@ -851,7 +874,9 @@ #define TCP_DEFERRED_ALL (TCPF_TSQ_DEFERRED | \ TCPF_WRITE_TIMER_DEFERRED | \ TCPF_DELACK_TIMER_DEFERRED | \ @@ -6251,7 +6000,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_output.c mptcp-mptcp_v0.96/net/ipv4/tcp_ou /** * tcp_release_cb - tcp release_sock() callback * @sk: socket -@@ -874,6 +900,9 @@ +@@ -874,6 +899,9 @@ if (flags & TCPF_TSQ_DEFERRED) { tcp_tsq_write(sk); __sock_put(sk); @@ -6261,7 +6010,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_output.c mptcp-mptcp_v0.96/net/ipv4/tcp_ou } /* Here begins the tricky part : * We are called from release_sock() with : -@@ -898,6 +927,13 @@ +@@ -898,6 +926,13 @@ inet_csk(sk)->icsk_af_ops->mtu_reduced(sk); __sock_put(sk); } @@ -6275,7 +6024,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_output.c mptcp-mptcp_v0.96/net/ipv4/tcp_ou } EXPORT_SYMBOL(tcp_release_cb); -@@ -981,8 +1017,8 @@ +@@ -981,8 +1016,8 @@ return HRTIMER_NORESTART; } @@ -6286,7 +6035,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_output.c mptcp-mptcp_v0.96/net/ipv4/tcp_ou { struct tcp_sock *tp = tcp_sk(sk); -@@ -1128,10 +1164,10 @@ +@@ -1128,10 +1163,10 @@ } } @@ -6299,7 +6048,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_output.c mptcp-mptcp_v0.96/net/ipv4/tcp_ou tcp_ecn_send(sk, skb, th, tcp_header_size); } else { /* RFC1323: The window in SYN & SYN/ACK segments -@@ -1189,8 +1225,8 @@ +@@ -1189,8 +1224,8 @@ return err; } @@ -6310,7 +6059,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_output.c mptcp-mptcp_v0.96/net/ipv4/tcp_ou { return __tcp_transmit_skb(sk, skb, clone_it, gfp_mask, tcp_sk(sk)->rcv_nxt); -@@ -1201,7 +1237,7 @@ +@@ -1201,7 +1236,7 @@ * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames, * otherwise socket can stall. */ @@ -6319,7 +6068,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_output.c mptcp-mptcp_v0.96/net/ipv4/tcp_ou { struct tcp_sock *tp = tcp_sk(sk); -@@ -1214,7 +1250,7 @@ +@@ -1214,7 +1249,7 @@ } /* Initialize TSO segments for a packet. */ @@ -6328,7 +6077,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_output.c mptcp-mptcp_v0.96/net/ipv4/tcp_ou { if (skb->len <= mss_now) { /* Avoid the costly divide in the normal -@@ -1231,7 +1267,7 @@ +@@ -1231,7 +1266,7 @@ /* Pcount in the middle of the write queue got changed, we need to do various * tweaks to fix counters */ @@ -6337,7 +6086,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_output.c mptcp-mptcp_v0.96/net/ipv4/tcp_ou { struct tcp_sock *tp = tcp_sk(sk); -@@ -1400,7 +1436,7 @@ +@@ -1400,7 +1435,7 @@ /* This is similar to __pskb_pull_tail(). The difference is that pulled * data is not copied, but immediately discarded. */ @@ -6346,7 +6095,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_output.c mptcp-mptcp_v0.96/net/ipv4/tcp_ou { struct skb_shared_info *shinfo; int i, k, eat; -@@ -1623,6 +1659,7 @@ +@@ -1622,6 +1657,7 @@ return mss_now; } @@ -6354,19 +6103,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_output.c mptcp-mptcp_v0.96/net/ipv4/tcp_ou /* RFC2861, slow part. Adjust cwnd, after it was not full during one rto. * As additional protections, we do not touch cwnd in retransmission phases, -@@ -1682,8 +1719,11 @@ - * 2) not cwnd limited (this else condition) - * 3) no more data to send (tcp_write_queue_empty()) - * 4) application is hitting buffer limit (SOCK_NOSPACE) -+ * 5) For MPTCP subflows, the scheduler determines -+ * sndbuf limited. 
- */ - if (tcp_write_queue_empty(sk) && sk->sk_socket && -+ !(mptcp(tcp_sk(sk)) && !is_meta_sk(sk)) && - test_bit(SOCK_NOSPACE, &sk->sk_socket->flags) && - (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) - tcp_chrono_start(sk, TCP_CHRONO_SNDBUF_LIMITED); -@@ -1705,8 +1745,8 @@ +@@ -1703,8 +1739,8 @@ * But we can avoid doing the divide again given we already have * skb_pcount = skb->len / mss_now */ @@ -6377,16 +6114,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_output.c mptcp-mptcp_v0.96/net/ipv4/tcp_ou { if (skb->len < tcp_skb_pcount(skb) * mss_now) tp->snd_sml = TCP_SKB_CB(skb)->end_seq; -@@ -1752,7 +1792,7 @@ - /* Return the number of segments we want in the skb we are transmitting. - * See if congestion control module wants to decide; otherwise, autosize. - */ --static u32 tcp_tso_segs(struct sock *sk, unsigned int mss_now) -+u32 tcp_tso_segs(struct sock *sk, unsigned int mss_now) - { - const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops; - u32 min_tso, tso_segs; -@@ -1766,11 +1806,11 @@ +@@ -1764,11 +1800,11 @@ } /* Returns the portion of skb which can be sent right away */ @@ -6403,7 +6131,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_output.c mptcp-mptcp_v0.96/net/ipv4/tcp_ou { const struct tcp_sock *tp = tcp_sk(sk); u32 partial, needed, window, max_len; -@@ -1800,13 +1840,14 @@ +@@ -1798,13 +1834,14 @@ /* Can at least one segment of SKB be sent right now, according to the * congestion window rules? If so, return how many segments are allowed. */ @@ -6421,7 +6149,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_output.c mptcp-mptcp_v0.96/net/ipv4/tcp_ou tcp_skb_pcount(skb) == 1) return 1; -@@ -1821,12 +1862,13 @@ +@@ -1819,12 +1856,13 @@ halfcwnd = max(cwnd >> 1, 1U); return min(halfcwnd, cwnd - in_flight); } @@ -6436,7 +6164,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_output.c mptcp-mptcp_v0.96/net/ipv4/tcp_ou { int tso_segs = tcp_skb_pcount(skb); -@@ -1841,8 +1883,8 @@ +@@ -1839,8 +1877,8 @@ /* Return true if the Nagle test allows this packet to be * sent now. */ @@ -6447,7 +6175,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_output.c mptcp-mptcp_v0.96/net/ipv4/tcp_ou { /* Nagle rule does not apply to frames, which sit in the middle of the * write_queue (they have no chances to get new data). -@@ -1854,7 +1896,8 @@ +@@ -1852,7 +1890,8 @@ return true; /* Don't use the nagle rule for urgent data (or for the final FIN). */ @@ -6457,7 +6185,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_output.c mptcp-mptcp_v0.96/net/ipv4/tcp_ou return true; if (!tcp_nagle_check(skb->len < cur_mss, tp, nonagle)) -@@ -1864,9 +1907,8 @@ +@@ -1862,9 +1901,8 @@ } /* Does at least the first segment of SKB fit into the send window? */ @@ -6469,7 +6197,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_output.c mptcp-mptcp_v0.96/net/ipv4/tcp_ou { u32 end_seq = TCP_SKB_CB(skb)->end_seq; -@@ -1875,6 +1917,7 @@ +@@ -1873,6 +1911,7 @@ return !after(end_seq, tcp_wnd_end(tp)); } @@ -6477,7 +6205,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_output.c mptcp-mptcp_v0.96/net/ipv4/tcp_ou /* Trim TSO SKB to LEN bytes, put the remaining data into a new packet * which is put after SKB on the list. It is very much like -@@ -2033,7 +2076,8 @@ +@@ -2031,7 +2070,8 @@ /* If this packet won't get more data, do not wait. 
*/ if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) || @@ -6487,7 +6215,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_output.c mptcp-mptcp_v0.96/net/ipv4/tcp_ou goto send_now; return true; -@@ -2366,7 +2410,7 @@ +@@ -2364,7 +2404,7 @@ * Returns true, if no segments are in flight and we have queued segments, * but cannot send anything now because of SWS or another problem. */ @@ -6496,7 +6224,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_output.c mptcp-mptcp_v0.96/net/ipv4/tcp_ou int push_one, gfp_t gfp) { struct tcp_sock *tp = tcp_sk(sk); -@@ -2380,7 +2424,12 @@ +@@ -2378,7 +2418,12 @@ sent_pkts = 0; tcp_mstamp_refresh(tp); @@ -6510,7 +6238,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_output.c mptcp-mptcp_v0.96/net/ipv4/tcp_ou /* Do MTU probing. */ result = tcp_mtu_probe(sk); if (!result) { -@@ -2576,7 +2625,7 @@ +@@ -2572,7 +2617,7 @@ skb = tcp_send_head(sk); if (skb && tcp_snd_wnd_test(tp, skb, mss)) { pcount = tp->packets_out; @@ -6519,7 +6247,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_output.c mptcp-mptcp_v0.96/net/ipv4/tcp_ou if (tp->packets_out > pcount) goto probe_sent; goto rearm_timer; -@@ -2638,8 +2687,8 @@ +@@ -2634,8 +2679,8 @@ if (unlikely(sk->sk_state == TCP_CLOSE)) return; @@ -6530,7 +6258,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_output.c mptcp-mptcp_v0.96/net/ipv4/tcp_ou tcp_check_probe_timer(sk); } -@@ -2652,7 +2701,8 @@ +@@ -2648,7 +2693,8 @@ BUG_ON(!skb || skb->len < mss_now); @@ -6540,7 +6268,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_output.c mptcp-mptcp_v0.96/net/ipv4/tcp_ou } /* This function returns the amount that we can raise the -@@ -2874,6 +2924,10 @@ +@@ -2870,6 +2916,10 @@ if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN) return; @@ -6551,7 +6279,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_output.c mptcp-mptcp_v0.96/net/ipv4/tcp_ou skb_rbtree_walk_from_safe(skb, tmp) { if (!tcp_can_collapse(sk, skb)) break; -@@ -3355,7 +3409,7 @@ +@@ -3351,7 +3401,7 @@ /* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */ th->window = htons(min(req->rsk_rcv_wnd, 65535U)); @@ -6560,7 +6288,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_output.c mptcp-mptcp_v0.96/net/ipv4/tcp_ou th->doff = (tcp_header_size >> 2); __TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS); -@@ -3437,13 +3491,13 @@ +@@ -3433,13 +3483,13 @@ if (rcv_wnd == 0) rcv_wnd = dst_metric(dst, RTAX_INITRWND); @@ -6581,14 +6309,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_output.c mptcp-mptcp_v0.96/net/ipv4/tcp_ou tp->rx_opt.rcv_wscale = rcv_wscale; tp->rcv_ssthresh = tp->rcv_wnd; -@@ -3463,11 +3517,43 @@ - else - tp->rcv_tstamp = tcp_jiffies32; - tp->rcv_wup = tp->rcv_nxt; -+ /* force set rcv_right_edge here at start of connection */ -+ tp->rcv_right_edge = tp->rcv_wup + tp->rcv_wnd; - WRITE_ONCE(tp->copied_seq, tp->rcv_nxt); - +@@ -3464,6 +3514,36 @@ inet_csk(sk)->icsk_rto = tcp_timeout_init(sk); inet_csk(sk)->icsk_retransmits = 0; tcp_clear_retrans(tp); @@ -6625,7 +6346,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_output.c mptcp-mptcp_v0.96/net/ipv4/tcp_ou } static void tcp_connect_queue_skb(struct sock *sk, struct sk_buff *skb) -@@ -3731,6 +3817,7 @@ +@@ -3727,6 +3807,7 @@ { __tcp_send_ack(sk, tcp_sk(sk)->rcv_nxt); } @@ -6633,7 +6354,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_output.c mptcp-mptcp_v0.96/net/ipv4/tcp_ou /* This routine sends a packet with an out of date sequence * number. It assumes the other end will try to ack it. -@@ -3743,7 +3830,7 @@ +@@ -3739,7 +3820,7 @@ * one is with SEG.SEQ=SND.UNA to deliver urgent pointer, another is * out-of-date with SND.UNA-1 to probe window. 
*/ @@ -6642,7 +6363,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_output.c mptcp-mptcp_v0.96/net/ipv4/tcp_ou { struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; -@@ -3830,7 +3917,7 @@ +@@ -3826,7 +3907,7 @@ unsigned long timeout; int err; @@ -6651,9 +6372,9 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_output.c mptcp-mptcp_v0.96/net/ipv4/tcp_ou if (tp->packets_out || tcp_write_queue_empty(sk)) { /* Cancel probe timer, if it is not required. */ -diff -aurN linux-5.4.155/net/ipv4/tcp_timer.c mptcp-mptcp_v0.96/net/ipv4/tcp_timer.c ---- linux-5.4.155/net/ipv4/tcp_timer.c 2021-10-20 11:40:18.000000000 +0200 -+++ mptcp-mptcp_v0.96/net/ipv4/tcp_timer.c 2021-10-25 10:05:18.000000000 +0200 +diff -aurN linux-5.4.64/net/ipv4/tcp_timer.c linux-5.4.64.mptcp/net/ipv4/tcp_timer.c +--- linux-5.4.64/net/ipv4/tcp_timer.c 2020-09-09 19:12:37.000000000 +0200 ++++ linux-5.4.64.mptcp/net/ipv4/tcp_timer.c 2020-09-10 19:25:10.507220869 +0200 @@ -21,6 +21,7 @@ #include @@ -6662,7 +6383,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_timer.c mptcp-mptcp_v0.96/net/ipv4/tcp_tim #include static u32 tcp_clamp_rto_to_user_timeout(const struct sock *sk) -@@ -65,7 +66,7 @@ +@@ -47,7 +48,7 @@ * Returns: Nothing (void) */ @@ -6671,7 +6392,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_timer.c mptcp-mptcp_v0.96/net/ipv4/tcp_tim { sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT; sk->sk_error_report(sk); -@@ -121,7 +122,7 @@ +@@ -103,7 +104,7 @@ (!tp->snd_wnd && !tp->packets_out)) do_reset = true; if (do_reset) @@ -6680,7 +6401,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_timer.c mptcp-mptcp_v0.96/net/ipv4/tcp_tim tcp_done(sk); __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY); return 1; -@@ -206,9 +207,9 @@ +@@ -188,9 +189,9 @@ * after "boundary" unsuccessful, exponentially backed-off * retransmissions with an initial RTO of TCP_RTO_MIN. */ @@ -6693,7 +6414,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_timer.c mptcp-mptcp_v0.96/net/ipv4/tcp_tim { unsigned int start_ts; -@@ -228,7 +229,7 @@ +@@ -210,7 +211,7 @@ } /* A write timeout has occurred. Process the after effects. */ @@ -6702,7 +6423,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_timer.c mptcp-mptcp_v0.96/net/ipv4/tcp_tim { struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); -@@ -243,6 +244,17 @@ +@@ -225,6 +226,17 @@ sk_rethink_txhash(sk); } retry_until = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries; @@ -6720,7 +6441,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_timer.c mptcp-mptcp_v0.96/net/ipv4/tcp_tim expired = icsk->icsk_retransmits >= retry_until; } else { if (retransmits_timed_out(sk, net->ipv4.sysctl_tcp_retries1, 0)) { -@@ -338,18 +350,22 @@ +@@ -320,18 +332,22 @@ struct inet_connection_sock *icsk = from_timer(icsk, t, icsk_delack_timer); struct sock *sk = &icsk->icsk_inet.sk; @@ -6747,7 +6468,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_timer.c mptcp-mptcp_v0.96/net/ipv4/tcp_tim sock_put(sk); } -@@ -393,7 +409,12 @@ +@@ -375,7 +391,12 @@ } if (icsk->icsk_probes_out >= max_probes) { @@ -6761,7 +6482,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_timer.c mptcp-mptcp_v0.96/net/ipv4/tcp_tim } else { /* Only send another probe if we didn't close things up. 
*/ tcp_send_probe0(sk); -@@ -614,7 +635,7 @@ +@@ -596,7 +617,7 @@ break; case ICSK_TIME_RETRANS: icsk->icsk_pending = 0; @@ -6770,7 +6491,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_timer.c mptcp-mptcp_v0.96/net/ipv4/tcp_tim break; case ICSK_TIME_PROBE0: icsk->icsk_pending = 0; -@@ -631,16 +652,19 @@ +@@ -613,16 +634,19 @@ struct inet_connection_sock *icsk = from_timer(icsk, t, icsk_retransmit_timer); struct sock *sk = &icsk->icsk_inet.sk; @@ -6793,7 +6514,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_timer.c mptcp-mptcp_v0.96/net/ipv4/tcp_tim sock_put(sk); } -@@ -670,11 +694,12 @@ +@@ -652,11 +676,12 @@ struct sock *sk = from_timer(sk, t, sk_timer); struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); @@ -6808,7 +6529,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_timer.c mptcp-mptcp_v0.96/net/ipv4/tcp_tim /* Try again later. */ inet_csk_reset_keepalive_timer (sk, HZ/20); goto out; -@@ -686,16 +711,31 @@ +@@ -668,16 +693,31 @@ } tcp_mstamp_refresh(tp); @@ -6842,7 +6563,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_timer.c mptcp-mptcp_v0.96/net/ipv4/tcp_tim goto death; } -@@ -720,11 +760,11 @@ +@@ -702,11 +742,11 @@ icsk->icsk_probes_out > 0) || (icsk->icsk_user_timeout == 0 && icsk->icsk_probes_out >= keepalive_probes(tp))) { @@ -6856,7 +6577,7 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_timer.c mptcp-mptcp_v0.96/net/ipv4/tcp_tim icsk->icsk_probes_out++; elapsed = keepalive_intvl_when(tp); } else { -@@ -748,7 +788,7 @@ +@@ -730,7 +770,7 @@ tcp_done(sk); out: @@ -6865,9 +6586,9 @@ diff -aurN linux-5.4.155/net/ipv4/tcp_timer.c mptcp-mptcp_v0.96/net/ipv4/tcp_tim sock_put(sk); } -diff -aurN linux-5.4.155/net/ipv6/addrconf.c mptcp-mptcp_v0.96/net/ipv6/addrconf.c ---- linux-5.4.155/net/ipv6/addrconf.c 2021-10-20 11:40:18.000000000 +0200 -+++ mptcp-mptcp_v0.96/net/ipv6/addrconf.c 2021-10-25 10:05:18.000000000 +0200 +diff -aurN linux-5.4.64/net/ipv6/addrconf.c linux-5.4.64.mptcp/net/ipv6/addrconf.c +--- linux-5.4.64/net/ipv6/addrconf.c 2020-09-09 19:12:37.000000000 +0200 ++++ linux-5.4.64.mptcp/net/ipv6/addrconf.c 2020-09-10 19:25:10.507220869 +0200 @@ -967,6 +967,7 @@ kfree_rcu(ifp, rcu); @@ -6876,9 +6597,9 @@ diff -aurN linux-5.4.155/net/ipv6/addrconf.c mptcp-mptcp_v0.96/net/ipv6/addrconf static void ipv6_link_dev_addr(struct inet6_dev *idev, struct inet6_ifaddr *ifp) -diff -aurN linux-5.4.155/net/ipv6/af_inet6.c mptcp-mptcp_v0.96/net/ipv6/af_inet6.c ---- linux-5.4.155/net/ipv6/af_inet6.c 2021-10-20 11:40:18.000000000 +0200 -+++ mptcp-mptcp_v0.96/net/ipv6/af_inet6.c 2021-10-25 10:05:18.000000000 +0200 +diff -aurN linux-5.4.64/net/ipv6/af_inet6.c linux-5.4.64.mptcp/net/ipv6/af_inet6.c +--- linux-5.4.64/net/ipv6/af_inet6.c 2020-09-09 19:12:37.000000000 +0200 ++++ linux-5.4.64.mptcp/net/ipv6/af_inet6.c 2020-09-10 19:25:10.507220869 +0200 @@ -104,8 +104,7 @@ return (struct ipv6_pinfo *)(((u8 *)sk) + offset); } @@ -6889,9 +6610,9 @@ diff -aurN linux-5.4.155/net/ipv6/af_inet6.c mptcp-mptcp_v0.96/net/ipv6/af_inet6 { struct inet_sock *inet; struct ipv6_pinfo *np; -diff -aurN linux-5.4.155/net/ipv6/ipv6_sockglue.c mptcp-mptcp_v0.96/net/ipv6/ipv6_sockglue.c ---- linux-5.4.155/net/ipv6/ipv6_sockglue.c 2021-10-20 11:40:18.000000000 +0200 -+++ mptcp-mptcp_v0.96/net/ipv6/ipv6_sockglue.c 2021-10-25 10:05:18.000000000 +0200 +diff -aurN linux-5.4.64/net/ipv6/ipv6_sockglue.c linux-5.4.64.mptcp/net/ipv6/ipv6_sockglue.c +--- linux-5.4.64/net/ipv6/ipv6_sockglue.c 2020-09-09 19:12:37.000000000 +0200 ++++ linux-5.4.64.mptcp/net/ipv6/ipv6_sockglue.c 2020-09-10 19:25:10.507220869 +0200 @@ -44,6 +44,8 
@@ #include #include @@ -6915,27 +6636,9 @@ diff -aurN linux-5.4.155/net/ipv6/ipv6_sockglue.c mptcp-mptcp_v0.96/net/ipv6/ipv sk->sk_socket->ops = &inet_stream_ops; sk->sk_family = PF_INET; tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); -@@ -345,6 +352,17 @@ - if (val == -1) - val = 0; - np->tclass = val; -+ -+ if (is_meta_sk(sk)) { -+ struct mptcp_tcp_sock *mptcp; -+ -+ mptcp_for_each_sub(tcp_sk(sk)->mpcb, mptcp) { -+ struct sock *sk_it = mptcp_to_sock(mptcp); -+ -+ if (sk_it->sk_family == AF_INET6) -+ inet6_sk(sk_it)->tclass = val; -+ } -+ } - retv = 0; - break; - -diff -aurN linux-5.4.155/net/ipv6/syncookies.c mptcp-mptcp_v0.96/net/ipv6/syncookies.c ---- linux-5.4.155/net/ipv6/syncookies.c 2021-10-20 11:40:18.000000000 +0200 -+++ mptcp-mptcp_v0.96/net/ipv6/syncookies.c 2021-10-25 10:05:18.000000000 +0200 +diff -aurN linux-5.4.64/net/ipv6/syncookies.c linux-5.4.64.mptcp/net/ipv6/syncookies.c +--- linux-5.4.64/net/ipv6/syncookies.c 2020-09-09 19:12:37.000000000 +0200 ++++ linux-5.4.64.mptcp/net/ipv6/syncookies.c 2020-09-10 19:25:10.507220869 +0200 @@ -15,6 +15,8 @@ #include #include @@ -7002,7 +6705,7 @@ diff -aurN linux-5.4.155/net/ipv6/syncookies.c mptcp-mptcp_v0.96/net/ipv6/syncoo if (security_inet_conn_request(sk, skb, req)) goto out_free; -@@ -247,15 +265,15 @@ +@@ -241,15 +259,15 @@ (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0)) req->rsk_window_clamp = full_space; @@ -7023,9 +6726,9 @@ diff -aurN linux-5.4.155/net/ipv6/syncookies.c mptcp-mptcp_v0.96/net/ipv6/syncoo out: return ret; out_free: -diff -aurN linux-5.4.155/net/ipv6/tcp_ipv6.c mptcp-mptcp_v0.96/net/ipv6/tcp_ipv6.c ---- linux-5.4.155/net/ipv6/tcp_ipv6.c 2021-10-20 11:40:18.000000000 +0200 -+++ mptcp-mptcp_v0.96/net/ipv6/tcp_ipv6.c 2021-10-25 10:05:18.000000000 +0200 +diff -aurN linux-5.4.64/net/ipv6/tcp_ipv6.c linux-5.4.64.mptcp/net/ipv6/tcp_ipv6.c +--- linux-5.4.64/net/ipv6/tcp_ipv6.c 2020-09-09 19:12:37.000000000 +0200 ++++ linux-5.4.64.mptcp/net/ipv6/tcp_ipv6.c 2020-09-10 19:25:10.507220869 +0200 @@ -58,6 +58,8 @@ #include #include @@ -7114,8 +6817,8 @@ diff -aurN linux-5.4.155/net/ipv6/tcp_ipv6.c mptcp-mptcp_v0.96/net/ipv6/tcp_ipv6 +void tcp_v6_mtu_reduced(struct sock *sk) { struct dst_entry *dst; - u32 mtu; -@@ -376,7 +381,7 @@ + +@@ -367,7 +372,7 @@ struct ipv6_pinfo *np; struct tcp_sock *tp; __u32 seq, snd_una; @@ -7124,7 +6827,7 @@ diff -aurN linux-5.4.155/net/ipv6/tcp_ipv6.c mptcp-mptcp_v0.96/net/ipv6/tcp_ipv6 bool fatal; int err; -@@ -402,8 +407,14 @@ +@@ -393,8 +398,14 @@ return 0; } @@ -7141,7 +6844,7 @@ diff -aurN linux-5.4.155/net/ipv6/tcp_ipv6.c mptcp-mptcp_v0.96/net/ipv6/tcp_ipv6 __NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS); if (sk->sk_state == TCP_CLOSE) -@@ -414,7 +425,6 @@ +@@ -405,7 +416,6 @@ goto out; } @@ -7149,7 +6852,7 @@ diff -aurN linux-5.4.155/net/ipv6/tcp_ipv6.c mptcp-mptcp_v0.96/net/ipv6/tcp_ipv6 /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */ fastopen = rcu_dereference(tp->fastopen_rsk); snd_una = fastopen ? 
tcp_rsk(fastopen)->snt_isn : tp->snd_una; -@@ -454,11 +464,15 @@ +@@ -439,11 +449,15 @@ goto out; tp->mtu_info = ntohl(info); @@ -7169,7 +6872,7 @@ diff -aurN linux-5.4.155/net/ipv6/tcp_ipv6.c mptcp-mptcp_v0.96/net/ipv6/tcp_ipv6 goto out; } -@@ -473,7 +487,7 @@ +@@ -458,7 +472,7 @@ if (fastopen && !fastopen->sk) break; @@ -7178,7 +6881,7 @@ diff -aurN linux-5.4.155/net/ipv6/tcp_ipv6.c mptcp-mptcp_v0.96/net/ipv6/tcp_ipv6 sk->sk_err = err; sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */ -@@ -483,14 +497,14 @@ +@@ -468,14 +482,14 @@ goto out; } @@ -7195,7 +6898,7 @@ diff -aurN linux-5.4.155/net/ipv6/tcp_ipv6.c mptcp-mptcp_v0.96/net/ipv6/tcp_ipv6 sock_put(sk); return 0; } -@@ -538,8 +552,7 @@ +@@ -523,8 +537,7 @@ return err; } @@ -7205,7 +6908,7 @@ diff -aurN linux-5.4.155/net/ipv6/tcp_ipv6.c mptcp-mptcp_v0.96/net/ipv6/tcp_ipv6 { kfree(inet_rsk(req)->ipv6_opt); kfree_skb(inet_rsk(req)->pktopts); -@@ -757,9 +770,10 @@ +@@ -742,9 +755,10 @@ return false; } @@ -7219,7 +6922,7 @@ diff -aurN linux-5.4.155/net/ipv6/tcp_ipv6.c mptcp-mptcp_v0.96/net/ipv6/tcp_ipv6 { bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags); struct inet_request_sock *ireq = inet_rsk(req); -@@ -781,6 +795,8 @@ +@@ -766,6 +780,8 @@ refcount_inc(&skb->users); ireq->pktopts = skb; } @@ -7228,7 +6931,7 @@ diff -aurN linux-5.4.155/net/ipv6/tcp_ipv6.c mptcp-mptcp_v0.96/net/ipv6/tcp_ipv6 } static struct dst_entry *tcp_v6_route_req(const struct sock *sk, -@@ -800,7 +816,7 @@ +@@ -785,7 +801,7 @@ .syn_ack_timeout = tcp_syn_ack_timeout, }; @@ -7237,7 +6940,7 @@ diff -aurN linux-5.4.155/net/ipv6/tcp_ipv6.c mptcp-mptcp_v0.96/net/ipv6/tcp_ipv6 .mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr), #ifdef CONFIG_TCP_MD5SIG -@@ -818,9 +834,9 @@ +@@ -803,9 +819,9 @@ }; static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq, @@ -7249,7 +6952,7 @@ diff -aurN linux-5.4.155/net/ipv6/tcp_ipv6.c mptcp-mptcp_v0.96/net/ipv6/tcp_ipv6 { const struct tcphdr *th = tcp_hdr(skb); struct tcphdr *t1; -@@ -839,7 +855,10 @@ +@@ -824,7 +840,10 @@ if (key) tot_len += TCPOLEN_MD5SIG_ALIGNED; #endif @@ -7261,7 +6964,7 @@ diff -aurN linux-5.4.155/net/ipv6/tcp_ipv6.c mptcp-mptcp_v0.96/net/ipv6/tcp_ipv6 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len, GFP_ATOMIC); if (!buff) -@@ -877,6 +896,17 @@ +@@ -862,6 +881,17 @@ tcp_v6_md5_hash_hdr((__u8 *)topt, key, &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr, t1); @@ -7279,7 +6982,7 @@ diff -aurN linux-5.4.155/net/ipv6/tcp_ipv6.c mptcp-mptcp_v0.96/net/ipv6/tcp_ipv6 } #endif -@@ -935,7 +965,7 @@ +@@ -920,7 +950,7 @@ kfree_skb(buff); } @@ -7288,7 +6991,7 @@ diff -aurN linux-5.4.155/net/ipv6/tcp_ipv6.c mptcp-mptcp_v0.96/net/ipv6/tcp_ipv6 { const struct tcphdr *th = tcp_hdr(skb); struct ipv6hdr *ipv6h = ipv6_hdr(skb); -@@ -1020,8 +1050,8 @@ +@@ -1005,8 +1035,8 @@ label = ip6_flowlabel(ipv6h); } @@ -7299,7 +7002,7 @@ diff -aurN linux-5.4.155/net/ipv6/tcp_ipv6.c mptcp-mptcp_v0.96/net/ipv6/tcp_ipv6 #ifdef CONFIG_TCP_MD5SIG out: -@@ -1030,30 +1060,37 @@ +@@ -1015,30 +1045,37 @@ } static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq, @@ -7344,7 +7047,7 @@ diff -aurN linux-5.4.155/net/ipv6/tcp_ipv6.c mptcp-mptcp_v0.96/net/ipv6/tcp_ipv6 { /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV * sk->sk_state == TCP_SYN_RECV -> for Fast Open. 
-@@ -1063,18 +1100,18 @@ +@@ -1048,18 +1085,18 @@ * exception of segments, MUST be right-shifted by * Rcv.Wind.Shift bits: */ @@ -7367,7 +7070,7 @@ diff -aurN linux-5.4.155/net/ipv6/tcp_ipv6.c mptcp-mptcp_v0.96/net/ipv6/tcp_ipv6 { #ifdef CONFIG_SYN_COOKIES const struct tcphdr *th = tcp_hdr(skb); -@@ -1100,7 +1137,7 @@ +@@ -1085,7 +1122,7 @@ return mss; } @@ -7376,7 +7079,7 @@ diff -aurN linux-5.4.155/net/ipv6/tcp_ipv6.c mptcp-mptcp_v0.96/net/ipv6/tcp_ipv6 { if (skb->protocol == htons(ETH_P_IP)) return tcp_v4_conn_request(sk, skb); -@@ -1131,11 +1168,11 @@ +@@ -1111,11 +1148,11 @@ sizeof(struct inet6_skb_parm)); } @@ -7393,7 +7096,7 @@ diff -aurN linux-5.4.155/net/ipv6/tcp_ipv6.c mptcp-mptcp_v0.96/net/ipv6/tcp_ipv6 { struct inet_request_sock *ireq; struct ipv6_pinfo *newnp; -@@ -1170,7 +1207,15 @@ +@@ -1150,7 +1187,15 @@ newnp->saddr = newsk->sk_v6_rcv_saddr; @@ -7410,7 +7113,7 @@ diff -aurN linux-5.4.155/net/ipv6/tcp_ipv6.c mptcp-mptcp_v0.96/net/ipv6/tcp_ipv6 newsk->sk_backlog_rcv = tcp_v4_do_rcv; #ifdef CONFIG_TCP_MD5SIG newtp->af_specific = &tcp_sock_ipv6_mapped_specific; -@@ -1217,6 +1262,14 @@ +@@ -1197,6 +1242,14 @@ if (!newsk) goto out_nonewsk; @@ -7425,7 +7128,7 @@ diff -aurN linux-5.4.155/net/ipv6/tcp_ipv6.c mptcp-mptcp_v0.96/net/ipv6/tcp_ipv6 /* * No need to charge this sock to the relevant IPv6 refcnt debug socks * count here, tcp_create_openreq_child now does this for us, see the -@@ -1344,7 +1397,7 @@ +@@ -1324,7 +1377,7 @@ * This is because we cannot sleep with the original spinlock * held. */ @@ -7434,7 +7137,7 @@ diff -aurN linux-5.4.155/net/ipv6/tcp_ipv6.c mptcp-mptcp_v0.96/net/ipv6/tcp_ipv6 { struct ipv6_pinfo *np = tcp_inet6_sk(sk); struct sk_buff *opt_skb = NULL; -@@ -1361,6 +1414,9 @@ +@@ -1341,6 +1394,9 @@ if (skb->protocol == htons(ETH_P_IP)) return tcp_v4_do_rcv(sk, skb); @@ -7444,7 +7147,7 @@ diff -aurN linux-5.4.155/net/ipv6/tcp_ipv6.c mptcp-mptcp_v0.96/net/ipv6/tcp_ipv6 /* * socket locking is here for SMP purposes as backlog rcv * is currently called with bh processing disabled. 
-@@ -1488,6 +1544,10 @@ +@@ -1468,6 +1524,10 @@ TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin + skb->len - th->doff*4); TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq); @@ -7455,7 +7158,7 @@ diff -aurN linux-5.4.155/net/ipv6/tcp_ipv6.c mptcp-mptcp_v0.96/net/ipv6/tcp_ipv6 TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th); TCP_SKB_CB(skb)->tcp_tw_isn = 0; TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr); -@@ -1502,8 +1562,8 @@ +@@ -1482,8 +1542,8 @@ int sdif = inet6_sdif(skb); const struct tcphdr *th; const struct ipv6hdr *hdr; @@ -7465,7 +7168,7 @@ diff -aurN linux-5.4.155/net/ipv6/tcp_ipv6.c mptcp-mptcp_v0.96/net/ipv6/tcp_ipv6 int ret; struct net *net = dev_net(skb->dev); -@@ -1557,12 +1617,17 @@ +@@ -1537,12 +1597,17 @@ reqsk_put(req); goto csum_error; } @@ -7484,7 +7187,7 @@ diff -aurN linux-5.4.155/net/ipv6/tcp_ipv6.c mptcp-mptcp_v0.96/net/ipv6/tcp_ipv6 nsk = NULL; if (!tcp_filter(sk, skb)) { th = (const struct tcphdr *)skb->data; -@@ -1621,19 +1686,28 @@ +@@ -1601,19 +1666,28 @@ sk_incoming_cpu_update(sk); @@ -7517,7 +7220,7 @@ diff -aurN linux-5.4.155/net/ipv6/tcp_ipv6.c mptcp-mptcp_v0.96/net/ipv6/tcp_ipv6 if (skb_to_free) __kfree_skb(skb_to_free); put_and_return: -@@ -1647,6 +1721,19 @@ +@@ -1627,6 +1701,19 @@ tcp_v6_fill_cb(skb, hdr, th); @@ -7537,7 +7240,7 @@ diff -aurN linux-5.4.155/net/ipv6/tcp_ipv6.c mptcp-mptcp_v0.96/net/ipv6/tcp_ipv6 if (tcp_checksum_complete(skb)) { csum_error: __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS); -@@ -1699,6 +1786,18 @@ +@@ -1679,6 +1766,18 @@ refcounted = false; goto process; } @@ -7556,7 +7259,7 @@ diff -aurN linux-5.4.155/net/ipv6/tcp_ipv6.c mptcp-mptcp_v0.96/net/ipv6/tcp_ipv6 } /* to ACK */ /* fall through */ -@@ -1753,13 +1852,13 @@ +@@ -1733,13 +1832,13 @@ } } @@ -7572,7 +7275,7 @@ diff -aurN linux-5.4.155/net/ipv6/tcp_ipv6.c mptcp-mptcp_v0.96/net/ipv6/tcp_ipv6 .queue_xmit = inet6_csk_xmit, .send_check = tcp_v6_send_check, .rebuild_header = inet6_sk_rebuild_header, -@@ -1790,7 +1889,7 @@ +@@ -1770,7 +1869,7 @@ /* * TCP over IPv4 via INET6 API */ @@ -7581,7 +7284,7 @@ diff -aurN linux-5.4.155/net/ipv6/tcp_ipv6.c mptcp-mptcp_v0.96/net/ipv6/tcp_ipv6 .queue_xmit = ip_queue_xmit, .send_check = tcp_v4_send_check, .rebuild_header = inet_sk_rebuild_header, -@@ -1826,7 +1925,12 @@ +@@ -1806,7 +1905,12 @@ tcp_init_sock(sk); @@ -7595,7 +7298,7 @@ diff -aurN linux-5.4.155/net/ipv6/tcp_ipv6.c mptcp-mptcp_v0.96/net/ipv6/tcp_ipv6 #ifdef CONFIG_TCP_MD5SIG tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific; -@@ -1835,7 +1939,7 @@ +@@ -1815,7 +1919,7 @@ return 0; } @@ -7604,7 +7307,7 @@ diff -aurN linux-5.4.155/net/ipv6/tcp_ipv6.c mptcp-mptcp_v0.96/net/ipv6/tcp_ipv6 { tcp_v4_destroy_sock(sk); inet6_destroy_sock(sk); -@@ -2058,6 +2162,11 @@ +@@ -2038,6 +2142,11 @@ .sysctl_rmem_offset = offsetof(struct net, ipv4.sysctl_tcp_rmem), .max_header = MAX_TCP_HEADER, .obj_size = sizeof(struct tcp6_sock), @@ -7616,7 +7319,7 @@ diff -aurN linux-5.4.155/net/ipv6/tcp_ipv6.c mptcp-mptcp_v0.96/net/ipv6/tcp_ipv6 .slab_flags = SLAB_TYPESAFE_BY_RCU, .twsk_prot = &tcp6_timewait_sock_ops, .rsk_prot = &tcp6_request_sock_ops, -@@ -2068,6 +2177,9 @@ +@@ -2048,6 +2157,9 @@ .compat_getsockopt = compat_tcp_getsockopt, #endif .diag_destroy = tcp_abort, @@ -7626,9 +7329,9 @@ diff -aurN linux-5.4.155/net/ipv6/tcp_ipv6.c mptcp-mptcp_v0.96/net/ipv6/tcp_ipv6 }; /* thinking of making this const? Don't. 
-diff -aurN linux-5.4.155/net/Kconfig mptcp-mptcp_v0.96/net/Kconfig ---- linux-5.4.155/net/Kconfig 2021-10-20 11:40:18.000000000 +0200 -+++ mptcp-mptcp_v0.96/net/Kconfig 2021-10-25 10:05:18.000000000 +0200 +diff -aurN linux-5.4.64/net/Kconfig linux-5.4.64.mptcp/net/Kconfig +--- linux-5.4.64/net/Kconfig 2020-09-09 19:12:37.000000000 +0200 ++++ linux-5.4.64.mptcp/net/Kconfig 2020-09-10 19:25:10.507220869 +0200 @@ -94,6 +94,7 @@ source "net/ipv4/Kconfig" source "net/ipv6/Kconfig" @@ -7637,9 +7340,9 @@ diff -aurN linux-5.4.155/net/Kconfig mptcp-mptcp_v0.96/net/Kconfig endif # if INET -diff -aurN linux-5.4.155/net/Makefile mptcp-mptcp_v0.96/net/Makefile ---- linux-5.4.155/net/Makefile 2021-10-20 11:40:18.000000000 +0200 -+++ mptcp-mptcp_v0.96/net/Makefile 2021-10-25 10:05:18.000000000 +0200 +diff -aurN linux-5.4.64/net/Makefile linux-5.4.64.mptcp/net/Makefile +--- linux-5.4.64/net/Makefile 2020-09-09 19:12:37.000000000 +0200 ++++ linux-5.4.64.mptcp/net/Makefile 2020-09-10 19:25:10.507220869 +0200 @@ -20,6 +20,7 @@ obj-$(CONFIG_XFRM) += xfrm/ obj-$(CONFIG_UNIX_SCM) += unix/ @@ -7648,9 +7351,9 @@ diff -aurN linux-5.4.155/net/Makefile mptcp-mptcp_v0.96/net/Makefile obj-$(CONFIG_BPFILTER) += bpfilter/ obj-$(CONFIG_PACKET) += packet/ obj-$(CONFIG_NET_KEY) += key/ -diff -aurN linux-5.4.155/net/mptcp/Kconfig mptcp-mptcp_v0.96/net/mptcp/Kconfig ---- linux-5.4.155/net/mptcp/Kconfig 1970-01-01 01:00:00.000000000 +0100 -+++ mptcp-mptcp_v0.96/net/mptcp/Kconfig 2021-10-25 10:05:18.000000000 +0200 +diff -aurN linux-5.4.64/net/mptcp/Kconfig linux-5.4.64.mptcp/net/mptcp/Kconfig +--- linux-5.4.64/net/mptcp/Kconfig 1970-01-01 01:00:00.000000000 +0100 ++++ linux-5.4.64.mptcp/net/mptcp/Kconfig 2020-09-10 19:25:10.507220869 +0200 @@ -0,0 +1,154 @@ +# +# MPTCP configuration @@ -7806,9 +7509,9 @@ diff -aurN linux-5.4.155/net/mptcp/Kconfig mptcp-mptcp_v0.96/net/mptcp/Kconfig + default "redundant" if DEFAULT_REDUNDANT + default "default" + -diff -aurN linux-5.4.155/net/mptcp/Makefile mptcp-mptcp_v0.96/net/mptcp/Makefile ---- linux-5.4.155/net/mptcp/Makefile 1970-01-01 01:00:00.000000000 +0100 -+++ mptcp-mptcp_v0.96/net/mptcp/Makefile 2021-10-25 10:05:18.000000000 +0200 +diff -aurN linux-5.4.64/net/mptcp/Makefile linux-5.4.64.mptcp/net/mptcp/Makefile +--- linux-5.4.64/net/mptcp/Makefile 1970-01-01 01:00:00.000000000 +0100 ++++ linux-5.4.64.mptcp/net/mptcp/Makefile 2020-09-10 19:25:10.507220869 +0200 @@ -0,0 +1,25 @@ +# +## Makefile for MultiPath TCP support code. 
@@ -7835,9 +7538,9 @@ diff -aurN linux-5.4.155/net/mptcp/Makefile mptcp-mptcp_v0.96/net/mptcp/Makefile +obj-$(CONFIG_MPTCP_ECF) += mptcp_ecf.o + +mptcp-$(subst m,y,$(CONFIG_IPV6)) += mptcp_ipv6.o -diff -aurN linux-5.4.155/net/mptcp/mctcp_desync.c mptcp-mptcp_v0.96/net/mptcp/mctcp_desync.c ---- linux-5.4.155/net/mptcp/mctcp_desync.c 1970-01-01 01:00:00.000000000 +0100 -+++ mptcp-mptcp_v0.96/net/mptcp/mctcp_desync.c 2021-10-25 10:05:18.000000000 +0200 +diff -aurN linux-5.4.64/net/mptcp/mctcp_desync.c linux-5.4.64.mptcp/net/mptcp/mctcp_desync.c +--- linux-5.4.64/net/mptcp/mctcp_desync.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-5.4.64.mptcp/net/mptcp/mctcp_desync.c 2020-09-10 19:25:10.507220869 +0200 @@ -0,0 +1,193 @@ +/* + * Desynchronized Multi-Channel TCP Congestion Control Algorithm @@ -8032,9 +7735,9 @@ diff -aurN linux-5.4.155/net/mptcp/mctcp_desync.c mptcp-mptcp_v0.96/net/mptcp/mc +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("MCTCP: DESYNCHRONIZED MULTICHANNEL TCP CONGESTION CONTROL"); +MODULE_VERSION("1.0"); -diff -aurN linux-5.4.155/net/mptcp/mptcp_balia.c mptcp-mptcp_v0.96/net/mptcp/mptcp_balia.c ---- linux-5.4.155/net/mptcp/mptcp_balia.c 1970-01-01 01:00:00.000000000 +0100 -+++ mptcp-mptcp_v0.96/net/mptcp/mptcp_balia.c 2021-10-25 10:05:18.000000000 +0200 +diff -aurN linux-5.4.64/net/mptcp/mptcp_balia.c linux-5.4.64.mptcp/net/mptcp/mptcp_balia.c +--- linux-5.4.64/net/mptcp/mptcp_balia.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-5.4.64.mptcp/net/mptcp/mptcp_balia.c 2020-09-10 19:25:10.507220869 +0200 @@ -0,0 +1,261 @@ +/* + * MPTCP implementation - Balia Congestion Control @@ -8297,9 +8000,9 @@ diff -aurN linux-5.4.155/net/mptcp/mptcp_balia.c mptcp-mptcp_v0.96/net/mptcp/mpt +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("MPTCP BALIA CONGESTION CONTROL ALGORITHM"); +MODULE_VERSION("0.1"); -diff -aurN linux-5.4.155/net/mptcp/mptcp_binder.c mptcp-mptcp_v0.96/net/mptcp/mptcp_binder.c ---- linux-5.4.155/net/mptcp/mptcp_binder.c 1970-01-01 01:00:00.000000000 +0100 -+++ mptcp-mptcp_v0.96/net/mptcp/mptcp_binder.c 2021-10-25 10:05:18.000000000 +0200 +diff -aurN linux-5.4.64/net/mptcp/mptcp_binder.c linux-5.4.64.mptcp/net/mptcp/mptcp_binder.c +--- linux-5.4.64/net/mptcp/mptcp_binder.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-5.4.64.mptcp/net/mptcp/mptcp_binder.c 2020-09-10 19:25:10.507220869 +0200 @@ -0,0 +1,494 @@ +#include + @@ -8795,9 +8498,9 @@ diff -aurN linux-5.4.155/net/mptcp/mptcp_binder.c mptcp-mptcp_v0.96/net/mptcp/mp +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("BINDER MPTCP"); +MODULE_VERSION("0.1"); -diff -aurN linux-5.4.155/net/mptcp/mptcp_blest.c mptcp-mptcp_v0.96/net/mptcp/mptcp_blest.c ---- linux-5.4.155/net/mptcp/mptcp_blest.c 1970-01-01 01:00:00.000000000 +0100 -+++ mptcp-mptcp_v0.96/net/mptcp/mptcp_blest.c 2021-10-25 10:05:18.000000000 +0200 +diff -aurN linux-5.4.64/net/mptcp/mptcp_blest.c linux-5.4.64.mptcp/net/mptcp/mptcp_blest.c +--- linux-5.4.64/net/mptcp/mptcp_blest.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-5.4.64.mptcp/net/mptcp/mptcp_blest.c 2020-09-10 19:25:10.507220869 +0200 @@ -0,0 +1,285 @@ +// SPDX-License-Identifier: GPL-2.0 +/* MPTCP Scheduler to reduce HoL-blocking and spurious retransmissions. 
@@ -9084,9 +8787,9 @@ diff -aurN linux-5.4.155/net/mptcp/mptcp_blest.c mptcp-mptcp_v0.96/net/mptcp/mpt +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("BLEST scheduler for MPTCP, based on default minimum RTT scheduler"); +MODULE_VERSION("0.95"); -diff -aurN linux-5.4.155/net/mptcp/mptcp_coupled.c mptcp-mptcp_v0.96/net/mptcp/mptcp_coupled.c ---- linux-5.4.155/net/mptcp/mptcp_coupled.c 1970-01-01 01:00:00.000000000 +0100 -+++ mptcp-mptcp_v0.96/net/mptcp/mptcp_coupled.c 2021-10-25 10:05:18.000000000 +0200 +diff -aurN linux-5.4.64/net/mptcp/mptcp_coupled.c linux-5.4.64.mptcp/net/mptcp/mptcp_coupled.c +--- linux-5.4.64/net/mptcp/mptcp_coupled.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-5.4.64.mptcp/net/mptcp/mptcp_coupled.c 2020-09-10 19:25:10.507220869 +0200 @@ -0,0 +1,262 @@ +/* + * MPTCP implementation - Linked Increase congestion control Algorithm (LIA) @@ -9350,10 +9053,10 @@ diff -aurN linux-5.4.155/net/mptcp/mptcp_coupled.c mptcp-mptcp_v0.96/net/mptcp/m +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("MPTCP LINKED INCREASE CONGESTION CONTROL ALGORITHM"); +MODULE_VERSION("0.1"); -diff -aurN linux-5.4.155/net/mptcp/mptcp_ctrl.c mptcp-mptcp_v0.96/net/mptcp/mptcp_ctrl.c ---- linux-5.4.155/net/mptcp/mptcp_ctrl.c 1970-01-01 01:00:00.000000000 +0100 -+++ mptcp-mptcp_v0.96/net/mptcp/mptcp_ctrl.c 2021-10-25 10:05:18.000000000 +0200 -@@ -0,0 +1,3313 @@ +diff -aurN linux-5.4.64/net/mptcp/mptcp_ctrl.c linux-5.4.64.mptcp/net/mptcp/mptcp_ctrl.c +--- linux-5.4.64/net/mptcp/mptcp_ctrl.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-5.4.64.mptcp/net/mptcp/mptcp_ctrl.c 2020-09-10 19:25:10.507220869 +0200 +@@ -0,0 +1,3309 @@ +/* + * MPTCP implementation - MPTCP-control + * @@ -9662,9 +9365,15 @@ diff -aurN linux-5.4.155/net/mptcp/mptcp_ctrl.c mptcp-mptcp_v0.96/net/mptcp/mptc + const struct sk_buff *skb) +{ + struct mptcp_request_sock *mtreq = mptcp_rsk(req); ++ const struct tcp_sock *tp = tcp_sk(sk); + + inet_rsk(req)->saw_mpc = 1; -+ mtreq->mptcp_ver = mopt->mptcp_ver; ++ ++ /* MPTCP version agreement */ ++ if (mopt->mptcp_ver >= tp->mptcp_ver) ++ mtreq->mptcp_ver = tp->mptcp_ver; ++ else ++ mtreq->mptcp_ver = mopt->mptcp_ver; + + rcu_read_lock(); + local_bh_disable(); @@ -9691,10 +9400,11 @@ diff -aurN linux-5.4.155/net/mptcp/mptcp_ctrl.c mptcp-mptcp_v0.96/net/mptcp/mptc +{ + struct mptcp_request_sock *mtreq = mptcp_rsk(req); + -+ /* Must happen before mptcp_set_key_reqsk to generate the token with -+ * the proper hash algo. 
-+ */ -+ mtreq->mptcp_ver = mopt->mptcp_ver; ++ /* MPTCP version agreement */ ++ if (mopt->mptcp_ver >= tcp_sk(sk)->mptcp_ver) ++ mtreq->mptcp_ver = tcp_sk(sk)->mptcp_ver; ++ else ++ mtreq->mptcp_ver = mopt->mptcp_ver; + + rcu_read_lock(); + local_bh_disable(); @@ -10453,10 +10163,6 @@ diff -aurN linux-5.4.155/net/mptcp/mptcp_ctrl.c mptcp-mptcp_v0.96/net/mptcp/mptc + sk_dst_reset(sub_sk); + } + -+ /* IPV6_TCLASS */ -+ if (sub_sk->sk_family == AF_INET6 && meta_sk->sk_family == AF_INET6) -+ inet6_sk(sub_sk)->tclass = inet6_sk(meta_sk)->tclass; -+ + /* Inherit SO_REUSEADDR */ + sub_sk->sk_reuse = meta_sk->sk_reuse; + @@ -10631,7 +10337,6 @@ diff -aurN linux-5.4.155/net/mptcp/mptcp_ctrl.c mptcp-mptcp_v0.96/net/mptcp/mptc + meta_tp->copied_seq = (u32)idsn; + meta_tp->rcv_nxt = (u32)idsn; + meta_tp->rcv_wup = (u32)idsn; -+ meta_tp->rcv_right_edge = meta_tp->rcv_wup + meta_tp->rcv_wnd; + + meta_tp->snd_wl1 = meta_tp->rcv_nxt - 1; +} @@ -10814,8 +10519,6 @@ diff -aurN linux-5.4.155/net/mptcp/mptcp_ctrl.c mptcp-mptcp_v0.96/net/mptcp/mptc + INIT_LIST_HEAD(&master_tp->tsq_node); + INIT_LIST_HEAD(&master_tp->tsorted_sent_queue); + -+ master_tp->fastopen_req = NULL; -+ + master_sk->sk_tsq_flags = 0; + /* icsk_bind_hash inherited from the meta, but it will be properly set in + * mptcp_create_master_sk. Same operation is done in inet_csk_clone_lock. @@ -11053,7 +10756,7 @@ diff -aurN linux-5.4.155/net/mptcp/mptcp_ctrl.c mptcp-mptcp_v0.96/net/mptcp/mptc + __u32 rcv_window_now = 0; + + if (copied > 0 && !(meta_sk->sk_shutdown & RCV_SHUTDOWN)) { -+ rcv_window_now = tcp_receive_window_now(meta_tp); ++ rcv_window_now = tcp_receive_window(meta_tp); + + /* Optimize, __mptcp_select_window() is not cheap. */ + if (2 * rcv_window_now <= meta_tp->window_clamp) @@ -12111,10 +11814,6 @@ diff -aurN linux-5.4.155/net/mptcp/mptcp_ctrl.c mptcp-mptcp_v0.96/net/mptcp/mptc + if (!sock_flag(sk, SOCK_MPTCP)) + mopt.saw_mpc = 0; + -+ /* If the requested version is higher than what we support, fall back */ -+ if (mopt.saw_mpc && mopt.mptcp_ver > tcp_sk(sk)->mptcp_ver) -+ mopt.saw_mpc = 0; -+ + if (skb->protocol == htons(ETH_P_IP)) { + if (mopt.saw_mpc) { + if (skb_rtable(skb)->rt_flags & @@ -12667,9 +12366,9 @@ diff -aurN linux-5.4.155/net/mptcp/mptcp_ctrl.c mptcp-mptcp_v0.96/net/mptcp/mptc +mptcp_sock_cache_failed: + mptcp_init_failed = true; +} -diff -aurN linux-5.4.155/net/mptcp/mptcp_ecf.c mptcp-mptcp_v0.96/net/mptcp/mptcp_ecf.c ---- linux-5.4.155/net/mptcp/mptcp_ecf.c 1970-01-01 01:00:00.000000000 +0100 -+++ mptcp-mptcp_v0.96/net/mptcp/mptcp_ecf.c 2021-10-25 10:05:18.000000000 +0200 +diff -aurN linux-5.4.64/net/mptcp/mptcp_ecf.c linux-5.4.64.mptcp/net/mptcp/mptcp_ecf.c +--- linux-5.4.64/net/mptcp/mptcp_ecf.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-5.4.64.mptcp/net/mptcp/mptcp_ecf.c 2020-09-10 19:25:10.507220869 +0200 @@ -0,0 +1,195 @@ +// SPDX-License-Identifier: GPL-2.0 +/* MPTCP ECF Scheduler @@ -12866,9 +12565,9 @@ diff -aurN linux-5.4.155/net/mptcp/mptcp_ecf.c mptcp-mptcp_v0.96/net/mptcp/mptcp +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("ECF (Earliest Completion First) scheduler for MPTCP, based on default minimum RTT scheduler"); +MODULE_VERSION("0.95"); -diff -aurN linux-5.4.155/net/mptcp/mptcp_fullmesh.c mptcp-mptcp_v0.96/net/mptcp/mptcp_fullmesh.c ---- linux-5.4.155/net/mptcp/mptcp_fullmesh.c 1970-01-01 01:00:00.000000000 +0100 -+++ mptcp-mptcp_v0.96/net/mptcp/mptcp_fullmesh.c 2021-10-25 10:05:18.000000000 +0200 +diff -aurN linux-5.4.64/net/mptcp/mptcp_fullmesh.c linux-5.4.64.mptcp/net/mptcp/mptcp_fullmesh.c 
+--- linux-5.4.64/net/mptcp/mptcp_fullmesh.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-5.4.64.mptcp/net/mptcp/mptcp_fullmesh.c 2020-09-10 19:25:10.507220869 +0200 @@ -0,0 +1,1938 @@ +#include +#include @@ -14808,10 +14507,10 @@ diff -aurN linux-5.4.155/net/mptcp/mptcp_fullmesh.c mptcp-mptcp_v0.96/net/mptcp/ +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Full-Mesh MPTCP"); +MODULE_VERSION("0.88"); -diff -aurN linux-5.4.155/net/mptcp/mptcp_input.c mptcp-mptcp_v0.96/net/mptcp/mptcp_input.c ---- linux-5.4.155/net/mptcp/mptcp_input.c 1970-01-01 01:00:00.000000000 +0100 -+++ mptcp-mptcp_v0.96/net/mptcp/mptcp_input.c 2021-10-25 10:05:18.000000000 +0200 -@@ -0,0 +1,2548 @@ +diff -aurN linux-5.4.64/net/mptcp/mptcp_input.c linux-5.4.64.mptcp/net/mptcp/mptcp_input.c +--- linux-5.4.64/net/mptcp/mptcp_input.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-5.4.64.mptcp/net/mptcp/mptcp_input.c 2020-09-10 19:25:10.507220869 +0200 +@@ -0,0 +1,2531 @@ +/* + * MPTCP implementation - Sending side + * @@ -15698,7 +15397,7 @@ diff -aurN linux-5.4.155/net/mptcp/mptcp_input.c mptcp-mptcp_v0.96/net/mptcp/mpt + } + + return !before64(end_data_seq, rcv_wup64) && -+ !after64(data_seq, mptcp_get_rcv_nxt_64(meta_tp) + tcp_receive_window_now(meta_tp)); ++ !after64(data_seq, mptcp_get_rcv_nxt_64(meta_tp) + tcp_receive_window(meta_tp)); +} + +/* @return: 0 everything is fine. Just continue processing @@ -15847,7 +15546,7 @@ diff -aurN linux-5.4.155/net/mptcp/mptcp_input.c mptcp-mptcp_v0.96/net/mptcp/mpt + + /* Quick ACK if more 3/4 of the receive window is filled */ + if (after64(tp->mptcp->map_data_seq, -+ rcv_nxt64 + 3 * (tcp_receive_window_now(meta_tp) >> 2))) ++ rcv_nxt64 + 3 * (tcp_receive_window(meta_tp) >> 2))) + tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS); + + } else { @@ -16239,19 +15938,6 @@ diff -aurN linux-5.4.155/net/mptcp/mptcp_input.c mptcp-mptcp_v0.96/net/mptcp/mpt + meta_tp->snd_una = data_ack; +} + -+static void mptcp_stop_subflow_chronos(struct sock *meta_sk, -+ const enum tcp_chrono type) -+{ -+ const struct mptcp_cb *mpcb = tcp_sk(meta_sk)->mpcb; -+ struct mptcp_tcp_sock *mptcp; -+ -+ mptcp_for_each_sub(mpcb, mptcp) { -+ struct sock *sk_it = mptcp_to_sock(mptcp); -+ -+ tcp_chrono_stop(sk_it, type); -+ } -+} -+ +/* Handle the DATA_ACK */ +static bool mptcp_process_data_ack(struct sock *sk, const struct sk_buff *skb) +{ @@ -16377,13 +16063,6 @@ diff -aurN linux-5.4.155/net/mptcp/mptcp_input.c mptcp-mptcp_v0.96/net/mptcp/mpt + if (meta_sk->sk_socket && + test_bit(SOCK_NOSPACE, &meta_sk->sk_socket->flags)) + meta_sk->sk_write_space(meta_sk); -+ -+ if (meta_sk->sk_socket && -+ !test_bit(SOCK_NOSPACE, &meta_sk->sk_socket->flags)) { -+ tcp_chrono_stop(meta_sk, TCP_CHRONO_SNDBUF_LIMITED); -+ mptcp_stop_subflow_chronos(meta_sk, -+ TCP_CHRONO_SNDBUF_LIMITED); -+ } + } + + if (meta_sk->sk_state != TCP_ESTABLISHED) { @@ -17289,6 +16968,9 @@ diff -aurN linux-5.4.155/net/mptcp/mptcp_input.c mptcp-mptcp_v0.96/net/mptcp/mpt + + tp->mptcp->include_mpc = 1; + ++ /* Ensure that fastopen is handled at the meta-level. 
*/ ++ tp->fastopen_req = NULL; ++ + sk_set_socket(sk, meta_sk->sk_socket); + sk->sk_wq = meta_sk->sk_wq; + @@ -17360,9 +17042,9 @@ diff -aurN linux-5.4.155/net/mptcp/mptcp_input.c mptcp-mptcp_v0.96/net/mptcp/mpt + tcp_set_rto(sk); + mptcp_set_rto(sk); +} -diff -aurN linux-5.4.155/net/mptcp/mptcp_ipv4.c mptcp-mptcp_v0.96/net/mptcp/mptcp_ipv4.c ---- linux-5.4.155/net/mptcp/mptcp_ipv4.c 1970-01-01 01:00:00.000000000 +0100 -+++ mptcp-mptcp_v0.96/net/mptcp/mptcp_ipv4.c 2021-10-25 10:05:18.000000000 +0200 +diff -aurN linux-5.4.64/net/mptcp/mptcp_ipv4.c linux-5.4.64.mptcp/net/mptcp/mptcp_ipv4.c +--- linux-5.4.64/net/mptcp/mptcp_ipv4.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-5.4.64.mptcp/net/mptcp/mptcp_ipv4.c 2020-09-10 19:25:10.507220869 +0200 @@ -0,0 +1,431 @@ +/* + * MPTCP implementation - IPv4-specific functions @@ -17795,9 +17477,9 @@ diff -aurN linux-5.4.155/net/mptcp/mptcp_ipv4.c mptcp-mptcp_v0.96/net/mptcp/mptc + kmem_cache_destroy(mptcp_request_sock_ops.slab); + kfree(mptcp_request_sock_ops.slab_name); +} -diff -aurN linux-5.4.155/net/mptcp/mptcp_ipv6.c mptcp-mptcp_v0.96/net/mptcp/mptcp_ipv6.c ---- linux-5.4.155/net/mptcp/mptcp_ipv6.c 1970-01-01 01:00:00.000000000 +0100 -+++ mptcp-mptcp_v0.96/net/mptcp/mptcp_ipv6.c 2021-10-25 10:05:18.000000000 +0200 +diff -aurN linux-5.4.64/net/mptcp/mptcp_ipv6.c linux-5.4.64.mptcp/net/mptcp/mptcp_ipv6.c +--- linux-5.4.64/net/mptcp/mptcp_ipv6.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-5.4.64.mptcp/net/mptcp/mptcp_ipv6.c 2020-09-10 19:25:10.507220869 +0200 @@ -0,0 +1,479 @@ +/* + * MPTCP implementation - IPv6-specific functions @@ -18278,9 +17960,9 @@ diff -aurN linux-5.4.155/net/mptcp/mptcp_ipv6.c mptcp-mptcp_v0.96/net/mptcp/mptc + kmem_cache_destroy(mptcp6_request_sock_ops.slab); + kfree(mptcp6_request_sock_ops.slab_name); +} -diff -aurN linux-5.4.155/net/mptcp/mptcp_ndiffports.c mptcp-mptcp_v0.96/net/mptcp/mptcp_ndiffports.c ---- linux-5.4.155/net/mptcp/mptcp_ndiffports.c 1970-01-01 01:00:00.000000000 +0100 -+++ mptcp-mptcp_v0.96/net/mptcp/mptcp_ndiffports.c 2021-10-25 10:05:18.000000000 +0200 +diff -aurN linux-5.4.64/net/mptcp/mptcp_ndiffports.c linux-5.4.64.mptcp/net/mptcp/mptcp_ndiffports.c +--- linux-5.4.64/net/mptcp/mptcp_ndiffports.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-5.4.64.mptcp/net/mptcp/mptcp_ndiffports.c 2020-09-10 19:25:10.507220869 +0200 @@ -0,0 +1,174 @@ +#include + @@ -18456,9 +18138,9 @@ diff -aurN linux-5.4.155/net/mptcp/mptcp_ndiffports.c mptcp-mptcp_v0.96/net/mptc +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("NDIFF-PORTS MPTCP"); +MODULE_VERSION("0.88"); -diff -aurN linux-5.4.155/net/mptcp/mptcp_netlink.c mptcp-mptcp_v0.96/net/mptcp/mptcp_netlink.c ---- linux-5.4.155/net/mptcp/mptcp_netlink.c 1970-01-01 01:00:00.000000000 +0100 -+++ mptcp-mptcp_v0.96/net/mptcp/mptcp_netlink.c 2021-10-25 10:05:18.000000000 +0200 +diff -aurN linux-5.4.64/net/mptcp/mptcp_netlink.c linux-5.4.64.mptcp/net/mptcp/mptcp_netlink.c +--- linux-5.4.64/net/mptcp/mptcp_netlink.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-5.4.64.mptcp/net/mptcp/mptcp_netlink.c 2020-09-10 19:25:10.507220869 +0200 @@ -0,0 +1,1272 @@ +// SPDX-License-Identifier: GPL-2.0 +/* MPTCP implementation - Netlink Path Manager @@ -19732,9 +19414,9 @@ diff -aurN linux-5.4.155/net/mptcp/mptcp_netlink.c mptcp-mptcp_v0.96/net/mptcp/m +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("MPTCP netlink-based path manager"); +MODULE_ALIAS_GENL_FAMILY(MPTCP_GENL_NAME); -diff -aurN linux-5.4.155/net/mptcp/mptcp_olia.c mptcp-mptcp_v0.96/net/mptcp/mptcp_olia.c ---- 
linux-5.4.155/net/mptcp/mptcp_olia.c 1970-01-01 01:00:00.000000000 +0100 -+++ mptcp-mptcp_v0.96/net/mptcp/mptcp_olia.c 2021-10-25 10:05:18.000000000 +0200 +diff -aurN linux-5.4.64/net/mptcp/mptcp_olia.c linux-5.4.64.mptcp/net/mptcp/mptcp_olia.c +--- linux-5.4.64/net/mptcp/mptcp_olia.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-5.4.64.mptcp/net/mptcp/mptcp_olia.c 2020-09-10 19:25:10.507220869 +0200 @@ -0,0 +1,318 @@ +/* + * MPTCP implementation - OPPORTUNISTIC LINKED INCREASES CONGESTION CONTROL: @@ -20054,10 +19736,10 @@ diff -aurN linux-5.4.155/net/mptcp/mptcp_olia.c mptcp-mptcp_v0.96/net/mptcp/mptc +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("MPTCP COUPLED CONGESTION CONTROL"); +MODULE_VERSION("0.1"); -diff -aurN linux-5.4.155/net/mptcp/mptcp_output.c mptcp-mptcp_v0.96/net/mptcp/mptcp_output.c ---- linux-5.4.155/net/mptcp/mptcp_output.c 1970-01-01 01:00:00.000000000 +0100 -+++ mptcp-mptcp_v0.96/net/mptcp/mptcp_output.c 2021-10-25 10:05:18.000000000 +0200 -@@ -0,0 +1,2009 @@ +diff -aurN linux-5.4.64/net/mptcp/mptcp_output.c linux-5.4.64.mptcp/net/mptcp/mptcp_output.c +--- linux-5.4.64/net/mptcp/mptcp_output.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-5.4.64.mptcp/net/mptcp/mptcp_output.c 2020-09-10 19:25:10.507220869 +0200 +@@ -0,0 +1,1997 @@ +/* + * MPTCP implementation - Sending side + * @@ -20861,7 +20543,6 @@ diff -aurN linux-5.4.155/net/mptcp/mptcp_output.c mptcp-mptcp_v0.96/net/mptcp/mp + int push_one, gfp_t gfp) +{ + struct tcp_sock *meta_tp = tcp_sk(meta_sk), *subtp; -+ bool is_rwnd_limited = false; + struct mptcp_tcp_sock *mptcp; + struct sock *subsk = NULL; + struct mptcp_cb *mpcb = meta_tp->mpcb; @@ -20909,10 +20590,8 @@ diff -aurN linux-5.4.155/net/mptcp/mptcp_output.c mptcp-mptcp_v0.96/net/mptcp/mp + if (skb_unclone(skb, GFP_ATOMIC)) + break; + -+ if (unlikely(!tcp_snd_wnd_test(meta_tp, skb, mss_now))) { -+ is_rwnd_limited = true; ++ if (unlikely(!tcp_snd_wnd_test(meta_tp, skb, mss_now))) + break; -+ } + + /* Force tso_segs to 1 by using UINT_MAX. 
+ * We actually don't care about the exact number of segments @@ -20995,11 +20674,6 @@ diff -aurN linux-5.4.155/net/mptcp/mptcp_output.c mptcp-mptcp_v0.96/net/mptcp/mp + break; + } + -+ if (is_rwnd_limited) -+ tcp_chrono_start(meta_sk, TCP_CHRONO_RWND_LIMITED); -+ else -+ tcp_chrono_stop(meta_sk, TCP_CHRONO_RWND_LIMITED); -+ + mptcp_for_each_sub(mpcb, mptcp) { + subsk = mptcp_to_sock(mptcp); + subtp = tcp_sk(subsk); @@ -21297,10 +20971,6 @@ diff -aurN linux-5.4.155/net/mptcp/mptcp_output.c mptcp-mptcp_v0.96/net/mptcp/mp + + meta_tp->rcv_wnd = tp->rcv_wnd; + meta_tp->rcv_wup = meta_tp->rcv_nxt; -+ /* no need to use tcp_update_rcv_right_edge, because at the meta level -+ * right edge cannot go back -+ */ -+ meta_tp->rcv_right_edge = meta_tp->rcv_wnd + meta_tp->rcv_wup; + + return new_win; +} @@ -22067,9 +21737,9 @@ diff -aurN linux-5.4.155/net/mptcp/mptcp_output.c mptcp-mptcp_v0.96/net/mptcp/mp + return max(xmit_size_goal, mss_now); +} + -diff -aurN linux-5.4.155/net/mptcp/mptcp_pm.c mptcp-mptcp_v0.96/net/mptcp/mptcp_pm.c ---- linux-5.4.155/net/mptcp/mptcp_pm.c 1970-01-01 01:00:00.000000000 +0100 -+++ mptcp-mptcp_v0.96/net/mptcp/mptcp_pm.c 2021-10-25 10:05:18.000000000 +0200 +diff -aurN linux-5.4.64/net/mptcp/mptcp_pm.c linux-5.4.64.mptcp/net/mptcp/mptcp_pm.c +--- linux-5.4.64/net/mptcp/mptcp_pm.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-5.4.64.mptcp/net/mptcp/mptcp_pm.c 2020-09-10 19:25:10.507220869 +0200 @@ -0,0 +1,226 @@ +/* + * MPTCP implementation - MPTCP-subflow-management @@ -22297,10 +21967,10 @@ diff -aurN linux-5.4.155/net/mptcp/mptcp_pm.c mptcp-mptcp_v0.96/net/mptcp/mptcp_ + return mptcp_set_default_path_manager(CONFIG_DEFAULT_MPTCP_PM); +} +late_initcall(mptcp_path_manager_default); -diff -aurN linux-5.4.155/net/mptcp/mptcp_redundant.c mptcp-mptcp_v0.96/net/mptcp/mptcp_redundant.c ---- linux-5.4.155/net/mptcp/mptcp_redundant.c 1970-01-01 01:00:00.000000000 +0100 -+++ mptcp-mptcp_v0.96/net/mptcp/mptcp_redundant.c 2021-10-25 10:05:18.000000000 +0200 -@@ -0,0 +1,395 @@ +diff -aurN linux-5.4.64/net/mptcp/mptcp_redundant.c linux-5.4.64.mptcp/net/mptcp/mptcp_redundant.c +--- linux-5.4.64/net/mptcp/mptcp_redundant.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-5.4.64.mptcp/net/mptcp/mptcp_redundant.c 2020-09-10 19:25:10.507220869 +0200 +@@ -0,0 +1,392 @@ +/* + * MPTCP Scheduler to reduce latency and jitter. + * @@ -22327,10 +21997,9 @@ diff -aurN linux-5.4.155/net/mptcp/mptcp_redundant.c mptcp-mptcp_v0.96/net/mptcp +struct redsched_priv { + /* The skb or NULL */ + struct sk_buff *skb; -+ /* Start/end sequence number of the skb. This number should be checked ++ /* End sequence number of the skb. 
This number should be checked + * to be valid before the skb field is used + */ -+ u32 skb_start_seq; + u32 skb_end_seq; +}; + @@ -22492,7 +22161,7 @@ diff -aurN linux-5.4.155/net/mptcp/mptcp_redundant.c mptcp-mptcp_v0.96/net/mptcp + struct tcp_sock *meta_tp = tcp_sk(meta_sk); + + if (red_p->skb && -+ (!after(red_p->skb_start_seq, meta_tp->snd_una) || ++ (!after(red_p->skb_end_seq, meta_tp->snd_una) || + after(red_p->skb_end_seq, meta_tp->snd_nxt))) + red_p->skb = NULL; +} @@ -22610,7 +22279,6 @@ diff -aurN linux-5.4.155/net/mptcp/mptcp_redundant.c mptcp-mptcp_v0.96/net/mptcp + if (skb && redsched_use_subflow(meta_sk, active_valid_sks, tp, + skb)) { + red_p->skb = skb; -+ red_p->skb_start_seq = TCP_SKB_CB(skb)->seq; + red_p->skb_end_seq = TCP_SKB_CB(skb)->end_seq; + redsched_update_next_subflow(tp, red_cb); + *subsk = (struct sock *)tp; @@ -22638,7 +22306,6 @@ diff -aurN linux-5.4.155/net/mptcp/mptcp_redundant.c mptcp-mptcp_v0.96/net/mptcp + if (skb && redsched_use_subflow(meta_sk, active_valid_sks, tp, + skb)) { + red_p->skb = skb; -+ red_p->skb_start_seq = TCP_SKB_CB(skb)->seq; + red_p->skb_end_seq = TCP_SKB_CB(skb)->end_seq; + redsched_update_next_subflow(tp, red_cb); + *subsk = (struct sock *)tp; @@ -22696,9 +22363,9 @@ diff -aurN linux-5.4.155/net/mptcp/mptcp_redundant.c mptcp-mptcp_v0.96/net/mptcp +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("REDUNDANT MPTCP"); +MODULE_VERSION("0.90"); -diff -aurN linux-5.4.155/net/mptcp/mptcp_rr.c mptcp-mptcp_v0.96/net/mptcp/mptcp_rr.c ---- linux-5.4.155/net/mptcp/mptcp_rr.c 1970-01-01 01:00:00.000000000 +0100 -+++ mptcp-mptcp_v0.96/net/mptcp/mptcp_rr.c 2021-10-25 10:05:18.000000000 +0200 +diff -aurN linux-5.4.64/net/mptcp/mptcp_rr.c linux-5.4.64.mptcp/net/mptcp/mptcp_rr.c +--- linux-5.4.64/net/mptcp/mptcp_rr.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-5.4.64.mptcp/net/mptcp/mptcp_rr.c 2020-09-10 19:25:10.507220869 +0200 @@ -0,0 +1,309 @@ +/* MPTCP Scheduler module selector. Highly inspired by tcp_cong.c */ + @@ -23009,13 +22676,12 @@ diff -aurN linux-5.4.155/net/mptcp/mptcp_rr.c mptcp-mptcp_v0.96/net/mptcp/mptcp_ +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("ROUNDROBIN MPTCP"); +MODULE_VERSION("0.89"); -diff -aurN linux-5.4.155/net/mptcp/mptcp_sched.c mptcp-mptcp_v0.96/net/mptcp/mptcp_sched.c ---- linux-5.4.155/net/mptcp/mptcp_sched.c 1970-01-01 01:00:00.000000000 +0100 -+++ mptcp-mptcp_v0.96/net/mptcp/mptcp_sched.c 2021-10-25 10:05:18.000000000 +0200 -@@ -0,0 +1,677 @@ +diff -aurN linux-5.4.64/net/mptcp/mptcp_sched.c linux-5.4.64.mptcp/net/mptcp/mptcp_sched.c +--- linux-5.4.64/net/mptcp/mptcp_sched.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-5.4.64.mptcp/net/mptcp/mptcp_sched.c 2020-09-10 19:25:10.511220802 +0200 +@@ -0,0 +1,647 @@ +/* MPTCP Scheduler module selector. Highly inspired by tcp_cong.c */ + -+#include +#include +#include +#include @@ -23053,38 +22719,12 @@ diff -aurN linux-5.4.155/net/mptcp/mptcp_sched.c mptcp-mptcp_v0.96/net/mptcp/mpt +} +EXPORT_SYMBOL_GPL(mptcp_is_def_unavailable); + -+/* estimate number of segments currently in flight + unsent in -+ * the subflow socket. -+ */ -+static int mptcp_subflow_queued(struct sock *sk, u32 max_tso_segs) -+{ -+ const struct tcp_sock *tp = tcp_sk(sk); -+ unsigned int queued; -+ -+ /* estimate the max number of segments in the write queue -+ * this is an overestimation, avoiding to iterate over the queue -+ * to make a better estimation. 
-+ * Having only one skb in the queue however might trigger tso deferral, -+ * delaying the sending of a tso segment in the hope that skb_entail -+ * will append more data to the skb soon. -+ * Therefore, in the case only one skb is in the queue, we choose to -+ * potentially underestimate, risking to schedule one skb too many onto -+ * the subflow rather than not enough. -+ */ -+ if (sk->sk_write_queue.qlen > 1) -+ queued = sk->sk_write_queue.qlen * max_tso_segs; -+ else -+ queued = sk->sk_write_queue.qlen; -+ -+ return queued + tcp_packets_in_flight(tp); -+} -+ +static bool mptcp_is_temp_unavailable(struct sock *sk, + const struct sk_buff *skb, + bool zero_wnd_test) +{ + const struct tcp_sock *tp = tcp_sk(sk); -+ unsigned int mss_now; ++ unsigned int mss_now, space, in_flight; + + if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss) { + /* If SACK is disabled, and we got a loss, TCP does not exit @@ -23108,10 +22748,19 @@ diff -aurN linux-5.4.155/net/mptcp/mptcp_sched.c mptcp-mptcp_v0.96/net/mptcp/mpt + return true; + } + ++ in_flight = tcp_packets_in_flight(tp); ++ /* Not even a single spot in the cwnd */ ++ if (in_flight >= tp->snd_cwnd) ++ return true; ++ + mss_now = tcp_current_mss(sk); + -+ /* Not even a single spot in the cwnd */ -+ if (mptcp_subflow_queued(sk, tcp_tso_segs(sk, mss_now)) >= tp->snd_cwnd) ++ /* Now, check if what is queued in the subflow's send-queue ++ * already fills the cwnd. ++ */ ++ space = (tp->snd_cwnd - in_flight) * mss_now; ++ ++ if (tp->write_seq - tp->snd_nxt >= space) + return true; + + if (zero_wnd_test && !before(tp->write_seq, tcp_wnd_end(tp))) @@ -23405,22 +23054,14 @@ diff -aurN linux-5.4.155/net/mptcp/mptcp_sched.c mptcp-mptcp_v0.96/net/mptcp/mpt + if (!skb && meta_sk->sk_socket && + test_bit(SOCK_NOSPACE, &meta_sk->sk_socket->flags) && + sk_stream_wspace(meta_sk) < sk_stream_min_wspace(meta_sk)) { -+ struct sock *subsk; -+ -+ /* meta is send buffer limited */ -+ tcp_chrono_start(meta_sk, TCP_CHRONO_SNDBUF_LIMITED); -+ -+ subsk = mpcb->sched_ops->get_subflow(meta_sk, -+ NULL, false); ++ struct sock *subsk = mpcb->sched_ops->get_subflow(meta_sk, NULL, ++ false); + if (!subsk) + return NULL; + + skb = mptcp_rcv_buf_optimization(subsk, 0); + if (skb) + *reinject = -1; -+ else -+ tcp_chrono_start(subsk, -+ TCP_CHRONO_SNDBUF_LIMITED); + } + } + return skb; @@ -23432,10 +23073,11 @@ diff -aurN linux-5.4.155/net/mptcp/mptcp_sched.c mptcp-mptcp_v0.96/net/mptcp/mpt + unsigned int *limit) +{ + struct sk_buff *skb = __mptcp_next_segment(meta_sk, reinject); -+ unsigned int mss_now; -+ u32 max_len, gso_max_segs, max_segs, max_tso_segs, window; ++ unsigned int mss_now, in_flight_space; ++ int remaining_in_flight_space; ++ u32 max_len, max_segs, window; + struct tcp_sock *subtp; -+ int queued; ++ u16 gso_max_segs; + + /* As we set it, we have to reset it as well. 
*/ + *limit = 0; @@ -23451,12 +23093,6 @@ diff -aurN linux-5.4.155/net/mptcp/mptcp_sched.c mptcp-mptcp_v0.96/net/mptcp/mpt + mss_now = tcp_current_mss(*subsk); + + if (!*reinject && unlikely(!tcp_snd_wnd_test(tcp_sk(meta_sk), skb, mss_now))) { -+ /* an active flow is selected, but segment will not be sent due -+ * to no more space in send window -+ * this means the meta is receive window limited -+ * the subflow might also be, if we have nothing to reinject -+ */ -+ tcp_chrono_start(meta_sk, TCP_CHRONO_RWND_LIMITED); + skb = mptcp_rcv_buf_optimization(*subsk, 1); + if (skb) + *reinject = -1; @@ -23464,39 +23100,40 @@ diff -aurN linux-5.4.155/net/mptcp/mptcp_sched.c mptcp-mptcp_v0.96/net/mptcp/mpt + return NULL; + } + -+ if (!*reinject) { -+ /* this will stop any other chronos on the meta */ -+ tcp_chrono_start(meta_sk, TCP_CHRONO_BUSY); -+ } -+ + /* No splitting required, as we will only send one single segment */ + if (skb->len <= mss_now) + return skb; + -+ max_tso_segs = tcp_tso_segs(*subsk, tcp_current_mss(*subsk)); -+ queued = mptcp_subflow_queued(*subsk, max_tso_segs); -+ -+ /* this condition should already have been established in -+ * mptcp_is_temp_unavailable when selecting available flows ++ /* The following is similar to tcp_mss_split_point, but ++ * we do not care about nagle, because we will anyways ++ * use TCP_NAGLE_PUSH, which overrides this. + */ -+ WARN_ONCE(subtp->snd_cwnd <= queued, "Selected subflow no cwnd room"); + + gso_max_segs = (*subsk)->sk_gso_max_segs; + if (!gso_max_segs) /* No gso supported on the subflow's NIC */ + gso_max_segs = 1; -+ -+ max_segs = min_t(unsigned int, subtp->snd_cwnd - queued, gso_max_segs); ++ max_segs = min_t(unsigned int, tcp_cwnd_test(subtp, skb), gso_max_segs); + if (!max_segs) + return NULL; + -+ /* if there is room for a segment, schedule up to a complete TSO -+ * segment to avoid TSO splitting. Even if it is more than allowed by -+ * the congestion window. ++ /* max_len is what would fit in the cwnd (respecting the 2GSO-limit of ++ * tcp_cwnd_test), but ignoring whatever was already queued. + */ -+ max_segs = max_t(unsigned int, max_tso_segs, max_segs); -+ + max_len = min(mss_now * max_segs, skb->len); + ++ in_flight_space = (subtp->snd_cwnd - tcp_packets_in_flight(subtp)) * mss_now; ++ remaining_in_flight_space = (int)in_flight_space - (subtp->write_seq - subtp->snd_nxt); ++ ++ if (remaining_in_flight_space <= 0) ++ WARN_ONCE(1, "in_flight %u cwnd %u wseq %u snxt %u mss_now %u cache %u", ++ tcp_packets_in_flight(subtp), subtp->snd_cwnd, ++ subtp->write_seq, subtp->snd_nxt, mss_now, subtp->mss_cache); ++ else ++ /* max_len now fits exactly in the write-queue, taking into ++ * account what was already queued. 
++ */ ++ max_len = min_t(u32, max_len, remaining_in_flight_space); ++ + window = tcp_wnd_end(subtp) - subtp->write_seq; + + /* max_len now also respects the announced receive-window */ @@ -23690,9 +23327,9 @@ diff -aurN linux-5.4.155/net/mptcp/mptcp_sched.c mptcp-mptcp_v0.96/net/mptcp/mpt + return mptcp_set_default_scheduler(CONFIG_DEFAULT_MPTCP_SCHED); +} +late_initcall(mptcp_scheduler_default); -diff -aurN linux-5.4.155/net/mptcp/mptcp_wvegas.c mptcp-mptcp_v0.96/net/mptcp/mptcp_wvegas.c ---- linux-5.4.155/net/mptcp/mptcp_wvegas.c 1970-01-01 01:00:00.000000000 +0100 -+++ mptcp-mptcp_v0.96/net/mptcp/mptcp_wvegas.c 2021-10-25 10:05:18.000000000 +0200 +diff -aurN linux-5.4.64/net/mptcp/mptcp_wvegas.c linux-5.4.64.mptcp/net/mptcp/mptcp_wvegas.c +--- linux-5.4.64/net/mptcp/mptcp_wvegas.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-5.4.64.mptcp/net/mptcp/mptcp_wvegas.c 2020-09-10 19:25:10.511220802 +0200 @@ -0,0 +1,271 @@ +/* + * MPTCP implementation - WEIGHTED VEGAS @@ -23965,9 +23602,9 @@ diff -aurN linux-5.4.155/net/mptcp/mptcp_wvegas.c mptcp-mptcp_v0.96/net/mptcp/mp +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("MPTCP wVegas"); +MODULE_VERSION("0.1"); -diff -aurN linux-5.4.155/net/socket.c mptcp-mptcp_v0.96/net/socket.c ---- linux-5.4.155/net/socket.c 2021-10-20 11:40:18.000000000 +0200 -+++ mptcp-mptcp_v0.96/net/socket.c 2021-10-25 10:05:18.000000000 +0200 +diff -aurN linux-5.4.64/net/socket.c linux-5.4.64.mptcp/net/socket.c +--- linux-5.4.64/net/socket.c 2020-09-09 19:12:37.000000000 +0200 ++++ linux-5.4.64.mptcp/net/socket.c 2020-09-10 19:25:10.511220802 +0200 @@ -91,6 +91,7 @@ #include @@ -23976,7 +23613,7 @@ diff -aurN linux-5.4.155/net/socket.c mptcp-mptcp_v0.96/net/socket.c #include #include -@@ -1339,6 +1340,7 @@ +@@ -1350,6 +1351,7 @@ int err; struct socket *sock; const struct net_proto_family *pf; @@ -23984,7 +23621,7 @@ diff -aurN linux-5.4.155/net/socket.c mptcp-mptcp_v0.96/net/socket.c /* * Check protocol is in range -@@ -1359,6 +1361,9 @@ +@@ -1370,6 +1372,9 @@ family = PF_PACKET; } @@ -23994,7 +23631,7 @@ diff -aurN linux-5.4.155/net/socket.c mptcp-mptcp_v0.96/net/socket.c err = security_socket_create(family, type, protocol, kern); if (err) return err; -@@ -1408,6 +1413,10 @@ +@@ -1419,6 +1424,10 @@ if (err < 0) goto out_module_put; @@ -24005,9 +23642,9 @@ diff -aurN linux-5.4.155/net/socket.c mptcp-mptcp_v0.96/net/socket.c /* * Now to bump the refcnt of the [loadable] module that owns this * socket at sock_release time we decrement its refcnt. -diff -aurN linux-5.4.155/tools/include/uapi/linux/bpf.h mptcp-mptcp_v0.96/tools/include/uapi/linux/bpf.h ---- linux-5.4.155/tools/include/uapi/linux/bpf.h 2021-10-20 11:40:18.000000000 +0200 -+++ mptcp-mptcp_v0.96/tools/include/uapi/linux/bpf.h 2021-10-25 10:05:18.000000000 +0200 +diff -aurN linux-5.4.64/tools/include/uapi/linux/bpf.h linux-5.4.64.mptcp/tools/include/uapi/linux/bpf.h +--- linux-5.4.64/tools/include/uapi/linux/bpf.h 2020-09-09 19:12:37.000000000 +0200 ++++ linux-5.4.64.mptcp/tools/include/uapi/linux/bpf.h 2020-09-10 19:25:10.511220802 +0200 @@ -3438,6 +3438,7 @@ BPF_TCP_LISTEN, BPF_TCP_CLOSING, /* Now a valid state */ @@ -24016,3 +23653,546 @@ diff -aurN linux-5.4.155/tools/include/uapi/linux/bpf.h mptcp-mptcp_v0.96/tools/ BPF_TCP_MAX_STATES /* Leave at the end! 
*/ }; +diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c +index 221e055623c1..49555fee79b4 100644 +--- a/net/ipv4/tcp_output.c ++++ b/net/ipv4/tcp_output.c +@@ -1707,8 +1707,11 @@ static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited) + * 2) not cwnd limited (this else condition) + * 3) no more data to send (tcp_write_queue_empty()) + * 4) application is hitting buffer limit (SOCK_NOSPACE) ++ * 5) For MPTCP subflows, the scheduler determines ++ * sndbuf limited. + */ + if (tcp_write_queue_empty(sk) && sk->sk_socket && ++ !(mptcp(tcp_sk(sk)) && !is_meta_sk(sk)) && + test_bit(SOCK_NOSPACE, &sk->sk_socket->flags) && + (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) + tcp_chrono_start(sk, TCP_CHRONO_SNDBUF_LIMITED); +diff --git a/net/mptcp/mptcp_input.c b/net/mptcp/mptcp_input.c +index 3d9eecd74683..1a08d94e8249 100644 +--- a/net/mptcp/mptcp_input.c ++++ b/net/mptcp/mptcp_input.c +@@ -1425,6 +1425,19 @@ static void mptcp_snd_una_update(struct tcp_sock *meta_tp, u32 data_ack) + meta_tp->snd_una = data_ack; + } + ++static void mptcp_stop_subflow_chronos(struct sock *meta_sk, ++ const enum tcp_chrono type) ++{ ++ const struct mptcp_cb *mpcb = tcp_sk(meta_sk)->mpcb; ++ struct mptcp_tcp_sock *mptcp; ++ ++ mptcp_for_each_sub(mpcb, mptcp) { ++ struct sock *sk_it = mptcp_to_sock(mptcp); ++ ++ tcp_chrono_stop(sk_it, type); ++ } ++} ++ + /* Handle the DATA_ACK */ + static bool mptcp_process_data_ack(struct sock *sk, const struct sk_buff *skb) + { +@@ -1550,6 +1563,13 @@ static bool mptcp_process_data_ack(struct sock *sk, const struct sk_buff *skb) + if (meta_sk->sk_socket && + test_bit(SOCK_NOSPACE, &meta_sk->sk_socket->flags)) + meta_sk->sk_write_space(meta_sk); ++ ++ if (meta_sk->sk_socket && ++ !test_bit(SOCK_NOSPACE, &meta_sk->sk_socket->flags)) { ++ tcp_chrono_stop(meta_sk, TCP_CHRONO_SNDBUF_LIMITED); ++ mptcp_stop_subflow_chronos(meta_sk, ++ TCP_CHRONO_SNDBUF_LIMITED); ++ } + } + + if (meta_sk->sk_state != TCP_ESTABLISHED) { +diff --git a/net/mptcp/mptcp_output.c b/net/mptcp/mptcp_output.c +index fc71d41c608d..bf6b5324b823 100644 +--- a/net/mptcp/mptcp_output.c ++++ b/net/mptcp/mptcp_output.c +@@ -801,6 +801,7 @@ bool mptcp_write_xmit(struct sock *meta_sk, unsigned int mss_now, int nonagle, + int push_one, gfp_t gfp) + { + struct tcp_sock *meta_tp = tcp_sk(meta_sk), *subtp; ++ bool is_rwnd_limited = false; + struct mptcp_tcp_sock *mptcp; + struct sock *subsk = NULL; + struct mptcp_cb *mpcb = meta_tp->mpcb; +@@ -848,8 +849,10 @@ bool mptcp_write_xmit(struct sock *meta_sk, unsigned int mss_now, int nonagle, + if (skb_unclone(skb, GFP_ATOMIC)) + break; + +- if (unlikely(!tcp_snd_wnd_test(meta_tp, skb, mss_now))) ++ if (unlikely(!tcp_snd_wnd_test(meta_tp, skb, mss_now))) { ++ is_rwnd_limited = true; + break; ++ } + + /* Force tso_segs to 1 by using UINT_MAX. 
+ * We actually don't care about the exact number of segments +@@ -932,6 +935,11 @@ bool mptcp_write_xmit(struct sock *meta_sk, unsigned int mss_now, int nonagle, + break; + } + ++ if (is_rwnd_limited) ++ tcp_chrono_start(meta_sk, TCP_CHRONO_RWND_LIMITED); ++ else ++ tcp_chrono_stop(meta_sk, TCP_CHRONO_RWND_LIMITED); ++ + mptcp_for_each_sub(mpcb, mptcp) { + subsk = mptcp_to_sock(mptcp); + subtp = tcp_sk(subsk); +diff --git a/net/mptcp/mptcp_sched.c b/net/mptcp/mptcp_sched.c +index 39a997f84209..a4d8c4a5e52d 100644 +--- a/net/mptcp/mptcp_sched.c ++++ b/net/mptcp/mptcp_sched.c +@@ -372,14 +372,22 @@ static struct sk_buff *__mptcp_next_segment(struct sock *meta_sk, int *reinject) + if (!skb && meta_sk->sk_socket && + test_bit(SOCK_NOSPACE, &meta_sk->sk_socket->flags) && + sk_stream_wspace(meta_sk) < sk_stream_min_wspace(meta_sk)) { +- struct sock *subsk = mpcb->sched_ops->get_subflow(meta_sk, NULL, +- false); ++ struct sock *subsk; ++ ++ /* meta is send buffer limited */ ++ tcp_chrono_start(meta_sk, TCP_CHRONO_SNDBUF_LIMITED); ++ ++ subsk = mpcb->sched_ops->get_subflow(meta_sk, ++ NULL, false); + if (!subsk) + return NULL; + + skb = mptcp_rcv_buf_optimization(subsk, 0); + if (skb) + *reinject = -1; ++ else ++ tcp_chrono_start(subsk, ++ TCP_CHRONO_SNDBUF_LIMITED); + } + } + return skb; +@@ -411,6 +419,12 @@ struct sk_buff *mptcp_next_segment(struct sock *meta_sk, + mss_now = tcp_current_mss(*subsk); + + if (!*reinject && unlikely(!tcp_snd_wnd_test(tcp_sk(meta_sk), skb, mss_now))) { ++ /* an active flow is selected, but segment will not be sent due ++ * to no more space in send window ++ * this means the meta is receive window limited ++ * the subflow might also be, if we have nothing to reinject ++ */ ++ tcp_chrono_start(meta_sk, TCP_CHRONO_RWND_LIMITED); + skb = mptcp_rcv_buf_optimization(*subsk, 1); + if (skb) + *reinject = -1; +@@ -418,6 +432,11 @@ struct sk_buff *mptcp_next_segment(struct sock *meta_sk, + return NULL; + } + ++ if (!*reinject) { ++ /* this will stop any other chronos on the meta */ ++ tcp_chrono_start(meta_sk, TCP_CHRONO_BUSY); ++ } ++ + /* No splitting required, as we will only send one single segment */ + if (skb->len <= mss_now) + return skb; +diff --git a/include/linux/tcp.h b/include/linux/tcp.h +index 04fcc5219f7b..970fb566f94d 100644 +--- a/include/linux/tcp.h ++++ b/include/linux/tcp.h +@@ -348,6 +348,7 @@ struct tcp_sock { + u32 rate_interval_us; /* saved rate sample: time elapsed */ + + u32 rcv_wnd; /* Current receiver window */ ++ u32 rcv_right_edge; /* Highest announced right edge */ + u32 write_seq; /* Tail(+1) of data held in tcp send buffer */ + u32 notsent_lowat; /* TCP_NOTSENT_LOWAT */ + u32 pushed_seq; /* Last pushed seq, required to talk to windows */ +diff --git a/include/net/tcp.h b/include/net/tcp.h +index 3e4f5179a835..93d53f5d5359 100644 +--- a/include/net/tcp.h ++++ b/include/net/tcp.h +@@ -850,6 +850,32 @@ static inline u32 tcp_receive_window(const struct tcp_sock *tp) + return (u32) win; + } + ++/* right edge only moves forward, even if window shrinks due ++ * to mptcp meta ++ */ ++static inline void tcp_update_rcv_right_edge(struct tcp_sock *tp) ++{ ++ if (after(tp->rcv_wup + tp->rcv_wnd, tp->rcv_right_edge)) ++ tp->rcv_right_edge = tp->rcv_wup + tp->rcv_wnd; ++} ++ ++/* Compute receive window which will never shrink. The way MPTCP handles ++ * the receive window can cause the effective right edge to shrink, ++ * causing valid segments to become out of window. 
++ * This function should be used when checking if a segment is valid for ++ * the max right edge announced. ++ */ ++static inline u32 tcp_receive_window_no_shrink(const struct tcp_sock *tp) ++{ ++ s32 win = tp->rcv_right_edge - tp->rcv_nxt; ++ ++ win = max_t(s32, win, tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt); ++ ++ if (unlikely(win < 0)) ++ win = 0; ++ return (u32) win; ++} ++ + /* Choose a new window, without checks for shrinking, and without + * scaling applied to the result. The caller does these things + * if necessary. This is a "raw" window selection. +diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c +index 617c06364516..81d35b7b00c0 100644 +--- a/net/ipv4/tcp.c ++++ b/net/ipv4/tcp.c +@@ -2824,6 +2824,7 @@ static int tcp_repair_set_window(struct tcp_sock *tp, char __user *optbuf, int l + + tp->rcv_wnd = opt.rcv_wnd; + tp->rcv_wup = opt.rcv_wup; ++ tp->rcv_right_edge = tp->rcv_wup + tp->rcv_wnd; + + return 0; + } +diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c +index 42d7a7d208df..88c211d28bc5 100644 +--- a/net/ipv4/tcp_fastopen.c ++++ b/net/ipv4/tcp_fastopen.c +@@ -277,6 +277,7 @@ static struct sock *tcp_fastopen_create_child(struct sock *sk, + + tcp_rsk(req)->rcv_nxt = tp->rcv_nxt; + tp->rcv_wup = tp->rcv_nxt; ++ tp->rcv_right_edge = tp->rcv_wup + tp->rcv_wnd; + + meta_sk = child; + ret = mptcp_check_req_fastopen(meta_sk, req); +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c +index 37e229d2f615..d968cc6fddf7 100644 +--- a/net/ipv4/tcp_input.c ++++ b/net/ipv4/tcp_input.c +@@ -4122,7 +4122,7 @@ static inline bool tcp_paws_discard(const struct sock *sk, + static inline bool tcp_sequence(const struct tcp_sock *tp, u32 seq, u32 end_seq) + { + return !before(end_seq, tp->rcv_wup) && +- !after(seq, tp->rcv_nxt + tcp_receive_window(tp)); ++ !after(seq, tp->rcv_nxt + tcp_receive_window_no_shrink(tp)); + } + + /* When we get a reset we do this. */ +@@ -4842,7 +4842,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) + * Out of sequence packets to the out_of_order_queue. + */ + if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt) { +- if (tcp_receive_window(tp) == 0) { ++ if (tcp_receive_window_no_shrink(tp) == 0) { + NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPZEROWINDOWDROP); + goto out_of_window; + } +@@ -4903,7 +4903,8 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) + } + + /* Out of window. F.e. zero window probe. */ +- if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt + tcp_receive_window(tp))) ++ if (!before(TCP_SKB_CB(skb)->seq, ++ tp->rcv_nxt + tcp_receive_window_no_shrink(tp))) + goto out_of_window; + + if (before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { +@@ -4913,7 +4914,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) + /* If window is closed, drop tail of packet. But after + * remembering D-SACK for its head made in previous line. + */ +- if (!tcp_receive_window(tp)) { ++ if (!tcp_receive_window_no_shrink(tp)) { + NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPZEROWINDOWDROP); + goto out_of_window; + } +@@ -6069,6 +6070,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, + */ + WRITE_ONCE(tp->rcv_nxt, TCP_SKB_CB(skb)->seq + 1); + tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1; ++ tcp_update_rcv_right_edge(tp); + + /* RFC1323: The window in SYN & SYN/ACK segments is + * never scaled. 
+@@ -6187,6 +6189,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, + WRITE_ONCE(tp->rcv_nxt, TCP_SKB_CB(skb)->seq + 1); + WRITE_ONCE(tp->copied_seq, tp->rcv_nxt); + tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1; ++ tcp_update_rcv_right_edge(tp); + + /* RFC1323: The window in SYN & SYN/ACK segments is + * never scaled. +diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c +index c4b489bfa9ae..fa9f63e3caaa 100644 +--- a/net/ipv4/tcp_minisocks.c ++++ b/net/ipv4/tcp_minisocks.c +@@ -550,6 +550,7 @@ struct sock *tcp_create_openreq_child(const struct sock *sk, + newtp->window_clamp = req->rsk_window_clamp; + newtp->rcv_ssthresh = req->rsk_rcv_wnd; + newtp->rcv_wnd = req->rsk_rcv_wnd; ++ newtp->rcv_right_edge = newtp->rcv_wnd + newtp->rcv_wup; + newtp->rx_opt.wscale_ok = ireq->wscale_ok; + if (newtp->rx_opt.wscale_ok) { + newtp->rx_opt.snd_wscale = ireq->snd_wscale; +diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c +index 221e055623c1..0f3bb4467133 100644 +--- a/net/ipv4/tcp_output.c ++++ b/net/ipv4/tcp_output.c +@@ -283,6 +283,7 @@ u16 tcp_select_window(struct sock *sk) + + tp->rcv_wnd = new_win; + tp->rcv_wup = tp->rcv_nxt; ++ tcp_update_rcv_right_edge(tp); + + /* Make sure we do not exceed the maximum possible + * scaled window. +@@ -3484,6 +3485,8 @@ static void tcp_connect_init(struct sock *sk) + else + tp->rcv_tstamp = tcp_jiffies32; + tp->rcv_wup = tp->rcv_nxt; ++ /* force set rcv_right_edge here at start of connection */ ++ tp->rcv_right_edge = tp->rcv_wup + tp->rcv_wnd; + WRITE_ONCE(tp->copied_seq, tp->rcv_nxt); + + inet_csk(sk)->icsk_rto = tcp_timeout_init(sk); +diff --git a/net/mptcp/mptcp_ctrl.c b/net/mptcp/mptcp_ctrl.c +index a6bbb7a688ba..9210e755ae3d 100644 +--- a/net/mptcp/mptcp_ctrl.c ++++ b/net/mptcp/mptcp_ctrl.c +@@ -1278,6 +1278,7 @@ void mptcp_initialize_recv_vars(struct tcp_sock *meta_tp, struct mptcp_cb *mpcb, + meta_tp->copied_seq = (u32)idsn; + meta_tp->rcv_nxt = (u32)idsn; + meta_tp->rcv_wup = (u32)idsn; ++ meta_tp->rcv_right_edge = meta_tp->rcv_wup + meta_tp->rcv_wnd; + + meta_tp->snd_wl1 = meta_tp->rcv_nxt - 1; + } +diff --git a/net/mptcp/mptcp_output.c b/net/mptcp/mptcp_output.c +index fc71d41c608d..bdea1a26e3fc 100644 +--- a/net/mptcp/mptcp_output.c ++++ b/net/mptcp/mptcp_output.c +@@ -1229,6 +1229,10 @@ u16 mptcp_select_window(struct sock *sk) + + meta_tp->rcv_wnd = tp->rcv_wnd; + meta_tp->rcv_wup = meta_tp->rcv_nxt; ++ /* no need to use tcp_update_rcv_right_edge, because at the meta level ++ * right edge cannot go back ++ */ ++ meta_tp->rcv_right_edge = meta_tp->rcv_wnd + meta_tp->rcv_wup; + + return new_win; + } +diff --git a/net/mptcp/mptcp_sched.c b/net/mptcp/mptcp_sched.c +index 4b878d14492a..6cb8c5c7d098 100644 +--- a/net/mptcp/mptcp_sched.c ++++ b/net/mptcp/mptcp_sched.c +@@ -388,25 +388,32 @@ static struct sk_buff *__mptcp_next_segment(struct sock *meta_sk, int *reinject) + } else { + skb = tcp_send_head(meta_sk); + +- if (!skb && meta_sk->sk_socket && +- test_bit(SOCK_NOSPACE, &meta_sk->sk_socket->flags) && +- sk_stream_wspace(meta_sk) < sk_stream_min_wspace(meta_sk)) { ++ if (!skb) { + struct sock *subsk; + +- /* meta is send buffer limited */ +- tcp_chrono_start(meta_sk, TCP_CHRONO_SNDBUF_LIMITED); +- + subsk = mpcb->sched_ops->get_subflow(meta_sk, + NULL, false); + if (!subsk) + return NULL; + +- skb = mptcp_rcv_buf_optimization(subsk, 0); +- if (skb) +- *reinject = -1; +- else ++ if (meta_sk->sk_socket && ++ test_bit(SOCK_NOSPACE, &meta_sk->sk_socket->flags) && ++ sk_stream_wspace(meta_sk) < 
sk_stream_min_wspace(meta_sk)) { ++ skb = mptcp_rcv_buf_optimization(subsk, 0); ++ if (skb) ++ *reinject = -1; ++ else ++ tcp_chrono_start(subsk, ++ TCP_CHRONO_SNDBUF_LIMITED); ++ } ++ ++ if (!skb) { ++ /* meta is send buffer limited */ ++ tcp_chrono_start(meta_sk, TCP_CHRONO_SNDBUF_LIMITED); ++ + tcp_chrono_start(subsk, + TCP_CHRONO_SNDBUF_LIMITED); ++ } + } + } + return skb; +diff --git a/include/net/tcp.h b/include/net/tcp.h +index 9d3fa5eb36d9..b6e9d709d1e1 100644 +--- a/include/net/tcp.h ++++ b/include/net/tcp.h +@@ -350,6 +350,7 @@ int tcp_xmit_probe_skb(struct sock *sk, int urgent, int mib); + void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb); + int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, + gfp_t gfp_mask); ++u32 tcp_tso_segs(struct sock *sk, unsigned int mss_now); + unsigned int tcp_mss_split_point(const struct sock *sk, + const struct sk_buff *skb, + unsigned int mss_now, +diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c +index f72edfe89b4d..86bce63ab841 100644 +--- a/net/ipv4/tcp_output.c ++++ b/net/ipv4/tcp_output.c +@@ -1781,7 +1781,7 @@ static u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now, + /* Return the number of segments we want in the skb we are transmitting. + * See if congestion control module wants to decide; otherwise, autosize. + */ +-static u32 tcp_tso_segs(struct sock *sk, unsigned int mss_now) ++u32 tcp_tso_segs(struct sock *sk, unsigned int mss_now) + { + const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops; + u32 min_tso, tso_segs; +diff --git a/net/mptcp/mptcp_sched.c b/net/mptcp/mptcp_sched.c +index a4d8c4a5e52d..4b878d14492a 100644 +--- a/net/mptcp/mptcp_sched.c ++++ b/net/mptcp/mptcp_sched.c +@@ -1,5 +1,6 @@ + /* MPTCP Scheduler module selector. Highly inspired by tcp_cong.c */ + ++#include + #include + #include + #include +@@ -37,12 +38,38 @@ bool mptcp_is_def_unavailable(struct sock *sk) + } + EXPORT_SYMBOL_GPL(mptcp_is_def_unavailable); + ++/* estimate number of segments currently in flight + unsent in ++ * the subflow socket. ++ */ ++static int mptcp_subflow_queued(struct sock *sk, u32 max_tso_segs) ++{ ++ const struct tcp_sock *tp = tcp_sk(sk); ++ unsigned int queued; ++ ++ /* estimate the max number of segments in the write queue ++ * this is an overestimation, avoiding to iterate over the queue ++ * to make a better estimation. ++ * Having only one skb in the queue however might trigger tso deferral, ++ * delaying the sending of a tso segment in the hope that skb_entail ++ * will append more data to the skb soon. ++ * Therefore, in the case only one skb is in the queue, we choose to ++ * potentially underestimate, risking to schedule one skb too many onto ++ * the subflow rather than not enough. 
++ */ ++ if (sk->sk_write_queue.qlen > 1) ++ queued = sk->sk_write_queue.qlen * max_tso_segs; ++ else ++ queued = sk->sk_write_queue.qlen; ++ ++ return queued + tcp_packets_in_flight(tp); ++} ++ + static bool mptcp_is_temp_unavailable(struct sock *sk, + const struct sk_buff *skb, + bool zero_wnd_test) + { + const struct tcp_sock *tp = tcp_sk(sk); +- unsigned int mss_now, space, in_flight; ++ unsigned int mss_now; + + if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss) { + /* If SACK is disabled, and we got a loss, TCP does not exit +@@ -66,19 +93,11 @@ static bool mptcp_is_temp_unavailable(struct sock *sk, + return true; + } + +- in_flight = tcp_packets_in_flight(tp); +- /* Not even a single spot in the cwnd */ +- if (in_flight >= tp->snd_cwnd) +- return true; +- + mss_now = tcp_current_mss(sk); + +- /* Now, check if what is queued in the subflow's send-queue +- * already fills the cwnd. +- */ +- space = (tp->snd_cwnd - in_flight) * mss_now; +- +- if (tp->write_seq - tp->snd_nxt >= space) ++ /* Not even a single spot in the cwnd */ ++ if (mptcp_subflow_queued(sk, tcp_tso_segs(sk, tcp_current_mss(sk))) ++ >= tp->snd_cwnd) + return true; + + if (zero_wnd_test && !before(tp->write_seq, tcp_wnd_end(tp))) +@@ -399,11 +418,10 @@ struct sk_buff *mptcp_next_segment(struct sock *meta_sk, + unsigned int *limit) + { + struct sk_buff *skb = __mptcp_next_segment(meta_sk, reinject); +- unsigned int mss_now, in_flight_space; +- int remaining_in_flight_space; +- u32 max_len, max_segs, window; ++ unsigned int mss_now; ++ u32 max_len, gso_max_segs, max_segs, max_tso_segs, window; + struct tcp_sock *subtp; +- u16 gso_max_segs; ++ int queued; + + /* As we set it, we have to reset it as well. */ + *limit = 0; +@@ -441,35 +459,29 @@ struct sk_buff *mptcp_next_segment(struct sock *meta_sk, + if (skb->len <= mss_now) + return skb; + +- /* The following is similar to tcp_mss_split_point, but +- * we do not care about nagle, because we will anyways +- * use TCP_NAGLE_PUSH, which overrides this. ++ max_tso_segs = tcp_tso_segs(*subsk, tcp_current_mss(*subsk)); ++ queued = mptcp_subflow_queued(*subsk, max_tso_segs); ++ ++ /* this condition should already have been established in ++ * mptcp_is_temp_unavailable when selecting available flows + */ ++ WARN_ONCE(subtp->snd_cwnd <= queued, "Selected subflow no cwnd room"); + + gso_max_segs = (*subsk)->sk_gso_max_segs; + if (!gso_max_segs) /* No gso supported on the subflow's NIC */ + gso_max_segs = 1; +- max_segs = min_t(unsigned int, tcp_cwnd_test(subtp, skb), gso_max_segs); ++ ++ max_segs = min_t(unsigned int, subtp->snd_cwnd - queued, gso_max_segs); + if (!max_segs) + return NULL; + +- /* max_len is what would fit in the cwnd (respecting the 2GSO-limit of +- * tcp_cwnd_test), but ignoring whatever was already queued. ++ /* if there is room for a segment, schedule up to a complete TSO ++ * segment to avoid TSO splitting. Even if it is more than allowed by ++ * the congestion window. 
+ */ +- max_len = min(mss_now * max_segs, skb->len); +- +- in_flight_space = (subtp->snd_cwnd - tcp_packets_in_flight(subtp)) * mss_now; +- remaining_in_flight_space = (int)in_flight_space - (subtp->write_seq - subtp->snd_nxt); ++ max_segs = max_t(unsigned int, max_tso_segs, max_segs); + +- if (remaining_in_flight_space <= 0) +- WARN_ONCE(1, "in_flight %u cwnd %u wseq %u snxt %u mss_now %u cache %u", +- tcp_packets_in_flight(subtp), subtp->snd_cwnd, +- subtp->write_seq, subtp->snd_nxt, mss_now, subtp->mss_cache); +- else +- /* max_len now fits exactly in the write-queue, taking into +- * account what was already queued. +- */ +- max_len = min_t(u32, max_len, remaining_in_flight_space); ++ max_len = min(mss_now * max_segs, skb->len); + + window = tcp_wnd_end(subtp) - subtp->write_seq; \ No newline at end of file
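Note (not part of the patch): the scheduler rework above replaces the byte-based in-flight-space calculation with a segment budget. The sketch below distils that arithmetic so it can be checked in isolation: subflow_queued() mirrors mptcp_subflow_queued() (over-estimate the write queue as full TSO bursts, except when a single skb is queued), and subflow_budget() mirrors the tail of mptcp_next_segment() (cwnd room capped by GSO, then relaxed to a full TSO burst). The function names, plain-integer parameters and the numbers in main() are illustrative stand-ins; in the kernel these values come from tcp_sk(sk), sk->sk_gso_max_segs, tcp_tso_segs() and tcp_packets_in_flight(), and the final clamp against the peer's receive window (the tcp_wnd_end() line the excerpt stops at) is deliberately left out here.

#include <stdio.h>

/* Write-queue skbs are assumed to be full TSO bursts (an over-estimate),
 * except when only one skb is queued, where under-estimating avoids
 * needless TSO deferral. */
static unsigned int subflow_queued(unsigned int qlen,
				   unsigned int max_tso_segs,
				   unsigned int in_flight)
{
	unsigned int queued = (qlen > 1) ? qlen * max_tso_segs : qlen;

	return queued + in_flight;
}

/* How many bytes of the meta-level skb may be pushed onto this subflow;
 * returns 0 when the queued estimate already fills the cwnd. */
static unsigned int subflow_budget(unsigned int skb_len, unsigned int mss_now,
				   unsigned int snd_cwnd, unsigned int queued,
				   unsigned int gso_max_segs,
				   unsigned int max_tso_segs)
{
	unsigned int max_segs;

	if (queued >= snd_cwnd)
		return 0;               /* no room at all in the cwnd */

	if (!gso_max_segs)
		gso_max_segs = 1;       /* no GSO on the subflow's NIC */

	max_segs = snd_cwnd - queued;
	if (max_segs > gso_max_segs)
		max_segs = gso_max_segs;

	/* Allow a complete TSO burst even if it exceeds the remaining
	 * cwnd room, to avoid splitting a TSO segment. */
	if (max_segs < max_tso_segs)
		max_segs = max_tso_segs;

	return (mss_now * max_segs < skb_len) ? mss_now * max_segs : skb_len;
}

int main(void)
{
	/* cwnd of 12 with 8 segments in flight and 1 skb queued: only 3
	 * cwnd slots remain, yet a full 8-segment TSO burst
	 * (8 * 1400 = 11200 bytes) is still scheduled. */
	unsigned int queued = subflow_queued(1, 8, 8);

	printf("queued=%u budget=%u\n", queued,
	       subflow_budget(64000, 1400, 12, queued, 16, 8));
	return 0;
}

The same queued estimate drives both sides of the change: mptcp_is_temp_unavailable() skips a subflow once the estimate reaches snd_cwnd, and mptcp_next_segment() sizes the chunk it hands to that subflow, trading a slight cwnd overshoot for unsplit TSO bursts.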