From 0a55523ea5fbf3fc7e406603b556f124d78ff79f Mon Sep 17 00:00:00 2001
From: "Ycarus (Yannick Chabanois)"
Date: Wed, 16 Mar 2022 20:53:52 +0100
Subject: [PATCH] Update Linux 5.4 kernel and clean some files

---
 build.sh                                           |    6 +-
 root/include/kernel-version.mk                     |    3 +-
 .../package/network/wwan/files/data/05c6-9215      |    8 +
 .../package/network/wwan/files/data/2c7c-0121      |   11 +
 .../package/network/wwan/files/data/2c7c-0296      |   11 +
 .../package/network/wwan/files/data/2c7c-0306      |   11 +
 .../package/network/wwan/files/data/2c7c-0512      |   11 +
 ...5.5-MIPS-BPF-Restore-MIPS32-cBPF-JIT.patch      | 1650 -----------------
 ...-integer-overflow-in-arg-calculation.patch      |   53 -
 ...w_table-fix-offloaded-connection-tim.patch      |  115 --
 .../generic/hack-5.4/690-mptcp_v0.96.patch         |  486 ++---
 ...w_table-add-hardware-offload-support.patch      |  554 ++++++
 ...dd-support-for-threaded-NAPI-polling.patch      |  344 ----
 .../999-fix-oeoverflow-ipheth.patch                |   57 -
 .../base-files/etc/init.d/pcrypt-crconf            |   30 -
 .../base-files/lib/functions/caldata.sh            |  171 --
 .../ipq40xx/base-files/lib/functions/leds.sh       |   94 -
 .../base-files/lib/functions/migrations.sh         |   67 -
 .../base-files/lib/functions/mobile.sh             |  247 ---
 .../base-files/lib/functions/preinit.sh            |   87 -
 .../base-files/lib/functions/service.sh            |  103 -
 .../base-files/lib/functions/system.sh             |  226 ---
 .../base-files/lib/upgrade/ipq_failsafe.sh         |  383 ----
 .../base-files/lib/upgrade/platform.sh             |    3 +-
 .../100-GPIO-add-named-gpio-exports.patch          |  165 --
 .../719-meiglink_slm750_support.patch              |   33 -
 26 files changed, 870 insertions(+), 4059 deletions(-)
 create mode 100644 root/package/network/wwan/files/data/05c6-9215
 create mode 100644 root/package/network/wwan/files/data/2c7c-0121
 create mode 100644 root/package/network/wwan/files/data/2c7c-0296
 create mode 100644 root/package/network/wwan/files/data/2c7c-0306
 create mode 100644 root/package/network/wwan/files/data/2c7c-0512
 delete mode 100644 root/target/linux/generic/backport-5.4/070-v5.5-MIPS-BPF-Restore-MIPS32-cBPF-JIT.patch
 delete mode 100644 root/target/linux/generic/backport-5.4/097-bpf-fix-integer-overflow-in-arg-calculation.patch
 delete mode 100644 root/target/linux/generic/backport-5.4/370-netfilter-nf_flow_table-fix-offloaded-connection-tim.patch
 create mode 100644 root/target/linux/generic/pending-5.4/640-netfilter-nf_flow_table-add-hardware-offload-support.patch
 delete mode 100644 root/target/linux/generic/pending-5.4/690-net-add-support-for-threaded-NAPI-polling.patch
 delete mode 100644 root/target/linux/generic/pending-5.4/999-fix-oeoverflow-ipheth.patch
 delete mode 100755 root/target/linux/ipq40xx/base-files/etc/init.d/pcrypt-crconf
 delete mode 100644 root/target/linux/ipq40xx/base-files/lib/functions/caldata.sh
 delete mode 100644 root/target/linux/ipq40xx/base-files/lib/functions/leds.sh
 delete mode 100644 root/target/linux/ipq40xx/base-files/lib/functions/migrations.sh
 delete mode 100644 root/target/linux/ipq40xx/base-files/lib/functions/mobile.sh
 delete mode 100644 root/target/linux/ipq40xx/base-files/lib/functions/preinit.sh
 delete mode 100644 root/target/linux/ipq40xx/base-files/lib/functions/service.sh
 delete mode 100644 root/target/linux/ipq40xx/base-files/lib/functions/system.sh
 delete mode 100644 root/target/linux/ipq40xx/base-files/lib/upgrade/ipq_failsafe.sh
 delete mode 100644 root/target/linux/ipq40xx/patches-5.4/100-GPIO-add-named-gpio-exports.patch
 delete mode 100644 root/target/linux/ipq40xx/patches-5.4/719-meiglink_slm750_support.patch

diff --git a/build.sh b/build.sh
index 71e4a95d..1c88f30f 100755
--- a/build.sh
+++
b/build.sh @@ -94,9 +94,9 @@ fi if [ "$OMR_OPENWRT" = "default" ]; then if [ "$OMR_KERNEL" = "5.4" ]; then # Use OpenWrt 21.02 for 5.4 kernel - _get_repo "$OMR_TARGET/source" https://github.com/openwrt/openwrt "f441be3921c769b732f0148f005d4f1bbace0508" - _get_repo feeds/packages https://github.com/openwrt/packages "ab94e0709a9c796d34d723ddba44380f7b3d8698" - _get_repo feeds/luci https://github.com/openwrt/luci "0818d835cacd9fa75b8685aabe6378ac09b95145" + _get_repo "$OMR_TARGET/source" https://github.com/openwrt/openwrt "864bba55d8714a64abdf94cfb835450b8cd7789e" + _get_repo feeds/packages https://github.com/openwrt/packages "793e7ee484ae4ec37b1cd920b4032dde3cae69cc" + _get_repo feeds/luci https://github.com/openwrt/luci "701ea947fc920e63d14d8efb8287097fd63442ca" else _get_repo "$OMR_TARGET/source" https://github.com/openwrt/openwrt "02de391b086dd2b7a72c2394cfb66cec666a51c1" _get_repo feeds/packages https://github.com/openwrt/packages "7b2dd3e9efbc20ef4e7f47f60c3db9aaef37c0a5" diff --git a/root/include/kernel-version.mk b/root/include/kernel-version.mk index f75a0dc5..c0609eca 100644 --- a/root/include/kernel-version.mk +++ b/root/include/kernel-version.mk @@ -6,12 +6,13 @@ ifdef CONFIG_TESTING_KERNEL KERNEL_PATCHVER:=$(KERNEL_TESTING_PATCHVER) endif -LINUX_VERSION-5.4 = .132 +LINUX_VERSION-5.4 = .182 LINUX_VERSION-5.10 = .64 LINUX_VERSION-5.14 = .6 LINUX_VERSION-5.15 = .17 LINUX_KERNEL_HASH-5.4.132 = 8466adbfb3579e751ede683496df7bb20f258b5f882250f3dd82be63736d00ef +LINUX_KERNEL_HASH-5.4.182 = b2f1201f64f010e9e3c85d6f303a559a7944a80a0244a86b8f5035bd23f1f40d LINUX_KERNEL_HASH-5.10.64 = 3eb84bd24a2de2b4749314e34597c02401c5d6831b055ed5224adb405c35e30a LINUX_KERNEL_HASH-5.14.6 = 54848c1268771ee3515e4c33e29abc3f1fa90d8144894cce6d0ebc3b158bccec LINUX_KERNEL_HASH-5.15.4 = 549d0fb75e65f6158e6f4becc648f249d386843da0e1211460bde8b1ea99cbca diff --git a/root/package/network/wwan/files/data/05c6-9215 b/root/package/network/wwan/files/data/05c6-9215 new file mode 100644 index 00000000..14317a03 --- /dev/null +++ b/root/package/network/wwan/files/data/05c6-9215 @@ -0,0 +1,8 @@ +{ + "desc": "Quectel EC20", + "type": "gobinet", + "control": 2, + "boudrate": 115200, + "stop_bits": 8, + "gps": 1 +} diff --git a/root/package/network/wwan/files/data/2c7c-0121 b/root/package/network/wwan/files/data/2c7c-0121 new file mode 100644 index 00000000..771731f0 --- /dev/null +++ b/root/package/network/wwan/files/data/2c7c-0121 @@ -0,0 +1,11 @@ +{ + "desc": "Quectel EC21", + "type": "gobinet", + "control": 2, + "boudrate": 115200, + "stop_bits": 8, + "gps": 1, + "ep_iface": 4, + "dl_max_size": 4096, + "dl_max_datagrams": 16 +} diff --git a/root/package/network/wwan/files/data/2c7c-0296 b/root/package/network/wwan/files/data/2c7c-0296 new file mode 100644 index 00000000..b1a62448 --- /dev/null +++ b/root/package/network/wwan/files/data/2c7c-0296 @@ -0,0 +1,11 @@ +{ + "desc": "Quectel BG96", + "type": "gobinet", + "control": 3, + "boudrate": 115200, + "stop_bits": 8, + "gps": 1, + "ep_iface": 4, + "dl_max_size": 2048, + "dl_max_datagrams": 8 +} diff --git a/root/package/network/wwan/files/data/2c7c-0306 b/root/package/network/wwan/files/data/2c7c-0306 new file mode 100644 index 00000000..e271dc4a --- /dev/null +++ b/root/package/network/wwan/files/data/2c7c-0306 @@ -0,0 +1,11 @@ +{ + "desc": "Quectel EG06", + "type": "gobinet", + "control": 2, + "boudrate": 115200, + "stop_bits": 8, + "gps": 1, + "ep_iface": 4, + "dl_max_size": 16384, + "dl_max_datagrams": 32 +} diff --git a/root/package/network/wwan/files/data/2c7c-0512 
b/root/package/network/wwan/files/data/2c7c-0512 new file mode 100644 index 00000000..977b8c3d --- /dev/null +++ b/root/package/network/wwan/files/data/2c7c-0512 @@ -0,0 +1,11 @@ +{ + "desc": "Quectel EG12/EG18", + "type": "gobinet", + "control": 2, + "boudrate": 115200, + "stop_bits": 8, + "gps": 1, + "ep_iface": 4, + "dl_max_size": 16384, + "dl_max_datagrams": 32 +} diff --git a/root/target/linux/generic/backport-5.4/070-v5.5-MIPS-BPF-Restore-MIPS32-cBPF-JIT.patch b/root/target/linux/generic/backport-5.4/070-v5.5-MIPS-BPF-Restore-MIPS32-cBPF-JIT.patch deleted file mode 100644 index 027d0253..00000000 --- a/root/target/linux/generic/backport-5.4/070-v5.5-MIPS-BPF-Restore-MIPS32-cBPF-JIT.patch +++ /dev/null @@ -1,1650 +0,0 @@ -From 36366e367ee93ced84fddb8fae6675e12985f5a4 Mon Sep 17 00:00:00 2001 -From: Paul Burton -Date: Thu, 5 Dec 2019 10:23:18 -0800 -Subject: [PATCH] MIPS: BPF: Restore MIPS32 cBPF JIT - -Commit 716850ab104d ("MIPS: eBPF: Initial eBPF support for MIPS32 -architecture.") enabled our eBPF JIT for MIPS32 kernels, whereas it has -previously only been availailable for MIPS64. It was my understanding at -the time that the BPF test suite was passing & JITing a comparable -number of tests to our cBPF JIT [1], but it turns out that was not the -case. - -The eBPF JIT has a number of problems on MIPS32: - -- Most notably various code paths still result in emission of MIPS64 - instructions which will cause reserved instruction exceptions & kernel - panics when run on MIPS32 CPUs. - -- The eBPF JIT doesn't account for differences between the O32 ABI used - by MIPS32 kernels versus the N64 ABI used by MIPS64 kernels. Notably - arguments beyond the first 4 are passed on the stack in O32, and this - is entirely unhandled when JITing a BPF_CALL instruction. Stack space - must be reserved for arguments even if they all fit in registers, and - the callee is free to assume that stack space has been reserved for - its use - with the eBPF JIT this is not the case, so calling any - function can result in clobbering values on the stack & unpredictable - behaviour. Function arguments in eBPF are always 64-bit values which - is also entirely unhandled - the JIT still uses a single (32-bit) - register per argument. As a result all function arguments are always - passed incorrectly when JITing a BPF_CALL instruction, leading to - kernel crashes or strange behavior. - -- The JIT attempts to bail our on use of ALU64 instructions or 64-bit - memory access instructions. The code doing this at the start of - build_one_insn() incorrectly checks whether BPF_OP() equals BPF_DW, - when it should really be checking BPF_SIZE() & only doing so when - BPF_CLASS() is one of BPF_{LD,LDX,ST,STX}. This results in false - positives that cause more bailouts than intended, and that in turns - hides some of the problems described above. - -- The kernel's cBPF->eBPF translation makes heavy use of 64-bit eBPF - instructions that the MIPS32 eBPF JIT bails out on, leading to most - cBPF programs not being JITed at all. - -Until these problems are resolved, revert the removal of the cBPF JIT -performed by commit 716850ab104d ("MIPS: eBPF: Initial eBPF support for -MIPS32 architecture."). Together with commit f8fffebdea75 ("MIPS: BPF: -Disable MIPS32 eBPF JIT") this restores MIPS32 BPF JIT behavior back to -the same state it was prior to the introduction of the broken eBPF JIT -support. 
- -[1] https://lore.kernel.org/linux-mips/MWHPR2201MB13583388481F01A422CE7D66D4410@MWHPR2201MB1358.namprd22.prod.outlook.com/ - -Signed-off-by: Paul Burton -Fixes: 716850ab104d ("MIPS: eBPF: Initial eBPF support for MIPS32 architecture.") -Cc: Daniel Borkmann -Cc: Hassan Naveed -Cc: Tony Ambardar -Cc: bpf@vger.kernel.org -Cc: netdev@vger.kernel.org -Cc: linux-mips@vger.kernel.org -Cc: linux-kernel@vger.kernel.org ---- - arch/mips/Kconfig | 1 + - arch/mips/net/Makefile | 1 + - arch/mips/net/bpf_jit.c | 1270 +++++++++++++++++++++++++++++++++++ - arch/mips/net/bpf_jit_asm.S | 285 ++++++++ - 4 files changed, 1557 insertions(+) - create mode 100644 arch/mips/net/bpf_jit.c - create mode 100644 arch/mips/net/bpf_jit_asm.S - ---- a/arch/mips/Kconfig -+++ b/arch/mips/Kconfig -@@ -46,6 +46,7 @@ config MIPS - select HAVE_ARCH_TRACEHOOK - select HAVE_ARCH_TRANSPARENT_HUGEPAGE if CPU_SUPPORTS_HUGEPAGES - select HAVE_ASM_MODVERSIONS -+ select HAVE_CBPF_JIT if !64BIT && !CPU_MICROMIPS - select HAVE_EBPF_JIT if 64BIT && !CPU_MICROMIPS && TARGET_ISA_REV >= 2 - select HAVE_CONTEXT_TRACKING - select HAVE_COPY_THREAD_TLS ---- a/arch/mips/net/Makefile -+++ b/arch/mips/net/Makefile -@@ -1,4 +1,5 @@ - # SPDX-License-Identifier: GPL-2.0-only - # MIPS networking code - -+obj-$(CONFIG_MIPS_CBPF_JIT) += bpf_jit.o bpf_jit_asm.o - obj-$(CONFIG_MIPS_EBPF_JIT) += ebpf_jit.o ---- /dev/null -+++ b/arch/mips/net/bpf_jit.c -@@ -0,0 +1,1270 @@ -+/* -+ * Just-In-Time compiler for BPF filters on MIPS -+ * -+ * Copyright (c) 2014 Imagination Technologies Ltd. -+ * Author: Markos Chandras -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License as published by the -+ * Free Software Foundation; version 2 of the License. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "bpf_jit.h" -+ -+/* ABI -+ * r_skb_hl SKB header length -+ * r_data SKB data pointer -+ * r_off Offset -+ * r_A BPF register A -+ * r_X BPF register X -+ * r_skb *skb -+ * r_M *scratch memory -+ * r_skb_len SKB length -+ * -+ * On entry (*bpf_func)(*skb, *filter) -+ * a0 = MIPS_R_A0 = skb; -+ * a1 = MIPS_R_A1 = filter; -+ * -+ * Stack -+ * ... -+ * M[15] -+ * M[14] -+ * M[13] -+ * ... -+ * M[0] <-- r_M -+ * saved reg k-1 -+ * saved reg k-2 -+ * ... 
-+ * saved reg 0 <-- r_sp -+ * -+ * -+ * Packet layout -+ * -+ * <--------------------- len ------------------------> -+ * <--skb-len(r_skb_hl)-->< ----- skb->data_len ------> -+ * ---------------------------------------------------- -+ * | skb->data | -+ * ---------------------------------------------------- -+ */ -+ -+#define ptr typeof(unsigned long) -+ -+#define SCRATCH_OFF(k) (4 * (k)) -+ -+/* JIT flags */ -+#define SEEN_CALL (1 << BPF_MEMWORDS) -+#define SEEN_SREG_SFT (BPF_MEMWORDS + 1) -+#define SEEN_SREG_BASE (1 << SEEN_SREG_SFT) -+#define SEEN_SREG(x) (SEEN_SREG_BASE << (x)) -+#define SEEN_OFF SEEN_SREG(2) -+#define SEEN_A SEEN_SREG(3) -+#define SEEN_X SEEN_SREG(4) -+#define SEEN_SKB SEEN_SREG(5) -+#define SEEN_MEM SEEN_SREG(6) -+/* SEEN_SK_DATA also implies skb_hl an skb_len */ -+#define SEEN_SKB_DATA (SEEN_SREG(7) | SEEN_SREG(1) | SEEN_SREG(0)) -+ -+/* Arguments used by JIT */ -+#define ARGS_USED_BY_JIT 2 /* only applicable to 64-bit */ -+ -+#define SBIT(x) (1 << (x)) /* Signed version of BIT() */ -+ -+/** -+ * struct jit_ctx - JIT context -+ * @skf: The sk_filter -+ * @prologue_bytes: Number of bytes for prologue -+ * @idx: Instruction index -+ * @flags: JIT flags -+ * @offsets: Instruction offsets -+ * @target: Memory location for the compiled filter -+ */ -+struct jit_ctx { -+ const struct bpf_prog *skf; -+ unsigned int prologue_bytes; -+ u32 idx; -+ u32 flags; -+ u32 *offsets; -+ u32 *target; -+}; -+ -+ -+static inline int optimize_div(u32 *k) -+{ -+ /* power of 2 divides can be implemented with right shift */ -+ if (!(*k & (*k-1))) { -+ *k = ilog2(*k); -+ return 1; -+ } -+ -+ return 0; -+} -+ -+static inline void emit_jit_reg_move(ptr dst, ptr src, struct jit_ctx *ctx); -+ -+/* Simply emit the instruction if the JIT memory space has been allocated */ -+#define emit_instr(ctx, func, ...) \ -+do { \ -+ if ((ctx)->target != NULL) { \ -+ u32 *p = &(ctx)->target[ctx->idx]; \ -+ uasm_i_##func(&p, ##__VA_ARGS__); \ -+ } \ -+ (ctx)->idx++; \ -+} while (0) -+ -+/* -+ * Similar to emit_instr but it must be used when we need to emit -+ * 32-bit or 64-bit instructions -+ */ -+#define emit_long_instr(ctx, func, ...) 
\ -+do { \ -+ if ((ctx)->target != NULL) { \ -+ u32 *p = &(ctx)->target[ctx->idx]; \ -+ UASM_i_##func(&p, ##__VA_ARGS__); \ -+ } \ -+ (ctx)->idx++; \ -+} while (0) -+ -+/* Determine if immediate is within the 16-bit signed range */ -+static inline bool is_range16(s32 imm) -+{ -+ return !(imm >= SBIT(15) || imm < -SBIT(15)); -+} -+ -+static inline void emit_addu(unsigned int dst, unsigned int src1, -+ unsigned int src2, struct jit_ctx *ctx) -+{ -+ emit_instr(ctx, addu, dst, src1, src2); -+} -+ -+static inline void emit_nop(struct jit_ctx *ctx) -+{ -+ emit_instr(ctx, nop); -+} -+ -+/* Load a u32 immediate to a register */ -+static inline void emit_load_imm(unsigned int dst, u32 imm, struct jit_ctx *ctx) -+{ -+ if (ctx->target != NULL) { -+ /* addiu can only handle s16 */ -+ if (!is_range16(imm)) { -+ u32 *p = &ctx->target[ctx->idx]; -+ uasm_i_lui(&p, r_tmp_imm, (s32)imm >> 16); -+ p = &ctx->target[ctx->idx + 1]; -+ uasm_i_ori(&p, dst, r_tmp_imm, imm & 0xffff); -+ } else { -+ u32 *p = &ctx->target[ctx->idx]; -+ uasm_i_addiu(&p, dst, r_zero, imm); -+ } -+ } -+ ctx->idx++; -+ -+ if (!is_range16(imm)) -+ ctx->idx++; -+} -+ -+static inline void emit_or(unsigned int dst, unsigned int src1, -+ unsigned int src2, struct jit_ctx *ctx) -+{ -+ emit_instr(ctx, or, dst, src1, src2); -+} -+ -+static inline void emit_ori(unsigned int dst, unsigned src, u32 imm, -+ struct jit_ctx *ctx) -+{ -+ if (imm >= BIT(16)) { -+ emit_load_imm(r_tmp, imm, ctx); -+ emit_or(dst, src, r_tmp, ctx); -+ } else { -+ emit_instr(ctx, ori, dst, src, imm); -+ } -+} -+ -+static inline void emit_daddiu(unsigned int dst, unsigned int src, -+ int imm, struct jit_ctx *ctx) -+{ -+ /* -+ * Only used for stack, so the imm is relatively small -+ * and it fits in 15-bits -+ */ -+ emit_instr(ctx, daddiu, dst, src, imm); -+} -+ -+static inline void emit_addiu(unsigned int dst, unsigned int src, -+ u32 imm, struct jit_ctx *ctx) -+{ -+ if (!is_range16(imm)) { -+ emit_load_imm(r_tmp, imm, ctx); -+ emit_addu(dst, r_tmp, src, ctx); -+ } else { -+ emit_instr(ctx, addiu, dst, src, imm); -+ } -+} -+ -+static inline void emit_and(unsigned int dst, unsigned int src1, -+ unsigned int src2, struct jit_ctx *ctx) -+{ -+ emit_instr(ctx, and, dst, src1, src2); -+} -+ -+static inline void emit_andi(unsigned int dst, unsigned int src, -+ u32 imm, struct jit_ctx *ctx) -+{ -+ /* If imm does not fit in u16 then load it to register */ -+ if (imm >= BIT(16)) { -+ emit_load_imm(r_tmp, imm, ctx); -+ emit_and(dst, src, r_tmp, ctx); -+ } else { -+ emit_instr(ctx, andi, dst, src, imm); -+ } -+} -+ -+static inline void emit_xor(unsigned int dst, unsigned int src1, -+ unsigned int src2, struct jit_ctx *ctx) -+{ -+ emit_instr(ctx, xor, dst, src1, src2); -+} -+ -+static inline void emit_xori(ptr dst, ptr src, u32 imm, struct jit_ctx *ctx) -+{ -+ /* If imm does not fit in u16 then load it to register */ -+ if (imm >= BIT(16)) { -+ emit_load_imm(r_tmp, imm, ctx); -+ emit_xor(dst, src, r_tmp, ctx); -+ } else { -+ emit_instr(ctx, xori, dst, src, imm); -+ } -+} -+ -+static inline void emit_stack_offset(int offset, struct jit_ctx *ctx) -+{ -+ emit_long_instr(ctx, ADDIU, r_sp, r_sp, offset); -+} -+ -+static inline void emit_subu(unsigned int dst, unsigned int src1, -+ unsigned int src2, struct jit_ctx *ctx) -+{ -+ emit_instr(ctx, subu, dst, src1, src2); -+} -+ -+static inline void emit_neg(unsigned int reg, struct jit_ctx *ctx) -+{ -+ emit_subu(reg, r_zero, reg, ctx); -+} -+ -+static inline void emit_sllv(unsigned int dst, unsigned int src, -+ unsigned int sa, struct jit_ctx *ctx) 
-+{ -+ emit_instr(ctx, sllv, dst, src, sa); -+} -+ -+static inline void emit_sll(unsigned int dst, unsigned int src, -+ unsigned int sa, struct jit_ctx *ctx) -+{ -+ /* sa is 5-bits long */ -+ if (sa >= BIT(5)) -+ /* Shifting >= 32 results in zero */ -+ emit_jit_reg_move(dst, r_zero, ctx); -+ else -+ emit_instr(ctx, sll, dst, src, sa); -+} -+ -+static inline void emit_srlv(unsigned int dst, unsigned int src, -+ unsigned int sa, struct jit_ctx *ctx) -+{ -+ emit_instr(ctx, srlv, dst, src, sa); -+} -+ -+static inline void emit_srl(unsigned int dst, unsigned int src, -+ unsigned int sa, struct jit_ctx *ctx) -+{ -+ /* sa is 5-bits long */ -+ if (sa >= BIT(5)) -+ /* Shifting >= 32 results in zero */ -+ emit_jit_reg_move(dst, r_zero, ctx); -+ else -+ emit_instr(ctx, srl, dst, src, sa); -+} -+ -+static inline void emit_slt(unsigned int dst, unsigned int src1, -+ unsigned int src2, struct jit_ctx *ctx) -+{ -+ emit_instr(ctx, slt, dst, src1, src2); -+} -+ -+static inline void emit_sltu(unsigned int dst, unsigned int src1, -+ unsigned int src2, struct jit_ctx *ctx) -+{ -+ emit_instr(ctx, sltu, dst, src1, src2); -+} -+ -+static inline void emit_sltiu(unsigned dst, unsigned int src, -+ unsigned int imm, struct jit_ctx *ctx) -+{ -+ /* 16 bit immediate */ -+ if (!is_range16((s32)imm)) { -+ emit_load_imm(r_tmp, imm, ctx); -+ emit_sltu(dst, src, r_tmp, ctx); -+ } else { -+ emit_instr(ctx, sltiu, dst, src, imm); -+ } -+ -+} -+ -+/* Store register on the stack */ -+static inline void emit_store_stack_reg(ptr reg, ptr base, -+ unsigned int offset, -+ struct jit_ctx *ctx) -+{ -+ emit_long_instr(ctx, SW, reg, offset, base); -+} -+ -+static inline void emit_store(ptr reg, ptr base, unsigned int offset, -+ struct jit_ctx *ctx) -+{ -+ emit_instr(ctx, sw, reg, offset, base); -+} -+ -+static inline void emit_load_stack_reg(ptr reg, ptr base, -+ unsigned int offset, -+ struct jit_ctx *ctx) -+{ -+ emit_long_instr(ctx, LW, reg, offset, base); -+} -+ -+static inline void emit_load(unsigned int reg, unsigned int base, -+ unsigned int offset, struct jit_ctx *ctx) -+{ -+ emit_instr(ctx, lw, reg, offset, base); -+} -+ -+static inline void emit_load_byte(unsigned int reg, unsigned int base, -+ unsigned int offset, struct jit_ctx *ctx) -+{ -+ emit_instr(ctx, lb, reg, offset, base); -+} -+ -+static inline void emit_half_load(unsigned int reg, unsigned int base, -+ unsigned int offset, struct jit_ctx *ctx) -+{ -+ emit_instr(ctx, lh, reg, offset, base); -+} -+ -+static inline void emit_half_load_unsigned(unsigned int reg, unsigned int base, -+ unsigned int offset, struct jit_ctx *ctx) -+{ -+ emit_instr(ctx, lhu, reg, offset, base); -+} -+ -+static inline void emit_mul(unsigned int dst, unsigned int src1, -+ unsigned int src2, struct jit_ctx *ctx) -+{ -+ emit_instr(ctx, mul, dst, src1, src2); -+} -+ -+static inline void emit_div(unsigned int dst, unsigned int src, -+ struct jit_ctx *ctx) -+{ -+ if (ctx->target != NULL) { -+ u32 *p = &ctx->target[ctx->idx]; -+ uasm_i_divu(&p, dst, src); -+ p = &ctx->target[ctx->idx + 1]; -+ uasm_i_mflo(&p, dst); -+ } -+ ctx->idx += 2; /* 2 insts */ -+} -+ -+static inline void emit_mod(unsigned int dst, unsigned int src, -+ struct jit_ctx *ctx) -+{ -+ if (ctx->target != NULL) { -+ u32 *p = &ctx->target[ctx->idx]; -+ uasm_i_divu(&p, dst, src); -+ p = &ctx->target[ctx->idx + 1]; -+ uasm_i_mfhi(&p, dst); -+ } -+ ctx->idx += 2; /* 2 insts */ -+} -+ -+static inline void emit_dsll(unsigned int dst, unsigned int src, -+ unsigned int sa, struct jit_ctx *ctx) -+{ -+ emit_instr(ctx, dsll, dst, src, sa); -+} 
-+ -+static inline void emit_dsrl32(unsigned int dst, unsigned int src, -+ unsigned int sa, struct jit_ctx *ctx) -+{ -+ emit_instr(ctx, dsrl32, dst, src, sa); -+} -+ -+static inline void emit_wsbh(unsigned int dst, unsigned int src, -+ struct jit_ctx *ctx) -+{ -+ emit_instr(ctx, wsbh, dst, src); -+} -+ -+/* load pointer to register */ -+static inline void emit_load_ptr(unsigned int dst, unsigned int src, -+ int imm, struct jit_ctx *ctx) -+{ -+ /* src contains the base addr of the 32/64-pointer */ -+ emit_long_instr(ctx, LW, dst, imm, src); -+} -+ -+/* load a function pointer to register */ -+static inline void emit_load_func(unsigned int reg, ptr imm, -+ struct jit_ctx *ctx) -+{ -+ if (IS_ENABLED(CONFIG_64BIT)) { -+ /* At this point imm is always 64-bit */ -+ emit_load_imm(r_tmp, (u64)imm >> 32, ctx); -+ emit_dsll(r_tmp_imm, r_tmp, 16, ctx); /* left shift by 16 */ -+ emit_ori(r_tmp, r_tmp_imm, (imm >> 16) & 0xffff, ctx); -+ emit_dsll(r_tmp_imm, r_tmp, 16, ctx); /* left shift by 16 */ -+ emit_ori(reg, r_tmp_imm, imm & 0xffff, ctx); -+ } else { -+ emit_load_imm(reg, imm, ctx); -+ } -+} -+ -+/* Move to real MIPS register */ -+static inline void emit_reg_move(ptr dst, ptr src, struct jit_ctx *ctx) -+{ -+ emit_long_instr(ctx, ADDU, dst, src, r_zero); -+} -+ -+/* Move to JIT (32-bit) register */ -+static inline void emit_jit_reg_move(ptr dst, ptr src, struct jit_ctx *ctx) -+{ -+ emit_addu(dst, src, r_zero, ctx); -+} -+ -+/* Compute the immediate value for PC-relative branches. */ -+static inline u32 b_imm(unsigned int tgt, struct jit_ctx *ctx) -+{ -+ if (ctx->target == NULL) -+ return 0; -+ -+ /* -+ * We want a pc-relative branch. We only do forward branches -+ * so tgt is always after pc. tgt is the instruction offset -+ * we want to jump to. -+ -+ * Branch on MIPS: -+ * I: target_offset <- sign_extend(offset) -+ * I+1: PC += target_offset (delay slot) -+ * -+ * ctx->idx currently points to the branch instruction -+ * but the offset is added to the delay slot so we need -+ * to subtract 4. -+ */ -+ return ctx->offsets[tgt] - -+ (ctx->idx * 4 - ctx->prologue_bytes) - 4; -+} -+ -+static inline void emit_bcond(int cond, unsigned int reg1, unsigned int reg2, -+ unsigned int imm, struct jit_ctx *ctx) -+{ -+ if (ctx->target != NULL) { -+ u32 *p = &ctx->target[ctx->idx]; -+ -+ switch (cond) { -+ case MIPS_COND_EQ: -+ uasm_i_beq(&p, reg1, reg2, imm); -+ break; -+ case MIPS_COND_NE: -+ uasm_i_bne(&p, reg1, reg2, imm); -+ break; -+ case MIPS_COND_ALL: -+ uasm_i_b(&p, imm); -+ break; -+ default: -+ pr_warn("%s: Unhandled branch conditional: %d\n", -+ __func__, cond); -+ } -+ } -+ ctx->idx++; -+} -+ -+static inline void emit_b(unsigned int imm, struct jit_ctx *ctx) -+{ -+ emit_bcond(MIPS_COND_ALL, r_zero, r_zero, imm, ctx); -+} -+ -+static inline void emit_jalr(unsigned int link, unsigned int reg, -+ struct jit_ctx *ctx) -+{ -+ emit_instr(ctx, jalr, link, reg); -+} -+ -+static inline void emit_jr(unsigned int reg, struct jit_ctx *ctx) -+{ -+ emit_instr(ctx, jr, reg); -+} -+ -+static inline u16 align_sp(unsigned int num) -+{ -+ /* Double word alignment for 32-bit, quadword for 64-bit */ -+ unsigned int align = IS_ENABLED(CONFIG_64BIT) ? 
16 : 8; -+ num = (num + (align - 1)) & -align; -+ return num; -+} -+ -+static void save_bpf_jit_regs(struct jit_ctx *ctx, unsigned offset) -+{ -+ int i = 0, real_off = 0; -+ u32 sflags, tmp_flags; -+ -+ /* Adjust the stack pointer */ -+ if (offset) -+ emit_stack_offset(-align_sp(offset), ctx); -+ -+ tmp_flags = sflags = ctx->flags >> SEEN_SREG_SFT; -+ /* sflags is essentially a bitmap */ -+ while (tmp_flags) { -+ if ((sflags >> i) & 0x1) { -+ emit_store_stack_reg(MIPS_R_S0 + i, r_sp, real_off, -+ ctx); -+ real_off += SZREG; -+ } -+ i++; -+ tmp_flags >>= 1; -+ } -+ -+ /* save return address */ -+ if (ctx->flags & SEEN_CALL) { -+ emit_store_stack_reg(r_ra, r_sp, real_off, ctx); -+ real_off += SZREG; -+ } -+ -+ /* Setup r_M leaving the alignment gap if necessary */ -+ if (ctx->flags & SEEN_MEM) { -+ if (real_off % (SZREG * 2)) -+ real_off += SZREG; -+ emit_long_instr(ctx, ADDIU, r_M, r_sp, real_off); -+ } -+} -+ -+static void restore_bpf_jit_regs(struct jit_ctx *ctx, -+ unsigned int offset) -+{ -+ int i, real_off = 0; -+ u32 sflags, tmp_flags; -+ -+ tmp_flags = sflags = ctx->flags >> SEEN_SREG_SFT; -+ /* sflags is a bitmap */ -+ i = 0; -+ while (tmp_flags) { -+ if ((sflags >> i) & 0x1) { -+ emit_load_stack_reg(MIPS_R_S0 + i, r_sp, real_off, -+ ctx); -+ real_off += SZREG; -+ } -+ i++; -+ tmp_flags >>= 1; -+ } -+ -+ /* restore return address */ -+ if (ctx->flags & SEEN_CALL) -+ emit_load_stack_reg(r_ra, r_sp, real_off, ctx); -+ -+ /* Restore the sp and discard the scrach memory */ -+ if (offset) -+ emit_stack_offset(align_sp(offset), ctx); -+} -+ -+static unsigned int get_stack_depth(struct jit_ctx *ctx) -+{ -+ int sp_off = 0; -+ -+ -+ /* How may s* regs do we need to preserved? */ -+ sp_off += hweight32(ctx->flags >> SEEN_SREG_SFT) * SZREG; -+ -+ if (ctx->flags & SEEN_MEM) -+ sp_off += 4 * BPF_MEMWORDS; /* BPF_MEMWORDS are 32-bit */ -+ -+ if (ctx->flags & SEEN_CALL) -+ sp_off += SZREG; /* Space for our ra register */ -+ -+ return sp_off; -+} -+ -+static void build_prologue(struct jit_ctx *ctx) -+{ -+ int sp_off; -+ -+ /* Calculate the total offset for the stack pointer */ -+ sp_off = get_stack_depth(ctx); -+ save_bpf_jit_regs(ctx, sp_off); -+ -+ if (ctx->flags & SEEN_SKB) -+ emit_reg_move(r_skb, MIPS_R_A0, ctx); -+ -+ if (ctx->flags & SEEN_SKB_DATA) { -+ /* Load packet length */ -+ emit_load(r_skb_len, r_skb, offsetof(struct sk_buff, len), -+ ctx); -+ emit_load(r_tmp, r_skb, offsetof(struct sk_buff, data_len), -+ ctx); -+ /* Load the data pointer */ -+ emit_load_ptr(r_skb_data, r_skb, -+ offsetof(struct sk_buff, data), ctx); -+ /* Load the header length */ -+ emit_subu(r_skb_hl, r_skb_len, r_tmp, ctx); -+ } -+ -+ if (ctx->flags & SEEN_X) -+ emit_jit_reg_move(r_X, r_zero, ctx); -+ -+ /* -+ * Do not leak kernel data to userspace, we only need to clear -+ * r_A if it is ever used. In fact if it is never used, we -+ * will not save/restore it, so clearing it in this case would -+ * corrupt the state of the caller. -+ */ -+ if (bpf_needs_clear_a(&ctx->skf->insns[0]) && -+ (ctx->flags & SEEN_A)) -+ emit_jit_reg_move(r_A, r_zero, ctx); -+} -+ -+static void build_epilogue(struct jit_ctx *ctx) -+{ -+ unsigned int sp_off; -+ -+ /* Calculate the total offset for the stack pointer */ -+ -+ sp_off = get_stack_depth(ctx); -+ restore_bpf_jit_regs(ctx, sp_off); -+ -+ /* Return */ -+ emit_jr(r_ra, ctx); -+ emit_nop(ctx); -+} -+ -+#define CHOOSE_LOAD_FUNC(K, func) \ -+ ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? 
func##_negative : func) : \ -+ func##_positive) -+ -+static int build_body(struct jit_ctx *ctx) -+{ -+ const struct bpf_prog *prog = ctx->skf; -+ const struct sock_filter *inst; -+ unsigned int i, off, condt; -+ u32 k, b_off __maybe_unused; -+ u8 (*sk_load_func)(unsigned long *skb, int offset); -+ -+ for (i = 0; i < prog->len; i++) { -+ u16 code; -+ -+ inst = &(prog->insns[i]); -+ pr_debug("%s: code->0x%02x, jt->0x%x, jf->0x%x, k->0x%x\n", -+ __func__, inst->code, inst->jt, inst->jf, inst->k); -+ k = inst->k; -+ code = bpf_anc_helper(inst); -+ -+ if (ctx->target == NULL) -+ ctx->offsets[i] = ctx->idx * 4; -+ -+ switch (code) { -+ case BPF_LD | BPF_IMM: -+ /* A <- k ==> li r_A, k */ -+ ctx->flags |= SEEN_A; -+ emit_load_imm(r_A, k, ctx); -+ break; -+ case BPF_LD | BPF_W | BPF_LEN: -+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4); -+ /* A <- len ==> lw r_A, offset(skb) */ -+ ctx->flags |= SEEN_SKB | SEEN_A; -+ off = offsetof(struct sk_buff, len); -+ emit_load(r_A, r_skb, off, ctx); -+ break; -+ case BPF_LD | BPF_MEM: -+ /* A <- M[k] ==> lw r_A, offset(M) */ -+ ctx->flags |= SEEN_MEM | SEEN_A; -+ emit_load(r_A, r_M, SCRATCH_OFF(k), ctx); -+ break; -+ case BPF_LD | BPF_W | BPF_ABS: -+ /* A <- P[k:4] */ -+ sk_load_func = CHOOSE_LOAD_FUNC(k, sk_load_word); -+ goto load; -+ case BPF_LD | BPF_H | BPF_ABS: -+ /* A <- P[k:2] */ -+ sk_load_func = CHOOSE_LOAD_FUNC(k, sk_load_half); -+ goto load; -+ case BPF_LD | BPF_B | BPF_ABS: -+ /* A <- P[k:1] */ -+ sk_load_func = CHOOSE_LOAD_FUNC(k, sk_load_byte); -+load: -+ emit_load_imm(r_off, k, ctx); -+load_common: -+ ctx->flags |= SEEN_CALL | SEEN_OFF | -+ SEEN_SKB | SEEN_A | SEEN_SKB_DATA; -+ -+ emit_load_func(r_s0, (ptr)sk_load_func, ctx); -+ emit_reg_move(MIPS_R_A0, r_skb, ctx); -+ emit_jalr(MIPS_R_RA, r_s0, ctx); -+ /* Load second argument to delay slot */ -+ emit_reg_move(MIPS_R_A1, r_off, ctx); -+ /* Check the error value */ -+ emit_bcond(MIPS_COND_EQ, r_ret, 0, b_imm(i + 1, ctx), -+ ctx); -+ /* Load return register on DS for failures */ -+ emit_reg_move(r_ret, r_zero, ctx); -+ /* Return with error */ -+ emit_b(b_imm(prog->len, ctx), ctx); -+ emit_nop(ctx); -+ break; -+ case BPF_LD | BPF_W | BPF_IND: -+ /* A <- P[X + k:4] */ -+ sk_load_func = sk_load_word; -+ goto load_ind; -+ case BPF_LD | BPF_H | BPF_IND: -+ /* A <- P[X + k:2] */ -+ sk_load_func = sk_load_half; -+ goto load_ind; -+ case BPF_LD | BPF_B | BPF_IND: -+ /* A <- P[X + k:1] */ -+ sk_load_func = sk_load_byte; -+load_ind: -+ ctx->flags |= SEEN_OFF | SEEN_X; -+ emit_addiu(r_off, r_X, k, ctx); -+ goto load_common; -+ case BPF_LDX | BPF_IMM: -+ /* X <- k */ -+ ctx->flags |= SEEN_X; -+ emit_load_imm(r_X, k, ctx); -+ break; -+ case BPF_LDX | BPF_MEM: -+ /* X <- M[k] */ -+ ctx->flags |= SEEN_X | SEEN_MEM; -+ emit_load(r_X, r_M, SCRATCH_OFF(k), ctx); -+ break; -+ case BPF_LDX | BPF_W | BPF_LEN: -+ /* X <- len */ -+ ctx->flags |= SEEN_X | SEEN_SKB; -+ off = offsetof(struct sk_buff, len); -+ emit_load(r_X, r_skb, off, ctx); -+ break; -+ case BPF_LDX | BPF_B | BPF_MSH: -+ /* X <- 4 * (P[k:1] & 0xf) */ -+ ctx->flags |= SEEN_X | SEEN_CALL | SEEN_SKB; -+ /* Load offset to a1 */ -+ emit_load_func(r_s0, (ptr)sk_load_byte, ctx); -+ /* -+ * This may emit two instructions so it may not fit -+ * in the delay slot. So use a0 in the delay slot. 
-+ */ -+ emit_load_imm(MIPS_R_A1, k, ctx); -+ emit_jalr(MIPS_R_RA, r_s0, ctx); -+ emit_reg_move(MIPS_R_A0, r_skb, ctx); /* delay slot */ -+ /* Check the error value */ -+ emit_bcond(MIPS_COND_NE, r_ret, 0, -+ b_imm(prog->len, ctx), ctx); -+ emit_reg_move(r_ret, r_zero, ctx); -+ /* We are good */ -+ /* X <- P[1:K] & 0xf */ -+ emit_andi(r_X, r_A, 0xf, ctx); -+ /* X << 2 */ -+ emit_b(b_imm(i + 1, ctx), ctx); -+ emit_sll(r_X, r_X, 2, ctx); /* delay slot */ -+ break; -+ case BPF_ST: -+ /* M[k] <- A */ -+ ctx->flags |= SEEN_MEM | SEEN_A; -+ emit_store(r_A, r_M, SCRATCH_OFF(k), ctx); -+ break; -+ case BPF_STX: -+ /* M[k] <- X */ -+ ctx->flags |= SEEN_MEM | SEEN_X; -+ emit_store(r_X, r_M, SCRATCH_OFF(k), ctx); -+ break; -+ case BPF_ALU | BPF_ADD | BPF_K: -+ /* A += K */ -+ ctx->flags |= SEEN_A; -+ emit_addiu(r_A, r_A, k, ctx); -+ break; -+ case BPF_ALU | BPF_ADD | BPF_X: -+ /* A += X */ -+ ctx->flags |= SEEN_A | SEEN_X; -+ emit_addu(r_A, r_A, r_X, ctx); -+ break; -+ case BPF_ALU | BPF_SUB | BPF_K: -+ /* A -= K */ -+ ctx->flags |= SEEN_A; -+ emit_addiu(r_A, r_A, -k, ctx); -+ break; -+ case BPF_ALU | BPF_SUB | BPF_X: -+ /* A -= X */ -+ ctx->flags |= SEEN_A | SEEN_X; -+ emit_subu(r_A, r_A, r_X, ctx); -+ break; -+ case BPF_ALU | BPF_MUL | BPF_K: -+ /* A *= K */ -+ /* Load K to scratch register before MUL */ -+ ctx->flags |= SEEN_A; -+ emit_load_imm(r_s0, k, ctx); -+ emit_mul(r_A, r_A, r_s0, ctx); -+ break; -+ case BPF_ALU | BPF_MUL | BPF_X: -+ /* A *= X */ -+ ctx->flags |= SEEN_A | SEEN_X; -+ emit_mul(r_A, r_A, r_X, ctx); -+ break; -+ case BPF_ALU | BPF_DIV | BPF_K: -+ /* A /= k */ -+ if (k == 1) -+ break; -+ if (optimize_div(&k)) { -+ ctx->flags |= SEEN_A; -+ emit_srl(r_A, r_A, k, ctx); -+ break; -+ } -+ ctx->flags |= SEEN_A; -+ emit_load_imm(r_s0, k, ctx); -+ emit_div(r_A, r_s0, ctx); -+ break; -+ case BPF_ALU | BPF_MOD | BPF_K: -+ /* A %= k */ -+ if (k == 1) { -+ ctx->flags |= SEEN_A; -+ emit_jit_reg_move(r_A, r_zero, ctx); -+ } else { -+ ctx->flags |= SEEN_A; -+ emit_load_imm(r_s0, k, ctx); -+ emit_mod(r_A, r_s0, ctx); -+ } -+ break; -+ case BPF_ALU | BPF_DIV | BPF_X: -+ /* A /= X */ -+ ctx->flags |= SEEN_X | SEEN_A; -+ /* Check if r_X is zero */ -+ emit_bcond(MIPS_COND_EQ, r_X, r_zero, -+ b_imm(prog->len, ctx), ctx); -+ emit_load_imm(r_ret, 0, ctx); /* delay slot */ -+ emit_div(r_A, r_X, ctx); -+ break; -+ case BPF_ALU | BPF_MOD | BPF_X: -+ /* A %= X */ -+ ctx->flags |= SEEN_X | SEEN_A; -+ /* Check if r_X is zero */ -+ emit_bcond(MIPS_COND_EQ, r_X, r_zero, -+ b_imm(prog->len, ctx), ctx); -+ emit_load_imm(r_ret, 0, ctx); /* delay slot */ -+ emit_mod(r_A, r_X, ctx); -+ break; -+ case BPF_ALU | BPF_OR | BPF_K: -+ /* A |= K */ -+ ctx->flags |= SEEN_A; -+ emit_ori(r_A, r_A, k, ctx); -+ break; -+ case BPF_ALU | BPF_OR | BPF_X: -+ /* A |= X */ -+ ctx->flags |= SEEN_A; -+ emit_ori(r_A, r_A, r_X, ctx); -+ break; -+ case BPF_ALU | BPF_XOR | BPF_K: -+ /* A ^= k */ -+ ctx->flags |= SEEN_A; -+ emit_xori(r_A, r_A, k, ctx); -+ break; -+ case BPF_ANC | SKF_AD_ALU_XOR_X: -+ case BPF_ALU | BPF_XOR | BPF_X: -+ /* A ^= X */ -+ ctx->flags |= SEEN_A; -+ emit_xor(r_A, r_A, r_X, ctx); -+ break; -+ case BPF_ALU | BPF_AND | BPF_K: -+ /* A &= K */ -+ ctx->flags |= SEEN_A; -+ emit_andi(r_A, r_A, k, ctx); -+ break; -+ case BPF_ALU | BPF_AND | BPF_X: -+ /* A &= X */ -+ ctx->flags |= SEEN_A | SEEN_X; -+ emit_and(r_A, r_A, r_X, ctx); -+ break; -+ case BPF_ALU | BPF_LSH | BPF_K: -+ /* A <<= K */ -+ ctx->flags |= SEEN_A; -+ emit_sll(r_A, r_A, k, ctx); -+ break; -+ case BPF_ALU | BPF_LSH | BPF_X: -+ /* A <<= X */ -+ ctx->flags |= 
SEEN_A | SEEN_X; -+ emit_sllv(r_A, r_A, r_X, ctx); -+ break; -+ case BPF_ALU | BPF_RSH | BPF_K: -+ /* A >>= K */ -+ ctx->flags |= SEEN_A; -+ emit_srl(r_A, r_A, k, ctx); -+ break; -+ case BPF_ALU | BPF_RSH | BPF_X: -+ ctx->flags |= SEEN_A | SEEN_X; -+ emit_srlv(r_A, r_A, r_X, ctx); -+ break; -+ case BPF_ALU | BPF_NEG: -+ /* A = -A */ -+ ctx->flags |= SEEN_A; -+ emit_neg(r_A, ctx); -+ break; -+ case BPF_JMP | BPF_JA: -+ /* pc += K */ -+ emit_b(b_imm(i + k + 1, ctx), ctx); -+ emit_nop(ctx); -+ break; -+ case BPF_JMP | BPF_JEQ | BPF_K: -+ /* pc += ( A == K ) ? pc->jt : pc->jf */ -+ condt = MIPS_COND_EQ | MIPS_COND_K; -+ goto jmp_cmp; -+ case BPF_JMP | BPF_JEQ | BPF_X: -+ ctx->flags |= SEEN_X; -+ /* pc += ( A == X ) ? pc->jt : pc->jf */ -+ condt = MIPS_COND_EQ | MIPS_COND_X; -+ goto jmp_cmp; -+ case BPF_JMP | BPF_JGE | BPF_K: -+ /* pc += ( A >= K ) ? pc->jt : pc->jf */ -+ condt = MIPS_COND_GE | MIPS_COND_K; -+ goto jmp_cmp; -+ case BPF_JMP | BPF_JGE | BPF_X: -+ ctx->flags |= SEEN_X; -+ /* pc += ( A >= X ) ? pc->jt : pc->jf */ -+ condt = MIPS_COND_GE | MIPS_COND_X; -+ goto jmp_cmp; -+ case BPF_JMP | BPF_JGT | BPF_K: -+ /* pc += ( A > K ) ? pc->jt : pc->jf */ -+ condt = MIPS_COND_GT | MIPS_COND_K; -+ goto jmp_cmp; -+ case BPF_JMP | BPF_JGT | BPF_X: -+ ctx->flags |= SEEN_X; -+ /* pc += ( A > X ) ? pc->jt : pc->jf */ -+ condt = MIPS_COND_GT | MIPS_COND_X; -+jmp_cmp: -+ /* Greater or Equal */ -+ if ((condt & MIPS_COND_GE) || -+ (condt & MIPS_COND_GT)) { -+ if (condt & MIPS_COND_K) { /* K */ -+ ctx->flags |= SEEN_A; -+ emit_sltiu(r_s0, r_A, k, ctx); -+ } else { /* X */ -+ ctx->flags |= SEEN_A | -+ SEEN_X; -+ emit_sltu(r_s0, r_A, r_X, ctx); -+ } -+ /* A < (K|X) ? r_scrach = 1 */ -+ b_off = b_imm(i + inst->jf + 1, ctx); -+ emit_bcond(MIPS_COND_NE, r_s0, r_zero, b_off, -+ ctx); -+ emit_nop(ctx); -+ /* A > (K|X) ? scratch = 0 */ -+ if (condt & MIPS_COND_GT) { -+ /* Checking for equality */ -+ ctx->flags |= SEEN_A | SEEN_X; -+ if (condt & MIPS_COND_K) -+ emit_load_imm(r_s0, k, ctx); -+ else -+ emit_jit_reg_move(r_s0, r_X, -+ ctx); -+ b_off = b_imm(i + inst->jf + 1, ctx); -+ emit_bcond(MIPS_COND_EQ, r_A, r_s0, -+ b_off, ctx); -+ emit_nop(ctx); -+ /* Finally, A > K|X */ -+ b_off = b_imm(i + inst->jt + 1, ctx); -+ emit_b(b_off, ctx); -+ emit_nop(ctx); -+ } else { -+ /* A >= (K|X) so jump */ -+ b_off = b_imm(i + inst->jt + 1, ctx); -+ emit_b(b_off, ctx); -+ emit_nop(ctx); -+ } -+ } else { -+ /* A == K|X */ -+ if (condt & MIPS_COND_K) { /* K */ -+ ctx->flags |= SEEN_A; -+ emit_load_imm(r_s0, k, ctx); -+ /* jump true */ -+ b_off = b_imm(i + inst->jt + 1, ctx); -+ emit_bcond(MIPS_COND_EQ, r_A, r_s0, -+ b_off, ctx); -+ emit_nop(ctx); -+ /* jump false */ -+ b_off = b_imm(i + inst->jf + 1, -+ ctx); -+ emit_bcond(MIPS_COND_NE, r_A, r_s0, -+ b_off, ctx); -+ emit_nop(ctx); -+ } else { /* X */ -+ /* jump true */ -+ ctx->flags |= SEEN_A | SEEN_X; -+ b_off = b_imm(i + inst->jt + 1, -+ ctx); -+ emit_bcond(MIPS_COND_EQ, r_A, r_X, -+ b_off, ctx); -+ emit_nop(ctx); -+ /* jump false */ -+ b_off = b_imm(i + inst->jf + 1, ctx); -+ emit_bcond(MIPS_COND_NE, r_A, r_X, -+ b_off, ctx); -+ emit_nop(ctx); -+ } -+ } -+ break; -+ case BPF_JMP | BPF_JSET | BPF_K: -+ ctx->flags |= SEEN_A; -+ /* pc += (A & K) ? 
pc -> jt : pc -> jf */ -+ emit_load_imm(r_s1, k, ctx); -+ emit_and(r_s0, r_A, r_s1, ctx); -+ /* jump true */ -+ b_off = b_imm(i + inst->jt + 1, ctx); -+ emit_bcond(MIPS_COND_NE, r_s0, r_zero, b_off, ctx); -+ emit_nop(ctx); -+ /* jump false */ -+ b_off = b_imm(i + inst->jf + 1, ctx); -+ emit_b(b_off, ctx); -+ emit_nop(ctx); -+ break; -+ case BPF_JMP | BPF_JSET | BPF_X: -+ ctx->flags |= SEEN_X | SEEN_A; -+ /* pc += (A & X) ? pc -> jt : pc -> jf */ -+ emit_and(r_s0, r_A, r_X, ctx); -+ /* jump true */ -+ b_off = b_imm(i + inst->jt + 1, ctx); -+ emit_bcond(MIPS_COND_NE, r_s0, r_zero, b_off, ctx); -+ emit_nop(ctx); -+ /* jump false */ -+ b_off = b_imm(i + inst->jf + 1, ctx); -+ emit_b(b_off, ctx); -+ emit_nop(ctx); -+ break; -+ case BPF_RET | BPF_A: -+ ctx->flags |= SEEN_A; -+ if (i != prog->len - 1) -+ /* -+ * If this is not the last instruction -+ * then jump to the epilogue -+ */ -+ emit_b(b_imm(prog->len, ctx), ctx); -+ emit_reg_move(r_ret, r_A, ctx); /* delay slot */ -+ break; -+ case BPF_RET | BPF_K: -+ /* -+ * It can emit two instructions so it does not fit on -+ * the delay slot. -+ */ -+ emit_load_imm(r_ret, k, ctx); -+ if (i != prog->len - 1) { -+ /* -+ * If this is not the last instruction -+ * then jump to the epilogue -+ */ -+ emit_b(b_imm(prog->len, ctx), ctx); -+ emit_nop(ctx); -+ } -+ break; -+ case BPF_MISC | BPF_TAX: -+ /* X = A */ -+ ctx->flags |= SEEN_X | SEEN_A; -+ emit_jit_reg_move(r_X, r_A, ctx); -+ break; -+ case BPF_MISC | BPF_TXA: -+ /* A = X */ -+ ctx->flags |= SEEN_A | SEEN_X; -+ emit_jit_reg_move(r_A, r_X, ctx); -+ break; -+ /* AUX */ -+ case BPF_ANC | SKF_AD_PROTOCOL: -+ /* A = ntohs(skb->protocol */ -+ ctx->flags |= SEEN_SKB | SEEN_OFF | SEEN_A; -+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, -+ protocol) != 2); -+ off = offsetof(struct sk_buff, protocol); -+ emit_half_load(r_A, r_skb, off, ctx); -+#ifdef CONFIG_CPU_LITTLE_ENDIAN -+ /* This needs little endian fixup */ -+ if (cpu_has_wsbh) { -+ /* R2 and later have the wsbh instruction */ -+ emit_wsbh(r_A, r_A, ctx); -+ } else { -+ /* Get first byte */ -+ emit_andi(r_tmp_imm, r_A, 0xff, ctx); -+ /* Shift it */ -+ emit_sll(r_tmp, r_tmp_imm, 8, ctx); -+ /* Get second byte */ -+ emit_srl(r_tmp_imm, r_A, 8, ctx); -+ emit_andi(r_tmp_imm, r_tmp_imm, 0xff, ctx); -+ /* Put everyting together in r_A */ -+ emit_or(r_A, r_tmp, r_tmp_imm, ctx); -+ } -+#endif -+ break; -+ case BPF_ANC | SKF_AD_CPU: -+ ctx->flags |= SEEN_A | SEEN_OFF; -+ /* A = current_thread_info()->cpu */ -+ BUILD_BUG_ON(FIELD_SIZEOF(struct thread_info, -+ cpu) != 4); -+ off = offsetof(struct thread_info, cpu); -+ /* $28/gp points to the thread_info struct */ -+ emit_load(r_A, 28, off, ctx); -+ break; -+ case BPF_ANC | SKF_AD_IFINDEX: -+ /* A = skb->dev->ifindex */ -+ case BPF_ANC | SKF_AD_HATYPE: -+ /* A = skb->dev->type */ -+ ctx->flags |= SEEN_SKB | SEEN_A; -+ off = offsetof(struct sk_buff, dev); -+ /* Load *dev pointer */ -+ emit_load_ptr(r_s0, r_skb, off, ctx); -+ /* error (0) in the delay slot */ -+ emit_bcond(MIPS_COND_EQ, r_s0, r_zero, -+ b_imm(prog->len, ctx), ctx); -+ emit_reg_move(r_ret, r_zero, ctx); -+ if (code == (BPF_ANC | SKF_AD_IFINDEX)) { -+ BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4); -+ off = offsetof(struct net_device, ifindex); -+ emit_load(r_A, r_s0, off, ctx); -+ } else { /* (code == (BPF_ANC | SKF_AD_HATYPE) */ -+ BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2); -+ off = offsetof(struct net_device, type); -+ emit_half_load_unsigned(r_A, r_s0, off, ctx); -+ } -+ break; -+ case BPF_ANC | SKF_AD_MARK: -+ ctx->flags 
|= SEEN_SKB | SEEN_A; -+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4); -+ off = offsetof(struct sk_buff, mark); -+ emit_load(r_A, r_skb, off, ctx); -+ break; -+ case BPF_ANC | SKF_AD_RXHASH: -+ ctx->flags |= SEEN_SKB | SEEN_A; -+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4); -+ off = offsetof(struct sk_buff, hash); -+ emit_load(r_A, r_skb, off, ctx); -+ break; -+ case BPF_ANC | SKF_AD_VLAN_TAG: -+ ctx->flags |= SEEN_SKB | SEEN_A; -+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, -+ vlan_tci) != 2); -+ off = offsetof(struct sk_buff, vlan_tci); -+ emit_half_load_unsigned(r_A, r_skb, off, ctx); -+ break; -+ case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT: -+ ctx->flags |= SEEN_SKB | SEEN_A; -+ emit_load_byte(r_A, r_skb, PKT_VLAN_PRESENT_OFFSET(), ctx); -+ if (PKT_VLAN_PRESENT_BIT) -+ emit_srl(r_A, r_A, PKT_VLAN_PRESENT_BIT, ctx); -+ if (PKT_VLAN_PRESENT_BIT < 7) -+ emit_andi(r_A, r_A, 1, ctx); -+ break; -+ case BPF_ANC | SKF_AD_PKTTYPE: -+ ctx->flags |= SEEN_SKB; -+ -+ emit_load_byte(r_tmp, r_skb, PKT_TYPE_OFFSET(), ctx); -+ /* Keep only the last 3 bits */ -+ emit_andi(r_A, r_tmp, PKT_TYPE_MAX, ctx); -+#ifdef __BIG_ENDIAN_BITFIELD -+ /* Get the actual packet type to the lower 3 bits */ -+ emit_srl(r_A, r_A, 5, ctx); -+#endif -+ break; -+ case BPF_ANC | SKF_AD_QUEUE: -+ ctx->flags |= SEEN_SKB | SEEN_A; -+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, -+ queue_mapping) != 2); -+ BUILD_BUG_ON(offsetof(struct sk_buff, -+ queue_mapping) > 0xff); -+ off = offsetof(struct sk_buff, queue_mapping); -+ emit_half_load_unsigned(r_A, r_skb, off, ctx); -+ break; -+ default: -+ pr_debug("%s: Unhandled opcode: 0x%02x\n", __FILE__, -+ inst->code); -+ return -1; -+ } -+ } -+ -+ /* compute offsets only during the first pass */ -+ if (ctx->target == NULL) -+ ctx->offsets[i] = ctx->idx * 4; -+ -+ return 0; -+} -+ -+void bpf_jit_compile(struct bpf_prog *fp) -+{ -+ struct jit_ctx ctx; -+ unsigned int alloc_size, tmp_idx; -+ -+ if (!bpf_jit_enable) -+ return; -+ -+ memset(&ctx, 0, sizeof(ctx)); -+ -+ ctx.offsets = kcalloc(fp->len + 1, sizeof(*ctx.offsets), GFP_KERNEL); -+ if (ctx.offsets == NULL) -+ return; -+ -+ ctx.skf = fp; -+ -+ if (build_body(&ctx)) -+ goto out; -+ -+ tmp_idx = ctx.idx; -+ build_prologue(&ctx); -+ ctx.prologue_bytes = (ctx.idx - tmp_idx) * 4; -+ /* just to complete the ctx.idx count */ -+ build_epilogue(&ctx); -+ -+ alloc_size = 4 * ctx.idx; -+ ctx.target = module_alloc(alloc_size); -+ if (ctx.target == NULL) -+ goto out; -+ -+ /* Clean it */ -+ memset(ctx.target, 0, alloc_size); -+ -+ ctx.idx = 0; -+ -+ /* Generate the actual JIT code */ -+ build_prologue(&ctx); -+ build_body(&ctx); -+ build_epilogue(&ctx); -+ -+ /* Update the icache */ -+ flush_icache_range((ptr)ctx.target, (ptr)(ctx.target + ctx.idx)); -+ -+ if (bpf_jit_enable > 1) -+ /* Dump JIT code */ -+ bpf_jit_dump(fp->len, alloc_size, 2, ctx.target); -+ -+ fp->bpf_func = (void *)ctx.target; -+ fp->jited = 1; -+ -+out: -+ kfree(ctx.offsets); -+} -+ -+void bpf_jit_free(struct bpf_prog *fp) -+{ -+ if (fp->jited) -+ module_memfree(fp->bpf_func); -+ -+ bpf_prog_unlock_free(fp); -+} ---- /dev/null -+++ b/arch/mips/net/bpf_jit_asm.S -@@ -0,0 +1,285 @@ -+/* -+ * bpf_jib_asm.S: Packet/header access helper functions for MIPS/MIPS64 BPF -+ * compiler. -+ * -+ * Copyright (C) 2015 Imagination Technologies Ltd. 
-+ * Author: Markos Chandras -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License as published by the -+ * Free Software Foundation; version 2 of the License. -+ */ -+ -+#include -+#include -+#include -+#include "bpf_jit.h" -+ -+/* ABI -+ * -+ * r_skb_hl skb header length -+ * r_skb_data skb data -+ * r_off(a1) offset register -+ * r_A BPF register A -+ * r_X PF register X -+ * r_skb(a0) *skb -+ * r_M *scratch memory -+ * r_skb_le skb length -+ * r_s0 Scratch register 0 -+ * r_s1 Scratch register 1 -+ * -+ * On entry: -+ * a0: *skb -+ * a1: offset (imm or imm + X) -+ * -+ * All non-BPF-ABI registers are free for use. On return, we only -+ * care about r_ret. The BPF-ABI registers are assumed to remain -+ * unmodified during the entire filter operation. -+ */ -+ -+#define skb a0 -+#define offset a1 -+#define SKF_LL_OFF (-0x200000) /* Can't include linux/filter.h in assembly */ -+ -+ /* We know better :) so prevent assembler reordering etc */ -+ .set noreorder -+ -+#define is_offset_negative(TYPE) \ -+ /* If offset is negative we have more work to do */ \ -+ slti t0, offset, 0; \ -+ bgtz t0, bpf_slow_path_##TYPE##_neg; \ -+ /* Be careful what follows in DS. */ -+ -+#define is_offset_in_header(SIZE, TYPE) \ -+ /* Reading from header? */ \ -+ addiu $r_s0, $r_skb_hl, -SIZE; \ -+ slt t0, $r_s0, offset; \ -+ bgtz t0, bpf_slow_path_##TYPE; \ -+ -+LEAF(sk_load_word) -+ is_offset_negative(word) -+FEXPORT(sk_load_word_positive) -+ is_offset_in_header(4, word) -+ /* Offset within header boundaries */ -+ PTR_ADDU t1, $r_skb_data, offset -+ .set reorder -+ lw $r_A, 0(t1) -+ .set noreorder -+#ifdef CONFIG_CPU_LITTLE_ENDIAN -+# if MIPS_ISA_REV >= 2 -+ wsbh t0, $r_A -+ rotr $r_A, t0, 16 -+# else -+ sll t0, $r_A, 24 -+ srl t1, $r_A, 24 -+ srl t2, $r_A, 8 -+ or t0, t0, t1 -+ andi t2, t2, 0xff00 -+ andi t1, $r_A, 0xff00 -+ or t0, t0, t2 -+ sll t1, t1, 8 -+ or $r_A, t0, t1 -+# endif -+#endif -+ jr $r_ra -+ move $r_ret, zero -+ END(sk_load_word) -+ -+LEAF(sk_load_half) -+ is_offset_negative(half) -+FEXPORT(sk_load_half_positive) -+ is_offset_in_header(2, half) -+ /* Offset within header boundaries */ -+ PTR_ADDU t1, $r_skb_data, offset -+ lhu $r_A, 0(t1) -+#ifdef CONFIG_CPU_LITTLE_ENDIAN -+# if MIPS_ISA_REV >= 2 -+ wsbh $r_A, $r_A -+# else -+ sll t0, $r_A, 8 -+ srl t1, $r_A, 8 -+ andi t0, t0, 0xff00 -+ or $r_A, t0, t1 -+# endif -+#endif -+ jr $r_ra -+ move $r_ret, zero -+ END(sk_load_half) -+ -+LEAF(sk_load_byte) -+ is_offset_negative(byte) -+FEXPORT(sk_load_byte_positive) -+ is_offset_in_header(1, byte) -+ /* Offset within header boundaries */ -+ PTR_ADDU t1, $r_skb_data, offset -+ lbu $r_A, 0(t1) -+ jr $r_ra -+ move $r_ret, zero -+ END(sk_load_byte) -+ -+/* -+ * call skb_copy_bits: -+ * (prototype in linux/skbuff.h) -+ * -+ * int skb_copy_bits(sk_buff *skb, int offset, void *to, int len) -+ * -+ * o32 mandates we leave 4 spaces for argument registers in case -+ * the callee needs to use them. Even though we don't care about -+ * the argument registers ourselves, we need to allocate that space -+ * to remain ABI compliant since the callee may want to use that space. -+ * We also allocate 2 more spaces for $r_ra and our return register (*to). -+ * -+ * n64 is a bit different. The *caller* will allocate the space to preserve -+ * the arguments. So in 64-bit kernels, we allocate the 4-arg space for no -+ * good reason but it does not matter that much really. 
-+ * -+ * (void *to) is returned in r_s0 -+ * -+ */ -+#ifdef CONFIG_CPU_LITTLE_ENDIAN -+#define DS_OFFSET(SIZE) (4 * SZREG) -+#else -+#define DS_OFFSET(SIZE) ((4 * SZREG) + (4 - SIZE)) -+#endif -+#define bpf_slow_path_common(SIZE) \ -+ /* Quick check. Are we within reasonable boundaries? */ \ -+ LONG_ADDIU $r_s1, $r_skb_len, -SIZE; \ -+ sltu $r_s0, offset, $r_s1; \ -+ beqz $r_s0, fault; \ -+ /* Load 4th argument in DS */ \ -+ LONG_ADDIU a3, zero, SIZE; \ -+ PTR_ADDIU $r_sp, $r_sp, -(6 * SZREG); \ -+ PTR_LA t0, skb_copy_bits; \ -+ PTR_S $r_ra, (5 * SZREG)($r_sp); \ -+ /* Assign low slot to a2 */ \ -+ PTR_ADDIU a2, $r_sp, DS_OFFSET(SIZE); \ -+ jalr t0; \ -+ /* Reset our destination slot (DS but it's ok) */ \ -+ INT_S zero, (4 * SZREG)($r_sp); \ -+ /* \ -+ * skb_copy_bits returns 0 on success and -EFAULT \ -+ * on error. Our data live in a2. Do not bother with \ -+ * our data if an error has been returned. \ -+ */ \ -+ /* Restore our frame */ \ -+ PTR_L $r_ra, (5 * SZREG)($r_sp); \ -+ INT_L $r_s0, (4 * SZREG)($r_sp); \ -+ bltz v0, fault; \ -+ PTR_ADDIU $r_sp, $r_sp, 6 * SZREG; \ -+ move $r_ret, zero; \ -+ -+NESTED(bpf_slow_path_word, (6 * SZREG), $r_sp) -+ bpf_slow_path_common(4) -+#ifdef CONFIG_CPU_LITTLE_ENDIAN -+# if MIPS_ISA_REV >= 2 -+ wsbh t0, $r_s0 -+ jr $r_ra -+ rotr $r_A, t0, 16 -+# else -+ sll t0, $r_s0, 24 -+ srl t1, $r_s0, 24 -+ srl t2, $r_s0, 8 -+ or t0, t0, t1 -+ andi t2, t2, 0xff00 -+ andi t1, $r_s0, 0xff00 -+ or t0, t0, t2 -+ sll t1, t1, 8 -+ jr $r_ra -+ or $r_A, t0, t1 -+# endif -+#else -+ jr $r_ra -+ move $r_A, $r_s0 -+#endif -+ -+ END(bpf_slow_path_word) -+ -+NESTED(bpf_slow_path_half, (6 * SZREG), $r_sp) -+ bpf_slow_path_common(2) -+#ifdef CONFIG_CPU_LITTLE_ENDIAN -+# if MIPS_ISA_REV >= 2 -+ jr $r_ra -+ wsbh $r_A, $r_s0 -+# else -+ sll t0, $r_s0, 8 -+ andi t1, $r_s0, 0xff00 -+ andi t0, t0, 0xff00 -+ srl t1, t1, 8 -+ jr $r_ra -+ or $r_A, t0, t1 -+# endif -+#else -+ jr $r_ra -+ move $r_A, $r_s0 -+#endif -+ -+ END(bpf_slow_path_half) -+ -+NESTED(bpf_slow_path_byte, (6 * SZREG), $r_sp) -+ bpf_slow_path_common(1) -+ jr $r_ra -+ move $r_A, $r_s0 -+ -+ END(bpf_slow_path_byte) -+ -+/* -+ * Negative entry points -+ */ -+ .macro bpf_is_end_of_data -+ li t0, SKF_LL_OFF -+ /* Reading link layer data? */ -+ slt t1, offset, t0 -+ bgtz t1, fault -+ /* Be careful what follows in DS. 
*/ -+ .endm -+/* -+ * call skb_copy_bits: -+ * (prototype in linux/filter.h) -+ * -+ * void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, -+ * int k, unsigned int size) -+ * -+ * see above (bpf_slow_path_common) for ABI restrictions -+ */ -+#define bpf_negative_common(SIZE) \ -+ PTR_ADDIU $r_sp, $r_sp, -(6 * SZREG); \ -+ PTR_LA t0, bpf_internal_load_pointer_neg_helper; \ -+ PTR_S $r_ra, (5 * SZREG)($r_sp); \ -+ jalr t0; \ -+ li a2, SIZE; \ -+ PTR_L $r_ra, (5 * SZREG)($r_sp); \ -+ /* Check return pointer */ \ -+ beqz v0, fault; \ -+ PTR_ADDIU $r_sp, $r_sp, 6 * SZREG; \ -+ /* Preserve our pointer */ \ -+ move $r_s0, v0; \ -+ /* Set return value */ \ -+ move $r_ret, zero; \ -+ -+bpf_slow_path_word_neg: -+ bpf_is_end_of_data -+NESTED(sk_load_word_negative, (6 * SZREG), $r_sp) -+ bpf_negative_common(4) -+ jr $r_ra -+ lw $r_A, 0($r_s0) -+ END(sk_load_word_negative) -+ -+bpf_slow_path_half_neg: -+ bpf_is_end_of_data -+NESTED(sk_load_half_negative, (6 * SZREG), $r_sp) -+ bpf_negative_common(2) -+ jr $r_ra -+ lhu $r_A, 0($r_s0) -+ END(sk_load_half_negative) -+ -+bpf_slow_path_byte_neg: -+ bpf_is_end_of_data -+NESTED(sk_load_byte_negative, (6 * SZREG), $r_sp) -+ bpf_negative_common(1) -+ jr $r_ra -+ lbu $r_A, 0($r_s0) -+ END(sk_load_byte_negative) -+ -+fault: -+ jr $r_ra -+ addiu $r_ret, zero, 1 diff --git a/root/target/linux/generic/backport-5.4/097-bpf-fix-integer-overflow-in-arg-calculation.patch b/root/target/linux/generic/backport-5.4/097-bpf-fix-integer-overflow-in-arg-calculation.patch deleted file mode 100644 index 1f14f015..00000000 --- a/root/target/linux/generic/backport-5.4/097-bpf-fix-integer-overflow-in-arg-calculation.patch +++ /dev/null @@ -1,53 +0,0 @@ -From: Bui Quang Minh @ 2021-01-26 8:26 UTC (permalink / raw) - To: ast, daniel, davem, kuba, hawk, john.fastabend, andrii, kafai, - songliubraving, yhs, kpsingh, jakub, lmb - Cc: netdev, bpf, linux-kernel, minhquangbui99 - -In 32-bit architecture, the result of sizeof() is a 32-bit integer so -the expression becomes the multiplication between 2 32-bit integer which -can potentially leads to integer overflow. As a result, -bpf_map_area_alloc() allocates less memory than needed. - -Fix this by casting 1 operand to u64. 
- -Signed-off-by: Bui Quang Minh ---- - kernel/bpf/devmap.c | 4 ++-- - net/core/sock_map.c | 2 +- - 2 files changed, 3 insertions(+), 3 deletions(-) - -Index: linux-5.4.147/kernel/bpf/devmap.c -=================================================================== ---- linux-5.4.147.orig/kernel/bpf/devmap.c -+++ linux-5.4.147/kernel/bpf/devmap.c -@@ -94,7 +94,7 @@ static struct hlist_head *dev_map_create - int i; - struct hlist_head *hash; - -- hash = bpf_map_area_alloc(entries * sizeof(*hash), numa_node); -+ hash = bpf_map_area_alloc((u64) entries * sizeof(*hash), numa_node); - if (hash != NULL) - for (i = 0; i < entries; i++) - INIT_HLIST_HEAD(&hash[i]); -@@ -159,7 +159,7 @@ static int dev_map_init_map(struct bpf_d - - spin_lock_init(&dtab->index_lock); - } else { -- dtab->netdev_map = bpf_map_area_alloc(dtab->map.max_entries * -+ dtab->netdev_map = bpf_map_area_alloc((u64) dtab->map.max_entries * - sizeof(struct bpf_dtab_netdev *), - dtab->map.numa_node); - if (!dtab->netdev_map) -Index: linux-5.4.147/net/core/sock_map.c -=================================================================== ---- linux-5.4.147.orig/net/core/sock_map.c -+++ linux-5.4.147/net/core/sock_map.c -@@ -48,7 +48,7 @@ static struct bpf_map *sock_map_alloc(un - if (err) - goto free_stab; - -- stab->sks = bpf_map_area_alloc(stab->map.max_entries * -+ stab->sks = bpf_map_area_alloc((u64) stab->map.max_entries * - sizeof(struct sock *), - stab->map.numa_node); - if (stab->sks) diff --git a/root/target/linux/generic/backport-5.4/370-netfilter-nf_flow_table-fix-offloaded-connection-tim.patch b/root/target/linux/generic/backport-5.4/370-netfilter-nf_flow_table-fix-offloaded-connection-tim.patch deleted file mode 100644 index 5dac7f5e..00000000 --- a/root/target/linux/generic/backport-5.4/370-netfilter-nf_flow_table-fix-offloaded-connection-tim.patch +++ /dev/null @@ -1,115 +0,0 @@ -From: Felix Fietkau -Date: Wed, 13 Jun 2018 12:33:39 +0200 -Subject: [PATCH] netfilter: nf_flow_table: fix offloaded connection timeout - corner case - -The full teardown of offloaded flows is deferred to a gc work item, -however processing of packets by netfilter needs to happen immediately -after a teardown is requested, because the conntrack state needs to be -fixed up. - -Since the IPS_OFFLOAD_BIT is still kept until the teardown is complete, -the netfilter conntrack gc can accidentally bump the timeout of a -connection where offload was just stopped, causing a conntrack entry -leak. - -Fix this by moving the conntrack timeout bumping from conntrack core to -the nf_flow_offload and add a check to prevent bogus timeout bumps. - -Signed-off-by: Felix Fietkau ---- - ---- a/net/netfilter/nf_conntrack_core.c -+++ b/net/netfilter/nf_conntrack_core.c -@@ -1207,18 +1207,6 @@ static bool gc_worker_can_early_drop(con - return false; - } - --#define DAY (86400 * HZ) -- --/* Set an arbitrary timeout large enough not to ever expire, this save -- * us a check for the IPS_OFFLOAD_BIT from the packet path via -- * nf_ct_is_expired(). 
-- */ --static void nf_ct_offload_timeout(struct nf_conn *ct) --{ -- if (nf_ct_expires(ct) < DAY / 2) -- ct->timeout = nfct_time_stamp + DAY; --} -- - static void gc_worker(struct work_struct *work) - { - unsigned long end_time = jiffies + GC_SCAN_MAX_DURATION; -@@ -1250,11 +1238,9 @@ static void gc_worker(struct work_struct - - tmp = nf_ct_tuplehash_to_ctrack(h); - - scanned++; -- if (test_bit(IPS_OFFLOAD_BIT, &tmp->status)) { -- nf_ct_offload_timeout(tmp); -+ if (test_bit(IPS_OFFLOAD_BIT, &tmp->status)) - continue; -- } - - if (nf_ct_is_expired(tmp)) { - nf_ct_gc_expired(tmp); ---- a/net/netfilter/nf_flow_table_core.c -+++ b/net/netfilter/nf_flow_table_core.c -@@ -198,10 +198,29 @@ static const struct rhashtable_params nf - .automatic_shrinking = true, - }; - -+#define DAY (86400 * HZ) -+ -+/* Set an arbitrary timeout large enough not to ever expire, this save -+ * us a check for the IPS_OFFLOAD_BIT from the packet path via -+ * nf_ct_is_expired(). -+ */ -+static void nf_ct_offload_timeout(struct flow_offload *flow) -+{ -+ struct flow_offload_entry *entry; -+ struct nf_conn *ct; -+ -+ entry = container_of(flow, struct flow_offload_entry, flow); -+ ct = entry->ct; -+ -+ if (nf_ct_expires(ct) < DAY / 2) -+ ct->timeout = nfct_time_stamp + DAY; -+} -+ - int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow) - { - int err; - -+ nf_ct_offload_timeout(flow); - flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT; - - err = rhashtable_insert_fast(&flow_table->rhashtable, -@@ -304,6 +323,7 @@ nf_flow_table_iterate(struct nf_flowtabl - rhashtable_walk_start(&hti); - - while ((tuplehash = rhashtable_walk_next(&hti))) { -+ - if (IS_ERR(tuplehash)) { - if (PTR_ERR(tuplehash) != -EAGAIN) { - err = PTR_ERR(tuplehash); -@@ -328,10 +348,17 @@ static void nf_flow_offload_gc_step(stru - { - struct nf_flowtable *flow_table = data; - struct flow_offload_entry *e; -+ bool teardown; - - e = container_of(flow, struct flow_offload_entry, flow); -- if (nf_flow_has_expired(flow) || nf_ct_is_dying(e->ct) || -- (flow->flags & (FLOW_OFFLOAD_DYING | FLOW_OFFLOAD_TEARDOWN))) -+ -+ teardown = flow->flags & (FLOW_OFFLOAD_DYING | -+ FLOW_OFFLOAD_TEARDOWN); -+ -+ if (!teardown) -+ nf_ct_offload_timeout(flow); -+ -+ if (nf_flow_has_expired(flow) || teardown) - flow_offload_del(flow_table, flow); - } - diff --git a/root/target/linux/generic/hack-5.4/690-mptcp_v0.96.patch b/root/target/linux/generic/hack-5.4/690-mptcp_v0.96.patch index d750880f..f98b5a46 100644 --- a/root/target/linux/generic/hack-5.4/690-mptcp_v0.96.patch +++ b/root/target/linux/generic/hack-5.4/690-mptcp_v0.96.patch @@ -1,5 +1,5 @@ diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt -index 165abcb656c5..5d06ce2df29c 100644 +index 979423e1b639..c70f5d160b48 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -2748,6 +2748,10 @@ @@ -287,10 +287,10 @@ index 34c4436fd18f..828f79528b32 100644 union { diff --git a/include/net/mptcp.h b/include/net/mptcp.h new file mode 100644 -index 000000000000..196b8939cbab +index 000000000000..c90d1b53b9d4 --- /dev/null +++ b/include/net/mptcp.h -@@ -0,0 +1,1577 @@ +@@ -0,0 +1,1540 @@ +/* + * MPTCP implementation + * @@ -575,6 +575,9 @@ index 000000000000..196b8939cbab + server_side:1, + infinite_mapping_rcv:1, + infinite_mapping_snd:1, ++ infinite_send_una_ahead:1, /* While falling back, the snd_una ++ *on meta is ahead of the subflow. 
++ */ + dfin_combined:1, /* Was the DFIN combined with subflow-fin? */ + passive_close:1, + snd_hiseq_index:1, /* Index in snd_high_order of snd_nxt */ @@ -1116,6 +1119,8 @@ index 000000000000..196b8939cbab + int push_one, gfp_t gfp); +void tcp_parse_mptcp_options(const struct sk_buff *skb, + struct mptcp_options_received *mopt); ++bool mptcp_handle_ack_in_infinite(struct sock *sk, const struct sk_buff *skb, ++ int flag); +void mptcp_parse_options(const uint8_t *ptr, int opsize, + struct mptcp_options_received *mopt, + const struct sk_buff *skb, @@ -1152,7 +1157,6 @@ index 000000000000..196b8939cbab +unsigned int mptcp_current_mss(struct sock *meta_sk); +void mptcp_hmac(u8 ver, const u8 *key_1, const u8 *key_2, u8 *hash_out, + int arg_num, ...); -+void mptcp_clean_rtx_infinite(const struct sk_buff *skb, struct sock *sk); +void mptcp_fin(struct sock *meta_sk); +void mptcp_meta_retransmit_timer(struct sock *meta_sk); +void mptcp_sub_retransmit_timer(struct sock *sk); @@ -1598,47 +1602,6 @@ index 000000000000..196b8939cbab + mpcb->pm_ops->close_session(mptcp_meta_sk(except)); +} + -+static inline bool mptcp_fallback_infinite(struct sock *sk, int flag) -+{ -+ struct tcp_sock *tp = tcp_sk(sk); -+ struct mptcp_cb *mpcb = tp->mpcb; -+ -+ /* If data has been acknowleged on the meta-level, fully_established -+ * will have been set before and thus we will not fall back to infinite -+ * mapping. -+ */ -+ if (likely(tp->mptcp->fully_established)) -+ return false; -+ -+ if (!(flag & MPTCP_FLAG_DATA_ACKED)) -+ return false; -+ -+ /* Don't fallback twice ;) */ -+ if (mpcb->infinite_mapping_snd) -+ return false; -+ -+ pr_debug("%s %#x will fallback - pi %d, src %pI4:%u dst %pI4:%u rcv_nxt %u from %pS\n", -+ __func__, mpcb->mptcp_loc_token, tp->mptcp->path_index, -+ &inet_sk(sk)->inet_saddr, ntohs(inet_sk(sk)->inet_sport), -+ &inet_sk(sk)->inet_daddr, ntohs(inet_sk(sk)->inet_dport), -+ tp->rcv_nxt, __builtin_return_address(0)); -+ if (!is_master_tp(tp)) { -+ MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_FBACKSUB); -+ return true; -+ } -+ -+ mpcb->infinite_mapping_snd = 1; -+ mpcb->infinite_mapping_rcv = 1; -+ mpcb->infinite_rcv_seq = mptcp_get_rcv_nxt_64(mptcp_meta_tp(tp)); -+ tp->mptcp->fully_established = 1; -+ -+ mptcp_fallback_close(mpcb, sk); -+ -+ MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_FBACKINIT); -+ -+ return false; -+} -+ +static inline bool mptcp_v6_is_v4_mapped(const struct sock *sk) +{ + return sk->sk_family == AF_INET6 && @@ -1732,8 +1695,6 @@ index 000000000000..196b8939cbab +static inline void mptcp_update_metasocket(const struct sock *meta_sk) {} +static inline void mptcp_reinject_data(struct sock *orig_sk, int clone_it) {} +static inline void mptcp_update_sndbuf(const struct tcp_sock *tp) {} -+static inline void mptcp_clean_rtx_infinite(const struct sk_buff *skb, -+ const struct sock *sk) {} +static inline void mptcp_sub_close(struct sock *sk, unsigned long delay) {} +static inline void mptcp_set_rto(const struct sock *sk) {} +static inline void mptcp_send_fin(const struct sock *meta_sk) {} @@ -1788,7 +1749,9 @@ index 000000000000..196b8939cbab + return 0; +} +static inline void mptcp_sub_close_passive(struct sock *sk) {} -+static inline bool mptcp_fallback_infinite(const struct sock *sk, int flag) ++static inline bool mptcp_handle_ack_in_infinite(const struct sock *sk, ++ const struct sk_buff *skb, ++ int flag) +{ + return false; +} @@ -2945,7 +2908,7 @@ index a03036456221..aebb337662c3 100644 IFF_ALLMULTI)); diff --git a/net/core/filter.c b/net/core/filter.c -index 5ebc973ed4c5..516fc8689088 
100644 +index d39518f691b4..fbb69a50b8a9 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -73,6 +73,7 @@ @@ -2956,7 +2919,7 @@ index 5ebc973ed4c5..516fc8689088 100644 /** * sk_filter_trim_cap - run a packet through a socket filter -@@ -4280,6 +4281,19 @@ static unsigned long bpf_xdp_copy(void *dst_buff, const void *src_buff, +@@ -4285,6 +4286,19 @@ static unsigned long bpf_xdp_copy(void *dst_buff, const void *src_buff, if (sk->sk_mark != val) { sk->sk_mark = val; sk_dst_reset(sk); @@ -2976,7 +2939,7 @@ index 5ebc973ed4c5..516fc8689088 100644 } break; default: -@@ -4302,6 +4316,14 @@ static unsigned long bpf_xdp_copy(void *dst_buff, const void *src_buff, +@@ -4307,6 +4321,14 @@ static unsigned long bpf_xdp_copy(void *dst_buff, const void *src_buff, if (val == -1) val = 0; inet->tos = val; @@ -2991,7 +2954,7 @@ index 5ebc973ed4c5..516fc8689088 100644 } break; default: -@@ -4324,6 +4346,17 @@ static unsigned long bpf_xdp_copy(void *dst_buff, const void *src_buff, +@@ -4329,6 +4351,17 @@ static unsigned long bpf_xdp_copy(void *dst_buff, const void *src_buff, if (val == -1) val = 0; np->tclass = val; @@ -3020,7 +2983,7 @@ index 283ddb2dbc7d..8f526a0d1912 100644 + +EXPORT_TRACEPOINT_SYMBOL_GPL(mptcp_retransmit); diff --git a/net/core/skbuff.c b/net/core/skbuff.c -index ac083685214e..62bf97b4d5de 100644 +index 5bdb3cd20d61..d430e46373f3 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -582,7 +582,7 @@ static inline void skb_drop_fraglist(struct sk_buff *skb) @@ -3205,7 +3168,7 @@ index a926de2e42b5..6d73dc6e2586 100644 default "dctcp" if DEFAULT_DCTCP default "cdg" if DEFAULT_CDG diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c -index c800220c404d..b8f10024780a 100644 +index a7a6b1adb698..8ebca975f8c8 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -100,6 +100,7 @@ @@ -3270,7 +3233,7 @@ index c800220c404d..b8f10024780a 100644 WARN_ON(!((1 << sk2->sk_state) & (TCPF_ESTABLISHED | TCPF_SYN_RECV | TCPF_CLOSE_WAIT | TCPF_CLOSE))); -@@ -1978,6 +2001,9 @@ static int __init inet_init(void) +@@ -1981,6 +2004,9 @@ static int __init inet_init(void) if (init_ipv4_mibs()) panic("%s: Cannot init ipv4 mibs\n", __func__); @@ -3385,7 +3348,7 @@ index aa3fd61818c4..8b3e955ec165 100644 break; case IP_TTL: diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c -index 2b45d1455592..f988be944eda 100644 +index 2b45d1455592..d2d2427e1883 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c @@ -12,6 +12,8 @@ @@ -3407,7 +3370,7 @@ index 2b45d1455592..f988be944eda 100644 { const struct iphdr *iph = ip_hdr(skb); const struct tcphdr *th = tcp_hdr(skb); -@@ -200,14 +203,33 @@ int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th, +@@ -200,14 +203,35 @@ int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th, struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb, struct request_sock *req, @@ -3429,8 +3392,10 @@ index 2b45d1455592..f988be944eda 100644 + goto listen_overflow; + + ret = mptcp_check_req_master(sk, child, req, skb, mopt, 0, tsoff); -+ if (ret < 0) ++ if (ret < 0) { ++ __reqsk_free(req); + return NULL; ++ } + + if (!ret) + return tcp_sk(child)->mpcb->master_sk; @@ -3441,7 +3406,7 @@ index 2b45d1455592..f988be944eda 100644 if (child) { refcount_set(&req->rsk_refcnt, 1); tcp_sk(child)->tsoffset = tsoff; -@@ -284,6 +306,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb) +@@ -284,6 +308,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb) { struct ip_options *opt = 
&TCP_SKB_CB(skb)->header.h4.opt; struct tcp_options_received tcp_opt; @@ -3449,7 +3414,7 @@ index 2b45d1455592..f988be944eda 100644 struct inet_request_sock *ireq; struct tcp_request_sock *treq; struct tcp_sock *tp = tcp_sk(sk); -@@ -313,7 +336,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb) +@@ -313,7 +338,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb) /* check for timestamp cookie support */ memset(&tcp_opt, 0, sizeof(tcp_opt)); @@ -3459,7 +3424,7 @@ index 2b45d1455592..f988be944eda 100644 if (tcp_opt.saw_tstamp && tcp_opt.rcv_tsecr) { tsoff = secure_tcp_ts_off(sock_net(sk), -@@ -326,7 +350,12 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb) +@@ -326,7 +352,12 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb) goto out; ret = NULL; @@ -3473,7 +3438,7 @@ index 2b45d1455592..f988be944eda 100644 if (!req) goto out; -@@ -346,6 +375,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb) +@@ -346,6 +377,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb) ireq->sack_ok = tcp_opt.sack_ok; ireq->wscale_ok = tcp_opt.wscale_ok; ireq->tstamp_ok = tcp_opt.saw_tstamp; @@ -3482,7 +3447,7 @@ index 2b45d1455592..f988be944eda 100644 req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0; treq->snt_synack = 0; treq->tfo_listener = false; -@@ -354,6 +385,9 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb) +@@ -354,6 +387,9 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb) ireq->ir_iif = inet_request_bound_dev_if(sk, skb); @@ -3492,7 +3457,7 @@ index 2b45d1455592..f988be944eda 100644 /* We throwed the options of the initial SYN away, so we hope * the ACK carries the same options again (see RFC1122 4.2.3.8) */ -@@ -392,15 +426,15 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb) +@@ -392,15 +428,15 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb) (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0)) req->rsk_window_clamp = full_space; @@ -3514,7 +3479,7 @@ index 2b45d1455592..f988be944eda 100644 * Normal sockets get it right from inet_csk_route_child_sock() */ diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c -index 9f53d25e047e..ae9ba8f2ced1 100644 +index 9f53d25e047e..a48d9b394b11 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -270,6 +270,7 @@ @@ -3783,8 +3748,11 @@ index 9f53d25e047e..ae9ba8f2ced1 100644 } /* Change state AFTER socket is unhashed to avoid closed -@@ -2297,7 +2383,7 @@ void tcp_set_state(struct sock *sk, int state) +@@ -2295,9 +2381,10 @@ void tcp_set_state(struct sock *sk, int state) + [TCP_LISTEN] = TCP_CLOSE, + [TCP_CLOSING] = TCP_CLOSING, [TCP_NEW_SYN_RECV] = TCP_CLOSE, /* should not happen ! */ ++ [TCP_RST_WAIT] = TCP_CLOSE, }; -static int tcp_close_state(struct sock *sk) @@ -3792,7 +3760,7 @@ index 9f53d25e047e..ae9ba8f2ced1 100644 { int next = (int)new_state[sk->sk_state]; int ns = next & TCP_STATE_MASK; -@@ -2327,7 +2413,7 @@ void tcp_shutdown(struct sock *sk, int how) +@@ -2327,7 +2414,7 @@ void tcp_shutdown(struct sock *sk, int how) TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) { /* Clear out any half completed packets. FIN if needed. 
*/ if (tcp_close_state(sk)) @@ -3801,7 +3769,7 @@ index 9f53d25e047e..ae9ba8f2ced1 100644 } } EXPORT_SYMBOL(tcp_shutdown); -@@ -2352,6 +2438,17 @@ void tcp_close(struct sock *sk, long timeout) +@@ -2352,6 +2439,17 @@ void tcp_close(struct sock *sk, long timeout) int data_was_unread = 0; int state; @@ -3819,7 +3787,7 @@ index 9f53d25e047e..ae9ba8f2ced1 100644 lock_sock(sk); sk->sk_shutdown = SHUTDOWN_MASK; -@@ -2396,7 +2493,7 @@ void tcp_close(struct sock *sk, long timeout) +@@ -2396,7 +2494,7 @@ void tcp_close(struct sock *sk, long timeout) /* Unread data was tossed, zap the connection. */ NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE); tcp_set_state(sk, TCP_CLOSE); @@ -3828,7 +3796,7 @@ index 9f53d25e047e..ae9ba8f2ced1 100644 } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) { /* Check zero linger _after_ checking for unread data. */ sk->sk_prot->disconnect(sk, 0); -@@ -2470,7 +2567,7 @@ void tcp_close(struct sock *sk, long timeout) +@@ -2470,7 +2568,7 @@ void tcp_close(struct sock *sk, long timeout) struct tcp_sock *tp = tcp_sk(sk); if (tp->linger2 < 0) { tcp_set_state(sk, TCP_CLOSE); @@ -3837,7 +3805,7 @@ index 9f53d25e047e..ae9ba8f2ced1 100644 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONLINGER); } else { -@@ -2480,7 +2577,8 @@ void tcp_close(struct sock *sk, long timeout) +@@ -2480,7 +2578,8 @@ void tcp_close(struct sock *sk, long timeout) inet_csk_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN); } else { @@ -3847,7 +3815,7 @@ index 9f53d25e047e..ae9ba8f2ced1 100644 goto out; } } -@@ -2489,7 +2587,7 @@ void tcp_close(struct sock *sk, long timeout) +@@ -2489,7 +2588,7 @@ void tcp_close(struct sock *sk, long timeout) sk_mem_reclaim(sk); if (tcp_check_oom(sk, 0)) { tcp_set_state(sk, TCP_CLOSE); @@ -3856,7 +3824,7 @@ index 9f53d25e047e..ae9ba8f2ced1 100644 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY); } else if (!check_net(sock_net(sk))) { -@@ -2521,15 +2619,6 @@ void tcp_close(struct sock *sk, long timeout) +@@ -2521,15 +2620,6 @@ void tcp_close(struct sock *sk, long timeout) } EXPORT_SYMBOL(tcp_close); @@ -3872,7 +3840,7 @@ index 9f53d25e047e..ae9ba8f2ced1 100644 static void tcp_rtx_queue_purge(struct sock *sk) { struct rb_node *p = rb_first(&sk->tcp_rtx_queue); -@@ -2551,6 +2640,10 @@ void tcp_write_queue_purge(struct sock *sk) +@@ -2551,6 +2641,10 @@ void tcp_write_queue_purge(struct sock *sk) { struct sk_buff *skb; @@ -3883,7 +3851,7 @@ index 9f53d25e047e..ae9ba8f2ced1 100644 tcp_chrono_stop(sk, TCP_CHRONO_BUSY); while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) { tcp_skb_tsorted_anchor_cleanup(skb); -@@ -2569,6 +2662,36 @@ void tcp_write_queue_purge(struct sock *sk) +@@ -2569,6 +2663,36 @@ void tcp_write_queue_purge(struct sock *sk) inet_csk(sk)->icsk_backoff = 0; } @@ -3920,7 +3888,7 @@ index 9f53d25e047e..ae9ba8f2ced1 100644 int tcp_disconnect(struct sock *sk, int flags) { struct inet_sock *inet = inet_sk(sk); -@@ -2591,7 +2714,7 @@ int tcp_disconnect(struct sock *sk, int flags) +@@ -2591,7 +2715,7 @@ int tcp_disconnect(struct sock *sk, int flags) /* The last check adjusts for discrepancy of Linux wrt. 
RFC * states */ @@ -3929,13 +3897,14 @@ index 9f53d25e047e..ae9ba8f2ced1 100644 sk->sk_err = ECONNRESET; } else if (old_state == TCP_SYN_SENT) sk->sk_err = ECONNRESET; -@@ -2613,11 +2736,15 @@ int tcp_disconnect(struct sock *sk, int flags) +@@ -2613,11 +2737,16 @@ int tcp_disconnect(struct sock *sk, int flags) if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) inet_reset_saddr(sk); + if (is_meta_sk(sk)) { + mptcp_disconnect(sk); + } else { ++ tp->request_mptcp = 0; + if (tp->inside_tk_table) + mptcp_hash_remove_bh(tp); + } @@ -3948,7 +3917,7 @@ index 9f53d25e047e..ae9ba8f2ced1 100644 seq = tp->write_seq + tp->max_window + 2; if (!seq) -@@ -2627,21 +2754,14 @@ int tcp_disconnect(struct sock *sk, int flags) +@@ -2627,21 +2756,14 @@ int tcp_disconnect(struct sock *sk, int flags) icsk->icsk_backoff = 0; tp->snd_cwnd = 2; icsk->icsk_probes_out = 0; @@ -3973,7 +3942,7 @@ index 9f53d25e047e..ae9ba8f2ced1 100644 inet_csk_delack_init(sk); /* Initialize rcv_mss to TCP_MIN_MSS to avoid division by 0 * issue in __tcp_select_window() -@@ -2653,14 +2773,6 @@ int tcp_disconnect(struct sock *sk, int flags) +@@ -2653,14 +2775,6 @@ int tcp_disconnect(struct sock *sk, int flags) sk->sk_rx_dst = NULL; tcp_saved_syn_free(tp); tp->compressed_ack = 0; @@ -3988,7 +3957,7 @@ index 9f53d25e047e..ae9ba8f2ced1 100644 tp->duplicate_sack[0].start_seq = 0; tp->duplicate_sack[0].end_seq = 0; tp->dsack_dups = 0; -@@ -2669,8 +2781,6 @@ int tcp_disconnect(struct sock *sk, int flags) +@@ -2669,8 +2783,6 @@ int tcp_disconnect(struct sock *sk, int flags) tp->sacked_out = 0; tp->tlp_high_seq = 0; tp->last_oow_ack_time = 0; @@ -3997,7 +3966,7 @@ index 9f53d25e047e..ae9ba8f2ced1 100644 tp->rack.mstamp = 0; tp->rack.advanced = 0; tp->rack.reo_wnd_steps = 1; -@@ -2704,7 +2814,7 @@ int tcp_disconnect(struct sock *sk, int flags) +@@ -2704,7 +2816,7 @@ int tcp_disconnect(struct sock *sk, int flags) static inline bool tcp_can_repair_sock(const struct sock *sk) { return ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN) && @@ -4006,7 +3975,7 @@ index 9f53d25e047e..ae9ba8f2ced1 100644 } static int tcp_repair_set_window(struct tcp_sock *tp, char __user *optbuf, int len) -@@ -2735,6 +2845,7 @@ static int tcp_repair_set_window(struct tcp_sock *tp, char __user *optbuf, int l +@@ -2735,6 +2847,7 @@ static int tcp_repair_set_window(struct tcp_sock *tp, char __user *optbuf, int l tp->rcv_wnd = opt.rcv_wnd; tp->rcv_wup = opt.rcv_wup; @@ -4014,7 +3983,7 @@ index 9f53d25e047e..ae9ba8f2ced1 100644 return 0; } -@@ -2873,6 +2984,61 @@ static int do_tcp_setsockopt(struct sock *sk, int level, +@@ -2873,6 +2986,61 @@ static int do_tcp_setsockopt(struct sock *sk, int level, return tcp_fastopen_reset_cipher(net, sk, key, backup_key); } @@ -4076,7 +4045,7 @@ index 9f53d25e047e..ae9ba8f2ced1 100644 default: /* fallthru */ break; -@@ -3062,6 +3228,12 @@ static int do_tcp_setsockopt(struct sock *sk, int level, +@@ -3062,6 +3230,12 @@ static int do_tcp_setsockopt(struct sock *sk, int level, break; case TCP_DEFER_ACCEPT: @@ -4089,7 +4058,7 @@ index 9f53d25e047e..ae9ba8f2ced1 100644 /* Translate value in seconds to number of retransmits */ icsk->icsk_accept_queue.rskq_defer_accept = secs_to_retrans(val, TCP_TIMEOUT_INIT / HZ, -@@ -3089,7 +3261,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level, +@@ -3089,7 +3263,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level, (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) && inet_csk_ack_scheduled(sk)) { icsk->icsk_ack.pending |= ICSK_ACK_PUSHED; @@ -4098,7 +4067,7 @@ index 9f53d25e047e..ae9ba8f2ced1 100644 if (!(val & 
1)) inet_csk_enter_pingpong_mode(sk); } -@@ -3099,7 +3271,10 @@ static int do_tcp_setsockopt(struct sock *sk, int level, +@@ -3099,7 +3273,10 @@ static int do_tcp_setsockopt(struct sock *sk, int level, #ifdef CONFIG_TCP_MD5SIG case TCP_MD5SIG: case TCP_MD5SIG_EXT: @@ -4110,7 +4079,7 @@ index 9f53d25e047e..ae9ba8f2ced1 100644 break; #endif case TCP_USER_TIMEOUT: -@@ -3155,6 +3330,32 @@ static int do_tcp_setsockopt(struct sock *sk, int level, +@@ -3155,6 +3332,32 @@ static int do_tcp_setsockopt(struct sock *sk, int level, tp->notsent_lowat = val; sk->sk_write_space(sk); break; @@ -4143,7 +4112,7 @@ index 9f53d25e047e..ae9ba8f2ced1 100644 case TCP_INQ: if (val > 1 || val < 0) err = -EINVAL; -@@ -3219,7 +3420,7 @@ static void tcp_get_info_chrono_stats(const struct tcp_sock *tp, +@@ -3219,7 +3422,7 @@ static void tcp_get_info_chrono_stats(const struct tcp_sock *tp, } /* Return information about state of tcp endpoint in API format. */ @@ -4152,7 +4121,7 @@ index 9f53d25e047e..ae9ba8f2ced1 100644 { const struct tcp_sock *tp = tcp_sk(sk); /* iff sk_type == SOCK_STREAM */ const struct inet_connection_sock *icsk = inet_csk(sk); -@@ -3256,7 +3457,8 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info) +@@ -3256,7 +3459,8 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info) return; } @@ -4162,7 +4131,7 @@ index 9f53d25e047e..ae9ba8f2ced1 100644 info->tcpi_ca_state = icsk->icsk_ca_state; info->tcpi_retransmits = icsk->icsk_retransmits; -@@ -3332,7 +3534,9 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info) +@@ -3332,7 +3536,9 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info) info->tcpi_reord_seen = tp->reord_seen; info->tcpi_rcv_ooopack = tp->rcv_ooopack; info->tcpi_snd_wnd = tp->snd_wnd; @@ -4173,7 +4142,7 @@ index 9f53d25e047e..ae9ba8f2ced1 100644 } EXPORT_SYMBOL_GPL(tcp_get_info); -@@ -3479,7 +3683,7 @@ static int do_tcp_getsockopt(struct sock *sk, int level, +@@ -3479,7 +3685,7 @@ static int do_tcp_getsockopt(struct sock *sk, int level, if (get_user(len, optlen)) return -EFAULT; @@ -4182,7 +4151,7 @@ index 9f53d25e047e..ae9ba8f2ced1 100644 len = min_t(unsigned int, len, sizeof(info)); if (put_user(len, optlen)) -@@ -3668,6 +3872,87 @@ static int do_tcp_getsockopt(struct sock *sk, int level, +@@ -3668,6 +3874,87 @@ static int do_tcp_getsockopt(struct sock *sk, int level, } return 0; } @@ -4270,7 +4239,7 @@ index 9f53d25e047e..ae9ba8f2ced1 100644 #ifdef CONFIG_MMU case TCP_ZEROCOPY_RECEIVE: { struct tcp_zerocopy_receive zc; -@@ -3873,7 +4158,9 @@ void tcp_done(struct sock *sk) +@@ -3873,7 +4160,9 @@ void tcp_done(struct sock *sk) if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV) TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS); @@ -4280,7 +4249,7 @@ index 9f53d25e047e..ae9ba8f2ced1 100644 tcp_clear_xmit_timers(sk); if (req) reqsk_fastopen_remove(sk, req, false); -@@ -3889,6 +4176,8 @@ void tcp_done(struct sock *sk) +@@ -3889,6 +4178,8 @@ void tcp_done(struct sock *sk) int tcp_abort(struct sock *sk, int err) { @@ -4289,7 +4258,7 @@ index 9f53d25e047e..ae9ba8f2ced1 100644 if (!sk_fullsock(sk)) { if (sk->sk_state == TCP_NEW_SYN_RECV) { struct request_sock *req = inet_reqsk(sk); -@@ -3902,7 +4191,7 @@ int tcp_abort(struct sock *sk, int err) +@@ -3902,7 +4193,7 @@ int tcp_abort(struct sock *sk, int err) } /* Don't race with userspace socket closes such as tcp_close. 
*/ @@ -4298,7 +4267,7 @@ index 9f53d25e047e..ae9ba8f2ced1 100644 if (sk->sk_state == TCP_LISTEN) { tcp_set_state(sk, TCP_CLOSE); -@@ -3911,7 +4200,7 @@ int tcp_abort(struct sock *sk, int err) +@@ -3911,7 +4202,7 @@ int tcp_abort(struct sock *sk, int err) /* Don't race with BH socket closes such as inet_csk_listen_stop. */ local_bh_disable(); @@ -4307,7 +4276,7 @@ index 9f53d25e047e..ae9ba8f2ced1 100644 if (!sock_flag(sk, SOCK_DEAD)) { sk->sk_err = err; -@@ -3919,14 +4208,14 @@ int tcp_abort(struct sock *sk, int err) +@@ -3919,14 +4210,14 @@ int tcp_abort(struct sock *sk, int err) smp_wmb(); sk->sk_error_report(sk); if (tcp_need_reset(sk->sk_state)) @@ -4428,7 +4397,7 @@ index a5ec77a5ad6f..f9fb4a268b9b 100644 * and queues the child into listener accept queue. */ diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c -index c0fcfa296468..dae2ce9656b8 100644 +index c0fcfa296468..f06b3debc592 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -76,35 +76,15 @@ @@ -4639,24 +4608,22 @@ index c0fcfa296468..dae2ce9656b8 100644 { struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); -@@ -3713,6 +3720,16 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) +@@ -3713,6 +3720,14 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) tcp_rack_update_reo_wnd(sk, &rs); + if (mptcp(tp)) { -+ if (mptcp_fallback_infinite(sk, flag)) { ++ if (mptcp_handle_ack_in_infinite(sk, skb, flag)) { + pr_debug("%s resetting flow\n", __func__); + mptcp_send_reset(sk); + return -1; + } -+ -+ mptcp_clean_rtx_infinite(skb, sk); + } + if (tp->tlp_high_seq) tcp_process_tlp_ack(sk, ack, flag); -@@ -3856,8 +3873,10 @@ static u16 tcp_parse_mss_option(const struct tcphdr *th, u16 user_mss) +@@ -3856,8 +3871,10 @@ static u16 tcp_parse_mss_option(const struct tcphdr *th, u16 user_mss) */ void tcp_parse_options(const struct net *net, const struct sk_buff *skb, @@ -4669,7 +4636,7 @@ index c0fcfa296468..dae2ce9656b8 100644 { const unsigned char *ptr; const struct tcphdr *th = tcp_hdr(skb); -@@ -3943,6 +3962,10 @@ void tcp_parse_options(const struct net *net, +@@ -3943,6 +3960,10 @@ void tcp_parse_options(const struct net *net, */ break; #endif @@ -4680,7 +4647,7 @@ index c0fcfa296468..dae2ce9656b8 100644 case TCPOPT_FASTOPEN: tcp_parse_fastopen_option( opsize - TCPOLEN_FASTOPEN_BASE, -@@ -4010,7 +4033,9 @@ static bool tcp_fast_parse_options(const struct net *net, +@@ -4010,7 +4031,9 @@ static bool tcp_fast_parse_options(const struct net *net, return true; } @@ -4691,7 +4658,7 @@ index c0fcfa296468..dae2ce9656b8 100644 if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr) tp->rx_opt.rcv_tsecr -= tp->tsoffset; -@@ -4120,7 +4145,7 @@ static inline bool tcp_paws_discard(const struct sock *sk, +@@ -4120,7 +4143,7 @@ static inline bool tcp_paws_discard(const struct sock *sk, static inline bool tcp_sequence(const struct tcp_sock *tp, u32 seq, u32 end_seq) { return !before(end_seq, tp->rcv_wup) && @@ -4700,7 +4667,7 @@ index c0fcfa296468..dae2ce9656b8 100644 } /* When we get a reset we do this. 
*/ -@@ -4169,6 +4194,11 @@ void tcp_fin(struct sock *sk) +@@ -4169,6 +4192,11 @@ void tcp_fin(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); @@ -4712,7 +4679,7 @@ index c0fcfa296468..dae2ce9656b8 100644 inet_csk_schedule_ack(sk); sk->sk_shutdown |= RCV_SHUTDOWN; -@@ -4179,6 +4209,10 @@ void tcp_fin(struct sock *sk) +@@ -4179,6 +4207,10 @@ void tcp_fin(struct sock *sk) case TCP_ESTABLISHED: /* Move to CLOSE_WAIT */ tcp_set_state(sk, TCP_CLOSE_WAIT); @@ -4723,7 +4690,7 @@ index c0fcfa296468..dae2ce9656b8 100644 inet_csk_enter_pingpong_mode(sk); break; -@@ -4201,9 +4235,16 @@ void tcp_fin(struct sock *sk) +@@ -4201,9 +4233,16 @@ void tcp_fin(struct sock *sk) tcp_set_state(sk, TCP_CLOSING); break; case TCP_FIN_WAIT2: @@ -4741,7 +4708,7 @@ index c0fcfa296468..dae2ce9656b8 100644 break; default: /* Only TCP_LISTEN and TCP_CLOSE are left, in these -@@ -4225,6 +4266,10 @@ void tcp_fin(struct sock *sk) +@@ -4225,6 +4264,10 @@ void tcp_fin(struct sock *sk) if (!sock_flag(sk, SOCK_DEAD)) { sk->sk_state_change(sk); @@ -4752,7 +4719,7 @@ index c0fcfa296468..dae2ce9656b8 100644 /* Do not send POLL_HUP for half duplex close. */ if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE) -@@ -4439,6 +4484,9 @@ static bool tcp_try_coalesce(struct sock *sk, +@@ -4439,6 +4482,9 @@ static bool tcp_try_coalesce(struct sock *sk, *fragstolen = false; @@ -4762,7 +4729,7 @@ index c0fcfa296468..dae2ce9656b8 100644 /* Its possible this segment overlaps with prior segment in queue */ if (TCP_SKB_CB(from)->seq != TCP_SKB_CB(to)->end_seq) return false; -@@ -4493,7 +4541,7 @@ static void tcp_drop(struct sock *sk, struct sk_buff *skb) +@@ -4493,7 +4539,7 @@ static void tcp_drop(struct sock *sk, struct sk_buff *skb) /* This one checks to see if we can put data from the * out_of_order queue into the receive_queue. */ @@ -4771,7 +4738,7 @@ index c0fcfa296468..dae2ce9656b8 100644 { struct tcp_sock *tp = tcp_sk(sk); __u32 dsack_high = tp->rcv_nxt; -@@ -4516,7 +4564,14 @@ static void tcp_ofo_queue(struct sock *sk) +@@ -4516,7 +4562,14 @@ static void tcp_ofo_queue(struct sock *sk) p = rb_next(p); rb_erase(&skb->rbnode, &tp->out_of_order_queue); @@ -4787,7 +4754,7 @@ index c0fcfa296468..dae2ce9656b8 100644 tcp_drop(sk, skb); continue; } -@@ -4546,6 +4601,9 @@ static void tcp_ofo_queue(struct sock *sk) +@@ -4546,6 +4599,9 @@ static void tcp_ofo_queue(struct sock *sk) static int tcp_try_rmem_schedule(struct sock *sk, struct sk_buff *skb, unsigned int size) { @@ -4797,7 +4764,7 @@ index c0fcfa296468..dae2ce9656b8 100644 if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf || !sk_rmem_schedule(sk, skb, size)) { -@@ -4560,7 +4618,7 @@ static int tcp_try_rmem_schedule(struct sock *sk, struct sk_buff *skb, +@@ -4560,7 +4616,7 @@ static int tcp_try_rmem_schedule(struct sock *sk, struct sk_buff *skb, return 0; } @@ -4806,7 +4773,7 @@ index c0fcfa296468..dae2ce9656b8 100644 { struct tcp_sock *tp = tcp_sk(sk); struct rb_node **p, *parent; -@@ -4632,7 +4690,8 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) +@@ -4632,7 +4688,8 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) continue; } if (before(seq, TCP_SKB_CB(skb1)->end_seq)) { @@ -4816,7 +4783,7 @@ index c0fcfa296468..dae2ce9656b8 100644 /* All the bits are present. Drop. 
*/ NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOMERGE); -@@ -4679,6 +4738,11 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) +@@ -4679,6 +4736,11 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) end_seq); break; } @@ -4828,7 +4795,7 @@ index c0fcfa296468..dae2ce9656b8 100644 rb_erase(&skb1->rbnode, &tp->out_of_order_queue); tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq, TCP_SKB_CB(skb1)->end_seq); -@@ -4690,7 +4754,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) +@@ -4690,7 +4752,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) tp->ooo_last_skb = skb; add_sack: @@ -4837,7 +4804,7 @@ index c0fcfa296468..dae2ce9656b8 100644 tcp_sack_new_ofo_skb(sk, seq, end_seq); end: if (skb) { -@@ -4704,8 +4768,8 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) +@@ -4704,8 +4766,8 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) } } @@ -4848,7 +4815,7 @@ index c0fcfa296468..dae2ce9656b8 100644 { int eaten; struct sk_buff *tail = skb_peek_tail(&sk->sk_receive_queue); -@@ -4780,7 +4844,8 @@ void tcp_data_ready(struct sock *sk) +@@ -4780,7 +4842,8 @@ void tcp_data_ready(struct sock *sk) if (avail < sk->sk_rcvlowat && !tcp_rmem_pressure(sk) && !sock_flag(sk, SOCK_DONE) && @@ -4858,7 +4825,7 @@ index c0fcfa296468..dae2ce9656b8 100644 return; sk->sk_data_ready(sk); -@@ -4792,10 +4857,14 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) +@@ -4792,10 +4855,14 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) bool fragstolen; int eaten; @@ -4874,7 +4841,7 @@ index c0fcfa296468..dae2ce9656b8 100644 skb_dst_drop(skb); __skb_pull(skb, tcp_hdr(skb)->doff * 4); -@@ -4806,7 +4875,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) +@@ -4806,7 +4873,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) * Out of sequence packets to the out_of_order_queue. */ if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt) { @@ -4883,7 +4850,7 @@ index c0fcfa296468..dae2ce9656b8 100644 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPZEROWINDOWDROP); goto out_of_window; } -@@ -4822,7 +4891,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) +@@ -4822,7 +4889,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) } eaten = tcp_queue_rcv(sk, skb, &fragstolen); @@ -4892,7 +4859,7 @@ index c0fcfa296468..dae2ce9656b8 100644 tcp_event_data_recv(sk, skb); if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) tcp_fin(sk); -@@ -4844,7 +4913,11 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) +@@ -4844,7 +4911,11 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) if (eaten > 0) kfree_skb_partial(skb, fragstolen); @@ -4905,7 +4872,7 @@ index c0fcfa296468..dae2ce9656b8 100644 tcp_data_ready(sk); return; } -@@ -4864,7 +4937,8 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) +@@ -4864,7 +4935,8 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) } /* Out of window. F.e. zero window probe. */ @@ -4915,7 +4882,7 @@ index c0fcfa296468..dae2ce9656b8 100644 goto out_of_window; if (before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { -@@ -4874,7 +4948,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) +@@ -4874,7 +4946,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) /* If window is closed, drop tail of packet. But after * remembering D-SACK for its head made in previous line. 
*/ @@ -4924,7 +4891,7 @@ index c0fcfa296468..dae2ce9656b8 100644 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPZEROWINDOWDROP); goto out_of_window; } -@@ -5187,7 +5261,7 @@ static int tcp_prune_queue(struct sock *sk) +@@ -5187,7 +5259,7 @@ static int tcp_prune_queue(struct sock *sk) return -1; } @@ -4933,7 +4900,7 @@ index c0fcfa296468..dae2ce9656b8 100644 { const struct tcp_sock *tp = tcp_sk(sk); -@@ -5222,7 +5296,7 @@ static void tcp_new_space(struct sock *sk) +@@ -5222,7 +5294,7 @@ static void tcp_new_space(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); @@ -4942,7 +4909,7 @@ index c0fcfa296468..dae2ce9656b8 100644 tcp_sndbuf_expand(sk); tp->snd_cwnd_stamp = tcp_jiffies32; } -@@ -5236,10 +5310,11 @@ static void tcp_check_space(struct sock *sk) +@@ -5236,10 +5308,11 @@ static void tcp_check_space(struct sock *sk) sock_reset_flag(sk, SOCK_QUEUE_SHRUNK); /* pairs with tcp_poll() */ smp_mb(); @@ -4957,7 +4924,7 @@ index c0fcfa296468..dae2ce9656b8 100644 tcp_chrono_stop(sk, TCP_CHRONO_SNDBUF_LIMITED); } } -@@ -5258,6 +5333,8 @@ static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible) +@@ -5258,6 +5331,8 @@ static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible) { struct tcp_sock *tp = tcp_sk(sk); unsigned long rtt, delay; @@ -4966,7 +4933,7 @@ index c0fcfa296468..dae2ce9656b8 100644 /* More than one full frame received... */ if (((tp->rcv_nxt - tp->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss && -@@ -5266,8 +5343,8 @@ static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible) +@@ -5266,8 +5341,8 @@ static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible) * If application uses SO_RCVLOWAT, we want send ack now if * we have not received enough bytes to satisfy the condition. */ @@ -4977,7 +4944,7 @@ index c0fcfa296468..dae2ce9656b8 100644 /* We ACK each frame or... */ tcp_in_quickack_mode(sk) || /* Protocol state mandates a one-time immediate ACK */ -@@ -5402,6 +5479,10 @@ static void tcp_urg(struct sock *sk, struct sk_buff *skb, const struct tcphdr *t +@@ -5402,6 +5477,10 @@ static void tcp_urg(struct sock *sk, struct sk_buff *skb, const struct tcphdr *t { struct tcp_sock *tp = tcp_sk(sk); @@ -4988,7 +4955,7 @@ index c0fcfa296468..dae2ce9656b8 100644 /* Check if we get a new urgent pointer - normally not. 
*/ if (th->urg) tcp_check_urg(sk, th); -@@ -5544,9 +5625,15 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb, +@@ -5544,9 +5623,15 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb, goto discard; } @@ -5004,7 +4971,7 @@ index c0fcfa296468..dae2ce9656b8 100644 tcp_drop(sk, skb); return false; } -@@ -5603,6 +5690,10 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb) +@@ -5603,6 +5688,10 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb) tp->rx_opt.saw_tstamp = 0; @@ -5015,7 +4982,7 @@ index c0fcfa296468..dae2ce9656b8 100644 /* pred_flags is 0xS?10 << 16 + snd_wnd * if header_prediction is to be made * 'S' will always be tp->tcp_header_len >> 2 -@@ -5777,7 +5868,7 @@ void tcp_init_transfer(struct sock *sk, int bpf_op) +@@ -5777,7 +5866,7 @@ void tcp_init_transfer(struct sock *sk, int bpf_op) tcp_call_bpf(sk, bpf_op, 0, NULL); tcp_init_congestion_control(sk); @@ -5024,7 +4991,7 @@ index c0fcfa296468..dae2ce9656b8 100644 } void tcp_finish_connect(struct sock *sk, struct sk_buff *skb) -@@ -5814,17 +5905,24 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack, +@@ -5814,17 +5903,24 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack, struct tcp_fastopen_cookie *cookie) { struct tcp_sock *tp = tcp_sk(sk); @@ -5051,7 +5018,7 @@ index c0fcfa296468..dae2ce9656b8 100644 mss = opt.mss_clamp; } -@@ -5848,7 +5946,11 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack, +@@ -5848,7 +5944,11 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack, tcp_fastopen_cache_set(sk, mss, cookie, syn_drop, try_exp); @@ -5064,7 +5031,7 @@ index c0fcfa296468..dae2ce9656b8 100644 skb_rbtree_walk_from(data) { if (__tcp_retransmit_skb(sk, data, 1)) break; -@@ -5903,9 +6005,13 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, +@@ -5903,9 +6003,13 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, struct tcp_sock *tp = tcp_sk(sk); struct tcp_fastopen_cookie foc = { .len = -1 }; int saved_clamp = tp->rx_opt.mss_clamp; @@ -5079,7 +5046,7 @@ index c0fcfa296468..dae2ce9656b8 100644 if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr) tp->rx_opt.rcv_tsecr -= tp->tsoffset; -@@ -5966,11 +6072,41 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, +@@ -5966,11 +6070,41 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, tcp_try_undo_spurious_syn(sk); tcp_ack(sk, skb, FLAG_SLOWPATH); @@ -5121,7 +5088,7 @@ index c0fcfa296468..dae2ce9656b8 100644 /* RFC1323: The window in SYN & SYN/ACK segments is * never scaled. -@@ -5992,6 +6128,11 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, +@@ -5992,6 +6126,11 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, tp->tcp_header_len = sizeof(struct tcphdr); } @@ -5133,7 +5100,7 @@ index c0fcfa296468..dae2ce9656b8 100644 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); tcp_initialize_rcv_mss(sk); -@@ -6015,9 +6156,12 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, +@@ -6015,9 +6154,12 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, } if (fastopen_fail) return -1; @@ -5148,7 +5115,7 @@ index c0fcfa296468..dae2ce9656b8 100644 /* Save one ACK. Data will be ready after * several ticks, if write_pending is set. 
* -@@ -6056,6 +6200,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, +@@ -6056,6 +6198,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, tcp_paws_reject(&tp->rx_opt, 0)) goto discard_and_undo; @@ -5156,7 +5123,7 @@ index c0fcfa296468..dae2ce9656b8 100644 if (th->syn) { /* We see SYN without ACK. It is attempt of * simultaneous connect with crossed SYNs. -@@ -6072,9 +6217,15 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, +@@ -6072,9 +6215,15 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, tp->tcp_header_len = sizeof(struct tcphdr); } @@ -5172,7 +5139,7 @@ index c0fcfa296468..dae2ce9656b8 100644 /* RFC1323: The window in SYN & SYN/ACK segments is * never scaled. -@@ -6162,6 +6313,7 @@ static void tcp_rcv_synrecv_state_fastopen(struct sock *sk) +@@ -6162,6 +6311,7 @@ static void tcp_rcv_synrecv_state_fastopen(struct sock *sk) */ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb) @@ -5180,7 +5147,7 @@ index c0fcfa296468..dae2ce9656b8 100644 { struct tcp_sock *tp = tcp_sk(sk); struct inet_connection_sock *icsk = inet_csk(sk); -@@ -6204,6 +6356,16 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb) +@@ -6204,6 +6354,16 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb) tp->rx_opt.saw_tstamp = 0; tcp_mstamp_refresh(tp); queued = tcp_rcv_synsent_state_process(sk, skb, th); @@ -5197,7 +5164,7 @@ index c0fcfa296468..dae2ce9656b8 100644 if (queued >= 0) return queued; -@@ -6276,6 +6438,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb) +@@ -6276,6 +6436,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb) if (tp->rx_opt.tstamp_ok) tp->advmss -= TCPOLEN_TSTAMP_ALIGNED; @@ -5206,7 +5173,7 @@ index c0fcfa296468..dae2ce9656b8 100644 if (!inet_csk(sk)->icsk_ca_ops->cong_control) tcp_update_pacing_rate(sk); -@@ -6285,6 +6449,30 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb) +@@ -6285,6 +6447,30 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb) tcp_initialize_rcv_mss(sk); tcp_fast_path_on(tp); @@ -5237,7 +5204,7 @@ index c0fcfa296468..dae2ce9656b8 100644 break; case TCP_FIN_WAIT1: { -@@ -6325,7 +6513,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb) +@@ -6325,7 +6511,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb) tmo = tcp_fin_time(sk); if (tmo > TCP_TIMEWAIT_LEN) { inet_csk_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN); @@ -5247,7 +5214,7 @@ index c0fcfa296468..dae2ce9656b8 100644 /* Bad case. We could lose such FIN otherwise. * It is not a big problem, but it looks confusing * and not so rare event. 
We still can lose it now, -@@ -6334,7 +6523,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb) +@@ -6334,7 +6521,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb) */ inet_csk_reset_keepalive_timer(sk, tmo); } else { @@ -5256,7 +5223,7 @@ index c0fcfa296468..dae2ce9656b8 100644 goto discard; } break; -@@ -6342,7 +6531,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb) +@@ -6342,7 +6529,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb) case TCP_CLOSING: if (tp->snd_una == tp->write_seq) { @@ -5265,7 +5232,7 @@ index c0fcfa296468..dae2ce9656b8 100644 goto discard; } break; -@@ -6354,6 +6543,9 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb) +@@ -6354,6 +6541,9 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb) goto discard; } break; @@ -5275,7 +5242,7 @@ index c0fcfa296468..dae2ce9656b8 100644 } /* step 6: check the URG bit */ -@@ -6375,7 +6567,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb) +@@ -6375,7 +6565,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb) */ if (sk->sk_shutdown & RCV_SHUTDOWN) { if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && @@ -5285,7 +5252,7 @@ index c0fcfa296468..dae2ce9656b8 100644 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA); tcp_reset(sk); return 1; -@@ -6477,6 +6670,8 @@ static void tcp_openreq_init(struct request_sock *req, +@@ -6477,6 +6668,8 @@ static void tcp_openreq_init(struct request_sock *req, ireq->wscale_ok = rx_opt->wscale_ok; ireq->acked = 0; ireq->ecn_ok = 0; @@ -5294,7 +5261,7 @@ index c0fcfa296468..dae2ce9656b8 100644 ireq->ir_rmt_port = tcp_hdr(skb)->source; ireq->ir_num = ntohs(tcp_hdr(skb)->dest); ireq->ir_mark = inet_request_mark(sk, skb); -@@ -6602,12 +6797,17 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops, +@@ -6602,12 +6795,17 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops, /* TW buckets are converted to open requests without * limitations, they conserve resources and peer is * evidently real one. 
@@ -5313,7 +5280,7 @@ index c0fcfa296468..dae2ce9656b8 100644 } if (sk_acceptq_is_full(sk)) { -@@ -6625,8 +6825,8 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops, +@@ -6625,8 +6823,8 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops, tcp_clear_options(&tmp_opt); tmp_opt.mss_clamp = af_ops->mss_clamp; tmp_opt.user_mss = tp->rx_opt.user_mss; @@ -5324,7 +5291,7 @@ index c0fcfa296468..dae2ce9656b8 100644 if (want_cookie && !tmp_opt.saw_tstamp) tcp_clear_options(&tmp_opt); -@@ -6641,7 +6841,8 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops, +@@ -6641,7 +6839,8 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops, /* Note: tcp_v6_init_req() might override ir_iif for link locals */ inet_rsk(req)->ir_iif = inet_request_bound_dev_if(sk, skb); @@ -5334,7 +5301,7 @@ index c0fcfa296468..dae2ce9656b8 100644 if (security_inet_conn_request(sk, skb, req)) goto drop_and_free; -@@ -6677,7 +6878,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops, +@@ -6677,7 +6876,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops, tcp_ecn_create_request(req, skb, sk, dst); if (want_cookie) { @@ -5343,7 +5310,7 @@ index c0fcfa296468..dae2ce9656b8 100644 req->cookie_ts = tmp_opt.tstamp_ok; if (!tmp_opt.tstamp_ok) inet_rsk(req)->ecn_ok = 0; -@@ -6692,17 +6893,25 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops, +@@ -6692,17 +6891,25 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops, fastopen_sk = tcp_try_fastopen(sk, skb, req, &foc, dst); } if (fastopen_sk) { @@ -5434,7 +5401,7 @@ index 2ce85e52aea7..2e76c006ad16 100644 @@ -513,11 +522,13 @@ int tcp_v4_err(struct sk_buff *icmp_skb, u32 info) goto out; - tp->mtu_info = info; + WRITE_ONCE(tp->mtu_info, info); - if (!sock_owned_by_user(sk)) { + if (!sock_owned_by_user(meta_sk)) { tcp_v4_mtu_reduced(sk); @@ -6729,7 +6696,7 @@ index 638d7b49ad71..d246e537e686 100644 if (tp->packets_out || tcp_write_queue_empty(sk)) { /* Cancel probe timer, if it is not required. */ diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c -index fa2ae96ecdc4..36199efe2837 100644 +index fa2ae96ecdc4..d2b3e30b8788 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -21,6 +21,7 @@ @@ -6891,7 +6858,7 @@ index fa2ae96ecdc4..36199efe2837 100644 tcp_mstamp_refresh(tp); + -+ if (tp->send_mp_fclose) { ++ if (tp->send_mp_fclose && sk->sk_state == TCP_RST_WAIT) { + if (icsk->icsk_retransmits >= MPTCP_FASTCLOSE_RETRIES) { + tcp_write_err(sk); + goto out; @@ -7232,9 +7199,10 @@ index 3903cc0ab188..2f91fddabceb 100644 /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */ fastopen = rcu_dereference(tp->fastopen_rsk); snd_una = fastopen ? 
tcp_rsk(fastopen)->snt_isn : tp->snd_una; -@@ -454,10 +464,14 @@ static int tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, +@@ -454,11 +464,15 @@ static int tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, + + WRITE_ONCE(tp->mtu_info, mtu); - tp->mtu_info = ntohl(info); - if (!sock_owned_by_user(sk)) + if (!sock_owned_by_user(meta_sk)) { tcp_v6_mtu_reduced(sk); @@ -8367,10 +8335,10 @@ index 000000000000..179b53dea020 +MODULE_VERSION("0.1"); diff --git a/net/mptcp/mptcp_binder.c b/net/mptcp/mptcp_binder.c new file mode 100644 -index 000000000000..7f34a8d00274 +index 000000000000..6453e4389152 --- /dev/null +++ b/net/mptcp/mptcp_binder.c -@@ -0,0 +1,494 @@ +@@ -0,0 +1,493 @@ +#include + +#include @@ -8796,7 +8764,6 @@ index 000000000000..7f34a8d00274 + ret = proc_dostring(ctl, write, buffer, lenp, ppos); + } + -+ + return ret; +} + @@ -9426,10 +9393,10 @@ index 000000000000..9eb7628053f6 +MODULE_VERSION("0.1"); diff --git a/net/mptcp/mptcp_ctrl.c b/net/mptcp/mptcp_ctrl.c new file mode 100644 -index 000000000000..db01ec142111 +index 000000000000..ddb58a5f6d4d --- /dev/null +++ b/net/mptcp/mptcp_ctrl.c -@@ -0,0 +1,3313 @@ +@@ -0,0 +1,3310 @@ +/* + * MPTCP implementation - MPTCP-control + * @@ -11362,8 +11329,8 @@ index 000000000000..db01ec142111 + int data_was_unread = 0; + int state; + -+ mptcp_debug("%s: Close of meta_sk with tok %#x\n", -+ __func__, mpcb->mptcp_loc_token); ++ mptcp_debug("%s: Close of meta_sk with tok %#x state %u\n", ++ __func__, mpcb->mptcp_loc_token, meta_sk->sk_state); + + WARN_ON(refcount_inc_not_zero(&mpcb->mpcb_refcnt) == 0); + mutex_lock(&mpcb->mpcb_mutex); @@ -11399,6 +11366,7 @@ index 000000000000..db01ec142111 + + if (tcp_sk(sk_it)->send_mp_fclose) + continue; ++ + mptcp_sub_close(sk_it, 0); + } + goto adjudge_to_death; @@ -11524,7 +11492,6 @@ index 000000000000..db01ec142111 + } + } + -+ + if (meta_sk->sk_state == TCP_CLOSE) + inet_csk_destroy_sock(meta_sk); + /* Otherwise, socket is reprieved until protocol close. */ @@ -11584,7 +11551,6 @@ index 000000000000..db01ec142111 + meta_sk->sk_destruct = inet_sock_destruct; +} + -+ +/* Returns True if we should enable MPTCP for that socket. */ +bool mptcp_doit(struct sock *sk) +{ @@ -11807,7 +11773,6 @@ index 000000000000..db01ec142111 + * already been dropped in __mptcp_check_req_master(). 
+ */ + sock_put(child); -+ reqsk_put(req); + return -1; + } + } @@ -12691,7 +12656,6 @@ index 000000000000..db01ec142111 + for (i = 0; i <= mptcp_reqsk_tk_htb.mask; i++) + INIT_HLIST_NULLS_HEAD(&mptcp_reqsk_tk_htb.hashtable[i], i); + -+ + spin_lock_init(&mptcp_tk_hashlock); + + if (register_pernet_subsys(&mptcp_pm_proc_ops)) @@ -12946,10 +12910,10 @@ index 000000000000..6b976b2b0c72 +MODULE_VERSION("0.95"); diff --git a/net/mptcp/mptcp_fullmesh.c b/net/mptcp/mptcp_fullmesh.c new file mode 100644 -index 000000000000..5424960256e6 +index 000000000000..65e2cd9bf630 --- /dev/null +++ b/net/mptcp/mptcp_fullmesh.c -@@ -0,0 +1,1938 @@ +@@ -0,0 +1,1936 @@ +#include +#include + @@ -14499,7 +14463,6 @@ index 000000000000..5424960256e6 + } + } + -+ + rcu_read_unlock_bh(); + + return id; @@ -14856,7 +14819,6 @@ index 000000000000..5424960256e6 +out: + return ret; + -+ +err_reg_pm: +#if IS_ENABLED(CONFIG_IPV6) + unregister_inet6addr_notifier(&inet6_addr_notifier); @@ -14890,10 +14852,10 @@ index 000000000000..5424960256e6 +MODULE_VERSION("0.88"); diff --git a/net/mptcp/mptcp_input.c b/net/mptcp/mptcp_input.c new file mode 100644 -index 000000000000..ae9cc7209613 +index 000000000000..18bbecad2441 --- /dev/null +++ b/net/mptcp/mptcp_input.c -@@ -0,0 +1,2546 @@ +@@ -0,0 +1,2616 @@ +/* + * MPTCP implementation - Sending side + * @@ -15506,7 +15468,7 @@ index 000000000000..ae9cc7209613 + return 0; +} + -+static void mptcp_restart_sending(struct sock *meta_sk) ++static void mptcp_restart_sending(struct sock *meta_sk, uint32_t in_flight_seq) +{ + struct tcp_sock *meta_tp = tcp_sk(meta_sk); + struct mptcp_cb *mpcb = meta_tp->mpcb; @@ -15514,12 +15476,22 @@ index 000000000000..ae9cc7209613 + + skb = tcp_rtx_queue_head(meta_sk); + -+ /* We resend everything that has not been acknowledged, thus we need -+ * to move it from the rtx-tree to the write-queue. ++ /* We resend everything that has not been acknowledged and is not in-flight, ++ * thus we need to move it from the rtx-tree to the write-queue. + */ + wq_head = tcp_write_queue_head(meta_sk); + ++ /* We artificially restart parts of the send-queue. Thus, ++ * it is as if no packets are in flight, minus the one that are. ++ */ ++ meta_tp->packets_out = 0; ++ + skb_rbtree_walk_from_safe(skb, tmp) { ++ if (!after(TCP_SKB_CB(skb)->end_seq, in_flight_seq)) { ++ meta_tp->packets_out += tcp_skb_pcount(skb); ++ continue; ++ } ++ + list_del(&skb->tcp_tsorted_anchor); + tcp_rtx_queue_unlink(skb, meta_sk); + INIT_LIST_HEAD(&skb->tcp_tsorted_anchor); @@ -15530,20 +15502,15 @@ index 000000000000..ae9cc7209613 + tcp_add_write_queue_tail(meta_sk, skb); + } + -+ /* We artificially restart the whole send-queue. Thus, -+ * it is as if no packets are in flight -+ */ -+ meta_tp->packets_out = 0; -+ + /* If the snd_nxt already wrapped around, we have to -+ * undo the wrapping, as we are restarting from snd_una ++ * undo the wrapping, as we are restarting from in_flight_seq + * on. + */ -+ if (meta_tp->snd_nxt < meta_tp->snd_una) { ++ if (meta_tp->snd_nxt < in_flight_seq) { + mpcb->snd_high_order[mpcb->snd_hiseq_index] -= 2; + mpcb->snd_hiseq_index = mpcb->snd_hiseq_index ? 0 : 1; + } -+ meta_tp->snd_nxt = meta_tp->snd_una; ++ meta_tp->snd_nxt = in_flight_seq; + + /* Trigger a sending on the meta. */ + mptcp_push_pending_frames(meta_sk); @@ -15657,10 +15624,10 @@ index 000000000000..ae9cc7209613 + data_len = skb->len + (mptcp_is_data_fin(skb) ? 
1 : 0); + sub_seq = tcb->seq; + -+ mptcp_restart_sending(tp->meta_sk); -+ + mptcp_fallback_close(mpcb, sk); + ++ mptcp_restart_sending(tp->meta_sk, meta_tp->snd_una); ++ + /* data_seq and so on are set correctly */ + + /* At this point, the meta-ofo-queue has to be emptied, @@ -16360,7 +16327,7 @@ index 000000000000..ae9cc7209613 + } + + /* If we are in infinite mapping mode, rx_opt.data_ack has been -+ * set by mptcp_clean_rtx_infinite. ++ * set by mptcp_handle_ack_in_infinite. + */ + if (!(tcb->mptcp_flags & MPTCPHDR_ACK) && !tp->mpcb->infinite_mapping_snd) + return false; @@ -16491,23 +16458,88 @@ index 000000000000..ae9cc7209613 + return false; +} + -+void mptcp_clean_rtx_infinite(const struct sk_buff *skb, struct sock *sk) ++bool mptcp_handle_ack_in_infinite(struct sock *sk, const struct sk_buff *skb, ++ int flag) +{ -+ struct tcp_sock *tp = tcp_sk(sk), *meta_tp = tcp_sk(mptcp_meta_sk(sk)); ++ struct tcp_sock *tp = tcp_sk(sk); ++ struct tcp_sock *meta_tp = mptcp_meta_tp(tp); ++ struct mptcp_cb *mpcb = tp->mpcb; + -+ if (!tp->mpcb->infinite_mapping_snd) -+ return; -+ -+ /* The difference between both write_seq's represents the offset between -+ * data-sequence and subflow-sequence. As we are infinite, this must -+ * match. -+ * -+ * Thus, from this difference we can infer the meta snd_una. ++ /* We are already in fallback-mode. Data is in-sequence and we know ++ * exactly what is being sent on this subflow belongs to the current ++ * meta-level sequence number space. + */ -+ tp->mptcp->rx_opt.data_ack = meta_tp->snd_nxt - tp->snd_nxt + -+ tp->snd_una; ++ if (mpcb->infinite_mapping_snd) { ++ if (mpcb->infinite_send_una_ahead && ++ !before(meta_tp->snd_una, tp->mptcp->last_end_data_seq - (tp->snd_nxt - tp->snd_una))) { ++ tp->mptcp->rx_opt.data_ack = meta_tp->snd_una; ++ } else { ++ /* Remember that meta snd_una is no more ahead of the game */ ++ mpcb->infinite_send_una_ahead = 0; + ++ /* The difference between both write_seq's represents the offset between ++ * data-sequence and subflow-sequence. As we are infinite, this must ++ * match. ++ * ++ * Thus, from this difference we can infer the meta snd_una. ++ */ ++ tp->mptcp->rx_opt.data_ack = meta_tp->snd_nxt - ++ (tp->snd_nxt - tp->snd_una); ++ } ++ ++ goto exit; ++ } ++ ++ /* If data has been acknowleged on the meta-level, fully_established ++ * will have been set before and thus we will not fall back to infinite ++ * mapping. ++ */ ++ if (likely(tp->mptcp->fully_established)) ++ return false; ++ ++ if (!(flag & MPTCP_FLAG_DATA_ACKED)) ++ return false; ++ ++ pr_debug("%s %#x will fallback - pi %d, src %pI4:%u dst %pI4:%u rcv_nxt %u from %pS\n", ++ __func__, mpcb->mptcp_loc_token, tp->mptcp->path_index, ++ &inet_sk(sk)->inet_saddr, ntohs(inet_sk(sk)->inet_sport), ++ &inet_sk(sk)->inet_daddr, ntohs(inet_sk(sk)->inet_dport), ++ tp->rcv_nxt, __builtin_return_address(0)); ++ if (!is_master_tp(tp)) { ++ MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_FBACKSUB); ++ return true; ++ } ++ ++ mpcb->infinite_mapping_snd = 1; ++ mpcb->infinite_mapping_rcv = 1; ++ mpcb->infinite_rcv_seq = mptcp_get_rcv_nxt_64(mptcp_meta_tp(tp)); ++ tp->mptcp->fully_established = 1; ++ ++ mptcp_fallback_close(mpcb, sk); ++ ++ mptcp_restart_sending(tp->meta_sk, tp->mptcp->last_end_data_seq); ++ ++ MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_FBACKINIT); ++ ++ /* The acknowledged data-seq at the subflow-level is: ++ * last_end_data_seq - (tp->snd_nxt - tp->snd_una) ++ * ++ * If this is less than meta->snd_una, then we ignore it. Otherwise, ++ * this becomes our data_ack. 
++ */ ++ if (after(meta_tp->snd_una, tp->mptcp->last_end_data_seq - (tp->snd_nxt - tp->snd_una))) { ++ /* Remmeber that meta snd_una is ahead of the game */ ++ mpcb->infinite_send_una_ahead = 1; ++ tp->mptcp->rx_opt.data_ack = meta_tp->snd_una; ++ } else { ++ tp->mptcp->rx_opt.data_ack = tp->mptcp->last_end_data_seq - ++ (tp->snd_nxt - tp->snd_una); ++ } ++ ++exit: + mptcp_process_data_ack(sk, skb); ++ ++ return false; +} + +/**** static functions used by mptcp_parse_options */ @@ -17080,7 +17112,7 @@ index 000000000000..ae9cc7209613 + if (!th->rst && !mpcb->infinite_mapping_snd) { + mpcb->send_infinite_mapping = 1; + -+ mptcp_restart_sending(meta_sk); ++ mptcp_restart_sending(meta_sk, tcp_sk(meta_sk)->snd_una); + + mptcp_fallback_close(mpcb, sk); + } @@ -17442,10 +17474,10 @@ index 000000000000..ae9cc7209613 +} diff --git a/net/mptcp/mptcp_ipv4.c b/net/mptcp/mptcp_ipv4.c new file mode 100644 -index 000000000000..0370a7680d47 +index 000000000000..7594c8bafb81 --- /dev/null +++ b/net/mptcp/mptcp_ipv4.c -@@ -0,0 +1,431 @@ +@@ -0,0 +1,432 @@ +/* + * MPTCP implementation - IPv4-specific functions + * @@ -17504,7 +17536,6 @@ index 000000000000..0370a7680d47 + &mptcp_secret); +} + -+ +static void mptcp_v4_reqsk_destructor(struct request_sock *req) +{ + mptcp_reqsk_destructor(req); @@ -17698,7 +17729,9 @@ index 000000000000..0370a7680d47 + return 0; + +reset_and_discard: -+ tcp_v4_send_reset(rsk, skb); ++ /* skb_dst will be NULL if we come from mptcp_backlog_rcv */ ++ if (rsk || skb_dst(skb)) ++ tcp_v4_send_reset(rsk, skb); + goto discard; +} + @@ -17879,10 +17912,10 @@ index 000000000000..0370a7680d47 +} diff --git a/net/mptcp/mptcp_ipv6.c b/net/mptcp/mptcp_ipv6.c new file mode 100644 -index 000000000000..8af32df4fd5f +index 000000000000..fa13a99b735e --- /dev/null +++ b/net/mptcp/mptcp_ipv6.c -@@ -0,0 +1,479 @@ +@@ -0,0 +1,481 @@ +/* + * MPTCP implementation - IPv6-specific functions + * @@ -18163,7 +18196,9 @@ index 000000000000..8af32df4fd5f + return 0; + +reset_and_discard: -+ tcp_v6_send_reset(rsk, skb); ++ /* skb_dst will be NULL if we come from mptcp_backlog_rcv */ ++ if (rsk || skb_dst(skb)) ++ tcp_v6_send_reset(rsk, skb); + goto discard; +} + @@ -19822,10 +19857,10 @@ index 000000000000..dd696841ea85 +MODULE_ALIAS_GENL_FAMILY(MPTCP_GENL_NAME); diff --git a/net/mptcp/mptcp_olia.c b/net/mptcp/mptcp_olia.c new file mode 100644 -index 000000000000..c44eb9208581 +index 000000000000..161a63f336d7 --- /dev/null +++ b/net/mptcp/mptcp_olia.c -@@ -0,0 +1,318 @@ +@@ -0,0 +1,316 @@ +/* + * MPTCP implementation - OPPORTUNISTIC LINKED INCREASES CONGESTION CONTROL: + * @@ -19846,7 +19881,6 @@ index 000000000000..c44eb9208581 + * 2 of the License, or (at your option) any later version. 
+ */ + -+ +#include +#include + @@ -20105,7 +20139,6 @@ index 000000000000..c44eb9208581 + mptcp_olia_scale(inc_num , scale) , inc_den); + } + -+ + if (ca->mptcp_snd_cwnd_cnt >= (1 << scale) - 1) { + if (tp->snd_cwnd < tp->snd_cwnd_clamp) + tp->snd_cwnd++; @@ -20146,10 +20179,10 @@ index 000000000000..c44eb9208581 +MODULE_VERSION("0.1"); diff --git a/net/mptcp/mptcp_output.c b/net/mptcp/mptcp_output.c new file mode 100644 -index 000000000000..39eae2199802 +index 000000000000..8bf9eb4724fb --- /dev/null +++ b/net/mptcp/mptcp_output.c -@@ -0,0 +1,2009 @@ +@@ -0,0 +1,2008 @@ +/* + * MPTCP implementation - Sending side + * @@ -22158,13 +22191,12 @@ index 000000000000..39eae2199802 + + return max(xmit_size_goal, mss_now); +} -+ diff --git a/net/mptcp/mptcp_pm.c b/net/mptcp/mptcp_pm.c new file mode 100644 -index 000000000000..0e24e0aaa70a +index 000000000000..184c2cca0607 --- /dev/null +++ b/net/mptcp/mptcp_pm.c -@@ -0,0 +1,226 @@ +@@ -0,0 +1,225 @@ +/* + * MPTCP implementation - MPTCP-subflow-management + * @@ -22194,7 +22226,6 @@ index 000000000000..0e24e0aaa70a + * 2 of the License, or (at your option) any later version. + */ + -+ +#include +#include + @@ -23792,10 +23823,10 @@ index 000000000000..eed9bfb44b59 +late_initcall(mptcp_scheduler_default); diff --git a/net/mptcp/mptcp_wvegas.c b/net/mptcp/mptcp_wvegas.c new file mode 100644 -index 000000000000..787ddaab98a2 +index 000000000000..6caba957467a --- /dev/null +++ b/net/mptcp/mptcp_wvegas.c -@@ -0,0 +1,271 @@ +@@ -0,0 +1,268 @@ +/* + * MPTCP implementation - WEIGHTED VEGAS + * @@ -23852,7 +23883,6 @@ index 000000000000..787ddaab98a2 + u32 queue_delay; /* queue delay*/ +}; + -+ +static inline u64 mptcp_wvegas_scale(u32 val, int scale) +{ + return (u64) val << scale; @@ -23947,7 +23977,6 @@ index 000000000000..787ddaab98a2 + if (!mpcb) + return wvegas->weight; + -+ + mptcp_for_each_sub(mpcb, mptcp) { + struct sock *sub_sk = mptcp_to_sock(mptcp); + struct wvegas *sub_wvegas = inet_csk_ca(sub_sk); @@ -24034,7 +24063,6 @@ index 000000000000..787ddaab98a2 + tcp_slow_start(tp, acked); +} + -+ +static struct tcp_congestion_ops mptcp_wvegas __read_mostly = { + .init = mptcp_wvegas_init, + .ssthresh = tcp_reno_ssthresh, diff --git a/root/target/linux/generic/pending-5.4/640-netfilter-nf_flow_table-add-hardware-offload-support.patch b/root/target/linux/generic/pending-5.4/640-netfilter-nf_flow_table-add-hardware-offload-support.patch new file mode 100644 index 00000000..9ae75b03 --- /dev/null +++ b/root/target/linux/generic/pending-5.4/640-netfilter-nf_flow_table-add-hardware-offload-support.patch @@ -0,0 +1,554 @@ +From: Pablo Neira Ayuso +Date: Thu, 11 Jan 2018 16:32:00 +0100 +Subject: [PATCH] netfilter: nf_flow_table: add hardware offload support + +This patch adds the infrastructure to offload flows to hardware, in case +the nic/switch comes with built-in flow tables capabilities. + +If the hardware comes with no hardware flow tables or they have +limitations in terms of features, the existing infrastructure falls back +to the software flow table implementation. + +The software flow table garbage collector skips entries that resides in +the hardware, so the hardware will be responsible for releasing this +flow table entry too via flow_offload_dead(). + +Hardware configuration, either to add or to delete entries, is done from +the hardware offload workqueue, to ensure this is done from user context +given that we may sleep when grabbing the mdio mutex. 
+ +Signed-off-by: Pablo Neira Ayuso +--- + create mode 100644 net/netfilter/nf_flow_table_hw.c + +--- a/include/linux/netdevice.h ++++ b/include/linux/netdevice.h +@@ -922,6 +922,13 @@ struct devlink; + struct tlsdev_ops; + + ++struct flow_offload; ++ ++enum flow_offload_type { ++ FLOW_OFFLOAD_ADD = 0, ++ FLOW_OFFLOAD_DEL, ++}; ++ + /* + * This structure defines the management hooks for network devices. + * The following hooks can be defined; unless noted otherwise, they are +@@ -1154,6 +1161,10 @@ struct tlsdev_ops; + * int (*ndo_bridge_dellink)(struct net_device *dev, struct nlmsghdr *nlh, + * u16 flags); + * ++ * int (*ndo_flow_offload)(enum flow_offload_type type, ++ * struct flow_offload *flow); ++ * Adds/deletes flow entry to/from net device flowtable. ++ * + * int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier); + * Called to change device carrier. Soft-devices (like dummy, team, etc) + * which do not represent real hardware may define this to allow their +@@ -1401,6 +1412,8 @@ struct net_device_ops { + int (*ndo_bridge_dellink)(struct net_device *dev, + struct nlmsghdr *nlh, + u16 flags); ++ int (*ndo_flow_offload)(enum flow_offload_type type, ++ struct flow_offload *flow); + int (*ndo_change_carrier)(struct net_device *dev, + bool new_carrier); + int (*ndo_get_phys_port_id)(struct net_device *dev, +--- a/include/net/netfilter/nf_flow_table.h ++++ b/include/net/netfilter/nf_flow_table.h +@@ -21,11 +21,17 @@ struct nf_flowtable_type { + struct module *owner; + }; + ++enum nf_flowtable_flags { ++ NF_FLOWTABLE_F_HW = 0x1, ++}; ++ + struct nf_flowtable { + struct list_head list; + struct rhashtable rhashtable; + const struct nf_flowtable_type *type; ++ u32 flags; + struct delayed_work gc_work; ++ possible_net_t ft_net; + }; + + enum flow_offload_tuple_dir { +@@ -68,6 +74,7 @@ struct flow_offload_tuple_rhash { + #define FLOW_OFFLOAD_DNAT 0x2 + #define FLOW_OFFLOAD_DYING 0x4 + #define FLOW_OFFLOAD_TEARDOWN 0x8 ++#define FLOW_OFFLOAD_HW 0x10 + + struct flow_offload { + struct flow_offload_tuple_rhash tuplehash[FLOW_OFFLOAD_DIR_MAX]; +@@ -120,6 +127,22 @@ unsigned int nf_flow_offload_ip_hook(voi + unsigned int nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb, + const struct nf_hook_state *state); + ++void nf_flow_offload_hw_add(struct net *net, struct flow_offload *flow, ++ struct nf_conn *ct); ++void nf_flow_offload_hw_del(struct net *net, struct flow_offload *flow); ++ ++struct nf_flow_table_hw { ++ struct module *owner; ++ void (*add)(struct net *net, struct flow_offload *flow, ++ struct nf_conn *ct); ++ void (*del)(struct net *net, struct flow_offload *flow); ++}; ++ ++int nf_flow_table_hw_register(const struct nf_flow_table_hw *offload); ++void nf_flow_table_hw_unregister(const struct nf_flow_table_hw *offload); ++ ++extern struct work_struct nf_flow_offload_hw_work; ++ + #define MODULE_ALIAS_NF_FLOWTABLE(family) \ + MODULE_ALIAS("nf-flowtable-" __stringify(family)) + +--- a/include/uapi/linux/netfilter/nf_tables.h ++++ b/include/uapi/linux/netfilter/nf_tables.h +@@ -1516,6 +1516,7 @@ enum nft_object_attributes { + * @NFTA_FLOWTABLE_HOOK: netfilter hook configuration(NLA_U32) + * @NFTA_FLOWTABLE_USE: number of references to this flow table (NLA_U32) + * @NFTA_FLOWTABLE_HANDLE: object handle (NLA_U64) ++ * @NFTA_FLOWTABLE_FLAGS: flags (NLA_U32) + */ + enum nft_flowtable_attributes { + NFTA_FLOWTABLE_UNSPEC, +@@ -1525,6 +1526,7 @@ enum nft_flowtable_attributes { + NFTA_FLOWTABLE_USE, + NFTA_FLOWTABLE_HANDLE, + NFTA_FLOWTABLE_PAD, ++ NFTA_FLOWTABLE_FLAGS, + 
__NFTA_FLOWTABLE_MAX + }; + #define NFTA_FLOWTABLE_MAX (__NFTA_FLOWTABLE_MAX - 1) +--- a/net/netfilter/Kconfig ++++ b/net/netfilter/Kconfig +@@ -711,6 +711,15 @@ config NF_FLOW_TABLE + + To compile it as a module, choose M here. + ++config NF_FLOW_TABLE_HW ++ tristate "Netfilter flow table hardware offload module" ++ depends on NF_FLOW_TABLE ++ help ++ This option adds hardware offload support for the flow table core ++ infrastructure. ++ ++ To compile it as a module, choose M here. ++ + config NETFILTER_XTABLES + tristate "Netfilter Xtables support (required for ip_tables)" + default m if NETFILTER_ADVANCED=n +--- a/net/netfilter/Makefile ++++ b/net/netfilter/Makefile +@@ -123,6 +123,7 @@ obj-$(CONFIG_NF_FLOW_TABLE) += nf_flow_t + nf_flow_table-objs := nf_flow_table_core.o nf_flow_table_ip.o + + obj-$(CONFIG_NF_FLOW_TABLE_INET) += nf_flow_table_inet.o ++obj-$(CONFIG_NF_FLOW_TABLE_HW) += nf_flow_table_hw.o + + # generic X tables + obj-$(CONFIG_NETFILTER_XTABLES) += x_tables.o xt_tcpudp.o +--- a/net/netfilter/nf_flow_table_core.c ++++ b/net/netfilter/nf_flow_table_core.c +@@ -248,10 +248,16 @@ static inline bool nf_flow_has_expired(c + return nf_flow_timeout_delta(flow->timeout) <= 0; + } + ++static inline bool nf_flow_in_hw(const struct flow_offload *flow) ++{ ++ return flow->flags & FLOW_OFFLOAD_HW; ++} ++ + static void flow_offload_del(struct nf_flowtable *flow_table, + struct flow_offload *flow) + { + struct flow_offload_entry *e; ++ struct net *net = read_pnet(&flow_table->ft_net); + + rhashtable_remove_fast(&flow_table->rhashtable, + &flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].node, +@@ -271,6 +277,9 @@ static void flow_offload_del(struct nf_f + if (!(flow->flags & FLOW_OFFLOAD_TEARDOWN)) + flow_offload_fixup_ct_state(e->ct); + ++ if (nf_flow_in_hw(flow)) ++ nf_flow_offload_hw_del(net, flow); ++ + flow_offload_free(flow); + } + +@@ -490,10 +502,43 @@ int nf_flow_dnat_port(const struct flow_ + } + EXPORT_SYMBOL_GPL(nf_flow_dnat_port); + ++static const struct nf_flow_table_hw __rcu *nf_flow_table_hw_hook __read_mostly; ++ ++static int nf_flow_offload_hw_init(struct nf_flowtable *flow_table) ++{ ++ const struct nf_flow_table_hw *offload; ++ ++ if (!rcu_access_pointer(nf_flow_table_hw_hook)) ++ request_module("nf-flow-table-hw"); ++ ++ rcu_read_lock(); ++ offload = rcu_dereference(nf_flow_table_hw_hook); ++ if (!offload) ++ goto err_no_hw_offload; ++ ++ if (!try_module_get(offload->owner)) ++ goto err_no_hw_offload; ++ ++ rcu_read_unlock(); ++ ++ return 0; ++ ++err_no_hw_offload: ++ rcu_read_unlock(); ++ ++ return -EOPNOTSUPP; ++} ++ + int nf_flow_table_init(struct nf_flowtable *flowtable) + { + int err; + ++ if (flowtable->flags & NF_FLOWTABLE_F_HW) { ++ err = nf_flow_offload_hw_init(flowtable); ++ if (err) ++ return err; ++ } ++ + INIT_DEFERRABLE_WORK(&flowtable->gc_work, nf_flow_offload_work_gc); + + err = rhashtable_init(&flowtable->rhashtable, +@@ -534,6 +579,8 @@ static void nf_flow_table_iterate_cleanu + { + nf_flow_table_iterate(flowtable, nf_flow_table_do_cleanup, dev); + flush_delayed_work(&flowtable->gc_work); ++ if (flowtable->flags & NF_FLOWTABLE_F_HW) ++ flush_work(&nf_flow_offload_hw_work); + } + + void nf_flow_table_cleanup(struct net_device *dev) +@@ -547,6 +594,26 @@ void nf_flow_table_cleanup(struct net_de + } + EXPORT_SYMBOL_GPL(nf_flow_table_cleanup); + ++struct work_struct nf_flow_offload_hw_work; ++EXPORT_SYMBOL_GPL(nf_flow_offload_hw_work); ++ ++/* Give the hardware workqueue the chance to remove entries from hardware.*/ ++static void nf_flow_offload_hw_free(struct 
nf_flowtable *flowtable) ++{ ++ const struct nf_flow_table_hw *offload; ++ ++ flush_work(&nf_flow_offload_hw_work); ++ ++ rcu_read_lock(); ++ offload = rcu_dereference(nf_flow_table_hw_hook); ++ if (!offload) { ++ rcu_read_unlock(); ++ return; ++ } ++ module_put(offload->owner); ++ rcu_read_unlock(); ++} ++ + void nf_flow_table_free(struct nf_flowtable *flow_table) + { + mutex_lock(&flowtable_lock); +@@ -556,9 +623,58 @@ void nf_flow_table_free(struct nf_flowta + nf_flow_table_iterate(flow_table, nf_flow_table_do_cleanup, NULL); + nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, flow_table); + rhashtable_destroy(&flow_table->rhashtable); ++ if (flow_table->flags & NF_FLOWTABLE_F_HW) ++ nf_flow_offload_hw_free(flow_table); + } + EXPORT_SYMBOL_GPL(nf_flow_table_free); + ++/* Must be called from user context. */ ++void nf_flow_offload_hw_add(struct net *net, struct flow_offload *flow, ++ struct nf_conn *ct) ++{ ++ const struct nf_flow_table_hw *offload; ++ ++ rcu_read_lock(); ++ offload = rcu_dereference(nf_flow_table_hw_hook); ++ if (offload) ++ offload->add(net, flow, ct); ++ rcu_read_unlock(); ++} ++EXPORT_SYMBOL_GPL(nf_flow_offload_hw_add); ++ ++/* Must be called from user context. */ ++void nf_flow_offload_hw_del(struct net *net, struct flow_offload *flow) ++{ ++ const struct nf_flow_table_hw *offload; ++ ++ rcu_read_lock(); ++ offload = rcu_dereference(nf_flow_table_hw_hook); ++ if (offload) ++ offload->del(net, flow); ++ rcu_read_unlock(); ++} ++EXPORT_SYMBOL_GPL(nf_flow_offload_hw_del); ++ ++int nf_flow_table_hw_register(const struct nf_flow_table_hw *offload) ++{ ++ if (rcu_access_pointer(nf_flow_table_hw_hook)) ++ return -EBUSY; ++ ++ rcu_assign_pointer(nf_flow_table_hw_hook, offload); ++ ++ return 0; ++} ++EXPORT_SYMBOL_GPL(nf_flow_table_hw_register); ++ ++void nf_flow_table_hw_unregister(const struct nf_flow_table_hw *offload) ++{ ++ WARN_ON(rcu_access_pointer(nf_flow_table_hw_hook) != offload); ++ rcu_assign_pointer(nf_flow_table_hw_hook, NULL); ++ ++ synchronize_rcu(); ++} ++EXPORT_SYMBOL_GPL(nf_flow_table_hw_unregister); ++ + static int nf_flow_table_netdev_event(struct notifier_block *this, + unsigned long event, void *ptr) + { +--- /dev/null ++++ b/net/netfilter/nf_flow_table_hw.c +@@ -0,0 +1,169 @@ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++static DEFINE_SPINLOCK(flow_offload_hw_pending_list_lock); ++static LIST_HEAD(flow_offload_hw_pending_list); ++ ++static DEFINE_MUTEX(nf_flow_offload_hw_mutex); ++ ++struct flow_offload_hw { ++ struct list_head list; ++ enum flow_offload_type type; ++ struct flow_offload *flow; ++ struct nf_conn *ct; ++ possible_net_t flow_hw_net; ++}; ++ ++static int do_flow_offload_hw(struct net *net, struct flow_offload *flow, ++ int type) ++{ ++ struct net_device *indev; ++ int ret, ifindex; ++ ++ ifindex = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.iifidx; ++ indev = dev_get_by_index(net, ifindex); ++ if (WARN_ON(!indev)) ++ return 0; ++ ++ mutex_lock(&nf_flow_offload_hw_mutex); ++ ret = indev->netdev_ops->ndo_flow_offload(type, flow); ++ mutex_unlock(&nf_flow_offload_hw_mutex); ++ ++ dev_put(indev); ++ ++ return ret; ++} ++ ++static void flow_offload_hw_work_add(struct flow_offload_hw *offload) ++{ ++ struct net *net; ++ int ret; ++ ++ if (nf_ct_is_dying(offload->ct)) ++ return; ++ ++ net = read_pnet(&offload->flow_hw_net); ++ ret = do_flow_offload_hw(net, offload->flow, FLOW_OFFLOAD_ADD); ++ if (ret >= 0) ++ offload->flow->flags |= FLOW_OFFLOAD_HW; ++} ++ 
++static void flow_offload_hw_work_del(struct flow_offload_hw *offload) ++{ ++ struct net *net = read_pnet(&offload->flow_hw_net); ++ ++ do_flow_offload_hw(net, offload->flow, FLOW_OFFLOAD_DEL); ++} ++ ++static void flow_offload_hw_work(struct work_struct *work) ++{ ++ struct flow_offload_hw *offload, *next; ++ LIST_HEAD(hw_offload_pending); ++ ++ spin_lock_bh(&flow_offload_hw_pending_list_lock); ++ list_replace_init(&flow_offload_hw_pending_list, &hw_offload_pending); ++ spin_unlock_bh(&flow_offload_hw_pending_list_lock); ++ ++ list_for_each_entry_safe(offload, next, &hw_offload_pending, list) { ++ switch (offload->type) { ++ case FLOW_OFFLOAD_ADD: ++ flow_offload_hw_work_add(offload); ++ break; ++ case FLOW_OFFLOAD_DEL: ++ flow_offload_hw_work_del(offload); ++ break; ++ } ++ if (offload->ct) ++ nf_conntrack_put(&offload->ct->ct_general); ++ list_del(&offload->list); ++ kfree(offload); ++ } ++} ++ ++static void flow_offload_queue_work(struct flow_offload_hw *offload) ++{ ++ spin_lock_bh(&flow_offload_hw_pending_list_lock); ++ list_add_tail(&offload->list, &flow_offload_hw_pending_list); ++ spin_unlock_bh(&flow_offload_hw_pending_list_lock); ++ ++ schedule_work(&nf_flow_offload_hw_work); ++} ++ ++static void flow_offload_hw_add(struct net *net, struct flow_offload *flow, ++ struct nf_conn *ct) ++{ ++ struct flow_offload_hw *offload; ++ ++ offload = kmalloc(sizeof(struct flow_offload_hw), GFP_ATOMIC); ++ if (!offload) ++ return; ++ ++ nf_conntrack_get(&ct->ct_general); ++ offload->type = FLOW_OFFLOAD_ADD; ++ offload->ct = ct; ++ offload->flow = flow; ++ write_pnet(&offload->flow_hw_net, net); ++ ++ flow_offload_queue_work(offload); ++} ++ ++static void flow_offload_hw_del(struct net *net, struct flow_offload *flow) ++{ ++ struct flow_offload_hw *offload; ++ ++ offload = kmalloc(sizeof(struct flow_offload_hw), GFP_ATOMIC); ++ if (!offload) ++ return; ++ ++ offload->type = FLOW_OFFLOAD_DEL; ++ offload->ct = NULL; ++ offload->flow = flow; ++ write_pnet(&offload->flow_hw_net, net); ++ ++ flow_offload_queue_work(offload); ++} ++ ++static const struct nf_flow_table_hw flow_offload_hw = { ++ .add = flow_offload_hw_add, ++ .del = flow_offload_hw_del, ++ .owner = THIS_MODULE, ++}; ++ ++static int __init nf_flow_table_hw_module_init(void) ++{ ++ INIT_WORK(&nf_flow_offload_hw_work, flow_offload_hw_work); ++ nf_flow_table_hw_register(&flow_offload_hw); ++ ++ return 0; ++} ++ ++static void __exit nf_flow_table_hw_module_exit(void) ++{ ++ struct flow_offload_hw *offload, *next; ++ LIST_HEAD(hw_offload_pending); ++ ++ nf_flow_table_hw_unregister(&flow_offload_hw); ++ cancel_work_sync(&nf_flow_offload_hw_work); ++ ++ list_for_each_entry_safe(offload, next, &hw_offload_pending, list) { ++ if (offload->ct) ++ nf_conntrack_put(&offload->ct->ct_general); ++ list_del(&offload->list); ++ kfree(offload); ++ } ++} ++ ++module_init(nf_flow_table_hw_module_init); ++module_exit(nf_flow_table_hw_module_exit); ++ ++MODULE_LICENSE("GPL"); ++MODULE_AUTHOR("Pablo Neira Ayuso "); ++MODULE_ALIAS("nf-flow-table-hw"); +--- a/net/netfilter/nf_tables_api.c ++++ b/net/netfilter/nf_tables_api.c +@@ -5743,6 +5743,13 @@ static int nf_tables_flowtable_parse_hoo + if (err < 0) + return err; + ++ for (i = 0; i < n; i++) { ++ if (flowtable->data.flags & NF_FLOWTABLE_F_HW && ++ !dev_array[i]->netdev_ops->ndo_flow_offload) { ++ return -EOPNOTSUPP; ++ } ++ } ++ + ops = kcalloc(n, sizeof(struct nf_hook_ops), GFP_KERNEL); + if (!ops) + return -ENOMEM; +@@ -5873,10 +5880,19 @@ static int nf_tables_newflowtable(struct + } + + 
flowtable->data.type = type; ++ write_pnet(&flowtable->data.ft_net, net); ++ + err = type->init(&flowtable->data); + if (err < 0) + goto err3; + ++ if (nla[NFTA_FLOWTABLE_FLAGS]) { ++ flowtable->data.flags = ++ ntohl(nla_get_be32(nla[NFTA_FLOWTABLE_FLAGS])); ++ if (flowtable->data.flags & ~NF_FLOWTABLE_F_HW) ++ goto err4; ++ } ++ + err = nf_tables_flowtable_parse_hook(&ctx, nla[NFTA_FLOWTABLE_HOOK], + flowtable); + if (err < 0) +@@ -6002,7 +6018,8 @@ static int nf_tables_fill_flowtable_info + nla_put_string(skb, NFTA_FLOWTABLE_NAME, flowtable->name) || + nla_put_be32(skb, NFTA_FLOWTABLE_USE, htonl(flowtable->use)) || + nla_put_be64(skb, NFTA_FLOWTABLE_HANDLE, cpu_to_be64(flowtable->handle), +- NFTA_FLOWTABLE_PAD)) ++ NFTA_FLOWTABLE_PAD) || ++ nla_put_be32(skb, NFTA_FLOWTABLE_FLAGS, htonl(flowtable->data.flags))) + goto nla_put_failure; + + nest = nla_nest_start_noflag(skb, NFTA_FLOWTABLE_HOOK); +--- a/net/netfilter/nft_flow_offload.c ++++ b/net/netfilter/nft_flow_offload.c +@@ -128,6 +128,9 @@ static void nft_flow_offload_eval(const + if (ret < 0) + goto err_flow_add; + ++ if (flowtable->flags & NF_FLOWTABLE_F_HW) ++ nf_flow_offload_hw_add(nft_net(pkt), flow, ct); ++ + dst_release(route.tuple[!dir].dst); + return; + diff --git a/root/target/linux/generic/pending-5.4/690-net-add-support-for-threaded-NAPI-polling.patch b/root/target/linux/generic/pending-5.4/690-net-add-support-for-threaded-NAPI-polling.patch deleted file mode 100644 index 88c995f2..00000000 --- a/root/target/linux/generic/pending-5.4/690-net-add-support-for-threaded-NAPI-polling.patch +++ /dev/null @@ -1,344 +0,0 @@ -From: Felix Fietkau -Date: Sun, 26 Jul 2020 14:03:21 +0200 -Subject: [PATCH] net: add support for threaded NAPI polling - -For some drivers (especially 802.11 drivers), doing a lot of work in the NAPI -poll function does not perform well. Since NAPI poll is bound to the CPU it -was scheduled from, we can easily end up with a few very busy CPUs spending -most of their time in softirq/ksoftirqd and some idle ones. - -Introduce threaded NAPI for such drivers based on a workqueue. The API is the -same except for using netif_threaded_napi_add instead of netif_napi_add. - -In my tests with mt76 on MT7621 using threaded NAPI + a thread for tx scheduling -improves LAN->WLAN bridging throughput by 10-50%. Throughput without threaded -NAPI is wildly inconsistent, depending on the CPU that runs the tx scheduling -thread. - -With threaded NAPI it seems stable and consistent (and higher than the best -results I got without it). 
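Since the threaded-NAPI patch is deleted outright here rather than rebased, it is worth recording how small its driver-facing surface was. A minimal sketch, assuming a hypothetical foo driver (foo_priv, foo_poll and foo_setup_napi are placeholders; only the two netif_*napi_add() calls and NAPI_POLL_WEIGHT are real):

    #include <linux/netdevice.h>

    struct foo_priv {
            struct napi_struct napi;
    };

    int foo_poll(struct napi_struct *napi, int budget);	/* hypothetical */

    static void foo_setup_napi(struct net_device *dev, struct foo_priv *priv)
    {
            /* CPU-heavy poll function: run it from the high-priority napi_workq */
            netif_threaded_napi_add(dev, &priv->napi, foo_poll, NAPI_POLL_WEIGHT);

            /* a cheap poll function would keep the stock softirq path:
             * netif_napi_add(dev, &priv->napi, foo_poll, NAPI_POLL_WEIGHT);
             */
    }

The same patch also set NAPI_STATE_THREADED from netif_napi_add() whenever dev->threaded was set, and exposed a per-device napi_threaded sysfs attribute, so the mode could be toggled at runtime without driver changes.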
- -Based on a patch by Hillf Danton - -Cc: Hillf Danton -Signed-off-by: Felix Fietkau ---- - ---- a/include/linux/netdevice.h -+++ b/include/linux/netdevice.h -@@ -340,6 +340,7 @@ struct napi_struct { - struct list_head dev_list; - struct hlist_node napi_hash_node; - unsigned int napi_id; -+ struct work_struct work; - }; - - enum { -@@ -350,6 +351,7 @@ enum { - NAPI_STATE_HASHED, /* In NAPI hash (busy polling possible) */ - NAPI_STATE_NO_BUSY_POLL,/* Do not add in napi_hash, no busy polling */ - NAPI_STATE_IN_BUSY_POLL,/* sk_busy_loop() owns this NAPI */ -+ NAPI_STATE_THREADED, /* Use threaded NAPI */ - }; - - enum { -@@ -360,6 +362,7 @@ enum { - NAPIF_STATE_HASHED = BIT(NAPI_STATE_HASHED), - NAPIF_STATE_NO_BUSY_POLL = BIT(NAPI_STATE_NO_BUSY_POLL), - NAPIF_STATE_IN_BUSY_POLL = BIT(NAPI_STATE_IN_BUSY_POLL), -+ NAPIF_STATE_THREADED = BIT(NAPI_STATE_THREADED), - }; - - enum gro_result { -@@ -2101,6 +2104,7 @@ struct net_device { - struct lock_class_key addr_list_lock_key; - bool proto_down; - unsigned wol_enabled:1; -+ unsigned threaded:1; - }; - #define to_net_dev(d) container_of(d, struct net_device, dev) - -@@ -2281,6 +2285,26 @@ void netif_napi_add(struct net_device *d - int (*poll)(struct napi_struct *, int), int weight); - - /** -+ * netif_threaded_napi_add - initialize a NAPI context -+ * @dev: network device -+ * @napi: NAPI context -+ * @poll: polling function -+ * @weight: default weight -+ * -+ * This variant of netif_napi_add() should be used from drivers using NAPI -+ * with CPU intensive poll functions. -+ * This will schedule polling from a high priority workqueue -+ */ -+static inline void netif_threaded_napi_add(struct net_device *dev, -+ struct napi_struct *napi, -+ int (*poll)(struct napi_struct *, int), -+ int weight) -+{ -+ set_bit(NAPI_STATE_THREADED, &napi->state); -+ netif_napi_add(dev, napi, poll, weight); -+} -+ -+/** - * netif_tx_napi_add - initialize a NAPI context - * @dev: network device - * @napi: NAPI context ---- a/net/core/dev.c -+++ b/net/core/dev.c -@@ -156,6 +156,7 @@ static DEFINE_SPINLOCK(offload_lock); - struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly; - struct list_head ptype_all __read_mostly; /* Taps */ - static struct list_head offload_base __read_mostly; -+static struct workqueue_struct *napi_workq __read_mostly; - - static int netif_rx_internal(struct sk_buff *skb); - static int call_netdevice_notifiers_info(unsigned long val, -@@ -5931,6 +5932,11 @@ void __napi_schedule(struct napi_struct - { - unsigned long flags; - -+ if (test_bit(NAPI_STATE_THREADED, &n->state)) { -+ queue_work(napi_workq, &n->work); -+ return; -+ } -+ - local_irq_save(flags); - ____napi_schedule(this_cpu_ptr(&softnet_data), n); - local_irq_restore(flags); -@@ -6246,9 +6256,89 @@ static void init_gro_hash(struct napi_st - napi->gro_bitmask = 0; - } - -+static int __napi_poll(struct napi_struct *n, bool *repoll) -+{ -+ int work, weight; -+ -+ weight = n->weight; -+ -+ /* This NAPI_STATE_SCHED test is for avoiding a race -+ * with netpoll's poll_napi(). Only the entity which -+ * obtains the lock and sees NAPI_STATE_SCHED set will -+ * actually make the ->poll() call. Therefore we avoid -+ * accidentally calling ->poll() when NAPI is not scheduled. -+ */ -+ work = 0; -+ if (test_bit(NAPI_STATE_SCHED, &n->state)) { -+ work = n->poll(n, weight); -+ trace_napi_poll(n, work, weight); -+ } -+ -+ WARN_ON_ONCE(work > weight); -+ -+ if (likely(work < weight)) -+ return work; -+ -+ /* Drivers must not modify the NAPI state if they -+ * consume the entire weight. 
In such cases this code -+ * still "owns" the NAPI instance and therefore can -+ * move the instance around on the list at-will. -+ */ -+ if (unlikely(napi_disable_pending(n))) { -+ napi_complete(n); -+ return work; -+ } -+ -+ if (n->gro_bitmask) { -+ /* flush too old packets -+ * If HZ < 1000, flush all packets. -+ */ -+ napi_gro_flush(n, HZ >= 1000); -+ } -+ -+ gro_normal_list(n); -+ -+ *repoll = true; -+ -+ return work; -+} -+ -+static void napi_workfn(struct work_struct *work) -+{ -+ struct napi_struct *n = container_of(work, struct napi_struct, work); -+ void *have; -+ -+ for (;;) { -+ bool repoll = false; -+ -+ local_bh_disable(); -+ -+ have = netpoll_poll_lock(n); -+ __napi_poll(n, &repoll); -+ netpoll_poll_unlock(have); -+ -+ local_bh_enable(); -+ -+ if (!repoll) -+ return; -+ -+ if (!need_resched()) -+ continue; -+ -+ /* -+ * have to pay for the latency of task switch even if -+ * napi is scheduled -+ */ -+ queue_work(napi_workq, work); -+ return; -+ } -+} -+ - void netif_napi_add(struct net_device *dev, struct napi_struct *napi, - int (*poll)(struct napi_struct *, int), int weight) - { -+ if (dev->threaded) -+ set_bit(NAPI_STATE_THREADED, &napi->state); - INIT_LIST_HEAD(&napi->poll_list); - hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED); - napi->timer.function = napi_watchdog; -@@ -6265,6 +6355,7 @@ void netif_napi_add(struct net_device *d - #ifdef CONFIG_NETPOLL - napi->poll_owner = -1; - #endif -+ INIT_WORK(&napi->work, napi_workfn); - set_bit(NAPI_STATE_SCHED, &napi->state); - set_bit(NAPI_STATE_NPSVC, &napi->state); - list_add_rcu(&napi->dev_list, &dev->napi_list); -@@ -6305,6 +6396,7 @@ static void flush_gro_hash(struct napi_s - void netif_napi_del(struct napi_struct *napi) - { - might_sleep(); -+ cancel_work_sync(&napi->work); - if (napi_hash_del(napi)) - synchronize_net(); - list_del_init(&napi->dev_list); -@@ -6317,50 +6409,18 @@ EXPORT_SYMBOL(netif_napi_del); - - static int napi_poll(struct napi_struct *n, struct list_head *repoll) - { -+ bool do_repoll = false; - void *have; -- int work, weight; -+ int work; - - list_del_init(&n->poll_list); - - have = netpoll_poll_lock(n); - -- weight = n->weight; -- -- /* This NAPI_STATE_SCHED test is for avoiding a race -- * with netpoll's poll_napi(). Only the entity which -- * obtains the lock and sees NAPI_STATE_SCHED set will -- * actually make the ->poll() call. Therefore we avoid -- * accidentally calling ->poll() when NAPI is not scheduled. -- */ -- work = 0; -- if (test_bit(NAPI_STATE_SCHED, &n->state)) { -- work = n->poll(n, weight); -- trace_napi_poll(n, work, weight); -- } -- -- WARN_ON_ONCE(work > weight); -+ work = __napi_poll(n, &do_repoll); - -- if (likely(work < weight)) -- goto out_unlock; -- -- /* Drivers must not modify the NAPI state if they -- * consume the entire weight. In such cases this code -- * still "owns" the NAPI instance and therefore can -- * move the instance around on the list at-will. -- */ -- if (unlikely(napi_disable_pending(n))) { -- napi_complete(n); -+ if (!do_repoll) - goto out_unlock; -- } -- -- if (n->gro_bitmask) { -- /* flush too old packets -- * If HZ < 1000, flush all packets. -- */ -- napi_gro_flush(n, HZ >= 1000); -- } -- -- gro_normal_list(n); - - /* Some drivers may have called napi_schedule - * prior to exhausting their budget. 
-@@ -10340,6 +10400,10 @@ static int __init net_dev_init(void) - sd->backlog.weight = weight_p; - } - -+ napi_workq = alloc_workqueue("napi_workq", WQ_UNBOUND | WQ_HIGHPRI, -+ WQ_UNBOUND_MAX_ACTIVE | WQ_SYSFS); -+ BUG_ON(!napi_workq); -+ - dev_boot_phase = 0; - - /* The loopback device is special if any other network devices ---- a/net/core/net-sysfs.c -+++ b/net/core/net-sysfs.c -@@ -442,6 +442,52 @@ static ssize_t proto_down_store(struct d - } - NETDEVICE_SHOW_RW(proto_down, fmt_dec); - -+static int change_napi_threaded(struct net_device *dev, unsigned long val) -+{ -+ struct napi_struct *napi; -+ -+ if (list_empty(&dev->napi_list)) -+ return -EOPNOTSUPP; -+ -+ list_for_each_entry(napi, &dev->napi_list, dev_list) { -+ if (val) -+ set_bit(NAPI_STATE_THREADED, &napi->state); -+ else -+ clear_bit(NAPI_STATE_THREADED, &napi->state); -+ } -+ -+ return 0; -+} -+ -+static ssize_t napi_threaded_store(struct device *dev, -+ struct device_attribute *attr, -+ const char *buf, size_t len) -+{ -+ return netdev_store(dev, attr, buf, len, change_napi_threaded); -+} -+ -+static ssize_t napi_threaded_show(struct device *dev, -+ struct device_attribute *attr, -+ char *buf) -+{ -+ struct net_device *netdev = to_net_dev(dev); -+ struct napi_struct *napi; -+ bool enabled = false; -+ -+ if (!rtnl_trylock()) -+ return restart_syscall(); -+ -+ list_for_each_entry(napi, &netdev->napi_list, dev_list) { -+ if (test_bit(NAPI_STATE_THREADED, &napi->state)) -+ enabled = true; -+ } -+ -+ rtnl_unlock(); -+ -+ return sprintf(buf, fmt_dec, enabled); -+} -+static DEVICE_ATTR_RW(napi_threaded); -+ - static ssize_t phys_port_id_show(struct device *dev, - struct device_attribute *attr, char *buf) - { -@@ -532,6 +578,7 @@ static struct attribute *net_class_attrs - &dev_attr_flags.attr, - &dev_attr_tx_queue_len.attr, - &dev_attr_gro_flush_timeout.attr, -+ &dev_attr_napi_threaded.attr, - &dev_attr_phys_port_id.attr, - &dev_attr_phys_port_name.attr, - &dev_attr_phys_switch_id.attr, diff --git a/root/target/linux/generic/pending-5.4/999-fix-oeoverflow-ipheth.patch b/root/target/linux/generic/pending-5.4/999-fix-oeoverflow-ipheth.patch deleted file mode 100644 index 594fa62c..00000000 --- a/root/target/linux/generic/pending-5.4/999-fix-oeoverflow-ipheth.patch +++ /dev/null @@ -1,57 +0,0 @@ -From 63e4b45c82ed1bde979da7052229a4229ce9cabf Mon Sep 17 00:00:00 2001 -From: Georgi Valkov -Date: Tue, 1 Feb 2022 08:16:18 +0100 -Subject: ipheth: fix EOVERFLOW in ipheth_rcvbulk_callback - -When rx_buf is allocated we need to account for IPHETH_IP_ALIGN, -which reduces the usable size by 2 bytes. Otherwise we have 1512 -bytes usable instead of 1514, and if we receive more than 1512 -bytes, ipheth_rcvbulk_callback is called with status -EOVERFLOW, -after which the driver malfunctiones and all communication stops. 
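Dropping this local patch is presumably safe because the fix has since reached the 5.4 stable series this commit moves to; the arithmetic it corrects is worth restating. Assuming the driver's usual constants, IPHETH_BUF_SIZE = 1514 and IPHETH_IP_ALIGN = 2 (implied by the message above but not visible in these hunks):

    /* before: rx_buf = usb_alloc_coherent(udev, IPHETH_BUF_SIZE, ...)
     *         1514 bytes allocated - 2 bytes of IPHETH_IP_ALIGN padding
     *         = 1512 usable, so a full 1514-byte Ethernet frame completes
     *         the bulk-in URB with status -EOVERFLOW (-75).
     * after:  rx_buf sized IPHETH_BUF_SIZE + IPHETH_IP_ALIGN = 1516 bytes,
     *         1516 - 2 = 1514 usable, and the largest frame fits again. */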
- -Resolves ipheth 2-1:4.2: ipheth_rcvbulk_callback: urb status: -75 - -Fixes: f33d9e2b48a3 ("usbnet: ipheth: fix connectivity with iOS 14") -Signed-off-by: Georgi Valkov -Tested-by: Jan Kiszka -Link: https://lore.kernel.org/all/B60B8A4B-92A0-49B3-805D-809A2433B46C@abv.bg/ -Link: https://lore.kernel.org/all/24851bd2769434a5fc24730dce8e8a984c5a4505.1643699778.git.jan.kiszka@siemens.com/ -Signed-off-by: Jakub Kicinski ---- - drivers/net/usb/ipheth.c | 6 +++--- - 1 file changed, 3 insertions(+), 3 deletions(-) - -diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c -index cd33955df0b65f..6a769df0b4213c 100644 ---- a/drivers/net/usb/ipheth.c -+++ b/drivers/net/usb/ipheth.c -@@ -121,7 +121,7 @@ static int ipheth_alloc_urbs(struct ipheth_device *iphone) - if (tx_buf == NULL) - goto free_rx_urb; - -- rx_buf = usb_alloc_coherent(iphone->udev, IPHETH_BUF_SIZE, -+ rx_buf = usb_alloc_coherent(iphone->udev, IPHETH_BUF_SIZE + IPHETH_IP_ALIGN, - GFP_KERNEL, &rx_urb->transfer_dma); - if (rx_buf == NULL) - goto free_tx_buf; -@@ -146,7 +146,7 @@ error_nomem: - - static void ipheth_free_urbs(struct ipheth_device *iphone) - { -- usb_free_coherent(iphone->udev, IPHETH_BUF_SIZE, iphone->rx_buf, -+ usb_free_coherent(iphone->udev, IPHETH_BUF_SIZE + IPHETH_IP_ALIGN, iphone->rx_buf, - iphone->rx_urb->transfer_dma); - usb_free_coherent(iphone->udev, IPHETH_BUF_SIZE, iphone->tx_buf, - iphone->tx_urb->transfer_dma); -@@ -317,7 +317,7 @@ static int ipheth_rx_submit(struct ipheth_device *dev, gfp_t mem_flags) - - usb_fill_bulk_urb(dev->rx_urb, udev, - usb_rcvbulkpipe(udev, dev->bulk_in), -- dev->rx_buf, IPHETH_BUF_SIZE, -+ dev->rx_buf, IPHETH_BUF_SIZE + IPHETH_IP_ALIGN, - ipheth_rcvbulk_callback, - dev); - dev->rx_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; --- -cgit - diff --git a/root/target/linux/ipq40xx/base-files/etc/init.d/pcrypt-crconf b/root/target/linux/ipq40xx/base-files/etc/init.d/pcrypt-crconf deleted file mode 100755 index 1ba5bbd1..00000000 --- a/root/target/linux/ipq40xx/base-files/etc/init.d/pcrypt-crconf +++ /dev/null @@ -1,30 +0,0 @@ -#!/bin/sh /etc/rc.common -# (C) 2013 openwrt.org - -START=81 - -boot() { - SHA_ALGS="sha1-neon sha224-neon sha256-neon sha384-neon sha512-neon md5-generic" - - for alg in $SHA_ALGS; do - crconf add driver "authenc(hmac($alg),cbc(aes-generic))" type 3 - crconf add driver "pcrypt(authenc(hmac($alg),cbc(aes-generic)))" type 3 - done - - for alg in $SHA_ALGS; do - crconf add driver "authenc(hmac($alg),cbc(des3_ede-generic))" type 3 - crconf add driver "pcrypt(authenc(hmac($alg),cbc(des3_ede-generic)))" type 3 - done -} - -start() { - return 0 -} - -restart() { - return 0 -} - -stop() { - return 0 -} diff --git a/root/target/linux/ipq40xx/base-files/lib/functions/caldata.sh b/root/target/linux/ipq40xx/base-files/lib/functions/caldata.sh deleted file mode 100644 index 2177cf84..00000000 --- a/root/target/linux/ipq40xx/base-files/lib/functions/caldata.sh +++ /dev/null @@ -1,171 +0,0 @@ -# Copyright (C) 2019 OpenWrt.org - -. /lib/functions.sh -. /lib/functions/system.sh - -caldata_dd() { - local source=$1 - local target=$2 - local count=$(($3)) - local offset=$(($4)) - - dd if=$source of=$target iflag=skip_bytes,fullblock bs=$count skip=$offset count=1 2>/dev/null - return $? 
-} - -caldata_die() { - echo "caldata: " "$*" - exit 1 -} - -caldata_extract() { - local part=$1 - local offset=$(($2)) - local count=$(($3)) - local mtd - - mtd=$(find_mtd_chardev $part) - [ -n "$mtd" ] || caldata_die "no mtd device found for partition $part" - - caldata_dd $mtd /lib/firmware/$FIRMWARE $count $offset || \ - caldata_die "failed to extract calibration data from $mtd" -} - -caldata_extract_ubi() { - local part=$1 - local offset=$(($2)) - local count=$(($3)) - local ubidev - local ubi - - . /lib/upgrade/nand.sh - - ubidev=$(nand_find_ubi $CI_UBIPART) - ubi=$(nand_find_volume $ubidev $part) - [ -n "$ubi" ] || caldata_die "no UBI volume found for $part" - - caldata_dd /dev/$ubi /lib/firmware/$FIRMWARE $count $offset || \ - caldata_die "failed to extract calibration data from $ubi" -} - -caldata_extract_reverse() { - local part=$1 - local offset=$2 - local count=$(($3)) - local mtd - local reversed - local caldata - - mtd=$(find_mtd_chardev "$part") - reversed=$(hexdump -v -s $offset -n $count -e '/1 "%02x "' $mtd) - - for byte in $reversed; do - caldata="\x${byte}${caldata}" - done - - printf "%b" "$caldata" > /lib/firmware/$FIRMWARE -} - -caldata_from_file() { - local source=$1 - local offset=$(($2)) - local count=$(($3)) - local target=$4 - - [ -n "$target" ] || target=/lib/firmware/$FIRMWARE - - caldata_dd $source $target $count $offset || \ - caldata_die "failed to extract calibration data from $source" -} - -caldata_sysfsload_from_file() { - local source=$1 - local offset=$(($2)) - local count=$(($3)) - local target_dir="/sys/$DEVPATH" - local target="$target_dir/data" - - [ -d "$target_dir" ] || \ - caldata_die "no sysfs dir to write: $target" - - echo 1 > "$target_dir/loading" - caldata_dd $source $target $count $offset - if [ $? != 0 ]; then - echo 1 > "$target_dir/loading" - caldata_die "failed to extract calibration data from $source" - else - echo 0 > "$target_dir/loading" - fi -} - -caldata_valid() { - local expected="$1" - local target=$2 - - [ -n "$target" ] || target=/lib/firmware/$FIRMWARE - - magic=$(hexdump -v -n 2 -e '1/1 "%02x"' $target) - [ "$magic" = "$expected" ] - return $? 
-} - -caldata_patch_chksum() { - local mac=$1 - local mac_offset=$(($2)) - local chksum_offset=$(($3)) - local target=$4 - local xor_mac - local xor_fw_mac - local xor_fw_chksum - - xor_mac=${mac//:/} - xor_mac="${xor_mac:0:4} ${xor_mac:4:4} ${xor_mac:8:4}" - - xor_fw_mac=$(hexdump -v -n 6 -s $mac_offset -e '/1 "%02x"' /lib/firmware/$FIRMWARE) - xor_fw_mac="${xor_fw_mac:0:4} ${xor_fw_mac:4:4} ${xor_fw_mac:8:4}" - - xor_fw_chksum=$(hexdump -v -n 2 -s $chksum_offset -e '/1 "%02x"' /lib/firmware/$FIRMWARE) - xor_fw_chksum=$(xor $xor_fw_chksum $xor_fw_mac $xor_mac) - - printf "%b" "\x${xor_fw_chksum:0:2}\x${xor_fw_chksum:2:2}" | \ - dd of=$target conv=notrunc bs=1 seek=$chksum_offset count=2 -} - -caldata_patch_mac() { - local mac=$1 - local mac_offset=$(($2)) - local chksum_offset=$3 - local target=$4 - - [ -z "$mac" -o -z "$mac_offset" ] && return - - [ -n "$target" ] || target=/lib/firmware/$FIRMWARE - - [ -n "$chksum_offset" ] && caldata_patch_chksum "$mac" "$mac_offset" "$chksum_offset" "$target" - - macaddr_2bin $mac | dd of=$target conv=notrunc oflag=seek_bytes bs=6 seek=$mac_offset count=1 || \ - caldata_die "failed to write MAC address to eeprom file" -} - -ath9k_patch_mac() { - local mac=$1 - local target=$2 - - caldata_patch_mac "$mac" 0x2 "" "$target" -} - -ath9k_patch_mac_crc() { - local mac=$1 - local mac_offset=$2 - local chksum_offset=$((mac_offset - 10)) - local target=$4 - - caldata_patch_mac "$mac" "$mac_offset" "$chksum_offset" "$target" -} - -ath10k_patch_mac() { - local mac=$1 - local target=$2 - - caldata_patch_mac "$mac" 0x6 0x2 "$target" -} diff --git a/root/target/linux/ipq40xx/base-files/lib/functions/leds.sh b/root/target/linux/ipq40xx/base-files/lib/functions/leds.sh deleted file mode 100644 index a7532faa..00000000 --- a/root/target/linux/ipq40xx/base-files/lib/functions/leds.sh +++ /dev/null @@ -1,94 +0,0 @@ -# Copyright (C) 2013 OpenWrt.org - -get_dt_led_path() { - local ledpath - local basepath="/proc/device-tree" - local nodepath="$basepath/aliases/led-$1" - - [ -f "$nodepath" ] && ledpath=$(cat "$nodepath") - [ -n "$ledpath" ] && ledpath="$basepath$ledpath" - - echo "$ledpath" -} - -get_dt_led() { - local label - local ledpath=$(get_dt_led_path $1) - - [ -n "$ledpath" ] && \ - label=$(cat "$ledpath/label" 2>/dev/null) || \ - label=$(cat "$ledpath/chan-name" 2>/dev/null) || \ - label=$(basename "$ledpath") - - echo "$label" -} - -led_set_attr() { - [ -f "/sys/class/leds/$1/$2" ] && echo "$3" > "/sys/class/leds/$1/$2" -} - -led_timer() { - led_set_attr $1 "trigger" "timer" - led_set_attr $1 "delay_on" "$2" - led_set_attr $1 "delay_off" "$3" -} - -led_on() { - led_set_attr $1 "trigger" "none" - led_set_attr $1 "brightness" 255 -} - -led_off() { - led_set_attr $1 "trigger" "none" - led_set_attr $1 "brightness" 0 -} - -status_led_restore_trigger() { - local trigger - local ledpath=$(get_dt_led_path $1) - - [ -n "$ledpath" ] && \ - trigger=$(cat "$ledpath/linux,default-trigger" 2>/dev/null) - - [ -n "$trigger" ] && \ - led_set_attr "$(get_dt_led $1)" "trigger" "$trigger" -} - -status_led_set_timer() { - led_timer $status_led "$1" "$2" - [ -n "$status_led2" ] && led_timer $status_led2 "$1" "$2" -} - -status_led_set_heartbeat() { - led_set_attr $status_led "trigger" "heartbeat" -} - -status_led_on() { - led_on $status_led - [ -n "$status_led2" ] && led_on $status_led2 -} - -status_led_off() { - led_off $status_led - [ -n "$status_led2" ] && led_off $status_led2 -} - -status_led_blink_slow() { - led_timer $status_led 1000 1000 -} - -status_led_blink_fast() { - 
led_timer $status_led 100 100 -} - -status_led_blink_preinit() { - led_timer $status_led 100 100 -} - -status_led_blink_failsafe() { - led_timer $status_led 50 50 -} - -status_led_blink_preinit_regular() { - led_timer $status_led 200 200 -} diff --git a/root/target/linux/ipq40xx/base-files/lib/functions/migrations.sh b/root/target/linux/ipq40xx/base-files/lib/functions/migrations.sh deleted file mode 100644 index d43ea350..00000000 --- a/root/target/linux/ipq40xx/base-files/lib/functions/migrations.sh +++ /dev/null @@ -1,67 +0,0 @@ -. /lib/functions.sh - -migrate_led_sysfs() { - local cfg="$1"; shift - local tuples="$@" - local sysfs - local name - - config_get sysfs ${cfg} sysfs - config_get name ${cfg} name - - [ -z "${sysfs}" ] && return - - for tuple in ${tuples}; do - local old=${tuple%=*} - local new=${tuple#*=} - local new_sysfs - - new_sysfs=$(echo ${sysfs} | sed "s/${old}/${new}/") - - [ "${new_sysfs}" = "${sysfs}" ] && continue - - uci set system.${cfg}.sysfs="${new_sysfs}" - - logger -t led-migration "sysfs option of LED \"${name}\" updated to ${new_sysfs}" - done; -} - -remove_devicename_led_sysfs() { - local cfg="$1"; shift - local exceptions="$@" - local sysfs - local name - local new_sysfs - - config_get sysfs ${cfg} sysfs - config_get name ${cfg} name - - # only continue if two or more colons are present - echo "${sysfs}" | grep -q ":.*:" || return - - for exception in ${exceptions}; do - # no change if exceptions provided as argument are found for devicename - echo "${sysfs}" | grep -q "^${exception}:" && return - done - - new_sysfs=$(echo ${sysfs} | sed "s/^[^:]*://") - - uci set system.${cfg}.sysfs="${new_sysfs}" - - logger -t led-migration "sysfs option of LED \"${name}\" updated to ${new_sysfs}" -} - -migrate_leds() { - config_load system - config_foreach migrate_led_sysfs led "$@" -} - -remove_devicename_leds() { - config_load system - config_foreach remove_devicename_led_sysfs led "$@" -} - -migrations_apply() { - local realm="$1" - [ -n "$(uci changes ${realm})" ] && uci -q commit ${realm} -} diff --git a/root/target/linux/ipq40xx/base-files/lib/functions/mobile.sh b/root/target/linux/ipq40xx/base-files/lib/functions/mobile.sh deleted file mode 100644 index 2476644e..00000000 --- a/root/target/linux/ipq40xx/base-files/lib/functions/mobile.sh +++ /dev/null @@ -1,247 +0,0 @@ -#Mobile configuration management lib - -. /usr/share/libubox/jshn.sh -. /lib/functions.sh - -gsm_soft_reset() { - gsmctl -n -A at+cfun=4 - sleep 2 - gsmctl -n -A at+cfun=1 -} - -qmi_error_handle() { - local error="$1" - local modem_id="$2" - - $(echo "$error" | grep -q "error") && { - echo "$error" - } - - $(echo "$error" | grep -q "Client IDs exhausted") && { - echo "ClientIdsExhausted! reseting counter..." - proto_notify_error "$interface" NO_CID - uqmi -s -d "$device" --sync - return 1 - } - -# Reik papildyt ERROR handlinima -# $(echo "$error" | grep -q "multiple-connection-to-same-pdn-not-allowed") && { -# echo "Reseting due dublicated connection..." 
-# qmicli -p -d "$device" --uim-sim-power-off=1 -# qmicli -p -d "$device" --uim-sim-power-on=1 -# return 1 -# } - -# $(echo "$error" | grep -q "Transaction timed out") && { -# echo "Device not responding, restarting module" -# gsmctl -O $modem_id -A at+cfun=1,1 -# } -# -# $(echo "$error" | grep -q 'verbose call end reason (2,236)') && { - # echo "Failed to start network, clearing all cids" - # qmicli -p -d "$device" --wds-noop --device-open-sync - # return 1 -# } - - $(echo "$error" | grep -q "Call Failed") && { - echo "Device not responding, restarting module" - sleep 10 - gsm_soft_reset - return 1 - } - - $(echo "$error" | grep -q "Policy Mismatch") && { - echo "Reseting network..." - gsm_soft_reset - return 1 - } - - $(echo "$error" | grep -q "Failed to connect to service") && { - echo "Device not responding, restarting module" - gsmctl -A at+cfun=1,1 - return 1 - } - - $(echo "$error" | grep -q "error") && { - echo "$error" - } - - return 0 -} - -passthrough_mode= -get_passthrough() { - config_get primary "$1" primary - [ "$primary" = "1" ] && { - config_get sim "$1" position; - passthrough_mode=$(uci -q get network.mob1s${sim}a1.passthrough_mode 2>/dev/null); - } -} - -setup_bridge_v4() { - local dev="$1" - local dhcp_param_file="/tmp/dnsmasq.d/bridge" - echo "$parameters4" - - json_load "$parameters4" - json_select "ipv4" - json_get_var bridge_ipaddr ip - json_get_var bridge_mask subnet - json_get_var bridge_gateway gateway - json_get_var bridge_dns1 dns1 - json_get_var bridge_dns2 dns2 - - json_init - json_add_string name "${interface}_4" - json_add_string ifname "$dev" - json_add_string proto "none" - json_add_object "data" - ubus call network add_dynamic "$(json_dump)" - IFACE4="${interface}_4" - - json_init - json_add_string interface "${interface}_4" - json_add_string zone "lan" - ubus call network.interface set_data "$(json_dump)" - - json_init - json_add_string interface "${interface}" - json_add_string bridge_ipaddr "$bridge_ipaddr" - ubus call network.interface set_data "$(json_dump)" - - json_init - json_add_string modem "$modem" - json_add_string sim "$sim" - ubus call network.interface."${interface}_4" set_data "$(json_dump)" - json_close_object - - ip route add default dev "$dev" table 42 - ip route add default dev br-lan table 43 - ip route add "$bridge_ipaddr" dev br-lan - - ip rule add pref 5042 from "$bridge_ipaddr" lookup 42 - ip rule add pref 5043 iif "$dev" lookup 43 - #sysctl -w net.ipv4.conf.br-lan.proxy_arp=1 #2>/dev/null - ip neighbor add proxy "$bridge_gateway" dev br-lan - - iptables -A postrouting_rule -m comment --comment "Bridge mode" -o "$dev" -j ACCEPT -tnat - - config_load simcard - config_foreach get_passthrough sim - - > $dhcp_param_file - [ -z "$mac" ] && mac="*:*:*:*:*:*" - [ "$passthrough_mode" != "no_dhcp" ] && { - echo "dhcp-range=tag:mobbridge,$bridge_ipaddr,static,$bridge_mask,${leasetime:-1h}" > "$dhcp_param_file" - echo "shared-network=br-lan,$bridge_ipaddr" >> "$dhcp_param_file" - echo "dhcp-host=$mac,set:mobbridge,$bridge_ipaddr" >> "$dhcp_param_file" - echo "dhcp-option=tag:mobbridge,br-lan,3,$bridge_gateway" >> "$dhcp_param_file" - echo "dhcp-option=tag:mobbridge,br-lan,6,$bridge_dns1,$bridge_dns2" >> "$dhcp_param_file" - echo "server=$bridge_dns1" >> "$dhcp_param_file" - echo "server=$bridge_dns2" >> "$dhcp_param_file" - } - /etc/init.d/dnsmasq reload - swconfig dev 'switch0' set soft_reset 5 & -} - -setup_static_v4() { - local dev="$1" - echo "Setting up $dev V4 static" - echo "$parameters4" - - json_load "$parameters4" - json_select 
"ipv4" - json_get_var ip_4 ip - json_get_var dns1_4 dns1 - json_get_var dns2_4 dns2 - - json_init - json_add_string name "${interface}_4" - json_add_string ifname "$dev" - json_add_string proto static - json_add_string gateway "0.0.0.0" - - json_add_array ipaddr - json_add_string "" "$ip_4" - json_close_array - - json_add_array dns - [ -n "$dns1_4" ] && json_add_string "" "$dns1_4" - [ -n "$dns2_4" ] && json_add_string "" "$dns2_4" - json_close_array - - [ -n "$ip4table" ] && json_add_string ip4table "$ip4table" - proto_add_dynamic_defaults - - ubus call network add_dynamic "$(json_dump)" -} - -setup_dhcp_v4() { - local dev="$1" - echo "Setting up $dev V4 DCHP" - json_init - json_add_string name "${interface}_4" - json_add_string ifname "$dev" - json_add_string proto "dhcp" - json_add_string script "/lib/netifd/dhcp_mobile.script" - [ -n "$ip4table" ] && json_add_string ip4table "$ip4table" - proto_add_dynamic_defaults - [ -n "$zone" ] && json_add_string zone "$zone" - ubus call network add_dynamic "$(json_dump)" -} - -setup_dhcp_v6() { - local dev="$1" - echo "Setting up $dev V6 DHCP" - json_init - json_add_string name "${interface}_6" - json_add_string ifname "$dev" - json_add_string proto "dhcpv6" - [ -n "$ip6table" ] && json_add_string ip6table "$ip6table" - json_add_boolean ignore_valid 1 - proto_add_dynamic_defaults - # RFC 7278: Extend an IPv6 /64 Prefix to LAN - json_add_string extendprefix 1 - [ -n "$zone" ] && json_add_string zone "$zone" - ubus call network add_dynamic "$(json_dump)" -} - -setup_static_v6() { - local dev="$1" - echo "Setting up $dev V6 static" - echo "$parameters6" - - json_load "$parameters6" - json_select "ipv6" - json_get_var ip6_with_prefix ip - ip_6="${ip6_with_prefix%/*}" - ip_prefix_length="${ip6_with_prefix#*/}" - json_get_var ip6_gateway_with_prefix gateway - gateway_6="${ip6_gateway_with_prefix%/*}" - json_get_var dns1_6 dns1 - json_get_var dns2_6 dns2 - - json_init - json_add_string name "${interface}_6" - json_add_string ifname "$dev" - json_add_string proto static - json_add_string ip6gw "$gateway_6" - - json_add_array ip6prefix - json_add_string "" "$ip6_with_prefix" - json_close_array - - json_add_array ip6addr - json_add_string "" "$ip6_with_prefix" - json_close_array - - json_add_array dns - [ -n "$dns1_6" ] && json_add_string "" "$dns1_6" - [ -n "$dns2_6" ] && json_add_string "" "$dns2_6" - json_close_array - - [ -n "$ip6table" ] && json_add_string ip6table "$ip6table" - proto_add_dynamic_defaults - - ubus call network add_dynamic "$(json_dump)" -} diff --git a/root/target/linux/ipq40xx/base-files/lib/functions/preinit.sh b/root/target/linux/ipq40xx/base-files/lib/functions/preinit.sh deleted file mode 100644 index 591e810a..00000000 --- a/root/target/linux/ipq40xx/base-files/lib/functions/preinit.sh +++ /dev/null @@ -1,87 +0,0 @@ -# Copyright (C) 2006-2013 OpenWrt.org -# Copyright (C) 2010 Vertical Communications - -boot_hook_splice_start() { - export -n PI_HOOK_SPLICE=1 -} - -boot_hook_splice_finish() { - local hook - for hook in $PI_STACK_LIST; do - local v; eval "v=\${${hook}_splice:+\$${hook}_splice }$hook" - export -n "${hook}=${v% }" - export -n "${hook}_splice=" - done - export -n PI_HOOK_SPLICE= -} - -boot_hook_init() { - local hook="${1}_hook" - export -n "PI_STACK_LIST=${PI_STACK_LIST:+$PI_STACK_LIST }$hook" - export -n "$hook=" -} - -boot_hook_add() { - local hook="${1}_hook${PI_HOOK_SPLICE:+_splice}" - local func="${2}" - - [ -n "$func" ] && { - local v; eval "v=\$$hook" - export -n "$hook=${v:+$v }$func" - } -} - 
-boot_hook_shift() { - local hook="${1}_hook" - local rvar="${2}" - - local v; eval "v=\$$hook" - [ -n "$v" ] && { - local first="${v%% *}" - - [ "$v" != "${v#* }" ] && \ - export -n "$hook=${v#* }" || \ - export -n "$hook=" - - export -n "$rvar=$first" - return 0 - } - - return 1 -} - -boot_run_hook() { - local hook="$1" - local func - - while boot_hook_shift "$hook" func; do - local ran; eval "ran=\$PI_RAN_$func" - [ -n "$ran" ] || { - export -n "PI_RAN_$func=1" - $func "$1" "$2" - } - done -} - -pivot() { # - /bin/mount -o noatime,move /proc $1/proc && \ - pivot_root $1 $1$2 && { - /bin/mount -o noatime,move $2/dev /dev - /bin/mount -o noatime,move $2/tmp /tmp - /bin/mount -o noatime,move $2/sys /sys 2>&- - /bin/mount -o noatime,move $2/overlay /overlay 2>&- - return 0 - } -} - -fopivot() { # - /bin/mount -o noatime,lowerdir=/,upperdir=$1,workdir=$2 -t overlay "overlayfs:$1" /mnt - pivot /mnt $3 -} - -ramoverlay() { - mkdir -p /tmp/root - /bin/mount -t tmpfs -o noatime,mode=0755 root /tmp/root - mkdir -p /tmp/root/root /tmp/root/work - fopivot /tmp/root/root /tmp/root/work /rom 1 -} diff --git a/root/target/linux/ipq40xx/base-files/lib/functions/service.sh b/root/target/linux/ipq40xx/base-files/lib/functions/service.sh deleted file mode 100644 index 3d08e143..00000000 --- a/root/target/linux/ipq40xx/base-files/lib/functions/service.sh +++ /dev/null @@ -1,103 +0,0 @@ -# -# service: simple wrapper around start-stop-daemon -# -# Usage: service ACTION EXEC ARGS... -# -# Action: -# -C check if EXEC is alive -# -S start EXEC, passing it ARGS as its arguments -# -K kill EXEC, sending it a TERM signal if not specified otherwise -# -# Environment variables exposed: -# SERVICE_DAEMONIZE run EXEC in background -# SERVICE_WRITE_PID create a pid-file and use it for matching -# SERVICE_MATCH_EXEC use EXEC command-line for matching (default) -# SERVICE_MATCH_NAME use EXEC process name for matching -# SERVICE_USE_PID assume EXEC create its own pid-file and use it for matching -# SERVICE_NAME process name to use (default to EXEC file part) -# SERVICE_PID_FILE pid file to use (default to /var/run/$SERVICE_NAME.pid) -# SERVICE_SIG signal to send when using -K -# SERVICE_SIG_RELOAD default signal used when reloading -# SERVICE_SIG_STOP default signal used when stopping -# SERVICE_STOP_TIME time to wait for a process to stop gracefully before killing it -# SERVICE_UID user EXEC should be run as -# SERVICE_GID group EXEC should be run as -# -# SERVICE_DEBUG don't do anything, but show what would be done -# SERVICE_QUIET don't print anything -# - -SERVICE_QUIET=1 -SERVICE_SIG_RELOAD="HUP" -SERVICE_SIG_STOP="TERM" -SERVICE_STOP_TIME=5 -SERVICE_MATCH_EXEC=1 - -service() { - local ssd - local exec - local name - local start - ssd="${SERVICE_DEBUG:+echo }start-stop-daemon${SERVICE_QUIET:+ -q}" - case "$1" in - -C) - ssd="$ssd -K -t" - ;; - -S) - ssd="$ssd -S${SERVICE_DAEMONIZE:+ -b}${SERVICE_WRITE_PID:+ -m}" - start=1 - ;; - -K) - ssd="$ssd -K${SERVICE_SIG:+ -s $SERVICE_SIG}" - ;; - *) - echo "service: unknown ACTION '$1'" 1>&2 - return 1 - esac - shift - exec="$1" - [ -n "$exec" ] || { - echo "service: missing argument" 1>&2 - return 1 - } - [ -x "$exec" ] || { - echo "service: file '$exec' is not executable" 1>&2 - return 1 - } - name="${SERVICE_NAME:-${exec##*/}}" - [ -z "$SERVICE_USE_PID$SERVICE_WRITE_PID$SERVICE_PID_FILE" ] \ - || ssd="$ssd -p ${SERVICE_PID_FILE:-/var/run/$name.pid}" - [ -z "$SERVICE_MATCH_NAME" ] || ssd="$ssd -n $name" - ssd="$ssd${SERVICE_UID:+ -c 
$SERVICE_UID${SERVICE_GID:+:$SERVICE_GID}}" - [ -z "$SERVICE_MATCH_EXEC$start" ] || ssd="$ssd -x $exec" - shift - $ssd${1:+ -- "$@"} -} - -service_check() { - service -C "$@" -} - -service_signal() { - SERVICE_SIG="${SERVICE_SIG:-USR1}" service -K "$@" -} - -service_start() { - service -S "$@" -} - -service_stop() { - local try - SERVICE_SIG="${SERVICE_SIG:-$SERVICE_SIG_STOP}" service -K "$@" || return 1 - while [ $((try++)) -lt $SERVICE_STOP_TIME ]; do - service -C "$@" || return 0 - sleep 1 - done - SERVICE_SIG="KILL" service -K "$@" - sleep 1 - ! service -C "$@" -} - -service_reload() { - SERVICE_SIG="${SERVICE_SIG:-$SERVICE_SIG_RELOAD}" service -K "$@" -} diff --git a/root/target/linux/ipq40xx/base-files/lib/functions/system.sh b/root/target/linux/ipq40xx/base-files/lib/functions/system.sh deleted file mode 100644 index 80e41718..00000000 --- a/root/target/linux/ipq40xx/base-files/lib/functions/system.sh +++ /dev/null @@ -1,226 +0,0 @@ -# Copyright (C) 2006-2013 OpenWrt.org - -. /lib/functions.sh -. /usr/share/libubox/jshn.sh - -get_mac_binary() { - local path="$1" - local offset="$2" - - if ! [ -e "$path" ]; then - echo "get_mac_binary: file $path not found!" >&2 - return - fi - - hexdump -v -n 6 -s $offset -e '5/1 "%02x:" 1/1 "%02x"' $path 2>/dev/null -} - -get_mac_label_dt() { - local basepath="/proc/device-tree" - local macdevice="$(cat "$basepath/aliases/label-mac-device" 2>/dev/null)" - local macaddr - - [ -n "$macdevice" ] || return - - macaddr=$(get_mac_binary "$basepath/$macdevice/mac-address" 0 2>/dev/null) - [ -n "$macaddr" ] || macaddr=$(get_mac_binary "$basepath/$macdevice/local-mac-address" 0 2>/dev/null) - - echo $macaddr -} - -get_mac_label_json() { - local cfg="/etc/board.json" - local macaddr - - [ -s "$cfg" ] || return - - json_init - json_load "$(cat $cfg)" - if json_is_a system object; then - json_select system - json_get_var macaddr label_macaddr - json_select .. - fi - - echo $macaddr -} - -get_mac_label() { - local macaddr=$(get_mac_label_dt) - - [ -n "$macaddr" ] || macaddr=$(get_mac_label_json) - - echo $macaddr -} - -find_mtd_chardev() { - local INDEX=$(find_mtd_index "$1") - local PREFIX=/dev/mtd - - [ -d /dev/mtd ] && PREFIX=/dev/mtd/ - echo "${INDEX:+$PREFIX$INDEX}" -} - -mtd_get_mac_ascii() { - local mtdname="$1" - local key="$2" - local part - local mac_dirty - - part=$(find_mtd_part "$mtdname") - if [ -z "$part" ]; then - echo "mtd_get_mac_ascii: partition $mtdname not found!" >&2 - return - fi - - mac_dirty=$(strings "$part" | sed -n 's/^'"$key"'=//p') - - # "canonicalize" mac - [ -n "$mac_dirty" ] && macaddr_canonicalize "$mac_dirty" -} - -mtd_get_mac_text() { - local mtdname=$1 - local offset=$(($2)) - local part - local mac_dirty - - part=$(find_mtd_part "$mtdname") - if [ -z "$part" ]; then - echo "mtd_get_mac_text: partition $mtdname not found!" >&2 - return - fi - - if [ -z "$offset" ]; then - echo "mtd_get_mac_text: offset missing!" >&2 - return - fi - - mac_dirty=$(dd if="$part" bs=1 skip="$offset" count=17 2>/dev/null) - - # "canonicalize" mac - [ -n "$mac_dirty" ] && macaddr_canonicalize "$mac_dirty" -} - -mtd_get_mac_binary() { - local mtdname="$1" - local offset="$2" - local part - - part=$(find_mtd_part "$mtdname") - get_mac_binary "$part" "$offset" -} - -mtd_get_mac_binary_ubi() { - local mtdname="$1" - local offset="$2" - - . 
/lib/upgrade/nand.sh - - local ubidev=$(nand_find_ubi $CI_UBIPART) - local part=$(nand_find_volume $ubidev $1) - - get_mac_binary "/dev/$part" "$offset" -} - -mtd_get_part_size() { - local part_name=$1 - local first dev size erasesize name - while read dev size erasesize name; do - name=${name#'"'}; name=${name%'"'} - if [ "$name" = "$part_name" ]; then - echo $((0x$size)) - break - fi - done < /proc/mtd -} - -macaddr_add() { - local mac=$1 - local val=$2 - local oui=${mac%:*:*:*} - local nic=${mac#*:*:*:} - - nic=$(printf "%06x" $((0x${nic//:/} + val & 0xffffff)) | sed 's/^\(.\{2\}\)\(.\{2\}\)\(.\{2\}\)/\1:\2:\3/') - echo $oui:$nic -} - -macaddr_geteui() { - local mac=$1 - local sep=$2 - - echo ${mac:9:2}$sep${mac:12:2}$sep${mac:15:2} -} - -macaddr_setbit() { - local mac=$1 - local bit=${2:-0} - - [ $bit -gt 0 -a $bit -le 48 ] || return - - printf "%012x" $(( 0x${mac//:/} | 2**(48-bit) )) | sed -e 's/\(.\{2\}\)/\1:/g' -e 's/:$//' -} - -macaddr_unsetbit() { - local mac=$1 - local bit=${2:-0} - - [ $bit -gt 0 -a $bit -le 48 ] || return - - printf "%012x" $(( 0x${mac//:/} & ~(2**(48-bit)) )) | sed -e 's/\(.\{2\}\)/\1:/g' -e 's/:$//' -} - -macaddr_setbit_la() { - macaddr_setbit $1 7 -} - -macaddr_unsetbit_mc() { - local mac=$1 - - printf "%02x:%s" $((0x${mac%%:*} & ~0x01)) ${mac#*:} -} - -macaddr_random() { - local randsrc=$(get_mac_binary /dev/urandom 0) - - echo "$(macaddr_unsetbit_mc "$(macaddr_setbit_la "${randsrc}")")" -} - -macaddr_2bin() { - local mac=$1 - - echo -ne \\x${mac//:/\\x} -} - -macaddr_canonicalize() { - local mac="$1" - local canon="" - - mac=$(echo -n $mac | tr -d \") - [ ${#mac} -gt 17 ] && return - [ -n "${mac//[a-fA-F0-9\.: -]/}" ] && return - - for octet in ${mac//[\.:-]/ }; do - case "${#octet}" in - 1) - octet="0${octet}" - ;; - 2) - ;; - 4) - octet="${octet:0:2} ${octet:2:2}" - ;; - 12) - octet="${octet:0:2} ${octet:2:2} ${octet:4:2} ${octet:6:2} ${octet:8:2} ${octet:10:2}" - ;; - *) - return - ;; - esac - canon=${canon}${canon:+ }${octet} - done - - [ ${#canon} -ne 17 ] && return - - printf "%02x:%02x:%02x:%02x:%02x:%02x" 0x${canon// / 0x} 2>/dev/null -} diff --git a/root/target/linux/ipq40xx/base-files/lib/upgrade/ipq_failsafe.sh b/root/target/linux/ipq40xx/base-files/lib/upgrade/ipq_failsafe.sh deleted file mode 100644 index c1c93d20..00000000 --- a/root/target/linux/ipq40xx/base-files/lib/upgrade/ipq_failsafe.sh +++ /dev/null @@ -1,383 +0,0 @@ -find_mmc_part() { - local DEVNAME PARTNAME - - if grep -q "$1" /proc/mtd; then - echo "" && return 0 - fi - - for DEVNAME in /sys/block/mmcblk0/mmcblk*p*; do - PARTNAME=$(grep PARTNAME ${DEVNAME}/uevent | cut -f2 -d'=') - [ "$PARTNAME" = "$1" ] && echo "/dev/$(basename $DEVNAME)" && return 0 - done -} - -get_full_section_name() { - local img=$1 - local sec=$2 - - dumpimage -l ${img} | grep "^ Image.*(${sec})" | \ - sed 's,^ Image.*(\(.*\)),\1,' -} - -image_contains() { - local img=$1 - local sec=$2 - dumpimage -l ${img} | grep -q "^ Image.*(${sec}.*)" || return 1 -} - -print_sections() { - local img=$1 - - dumpimage -l ${img} | awk '/^ Image.*(.*)/ { print gensub(/Image .* \((.*)\)/,"\\1", $0) }' -} - -image_has_mandatory_section() { - local img=$1 - local mandatory_sections=$2 - - for sec in ${mandatory_sections}; do - image_contains $img ${sec} || {\ - return 1 - } - done -} - -image_demux() { - local img=$1 - - for sec in $(print_sections ${img}); do - local fullname=$(get_full_section_name ${img} ${sec}) - - local position=$(dumpimage -l ${img} | grep "(${fullname})" | awk '{print $2}') - dumpimage -i ${img} -o 
/tmp/${fullname}.bin -T "flat_dt" -p "${position}" ${fullname} > /dev/null || { \
-			echo "Error while extracting \"${sec}\" from ${img}"
-			return 1
-		}
-	done
-	return 0
-}
-
-image_is_FIT() {
-	if ! dumpimage -l $1 > /dev/null 2>&1; then
-		echo "$1 is not a valid FIT image"
-		return 1
-	fi
-	return 0
-}
-
-switch_layout() {
-	local layout=$1
-	local boot_layout=`find / -name boot_layout`
-
-	# Layout switching is only required as the boot images (up to u-boot)
-	# use 512 user data bytes per code word, whereas Linux uses 516 bytes.
-	# It's only applicable for NAND flash. So let's return if we don't have
-	# one.
-
-	[ -n "$boot_layout" ] || return
-
-	case "${layout}" in
-		boot|1) echo 1 > $boot_layout;;
-		linux|0) echo 0 > $boot_layout;;
-		*) echo "Unknown layout \"${layout}\"";;
-	esac
-}
-
-do_flash_mtd() {
-	local bin=$1
-	local mtdname=$2
-	local append=""
-
-	local mtdpart=$(grep "\"${mtdname}\"" /proc/mtd | awk -F: '{print $1}')
-	local pgsz=$(cat /sys/class/mtd/${mtdpart}/writesize)
-	[ -f "$CONF_TAR" -a "$SAVE_CONFIG" -eq 1 -a "$2" == "rootfs" ] && append="-j $CONF_TAR"
-
-	dd if=/tmp/${bin}.bin bs=${pgsz} conv=sync | mtd $append -e "/dev/${mtdpart}" write - "/dev/${mtdpart}"
-}
-
-do_flash_emmc() {
-	local bin=$1
-	local emmcblock=$2
-
-	dd if=/dev/zero of=${emmcblock}
-	dd if=/tmp/${bin}.bin of=${emmcblock}
-}
-
-do_flash_partition() {
-	local bin=$1
-	local mtdname=$2
-	local emmcblock="$(find_mmc_part "$mtdname")"
-
-	if [ -e "$emmcblock" ]; then
-		do_flash_emmc $bin $emmcblock
-	else
-		do_flash_mtd $bin $mtdname
-	fi
-}
-
-do_flash_bootconfig() {
-	local bin=$1
-	local mtdname=$2
-
-	# Fail safe upgrade
-	if [ -f /proc/boot_info/getbinary_${bin} ]; then
-		cat /proc/boot_info/getbinary_${bin} > /tmp/${bin}.bin
-		do_flash_partition $bin $mtdname
-	fi
-}
-
-do_flash_failsafe_partition() {
-	local bin=$1
-	local mtdname=$2
-	local emmcblock
-	local primaryboot
-
-	# Fail safe upgrade
-	[ -f /proc/boot_info/$mtdname/upgradepartition ] && {
-		default_mtd=$mtdname
-		mtdname=$(cat /proc/boot_info/$mtdname/upgradepartition)
-		primaryboot=$(cat /proc/boot_info/$default_mtd/primaryboot)
-		if [ $primaryboot -eq 0 ]; then
-			echo 1 > /proc/boot_info/$default_mtd/primaryboot
-		else
-			echo 0 > /proc/boot_info/$default_mtd/primaryboot
-		fi
-	}
-
-	emmcblock="$(find_mmc_part "$mtdname")"
-
-	if [ -e "$emmcblock" ]; then
-		do_flash_emmc $bin $emmcblock
-	else
-		do_flash_mtd $bin $mtdname
-	fi
-
-}
-
-do_flash_ubi() {
-	local bin=$1
-	local mtdname=$2
-	local mtdpart
-	local primaryboot
-
-	mtdpart=$(grep "\"${mtdname}\"" /proc/mtd | awk -F: '{print $1}')
-	ubidetach -p /dev/${mtdpart}
-
-	# Fail safe upgrade
-	[ -f /proc/boot_info/$mtdname/upgradepartition ] && {
-		primaryboot=$(cat /proc/boot_info/$mtdname/primaryboot)
-		if [ $primaryboot -eq 0 ]; then
-			echo 1 > /proc/boot_info/$mtdname/primaryboot
-		else
-			echo 0 > /proc/boot_info/$mtdname/primaryboot
-		fi
-
-		mtdname=$(cat /proc/boot_info/$mtdname/upgradepartition)
-	}
-
-	mtdpart=$(grep "\"${mtdname}\"" /proc/mtd | awk -F: '{print $1}')
-	ubiformat /dev/${mtdpart} -y -f /tmp/${bin}.bin
-}
-
-do_flash_tz() {
-	local sec=$1
-	local mtdpart=$(grep "\"0:QSEE\"" /proc/mtd | awk -F: '{print $1}')
-	local emmcblock="$(find_mmc_part "0:QSEE")"
-
-	if [ -n "$mtdpart" -o -e "$emmcblock" ]; then
-		do_flash_failsafe_partition ${sec} "0:QSEE"
-	else
-		do_flash_failsafe_partition ${sec} "0:TZ"
-	fi
-}
-
-do_flash_ddr() {
-	local sec=$1
-	local mtdpart=$(grep "\"0:CDT\"" /proc/mtd | awk -F: '{print $1}')
-	local emmcblock="$(find_mmc_part "0:CDT")"
-
-	if [ -n "$mtdpart" -o -e "$emmcblock" ]; then
-		do_flash_failsafe_partition ${sec} "0:CDT"
-	else
-		do_flash_failsafe_partition ${sec} "0:DDRPARAMS"
-	fi
-}
-
-to_upper () {
-	echo $1 | awk '{print toupper($0)}'
-}
-
-flash_section() {
-	local sec=$1
-
-	local board=$(board_name)
-	case "${sec}" in
-		hlos*) switch_layout linux; do_flash_failsafe_partition ${sec} "0:HLOS";;
-		rootfs*) switch_layout linux; do_flash_failsafe_partition ${sec} "rootfs";;
-		fs*) switch_layout linux; do_flash_failsafe_partition ${sec} "rootfs";;
-		ubi*) switch_layout linux; do_flash_ubi ${sec} "rootfs";;
-		#sbl1*) switch_layout boot; do_flash_partition ${sec} "0:SBL1";;
-		#sbl2*) switch_layout boot; do_flash_failsafe_partition ${sec} "0:SBL2";;
-		#sbl3*) switch_layout boot; do_flash_failsafe_partition ${sec} "0:SBL3";;
-		#mibib*) switch_layout boot; do_flash_partition ${sec} "0:MIBIB";;
-		#dtb-$(to_upper $board)*) switch_layout boot; do_flash_partition ${sec} "0:DTB";;
-		u-boot*) switch_layout boot; do_flash_failsafe_partition ${sec} "0:APPSBL";;
-		#ddr-$(to_upper $board)*) switch_layout boot; do_flash_ddr ${sec};;
-		ddr-${board}-*) switch_layout boot; do_flash_failsafe_partition ${sec} "0:CDT";;
-		#ssd*) switch_layout boot; do_flash_partition ${sec} "0:SSD";;
-		tz*) switch_layout boot; do_flash_tz ${sec};;
-		#rpm*) switch_layout boot; do_flash_failsafe_partition ${sec} "0:RPM";;
-		*) echo "Section ${sec} ignored"; return 1;;
-	esac
-
-	echo "Flashed ${sec}"
-}
-
-erase_emmc_config() {
-	local emmcblock="$(find_mmc_part "rootfs_data")"
-	if [ -e "$emmcblock" ]; then
-		dd if=/dev/zero of=${emmcblock}
-		mkfs.ext4 "$emmcblock"
-	fi
-}
-
-platform_pre_upgrade() {
-	cp /sbin/upgraded /tmp
-	ubus call system nandupgrade "{\"path\": \"$1\" }"
-}
-
-platform_check_image_ipq() {
-	local board=$(board_name)
-
-	local mandatory_nand="ubi"
-	local mandatory_nor_emmc="hlos fs"
-	local mandatory_nor="hlos"
-	local mandatory_section_found=0
-	local optional="sbl2 u-boot ddr-${board} ssd tz rpm"
-	local ignored="mibib bootconfig sbl1"
-
-	image_is_FIT $1 || return 1
-
-	image_has_mandatory_section $1 ${mandatory_nand} && {\
-		mandatory_section_found=1
-	}
-
-	image_has_mandatory_section $1 ${mandatory_nor_emmc} && {\
-		mandatory_section_found=1
-	}
-
-	image_has_mandatory_section $1 ${mandatory_nor} && {\
-		mandatory_section_found=1
-	}
-
-	if [ $mandatory_section_found -eq 0 ]; then
-		echo "Error: mandatory section(s) missing from \"$1\". Abort..."
-		return 1
-	fi
-
-	for sec in ${optional}; do
-		image_contains $1 ${sec} || {\
-			echo "Warning: optional section \"${sec}\" missing from \"$1\". Continue..."
-		}
-	done
-
-	for sec in ${ignored}; do
-		image_contains $1 ${sec} && {\
-			echo "Warning: section \"${sec}\" will be ignored from \"$1\". Continue..."
-		}
-	done
-
-	image_demux $1 || {\
-		echo "Error: \"$1\" couldn't be extracted. Abort..."
-		return 1
-	}
-
-	[ -f /tmp/hlos_version ] && rm -f /tmp/*_version
-	dumpimage -c $1 2>/dev/null
-	return $?
-}
-
-platform_version_upgrade() {
-	local version_files="appsbl_version sbl_version tz_version hlos_version rpm_version"
-	local sys="/sys/devices/system/qfprom/qfprom0/"
-	local tmp="/tmp/"
-
-	for file in $version_files; do
-		[ -f "${tmp}${file}" ] && {
-			echo "Updating "${sys}${file}" with `cat "${tmp}${file}"`"
-			echo `cat "${tmp}${file}"` > "${sys}${file}"
-			rm -f "${tmp}${file}"
-		}
-	done
-}
-
-
-# The U-Boot loader of the OpenMesh devices requires image sizes and
-# checksums to be provided in the U-Boot environment.
-# The OpenMesh devices come with 2 main partitions - while one is active
-# sysupgrade will flash the other. The boot order is changed to boot the
-# newly flashed partition. If the new partition can't be booted due to
-# upgrade failures the previously used partition is loaded.
-
-platform_do_upgrade_ipq() {
-	local board=$(board_name)
-
-	# verify some things exist before erasing
-	if [ ! -e $1 ]; then
-		echo "Error: Can't find $1 after switching to ramfs, aborting upgrade!"
-		reboot
-	fi
-
-	for sec in $(print_sections $1); do
-		if [ ! -e /tmp/${sec}.bin ]; then
-			echo "Error: Cant' find ${sec} after switching to ramfs, aborting upgrade!"
-			reboot
-		fi
-	done
-
-	case "$board" in
-	teltonika,rutx)
-		for sec in $(print_sections $1); do
-			flash_section ${sec}
-		done
-
-		switch_layout linux
-		# update bootconfig to register that fw upgrade has been done
-		do_flash_bootconfig bootconfig "0:BOOTCONFIG"
-		do_flash_bootconfig bootconfig1 "0:BOOTCONFIG1"
-		platform_version_upgrade
-
-		erase_emmc_config
-		return 0;
-		;;
-	esac
-
-	echo "Upgrade failed!"
-	return 1;
-}
-
-platform_copy_config() {
-	local emmcblock="$(find_mmc_part "rootfs_data")"
-	mkdir -p /tmp/overlay
-
-	if [ -e "$emmcblock" ]; then
-		mount -t ext4 "$emmcblock" /tmp/overlay
-		cp /tmp/sysupgrade.tgz /tmp/overlay/
-		sync
-		umount /tmp/overlay
-	else
-		local mtdname=rootfs
-		local mtdpart
-
-		[ -f /proc/boot_info/$mtdname/upgradepartition ] && {
-			mtdname=$(cat /proc/boot_info/$mtdname/upgradepartition)
-		}
-
-		mtdpart=$(grep "\"${mtdname}\"" /proc/mtd | awk -F: '{print $1}')
-		ubiattach -p /dev/${mtdpart}
-		mount -t ubifs ubi0:rootfs_data /tmp/overlay
-		cp /tmp/sysupgrade.tgz /tmp/overlay/
-		sync
-		umount /tmp/overlay
-	fi
-}
diff --git a/root/target/linux/ipq40xx/base-files/lib/upgrade/platform.sh b/root/target/linux/ipq40xx/base-files/lib/upgrade/platform.sh
index 70dc76ae..9f983b9e 100644
--- a/root/target/linux/ipq40xx/base-files/lib/upgrade/platform.sh
+++ b/root/target/linux/ipq40xx/base-files/lib/upgrade/platform.sh
@@ -185,7 +185,8 @@ platform_do_upgrade() {
 		platform_do_upgrade_dualboot_datachk "$1"
 		;;
 	teltonika,rutx)
-		platform_do_upgrade_ipq "$1"
+		CI_UBIPART="rootfs"
+		nand_do_upgrade "$1"
 		;;
 	zte,mf286d)
 		CI_UBIPART="rootfs"
diff --git a/root/target/linux/ipq40xx/patches-5.4/100-GPIO-add-named-gpio-exports.patch b/root/target/linux/ipq40xx/patches-5.4/100-GPIO-add-named-gpio-exports.patch
deleted file mode 100644
index 805836fc..00000000
--- a/root/target/linux/ipq40xx/patches-5.4/100-GPIO-add-named-gpio-exports.patch
+++ /dev/null
@@ -1,165 +0,0 @@
-From 4267880319bc1a2270d352e0ded6d6386242a7ef Mon Sep 17 00:00:00 2001
-From: John Crispin 
-Date: Tue, 12 Aug 2014 20:49:27 +0200
-Subject: [PATCH 24/53] GPIO: add named gpio exports
-
-Signed-off-by: John Crispin 
----
- drivers/gpio/gpiolib-of.c | 68 +++++++++++++++++++++++++++++++++++++++++
- drivers/gpio/gpiolib-sysfs.c | 10 +++++-
- include/asm-generic/gpio.h | 6 ++++
- include/linux/gpio/consumer.h | 8 +++++
- 4 files changed, 91 insertions(+), 1 deletion(-)
-
---- a/drivers/gpio/gpiolib-of.c
-+++ b/drivers/gpio/gpiolib-of.c
-@@ -19,6 +19,8 @@
- #include 
- #include 
- #include 
-+#include 
-+#include 
- 
- #include "gpiolib.h"
- #include "gpiolib-of.h"
-@@ -915,3 +917,68 @@ void of_gpiochip_remove(struct gpio_chip
- {
- 	of_node_put(chip->of_node);
- }
-+
-+static struct of_device_id gpio_export_ids[] = {
-+	{ .compatible = "gpio-export" },
-+	{ /* sentinel */ }
-+};
-+
-+static int of_gpio_export_probe(struct platform_device *pdev)
-+{
-+	struct device_node *np = pdev->dev.of_node;
-+	struct device_node *cnp;
-+	u32 val;
-+	int nb = 0;
-+
-+	for_each_child_of_node(np, cnp) {
-+		const char *name = NULL;
-+		int gpio;
-+		bool dmc;
-+		int max_gpio = 1;
-+		int i;
-+
-+		of_property_read_string(cnp, "gpio-export,name", &name);
-+
-+		if (!name)
-+			max_gpio = of_gpio_count(cnp);
-+
-+		for (i = 0; i < max_gpio; i++) {
-+			unsigned flags = 0;
-+			enum of_gpio_flags of_flags;
-+
-+			gpio = of_get_gpio_flags(cnp, i, &of_flags);
-+			if (!gpio_is_valid(gpio))
-+				return gpio;
-+
-+			if (of_flags == OF_GPIO_ACTIVE_LOW)
-+				flags |= GPIOF_ACTIVE_LOW;
-+
-+			if (!of_property_read_u32(cnp, "gpio-export,output", &val))
-+				flags |= val ? GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW;
-+			else
-+				flags |= GPIOF_IN;
-+
-+			if (devm_gpio_request_one(&pdev->dev, gpio, flags, name ? name : of_node_full_name(np)))
-+				continue;
-+
-+			dmc = of_property_read_bool(cnp, "gpio-export,direction_may_change");
-+			gpio_export_with_name(gpio, dmc, name);
-+			nb++;
-+		}
-+	}
-+
-+	dev_info(&pdev->dev, "%d gpio(s) exported\n", nb);
-+
-+	return 0;
-+}
-+
-+static struct platform_driver gpio_export_driver = {
-+	.driver = {
-+		.name = "gpio-export",
-+		.owner = THIS_MODULE,
-+		.of_match_table = of_match_ptr(gpio_export_ids),
-+	},
-+	.probe = of_gpio_export_probe,
-+};
-+
-+module_platform_driver(gpio_export_driver);
---- a/drivers/gpio/gpiolib-sysfs.c
-+++ b/drivers/gpio/gpiolib-sysfs.c
-@@ -571,7 +571,7 @@ static struct class gpio_class = {
-  *
-  * Returns zero on success, else an error.
-  */
--int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
-+int __gpiod_export(struct gpio_desc *desc, bool direction_may_change, const char *name)
- {
- 	struct gpio_chip *chip;
- 	struct gpio_device *gdev;
-@@ -633,6 +633,8 @@ int gpiod_export(struct gpio_desc *desc,
- 	offset = gpio_chip_hwgpio(desc);
- 	if (chip->names && chip->names[offset])
- 		ioname = chip->names[offset];
-+	if (name)
-+		ioname = name;
- 
- 	dev = device_create_with_groups(&gpio_class, &gdev->dev,
- 					MKDEV(0, 0), data, gpio_groups,
-@@ -654,6 +656,12 @@ err_unlock:
- 	gpiod_dbg(desc, "%s: status %d\n", __func__, status);
- 	return status;
- }
-+EXPORT_SYMBOL_GPL(__gpiod_export);
-+
-+int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
-+{
-+	return __gpiod_export(desc, direction_may_change, NULL);
-+}
- EXPORT_SYMBOL_GPL(gpiod_export);
- 
- static int match_export(struct device *dev, const void *desc)
---- a/include/asm-generic/gpio.h
-+++ b/include/asm-generic/gpio.h
-@@ -127,6 +127,12 @@ static inline int gpio_export(unsigned g
- 	return gpiod_export(gpio_to_desc(gpio), direction_may_change);
- }
- 
-+int __gpiod_export(struct gpio_desc *desc, bool direction_may_change, const char *name);
-+static inline int gpio_export_with_name(unsigned gpio, bool direction_may_change, const char *name)
-+{
-+	return __gpiod_export(gpio_to_desc(gpio), direction_may_change, name);
-+}
-+
- static inline int gpio_export_link(struct device *dev, const char *name,
- 				   unsigned gpio)
- {
---- a/include/linux/gpio/consumer.h
-+++ b/include/linux/gpio/consumer.h
-@@ -668,6 +668,7 @@ static inline void devm_acpi_dev_remove_
- 
- #if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_GPIO_SYSFS)
- 
-+int _gpiod_export(struct gpio_desc *desc, bool direction_may_change, const char *name);
- int gpiod_export(struct gpio_desc *desc, bool direction_may_change);
- int gpiod_export_link(struct device *dev, const char *name,
- 		      struct gpio_desc *desc);
-@@ -675,6 +676,13 @@ void gpiod_unexport(struct gpio_desc *de
- 
- #else /* CONFIG_GPIOLIB && CONFIG_GPIO_SYSFS */
- 
-+static inline int _gpiod_export(struct gpio_desc *desc,
-+				bool direction_may_change,
-+				const char *name)
-+{
-+	return -ENOSYS;
-+}
-+
- static inline int gpiod_export(struct gpio_desc *desc,
- 			       bool direction_may_change)
- {
diff --git a/root/target/linux/ipq40xx/patches-5.4/719-meiglink_slm750_support.patch b/root/target/linux/ipq40xx/patches-5.4/719-meiglink_slm750_support.patch
deleted file mode 100644
index f9af9a3c..00000000
--- a/root/target/linux/ipq40xx/patches-5.4/719-meiglink_slm750_support.patch
+++ /dev/null
@@ -1,33 +0,0 @@
-Index: linux-5.4.137/drivers/usb/serial/option.c
-===================================================================
---- linux-5.4.137.orig/drivers/usb/serial/option.c
-+++ linux-5.4.137/drivers/usb/serial/option.c
-@@ -241,6 +241,8 @@ static void option_instat_callback(struc
- #define UBLOX_PRODUCT_R6XX 0x90fa
- /* These Yuga products use Qualcomm's vendor ID */
- #define YUGA_PRODUCT_CLM920_NC5 0x9625
-+/* These Meiglink products use Qualcomm's vendor ID */
-+#define MEIGLINK_PRODUCT_SLM750 0xf601
- 
- #define QUECTEL_VENDOR_ID 0x2c7c
- /* These Quectel products use Quectel's vendor ID */
-@@ -1102,4 +1104,7 @@ static const struct usb_device_id option
- 	/* u-blox products using Qualcomm vendor ID */
- 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, UBLOX_PRODUCT_R410M),
- 	  .driver_info = RSVD(1) | RSVD(3) },
-+	/* Meiglink products using Qualcomm vendor ID */
-+	// Works OK in case of some issues check macros that are used by Quectel Products
-+	{ USB_DEVICE(QUALCOMM_VENDOR_ID, MEIGLINK_PRODUCT_SLM750)},
- 	/* Quectel products using Quectel vendor ID */
-Index: linux-5.4.137/drivers/net/usb/qmi_wwan.c
-===================================================================
---- linux-5.4.137.orig/drivers/net/usb/qmi_wwan.c
-+++ linux-5.4.137/drivers/net/usb/qmi_wwan.c
-@@ -1171,6 +1171,7 @@ static const struct usb_device_id produc
- 	{QMI_FIXED_INTF(0x05c6, 0x9079, 6)},
- 	{QMI_FIXED_INTF(0x05c6, 0x9079, 7)},
- 	{QMI_FIXED_INTF(0x05c6, 0x9079, 8)},
-+	{QMI_MATCH_FF_FF_FF(0x05c6, 0xf601)}, /* Meiglink SLM750 (in case of issues check if DTR flag setting is enough) */
- 	{QMI_FIXED_INTF(0x05c6, 0x9080, 5)},
- 	{QMI_FIXED_INTF(0x05c6, 0x9080, 6)},
- 	{QMI_FIXED_INTF(0x05c6, 0x9080, 7)},