
Add NSS Acceleration Support on Qualcomm devices

This commit is contained in:
Ycarus (Yannick Chabanois) 2024-04-23 20:45:31 +02:00
parent 359074adc6
commit e9d1e0219b
54 changed files with 12395 additions and 0 deletions

View file

@@ -0,0 +1,311 @@
From 6504bc9edeb1a2a54d813f4bb5d0267e7bf827f9 Mon Sep 17 00:00:00 2001
From: Praveenkumar I <ipkumar@codeaurora.org>
Date: Thu, 6 Feb 2020 17:35:42 +0530
Subject: [PATCH 4/8] clk: ipq8074: Support added for necessary clocks and
reset
Change-Id: I21a76a44185f766e9b6dcba274392ea8e599718b
Signed-off-by: Praveenkumar I <ipkumar@codeaurora.org>
Signed-off-by: Rajkumar Ayyasamy <arajkuma@codeaurora.org>
---
drivers/clk/qcom/gcc-ipq8074.c | 238 ++++++++++++++++++-
include/dt-bindings/clock/qcom,gcc-ipq8074.h | 35 ++-
2 files changed, 258 insertions(+), 15 deletions(-)
--- a/drivers/clk/qcom/gcc-ipq8074.c
+++ b/drivers/clk/qcom/gcc-ipq8074.c
@@ -49,6 +49,22 @@ enum {
P_UNIPHY2_TX,
};
+static const char * const gcc_xo_gpll4_gpll0_gpll6_gpll0_div2[] = {
+ "xo",
+ "gpll4",
+ "gpll0",
+ "gpll6",
+ "gpll0_out_main_div2",
+};
+
+static const struct parent_map gcc_xo_gpll4_gpll0_gpll6_gpll0_div2_map[] = {
+ { P_XO, 0 },
+ { P_GPLL4, 1 },
+ { P_GPLL0, 2 },
+ { P_GPLL6, 3 },
+ { P_GPLL0_DIV2, 4 },
+};
+
static struct clk_alpha_pll gpll0_main = {
.offset = 0x21000,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
@@ -630,6 +646,12 @@ static const struct freq_tbl ftbl_pcie_a
{ }
};
+struct freq_tbl ftbl_pcie_rchng_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ { }
+};
+
static struct clk_rcg2 pcie0_axi_clk_src = {
.cmd_rcgr = 0x75054,
.freq_tbl = ftbl_pcie_axi_clk_src,
@@ -2030,6 +2052,78 @@ static struct clk_rcg2 gp3_clk_src = {
},
};
+struct freq_tbl ftbl_qdss_tsctr_clk_src[] = {
+ F(160000000, P_GPLL0_DIV2, 2.5, 0, 0),
+ F(320000000, P_GPLL0, 2.5, 0, 0),
+ F(600000000, P_GPLL6, 2, 0, 0),
+ { }
+};
+
+struct clk_rcg2 qdss_tsctr_clk_src = {
+ .cmd_rcgr = 0x29064,
+ .freq_tbl = ftbl_qdss_tsctr_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll4_gpll0_gpll6_gpll0_div2_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "qdss_tsctr_clk_src",
+ .parent_names = gcc_xo_gpll4_gpll0_gpll6_gpll0_div2,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_fixed_factor qdss_dap_sync_clk_src = {
+ .mult = 1,
+ .div = 4,
+ .hw.init = &(struct clk_init_data){
+ .name = "qdss_dap_sync_clk_src",
+ .parent_names = (const char *[]){
+ "qdss_tsctr_clk_src"
+ },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+struct freq_tbl ftbl_qdss_at_clk_src[] = {
+ F(66670000, P_GPLL0_DIV2, 6, 0, 0),
+ F(240000000, P_GPLL6, 6, 0, 0),
+ { }
+};
+
+struct clk_rcg2 qdss_at_clk_src = {
+ .cmd_rcgr = 0x2900c,
+ .freq_tbl = ftbl_qdss_at_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll4_gpll0_gpll6_gpll0_div2_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "qdss_at_clk_src",
+ .parent_names = gcc_xo_gpll4_gpll0_gpll6_gpll0_div2,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+
+struct freq_tbl ftbl_adss_pwm_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ { }
+};
+
+struct clk_rcg2 adss_pwm_clk_src = {
+ .cmd_rcgr = 0x1c008,
+ .freq_tbl = ftbl_adss_pwm_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "adss_pwm_clk_src",
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
static struct clk_branch gcc_blsp1_ahb_clk = {
.halt_reg = 0x01008,
.clkr = {
@@ -4225,13 +4319,7 @@ static struct clk_branch gcc_gp3_clk = {
},
};
-static const struct freq_tbl ftbl_pcie_rchng_clk_src[] = {
- F(19200000, P_XO, 1, 0, 0),
- F(100000000, P_GPLL0, 8, 0, 0),
- { }
-};
-
-static struct clk_rcg2 pcie0_rchng_clk_src = {
+struct clk_rcg2 pcie0_rchng_clk_src = {
.cmd_rcgr = 0x75070,
.freq_tbl = ftbl_pcie_rchng_clk_src,
.hid_width = 5,
@@ -4323,6 +4411,114 @@ static const struct alpha_pll_config nss
.alpha_en_mask = BIT(24),
};
+static struct clk_branch gcc_snoc_bus_timeout2_ahb_clk = {
+ .halt_reg = 0x4700c,
+ .halt_bit = 31,
+ .clkr = {
+ .enable_reg = 0x4700c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_snoc_bus_timeout2_ahb_clk",
+ .parent_names = (const char *[]){
+ "usb0_master_clk_src"
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_snoc_bus_timeout3_ahb_clk = {
+ .halt_reg = 0x47014,
+ .halt_bit = 31,
+ .clkr = {
+ .enable_reg = 0x47014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_snoc_bus_timeout3_ahb_clk",
+ .parent_names = (const char *[]){
+ "usb1_master_clk_src"
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_dcc_clk = {
+ .halt_reg = 0x77004,
+ .halt_bit = 31,
+ .clkr = {
+ .enable_reg = 0x77004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_dcc_clk",
+ .parent_names = (const char *[]){
+ "pcnoc_clk_src"
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qdss_at_clk = {
+ .halt_reg = 0x29024,
+ .halt_bit = 31,
+ .clkr = {
+ .enable_reg = 0x29024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qdss_at_clk",
+ .parent_names = (const char *[]){
+ "qdss_at_clk_src"
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qdss_dap_clk = {
+ .halt_reg = 0x29084,
+ .halt_bit = 31,
+ .clkr = {
+ .enable_reg = 0x29084,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qdss_dap_clk",
+ .parent_names = (const char *[]){
+ "qdss_dap_sync_clk_src"
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_adss_pwm_clk = {
+ .halt_reg = 0x1c020,
+ .halt_bit = 31,
+ .clkr = {
+ .enable_reg = 0x1c020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_adss_pwm_clk",
+ .parent_names = (const char *[]){
+ "adss_pwm_clk_src"
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_hw *gcc_ipq8074_hws[] = {
&gpll0_out_main_div2.hw,
&gpll6_out_main_div2.hw,
@@ -4331,6 +4527,7 @@ static struct clk_hw *gcc_ipq8074_hws[]
&gcc_xo_div4_clk_src.hw,
&nss_noc_clk_src.hw,
&nss_ppe_cdiv_clk_src.hw,
+ &qdss_dap_sync_clk_src.hw,
};
static struct clk_regmap *gcc_ipq8074_clks[] = {
@@ -4562,6 +4759,15 @@ static struct clk_regmap *gcc_ipq8074_cl
[GCC_PCIE0_RCHNG_CLK] = &gcc_pcie0_rchng_clk.clkr,
[GCC_PCIE0_AXI_S_BRIDGE_CLK] = &gcc_pcie0_axi_s_bridge_clk.clkr,
[GCC_CRYPTO_PPE_CLK] = &gcc_crypto_ppe_clk.clkr,
+ [GCC_SNOC_BUS_TIMEOUT2_AHB_CLK] = &gcc_snoc_bus_timeout2_ahb_clk.clkr,
+ [GCC_SNOC_BUS_TIMEOUT3_AHB_CLK] = &gcc_snoc_bus_timeout3_ahb_clk.clkr,
+ [GCC_DCC_CLK] = &gcc_dcc_clk.clkr,
+ [QDSS_TSCTR_CLK_SRC] = &qdss_tsctr_clk_src.clkr,
+ [QDSS_AT_CLK_SRC] = &qdss_at_clk_src.clkr,
+ [GCC_QDSS_AT_CLK] = &gcc_qdss_at_clk.clkr,
+ [GCC_QDSS_DAP_CLK] = &gcc_qdss_dap_clk.clkr,
+ [ADSS_PWM_CLK_SRC] = &adss_pwm_clk_src.clkr,
+ [GCC_ADSS_PWM_CLK] = &gcc_adss_pwm_clk.clkr,
};
static const struct qcom_reset_map gcc_ipq8074_resets[] = {
--- a/include/dt-bindings/clock/qcom,gcc-ipq8074.h
+++ b/include/dt-bindings/clock/qcom,gcc-ipq8074.h
@@ -230,10 +230,19 @@
#define GCC_GP1_CLK 221
#define GCC_GP2_CLK 222
#define GCC_GP3_CLK 223
-#define GCC_PCIE0_AXI_S_BRIDGE_CLK 224
-#define GCC_PCIE0_RCHNG_CLK_SRC 225
-#define GCC_PCIE0_RCHNG_CLK 226
-#define GCC_CRYPTO_PPE_CLK 227
+#define GCC_CRYPTO_PPE_CLK 224
+#define GCC_PCIE0_RCHNG_CLK_SRC 225
+#define GCC_PCIE0_RCHNG_CLK 226
+#define GCC_PCIE0_AXI_S_BRIDGE_CLK 227
+#define GCC_SNOC_BUS_TIMEOUT2_AHB_CLK 228
+#define GCC_SNOC_BUS_TIMEOUT3_AHB_CLK 229
+#define GCC_DCC_CLK 230
+#define ADSS_PWM_CLK_SRC 231
+#define GCC_ADSS_PWM_CLK 232
+#define QDSS_TSCTR_CLK_SRC 233
+#define QDSS_AT_CLK_SRC 234
+#define GCC_QDSS_AT_CLK 235
+#define GCC_QDSS_DAP_CLK 236
#define GCC_BLSP1_BCR 0
#define GCC_BLSP1_QUP1_BCR 1
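For reference, a consumer picks up the clocks exported by the new indices above through the standard clk API once its device tree node references the GCC provider (e.g. clocks = <&gcc GCC_ADSS_PWM_CLK>). The sketch below is illustrative only: the driver name, the "adss_pwm" clock-names entry and the chosen rate are assumptions, not part of this commit.

#include <linux/clk.h>
#include <linux/module.h>
#include <linux/platform_device.h>

/* Hypothetical consumer of the ADSS PWM clock (GCC_ADSS_PWM_CLK). The
 * 200 MHz rate matches F(200000000, P_GPLL0, 4, 0, 0) from
 * ftbl_adss_pwm_clk_src in the patch above.
 */
static int demo_pwm_probe(struct platform_device *pdev)
{
	struct clk *pwm_clk;
	int ret;

	/* "adss_pwm" is an assumed clock-names entry in the consumer node */
	pwm_clk = devm_clk_get(&pdev->dev, "adss_pwm");
	if (IS_ERR(pwm_clk))
		return PTR_ERR(pwm_clk);

	ret = clk_set_rate(pwm_clk, 200000000);
	if (ret)
		return ret;

	return clk_prepare_enable(pwm_clk);
}

static struct platform_driver demo_pwm_driver = {
	.probe = demo_pwm_probe,
	.driver = {
		.name = "demo-adss-pwm",
	},
};
module_platform_driver(demo_pwm_driver);
MODULE_LICENSE("GPL");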

View file

@@ -0,0 +1,44 @@
From 462aa0c53397ec5bf78e3e7f68aa8a3ca300f4ba Mon Sep 17 00:00:00 2001
From: Selvam Sathappan Periakaruppan <speriaka@codeaurora.org>
Date: Tue, 24 Mar 2020 19:09:38 +0530
Subject: [PATCH 5/8] clk: qcom: ipq8074: Fix gcc_snoc_bus_timeout_ahb_clk
offset
By default, the gcc driver provides the ipq8074 V2 clocks. Update the
gcc_snoc_bus_timeout_ahb_clk offsets as well to the values required on
ipq8074 V2.
Change-Id: I5a6e98d002f5c3354a804e55dd9ebb1f83f7f974
Signed-off-by: Selvam Sathappan Periakaruppan <speriaka@codeaurora.org>
---
drivers/clk/qcom/gcc-ipq8074.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
--- a/drivers/clk/qcom/gcc-ipq8074.c
+++ b/drivers/clk/qcom/gcc-ipq8074.c
@@ -4412,10 +4412,10 @@ static const struct alpha_pll_config nss
};
static struct clk_branch gcc_snoc_bus_timeout2_ahb_clk = {
- .halt_reg = 0x4700c,
+ .halt_reg = 0x47014,
.halt_bit = 31,
.clkr = {
- .enable_reg = 0x4700c,
+ .enable_reg = 0x47014,
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_snoc_bus_timeout2_ahb_clk",
@@ -4430,10 +4430,10 @@ static struct clk_branch gcc_snoc_bus_ti
};
static struct clk_branch gcc_snoc_bus_timeout3_ahb_clk = {
- .halt_reg = 0x47014,
+ .halt_reg = 0x4701C,
.halt_bit = 31,
.clkr = {
- .enable_reg = 0x47014,
+ .enable_reg = 0x4701C,
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_snoc_bus_timeout3_ahb_clk",

View file

@@ -0,0 +1,41 @@
From 52315bec6ed633b6a71f28b746029602f8bd70b9 Mon Sep 17 00:00:00 2001
From: Balaji Prakash J <bjagadee@codeaurora.org>
Date: Wed, 22 Apr 2020 20:35:30 +0530
Subject: [PATCH] clk: ipq8074: fix gcc_blsp1_ahb_clk properties
Voting-enabled clocks do not support enable control through their CBCR
register, so update the gcc_blsp1_ahb_clk enable register and mask to
the corresponding enable bit in APCS_CLOCK_BRANCH_ENA_VOTE.
Voting-controlled clocks are also shared among multiple components
(APSS, RPM, NSS, TZ, etc.), so dropping the APSS vote does not turn the
clock off while another component still votes for it. Add the
BRANCH_HALT_VOTED flag to skip checking the clock disable status.
This change is derived from the following commits:
1. 246b4fb3af9bd65d8af794aac2f0e7b1ed9cc2dd
2. c8374157d5ae91d3b3e0d513d62808a798b32d3a
Signed-off-by: Balaji Prakash J <bjagadee@codeaurora.org>
Change-Id: I505cb560b31ad27a02c165fbe13bb33a2fc7d230
---
drivers/clk/qcom/gcc-ipq8074.c | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
--- a/drivers/clk/qcom/gcc-ipq8074.c
+++ b/drivers/clk/qcom/gcc-ipq8074.c
@@ -2126,9 +2126,10 @@ struct clk_rcg2 adss_pwm_clk_src = {
static struct clk_branch gcc_blsp1_ahb_clk = {
.halt_reg = 0x01008,
+ .halt_check = BRANCH_HALT_VOTED,
.clkr = {
- .enable_reg = 0x01008,
- .enable_mask = BIT(0),
+ .enable_reg = 0x0b004,
+ .enable_mask = BIT(10),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_ahb_clk",
.parent_hws = (const struct clk_hw *[]){
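As background, the change above follows the usual qcom voted-branch pattern: halt_reg keeps pointing at the clock's own CBCR so halt status can still be read, while enable_reg/enable_mask move to the shared APCS_CLOCK_BRANCH_ENA_VOTE register, and BRANCH_HALT_VOTED stops the framework from polling for the "off" state, since another voter (RPM, NSS, TZ, ...) may keep the clock running. A minimal sketch of the pattern, with illustrative offsets and name, not an additional clock added by this commit:

/* Generic shape of a voted branch clock in a qcom GCC driver */
static struct clk_branch example_voted_ahb_clk = {
	.halt_reg = 0x01008,			/* CBCR: halt status only */
	.halt_check = BRANCH_HALT_VOTED,	/* don't wait for "off" */
	.clkr = {
		.enable_reg = 0x0b004,		/* APCS_CLOCK_BRANCH_ENA_VOTE */
		.enable_mask = BIT(10),		/* this branch's vote bit */
		.hw.init = &(struct clk_init_data){
			.name = "example_voted_ahb_clk",
			.ops = &clk_branch2_ops,
		},
	},
};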

View file

@@ -0,0 +1,878 @@
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -69,6 +69,9 @@ void brioctl_set(int (*hook)(struct net
void __user *uarg));
int br_ioctl_call(struct net *net, struct net_bridge *br, unsigned int cmd,
struct ifreq *ifr, void __user *uarg);
+extern void br_dev_update_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *nlstats);
+extern bool br_is_hairpin_enabled(struct net_device *dev);
#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING)
int br_multicast_list_adjacent(struct net_device *dev,
@@ -211,4 +214,42 @@ static inline clock_t br_get_ageing_time
}
#endif
+/* QCA NSS ECM support - Start */
+extern struct net_device *br_port_dev_get(struct net_device *dev,
+ unsigned char *addr,
+ struct sk_buff *skb,
+ unsigned int cookie);
+extern void br_refresh_fdb_entry(struct net_device *dev, const char *addr);
+extern void br_fdb_entry_refresh(struct net_device *dev, const char *addr, __u16 vid);
+extern struct net_bridge_fdb_entry *br_fdb_has_entry(struct net_device *dev,
+ const char *addr,
+ __u16 vid);
+extern void br_fdb_update_register_notify(struct notifier_block *nb);
+extern void br_fdb_update_unregister_notify(struct notifier_block *nb);
+
+typedef struct net_bridge_port *br_port_dev_get_hook_t(struct net_device *dev,
+ struct sk_buff *skb,
+ unsigned char *addr,
+ unsigned int cookie);
+extern br_port_dev_get_hook_t __rcu *br_port_dev_get_hook;
+
+#define BR_FDB_EVENT_ADD 0x01
+#define BR_FDB_EVENT_DEL 0x02
+
+struct br_fdb_event {
+ struct net_device *dev;
+ unsigned char addr[6];
+ unsigned char is_local;
+ struct net_bridge *br;
+ struct net_device *orig_dev;
+};
+extern void br_fdb_register_notify(struct notifier_block *nb);
+extern void br_fdb_unregister_notify(struct notifier_block *nb);
+
+typedef struct net_bridge_port *br_get_dst_hook_t(
+ const struct net_bridge_port *src,
+ struct sk_buff **skb);
+extern br_get_dst_hook_t __rcu *br_get_dst_hook;
+/* QCA NSS ECM support - End */
+
#endif
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -143,7 +143,10 @@ extern struct net_device *__vlan_find_de
extern int vlan_for_each(struct net_device *dev,
int (*action)(struct net_device *dev, int vid,
void *arg), void *arg);
+extern void __vlan_dev_update_accel_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *stats); /* QCA NSS ECM support */
extern struct net_device *vlan_dev_real_dev(const struct net_device *dev);
+extern struct net_device *vlan_dev_next_dev(const struct net_device *dev); /* QCA NSS ECM support */
extern u16 vlan_dev_vlan_id(const struct net_device *dev);
extern __be16 vlan_dev_vlan_proto(const struct net_device *dev);
@@ -236,6 +239,12 @@ extern void vlan_vids_del_by_dev(struct
extern bool vlan_uses_dev(const struct net_device *dev);
#else
+static inline void __vlan_dev_update_accel_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+
+} /* QCA NSS ECM support - End */
+
static inline struct net_device *
__vlan_find_dev_deep_rcu(struct net_device *real_dev,
__be16 vlan_proto, u16 vlan_id)
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2855,6 +2855,10 @@ enum netdev_cmd {
NETDEV_OFFLOAD_XSTATS_DISABLE,
NETDEV_OFFLOAD_XSTATS_REPORT_USED,
NETDEV_OFFLOAD_XSTATS_REPORT_DELTA,
+ /* QCA NSS ECM Support - Start */
+ NETDEV_BR_JOIN,
+ NETDEV_BR_LEAVE,
+ /* QCA NSS ECM Support - End */
};
const char *netdev_cmd_to_name(enum netdev_cmd cmd);
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -211,6 +211,11 @@ void rt6_multipath_rebalance(struct fib6
void rt6_uncached_list_add(struct rt6_info *rt);
void rt6_uncached_list_del(struct rt6_info *rt);
+/* QCA NSS ECM support - Start */
+int rt6_register_notifier(struct notifier_block *nb);
+int rt6_unregister_notifier(struct notifier_block *nb);
+/* QCA NSS ECM support - End */
+
static inline const struct rt6_info *skb_rt6_info(const struct sk_buff *skb)
{
const struct dst_entry *dst = skb_dst(skb);
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -249,6 +249,13 @@ static inline int neigh_parms_family(str
return p->tbl->family;
}
+/* QCA NSS ECM support - Start */
+struct neigh_mac_update {
+ unsigned char old_mac[ALIGN(MAX_ADDR_LEN, sizeof(unsigned long))];
+ unsigned char update_mac[ALIGN(MAX_ADDR_LEN, sizeof(unsigned long))];
+};
+/* QCA NSS ECM support - End */
+
#define NEIGH_PRIV_ALIGN sizeof(long long)
#define NEIGH_ENTRY_SIZE(size) ALIGN((size), NEIGH_PRIV_ALIGN)
@@ -397,6 +404,11 @@ int neigh_xmit(int fam, struct net_devic
void pneigh_for_each(struct neigh_table *tbl,
void (*cb)(struct pneigh_entry *));
+/* QCA NSS ECM support - Start */
+extern void neigh_mac_update_register_notify(struct notifier_block *nb);
+extern void neigh_mac_update_unregister_notify(struct notifier_block *nb);
+/* QCA NSS ECM support - End */
+
struct neigh_seq_state {
struct seq_net_private p;
struct neigh_table *tbl;
@@ -602,4 +614,5 @@ static inline void neigh_update_is_route
*notify = 1;
}
}
+
#endif
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -240,6 +240,11 @@ struct rtable *rt_dst_alloc(struct net_d
unsigned int flags, u16 type, bool noxfrm);
struct rtable *rt_dst_clone(struct net_device *dev, struct rtable *rt);
+/* QCA NSS ECM support - Start */
+int ip_rt_register_notifier(struct notifier_block *nb);
+int ip_rt_unregister_notifier(struct notifier_block *nb);
+/* QCA NSS ECM support - End */
+
struct in_ifaddr;
void fib_add_ifaddr(struct in_ifaddr *);
void fib_del_ifaddr(struct in_ifaddr *, struct in_ifaddr *);
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -2172,4 +2172,9 @@ void br_do_proxy_suppress_arp(struct sk_
void br_do_suppress_nd(struct sk_buff *skb, struct net_bridge *br,
u16 vid, struct net_bridge_port *p, struct nd_msg *msg);
struct nd_msg *br_is_nd_neigh_msg(struct sk_buff *skb, struct nd_msg *m);
+
+/* QCA NSS ECM support - Start */
+#define __br_get(__hook, __default, __args ...) \
+ (__hook ? (__hook(__args)) : (__default))
+/* QCA NSS ECM support - End */
#endif
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -72,6 +72,28 @@ bool vlan_do_receive(struct sk_buff **sk
return true;
}
+/* QCA NSS ECM support - Start */
+/* Update the VLAN device with statistics from network offload engines */
+void __vlan_dev_update_accel_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *nlstats)
+{
+ struct vlan_pcpu_stats *stats;
+
+ if (!is_vlan_dev(dev))
+ return;
+
+ stats = per_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats, 0);
+
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_add(&stats->rx_packets, nlstats->rx_packets);
+ u64_stats_add(&stats->rx_bytes, nlstats->rx_bytes);
+ u64_stats_add(&stats->tx_packets, nlstats->tx_packets);
+ u64_stats_add(&stats->tx_bytes, nlstats->tx_bytes);
+ u64_stats_update_end(&stats->syncp);
+}
+EXPORT_SYMBOL(__vlan_dev_update_accel_stats);
+/* QCA NSS ECM support - End */
+
/* Must be invoked with rcu_read_lock. */
struct net_device *__vlan_find_dev_deep_rcu(struct net_device *dev,
__be16 vlan_proto, u16 vlan_id)
@@ -110,6 +132,15 @@ struct net_device *vlan_dev_real_dev(con
}
EXPORT_SYMBOL(vlan_dev_real_dev);
+/* QCA NSS ECM support - Start */
+/* Caller is responsible to hold the reference of the returned device */
+struct net_device *vlan_dev_next_dev(const struct net_device *dev)
+{
+ return vlan_dev_priv(dev)->real_dev;
+}
+EXPORT_SYMBOL(vlan_dev_next_dev);
+/* QCA NSS ECM support - End */
+
u16 vlan_dev_vlan_id(const struct net_device *dev)
{
return vlan_dev_priv(dev)->vlan_id;
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -33,6 +33,20 @@ static const struct rhashtable_params br
static struct kmem_cache *br_fdb_cache __read_mostly;
+ATOMIC_NOTIFIER_HEAD(br_fdb_notifier_list);
+
+void br_fdb_register_notify(struct notifier_block *nb)
+{
+ atomic_notifier_chain_register(&br_fdb_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(br_fdb_register_notify);
+
+void br_fdb_unregister_notify(struct notifier_block *nb)
+{
+ atomic_notifier_chain_unregister(&br_fdb_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(br_fdb_unregister_notify);
+
int __init br_fdb_init(void)
{
br_fdb_cache = kmem_cache_create("bridge_fdb_cache",
@@ -188,6 +202,25 @@ static void fdb_notify(struct net_bridge
if (swdev_notify)
br_switchdev_fdb_notify(br, fdb, type);
+ /* QCA NSS ECM support - Start */
+ if (fdb->dst) {
+ int event;
+ struct br_fdb_event fdb_event;
+
+ if (type == RTM_NEWNEIGH)
+ event = BR_FDB_EVENT_ADD;
+ else
+ event = BR_FDB_EVENT_DEL;
+
+ fdb_event.dev = fdb->dst->dev;
+ ether_addr_copy(fdb_event.addr, fdb->key.addr.addr);
+ fdb_event.is_local = test_bit(BR_FDB_LOCAL, &fdb->flags);
+ atomic_notifier_call_chain(&br_fdb_notifier_list,
+ event,
+ (void *)&fdb_event);
+ }
+ /* QCA NSS ECM support - End */
+
skb = nlmsg_new(fdb_nlmsg_size(), GFP_ATOMIC);
if (skb == NULL)
goto errout;
@@ -512,6 +545,22 @@ out:
spin_unlock_bh(&br->hash_lock);
}
+/* QCA NSS ECM support - Start */
+ATOMIC_NOTIFIER_HEAD(br_fdb_update_notifier_list);
+
+void br_fdb_update_register_notify(struct notifier_block *nb)
+{
+ atomic_notifier_chain_register(&br_fdb_update_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(br_fdb_update_register_notify);
+
+void br_fdb_update_unregister_notify(struct notifier_block *nb)
+{
+ atomic_notifier_chain_unregister(&br_fdb_update_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(br_fdb_update_unregister_notify);
+/* QCA NSS ECM support - End */
+
void br_fdb_cleanup(struct work_struct *work)
{
struct net_bridge *br = container_of(work, struct net_bridge,
@@ -520,6 +569,7 @@ void br_fdb_cleanup(struct work_struct *
unsigned long delay = hold_time(br);
unsigned long work_delay = delay;
unsigned long now = jiffies;
+ u8 mac_addr[6]; /* QCA NSS ECM support */
/* this part is tricky, in order to avoid blocking learning and
* consequently forwarding, we rely on rcu to delete objects with
@@ -546,8 +596,15 @@ void br_fdb_cleanup(struct work_struct *
work_delay = min(work_delay, this_timer - now);
} else {
spin_lock_bh(&br->hash_lock);
- if (!hlist_unhashed(&f->fdb_node))
+ if (!hlist_unhashed(&f->fdb_node)) {
+ ether_addr_copy(mac_addr, f->key.addr.addr);
fdb_delete(br, f, true);
+ /* QCA NSS ECM support - Start */
+ atomic_notifier_call_chain(
+ &br_fdb_update_notifier_list, 0,
+ (void *)mac_addr);
+ /* QCA NSS ECM support - End */
+ }
spin_unlock_bh(&br->hash_lock);
}
}
@@ -879,6 +936,12 @@ void br_fdb_update(struct net_bridge *br
&fdb->flags)))
clear_bit(BR_FDB_ADDED_BY_EXT_LEARN,
&fdb->flags);
+
+ /* QCA NSS ECM support - Start */
+ atomic_notifier_call_chain(
+ &br_fdb_update_notifier_list,
+ 0, (void *)addr);
+ /* QCA NSS ECM support - End */
}
if (unlikely(test_bit(BR_FDB_ADDED_BY_USER, &flags)))
@@ -902,6 +965,64 @@ void br_fdb_update(struct net_bridge *br
}
}
+/* QCA NSS ECM support - Start */
+/* Refresh FDB entries for bridge packets being forwarded by offload engines */
+void br_refresh_fdb_entry(struct net_device *dev, const char *addr)
+{
+ struct net_bridge_port *p = br_port_get_rcu(dev);
+
+ if (!p || p->state == BR_STATE_DISABLED)
+ return;
+
+ if (!is_valid_ether_addr(addr)) {
+ pr_info("bridge: Attempt to refresh with invalid ether address %pM\n",
+ addr);
+ return;
+ }
+
+ rcu_read_lock();
+ br_fdb_update(p->br, p, addr, 0, true);
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(br_refresh_fdb_entry);
+
+/* Update timestamp of FDB entries for bridge packets being forwarded by offload engines */
+void br_fdb_entry_refresh(struct net_device *dev, const char *addr, __u16 vid)
+{
+ struct net_bridge_fdb_entry *fdb;
+ struct net_bridge_port *p = br_port_get_rcu(dev);
+
+ if (!p || p->state == BR_STATE_DISABLED)
+ return;
+
+ rcu_read_lock();
+ fdb = fdb_find_rcu(&p->br->fdb_hash_tbl, addr, vid);
+ if (likely(fdb)) {
+ fdb->updated = jiffies;
+ }
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(br_fdb_entry_refresh);
+
+/* Look up the MAC address in the device's bridge fdb table */
+struct net_bridge_fdb_entry *br_fdb_has_entry(struct net_device *dev,
+ const char *addr, __u16 vid)
+{
+ struct net_bridge_port *p = br_port_get_rcu(dev);
+ struct net_bridge_fdb_entry *fdb;
+
+ if (!p || p->state == BR_STATE_DISABLED)
+ return NULL;
+
+ rcu_read_lock();
+ fdb = fdb_find_rcu(&p->br->fdb_hash_tbl, addr, vid);
+ rcu_read_unlock();
+
+ return fdb;
+}
+EXPORT_SYMBOL_GPL(br_fdb_has_entry);
+
+/* QCA NSS ECM support - End */
/* Dump information about entries, in response to GETNEIGH */
int br_fdb_dump(struct sk_buff *skb,
struct netlink_callback *cb,
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -26,6 +26,12 @@
#include "br_private.h"
+/* QCA NSS ECM support - Start */
+/* Hook for external forwarding logic */
+br_port_dev_get_hook_t __rcu *br_port_dev_get_hook __read_mostly;
+EXPORT_SYMBOL_GPL(br_port_dev_get_hook);
+/* QCA NSS ECM support - End */
+
/*
* Determine initial path cost based on speed.
* using recommendations from 802.1d standard
@@ -697,6 +703,8 @@ int br_add_if(struct net_bridge *br, str
kobject_uevent(&p->kobj, KOBJ_ADD);
+ call_netdevice_notifiers(NETDEV_BR_JOIN, dev); /* QCA NSS ECM support */
+
return 0;
err6:
@@ -732,6 +740,8 @@ int br_del_if(struct net_bridge *br, str
if (!p || p->br != br)
return -EINVAL;
+ call_netdevice_notifiers(NETDEV_BR_LEAVE, dev); /* QCA NSS ECM support */
+
/* Since more than one interface can be attached to a bridge,
* there still maybe an alternate path for netconsole to use;
* therefore there is no reason for a NETDEV_RELEASE event.
@@ -775,3 +785,97 @@ bool br_port_flag_is_set(const struct ne
return p->flags & flag;
}
EXPORT_SYMBOL_GPL(br_port_flag_is_set);
+
+/* br_port_dev_get()
+ * If a skb is provided, and the br_port_dev_get_hook_t hook exists,
+ * use that to try and determine the egress port for that skb.
+ * If not, or no egress port could be determined, use the given addr
+ * to identify the port on which it is reachable,
+ * returning a reference to the net device associated with that port.
+ *
+ * NOTE: Return NULL if given dev is not a bridge or the mac has no
+ * associated port.
+ */
+struct net_device *br_port_dev_get(struct net_device *dev, unsigned char *addr,
+ struct sk_buff *skb,
+ unsigned int cookie)
+{
+ struct net_bridge_fdb_entry *fdbe;
+ struct net_bridge *br;
+ struct net_device *netdev = NULL;
+
+ /* Is this a bridge? */
+ if (!(dev->priv_flags & IFF_EBRIDGE))
+ return NULL;
+
+ rcu_read_lock();
+
+ /* If the hook exists and the skb isn't NULL, try and get the port */
+ if (skb) {
+ br_port_dev_get_hook_t *port_dev_get_hook;
+
+ port_dev_get_hook = rcu_dereference(br_port_dev_get_hook);
+ if (port_dev_get_hook) {
+ struct net_bridge_port *pdst =
+ __br_get(port_dev_get_hook, NULL, dev, skb,
+ addr, cookie);
+ if (pdst) {
+ dev_hold(pdst->dev);
+ netdev = pdst->dev;
+ goto out;
+ }
+ }
+ }
+
+ /* Either there is no hook, or can't
+ * determine the port to use - fall back to using FDB
+ */
+
+ br = netdev_priv(dev);
+
+ /* Lookup the fdb entry and get reference to the port dev */
+ fdbe = br_fdb_find_rcu(br, addr, 0);
+ if (fdbe && fdbe->dst) {
+ netdev = fdbe->dst->dev; /* port device */
+ dev_hold(netdev);
+ }
+out:
+ rcu_read_unlock();
+ return netdev;
+}
+EXPORT_SYMBOL_GPL(br_port_dev_get);
+
+/* Update bridge statistics for bridge packets processed by offload engines */
+void br_dev_update_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *nlstats)
+{
+ struct pcpu_sw_netstats *tstats;
+
+ /* Is this a bridge? */
+ if (!(dev->priv_flags & IFF_EBRIDGE))
+ return;
+
+ tstats = this_cpu_ptr(dev->tstats);
+
+ u64_stats_update_begin(&tstats->syncp);
+ u64_stats_add(&tstats->rx_packets, nlstats->rx_packets);
+ u64_stats_add(&tstats->rx_bytes, nlstats->rx_bytes);
+ u64_stats_add(&tstats->tx_packets, nlstats->tx_packets);
+ u64_stats_add(&tstats->tx_bytes, nlstats->tx_bytes);
+ u64_stats_update_end(&tstats->syncp);
+}
+EXPORT_SYMBOL_GPL(br_dev_update_stats);
+
+/* QCA NSS ECM support - Start */
+/* API to know if hairpin feature is enabled/disabled on this bridge port */
+bool br_is_hairpin_enabled(struct net_device *dev)
+{
+ struct net_bridge_port *port = br_port_get_check_rcu(dev);
+
+ if (likely(port))
+ return port->flags & BR_HAIRPIN_MODE;
+ return false;
+}
+EXPORT_SYMBOL_GPL(br_is_hairpin_enabled);
+
+/* QCA NSS ECM support - End */
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1275,6 +1275,22 @@ static void neigh_update_hhs(struct neig
}
}
+/* QCA NSS ECM support - Start */
+ATOMIC_NOTIFIER_HEAD(neigh_mac_update_notifier_list);
+
+void neigh_mac_update_register_notify(struct notifier_block *nb)
+{
+ atomic_notifier_chain_register(&neigh_mac_update_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(neigh_mac_update_register_notify);
+
+void neigh_mac_update_unregister_notify(struct notifier_block *nb)
+{
+ atomic_notifier_chain_unregister(&neigh_mac_update_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(neigh_mac_update_unregister_notify);
+/* QCA NSS ECM support - End */
+
/* Generic update routine.
-- lladdr is new lladdr or NULL, if it is not supplied.
-- new is new state.
@@ -1303,6 +1319,7 @@ static int __neigh_update(struct neighbo
struct net_device *dev;
int err, notify = 0;
u8 old;
+ struct neigh_mac_update nmu; /* QCA NSS ECM support */
trace_neigh_update(neigh, lladdr, new, flags, nlmsg_pid);
@@ -1317,7 +1334,10 @@ static int __neigh_update(struct neighbo
new = old;
goto out;
}
- if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
+
+ memset(&nmu, 0, sizeof(struct neigh_mac_update)); /* QCA NSS ECM support */
+
+ if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
(old & (NUD_NOARP | NUD_PERMANENT)))
goto out;
@@ -1354,7 +1374,12 @@ static int __neigh_update(struct neighbo
- compare new & old
- if they are different, check override flag
*/
- if ((old & NUD_VALID) &&
+ /* QCA NSS ECM update - Start */
+ memcpy(nmu.old_mac, neigh->ha, dev->addr_len);
+ memcpy(nmu.update_mac, lladdr, dev->addr_len);
+ /* QCA NSS ECM update - End */
+
+ if ((old & NUD_VALID) &&
!memcmp(lladdr, neigh->ha, dev->addr_len))
lladdr = neigh->ha;
} else {
@@ -1476,8 +1501,11 @@ out:
neigh_update_gc_list(neigh);
if (managed_update)
neigh_update_managed_list(neigh);
- if (notify)
+ if (notify) {
neigh_update_notify(neigh, nlmsg_pid);
+ atomic_notifier_call_chain(&neigh_mac_update_notifier_list, 0,
+ (struct neigh_mac_update *)&nmu); /* QCA NSS ECM support */
+ }
trace_neigh_update_done(neigh, err);
return err;
}
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1211,6 +1211,9 @@ static bool fib_valid_key_len(u32 key, u
static void fib_remove_alias(struct trie *t, struct key_vector *tp,
struct key_vector *l, struct fib_alias *old);
+/* Define route change notification chain. */
+static BLOCKING_NOTIFIER_HEAD(iproute_chain); /* QCA NSS ECM support */
+
/* Caller must hold RTNL. */
int fib_table_insert(struct net *net, struct fib_table *tb,
struct fib_config *cfg, struct netlink_ext_ack *extack)
@@ -1404,6 +1407,9 @@ int fib_table_insert(struct net *net, st
rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, new_fa->tb_id,
&cfg->fc_nlinfo, nlflags);
succeeded:
+ blocking_notifier_call_chain(&iproute_chain,
+ RTM_NEWROUTE, fi);
+
return 0;
out_remove_new_fa:
@@ -1775,6 +1781,9 @@ int fib_table_delete(struct net *net, st
if (fa_to_delete->fa_state & FA_S_ACCESSED)
rt_cache_flush(cfg->fc_nlinfo.nl_net);
+ blocking_notifier_call_chain(&iproute_chain,
+ RTM_DELROUTE, fa_to_delete->fa_info);
+
fib_release_info(fa_to_delete->fa_info);
alias_free_mem_rcu(fa_to_delete);
return 0;
@@ -2407,6 +2416,20 @@ void __init fib_trie_init(void)
0, SLAB_PANIC | SLAB_ACCOUNT, NULL);
}
+/* QCA NSS ECM support - Start */
+int ip_rt_register_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&iproute_chain, nb);
+}
+EXPORT_SYMBOL(ip_rt_register_notifier);
+
+int ip_rt_unregister_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(&iproute_chain, nb);
+}
+EXPORT_SYMBOL(ip_rt_unregister_notifier);
+/* QCA NSS ECM support - End */
+
struct fib_table *fib_trie_table(u32 id, struct fib_table *alias)
{
struct fib_table *tb;
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -666,6 +666,7 @@ void ndisc_send_ns(struct net_device *de
if (skb)
ndisc_send_skb(skb, daddr, saddr);
}
+EXPORT_SYMBOL(ndisc_send_ns);
void ndisc_send_rs(struct net_device *dev, const struct in6_addr *saddr,
const struct in6_addr *daddr)
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -197,6 +197,9 @@ static void rt6_uncached_list_flush_dev(
}
}
+/* Define route change notification chain. */
+ATOMIC_NOTIFIER_HEAD(ip6route_chain); /* QCA NSS ECM support */
+
static inline const void *choose_neigh_daddr(const struct in6_addr *p,
struct sk_buff *skb,
const void *daddr)
@@ -3865,6 +3868,10 @@ int ip6_route_add(struct fib6_config *cf
return PTR_ERR(rt);
err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, extack);
+ if (!err)
+ atomic_notifier_call_chain(&ip6route_chain,
+ RTM_NEWROUTE, rt);
+
fib6_info_release(rt);
return err;
@@ -3886,6 +3893,9 @@ static int __ip6_del_rt(struct fib6_info
err = fib6_del(rt, info);
spin_unlock_bh(&table->tb6_lock);
+ if (!err)
+ atomic_notifier_call_chain(&ip6route_chain,
+ RTM_DELROUTE, rt);
out:
fib6_info_release(rt);
return err;
@@ -6339,6 +6349,20 @@ static int ip6_route_dev_notify(struct n
return NOTIFY_OK;
}
+/* QCA NSS ECM support - Start */
+int rt6_register_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(&ip6route_chain, nb);
+}
+EXPORT_SYMBOL(rt6_register_notifier);
+
+int rt6_unregister_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&ip6route_chain, nb);
+}
+EXPORT_SYMBOL(rt6_unregister_notifier);
+/* QCA NSS ECM support - End */
+
/*
* /proc
*/
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1639,6 +1639,7 @@ const char *netdev_cmd_to_name(enum netd
N(SVLAN_FILTER_PUSH_INFO) N(SVLAN_FILTER_DROP_INFO)
N(PRE_CHANGEADDR) N(OFFLOAD_XSTATS_ENABLE) N(OFFLOAD_XSTATS_DISABLE)
N(OFFLOAD_XSTATS_REPORT_USED) N(OFFLOAD_XSTATS_REPORT_DELTA)
+ N(BR_JOIN) N(BR_LEAVE)
}
#undef N
return "UNKNOWN_NETDEV_EVENT";
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1002,6 +1002,7 @@ void inet6_ifa_finish_destroy(struct ine
kfree_rcu(ifp, rcu);
}
+EXPORT_SYMBOL(inet6_ifa_finish_destroy);
static void
ipv6_link_dev_addr(struct inet6_dev *idev, struct inet6_ifaddr *ifp)
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -432,6 +432,15 @@ static inline __be32 vxlan_compute_rco(u
return vni_field;
}
+/*
+ * vxlan_get_vni()
+ * Returns the vni corresponding to tunnel
+ */
+static inline u32 vxlan_get_vni(struct vxlan_dev *vxlan_tun)
+{
+ return be32_to_cpu(vxlan_tun->cfg.vni);
+}
+
static inline unsigned short vxlan_get_sk_family(struct vxlan_sock *vs)
{
return vs->sock->sk->sk_family;
--- a/include/uapi/linux/in.h
+++ b/include/uapi/linux/in.h
@@ -63,6 +63,8 @@ enum {
#define IPPROTO_MTP IPPROTO_MTP
IPPROTO_BEETPH = 94, /* IP option pseudo header for BEET */
#define IPPROTO_BEETPH IPPROTO_BEETPH
+ IPPROTO_ETHERIP = 97, /* ETHERIP protocol number */
+#define IPPROTO_ETHERIP IPPROTO_ETHERIP
IPPROTO_ENCAP = 98, /* Encapsulation Header */
#define IPPROTO_ENCAP IPPROTO_ENCAP
IPPROTO_PIM = 103, /* Protocol Independent Multicast */
@@ -327,7 +329,7 @@ struct sockaddr_in {
#endif
/* <asm/byteorder.h> contains the htonl type stuff.. */
-#include <asm/byteorder.h>
+#include <asm/byteorder.h>
#endif /* _UAPI_LINUX_IN_H */
--- a/tools/include/uapi/linux/in.h
+++ b/tools/include/uapi/linux/in.h
@@ -63,6 +63,8 @@ enum {
#define IPPROTO_MTP IPPROTO_MTP
IPPROTO_BEETPH = 94, /* IP option pseudo header for BEET */
#define IPPROTO_BEETPH IPPROTO_BEETPH
+ IPPROTO_ETHERIP = 97, /* ETHERIP protocol number */
+#define IPPROTO_ETHERIP IPPROTO_ETHERIP
IPPROTO_ENCAP = 98, /* Encapsulation Header */
#define IPPROTO_ENCAP IPPROTO_ENCAP
IPPROTO_PIM = 103, /* Protocol Independent Multicast */
@@ -327,7 +329,7 @@ struct sockaddr_in {
#endif
/* <asm/byteorder.h> contains the htonl type stuff.. */
-#include <asm/byteorder.h>
+#include <asm/byteorder.h>
#endif /* _UAPI_LINUX_IN_H */
--- a/net/netfilter/nf_conntrack_ecache.c
+++ b/net/netfilter/nf_conntrack_ecache.c
@@ -266,7 +266,6 @@ void nf_conntrack_register_notifier(stru
mutex_lock(&nf_ct_ecache_mutex);
notify = rcu_dereference_protected(net->ct.nf_conntrack_event_cb,
lockdep_is_held(&nf_ct_ecache_mutex));
- WARN_ON_ONCE(notify);
rcu_assign_pointer(net->ct.nf_conntrack_event_cb, new);
mutex_unlock(&nf_ct_ecache_mutex);
}
--- a/include/net/netns/conntrack.h
+++ b/include/net/netns/conntrack.h
@@ -26,6 +26,7 @@ struct nf_tcp_net {
unsigned int timeouts[TCP_CONNTRACK_TIMEOUT_MAX];
u8 tcp_loose;
u8 tcp_be_liberal;
+ u8 tcp_no_window_check;
u8 tcp_max_retrans;
u8 tcp_ignore_invalid_rst;
#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -513,11 +513,15 @@ tcp_in_window(struct nf_conn *ct, enum ip_conntrack_dir dir,
struct ip_ct_tcp *state = &ct->proto.tcp;
struct ip_ct_tcp_state *sender = &state->seen[dir];
struct ip_ct_tcp_state *receiver = &state->seen[!dir];
+ const struct nf_tcp_net *tn = nf_tcp_pernet(nf_ct_net(ct));
__u32 seq, ack, sack, end, win, swin;
bool in_recv_win, seq_ok;
s32 receiver_offset;
u16 win_raw;
+ if (tn->tcp_no_window_check)
+ return NFCT_TCP_ACCEPT;
+
/*
* Get the required data from the packet.
*/
@@ -1257,7 +1261,7 @@ int nf_conntrack_tcp_packet(struct nf_conn *ct,
IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED &&
timeouts[new_state] > timeouts[TCP_CONNTRACK_UNACK])
timeout = timeouts[TCP_CONNTRACK_UNACK];
- else if (ct->proto.tcp.last_win == 0 &&
+ else if (!tn->tcp_no_window_check && ct->proto.tcp.last_win == 0 &&
timeouts[new_state] > timeouts[TCP_CONNTRACK_RETRANS])
timeout = timeouts[TCP_CONNTRACK_RETRANS];
else
@@ -1573,6 +1577,9 @@ void nf_conntrack_tcp_init_net(struct net *net)
*/
tn->tcp_be_liberal = 0;
+ /* Skip Windows Check */
+ tn->tcp_no_window_check = 0;
+
/* If it's non-zero, we turn off RST sequence number check */
tn->tcp_ignore_invalid_rst = 0;
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -637,6 +637,7 @@ enum nf_ct_sysctl_index {
#endif
NF_SYSCTL_CT_PROTO_TCP_LOOSE,
NF_SYSCTL_CT_PROTO_TCP_LIBERAL,
+ NF_SYSCTL_CT_PROTO_TCP_NO_WINDOW_CHECK,
NF_SYSCTL_CT_PROTO_TCP_IGNORE_INVALID_RST,
NF_SYSCTL_CT_PROTO_TCP_MAX_RETRANS,
NF_SYSCTL_CT_PROTO_TIMEOUT_UDP,
@@ -844,6 +845,14 @@ static struct ctl_table nf_ct_sysctl_table[] = {
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
+ [NF_SYSCTL_CT_PROTO_TCP_NO_WINDOW_CHECK] = {
+ .procname = "nf_conntrack_tcp_no_window_check",
+ .maxlen = sizeof(u8),
+ .mode = 0644,
+ .proc_handler = proc_dou8vec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
+ },
[NF_SYSCTL_CT_PROTO_TCP_IGNORE_INVALID_RST] = {
.procname = "nf_conntrack_tcp_ignore_invalid_rst",
.maxlen = sizeof(u8),
@@ -1054,6 +1063,7 @@ static void nf_conntrack_standalone_init_tcp_sysctl(struct net *net,
XASSIGN(LOOSE, &tn->tcp_loose);
XASSIGN(LIBERAL, &tn->tcp_be_liberal);
+ XASSIGN(NO_WINDOW_CHECK, &tn->tcp_no_window_check);
XASSIGN(MAX_RETRANS, &tn->tcp_max_retrans);
XASSIGN(IGNORE_INVALID_RST, &tn->tcp_ignore_invalid_rst);
#undef XASSIGN
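For context, the notifier chains and FDB/neighbour helpers exported above are intended for an out-of-tree offload engine (ECM) rather than for in-tree users. A minimal sketch of such a consumer follows, assuming a hypothetical module name; the same register/unregister pattern applies to the neigh_mac_update, ip_rt and rt6 notifiers added in this patch.

#include <linux/if_bridge.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>

/* Hypothetical offload module: flush cached flows when a bridge FDB
 * entry is removed, using the chain exported by br_fdb_register_notify().
 */
static int demo_fdb_event(struct notifier_block *nb, unsigned long event,
			  void *data)
{
	struct br_fdb_event *fdb_event = data;

	if (event == BR_FDB_EVENT_DEL)
		pr_debug("flush flows towards %pM on %s\n",
			 fdb_event->addr, fdb_event->dev->name);

	return NOTIFY_DONE;
}

static struct notifier_block demo_fdb_nb = {
	.notifier_call = demo_fdb_event,
};

static int __init demo_ecm_init(void)
{
	br_fdb_register_notify(&demo_fdb_nb);
	return 0;
}

static void __exit demo_ecm_exit(void)
{
	br_fdb_unregister_notify(&demo_fdb_nb);
}

module_init(demo_ecm_init);
module_exit(demo_ecm_exit);
MODULE_LICENSE("GPL");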

View file

@@ -0,0 +1,600 @@
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -48,6 +48,7 @@
#include <net/slhc_vj.h>
#include <linux/atomic.h>
#include <linux/refcount.h>
+#include <linux/if_pppox.h>
#include <linux/nsproxy.h>
#include <net/net_namespace.h>
@@ -254,6 +255,25 @@ struct ppp_net {
#define seq_before(a, b) ((s32)((a) - (b)) < 0)
#define seq_after(a, b) ((s32)((a) - (b)) > 0)
+
+/*
+ * Registration/Unregistration methods
+ * for PPP channel connect and disconnect event notifications.
+ */
+RAW_NOTIFIER_HEAD(ppp_channel_connection_notifier_list);
+
+void ppp_channel_connection_register_notify(struct notifier_block *nb)
+{
+ raw_notifier_chain_register(&ppp_channel_connection_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(ppp_channel_connection_register_notify);
+
+void ppp_channel_connection_unregister_notify(struct notifier_block *nb)
+{
+ raw_notifier_chain_unregister(&ppp_channel_connection_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(ppp_channel_connection_unregister_notify);
+
/* Prototypes. */
static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
struct file *file, unsigned int cmd, unsigned long arg);
@@ -3453,7 +3473,10 @@ ppp_connect_channel(struct channel *pch,
struct ppp_net *pn;
int ret = -ENXIO;
int hdrlen;
+ int ppp_proto;
+ int version;
+ int notify = 0;
pn = ppp_pernet(pch->chan_net);
mutex_lock(&pn->all_ppp_mutex);
@@ -3485,13 +3508,40 @@ ppp_connect_channel(struct channel *pch,
++ppp->n_channels;
pch->ppp = ppp;
refcount_inc(&ppp->file.refcnt);
+
+ /* Set the netdev priv flag if the prototype
+ * is L2TP or PPTP. Return success in all cases
+ */
+ if (!pch->chan)
+ goto out2;
+
+ ppp_proto = ppp_channel_get_protocol(pch->chan);
+ if (ppp_proto == PX_PROTO_PPTP) {
+ ppp->dev->priv_flags_ext |= IFF_EXT_PPP_PPTP;
+ } else if (ppp_proto == PX_PROTO_OL2TP) {
+ version = ppp_channel_get_proto_version(pch->chan);
+ if (version == 2)
+ ppp->dev->priv_flags_ext |= IFF_EXT_PPP_L2TPV2;
+ else if (version == 3)
+ ppp->dev->priv_flags_ext |= IFF_EXT_PPP_L2TPV3;
+ }
+ notify = 1;
+
+ out2:
ppp_unlock(ppp);
ret = 0;
-
outl:
write_unlock_bh(&pch->upl);
out:
mutex_unlock(&pn->all_ppp_mutex);
+
+ if (notify && ppp && ppp->dev) {
+ dev_hold(ppp->dev);
+ raw_notifier_call_chain(&ppp_channel_connection_notifier_list,
+ PPP_CHANNEL_CONNECT, ppp->dev);
+ dev_put(ppp->dev);
+ }
+
return ret;
}
@@ -3509,6 +3559,13 @@ ppp_disconnect_channel(struct channel *p
pch->ppp = NULL;
write_unlock_bh(&pch->upl);
if (ppp) {
+ if (ppp->dev) {
+ dev_hold(ppp->dev);
+ raw_notifier_call_chain(&ppp_channel_connection_notifier_list,
+ PPP_CHANNEL_DISCONNECT, ppp->dev);
+ dev_put(ppp->dev);
+ }
+
/* remove it from the ppp unit's list */
ppp_lock(ppp);
list_del(&pch->clist);
@@ -3588,6 +3645,222 @@ static void *unit_find(struct idr *p, in
return idr_find(p, n);
}
+/* Updates the PPP interface statistics. */
+void ppp_update_stats(struct net_device *dev, unsigned long rx_packets,
+ unsigned long rx_bytes, unsigned long tx_packets,
+ unsigned long tx_bytes, unsigned long rx_errors,
+ unsigned long tx_errors, unsigned long rx_dropped,
+ unsigned long tx_dropped)
+{
+ struct ppp *ppp;
+
+ if (!dev)
+ return;
+
+ if (dev->type != ARPHRD_PPP)
+ return;
+
+ ppp = netdev_priv(dev);
+
+ ppp_xmit_lock(ppp);
+ ppp->stats64.tx_packets += tx_packets;
+ ppp->stats64.tx_bytes += tx_bytes;
+ ppp->dev->stats.tx_errors += tx_errors;
+ ppp->dev->stats.tx_dropped += tx_dropped;
+ if (tx_packets)
+ ppp->last_xmit = jiffies;
+ ppp_xmit_unlock(ppp);
+
+ ppp_recv_lock(ppp);
+ ppp->stats64.rx_packets += rx_packets;
+ ppp->stats64.rx_bytes += rx_bytes;
+ ppp->dev->stats.rx_errors += rx_errors;
+ ppp->dev->stats.rx_dropped += rx_dropped;
+ if (rx_packets)
+ ppp->last_recv = jiffies;
+ ppp_recv_unlock(ppp);
+}
+
+/* Returns >0 if the device is a multilink PPP netdevice, 0 if not or < 0 if
+ * the device is not PPP.
+ */
+int ppp_is_multilink(struct net_device *dev)
+{
+ struct ppp *ppp;
+ unsigned int flags;
+
+ if (!dev)
+ return -1;
+
+ if (dev->type != ARPHRD_PPP)
+ return -1;
+
+ ppp = netdev_priv(dev);
+ ppp_lock(ppp);
+ flags = ppp->flags;
+ ppp_unlock(ppp);
+
+ if (flags & SC_MULTILINK)
+ return 1;
+
+ return 0;
+}
+EXPORT_SYMBOL(ppp_is_multilink);
+
+/* ppp_channel_get_protocol()
+ * Call this to obtain the underlying protocol of the PPP channel,
+ * e.g. PX_PROTO_OE
+ *
+ * NOTE: Some channels do not use PX sockets so the protocol value may be very
+ * different for them.
+ * NOTE: -1 indicates failure.
+ * NOTE: Once you know the channel protocol you may then either cast 'chan' to
+ * its sub-class or use the channel protocol specific API's as provided by that
+ * channel sub type.
+ */
+int ppp_channel_get_protocol(struct ppp_channel *chan)
+{
+ if (!chan->ops->get_channel_protocol)
+ return -1;
+
+ return chan->ops->get_channel_protocol(chan);
+}
+EXPORT_SYMBOL(ppp_channel_get_protocol);
+
+/* ppp_channel_get_proto_version()
+ * Call this to get channel protocol version
+ */
+int ppp_channel_get_proto_version(struct ppp_channel *chan)
+{
+ if (!chan->ops->get_channel_protocol_ver)
+ return -1;
+
+ return chan->ops->get_channel_protocol_ver(chan);
+}
+EXPORT_SYMBOL(ppp_channel_get_proto_version);
+
+/* ppp_channel_hold()
+ * Call this to hold a channel.
+ *
+ * Returns true on success or false if the hold could not happen.
+ *
+ * NOTE: chan must be protected against destruction during this call -
+ * either by correct locking etc. or because you already have an implicit
+ * or explicit hold to the channel already and this is an additional hold.
+ */
+bool ppp_channel_hold(struct ppp_channel *chan)
+{
+ if (!chan->ops->hold)
+ return false;
+
+ chan->ops->hold(chan);
+ return true;
+}
+EXPORT_SYMBOL(ppp_channel_hold);
+
+/* ppp_channel_release()
+ * Call this to release a hold you have upon a channel
+ */
+void ppp_channel_release(struct ppp_channel *chan)
+{
+ chan->ops->release(chan);
+}
+EXPORT_SYMBOL(ppp_channel_release);
+
+/* Check if ppp xmit lock is on hold */
+bool ppp_is_xmit_locked(struct net_device *dev)
+{
+ struct ppp *ppp;
+
+ if (!dev)
+ return false;
+
+ if (dev->type != ARPHRD_PPP)
+ return false;
+
+ ppp = netdev_priv(dev);
+ if (!ppp)
+ return false;
+
+ if (spin_is_locked(&(ppp)->wlock))
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL(ppp_is_xmit_locked);
+
+/* ppp_hold_channels()
+ * Returns the PPP channels of the PPP device, storing each one into
+ * channels[].
+ *
+ * channels[] has chan_sz elements.
+ * This function returns the number of channels stored, up to chan_sz.
+ * It will return < 0 if the device is not PPP.
+ *
+ * You MUST release the channels using ppp_release_channels().
+ */
+int ppp_hold_channels(struct net_device *dev, struct ppp_channel *channels[],
+ unsigned int chan_sz)
+{
+ struct ppp *ppp;
+ int c;
+ struct channel *pch;
+
+ if (!dev)
+ return -1;
+
+ if (dev->type != ARPHRD_PPP)
+ return -1;
+
+ ppp = netdev_priv(dev);
+
+ c = 0;
+ ppp_lock(ppp);
+ list_for_each_entry(pch, &ppp->channels, clist) {
+ struct ppp_channel *chan;
+
+ if (!pch->chan) {
+ /* Channel is going / gone away */
+ continue;
+ }
+
+ if (c == chan_sz) {
+ /* No space to record channel */
+ ppp_unlock(ppp);
+ return c;
+ }
+
+ /* Hold the channel, if supported */
+ chan = pch->chan;
+ if (!chan->ops->hold)
+ continue;
+
+ chan->ops->hold(chan);
+
+ /* Record the channel */
+ channels[c++] = chan;
+ }
+ ppp_unlock(ppp);
+ return c;
+}
+EXPORT_SYMBOL(ppp_hold_channels);
+
+/* ppp_release_channels()
+ * Releases channels
+ */
+void ppp_release_channels(struct ppp_channel *channels[], unsigned int chan_sz)
+{
+ unsigned int c;
+
+ for (c = 0; c < chan_sz; ++c) {
+ struct ppp_channel *chan;
+
+ chan = channels[c];
+ chan->ops->release(chan);
+ }
+}
+EXPORT_SYMBOL(ppp_release_channels);
+
/* Module/initialization stuff */
module_init(ppp_init);
@@ -3604,6 +3877,7 @@ EXPORT_SYMBOL(ppp_input_error);
EXPORT_SYMBOL(ppp_output_wakeup);
EXPORT_SYMBOL(ppp_register_compressor);
EXPORT_SYMBOL(ppp_unregister_compressor);
+EXPORT_SYMBOL(ppp_update_stats);
MODULE_LICENSE("GPL");
MODULE_ALIAS_CHARDEV(PPP_MAJOR, 0);
MODULE_ALIAS_RTNL_LINK("ppp");
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -62,6 +62,7 @@
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
+#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/if_ether.h>
#include <linux/if_pppox.h>
@@ -87,7 +88,7 @@
static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb);
static const struct proto_ops pppoe_ops;
-static const struct ppp_channel_ops pppoe_chan_ops;
+static const struct pppoe_channel_ops pppoe_chan_ops;
/* per-net private data for this module */
static unsigned int pppoe_net_id __read_mostly;
@@ -692,7 +693,7 @@ static int pppoe_connect(struct socket *
po->chan.mtu = dev->mtu - sizeof(struct pppoe_hdr) - 2;
po->chan.private = sk;
- po->chan.ops = &pppoe_chan_ops;
+ po->chan.ops = (struct ppp_channel_ops *)&pppoe_chan_ops;
error = ppp_register_net_channel(dev_net(dev), &po->chan);
if (error) {
@@ -995,9 +996,80 @@ static int pppoe_fill_forward_path(struc
return 0;
}
-static const struct ppp_channel_ops pppoe_chan_ops = {
- .start_xmit = pppoe_xmit,
- .fill_forward_path = pppoe_fill_forward_path,
+/************************************************************************
+ *
+ * function called by generic PPP driver to hold channel
+ *
+ ***********************************************************************/
+static void pppoe_hold_chan(struct ppp_channel *chan)
+{
+ struct sock *sk = (struct sock *)chan->private;
+
+ sock_hold(sk);
+}
+
+/************************************************************************
+ *
+ * function called by generic PPP driver to release channel
+ *
+ ***********************************************************************/
+static void pppoe_release_chan(struct ppp_channel *chan)
+{
+ struct sock *sk = (struct sock *)chan->private;
+
+ sock_put(sk);
+}
+
+/************************************************************************
+ *
+ * function called to get the channel protocol type
+ *
+ ***********************************************************************/
+static int pppoe_get_channel_protocol(struct ppp_channel *chan)
+{
+ return PX_PROTO_OE;
+}
+
+/************************************************************************
+ *
+ * function called to get the PPPoE channel addressing
+ * NOTE: This function returns a HOLD to the netdevice
+ *
+ ***********************************************************************/
+static int pppoe_get_addressing(struct ppp_channel *chan,
+ struct pppoe_opt *addressing)
+{
+ struct sock *sk = (struct sock *)chan->private;
+ struct pppox_sock *po = pppox_sk(sk);
+ int err = 0;
+
+ *addressing = po->proto.pppoe;
+ if (!addressing->dev)
+ return -ENODEV;
+
+ dev_hold(addressing->dev);
+ return err;
+}
+
+/* pppoe_channel_addressing_get()
+ * Return PPPoE channel specific addressing information.
+ */
+int pppoe_channel_addressing_get(struct ppp_channel *chan,
+ struct pppoe_opt *addressing)
+{
+ return pppoe_get_addressing(chan, addressing);
+}
+EXPORT_SYMBOL(pppoe_channel_addressing_get);
+
+static const struct pppoe_channel_ops pppoe_chan_ops = {
+ /* PPPoE specific channel ops */
+ .get_addressing = pppoe_get_addressing,
+ /* General ppp channel ops */
+ .ops.start_xmit = pppoe_xmit,
+ .ops.get_channel_protocol = pppoe_get_channel_protocol,
+ .ops.hold = pppoe_hold_chan,
+ .ops.release = pppoe_release_chan,
+ .ops.fill_forward_path = pppoe_fill_forward_path,
};
static int pppoe_recvmsg(struct socket *sock, struct msghdr *m,
--- a/include/linux/if_pppox.h
+++ b/include/linux/if_pppox.h
@@ -91,4 +91,17 @@ enum {
PPPOX_DEAD = 16 /* dead, useless, please clean me up!*/
};
+/*
+ * PPPoE Channel specific operations
+ */
+struct pppoe_channel_ops {
+ /* Must be first - general to all PPP channels */
+ struct ppp_channel_ops ops;
+ int (*get_addressing)(struct ppp_channel *, struct pppoe_opt *);
+};
+
+/* Return PPPoE channel specific addressing information */
+extern int pppoe_channel_addressing_get(struct ppp_channel *chan,
+ struct pppoe_opt *addressing);
+
#endif /* !(__LINUX_IF_PPPOX_H) */
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1762,6 +1762,36 @@ enum netdev_priv_flags {
IFF_NO_IP_ALIGN = BIT_ULL(34),
};
+/**
+ * enum netdev_priv_flags_ext - &struct net_device priv_flags_ext
+ *
+ * These flags are used to check for device type and can be
+ * set and used by the drivers
+ *
+ * @IFF_EXT_TUN_TAP: device is a TUN/TAP device
+ * @IFF_EXT_PPP_L2TPV2: device is a L2TPV2 device
+ * @IFF_EXT_PPP_L2TPV3: device is a L2TPV3 device
+ * @IFF_EXT_PPP_PPTP: device is a PPTP device
+ * @IFF_EXT_GRE_V4_TAP: device is a GRE IPv4 TAP device
+ * @IFF_EXT_GRE_V6_TAP: device is a GRE IPv6 TAP device
+ * @IFF_EXT_IFB: device is an IFB device
+ * @IFF_EXT_MAPT: device is a MAPT device
+ * @IFF_EXT_HW_NO_OFFLOAD: device is a non-offload device
+ * @IFF_EXT_ETH_L2TPV3: device is an L2TPV3 Ethernet device
+ */
+enum netdev_priv_flags_ext {
+ IFF_EXT_TUN_TAP = 1<<0,
+ IFF_EXT_PPP_L2TPV2 = 1<<1,
+ IFF_EXT_PPP_L2TPV3 = 1<<2,
+ IFF_EXT_PPP_PPTP = 1<<3,
+ IFF_EXT_GRE_V4_TAP = 1<<4,
+ IFF_EXT_GRE_V6_TAP = 1<<5,
+ IFF_EXT_IFB = 1<<6,
+ IFF_EXT_MAPT = 1<<7,
+ IFF_EXT_HW_NO_OFFLOAD = 1<<8,
+ IFF_EXT_ETH_L2TPV3 = 1<<9,
+};
+
#define IFF_802_1Q_VLAN IFF_802_1Q_VLAN
#define IFF_EBRIDGE IFF_EBRIDGE
#define IFF_BONDING IFF_BONDING
@@ -2075,6 +2103,7 @@ struct net_device {
/* Read-mostly cache-line for fast-path access */
unsigned int flags;
unsigned long long priv_flags;
+ unsigned int priv_flags_ext;
const struct net_device_ops *netdev_ops;
int ifindex;
unsigned short gflags;
--- a/include/linux/ppp_channel.h
+++ b/include/linux/ppp_channel.h
@@ -19,6 +19,10 @@
#include <linux/skbuff.h>
#include <linux/poll.h>
#include <net/net_namespace.h>
+#include <linux/notifier.h>
+
+#define PPP_CHANNEL_DISCONNECT 0
+#define PPP_CHANNEL_CONNECT 1
struct net_device_path;
struct net_device_path_ctx;
@@ -30,9 +34,19 @@ struct ppp_channel_ops {
int (*start_xmit)(struct ppp_channel *, struct sk_buff *);
/* Handle an ioctl call that has come in via /dev/ppp. */
int (*ioctl)(struct ppp_channel *, unsigned int, unsigned long);
+ /* Get channel protocol type, one of PX_PROTO_XYZ or specific to
+ * the channel subtype
+ */
+ int (*get_channel_protocol)(struct ppp_channel *);
+ /* Get channel protocol version */
+ int (*get_channel_protocol_ver)(struct ppp_channel *);
+ /* Hold the channel from being destroyed */
+ void (*hold)(struct ppp_channel *);
+ /* Release hold on the channel */
+ void (*release)(struct ppp_channel *);
int (*fill_forward_path)(struct net_device_path_ctx *,
- struct net_device_path *,
- const struct ppp_channel *);
+ struct net_device_path *,
+ const struct ppp_channel *);
};
struct ppp_channel {
@@ -76,6 +90,51 @@ extern int ppp_unit_number(struct ppp_ch
/* Get the device name associated with a channel, or NULL if none */
extern char *ppp_dev_name(struct ppp_channel *);
+/* Call this to obtain the underlying protocol of the PPP channel,
+ * e.g. PX_PROTO_OE
+ */
+extern int ppp_channel_get_protocol(struct ppp_channel *);
+
+/* Call this get protocol version */
+extern int ppp_channel_get_proto_version(struct ppp_channel *);
+
+/* Call this to hold a channel */
+extern bool ppp_channel_hold(struct ppp_channel *);
+
+/* Call this to release a hold you have upon a channel */
+extern void ppp_channel_release(struct ppp_channel *);
+
+/* Release hold on PPP channels */
+extern void ppp_release_channels(struct ppp_channel *channels[],
+ unsigned int chan_sz);
+
+/* Hold PPP channels for the PPP device */
+extern int ppp_hold_channels(struct net_device *dev,
+ struct ppp_channel *channels[],
+ unsigned int chan_sz);
+
+/* Test if ppp xmit lock is locked */
+extern bool ppp_is_xmit_locked(struct net_device *dev);
+
+/* Test if the ppp device is a multi-link ppp device */
+extern int ppp_is_multilink(struct net_device *dev);
+
+/* Register the PPP channel connect notifier */
+extern void ppp_channel_connection_register_notify(struct notifier_block *nb);
+
+/* Unregister the PPP channel connect notifier */
+extern void ppp_channel_connection_unregister_notify(struct notifier_block *nb);
+
+/* Update statistics of the PPP net_device by incrementing related
+ * statistics field value with corresponding parameter
+ */
+extern void ppp_update_stats(struct net_device *dev, unsigned long rx_packets,
+ unsigned long rx_bytes, unsigned long tx_packets,
+ unsigned long tx_bytes, unsigned long rx_errors,
+ unsigned long tx_errors, unsigned long rx_dropped,
+ unsigned long tx_dropped);
+
+
/*
* SMP locking notes:
* The channel code must ensure that when it calls ppp_unregister_channel,
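To illustrate, the notifier and hold/release helpers above let an offload engine learn when a PPP session is connected and walk its channels without racing against channel teardown. The sketch below is illustrative only (hypothetical module, PPPoE-only handling), not code from this commit:

#include <linux/if_pppox.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <linux/ppp_channel.h>

#define DEMO_MAX_CHAN	8

/* Hypothetical acceleration module reacting to PPP channel bring-up */
static int demo_ppp_event(struct notifier_block *nb, unsigned long event,
			  void *ptr)
{
	struct net_device *dev = ptr;
	struct ppp_channel *channels[DEMO_MAX_CHAN];
	int nchan, i;

	if (event != PPP_CHANNEL_CONNECT)
		return NOTIFY_DONE;

	/* Only single-link sessions are considered here */
	if (ppp_is_multilink(dev) != 0)
		return NOTIFY_DONE;

	nchan = ppp_hold_channels(dev, channels, DEMO_MAX_CHAN);
	if (nchan <= 0)
		return NOTIFY_DONE;

	for (i = 0; i < nchan; i++) {
		if (ppp_channel_get_protocol(channels[i]) == PX_PROTO_OE)
			pr_debug("%s: PPPoE channel ready for offload\n",
				 dev->name);
	}

	/* Drop the references taken by ppp_hold_channels() */
	ppp_release_channels(channels, nchan);
	return NOTIFY_DONE;
}

static struct notifier_block demo_ppp_nb = {
	.notifier_call = demo_ppp_event,
};

static int __init demo_ppp_init(void)
{
	ppp_channel_connection_register_notify(&demo_ppp_nb);
	return 0;
}

static void __exit demo_ppp_exit(void)
{
	ppp_channel_connection_unregister_notify(&demo_ppp_nb);
}

module_init(demo_ppp_init);
module_exit(demo_ppp_exit);
MODULE_LICENSE("GPL");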

View file

@@ -0,0 +1,49 @@
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -208,6 +208,7 @@ atomic_t netpoll_block_tx = ATOMIC_INIT(
#endif
unsigned int bond_net_id __read_mostly;
+static unsigned long bond_id_mask = 0xFFFFFFF0; /* QCA NSS ECM bonding support */
static const struct flow_dissector_key flow_keys_bonding_keys[] = {
{
@@ -5793,8 +5794,14 @@ static void bond_destructor(struct net_d
if (bond->wq)
destroy_workqueue(bond->wq);
+ /* QCA NSS ECM bonding support - Start */
+ if (bond->id != (~0U))
+ clear_bit(bond->id, &bond_id_mask);
+ /* QCA NSS ECM bonding support - End */
+
if (bond->rr_tx_counter)
free_percpu(bond->rr_tx_counter);
+
}
void bond_setup(struct net_device *bond_dev)
@@ -6358,6 +6365,13 @@ int bond_create(struct net *net, const c
bond_work_init_all(bond);
+ /* QCA NSS ECM bonding support - Start */
+ bond->id = ~0U;
+ if (bond_id_mask != (~0UL)) {
+ bond->id = (u32)ffz(bond_id_mask);
+ set_bit(bond->id, &bond_id_mask);
+ }
+ /* QCA NSS ECM bonding support - End */
out:
rtnl_unlock();
return res;
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -265,6 +265,7 @@ struct bonding {
spinlock_t ipsec_lock;
#endif /* CONFIG_XFRM_OFFLOAD */
struct bpf_prog *xdp_prog;
+ u32 id;/* QCA NSS ECM bonding support */
};
#define bond_slave_get_rcu(dev) \

View file

@@ -0,0 +1,685 @@
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -115,6 +115,40 @@ static void ad_marker_response_received(
struct port *port);
static void ad_update_actor_keys(struct port *port, bool reset);
+/* QCA NSS ECM bonding support - Start */
+struct bond_cb __rcu *bond_cb;
+
+int bond_register_cb(struct bond_cb *cb)
+{
+ struct bond_cb *lag_cb;
+
+ lag_cb = kzalloc(sizeof(*lag_cb), GFP_ATOMIC | __GFP_NOWARN);
+ if (!lag_cb) {
+ return -1;
+ }
+
+ memcpy((void *)lag_cb, (void *)cb, sizeof(*cb));
+
+ rcu_read_lock();
+ rcu_assign_pointer(bond_cb, lag_cb);
+ rcu_read_unlock();
+ return 0;
+}
+EXPORT_SYMBOL(bond_register_cb);
+
+void bond_unregister_cb(void)
+{
+ struct bond_cb *lag_cb_main;
+
+ rcu_read_lock();
+ lag_cb_main = rcu_dereference(bond_cb);
+ rcu_assign_pointer(bond_cb, NULL);
+ rcu_read_unlock();
+
+ kfree(lag_cb_main);
+}
+EXPORT_SYMBOL(bond_unregister_cb);
+/* QCA NSS ECM bonding support - End */
/* ================= api to bonding and kernel code ================== */
@@ -1064,7 +1098,31 @@ static void ad_mux_machine(struct port *
ad_disable_collecting_distributing(port,
update_slave_arr);
port->ntt = true;
+
+ /* QCA NSS ECM bonding support - Start */
+ /* Send a notification about a change in state of this
+ * port. We only want to handle the case where the port moves
+ * from AD_MUX_COLLECTING_DISTRIBUTING ->
+ * AD_MUX_ATTACHED.
+ */
+ if (bond_slave_is_up(port->slave) &&
+ (last_state == AD_MUX_COLLECTING_DISTRIBUTING)) {
+ struct bond_cb *lag_cb_main;
+
+ rcu_read_lock();
+ lag_cb_main = rcu_dereference(bond_cb);
+ if (lag_cb_main &&
+ lag_cb_main->bond_cb_link_down) {
+ struct net_device *dev;
+
+ dev = port->slave->dev;
+ lag_cb_main->bond_cb_link_down(dev);
+ }
+ rcu_read_unlock();
+ }
+
break;
+ /* QCA NSS ECM bonding support - End */
case AD_MUX_COLLECTING_DISTRIBUTING:
port->actor_oper_port_state |= LACP_STATE_COLLECTING;
port->actor_oper_port_state |= LACP_STATE_DISTRIBUTING;
@@ -1908,6 +1966,7 @@ static void ad_enable_collecting_distrib
bool *update_slave_arr)
{
if (port->aggregator->is_active) {
+ struct bond_cb *lag_cb_main; /* QCA NSS ECM bonding support */
slave_dbg(port->slave->bond->dev, port->slave->dev,
"Enabling port %d (LAG %d)\n",
port->actor_port_number,
@@ -1915,6 +1974,16 @@ static void ad_enable_collecting_distrib
__enable_port(port);
/* Slave array needs update */
*update_slave_arr = true;
+
+ /* QCA NSS ECM bonding support - Start */
+ rcu_read_lock();
+ lag_cb_main = rcu_dereference(bond_cb);
+
+ if (lag_cb_main && lag_cb_main->bond_cb_link_up)
+ lag_cb_main->bond_cb_link_up(port->slave->dev);
+
+ rcu_read_unlock();
+ /* QCA NSS ECM bonding support - End */
}
}
@@ -2674,6 +2743,104 @@ int bond_3ad_get_active_agg_info(struct
return ret;
}
+/* QCA NSS ECM bonding support - Start */
+/* bond_3ad_get_tx_dev - Calculate egress interface for a given packet,
+ * for a LAG that is configured in 802.3AD mode
+ * @skb: pointer to skb to be egressed
+ * @src_mac: pointer to source L2 address
+ * @dst_mac: pointer to destination L2 address
+ * @src: pointer to source L3 address
+ * @dst: pointer to destination L3 address
+ * @protocol: L3 protocol id from L2 header
+ * @bond_dev: pointer to bond master device
+ *
+ * If @skb is NULL, bond_xmit_hash is used to calculate hash using L2/L3
+ * addresses.
+ *
+ * Returns: Either valid slave device, or NULL otherwise
+ */
+struct net_device *bond_3ad_get_tx_dev(struct sk_buff *skb, u8 *src_mac,
+ u8 *dst_mac, void *src,
+ void *dst, u16 protocol,
+ struct net_device *bond_dev,
+ __be16 *layer4hdr)
+{
+ struct bonding *bond = netdev_priv(bond_dev);
+ struct aggregator *agg;
+ struct ad_info ad_info;
+ struct list_head *iter;
+ struct slave *slave;
+ struct slave *first_ok_slave = NULL;
+ u32 hash = 0;
+ int slaves_in_agg;
+ int slave_agg_no = 0;
+ int agg_id;
+
+ if (__bond_3ad_get_active_agg_info(bond, &ad_info)) {
+ pr_debug("%s: Error: __bond_3ad_get_active_agg_info failed\n",
+ bond_dev->name);
+ return NULL;
+ }
+
+ slaves_in_agg = ad_info.ports;
+ agg_id = ad_info.aggregator_id;
+
+ if (slaves_in_agg == 0) {
+ pr_debug("%s: Error: active aggregator is empty\n",
+ bond_dev->name);
+ return NULL;
+ }
+
+ if (skb) {
+ hash = bond_xmit_hash(bond, skb);
+ slave_agg_no = hash % slaves_in_agg;
+ } else {
+ if (bond->params.xmit_policy != BOND_XMIT_POLICY_LAYER23 &&
+ bond->params.xmit_policy != BOND_XMIT_POLICY_LAYER2 &&
+ bond->params.xmit_policy != BOND_XMIT_POLICY_LAYER34) {
+ pr_debug("%s: Error: Unsupported hash policy for 802.3AD fast path\n",
+ bond_dev->name);
+ return NULL;
+ }
+
+ hash = bond_xmit_hash_without_skb(src_mac, dst_mac,
+ src, dst, protocol,
+ bond_dev, layer4hdr);
+ slave_agg_no = hash % slaves_in_agg;
+ }
+
+ bond_for_each_slave_rcu(bond, slave, iter) {
+ agg = SLAVE_AD_INFO(slave)->port.aggregator;
+ if (!agg || agg->aggregator_identifier != agg_id)
+ continue;
+
+ if (slave_agg_no >= 0) {
+ if (!first_ok_slave && bond_slave_can_tx(slave))
+ first_ok_slave = slave;
+ slave_agg_no--;
+ continue;
+ }
+
+ if (bond_slave_can_tx(slave))
+ return slave->dev;
+ }
+
+ if (slave_agg_no >= 0) {
+ pr_err("%s: Error: Couldn't find a slave to tx on for aggregator ID %d\n",
+ bond_dev->name, agg_id);
+ return NULL;
+ }
+
+ /* we couldn't find any suitable slave after the agg_no, so use the
+ * first suitable slave we found, if any.
+ */
+ if (first_ok_slave)
+ return first_ok_slave->dev;
+
+ return NULL;
+}
+/* QCA NSS ECM bonding support - End */
+
int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
struct slave *slave)
{
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -286,6 +286,21 @@ const char *bond_mode_name(int mode)
return names[mode];
}
+/* QCA NSS ECM bonding support */
+int bond_get_id(struct net_device *bond_dev)
+{
+ struct bonding *bond;
+
+ if (!((bond_dev->priv_flags & IFF_BONDING) &&
+ (bond_dev->flags & IFF_MASTER)))
+ return -EINVAL;
+
+ bond = netdev_priv(bond_dev);
+ return bond->id;
+}
+EXPORT_SYMBOL(bond_get_id);
+/* QCA NSS ECM bonding support */
+
/**
* bond_dev_queue_xmit - Prepare skb for xmit.
*
@@ -1185,6 +1200,23 @@ void bond_change_active_slave(struct bon
if (BOND_MODE(bond) == BOND_MODE_8023AD)
bond_3ad_handle_link_change(new_active, BOND_LINK_UP);
+ /* QCA NSS ECM bonding support - Start */
+ if (bond->params.mode == BOND_MODE_XOR) {
+ struct bond_cb *lag_cb_main;
+
+ rcu_read_lock();
+ lag_cb_main = rcu_dereference(bond_cb);
+ if (lag_cb_main &&
+ lag_cb_main->bond_cb_link_up) {
+ struct net_device *dev;
+
+ dev = new_active->dev;
+ lag_cb_main->bond_cb_link_up(dev);
+ }
+ rcu_read_unlock();
+ }
+ /* QCA NSS ECM bonding support - End */
+
if (bond_is_lb(bond))
bond_alb_handle_link_change(bond, new_active, BOND_LINK_UP);
} else {
@@ -1808,6 +1840,7 @@ int bond_enslave(struct net_device *bond
const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
struct slave *new_slave = NULL, *prev_slave;
struct sockaddr_storage ss;
+ struct bond_cb *lag_cb_main; /* QCA NSS ECM bonding support */
int link_reporting;
int res = 0, i;
@@ -2251,6 +2284,15 @@ int bond_enslave(struct net_device *bond
bond_is_active_slave(new_slave) ? "an active" : "a backup",
new_slave->link != BOND_LINK_DOWN ? "an up" : "a down");
+ /* QCA NSS ECM bonding support - Start */
+ rcu_read_lock();
+ lag_cb_main = rcu_dereference(bond_cb);
+ if (lag_cb_main && lag_cb_main->bond_cb_enslave)
+ lag_cb_main->bond_cb_enslave(slave_dev);
+
+ rcu_read_unlock();
+ /* QCA NSS ECM bonding support - End */
+
/* enslave is successful */
bond_queue_slave_event(new_slave);
return 0;
@@ -2316,6 +2358,15 @@ err_undo_flags:
}
}
+ /* QCA NSS ECM bonding support - Start */
+ rcu_read_lock();
+ lag_cb_main = rcu_dereference(bond_cb);
+ if (lag_cb_main && lag_cb_main->bond_cb_enslave)
+ lag_cb_main->bond_cb_enslave(slave_dev);
+
+ rcu_read_unlock();
+ /* QCA NSS ECM bonding support - End */
+
return res;
}
@@ -2337,6 +2388,7 @@ static int __bond_release_one(struct net
struct bonding *bond = netdev_priv(bond_dev);
struct slave *slave, *oldcurrent;
struct sockaddr_storage ss;
+ struct bond_cb *lag_cb_main; /* QCA NSS ECM bonding support */
int old_flags = bond_dev->flags;
netdev_features_t old_features = bond_dev->features;
@@ -2359,6 +2411,15 @@ static int __bond_release_one(struct net
bond_set_slave_inactive_flags(slave, BOND_SLAVE_NOTIFY_NOW);
+ /* QCA NSS ECM bonding support - Start */
+ rcu_read_lock();
+ lag_cb_main = rcu_dereference(bond_cb);
+ if (lag_cb_main && lag_cb_main->bond_cb_release)
+ lag_cb_main->bond_cb_release(slave_dev);
+
+ rcu_read_unlock();
+ /* QCA NSS ECM bonding support - End */
+
bond_sysfs_slave_del(slave);
/* recompute stats just before removing the slave */
@@ -2678,6 +2739,8 @@ static void bond_miimon_commit(struct bo
struct slave *slave, *primary, *active;
bool do_failover = false;
struct list_head *iter;
+ struct net_device *slave_dev = NULL; /* QCA NSS ECM bonding support */
+ struct bond_cb *lag_cb_main; /* QCA NSS ECM bonding support */
ASSERT_RTNL();
@@ -2717,6 +2780,12 @@ static void bond_miimon_commit(struct bo
bond_set_active_slave(slave);
}
+ /* QCA NSS ECM bonding support - Start */
+ if ((bond->params.mode == BOND_MODE_XOR) &&
+ (!slave_dev))
+ slave_dev = slave->dev;
+ /* QCA NSS ECM bonding support - End */
+
slave_info(bond->dev, slave->dev, "link status definitely up, %u Mbps %s duplex\n",
slave->speed == SPEED_UNKNOWN ? 0 : slave->speed,
slave->duplex ? "full" : "half");
@@ -2765,6 +2834,16 @@ static void bond_miimon_commit(struct bo
unblock_netpoll_tx();
}
+ /* QCA NSS ECM bonding support - Start */
+ rcu_read_lock();
+ lag_cb_main = rcu_dereference(bond_cb);
+
+ if (slave_dev && lag_cb_main && lag_cb_main->bond_cb_link_up)
+ lag_cb_main->bond_cb_link_up(slave_dev);
+
+ rcu_read_unlock();
+ /* QCA NSS ECM bonding support - End */
+
bond_set_carrier(bond);
}
@@ -4012,8 +4091,219 @@ static inline u32 bond_eth_hash(struct s
return 0;
ep = (struct ethhdr *)(data + mhoff);
- return ep->h_dest[5] ^ ep->h_source[5] ^ be16_to_cpu(ep->h_proto);
+ return ep->h_dest[5] ^ ep->h_source[5]; /* QCA NSS ECM bonding support */
+}
+
+/* QCA NSS ECM bonding support - Start */
+/* Extract the appropriate headers based on bond's xmit policy */
+static bool bond_flow_dissect_without_skb(struct bonding *bond,
+ u8 *src_mac, u8 *dst_mac,
+ void *psrc, void *pdst,
+ u16 protocol, __be16 *layer4hdr,
+ struct flow_keys *fk)
+{
+ u32 *src = NULL;
+ u32 *dst = NULL;
+
+ fk->ports.ports = 0;
+ src = (uint32_t *)psrc;
+ dst = (uint32_t *)pdst;
+
+ if (protocol == htons(ETH_P_IP)) {
+ /* V4 addresses and address type*/
+ fk->addrs.v4addrs.src = src[0];
+ fk->addrs.v4addrs.dst = dst[0];
+ fk->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
+ } else if (protocol == htons(ETH_P_IPV6)) {
+ /* V6 addresses and address type*/
+ memcpy(&fk->addrs.v6addrs.src, src, sizeof(struct in6_addr));
+ memcpy(&fk->addrs.v6addrs.dst, dst, sizeof(struct in6_addr));
+ fk->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
+ } else {
+ return false;
+ }
+ if ((bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34) &&
+ (layer4hdr))
+ fk->ports.ports = *layer4hdr;
+
+ return true;
+}
+
+/* bond_xmit_hash_without_skb - Applies load balancing algorithm for a packet,
+ * to calculate hash for a given set of L2/L3 addresses. Does not
+ * calculate egress interface.
+ */
+uint32_t bond_xmit_hash_without_skb(u8 *src_mac, u8 *dst_mac,
+ void *psrc, void *pdst, u16 protocol,
+ struct net_device *bond_dev,
+ __be16 *layer4hdr)
+{
+ struct bonding *bond = netdev_priv(bond_dev);
+ struct flow_keys flow;
+ u32 hash = 0;
+
+ if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER2 ||
+ !bond_flow_dissect_without_skb(bond, src_mac, dst_mac, psrc,
+ pdst, protocol, layer4hdr, &flow))
+ return (dst_mac[5] ^ src_mac[5]);
+
+ if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER23)
+ hash = dst_mac[5] ^ src_mac[5];
+ else if (layer4hdr)
+ hash = (__force u32)flow.ports.ports;
+
+ hash ^= (__force u32)flow_get_u32_dst(&flow) ^
+ (__force u32)flow_get_u32_src(&flow);
+ hash ^= (hash >> 16);
+ hash ^= (hash >> 8);
+
+ return hash;
+}
+
+/* bond_xor_get_tx_dev - Calculate egress interface for a given packet for a LAG
+ * that is configured in balance-xor mode
+ * @skb: pointer to skb to be egressed
+ * @src_mac: pointer to source L2 address
+ * @dst_mac: pointer to destination L2 address
+ * @src: pointer to source L3 address in network order
+ * @dst: pointer to destination L3 address in network order
+ * @protocol: L3 protocol
+ * @bond_dev: pointer to bond master device
+ *
+ * If @skb is NULL, bond_xmit_hash_without_skb is used to calculate hash using
+ * L2/L3 addresses.
+ *
+ * Returns: Either valid slave device, or NULL otherwise
+ */
+static struct net_device *bond_xor_get_tx_dev(struct sk_buff *skb,
+ u8 *src_mac, u8 *dst_mac,
+ void *src, void *dst,
+ u16 protocol,
+ struct net_device *bond_dev,
+ __be16 *layer4hdr)
+{
+ struct bonding *bond = netdev_priv(bond_dev);
+ int slave_cnt = READ_ONCE(bond->slave_cnt);
+ int slave_id = 0, i = 0;
+ u32 hash;
+ struct list_head *iter;
+ struct slave *slave;
+
+ if (slave_cnt == 0) {
+ pr_debug("%s: Error: No slave is attached to the interface\n",
+ bond_dev->name);
+ return NULL;
+ }
+
+ if (skb) {
+ hash = bond_xmit_hash(bond, skb);
+ slave_id = hash % slave_cnt;
+ } else {
+ if (bond->params.xmit_policy != BOND_XMIT_POLICY_LAYER23 &&
+ bond->params.xmit_policy != BOND_XMIT_POLICY_LAYER2 &&
+ bond->params.xmit_policy != BOND_XMIT_POLICY_LAYER34) {
+ pr_debug("%s: Error: Unsupported hash policy for balance-XOR fast path\n",
+ bond_dev->name);
+ return NULL;
+ }
+
+ hash = bond_xmit_hash_without_skb(src_mac, dst_mac, src,
+ dst, protocol, bond_dev,
+ layer4hdr);
+ slave_id = hash % slave_cnt;
+ }
+
+ i = slave_id;
+
+ /* Here we start from the slave with slave_id */
+ bond_for_each_slave_rcu(bond, slave, iter) {
+ if (--i < 0) {
+ if (bond_slave_can_tx(slave))
+ return slave->dev;
+ }
+ }
+
+ /* Here we start from the first slave up to slave_id */
+ i = slave_id;
+ bond_for_each_slave_rcu(bond, slave, iter) {
+ if (--i < 0)
+ break;
+ if (bond_slave_can_tx(slave))
+ return slave->dev;
+ }
+
+ return NULL;
+}
+
+/* bond_get_tx_dev - Calculate egress interface for a given packet.
+ *
+ * Supports 802.3AD and balance-xor modes
+ *
+ * @skb: pointer to skb to be egressed, if valid
+ * @src_mac: pointer to source L2 address
+ * @dst_mac: pointer to destination L2 address
+ * @src: pointer to source L3 address in network order
+ * @dst: pointer to destination L3 address in network order
+ * @protocol: L3 protocol id from L2 header
+ * @bond_dev: pointer to bond master device
+ *
+ * Returns: Either a valid slave device, or NULL for unsupported LAG modes
+ */
+struct net_device *bond_get_tx_dev(struct sk_buff *skb, uint8_t *src_mac,
+ u8 *dst_mac, void *src,
+ void *dst, u16 protocol,
+ struct net_device *bond_dev,
+ __be16 *layer4hdr)
+{
+ struct bonding *bond;
+
+ if (!bond_dev)
+ return NULL;
+
+ if (!((bond_dev->priv_flags & IFF_BONDING) &&
+ (bond_dev->flags & IFF_MASTER)))
+ return NULL;
+
+ bond = netdev_priv(bond_dev);
+
+ switch (bond->params.mode) {
+ case BOND_MODE_XOR:
+ return bond_xor_get_tx_dev(skb, src_mac, dst_mac,
+ src, dst, protocol,
+ bond_dev, layer4hdr);
+ case BOND_MODE_8023AD:
+ return bond_3ad_get_tx_dev(skb, src_mac, dst_mac,
+ src, dst, protocol,
+ bond_dev, layer4hdr);
+ default:
+ return NULL;
+ }
}
+EXPORT_SYMBOL(bond_get_tx_dev);
+
+/* In bond_xmit_xor(), we determine the output device by using a pre-
+ * determined xmit_hash_policy(). If the selected device is not enabled,
+ * find the next active slave.
+ */
+static int bond_xmit_xor(struct sk_buff *skb, struct net_device *dev)
+{
+ struct bonding *bond = netdev_priv(dev);
+ struct net_device *outdev;
+
+ outdev = bond_xor_get_tx_dev(skb, NULL, NULL, NULL,
+ NULL, 0, dev, NULL);
+ if (!outdev)
+ goto out;
+
+ bond_dev_queue_xmit(bond, skb, outdev);
+ goto final;
+out:
+ /* no suitable interface, frame not sent */
+ dev_kfree_skb(skb);
+final:
+ return NETDEV_TX_OK;
+}
+/* QCA NSS ECM bonding support - End */
static bool bond_flow_ip(struct sk_buff *skb, struct flow_keys *fk, const void *data,
int hlen, __be16 l2_proto, int *nhoff, int *ip_proto, bool l34)
@@ -5192,15 +5482,18 @@ static netdev_tx_t bond_3ad_xor_xmit(str
struct net_device *dev)
{
struct bonding *bond = netdev_priv(dev);
- struct bond_up_slave *slaves;
- struct slave *slave;
+ /* QCA NSS ECM bonding support - Start */
+ struct net_device *outdev = NULL;
- slaves = rcu_dereference(bond->usable_slaves);
- slave = bond_xmit_3ad_xor_slave_get(bond, skb, slaves);
- if (likely(slave))
- return bond_dev_queue_xmit(bond, skb, slave->dev);
+ outdev = bond_3ad_get_tx_dev(skb, NULL, NULL, NULL,
+ NULL, 0, dev, NULL);
+ if (!outdev) {
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
- return bond_tx_drop(dev, skb);
+ return bond_dev_queue_xmit(bond, skb, outdev);
+ /* QCA NSS ECM bonding support - End */
}
/* in broadcast mode, we send everything to all usable interfaces. */
@@ -5450,8 +5743,9 @@ static netdev_tx_t __bond_start_xmit(str
return bond_xmit_roundrobin(skb, dev);
case BOND_MODE_ACTIVEBACKUP:
return bond_xmit_activebackup(skb, dev);
- case BOND_MODE_8023AD:
case BOND_MODE_XOR:
+ return bond_xmit_xor(skb, dev); /* QCA NSS ECM bonding support */
+ case BOND_MODE_8023AD:
return bond_3ad_xor_xmit(skb, dev);
case BOND_MODE_BROADCAST:
return bond_xmit_broadcast(skb, dev);
--- a/include/net/bond_3ad.h
+++ b/include/net/bond_3ad.h
@@ -303,8 +303,15 @@ int bond_3ad_lacpdu_recv(const struct sk
int bond_3ad_set_carrier(struct bonding *bond);
void bond_3ad_update_lacp_active(struct bonding *bond);
void bond_3ad_update_lacp_rate(struct bonding *bond);
+/* QCA NSS ECM bonding support */
+struct net_device *bond_3ad_get_tx_dev(struct sk_buff *skb, uint8_t *src_mac,
+ uint8_t *dst_mac, void *src,
+ void *dst, uint16_t protocol,
+ struct net_device *bond_dev,
+ __be16 *layer4hdr);
+/* QCA NSS ECM bonding support */
+
void bond_3ad_update_ad_actor_settings(struct bonding *bond);
int bond_3ad_stats_fill(struct sk_buff *skb, struct bond_3ad_stats *stats);
size_t bond_3ad_stats_size(void);
#endif /* _NET_BOND_3AD_H */
-
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -94,6 +94,8 @@
#define BOND_TLS_FEATURES (NETIF_F_HW_TLS_TX | NETIF_F_HW_TLS_RX)
+extern struct bond_cb __rcu *bond_cb; /* QCA NSS ECM bonding support */
+
#ifdef CONFIG_NET_POLL_CONTROLLER
extern atomic_t netpoll_block_tx;
@@ -659,6 +661,7 @@ struct bond_net {
int bond_rcv_validate(const struct sk_buff *skb, struct bonding *bond, struct slave *slave);
netdev_tx_t bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev);
+int bond_get_id(struct net_device *bond_dev); /* QCA NSS ECM bonding support */
int bond_create(struct net *net, const char *name);
int bond_create_sysfs(struct bond_net *net);
void bond_destroy_sysfs(struct bond_net *net);
@@ -689,6 +692,13 @@ struct bond_vlan_tag *bond_verify_device
int level);
int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave);
void bond_slave_arr_work_rearm(struct bonding *bond, unsigned long delay);
+/* QCA NSS ECM bonding support - Start */
+uint32_t bond_xmit_hash_without_skb(uint8_t *src_mac, uint8_t *dst_mac,
+ void *psrc, void *pdst, uint16_t protocol,
+ struct net_device *bond_dev,
+ __be16 *layer4hdr);
+/* QCA NSS ECM bonding support - End */
+
void bond_work_init_all(struct bonding *bond);
#ifdef CONFIG_PROC_FS
@@ -793,4 +803,18 @@ static inline netdev_tx_t bond_tx_drop(s
return NET_XMIT_DROP;
}
+/* QCA NSS ECM bonding support - Start */
+struct bond_cb {
+ void (*bond_cb_link_up)(struct net_device *slave);
+ void (*bond_cb_link_down)(struct net_device *slave);
+ void (*bond_cb_enslave)(struct net_device *slave);
+ void (*bond_cb_release)(struct net_device *slave);
+ void (*bond_cb_delete_by_slave)(struct net_device *slave);
+ void (*bond_cb_delete_by_mac)(uint8_t *mac_addr);
+};
+
+extern int bond_register_cb(struct bond_cb *cb);
+extern void bond_unregister_cb(void);
+/* QCA NSS ECM bonding support - End */
+
#endif /* _NET_BONDING_H */
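Note: putting the pieces of this patch together, an offload module fills a struct bond_cb with the events it cares about and hands it to bond_register_cb(), which copies the structure; it can then resolve the egress slave for a flow with the exported bond_get_tx_dev(). The sketch below is illustrative only and the names are hypothetical.

static void example_slave_link_up(struct net_device *slave)
{
	pr_info("bond slave %s came up\n", slave->name);
}

static void example_slave_link_down(struct net_device *slave)
{
	pr_info("bond slave %s went down\n", slave->name);
}

static struct bond_cb example_bond_cb = {
	.bond_cb_link_up   = example_slave_link_up,
	.bond_cb_link_down = example_slave_link_down,
};

static int __init example_init(void)
{
	/* bond_register_cb() copies the struct, so a static copy is fine. */
	return bond_register_cb(&example_bond_cb);
}

static void __exit example_exit(void)
{
	bond_unregister_cb();
}

module_init(example_init);
module_exit(example_exit);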

View file

@@ -0,0 +1,96 @@
--- a/include/linux/if_macvlan.h
+++ b/include/linux/if_macvlan.h
@@ -15,6 +15,13 @@ struct macvlan_port;
#define MACVLAN_MC_FILTER_BITS 8
#define MACVLAN_MC_FILTER_SZ (1 << MACVLAN_MC_FILTER_BITS)
+/* QCA NSS ECM Support - Start */
+/*
+ * Callback for updating interface statistics for macvlan flows offloaded from host CPU.
+ */
+typedef void (*macvlan_offload_stats_update_cb_t)(struct net_device *dev, struct rtnl_link_stats64 *stats, bool update_mcast_rx_stats);
+/* QCA NSS ECM Support - End */
+
struct macvlan_dev {
struct net_device *dev;
struct list_head list;
@@ -35,6 +42,7 @@ struct macvlan_dev {
#ifdef CONFIG_NET_POLL_CONTROLLER
struct netpoll *netpoll;
#endif
+ macvlan_offload_stats_update_cb_t offload_stats_update; /* QCA NSS ECM support */
};
static inline void macvlan_count_rx(const struct macvlan_dev *vlan,
@@ -107,4 +115,26 @@ static inline int macvlan_release_l2fw_o
macvlan->accel_priv = NULL;
return dev_uc_add(macvlan->lowerdev, dev->dev_addr);
}
+
+/* QCA NSS ECM Support - Start */
+#if IS_ENABLED(CONFIG_MACVLAN)
+static inline void
+macvlan_offload_stats_update(struct net_device *dev,
+ struct rtnl_link_stats64 *stats,
+ bool update_mcast_rx_stats)
+{
+ struct macvlan_dev *macvlan = netdev_priv(dev);
+
+ macvlan->offload_stats_update(dev, stats, update_mcast_rx_stats);
+}
+
+static inline enum
+macvlan_mode macvlan_get_mode(struct net_device *dev)
+{
+ struct macvlan_dev *macvlan = netdev_priv(dev);
+
+ return macvlan->mode;
+}
+#endif
+/* QCA NSS ECM Support - End */
#endif /* _LINUX_IF_MACVLAN_H */
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -933,6 +933,34 @@ static void macvlan_uninit(struct net_de
macvlan_port_destroy(port->dev);
}
+/* QCA NSS ECM Support - Start */
+/* Update macvlan statistics processed by offload engines */
+static void macvlan_dev_update_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *offl_stats,
+ bool update_mcast_rx_stats)
+{
+ struct vlan_pcpu_stats *stats;
+ struct macvlan_dev *macvlan;
+
+ /* Is this a macvlan? */
+ if (!netif_is_macvlan(dev))
+ return;
+
+ macvlan = netdev_priv(dev);
+ stats = this_cpu_ptr(macvlan->pcpu_stats);
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_add(&stats->rx_packets, offl_stats->rx_packets);
+ u64_stats_add(&stats->rx_bytes, offl_stats->rx_bytes);
+ u64_stats_add(&stats->tx_packets, offl_stats->tx_packets);
+ u64_stats_add(&stats->tx_bytes, offl_stats->tx_bytes);
+ /* Update multicast statistics */
+ if (unlikely(update_mcast_rx_stats)) {
+ u64_stats_add(&stats->rx_multicast, offl_stats->rx_packets);
+ }
+ u64_stats_update_end(&stats->syncp);
+}
+/* QCA NSS ECM Support - End */
+
static void macvlan_dev_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *stats)
{
@@ -1477,6 +1505,7 @@ int macvlan_common_newlink(struct net *s
vlan->dev = dev;
vlan->port = port;
vlan->set_features = MACVLAN_FEATURES;
+ vlan->offload_stats_update = macvlan_dev_update_stats; /* QCA NSS ECM Support */
vlan->mode = MACVLAN_MODE_VEPA;
if (data && data[IFLA_MACVLAN_MODE])

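Note: with the callback wired up in macvlan_common_newlink(), an offload engine can periodically fold the counters of an accelerated macvlan flow back into the interface statistics through the inline helper added to if_macvlan.h. A minimal sketch follows; the delta values are hypothetical.

static void example_sync_macvlan_stats(struct net_device *dev)
{
	struct rtnl_link_stats64 delta = { 0 };

	if (!netif_is_macvlan(dev))
		return;

	/* Counters accumulated by the accelerator since the last sync. */
	delta.rx_packets = 100;
	delta.rx_bytes   = 150000;
	delta.tx_packets = 80;
	delta.tx_bytes   = 120000;

	/* false: these packets were not multicast receives. */
	macvlan_offload_stats_update(dev, &delta, false);
}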
View file

@@ -0,0 +1,154 @@
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -175,6 +175,13 @@ config NF_CONNTRACK_TIMEOUT
If unsure, say `N'.
+config NF_CONNTRACK_DSCPREMARK_EXT
+ bool 'Connection tracking extension for dscp remark target'
+ depends on NETFILTER_ADVANCED
+ help
+ This option enables support for connection tracking extension
+ for dscp remark.
+
config NF_CONNTRACK_TIMESTAMP
bool 'Connection tracking timestamping'
depends on NETFILTER_ADVANCED
--- a/include/net/netfilter/nf_conntrack_extend.h
+++ b/include/net/netfilter/nf_conntrack_extend.h
@@ -31,6 +31,10 @@ enum nf_ct_ext_id {
#if IS_ENABLED(CONFIG_NET_ACT_CT)
NF_CT_EXT_ACT_CT,
#endif
+#ifdef CONFIG_NF_CONNTRACK_DSCPREMARK_EXT
+ NF_CT_EXT_DSCPREMARK, /* QCA NSS ECM support */
+#endif
+
NF_CT_EXT_NUM,
};
--- a/net/netfilter/nf_conntrack_extend.c
+++ b/net/netfilter/nf_conntrack_extend.c
@@ -23,6 +23,7 @@
#include <net/netfilter/nf_conntrack_labels.h>
#include <net/netfilter/nf_conntrack_synproxy.h>
#include <net/netfilter/nf_conntrack_act_ct.h>
+#include <net/netfilter/nf_conntrack_dscpremark_ext.h>
#include <net/netfilter/nf_nat.h>
#define NF_CT_EXT_PREALLOC 128u /* conntrack events are on by default */
@@ -54,6 +55,9 @@ static const u8 nf_ct_ext_type_len[NF_CT
#if IS_ENABLED(CONFIG_NET_ACT_CT)
[NF_CT_EXT_ACT_CT] = sizeof(struct nf_conn_act_ct_ext),
#endif
+#ifdef CONFIG_NF_CONNTRACK_DSCPREMARK_EXT
+ [NF_CT_EXT_DSCPREMARK] = sizeof(struct nf_ct_dscpremark_ext),
+#endif
};
static __always_inline unsigned int total_extension_size(void)
@@ -86,6 +90,9 @@ static __always_inline unsigned int tota
#if IS_ENABLED(CONFIG_NET_ACT_CT)
+ sizeof(struct nf_conn_act_ct_ext)
#endif
+#ifdef CONFIG_NF_CONNTRACK_DSCPREMARK_EXT
+ + sizeof(struct nf_ct_dscpremark_ext)
+#endif
;
}
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -14,6 +14,7 @@ nf_conntrack-$(CONFIG_NF_CONNTRACK_LABEL
nf_conntrack-$(CONFIG_NF_CT_PROTO_DCCP) += nf_conntrack_proto_dccp.o
nf_conntrack-$(CONFIG_NF_CT_PROTO_SCTP) += nf_conntrack_proto_sctp.o
nf_conntrack-$(CONFIG_NF_CT_PROTO_GRE) += nf_conntrack_proto_gre.o
+nf_conntrack-$(CONFIG_NF_CONNTRACK_DSCPREMARK_EXT) += nf_conntrack_dscpremark_ext.o
ifeq ($(CONFIG_NF_CONNTRACK),m)
nf_conntrack-$(CONFIG_DEBUG_INFO_BTF_MODULES) += nf_conntrack_bpf.o
else ifeq ($(CONFIG_NF_CONNTRACK),y)
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -45,6 +45,9 @@
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_timestamp.h>
#include <net/netfilter/nf_conntrack_timeout.h>
+#ifdef CONFIG_NF_CONNTRACK_DSCPREMARK_EXT
+#include <net/netfilter/nf_conntrack_dscpremark_ext.h>
+#endif
#include <net/netfilter/nf_conntrack_labels.h>
#include <net/netfilter/nf_conntrack_synproxy.h>
#include <net/netfilter/nf_nat.h>
@@ -1781,6 +1784,9 @@ init_conntrack(struct net *net, struct n
nf_ct_acct_ext_add(ct, GFP_ATOMIC);
nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
nf_ct_labels_ext_add(ct);
+#ifdef CONFIG_NF_CONNTRACK_DSCPREMARK_EXT
+ nf_ct_dscpremark_ext_add(ct, GFP_ATOMIC);
+#endif
#ifdef CONFIG_NF_CONNTRACK_EVENTS
ecache = tmpl ? nf_ct_ecache_find(tmpl) : NULL;
--- a/net/netfilter/xt_DSCP.c
+++ b/net/netfilter/xt_DSCP.c
@@ -15,6 +15,9 @@
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_DSCP.h>
+#ifdef CONFIG_NF_CONNTRACK_DSCPREMARK_EXT
+#include <net/netfilter/nf_conntrack_dscpremark_ext.h>
+#endif
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_DESCRIPTION("Xtables: DSCP/TOS field modification");
@@ -31,6 +34,10 @@ dscp_tg(struct sk_buff *skb, const struc
{
const struct xt_DSCP_info *dinfo = par->targinfo;
u_int8_t dscp = ipv4_get_dsfield(ip_hdr(skb)) >> XT_DSCP_SHIFT;
+#ifdef CONFIG_NF_CONNTRACK_DSCPREMARK_EXT
+ struct nf_conn *ct;
+ enum ip_conntrack_info ctinfo;
+#endif
if (dscp != dinfo->dscp) {
if (skb_ensure_writable(skb, sizeof(struct iphdr)))
@@ -39,6 +46,13 @@ dscp_tg(struct sk_buff *skb, const struc
ipv4_change_dsfield(ip_hdr(skb), XT_DSCP_ECN_MASK,
dinfo->dscp << XT_DSCP_SHIFT);
+#ifdef CONFIG_NF_CONNTRACK_DSCPREMARK_EXT
+ ct = nf_ct_get(skb, &ctinfo);
+ if (!ct)
+ return XT_CONTINUE;
+
+ nf_conntrack_dscpremark_ext_set_dscp_rule_valid(ct);
+#endif
}
return XT_CONTINUE;
}
@@ -48,13 +62,24 @@ dscp_tg6(struct sk_buff *skb, const stru
{
const struct xt_DSCP_info *dinfo = par->targinfo;
u_int8_t dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> XT_DSCP_SHIFT;
-
+#ifdef CONFIG_NF_CONNTRACK_DSCPREMARK_EXT
+ struct nf_conn *ct;
+ enum ip_conntrack_info ctinfo;
+#endif
if (dscp != dinfo->dscp) {
if (skb_ensure_writable(skb, sizeof(struct ipv6hdr)))
return NF_DROP;
ipv6_change_dsfield(ipv6_hdr(skb), XT_DSCP_ECN_MASK,
dinfo->dscp << XT_DSCP_SHIFT);
+
+#ifdef CONFIG_NF_CONNTRACK_DSCPREMARK_EXT
+ ct = nf_ct_get(skb, &ctinfo);
+ if (!ct)
+ return XT_CONTINUE;
+
+ nf_conntrack_dscpremark_ext_set_dscp_rule_valid(ct);
+#endif
}
return XT_CONTINUE;
}
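Note: the header nf_conntrack_dscpremark_ext.h that defines the extension structure and the nf_ct_dscpremark_ext_add()/..._set_dscp_rule_valid() helpers is added by a companion file and is not shown here. Given the extension slot registered above, a consumer can at least test for its presence with the generic conntrack extension API; the sketch below only assumes that much.

#ifdef CONFIG_NF_CONNTRACK_DSCPREMARK_EXT
/* Illustrative only: the layout of struct nf_ct_dscpremark_ext is not
 * part of this patch, so we only check that the extension exists.
 */
static bool example_ct_has_dscpremark(const struct nf_conn *ct)
{
	return nf_ct_ext_find(ct, NF_CT_EXT_DSCPREMARK) != NULL;
}
#endif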

View file

@@ -0,0 +1,89 @@
From ce18a6fdff6a39a01111d74f513d2ef66142047c Mon Sep 17 00:00:00 2001
From: Murat Sezgin <msezgin@codeaurora.org>
Date: Wed, 5 Aug 2020 13:21:27 -0700
Subject: [PATCH 246/281] net:ipv6: Fix IPv6 user route change event calls
These events should be called only when the route table is
changed by userspace. So, we should call them in the
ioctl and the netlink message handler functions.
Change-Id: If7ec615014cfc79d5fa72878e49eaf99c2560c32
Signed-off-by: Murat Sezgin <msezgin@codeaurora.org>
---
net/ipv6/route.c | 31 +++++++++++++++++++++----------
1 file changed, 21 insertions(+), 10 deletions(-)
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index df82117..4fb8247 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -3868,10 +3868,6 @@ int ip6_route_add(struct fib6_config *cfg, gfp_t gfp_flags,
return PTR_ERR(rt);
err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, extack);
- if (!err)
- atomic_notifier_call_chain(&ip6route_chain,
- RTM_NEWROUTE, rt);
-
fib6_info_release(rt);
return err;
@@ -3893,9 +3889,6 @@ static int __ip6_del_rt(struct fib6_info *rt, struct nl_info *info)
err = fib6_del(rt, info);
spin_unlock_bh(&table->tb6_lock);
- if (!err)
- atomic_notifier_call_chain(&ip6route_chain,
- RTM_DELROUTE, rt);
out:
fib6_info_release(rt);
return err;
@@ -4501,6 +4494,10 @@ int ipv6_route_ioctl(struct net *net, unsigned int cmd, struct in6_rtmsg *rtmsg)
break;
}
rtnl_unlock();
+ if (!err)
+ atomic_notifier_call_chain(&ip6route_chain,
+ (cmd == SIOCADDRT) ? RTM_NEWROUTE : RTM_DELROUTE, &cfg);
+
return err;
}
@@ -5528,11 +5525,17 @@ static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh,
}
if (cfg.fc_mp)
- return ip6_route_multipath_del(&cfg, extack);
+ err = ip6_route_multipath_del(&cfg, extack);
else {
cfg.fc_delete_all_nh = 1;
- return ip6_route_del(&cfg, extack);
+ err = ip6_route_del(&cfg, extack);
}
+
+ if (!err)
+ atomic_notifier_call_chain(&ip6route_chain,
+ RTM_DELROUTE, &cfg);
+
+ return err;
}
static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh,
@@ -5549,9 +5552,15 @@ static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh,
cfg.fc_metric = IP6_RT_PRIO_USER;
if (cfg.fc_mp)
- return ip6_route_multipath_add(&cfg, extack);
+ err = ip6_route_multipath_add(&cfg, extack);
else
- return ip6_route_add(&cfg, GFP_KERNEL, extack);
+ err = ip6_route_add(&cfg, GFP_KERNEL, extack);
+
+ if (!err)
+ atomic_notifier_call_chain(&ip6route_chain,
+ RTM_NEWROUTE, &cfg);
+
+ return err;
}
/* add the overhead of this fib6_nh to nexthop_len */

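Note: after this change the notifier payload is the struct fib6_config built from the userspace request rather than a fib6_info, and the chain fires once per ioctl or netlink operation. Below is an illustrative listener; ip6route_chain itself and its registration helper come from the companion ECM patch, so the registration call shown in the comment is an assumption.

static int example_ip6route_event(struct notifier_block *nb,
				  unsigned long event, void *data)
{
	struct fib6_config *cfg = data;

	pr_debug("user ipv6 route %s (table %u)\n",
		 event == RTM_NEWROUTE ? "added" : "deleted",
		 cfg->fc_table);
	return NOTIFY_DONE;
}

static struct notifier_block example_ip6route_nb = {
	.notifier_call = example_ip6route_event,
};

/* Registration would use the mechanism exported alongside ip6route_chain,
 * e.g. atomic_notifier_chain_register(&ip6route_chain, &example_ip6route_nb);
 */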
View file

@@ -0,0 +1,92 @@
From 3c17a0e1112be70071e98d5208da5b55dcec20a6 Mon Sep 17 00:00:00 2001
From: Simon Casey <simon501098c@gmail.com>
Date: Wed, 2 Feb 2022 19:37:29 +0100
Subject: [PATCH] Update 607-qca-add-add-nss-bridge-mgr-support.patch for kernel 5.15
---
include/linux/if_bridge.h | 4 ++++
net/bridge/br_fdb.c | 25 +++++++++++++++++++++----
2 files changed, 25 insertions(+), 4 deletions(-)
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -252,4 +252,8 @@ typedef struct net_bridge_port *br_get_d
extern br_get_dst_hook_t __rcu *br_get_dst_hook;
/* QCA NSS ECM support - End */
+/* QCA NSS bridge-mgr support - Start */
+extern struct net_device *br_fdb_bridge_dev_get_and_hold(struct net_bridge *br);
+/* QCA NSS bridge-mgr support - End */
+
#endif
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -569,7 +569,7 @@ void br_fdb_cleanup(struct work_struct *
unsigned long delay = hold_time(br);
unsigned long work_delay = delay;
unsigned long now = jiffies;
- u8 mac_addr[6]; /* QCA NSS ECM support */
+ struct br_fdb_event fdb_event; /* QCA NSS bridge-mgr support */
/* this part is tricky, in order to avoid blocking learning and
* consequently forwarding, we rely on rcu to delete objects with
@@ -597,12 +597,13 @@ void br_fdb_cleanup(struct work_struct *
} else {
spin_lock_bh(&br->hash_lock);
if (!hlist_unhashed(&f->fdb_node)) {
- ether_addr_copy(mac_addr, f->key.addr.addr);
+ memset(&fdb_event, 0, sizeof(fdb_event));
+ ether_addr_copy(fdb_event.addr, f->key.addr.addr);
fdb_delete(br, f, true);
/* QCA NSS ECM support - Start */
atomic_notifier_call_chain(
&br_fdb_update_notifier_list, 0,
- (void *)mac_addr);
+ (void *)&fdb_event);
/* QCA NSS ECM support - End */
}
spin_unlock_bh(&br->hash_lock);
@@ -900,10 +901,21 @@ static bool __fdb_mark_active(struct net
test_and_clear_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags));
}
+/* QCA NSS bridge-mgr support - Start */
+/* Get the bridge device */
+struct net_device *br_fdb_bridge_dev_get_and_hold(struct net_bridge *br)
+{
+ dev_hold(br->dev);
+ return br->dev;
+}
+EXPORT_SYMBOL_GPL(br_fdb_bridge_dev_get_and_hold);
+/* QCA NSS bridge-mgr support - End */
+
void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
const unsigned char *addr, u16 vid, unsigned long flags)
{
struct net_bridge_fdb_entry *fdb;
+ struct br_fdb_event fdb_event; /* QCA NSS bridge-mgr support */
/* some users want to always flood. */
if (hold_time(br) == 0)
@@ -929,6 +941,12 @@ void br_fdb_update(struct net_bridge *br
if (unlikely(source != READ_ONCE(fdb->dst) &&
!test_bit(BR_FDB_STICKY, &fdb->flags))) {
br_switchdev_fdb_notify(br, fdb, RTM_DELNEIGH);
+ /* QCA NSS bridge-mgr support - Start */
+ ether_addr_copy(fdb_event.addr, addr);
+ fdb_event.br = br;
+ fdb_event.orig_dev = fdb->dst->dev;
+ fdb_event.dev = source->dev;
+ /* QCA NSS bridge-mgr support - End */
WRITE_ONCE(fdb->dst, source);
fdb_modified = true;
/* Take over HW learned entry */
@@ -940,7 +958,7 @@ void br_fdb_update(struct net_bridge *br
/* QCA NSS ECM support - Start */
atomic_notifier_call_chain(
&br_fdb_update_notifier_list,
- 0, (void *)addr);
+ 0, (void *)&fdb_event);
/* QCA NSS ECM support - End */
}
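Note: struct br_fdb_event and the br_fdb_update_notifier_list registration helper are introduced by the companion ECM patch; this change only switches the payload from a bare MAC address to the richer event structure. An illustrative listener that uses just the fields populated here:

static int example_fdb_update_event(struct notifier_block *nb,
				    unsigned long action, void *data)
{
	struct br_fdb_event *ev = data;

	/* ev->dev/ev->br are only filled on station moves, not on ageing. */
	pr_debug("fdb update for %pM on %s\n", ev->addr,
		 ev->dev ? ev->dev->name : "(aged out)");
	return NOTIFY_DONE;
}

static struct notifier_block example_fdb_nb = {
	.notifier_call = example_fdb_update_event,
};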

View file

@@ -0,0 +1,44 @@
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -773,6 +773,7 @@ typedef unsigned char *sk_buff_data_t;
* @offload_fwd_mark: Packet was L2-forwarded in hardware
* @offload_l3_fwd_mark: Packet was L3-forwarded in hardware
* @tc_skip_classify: do not classify packet. set by IFB device
+ * @tc_skip_classify_offload: do not classify packet. set by offload IFB device
* @tc_at_ingress: used within tc_classify to distinguish in/egress
* @redirected: packet was redirected by packet classifier
* @from_ingress: packet was redirected from the ingress path
@@ -968,6 +969,8 @@ struct sk_buff {
#ifdef CONFIG_NET_CLS_ACT
__u8 tc_skip_classify:1;
__u8 tc_at_ingress:1; /* See TC_AT_INGRESS_MASK */
+ __u8 tc_skip_classify_offload:1;
+ __u16 tc_verd_qca_nss; /* QCA NSS Qdisc Support */
#endif
#ifdef CONFIG_IPV6_NDISC_NODETYPE
__u8 ndisc_nodetype:2;
--- a/include/uapi/linux/pkt_cls.h
+++ b/include/uapi/linux/pkt_cls.h
@@ -139,6 +139,7 @@ enum tca_id {
TCA_ID_MPLS,
TCA_ID_CT,
TCA_ID_GATE,
+ TCA_ID_MIRRED_NSS, /* QCA NSS Qdisc IGS Support */
/* other actions go here */
__TCA_ID_MAX = 255
};
@@ -801,4 +802,14 @@ enum {
TCF_EM_OPND_LT
};
+/* QCA NSS Qdisc Support - Start */
+#define _TC_MAKE32(x) ((x))
+#define _TC_MAKEMASK1(n) (_TC_MAKE32(1) << _TC_MAKE32(n))
+
+#define TC_NCLS _TC_MAKEMASK1(8)
+#define TC_NCLS_NSS _TC_MAKEMASK1(12)
+#define SET_TC_NCLS_NSS(v) ( TC_NCLS_NSS | ((v) & ~TC_NCLS_NSS))
+#define CLR_TC_NCLS_NSS(v) ( (v) & ~TC_NCLS_NSS)
+/* QCA NSS Qdisc Support - End */
+
#endif
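Note: the new tc_verd_qca_nss field and the TC_NCLS_NSS bit let the NSS IFB path mark a packet so that it is classified only once when it re-enters the stack. A minimal sketch of how a driver might set and consume the bit; the function names are illustrative.

static void example_mark_no_classify(struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd_qca_nss = SET_TC_NCLS_NSS(skb->tc_verd_qca_nss);
#endif
}

static bool example_test_and_clear_no_classify(struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
	if (skb->tc_verd_qca_nss & TC_NCLS_NSS) {
		skb->tc_verd_qca_nss = CLR_TC_NCLS_NSS(skb->tc_verd_qca_nss);
		return true;
	}
#endif
	return false;
}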

View file

@@ -0,0 +1,441 @@
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -17,6 +17,7 @@ struct timer_list {
unsigned long expires;
void (*function)(struct timer_list *);
u32 flags;
+ unsigned long cust_data;
#ifdef CONFIG_LOCKDEP
struct lockdep_map lockdep_map;
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -151,6 +151,31 @@ resched:
}
+void ifb_update_offload_stats(struct net_device *dev, struct pcpu_sw_netstats *offload_stats)
+{
+ struct ifb_dev_private *dp;
+ struct ifb_q_private *txp;
+
+ if (!dev || !offload_stats) {
+ return;
+ }
+
+ if (!(dev->priv_flags_ext & IFF_EXT_IFB)) {
+ return;
+ }
+
+ dp = netdev_priv(dev);
+ txp = dp->tx_private;
+
+ u64_stats_update_begin(&txp->rx_stats.sync);
+ txp->rx_stats.packets += u64_stats_read(&offload_stats->rx_packets);
+ txp->rx_stats.bytes += u64_stats_read(&offload_stats->rx_bytes);
+ txp->tx_stats.packets += u64_stats_read(&offload_stats->tx_packets);
+ txp->tx_stats.bytes += u64_stats_read(&offload_stats->tx_bytes);
+ u64_stats_update_end(&txp->rx_stats.sync);
+}
+EXPORT_SYMBOL(ifb_update_offload_stats);
+
static void ifb_stats64(struct net_device *dev,
struct rtnl_link_stats64 *stats)
{
@@ -326,6 +351,7 @@ static void ifb_setup(struct net_device
dev->flags |= IFF_NOARP;
dev->flags &= ~IFF_MULTICAST;
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+ dev->priv_flags_ext |= IFF_EXT_IFB; /* Mark the device as an IFB device. */
netif_keep_dst(dev);
eth_hw_addr_random(dev);
dev->needs_free_netdev = true;
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -4588,6 +4588,15 @@ void dev_uc_flush(struct net_device *dev
void dev_uc_init(struct net_device *dev);
/**
+ * ifb_update_offload_stats - Update the IFB interface stats
+ * @dev: IFB device to update the stats
+ * @offload_stats: per CPU stats structure
+ *
+ * Allows update of IFB stats when flows are offloaded to an accelerator.
+ **/
+void ifb_update_offload_stats(struct net_device *dev, struct pcpu_sw_netstats *offload_stats);
+
+/**
* __dev_uc_sync - Synchonize device's unicast list
* @dev: device to sync
* @sync: function to call if address should be added
@@ -5133,6 +5142,11 @@ static inline bool netif_is_failover_sla
return dev->priv_flags & IFF_FAILOVER_SLAVE;
}
+static inline bool netif_is_ifb_dev(const struct net_device *dev)
+{
+ return dev->priv_flags_ext & IFF_EXT_IFB;
+}
+
/* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */
static inline void netif_keep_dst(struct net_device *dev)
{
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -1278,4 +1278,248 @@ enum {
#define TCA_ETS_MAX (__TCA_ETS_MAX - 1)
+/* QCA NSS Clients Support - Start */
+enum {
+ TCA_NSS_ACCEL_MODE_NSS_FW,
+ TCA_NSS_ACCEL_MODE_PPE,
+ TCA_NSS_ACCEL_MODE_MAX
+};
+
+/* NSSFIFO section */
+
+enum {
+ TCA_NSSFIFO_UNSPEC,
+ TCA_NSSFIFO_PARMS,
+ __TCA_NSSFIFO_MAX
+};
+
+#define TCA_NSSFIFO_MAX (__TCA_NSSFIFO_MAX - 1)
+
+struct tc_nssfifo_qopt {
+ __u32 limit; /* Queue length: bytes for bfifo, packets for pfifo */
+ __u8 set_default; /* Sets qdisc to be the default qdisc for enqueue */
+ __u8 accel_mode; /* Dictates which data plane offloads the qdisc */
+};
+
+/* NSSWRED section */
+
+enum {
+ TCA_NSSWRED_UNSPEC,
+ TCA_NSSWRED_PARMS,
+ __TCA_NSSWRED_MAX
+};
+
+#define TCA_NSSWRED_MAX (__TCA_NSSWRED_MAX - 1)
+#define NSSWRED_CLASS_MAX 6
+struct tc_red_alg_parameter {
+ __u32 min; /* qlen_avg < min: pkts are all enqueued */
+ __u32 max; /* qlen_avg > max: pkts are all dropped */
+ __u32 probability;/* Drop probability at qlen_avg = max */
+ __u32 exp_weight_factor;/* exp_weight_factor for calculate qlen_avg */
+};
+
+struct tc_nsswred_traffic_class {
+ __u32 limit; /* Queue length */
+ __u32 weight_mode_value; /* Weight mode value */
+ struct tc_red_alg_parameter rap;/* Parameters for RED alg */
+};
+
+/*
+ * Weight modes for WRED
+ */
+enum tc_nsswred_weight_modes {
+ TC_NSSWRED_WEIGHT_MODE_DSCP = 0,/* Weight mode is DSCP */
+ TC_NSSWRED_WEIGHT_MODES, /* Must be last */
+};
+
+struct tc_nsswred_qopt {
+ __u32 limit; /* Queue length */
+ enum tc_nsswred_weight_modes weight_mode;
+ /* Weight mode */
+ __u32 traffic_classes; /* How many traffic classes: DPs */
+ __u32 def_traffic_class; /* Default traffic if no match: def_DP */
+ __u32 traffic_id; /* The traffic id to be configured: DP */
+ __u32 weight_mode_value; /* Weight mode value */
+ struct tc_red_alg_parameter rap;/* RED algorithm parameters */
+ struct tc_nsswred_traffic_class tntc[NSSWRED_CLASS_MAX];
+ /* Traffic settings for dumping */
+ __u8 ecn; /* Setting ECN bit or dropping */
+ __u8 set_default; /* Sets qdisc to be the default for enqueue */
+ __u8 accel_mode; /* Dictates which data plane offloads the qdisc */
+};
+
+/* NSSCODEL section */
+
+enum {
+ TCA_NSSCODEL_UNSPEC,
+ TCA_NSSCODEL_PARMS,
+ __TCA_NSSCODEL_MAX
+};
+
+#define TCA_NSSCODEL_MAX (__TCA_NSSCODEL_MAX - 1)
+
+struct tc_nsscodel_qopt {
+ __u32 target; /* Acceptable queueing delay */
+ __u32 limit; /* Max number of packets that can be held in the queue */
+ __u32 interval; /* Monitoring interval */
+ __u32 flows; /* Number of flow buckets */
+ __u32 quantum; /* Weight (in bytes) used for DRR of flow buckets */
+ __u8 ecn; /* 0 - disable ECN, 1 - enable ECN */
+ __u8 set_default; /* Sets qdisc to be the default qdisc for enqueue */
+ __u8 accel_mode; /* Dictates which data plane offloads the qdisc */
+};
+
+struct tc_nsscodel_xstats {
+ __u32 peak_queue_delay; /* Peak delay experienced by a dequeued packet */
+ __u32 peak_drop_delay; /* Peak delay experienced by a dropped packet */
+};
+
+/* NSSFQ_CODEL section */
+
+struct tc_nssfq_codel_xstats {
+ __u32 new_flow_count; /* Total number of new flows seen */
+ __u32 new_flows_len; /* Current number of new flows */
+ __u32 old_flows_len; /* Current number of old flows */
+ __u32 ecn_mark; /* Number of packets marked with ECN */
+ __u32 drop_overlimit; /* Number of packets dropped due to overlimit */
+ __u32 maxpacket; /* The largest packet seen so far in the queue */
+};
+
+/* NSSTBL section */
+
+enum {
+ TCA_NSSTBL_UNSPEC,
+ TCA_NSSTBL_PARMS,
+ __TCA_NSSTBL_MAX
+};
+
+#define TCA_NSSTBL_MAX (__TCA_NSSTBL_MAX - 1)
+
+struct tc_nsstbl_qopt {
+ __u32 burst; /* Maximum burst size */
+ __u32 rate; /* Limiting rate of TBF */
+ __u32 peakrate; /* Maximum rate at which TBF is allowed to send */
+ __u32 mtu; /* Max size of packet, or minimum burst size */
+ __u8 accel_mode; /* Dictates which data plane offloads the qdisc */
+};
+
+/* NSSPRIO section */
+
+#define TCA_NSSPRIO_MAX_BANDS 256
+
+enum {
+ TCA_NSSPRIO_UNSPEC,
+ TCA_NSSPRIO_PARMS,
+ __TCA_NSSPRIO_MAX
+};
+
+#define TCA_NSSPRIO_MAX (__TCA_NSSPRIO_MAX - 1)
+
+struct tc_nssprio_qopt {
+ __u32 bands; /* Number of bands */
+ __u8 accel_mode; /* Dictates which data plane offloads the qdisc */
+};
+
+/* NSSBF section */
+
+enum {
+ TCA_NSSBF_UNSPEC,
+ TCA_NSSBF_CLASS_PARMS,
+ TCA_NSSBF_QDISC_PARMS,
+ __TCA_NSSBF_MAX
+};
+
+#define TCA_NSSBF_MAX (__TCA_NSSBF_MAX - 1)
+
+struct tc_nssbf_class_qopt {
+ __u32 burst; /* Maximum burst size */
+ __u32 rate; /* Allowed bandwidth for this class */
+ __u32 mtu; /* MTU of the associated interface */
+ __u32 quantum; /* Quantum allocation for DRR */
+};
+
+struct tc_nssbf_qopt {
+ __u16 defcls; /* Default class value */
+ __u8 accel_mode; /* Dictates which data plane offloads the qdisc */
+};
+
+/* NSSWRR section */
+
+enum {
+ TCA_NSSWRR_UNSPEC,
+ TCA_NSSWRR_CLASS_PARMS,
+ TCA_NSSWRR_QDISC_PARMS,
+ __TCA_NSSWRR_MAX
+};
+
+#define TCA_NSSWRR_MAX (__TCA_NSSWRR_MAX - 1)
+
+struct tc_nsswrr_class_qopt {
+ __u32 quantum; /* Weight associated to this class */
+};
+
+struct tc_nsswrr_qopt {
+ __u8 accel_mode; /* Dictates which data plane offloads the qdisc */
+};
+
+/* NSSWFQ section */
+
+enum {
+ TCA_NSSWFQ_UNSPEC,
+ TCA_NSSWFQ_CLASS_PARMS,
+ TCA_NSSWFQ_QDISC_PARMS,
+ __TCA_NSSWFQ_MAX
+};
+
+#define TCA_NSSWFQ_MAX (__TCA_NSSWFQ_MAX - 1)
+
+struct tc_nsswfq_class_qopt {
+ __u32 quantum; /* Weight associated to this class */
+};
+
+struct tc_nsswfq_qopt {
+ __u8 accel_mode; /* Dictates which data plane offloads the qdisc */
+};
+
+/* NSSHTB section */
+
+enum {
+ TCA_NSSHTB_UNSPEC,
+ TCA_NSSHTB_CLASS_PARMS,
+ TCA_NSSHTB_QDISC_PARMS,
+ __TCA_NSSHTB_MAX
+};
+
+#define TCA_NSSHTB_MAX (__TCA_NSSHTB_MAX - 1)
+
+struct tc_nsshtb_class_qopt {
+ __u32 burst; /* Allowed burst size */
+ __u32 rate; /* Allowed bandwidth for this class */
+ __u32 cburst; /* Maximum burst size */
+ __u32 crate; /* Maximum bandwidth for this class */
+ __u32 quantum; /* Quantum allocation for DRR */
+ __u32 priority; /* Priority value associated with this class */
+ __u32 overhead; /* Overhead in bytes per packet */
+};
+
+struct tc_nsshtb_qopt {
+ __u32 r2q; /* Rate to quantum ratio */
+ __u8 accel_mode; /* Dictates which data plane offloads the qdisc */
+};
+
+/* NSSBLACKHOLE section */
+
+enum {
+ TCA_NSSBLACKHOLE_UNSPEC,
+ TCA_NSSBLACKHOLE_PARMS,
+ __TCA_NSSBLACKHOLE_MAX
+};
+
+#define TCA_NSSBLACKHOLE_MAX (__TCA_NSSBLACKHOLE_MAX - 1)
+
+struct tc_nssblackhole_qopt {
+ __u8 set_default; /* Sets qdisc to be the default qdisc for enqueue */
+ __u8 accel_mode; /* Dictates which data plane offloads the qdisc */
+};
+/* QCA NSS Clients Support - End */
#endif
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -313,6 +313,7 @@ struct Qdisc *qdisc_lookup(struct net_de
out:
return q;
}
+EXPORT_SYMBOL(qdisc_lookup);
struct Qdisc *qdisc_lookup_rcu(struct net_device *dev, u32 handle)
{
@@ -2386,4 +2387,26 @@ static int __init pktsched_init(void)
return 0;
}
+/* QCA NSS Qdisc Support - Start */
+bool tcf_destroy(struct tcf_proto *tp, bool force)
+{
+ tp->ops->destroy(tp, force, NULL);
+ module_put(tp->ops->owner);
+ kfree_rcu(tp, rcu);
+
+ return true;
+}
+
+void tcf_destroy_chain(struct tcf_proto __rcu **fl)
+{
+ struct tcf_proto *tp;
+
+ while ((tp = rtnl_dereference(*fl)) != NULL) {
+ RCU_INIT_POINTER(*fl, tp->next);
+ tcf_destroy(tp, true);
+ }
+}
+EXPORT_SYMBOL(tcf_destroy_chain);
+/* QCA NSS Qdisc Support - End */
+
subsys_initcall(pktsched_init);
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -1069,6 +1069,7 @@ static void __qdisc_destroy(struct Qdisc
call_rcu(&qdisc->rcu, qdisc_free_cb);
}
+EXPORT_SYMBOL(qdisc_destroy);
void qdisc_destroy(struct Qdisc *qdisc)
{
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -94,6 +94,7 @@ struct Qdisc {
#define TCQ_F_INVISIBLE 0x80 /* invisible by default in dump */
#define TCQ_F_NOLOCK 0x100 /* qdisc does not require locking */
#define TCQ_F_OFFLOADED 0x200 /* qdisc is offloaded to HW */
+#define TCQ_F_NSS 0x1000 /* NSS qdisc flag. */
u32 limit;
const struct Qdisc_ops *ops;
struct qdisc_size_table __rcu *stab;
@@ -719,6 +720,40 @@ static inline bool skb_skip_tc_classify(
return false;
}
+/*
+ * Set skb classify bit field.
+ */
+static inline void skb_set_tc_classify_offload(struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ skb->tc_skip_classify_offload = 1;
+#endif
+}
+
+/*
+ * Clear skb classify bit field.
+ */
+static inline void skb_clear_tc_classify_offload(struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ skb->tc_skip_classify_offload = 0;
+#endif
+}
+
+/*
+ * Skip skb processing if sent from ifb dev.
+ */
+static inline bool skb_skip_tc_classify_offload(struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ if (skb->tc_skip_classify_offload) {
+ skb_clear_tc_classify_offload(skb);
+ return true;
+ }
+#endif
+ return false;
+}
+
/* Reset all TX qdiscs greater than index of a device. */
static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
{
@@ -1305,4 +1340,9 @@ static inline void qdisc_synchronize(con
msleep(1);
}
+/* QCA NSS Qdisc Support - Start */
+void qdisc_destroy(struct Qdisc *qdisc);
+void tcf_destroy_chain(struct tcf_proto __rcu **fl);
+/* QCA NSS Qdisc Support - End */
+
#endif
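Note: ifb_update_offload_stats() lets an accelerator account packets that bypassed the IFB device's own enqueue path, so tc statistics stay coherent with the offloaded flows. The following periodic-sync sketch is illustrative and the delta values are hypothetical.

static void example_sync_ifb_stats(struct net_device *ifb_dev)
{
	struct pcpu_sw_netstats delta = { };

	if (!netif_is_ifb_dev(ifb_dev))
		return;

	/* Counters gathered from the accelerator since the last sync. */
	u64_stats_set(&delta.rx_packets, 100);
	u64_stats_set(&delta.rx_bytes, 150000);

	ifb_update_offload_stats(ifb_dev, &delta);
}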

View file

@@ -0,0 +1,46 @@
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -398,6 +398,31 @@ err_tlock:
}
EXPORT_SYMBOL_GPL(l2tp_session_register);
+void l2tp_stats_update(struct l2tp_tunnel *tunnel,
+ struct l2tp_session *session,
+ struct l2tp_stats *stats)
+{
+ atomic_long_add(atomic_long_read(&stats->rx_packets),
+ &tunnel->stats.rx_packets);
+ atomic_long_add(atomic_long_read(&stats->rx_bytes),
+ &tunnel->stats.rx_bytes);
+ atomic_long_add(atomic_long_read(&stats->tx_packets),
+ &tunnel->stats.tx_packets);
+ atomic_long_add(atomic_long_read(&stats->tx_bytes),
+ &tunnel->stats.tx_bytes);
+
+ atomic_long_add(atomic_long_read(&stats->rx_packets),
+ &session->stats.rx_packets);
+ atomic_long_add(atomic_long_read(&stats->rx_bytes),
+ &session->stats.rx_bytes);
+ atomic_long_add(atomic_long_read(&stats->tx_packets),
+ &session->stats.tx_packets);
+ atomic_long_add(atomic_long_read(&stats->tx_bytes),
+ &session->stats.tx_bytes);
+}
+EXPORT_SYMBOL_GPL(l2tp_stats_update);
+
+
/*****************************************************************************
* Receive data handling
*****************************************************************************/
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -232,6 +232,9 @@ struct l2tp_session *l2tp_session_get_nt
struct l2tp_session *l2tp_session_get_by_ifname(const struct net *net,
const char *ifname);
+void l2tp_stats_update(struct l2tp_tunnel *tunnel, struct l2tp_session *session,
+ struct l2tp_stats *stats);
+
/* Tunnel and session lifetime management.
* Creation of a new instance is a two-step process: create, then register.
* Destruction is triggered using the *_delete functions, and completes asynchronously.

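Note: l2tp_stats_update() adds a caller-supplied delta to both the tunnel and the session counters, which is how flows accelerated outside the stack remain visible in the usual L2TP statistics. A short sketch; the counter values are hypothetical.

static void example_sync_l2tp_stats(struct l2tp_tunnel *tunnel,
				    struct l2tp_session *session)
{
	struct l2tp_stats delta = { };

	/* Packets and bytes forwarded by the accelerator since last sync. */
	atomic_long_set(&delta.rx_packets, 100);
	atomic_long_set(&delta.rx_bytes, 150000);
	atomic_long_set(&delta.tx_packets, 80);
	atomic_long_set(&delta.tx_bytes, 120000);

	l2tp_stats_update(tunnel, session, &delta);
}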
View file

@@ -0,0 +1,478 @@
--- a/include/linux/if_pppox.h
+++ b/include/linux/if_pppox.h
@@ -36,6 +36,7 @@ struct pptp_opt {
u32 ack_sent, ack_recv;
u32 seq_sent, seq_recv;
int ppp_flags;
+ bool pptp_offload_mode;
};
#include <net/sock.h>
@@ -100,8 +101,40 @@ struct pppoe_channel_ops {
int (*get_addressing)(struct ppp_channel *, struct pppoe_opt *);
};
+/* PPTP client callback */
+typedef int (*pptp_gre_seq_offload_callback_t)(struct sk_buff *skb,
+ struct net_device *pptp_dev);
+
/* Return PPPoE channel specific addressing information */
extern int pppoe_channel_addressing_get(struct ppp_channel *chan,
struct pppoe_opt *addressing);
+/* Lookup PPTP session info and return PPTP session using sip, dip and local call id */
+extern int pptp_session_find_by_src_callid(struct pptp_opt *opt, __be16 src_call_id,
+ __be32 daddr, __be32 saddr);
+
+/* Lookup PPTP session info and return PPTP session using dip and peer call id */
+extern int pptp_session_find(struct pptp_opt *opt, __be16 peer_call_id,
+ __be32 peer_ip_addr);
+
+/* Return PPTP session information given the channel */
+extern void pptp_channel_addressing_get(struct pptp_opt *opt,
+ struct ppp_channel *chan);
+
+/* Enable the PPTP session offload flag */
+extern int pptp_session_enable_offload_mode(__be16 peer_call_id,
+ __be32 peer_ip_addr);
+
+/* Disable the PPTP session offload flag */
+extern int pptp_session_disable_offload_mode(__be16 peer_call_id,
+ __be32 peer_ip_addr);
+
+/* Register the PPTP GRE packets sequence number offload callback */
+extern int
+pptp_register_gre_seq_offload_callback(pptp_gre_seq_offload_callback_t
+ pptp_client_cb);
+
+/* Unregister the PPTP GRE packets sequence number offload callback */
+extern void pptp_unregister_gre_seq_offload_callback(void);
+
#endif /* !(__LINUX_IF_PPPOX_H) */
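Note: taken together, the declarations above form the contract between the PPTP driver and a GRE sequence-number offload module: the module registers one transmit callback and then flips the per-session offload flag. The sketch below is illustrative; the callback body and the call-id/IP parameters are hypothetical.

static int example_pptp_gre_xmit(struct sk_buff *skb,
				 struct net_device *pptp_dev)
{
	/* A real implementation stamps seq/ack numbers and hands the
	 * packet to the accelerator; here we simply drop it.
	 */
	dev_kfree_skb_any(skb);
	return 0;
}

static int example_pptp_offload_attach(__be16 peer_call_id, __be32 peer_ip)
{
	int ret;

	ret = pptp_register_gre_seq_offload_callback(example_pptp_gre_xmit);
	if (ret < 0)
		return ret;

	return pptp_session_enable_offload_mode(peer_call_id, peer_ip);
}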
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -2973,6 +2973,20 @@ char *ppp_dev_name(struct ppp_channel *c
return name;
}
+/* Return the PPP net device index */
+int ppp_dev_index(struct ppp_channel *chan)
+{
+ struct channel *pch = chan->ppp;
+ int ifindex = 0;
+
+ if (pch) {
+ read_lock_bh(&pch->upl);
+ if (pch->ppp && pch->ppp->dev)
+ ifindex = pch->ppp->dev->ifindex;
+ read_unlock_bh(&pch->upl);
+ }
+ return ifindex;
+}
/*
* Disconnect a channel from the generic layer.
@@ -3681,6 +3695,28 @@ void ppp_update_stats(struct net_device
ppp_recv_unlock(ppp);
}
+/* Returns true if compression is enabled on the PPP device
+ */
+bool ppp_is_cp_enabled(struct net_device *dev)
+{
+ struct ppp *ppp;
+ bool flag = false;
+
+ if (!dev)
+ return false;
+
+ if (dev->type != ARPHRD_PPP)
+ return false;
+
+ ppp = netdev_priv(dev);
+ ppp_lock(ppp);
+ flag = !!(ppp->xstate & SC_COMP_RUN) || !!(ppp->rstate & SC_DECOMP_RUN);
+ ppp_unlock(ppp);
+
+ return flag;
+}
+EXPORT_SYMBOL(ppp_is_cp_enabled);
+
/* Returns >0 if the device is a multilink PPP netdevice, 0 if not or < 0 if
* the device is not PPP.
*/
@@ -3872,6 +3908,7 @@ EXPORT_SYMBOL(ppp_unregister_channel);
EXPORT_SYMBOL(ppp_channel_index);
EXPORT_SYMBOL(ppp_unit_number);
EXPORT_SYMBOL(ppp_dev_name);
+EXPORT_SYMBOL(ppp_dev_index);
EXPORT_SYMBOL(ppp_input);
EXPORT_SYMBOL(ppp_input_error);
EXPORT_SYMBOL(ppp_output_wakeup);
--- a/include/linux/ppp_channel.h
+++ b/include/linux/ppp_channel.h
@@ -84,6 +84,9 @@ extern void ppp_unregister_channel(struc
/* Get the channel number for a channel */
extern int ppp_channel_index(struct ppp_channel *);
+/* Get the device index associated with a channel, or 0, if none */
+extern int ppp_dev_index(struct ppp_channel *);
+
/* Get the unit number associated with a channel, or -1 if none */
extern int ppp_unit_number(struct ppp_channel *);
@@ -116,6 +119,7 @@ extern int ppp_hold_channels(struct net_
/* Test if ppp xmit lock is locked */
extern bool ppp_is_xmit_locked(struct net_device *dev);
+bool ppp_is_cp_enabled(struct net_device *dev);
/* Test if the ppp device is a multi-link ppp device */
extern int ppp_is_multilink(struct net_device *dev);
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -50,6 +50,8 @@ static struct proto pptp_sk_proto __read
static const struct ppp_channel_ops pptp_chan_ops;
static const struct proto_ops pptp_ops;
+static pptp_gre_seq_offload_callback_t __rcu pptp_gre_offload_xmit_cb;
+
static struct pppox_sock *lookup_chan(u16 call_id, __be32 s_addr)
{
struct pppox_sock *sock;
@@ -91,6 +93,79 @@ static int lookup_chan_dst(u16 call_id,
return i < MAX_CALLID;
}
+/* Search a pptp session based on local call id, local and remote ip address */
+static int lookup_session_src(struct pptp_opt *opt, u16 call_id, __be32 daddr, __be32 saddr)
+{
+ struct pppox_sock *sock;
+ int i = 1;
+
+ rcu_read_lock();
+ for_each_set_bit_from(i, callid_bitmap, MAX_CALLID) {
+ sock = rcu_dereference(callid_sock[i]);
+ if (!sock)
+ continue;
+
+ if (sock->proto.pptp.src_addr.call_id == call_id &&
+ sock->proto.pptp.dst_addr.sin_addr.s_addr == daddr &&
+ sock->proto.pptp.src_addr.sin_addr.s_addr == saddr) {
+ sock_hold(sk_pppox(sock));
+ memcpy(opt, &sock->proto.pptp, sizeof(struct pptp_opt));
+ sock_put(sk_pppox(sock));
+ rcu_read_unlock();
+ return 0;
+ }
+ }
+ rcu_read_unlock();
+ return -EINVAL;
+}
+
+/* Search a pptp session based on peer call id and peer ip address */
+static int lookup_session_dst(struct pptp_opt *opt, u16 call_id, __be32 d_addr)
+{
+ struct pppox_sock *sock;
+ int i = 1;
+
+ rcu_read_lock();
+ for_each_set_bit_from(i, callid_bitmap, MAX_CALLID) {
+ sock = rcu_dereference(callid_sock[i]);
+ if (!sock)
+ continue;
+
+ if (sock->proto.pptp.dst_addr.call_id == call_id &&
+ sock->proto.pptp.dst_addr.sin_addr.s_addr == d_addr) {
+ sock_hold(sk_pppox(sock));
+ memcpy(opt, &sock->proto.pptp, sizeof(struct pptp_opt));
+ sock_put(sk_pppox(sock));
+ rcu_read_unlock();
+ return 0;
+ }
+ }
+ rcu_read_unlock();
+ return -EINVAL;
+}
+
+/* If offload mode is set then this function sends all packets to the
+ * offload module instead of the network stack
+ */
+static int pptp_client_skb_xmit(struct sk_buff *skb,
+ struct net_device *pptp_dev)
+{
+ pptp_gre_seq_offload_callback_t pptp_gre_offload_cb_f;
+ int ret;
+
+ rcu_read_lock();
+ pptp_gre_offload_cb_f = rcu_dereference(pptp_gre_offload_xmit_cb);
+
+ if (!pptp_gre_offload_cb_f) {
+ rcu_read_unlock();
+ return -1;
+ }
+
+ ret = pptp_gre_offload_cb_f(skb, pptp_dev);
+ rcu_read_unlock();
+ return ret;
+}
+
static int add_chan(struct pppox_sock *sock,
struct pptp_addr *sa)
{
@@ -136,7 +211,7 @@ static struct rtable *pptp_route_output(
struct net *net;
net = sock_net(sk);
- flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark, 0,
+ flowi4_init_output(fl4, 0, sk->sk_mark, 0,
RT_SCOPE_UNIVERSE, IPPROTO_GRE, 0,
po->proto.pptp.dst_addr.sin_addr.s_addr,
po->proto.pptp.src_addr.sin_addr.s_addr,
@@ -163,8 +238,11 @@ static int pptp_xmit(struct ppp_channel
struct rtable *rt;
struct net_device *tdev;
+ struct net_device *pptp_dev;
struct iphdr *iph;
int max_headroom;
+ int pptp_ifindex;
+ int ret;
if (sk_pppox(po)->sk_state & PPPOX_DEAD)
goto tx_error;
@@ -258,7 +336,32 @@ static int pptp_xmit(struct ppp_channel
ip_select_ident(net, skb, NULL);
ip_send_check(iph);
- ip_local_out(net, skb->sk, skb);
+ pptp_ifindex = ppp_dev_index(chan);
+
+ /* set incoming interface as the ppp interface */
+ if (skb->skb_iif)
+ skb->skb_iif = pptp_ifindex;
+
+ /* If the PPTP GRE seq number offload module is not enabled yet,
+ * then send all PPTP GRE packets through the Linux network stack
+ */
+ if (!opt->pptp_offload_mode) {
+ ip_local_out(net, skb->sk, skb);
+ return 1;
+ }
+
+ pptp_dev = dev_get_by_index(&init_net, pptp_ifindex);
+ if (!pptp_dev)
+ goto tx_error;
+
+ /* If PPTP offload module is enabled then forward all PPTP GRE
+ * packets to PPTP GRE offload module
+ */
+ ret = pptp_client_skb_xmit(skb, pptp_dev);
+ dev_put(pptp_dev);
+ if (ret < 0)
+ goto tx_error;
+
return 1;
tx_error:
@@ -314,6 +417,13 @@ static int pptp_rcv_core(struct sock *sk
goto drop;
payload = skb->data + headersize;
+
+ /* If offload is enabled, we expect the offload module
+ * to handle PPTP GRE sequence number checks
+ */
+ if (opt->pptp_offload_mode)
+ goto allow_packet;
+
/* check for expected sequence number */
if (seq < opt->seq_recv + 1 || WRAPPED(opt->seq_recv, seq)) {
if ((payload[0] == PPP_ALLSTATIONS) && (payload[1] == PPP_UI) &&
@@ -371,6 +481,7 @@ static int pptp_rcv(struct sk_buff *skb)
if (po) {
skb_dst_drop(skb);
nf_reset_ct(skb);
+ skb->skb_iif = ppp_dev_index(&po->chan);
return sk_receive_skb(sk_pppox(po), skb, 0);
}
drop:
@@ -473,7 +584,7 @@ static int pptp_connect(struct socket *s
opt->dst_addr = sp->sa_addr.pptp;
sk->sk_state |= PPPOX_CONNECTED;
-
+ opt->pptp_offload_mode = false;
end:
release_sock(sk);
return error;
@@ -603,9 +714,169 @@ static int pptp_ppp_ioctl(struct ppp_cha
return err;
}
+/* pptp_channel_addressing_get()
+ * Return PPTP channel specific addressing information.
+ */
+void pptp_channel_addressing_get(struct pptp_opt *opt, struct ppp_channel *chan)
+{
+ struct sock *sk;
+ struct pppox_sock *po;
+
+ if (!opt)
+ return;
+
+ sk = (struct sock *)chan->private;
+ if (!sk)
+ return;
+
+ sock_hold(sk);
+
+ /* This is very unlikely, but check that the socket is in a connected state */
+ if (unlikely(sock_flag(sk, SOCK_DEAD) ||
+ !(sk->sk_state & PPPOX_CONNECTED))) {
+ sock_put(sk);
+ return;
+ }
+
+ po = pppox_sk(sk);
+ memcpy(opt, &po->proto.pptp, sizeof(struct pptp_opt));
+ sock_put(sk);
+}
+EXPORT_SYMBOL(pptp_channel_addressing_get);
+
+/* pptp_session_find()
+ * Search and return a PPTP session info based on peer callid and IP
+ * address. The function accepts the parameters in network byte order.
+ */
+int pptp_session_find(struct pptp_opt *opt, __be16 peer_call_id,
+ __be32 peer_ip_addr)
+{
+ if (!opt)
+ return -EINVAL;
+
+ return lookup_session_dst(opt, ntohs(peer_call_id), peer_ip_addr);
+}
+EXPORT_SYMBOL(pptp_session_find);
+
+/* pptp_session_find_by_src_callid()
+ * Search and return a PPTP session info based on src callid and IP
+ * address. The function accepts the parameters in network byte order.
+ */
+int pptp_session_find_by_src_callid(struct pptp_opt *opt, __be16 src_call_id,
+ __be32 daddr, __be32 saddr)
+{
+ if (!opt)
+ return -EINVAL;
+
+ return lookup_session_src(opt, ntohs(src_call_id), daddr, saddr);
+}
+EXPORT_SYMBOL(pptp_session_find_by_src_callid);
+
+/* Function to change the offload mode (true/false) for a PPTP session */
+static int pptp_set_offload_mode(bool accel_mode,
+ __be16 peer_call_id, __be32 peer_ip_addr)
+{
+ struct pppox_sock *sock;
+ int i = 1;
+
+ rcu_read_lock();
+ for_each_set_bit_from(i, callid_bitmap, MAX_CALLID) {
+ sock = rcu_dereference(callid_sock[i]);
+ if (!sock)
+ continue;
+
+ if (sock->proto.pptp.dst_addr.call_id == peer_call_id &&
+ sock->proto.pptp.dst_addr.sin_addr.s_addr == peer_ip_addr) {
+ sock_hold(sk_pppox(sock));
+ sock->proto.pptp.pptp_offload_mode = accel_mode;
+ sock_put(sk_pppox(sock));
+ rcu_read_unlock();
+ return 0;
+ }
+ }
+ rcu_read_unlock();
+ return -EINVAL;
+}
+
+/* Enable the PPTP session offload flag */
+int pptp_session_enable_offload_mode(__be16 peer_call_id, __be32 peer_ip_addr)
+{
+ return pptp_set_offload_mode(true, peer_call_id, peer_ip_addr);
+}
+EXPORT_SYMBOL(pptp_session_enable_offload_mode);
+
+/* Disable the PPTP session offload flag */
+int pptp_session_disable_offload_mode(__be16 peer_call_id, __be32 peer_ip_addr)
+{
+ return pptp_set_offload_mode(false, peer_call_id, peer_ip_addr);
+}
+EXPORT_SYMBOL(pptp_session_disable_offload_mode);
+
+/* Register the offload callback function on behalf of the module which
+ * will own the sequence and acknowledgment number updates for all
+ * PPTP GRE packets. All PPTP GRE packets are then transmitted to this
+ * module after encapsulation in order to ensure the correct seq/ack
+ * fields are set in the packets before transmission. This is required
+ * when PPTP flows are offloaded to acceleration engines, in order to
+ * ensure consistency in sequence and ack numbers between PPTP control
+ * (PPP LCP) and data packets.
+ */
+int pptp_register_gre_seq_offload_callback(pptp_gre_seq_offload_callback_t
+ pptp_gre_offload_cb)
+{
+ pptp_gre_seq_offload_callback_t pptp_gre_offload_cb_f;
+
+ rcu_read_lock();
+ pptp_gre_offload_cb_f = rcu_dereference(pptp_gre_offload_xmit_cb);
+
+ if (pptp_gre_offload_cb_f) {
+ rcu_read_unlock();
+ return -1;
+ }
+
+ rcu_assign_pointer(pptp_gre_offload_xmit_cb, pptp_gre_offload_cb);
+ rcu_read_unlock();
+ return 0;
+}
+EXPORT_SYMBOL(pptp_register_gre_seq_offload_callback);
+
+/* Unregister the PPTP GRE packets sequence number offload callback */
+void pptp_unregister_gre_seq_offload_callback(void)
+{
+ rcu_assign_pointer(pptp_gre_offload_xmit_cb, NULL);
+}
+EXPORT_SYMBOL(pptp_unregister_gre_seq_offload_callback);
+
+/* pptp_hold_chan() */
+static void pptp_hold_chan(struct ppp_channel *chan)
+{
+ struct sock *sk = (struct sock *)chan->private;
+
+ sock_hold(sk);
+}
+
+/* pptp_release_chan() */
+static void pptp_release_chan(struct ppp_channel *chan)
+{
+ struct sock *sk = (struct sock *)chan->private;
+
+ sock_put(sk);
+}
+
+/* pptp_get_channel_protocol()
+ * Return the protocol type of the PPTP over PPP protocol
+ */
+static int pptp_get_channel_protocol(struct ppp_channel *chan)
+{
+ return PX_PROTO_PPTP;
+}
+
static const struct ppp_channel_ops pptp_chan_ops = {
.start_xmit = pptp_xmit,
.ioctl = pptp_ppp_ioctl,
+ .get_channel_protocol = pptp_get_channel_protocol,
+ .hold = pptp_hold_chan,
+ .release = pptp_release_chan,
};
static struct proto pptp_sk_proto __read_mostly = {
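
A minimal sketch of how an acceleration driver might consume the PPTP hooks exported by this patch; the module and function names are hypothetical, the callback body is a stub rather than a real engine, and the declarations are assumed to be exported through linux/if_pppox.h as in the rest of this patchset.

/* Hypothetical consumer of the PPTP GRE seq offload hooks added above. */
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_pppox.h>

/* Receives every encapsulated PPTP GRE skb once a session is in offload
 * mode; a real engine would fix up the GRE seq/ack numbers and transmit.
 * Here the packet is simply freed so the sketch does not leak memory.
 */
static int my_pptp_gre_xmit(struct sk_buff *skb, struct net_device *pptp_dev)
{
	kfree_skb(skb);
	return 0;
}

static int __init my_pptp_accel_init(void)
{
	/* Only one owner of the callback is allowed at a time. */
	if (pptp_register_gre_seq_offload_callback(my_pptp_gre_xmit))
		return -EBUSY;
	return 0;
}

static void __exit my_pptp_accel_exit(void)
{
	pptp_unregister_gre_seq_offload_callback();
}

module_init(my_pptp_accel_init);
module_exit(my_pptp_accel_exit);
MODULE_LICENSE("GPL");

Once a flow has been programmed into the engine, the individual session would be switched over with pptp_session_enable_offload_mode(peer_call_id, peer_ip_addr) and switched back with the matching disable call.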

View file

@@ -0,0 +1,77 @@
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -36,6 +36,7 @@ struct __ip6_tnl_parm {
__u8 proto; /* tunnel protocol */
__u8 encap_limit; /* encapsulation limit for tunnel */
__u8 hop_limit; /* hop limit for tunnel */
+ __u8 draft03; /* FMR using draft03 of map-e - QCA NSS Clients Support */
bool collect_md;
__be32 flowinfo; /* traffic class and flowlabel for tunnel */
__u32 flags; /* tunnel flags */
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -553,4 +553,9 @@ static inline void ip_tunnel_info_opts_s
#endif /* CONFIG_INET */
+/* QCA NSS Clients Support - Start */
+void ipip6_update_offload_stats(struct net_device *dev, void *ptr);
+void ip6_update_offload_stats(struct net_device *dev, void *ptr);
+/* QCA NSS Clients Support - End */
+
#endif /* __NET_IP_TUNNELS_H */
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -2398,6 +2398,26 @@ nla_put_failure:
return -EMSGSIZE;
}
+/* QCA NSS Client Support - Start */
+/*
+ * Update offload stats
+ */
+void ip6_update_offload_stats(struct net_device *dev, void *ptr)
+{
+ struct pcpu_sw_netstats *tstats = per_cpu_ptr(dev->tstats, 0);
+ const struct pcpu_sw_netstats *offload_stats =
+ (struct pcpu_sw_netstats *)ptr;
+
+ u64_stats_update_begin(&tstats->syncp);
+ u64_stats_add(&tstats->tx_packets, u64_stats_read(&offload_stats->tx_packets));
+ u64_stats_add(&tstats->tx_bytes, u64_stats_read(&offload_stats->tx_bytes));
+ u64_stats_add(&tstats->rx_packets, u64_stats_read(&offload_stats->rx_packets));
+ u64_stats_add(&tstats->rx_bytes, u64_stats_read(&offload_stats->rx_bytes));
+ u64_stats_update_end(&tstats->syncp);
+}
+EXPORT_SYMBOL(ip6_update_offload_stats);
+/* QCA NSS Client Support - End */
+
struct net *ip6_tnl_get_link_net(const struct net_device *dev)
{
struct ip6_tnl *tunnel = netdev_priv(dev);
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -1733,6 +1733,23 @@ nla_put_failure:
return -EMSGSIZE;
}
+/* QCA NSS Clients Support - Start */
+void ipip6_update_offload_stats(struct net_device *dev, void *ptr)
+{
+ struct pcpu_sw_netstats *tstats = per_cpu_ptr(dev->tstats, 0);
+ const struct pcpu_sw_netstats *offload_stats =
+ (struct pcpu_sw_netstats *)ptr;
+
+ u64_stats_update_begin(&tstats->syncp);
+ u64_stats_add(&tstats->tx_packets, u64_stats_read(&offload_stats->tx_packets));
+ u64_stats_add(&tstats->tx_bytes, u64_stats_read(&offload_stats->tx_bytes));
+ u64_stats_add(&tstats->rx_packets, u64_stats_read(&offload_stats->rx_packets));
+ u64_stats_add(&tstats->rx_bytes, u64_stats_read(&offload_stats->rx_bytes));
+ u64_stats_update_end(&tstats->syncp);
+}
+EXPORT_SYMBOL(ipip6_update_offload_stats);
+/* QCA NSS Clients Support - End */
+
static const struct nla_policy ipip6_policy[IFLA_IPTUN_MAX + 1] = {
[IFLA_IPTUN_LINK] = { .type = NLA_U32 },
[IFLA_IPTUN_LOCAL] = { .type = NLA_U32 },
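
The two exported helpers exist so an offload engine can fold its hardware counters back into the tunnel netdevice. A minimal sketch of a caller, with hypothetical names and values, might look like the following; ip6_update_offload_stats() would be used the same way for ip6_tnl devices.

/* Illustrative only: push accumulated hardware counters for one
 * offloaded ipip6 (sit) flow back into the tunnel device stats.
 */
#include <linux/netdevice.h>
#include <linux/u64_stats_sync.h>
#include <net/ip_tunnels.h>

static void my_sync_tunnel_stats(struct net_device *tun_dev,
				 u64 rx_pkts, u64 rx_bytes,
				 u64 tx_pkts, u64 tx_bytes)
{
	struct pcpu_sw_netstats delta = { };

	u64_stats_set(&delta.rx_packets, rx_pkts);
	u64_stats_set(&delta.rx_bytes, rx_bytes);
	u64_stats_set(&delta.tx_packets, tx_pkts);
	u64_stats_set(&delta.tx_bytes, tx_bytes);

	/* The helper exported above adds the delta to dev->tstats. */
	ipip6_update_offload_stats(tun_dev, &delta);
}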

View file

@@ -0,0 +1,103 @@
--- a/drivers/net/vxlan/vxlan_core.c
+++ b/drivers/net/vxlan/vxlan_core.c
@@ -71,6 +71,20 @@ static inline bool vxlan_collect_metadat
ip_tunnel_collect_metadata();
}
+ATOMIC_NOTIFIER_HEAD(vxlan_fdb_notifier_list);
+
+void vxlan_fdb_register_notify(struct notifier_block *nb)
+{
+ atomic_notifier_chain_register(&vxlan_fdb_notifier_list, nb);
+}
+EXPORT_SYMBOL(vxlan_fdb_register_notify);
+
+void vxlan_fdb_unregister_notify(struct notifier_block *nb)
+{
+ atomic_notifier_chain_unregister(&vxlan_fdb_notifier_list, nb);
+}
+EXPORT_SYMBOL(vxlan_fdb_unregister_notify);
+
#if IS_ENABLED(CONFIG_IPV6)
static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla)
{
@@ -307,6 +321,7 @@ static void __vxlan_fdb_notify(struct vx
{
struct net *net = dev_net(vxlan->dev);
struct sk_buff *skb;
+ struct vxlan_fdb_event vfe;
int err = -ENOBUFS;
skb = nlmsg_new(vxlan_nlmsg_size(), GFP_ATOMIC);
@@ -322,6 +337,10 @@ static void __vxlan_fdb_notify(struct vx
}
rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
+ vfe.dev = vxlan->dev;
+ vfe.rdst = rd;
+ ether_addr_copy(vfe.eth_addr, fdb->eth_addr);
+ atomic_notifier_call_chain(&vxlan_fdb_notifier_list, type, (void *)&vfe);
return;
errout:
if (err < 0)
@@ -488,6 +507,18 @@ static struct vxlan_fdb *vxlan_find_mac(
return f;
}
+/* Find and update age of fdb entry corresponding to MAC. */
+void vxlan_fdb_update_mac(struct vxlan_dev *vxlan, const u8 *mac, uint32_t vni)
+{
+ u32 hash_index;
+
+ hash_index = fdb_head_index(vxlan, mac, vni);
+ spin_lock_bh(&vxlan->hash_lock[hash_index]);
+ vxlan_find_mac(vxlan, mac, vni);
+ spin_unlock_bh(&vxlan->hash_lock[hash_index]);
+}
+EXPORT_SYMBOL(vxlan_fdb_update_mac);
+
/* caller should hold vxlan->hash_lock */
static struct vxlan_rdst *vxlan_fdb_find_rdst(struct vxlan_fdb *f,
union vxlan_addr *ip, __be16 port,
@@ -2658,6 +2689,9 @@ static void vxlan_xmit_one(struct sk_buf
goto out_unlock;
}
+ /* Reset the skb_iif to the tunnel's interface index */
+ skb->skb_iif = dev->ifindex;
+
tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
err = vxlan_build_skb(skb, ndst, sizeof(struct iphdr),
@@ -2729,6 +2763,9 @@ static void vxlan_xmit_one(struct sk_buf
if (err < 0)
goto tx_error;
+ /* Reset the skb_iif to the tunnel's interface index */
+ skb->skb_iif = dev->ifindex;
+
udp_tunnel6_xmit_skb(ndst, sock6->sock->sk, skb, dev,
&local_ip.sin6.sin6_addr,
&dst->sin6.sin6_addr, tos, ttl,
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -344,6 +344,19 @@ struct vxlan_dev {
VXLAN_F_COLLECT_METADATA | \
VXLAN_F_VNIFILTER)
+/*
+ * Application data for fdb notifier event
+ */
+struct vxlan_fdb_event {
+ struct net_device *dev;
+ struct vxlan_rdst *rdst;
+ u8 eth_addr[ETH_ALEN];
+};
+
+extern void vxlan_fdb_register_notify(struct notifier_block *nb);
+extern void vxlan_fdb_unregister_notify(struct notifier_block *nb);
+extern void vxlan_fdb_update_mac(struct vxlan_dev *vxlan, const u8 *mac, uint32_t vni);
+
struct net_device *vxlan_dev_create(struct net *net, const char *name,
u8 name_assign_type, struct vxlan_config *conf);
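
A hedged sketch of a module registering for the VXLAN FDB notifications added above; the handler name is hypothetical, and only the vxlan_fdb_* symbols and struct vxlan_fdb_event come from this patch.

/* Illustrative consumer of the VXLAN FDB notifier chain added above. */
#include <linux/notifier.h>
#include <linux/rtnetlink.h>
#include <linux/netdevice.h>
#include <net/vxlan.h>

static int my_vxlan_fdb_event(struct notifier_block *nb,
			      unsigned long event, void *data)
{
	struct vxlan_fdb_event *vfe = data;

	/* "event" is the rtnetlink type passed by __vxlan_fdb_notify(). */
	pr_debug("vxlan %s: fdb %pM %s\n", vfe->dev->name, vfe->eth_addr,
		 event == RTM_NEWNEIGH ? "added/updated" : "removed");
	return NOTIFY_DONE;
}

static struct notifier_block my_vxlan_fdb_nb = {
	.notifier_call = my_vxlan_fdb_event,
};

/* At module init/exit time:
 *	vxlan_fdb_register_notify(&my_vxlan_fdb_nb);
 *	vxlan_fdb_unregister_notify(&my_vxlan_fdb_nb);
 */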

View file

@@ -0,0 +1,368 @@
--- a/include/linux/ppp_channel.h
+++ b/include/linux/ppp_channel.h
@@ -61,6 +61,51 @@ struct ppp_channel {
};
#ifdef __KERNEL__
+/* Call this to obtain the underlying protocol of the PPP channel,
+ * e.g. PX_PROTO_OE
+ */
+extern int ppp_channel_get_protocol(struct ppp_channel *);
+
+/* Call this to hold a channel */
+extern bool ppp_channel_hold(struct ppp_channel *);
+
+/* Call this to release a hold you have upon a channel */
+extern void ppp_channel_release(struct ppp_channel *);
+
+/* Release hold on PPP channels */
+extern void ppp_release_channels(struct ppp_channel *channels[],
+ unsigned int chan_sz);
+
+/* Test if ppp xmit lock is locked */
+extern bool ppp_is_xmit_locked(struct net_device *dev);
+
+/* Call this get protocol version */
+extern int ppp_channel_get_proto_version(struct ppp_channel *);
+
+/* Get the device index associated with a channel, or 0, if none */
+extern int ppp_dev_index(struct ppp_channel *);
+
+/* Hold PPP channels for the PPP device */
+extern int ppp_hold_channels(struct net_device *dev,
+ struct ppp_channel *channels[],
+ unsigned int chan_sz);
+extern int __ppp_hold_channels(struct net_device *dev,
+ struct ppp_channel *channels[],
+ unsigned int chan_sz);
+
+/* Test if the ppp device is a multi-link ppp device */
+extern int ppp_is_multilink(struct net_device *dev);
+extern int __ppp_is_multilink(struct net_device *dev);
+
+/* Update the statistics of the PPP net_device by incrementing each
+ * related statistics field by the corresponding parameter value
+ */
+extern void ppp_update_stats(struct net_device *dev, unsigned long rx_packets,
+ unsigned long rx_bytes, unsigned long tx_packets,
+ unsigned long tx_bytes, unsigned long rx_errors,
+ unsigned long tx_errors, unsigned long rx_dropped,
+ unsigned long tx_dropped);
+
/* Called by the channel when it can send some more data. */
extern void ppp_output_wakeup(struct ppp_channel *);
@@ -148,5 +193,17 @@ extern void ppp_update_stats(struct net_
* that ppp_unregister_channel returns.
*/
+/* QCA NSS Clients Support - Start */
+/* PPP channel connection event types */
+#define PPP_CHANNEL_DISCONNECT 0
+#define PPP_CHANNEL_CONNECT 1
+
+/* Register the PPP channel connect notifier */
+extern void ppp_channel_connection_register_notify(struct notifier_block *nb);
+
+/* Unregister the PPP channel connect notifier */
+extern void ppp_channel_connection_unregister_notify(struct notifier_block *nb);
+/* QCA NSS Clients Support - End */
+
#endif /* __KERNEL__ */
#endif
--- a/include/linux/if_pppol2tp.h
+++ b/include/linux/if_pppol2tp.h
@@ -12,4 +12,30 @@
#include <linux/in6.h>
#include <uapi/linux/if_pppol2tp.h>
+/* QCA NSS ECM support - Start */
+/*
+ * Holds L2TP channel info
+ */
+struct pppol2tp_common_addr {
+ int tunnel_version; /* v2 or v3 */
+ __u32 local_tunnel_id, remote_tunnel_id; /* tunnel id */
+ __u32 local_session_id, remote_session_id; /* session id */
+ struct sockaddr_in local_addr, remote_addr; /* ip address and port */
+};
+
+/*
+ * L2TP channel operations
+ */
+struct pppol2tp_channel_ops {
+ struct ppp_channel_ops ops; /* ppp channel ops */
+};
+
+/*
+ * exported function which calls pppol2tp channel's get addressing
+ * function
+ */
+extern int pppol2tp_channel_addressing_get(struct ppp_channel *,
+ struct pppol2tp_common_addr *);
+/* QCA NSS ECM support - End */
+
#endif
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -123,9 +123,17 @@ struct pppol2tp_session {
};
static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb);
-
-static const struct ppp_channel_ops pppol2tp_chan_ops = {
- .start_xmit = pppol2tp_xmit,
+static int pppol2tp_get_channel_protocol(struct ppp_channel *);
+static int pppol2tp_get_channel_protocol_ver(struct ppp_channel *);
+static void pppol2tp_hold_chan(struct ppp_channel *);
+static void pppol2tp_release_chan(struct ppp_channel *);
+
+static const struct pppol2tp_channel_ops pppol2tp_chan_ops = {
+ .ops.start_xmit = pppol2tp_xmit,
+ .ops.get_channel_protocol = pppol2tp_get_channel_protocol,
+ .ops.get_channel_protocol_ver = pppol2tp_get_channel_protocol_ver,
+ .ops.hold = pppol2tp_hold_chan,
+ .ops.release = pppol2tp_release_chan,
};
static const struct proto_ops pppol2tp_ops;
@@ -373,6 +381,13 @@ static int pppol2tp_xmit(struct ppp_chan
skb->data[0] = PPP_ALLSTATIONS;
skb->data[1] = PPP_UI;
+ /* QCA NSS ECM support - start */
+ /* set incoming interface as the ppp interface */
+ if ((skb->protocol == htons(ETH_P_IP)) ||
+ (skb->protocol == htons(ETH_P_IPV6)))
+ skb->skb_iif = ppp_dev_index(chan);
+ /* QCA NSS ECM support - End */
+
local_bh_disable();
l2tp_xmit_skb(session, skb);
local_bh_enable();
@@ -818,7 +833,7 @@ static int pppol2tp_connect(struct socke
po->chan.hdrlen = PPPOL2TP_L2TP_HDR_SIZE_NOSEQ;
po->chan.private = sk;
- po->chan.ops = &pppol2tp_chan_ops;
+ po->chan.ops = (struct ppp_channel_ops *)&pppol2tp_chan_ops.ops;
po->chan.mtu = pppol2tp_tunnel_mtu(tunnel);
error = ppp_register_net_channel(sock_net(sk), &po->chan);
@@ -1732,6 +1747,109 @@ static void __exit pppol2tp_exit(void)
unregister_pernet_device(&pppol2tp_net_ops);
}
+/* QCA NSS ECM support - Start */
+/* pppol2tp_hold_chan() */
+static void pppol2tp_hold_chan(struct ppp_channel *chan)
+{
+ struct sock *sk = (struct sock *)chan->private;
+
+ sock_hold(sk);
+}
+
+/* pppol2tp_release_chan() */
+static void pppol2tp_release_chan(struct ppp_channel *chan)
+{
+ struct sock *sk = (struct sock *)chan->private;
+
+ sock_put(sk);
+}
+
+/* pppol2tp_get_channel_protocol()
+ * Return the protocol type of the L2TP over PPP protocol
+ */
+static int pppol2tp_get_channel_protocol(struct ppp_channel *chan)
+{
+ return PX_PROTO_OL2TP;
+}
+
+/* pppol2tp_get_channel_protocol_ver()
+ * Return the protocol version of the L2TP over PPP protocol
+ */
+static int pppol2tp_get_channel_protocol_ver(struct ppp_channel *chan)
+{
+ struct sock *sk;
+ struct l2tp_session *session;
+ struct l2tp_tunnel *tunnel;
+ int version = 0;
+
+ if (chan && chan->private)
+ sk = (struct sock *)chan->private;
+ else
+ return -1;
+
+ /* Get session and tunnel contexts from the socket */
+ session = pppol2tp_sock_to_session(sk);
+ if (!session)
+ return -1;
+
+ tunnel = session->tunnel;
+ if (!tunnel) {
+ sock_put(sk);
+ return -1;
+ }
+
+ version = tunnel->version;
+
+ sock_put(sk);
+
+ return version;
+}
+
+/* pppol2tp_get_addressing() */
+static int pppol2tp_get_addressing(struct ppp_channel *chan,
+ struct pppol2tp_common_addr *addr)
+{
+ struct sock *sk = (struct sock *)chan->private;
+ struct l2tp_session *session;
+ struct l2tp_tunnel *tunnel;
+ struct inet_sock *isk = NULL;
+ int err = -ENXIO;
+
+ /* Get session and tunnel contexts from the socket */
+ session = pppol2tp_sock_to_session(sk);
+ if (!session)
+ return err;
+
+ tunnel = session->tunnel;
+ if (!tunnel) {
+ sock_put(sk);
+ return err;
+ }
+ isk = inet_sk(tunnel->sock);
+
+ addr->local_tunnel_id = tunnel->tunnel_id;
+ addr->remote_tunnel_id = tunnel->peer_tunnel_id;
+ addr->local_session_id = session->session_id;
+ addr->remote_session_id = session->peer_session_id;
+
+ addr->local_addr.sin_port = isk->inet_sport;
+ addr->remote_addr.sin_port = isk->inet_dport;
+ addr->local_addr.sin_addr.s_addr = isk->inet_saddr;
+ addr->remote_addr.sin_addr.s_addr = isk->inet_daddr;
+
+ sock_put(sk);
+ return 0;
+}
+
+/* pppol2tp_channel_addressing_get() */
+int pppol2tp_channel_addressing_get(struct ppp_channel *chan,
+ struct pppol2tp_common_addr *addr)
+{
+ return pppol2tp_get_addressing(chan, addr);
+}
+EXPORT_SYMBOL(pppol2tp_channel_addressing_get);
+/* QCA NSS ECM support - End */
+
module_init(pppol2tp_init);
module_exit(pppol2tp_exit);
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -3743,6 +3743,32 @@ int ppp_is_multilink(struct net_device *
}
EXPORT_SYMBOL(ppp_is_multilink);
+/* __ppp_is_multilink()
+ * Returns >0 if the device is a multilink PPP netdevice, 0 if not, or < 0
+ * if the device is not PPP. The caller should acquire ppp_lock before
+ * calling this function
+ */
+int __ppp_is_multilink(struct net_device *dev)
+{
+ struct ppp *ppp;
+ unsigned int flags;
+
+ if (!dev)
+ return -1;
+
+ if (dev->type != ARPHRD_PPP)
+ return -1;
+
+ ppp = netdev_priv(dev);
+ flags = ppp->flags;
+
+ if (flags & SC_MULTILINK)
+ return 1;
+
+ return 0;
+}
+EXPORT_SYMBOL(__ppp_is_multilink);
+
/* ppp_channel_get_protocol()
* Call this to obtain the underlying protocol of the PPP channel,
* e.g. PX_PROTO_OE
@@ -3881,6 +3907,59 @@ int ppp_hold_channels(struct net_device
}
EXPORT_SYMBOL(ppp_hold_channels);
+/* __ppp_hold_channels()
+ * Returns the PPP channels of the PPP device, storing each one into
+ * channels[].
+ *
+ * channels[] has chan_sz elements.
+ * This function returns the number of channels stored, up to chan_sz.
+ * It will return < 0 if the device is not PPP.
+ *
+ * You MUST release the channels using ppp_release_channels().
+ */
+int __ppp_hold_channels(struct net_device *dev, struct ppp_channel *channels[],
+ unsigned int chan_sz)
+{
+ struct ppp *ppp;
+ int c;
+ struct channel *pch;
+
+ if (!dev)
+ return -1;
+
+ if (dev->type != ARPHRD_PPP)
+ return -1;
+
+ ppp = netdev_priv(dev);
+
+ c = 0;
+ list_for_each_entry(pch, &ppp->channels, clist) {
+ struct ppp_channel *chan;
+
+ if (!pch->chan) {
+ /* Channel is going / gone away */
+ continue;
+ }
+
+ if (c == chan_sz) {
+ /* No space to record channel */
+ return c;
+ }
+
+ /* Hold the channel, if supported */
+ chan = pch->chan;
+ if (!chan->ops->hold)
+ continue;
+
+ chan->ops->hold(chan);
+
+ /* Record the channel */
+ channels[c++] = chan;
+ }
+ return c;
+}
+EXPORT_SYMBOL(__ppp_hold_channels);
+
/* ppp_release_channels()
* Releases channels
*/
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -235,6 +235,9 @@ struct l2tp_session *l2tp_session_get_by
void l2tp_stats_update(struct l2tp_tunnel *tunnel, struct l2tp_session *session,
struct l2tp_stats *stats);
+void l2tp_stats_update(struct l2tp_tunnel *tunnel, struct l2tp_session *session,
+ struct l2tp_stats *stats);
+
/* Tunnel and session lifetime management.
* Creation of a new instance is a two-step process: create, then register.
* Destruction is triggered using the *_delete functions, and completes asynchronously.
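
A minimal sketch of how an acceleration module might walk the channels of a PPP netdevice using the helpers exported above; my_walk_ppp_channels() and MY_MAX_CHANNELS are hypothetical, and every hold is balanced by a release as the helper comments require.

/* Illustrative walk over the channels of a PPP netdevice. */
#include <linux/netdevice.h>
#include <linux/ppp_channel.h>
#include <linux/if_pppox.h>
#include <linux/if_pppol2tp.h>

#define MY_MAX_CHANNELS 8

static void my_walk_ppp_channels(struct net_device *ppp_dev)
{
	struct ppp_channel *channels[MY_MAX_CHANNELS];
	int nr, i;

	if (ppp_is_multilink(ppp_dev))
		return;		/* multilink bundles are not handled here */

	nr = ppp_hold_channels(ppp_dev, channels, MY_MAX_CHANNELS);
	if (nr <= 0)
		return;

	for (i = 0; i < nr; i++) {
		if (ppp_channel_get_protocol(channels[i]) == PX_PROTO_OL2TP) {
			struct pppol2tp_common_addr addr;

			if (!pppol2tp_channel_addressing_get(channels[i], &addr))
				pr_debug("l2tp tunnel %u session %u\n",
					 addr.local_tunnel_id,
					 addr.local_session_id);
		}
	}

	/* Every successful hold must be balanced by a release. */
	ppp_release_channels(channels, nr);
}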

View file

@@ -0,0 +1,22 @@
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -2404,7 +2404,7 @@ nla_put_failure:
*/
void ip6_update_offload_stats(struct net_device *dev, void *ptr)
{
- struct pcpu_sw_netstats *tstats = per_cpu_ptr(dev->tstats, 0);
+ struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
const struct pcpu_sw_netstats *offload_stats =
(struct pcpu_sw_netstats *)ptr;
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -1736,7 +1736,7 @@ nla_put_failure:
/* QCA NSS Clients Support - Start */
void ipip6_update_offload_stats(struct net_device *dev, void *ptr)
{
- struct pcpu_sw_netstats *tstats = per_cpu_ptr(dev->tstats, 0);
+ struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
const struct pcpu_sw_netstats *offload_stats =
(struct pcpu_sw_netstats *)ptr;

View file

@@ -0,0 +1,24 @@
--- /dev/null
+++ b/include/uapi/linux/tlshdr.h
@@ -0,0 +1,21 @@
+#ifndef _UAPI_LINUX_TLSHDR_H
+#define _UAPI_LINUX_TLSHDR_H
+
+#include <linux/types.h>
+
+struct tlshdr {
+ __u8 type;
+ __be16 version;
+ __be16 len;
+} __attribute__((packed));
+
+#define TLSHDR_REC_TYPE_CCS 20 /* TLS packet is change cipher specification */
+#define TLSHDR_REC_TYPE_ALERT 21 /* TLS packet is Alert */
+#define TLSHDR_REC_TYPE_HANDSHAKE 22 /* TLS packet is Handshake */
+#define TLSHDR_REC_TYPE_DATA 23 /* TLS packet is Application data */
+
+#define TLSHDR_VERSION_1_1 0x0302 /* TLS Header Version(tls 1.1) */
+#define TLSHDR_VERSION_1_2 0x0303 /* TLS Header Version(tls 1.2) */
+#define TLSHDR_VERSION_1_3 0x0304 /* TLS Header Version(tls 1.3) */
+
+#endif /* _UAPI_LINUX_TLSHDR_H */
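
A small hedged sketch of how the record header defined above could be used to peek at a linear buffer; the function name is hypothetical and the unaligned accessors are assumed to be available via asm/unaligned.h on this kernel.

/* Illustrative: classify the first TLS record in a linear buffer. */
#include <linux/types.h>
#include <linux/tlshdr.h>
#include <asm/unaligned.h>

static bool my_is_tls_app_data(const u8 *data, unsigned int len)
{
	const struct tlshdr *th = (const struct tlshdr *)data;

	if (len < sizeof(*th))
		return false;

	/* version and len are big-endian on the wire */
	if (get_unaligned_be16(&th->version) < TLSHDR_VERSION_1_1)
		return false;

	return th->type == TLSHDR_REC_TYPE_DATA;
}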

View file

@@ -0,0 +1,876 @@
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -281,4 +281,17 @@ extern br_get_dst_hook_t __rcu *br_get_d
extern struct net_device *br_fdb_bridge_dev_get_and_hold(struct net_bridge *br);
/* QCA NSS bridge-mgr support - End */
+/* QCA qca-mcs support - Start */
+typedef struct net_bridge_port *br_get_dst_hook_t(const struct net_bridge_port *src,
+ struct sk_buff **skb);
+extern br_get_dst_hook_t __rcu *br_get_dst_hook;
+
+typedef int (br_multicast_handle_hook_t)(const struct net_bridge_port *src,
+ struct sk_buff *skb);
+extern br_multicast_handle_hook_t __rcu *br_multicast_handle_hook;
+
+typedef void (br_notify_hook_t)(int group, int event, const void *ptr);
+extern br_notify_hook_t __rcu *br_notify_hook;
+/* QCA qca-mcs support - End */
+
#endif
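
A hedged sketch of how a snooping module might claim the multicast handle hook declared above; the handler name is hypothetical, and installing via rcu_assign_pointer() mirrors how the hook is dereferenced in br_device.c and br_input.c later in this patch.

/* Hypothetical qca-mcs style consumer of br_multicast_handle_hook. */
#include <linux/if_bridge.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>

/* Return 0 to tell the bridge the frame was consumed here,
 * non-zero to let the normal bridge forwarding path continue.
 */
static int my_br_mcast_handler(const struct net_bridge_port *src,
			       struct sk_buff *skb)
{
	/* Snoop or hand the frame to the offload path here. */
	return 1;	/* fall back to the normal bridge path */
}

static void my_mcs_attach(void)
{
	rcu_assign_pointer(br_multicast_handle_hook, my_br_mcast_handler);
}

static void my_mcs_detach(void)
{
	rcu_assign_pointer(br_multicast_handle_hook, NULL);
	synchronize_rcu();
}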
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -232,6 +232,8 @@ static void fdb_notify(struct net_bridge
kfree_skb(skb);
goto errout;
}
+
+ __br_notify(RTNLGRP_NEIGH, type, fdb); /* QCA qca-mcs support */
rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
return;
errout:
@@ -298,6 +300,7 @@ struct net_bridge_fdb_entry *br_fdb_find
{
return fdb_find_rcu(&br->fdb_hash_tbl, addr, vid);
}
+EXPORT_SYMBOL_GPL(br_fdb_find_rcu); /* QCA qca-mcs support */
/* When a static FDB entry is added, the mac address from the entry is
* added to the bridge private HW address list and all required ports
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -853,6 +853,7 @@ void br_manage_promisc(struct net_bridge
int nbp_backup_change(struct net_bridge_port *p, struct net_device *backup_dev);
/* br_input.c */
+int br_pass_frame_up(struct sk_buff *skb); /* QCA qca-mcs support */
int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb);
rx_handler_func_t *br_get_rx_handler(const struct net_device *dev);
@@ -2178,4 +2179,14 @@ struct nd_msg *br_is_nd_neigh_msg(struct
#define __br_get(__hook, __default, __args ...) \
(__hook ? (__hook(__args)) : (__default))
/* QCA NSS ECM support - End */
+
+/* QCA qca-mcs support - Start */
+static inline void __br_notify(int group, int type, const void *data)
+{
+ br_notify_hook_t *notify_hook = rcu_dereference(br_notify_hook);
+
+ if (notify_hook)
+ notify_hook(group, type, data);
+}
+/* QCA qca-mcs support - End */
#endif
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -640,6 +640,7 @@ void br_info_notify(int event, const str
kfree_skb(skb);
goto errout;
}
+ __br_notify(RTNLGRP_LINK, event, port); /* QCA qca-mcs support */
rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
return;
errout:
--- a/net/bridge/br.c
+++ b/net/bridge/br.c
@@ -467,6 +467,12 @@ static void __exit br_deinit(void)
br_fdb_fini();
}
+/* QCA qca-mcs support - Start */
+/* Hook for bridge event notifications */
+br_notify_hook_t __rcu *br_notify_hook __read_mostly;
+EXPORT_SYMBOL_GPL(br_notify_hook);
+/* QCA qca-mcs support - End */
+
module_init(br_init)
module_exit(br_deinit)
MODULE_LICENSE("GPL");
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -82,6 +82,13 @@ netdev_tx_t br_dev_xmit(struct sk_buff *
if (is_broadcast_ether_addr(dest)) {
br_flood(br, skb, BR_PKT_BROADCAST, false, true);
} else if (is_multicast_ether_addr(dest)) {
+ /* QCA qca-mcs support - Start */
+ br_multicast_handle_hook_t *multicast_handle_hook =
+ rcu_dereference(br_multicast_handle_hook);
+ if (!__br_get(multicast_handle_hook, true, NULL, skb))
+ goto out;
+ /* QCA qca-mcs support - End */
+
if (unlikely(netpoll_tx_running(dev))) {
br_flood(br, skb, BR_PKT_MULTICAST, false, true);
goto out;
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -30,7 +30,17 @@ br_netif_receive_skb(struct net *net, st
return netif_receive_skb(skb);
}
-static int br_pass_frame_up(struct sk_buff *skb)
+/* QCA qca-mcs support - Start */
+/* Hook for external Multicast handler */
+br_multicast_handle_hook_t __rcu *br_multicast_handle_hook __read_mostly;
+EXPORT_SYMBOL_GPL(br_multicast_handle_hook);
+
+/* Hook for external forwarding logic */
+br_get_dst_hook_t __rcu *br_get_dst_hook __read_mostly;
+EXPORT_SYMBOL_GPL(br_get_dst_hook);
+/* QCA qca-mcs support - End */
+
+int br_pass_frame_up(struct sk_buff *skb)
{
struct net_device *indev, *brdev = BR_INPUT_SKB_CB(skb)->brdev;
struct net_bridge *br = netdev_priv(brdev);
@@ -69,6 +79,7 @@ static int br_pass_frame_up(struct sk_bu
dev_net(indev), NULL, skb, indev, NULL,
br_netif_receive_skb);
}
+EXPORT_SYMBOL_GPL(br_pass_frame_up); /* QCA qca-mcs support */
/* note: already called with rcu_read_lock */
int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
@@ -82,6 +93,11 @@ int br_handle_frame_finish(struct net *n
struct net_bridge_mcast *brmctx;
struct net_bridge_vlan *vlan;
struct net_bridge *br;
+ /* QCA qca-mcs support - Start */
+ br_multicast_handle_hook_t *multicast_handle_hook;
+ struct net_bridge_port *pdst = NULL;
+ br_get_dst_hook_t *get_dst_hook = rcu_dereference(br_get_dst_hook);
+ /* QCA qca-mcs support - End */
u16 vid = 0;
u8 state;
@@ -158,6 +174,12 @@ int br_handle_frame_finish(struct net *n
switch (pkt_type) {
case BR_PKT_MULTICAST:
+ /* QCA qca-mcs support - Start */
+ multicast_handle_hook = rcu_dereference(br_multicast_handle_hook);
+ if (!__br_get(multicast_handle_hook, true, p, skb))
+ goto out;
+ /* QCA qca-mcs support - End */
+
mdst = br_mdb_get(brmctx, skb, vid);
if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) &&
br_multicast_querier_exists(brmctx, eth_hdr(skb), mdst)) {
@@ -173,8 +195,15 @@ int br_handle_frame_finish(struct net *n
}
break;
case BR_PKT_UNICAST:
- dst = br_fdb_find_rcu(br, eth_hdr(skb)->h_dest, vid);
- break;
+ /* QCA qca-mcs support - Start */
+ pdst = __br_get(get_dst_hook, NULL, p, &skb);
+ if (pdst) {
+ if (!skb)
+ goto out;
+ } else {
+ /* QCA qca-mcs support - End */
+ dst = br_fdb_find_rcu(br, eth_hdr(skb)->h_dest, vid);
+ }
default:
break;
}
@@ -189,6 +218,13 @@ int br_handle_frame_finish(struct net *n
dst->used = now;
br_forward(dst->dst, skb, local_rcv, false);
} else {
+ /* QCA qca-mcs support - Start */
+ if (pdst) {
+ br_forward(pdst, skb, local_rcv, false);
+ goto out;
+ }
+ /* QCA qca-mcs support - End */
+
if (!mcast_hit)
br_flood(br, skb, pkt_type, local_rcv, false);
else
--- a/include/linux/mroute.h
+++ b/include/linux/mroute.h
@@ -85,4 +85,44 @@ struct rtmsg;
int ipmr_get_route(struct net *net, struct sk_buff *skb,
__be32 saddr, __be32 daddr,
struct rtmsg *rtm, u32 portid);
+
+/* QCA ECM qca-mcs support - Start */
+#define IPMR_MFC_EVENT_UPDATE 1
+#define IPMR_MFC_EVENT_DELETE 2
+
+/*
+ * Callback to registered modules in the event of updates to a multicast group
+ */
+typedef void (*ipmr_mfc_event_offload_callback_t)(__be32 origin, __be32 group,
+ u32 max_dest_dev,
+ u32 dest_dev_idx[],
+ u8 op);
+
+/*
+ * Register the callback used to inform offload modules when updates occur to
+ * MFC. The callback is registered by offload modules
+ */
+extern bool ipmr_register_mfc_event_offload_callback(
+ ipmr_mfc_event_offload_callback_t mfc_offload_cb);
+
+/*
+ * De-Register the callback used to inform offload modules when updates occur
+ * to MFC
+ */
+extern void ipmr_unregister_mfc_event_offload_callback(void);
+
+/*
+ * Find the destination interface list, given a multicast group and source
+ */
+extern int ipmr_find_mfc_entry(struct net *net, __be32 origin, __be32 group,
+ u32 max_dst_cnt, u32 dest_dev[]);
+
+/*
+ * Out-of-band multicast statistics update for flows that are offloaded from
+ * Linux
+ */
+extern int ipmr_mfc_stats_update(struct net *net, __be32 origin, __be32 group,
+ u64 pkts_in, u64 bytes_in,
+ u64 pkts_out, u64 bytes_out);
+/* QCA ECM qca-mcs support - End */
#endif
--- a/include/linux/mroute6.h
+++ b/include/linux/mroute6.h
@@ -110,4 +110,47 @@ static inline int ip6mr_sk_done(struct s
return 0;
}
#endif
+
+/* QCA qca-mcs support - Start */
+#define IP6MR_MFC_EVENT_UPDATE 1
+#define IP6MR_MFC_EVENT_DELETE 2
+
+/*
+ * Callback to registered modules in the event of updates to a multicast group
+ */
+typedef void (*ip6mr_mfc_event_offload_callback_t)(struct in6_addr *origin,
+ struct in6_addr *group,
+ u32 max_dest_dev,
+ u32 dest_dev_idx[],
+ uint8_t op);
+
+/*
+ * Register the callback used to inform offload modules when updates occur
+ * to MFC. The callback is registered by offload modules
+ */
+extern bool ip6mr_register_mfc_event_offload_callback(
+ ip6mr_mfc_event_offload_callback_t mfc_offload_cb);
+
+/*
+ * De-Register the callback used to inform offload modules when updates occur
+ * to MFC
+ */
+extern void ip6mr_unregister_mfc_event_offload_callback(void);
+
+/*
+ * Find the destination interface list given a multicast group and source
+ */
+extern int ip6mr_find_mfc_entry(struct net *net, struct in6_addr *origin,
+ struct in6_addr *group, u32 max_dst_cnt,
+ u32 dest_dev[]);
+
+/*
+ * Out-of-band multicast statistics update for flows that are offloaded from
+ * Linux
+ */
+extern int ip6mr_mfc_stats_update(struct net *net, struct in6_addr *origin,
+ struct in6_addr *group, uint64_t pkts_in,
+ uint64_t bytes_in, uint64_t pkts_out,
+ uint64_t bytes_out);
+/* QCA qca-mcs support - End */
#endif
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -89,6 +89,9 @@ static struct net_device *vif_dev_read(c
/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);
+/* spinlock for offload */
+static DEFINE_SPINLOCK(lock); /* QCA ECM qca-mcs support */
+
/* We return to original Alan's scheme. Hash table of resolved
* entries is changed only in process context and protected
* with weak lock mrt_lock. Queue of unresolved entries is protected
@@ -112,6 +115,9 @@ static void mroute_netlink_event(struct
static void igmpmsg_netlink_event(const struct mr_table *mrt, struct sk_buff *pkt);
static void mroute_clean_tables(struct mr_table *mrt, int flags);
static void ipmr_expire_process(struct timer_list *t);
+static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt, __be32 origin,
+ __be32 mcastgrp);
+static ipmr_mfc_event_offload_callback_t __rcu ipmr_mfc_event_offload_callback; /* QCA ECM qca-mcs support */
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
#define ipmr_for_each_table(mrt, net) \
@@ -223,6 +229,80 @@ static int ipmr_rule_fill(struct fib_rul
return 0;
}
+/* QCA ECM qca-mcs support - Start */
+/* ipmr_sync_entry_update()
+ * Call the registered offload callback to report an update to a multicast
+ * route entry. The callback receives the list of destination interfaces and
+ * the interface count
+ */
+static void ipmr_sync_entry_update(struct mr_table *mrt,
+ struct mfc_cache *cache)
+{
+ int vifi, dest_if_count = 0;
+ u32 dest_dev[MAXVIFS];
+ __be32 origin;
+ __be32 group;
+ ipmr_mfc_event_offload_callback_t offload_update_cb_f;
+
+ memset(dest_dev, 0, sizeof(dest_dev));
+
+ origin = cache->mfc_origin;
+ group = cache->mfc_mcastgrp;
+
+ spin_lock(&mrt_lock);
+ for (vifi = 0; vifi < cache->_c.mfc_un.res.maxvif; vifi++) {
+ if (!((cache->_c.mfc_un.res.ttls[vifi] > 0) &&
+ (cache->_c.mfc_un.res.ttls[vifi] < 255))) {
+ continue;
+ }
+ if (dest_if_count == MAXVIFS) {
+ spin_unlock(&mrt_lock);
+ return;
+ }
+
+ if (!VIF_EXISTS(mrt, vifi)) {
+ spin_unlock(&mrt_lock);
+ return;
+ }
+ dest_dev[dest_if_count] = mrt->vif_table[vifi].dev->ifindex;
+ dest_if_count++;
+ }
+ spin_unlock(&mrt_lock);
+
+ rcu_read_lock();
+ offload_update_cb_f = rcu_dereference(ipmr_mfc_event_offload_callback);
+
+ if (!offload_update_cb_f) {
+ rcu_read_unlock();
+ return;
+ }
+
+ offload_update_cb_f(group, origin, dest_if_count, dest_dev,
+ IPMR_MFC_EVENT_UPDATE);
+ rcu_read_unlock();
+}
+
+/* ipmr_sync_entry_delete()
+ * Call the registered offload callback to inform of a multicast route entry
+ * delete event
+ */
+static void ipmr_sync_entry_delete(u32 origin, u32 group)
+{
+ ipmr_mfc_event_offload_callback_t offload_update_cb_f;
+
+ rcu_read_lock();
+ offload_update_cb_f = rcu_dereference(ipmr_mfc_event_offload_callback);
+
+ if (!offload_update_cb_f) {
+ rcu_read_unlock();
+ return;
+ }
+
+ offload_update_cb_f(group, origin, 0, NULL, IPMR_MFC_EVENT_DELETE);
+ rcu_read_unlock();
+}
+/* QCA ECM qca-mcs support - End */
+
static const struct fib_rules_ops __net_initconst ipmr_rules_ops_template = {
.family = RTNL_FAMILY_IPMR,
.rule_size = sizeof(struct ipmr_rule),
@@ -236,6 +316,156 @@ static const struct fib_rules_ops __net_
.owner = THIS_MODULE,
};
+/* QCA ECM qca-mcs support - Start */
+/* ipmr_register_mfc_event_offload_callback()
+ * Register the IPv4 Multicast update offload callback with IPMR
+ */
+bool ipmr_register_mfc_event_offload_callback(
+ ipmr_mfc_event_offload_callback_t mfc_offload_cb)
+{
+ ipmr_mfc_event_offload_callback_t offload_update_cb_f;
+
+ rcu_read_lock();
+ offload_update_cb_f = rcu_dereference(ipmr_mfc_event_offload_callback);
+
+ if (offload_update_cb_f) {
+ rcu_read_unlock();
+ return false;
+ }
+ rcu_read_unlock();
+
+ spin_lock(&lock);
+ rcu_assign_pointer(ipmr_mfc_event_offload_callback, mfc_offload_cb);
+ spin_unlock(&lock);
+ synchronize_rcu();
+ return true;
+}
+EXPORT_SYMBOL(ipmr_register_mfc_event_offload_callback);
+
+/* ipmr_unregister_mfc_event_offload_callback()
+ * De-register the IPv4 Multicast update offload callback with IPMR
+ */
+void ipmr_unregister_mfc_event_offload_callback(void)
+{
+ spin_lock(&lock);
+ rcu_assign_pointer(ipmr_mfc_event_offload_callback, NULL);
+ spin_unlock(&lock);
+ synchronize_rcu();
+}
+EXPORT_SYMBOL(ipmr_unregister_mfc_event_offload_callback);
+
+/* ipmr_find_mfc_entry()
+ * Returns destination interface list for a particular multicast flow, and
+ * the number of interfaces in the list
+ */
+int ipmr_find_mfc_entry(struct net *net, __be32 origin, __be32 group,
+ u32 max_dest_cnt, u32 dest_dev[])
+{
+ int vifi, dest_if_count = 0;
+ struct mr_table *mrt;
+ struct mfc_cache *cache;
+
+ mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
+ if (!mrt)
+ return -ENOENT;
+
+ rcu_read_lock();
+ cache = ipmr_cache_find(mrt, origin, group);
+ if (!cache) {
+ rcu_read_unlock();
+ return -ENOENT;
+ }
+
+ spin_lock(&mrt_lock);
+ for (vifi = 0; vifi < cache->_c.mfc_un.res.maxvif; vifi++) {
+ if (!((cache->_c.mfc_un.res.ttls[vifi] > 0) &&
+ (cache->_c.mfc_un.res.ttls[vifi] < 255))) {
+ continue;
+ }
+
+ /* We have another valid destination interface entry. Check whether
+ * the number of destination interfaces for the route exceeds the
+ * size of the array given to us
+ */
+ if (dest_if_count == max_dest_cnt) {
+ spin_unlock(&mrt_lock);
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+
+ if (!VIF_EXISTS(mrt, vifi)) {
+ spin_unlock(&mrt_lock);
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+
+ dest_dev[dest_if_count] = mrt->vif_table[vifi].dev->ifindex;
+ dest_if_count++;
+ }
+ spin_unlock(&mrt_lock);
+ rcu_read_unlock();
+
+ return dest_if_count;
+}
+EXPORT_SYMBOL(ipmr_find_mfc_entry);
+
+/* ipmr_mfc_stats_update()
+ * Update the MFC/VIF statistics for offloaded flows
+ */
+int ipmr_mfc_stats_update(struct net *net, __be32 origin, __be32 group,
+ u64 pkts_in, u64 bytes_in,
+ u64 pkts_out, u64 bytes_out)
+{
+ int vif, vifi;
+ struct mr_table *mrt;
+ struct mfc_cache *cache;
+
+ mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
+ if (!mrt)
+ return -ENOENT;
+
+ rcu_read_lock();
+ cache = ipmr_cache_find(mrt, origin, group);
+ if (!cache) {
+ rcu_read_unlock();
+ return -ENOENT;
+ }
+
+ vif = cache->_c.mfc_parent;
+
+ spin_lock(&mrt_lock);
+ if (!VIF_EXISTS(mrt, vif)) {
+ spin_unlock(&mrt_lock);
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+
+ mrt->vif_table[vif].pkt_in += pkts_in;
+ mrt->vif_table[vif].bytes_in += bytes_in;
+ cache->_c.mfc_un.res.pkt += pkts_out;
+ cache->_c.mfc_un.res.bytes += bytes_out;
+
+ for (vifi = cache->_c.mfc_un.res.minvif;
+ vifi < cache->_c.mfc_un.res.maxvif; vifi++) {
+ if ((cache->_c.mfc_un.res.ttls[vifi] > 0) &&
+ (cache->_c.mfc_un.res.ttls[vifi] < 255)) {
+ if (!VIF_EXISTS(mrt, vifi)) {
+ spin_unlock(&mrt_lock);
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+ mrt->vif_table[vifi].pkt_out += pkts_out;
+ mrt->vif_table[vifi].bytes_out += bytes_out;
+ }
+ }
+ spin_unlock(&mrt_lock);
+ rcu_read_unlock();
+
+ return 0;
+}
+EXPORT_SYMBOL(ipmr_mfc_stats_update);
+/* QCA ECM qca-mcs support - End */
+
static int __net_init ipmr_rules_init(struct net *net)
{
struct fib_rules_ops *ops;
@@ -1191,6 +1421,10 @@ static int ipmr_mfc_delete(struct mr_tab
call_ipmr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_DEL, c, mrt->id);
mroute_netlink_event(mrt, c, RTM_DELROUTE);
mr_cache_put(&c->_c);
+ /* QCA ECM qca-mcs support - Start */
+ /* Inform offload modules of the delete event */
+ ipmr_sync_entry_delete(c->mfc_origin, c->mfc_mcastgrp);
+ /* QCA ECM qca-mcs support - End */
return 0;
}
@@ -1221,6 +1455,10 @@ static int ipmr_mfc_add(struct net *net,
call_ipmr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_REPLACE, c,
mrt->id);
mroute_netlink_event(mrt, c, RTM_NEWROUTE);
+ /* QCA ECM qca-mcs support - Start */
+ /* Inform offload modules of the update event */
+ ipmr_sync_entry_update(mrt, c);
+ /* QCA ECM qca-mcs support - End */
return 0;
}
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -74,6 +74,9 @@ static struct net_device *vif_dev_read(c
/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);
+/* Spinlock for offload */
+static DEFINE_SPINLOCK(lock); /* QCA qca-mcs support */
+
/* We return to original Alan's scheme. Hash table of resolved
entries is changed only in process context and protected
with weak lock mrt_lock. Queue of unresolved entries is protected
@@ -101,6 +104,13 @@ static int ip6mr_rtm_dumproute(struct sk
struct netlink_callback *cb);
static void mroute_clean_tables(struct mr_table *mrt, int flags);
static void ipmr_expire_process(struct timer_list *t);
+/* QCA qca-mcs support - Start */
+static struct mfc6_cache *ip6mr_cache_find(struct mr_table *mrt,
+ const struct in6_addr *origin,
+ const struct in6_addr *mcastgrp);
+static ip6mr_mfc_event_offload_callback_t __rcu
+ ip6mr_mfc_event_offload_callback;
+/* QCA qca-mcs support - End */
#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
#define ip6mr_for_each_table(mrt, net) \
@@ -375,6 +385,84 @@ static struct mfc6_cache_cmp_arg ip6mr_m
.mf6c_mcastgrp = IN6ADDR_ANY_INIT,
};
+/* QCA qca-mcs support - Start */
+/* ip6mr_sync_entry_update()
+ * Call the registered offload callback to report an update to a multicast
+ * route entry. The callback receives the list of destination interfaces and
+ * the interface count
+ */
+static void ip6mr_sync_entry_update(struct mr_table *mrt,
+ struct mfc6_cache *cache)
+{
+ int vifi, dest_if_count = 0;
+ u32 dest_dev[MAXMIFS];
+ struct in6_addr mc_origin, mc_group;
+ ip6mr_mfc_event_offload_callback_t offload_update_cb_f;
+
+ memset(dest_dev, 0, sizeof(dest_dev));
+
+ spin_lock(&mrt_lock);
+
+ for (vifi = 0; vifi < cache->_c.mfc_un.res.maxvif; vifi++) {
+ if (!((cache->_c.mfc_un.res.ttls[vifi] > 0) &&
+ (cache->_c.mfc_un.res.ttls[vifi] < 255))) {
+ continue;
+ }
+
+ if (dest_if_count == MAXMIFS) {
+ spin_unlock(&mrt_lock);
+ return;
+ }
+
+ if (!VIF_EXISTS(mrt, vifi)) {
+ spin_unlock(&mrt_lock);
+ return;
+ }
+
+ dest_dev[dest_if_count] = mrt->vif_table[vifi].dev->ifindex;
+ dest_if_count++;
+ }
+
+ memcpy(&mc_origin, &cache->mf6c_origin, sizeof(struct in6_addr));
+ memcpy(&mc_group, &cache->mf6c_mcastgrp, sizeof(struct in6_addr));
+ spin_unlock(&mrt_lock);
+
+ rcu_read_lock();
+ offload_update_cb_f = rcu_dereference(ip6mr_mfc_event_offload_callback);
+
+ if (!offload_update_cb_f) {
+ rcu_read_unlock();
+ return;
+ }
+
+ offload_update_cb_f(&mc_group, &mc_origin, dest_if_count, dest_dev,
+ IP6MR_MFC_EVENT_UPDATE);
+ rcu_read_unlock();
+}
+
+/* ip6mr_sync_entry_delete()
+ * Call the registered offload callback to inform of a multicast route entry
+ * delete event
+ */
+static void ip6mr_sync_entry_delete(struct in6_addr *mc_origin,
+ struct in6_addr *mc_group)
+{
+ ip6mr_mfc_event_offload_callback_t offload_update_cb_f;
+
+ rcu_read_lock();
+ offload_update_cb_f = rcu_dereference(ip6mr_mfc_event_offload_callback);
+
+ if (!offload_update_cb_f) {
+ rcu_read_unlock();
+ return;
+ }
+
+ offload_update_cb_f(mc_group, mc_origin, 0, NULL,
+ IP6MR_MFC_EVENT_DELETE);
+ rcu_read_unlock();
+}
+/* QCA qca-mcs support - End */
+
static struct mr_table_ops ip6mr_mr_table_ops = {
.rht_params = &ip6mr_rht_params,
.cmparg_any = &ip6mr_mr_table_ops_cmparg_any,
@@ -697,6 +785,151 @@ static int call_ip6mr_mfc_entry_notifier
&mfc->_c, tb_id, &net->ipv6.ipmr_seq);
}
+/* QCA qca-mcs support - Start */
+/* ip6mr_register_mfc_event_offload_callback()
+ * Register the IPv6 multicast update callback for offload modules
+ */
+bool ip6mr_register_mfc_event_offload_callback(
+ ip6mr_mfc_event_offload_callback_t mfc_offload_cb)
+{
+ ip6mr_mfc_event_offload_callback_t offload_update_cb_f;
+
+ rcu_read_lock();
+ offload_update_cb_f = rcu_dereference(ip6mr_mfc_event_offload_callback);
+
+ if (offload_update_cb_f) {
+ rcu_read_unlock();
+ return false;
+ }
+ rcu_read_unlock();
+
+ spin_lock(&lock);
+ rcu_assign_pointer(ip6mr_mfc_event_offload_callback, mfc_offload_cb);
+ spin_unlock(&lock);
+ synchronize_rcu();
+ return true;
+}
+EXPORT_SYMBOL(ip6mr_register_mfc_event_offload_callback);
+
+/* ip6mr_unregister_mfc_event_offload_callback()
+ * De-register the IPv6 multicast update callback for offload modules
+ */
+void ip6mr_unregister_mfc_event_offload_callback(void)
+{
+ spin_lock(&lock);
+ rcu_assign_pointer(ip6mr_mfc_event_offload_callback, NULL);
+ spin_unlock(&lock);
+ synchronize_rcu();
+}
+EXPORT_SYMBOL(ip6mr_unregister_mfc_event_offload_callback);
+
+/* ip6mr_find_mfc_entry()
+ * Return the destination interface list for a particular multicast flow, and
+ * the number of interfaces in the list
+ */
+int ip6mr_find_mfc_entry(struct net *net, struct in6_addr *origin,
+ struct in6_addr *group, u32 max_dest_cnt,
+ u32 dest_dev[])
+{
+ int vifi, dest_if_count = 0;
+ struct mr_table *mrt;
+ struct mfc6_cache *cache;
+
+ mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
+ if (!mrt)
+ return -ENOENT;
+
+ spin_lock(&mrt_lock);
+ cache = ip6mr_cache_find(mrt, origin, group);
+ if (!cache) {
+ spin_unlock(&mrt_lock);
+ return -ENOENT;
+ }
+
+ for (vifi = 0; vifi < cache->_c.mfc_un.res.maxvif; vifi++) {
+ if (!((cache->_c.mfc_un.res.ttls[vifi] > 0) &&
+ (cache->_c.mfc_un.res.ttls[vifi] < 255))) {
+ continue;
+ }
+
+ /* We have another valid destination interface entry. Check whether
+ * the number of destination interfaces for the route exceeds the
+ * size of the array given to us
+ */
+ if (dest_if_count == max_dest_cnt) {
+ spin_unlock(&mrt_lock);
+ return -EINVAL;
+ }
+
+ if (!VIF_EXISTS(mrt, vifi)) {
+ spin_unlock(&mrt_lock);
+ return -EINVAL;
+ }
+
+ dest_dev[dest_if_count] = mrt->vif_table[vifi].dev->ifindex;
+ dest_if_count++;
+ }
+ spin_unlock(&mrt_lock);
+
+ return dest_if_count;
+}
+EXPORT_SYMBOL(ip6mr_find_mfc_entry);
+
+/* ip6mr_mfc_stats_update()
+ * Update the MFC/VIF statistics for offloaded flows
+ */
+int ip6mr_mfc_stats_update(struct net *net, struct in6_addr *origin,
+ struct in6_addr *group, u64 pkts_in,
+ u64 bytes_in, uint64_t pkts_out,
+ u64 bytes_out)
+{
+ int vif, vifi;
+ struct mr_table *mrt;
+ struct mfc6_cache *cache;
+
+ mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
+
+ if (!mrt)
+ return -ENOENT;
+
+ spin_lock(&mrt_lock);
+ cache = ip6mr_cache_find(mrt, origin, group);
+ if (!cache) {
+ spin_unlock(&mrt_lock);
+ return -ENOENT;
+ }
+
+ vif = cache->_c.mfc_parent;
+
+ if (!VIF_EXISTS(mrt, vif)) {
+ spin_unlock(&mrt_lock);
+ return -EINVAL;
+ }
+
+ mrt->vif_table[vif].pkt_in += pkts_in;
+ mrt->vif_table[vif].bytes_in += bytes_in;
+ cache->_c.mfc_un.res.pkt += pkts_out;
+ cache->_c.mfc_un.res.bytes += bytes_out;
+
+ for (vifi = cache->_c.mfc_un.res.minvif;
+ vifi < cache->_c.mfc_un.res.maxvif; vifi++) {
+ if ((cache->_c.mfc_un.res.ttls[vifi] > 0) &&
+ (cache->_c.mfc_un.res.ttls[vifi] < 255)) {
+ if (!VIF_EXISTS(mrt, vifi)) {
+ spin_unlock(&mrt_lock);
+ return -EINVAL;
+ }
+ mrt->vif_table[vifi].pkt_out += pkts_out;
+ mrt->vif_table[vifi].bytes_out += bytes_out;
+ }
+ }
+
+ spin_unlock(&mrt_lock);
+ return 0;
+}
+EXPORT_SYMBOL(ip6mr_mfc_stats_update);
+/* QCA qca-mcs support - End */
+
/* Delete a VIF entry */
static int mif6_delete(struct mr_table *mrt, int vifi, int notify,
struct list_head *head)
@@ -1221,6 +1454,7 @@ static int ip6mr_mfc_delete(struct mr_ta
int parent)
{
struct mfc6_cache *c;
+ struct in6_addr mc_origin, mc_group; /* QCA qca-mcs support */
/* The entries are added/deleted only under RTNL */
rcu_read_lock();
@@ -1229,6 +1463,11 @@ static int ip6mr_mfc_delete(struct mr_ta
rcu_read_unlock();
if (!c)
return -ENOENT;
+
+ /* QCA qca-mcs support - Start */
+ memcpy(&mc_origin, &c->mf6c_origin, sizeof(struct in6_addr));
+ memcpy(&mc_group, &c->mf6c_mcastgrp, sizeof(struct in6_addr));
+ /* QCA qca-mcs support - End */
rhltable_remove(&mrt->mfc_hash, &c->_c.mnode, ip6mr_rht_params);
list_del_rcu(&c->_c.list);
@@ -1236,6 +1475,11 @@ static int ip6mr_mfc_delete(struct mr_ta
FIB_EVENT_ENTRY_DEL, c, mrt->id);
mr6_netlink_event(mrt, c, RTM_DELROUTE);
mr_cache_put(&c->_c);
+ /* QCA qca-mcs support - Start */
+ /* Inform offload modules of the delete event */
+ ip6mr_sync_entry_delete(&mc_origin, &mc_group);
+ /* QCA qca-mcs support - End */
+
return 0;
}
@@ -1457,6 +1701,10 @@ static int ip6mr_mfc_add(struct net *net
call_ip6mr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_REPLACE,
c, mrt->id);
mr6_netlink_event(mrt, c, RTM_NEWROUTE);
+ /* QCA qca-mcs support - Start */
+ /* Inform offload modules of the update event */
+ ip6mr_sync_entry_update(mrt, c);
+ /* QCA qca-mcs support - End */
return 0;
}
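
A hedged sketch of an offload module registering the IPv4 MFC event callback exported above; the names are hypothetical, the handler only logs what it receives, and the equivalent ip6mr_* calls work the same way for IPv6.

/* Illustrative registration of the IPv4 MFC offload callback. */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mroute.h>

static void my_mfc_event(__be32 addr1, __be32 addr2, u32 max_dest_dev,
			 u32 dest_dev_idx[], u8 op)
{
	/* addr1/addr2 carry the group and origin as passed by ipmr. */
	pr_debug("mfc event op=%u, %u destination interfaces\n",
		 op, op == IPMR_MFC_EVENT_DELETE ? 0 : max_dest_dev);
}

static int __init my_mcast_accel_init(void)
{
	/* Only one offload module may own the callback at a time. */
	if (!ipmr_register_mfc_event_offload_callback(my_mfc_event))
		return -EBUSY;
	return 0;
}

static void __exit my_mcast_accel_exit(void)
{
	ipmr_unregister_mfc_event_offload_callback();
}

module_init(my_mcast_accel_init);
module_exit(my_mcast_accel_exit);
MODULE_LICENSE("GPL");

ipmr_find_mfc_entry() and ipmr_mfc_stats_update() would then be used from the same module to read the destination set for a flow and to push hardware counters back into the MFC/VIF statistics.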

View file

@@ -0,0 +1,111 @@
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -417,6 +417,8 @@ static int crypto_authenc_create(struct
enc->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
+ inst->alg.base.cra_flags |= (auth_base->cra_flags |
+ enc->base.cra_flags) & CRYPTO_ALG_NOSUPP_SG;
inst->alg.base.cra_priority = enc->base.cra_priority * 10 +
auth_base->cra_priority;
inst->alg.base.cra_blocksize = enc->base.cra_blocksize;
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -101,6 +101,11 @@
#define CRYPTO_NOLOAD 0x00008000
/*
+ * Set this flag if the algorithm does not support SG list transforms
+ */
+#define CRYPTO_ALG_NOSUPP_SG 0x0000c000
+
+/*
* The algorithm may allocate memory during request processing, i.e. during
* encryption, decryption, or hashing. Users can request an algorithm with this
* flag unset if they can't handle memory allocation failures.
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -658,6 +658,7 @@ static int esp_output(struct xfrm_state
struct ip_esp_hdr *esph;
struct crypto_aead *aead;
struct esp_info esp;
+ bool nosupp_sg;
esp.inplace = true;
@@ -669,6 +670,11 @@ static int esp_output(struct xfrm_state
aead = x->data;
alen = crypto_aead_authsize(aead);
+ nosupp_sg = crypto_tfm_alg_type(&aead->base) & CRYPTO_ALG_NOSUPP_SG;
+ if (nosupp_sg && skb_linearize(skb)) {
+ return -ENOMEM;
+ }
+
esp.tfclen = 0;
if (x->tfcpad) {
struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
@@ -890,6 +896,7 @@ static int esp_input(struct xfrm_state *
u8 *iv;
struct scatterlist *sg;
int err = -EINVAL;
+ bool nosupp_sg;
if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + ivlen))
goto out;
@@ -897,6 +904,12 @@ static int esp_input(struct xfrm_state *
if (elen <= 0)
goto out;
+ nosupp_sg = crypto_tfm_alg_type(&aead->base) & CRYPTO_ALG_NOSUPP_SG;
+ if (nosupp_sg && skb_linearize(skb)) {
+ err = -ENOMEM;
+ goto out;
+ }
+
assoclen = sizeof(struct ip_esp_hdr);
seqhilen = 0;
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -696,6 +696,7 @@ static int esp6_output(struct xfrm_state
struct ip_esp_hdr *esph;
struct crypto_aead *aead;
struct esp_info esp;
+ bool nosupp_sg;
esp.inplace = true;
@@ -707,6 +708,11 @@ static int esp6_output(struct xfrm_state
aead = x->data;
alen = crypto_aead_authsize(aead);
+ nosupp_sg = crypto_tfm_alg_type(&aead->base) & CRYPTO_ALG_NOSUPP_SG;
+ if (nosupp_sg && skb_linearize(skb)) {
+ return -ENOMEM;
+ }
+
esp.tfclen = 0;
if (x->tfcpad) {
struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
@@ -934,6 +940,7 @@ static int esp6_input(struct xfrm_state
__be32 *seqhi;
u8 *iv;
struct scatterlist *sg;
+ bool nosupp_sg;
if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + ivlen)) {
ret = -EINVAL;
@@ -945,6 +952,12 @@ static int esp6_input(struct xfrm_state
goto out;
}
+ nosupp_sg = crypto_tfm_alg_type(&aead->base) & CRYPTO_ALG_NOSUPP_SG;
+ if (nosupp_sg && skb_linearize(skb)) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
assoclen = sizeof(struct ip_esp_hdr);
seqhilen = 0;
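
A hedged sketch of how a hardware crypto driver might advertise the new flag so that esp4/esp6 linearize each skb before calling into it; the algorithm name, priority and driver name are illustrative, and the omitted callbacks are placeholders rather than a working implementation.

/* Illustrative only: an AEAD driver advertising CRYPTO_ALG_NOSUPP_SG. */
#include <linux/module.h>
#include <linux/crypto.h>
#include <crypto/aead.h>
#include <crypto/aes.h>

static struct aead_alg my_aead_alg = {
	.base = {
		.cra_name	 = "authenc(hmac(sha256),cbc(aes))",
		.cra_driver_name = "my-authenc-hw",
		.cra_priority	 = 400,
		.cra_flags	 = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NOSUPP_SG,
		.cra_blocksize	 = AES_BLOCK_SIZE,
		.cra_module	 = THIS_MODULE,
	},
	/* .setkey, .encrypt, .decrypt, .ivsize, .maxauthsize ... */
};

/* Registered with crypto_register_aead(&my_aead_alg) at probe time; the
 * flag then propagates through crypto_authenc_create() as patched above.
 */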

View file

@@ -0,0 +1,80 @@
From eee3a7956b943dd3e23a74fbb5bfe89405eb0782 Mon Sep 17 00:00:00 2001
From: Andrea Righi <andrea.righi@canonical.com>
Date: Mon, 6 Dec 2021 17:34:47 +0100
Subject: UBUNTU: SAUCE: ipv6: fix NULL pointer dereference in ip6_output()
It is possible to trigger a NULL pointer dereference by running the srv6
net kselftest (tools/testing/selftests/net/srv6_end_dt46_l3vpn_test.sh):
[ 249.051216] BUG: kernel NULL pointer dereference, address: 0000000000000378
[ 249.052331] #PF: supervisor read access in kernel mode
[ 249.053137] #PF: error_code(0x0000) - not-present page
[ 249.053960] PGD 0 P4D 0
[ 249.054376] Oops: 0000 [#1] PREEMPT SMP NOPTI
[ 249.055083] CPU: 1 PID: 21 Comm: ksoftirqd/1 Tainted: G E 5.16.0-rc4 #2
[ 249.056328] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.14.0-2 04/01/2014
[ 249.057632] RIP: 0010:ip6_forward+0x53c/0xab0
[ 249.058354] Code: 49 c7 44 24 20 00 00 00 00 48 83 e0 fe 48 8b 40 30 48 3d 70 b2 b5 81 0f 85 b5 04 00 00 e8 7c f2 ff ff 41 89 c5 e9 17 01 00 00 <44> 8b 93 78 03 00 00 45 85 d2 0f 85 92 fb ff ff 49 8b 54 24 10 48
[ 249.061274] RSP: 0018:ffffc900000cbb30 EFLAGS: 00010246
[ 249.062042] RAX: 0000000000000000 RBX: 0000000000000000 RCX: ffff8881051d3400
[ 249.063141] RDX: ffff888104bda000 RSI: 00000000000002c0 RDI: 0000000000000000
[ 249.064264] RBP: ffffc900000cbbc8 R08: 0000000000000000 R09: 0000000000000000
[ 249.065376] R10: 0000000000000040 R11: 0000000000000000 R12: ffff888103409800
[ 249.066498] R13: ffff8881051d3410 R14: ffff888102725280 R15: ffff888103525000
[ 249.067619] FS: 0000000000000000(0000) GS:ffff88813bc80000(0000) knlGS:0000000000000000
[ 249.068881] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[ 249.069777] CR2: 0000000000000378 CR3: 0000000104980000 CR4: 0000000000750ee0
[ 249.070907] PKRU: 55555554
[ 249.071337] Call Trace:
[ 249.071730] <TASK>
[ 249.072070] ? debug_smp_processor_id+0x17/0x20
[ 249.072807] seg6_input_core+0x2bb/0x2d0
[ 249.073436] ? _raw_spin_unlock_irqrestore+0x29/0x40
[ 249.074225] seg6_input+0x3b/0x130
[ 249.074768] lwtunnel_input+0x5e/0xa0
[ 249.075357] ip_rcv+0x17b/0x190
[ 249.075867] ? update_load_avg+0x82/0x600
[ 249.076514] __netif_receive_skb_one_core+0x86/0xa0
[ 249.077231] __netif_receive_skb+0x15/0x60
[ 249.077843] process_backlog+0x97/0x160
[ 249.078389] __napi_poll+0x31/0x170
[ 249.078912] net_rx_action+0x229/0x270
[ 249.079506] __do_softirq+0xef/0x2ed
[ 249.080085] run_ksoftirqd+0x37/0x50
[ 249.080663] smpboot_thread_fn+0x193/0x230
[ 249.081312] kthread+0x17a/0x1a0
[ 249.081847] ? smpboot_register_percpu_thread+0xe0/0xe0
[ 249.082677] ? set_kthread_struct+0x50/0x50
[ 249.083340] ret_from_fork+0x22/0x30
[ 249.083926] </TASK>
[ 249.090295] ---[ end trace 1998d7ba5965a365 ]---
It looks like commit 0857d6f8c759 ("ipv6: When forwarding count rx stats
on the orig netdev") tries to determine the right netdev to account the
rx stats, but in this particular case it's failing and the netdev is
NULL.
Fallback to the previous method of determining the netdev interface (via
skb->dev) to account the rx stats when the orig netdev can't be
determined.
Fixes: 0857d6f8c759 ("ipv6: When forwarding count rx stats on the orig netdev")
Signed-off-by: Andrea Righi <andrea.righi@canonical.com>
(cherry picked from https://lore.kernel.org/lkml/20211206163447.991402-1-andrea.righi@canonical.com/T/#u)
Signed-off-by: Andrea Righi <andrea.righi@canonical.com>
---
net/ipv6/ip6_output.c | 3 +++
1 file changed, 3 insertions(+)
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -492,6 +492,9 @@ int ip6_forward(struct sk_buff *skb)
u32 mtu;
idev = __in6_dev_get_safely(dev_get_by_index_rcu(net, IP6CB(skb)->iif));
+ if (unlikely(!idev))
+ idev = __in6_dev_get_safely(skb->dev);
+
if (net->ipv6.devconf_all->forwarding == 0)
goto error;

View file

@@ -0,0 +1,25 @@
diff -uprN a/arch/arm64/boot/dts/qcom/ipq8074.dtsi b/arch/arm64/boot/dts/qcom/ipq8074.dtsi
--- a/arch/arm64/boot/dts/qcom/ipq8074.dtsi 2023-06-22 18:11:32.910676000 -0700
+++ b/arch/arm64/boot/dts/qcom/ipq8074.dtsi 2023-07-26 21:43:58.269612521 -0700
@@ -750,6 +750,21 @@
status = "disabled";
};
+ blsp1_i2c4: i2c@78b8000 {
+ compatible = "qcom,i2c-qup-v2.2.1";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x078b8000 0x600>;
+ interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP1_QUP4_I2C_APPS_CLK>,
+ <&gcc GCC_BLSP1_AHB_CLK>;
+ clock-names = "core", "iface";
+ clock-frequency = <100000>;
+ dmas = <&blsp_dma 18>, <&blsp_dma 19>;
+ dma-names = "tx", "rx";
+ status = "disabled";
+ };
+
blsp1_i2c5: i2c@78b9000 {
compatible = "qcom,i2c-qup-v2.2.1";
#address-cells = <1>;

View file

@@ -0,0 +1,412 @@
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -94,6 +94,7 @@ enum cpuhp_state {
CPUHP_RADIX_DEAD,
CPUHP_PAGE_ALLOC,
CPUHP_NET_DEV_DEAD,
+ CPUHP_SKB_RECYCLER_DEAD,
CPUHP_PCI_XGENE_DEAD,
CPUHP_IOMMU_IOVA_DEAD,
CPUHP_LUSTRE_CFS_DEAD,
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1240,7 +1240,7 @@ static inline void kfree_skb_list(struct sk_buff *segs)
kfree_skb_list_reason(segs, SKB_DROP_REASON_NOT_SPECIFIED);
}
-#ifdef CONFIG_TRACEPOINTS
+#ifdef CONFIG_SKB_RECYCLER
void consume_skb(struct sk_buff *skb);
#else
static inline void consume_skb(struct sk_buff *skb)
@@ -1252,6 +1252,8 @@ static inline void consume_skb(struct sk_buff *skb)
void __consume_stateless_skb(struct sk_buff *skb);
void __kfree_skb(struct sk_buff *skb);
extern struct kmem_cache *skbuff_head_cache;
+extern void kfree_skbmem(struct sk_buff *skb);
+extern void skb_release_data(struct sk_buff *skb);
void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -332,6 +332,27 @@ config NET_FLOW_LIMIT
with many clients some protection against DoS by a single (spoofed)
flow that greatly exceeds average workload.
+config SKB_RECYCLER
+ bool "Generic skb recycling"
+ default y
+ help
+ SKB_RECYCLER is used to implement RX-to-RX skb recycling.
+ This config enables the recycling scheme for bridging and
+ routing workloads. It can reduce skbuff freeing or
+ reallocation overhead.
+
+config SKB_RECYCLER_MULTI_CPU
+ bool "Cross-CPU recycling for CPU-locked workloads"
+ depends on SMP && SKB_RECYCLER
+ default n
+
+config ALLOC_SKB_PAGE_FRAG_DISABLE
+ bool "Disable page fragment based skbuff payload allocations"
+ depends on !SKB_RECYCLER
+ default n
+ help
+ Disable page fragment based allocations for skbuff payloads.
+
menu "Network testing"
config NET_PKTGEN
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -40,3 +40,4 @@ obj-$(CONFIG_NET_SOCK_MSG) += skmsg.o
obj-$(CONFIG_BPF_SYSCALL) += sock_map.o
obj-$(CONFIG_BPF_SYSCALL) += bpf_sk_storage.o
obj-$(CONFIG_OF) += of_net.o
+obj-$(CONFIG_SKB_RECYCLER) += skbuff_recycle.o
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5974,10 +5974,16 @@ static int process_backlog(struct napi_struct *napi, int quota)
napi->weight = READ_ONCE(dev_rx_weight);
while (again) {
- struct sk_buff *skb;
+ struct sk_buff *skb, *next_skb;
while ((skb = __skb_dequeue(&sd->process_queue))) {
rcu_read_lock();
+
+ next_skb = skb_peek(&sd->process_queue);
+ if (likely(next_skb)) {
+ prefetch(next_skb->data);
+ }
+
__netif_receive_skb(skb);
rcu_read_unlock();
input_queue_head_incr(sd);
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -84,6 +84,33 @@
#include "dev.h"
#include "sock_destructor.h"
+struct kmem_cache *skb_data_cache;
+
+/*
+ * For the low-memory profile, NSS_SKB_FIXED_SIZE_2K is enabled and
+ * CONFIG_SKB_RECYCLER is disabled. For the premium and enterprise
+ * profiles, CONFIG_SKB_RECYCLER is enabled and NSS_SKB_FIXED_SIZE_2K is
+ * disabled. Irrespective of whether NSS_SKB_FIXED_SIZE_2K is enabled,
+ * CONFIG_SKB_RECYCLER and __LP64__ determine the value of SKB_DATA_CACHE_SIZE.
+ */
+#if defined(CONFIG_SKB_RECYCLER)
+/*
+ * 2688 for 64bit arch, 2624 for 32bit arch
+ */
+#define SKB_DATA_CACHE_SIZE (SKB_DATA_ALIGN(SKB_RECYCLE_SIZE + NET_SKB_PAD) + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#else
+/*
+ * 2368 for 64bit arch, 2176 for 32bit arch
+ */
+#if defined(__LP64__)
+#define SKB_DATA_CACHE_SIZE ((SKB_DATA_ALIGN(1984 + NET_SKB_PAD)) + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#else
+#define SKB_DATA_CACHE_SIZE ((SKB_DATA_ALIGN(1856 + NET_SKB_PAD)) + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#endif
+#endif
+
+#include "skbuff_recycle.h"
+
struct kmem_cache *skbuff_head_cache __ro_after_init;
static struct kmem_cache *skbuff_fclone_cache __ro_after_init;
#ifdef CONFIG_SKB_EXTENSIONS
@@ -426,32 +453,46 @@ EXPORT_SYMBOL(napi_build_skb);
* memory is free
*/
static void *kmalloc_reserve(unsigned int *size, gfp_t flags, int node,
- bool *pfmemalloc)
-{
- bool ret_pfmemalloc = false;
- size_t obj_size;
- void *obj;
+ bool *pfmemalloc)
+ {
+ void *obj;
+ bool ret_pfmemalloc = false;
+ unsigned int obj_size = *size;
+
+ if (obj_size > SZ_2K && obj_size <= SKB_DATA_CACHE_SIZE) {
+ obj = kmem_cache_alloc_node(skb_data_cache,
+ flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
+ node);
+ *size = SKB_DATA_CACHE_SIZE;
+ if (obj || !(gfp_pfmemalloc_allowed(flags)))
+ goto out;
- obj_size = SKB_HEAD_ALIGN(*size);
+ /* Try again but now we are using pfmemalloc reserves */
+ ret_pfmemalloc = true;
+ obj = kmem_cache_alloc_node(skb_data_cache, flags, node);
+ goto out;
+ }
obj_size = kmalloc_size_roundup(obj_size);
- /* The following cast might truncate high-order bits of obj_size, this
+
+ /*
+ * The following cast might truncate high-order bits of obj_size, this
* is harmless because kmalloc(obj_size >= 2^32) will fail anyway.
*/
*size = (unsigned int)obj_size;
- /*
- * Try a regular allocation, when that fails and we're not entitled
- * to the reserves, fail.
- */
+ /*
+ * Try a regular allocation, when that fails and we're not entitled
+ * to the reserves, fail.
+ */
obj = kmalloc_node_track_caller(obj_size,
- flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
- node);
- if (obj || !(gfp_pfmemalloc_allowed(flags)))
- goto out;
+ flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
+ node);
+ if (obj || !(gfp_pfmemalloc_allowed(flags)))
+ goto out;
- /* Try again but now we are using pfmemalloc reserves */
- ret_pfmemalloc = true;
+ /* Try again but now we are using pfmemalloc reserves */
+ ret_pfmemalloc = true;
obj = kmalloc_node_track_caller(obj_size, flags, node);
out:
@@ -513,10 +554,12 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
* aligned memory blocks, unless SLUB/SLAB debug is enabled.
* Both skb->head and skb_shared_info are cache line aligned.
*/
+ size = SKB_DATA_ALIGN(size);
+ size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
data = kmalloc_reserve(&size, gfp_mask, node, &pfmemalloc);
if (unlikely(!data))
goto nodata;
- /* kmalloc_size_roundup() might give us more room than requested.
+ /* kmalloc_reserve(size) might give us more room than requested.
* Put skb_shared_info exactly at the end of allocated zone,
* to allow max possible filling before reallocation.
*/
@@ -551,7 +594,7 @@ EXPORT_SYMBOL(__alloc_skb);
/**
* __netdev_alloc_skb - allocate an skbuff for rx on a specific device
* @dev: network device to receive on
- * @len: length to allocate
+ * @length: length to allocate
* @gfp_mask: get_free_pages mask, passed to alloc_skb
*
* Allocate a new &sk_buff and assign it a usage count of one. The
@@ -561,29 +604,53 @@ EXPORT_SYMBOL(__alloc_skb);
*
* %NULL is returned if there is no free memory.
*/
-struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
- gfp_t gfp_mask)
+struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
+ unsigned int length, gfp_t gfp_mask)
{
- struct page_frag_cache *nc;
struct sk_buff *skb;
+ unsigned int len = length;
+
+#ifdef CONFIG_SKB_RECYCLER
+ skb = skb_recycler_alloc(dev, length);
+ if (likely(skb))
+ return skb;
+
+ len = SKB_RECYCLE_SIZE;
+ if (unlikely(length > SKB_RECYCLE_SIZE))
+ len = length;
+
+ skb = __alloc_skb(len + NET_SKB_PAD, gfp_mask,
+ SKB_ALLOC_RX, NUMA_NO_NODE);
+ if (!skb)
+ goto skb_fail;
+ goto skb_success;
+#else
+ struct page_frag_cache *nc;
bool pfmemalloc;
+ bool page_frag_alloc_enable = true;
void *data;
len += NET_SKB_PAD;
+
+#ifdef CONFIG_ALLOC_SKB_PAGE_FRAG_DISABLE
+ page_frag_alloc_enable = false;
+#endif
/* If requested length is either too small or too big,
* we use kmalloc() for skb->head allocation.
*/
if (len <= SKB_WITH_OVERHEAD(1024) ||
len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
- (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
+ (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA)) ||
+ !page_frag_alloc_enable) {
skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
if (!skb)
goto skb_fail;
goto skb_success;
}
- len = SKB_HEAD_ALIGN(len);
+ len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ len = SKB_DATA_ALIGN(len);
if (sk_memalloc_socks())
gfp_mask |= __GFP_MEMALLOC;
@@ -612,6 +679,7 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
if (pfmemalloc)
skb->pfmemalloc = 1;
skb->head_frag = 1;
+#endif
skb_success:
skb_reserve(skb, NET_SKB_PAD);
@@ -682,7 +750,8 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
data = page_frag_alloc_1k(&nc->page_small, gfp_mask);
pfmemalloc = NAPI_SMALL_PAGE_PFMEMALLOC(nc->page_small);
} else {
- len = SKB_HEAD_ALIGN(len);
+ len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ len = SKB_DATA_ALIGN(len);
data = page_frag_alloc(&nc->page, len, gfp_mask);
pfmemalloc = nc->page.pfmemalloc;
@@ -780,7 +849,7 @@ static void skb_free_head(struct sk_buff *skb)
}
}
-static void skb_release_data(struct sk_buff *skb)
+void skb_release_data(struct sk_buff *skb)
{
struct skb_shared_info *shinfo = skb_shinfo(skb);
int i;
@@ -822,7 +891,7 @@ static void skb_release_data(struct sk_buff *skb)
/*
* Free an skbuff by memory without cleaning the state.
*/
-static void kfree_skbmem(struct sk_buff *skb)
+void kfree_skbmem(struct sk_buff *skb)
{
struct sk_buff_fclones *fclones;
@@ -1034,7 +1103,6 @@ void skb_tx_error(struct sk_buff *skb)
}
EXPORT_SYMBOL(skb_tx_error);
-#ifdef CONFIG_TRACEPOINTS
/**
* consume_skb - free an skbuff
* @skb: buffer to free
@@ -1043,13 +1111,50 @@ EXPORT_SYMBOL(skb_tx_error);
* Functions identically to kfree_skb, but kfree_skb assumes that the frame
* is being dropped after a failure and notes that
*/
+#ifdef CONFIG_SKB_RECYCLER
void consume_skb(struct sk_buff *skb)
{
if (!skb_unref(skb))
return;
+ prefetch(&skb->destructor);
+
+ /* Note (Tian): not sure we still need this, since skb_unref()
+ * already does this work in 5.4.
+ */
+
+ /*
+ if (likely(atomic_read(&skb->users) == 1))
+ smp_rmb();
+ else if (likely(!atomic_dec_and_test(&skb->users)))
+ return;
+ */
+
+ /* If possible we'd like to recycle any skb rather than just free it,
+ * but in order to do that we need to release any head state too.
+ * We don't want to do this later because we'll be in a pre-emption
+ * disabled state.
+ */
+ skb_release_head_state(skb);
+
+ /* Can we recycle this skb? If we can then it will be much faster
+ * for us to recycle this one later than to allocate a new one
+ * from scratch.
+ */
+ if (likely(skb->head) && likely(skb_recycler_consume(skb)))
+ return;
+
+#ifdef CONFIG_TRACEPOINTS
trace_consume_skb(skb);
- __kfree_skb(skb);
+#endif
+ /* We're not recycling so now we need to do the rest of what we would
+ * have done in __kfree_skb (above and beyond the skb_release_head_state
+ * that we already did).
+ */
+ if (likely(skb->head))
+ skb_release_data(skb);
+
+ kfree_skbmem(skb);
}
EXPORT_SYMBOL(consume_skb);
#endif
@@ -1856,6 +1961,8 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
if (skb_pfmemalloc(skb))
gfp_mask |= __GFP_MEMALLOC;
+ size = SKB_DATA_ALIGN(size);
+ size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL);
if (!data)
goto nodata;
@@ -4557,6 +4664,11 @@ static void skb_extensions_init(void) {}
void __init skb_init(void)
{
+ skb_data_cache = kmem_cache_create_usercopy("skb_data_cache",
+ SKB_DATA_CACHE_SIZE,
+ 0, SLAB_PANIC, 0, SKB_DATA_CACHE_SIZE,
+ NULL);
+
skbuff_head_cache = kmem_cache_create_usercopy("skbuff_head_cache",
sizeof(struct sk_buff),
0,
@@ -4570,6 +4682,7 @@ void __init skb_init(void)
SLAB_HWCACHE_ALIGN|SLAB_PANIC,
NULL);
skb_extensions_init();
+ skb_recycler_init();
}
static int
@@ -6224,6 +6337,8 @@ static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off,
if (skb_pfmemalloc(skb))
gfp_mask |= __GFP_MEMALLOC;
+ size = SKB_DATA_ALIGN(size);
+ size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL);
if (!data)
return -ENOMEM;
@@ -6340,6 +6455,8 @@ static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off,
if (skb_pfmemalloc(skb))
gfp_mask |= __GFP_MEMALLOC;
+ size = SKB_DATA_ALIGN(size);
+ size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL);
if (!data)
return -ENOMEM;
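
To make the recycler flow above easier to follow, here is a minimal, illustrative sketch of the allocate/free round-trip under CONFIG_SKB_RECYCLER. It only restates what the hunks above wire up; skb_recycler_alloc(), skb_recycler_consume() and skb_recycler_init() live in the new net/core/skbuff_recycle.{c,h}, which this patch excerpt does not show, and the demo function and the 1500-byte length below are hypothetical.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical helper showing where the recycler hooks into a driver's
 * buffer lifecycle; it is not taken from any real driver. */
static void rx_buffer_lifecycle_demo(struct net_device *dev)
{
	/* With CONFIG_SKB_RECYCLER, __netdev_alloc_skb() first tries
	 * skb_recycler_alloc(); on a miss it falls back to __alloc_skb()
	 * with at least SKB_RECYCLE_SIZE bytes so the buffer remains
	 * recyclable later. */
	struct sk_buff *skb = __netdev_alloc_skb(dev, 1500, GFP_ATOMIC);

	if (unlikely(!skb))
		return;

	/* ... normally the NIC would fill the buffer and hand it to the
	 * stack; here it is simply freed again ... */

	/* consume_skb() releases the head state and then offers the buffer
	 * to skb_recycler_consume(); only if the recycler refuses it does
	 * the path fall back to skb_release_data()/kfree_skbmem(). */
	consume_skb(skb);
}

The prefetch added to process_backlog() and the dedicated skb_data_cache slab are independent micro-optimisations and do not change this basic alloc/consume contract.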

View file

@@ -0,0 +1,10 @@
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -290,7 +290,6 @@ static struct crypto_larval *__crypto_re
}
if (!strcmp(q->cra_driver_name, alg->cra_name) ||
- !strcmp(q->cra_driver_name, alg->cra_driver_name) ||
!strcmp(q->cra_name, alg->cra_driver_name))
goto err;
}

View file

@ -0,0 +1,13 @@
--- a/drivers/mtd/mtdblock.c
+++ b/drivers/mtd/mtdblock.c
@@ -261,10 +261,6 @@ static int mtdblock_open(struct mtd_blkt
return 0;
}
- if (mtd_type_is_nand(mbd->mtd))
- pr_warn("%s: MTD device '%s' is NAND, please consider using UBI block devices instead.\n",
- mbd->tr->name, mbd->mtd->name);
-
/* OK, it's not open. Create cache info for it */
mtdblk->count = 1;
mutex_init(&mtdblk->cache_mutex);

View file

@@ -0,0 +1,311 @@
From 6504bc9edeb1a2a54d813f4bb5d0267e7bf827f9 Mon Sep 17 00:00:00 2001
From: Praveenkumar I <ipkumar@codeaurora.org>
Date: Thu, 6 Feb 2020 17:35:42 +0530
Subject: [PATCH 4/8] clk: ipq8074: Support added for necessary clocks and
reset
Change-Id: I21a76a44185f766e9b6dcba274392ea8e599718b
Signed-off-by: Praveenkumar I <ipkumar@codeaurora.org>
Signed-off-by: Rajkumar Ayyasamy <arajkuma@codeaurora.org>
---
drivers/clk/qcom/gcc-ipq8074.c | 238 ++++++++++++++++++-
include/dt-bindings/clock/qcom,gcc-ipq8074.h | 35 ++-
2 files changed, 258 insertions(+), 15 deletions(-)
--- a/drivers/clk/qcom/gcc-ipq8074.c
+++ b/drivers/clk/qcom/gcc-ipq8074.c
@@ -48,6 +48,22 @@ enum {
P_UNIPHY2_TX,
};
+static const char * const gcc_xo_gpll4_gpll0_gpll6_gpll0_div2[] = {
+ "xo",
+ "gpll4",
+ "gpll0",
+ "gpll6",
+ "gpll0_out_main_div2",
+};
+
+static const struct parent_map gcc_xo_gpll4_gpll0_gpll6_gpll0_div2_map[] = {
+ { P_XO, 0 },
+ { P_GPLL4, 1 },
+ { P_GPLL0, 2 },
+ { P_GPLL6, 3 },
+ { P_GPLL0_DIV2, 4 },
+};
+
static struct clk_alpha_pll gpll0_main = {
.offset = 0x21000,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
@@ -629,6 +645,12 @@ static const struct freq_tbl ftbl_pcie_a
{ }
};
+struct freq_tbl ftbl_pcie_rchng_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(100000000, P_GPLL0, 8, 0, 0),
+ { }
+};
+
static struct clk_rcg2 pcie0_axi_clk_src = {
.cmd_rcgr = 0x75054,
.freq_tbl = ftbl_pcie_axi_clk_src,
@@ -2029,6 +2051,78 @@ static struct clk_rcg2 gp3_clk_src = {
},
};
+struct freq_tbl ftbl_qdss_tsctr_clk_src[] = {
+ F(160000000, P_GPLL0_DIV2, 2.5, 0, 0),
+ F(320000000, P_GPLL0, 2.5, 0, 0),
+ F(600000000, P_GPLL6, 2, 0, 0),
+ { }
+};
+
+struct clk_rcg2 qdss_tsctr_clk_src = {
+ .cmd_rcgr = 0x29064,
+ .freq_tbl = ftbl_qdss_tsctr_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll4_gpll0_gpll6_gpll0_div2_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "qdss_tsctr_clk_src",
+ .parent_names = gcc_xo_gpll4_gpll0_gpll6_gpll0_div2,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_fixed_factor qdss_dap_sync_clk_src = {
+ .mult = 1,
+ .div = 4,
+ .hw.init = &(struct clk_init_data){
+ .name = "qdss_dap_sync_clk_src",
+ .parent_names = (const char *[]){
+ "qdss_tsctr_clk_src"
+ },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+struct freq_tbl ftbl_qdss_at_clk_src[] = {
+ F(66670000, P_GPLL0_DIV2, 6, 0, 0),
+ F(240000000, P_GPLL6, 6, 0, 0),
+ { }
+};
+
+struct clk_rcg2 qdss_at_clk_src = {
+ .cmd_rcgr = 0x2900c,
+ .freq_tbl = ftbl_qdss_at_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll4_gpll0_gpll6_gpll0_div2_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "qdss_at_clk_src",
+ .parent_names = gcc_xo_gpll4_gpll0_gpll6_gpll0_div2,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+
+struct freq_tbl ftbl_adss_pwm_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(200000000, P_GPLL0, 4, 0, 0),
+ { }
+};
+
+struct clk_rcg2 adss_pwm_clk_src = {
+ .cmd_rcgr = 0x1c008,
+ .freq_tbl = ftbl_adss_pwm_clk_src,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "adss_pwm_clk_src",
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
static struct clk_branch gcc_blsp1_ahb_clk = {
.halt_reg = 0x01008,
.clkr = {
@@ -4224,13 +4318,7 @@ static struct clk_branch gcc_gp3_clk = {
},
};
-static const struct freq_tbl ftbl_pcie_rchng_clk_src[] = {
- F(19200000, P_XO, 1, 0, 0),
- F(100000000, P_GPLL0, 8, 0, 0),
- { }
-};
-
-static struct clk_rcg2 pcie0_rchng_clk_src = {
+struct clk_rcg2 pcie0_rchng_clk_src = {
.cmd_rcgr = 0x75070,
.freq_tbl = ftbl_pcie_rchng_clk_src,
.hid_width = 5,
@@ -4322,6 +4410,114 @@ static const struct alpha_pll_config nss
.alpha_en_mask = BIT(24),
};
+static struct clk_branch gcc_snoc_bus_timeout2_ahb_clk = {
+ .halt_reg = 0x4700c,
+ .halt_bit = 31,
+ .clkr = {
+ .enable_reg = 0x4700c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_snoc_bus_timeout2_ahb_clk",
+ .parent_names = (const char *[]){
+ "usb0_master_clk_src"
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_snoc_bus_timeout3_ahb_clk = {
+ .halt_reg = 0x47014,
+ .halt_bit = 31,
+ .clkr = {
+ .enable_reg = 0x47014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_snoc_bus_timeout3_ahb_clk",
+ .parent_names = (const char *[]){
+ "usb1_master_clk_src"
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_dcc_clk = {
+ .halt_reg = 0x77004,
+ .halt_bit = 31,
+ .clkr = {
+ .enable_reg = 0x77004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_dcc_clk",
+ .parent_names = (const char *[]){
+ "pcnoc_clk_src"
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qdss_at_clk = {
+ .halt_reg = 0x29024,
+ .halt_bit = 31,
+ .clkr = {
+ .enable_reg = 0x29024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qdss_at_clk",
+ .parent_names = (const char *[]){
+ "qdss_at_clk_src"
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qdss_dap_clk = {
+ .halt_reg = 0x29084,
+ .halt_bit = 31,
+ .clkr = {
+ .enable_reg = 0x29084,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qdss_dap_clk",
+ .parent_names = (const char *[]){
+ "qdss_dap_sync_clk_src"
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_adss_pwm_clk = {
+ .halt_reg = 0x1c020,
+ .halt_bit = 31,
+ .clkr = {
+ .enable_reg = 0x1c020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_adss_pwm_clk",
+ .parent_names = (const char *[]){
+ "adss_pwm_clk_src"
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_hw *gcc_ipq8074_hws[] = {
&gpll0_out_main_div2.hw,
&gpll6_out_main_div2.hw,
@@ -4330,6 +4526,7 @@ static struct clk_hw *gcc_ipq8074_hws[]
&gcc_xo_div4_clk_src.hw,
&nss_noc_clk_src.hw,
&nss_ppe_cdiv_clk_src.hw,
+ &qdss_dap_sync_clk_src.hw,
};
static struct clk_regmap *gcc_ipq8074_clks[] = {
@@ -4561,6 +4758,15 @@ static struct clk_regmap *gcc_ipq8074_cl
[GCC_PCIE0_RCHNG_CLK] = &gcc_pcie0_rchng_clk.clkr,
[GCC_PCIE0_AXI_S_BRIDGE_CLK] = &gcc_pcie0_axi_s_bridge_clk.clkr,
[GCC_CRYPTO_PPE_CLK] = &gcc_crypto_ppe_clk.clkr,
+ [GCC_SNOC_BUS_TIMEOUT2_AHB_CLK] = &gcc_snoc_bus_timeout2_ahb_clk.clkr,
+ [GCC_SNOC_BUS_TIMEOUT3_AHB_CLK] = &gcc_snoc_bus_timeout3_ahb_clk.clkr,
+ [GCC_DCC_CLK] = &gcc_dcc_clk.clkr,
+ [QDSS_TSCTR_CLK_SRC] = &qdss_tsctr_clk_src.clkr,
+ [QDSS_AT_CLK_SRC] = &qdss_at_clk_src.clkr,
+ [GCC_QDSS_AT_CLK] = &gcc_qdss_at_clk.clkr,
+ [GCC_QDSS_DAP_CLK] = &gcc_qdss_dap_clk.clkr,
+ [ADSS_PWM_CLK_SRC] = &adss_pwm_clk_src.clkr,
+ [GCC_ADSS_PWM_CLK] = &gcc_adss_pwm_clk.clkr,
};
static const struct qcom_reset_map gcc_ipq8074_resets[] = {
--- a/include/dt-bindings/clock/qcom,gcc-ipq8074.h
+++ b/include/dt-bindings/clock/qcom,gcc-ipq8074.h
@@ -230,10 +230,19 @@
#define GCC_GP1_CLK 221
#define GCC_GP2_CLK 222
#define GCC_GP3_CLK 223
-#define GCC_PCIE0_AXI_S_BRIDGE_CLK 224
-#define GCC_PCIE0_RCHNG_CLK_SRC 225
-#define GCC_PCIE0_RCHNG_CLK 226
-#define GCC_CRYPTO_PPE_CLK 227
+#define GCC_CRYPTO_PPE_CLK 224
+#define GCC_PCIE0_RCHNG_CLK_SRC 225
+#define GCC_PCIE0_RCHNG_CLK 226
+#define GCC_PCIE0_AXI_S_BRIDGE_CLK 227
+#define GCC_SNOC_BUS_TIMEOUT2_AHB_CLK 228
+#define GCC_SNOC_BUS_TIMEOUT3_AHB_CLK 229
+#define GCC_DCC_CLK 230
+#define ADSS_PWM_CLK_SRC 231
+#define GCC_ADSS_PWM_CLK 232
+#define QDSS_TSCTR_CLK_SRC 233
+#define QDSS_AT_CLK_SRC 234
+#define GCC_QDSS_AT_CLK 235
+#define GCC_QDSS_DAP_CLK 236
#define GCC_BLSP1_BCR 0
#define GCC_BLSP1_QUP1_BCR 1
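
Since the patch also extends qcom,gcc-ipq8074.h with new consumer-visible identifiers such as GCC_ADSS_PWM_CLK, a peripheral driver can request the new clocks through the common clock framework in the usual way. A minimal, hypothetical sketch follows; the probe function, the device and the "core" clock name are assumptions for illustration, not part of this patch.

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

/* Hypothetical probe of a consumer whose DT node carries
 * clocks = <&gcc GCC_ADSS_PWM_CLK>; clock-names = "core"; */
static int adss_pwm_demo_probe(struct platform_device *pdev)
{
	struct clk *core;
	int ret;

	core = devm_clk_get(&pdev->dev, "core");
	if (IS_ERR(core))
		return PTR_ERR(core);

	/* 200 MHz matches the F(200000000, P_GPLL0, 4, 0, 0) entry in
	 * ftbl_adss_pwm_clk_src above. */
	ret = clk_set_rate(core, 200000000);
	if (ret)
		return ret;

	return clk_prepare_enable(core);
}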

View file

@@ -0,0 +1,44 @@
From 462aa0c53397ec5bf78e3e7f68aa8a3ca300f4ba Mon Sep 17 00:00:00 2001
From: Selvam Sathappan Periakaruppan <speriaka@codeaurora.org>
Date: Tue, 24 Mar 2020 19:09:38 +0530
Subject: [PATCH 5/8] clk: qcom: ipq8074: Fix gcc_snoc_bus_timeout_ahb_clk
offset
By default, the gcc driver provides the ipq8074 V2 clocks.
Update the gcc_snoc_bus_timeout_ahb_clk offsets as well, as
required on ipq8074 V2.
Change-Id: I5a6e98d002f5c3354a804e55dd9ebb1f83f7f974
Signed-off-by: Selvam Sathappan Periakaruppan <speriaka@codeaurora.org>
---
drivers/clk/qcom/gcc-ipq8074.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
--- a/drivers/clk/qcom/gcc-ipq8074.c
+++ b/drivers/clk/qcom/gcc-ipq8074.c
@@ -4411,10 +4411,10 @@ static const struct alpha_pll_config nss
};
static struct clk_branch gcc_snoc_bus_timeout2_ahb_clk = {
- .halt_reg = 0x4700c,
+ .halt_reg = 0x47014,
.halt_bit = 31,
.clkr = {
- .enable_reg = 0x4700c,
+ .enable_reg = 0x47014,
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_snoc_bus_timeout2_ahb_clk",
@@ -4429,10 +4429,10 @@ static struct clk_branch gcc_snoc_bus_ti
};
static struct clk_branch gcc_snoc_bus_timeout3_ahb_clk = {
- .halt_reg = 0x47014,
+ .halt_reg = 0x4701C,
.halt_bit = 31,
.clkr = {
- .enable_reg = 0x47014,
+ .enable_reg = 0x4701C,
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_snoc_bus_timeout3_ahb_clk",

View file

@@ -0,0 +1,41 @@
From 52315bec6ed633b6a71f28b746029602f8bd70b9 Mon Sep 17 00:00:00 2001
From: Balaji Prakash J <bjagadee@codeaurora.org>
Date: Wed, 22 Apr 2020 20:35:30 +0530
Subject: [PATCH] clk: ipq8074: fix gcc_blsp1_ahb_clk properties
All the voting enabled clocks does not support the enable
from CBCR register. So, updated gcc_blsp1_ahb_clk enable
register and mask to enable bit in APCS_CLOCK_BRANCH_ENA_VOTE.
Also, the voting controlled clocks are shared among multiple
components like APSS, RPM, NSS, TZ, etc. So, turning the
voting off from APSS does not make the clock off if it has
been voted from another component. Added the flag
BRANCH_HALT_VOTED in order to skip checking the clock
disable status.
This change is referred from the below commits,
1. 246b4fb3af9bd65d8af794aac2f0e7b1ed9cc2dd
2. c8374157d5ae91d3b3e0d513d62808a798b32d3a
Signed-off-by: Balaji Prakash J <bjagadee@codeaurora.org>
Change-Id: I505cb560b31ad27a02c165fbe13bb33a2fc7d230
---
drivers/clk/qcom/gcc-ipq8074.c | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
--- a/drivers/clk/qcom/gcc-ipq8074.c
+++ b/drivers/clk/qcom/gcc-ipq8074.c
@@ -2125,9 +2125,10 @@ struct clk_rcg2 adss_pwm_clk_src = {
static struct clk_branch gcc_blsp1_ahb_clk = {
.halt_reg = 0x01008,
+ .halt_check = BRANCH_HALT_VOTED,
.clkr = {
- .enable_reg = 0x01008,
- .enable_mask = BIT(0),
+ .enable_reg = 0x0b004,
+ .enable_mask = BIT(10),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_ahb_clk",
.parent_hws = (const struct clk_hw *[]){

View file

@@ -0,0 +1,875 @@
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -71,6 +71,9 @@ void brioctl_set(int (*hook)(struct net
void __user *uarg));
int br_ioctl_call(struct net *net, struct net_bridge *br, unsigned int cmd,
struct ifreq *ifr, void __user *uarg);
+extern void br_dev_update_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *nlstats);
+extern bool br_is_hairpin_enabled(struct net_device *dev);
#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING)
int br_multicast_list_adjacent(struct net_device *dev,
@@ -213,4 +216,42 @@ static inline clock_t br_get_ageing_time
}
#endif
+/* QCA NSS ECM support - Start */
+extern struct net_device *br_port_dev_get(struct net_device *dev,
+ unsigned char *addr,
+ struct sk_buff *skb,
+ unsigned int cookie);
+extern void br_refresh_fdb_entry(struct net_device *dev, const char *addr);
+extern void br_fdb_entry_refresh(struct net_device *dev, const char *addr, __u16 vid);
+extern struct net_bridge_fdb_entry *br_fdb_has_entry(struct net_device *dev,
+ const char *addr,
+ __u16 vid);
+extern void br_fdb_update_register_notify(struct notifier_block *nb);
+extern void br_fdb_update_unregister_notify(struct notifier_block *nb);
+
+typedef struct net_bridge_port *br_port_dev_get_hook_t(struct net_device *dev,
+ struct sk_buff *skb,
+ unsigned char *addr,
+ unsigned int cookie);
+extern br_port_dev_get_hook_t __rcu *br_port_dev_get_hook;
+
+#define BR_FDB_EVENT_ADD 0x01
+#define BR_FDB_EVENT_DEL 0x02
+
+struct br_fdb_event {
+ struct net_device *dev;
+ unsigned char addr[6];
+ unsigned char is_local;
+ struct net_bridge *br;
+ struct net_device *orig_dev;
+};
+extern void br_fdb_register_notify(struct notifier_block *nb);
+extern void br_fdb_unregister_notify(struct notifier_block *nb);
+
+typedef struct net_bridge_port *br_get_dst_hook_t(
+ const struct net_bridge_port *src,
+ struct sk_buff **skb);
+extern br_get_dst_hook_t __rcu *br_get_dst_hook;
+/* QCA NSS ECM support - End */
+
#endif
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -143,7 +143,10 @@ extern struct net_device *__vlan_find_de
extern int vlan_for_each(struct net_device *dev,
int (*action)(struct net_device *dev, int vid,
void *arg), void *arg);
+extern void __vlan_dev_update_accel_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *stats); /* QCA NSS ECM support */
extern struct net_device *vlan_dev_real_dev(const struct net_device *dev);
+extern struct net_device *vlan_dev_next_dev(const struct net_device *dev); /* QCA NSS ECM support */
extern u16 vlan_dev_vlan_id(const struct net_device *dev);
extern __be16 vlan_dev_vlan_proto(const struct net_device *dev);
@@ -236,6 +239,12 @@ extern void vlan_vids_del_by_dev(struct
extern bool vlan_uses_dev(const struct net_device *dev);
#else
+static inline void __vlan_dev_update_accel_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+
+} /* QCA NSS ECM support */
+
static inline struct net_device *
__vlan_find_dev_deep_rcu(struct net_device *real_dev,
__be16 vlan_proto, u16 vlan_id)
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2936,6 +2936,10 @@ enum netdev_cmd {
NETDEV_OFFLOAD_XSTATS_REPORT_USED,
NETDEV_OFFLOAD_XSTATS_REPORT_DELTA,
NETDEV_XDP_FEAT_CHANGE,
+ /* QCA NSS ECM Support - Start */
+ NETDEV_BR_JOIN,
+ NETDEV_BR_LEAVE,
+ /* QCA NSS ECM Support - End */
};
const char *netdev_cmd_to_name(enum netdev_cmd cmd);
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -207,6 +207,11 @@ void rt6_multipath_rebalance(struct fib6
void rt6_uncached_list_add(struct rt6_info *rt);
void rt6_uncached_list_del(struct rt6_info *rt);
+/* QCA NSS ECM support - Start */
+int rt6_register_notifier(struct notifier_block *nb);
+int rt6_unregister_notifier(struct notifier_block *nb);
+/* QCA NSS ECM support - End */
+
static inline const struct rt6_info *skb_rt6_info(const struct sk_buff *skb)
{
const struct dst_entry *dst = skb_dst(skb);
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -249,6 +249,13 @@ static inline int neigh_parms_family(str
return p->tbl->family;
}
+/* QCA NSS ECM support - Start */
+struct neigh_mac_update {
+ unsigned char old_mac[ALIGN(MAX_ADDR_LEN, sizeof(unsigned long))];
+ unsigned char update_mac[ALIGN(MAX_ADDR_LEN, sizeof(unsigned long))];
+};
+/* QCA NSS ECM support - End */
+
#define NEIGH_PRIV_ALIGN sizeof(long long)
#define NEIGH_ENTRY_SIZE(size) ALIGN((size), NEIGH_PRIV_ALIGN)
@@ -395,6 +402,11 @@ void __neigh_for_each_release(struct nei
int (*cb)(struct neighbour *));
int neigh_xmit(int fam, struct net_device *, const void *, struct sk_buff *);
+/* QCA NSS ECM support - Start */
+extern void neigh_mac_update_register_notify(struct notifier_block *nb);
+extern void neigh_mac_update_unregister_notify(struct notifier_block *nb);
+/* QCA NSS ECM support - End */
+
struct neigh_seq_state {
struct seq_net_private p;
struct neigh_table *tbl;
@@ -600,4 +612,5 @@ static inline void neigh_update_is_route
*notify = 1;
}
}
+
#endif
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -237,6 +237,11 @@ struct rtable *rt_dst_alloc(struct net_d
unsigned int flags, u16 type, bool noxfrm);
struct rtable *rt_dst_clone(struct net_device *dev, struct rtable *rt);
+/* QCA NSS ECM support - Start */
+int ip_rt_register_notifier(struct notifier_block *nb);
+int ip_rt_unregister_notifier(struct notifier_block *nb);
+/* QCA NSS ECM support - End */
+
struct in_ifaddr;
void fib_add_ifaddr(struct in_ifaddr *);
void fib_del_ifaddr(struct in_ifaddr *, struct in_ifaddr *);
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -2266,4 +2266,6 @@ void br_do_suppress_nd(struct sk_buff *s
u16 vid, struct net_bridge_port *p, struct nd_msg *msg);
struct nd_msg *br_is_nd_neigh_msg(struct sk_buff *skb, struct nd_msg *m);
bool br_is_neigh_suppress_enabled(const struct net_bridge_port *p, u16 vid);
+#define __br_get(__hook, __default, __args ...) \
+ (__hook ? (__hook(__args)) : (__default)) /* QCA NSS ECM support */
#endif
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -72,6 +72,28 @@ bool vlan_do_receive(struct sk_buff **sk
return true;
}
+/* QCA NSS ECM support - Start */
+/* Update the VLAN device with statistics from network offload engines */
+void __vlan_dev_update_accel_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *nlstats)
+{
+ struct vlan_pcpu_stats *stats;
+
+ if (!is_vlan_dev(dev))
+ return;
+
+ stats = per_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats, 0);
+
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_add(&stats->rx_packets, nlstats->rx_packets);
+ u64_stats_add(&stats->rx_bytes, nlstats->rx_bytes);
+ u64_stats_add(&stats->tx_packets, nlstats->tx_packets);
+ u64_stats_add(&stats->tx_bytes, nlstats->tx_bytes);
+ u64_stats_update_end(&stats->syncp);
+}
+EXPORT_SYMBOL(__vlan_dev_update_accel_stats);
+/* QCA NSS ECM support - End */
+
/* Must be invoked with rcu_read_lock. */
struct net_device *__vlan_find_dev_deep_rcu(struct net_device *dev,
__be16 vlan_proto, u16 vlan_id)
@@ -110,6 +132,15 @@ struct net_device *vlan_dev_real_dev(con
}
EXPORT_SYMBOL(vlan_dev_real_dev);
+/* QCA NSS ECM support - Start */
+/* Caller is responsible to hold the reference of the returned device */
+struct net_device *vlan_dev_next_dev(const struct net_device *dev)
+{
+ return vlan_dev_priv(dev)->real_dev;
+}
+EXPORT_SYMBOL(vlan_dev_next_dev);
+/* QCA NSS ECM support - End */
+
u16 vlan_dev_vlan_id(const struct net_device *dev)
{
return vlan_dev_priv(dev)->vlan_id;
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -33,6 +33,20 @@ static const struct rhashtable_params br
static struct kmem_cache *br_fdb_cache __read_mostly;
+ATOMIC_NOTIFIER_HEAD(br_fdb_notifier_list);
+
+void br_fdb_register_notify(struct notifier_block *nb)
+{
+ atomic_notifier_chain_register(&br_fdb_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(br_fdb_register_notify);
+
+void br_fdb_unregister_notify(struct notifier_block *nb)
+{
+ atomic_notifier_chain_unregister(&br_fdb_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(br_fdb_unregister_notify);
+
int __init br_fdb_init(void)
{
br_fdb_cache = kmem_cache_create("bridge_fdb_cache",
@@ -195,6 +209,25 @@ static void fdb_notify(struct net_bridge
if (swdev_notify)
br_switchdev_fdb_notify(br, fdb, type);
+ /* QCA NSS ECM support - Start */
+ if (fdb->dst) {
+ int event;
+ struct br_fdb_event fdb_event;
+
+ if (type == RTM_NEWNEIGH)
+ event = BR_FDB_EVENT_ADD;
+ else
+ event = BR_FDB_EVENT_DEL;
+
+ fdb_event.dev = fdb->dst->dev;
+ ether_addr_copy(fdb_event.addr, fdb->key.addr.addr);
+ fdb_event.is_local = test_bit(BR_FDB_LOCAL, &fdb->flags);
+ atomic_notifier_call_chain(&br_fdb_notifier_list,
+ event,
+ (void *)&fdb_event);
+ }
+ /* QCA NSS ECM support - End */
+
skb = nlmsg_new(fdb_nlmsg_size(), GFP_ATOMIC);
if (skb == NULL)
goto errout;
@@ -519,6 +552,22 @@ out:
spin_unlock_bh(&br->hash_lock);
}
+/* QCA NSS ECM support - Start */
+ATOMIC_NOTIFIER_HEAD(br_fdb_update_notifier_list);
+
+void br_fdb_update_register_notify(struct notifier_block *nb)
+{
+ atomic_notifier_chain_register(&br_fdb_update_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(br_fdb_update_register_notify);
+
+void br_fdb_update_unregister_notify(struct notifier_block *nb)
+{
+ atomic_notifier_chain_unregister(&br_fdb_update_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(br_fdb_update_unregister_notify);
+/* QCA NSS ECM support - End */
+
void br_fdb_cleanup(struct work_struct *work)
{
struct net_bridge *br = container_of(work, struct net_bridge,
@@ -527,6 +576,7 @@ void br_fdb_cleanup(struct work_struct *
unsigned long delay = hold_time(br);
unsigned long work_delay = delay;
unsigned long now = jiffies;
+ u8 mac_addr[6]; /* QCA NSS ECM support */
/* this part is tricky, in order to avoid blocking learning and
* consequently forwarding, we rely on rcu to delete objects with
@@ -553,8 +603,15 @@ void br_fdb_cleanup(struct work_struct *
work_delay = min(work_delay, this_timer - now);
} else {
spin_lock_bh(&br->hash_lock);
- if (!hlist_unhashed(&f->fdb_node))
+ if (!hlist_unhashed(&f->fdb_node)) {
+ ether_addr_copy(mac_addr, f->key.addr.addr);
fdb_delete(br, f, true);
+ /* QCA NSS ECM support - Start */
+ atomic_notifier_call_chain(
+ &br_fdb_update_notifier_list, 0,
+ (void *)mac_addr);
+ /* QCA NSS ECM support - End */
+ }
spin_unlock_bh(&br->hash_lock);
}
}
@@ -891,6 +948,12 @@ void br_fdb_update(struct net_bridge *br
*/
if (unlikely(test_bit(BR_FDB_LOCKED, &fdb->flags)))
clear_bit(BR_FDB_LOCKED, &fdb->flags);
+
+ /* QCA NSS ECM support - Start */
+ atomic_notifier_call_chain(
+ &br_fdb_update_notifier_list,
+ 0, (void *)addr);
+ /* QCA NSS ECM support - End */
}
if (unlikely(test_bit(BR_FDB_ADDED_BY_USER, &flags)))
@@ -914,6 +977,64 @@ void br_fdb_update(struct net_bridge *br
}
}
+/* QCA NSS ECM support - Start */
+/* Refresh FDB entries for bridge packets being forwarded by offload engines */
+void br_refresh_fdb_entry(struct net_device *dev, const char *addr)
+{
+ struct net_bridge_port *p = br_port_get_rcu(dev);
+
+ if (!p || p->state == BR_STATE_DISABLED)
+ return;
+
+ if (!is_valid_ether_addr(addr)) {
+ pr_info("bridge: Attempt to refresh with invalid ether address %pM\n",
+ addr);
+ return;
+ }
+
+ rcu_read_lock();
+ br_fdb_update(p->br, p, addr, 0, true);
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(br_refresh_fdb_entry);
+
+/* Update timestamp of FDB entries for bridge packets being forwarded by offload engines */
+void br_fdb_entry_refresh(struct net_device *dev, const char *addr, __u16 vid)
+{
+ struct net_bridge_fdb_entry *fdb;
+ struct net_bridge_port *p = br_port_get_rcu(dev);
+
+ if (!p || p->state == BR_STATE_DISABLED)
+ return;
+
+ rcu_read_lock();
+ fdb = fdb_find_rcu(&p->br->fdb_hash_tbl, addr, vid);
+ if (likely(fdb)) {
+ fdb->updated = jiffies;
+ }
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(br_fdb_entry_refresh);
+
+/* Look up the MAC address in the device's bridge fdb table */
+struct net_bridge_fdb_entry *br_fdb_has_entry(struct net_device *dev,
+ const char *addr, __u16 vid)
+{
+ struct net_bridge_port *p = br_port_get_rcu(dev);
+ struct net_bridge_fdb_entry *fdb;
+
+ if (!p || p->state == BR_STATE_DISABLED)
+ return NULL;
+
+ rcu_read_lock();
+ fdb = fdb_find_rcu(&p->br->fdb_hash_tbl, addr, vid);
+ rcu_read_unlock();
+
+ return fdb;
+}
+EXPORT_SYMBOL_GPL(br_fdb_has_entry);
+
+/* QCA NSS ECM support - End */
/* Dump information about entries, in response to GETNEIGH */
int br_fdb_dump(struct sk_buff *skb,
struct netlink_callback *cb,
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -26,6 +26,12 @@
#include "br_private.h"
+/* QCA NSS ECM support - Start */
+/* Hook for external forwarding logic */
+br_port_dev_get_hook_t __rcu *br_port_dev_get_hook __read_mostly;
+EXPORT_SYMBOL_GPL(br_port_dev_get_hook);
+/* QCA NSS ECM support - End */
+
/*
* Determine initial path cost based on speed.
* using recommendations from 802.1d standard
@@ -697,6 +703,8 @@ int br_add_if(struct net_bridge *br, str
kobject_uevent(&p->kobj, KOBJ_ADD);
+ call_netdevice_notifiers(NETDEV_BR_JOIN, dev); /* QCA NSS ECM support */
+
return 0;
err6:
@@ -732,6 +740,8 @@ int br_del_if(struct net_bridge *br, str
if (!p || p->br != br)
return -EINVAL;
+ call_netdevice_notifiers(NETDEV_BR_LEAVE, dev); /* QCA NSS ECM support */
+
/* Since more than one interface can be attached to a bridge,
* there still maybe an alternate path for netconsole to use;
* therefore there is no reason for a NETDEV_RELEASE event.
@@ -775,3 +785,97 @@ bool br_port_flag_is_set(const struct ne
return p->flags & flag;
}
EXPORT_SYMBOL_GPL(br_port_flag_is_set);
+
+/* br_port_dev_get()
+ * If a skb is provided, and the br_port_dev_get_hook_t hook exists,
+ * use that to try and determine the egress port for that skb.
+ * If not, or no egress port could be determined, use the given addr
+ * to identify the port to which it is reachable,
+ * returing a reference to the net device associated with that port.
+ *
+ * NOTE: Return NULL if given dev is not a bridge or the mac has no
+ * associated port.
+ */
+struct net_device *br_port_dev_get(struct net_device *dev, unsigned char *addr,
+ struct sk_buff *skb,
+ unsigned int cookie)
+{
+ struct net_bridge_fdb_entry *fdbe;
+ struct net_bridge *br;
+ struct net_device *netdev = NULL;
+
+ /* Is this a bridge? */
+ if (!(dev->priv_flags & IFF_EBRIDGE))
+ return NULL;
+
+ rcu_read_lock();
+
+ /* If the hook exists and the skb isn't NULL, try and get the port */
+ if (skb) {
+ br_port_dev_get_hook_t *port_dev_get_hook;
+
+ port_dev_get_hook = rcu_dereference(br_port_dev_get_hook);
+ if (port_dev_get_hook) {
+ struct net_bridge_port *pdst =
+ __br_get(port_dev_get_hook, NULL, dev, skb,
+ addr, cookie);
+ if (pdst) {
+ dev_hold(pdst->dev);
+ netdev = pdst->dev;
+ goto out;
+ }
+ }
+ }
+
+ /* Either there is no hook, or can't
+ * determine the port to use - fall back to using FDB
+ */
+
+ br = netdev_priv(dev);
+
+ /* Lookup the fdb entry and get reference to the port dev */
+ fdbe = br_fdb_find_rcu(br, addr, 0);
+ if (fdbe && fdbe->dst) {
+ netdev = fdbe->dst->dev; /* port device */
+ dev_hold(netdev);
+ }
+out:
+ rcu_read_unlock();
+ return netdev;
+}
+EXPORT_SYMBOL_GPL(br_port_dev_get);
+
+/* Update bridge statistics for bridge packets processed by offload engines */
+void br_dev_update_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *nlstats)
+{
+ struct pcpu_sw_netstats *tstats;
+
+ /* Is this a bridge? */
+ if (!(dev->priv_flags & IFF_EBRIDGE))
+ return;
+
+ tstats = this_cpu_ptr(dev->tstats);
+
+ u64_stats_update_begin(&tstats->syncp);
+ u64_stats_add(&tstats->rx_packets, nlstats->rx_packets);
+ u64_stats_add(&tstats->rx_bytes, nlstats->rx_bytes);
+ u64_stats_add(&tstats->tx_packets, nlstats->tx_packets);
+ u64_stats_add(&tstats->tx_bytes, nlstats->tx_bytes);
+ u64_stats_update_end(&tstats->syncp);
+}
+EXPORT_SYMBOL_GPL(br_dev_update_stats);
+
+/* QCA NSS ECM support - Start */
+/* API to know if hairpin feature is enabled/disabled on this bridge port */
+bool br_is_hairpin_enabled(struct net_device *dev)
+{
+ struct net_bridge_port *port = br_port_get_check_rcu(dev);
+
+ if (likely(port))
+ return port->flags & BR_HAIRPIN_MODE;
+ return false;
+}
+EXPORT_SYMBOL_GPL(br_is_hairpin_enabled);
+
+/* QCA NSS ECM support - End */
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1275,6 +1275,22 @@ static void neigh_update_hhs(struct neig
}
}
+/* QCA NSS ECM support - Start */
+ATOMIC_NOTIFIER_HEAD(neigh_mac_update_notifier_list);
+
+void neigh_mac_update_register_notify(struct notifier_block *nb)
+{
+ atomic_notifier_chain_register(&neigh_mac_update_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(neigh_mac_update_register_notify);
+
+void neigh_mac_update_unregister_notify(struct notifier_block *nb)
+{
+ atomic_notifier_chain_unregister(&neigh_mac_update_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(neigh_mac_update_unregister_notify);
+/* QCA NSS ECM support - End */
+
/* Generic update routine.
-- lladdr is new lladdr or NULL, if it is not supplied.
-- new is new state.
@@ -1303,6 +1319,7 @@ static int __neigh_update(struct neighbo
struct net_device *dev;
int err, notify = 0;
u8 old;
+ struct neigh_mac_update nmu; /* QCA NSS ECM support */
trace_neigh_update(neigh, lladdr, new, flags, nlmsg_pid);
@@ -1317,7 +1334,10 @@ static int __neigh_update(struct neighbo
new = old;
goto out;
}
- if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
+
+ memset(&nmu, 0, sizeof(struct neigh_mac_update)); /* QCA NSS ECM support */
+
+ if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
(old & (NUD_NOARP | NUD_PERMANENT)))
goto out;
@@ -1354,7 +1374,12 @@ static int __neigh_update(struct neighbo
- compare new & old
- if they are different, check override flag
*/
- if ((old & NUD_VALID) &&
+ /* QCA NSS ECM update - Start */
+ memcpy(nmu.old_mac, neigh->ha, dev->addr_len);
+ memcpy(nmu.update_mac, lladdr, dev->addr_len);
+ /* QCA NSS ECM update - End */
+
+ if ((old & NUD_VALID) &&
!memcmp(lladdr, neigh->ha, dev->addr_len))
lladdr = neigh->ha;
} else {
@@ -1476,8 +1501,11 @@ out:
neigh_update_gc_list(neigh);
if (managed_update)
neigh_update_managed_list(neigh);
- if (notify)
+ if (notify) {
neigh_update_notify(neigh, nlmsg_pid);
+ atomic_notifier_call_chain(&neigh_mac_update_notifier_list, 0,
+ (struct neigh_mac_update *)&nmu); /* QCA NSS ECM support */
+ }
trace_neigh_update_done(neigh, err);
return err;
}
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1211,6 +1211,9 @@ static bool fib_valid_key_len(u32 key, u
static void fib_remove_alias(struct trie *t, struct key_vector *tp,
struct key_vector *l, struct fib_alias *old);
+/* Define route change notification chain. */
+static BLOCKING_NOTIFIER_HEAD(iproute_chain); /* QCA NSS ECM support */
+
/* Caller must hold RTNL. */
int fib_table_insert(struct net *net, struct fib_table *tb,
struct fib_config *cfg, struct netlink_ext_ack *extack)
@@ -1404,6 +1407,9 @@ int fib_table_insert(struct net *net, st
rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, new_fa->tb_id,
&cfg->fc_nlinfo, nlflags);
succeeded:
+ blocking_notifier_call_chain(&iproute_chain,
+ RTM_NEWROUTE, fi);
+
return 0;
out_remove_new_fa:
@@ -1775,6 +1781,9 @@ int fib_table_delete(struct net *net, st
if (fa_to_delete->fa_state & FA_S_ACCESSED)
rt_cache_flush(cfg->fc_nlinfo.nl_net);
+ blocking_notifier_call_chain(&iproute_chain,
+ RTM_DELROUTE, fa_to_delete->fa_info);
+
fib_release_info(fa_to_delete->fa_info);
alias_free_mem_rcu(fa_to_delete);
return 0;
@@ -2407,6 +2416,20 @@ void __init fib_trie_init(void)
0, SLAB_PANIC | SLAB_ACCOUNT, NULL);
}
+/* QCA NSS ECM support - Start */
+int ip_rt_register_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&iproute_chain, nb);
+}
+EXPORT_SYMBOL(ip_rt_register_notifier);
+
+int ip_rt_unregister_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(&iproute_chain, nb);
+}
+EXPORT_SYMBOL(ip_rt_unregister_notifier);
+/* QCA NSS ECM support - End */
+
struct fib_table *fib_trie_table(u32 id, struct fib_table *alias)
{
struct fib_table *tb;
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -666,6 +666,7 @@ void ndisc_send_ns(struct net_device *de
if (skb)
ndisc_send_skb(skb, daddr, saddr);
}
+EXPORT_SYMBOL(ndisc_send_ns);
void ndisc_send_rs(struct net_device *dev, const struct in6_addr *saddr,
const struct in6_addr *daddr)
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -197,6 +197,9 @@ static void rt6_uncached_list_flush_dev(
}
}
+/* Define route change notification chain. */
+ATOMIC_NOTIFIER_HEAD(ip6route_chain); /* QCA NSS ECM support */
+
static inline const void *choose_neigh_daddr(const struct in6_addr *p,
struct sk_buff *skb,
const void *daddr)
@@ -3864,6 +3867,10 @@ int ip6_route_add(struct fib6_config *cf
return PTR_ERR(rt);
err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, extack);
+ if (!err)
+ atomic_notifier_call_chain(&ip6route_chain,
+ RTM_NEWROUTE, rt);
+
fib6_info_release(rt);
return err;
@@ -3885,6 +3892,9 @@ static int __ip6_del_rt(struct fib6_info
err = fib6_del(rt, info);
spin_unlock_bh(&table->tb6_lock);
+ if (!err)
+ atomic_notifier_call_chain(&ip6route_chain,
+ RTM_DELROUTE, rt);
out:
fib6_info_release(rt);
return err;
@@ -6329,6 +6339,20 @@ static int ip6_route_dev_notify(struct n
return NOTIFY_OK;
}
+/* QCA NSS ECM support - Start */
+int rt6_register_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(&ip6route_chain, nb);
+}
+EXPORT_SYMBOL(rt6_register_notifier);
+
+int rt6_unregister_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&ip6route_chain, nb);
+}
+EXPORT_SYMBOL(rt6_unregister_notifier);
+/* QCA NSS ECM support - End */
+
/*
* /proc
*/
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1673,6 +1673,7 @@ const char *netdev_cmd_to_name(enum netd
N(PRE_CHANGEADDR) N(OFFLOAD_XSTATS_ENABLE) N(OFFLOAD_XSTATS_DISABLE)
N(OFFLOAD_XSTATS_REPORT_USED) N(OFFLOAD_XSTATS_REPORT_DELTA)
N(XDP_FEAT_CHANGE)
+ N(BR_JOIN) N(BR_LEAVE)
}
#undef N
return "UNKNOWN_NETDEV_EVENT";
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1002,6 +1002,7 @@ void inet6_ifa_finish_destroy(struct ine
kfree_rcu(ifp, rcu);
}
+EXPORT_SYMBOL(inet6_ifa_finish_destroy);
static void
ipv6_link_dev_addr(struct inet6_dev *idev, struct inet6_ifaddr *ifp)
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -440,6 +440,15 @@ static inline __be32 vxlan_compute_rco(u
return vni_field;
}
+/*
+ * vxlan_get_vni()
+ * Returns the VNI corresponding to the tunnel
+ */
+static inline u32 vxlan_get_vni(struct vxlan_dev *vxlan_tun)
+{
+ return be32_to_cpu(vxlan_tun->cfg.vni);
+}
+
static inline unsigned short vxlan_get_sk_family(struct vxlan_sock *vs)
{
return vs->sock->sk->sk_family;
--- a/include/uapi/linux/in.h
+++ b/include/uapi/linux/in.h
@@ -63,6 +63,8 @@ enum {
#define IPPROTO_MTP IPPROTO_MTP
IPPROTO_BEETPH = 94, /* IP option pseudo header for BEET */
#define IPPROTO_BEETPH IPPROTO_BEETPH
+ IPPROTO_ETHERIP = 97, /* ETHERIP protocol number */
+#define IPPROTO_ETHERIP IPPROTO_ETHERIP
IPPROTO_ENCAP = 98, /* Encapsulation Header */
#define IPPROTO_ENCAP IPPROTO_ENCAP
IPPROTO_PIM = 103, /* Protocol Independent Multicast */
@@ -327,7 +329,7 @@ struct sockaddr_in {
#endif
/* <asm/byteorder.h> contains the htonl type stuff.. */
-#include <asm/byteorder.h>
+#include <asm/byteorder.h>
#endif /* _UAPI_LINUX_IN_H */
--- a/tools/include/uapi/linux/in.h
+++ b/tools/include/uapi/linux/in.h
@@ -63,6 +63,8 @@ enum {
#define IPPROTO_MTP IPPROTO_MTP
IPPROTO_BEETPH = 94, /* IP option pseudo header for BEET */
#define IPPROTO_BEETPH IPPROTO_BEETPH
+ IPPROTO_ETHERIP = 97, /* ETHERIP protocol number */
+#define IPPROTO_ETHERIP IPPROTO_ETHERIP
IPPROTO_ENCAP = 98, /* Encapsulation Header */
#define IPPROTO_ENCAP IPPROTO_ENCAP
IPPROTO_PIM = 103, /* Protocol Independent Multicast */
@@ -327,7 +329,7 @@ struct sockaddr_in {
#endif
/* <asm/byteorder.h> contains the htonl type stuff.. */
-#include <asm/byteorder.h>
+#include <asm/byteorder.h>
#endif /* _UAPI_LINUX_IN_H */
--- a/net/netfilter/nf_conntrack_ecache.c
+++ b/net/netfilter/nf_conntrack_ecache.c
@@ -266,7 +266,6 @@ void nf_conntrack_register_notifier(stru
mutex_lock(&nf_ct_ecache_mutex);
notify = rcu_dereference_protected(net->ct.nf_conntrack_event_cb,
lockdep_is_held(&nf_ct_ecache_mutex));
- WARN_ON_ONCE(notify);
rcu_assign_pointer(net->ct.nf_conntrack_event_cb, new);
mutex_unlock(&nf_ct_ecache_mutex);
}
--- a/include/net/netns/conntrack.h
+++ b/include/net/netns/conntrack.h
@@ -26,6 +26,7 @@ struct nf_tcp_net {
unsigned int timeouts[TCP_CONNTRACK_TIMEOUT_MAX];
u8 tcp_loose;
u8 tcp_be_liberal;
+ u8 tcp_no_window_check;
u8 tcp_max_retrans;
u8 tcp_ignore_invalid_rst;
#if IS_ENABLED(CONFIG_NF_FLOW_TABLE)
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -515,11 +515,15 @@ tcp_in_window(struct nf_conn *ct, enum i
struct ip_ct_tcp *state = &ct->proto.tcp;
struct ip_ct_tcp_state *sender = &state->seen[dir];
struct ip_ct_tcp_state *receiver = &state->seen[!dir];
+ const struct nf_tcp_net *tn = nf_tcp_pernet(nf_ct_net(ct));
__u32 seq, ack, sack, end, win, swin;
bool in_recv_win, seq_ok;
s32 receiver_offset;
u16 win_raw;
+ if (tn->tcp_no_window_check)
+ return NFCT_TCP_ACCEPT;
+
/*
* Get the required data from the packet.
*/
@@ -1285,7 +1289,7 @@ int nf_conntrack_tcp_packet(struct nf_co
IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED &&
timeouts[new_state] > timeouts[TCP_CONNTRACK_UNACK])
timeout = timeouts[TCP_CONNTRACK_UNACK];
- else if (ct->proto.tcp.last_win == 0 &&
+ else if (!tn->tcp_no_window_check && ct->proto.tcp.last_win == 0 &&
timeouts[new_state] > timeouts[TCP_CONNTRACK_RETRANS])
timeout = timeouts[TCP_CONNTRACK_RETRANS];
else
@@ -1601,6 +1605,9 @@ void nf_conntrack_tcp_init_net(struct ne
*/
tn->tcp_be_liberal = 0;
+ /* Skip Windows Check */
+ tn->tcp_no_window_check = 0;
+
/* If it's non-zero, we turn off RST sequence number check */
tn->tcp_ignore_invalid_rst = 0;
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -633,6 +633,7 @@ enum nf_ct_sysctl_index {
#endif
NF_SYSCTL_CT_PROTO_TCP_LOOSE,
NF_SYSCTL_CT_PROTO_TCP_LIBERAL,
+ NF_SYSCTL_CT_PROTO_TCP_NO_WINDOW_CHECK,
NF_SYSCTL_CT_PROTO_TCP_IGNORE_INVALID_RST,
NF_SYSCTL_CT_PROTO_TCP_MAX_RETRANS,
NF_SYSCTL_CT_PROTO_TIMEOUT_UDP,
@@ -840,6 +841,14 @@ static struct ctl_table nf_ct_sysctl_tab
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
+ [NF_SYSCTL_CT_PROTO_TCP_NO_WINDOW_CHECK] = {
+ .procname = "nf_conntrack_tcp_no_window_check",
+ .maxlen = sizeof(u8),
+ .mode = 0644,
+ .proc_handler = proc_dou8vec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
+ },
[NF_SYSCTL_CT_PROTO_TCP_IGNORE_INVALID_RST] = {
.procname = "nf_conntrack_tcp_ignore_invalid_rst",
.maxlen = sizeof(u8),
@@ -1050,6 +1059,7 @@ static void nf_conntrack_standalone_init
XASSIGN(LOOSE, &tn->tcp_loose);
XASSIGN(LIBERAL, &tn->tcp_be_liberal);
+ XASSIGN(NO_WINDOW_CHECK, &tn->tcp_no_window_check);
XASSIGN(MAX_RETRANS, &tn->tcp_max_retrans);
XASSIGN(IGNORE_INVALID_RST, &tn->tcp_ignore_invalid_rst);
#undef XASSIGN
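
The exported hooks above are intended to be consumed by an out-of-tree offload engine (ECM). The sketch below shows one plausible way such a module could attach to the bridge FDB update notifier added in br_fdb.c; the module itself and the commented-out flush helper are hypothetical and only illustrate the intended call pattern.

#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/if_bridge.h>

static int demo_fdb_update_cb(struct notifier_block *nb,
			      unsigned long event, void *data)
{
	u8 *mac = data;	/* MAC whose FDB entry was updated or aged out */

	/* ecm_flush_flows_by_mac(mac); -- hypothetical: tear down any
	 * accelerated flows learned through this station. */
	(void)mac;
	return NOTIFY_DONE;
}

static struct notifier_block demo_fdb_update_nb = {
	.notifier_call = demo_fdb_update_cb,
};

static int __init demo_init(void)
{
	br_fdb_update_register_notify(&demo_fdb_update_nb);
	return 0;
}

static void __exit demo_exit(void)
{
	br_fdb_update_unregister_notify(&demo_fdb_update_nb);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Statistics flow in the other direction through br_dev_update_stats() and __vlan_dev_update_accel_stats(), which an engine would call periodically with the byte and packet counts of offloaded flows so that the bridge and VLAN devices keep reporting accurate totals.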

View file

@@ -0,0 +1,600 @@
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -48,6 +48,7 @@
#include <net/slhc_vj.h>
#include <linux/atomic.h>
#include <linux/refcount.h>
+#include <linux/if_pppox.h>
#include <linux/nsproxy.h>
#include <net/net_namespace.h>
@@ -254,6 +255,25 @@ struct ppp_net {
#define seq_before(a, b) ((s32)((a) - (b)) < 0)
#define seq_after(a, b) ((s32)((a) - (b)) > 0)
+
+/*
+ * Registration/Unregistration methods
+ * for PPP channel connect and disconnect event notifications.
+ */
+RAW_NOTIFIER_HEAD(ppp_channel_connection_notifier_list);
+
+void ppp_channel_connection_register_notify(struct notifier_block *nb)
+{
+ raw_notifier_chain_register(&ppp_channel_connection_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(ppp_channel_connection_register_notify);
+
+void ppp_channel_connection_unregister_notify(struct notifier_block *nb)
+{
+ raw_notifier_chain_unregister(&ppp_channel_connection_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(ppp_channel_connection_unregister_notify);
+
/* Prototypes. */
static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
struct file *file, unsigned int cmd, unsigned long arg);
@@ -3453,7 +3473,10 @@ ppp_connect_channel(struct channel *pch,
struct ppp_net *pn;
int ret = -ENXIO;
int hdrlen;
+ int ppp_proto;
+ int version;
+ int notify = 0;
pn = ppp_pernet(pch->chan_net);
mutex_lock(&pn->all_ppp_mutex);
@@ -3485,13 +3508,40 @@ ppp_connect_channel(struct channel *pch,
++ppp->n_channels;
pch->ppp = ppp;
refcount_inc(&ppp->file.refcnt);
+
+ /* Set the netdev priv flag if the protocol
+ * is L2TP or PPTP. Return success in all cases.
+ */
+ if (!pch->chan)
+ goto out2;
+
+ ppp_proto = ppp_channel_get_protocol(pch->chan);
+ if (ppp_proto == PX_PROTO_PPTP) {
+ ppp->dev->priv_flags_ext |= IFF_EXT_PPP_PPTP;
+ } else if (ppp_proto == PX_PROTO_OL2TP) {
+ version = ppp_channel_get_proto_version(pch->chan);
+ if (version == 2)
+ ppp->dev->priv_flags_ext |= IFF_EXT_PPP_L2TPV2;
+ else if (version == 3)
+ ppp->dev->priv_flags_ext |= IFF_EXT_PPP_L2TPV3;
+ }
+ notify = 1;
+
+ out2:
ppp_unlock(ppp);
ret = 0;
-
outl:
write_unlock_bh(&pch->upl);
out:
mutex_unlock(&pn->all_ppp_mutex);
+
+ if (notify && ppp && ppp->dev) {
+ dev_hold(ppp->dev);
+ raw_notifier_call_chain(&ppp_channel_connection_notifier_list,
+ PPP_CHANNEL_CONNECT, ppp->dev);
+ dev_put(ppp->dev);
+ }
+
return ret;
}
@@ -3509,6 +3559,13 @@ ppp_disconnect_channel(struct channel *p
pch->ppp = NULL;
write_unlock_bh(&pch->upl);
if (ppp) {
+ if (ppp->dev) {
+ dev_hold(ppp->dev);
+ raw_notifier_call_chain(&ppp_channel_connection_notifier_list,
+ PPP_CHANNEL_DISCONNECT, ppp->dev);
+ dev_put(ppp->dev);
+ }
+
/* remove it from the ppp unit's list */
ppp_lock(ppp);
list_del(&pch->clist);
@@ -3588,6 +3645,222 @@ static void *unit_find(struct idr *p, in
return idr_find(p, n);
}
+/* Updates the PPP interface statistics. */
+void ppp_update_stats(struct net_device *dev, unsigned long rx_packets,
+ unsigned long rx_bytes, unsigned long tx_packets,
+ unsigned long tx_bytes, unsigned long rx_errors,
+ unsigned long tx_errors, unsigned long rx_dropped,
+ unsigned long tx_dropped)
+{
+ struct ppp *ppp;
+
+ if (!dev)
+ return;
+
+ if (dev->type != ARPHRD_PPP)
+ return;
+
+ ppp = netdev_priv(dev);
+
+ ppp_xmit_lock(ppp);
+ ppp->stats64.tx_packets += tx_packets;
+ ppp->stats64.tx_bytes += tx_bytes;
+ ppp->dev->stats.tx_errors += tx_errors;
+ ppp->dev->stats.tx_dropped += tx_dropped;
+ if (tx_packets)
+ ppp->last_xmit = jiffies;
+ ppp_xmit_unlock(ppp);
+
+ ppp_recv_lock(ppp);
+ ppp->stats64.rx_packets += rx_packets;
+ ppp->stats64.rx_bytes += rx_bytes;
+ ppp->dev->stats.rx_errors += rx_errors;
+ ppp->dev->stats.rx_dropped += rx_dropped;
+ if (rx_packets)
+ ppp->last_recv = jiffies;
+ ppp_recv_unlock(ppp);
+}
+
+/* Returns >0 if the device is a multilink PPP netdevice, 0 if not or < 0 if
+ * the device is not PPP.
+ */
+int ppp_is_multilink(struct net_device *dev)
+{
+ struct ppp *ppp;
+ unsigned int flags;
+
+ if (!dev)
+ return -1;
+
+ if (dev->type != ARPHRD_PPP)
+ return -1;
+
+ ppp = netdev_priv(dev);
+ ppp_lock(ppp);
+ flags = ppp->flags;
+ ppp_unlock(ppp);
+
+ if (flags & SC_MULTILINK)
+ return 1;
+
+ return 0;
+}
+EXPORT_SYMBOL(ppp_is_multilink);
+
+/* ppp_channel_get_protocol()
+ * Call this to obtain the underlying protocol of the PPP channel,
+ * e.g. PX_PROTO_OE
+ *
+ * NOTE: Some channels do not use PX sockets so the protocol value may be very
+ * different for them.
+ * NOTE: -1 indicates failure.
+ * NOTE: Once you know the channel protocol you may then either cast 'chan' to
+ * its sub-class or use the channel protocol specific APIs as provided by that
+ * channel sub type.
+ */
+int ppp_channel_get_protocol(struct ppp_channel *chan)
+{
+ if (!chan->ops->get_channel_protocol)
+ return -1;
+
+ return chan->ops->get_channel_protocol(chan);
+}
+EXPORT_SYMBOL(ppp_channel_get_protocol);
+
+/* ppp_channel_get_proto_version()
+ * Call this to get channel protocol version
+ */
+int ppp_channel_get_proto_version(struct ppp_channel *chan)
+{
+ if (!chan->ops->get_channel_protocol_ver)
+ return -1;
+
+ return chan->ops->get_channel_protocol_ver(chan);
+}
+EXPORT_SYMBOL(ppp_channel_get_proto_version);
+
+/* ppp_channel_hold()
+ * Call this to hold a channel.
+ *
+ * Returns true on success or false if the hold could not happen.
+ *
+ * NOTE: chan must be protected against destruction during this call -
+ * either by correct locking etc. or because you already have an implicit
+ * or explicit hold to the channel already and this is an additional hold.
+ */
+bool ppp_channel_hold(struct ppp_channel *chan)
+{
+ if (!chan->ops->hold)
+ return false;
+
+ chan->ops->hold(chan);
+ return true;
+}
+EXPORT_SYMBOL(ppp_channel_hold);
+
+/* ppp_channel_release()
+ * Call this to release a hold you have upon a channel
+ */
+void ppp_channel_release(struct ppp_channel *chan)
+{
+ chan->ops->release(chan);
+}
+EXPORT_SYMBOL(ppp_channel_release);
+
+/* Check if the ppp xmit lock is held */
+bool ppp_is_xmit_locked(struct net_device *dev)
+{
+ struct ppp *ppp;
+
+ if (!dev)
+ return false;
+
+ if (dev->type != ARPHRD_PPP)
+ return false;
+
+ ppp = netdev_priv(dev);
+ if (!ppp)
+ return false;
+
+ if (spin_is_locked(&(ppp)->wlock))
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL(ppp_is_xmit_locked);
+
+/* ppp_hold_channels()
+ * Returns the PPP channels of the PPP device, storing each one into
+ * channels[].
+ *
+ * channels[] has chan_sz elements.
+ * This function returns the number of channels stored, up to chan_sz.
+ * It will return < 0 if the device is not PPP.
+ *
+ * You MUST release the channels using ppp_release_channels().
+ */
+int ppp_hold_channels(struct net_device *dev, struct ppp_channel *channels[],
+ unsigned int chan_sz)
+{
+ struct ppp *ppp;
+ int c;
+ struct channel *pch;
+
+ if (!dev)
+ return -1;
+
+ if (dev->type != ARPHRD_PPP)
+ return -1;
+
+ ppp = netdev_priv(dev);
+
+ c = 0;
+ ppp_lock(ppp);
+ list_for_each_entry(pch, &ppp->channels, clist) {
+ struct ppp_channel *chan;
+
+ if (!pch->chan) {
+ /* Channel is going / gone away */
+ continue;
+ }
+
+ if (c == chan_sz) {
+ /* No space to record channel */
+ ppp_unlock(ppp);
+ return c;
+ }
+
+ /* Hold the channel, if supported */
+ chan = pch->chan;
+ if (!chan->ops->hold)
+ continue;
+
+ chan->ops->hold(chan);
+
+ /* Record the channel */
+ channels[c++] = chan;
+ }
+ ppp_unlock(ppp);
+ return c;
+}
+EXPORT_SYMBOL(ppp_hold_channels);
+
+/* ppp_release_channels()
+ * Releases channels
+ */
+void ppp_release_channels(struct ppp_channel *channels[], unsigned int chan_sz)
+{
+ unsigned int c;
+
+ for (c = 0; c < chan_sz; ++c) {
+ struct ppp_channel *chan;
+
+ chan = channels[c];
+ chan->ops->release(chan);
+ }
+}
+EXPORT_SYMBOL(ppp_release_channels);
+
/* Module/initialization stuff */
module_init(ppp_init);
@@ -3604,6 +3877,7 @@ EXPORT_SYMBOL(ppp_input_error);
EXPORT_SYMBOL(ppp_output_wakeup);
EXPORT_SYMBOL(ppp_register_compressor);
EXPORT_SYMBOL(ppp_unregister_compressor);
+EXPORT_SYMBOL(ppp_update_stats);
MODULE_LICENSE("GPL");
MODULE_ALIAS_CHARDEV(PPP_MAJOR, 0);
MODULE_ALIAS_RTNL_LINK("ppp");
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -62,6 +62,7 @@
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
+#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/if_ether.h>
#include <linux/if_pppox.h>
@@ -87,7 +88,7 @@
static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb);
static const struct proto_ops pppoe_ops;
-static const struct ppp_channel_ops pppoe_chan_ops;
+static const struct pppoe_channel_ops pppoe_chan_ops;
/* per-net private data for this module */
static unsigned int pppoe_net_id __read_mostly;
@@ -692,7 +693,7 @@ static int pppoe_connect(struct socket *
po->chan.mtu = dev->mtu - sizeof(struct pppoe_hdr) - 2;
po->chan.private = sk;
- po->chan.ops = &pppoe_chan_ops;
+ po->chan.ops = (struct ppp_channel_ops *)&pppoe_chan_ops;
error = ppp_register_net_channel(dev_net(dev), &po->chan);
if (error) {
@@ -995,9 +996,80 @@ static int pppoe_fill_forward_path(struc
return 0;
}
-static const struct ppp_channel_ops pppoe_chan_ops = {
- .start_xmit = pppoe_xmit,
- .fill_forward_path = pppoe_fill_forward_path,
+/************************************************************************
+ *
+ * function called by generic PPP driver to hold channel
+ *
+ ***********************************************************************/
+static void pppoe_hold_chan(struct ppp_channel *chan)
+{
+ struct sock *sk = (struct sock *)chan->private;
+
+ sock_hold(sk);
+}
+
+/************************************************************************
+ *
+ * function called by generic PPP driver to release channel
+ *
+ ***********************************************************************/
+static void pppoe_release_chan(struct ppp_channel *chan)
+{
+ struct sock *sk = (struct sock *)chan->private;
+
+ sock_put(sk);
+}
+
+/************************************************************************
+ *
+ * function called to get the channel protocol type
+ *
+ ***********************************************************************/
+static int pppoe_get_channel_protocol(struct ppp_channel *chan)
+{
+ return PX_PROTO_OE;
+}
+
+/************************************************************************
+ *
+ * function called to get the PPPoE channel addressing
+ * NOTE: This function returns a HOLD to the netdevice
+ *
+ ***********************************************************************/
+static int pppoe_get_addressing(struct ppp_channel *chan,
+ struct pppoe_opt *addressing)
+{
+ struct sock *sk = (struct sock *)chan->private;
+ struct pppox_sock *po = pppox_sk(sk);
+ int err = 0;
+
+ *addressing = po->proto.pppoe;
+ if (!addressing->dev)
+ return -ENODEV;
+
+ dev_hold(addressing->dev);
+ return err;
+}
+
+/* pppoe_channel_addressing_get()
+ * Return PPPoE channel specific addressing information.
+ */
+int pppoe_channel_addressing_get(struct ppp_channel *chan,
+ struct pppoe_opt *addressing)
+{
+ return pppoe_get_addressing(chan, addressing);
+}
+EXPORT_SYMBOL(pppoe_channel_addressing_get);
+
+static const struct pppoe_channel_ops pppoe_chan_ops = {
+ /* PPPoE specific channel ops */
+ .get_addressing = pppoe_get_addressing,
+ /* General ppp channel ops */
+ .ops.start_xmit = pppoe_xmit,
+ .ops.get_channel_protocol = pppoe_get_channel_protocol,
+ .ops.hold = pppoe_hold_chan,
+ .ops.release = pppoe_release_chan,
+ .ops.fill_forward_path = pppoe_fill_forward_path,
};
static int pppoe_recvmsg(struct socket *sock, struct msghdr *m,
--- a/include/linux/if_pppox.h
+++ b/include/linux/if_pppox.h
@@ -91,4 +91,17 @@ enum {
PPPOX_DEAD = 16 /* dead, useless, please clean me up!*/
};
+/*
+ * PPPoE Channel specific operations
+ */
+struct pppoe_channel_ops {
+ /* Must be first - general to all PPP channels */
+ struct ppp_channel_ops ops;
+ int (*get_addressing)(struct ppp_channel *, struct pppoe_opt *);
+};
+
+/* Return PPPoE channel specific addressing information */
+extern int pppoe_channel_addressing_get(struct ppp_channel *chan,
+ struct pppoe_opt *addressing);
+
#endif /* !(__LINUX_IF_PPPOX_H) */
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1762,6 +1762,36 @@ enum netdev_priv_flags {
IFF_NO_IP_ALIGN = BIT_ULL(34),
};
+/**
+ * enum netdev_priv_flags_ext - &struct net_device priv_flags_ext
+ *
+ * These flags are used to check for device type and can be
+ * set and used by the drivers
+ *
+ * @IFF_EXT_TUN_TAP: device is a TUN/TAP device
+ * @IFF_EXT_PPP_L2TPV2: device is a L2TPV2 device
+ * @IFF_EXT_PPP_L2TPV3: device is a L2TPV3 device
+ * @IFF_EXT_PPP_PPTP: device is a PPTP device
+ * @IFF_EXT_GRE_V4_TAP: device is a GRE IPv4 TAP device
+ * @IFF_EXT_GRE_V6_TAP: device is a GRE IPv6 TAP device
+ * @IFF_EXT_IFB: device is an IFB device
+ * @IFF_EXT_MAPT: device is an MAPT device
+ * @IFF_EXT_HW_NO_OFFLOAD: device is an NON Offload device
+ * @IFF_EXT_L2TPV3: device is a L2TPV3 Ethernet device
+ */
+enum netdev_priv_flags_ext {
+ IFF_EXT_TUN_TAP = 1<<0,
+ IFF_EXT_PPP_L2TPV2 = 1<<1,
+ IFF_EXT_PPP_L2TPV3 = 1<<2,
+ IFF_EXT_PPP_PPTP = 1<<3,
+ IFF_EXT_GRE_V4_TAP = 1<<4,
+ IFF_EXT_GRE_V6_TAP = 1<<5,
+ IFF_EXT_IFB = 1<<6,
+ IFF_EXT_MAPT = 1<<7,
+ IFF_EXT_HW_NO_OFFLOAD = 1<<8,
+ IFF_EXT_ETH_L2TPV3 = 1<<9,
+};
+
#define IFF_802_1Q_VLAN IFF_802_1Q_VLAN
#define IFF_EBRIDGE IFF_EBRIDGE
#define IFF_BONDING IFF_BONDING
@@ -2127,6 +2157,7 @@ struct net_device {
unsigned int flags;
xdp_features_t xdp_features;
unsigned long long priv_flags;
+ unsigned int priv_flags_ext;
const struct net_device_ops *netdev_ops;
const struct xdp_metadata_ops *xdp_metadata_ops;
int ifindex;
--- a/include/linux/ppp_channel.h
+++ b/include/linux/ppp_channel.h
@@ -19,6 +19,10 @@
#include <linux/skbuff.h>
#include <linux/poll.h>
#include <net/net_namespace.h>
+#include <linux/notifier.h>
+
+#define PPP_CHANNEL_DISCONNECT 0
+#define PPP_CHANNEL_CONNECT 1
struct net_device_path;
struct net_device_path_ctx;
@@ -30,9 +34,19 @@ struct ppp_channel_ops {
int (*start_xmit)(struct ppp_channel *, struct sk_buff *);
/* Handle an ioctl call that has come in via /dev/ppp. */
int (*ioctl)(struct ppp_channel *, unsigned int, unsigned long);
+ /* Get channel protocol type, one of PX_PROTO_XYZ or specific to
+ * the channel subtype
+ */
+ int (*get_channel_protocol)(struct ppp_channel *);
+ /* Get channel protocol version */
+ int (*get_channel_protocol_ver)(struct ppp_channel *);
+ /* Hold the channel from being destroyed */
+ void (*hold)(struct ppp_channel *);
+ /* Release hold on the channel */
+ void (*release)(struct ppp_channel *);
int (*fill_forward_path)(struct net_device_path_ctx *,
- struct net_device_path *,
- const struct ppp_channel *);
+ struct net_device_path *,
+ const struct ppp_channel *);
};
struct ppp_channel {
@@ -76,6 +90,51 @@ extern int ppp_unit_number(struct ppp_ch
/* Get the device name associated with a channel, or NULL if none */
extern char *ppp_dev_name(struct ppp_channel *);
+/* Call this to obtain the underlying protocol of the PPP channel,
+ * e.g. PX_PROTO_OE
+ */
+extern int ppp_channel_get_protocol(struct ppp_channel *);
+
+/* Call this to get the channel protocol version */
+extern int ppp_channel_get_proto_version(struct ppp_channel *);
+
+/* Call this to hold a channel */
+extern bool ppp_channel_hold(struct ppp_channel *);
+
+/* Call this to release a hold you have upon a channel */
+extern void ppp_channel_release(struct ppp_channel *);
+
+/* Release hold on PPP channels */
+extern void ppp_release_channels(struct ppp_channel *channels[],
+ unsigned int chan_sz);
+
+/* Hold PPP channels for the PPP device */
+extern int ppp_hold_channels(struct net_device *dev,
+ struct ppp_channel *channels[],
+ unsigned int chan_sz);
+
+/* Test if ppp xmit lock is locked */
+extern bool ppp_is_xmit_locked(struct net_device *dev);
+
+/* Test if the ppp device is a multi-link ppp device */
+extern int ppp_is_multilink(struct net_device *dev);
+
+/* Register the PPP channel connect notifier */
+extern void ppp_channel_connection_register_notify(struct notifier_block *nb);
+
+/* Unregister the PPP channel connect notifier */
+extern void ppp_channel_connection_unregister_notify(struct notifier_block *nb);
+
+/* Update statistics of the PPP net_device by incrementing each related
+ * statistics field by the corresponding parameter value
+ */
+extern void ppp_update_stats(struct net_device *dev, unsigned long rx_packets,
+ unsigned long rx_bytes, unsigned long tx_packets,
+ unsigned long tx_bytes, unsigned long rx_errors,
+ unsigned long tx_errors, unsigned long rx_dropped,
+ unsigned long tx_dropped);
+
+
/*
* SMP locking notes:
* The channel code must ensure that when it calls ppp_unregister_channel,

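The hold/release helpers above are intended for offload engines that need to walk a PPP device's channels without racing against channel teardown. Below is a minimal usage sketch, assuming a hypothetical module and an arbitrary channel-array size; only ppp_hold_channels(), ppp_channel_get_protocol(), pppoe_channel_addressing_get() and ppp_release_channels() come from the patch above.

/* Illustrative sketch only: walk a PPP netdevice's channels, inspect the
 * underlying protocol and drop the holds again. Error handling is minimal.
 */
#include <linux/netdevice.h>
#include <linux/ppp_channel.h>
#include <linux/if_pppox.h>

#define EXAMPLE_MAX_CHANNELS 8	/* hypothetical upper bound */

static void example_inspect_ppp_dev(struct net_device *dev)
{
	struct ppp_channel *channels[EXAMPLE_MAX_CHANNELS];
	struct pppoe_opt addressing;
	int nchans, i;

	/* Take a hold on every channel; returns < 0 if dev is not PPP. */
	nchans = ppp_hold_channels(dev, channels, EXAMPLE_MAX_CHANNELS);
	if (nchans <= 0)
		return;

	for (i = 0; i < nchans; i++) {
		if (ppp_channel_get_protocol(channels[i]) != PX_PROTO_OE)
			continue;

		/* For PPPoE channels, fetch session/peer addressing.
		 * Note: on success this takes a hold on addressing.dev.
		 */
		if (!pppoe_channel_addressing_get(channels[i], &addressing))
			dev_put(addressing.dev);
	}

	/* Every successful ppp_hold_channels() must be paired with this. */
	ppp_release_channels(channels, nchans);
}
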
View file

@@ -0,0 +1,46 @@
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -210,6 +210,7 @@ atomic_t netpoll_block_tx = ATOMIC_INIT(
#endif
unsigned int bond_net_id __read_mostly;
+static unsigned long bond_id_mask = 0xFFFFFFF0; /* QCA NSS ECM bonding support */
static const struct flow_dissector_key flow_keys_bonding_keys[] = {
{
@@ -5872,6 +5873,11 @@ static void bond_destructor(struct net_d
if (bond->wq)
destroy_workqueue(bond->wq);
+ /* QCA NSS ECM bonding support - Start */
+ if (bond->id != (~0U))
+ clear_bit(bond->id, &bond_id_mask);
+ /* QCA NSS ECM bonding support - End */
+
free_percpu(bond->rr_tx_counter);
}
@@ -6421,6 +6427,13 @@ int bond_create(struct net *net, const c
bond_work_init_all(bond);
+ /* QCA NSS ECM bonding support - Start */
+ bond->id = ~0U;
+ if (bond_id_mask != (~0UL)) {
+ bond->id = (u32)ffz(bond_id_mask);
+ set_bit(bond->id, &bond_id_mask);
+ }
+ /* QCA NSS ECM bonding support - End */
out:
rtnl_unlock();
return res;
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -261,6 +261,7 @@ struct bonding {
spinlock_t ipsec_lock;
#endif /* CONFIG_XFRM_OFFLOAD */
struct bpf_prog *xdp_prog;
+ u32 id;/* QCA NSS ECM bonding support */
};
#define bond_slave_get_rcu(dev) \

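This small hunk only reserves a per-bond slot (bond->id) out of bond_id_mask; the companion bonding patch later in this commit exports bond_get_id() so offload code can retrieve that slot. A hedged sketch of the intended consumer side, with hypothetical naming (only bond_get_id() comes from these patches):

/* Illustrative sketch: map a bond master to its reserved id, e.g. to index
 * a small per-bond table in an offload driver.
 */
#include <linux/printk.h>
#include <linux/netdevice.h>
#include <net/bonding.h>

static int example_bond_slot(struct net_device *dev)
{
	int id = bond_get_id(dev);	/* -EINVAL unless dev is a bond master */

	if (id < 0)
		pr_debug("%s is not a bonding master\n", dev->name);

	return id;	/* non-negative index into per-bond offload state */
}
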
View file

@@ -0,0 +1,685 @@
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -116,6 +116,40 @@ static void ad_marker_response_received(
struct port *port);
static void ad_update_actor_keys(struct port *port, bool reset);
+/* QCA NSS ECM bonding support - Start */
+struct bond_cb __rcu *bond_cb;
+
+int bond_register_cb(struct bond_cb *cb)
+{
+ struct bond_cb *lag_cb;
+
+ lag_cb = kzalloc(sizeof(*lag_cb), GFP_ATOMIC | __GFP_NOWARN);
+ if (!lag_cb) {
+ return -1;
+ }
+
+ memcpy((void *)lag_cb, (void *)cb, sizeof(*cb));
+
+ rcu_read_lock();
+ rcu_assign_pointer(bond_cb, lag_cb);
+ rcu_read_unlock();
+ return 0;
+}
+EXPORT_SYMBOL(bond_register_cb);
+
+void bond_unregister_cb(void)
+{
+ struct bond_cb *lag_cb_main;
+
+ rcu_read_lock();
+ lag_cb_main = rcu_dereference(bond_cb);
+ rcu_assign_pointer(bond_cb, NULL);
+ rcu_read_unlock();
+
+ kfree(lag_cb_main);
+}
+EXPORT_SYMBOL(bond_unregister_cb);
+/* QCA NSS ECM bonding support - End */
/* ================= api to bonding and kernel code ================== */
@@ -1073,7 +1107,31 @@ static void ad_mux_machine(struct port *
ad_disable_collecting_distributing(port,
update_slave_arr);
port->ntt = true;
+
+ /* QCA NSS ECM bonding support - Start */
+ /* Send a notification about change in state of this
+ * port. We only want to handle case where port moves
+ * from AD_MUX_COLLECTING_DISTRIBUTING ->
+ * AD_MUX_ATTACHED.
+ */
+ if (bond_slave_is_up(port->slave) &&
+ (last_state == AD_MUX_COLLECTING_DISTRIBUTING)) {
+ struct bond_cb *lag_cb_main;
+
+ rcu_read_lock();
+ lag_cb_main = rcu_dereference(bond_cb);
+ if (lag_cb_main &&
+ lag_cb_main->bond_cb_link_down) {
+ struct net_device *dev;
+
+ dev = port->slave->dev;
+ lag_cb_main->bond_cb_link_down(dev);
+ }
+ rcu_read_unlock();
+ }
+
break;
+ /* QCA NSS ECM bonding support - End */
case AD_MUX_COLLECTING_DISTRIBUTING:
port->actor_oper_port_state |= LACP_STATE_COLLECTING;
port->actor_oper_port_state |= LACP_STATE_DISTRIBUTING;
@@ -1917,6 +1975,7 @@ static void ad_enable_collecting_distrib
bool *update_slave_arr)
{
if (port->aggregator->is_active) {
+ struct bond_cb *lag_cb_main; /* QCA NSS ECM bonding support */
slave_dbg(port->slave->bond->dev, port->slave->dev,
"Enabling port %d (LAG %d)\n",
port->actor_port_number,
@@ -1924,6 +1983,16 @@ static void ad_enable_collecting_distrib
__enable_port(port);
/* Slave array needs update */
*update_slave_arr = true;
+
+ /* QCA NSS ECM bonding support - Start */
+ rcu_read_lock();
+ lag_cb_main = rcu_dereference(bond_cb);
+
+ if (lag_cb_main && lag_cb_main->bond_cb_link_up)
+ lag_cb_main->bond_cb_link_up(port->slave->dev);
+
+ rcu_read_unlock();
+ /* QCA NSS ECM bonding support - End */
}
}
@@ -2683,6 +2752,104 @@ int bond_3ad_get_active_agg_info(struct
return ret;
}
+/* QCA NSS ECM bonding support - Start */
+/* bond_3ad_get_tx_dev - Calculate egress interface for a given packet,
+ * for a LAG that is configured in 802.3AD mode
+ * @skb: pointer to skb to be egressed
+ * @src_mac: pointer to source L2 address
+ * @dst_mac: pointer to destination L2 address
+ * @src: pointer to source L3 address
+ * @dst: pointer to destination L3 address
+ * @protocol: L3 protocol id from L2 header
+ * @bond_dev: pointer to bond master device
+ *
+ * If @skb is NULL, bond_xmit_hash_without_skb is used to calculate hash
+ * using L2/L3 addresses.
+ *
+ * Returns: Either valid slave device, or NULL otherwise
+ */
+struct net_device *bond_3ad_get_tx_dev(struct sk_buff *skb, u8 *src_mac,
+ u8 *dst_mac, void *src,
+ void *dst, u16 protocol,
+ struct net_device *bond_dev,
+ __be16 *layer4hdr)
+{
+ struct bonding *bond = netdev_priv(bond_dev);
+ struct aggregator *agg;
+ struct ad_info ad_info;
+ struct list_head *iter;
+ struct slave *slave;
+ struct slave *first_ok_slave = NULL;
+ u32 hash = 0;
+ int slaves_in_agg;
+ int slave_agg_no = 0;
+ int agg_id;
+
+ if (__bond_3ad_get_active_agg_info(bond, &ad_info)) {
+ pr_debug("%s: Error: __bond_3ad_get_active_agg_info failed\n",
+ bond_dev->name);
+ return NULL;
+ }
+
+ slaves_in_agg = ad_info.ports;
+ agg_id = ad_info.aggregator_id;
+
+ if (slaves_in_agg == 0) {
+ pr_debug("%s: Error: active aggregator is empty\n",
+ bond_dev->name);
+ return NULL;
+ }
+
+ if (skb) {
+ hash = bond_xmit_hash(bond, skb);
+ slave_agg_no = hash % slaves_in_agg;
+ } else {
+ if (bond->params.xmit_policy != BOND_XMIT_POLICY_LAYER23 &&
+ bond->params.xmit_policy != BOND_XMIT_POLICY_LAYER2 &&
+ bond->params.xmit_policy != BOND_XMIT_POLICY_LAYER34) {
+ pr_debug("%s: Error: Unsupported hash policy for 802.3AD fast path\n",
+ bond_dev->name);
+ return NULL;
+ }
+
+ hash = bond_xmit_hash_without_skb(src_mac, dst_mac,
+ src, dst, protocol,
+ bond_dev, layer4hdr);
+ slave_agg_no = hash % slaves_in_agg;
+ }
+
+ bond_for_each_slave_rcu(bond, slave, iter) {
+ agg = SLAVE_AD_INFO(slave)->port.aggregator;
+ if (!agg || agg->aggregator_identifier != agg_id)
+ continue;
+
+ if (slave_agg_no >= 0) {
+ if (!first_ok_slave && bond_slave_can_tx(slave))
+ first_ok_slave = slave;
+ slave_agg_no--;
+ continue;
+ }
+
+ if (bond_slave_can_tx(slave))
+ return slave->dev;
+ }
+
+ if (slave_agg_no >= 0) {
+ pr_err("%s: Error: Couldn't find a slave to tx on for aggregator ID %d\n",
+ bond_dev->name, agg_id);
+ return NULL;
+ }
+
+ /* we couldn't find any suitable slave after the agg_no, so use the
+ * first suitable found, if found.
+ */
+ if (first_ok_slave)
+ return first_ok_slave->dev;
+
+ return NULL;
+}
+/* QCA NSS ECM bonding support - End */
+
int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
struct slave *slave)
{
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -288,6 +288,21 @@ const char *bond_mode_name(int mode)
return names[mode];
}
+/* QCA NSS ECM bonding support */
+int bond_get_id(struct net_device *bond_dev)
+{
+ struct bonding *bond;
+
+ if (!((bond_dev->priv_flags & IFF_BONDING) &&
+ (bond_dev->flags & IFF_MASTER)))
+ return -EINVAL;
+
+ bond = netdev_priv(bond_dev);
+ return bond->id;
+}
+EXPORT_SYMBOL(bond_get_id);
+/* QCA NSS ECM bonding support */
+
/**
* bond_dev_queue_xmit - Prepare skb for xmit.
*
@@ -1189,6 +1204,23 @@ void bond_change_active_slave(struct bon
if (BOND_MODE(bond) == BOND_MODE_8023AD)
bond_3ad_handle_link_change(new_active, BOND_LINK_UP);
+ /* QCA NSS ECM bonding support - Start */
+ if (bond->params.mode == BOND_MODE_XOR) {
+ struct bond_cb *lag_cb_main;
+
+ rcu_read_lock();
+ lag_cb_main = rcu_dereference(bond_cb);
+ if (lag_cb_main &&
+ lag_cb_main->bond_cb_link_up) {
+ struct net_device *dev;
+
+ dev = new_active->dev;
+ lag_cb_main->bond_cb_link_up(dev);
+ }
+ rcu_read_unlock();
+ }
+ /* QCA NSS ECM bonding support - End */
+
if (bond_is_lb(bond))
bond_alb_handle_link_change(bond, new_active, BOND_LINK_UP);
} else {
@@ -1833,6 +1865,7 @@ int bond_enslave(struct net_device *bond
const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
struct slave *new_slave = NULL, *prev_slave;
struct sockaddr_storage ss;
+ struct bond_cb *lag_cb_main; /* QCA NSS ECM bonding support */
int link_reporting;
int res = 0, i;
@@ -2278,6 +2311,15 @@ int bond_enslave(struct net_device *bond
bond_is_active_slave(new_slave) ? "an active" : "a backup",
new_slave->link != BOND_LINK_DOWN ? "an up" : "a down");
+ /* QCA NSS ECM bonding support - Start */
+ rcu_read_lock();
+ lag_cb_main = rcu_dereference(bond_cb);
+ if (lag_cb_main && lag_cb_main->bond_cb_enslave)
+ lag_cb_main->bond_cb_enslave(slave_dev);
+
+ rcu_read_unlock();
+ /* QCA NSS ECM bonding support - End */
+
/* enslave is successful */
bond_queue_slave_event(new_slave);
return 0;
@@ -2343,6 +2385,15 @@ err_undo_flags:
}
}
+ /* QCA NSS ECM bonding support - Start */
+ rcu_read_lock();
+ lag_cb_main = rcu_dereference(bond_cb);
+ if (lag_cb_main && lag_cb_main->bond_cb_enslave)
+ lag_cb_main->bond_cb_enslave(slave_dev);
+
+ rcu_read_unlock();
+ /* QCA NSS ECM bonding support - End */
+
return res;
}
@@ -2364,6 +2415,7 @@ static int __bond_release_one(struct net
struct bonding *bond = netdev_priv(bond_dev);
struct slave *slave, *oldcurrent;
struct sockaddr_storage ss;
+ struct bond_cb *lag_cb_main; /* QCA NSS ECM bonding support */
int old_flags = bond_dev->flags;
netdev_features_t old_features = bond_dev->features;
@@ -2386,6 +2438,15 @@ static int __bond_release_one(struct net
bond_set_slave_inactive_flags(slave, BOND_SLAVE_NOTIFY_NOW);
+ /* QCA NSS ECM bonding support - Start */
+ rcu_read_lock();
+ lag_cb_main = rcu_dereference(bond_cb);
+ if (lag_cb_main && lag_cb_main->bond_cb_release)
+ lag_cb_main->bond_cb_release(slave_dev);
+
+ rcu_read_unlock();
+ /* QCA NSS ECM bonding support - End */
+
bond_sysfs_slave_del(slave);
/* recompute stats just before removing the slave */
@@ -2708,6 +2769,8 @@ static void bond_miimon_commit(struct bo
struct slave *slave, *primary, *active;
bool do_failover = false;
struct list_head *iter;
+ struct net_device *slave_dev = NULL; /* QCA NSS ECM bonding support */
+ struct bond_cb *lag_cb_main; /* QCA NSS ECM bonding support */
ASSERT_RTNL();
@@ -2747,6 +2810,12 @@ static void bond_miimon_commit(struct bo
bond_set_active_slave(slave);
}
+ /* QCA NSS ECM bonding support - Start */
+ if ((bond->params.mode == BOND_MODE_XOR) &&
+ (!slave_dev))
+ slave_dev = slave->dev;
+ /* QCA NSS ECM bonding support - End */
+
slave_info(bond->dev, slave->dev, "link status definitely up, %u Mbps %s duplex\n",
slave->speed == SPEED_UNKNOWN ? 0 : slave->speed,
slave->duplex ? "full" : "half");
@@ -2795,6 +2864,16 @@ static void bond_miimon_commit(struct bo
unblock_netpoll_tx();
}
+ /* QCA NSS ECM bonding support - Start */
+ rcu_read_lock();
+ lag_cb_main = rcu_dereference(bond_cb);
+
+ if (slave_dev && lag_cb_main && lag_cb_main->bond_cb_link_up)
+ lag_cb_main->bond_cb_link_up(slave_dev);
+
+ rcu_read_unlock();
+ /* QCA NSS ECM bonding support - End */
+
bond_set_carrier(bond);
}
@@ -4047,8 +4126,219 @@ static inline u32 bond_eth_hash(struct s
return 0;
ep = (struct ethhdr *)(data + mhoff);
- return ep->h_dest[5] ^ ep->h_source[5] ^ be16_to_cpu(ep->h_proto);
+ return ep->h_dest[5] ^ ep->h_source[5]; /* QCA NSS ECM bonding support */
+}
+
+/* QCA NSS ECM bonding support - Start */
+/* Extract the appropriate headers based on bond's xmit policy */
+static bool bond_flow_dissect_without_skb(struct bonding *bond,
+ u8 *src_mac, u8 *dst_mac,
+ void *psrc, void *pdst,
+ u16 protocol, __be16 *layer4hdr,
+ struct flow_keys *fk)
+{
+ u32 *src = NULL;
+ u32 *dst = NULL;
+
+ fk->ports.ports = 0;
+ src = (uint32_t *)psrc;
+ dst = (uint32_t *)pdst;
+
+ if (protocol == htons(ETH_P_IP)) {
+ /* V4 addresses and address type*/
+ fk->addrs.v4addrs.src = src[0];
+ fk->addrs.v4addrs.dst = dst[0];
+ fk->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
+ } else if (protocol == htons(ETH_P_IPV6)) {
+ /* V6 addresses and address type*/
+ memcpy(&fk->addrs.v6addrs.src, src, sizeof(struct in6_addr));
+ memcpy(&fk->addrs.v6addrs.dst, dst, sizeof(struct in6_addr));
+ fk->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
+ } else {
+ return false;
+ }
+ if ((bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER34) &&
+ (layer4hdr))
+ fk->ports.ports = *layer4hdr;
+
+ return true;
+}
+
+/* bond_xmit_hash_without_skb - Applies load balancing algorithm for a packet,
+ * to calculate hash for a given set of L2/L3 addresses. Does not
+ * calculate egress interface.
+ */
+uint32_t bond_xmit_hash_without_skb(u8 *src_mac, u8 *dst_mac,
+ void *psrc, void *pdst, u16 protocol,
+ struct net_device *bond_dev,
+ __be16 *layer4hdr)
+{
+ struct bonding *bond = netdev_priv(bond_dev);
+ struct flow_keys flow;
+ u32 hash = 0;
+
+ if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER2 ||
+ !bond_flow_dissect_without_skb(bond, src_mac, dst_mac, psrc,
+ pdst, protocol, layer4hdr, &flow))
+ return (dst_mac[5] ^ src_mac[5]);
+
+ if (bond->params.xmit_policy == BOND_XMIT_POLICY_LAYER23)
+ hash = dst_mac[5] ^ src_mac[5];
+ else if (layer4hdr)
+ hash = (__force u32)flow.ports.ports;
+
+ hash ^= (__force u32)flow_get_u32_dst(&flow) ^
+ (__force u32)flow_get_u32_src(&flow);
+ hash ^= (hash >> 16);
+ hash ^= (hash >> 8);
+
+ return hash;
+}
+
+/* bond_xor_get_tx_dev - Calculate egress interface for a given packet for a LAG
+ * that is configured in balance-xor mode
+ * @skb: pointer to skb to be egressed
+ * @src_mac: pointer to source L2 address
+ * @dst_mac: pointer to destination L2 address
+ * @src: pointer to source L3 address in network order
+ * @dst: pointer to destination L3 address in network order
+ * @protocol: L3 protocol
+ * @bond_dev: pointer to bond master device
+ *
+ * If @skb is NULL, bond_xmit_hash_without_skb is used to calculate hash using
+ * L2/L3 addresses.
+ *
+ * Returns: Either valid slave device, or NULL otherwise
+ */
+static struct net_device *bond_xor_get_tx_dev(struct sk_buff *skb,
+ u8 *src_mac, u8 *dst_mac,
+ void *src, void *dst,
+ u16 protocol,
+ struct net_device *bond_dev,
+ __be16 *layer4hdr)
+{
+ struct bonding *bond = netdev_priv(bond_dev);
+ int slave_cnt = READ_ONCE(bond->slave_cnt);
+ int slave_id = 0, i = 0;
+ u32 hash;
+ struct list_head *iter;
+ struct slave *slave;
+
+ if (slave_cnt == 0) {
+ pr_debug("%s: Error: No slave is attached to the interface\n",
+ bond_dev->name);
+ return NULL;
+ }
+
+ if (skb) {
+ hash = bond_xmit_hash(bond, skb);
+ slave_id = hash % slave_cnt;
+ } else {
+ if (bond->params.xmit_policy != BOND_XMIT_POLICY_LAYER23 &&
+ bond->params.xmit_policy != BOND_XMIT_POLICY_LAYER2 &&
+ bond->params.xmit_policy != BOND_XMIT_POLICY_LAYER34) {
+ pr_debug("%s: Error: Unsupported hash policy for balance-XOR fast path\n",
+ bond_dev->name);
+ return NULL;
+ }
+
+ hash = bond_xmit_hash_without_skb(src_mac, dst_mac, src,
+ dst, protocol, bond_dev,
+ layer4hdr);
+ slave_id = hash % slave_cnt;
+ }
+
+ i = slave_id;
+
+ /* Here we start from the slave with slave_id */
+ bond_for_each_slave_rcu(bond, slave, iter) {
+ if (--i < 0) {
+ if (bond_slave_can_tx(slave))
+ return slave->dev;
+ }
+ }
+
+ /* Here we start from the first slave up to slave_id */
+ i = slave_id;
+ bond_for_each_slave_rcu(bond, slave, iter) {
+ if (--i < 0)
+ break;
+ if (bond_slave_can_tx(slave))
+ return slave->dev;
+ }
+
+ return NULL;
+}
+
+/* bond_get_tx_dev - Calculate egress interface for a given packet.
+ *
+ * Supports 802.3AD and balance-xor modes
+ *
+ * @skb: pointer to skb to be egressed, if valid
+ * @src_mac: pointer to source L2 address
+ * @dst_mac: pointer to destination L2 address
+ * @src: pointer to source L3 address in network order
+ * @dst: pointer to destination L3 address in network order
+ * @protocol: L3 protocol id from L2 header
+ * @bond_dev: pointer to bond master device
+ *
+ * Returns: Either valid slave device, or NULL for un-supported LAG modes
+ */
+struct net_device *bond_get_tx_dev(struct sk_buff *skb, uint8_t *src_mac,
+ u8 *dst_mac, void *src,
+ void *dst, u16 protocol,
+ struct net_device *bond_dev,
+ __be16 *layer4hdr)
+{
+ struct bonding *bond;
+
+ if (!bond_dev)
+ return NULL;
+
+ if (!((bond_dev->priv_flags & IFF_BONDING) &&
+ (bond_dev->flags & IFF_MASTER)))
+ return NULL;
+
+ bond = netdev_priv(bond_dev);
+
+ switch (bond->params.mode) {
+ case BOND_MODE_XOR:
+ return bond_xor_get_tx_dev(skb, src_mac, dst_mac,
+ src, dst, protocol,
+ bond_dev, layer4hdr);
+ case BOND_MODE_8023AD:
+ return bond_3ad_get_tx_dev(skb, src_mac, dst_mac,
+ src, dst, protocol,
+ bond_dev, layer4hdr);
+ default:
+ return NULL;
+ }
}
+EXPORT_SYMBOL(bond_get_tx_dev);
+
+/* In bond_xmit_xor(), we determine the output device by using a pre-
+ * determined xmit_hash_policy(). If the selected device is not enabled,
+ * find the next active slave.
+ */
+static int bond_xmit_xor(struct sk_buff *skb, struct net_device *dev)
+{
+ struct bonding *bond = netdev_priv(dev);
+ struct net_device *outdev;
+
+ outdev = bond_xor_get_tx_dev(skb, NULL, NULL, NULL,
+ NULL, 0, dev, NULL);
+ if (!outdev)
+ goto out;
+
+ bond_dev_queue_xmit(bond, skb, outdev);
+ goto final;
+out:
+ /* no suitable interface, frame not sent */
+ dev_kfree_skb(skb);
+final:
+ return NETDEV_TX_OK;
+}
+/* QCA NSS ECM bonding support - End */
static bool bond_flow_ip(struct sk_buff *skb, struct flow_keys *fk, const void *data,
int hlen, __be16 l2_proto, int *nhoff, int *ip_proto, bool l34)
@@ -5177,15 +5467,18 @@ static netdev_tx_t bond_3ad_xor_xmit(str
struct net_device *dev)
{
struct bonding *bond = netdev_priv(dev);
- struct bond_up_slave *slaves;
- struct slave *slave;
+ /* QCA NSS ECM bonding support - Start */
+ struct net_device *outdev = NULL;
- slaves = rcu_dereference(bond->usable_slaves);
- slave = bond_xmit_3ad_xor_slave_get(bond, skb, slaves);
- if (likely(slave))
- return bond_dev_queue_xmit(bond, skb, slave->dev);
+ outdev = bond_3ad_get_tx_dev(skb, NULL, NULL, NULL,
+ NULL, 0, dev, NULL);
+ if (!outdev) {
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
- return bond_tx_drop(dev, skb);
+ return bond_dev_queue_xmit(bond, skb, outdev);
+ /* QCA NSS ECM bonding support - End */
}
/* in broadcast mode, we send everything to all usable interfaces. */
@@ -5435,8 +5728,9 @@ static netdev_tx_t __bond_start_xmit(str
return bond_xmit_roundrobin(skb, dev);
case BOND_MODE_ACTIVEBACKUP:
return bond_xmit_activebackup(skb, dev);
- case BOND_MODE_8023AD:
case BOND_MODE_XOR:
+ return bond_xmit_xor(skb, dev); /* QCA NSS ECM bonding support */
+ case BOND_MODE_8023AD:
return bond_3ad_xor_xmit(skb, dev);
case BOND_MODE_BROADCAST:
return bond_xmit_broadcast(skb, dev);
--- a/include/net/bond_3ad.h
+++ b/include/net/bond_3ad.h
@@ -302,8 +302,15 @@ int bond_3ad_lacpdu_recv(const struct sk
struct slave *slave);
int bond_3ad_set_carrier(struct bonding *bond);
void bond_3ad_update_lacp_rate(struct bonding *bond);
+/* QCA NSS ECM bonding support */
+struct net_device *bond_3ad_get_tx_dev(struct sk_buff *skb, uint8_t *src_mac,
+ uint8_t *dst_mac, void *src,
+ void *dst, uint16_t protocol,
+ struct net_device *bond_dev,
+ __be16 *layer4hdr);
+/* QCA NSS ECM bonding support */
+
void bond_3ad_update_ad_actor_settings(struct bonding *bond);
int bond_3ad_stats_fill(struct sk_buff *skb, struct bond_3ad_stats *stats);
size_t bond_3ad_stats_size(void);
#endif /* _NET_BOND_3AD_H */
-
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -90,6 +90,8 @@
#define BOND_XFRM_FEATURES (NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM | \
NETIF_F_GSO_ESP)
+extern struct bond_cb __rcu *bond_cb; /* QCA NSS ECM bonding support */
+
#ifdef CONFIG_NET_POLL_CONTROLLER
extern atomic_t netpoll_block_tx;
@@ -653,6 +655,7 @@ struct bond_net {
int bond_rcv_validate(const struct sk_buff *skb, struct bonding *bond, struct slave *slave);
netdev_tx_t bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev);
+int bond_get_id(struct net_device *bond_dev); /* QCA NSS ECM bonding support */
int bond_create(struct net *net, const char *name);
int bond_create_sysfs(struct bond_net *net);
void bond_destroy_sysfs(struct bond_net *net);
@@ -684,6 +687,13 @@ struct bond_vlan_tag *bond_verify_device
int level);
int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave);
void bond_slave_arr_work_rearm(struct bonding *bond, unsigned long delay);
+/* QCA NSS ECM bonding support - Start */
+uint32_t bond_xmit_hash_without_skb(uint8_t *src_mac, uint8_t *dst_mac,
+ void *psrc, void *pdst, uint16_t protocol,
+ struct net_device *bond_dev,
+ __be16 *layer4hdr);
+/* QCA NSS ECM bonding support - End */
+
void bond_work_init_all(struct bonding *bond);
#ifdef CONFIG_PROC_FS
@@ -788,4 +798,18 @@ static inline netdev_tx_t bond_tx_drop(s
return NET_XMIT_DROP;
}
+/* QCA NSS ECM bonding support - Start */
+struct bond_cb {
+ void (*bond_cb_link_up)(struct net_device *slave);
+ void (*bond_cb_link_down)(struct net_device *slave);
+ void (*bond_cb_enslave)(struct net_device *slave);
+ void (*bond_cb_release)(struct net_device *slave);
+ void (*bond_cb_delete_by_slave)(struct net_device *slave);
+ void (*bond_cb_delete_by_mac)(uint8_t *mac_addr);
+};
+
+extern int bond_register_cb(struct bond_cb *cb);
+extern void bond_unregister_cb(void);
+/* QCA NSS ECM bonding support - End */
+
#endif /* _NET_BONDING_H */

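The callback structure and register/unregister helpers added above let an external offload module observe slave link, enslave and release events, while bond_get_tx_dev() lets it resolve the egress slave for a flow it is about to accelerate. A minimal registration sketch follows, assuming hypothetical module and function names; only struct bond_cb, bond_register_cb() and bond_unregister_cb() come from the patch.

/* Illustrative sketch: register link-state callbacks with the bonding
 * driver so offloaded flows can be flushed when a slave changes state.
 */
#include <linux/module.h>
#include <linux/netdevice.h>
#include <net/bonding.h>

static void example_bond_link_up(struct net_device *slave)
{
	pr_debug("slave %s up: re-evaluate offloaded flows\n", slave->name);
}

static void example_bond_link_down(struct net_device *slave)
{
	pr_debug("slave %s down: flush offloaded flows\n", slave->name);
}

static struct bond_cb example_bond_cb = {
	.bond_cb_link_up = example_bond_link_up,
	.bond_cb_link_down = example_bond_link_down,
};

static int __init example_init(void)
{
	/* bond_register_cb() copies the structure into its own allocation
	 * and returns -1 if that allocation fails.
	 */
	return bond_register_cb(&example_bond_cb) ? -ENOMEM : 0;
}

static void __exit example_exit(void)
{
	bond_unregister_cb();
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
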
View file

@@ -0,0 +1,96 @@
--- a/include/linux/if_macvlan.h
+++ b/include/linux/if_macvlan.h
@@ -15,6 +15,13 @@ struct macvlan_port;
#define MACVLAN_MC_FILTER_BITS 8
#define MACVLAN_MC_FILTER_SZ (1 << MACVLAN_MC_FILTER_BITS)
+/* QCA NSS ECM Support - Start */
+/*
+ * Callback for updating interface statistics for macvlan flows offloaded from host CPU.
+ */
+typedef void (*macvlan_offload_stats_update_cb_t)(struct net_device *dev, struct rtnl_link_stats64 *stats, bool update_mcast_rx_stats);
+/* QCA NSS ECM Support - End */
+
struct macvlan_dev {
struct net_device *dev;
struct list_head list;
@@ -35,6 +42,7 @@ struct macvlan_dev {
#ifdef CONFIG_NET_POLL_CONTROLLER
struct netpoll *netpoll;
#endif
+ macvlan_offload_stats_update_cb_t offload_stats_update; /* QCA NSS ECM support */
};
static inline void macvlan_count_rx(const struct macvlan_dev *vlan,
@@ -107,4 +115,26 @@ static inline int macvlan_release_l2fw_o
macvlan->accel_priv = NULL;
return dev_uc_add(macvlan->lowerdev, dev->dev_addr);
}
+
+/* QCA NSS ECM Support - Start */
+#if IS_ENABLED(CONFIG_MACVLAN)
+static inline void
+macvlan_offload_stats_update(struct net_device *dev,
+ struct rtnl_link_stats64 *stats,
+ bool update_mcast_rx_stats)
+{
+ struct macvlan_dev *macvlan = netdev_priv(dev);
+
+ macvlan->offload_stats_update(dev, stats, update_mcast_rx_stats);
+}
+
+static inline enum
+macvlan_mode macvlan_get_mode(struct net_device *dev)
+{
+ struct macvlan_dev *macvlan = netdev_priv(dev);
+
+ return macvlan->mode;
+}
+#endif
+/* QCA NSS ECM Support - End */
#endif /* _LINUX_IF_MACVLAN_H */
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -960,6 +960,34 @@ static void macvlan_uninit(struct net_de
macvlan_port_destroy(port->dev);
}
+/* QCA NSS ECM Support - Start */
+/* Update macvlan statistics processed by offload engines */
+static void macvlan_dev_update_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *offl_stats,
+ bool update_mcast_rx_stats)
+{
+ struct vlan_pcpu_stats *stats;
+ struct macvlan_dev *macvlan;
+
+ /* Is this a macvlan? */
+ if (!netif_is_macvlan(dev))
+ return;
+
+ macvlan = netdev_priv(dev);
+ stats = this_cpu_ptr(macvlan->pcpu_stats);
+ u64_stats_update_begin(&stats->syncp);
+ u64_stats_add(&stats->rx_packets, offl_stats->rx_packets);
+ u64_stats_add(&stats->rx_bytes, offl_stats->rx_bytes);
+ u64_stats_add(&stats->tx_packets, offl_stats->tx_packets);
+ u64_stats_add(&stats->tx_bytes, offl_stats->tx_bytes);
+ /* Update multicast statistics */
+ if (unlikely(update_mcast_rx_stats)) {
+ u64_stats_add(&stats->rx_multicast, offl_stats->rx_packets);
+ }
+ u64_stats_update_end(&stats->syncp);
+}
+/* QCA NSS ECM Support - End */
+
static void macvlan_dev_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *stats)
{
@@ -1506,6 +1534,7 @@ int macvlan_common_newlink(struct net *s
vlan->dev = dev;
vlan->port = port;
vlan->set_features = MACVLAN_FEATURES;
+ vlan->offload_stats_update = macvlan_dev_update_stats; /* QCA NSS ECM Support */
vlan->mode = MACVLAN_MODE_VEPA;
if (data && data[IFLA_MACVLAN_MODE])

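macvlan_dev_update_stats() is wired in as the offload_stats_update callback so that traffic forwarded by an accelerator still shows up in the macvlan counters. A hedged sketch of the reporting side is below; the counter values and calling context are placeholders, only macvlan_offload_stats_update() comes from the patch, and netif_is_macvlan() is the existing kernel helper.

/* Illustrative sketch: credit hardware-forwarded packets back to a macvlan
 * device's per-CPU statistics.
 */
#include <linux/netdevice.h>
#include <linux/if_macvlan.h>

static void example_sync_macvlan_stats(struct net_device *dev)
{
	struct rtnl_link_stats64 stats = { };

	if (!netif_is_macvlan(dev))
		return;

	/* Totals accumulated by the offload engine since the last sync. */
	stats.rx_packets = 128;
	stats.rx_bytes = 128 * 1500;
	stats.tx_packets = 64;
	stats.tx_bytes = 64 * 1500;

	/* false: this batch contained no multicast receive traffic. */
	macvlan_offload_stats_update(dev, &stats, false);
}
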
View file

@@ -0,0 +1,154 @@
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -174,6 +174,13 @@ config NF_CONNTRACK_TIMEOUT
If unsure, say `N'.
+config NF_CONNTRACK_DSCPREMARK_EXT
+ bool 'Connection tracking extension for dscp remark target'
+ depends on NETFILTER_ADVANCED
+ help
+ This option enables support for connection tracking extension
+ for dscp remark.
+
config NF_CONNTRACK_TIMESTAMP
bool 'Connection tracking timestamping'
depends on NETFILTER_ADVANCED
--- a/include/net/netfilter/nf_conntrack_extend.h
+++ b/include/net/netfilter/nf_conntrack_extend.h
@@ -31,6 +31,10 @@ enum nf_ct_ext_id {
#if IS_ENABLED(CONFIG_NET_ACT_CT)
NF_CT_EXT_ACT_CT,
#endif
+#ifdef CONFIG_NF_CONNTRACK_DSCPREMARK_EXT
+ NF_CT_EXT_DSCPREMARK, /* QCA NSS ECM support */
+#endif
+
NF_CT_EXT_NUM,
};
--- a/net/netfilter/nf_conntrack_extend.c
+++ b/net/netfilter/nf_conntrack_extend.c
@@ -23,6 +23,7 @@
#include <net/netfilter/nf_conntrack_labels.h>
#include <net/netfilter/nf_conntrack_synproxy.h>
#include <net/netfilter/nf_conntrack_act_ct.h>
+#include <net/netfilter/nf_conntrack_dscpremark_ext.h>
#include <net/netfilter/nf_nat.h>
#define NF_CT_EXT_PREALLOC 128u /* conntrack events are on by default */
@@ -54,6 +55,9 @@ static const u8 nf_ct_ext_type_len[NF_CT
#if IS_ENABLED(CONFIG_NET_ACT_CT)
[NF_CT_EXT_ACT_CT] = sizeof(struct nf_conn_act_ct_ext),
#endif
+#ifdef CONFIG_NF_CONNTRACK_DSCPREMARK_EXT
+ [NF_CT_EXT_DSCPREMARK] = sizeof(struct nf_ct_dscpremark_ext),
+#endif
};
static __always_inline unsigned int total_extension_size(void)
@@ -86,6 +90,9 @@ static __always_inline unsigned int tota
#if IS_ENABLED(CONFIG_NET_ACT_CT)
+ sizeof(struct nf_conn_act_ct_ext)
#endif
+#ifdef CONFIG_NF_CONNTRACK_DSCPREMARK_EXT
+ + sizeof(struct nf_ct_dscpremark_ext)
+#endif
;
}
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -15,6 +15,7 @@ nf_conntrack-$(CONFIG_NF_CONNTRACK_OVS)
nf_conntrack-$(CONFIG_NF_CT_PROTO_DCCP) += nf_conntrack_proto_dccp.o
nf_conntrack-$(CONFIG_NF_CT_PROTO_SCTP) += nf_conntrack_proto_sctp.o
nf_conntrack-$(CONFIG_NF_CT_PROTO_GRE) += nf_conntrack_proto_gre.o
+nf_conntrack-$(CONFIG_NF_CONNTRACK_DSCPREMARK_EXT) += nf_conntrack_dscpremark_ext.o
ifeq ($(CONFIG_NF_CONNTRACK),m)
nf_conntrack-$(CONFIG_DEBUG_INFO_BTF_MODULES) += nf_conntrack_bpf.o
else ifeq ($(CONFIG_NF_CONNTRACK),y)
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -45,6 +45,9 @@
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_timestamp.h>
#include <net/netfilter/nf_conntrack_timeout.h>
+#ifdef CONFIG_NF_CONNTRACK_DSCPREMARK_EXT
+#include <net/netfilter/nf_conntrack_dscpremark_ext.h>
+#endif
#include <net/netfilter/nf_conntrack_labels.h>
#include <net/netfilter/nf_conntrack_synproxy.h>
#include <net/netfilter/nf_nat.h>
@@ -1740,6 +1743,9 @@ init_conntrack(struct net *net, struct n
nf_ct_acct_ext_add(ct, GFP_ATOMIC);
nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
nf_ct_labels_ext_add(ct);
+#ifdef CONFIG_NF_CONNTRACK_DSCPREMARK_EXT
+ nf_ct_dscpremark_ext_add(ct, GFP_ATOMIC);
+#endif
#ifdef CONFIG_NF_CONNTRACK_EVENTS
ecache = tmpl ? nf_ct_ecache_find(tmpl) : NULL;
--- a/net/netfilter/xt_DSCP.c
+++ b/net/netfilter/xt_DSCP.c
@@ -15,6 +15,9 @@
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_DSCP.h>
+#ifdef CONFIG_NF_CONNTRACK_DSCPREMARK_EXT
+#include <net/netfilter/nf_conntrack_dscpremark_ext.h>
+#endif
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_DESCRIPTION("Xtables: DSCP/TOS field modification");
@@ -31,6 +34,10 @@ dscp_tg(struct sk_buff *skb, const struc
{
const struct xt_DSCP_info *dinfo = par->targinfo;
u_int8_t dscp = ipv4_get_dsfield(ip_hdr(skb)) >> XT_DSCP_SHIFT;
+#ifdef CONFIG_NF_CONNTRACK_DSCPREMARK_EXT
+ struct nf_conn *ct;
+ enum ip_conntrack_info ctinfo;
+#endif
if (dscp != dinfo->dscp) {
if (skb_ensure_writable(skb, sizeof(struct iphdr)))
@@ -39,6 +46,13 @@ dscp_tg(struct sk_buff *skb, const struc
ipv4_change_dsfield(ip_hdr(skb), XT_DSCP_ECN_MASK,
dinfo->dscp << XT_DSCP_SHIFT);
+#ifdef CONFIG_NF_CONNTRACK_DSCPREMARK_EXT
+ ct = nf_ct_get(skb, &ctinfo);
+ if (!ct)
+ return XT_CONTINUE;
+
+ nf_conntrack_dscpremark_ext_set_dscp_rule_valid(ct);
+#endif
}
return XT_CONTINUE;
}
@@ -48,13 +62,24 @@ dscp_tg6(struct sk_buff *skb, const stru
{
const struct xt_DSCP_info *dinfo = par->targinfo;
u_int8_t dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> XT_DSCP_SHIFT;
-
+#ifdef CONFIG_NF_CONNTRACK_DSCPREMARK_EXT
+ struct nf_conn *ct;
+ enum ip_conntrack_info ctinfo;
+#endif
if (dscp != dinfo->dscp) {
if (skb_ensure_writable(skb, sizeof(struct ipv6hdr)))
return NF_DROP;
ipv6_change_dsfield(ipv6_hdr(skb), XT_DSCP_ECN_MASK,
dinfo->dscp << XT_DSCP_SHIFT);
+
+#ifdef CONFIG_NF_CONNTRACK_DSCPREMARK_EXT
+ ct = nf_ct_get(skb, &ctinfo);
+ if (!ct)
+ return XT_CONTINUE;
+
+ nf_conntrack_dscpremark_ext_set_dscp_rule_valid(ct);
+#endif
}
return XT_CONTINUE;
}

View file

@@ -0,0 +1,87 @@
From ce18a6fdff6a39a01111d74f513d2ef66142047c Mon Sep 17 00:00:00 2001
From: Murat Sezgin <msezgin@codeaurora.org>
Date: Wed, 5 Aug 2020 13:21:27 -0700
Subject: [PATCH 246/281] net:ipv6: Fix IPv6 user route change event calls
These events should be called only when the route table is
changed by userspace. So, we should call them in the
ioctl and the netlink message handler functions.
Change-Id: If7ec615014cfc79d5fa72878e49eaf99c2560c32
Signed-off-by: Murat Sezgin <msezgin@codeaurora.org>
---
net/ipv6/route.c | 31 +++++++++++++++++++++----------
1 file changed, 21 insertions(+), 10 deletions(-)
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -3867,10 +3867,6 @@ int ip6_route_add(struct fib6_config *cf
return PTR_ERR(rt);
err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, extack);
- if (!err)
- atomic_notifier_call_chain(&ip6route_chain,
- RTM_NEWROUTE, rt);
-
fib6_info_release(rt);
return err;
@@ -3892,9 +3888,6 @@ static int __ip6_del_rt(struct fib6_info
err = fib6_del(rt, info);
spin_unlock_bh(&table->tb6_lock);
- if (!err)
- atomic_notifier_call_chain(&ip6route_chain,
- RTM_DELROUTE, rt);
out:
fib6_info_release(rt);
return err;
@@ -4500,6 +4493,10 @@ int ipv6_route_ioctl(struct net *net, un
break;
}
rtnl_unlock();
+ if (!err)
+ atomic_notifier_call_chain(&ip6route_chain,
+ (cmd == SIOCADDRT) ? RTM_NEWROUTE : RTM_DELROUTE, &cfg);
+
return err;
}
@@ -5518,11 +5515,17 @@ static int inet6_rtm_delroute(struct sk_
}
if (cfg.fc_mp)
- return ip6_route_multipath_del(&cfg, extack);
+ err = ip6_route_multipath_del(&cfg, extack);
else {
cfg.fc_delete_all_nh = 1;
- return ip6_route_del(&cfg, extack);
+ err = ip6_route_del(&cfg, extack);
}
+
+ if (!err)
+ atomic_notifier_call_chain(&ip6route_chain,
+ RTM_DELROUTE, &cfg);
+
+ return err;
}
static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh,
@@ -5539,9 +5542,15 @@ static int inet6_rtm_newroute(struct sk_
cfg.fc_metric = IP6_RT_PRIO_USER;
if (cfg.fc_mp)
- return ip6_route_multipath_add(&cfg, extack);
+ err = ip6_route_multipath_add(&cfg, extack);
else
- return ip6_route_add(&cfg, GFP_KERNEL, extack);
+ err = ip6_route_add(&cfg, GFP_KERNEL, extack);
+
+ if (!err)
+ atomic_notifier_call_chain(&ip6route_chain,
+ RTM_NEWROUTE, &cfg);
+
+ return err;
}
/* add the overhead of this fib6_nh to nexthop_len */

View file

@@ -0,0 +1,92 @@
From 3c17a0e1112be70071e98d5208da5b55dcec20a6 Mon Sep 17 00:00:00 2001
From: Simon Casey <simon501098c@gmail.com>
Date: Wed, 2 Feb 2022 19:37:29 +0100
Subject: [PATCH] Update 607-qca-add-add-nss-bridge-mgr-support.patch for kernel 5.15
---
include/linux/if_bridge.h | 4 ++++
net/bridge/br_fdb.c | 25 +++++++++++++++++++++----
2 files changed, 25 insertions(+), 4 deletions(-)
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -254,4 +254,8 @@ typedef struct net_bridge_port *br_get_d
extern br_get_dst_hook_t __rcu *br_get_dst_hook;
/* QCA NSS ECM support - End */
+/* QCA NSS bridge-mgr support - Start */
+extern struct net_device *br_fdb_bridge_dev_get_and_hold(struct net_bridge *br);
+/* QCA NSS bridge-mgr support - End */
+
#endif
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -576,7 +576,7 @@ void br_fdb_cleanup(struct work_struct *
unsigned long delay = hold_time(br);
unsigned long work_delay = delay;
unsigned long now = jiffies;
- u8 mac_addr[6]; /* QCA NSS ECM support */
+ struct br_fdb_event fdb_event; /* QCA NSS bridge-mgr support */
/* this part is tricky, in order to avoid blocking learning and
* consequently forwarding, we rely on rcu to delete objects with
@@ -604,12 +604,13 @@ void br_fdb_cleanup(struct work_struct *
} else {
spin_lock_bh(&br->hash_lock);
if (!hlist_unhashed(&f->fdb_node)) {
- ether_addr_copy(mac_addr, f->key.addr.addr);
+ memset(&fdb_event, 0, sizeof(fdb_event));
+ ether_addr_copy(fdb_event.addr, f->key.addr.addr);
fdb_delete(br, f, true);
/* QCA NSS ECM support - Start */
atomic_notifier_call_chain(
&br_fdb_update_notifier_list, 0,
- (void *)mac_addr);
+ (void *)&fdb_event);
/* QCA NSS ECM support - End */
}
spin_unlock_bh(&br->hash_lock);
@@ -907,10 +908,21 @@ static bool __fdb_mark_active(struct net
test_and_clear_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags));
}
+/* QCA NSS bridge-mgr support - Start */
+/* Get the bridge device */
+struct net_device *br_fdb_bridge_dev_get_and_hold(struct net_bridge *br)
+{
+ dev_hold(br->dev);
+ return br->dev;
+}
+EXPORT_SYMBOL_GPL(br_fdb_bridge_dev_get_and_hold);
+/* QCA NSS bridge-mgr support - End */
+
void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
const unsigned char *addr, u16 vid, unsigned long flags)
{
struct net_bridge_fdb_entry *fdb;
+ struct br_fdb_event fdb_event; /* QCA NSS bridge-mgr support */
/* some users want to always flood. */
if (hold_time(br) == 0)
@@ -936,6 +948,12 @@ void br_fdb_update(struct net_bridge *br
if (unlikely(source != READ_ONCE(fdb->dst) &&
!test_bit(BR_FDB_STICKY, &fdb->flags))) {
br_switchdev_fdb_notify(br, fdb, RTM_DELNEIGH);
+ /* QCA NSS bridge-mgr support - Start */
+ ether_addr_copy(fdb_event.addr, addr);
+ fdb_event.br = br;
+ fdb_event.orig_dev = fdb->dst->dev;
+ fdb_event.dev = source->dev;
+ /* QCA NSS bridge-mgr support - End */
WRITE_ONCE(fdb->dst, source);
fdb_modified = true;
/* Take over HW learned entry */
@@ -952,7 +970,7 @@ void br_fdb_update(struct net_bridge *br
/* QCA NSS ECM support - Start */
atomic_notifier_call_chain(
&br_fdb_update_notifier_list,
- 0, (void *)addr);
+ 0, (void *)&fdb_event);
/* QCA NSS ECM support - End */
}

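br_fdb_update() and the cleanup path now publish a struct br_fdb_event (MAC address plus bridge and port devices) on the br_fdb_update_notifier_list chain instead of a bare MAC address. A hedged sketch of a listener is below; the br_fdb_event layout and the notifier chain come from the companion QCA ECM support patches, and the registration helper used there is not shown in this hunk, so only the handler body is illustrated.

/* Illustrative sketch: react to bridge FDB updates, e.g. to flush offloaded
 * flows keyed on a MAC address that moved ports or aged out.
 */
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/if_bridge.h>

static int example_fdb_event(struct notifier_block *nb, unsigned long event,
			     void *ptr)
{
	struct br_fdb_event *fdb_event = ptr;

	/* The cleanup path only fills in the address, so dev may be NULL. */
	pr_debug("fdb update for %pM on %s\n", fdb_event->addr,
		 fdb_event->dev ? fdb_event->dev->name : "(aged out)");

	return NOTIFY_DONE;
}

static struct notifier_block example_fdb_nb = {
	.notifier_call = example_fdb_event,
};
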
View file

@@ -0,0 +1,25 @@
--- a/include/uapi/linux/pkt_cls.h
+++ b/include/uapi/linux/pkt_cls.h
@@ -139,6 +139,7 @@ enum tca_id {
TCA_ID_MPLS,
TCA_ID_CT,
TCA_ID_GATE,
+ TCA_ID_MIRRED_NSS, /* QCA NSS Qdisc IGS Support */
/* other actions go here */
__TCA_ID_MAX = 255
};
@@ -817,4 +818,14 @@ enum {
TCF_EM_OPND_LT
};
+/* QCA NSS Qdisc Support - Start */
+#define _TC_MAKE32(x) ((x))
+#define _TC_MAKEMASK1(n) (_TC_MAKE32(1) << _TC_MAKE32(n))
+
+#define TC_NCLS _TC_MAKEMASK1(8)
+#define TC_NCLS_NSS _TC_MAKEMASK1(12)
+#define SET_TC_NCLS_NSS(v) ( TC_NCLS_NSS | ((v) & ~TC_NCLS_NSS))
+#define CLR_TC_NCLS_NSS(v) ( (v) & ~TC_NCLS_NSS)
+/* QCA NSS Qdisc Support - End */
+
#endif

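TC_NCLS_NSS mirrors the existing TC_NCLS convention: bit 12 of the classification verdict marks a packet whose classification has already been handled by an NSS qdisc, so the software path can skip re-classifying it. A small sketch of the set/test/clear pattern follows; the variable names are illustrative.

/* Illustrative sketch of the TC_NCLS_NSS helpers added above. */
#include <linux/pkt_cls.h>

static __u32 example_mark_nss_classified(__u32 verdict)
{
	/* Mark the verdict as already classified by the NSS data plane. */
	verdict = SET_TC_NCLS_NSS(verdict);

	/* ...later, before handing the packet back to the software path... */
	if (verdict & TC_NCLS_NSS)
		verdict = CLR_TC_NCLS_NSS(verdict);

	return verdict;
}
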
View file

@@ -0,0 +1,463 @@
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -17,6 +17,7 @@ struct timer_list {
unsigned long expires;
void (*function)(struct timer_list *);
u32 flags;
+ unsigned long cust_data;
#ifdef CONFIG_LOCKDEP
struct lockdep_map lockdep_map;
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -151,6 +151,31 @@ resched:
}
+void ifb_update_offload_stats(struct net_device *dev, struct pcpu_sw_netstats *offload_stats)
+{
+ struct ifb_dev_private *dp;
+ struct ifb_q_private *txp;
+
+ if (!dev || !offload_stats) {
+ return;
+ }
+
+ if (!(dev->priv_flags_ext & IFF_EXT_IFB)) {
+ return;
+ }
+
+ dp = netdev_priv(dev);
+ txp = dp->tx_private;
+
+ u64_stats_update_begin(&txp->rx_stats.sync);
+ txp->rx_stats.packets += u64_stats_read(&offload_stats->rx_packets);
+ txp->rx_stats.bytes += u64_stats_read(&offload_stats->rx_bytes);
+ txp->tx_stats.packets += u64_stats_read(&offload_stats->tx_packets);
+ txp->tx_stats.bytes += u64_stats_read(&offload_stats->tx_bytes);
+ u64_stats_update_end(&txp->rx_stats.sync);
+}
+EXPORT_SYMBOL(ifb_update_offload_stats);
+
static void ifb_stats64(struct net_device *dev,
struct rtnl_link_stats64 *stats)
{
@@ -326,6 +351,7 @@ static void ifb_setup(struct net_device
dev->flags |= IFF_NOARP;
dev->flags &= ~IFF_MULTICAST;
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+ dev->priv_flags_ext |= IFF_EXT_IFB; /* Mark the device as an IFB device. */
netif_keep_dst(dev);
eth_hw_addr_random(dev);
dev->needs_free_netdev = true;
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -4696,6 +4696,15 @@ void dev_uc_flush(struct net_device *dev
void dev_uc_init(struct net_device *dev);
/**
+ * ifb_update_offload_stats - Update the IFB interface stats
+ * @dev: IFB device to update the stats
+ * @offload_stats: per CPU stats structure
+ *
+ * Allows update of IFB stats when flows are offloaded to an accelerator.
+ **/
+void ifb_update_offload_stats(struct net_device *dev, struct pcpu_sw_netstats *offload_stats);
+
+/**
* __dev_uc_sync - Synchonize device's unicast list
* @dev: device to sync
* @sync: function to call if address should be added
@@ -5222,6 +5231,11 @@ static inline bool netif_is_failover_sla
return dev->priv_flags & IFF_FAILOVER_SLAVE;
}
+static inline bool netif_is_ifb_dev(const struct net_device *dev)
+{
+ return dev->priv_flags_ext & IFF_EXT_IFB;
+}
+
/* This device needs to keep skb dst for qdisc enqueue or ndo_start_xmit() */
static inline void netif_keep_dst(struct net_device *dev)
{
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -1306,4 +1306,248 @@ enum {
#define TCA_ETS_MAX (__TCA_ETS_MAX - 1)
+/* QCA NSS Clients Support - Start */
+enum {
+ TCA_NSS_ACCEL_MODE_NSS_FW,
+ TCA_NSS_ACCEL_MODE_PPE,
+ TCA_NSS_ACCEL_MODE_MAX
+};
+
+/* NSSFIFO section */
+
+enum {
+ TCA_NSSFIFO_UNSPEC,
+ TCA_NSSFIFO_PARMS,
+ __TCA_NSSFIFO_MAX
+};
+
+#define TCA_NSSFIFO_MAX (__TCA_NSSFIFO_MAX - 1)
+
+struct tc_nssfifo_qopt {
+ __u32 limit; /* Queue length: bytes for bfifo, packets for pfifo */
+ __u8 set_default; /* Sets qdisc to be the default qdisc for enqueue */
+ __u8 accel_mode; /* Dictates which data plane offloads the qdisc */
+};
+
+/* NSSWRED section */
+
+enum {
+ TCA_NSSWRED_UNSPEC,
+ TCA_NSSWRED_PARMS,
+ __TCA_NSSWRED_MAX
+};
+
+#define TCA_NSSWRED_MAX (__TCA_NSSWRED_MAX - 1)
+#define NSSWRED_CLASS_MAX 6
+struct tc_red_alg_parameter {
+ __u32 min; /* qlen_avg < min: pkts are all enqueued */
+ __u32 max; /* qlen_avg > max: pkts are all dropped */
+ __u32 probability;/* Drop probability at qlen_avg = max */
+ __u32 exp_weight_factor;/* exp_weight_factor for calculating qlen_avg */
+};
+
+struct tc_nsswred_traffic_class {
+ __u32 limit; /* Queue length */
+ __u32 weight_mode_value; /* Weight mode value */
+ struct tc_red_alg_parameter rap;/* Parameters for RED alg */
+};
+
+/*
+ * Weight modes for WRED
+ */
+enum tc_nsswred_weight_modes {
+ TC_NSSWRED_WEIGHT_MODE_DSCP = 0,/* Weight mode is DSCP */
+ TC_NSSWRED_WEIGHT_MODES, /* Must be last */
+};
+
+struct tc_nsswred_qopt {
+ __u32 limit; /* Queue length */
+ enum tc_nsswred_weight_modes weight_mode;
+ /* Weight mode */
+ __u32 traffic_classes; /* How many traffic classes: DPs */
+ __u32 def_traffic_class; /* Default traffic if no match: def_DP */
+ __u32 traffic_id; /* The traffic id to be configured: DP */
+ __u32 weight_mode_value; /* Weight mode value */
+ struct tc_red_alg_parameter rap;/* RED algorithm parameters */
+ struct tc_nsswred_traffic_class tntc[NSSWRED_CLASS_MAX];
+ /* Traffic settings for dumping */
+ __u8 ecn; /* Setting ECN bit or dropping */
+ __u8 set_default; /* Sets qdisc to be the default for enqueue */
+ __u8 accel_mode; /* Dictates which data plane offloads the qdisc */
+};
+
+/* NSSCODEL section */
+
+enum {
+ TCA_NSSCODEL_UNSPEC,
+ TCA_NSSCODEL_PARMS,
+ __TCA_NSSCODEL_MAX
+};
+
+#define TCA_NSSCODEL_MAX (__TCA_NSSCODEL_MAX - 1)
+
+struct tc_nsscodel_qopt {
+ __u32 target; /* Acceptable queueing delay */
+ __u32 limit; /* Max number of packets that can be held in the queue */
+ __u32 interval; /* Monitoring interval */
+ __u32 flows; /* Number of flow buckets */
+ __u32 quantum; /* Weight (in bytes) used for DRR of flow buckets */
+ __u8 ecn; /* 0 - disable ECN, 1 - enable ECN */
+ __u8 set_default; /* Sets qdisc to be the default qdisc for enqueue */
+ __u8 accel_mode; /* Dictates which data plane offloads the qdisc */
+};
+
+struct tc_nsscodel_xstats {
+ __u32 peak_queue_delay; /* Peak delay experienced by a dequeued packet */
+ __u32 peak_drop_delay; /* Peak delay experienced by a dropped packet */
+};
+
+/* NSSFQ_CODEL section */
+
+struct tc_nssfq_codel_xstats {
+ __u32 new_flow_count; /* Total number of new flows seen */
+ __u32 new_flows_len; /* Current number of new flows */
+ __u32 old_flows_len; /* Current number of old flows */
+ __u32 ecn_mark; /* Number of packets marked with ECN */
+ __u32 drop_overlimit; /* Number of packets dropped due to overlimit */
+ __u32 maxpacket; /* The largest packet seen so far in the queue */
+};
+
+/* NSSTBL section */
+
+enum {
+ TCA_NSSTBL_UNSPEC,
+ TCA_NSSTBL_PARMS,
+ __TCA_NSSTBL_MAX
+};
+
+#define TCA_NSSTBL_MAX (__TCA_NSSTBL_MAX - 1)
+
+struct tc_nsstbl_qopt {
+ __u32 burst; /* Maximum burst size */
+ __u32 rate; /* Limiting rate of TBF */
+ __u32 peakrate; /* Maximum rate at which TBF is allowed to send */
+ __u32 mtu; /* Max size of packet, or minimum burst size */
+ __u8 accel_mode; /* Dictates which data plane offloads the qdisc */
+};
+
+/* NSSPRIO section */
+
+#define TCA_NSSPRIO_MAX_BANDS 256
+
+enum {
+ TCA_NSSPRIO_UNSPEC,
+ TCA_NSSPRIO_PARMS,
+ __TCA_NSSPRIO_MAX
+};
+
+#define TCA_NSSPRIO_MAX (__TCA_NSSPRIO_MAX - 1)
+
+struct tc_nssprio_qopt {
+ __u32 bands; /* Number of bands */
+ __u8 accel_mode; /* Dictates which data plane offloads the qdisc */
+};
+
+/* NSSBF section */
+
+enum {
+ TCA_NSSBF_UNSPEC,
+ TCA_NSSBF_CLASS_PARMS,
+ TCA_NSSBF_QDISC_PARMS,
+ __TCA_NSSBF_MAX
+};
+
+#define TCA_NSSBF_MAX (__TCA_NSSBF_MAX - 1)
+
+struct tc_nssbf_class_qopt {
+ __u32 burst; /* Maximum burst size */
+ __u32 rate; /* Allowed bandwidth for this class */
+ __u32 mtu; /* MTU of the associated interface */
+ __u32 quantum; /* Quantum allocation for DRR */
+};
+
+struct tc_nssbf_qopt {
+ __u16 defcls; /* Default class value */
+ __u8 accel_mode; /* Dictates which data plane offloads the qdisc */
+};
+
+/* NSSWRR section */
+
+enum {
+ TCA_NSSWRR_UNSPEC,
+ TCA_NSSWRR_CLASS_PARMS,
+ TCA_NSSWRR_QDISC_PARMS,
+ __TCA_NSSWRR_MAX
+};
+
+#define TCA_NSSWRR_MAX (__TCA_NSSWRR_MAX - 1)
+
+struct tc_nsswrr_class_qopt {
+ __u32 quantum; /* Weight associated to this class */
+};
+
+struct tc_nsswrr_qopt {
+ __u8 accel_mode; /* Dictates which data plane offloads the qdisc */
+};
+
+/* NSSWFQ section */
+
+enum {
+ TCA_NSSWFQ_UNSPEC,
+ TCA_NSSWFQ_CLASS_PARMS,
+ TCA_NSSWFQ_QDISC_PARMS,
+ __TCA_NSSWFQ_MAX
+};
+
+#define TCA_NSSWFQ_MAX (__TCA_NSSWFQ_MAX - 1)
+
+struct tc_nsswfq_class_qopt {
+ __u32 quantum; /* Weight associated to this class */
+};
+
+struct tc_nsswfq_qopt {
+ __u8 accel_mode; /* Dictates which data plane offloads the qdisc */
+};
+
+/* NSSHTB section */
+
+enum {
+ TCA_NSSHTB_UNSPEC,
+ TCA_NSSHTB_CLASS_PARMS,
+ TCA_NSSHTB_QDISC_PARMS,
+ __TCA_NSSHTB_MAX
+};
+
+#define TCA_NSSHTB_MAX (__TCA_NSSHTB_MAX - 1)
+
+struct tc_nsshtb_class_qopt {
+ __u32 burst; /* Allowed burst size */
+ __u32 rate; /* Allowed bandwidth for this class */
+ __u32 cburst; /* Maximum burst size */
+ __u32 crate; /* Maximum bandwidth for this class */
+ __u32 quantum; /* Quantum allocation for DRR */
+ __u32 priority; /* Priority value associated with this class */
+ __u32 overhead; /* Overhead in bytes per packet */
+};
+
+struct tc_nsshtb_qopt {
+ __u32 r2q; /* Rate to quantum ratio */
+ __u8 accel_mode; /* Dictates which data plane offloads the qdisc */
+};
+
+/* NSSBLACKHOLE section */
+
+enum {
+ TCA_NSSBLACKHOLE_UNSPEC,
+ TCA_NSSBLACKHOLE_PARMS,
+ __TCA_NSSBLACKHOLE_MAX
+};
+
+#define TCA_NSSBLACKHOLE_MAX (__TCA_NSSBLACKHOLE_MAX - 1)
+
+struct tc_nssblackhole_qopt {
+ __u8 set_default; /* Sets qdisc to be the default qdisc for enqueue */
+ __u8 accel_mode; /* Dictates which data plane offloads the qdisc */
+};
+/* QCA NSS Clients Support - End */
#endif
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -314,6 +314,7 @@ struct Qdisc *qdisc_lookup(struct net_de
out:
return q;
}
+EXPORT_SYMBOL(qdisc_lookup);
struct Qdisc *qdisc_lookup_rcu(struct net_device *dev, u32 handle)
{
@@ -2389,4 +2390,26 @@ static int __init pktsched_init(void)
return 0;
}
+/* QCA NSS Qdisc Support - Start */
+bool tcf_destroy(struct tcf_proto *tp, bool force)
+{
+ tp->ops->destroy(tp, force, NULL);
+ module_put(tp->ops->owner);
+ kfree_rcu(tp, rcu);
+
+ return true;
+}
+
+void tcf_destroy_chain(struct tcf_proto __rcu **fl)
+{
+ struct tcf_proto *tp;
+
+ while ((tp = rtnl_dereference(*fl)) != NULL) {
+ RCU_INIT_POINTER(*fl, tp->next);
+ tcf_destroy(tp, true);
+ }
+}
+EXPORT_SYMBOL(tcf_destroy_chain);
+/* QCA NSS Qdisc Support - End */
+
subsys_initcall(pktsched_init);
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -1069,6 +1069,7 @@ static void __qdisc_destroy(struct Qdisc
call_rcu(&qdisc->rcu, qdisc_free_cb);
}
+EXPORT_SYMBOL(qdisc_destroy);
void qdisc_destroy(struct Qdisc *qdisc)
{
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -94,6 +94,7 @@ struct Qdisc {
#define TCQ_F_INVISIBLE 0x80 /* invisible by default in dump */
#define TCQ_F_NOLOCK 0x100 /* qdisc does not require locking */
#define TCQ_F_OFFLOADED 0x200 /* qdisc is offloaded to HW */
+#define TCQ_F_NSS 0x1000 /* NSS qdisc flag. */
u32 limit;
const struct Qdisc_ops *ops;
struct qdisc_size_table __rcu *stab;
@@ -751,6 +752,42 @@ static inline bool skb_skip_tc_classify(
return false;
}
+/* QCA NSS Qdisc Support - Start */
+/*
+ * Set skb classify bit field.
+ */
+static inline void skb_set_tc_classify_offload(struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ skb->tc_skip_classify_offload = 1;
+#endif
+}
+
+/*
+ * Clear skb classify bit field.
+ */
+static inline void skb_clear_tc_classify_offload(struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ skb->tc_skip_classify_offload = 0;
+#endif
+}
+
+/*
+ * Skip skb processing if sent from ifb dev.
+ */
+static inline bool skb_skip_tc_classify_offload(struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ if (skb->tc_skip_classify_offload) {
+ skb_clear_tc_classify_offload(skb);
+ return true;
+ }
+#endif
+ return false;
+}
+/* QCA NSS Qdisc Support - End */
+
/* Reset all TX qdiscs greater than index of a device. */
static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
{
@@ -1323,4 +1360,9 @@ static inline void qdisc_synchronize(con
msleep(1);
}
+/* QCA NSS Qdisc Support - Start */
+void qdisc_destroy(struct Qdisc *qdisc);
+void tcf_destroy_chain(struct tcf_proto __rcu **fl);
+/* QCA NSS Qdisc Support - End */
+
#endif
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -764,6 +764,7 @@ typedef unsigned char *sk_buff_data_t;
* @offload_fwd_mark: Packet was L2-forwarded in hardware
* @offload_l3_fwd_mark: Packet was L3-forwarded in hardware
* @tc_skip_classify: do not classify packet. set by IFB device
+ *	@tc_skip_classify_offload: do not classify packet. set by offload IFB device
* @tc_at_ingress: used within tc_classify to distinguish in/egress
* @redirected: packet was redirected by packet classifier
* @from_ingress: packet was redirected from the ingress path
@@ -945,6 +946,9 @@ struct sk_buff {
__u8 tc_at_ingress:1; /* See TC_AT_INGRESS_MASK */
__u8 tc_skip_classify:1;
#endif
+#ifdef CONFIG_NET_CLS_ACT
+ __u8 tc_skip_classify_offload:1; /* QCA NSS Qdisc Support */
+#endif
__u8 remcsum_offload:1;
__u8 csum_complete_sw:1;
__u8 csum_level:2;


@@ -0,0 +1,46 @@
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -398,6 +398,31 @@ err_tlock:
}
EXPORT_SYMBOL_GPL(l2tp_session_register);
+void l2tp_stats_update(struct l2tp_tunnel *tunnel,
+ struct l2tp_session *session,
+ struct l2tp_stats *stats)
+{
+ atomic_long_add(atomic_long_read(&stats->rx_packets),
+ &tunnel->stats.rx_packets);
+ atomic_long_add(atomic_long_read(&stats->rx_bytes),
+ &tunnel->stats.rx_bytes);
+ atomic_long_add(atomic_long_read(&stats->tx_packets),
+ &tunnel->stats.tx_packets);
+ atomic_long_add(atomic_long_read(&stats->tx_bytes),
+ &tunnel->stats.tx_bytes);
+
+ atomic_long_add(atomic_long_read(&stats->rx_packets),
+ &session->stats.rx_packets);
+ atomic_long_add(atomic_long_read(&stats->rx_bytes),
+ &session->stats.rx_bytes);
+ atomic_long_add(atomic_long_read(&stats->tx_packets),
+ &session->stats.tx_packets);
+ atomic_long_add(atomic_long_read(&stats->tx_bytes),
+ &session->stats.tx_bytes);
+}
+EXPORT_SYMBOL_GPL(l2tp_stats_update);
+
+
/*****************************************************************************
* Receive data handling
*****************************************************************************/
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -232,6 +232,9 @@ struct l2tp_session *l2tp_session_get_nt
struct l2tp_session *l2tp_session_get_by_ifname(const struct net *net,
const char *ifname);
+void l2tp_stats_update(struct l2tp_tunnel *tunnel, struct l2tp_session *session,
+ struct l2tp_stats *stats);
+
/* Tunnel and session lifetime management.
* Creation of a new instance is a two-step process: create, then register.
* Destruction is triggered using the *_delete functions, and completes asynchronously.
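
l2tp_stats_update() lets an acceleration engine fold its hardware counters back into the kernel's per-tunnel and per-session statistics. A hypothetical sync helper is sketched below, assuming the private net/l2tp/l2tp_core.h header is reachable by the caller; only l2tp_stats_update() itself comes from this patch:

#include "l2tp_core.h"		/* private L2TP header, assumed accessible */

static void nss_l2tp_sync_stats(struct l2tp_tunnel *tunnel,
				struct l2tp_session *session,
				u64 rx_pkts, u64 rx_bytes,
				u64 tx_pkts, u64 tx_bytes)
{
	struct l2tp_stats stats = {};

	atomic_long_set(&stats.rx_packets, rx_pkts);
	atomic_long_set(&stats.rx_bytes, rx_bytes);
	atomic_long_set(&stats.tx_packets, tx_pkts);
	atomic_long_set(&stats.tx_bytes, tx_bytes);

	/* Adds the deltas to both the tunnel and the session counters */
	l2tp_stats_update(tunnel, session, &stats);
}
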


@@ -0,0 +1,478 @@
--- a/include/linux/if_pppox.h
+++ b/include/linux/if_pppox.h
@@ -36,6 +36,7 @@ struct pptp_opt {
u32 ack_sent, ack_recv;
u32 seq_sent, seq_recv;
int ppp_flags;
+ bool pptp_offload_mode;
};
#include <net/sock.h>
@@ -100,8 +101,40 @@ struct pppoe_channel_ops {
int (*get_addressing)(struct ppp_channel *, struct pppoe_opt *);
};
+/* PPTP client callback */
+typedef int (*pptp_gre_seq_offload_callback_t)(struct sk_buff *skb,
+ struct net_device *pptp_dev);
+
/* Return PPPoE channel specific addressing information */
extern int pppoe_channel_addressing_get(struct ppp_channel *chan,
struct pppoe_opt *addressing);
+/* Lookup PPTP session info and return PPTP session using sip, dip and local call id */
+extern int pptp_session_find_by_src_callid(struct pptp_opt *opt, __be16 src_call_id,
+ __be32 daddr, __be32 saddr);
+
+/* Lookup PPTP session info and return PPTP session using dip and peer call id */
+extern int pptp_session_find(struct pptp_opt *opt, __be16 peer_call_id,
+ __be32 peer_ip_addr);
+
+/* Return PPTP session information given the channel */
+extern void pptp_channel_addressing_get(struct pptp_opt *opt,
+ struct ppp_channel *chan);
+
+/* Enable the PPTP session offload flag */
+extern int pptp_session_enable_offload_mode(__be16 peer_call_id,
+ __be32 peer_ip_addr);
+
+/* Disable the PPTP session offload flag */
+extern int pptp_session_disable_offload_mode(__be16 peer_call_id,
+ __be32 peer_ip_addr);
+
+/* Register the PPTP GRE packets sequence number offload callback */
+extern int
+pptp_register_gre_seq_offload_callback(pptp_gre_seq_offload_callback_t
+ pptp_client_cb);
+
+/* Unregister the PPTP GRE packets sequence number offload callback */
+extern void pptp_unregister_gre_seq_offload_callback(void);
+
#endif /* !(__LINUX_IF_PPPOX_H) */
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -2973,6 +2973,20 @@ char *ppp_dev_name(struct ppp_channel *c
return name;
}
+/* Return the PPP net device index */
+int ppp_dev_index(struct ppp_channel *chan)
+{
+ struct channel *pch = chan->ppp;
+ int ifindex = 0;
+
+ if (pch) {
+ read_lock_bh(&pch->upl);
+ if (pch->ppp && pch->ppp->dev)
+ ifindex = pch->ppp->dev->ifindex;
+ read_unlock_bh(&pch->upl);
+ }
+ return ifindex;
+}
/*
* Disconnect a channel from the generic layer.
@@ -3681,6 +3695,28 @@ void ppp_update_stats(struct net_device
ppp_recv_unlock(ppp);
}
+/* Returns true if compression is enabled on the PPP device
+ */
+bool ppp_is_cp_enabled(struct net_device *dev)
+{
+ struct ppp *ppp;
+ bool flag = false;
+
+ if (!dev)
+ return false;
+
+ if (dev->type != ARPHRD_PPP)
+ return false;
+
+ ppp = netdev_priv(dev);
+ ppp_lock(ppp);
+ flag = !!(ppp->xstate & SC_COMP_RUN) || !!(ppp->rstate & SC_DECOMP_RUN);
+ ppp_unlock(ppp);
+
+ return flag;
+}
+EXPORT_SYMBOL(ppp_is_cp_enabled);
+
/* Returns >0 if the device is a multilink PPP netdevice, 0 if not or < 0 if
* the device is not PPP.
*/
@@ -3872,6 +3908,7 @@ EXPORT_SYMBOL(ppp_unregister_channel);
EXPORT_SYMBOL(ppp_channel_index);
EXPORT_SYMBOL(ppp_unit_number);
EXPORT_SYMBOL(ppp_dev_name);
+EXPORT_SYMBOL(ppp_dev_index);
EXPORT_SYMBOL(ppp_input);
EXPORT_SYMBOL(ppp_input_error);
EXPORT_SYMBOL(ppp_output_wakeup);
--- a/include/linux/ppp_channel.h
+++ b/include/linux/ppp_channel.h
@@ -84,6 +84,9 @@ extern void ppp_unregister_channel(struc
/* Get the channel number for a channel */
extern int ppp_channel_index(struct ppp_channel *);
+/* Get the device index associated with a channel, or 0, if none */
+extern int ppp_dev_index(struct ppp_channel *);
+
/* Get the unit number associated with a channel, or -1 if none */
extern int ppp_unit_number(struct ppp_channel *);
@@ -116,6 +119,7 @@ extern int ppp_hold_channels(struct net_
/* Test if ppp xmit lock is locked */
extern bool ppp_is_xmit_locked(struct net_device *dev);
+bool ppp_is_cp_enabled(struct net_device *dev);
/* Test if the ppp device is a multi-link ppp device */
extern int ppp_is_multilink(struct net_device *dev);
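
ppp_dev_index() and ppp_is_cp_enabled() give offload clients enough information to decide whether a PPP flow may be accelerated: compressed sessions have to stay in the software path. A hedged sketch of such a check follows; only the two exported helpers come from this patch, the nss_* wrapper is illustrative:

#include <linux/if_arp.h>
#include <linux/netdevice.h>
#include <linux/ppp_channel.h>

/* Illustrative eligibility test used by a hypothetical offload client */
static bool nss_ppp_can_accelerate(struct net_device *dev)
{
	if (!dev || dev->type != ARPHRD_PPP)
		return false;

	/* CCP compression state cannot be reproduced by the engine */
	if (ppp_is_cp_enabled(dev))
		return false;

	return true;
}
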
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -50,6 +50,8 @@ static struct proto pptp_sk_proto __read
static const struct ppp_channel_ops pptp_chan_ops;
static const struct proto_ops pptp_ops;
+static pptp_gre_seq_offload_callback_t __rcu pptp_gre_offload_xmit_cb;
+
static struct pppox_sock *lookup_chan(u16 call_id, __be32 s_addr)
{
struct pppox_sock *sock;
@@ -91,6 +93,79 @@ static int lookup_chan_dst(u16 call_id,
return i < MAX_CALLID;
}
+/* Search a pptp session based on local call id, local and remote ip address */
+static int lookup_session_src(struct pptp_opt *opt, u16 call_id, __be32 daddr, __be32 saddr)
+{
+ struct pppox_sock *sock;
+ int i = 1;
+
+ rcu_read_lock();
+ for_each_set_bit_from(i, callid_bitmap, MAX_CALLID) {
+ sock = rcu_dereference(callid_sock[i]);
+ if (!sock)
+ continue;
+
+ if (sock->proto.pptp.src_addr.call_id == call_id &&
+ sock->proto.pptp.dst_addr.sin_addr.s_addr == daddr &&
+ sock->proto.pptp.src_addr.sin_addr.s_addr == saddr) {
+ sock_hold(sk_pppox(sock));
+ memcpy(opt, &sock->proto.pptp, sizeof(struct pptp_opt));
+ sock_put(sk_pppox(sock));
+ rcu_read_unlock();
+ return 0;
+ }
+ }
+ rcu_read_unlock();
+ return -EINVAL;
+}
+
+/* Search a pptp session based on peer call id and peer ip address */
+static int lookup_session_dst(struct pptp_opt *opt, u16 call_id, __be32 d_addr)
+{
+ struct pppox_sock *sock;
+ int i = 1;
+
+ rcu_read_lock();
+ for_each_set_bit_from(i, callid_bitmap, MAX_CALLID) {
+ sock = rcu_dereference(callid_sock[i]);
+ if (!sock)
+ continue;
+
+ if (sock->proto.pptp.dst_addr.call_id == call_id &&
+ sock->proto.pptp.dst_addr.sin_addr.s_addr == d_addr) {
+ sock_hold(sk_pppox(sock));
+ memcpy(opt, &sock->proto.pptp, sizeof(struct pptp_opt));
+ sock_put(sk_pppox(sock));
+ rcu_read_unlock();
+ return 0;
+ }
+ }
+ rcu_read_unlock();
+ return -EINVAL;
+}
+
+/* If offload mode is set then this function sends all packets to the
+ * offload module instead of the network stack
+ */
+static int pptp_client_skb_xmit(struct sk_buff *skb,
+ struct net_device *pptp_dev)
+{
+ pptp_gre_seq_offload_callback_t pptp_gre_offload_cb_f;
+ int ret;
+
+ rcu_read_lock();
+ pptp_gre_offload_cb_f = rcu_dereference(pptp_gre_offload_xmit_cb);
+
+ if (!pptp_gre_offload_cb_f) {
+ rcu_read_unlock();
+ return -1;
+ }
+
+ ret = pptp_gre_offload_cb_f(skb, pptp_dev);
+ rcu_read_unlock();
+ return ret;
+}
+
static int add_chan(struct pppox_sock *sock,
struct pptp_addr *sa)
{
@@ -136,7 +211,7 @@ static struct rtable *pptp_route_output(
struct net *net;
net = sock_net(sk);
- flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark, 0,
+ flowi4_init_output(fl4, 0, sk->sk_mark, 0,
RT_SCOPE_UNIVERSE, IPPROTO_GRE, 0,
po->proto.pptp.dst_addr.sin_addr.s_addr,
po->proto.pptp.src_addr.sin_addr.s_addr,
@@ -163,8 +238,11 @@ static int pptp_xmit(struct ppp_channel
struct rtable *rt;
struct net_device *tdev;
+ struct net_device *pptp_dev;
struct iphdr *iph;
int max_headroom;
+ int pptp_ifindex;
+ int ret;
if (sk_pppox(po)->sk_state & PPPOX_DEAD)
goto tx_error;
@@ -258,7 +336,32 @@ static int pptp_xmit(struct ppp_channel
ip_select_ident(net, skb, NULL);
ip_send_check(iph);
- ip_local_out(net, skb->sk, skb);
+ pptp_ifindex = ppp_dev_index(chan);
+
+ /* set incoming interface as the ppp interface */
+ if (skb->skb_iif)
+ skb->skb_iif = pptp_ifindex;
+
+ /* If the PPTP GRE seq number offload module is not enabled yet
+	 * then send all PPTP GRE packets through the Linux network stack
+ */
+ if (!opt->pptp_offload_mode) {
+ ip_local_out(net, skb->sk, skb);
+ return 1;
+ }
+
+ pptp_dev = dev_get_by_index(&init_net, pptp_ifindex);
+ if (!pptp_dev)
+ goto tx_error;
+
+ /* If PPTP offload module is enabled then forward all PPTP GRE
+ * packets to PPTP GRE offload module
+ */
+ ret = pptp_client_skb_xmit(skb, pptp_dev);
+ dev_put(pptp_dev);
+ if (ret < 0)
+ goto tx_error;
+
return 1;
tx_error:
@@ -314,6 +417,13 @@ static int pptp_rcv_core(struct sock *sk
goto drop;
payload = skb->data + headersize;
+
+ /* If offload is enabled, we expect the offload module
+ * to handle PPTP GRE sequence number checks
+ */
+ if (opt->pptp_offload_mode)
+ goto allow_packet;
+
/* check for expected sequence number */
if (seq < opt->seq_recv + 1 || WRAPPED(opt->seq_recv, seq)) {
if ((payload[0] == PPP_ALLSTATIONS) && (payload[1] == PPP_UI) &&
@@ -371,6 +481,7 @@ static int pptp_rcv(struct sk_buff *skb)
if (po) {
skb_dst_drop(skb);
nf_reset_ct(skb);
+ skb->skb_iif = ppp_dev_index(&po->chan);
return sk_receive_skb(sk_pppox(po), skb, 0);
}
drop:
@@ -473,7 +584,7 @@ static int pptp_connect(struct socket *s
opt->dst_addr = sp->sa_addr.pptp;
sk->sk_state |= PPPOX_CONNECTED;
-
+ opt->pptp_offload_mode = false;
end:
release_sock(sk);
return error;
@@ -603,9 +714,169 @@ static int pptp_ppp_ioctl(struct ppp_cha
return err;
}
+/* pptp_channel_addressing_get()
+ * Return PPTP channel specific addressing information.
+ */
+void pptp_channel_addressing_get(struct pptp_opt *opt, struct ppp_channel *chan)
+{
+ struct sock *sk;
+ struct pppox_sock *po;
+
+ if (!opt)
+ return;
+
+ sk = (struct sock *)chan->private;
+ if (!sk)
+ return;
+
+ sock_hold(sk);
+
+	/* This is very unlikely, but check that the socket is in the connected state */
+ if (unlikely(sock_flag(sk, SOCK_DEAD) ||
+ !(sk->sk_state & PPPOX_CONNECTED))) {
+ sock_put(sk);
+ return;
+ }
+
+ po = pppox_sk(sk);
+ memcpy(opt, &po->proto.pptp, sizeof(struct pptp_opt));
+ sock_put(sk);
+}
+EXPORT_SYMBOL(pptp_channel_addressing_get);
+
+/* pptp_session_find()
+ * Search and return a PPTP session info based on peer callid and IP
+ * address. The function accepts the parameters in network byte order.
+ */
+int pptp_session_find(struct pptp_opt *opt, __be16 peer_call_id,
+ __be32 peer_ip_addr)
+{
+ if (!opt)
+ return -EINVAL;
+
+ return lookup_session_dst(opt, ntohs(peer_call_id), peer_ip_addr);
+}
+EXPORT_SYMBOL(pptp_session_find);
+
+/* pptp_session_find_by_src_callid()
+ * Search and return a PPTP session info based on src callid and IP
+ * address. The function accepts the parameters in network byte order.
+ */
+int pptp_session_find_by_src_callid(struct pptp_opt *opt, __be16 src_call_id,
+ __be32 daddr, __be32 saddr)
+{
+ if (!opt)
+ return -EINVAL;
+
+ return lookup_session_src(opt, ntohs(src_call_id), daddr, saddr);
+}
+EXPORT_SYMBOL(pptp_session_find_by_src_callid);
+
+ /* Function to change the offload mode true/false for a PPTP session */
+static int pptp_set_offload_mode(bool accel_mode,
+ __be16 peer_call_id, __be32 peer_ip_addr)
+{
+ struct pppox_sock *sock;
+ int i = 1;
+
+ rcu_read_lock();
+ for_each_set_bit_from(i, callid_bitmap, MAX_CALLID) {
+ sock = rcu_dereference(callid_sock[i]);
+ if (!sock)
+ continue;
+
+ if (sock->proto.pptp.dst_addr.call_id == peer_call_id &&
+ sock->proto.pptp.dst_addr.sin_addr.s_addr == peer_ip_addr) {
+ sock_hold(sk_pppox(sock));
+ sock->proto.pptp.pptp_offload_mode = accel_mode;
+ sock_put(sk_pppox(sock));
+ rcu_read_unlock();
+ return 0;
+ }
+ }
+ rcu_read_unlock();
+ return -EINVAL;
+}
+
+/* Enable the PPTP session offload flag */
+int pptp_session_enable_offload_mode(__be16 peer_call_id, __be32 peer_ip_addr)
+{
+ return pptp_set_offload_mode(true, peer_call_id, peer_ip_addr);
+}
+EXPORT_SYMBOL(pptp_session_enable_offload_mode);
+
+/* Disable the PPTP session offload flag */
+int pptp_session_disable_offload_mode(__be16 peer_call_id, __be32 peer_ip_addr)
+{
+ return pptp_set_offload_mode(false, peer_call_id, peer_ip_addr);
+}
+EXPORT_SYMBOL(pptp_session_disable_offload_mode);
+
+/* Register the offload callback function on behalf of the module which
+ * will own the sequence and acknowledgment number updates for all
+ * PPTP GRE packets. All PPTP GRE packets are then transmitted to this
+ * module after encapsulation in order to ensure the correct seq/ack
+ * fields are set in the packets before transmission. This is required
+ * when PPTP flows are offloaded to acceleration engines, in order to
+ * ensure consistency in sequence and ack numbers between PPTP control
+ * (PPP LCP) and data packets.
+ */
+int pptp_register_gre_seq_offload_callback(pptp_gre_seq_offload_callback_t
+ pptp_gre_offload_cb)
+{
+ pptp_gre_seq_offload_callback_t pptp_gre_offload_cb_f;
+
+ rcu_read_lock();
+ pptp_gre_offload_cb_f = rcu_dereference(pptp_gre_offload_xmit_cb);
+
+ if (pptp_gre_offload_cb_f) {
+ rcu_read_unlock();
+ return -1;
+ }
+
+ rcu_assign_pointer(pptp_gre_offload_xmit_cb, pptp_gre_offload_cb);
+ rcu_read_unlock();
+ return 0;
+}
+EXPORT_SYMBOL(pptp_register_gre_seq_offload_callback);
+
+/* Unregister the PPTP GRE packets sequence number offload callback */
+void pptp_unregister_gre_seq_offload_callback(void)
+{
+ rcu_assign_pointer(pptp_gre_offload_xmit_cb, NULL);
+}
+EXPORT_SYMBOL(pptp_unregister_gre_seq_offload_callback);
+
+/* pptp_hold_chan() */
+static void pptp_hold_chan(struct ppp_channel *chan)
+{
+ struct sock *sk = (struct sock *)chan->private;
+
+ sock_hold(sk);
+}
+
+/* pptp_release_chan() */
+static void pptp_release_chan(struct ppp_channel *chan)
+{
+ struct sock *sk = (struct sock *)chan->private;
+
+ sock_put(sk);
+}
+
+/* pptp_get_channel_protocol()
+ * Return the protocol type of the PPTP over PPP protocol
+ */
+static int pptp_get_channel_protocol(struct ppp_channel *chan)
+{
+ return PX_PROTO_PPTP;
+}
+
static const struct ppp_channel_ops pptp_chan_ops = {
.start_xmit = pptp_xmit,
.ioctl = pptp_ppp_ioctl,
+ .get_channel_protocol = pptp_get_channel_protocol,
+ .hold = pptp_hold_chan,
+ .release = pptp_release_chan,
};
static struct proto pptp_sk_proto __read_mostly = {
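
Putting the PPTP pieces together: an acceleration client registers a GRE sequence-number callback and then flips the per-session offload flag, after which pptp_xmit() diverts encapsulated frames to it. A hypothetical sketch — only the pptp_* symbols exist in this patch; the nss_* names and the dev_queue_xmit() placeholder are illustrative:

#include <linux/if_pppox.h>
#include <linux/netdevice.h>

/* Callback invoked by pptp_xmit() for every encapsulated GRE frame */
static int nss_pptp_gre_xmit(struct sk_buff *skb, struct net_device *pptp_dev)
{
	/* The real engine would stamp seq/ack here before transmit */
	dev_queue_xmit(skb);	/* placeholder for the engine's transmit hook */
	return 0;
}

static int nss_pptp_start(__be16 peer_call_id, __be32 peer_ip)
{
	int ret;

	ret = pptp_register_gre_seq_offload_callback(nss_pptp_gre_xmit);
	if (ret < 0)
		return ret;

	/* From now on pptp_xmit() hands GRE frames to nss_pptp_gre_xmit() */
	return pptp_session_enable_offload_mode(peer_call_id, peer_ip);
}
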


@@ -0,0 +1,77 @@
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -36,6 +36,7 @@ struct __ip6_tnl_parm {
__u8 proto; /* tunnel protocol */
__u8 encap_limit; /* encapsulation limit for tunnel */
__u8 hop_limit; /* hop limit for tunnel */
+ __u8 draft03; /* FMR using draft03 of map-e - QCA NSS Clients Support */
bool collect_md;
__be32 flowinfo; /* traffic class and flowlabel for tunnel */
__u32 flags; /* tunnel flags */
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -558,4 +558,9 @@ static inline void ip_tunnel_info_opts_s
#endif /* CONFIG_INET */
+/* QCA NSS Clients Support - Start */
+void ipip6_update_offload_stats(struct net_device *dev, void *ptr);
+void ip6_update_offload_stats(struct net_device *dev, void *ptr);
+/* QCA NSS Clients Support - End */
+
#endif /* __NET_IP_TUNNELS_H */
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -2411,6 +2411,26 @@ nla_put_failure:
return -EMSGSIZE;
}
+/* QCA NSS Client Support - Start */
+/*
+ * Update offload stats
+ */
+void ip6_update_offload_stats(struct net_device *dev, void *ptr)
+{
+ struct pcpu_sw_netstats *tstats = per_cpu_ptr(dev->tstats, 0);
+ const struct pcpu_sw_netstats *offload_stats =
+ (struct pcpu_sw_netstats *)ptr;
+
+ u64_stats_update_begin(&tstats->syncp);
+ u64_stats_add(&tstats->tx_packets, u64_stats_read(&offload_stats->tx_packets));
+ u64_stats_add(&tstats->tx_bytes, u64_stats_read(&offload_stats->tx_bytes));
+ u64_stats_add(&tstats->rx_packets, u64_stats_read(&offload_stats->rx_packets));
+ u64_stats_add(&tstats->rx_bytes, u64_stats_read(&offload_stats->rx_bytes));
+ u64_stats_update_end(&tstats->syncp);
+}
+EXPORT_SYMBOL(ip6_update_offload_stats);
+/* QCA NSS Client Support - End */
+
struct net *ip6_tnl_get_link_net(const struct net_device *dev)
{
struct ip6_tnl *tunnel = netdev_priv(dev);
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -1733,6 +1733,23 @@ nla_put_failure:
return -EMSGSIZE;
}
+/* QCA NSS Clients Support - Start */
+void ipip6_update_offload_stats(struct net_device *dev, void *ptr)
+{
+ struct pcpu_sw_netstats *tstats = per_cpu_ptr(dev->tstats, 0);
+ const struct pcpu_sw_netstats *offload_stats =
+ (struct pcpu_sw_netstats *)ptr;
+
+ u64_stats_update_begin(&tstats->syncp);
+ u64_stats_add(&tstats->tx_packets, u64_stats_read(&offload_stats->tx_packets));
+ u64_stats_add(&tstats->tx_bytes, u64_stats_read(&offload_stats->tx_bytes));
+ u64_stats_add(&tstats->rx_packets, u64_stats_read(&offload_stats->rx_packets));
+ u64_stats_add(&tstats->rx_bytes, u64_stats_read(&offload_stats->rx_bytes));
+ u64_stats_update_end(&tstats->syncp);
+}
+EXPORT_SYMBOL(ipip6_update_offload_stats);
+/* QCA NSS Clients Support - End */
+
static const struct nla_policy ipip6_policy[IFLA_IPTUN_MAX + 1] = {
[IFLA_IPTUN_LINK] = { .type = NLA_U32 },
[IFLA_IPTUN_LOCAL] = { .type = NLA_U32 },
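
Both exported helpers add a set of hardware counters into the tunnel device's dev->tstats under the u64_stats seqlock. A hypothetical caller on the offload side might build the delta like this; only ipip6_update_offload_stats() comes from this patch, the wrapper is illustrative:

#include <linux/netdevice.h>
#include <linux/u64_stats_sync.h>
#include <net/ip_tunnels.h>

static void nss_sit_sync_stats(struct net_device *dev,
			       u64 rx_pkts, u64 rx_bytes,
			       u64 tx_pkts, u64 tx_bytes)
{
	struct pcpu_sw_netstats delta = {};

	u64_stats_set(&delta.rx_packets, rx_pkts);
	u64_stats_set(&delta.rx_bytes, rx_bytes);
	u64_stats_set(&delta.tx_packets, tx_pkts);
	u64_stats_set(&delta.tx_bytes, tx_bytes);

	/* Folds the deltas into dev->tstats for "ip -s link" and friends */
	ipip6_update_offload_stats(dev, &delta);
}
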


@@ -0,0 +1,103 @@
--- a/drivers/net/vxlan/vxlan_core.c
+++ b/drivers/net/vxlan/vxlan_core.c
@@ -29,6 +29,20 @@
#include <net/vxlan.h>
#include <net/nexthop.h>
+ATOMIC_NOTIFIER_HEAD(vxlan_fdb_notifier_list);
+
+void vxlan_fdb_register_notify(struct notifier_block *nb)
+{
+ atomic_notifier_chain_register(&vxlan_fdb_notifier_list, nb);
+}
+EXPORT_SYMBOL(vxlan_fdb_register_notify);
+
+void vxlan_fdb_unregister_notify(struct notifier_block *nb)
+{
+ atomic_notifier_chain_unregister(&vxlan_fdb_notifier_list, nb);
+}
+EXPORT_SYMBOL(vxlan_fdb_unregister_notify);
+
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ip6_tunnel.h>
#include <net/ip6_checksum.h>
@@ -260,6 +274,7 @@ static void __vxlan_fdb_notify(struct vx
{
struct net *net = dev_net(vxlan->dev);
struct sk_buff *skb;
+ struct vxlan_fdb_event vfe;
int err = -ENOBUFS;
skb = nlmsg_new(vxlan_nlmsg_size(), GFP_ATOMIC);
@@ -275,6 +290,10 @@ static void __vxlan_fdb_notify(struct vx
}
rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
+ vfe.dev = vxlan->dev;
+ vfe.rdst = rd;
+ ether_addr_copy(vfe.eth_addr, fdb->eth_addr);
+ atomic_notifier_call_chain(&vxlan_fdb_notifier_list, type, (void *)&vfe);
return;
errout:
if (err < 0)
@@ -441,6 +460,18 @@ static struct vxlan_fdb *vxlan_find_mac(
return f;
}
+/* Find and update age of fdb entry corresponding to MAC. */
+void vxlan_fdb_update_mac(struct vxlan_dev *vxlan, const u8 *mac, uint32_t vni)
+{
+ u32 hash_index;
+
+ hash_index = fdb_head_index(vxlan, mac, vni);
+ spin_lock_bh(&vxlan->hash_lock[hash_index]);
+ vxlan_find_mac(vxlan, mac, vni);
+ spin_unlock_bh(&vxlan->hash_lock[hash_index]);
+}
+EXPORT_SYMBOL(vxlan_fdb_update_mac);
+
/* caller should hold vxlan->hash_lock */
static struct vxlan_rdst *vxlan_fdb_find_rdst(struct vxlan_fdb *f,
union vxlan_addr *ip, __be16 port,
@@ -2581,6 +2612,9 @@ void vxlan_xmit_one(struct sk_buff *skb,
goto out_unlock;
}
+ /* Reset the skb_iif to Tunnels interface index */
+ skb->skb_iif = dev->ifindex;
+
tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
err = vxlan_build_skb(skb, ndst, sizeof(struct iphdr),
@@ -2652,6 +2686,9 @@ void vxlan_xmit_one(struct sk_buff *skb,
if (err < 0)
goto tx_error;
+ /* Reset the skb_iif to Tunnels interface index */
+ skb->skb_iif = dev->ifindex;
+
udp_tunnel6_xmit_skb(ndst, sock6->sock->sk, skb, dev,
&local_ip.sin6.sin6_addr,
&dst->sin6.sin6_addr, tos, ttl,
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -352,6 +352,19 @@ struct vxlan_dev {
VXLAN_F_VNIFILTER | \
VXLAN_F_LOCALBYPASS)
+/*
+ * Application data for fdb notifier event
+ */
+struct vxlan_fdb_event {
+ struct net_device *dev;
+ struct vxlan_rdst *rdst;
+ u8 eth_addr[ETH_ALEN];
+};
+
+extern void vxlan_fdb_register_notify(struct notifier_block *nb);
+extern void vxlan_fdb_unregister_notify(struct notifier_block *nb);
+extern void vxlan_fdb_update_mac(struct vxlan_dev *vxlan, const u8 *mac, uint32_t vni);
+
struct net_device *vxlan_dev_create(struct net *net, const char *name,
u8 name_assign_type, struct vxlan_config *conf);
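
The FDB notifier chain fires with RTM_NEWNEIGH/RTM_DELNEIGH and a struct vxlan_fdb_event payload, which lets a bridge/offload manager mirror VXLAN FDB entries into hardware. A hedged sketch of a listener; the nss_* names are illustrative, the vxlan_fdb_* API is from this patch:

#include <linux/notifier.h>
#include <linux/rtnetlink.h>
#include <net/vxlan.h>

static int nss_vxlan_fdb_event(struct notifier_block *nb,
			       unsigned long event, void *data)
{
	struct vxlan_fdb_event *vfe = data;

	switch (event) {
	case RTM_NEWNEIGH:
		/* Program vfe->eth_addr / vfe->rdst into the offload engine */
		break;
	case RTM_DELNEIGH:
		/* Flush the matching hardware entry */
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block nss_vxlan_fdb_nb = {
	.notifier_call = nss_vxlan_fdb_event,
};

static void nss_vxlan_mgr_attach(void)
{
	vxlan_fdb_register_notify(&nss_vxlan_fdb_nb);
}
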


@@ -0,0 +1,368 @@
--- a/include/linux/ppp_channel.h
+++ b/include/linux/ppp_channel.h
@@ -61,6 +61,51 @@ struct ppp_channel {
};
#ifdef __KERNEL__
+/* Call this to obtain the underlying protocol of the PPP channel,
+ * e.g. PX_PROTO_OE
+ */
+extern int ppp_channel_get_protocol(struct ppp_channel *);
+
+/* Call this to hold a channel */
+extern bool ppp_channel_hold(struct ppp_channel *);
+
+/* Call this to release a hold you have upon a channel */
+extern void ppp_channel_release(struct ppp_channel *);
+
+/* Release hold on PPP channels */
+extern void ppp_release_channels(struct ppp_channel *channels[],
+ unsigned int chan_sz);
+
+/* Test if ppp xmit lock is locked */
+extern bool ppp_is_xmit_locked(struct net_device *dev);
+
+/* Call this get protocol version */
+extern int ppp_channel_get_proto_version(struct ppp_channel *);
+
+/* Get the device index associated with a channel, or 0, if none */
+extern int ppp_dev_index(struct ppp_channel *);
+
+/* Hold PPP channels for the PPP device */
+extern int ppp_hold_channels(struct net_device *dev,
+ struct ppp_channel *channels[],
+ unsigned int chan_sz);
+extern int __ppp_hold_channels(struct net_device *dev,
+ struct ppp_channel *channels[],
+ unsigned int chan_sz);
+
+/* Test if the ppp device is a multi-link ppp device */
+extern int ppp_is_multilink(struct net_device *dev);
+extern int __ppp_is_multilink(struct net_device *dev);
+
+/* Update statistics of the PPP net_device by incrementing related
+ * statistics field value with corresponding parameter
+ */
+extern void ppp_update_stats(struct net_device *dev, unsigned long rx_packets,
+ unsigned long rx_bytes, unsigned long tx_packets,
+ unsigned long tx_bytes, unsigned long rx_errors,
+ unsigned long tx_errors, unsigned long rx_dropped,
+ unsigned long tx_dropped);
+
/* Called by the channel when it can send some more data. */
extern void ppp_output_wakeup(struct ppp_channel *);
@@ -148,5 +193,17 @@ extern void ppp_update_stats(struct net_
* that ppp_unregister_channel returns.
*/
+/* QCA NSS Clients Support - Start */
+/* PPP channel connection event types */
+#define PPP_CHANNEL_DISCONNECT 0
+#define PPP_CHANNEL_CONNECT 1
+
+/* Register the PPP channel connect notifier */
+extern void ppp_channel_connection_register_notify(struct notifier_block *nb);
+
+/* Unregister the PPP channel connect notifier */
+extern void ppp_channel_connection_unregister_notify(struct notifier_block *nb);
+/* QCA NSS Clients Support - End */
+
#endif /* __KERNEL__ */
#endif
--- a/include/linux/if_pppol2tp.h
+++ b/include/linux/if_pppol2tp.h
@@ -12,4 +12,30 @@
#include <linux/in6.h>
#include <uapi/linux/if_pppol2tp.h>
+/* QCA NSS ECM support - Start */
+/*
+ * Holds L2TP channel info
+ */
+struct pppol2tp_common_addr {
+ int tunnel_version; /* v2 or v3 */
+ __u32 local_tunnel_id, remote_tunnel_id; /* tunnel id */
+ __u32 local_session_id, remote_session_id; /* session id */
+ struct sockaddr_in local_addr, remote_addr; /* ip address and port */
+};
+
+/*
+ * L2TP channel operations
+ */
+struct pppol2tp_channel_ops {
+ struct ppp_channel_ops ops; /* ppp channel ops */
+};
+
+/*
+ * exported function which calls pppol2tp channel's get addressing
+ * function
+ */
+extern int pppol2tp_channel_addressing_get(struct ppp_channel *,
+ struct pppol2tp_common_addr *);
+/* QCA NSS ECM support - End */
+
#endif
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -123,9 +123,17 @@ struct pppol2tp_session {
};
static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb);
-
-static const struct ppp_channel_ops pppol2tp_chan_ops = {
- .start_xmit = pppol2tp_xmit,
+static int pppol2tp_get_channel_protocol(struct ppp_channel *);
+static int pppol2tp_get_channel_protocol_ver(struct ppp_channel *);
+static void pppol2tp_hold_chan(struct ppp_channel *);
+static void pppol2tp_release_chan(struct ppp_channel *);
+
+static const struct pppol2tp_channel_ops pppol2tp_chan_ops = {
+ .ops.start_xmit = pppol2tp_xmit,
+ .ops.get_channel_protocol = pppol2tp_get_channel_protocol,
+ .ops.get_channel_protocol_ver = pppol2tp_get_channel_protocol_ver,
+ .ops.hold = pppol2tp_hold_chan,
+ .ops.release = pppol2tp_release_chan,
};
static const struct proto_ops pppol2tp_ops;
@@ -373,6 +381,13 @@ static int pppol2tp_xmit(struct ppp_chan
skb->data[0] = PPP_ALLSTATIONS;
skb->data[1] = PPP_UI;
+ /* QCA NSS ECM support - start */
+ /* set incoming interface as the ppp interface */
+ if ((skb->protocol == htons(ETH_P_IP)) ||
+ (skb->protocol == htons(ETH_P_IPV6)))
+ skb->skb_iif = ppp_dev_index(chan);
+ /* QCA NSS ECM support - End */
+
local_bh_disable();
l2tp_xmit_skb(session, skb);
local_bh_enable();
@@ -818,7 +833,7 @@ static int pppol2tp_connect(struct socke
po->chan.hdrlen = PPPOL2TP_L2TP_HDR_SIZE_NOSEQ;
po->chan.private = sk;
- po->chan.ops = &pppol2tp_chan_ops;
+ po->chan.ops = (struct ppp_channel_ops *)&pppol2tp_chan_ops.ops;
po->chan.mtu = pppol2tp_tunnel_mtu(tunnel);
error = ppp_register_net_channel(sock_net(sk), &po->chan);
@@ -1732,6 +1747,109 @@ static void __exit pppol2tp_exit(void)
unregister_pernet_device(&pppol2tp_net_ops);
}
+/* QCA NSS ECM support - Start */
+/* pppol2tp_hold_chan() */
+static void pppol2tp_hold_chan(struct ppp_channel *chan)
+{
+ struct sock *sk = (struct sock *)chan->private;
+
+ sock_hold(sk);
+}
+
+/* pppol2tp_release_chan() */
+static void pppol2tp_release_chan(struct ppp_channel *chan)
+{
+ struct sock *sk = (struct sock *)chan->private;
+
+ sock_put(sk);
+}
+
+/* pppol2tp_get_channel_protocol()
+ * Return the protocol type of the L2TP over PPP protocol
+ */
+static int pppol2tp_get_channel_protocol(struct ppp_channel *chan)
+{
+ return PX_PROTO_OL2TP;
+}
+
+/* pppol2tp_get_channel_protocol_ver()
+ * Return the protocol version of the L2TP over PPP protocol
+ */
+static int pppol2tp_get_channel_protocol_ver(struct ppp_channel *chan)
+{
+ struct sock *sk;
+ struct l2tp_session *session;
+ struct l2tp_tunnel *tunnel;
+ int version = 0;
+
+ if (chan && chan->private)
+ sk = (struct sock *)chan->private;
+ else
+ return -1;
+
+ /* Get session and tunnel contexts from the socket */
+ session = pppol2tp_sock_to_session(sk);
+ if (!session)
+ return -1;
+
+ tunnel = session->tunnel;
+ if (!tunnel) {
+ sock_put(sk);
+ return -1;
+ }
+
+ version = tunnel->version;
+
+ sock_put(sk);
+
+ return version;
+}
+
+/* pppol2tp_get_addressing() */
+static int pppol2tp_get_addressing(struct ppp_channel *chan,
+ struct pppol2tp_common_addr *addr)
+{
+ struct sock *sk = (struct sock *)chan->private;
+ struct l2tp_session *session;
+ struct l2tp_tunnel *tunnel;
+ struct inet_sock *isk = NULL;
+ int err = -ENXIO;
+
+ /* Get session and tunnel contexts from the socket */
+ session = pppol2tp_sock_to_session(sk);
+ if (!session)
+ return err;
+
+ tunnel = session->tunnel;
+ if (!tunnel) {
+ sock_put(sk);
+ return err;
+ }
+ isk = inet_sk(tunnel->sock);
+
+ addr->local_tunnel_id = tunnel->tunnel_id;
+ addr->remote_tunnel_id = tunnel->peer_tunnel_id;
+ addr->local_session_id = session->session_id;
+ addr->remote_session_id = session->peer_session_id;
+
+ addr->local_addr.sin_port = isk->inet_sport;
+ addr->remote_addr.sin_port = isk->inet_dport;
+ addr->local_addr.sin_addr.s_addr = isk->inet_saddr;
+ addr->remote_addr.sin_addr.s_addr = isk->inet_daddr;
+
+ sock_put(sk);
+ return 0;
+}
+
+/* pppol2tp_channel_addressing_get() */
+int pppol2tp_channel_addressing_get(struct ppp_channel *chan,
+ struct pppol2tp_common_addr *addr)
+{
+ return pppol2tp_get_addressing(chan, addr);
+}
+EXPORT_SYMBOL(pppol2tp_channel_addressing_get);
+/* QCA NSS ECM support - End */
+
module_init(pppol2tp_init);
module_exit(pppol2tp_exit);
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -3743,6 +3743,32 @@ int ppp_is_multilink(struct net_device *
}
EXPORT_SYMBOL(ppp_is_multilink);
+/* __ppp_is_multilink()
+ * Returns >0 if the device is a multilink PPP netdevice, 0 if not or < 0
+ * if the device is not PPP. Caller should acquire ppp_lock before calling
+ * this function
+ */
+int __ppp_is_multilink(struct net_device *dev)
+{
+ struct ppp *ppp;
+ unsigned int flags;
+
+ if (!dev)
+ return -1;
+
+ if (dev->type != ARPHRD_PPP)
+ return -1;
+
+ ppp = netdev_priv(dev);
+ flags = ppp->flags;
+
+ if (flags & SC_MULTILINK)
+ return 1;
+
+ return 0;
+}
+EXPORT_SYMBOL(__ppp_is_multilink);
+
/* ppp_channel_get_protocol()
* Call this to obtain the underlying protocol of the PPP channel,
* e.g. PX_PROTO_OE
@@ -3881,6 +3907,59 @@ int ppp_hold_channels(struct net_device
}
EXPORT_SYMBOL(ppp_hold_channels);
+/* __ppp_hold_channels()
+ * Returns the PPP channels of the PPP device, storing each one into
+ * channels[].
+ *
+ * channels[] has chan_sz elements.
+ * This function returns the number of channels stored, up to chan_sz.
+ * It will return < 0 if the device is not PPP.
+ *
+ * You MUST release the channels using ppp_release_channels().
+ */
+int __ppp_hold_channels(struct net_device *dev, struct ppp_channel *channels[],
+ unsigned int chan_sz)
+{
+ struct ppp *ppp;
+ int c;
+ struct channel *pch;
+
+ if (!dev)
+ return -1;
+
+ if (dev->type != ARPHRD_PPP)
+ return -1;
+
+ ppp = netdev_priv(dev);
+
+ c = 0;
+ list_for_each_entry(pch, &ppp->channels, clist) {
+ struct ppp_channel *chan;
+
+ if (!pch->chan) {
+ /* Channel is going / gone away */
+ continue;
+ }
+
+ if (c == chan_sz) {
+ /* No space to record channel */
+ return c;
+ }
+
+ /* Hold the channel, if supported */
+ chan = pch->chan;
+ if (!chan->ops->hold)
+ continue;
+
+ chan->ops->hold(chan);
+
+ /* Record the channel */
+ channels[c++] = chan;
+ }
+ return c;
+}
+EXPORT_SYMBOL(__ppp_hold_channels);
+
/* ppp_release_channels()
* Releases channels
*/
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -235,6 +235,9 @@ struct l2tp_session *l2tp_session_get_by
void l2tp_stats_update(struct l2tp_tunnel *tunnel, struct l2tp_session *session,
struct l2tp_stats *stats);
+void l2tp_stats_update(struct l2tp_tunnel *tunnel, struct l2tp_session *session,
+ struct l2tp_stats *stats);
+
/* Tunnel and session lifetime management.
* Creation of a new instance is a two-step process: create, then register.
* Destruction is triggered using the *_delete functions, and completes asynchronously.
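
Taken together, these helpers let an ECM-style client walk a PPP device's channels, identify a PPPoL2TP channel, and pull out the tunnel/session addressing it needs to build an offload rule. A hypothetical sketch; only the exported ppp_* and pppol2tp_* helpers come from these patches:

#include <linux/if_pppol2tp.h>
#include <linux/if_pppox.h>
#include <linux/netdevice.h>
#include <linux/ppp_channel.h>

static int nss_l2tp_get_addressing(struct net_device *dev,
				   struct pppol2tp_common_addr *addr)
{
	struct ppp_channel *chan[1];
	int ret = -EINVAL;

	/* Takes a reference on each channel that gets returned */
	if (ppp_hold_channels(dev, chan, 1) != 1)
		return -EINVAL;

	if (ppp_channel_get_protocol(chan[0]) == PX_PROTO_OL2TP)
		ret = pppol2tp_channel_addressing_get(chan[0], addr);

	/* Drop the reference taken above */
	ppp_release_channels(chan, 1);
	return ret;
}
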


@@ -0,0 +1,22 @@
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -2417,7 +2417,7 @@ nla_put_failure:
*/
void ip6_update_offload_stats(struct net_device *dev, void *ptr)
{
- struct pcpu_sw_netstats *tstats = per_cpu_ptr(dev->tstats, 0);
+ struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
const struct pcpu_sw_netstats *offload_stats =
(struct pcpu_sw_netstats *)ptr;
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -1736,7 +1736,7 @@ nla_put_failure:
/* QCA NSS Clients Support - Start */
void ipip6_update_offload_stats(struct net_device *dev, void *ptr)
{
- struct pcpu_sw_netstats *tstats = per_cpu_ptr(dev->tstats, 0);
+ struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
const struct pcpu_sw_netstats *offload_stats =
(struct pcpu_sw_netstats *)ptr;


@@ -0,0 +1,24 @@
--- /dev/null
+++ b/include/uapi/linux/tlshdr.h
@@ -0,0 +1,21 @@
+#ifndef _UAPI_LINUX_TLSHDR_H
+#define _UAPI_LINUX_TLSHDR_H
+
+#include <linux/types.h>
+
+struct tlshdr {
+ __u8 type;
+ __be16 version;
+ __be16 len;
+} __attribute__((packed));
+
+#define TLSHDR_REC_TYPE_CCS 20 /* TLS packet is change cipher specification */
+#define TLSHDR_REC_TYPE_ALERT 21 /* TLS packet is Alert */
+#define TLSHDR_REC_TYPE_HANDSHAKE 22 /* TLS packet is Handshake */
+#define TLSHDR_REC_TYPE_DATA 23 /* TLS packet is Application data */
+
+#define TLSHDR_VERSION_1_1 0x0302 /* TLS Header Version(tls 1.1) */
+#define TLSHDR_VERSION_1_2 0x0303 /* TLS Header Version(tls 1.2) */
+#define TLSHDR_VERSION_1_3 0x0304 /* TLS Header Version(tls 1.3) */
+
+#endif /* _UAPI_LINUX_TLSHDR_H */
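
The new header only describes the on-wire TLS record layout; a consumer (for example an inline crypto engine driver) parses it straight out of the packet. A small hedged sketch of such a check; the helper name is illustrative, struct tlshdr and the TLSHDR_* constants come from this patch:

#include <linux/skbuff.h>
#include <linux/tlshdr.h>

static bool nss_tls_record_ok(const struct sk_buff *skb, unsigned int offset)
{
	struct tlshdr hdr;

	if (skb_copy_bits(skb, offset, &hdr, sizeof(hdr)) < 0)
		return false;

	if (hdr.type != TLSHDR_REC_TYPE_DATA &&
	    hdr.type != TLSHDR_REC_TYPE_HANDSHAKE)
		return false;

	/* version is carried big-endian on the wire */
	return be16_to_cpu(hdr.version) == TLSHDR_VERSION_1_2 ||
	       be16_to_cpu(hdr.version) == TLSHDR_VERSION_1_3;
}
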


@@ -0,0 +1,876 @@
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -258,4 +258,17 @@ extern br_get_dst_hook_t __rcu *br_get_d
extern struct net_device *br_fdb_bridge_dev_get_and_hold(struct net_bridge *br);
/* QCA NSS bridge-mgr support - End */
+/* QCA qca-mcs support - Start */
+typedef struct net_bridge_port *br_get_dst_hook_t(const struct net_bridge_port *src,
+ struct sk_buff **skb);
+extern br_get_dst_hook_t __rcu *br_get_dst_hook;
+
+typedef int (br_multicast_handle_hook_t)(const struct net_bridge_port *src,
+ struct sk_buff *skb);
+extern br_multicast_handle_hook_t __rcu *br_multicast_handle_hook;
+
+typedef void (br_notify_hook_t)(int group, int event, const void *ptr);
+extern br_notify_hook_t __rcu *br_notify_hook;
+/* QCA qca-mcs support - End */
+
#endif
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -239,6 +239,8 @@ static void fdb_notify(struct net_bridge
kfree_skb(skb);
goto errout;
}
+
+ __br_notify(RTNLGRP_NEIGH, type, fdb); /* QCA qca-mcs support */
rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
return;
errout:
@@ -305,6 +307,7 @@ struct net_bridge_fdb_entry *br_fdb_find
{
return fdb_find_rcu(&br->fdb_hash_tbl, addr, vid);
}
+EXPORT_SYMBOL_GPL(br_fdb_find_rcu); /* QCA qca-mcs support */
/* When a static FDB entry is added, the mac address from the entry is
* added to the bridge private HW address list and all required ports
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -906,6 +906,7 @@ void br_manage_promisc(struct net_bridge
int nbp_backup_change(struct net_bridge_port *p, struct net_device *backup_dev);
/* br_input.c */
+int br_pass_frame_up(struct sk_buff *skb); /* QCA qca-mcs support */
int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb);
rx_handler_func_t *br_get_rx_handler(const struct net_device *dev);
@@ -2268,4 +2269,14 @@ struct nd_msg *br_is_nd_neigh_msg(struct
bool br_is_neigh_suppress_enabled(const struct net_bridge_port *p, u16 vid);
#define __br_get(__hook, __default, __args ...) \
(__hook ? (__hook(__args)) : (__default)) /* QCA NSS ECM support */
+
+/* QCA qca-mcs support - Start */
+static inline void __br_notify(int group, int type, const void *data)
+{
+ br_notify_hook_t *notify_hook = rcu_dereference(br_notify_hook);
+
+ if (notify_hook)
+ notify_hook(group, type, data);
+}
+/* QCA qca-mcs support - End */
#endif
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -656,6 +656,7 @@ void br_info_notify(int event, const str
kfree_skb(skb);
goto errout;
}
+ __br_notify(RTNLGRP_LINK, event, port); /* QCA qca-mcs support */
rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
return;
errout:
--- a/net/bridge/br.c
+++ b/net/bridge/br.c
@@ -472,6 +472,12 @@ static void __exit br_deinit(void)
br_fdb_fini();
}
+/* QCA qca-mcs support - Start */
+/* Hook for bridge event notifications */
+br_notify_hook_t __rcu *br_notify_hook __read_mostly;
+EXPORT_SYMBOL_GPL(br_notify_hook);
+/* QCA qca-mcs support - End */
+
module_init(br_init)
module_exit(br_deinit)
MODULE_LICENSE("GPL");
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -83,6 +83,13 @@ netdev_tx_t br_dev_xmit(struct sk_buff *
if (is_broadcast_ether_addr(dest)) {
br_flood(br, skb, BR_PKT_BROADCAST, false, true, vid);
} else if (is_multicast_ether_addr(dest)) {
+ /* QCA qca-mcs support - Start */
+ br_multicast_handle_hook_t *multicast_handle_hook =
+ rcu_dereference(br_multicast_handle_hook);
+ if (!__br_get(multicast_handle_hook, true, NULL, skb))
+ goto out;
+ /* QCA qca-mcs support - End */
+
if (unlikely(netpoll_tx_running(dev))) {
br_flood(br, skb, BR_PKT_MULTICAST, false, true, vid);
goto out;
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -30,7 +30,17 @@ br_netif_receive_skb(struct net *net, st
return netif_receive_skb(skb);
}
-static int br_pass_frame_up(struct sk_buff *skb)
+/* QCA qca-mcs support - Start */
+/* Hook for external Multicast handler */
+br_multicast_handle_hook_t __rcu *br_multicast_handle_hook __read_mostly;
+EXPORT_SYMBOL_GPL(br_multicast_handle_hook);
+
+/* Hook for external forwarding logic */
+br_get_dst_hook_t __rcu *br_get_dst_hook __read_mostly;
+EXPORT_SYMBOL_GPL(br_get_dst_hook);
+/* QCA qca-mcs support - End */
+
+int br_pass_frame_up(struct sk_buff *skb)
{
struct net_device *indev, *brdev = BR_INPUT_SKB_CB(skb)->brdev;
struct net_bridge *br = netdev_priv(brdev);
@@ -69,6 +79,7 @@ static int br_pass_frame_up(struct sk_bu
dev_net(indev), NULL, skb, indev, NULL,
br_netif_receive_skb);
}
+EXPORT_SYMBOL_GPL(br_pass_frame_up); /* QCA qca-mcs support */
/* note: already called with rcu_read_lock */
int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
@@ -82,6 +93,11 @@ int br_handle_frame_finish(struct net *n
struct net_bridge_mcast *brmctx;
struct net_bridge_vlan *vlan;
struct net_bridge *br;
+ /* QCA qca-mcs support - Start */
+ br_multicast_handle_hook_t *multicast_handle_hook;
+ struct net_bridge_port *pdst = NULL;
+ br_get_dst_hook_t *get_dst_hook = rcu_dereference(br_get_dst_hook);
+ /* QCA qca-mcs support - End */
u16 vid = 0;
u8 state;
@@ -175,6 +191,12 @@ int br_handle_frame_finish(struct net *n
switch (pkt_type) {
case BR_PKT_MULTICAST:
+ /* QCA qca-mcs support - Start */
+ multicast_handle_hook = rcu_dereference(br_multicast_handle_hook);
+ if (!__br_get(multicast_handle_hook, true, p, skb))
+ goto out;
+ /* QCA qca-mcs support - End */
+
mdst = br_mdb_get(brmctx, skb, vid);
if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) &&
br_multicast_querier_exists(brmctx, eth_hdr(skb), mdst)) {
@@ -190,8 +212,15 @@ int br_handle_frame_finish(struct net *n
}
break;
case BR_PKT_UNICAST:
- dst = br_fdb_find_rcu(br, eth_hdr(skb)->h_dest, vid);
- break;
+ /* QCA qca-mcs support - Start */
+ pdst = __br_get(get_dst_hook, NULL, p, &skb);
+ if (pdst) {
+ if (!skb)
+ goto out;
+ } else {
+ /* QCA qca-mcs support - End */
+ dst = br_fdb_find_rcu(br, eth_hdr(skb)->h_dest, vid);
+ }
default:
break;
}
@@ -206,6 +235,13 @@ int br_handle_frame_finish(struct net *n
dst->used = now;
br_forward(dst->dst, skb, local_rcv, false);
} else {
+ /* QCA qca-mcs support - Start */
+ if (pdst) {
+ br_forward(pdst, skb, local_rcv, false);
+ goto out;
+ }
+ /* QCA qca-mcs support - End */
+
if (!mcast_hit)
br_flood(br, skb, pkt_type, local_rcv, false, vid);
else
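
br_multicast_handle_hook is consulted in both br_dev_xmit() and br_handle_frame_finish(): a return of 0 means the snooping module consumed the frame, non-zero falls back to normal bridge flooding. A hedged sketch of how a qca-mcs-style module could attach; the handler body is illustrative, the hook itself is from this patch:

#include <linux/if_bridge.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>

/* Return 0 if the frame was delivered to snooped ports, non-zero to
 * let the bridge flood it as usual.
 */
static int mcs_multicast_handle(const struct net_bridge_port *src,
				struct sk_buff *skb)
{
	/* ...look up the snooping database and forward selectively... */
	return 1;	/* not handled: keep normal bridge behaviour */
}

static void mcs_hooks_attach(void)
{
	rcu_assign_pointer(br_multicast_handle_hook, mcs_multicast_handle);
}

static void mcs_hooks_detach(void)
{
	rcu_assign_pointer(br_multicast_handle_hook, NULL);
	synchronize_rcu();	/* wait for in-flight readers */
}
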
--- a/include/linux/mroute.h
+++ b/include/linux/mroute.h
@@ -92,4 +92,44 @@ struct rtmsg;
int ipmr_get_route(struct net *net, struct sk_buff *skb,
__be32 saddr, __be32 daddr,
struct rtmsg *rtm, u32 portid);
+
+/* QCA ECM qca-mcs support - Start */
+#define IPMR_MFC_EVENT_UPDATE 1
+#define IPMR_MFC_EVENT_DELETE 2
+
+/*
+ * Callback to registered modules in the event of updates to a multicast group
+ */
+typedef void (*ipmr_mfc_event_offload_callback_t)(__be32 origin, __be32 group,
+ u32 max_dest_dev,
+ u32 dest_dev_idx[],
+ u8 op);
+
+/*
+ * Register the callback used to inform offload modules when updates occur to
+ * MFC. The callback is registered by offload modules
+ */
+extern bool ipmr_register_mfc_event_offload_callback(
+ ipmr_mfc_event_offload_callback_t mfc_offload_cb);
+
+/*
+ * De-Register the callback used to inform offload modules when updates occur
+ * to MFC
+ */
+extern void ipmr_unregister_mfc_event_offload_callback(void);
+
+/*
+ * Find the destination interface list, given a multicast group and source
+ */
+extern int ipmr_find_mfc_entry(struct net *net, __be32 origin, __be32 group,
+ u32 max_dst_cnt, u32 dest_dev[]);
+
+/*
+ * Out-of-band multicast statistics update for flows that are offloaded from
+ * Linux
+ */
+extern int ipmr_mfc_stats_update(struct net *net, __be32 origin, __be32 group,
+ u64 pkts_in, u64 bytes_in,
+ u64 pkts_out, u64 bytes_out);
+/* QCA ECM qca-mcs support - End */
#endif
--- a/include/linux/mroute6.h
+++ b/include/linux/mroute6.h
@@ -137,4 +137,47 @@ static inline int ip6mr_sk_ioctl(struct
return 1;
}
#endif
+
+/* QCA qca-mcs support - Start */
+#define IP6MR_MFC_EVENT_UPDATE 1
+#define IP6MR_MFC_EVENT_DELETE 2
+
+/*
+ * Callback to registered modules in the event of updates to a multicast group
+ */
+typedef void (*ip6mr_mfc_event_offload_callback_t)(struct in6_addr *origin,
+ struct in6_addr *group,
+ u32 max_dest_dev,
+ u32 dest_dev_idx[],
+ uint8_t op);
+
+/*
+ * Register the callback used to inform offload modules when updates occur
+ * to MFC. The callback is registered by offload modules
+ */
+extern bool ip6mr_register_mfc_event_offload_callback(
+ ip6mr_mfc_event_offload_callback_t mfc_offload_cb);
+
+/*
+ * De-Register the callback used to inform offload modules when updates occur
+ * to MFC
+ */
+extern void ip6mr_unregister_mfc_event_offload_callback(void);
+
+/*
+ * Find the destination interface list given a multicast group and source
+ */
+extern int ip6mr_find_mfc_entry(struct net *net, struct in6_addr *origin,
+ struct in6_addr *group, u32 max_dst_cnt,
+ u32 dest_dev[]);
+
+/*
+ * Out-of-band multicast statistics update for flows that are offloaded from
+ * Linux
+ */
+extern int ip6mr_mfc_stats_update(struct net *net, struct in6_addr *origin,
+ struct in6_addr *group, uint64_t pkts_in,
+ uint64_t bytes_in, uint64_t pkts_out,
+ uint64_t bytes_out);
+/* QCA qca-mcs support - End */
#endif
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -89,6 +89,9 @@ static struct net_device *vif_dev_read(c
/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);
+/* spinlock for offload */
+static DEFINE_SPINLOCK(lock); /* QCA ECM qca-mcs support */
+
/* We return to original Alan's scheme. Hash table of resolved
* entries is changed only in process context and protected
* with weak lock mrt_lock. Queue of unresolved entries is protected
@@ -112,6 +115,9 @@ static void mroute_netlink_event(struct
static void igmpmsg_netlink_event(const struct mr_table *mrt, struct sk_buff *pkt);
static void mroute_clean_tables(struct mr_table *mrt, int flags);
static void ipmr_expire_process(struct timer_list *t);
+static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt, __be32 origin,
+ __be32 mcastgrp);
+static ipmr_mfc_event_offload_callback_t __rcu ipmr_mfc_event_offload_callback; /* QCA ECM qca-mcs support */
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
#define ipmr_for_each_table(mrt, net) \
@@ -223,6 +229,80 @@ static int ipmr_rule_fill(struct fib_rul
return 0;
}
+/* QCA ECM qca-mcs support - Start */
+/* ipmr_sync_entry_update()
+ * Call the registered offload callback to report an update to a multicast
+ * route entry. The callback receives the list of destination interfaces and
+ * the interface count
+ */
+static void ipmr_sync_entry_update(struct mr_table *mrt,
+ struct mfc_cache *cache)
+{
+ int vifi, dest_if_count = 0;
+ u32 dest_dev[MAXVIFS];
+ __be32 origin;
+ __be32 group;
+ ipmr_mfc_event_offload_callback_t offload_update_cb_f;
+
+ memset(dest_dev, 0, sizeof(dest_dev));
+
+ origin = cache->mfc_origin;
+ group = cache->mfc_mcastgrp;
+
+ spin_lock(&mrt_lock);
+ for (vifi = 0; vifi < cache->_c.mfc_un.res.maxvif; vifi++) {
+ if (!((cache->_c.mfc_un.res.ttls[vifi] > 0) &&
+ (cache->_c.mfc_un.res.ttls[vifi] < 255))) {
+ continue;
+ }
+ if (dest_if_count == MAXVIFS) {
+ spin_unlock(&mrt_lock);
+ return;
+ }
+
+ if (!VIF_EXISTS(mrt, vifi)) {
+ spin_unlock(&mrt_lock);
+ return;
+ }
+ dest_dev[dest_if_count] = mrt->vif_table[vifi].dev->ifindex;
+ dest_if_count++;
+ }
+ spin_unlock(&mrt_lock);
+
+ rcu_read_lock();
+ offload_update_cb_f = rcu_dereference(ipmr_mfc_event_offload_callback);
+
+ if (!offload_update_cb_f) {
+ rcu_read_unlock();
+ return;
+ }
+
+ offload_update_cb_f(group, origin, dest_if_count, dest_dev,
+ IPMR_MFC_EVENT_UPDATE);
+ rcu_read_unlock();
+}
+
+/* ipmr_sync_entry_delete()
+ * Call the registered offload callback to inform of a multicast route entry
+ * delete event
+ */
+static void ipmr_sync_entry_delete(u32 origin, u32 group)
+{
+ ipmr_mfc_event_offload_callback_t offload_update_cb_f;
+
+ rcu_read_lock();
+ offload_update_cb_f = rcu_dereference(ipmr_mfc_event_offload_callback);
+
+ if (!offload_update_cb_f) {
+ rcu_read_unlock();
+ return;
+ }
+
+ offload_update_cb_f(group, origin, 0, NULL, IPMR_MFC_EVENT_DELETE);
+ rcu_read_unlock();
+}
+/* QCA ECM qca-mcs support - End */
+
static const struct fib_rules_ops __net_initconst ipmr_rules_ops_template = {
.family = RTNL_FAMILY_IPMR,
.rule_size = sizeof(struct ipmr_rule),
@@ -236,6 +316,156 @@ static const struct fib_rules_ops __net_
.owner = THIS_MODULE,
};
+/* QCA ECM qca-mcs support - Start */
+/* ipmr_register_mfc_event_offload_callback()
+ * Register the IPv4 Multicast update offload callback with IPMR
+ */
+bool ipmr_register_mfc_event_offload_callback(
+ ipmr_mfc_event_offload_callback_t mfc_offload_cb)
+{
+ ipmr_mfc_event_offload_callback_t offload_update_cb_f;
+
+ rcu_read_lock();
+ offload_update_cb_f = rcu_dereference(ipmr_mfc_event_offload_callback);
+
+ if (offload_update_cb_f) {
+ rcu_read_unlock();
+ return false;
+ }
+ rcu_read_unlock();
+
+ spin_lock(&lock);
+ rcu_assign_pointer(ipmr_mfc_event_offload_callback, mfc_offload_cb);
+ spin_unlock(&lock);
+ synchronize_rcu();
+ return true;
+}
+EXPORT_SYMBOL(ipmr_register_mfc_event_offload_callback);
+
+/* ipmr_unregister_mfc_event_offload_callback()
+ * De-register the IPv4 Multicast update offload callback with IPMR
+ */
+void ipmr_unregister_mfc_event_offload_callback(void)
+{
+ spin_lock(&lock);
+ rcu_assign_pointer(ipmr_mfc_event_offload_callback, NULL);
+ spin_unlock(&lock);
+ synchronize_rcu();
+}
+EXPORT_SYMBOL(ipmr_unregister_mfc_event_offload_callback);
+
+/* ipmr_find_mfc_entry()
+ * Returns destination interface list for a particular multicast flow, and
+ * the number of interfaces in the list
+ */
+int ipmr_find_mfc_entry(struct net *net, __be32 origin, __be32 group,
+ u32 max_dest_cnt, u32 dest_dev[])
+{
+ int vifi, dest_if_count = 0;
+ struct mr_table *mrt;
+ struct mfc_cache *cache;
+
+ mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
+ if (!mrt)
+ return -ENOENT;
+
+ rcu_read_lock();
+ cache = ipmr_cache_find(mrt, origin, group);
+ if (!cache) {
+ rcu_read_unlock();
+ return -ENOENT;
+ }
+
+ spin_lock(&mrt_lock);
+ for (vifi = 0; vifi < cache->_c.mfc_un.res.maxvif; vifi++) {
+ if (!((cache->_c.mfc_un.res.ttls[vifi] > 0) &&
+ (cache->_c.mfc_un.res.ttls[vifi] < 255))) {
+ continue;
+ }
+
+ /* We have another valid destination interface entry. Check if
+ * the number of the destination interfaces for the route is
+ * exceeding the size of the array given to us
+ */
+ if (dest_if_count == max_dest_cnt) {
+ spin_unlock(&mrt_lock);
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+
+ if (!VIF_EXISTS(mrt, vifi)) {
+ spin_unlock(&mrt_lock);
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+
+ dest_dev[dest_if_count] = mrt->vif_table[vifi].dev->ifindex;
+ dest_if_count++;
+ }
+ spin_unlock(&mrt_lock);
+ rcu_read_unlock();
+
+ return dest_if_count;
+}
+EXPORT_SYMBOL(ipmr_find_mfc_entry);
+
+/* ipmr_mfc_stats_update()
+ * Update the MFC/VIF statistics for offloaded flows
+ */
+int ipmr_mfc_stats_update(struct net *net, __be32 origin, __be32 group,
+ u64 pkts_in, u64 bytes_in,
+ u64 pkts_out, u64 bytes_out)
+{
+ int vif, vifi;
+ struct mr_table *mrt;
+ struct mfc_cache *cache;
+
+ mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
+ if (!mrt)
+ return -ENOENT;
+
+ rcu_read_lock();
+ cache = ipmr_cache_find(mrt, origin, group);
+ if (!cache) {
+ rcu_read_unlock();
+ return -ENOENT;
+ }
+
+ vif = cache->_c.mfc_parent;
+
+ spin_lock(&mrt_lock);
+ if (!VIF_EXISTS(mrt, vif)) {
+ spin_unlock(&mrt_lock);
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+
+ mrt->vif_table[vif].pkt_in += pkts_in;
+ mrt->vif_table[vif].bytes_in += bytes_in;
+ cache->_c.mfc_un.res.pkt += pkts_out;
+ cache->_c.mfc_un.res.bytes += bytes_out;
+
+ for (vifi = cache->_c.mfc_un.res.minvif;
+ vifi < cache->_c.mfc_un.res.maxvif; vifi++) {
+ if ((cache->_c.mfc_un.res.ttls[vifi] > 0) &&
+ (cache->_c.mfc_un.res.ttls[vifi] < 255)) {
+ if (!VIF_EXISTS(mrt, vifi)) {
+ spin_unlock(&mrt_lock);
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+ mrt->vif_table[vifi].pkt_out += pkts_out;
+ mrt->vif_table[vifi].bytes_out += bytes_out;
+ }
+ }
+ spin_unlock(&mrt_lock);
+ rcu_read_unlock();
+
+ return 0;
+}
+EXPORT_SYMBOL(ipmr_mfc_stats_update);
+/* QCA ECM qca-mcs support - End */
+
static int __net_init ipmr_rules_init(struct net *net)
{
struct fib_rules_ops *ops;
@@ -1191,6 +1421,10 @@ static int ipmr_mfc_delete(struct mr_tab
call_ipmr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_DEL, c, mrt->id);
mroute_netlink_event(mrt, c, RTM_DELROUTE);
mr_cache_put(&c->_c);
+ /* QCA ECM qca-mcs support - Start */
+ /* Inform offload modules of the delete event */
+ ipmr_sync_entry_delete(c->mfc_origin, c->mfc_mcastgrp);
+ /* QCA ECM qca-mcs support - End */
return 0;
}
@@ -1221,6 +1455,10 @@ static int ipmr_mfc_add(struct net *net,
call_ipmr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_REPLACE, c,
mrt->id);
mroute_netlink_event(mrt, c, RTM_NEWROUTE);
+ /* QCA ECM qca-mcs support - Start */
+ /* Inform offload modules of the update event */
+ ipmr_sync_entry_update(mrt, c);
+ /* QCA ECM qca-mcs support - End */
return 0;
}
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -74,6 +74,9 @@ static struct net_device *vif_dev_read(c
/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);
+/* Spinlock for offload */
+static DEFINE_SPINLOCK(lock); /* QCA qca-mcs support */
+
/* We return to original Alan's scheme. Hash table of resolved
entries is changed only in process context and protected
with weak lock mrt_lock. Queue of unresolved entries is protected
@@ -101,6 +104,13 @@ static int ip6mr_rtm_dumproute(struct sk
struct netlink_callback *cb);
static void mroute_clean_tables(struct mr_table *mrt, int flags);
static void ipmr_expire_process(struct timer_list *t);
+/* QCA qca-mcs support - Start */
+static struct mfc6_cache *ip6mr_cache_find(struct mr_table *mrt,
+ const struct in6_addr *origin,
+ const struct in6_addr *mcastgrp);
+static ip6mr_mfc_event_offload_callback_t __rcu
+ ip6mr_mfc_event_offload_callback;
+/* QCA qca-mcs support - End */
#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
#define ip6mr_for_each_table(mrt, net) \
@@ -375,6 +385,84 @@ static struct mfc6_cache_cmp_arg ip6mr_m
.mf6c_mcastgrp = IN6ADDR_ANY_INIT,
};
+/* QCA qca-mcs support - Start */
+/* ip6mr_sync_entry_update()
+ * Call the registered offload callback to report an update to a multicast
+ * route entry. The callback receives the list of destination interfaces and
+ * the interface count
+ */
+static void ip6mr_sync_entry_update(struct mr_table *mrt,
+ struct mfc6_cache *cache)
+{
+ int vifi, dest_if_count = 0;
+ u32 dest_dev[MAXMIFS];
+ struct in6_addr mc_origin, mc_group;
+ ip6mr_mfc_event_offload_callback_t offload_update_cb_f;
+
+ memset(dest_dev, 0, sizeof(dest_dev));
+
+ spin_lock(&mrt_lock);
+
+ for (vifi = 0; vifi < cache->_c.mfc_un.res.maxvif; vifi++) {
+ if (!((cache->_c.mfc_un.res.ttls[vifi] > 0) &&
+ (cache->_c.mfc_un.res.ttls[vifi] < 255))) {
+ continue;
+ }
+
+ if (dest_if_count == MAXMIFS) {
+ spin_unlock(&mrt_lock);
+ return;
+ }
+
+ if (!VIF_EXISTS(mrt, vifi)) {
+ spin_unlock(&mrt_lock);
+ return;
+ }
+
+ dest_dev[dest_if_count] = mrt->vif_table[vifi].dev->ifindex;
+ dest_if_count++;
+ }
+
+ memcpy(&mc_origin, &cache->mf6c_origin, sizeof(struct in6_addr));
+ memcpy(&mc_group, &cache->mf6c_mcastgrp, sizeof(struct in6_addr));
+ spin_unlock(&mrt_lock);
+
+ rcu_read_lock();
+ offload_update_cb_f = rcu_dereference(ip6mr_mfc_event_offload_callback);
+
+ if (!offload_update_cb_f) {
+ rcu_read_unlock();
+ return;
+ }
+
+ offload_update_cb_f(&mc_group, &mc_origin, dest_if_count, dest_dev,
+ IP6MR_MFC_EVENT_UPDATE);
+ rcu_read_unlock();
+}
+
+/* ip6mr_sync_entry_delete()
+ * Call the registered offload callback to inform of a multicast route entry
+ * delete event
+ */
+static void ip6mr_sync_entry_delete(struct in6_addr *mc_origin,
+ struct in6_addr *mc_group)
+{
+ ip6mr_mfc_event_offload_callback_t offload_update_cb_f;
+
+ rcu_read_lock();
+ offload_update_cb_f = rcu_dereference(ip6mr_mfc_event_offload_callback);
+
+ if (!offload_update_cb_f) {
+ rcu_read_unlock();
+ return;
+ }
+
+ offload_update_cb_f(mc_group, mc_origin, 0, NULL,
+ IP6MR_MFC_EVENT_DELETE);
+ rcu_read_unlock();
+}
+/* QCA qca-mcs support - End */
+
static struct mr_table_ops ip6mr_mr_table_ops = {
.rht_params = &ip6mr_rht_params,
.cmparg_any = &ip6mr_mr_table_ops_cmparg_any,
@@ -697,6 +785,151 @@ static int call_ip6mr_mfc_entry_notifier
&mfc->_c, tb_id, &net->ipv6.ipmr_seq);
}
+/* QCA qca-mcs support - Start */
+/* ip6mr_register_mfc_event_offload_callback()
+ * Register the IPv6 multicast update callback for offload modules
+ */
+bool ip6mr_register_mfc_event_offload_callback(
+ ip6mr_mfc_event_offload_callback_t mfc_offload_cb)
+{
+ ip6mr_mfc_event_offload_callback_t offload_update_cb_f;
+
+ rcu_read_lock();
+ offload_update_cb_f = rcu_dereference(ip6mr_mfc_event_offload_callback);
+
+ if (offload_update_cb_f) {
+ rcu_read_unlock();
+ return false;
+ }
+ rcu_read_unlock();
+
+ spin_lock(&lock);
+ rcu_assign_pointer(ip6mr_mfc_event_offload_callback, mfc_offload_cb);
+ spin_unlock(&lock);
+ synchronize_rcu();
+ return true;
+}
+EXPORT_SYMBOL(ip6mr_register_mfc_event_offload_callback);
+
+/* ip6mr_unregister_mfc_event_offload_callback()
+ * De-register the IPv6 multicast update callback for offload modules
+ */
+void ip6mr_unregister_mfc_event_offload_callback(void)
+{
+ spin_lock(&lock);
+ rcu_assign_pointer(ip6mr_mfc_event_offload_callback, NULL);
+ spin_unlock(&lock);
+ synchronize_rcu();
+}
+EXPORT_SYMBOL(ip6mr_unregister_mfc_event_offload_callback);
+
+/* ip6mr_find_mfc_entry()
+ * Return the destination interface list for a particular multicast flow, and
+ * the number of interfaces in the list
+ */
+int ip6mr_find_mfc_entry(struct net *net, struct in6_addr *origin,
+ struct in6_addr *group, u32 max_dest_cnt,
+ u32 dest_dev[])
+{
+ int vifi, dest_if_count = 0;
+ struct mr_table *mrt;
+ struct mfc6_cache *cache;
+
+ mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
+ if (!mrt)
+ return -ENOENT;
+
+ spin_lock(&mrt_lock);
+ cache = ip6mr_cache_find(mrt, origin, group);
+ if (!cache) {
+ spin_unlock(&mrt_lock);
+ return -ENOENT;
+ }
+
+ for (vifi = 0; vifi < cache->_c.mfc_un.res.maxvif; vifi++) {
+ if (!((cache->_c.mfc_un.res.ttls[vifi] > 0) &&
+ (cache->_c.mfc_un.res.ttls[vifi] < 255))) {
+ continue;
+ }
+
+ /* We have another valid destination interface entry. Check if
+ * the number of the destination interfaces for the route is
+ * exceeding the size of the array given to us
+ */
+ if (dest_if_count == max_dest_cnt) {
+ spin_unlock(&mrt_lock);
+ return -EINVAL;
+ }
+
+ if (!VIF_EXISTS(mrt, vifi)) {
+ spin_unlock(&mrt_lock);
+ return -EINVAL;
+ }
+
+ dest_dev[dest_if_count] = mrt->vif_table[vifi].dev->ifindex;
+ dest_if_count++;
+ }
+ spin_unlock(&mrt_lock);
+
+ return dest_if_count;
+}
+EXPORT_SYMBOL(ip6mr_find_mfc_entry);
+
+/* ip6mr_mfc_stats_update()
+ * Update the MFC/VIF statistics for offloaded flows
+ */
+int ip6mr_mfc_stats_update(struct net *net, struct in6_addr *origin,
+ struct in6_addr *group, u64 pkts_in,
+ u64 bytes_in, uint64_t pkts_out,
+ u64 bytes_out)
+{
+ int vif, vifi;
+ struct mr_table *mrt;
+ struct mfc6_cache *cache;
+
+ mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
+
+ if (!mrt)
+ return -ENOENT;
+
+ spin_lock(&mrt_lock);
+ cache = ip6mr_cache_find(mrt, origin, group);
+ if (!cache) {
+ spin_unlock(&mrt_lock);
+ return -ENOENT;
+ }
+
+ vif = cache->_c.mfc_parent;
+
+ if (!VIF_EXISTS(mrt, vif)) {
+ spin_unlock(&mrt_lock);
+ return -EINVAL;
+ }
+
+ mrt->vif_table[vif].pkt_in += pkts_in;
+ mrt->vif_table[vif].bytes_in += bytes_in;
+ cache->_c.mfc_un.res.pkt += pkts_out;
+ cache->_c.mfc_un.res.bytes += bytes_out;
+
+ for (vifi = cache->_c.mfc_un.res.minvif;
+ vifi < cache->_c.mfc_un.res.maxvif; vifi++) {
+ if ((cache->_c.mfc_un.res.ttls[vifi] > 0) &&
+ (cache->_c.mfc_un.res.ttls[vifi] < 255)) {
+ if (!VIF_EXISTS(mrt, vifi)) {
+ spin_unlock(&mrt_lock);
+ return -EINVAL;
+ }
+ mrt->vif_table[vifi].pkt_out += pkts_out;
+ mrt->vif_table[vifi].bytes_out += bytes_out;
+ }
+ }
+
+ spin_unlock(&mrt_lock);
+ return 0;
+}
+EXPORT_SYMBOL(ip6mr_mfc_stats_update);
+/* QCA qca-mcs support - End */
+
/* Delete a VIF entry */
static int mif6_delete(struct mr_table *mrt, int vifi, int notify,
struct list_head *head)
@@ -1221,6 +1454,7 @@ static int ip6mr_mfc_delete(struct mr_ta
int parent)
{
struct mfc6_cache *c;
+ struct in6_addr mc_origin, mc_group; /* QCA qca-mcs support */
/* The entries are added/deleted only under RTNL */
rcu_read_lock();
@@ -1229,6 +1463,11 @@ static int ip6mr_mfc_delete(struct mr_ta
rcu_read_unlock();
if (!c)
return -ENOENT;
+
+ /* QCA qca-mcs support - Start */
+ memcpy(&mc_origin, &c->mf6c_origin, sizeof(struct in6_addr));
+ memcpy(&mc_group, &c->mf6c_mcastgrp, sizeof(struct in6_addr));
+ /* QCA qca-mcs support - End */
rhltable_remove(&mrt->mfc_hash, &c->_c.mnode, ip6mr_rht_params);
list_del_rcu(&c->_c.list);
@@ -1236,6 +1475,11 @@ static int ip6mr_mfc_delete(struct mr_ta
FIB_EVENT_ENTRY_DEL, c, mrt->id);
mr6_netlink_event(mrt, c, RTM_DELROUTE);
mr_cache_put(&c->_c);
+ /* QCA qca-mcs support - Start */
+ /* Inform offload modules of the delete event */
+ ip6mr_sync_entry_delete(&mc_origin, &mc_group);
+ /* QCA qca-mcs support - End */
+
return 0;
}
@@ -1457,6 +1701,10 @@ static int ip6mr_mfc_add(struct net *net
call_ip6mr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_REPLACE,
c, mrt->id);
mr6_netlink_event(mrt, c, RTM_NEWROUTE);
+ /* QCA qca-mcs support - Start */
+ /* Inform offload modules of the update event */
+ ip6mr_sync_entry_update(mrt, c);
+ /* QCA qca-mcs support - End */
return 0;
}
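
Taken together, the exported helpers above give an offload module (qca-mcs or an NSS connection manager, for example) what it needs: a registration hook for MFC add/delete events, a lookup of the destination interface list for a flow, and a way to push hardware counters back into the kernel statistics. The fragment below is a minimal sketch of such a caller; only ip6mr_find_mfc_entry() and ip6mr_mfc_stats_update() come from this patch, while the module name, the destination-array bound and the surrounding glue are illustrative assumptions.

/* Hedged sketch of an offload-module caller; everything named "example_*"
 * is a placeholder. The two ip6mr helpers are declared by the header part
 * of this patch (assumed here to be reachable via <linux/mroute6.h>).
 */
#include <linux/types.h>
#include <linux/in6.h>
#include <linux/mroute6.h>
#include <net/net_namespace.h>

#define EXAMPLE_MAX_DEST 8	/* assumed bound, not from the patch */

static void example_refresh_offloaded_flow(struct net *net,
					    struct in6_addr *origin,
					    struct in6_addr *group,
					    u64 hw_pkts, u64 hw_bytes)
{
	u32 dest_dev[EXAMPLE_MAX_DEST];
	int n;

	/* Destination ifindex list for the (origin, group) flow */
	n = ip6mr_find_mfc_entry(net, origin, group,
				 EXAMPLE_MAX_DEST, dest_dev);
	if (n < 0)
		return;	/* -ENOENT / -EINVAL: nothing to offload */

	/* ... (re)program the hardware path for dest_dev[0..n-1] ... */

	/* Fold the hardware counters back into the MFC/VIF statistics */
	ip6mr_mfc_stats_update(net, origin, group,
			       hw_pkts, hw_bytes, hw_pkts, hw_bytes);
}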

View file

@@ -0,0 +1,127 @@
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -417,6 +417,8 @@ static int crypto_authenc_create(struct
enc->base.cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
goto err_free_inst;
+ inst->alg.base.cra_flags |= (auth_base->cra_flags |
+ enc->base.cra_flags) & CRYPTO_ALG_NOSUPP_SG;
inst->alg.base.cra_priority = enc->base.cra_priority * 10 +
auth_base->cra_priority;
inst->alg.base.cra_blocksize = enc->base.cra_blocksize;
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -101,6 +101,11 @@
#define CRYPTO_NOLOAD 0x00008000
/*
+ * Set this flag if the algorithm does not support SG list transforms
+ */
+#define CRYPTO_ALG_NOSUPP_SG 0x0000c000
+
+/*
* The algorithm may allocate memory during request processing, i.e. during
* encryption, decryption, or hashing. Users can request an algorithm with this
* flag unset if they can't handle memory allocation failures.
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -3,6 +3,7 @@
#include <crypto/aead.h>
#include <crypto/authenc.h>
+#include <crypto/algapi.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
@@ -658,6 +658,7 @@ static int esp_output(struct xfrm_state
struct ip_esp_hdr *esph;
struct crypto_aead *aead;
struct esp_info esp;
+ bool nosupp_sg;
esp.inplace = true;
@@ -669,6 +670,11 @@ static int esp_output(struct xfrm_state
aead = x->data;
alen = crypto_aead_authsize(aead);
+ nosupp_sg = crypto_tfm_alg_type(&aead->base) & CRYPTO_ALG_NOSUPP_SG;
+ if (nosupp_sg && skb_linearize(skb)) {
+ return -ENOMEM;
+ }
+
esp.tfclen = 0;
if (x->tfcpad) {
struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
@@ -890,6 +896,7 @@ static int esp_input(struct xfrm_state *
u8 *iv;
struct scatterlist *sg;
int err = -EINVAL;
+ bool nosupp_sg;
if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + ivlen))
goto out;
@@ -897,6 +904,12 @@ static int esp_input(struct xfrm_state *
if (elen <= 0)
goto out;
+ nosupp_sg = crypto_tfm_alg_type(&aead->base) & CRYPTO_ALG_NOSUPP_SG;
+ if (nosupp_sg && skb_linearize(skb)) {
+ err = -ENOMEM;
+ goto out;
+ }
+
assoclen = sizeof(struct ip_esp_hdr);
seqhilen = 0;
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -15,6 +15,7 @@
#include <crypto/aead.h>
#include <crypto/authenc.h>
+#include <crypto/algapi.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
@@ -696,6 +696,7 @@ static int esp6_output(struct xfrm_state
struct ip_esp_hdr *esph;
struct crypto_aead *aead;
struct esp_info esp;
+ bool nosupp_sg;
esp.inplace = true;
@@ -707,6 +708,11 @@ static int esp6_output(struct xfrm_state
aead = x->data;
alen = crypto_aead_authsize(aead);
+ nosupp_sg = crypto_tfm_alg_type(&aead->base) & CRYPTO_ALG_NOSUPP_SG;
+ if (nosupp_sg && skb_linearize(skb)) {
+ return -ENOMEM;
+ }
+
esp.tfclen = 0;
if (x->tfcpad) {
struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
@@ -934,6 +940,7 @@ static int esp6_input(struct xfrm_state
__be32 *seqhi;
u8 *iv;
struct scatterlist *sg;
+ bool nosupp_sg;
if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + ivlen)) {
ret = -EINVAL;
@@ -945,6 +952,12 @@ static int esp6_input(struct xfrm_state
goto out;
}
+ nosupp_sg = crypto_tfm_alg_type(&aead->base) & CRYPTO_ALG_NOSUPP_SG;
+ if (nosupp_sg && skb_linearize(skb)) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
assoclen = sizeof(struct ip_esp_hdr);
seqhilen = 0;
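
CRYPTO_ALG_NOSUPP_SG is meant to be set by the algorithm provider; crypto_authenc_create() above copies it from the underlying hash/cipher into the combined AEAD, and the esp4/esp6 hunks then linearize the skb before handing it to such a transform. Below is a minimal registration sketch for a hardware cipher that cannot walk scatter-gather lists; only the flag itself comes from this patch, and every "example_*" name and the no-op callbacks are placeholders.

/* Hedged sketch: a hardware "cbc(aes)" provider opting out of SG handling.
 * Only CRYPTO_ALG_NOSUPP_SG is introduced by this patch; the rest is
 * illustrative. An authenc() instance built on top of this cipher inherits
 * the flag via the authenc.c hunk above, so ESP will linearize the skb.
 */
#include <linux/module.h>
#include <linux/crypto.h>
#include <crypto/aes.h>
#include <crypto/skcipher.h>

static int example_setkey(struct crypto_skcipher *tfm, const u8 *key,
			  unsigned int keylen)
{
	return 0;	/* program the key into hardware here */
}

static int example_encrypt(struct skcipher_request *req)
{
	return 0;	/* hand the (now linear) buffer to hardware here */
}

static int example_decrypt(struct skcipher_request *req)
{
	return 0;
}

static struct skcipher_alg example_hw_cbc_aes = {
	.base = {
		.cra_name		= "cbc(aes)",
		.cra_driver_name	= "cbc-aes-example-hw",
		.cra_priority		= 3000,
		/* Tell authenc/ESP that this transform cannot take SG lists */
		.cra_flags		= CRYPTO_ALG_ASYNC | CRYPTO_ALG_NOSUPP_SG,
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_module		= THIS_MODULE,
	},
	.min_keysize	= AES_MIN_KEY_SIZE,
	.max_keysize	= AES_MAX_KEY_SIZE,
	.ivsize		= AES_BLOCK_SIZE,
	.setkey		= example_setkey,
	.encrypt	= example_encrypt,
	.decrypt	= example_decrypt,
};

/* crypto_register_skcipher(&example_hw_cbc_aes) would go in the driver's
 * probe/init path. */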

View file

@@ -0,0 +1,80 @@
From eee3a7956b943dd3e23a74fbb5bfe89405eb0782 Mon Sep 17 00:00:00 2001
From: Andrea Righi <andrea.righi@canonical.com>
Date: Mon, 6 Dec 2021 17:34:47 +0100
Subject: UBUNTU: SAUCE: ipv6: fix NULL pointer dereference in ip6_output()
It is possible to trigger a NULL pointer dereference by running the srv6
net kselftest (tools/testing/selftests/net/srv6_end_dt46_l3vpn_test.sh):
[ 249.051216] BUG: kernel NULL pointer dereference, address: 0000000000000378
[ 249.052331] #PF: supervisor read access in kernel mode
[ 249.053137] #PF: error_code(0x0000) - not-present page
[ 249.053960] PGD 0 P4D 0
[ 249.054376] Oops: 0000 [#1] PREEMPT SMP NOPTI
[ 249.055083] CPU: 1 PID: 21 Comm: ksoftirqd/1 Tainted: G E 5.16.0-rc4 #2
[ 249.056328] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.14.0-2 04/01/2014
[ 249.057632] RIP: 0010:ip6_forward+0x53c/0xab0
[ 249.058354] Code: 49 c7 44 24 20 00 00 00 00 48 83 e0 fe 48 8b 40 30 48 3d 70 b2 b5 81 0f 85 b5 04 00 00 e8 7c f2 ff ff 41 89 c5 e9 17 01 00 00 <44> 8b 93 78 03 00 00 45 85 d2 0f 85 92 fb ff ff 49 8b 54 24 10 48
[ 249.061274] RSP: 0018:ffffc900000cbb30 EFLAGS: 00010246
[ 249.062042] RAX: 0000000000000000 RBX: 0000000000000000 RCX: ffff8881051d3400
[ 249.063141] RDX: ffff888104bda000 RSI: 00000000000002c0 RDI: 0000000000000000
[ 249.064264] RBP: ffffc900000cbbc8 R08: 0000000000000000 R09: 0000000000000000
[ 249.065376] R10: 0000000000000040 R11: 0000000000000000 R12: ffff888103409800
[ 249.066498] R13: ffff8881051d3410 R14: ffff888102725280 R15: ffff888103525000
[ 249.067619] FS: 0000000000000000(0000) GS:ffff88813bc80000(0000) knlGS:0000000000000000
[ 249.068881] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[ 249.069777] CR2: 0000000000000378 CR3: 0000000104980000 CR4: 0000000000750ee0
[ 249.070907] PKRU: 55555554
[ 249.071337] Call Trace:
[ 249.071730] <TASK>
[ 249.072070] ? debug_smp_processor_id+0x17/0x20
[ 249.072807] seg6_input_core+0x2bb/0x2d0
[ 249.073436] ? _raw_spin_unlock_irqrestore+0x29/0x40
[ 249.074225] seg6_input+0x3b/0x130
[ 249.074768] lwtunnel_input+0x5e/0xa0
[ 249.075357] ip_rcv+0x17b/0x190
[ 249.075867] ? update_load_avg+0x82/0x600
[ 249.076514] __netif_receive_skb_one_core+0x86/0xa0
[ 249.077231] __netif_receive_skb+0x15/0x60
[ 249.077843] process_backlog+0x97/0x160
[ 249.078389] __napi_poll+0x31/0x170
[ 249.078912] net_rx_action+0x229/0x270
[ 249.079506] __do_softirq+0xef/0x2ed
[ 249.080085] run_ksoftirqd+0x37/0x50
[ 249.080663] smpboot_thread_fn+0x193/0x230
[ 249.081312] kthread+0x17a/0x1a0
[ 249.081847] ? smpboot_register_percpu_thread+0xe0/0xe0
[ 249.082677] ? set_kthread_struct+0x50/0x50
[ 249.083340] ret_from_fork+0x22/0x30
[ 249.083926] </TASK>
[ 249.090295] ---[ end trace 1998d7ba5965a365 ]---
It looks like commit 0857d6f8c759 ("ipv6: When forwarding count rx stats
on the orig netdev") tries to determine the right netdev to account the
rx stats, but in this particular case it's failing and the netdev is
NULL.
Fallback to the previous method of determining the netdev interface (via
skb->dev) to account the rx stats when the orig netdev can't be
determined.
Fixes: 0857d6f8c759 ("ipv6: When forwarding count rx stats on the orig netdev")
Signed-off-by: Andrea Righi <andrea.righi@canonical.com>
(cherry picked from https://lore.kernel.org/lkml/20211206163447.991402-1-andrea.righi@canonical.com/T/#u)
Signed-off-by: Andrea Righi <andrea.righi@canonical.com>
---
net/ipv6/ip6_output.c | 3 +++
1 file changed, 3 insertions(+)
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -498,6 +498,9 @@ int ip6_forward(struct sk_buff *skb)
u32 mtu;
idev = __in6_dev_get_safely(dev_get_by_index_rcu(net, IP6CB(skb)->iif));
+ if (unlikely(!idev))
+ idev = __in6_dev_get_safely(skb->dev);
+
if (net->ipv6.devconf_all->forwarding == 0)
goto error;

View file

@@ -0,0 +1,384 @@
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -94,6 +94,7 @@ enum cpuhp_state {
CPUHP_RADIX_DEAD,
CPUHP_PAGE_ALLOC,
CPUHP_NET_DEV_DEAD,
+ CPUHP_SKB_RECYCLER_DEAD,
CPUHP_PCI_XGENE_DEAD,
CPUHP_IOMMU_IOVA_DEAD,
CPUHP_LUSTRE_CFS_DEAD,
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1065,6 +1065,10 @@ struct sk_buff {
/* only useable after checking ->active_extensions != 0 */
struct skb_ext *extensions;
#endif
+
+#ifdef CONFIG_DEBUG_OBJECTS_SKBUFF
+ void *free_addr;
+#endif
};
/* if you move pkt_type around you also must adapt those constants */
@@ -1250,7 +1254,7 @@ static inline void kfree_skb_list(struct sk_buff *segs)
kfree_skb_list_reason(segs, SKB_DROP_REASON_NOT_SPECIFIED);
}
-#ifdef CONFIG_TRACEPOINTS
+#ifdef CONFIG_SKB_RECYCLER
void consume_skb(struct sk_buff *skb);
#else
static inline void consume_skb(struct sk_buff *skb)
@@ -1262,6 +1266,9 @@ static inline void consume_skb(struct sk_buff *skb)
void __consume_stateless_skb(struct sk_buff *skb);
void __kfree_skb(struct sk_buff *skb);
extern struct kmem_cache *skbuff_cache;
+extern void kfree_skbmem(struct sk_buff *skb);
+extern void skb_release_data(struct sk_buff *skb, enum skb_drop_reason reason,
+ bool napi_safe);
void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -369,6 +369,27 @@ config NET_FLOW_LIMIT
with many clients some protection against DoS by a single (spoofed)
flow that greatly exceeds average workload.
+config SKB_RECYCLER
+ bool "Generic skb recycling"
+ default y
+ help
+ SKB_RECYCLER is used to implement RX-to-RX skb recycling.
+ This config enables the recycling scheme for bridging and
+ routing workloads. It can reduce skbuff freeing or
+ reallocation overhead.
+
+config SKB_RECYCLER_MULTI_CPU
+ bool "Cross-CPU recycling for CPU-locked workloads"
+ depends on SMP && SKB_RECYCLER
+ default n
+
+config ALLOC_SKB_PAGE_FRAG_DISABLE
+ bool "Disable page fragment based skbuff payload allocations"
+ depends on !SKB_RECYCLER
+ default n
+ help
+ Disable page fragment based allocations for skbuff payloads.
+
menu "Network testing"
config NET_PKTGEN
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -41,3 +41,4 @@ obj-$(CONFIG_NET_SOCK_MSG) += skmsg.o
obj-$(CONFIG_BPF_SYSCALL) += sock_map.o
obj-$(CONFIG_BPF_SYSCALL) += bpf_sk_storage.o
obj-$(CONFIG_OF) += of_net.o
+obj-$(CONFIG_SKB_RECYCLER) += skbuff_recycle.o
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -6016,10 +6016,16 @@ static int process_backlog(struct napi_struct *napi, int quota)
napi->weight = READ_ONCE(dev_rx_weight);
while (again) {
- struct sk_buff *skb;
+ struct sk_buff *skb, *next_skb;
while ((skb = __skb_dequeue(&sd->process_queue))) {
rcu_read_lock();
+
+ next_skb = skb_peek(&sd->process_queue);
+ if (likely(next_skb)) {
+ prefetch(next_skb->data);
+ }
+
__netif_receive_skb(skb);
rcu_read_unlock();
input_queue_head_incr(sd);
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -87,6 +87,31 @@
#include "dev.h"
#include "sock_destructor.h"
+#include "skbuff_recycle.h"
+
+struct kmem_cache *skb_data_cache;
+/*
+ * For the low-memory profile, NSS_SKB_FIXED_SIZE_2K is enabled and
+ * CONFIG_SKB_RECYCLER is disabled. For the premium and enterprise profiles,
+ * CONFIG_SKB_RECYCLER is enabled and NSS_SKB_FIXED_SIZE_2K is disabled.
+ * Irrespective of whether NSS_SKB_FIXED_SIZE_2K is enabled or disabled,
+ * CONFIG_SKB_RECYCLER and __LP64__ determine the value of SKB_DATA_CACHE_SIZE.
+ */
+#if defined(CONFIG_SKB_RECYCLER)
+/*
+ * 2688 for 64bit arch, 2624 for 32bit arch
+ */
+#define SKB_DATA_CACHE_SIZE (SKB_DATA_ALIGN(SKB_RECYCLE_SIZE + NET_SKB_PAD) + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#else
+/*
+ * 2368 for 64bit arch, 2176 for 32bit arch
+ */
+#if defined(__LP64__)
+#define SKB_DATA_CACHE_SIZE ((SKB_DATA_ALIGN(1984 + NET_SKB_PAD)) + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#else
+#define SKB_DATA_CACHE_SIZE ((SKB_DATA_ALIGN(1856 + NET_SKB_PAD)) + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#endif
+#endif
struct kmem_cache *skbuff_cache __ro_after_init;
static struct kmem_cache *skbuff_fclone_cache __ro_after_init;
@@ -551,21 +576,20 @@ static void *kmalloc_reserve(unsigned int *size, gfp_t flags, int node,
bool *pfmemalloc)
{
bool ret_pfmemalloc = false;
- size_t obj_size;
+ unsigned int obj_size = *size;
void *obj;
obj_size = SKB_HEAD_ALIGN(*size);
- if (obj_size <= SKB_SMALL_HEAD_CACHE_SIZE &&
- !(flags & KMALLOC_NOT_NORMAL_BITS)) {
- obj = kmem_cache_alloc_node(skb_small_head_cache,
- flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
- node);
- *size = SKB_SMALL_HEAD_CACHE_SIZE;
+ if (obj_size > SZ_2K && obj_size <= SKB_DATA_CACHE_SIZE) {
+ obj = kmem_cache_alloc_node(skb_data_cache,
+ flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
+ node);
+ *size = SKB_DATA_CACHE_SIZE;
if (obj || !(gfp_pfmemalloc_allowed(flags)))
goto out;
/* Try again but now we are using pfmemalloc reserves */
ret_pfmemalloc = true;
- obj = kmem_cache_alloc_node(skb_small_head_cache, flags, node);
+ obj = kmem_cache_alloc_node(skb_data_cache, flags, node);
goto out;
}
@@ -648,10 +671,12 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
* aligned memory blocks, unless SLUB/SLAB debug is enabled.
* Both skb->head and skb_shared_info are cache line aligned.
*/
+ size = SKB_DATA_ALIGN(size);
+ size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
data = kmalloc_reserve(&size, gfp_mask, node, &pfmemalloc);
if (unlikely(!data))
goto nodata;
- /* kmalloc_size_roundup() might give us more room than requested.
+ /* kmalloc_reserve(size) might give us more room than requested.
* Put skb_shared_info exactly at the end of allocated zone,
* to allow max possible filling before reallocation.
*/
@@ -686,7 +711,7 @@ EXPORT_SYMBOL(__alloc_skb);
/**
* __netdev_alloc_skb - allocate an skbuff for rx on a specific device
* @dev: network device to receive on
- * @len: length to allocate
+ * @length: length to allocate
* @gfp_mask: get_free_pages mask, passed to alloc_skb
*
* Allocate a new &sk_buff and assign it a usage count of one. The
@@ -696,29 +721,53 @@ EXPORT_SYMBOL(__alloc_skb);
*
* %NULL is returned if there is no free memory.
*/
-struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
- gfp_t gfp_mask)
+struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
+ unsigned int length, gfp_t gfp_mask)
{
- struct page_frag_cache *nc;
struct sk_buff *skb;
+ unsigned int len = length;
+
+#ifdef CONFIG_SKB_RECYCLER
+ skb = skb_recycler_alloc(dev, length);
+ if (likely(skb))
+ return skb;
+
+ len = SKB_RECYCLE_SIZE;
+ if (unlikely(length > SKB_RECYCLE_SIZE))
+ len = length;
+
+ skb = __alloc_skb(len + NET_SKB_PAD, gfp_mask,
+ SKB_ALLOC_RX, NUMA_NO_NODE);
+ if (!skb)
+ goto skb_fail;
+ goto skb_success;
+#else
+ struct page_frag_cache *nc;
bool pfmemalloc;
+ bool page_frag_alloc_enable = true;
void *data;
len += NET_SKB_PAD;
+
+#ifdef CONFIG_ALLOC_SKB_PAGE_FRAG_DISABLE
+ page_frag_alloc_enable = false;
+#endif
/* If requested length is either too small or too big,
* we use kmalloc() for skb->head allocation.
*/
if (len <= SKB_WITH_OVERHEAD(1024) ||
len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
- (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
+ (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA)) ||
+ !page_frag_alloc_enable) {
skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
if (!skb)
goto skb_fail;
goto skb_success;
}
- len = SKB_HEAD_ALIGN(len);
+ len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ len = SKB_DATA_ALIGN(len);
if (sk_memalloc_socks())
gfp_mask |= __GFP_MEMALLOC;
@@ -747,6 +796,7 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
if (pfmemalloc)
skb->pfmemalloc = 1;
skb->head_frag = 1;
+#endif
skb_success:
skb_reserve(skb, NET_SKB_PAD);
@@ -817,7 +867,8 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
data = page_frag_alloc_1k(&nc->page_small, gfp_mask);
pfmemalloc = NAPI_SMALL_PAGE_PFMEMALLOC(nc->page_small);
} else {
- len = SKB_HEAD_ALIGN(len);
+ len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ len = SKB_DATA_ALIGN(len);
data = page_frag_alloc(&nc->page, len, gfp_mask);
pfmemalloc = nc->page.pfmemalloc;
@@ -975,7 +1026,7 @@ static void skb_free_head(struct sk_buff *skb, bool napi_safe)
}
}
-static void skb_release_data(struct sk_buff *skb, enum skb_drop_reason reason,
+void skb_release_data(struct sk_buff *skb, enum skb_drop_reason reason,
bool napi_safe)
{
struct skb_shared_info *shinfo = skb_shinfo(skb);
@@ -1018,7 +1069,7 @@ static void skb_release_data(struct sk_buff *skb, enum skb_drop_reason reason,
/*
* Free an skbuff by memory without cleaning the state.
*/
-static void kfree_skbmem(struct sk_buff *skb)
+void kfree_skbmem(struct sk_buff *skb)
{
struct sk_buff_fclones *fclones;
@@ -1282,7 +1333,6 @@ void skb_tx_error(struct sk_buff *skb)
}
EXPORT_SYMBOL(skb_tx_error);
-#ifdef CONFIG_TRACEPOINTS
/**
* consume_skb - free an skbuff
* @skb: buffer to free
@@ -1291,13 +1341,48 @@ EXPORT_SYMBOL(skb_tx_error);
* Functions identically to kfree_skb, but kfree_skb assumes that the frame
* is being dropped after a failure and notes that
*/
+#ifdef CONFIG_SKB_RECYCLER
void consume_skb(struct sk_buff *skb)
{
if (!skb_unref(skb))
return;
+ prefetch(&skb->destructor);
+
+	/* Tian: Not sure if we need to continue using this,
+	 * since unref does the work in 5.4
+ */
+
+ /*
+ if (likely(atomic_read(&skb->users) == 1))
+ smp_rmb();
+ else if (likely(!atomic_dec_and_test(&skb->users)))
+ return;
+ */
+ /* If possible we'd like to recycle any skb rather than just free it,
+ * but in order to do that we need to release any head state too.
+ * We don't want to do this later because we'll be in a pre-emption
+ * disabled state.
+ */
+ skb_release_head_state(skb);
+
+ /* Can we recycle this skb? If we can then it will be much faster
+ * for us to recycle this one later than to allocate a new one
+ * from scratch.
+ */
+ if (likely(skb->head) && likely(skb_recycler_consume(skb)))
+ return;
+
+#ifdef CONFIG_TRACEPOINTS
trace_consume_skb(skb, __builtin_return_address(0));
- __kfree_skb(skb);
+#endif
+ /* We're not recycling so now we need to do the rest of what we would
+ * have done in __kfree_skb (above and beyond the skb_release_head_state
+ * that we already did).
+ */
+ if (likely(skb->head))
+ skb_release_data(skb, SKB_CONSUMED, false);
+ kfree_skbmem(skb);
}
EXPORT_SYMBOL(consume_skb);
#endif
@@ -2107,6 +2192,8 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
if (skb_pfmemalloc(skb))
gfp_mask |= __GFP_MEMALLOC;
+ size = SKB_DATA_ALIGN(size);
+ size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL);
if (!data)
goto nodata;
@@ -4854,6 +4941,10 @@ static void skb_extensions_init(void) {}
void __init skb_init(void)
{
+ skb_data_cache = kmem_cache_create_usercopy("skb_data_cache",
+ SKB_DATA_CACHE_SIZE,
+ 0, SLAB_PANIC, 0, SKB_DATA_CACHE_SIZE,
+ NULL);
skbuff_cache = kmem_cache_create_usercopy("skbuff_head_cache",
sizeof(struct sk_buff),
0,
@@ -4879,6 +4970,7 @@ void __init skb_init(void)
SKB_SMALL_HEAD_HEADROOM,
NULL);
skb_extensions_init();
+ skb_recycler_init();
}
static int
@@ -6382,6 +6474,8 @@ static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off,
if (skb_pfmemalloc(skb))
gfp_mask |= __GFP_MEMALLOC;
+ size = SKB_DATA_ALIGN(size);
+ size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL);
if (!data)
return -ENOMEM;
@@ -6498,6 +6592,8 @@ static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off,
if (skb_pfmemalloc(skb))
gfp_mask |= __GFP_MEMALLOC;
+ size = SKB_DATA_ALIGN(size);
+ size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL);
if (!data)
return -ENOMEM;
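
The SKB_DATA_CACHE_SIZE comment above describes two build profiles. Expressed as a kernel config fragment they look roughly like the sketch below; only the three symbols added to net/Kconfig by this patch are real options, and the profile split follows that comment rather than any shipped defconfig.

# Premium/enterprise profile: recycler on, cross-CPU recycling optional
CONFIG_SKB_RECYCLER=y
# CONFIG_SKB_RECYCLER_MULTI_CPU is not set
# (CONFIG_ALLOC_SKB_PAGE_FRAG_DISABLE depends on !SKB_RECYCLER, so it is hidden)

# Low-memory profile: recycler off, page-frag skb payloads optionally disabled
# CONFIG_SKB_RECYCLER is not set
CONFIG_ALLOC_SKB_PAGE_FRAG_DISABLE=y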

View file

@@ -0,0 +1,99 @@
--- a/arch/arm64/boot/dts/qcom/ipq6010-wax214.dts
+++ b/arch/arm64/boot/dts/qcom/ipq6010-wax214.dts
@@ -25,7 +25,7 @@
chosen {
stdout-path = "serial0:115200n8";
- bootargs-append = " root=/dev/ubiblock0_1";
+ bootargs-append = " coherent_pool=2M swiotlb=noforce root=/dev/ubiblock0_1";
};
keys {
--- a/arch/arm64/boot/dts/qcom/ipq8070-cax1800.dts
+++ b/arch/arm64/boot/dts/qcom/ipq8070-cax1800.dts
@@ -27,7 +27,7 @@
chosen {
stdout-path = "serial0:115200n8";
- bootargs-append = " root=/dev/ubiblock0_1";
+ bootargs-append = " coherent_pool=2M swiotlb=noforce root=/dev/ubiblock0_1";
};
keys {
--- a/arch/arm64/boot/dts/qcom/ipq8070-rm2-6.dts
+++ b/arch/arm64/boot/dts/qcom/ipq8070-rm2-6.dts
@@ -32,7 +32,7 @@
chosen {
stdout-path = "serial0:115200n8";
- bootargs-append = " root=/dev/ubiblock0_1";
+ bootargs-append = " coherent_pool=2M swiotlb=noforce root=/dev/ubiblock0_1";
};
keys {
--- a/arch/arm64/boot/dts/qcom/ipq8071-ax3600.dtsi
+++ b/arch/arm64/boot/dts/qcom/ipq8071-ax3600.dtsi
@@ -20,7 +20,7 @@
chosen {
stdout-path = "serial0:115200n8";
- bootargs-append = " root=/dev/ubiblock0_0";
+ bootargs-append = " coherent_pool=2M swiotlb=noforce root=/dev/ubiblock0_0";
};
keys {
--- a/arch/arm64/boot/dts/qcom/ipq8071-mf269.dts
+++ b/arch/arm64/boot/dts/qcom/ipq8071-mf269.dts
@@ -24,7 +24,7 @@
chosen {
stdout-path = "serial0:115200n8";
- bootargs-append = " root=/dev/ubiblock0_0";
+ bootargs-append = " coherent_pool=2M swiotlb=noforce root=/dev/ubiblock0_0";
};
keys {
--- a/arch/arm64/boot/dts/qcom/ipq8072-ax880.dts
+++ b/arch/arm64/boot/dts/qcom/ipq8072-ax880.dts
@@ -29,7 +29,7 @@
chosen {
stdout-path = "serial0:115200n8";
- bootargs-append = " root=/dev/ubiblock0_1";
+ bootargs-append = " coherent_pool=2M swiotlb=noforce root=/dev/ubiblock0_1";
};
keys {
--- a/arch/arm64/boot/dts/qcom/ipq8072-wax218.dts
+++ b/arch/arm64/boot/dts/qcom/ipq8072-wax218.dts
@@ -27,7 +27,7 @@
* Netgear's U-Boot adds "ubi.mtd=rootfs root=mtd:ubi_rootfs"
* That fails to create a UBI block device, so add it here.
*/
- bootargs-append = " ubi.block=0,rootfs root=/dev/ubiblock0_1";
+ bootargs-append = " coherent_pool=2M swiotlb=noforce ubi.block=0,rootfs root=/dev/ubiblock0_1";
};
keys {
--- a/arch/arm64/boot/dts/qcom/ipq8072-wpq873.dts
+++ b/arch/arm64/boot/dts/qcom/ipq8072-wpq873.dts
@@ -31,7 +31,7 @@
chosen {
stdout-path = "serial0:115200n8";
- bootargs-append = " root=/dev/ubiblock0_1";
+ bootargs-append = " coherent_pool=2M swiotlb=noforce root=/dev/ubiblock0_1";
};
keys {
--- a/arch/arm64/boot/dts/qcom/ipq8174-mx4200.dtsi
+++ b/arch/arm64/boot/dts/qcom/ipq8174-mx4200.dtsi
@@ -29,7 +29,7 @@
chosen {
stdout-path = "serial0:115200n8";
- bootargs-append = " root=/dev/ubiblock0_0";
+ bootargs-append = " coherent_pool=2M swiotlb=noforce root=/dev/ubiblock0_0";
};
keys {

View file

@@ -0,0 +1,10 @@
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -341,7 +341,6 @@ __crypto_register_alg(struct crypto_alg
}
if (!strcmp(q->cra_driver_name, alg->cra_name) ||
- !strcmp(q->cra_driver_name, alg->cra_driver_name) ||
!strcmp(q->cra_name, alg->cra_driver_name))
goto err;
}

View file

@@ -0,0 +1,13 @@
--- a/drivers/mtd/mtdblock.c
+++ b/drivers/mtd/mtdblock.c
@@ -261,10 +261,6 @@ static int mtdblock_open(struct mtd_blkt
return 0;
}
- if (mtd_type_is_nand(mbd->mtd))
- pr_warn_ratelimited("%s: MTD device '%s' is NAND, please consider using UBI block devices instead.\n",
- mbd->tr->name, mbd->mtd->name);
-
/* OK, it's not open. Create cache info for it */
mtdblk->count = 1;
mutex_init(&mtdblk->cache_mutex);