From e87cdc130613538fea7b20cfc32737bbbcc48a71 Mon Sep 17 00:00:00 2001
From: suyuan168 <175338101@qq.com>
Date: Thu, 9 Jun 2022 11:32:36 +0800
Subject: [PATCH] Create 953-net-patch-linux-kernel-to-support-shortcut-fe.patch

---
 ...-linux-kernel-to-support-shortcut-fe.patch | 253 ++++++++++++++++++
 1 file changed, 253 insertions(+)
 create mode 100644 root/target/linux/generic/hack-5.4/953-net-patch-linux-kernel-to-support-shortcut-fe.patch

diff --git a/root/target/linux/generic/hack-5.4/953-net-patch-linux-kernel-to-support-shortcut-fe.patch b/root/target/linux/generic/hack-5.4/953-net-patch-linux-kernel-to-support-shortcut-fe.patch
new file mode 100644
index 00000000..57b40292
--- /dev/null
+++ b/root/target/linux/generic/hack-5.4/953-net-patch-linux-kernel-to-support-shortcut-fe.patch
@@ -0,0 +1,253 @@
+--- a/include/linux/if_bridge.h
++++ b/include/linux/if_bridge.h
+@@ -52,6 +52,9 @@ struct br_ip_list {
+ 
+ extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *));
+ 
++extern void br_dev_update_stats(struct net_device *dev,
++				struct rtnl_link_stats64 *nlstats);
++
+ #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING)
+ int br_multicast_list_adjacent(struct net_device *dev,
+ 			       struct list_head *br_ip_list);
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -826,6 +826,10 @@ struct sk_buff {
+ #endif
+ 	__u8			gro_skip:1;
+ 
++#ifdef CONFIG_SHORTCUT_FE
++	__u8			fast_forwarded:1;
++#endif
++
+ #ifdef CONFIG_NET_SCHED
+ 	__u16			tc_index;	/* traffic control index */
+ #endif
+--- a/include/linux/timer.h
++++ b/include/linux/timer.h
+@@ -18,6 +18,10 @@ struct timer_list {
+ 	void			(*function)(struct timer_list *);
+ 	u32			flags;
+ 
++#ifdef CONFIG_SHORTCUT_FE
++	unsigned long		cust_data;
++#endif
++
+ #ifdef CONFIG_LOCKDEP
+ 	struct lockdep_map	lockdep_map;
+ #endif
+--- a/include/net/netfilter/nf_conntrack_ecache.h
++++ b/include/net/netfilter/nf_conntrack_ecache.h
+@@ -75,6 +75,8 @@ struct nf_ct_event {
+ #ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
+ extern int nf_conntrack_register_notifier(struct net *net, struct notifier_block *nb);
+ extern int nf_conntrack_unregister_notifier(struct net *net, struct notifier_block *nb);
++extern int nf_conntrack_register_chain_notifier(struct net *net, struct notifier_block *nb);
++extern int nf_conntrack_unregister_chain_notifier(struct net *net, struct notifier_block *nb);
+ #else
+ struct nf_ct_event_notifier {
+ 	int (*fcn)(unsigned int events, struct nf_ct_event *item);
+--- a/net/bridge/br_if.c
++++ b/net/bridge/br_if.c
+@@ -763,6 +763,28 @@ void br_port_flags_change(struct net_bri
+ 	br_recalculate_neigh_suppress_enabled(br);
+ }
+ 
++void br_dev_update_stats(struct net_device *dev,
++			 struct rtnl_link_stats64 *nlstats)
++{
++	struct net_bridge *br;
++	struct pcpu_sw_netstats *stats;
++
++	/* Is this a bridge? */
++	if (!(dev->priv_flags & IFF_EBRIDGE))
++		return;
++
++	br = netdev_priv(dev);
++	stats = this_cpu_ptr(br->stats);
++
++	u64_stats_update_begin(&stats->syncp);
++	stats->rx_packets += nlstats->rx_packets;
++	stats->rx_bytes += nlstats->rx_bytes;
++	stats->tx_packets += nlstats->tx_packets;
++	stats->tx_bytes += nlstats->tx_bytes;
++	u64_stats_update_end(&stats->syncp);
++}
++EXPORT_SYMBOL_GPL(br_dev_update_stats);
++
+ bool br_port_flag_is_set(const struct net_device *dev, unsigned long flag)
+ {
+ 	struct net_bridge_port *p;
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -3197,9 +3197,17 @@ static int xmit_one(struct sk_buff *skb,
+ 	unsigned int len;
+ 	int rc;
+ 
++#ifdef CONFIG_SHORTCUT_FE
++	/* If this skb has been fast forwarded then we don't want it to
++	 * go to any taps (by definition we're trying to bypass them).
++	 */
++	if (!skb->fast_forwarded) {
++#endif
+ 	if (dev_nit_active(dev))
+ 		dev_queue_xmit_nit(skb, dev);
+-
++#ifdef CONFIG_SHORTCUT_FE
++	}
++#endif
+ #ifdef CONFIG_ETHERNET_PACKET_MANGLE
+ 	if (!dev->eth_mangle_tx ||
+ 	    (skb = dev->eth_mangle_tx(dev, skb)) != NULL)
+@@ -4714,6 +4722,11 @@ void netdev_rx_handler_unregister(struct
+ }
+ EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
+ 
++#ifdef CONFIG_SHORTCUT_FE
++int (*athrs_fast_nat_recv)(struct sk_buff *skb) __rcu __read_mostly;
++EXPORT_SYMBOL_GPL(athrs_fast_nat_recv);
++#endif
++
+ /*
+  * Limit the use of PFMEMALLOC reserves to those protocols that implement
+  * the special handling of PFMEMALLOC skbs.
+@@ -4764,6 +4777,10 @@ static int __netif_receive_skb_core(stru
+ 	int ret = NET_RX_DROP;
+ 	__be16 type;
+ 
++#ifdef CONFIG_SHORTCUT_FE
++	int (*fast_recv)(struct sk_buff *skb);
++#endif
++
+ 	net_timestamp_check(!netdev_tstamp_prequeue, skb);
+ 
+ 	trace_netif_receive_skb(skb);
+@@ -4803,6 +4820,16 @@ another_round:
+ 		goto out;
+ 	}
+ 
++#ifdef CONFIG_SHORTCUT_FE
++	fast_recv = rcu_dereference(athrs_fast_nat_recv);
++	if (fast_recv) {
++		if (fast_recv(skb)) {
++			ret = NET_RX_SUCCESS;
++			goto out;
++		}
++	}
++#endif
++
+ 	if (skb_skip_tc_classify(skb))
+ 		goto skip_classify;
+ 
+--- a/net/Kconfig
++++ b/net/Kconfig
+@@ -473,3 +473,6 @@ config HAVE_CBPF_JIT
+ # Extended BPF JIT (eBPF)
+ config HAVE_EBPF_JIT
+ 	bool
++
++config SHORTCUT_FE
++	bool "Enables kernel network stack path for Shortcut Forwarding Engine"
+--- a/net/netfilter/nf_conntrack_proto_tcp.c
++++ b/net/netfilter/nf_conntrack_proto_tcp.c
+@@ -34,11 +34,19 @@
+ /* Do not check the TCP window for incoming packets */
+ static int nf_ct_tcp_no_window_check __read_mostly = 1;
+ 
++#ifdef CONFIG_SHORTCUT_FE
++EXPORT_SYMBOL_GPL(nf_ct_tcp_no_window_check);
++#endif
++
+ /* "Be conservative in what you do,
+     be liberal in what you accept from others."
+     If it's non-zero, we mark only out of window RST segments as INVALID. */
+ static int nf_ct_tcp_be_liberal __read_mostly = 0;
+ 
++#ifdef CONFIG_SHORTCUT_FE
++EXPORT_SYMBOL_GPL(nf_ct_tcp_be_liberal);
++#endif
++
+ /* If it is set to zero, we disable picking up already established
+    connections. */
+ static int nf_ct_tcp_loose __read_mostly = 1;
+--- a/net/netfilter/nf_conntrack_ecache.c
++++ b/net/netfilter/nf_conntrack_ecache.c
+@@ -158,7 +158,11 @@ int nf_conntrack_eventmask_report(unsign
+ 
+ 	rcu_read_lock();
+ 	notify = rcu_dereference(net->ct.nf_conntrack_event_cb);
++#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
++	if (!notify && !rcu_dereference_raw(net->ct.nf_conntrack_chain.head))
++#else
+ 	if (!notify)
++#endif
+ 		goto out_unlock;
+ 
+ 	e = nf_ct_ecache_find(ct);
+@@ -177,7 +181,14 @@ int nf_conntrack_eventmask_report(unsign
+ 	if (!((eventmask | missed) & e->ctmask))
+ 		goto out_unlock;
+ 
++#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
++	ret = atomic_notifier_call_chain(&net->ct.nf_conntrack_chain,
++		eventmask | missed, &item);
++	if (notify)
++		ret = notify->fcn(eventmask | missed, &item);
++#else
+ 	ret = notify->fcn(eventmask | missed, &item);
++#endif
+ 	if (unlikely(ret < 0 || missed)) {
+ 		spin_lock_bh(&ct->lock);
+ 		if (ret < 0) {
+@@ -259,7 +270,11 @@ void nf_ct_deliver_cached_events(struct
+ 
+ 	rcu_read_lock();
+ 	notify = rcu_dereference(net->ct.nf_conntrack_event_cb);
++#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
++	if ((notify == NULL) && !rcu_dereference_raw(net->ct.nf_conntrack_chain.head))
++#else
+ 	if (notify == NULL)
++#endif
+ 		goto out_unlock;
+ 
+ 	e = nf_ct_ecache_find(ct);
+@@ -283,7 +298,15 @@ void nf_ct_deliver_cached_events(struct
+ 	item.portid = 0;
+ 	item.report = 0;
+ 
++#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
++	ret = atomic_notifier_call_chain(&net->ct.nf_conntrack_chain,
++			events | missed,
++			&item);
++	if (notify != NULL)
++		ret = notify->fcn(events | missed, &item);
++#else
+ 	ret = notify->fcn(events | missed, &item);
++#endif
+ 
+ 	if (likely(ret == 0 && !missed))
+ 		goto out_unlock;
+@@ -337,6 +360,11 @@ int nf_conntrack_register_notifier(struc
+ {
+ 	return atomic_notifier_chain_register(&net->ct.nf_conntrack_chain, nb);
+ }
++int nf_conntrack_register_chain_notifier(struct net *net, struct notifier_block *nb)
++{
++	return atomic_notifier_chain_register(&net->ct.nf_conntrack_chain, nb);
++}
++EXPORT_SYMBOL_GPL(nf_conntrack_register_chain_notifier);
+ #else
+ int nf_conntrack_register_notifier(struct net *net,
+ 				   struct nf_ct_event_notifier *new)
+@@ -366,6 +394,11 @@ int nf_conntrack_unregister_notifier(str
+ {
+ 	return atomic_notifier_chain_unregister(&net->ct.nf_conntrack_chain, nb);
+ }
++int nf_conntrack_unregister_chain_notifier(struct net *net, struct notifier_block *nb)
++{
++	return atomic_notifier_chain_unregister(&net->ct.nf_conntrack_chain, nb);
++}
++EXPORT_SYMBOL_GPL(nf_conntrack_unregister_chain_notifier);
+ #else
+ void nf_conntrack_unregister_notifier(struct net *net,
+ 				      struct nf_ct_event_notifier *new)
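
Usage note (not part of the patch): the hunks above only export hooks; an out-of-tree module such as shortcut-fe is expected to fill them in. Below is a minimal, hypothetical consumer sketch, assuming CONFIG_SHORTCUT_FE and CONFIG_NF_CONNTRACK_CHAIN_EVENTS are enabled; the names example_fast_recv, example_ct_event and the module boilerplate are illustrative only and do not come from this patch. A real fast path would also set skb->fast_forwarded on packets it transmits directly and call br_dev_update_stats() to keep bridge counters in sync for bypassed traffic.

/* Hypothetical consumer sketch -- not part of the patch above. */
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/notifier.h>
#include <net/net_namespace.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_ecache.h>

/* Receive hook exported by the patched net/core/dev.c */
extern int (*athrs_fast_nat_recv)(struct sk_buff *skb);

/* Return 1 if the skb was consumed by the fast path, 0 to hand it back
 * to the normal stack (see __netif_receive_skb_core() above).
 */
static int example_fast_recv(struct sk_buff *skb)
{
	return 0;
}

/* Conntrack events delivered through the chain notifier added above;
 * a real module would create/flush its cached flows here.
 */
static int example_ct_event(struct notifier_block *nb,
			    unsigned long events, void *ptr)
{
	return NOTIFY_DONE;
}

static struct notifier_block example_ct_nb = {
	.notifier_call = example_ct_event,
};

static int __init example_init(void)
{
	int ret;

	ret = nf_conntrack_register_chain_notifier(&init_net, &example_ct_nb);
	if (ret)
		return ret;

	/* Publish the receive hook polled in __netif_receive_skb_core(). */
	rcu_assign_pointer(athrs_fast_nat_recv, example_fast_recv);
	return 0;
}

static void __exit example_exit(void)
{
	rcu_assign_pointer(athrs_fast_nat_recv, NULL);
	synchronize_rcu();
	nf_conntrack_unregister_chain_notifier(&init_net, &example_ct_nb);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");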