commit cb28bd362e (parent 179fd5a158)
Author: suyuan168
Date: 2022-06-09 17:23:38 +08:00
2 changed files with 566 additions and 0 deletions


@@ -0,0 +1,313 @@
From 42824d4b753f84ccf885eca602c5037338b546c8 Mon Sep 17 00:00:00 2001
From: Zhi Chen <zhichen@codeaurora.org>
Date: Tue, 13 Jan 2015 14:28:18 -0800
Subject: [PATCH 3/3] net: conntrack events, support multiple registrant
Merging this patch from kernel 3.4:
This was supported by old (.28) kernel versions but removed
because of its overhead.
But we need this feature for the NA connection manager. Both ipv4
and ipv6 modules need to register themselves for ct events.
Change-Id: Iebfb254590fb594f5baf232f849d1b7ae45ef757
Signed-off-by: Zhi Chen <zhichen@codeaurora.org>
---
include/net/netfilter/nf_conntrack_ecache.h | 42 ++++++++++++++++++-
include/net/netns/conntrack.h | 4 ++
net/netfilter/Kconfig | 8 ++++
net/netfilter/nf_conntrack_core.c | 4 ++
net/netfilter/nf_conntrack_ecache.c | 63 +++++++++++++++++++++++++++++
net/netfilter/nf_conntrack_netlink.c | 17 ++++++++
6 files changed, 137 insertions(+), 1 deletion(-)
--- a/include/net/netfilter/nf_conntrack_ecache.h
+++ b/include/net/netfilter/nf_conntrack_ecache.h
@@ -72,6 +72,10 @@ struct nf_ct_event {
int report;
};
+#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
+extern int nf_conntrack_register_notifier(struct net *net, struct notifier_block *nb);
+extern int nf_conntrack_unregister_notifier(struct net *net, struct notifier_block *nb);
+#else
struct nf_ct_event_notifier {
int (*fcn)(unsigned int events, struct nf_ct_event *item);
};
@@ -80,6 +84,7 @@ int nf_conntrack_register_notifier(struc
struct nf_ct_event_notifier *nb);
void nf_conntrack_unregister_notifier(struct net *net,
struct nf_ct_event_notifier *nb);
+#endif
void nf_ct_deliver_cached_events(struct nf_conn *ct);
int nf_conntrack_eventmask_report(unsigned int eventmask, struct nf_conn *ct,
@@ -105,11 +110,13 @@ static inline void
nf_conntrack_event_cache(enum ip_conntrack_events event, struct nf_conn *ct)
{
#ifdef CONFIG_NF_CONNTRACK_EVENTS
- struct net *net = nf_ct_net(ct);
struct nf_conntrack_ecache *e;
+#ifndef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
+ struct net *net = nf_ct_net(ct);
if (!rcu_access_pointer(net->ct.nf_conntrack_event_cb))
return;
+#endif
e = nf_ct_ecache_find(ct);
if (e == NULL)
@@ -124,10 +131,12 @@ nf_conntrack_event_report(enum ip_conntr
u32 portid, int report)
{
#ifdef CONFIG_NF_CONNTRACK_EVENTS
+#ifndef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
const struct net *net = nf_ct_net(ct);
if (!rcu_access_pointer(net->ct.nf_conntrack_event_cb))
return 0;
+#endif
return nf_conntrack_eventmask_report(1 << event, ct, portid, report);
#else
@@ -139,10 +148,12 @@ static inline int
nf_conntrack_event(enum ip_conntrack_events event, struct nf_conn *ct)
{
#ifdef CONFIG_NF_CONNTRACK_EVENTS
+#ifndef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
const struct net *net = nf_ct_net(ct);
if (!rcu_access_pointer(net->ct.nf_conntrack_event_cb))
return 0;
+#endif
return nf_conntrack_eventmask_report(1 << event, ct, 0, 0);
#else
--- a/include/net/netns/conntrack.h
+++ b/include/net/netns/conntrack.h
@@ -112,7 +112,11 @@ struct netns_ct {
struct ct_pcpu __percpu *pcpu_lists;
struct ip_conntrack_stat __percpu *stat;
+#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
+ struct atomic_notifier_head nf_conntrack_chain;
+#else
struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb;
+#endif
struct nf_exp_event_notifier __rcu *nf_expect_event_cb;
struct nf_ip_net nf_ct_proto;
#if defined(CONFIG_NF_CONNTRACK_LABELS)
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -136,6 +136,14 @@ config NF_CONNTRACK_EVENTS
If unsure, say `N'.
+config NF_CONNTRACK_CHAIN_EVENTS
+ bool "Register multiple callbacks to ct events"
+ depends on NF_CONNTRACK_EVENTS
+ help
+ Support multiple registrations.
+
+ If unsure, say `N'.
+
config NF_CONNTRACK_TIMEOUT
bool 'Connection tracking timeout'
depends on NETFILTER_ADVANCED
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -2572,6 +2572,9 @@ int nf_conntrack_init_net(struct net *ne
nf_conntrack_helper_pernet_init(net);
nf_conntrack_proto_pernet_init(net);
+#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
+ ATOMIC_INIT_NOTIFIER_HEAD(&net->ct.nf_conntrack_chain);
+#endif
return 0;
err_expect:
--- a/net/netfilter/nf_conntrack_ecache.c
+++ b/net/netfilter/nf_conntrack_ecache.c
@@ -17,6 +17,9 @@
#include <linux/stddef.h>
#include <linux/err.h>
#include <linux/percpu.h>
+#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
+#include <linux/notifier.h>
+#endif
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
@@ -116,7 +119,35 @@ static void ecache_work(struct work_stru
if (delay >= 0)
schedule_delayed_work(&ctnet->ecache_dwork, delay);
}
+#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
+int nf_conntrack_eventmask_report(unsigned int eventmask, struct nf_conn *ct,
+ u32 portid, int report)
+{
+ struct nf_conntrack_ecache *e;
+ struct net *net = nf_ct_net(ct);
+
+ e = nf_ct_ecache_find(ct);
+ if (e == NULL)
+ return 0;
+
+ if (nf_ct_is_confirmed(ct)) {
+ struct nf_ct_event item = {
+ .ct = ct,
+ .portid = e->portid ? e->portid : portid,
+ .report = report
+ };
+ /* This is a resent of a destroy event? If so, skip missed */
+ unsigned long missed = e->portid ? 0 : e->missed;
+
+ if (!((eventmask | missed) & e->ctmask))
+ return 0;
+ atomic_notifier_call_chain(&net->ct.nf_conntrack_chain, eventmask | missed, &item);
+ }
+
+ return 0;
+}
+#else
int nf_conntrack_eventmask_report(unsigned int eventmask, struct nf_conn *ct,
u32 portid, int report)
{
@@ -171,10 +202,52 @@ out_unlock:
rcu_read_unlock();
return ret;
}
+#endif
EXPORT_SYMBOL_GPL(nf_conntrack_eventmask_report);
/* deliver cached events and clear cache entry - must be called with locally
* disabled softirqs */
+#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
+void nf_ct_deliver_cached_events(struct nf_conn *ct)
+{
+ unsigned long events, missed;
+ struct nf_conntrack_ecache *e;
+ struct nf_ct_event item;
+ struct net *net = nf_ct_net(ct);
+
+ e = nf_ct_ecache_find(ct);
+ if (e == NULL)
+ return;
+
+ events = xchg(&e->cache, 0);
+
+ if (!nf_ct_is_confirmed(ct) || nf_ct_is_dying(ct) || !events)
+ return;
+
+ /* We make a copy of the missed event cache without taking
+ * the lock, thus we may send missed events twice. However,
+ * this does not harm and it happens very rarely. */
+ missed = e->missed;
+
+ if (!((events | missed) & e->ctmask))
+ return;
+
+ item.ct = ct;
+ item.portid = 0;
+ item.report = 0;
+
+ atomic_notifier_call_chain(&net->ct.nf_conntrack_chain,
+ events | missed,
+ &item);
+
+ if (likely(!missed))
+ return;
+
+ spin_lock_bh(&ct->lock);
+ e->missed &= ~missed;
+ spin_unlock_bh(&ct->lock);
+}
+#else
void nf_ct_deliver_cached_events(struct nf_conn *ct)
{
struct net *net = nf_ct_net(ct);
@@ -225,6 +298,7 @@ void nf_ct_deliver_cached_events(struct
out_unlock:
rcu_read_unlock();
}
+#endif
EXPORT_SYMBOL_GPL(nf_ct_deliver_cached_events);
void nf_ct_expect_event_report(enum ip_conntrack_expect_events event,
@@ -257,6 +331,13 @@ out_unlock:
rcu_read_unlock();
}
+#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
+int nf_conntrack_register_notifier(struct net *net,
+ struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(&net->ct.nf_conntrack_chain, nb);
+}
+#else
int nf_conntrack_register_notifier(struct net *net,
struct nf_ct_event_notifier *new)
{
@@ -277,8 +358,15 @@ out_unlock:
mutex_unlock(&nf_ct_ecache_mutex);
return ret;
}
+#endif
EXPORT_SYMBOL_GPL(nf_conntrack_register_notifier);
+#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
+int nf_conntrack_unregister_notifier(struct net *net, struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&net->ct.nf_conntrack_chain, nb);
+}
+#else
void nf_conntrack_unregister_notifier(struct net *net,
struct nf_ct_event_notifier *new)
{
@@ -292,6 +380,7 @@ void nf_conntrack_unregister_notifier(st
mutex_unlock(&nf_ct_ecache_mutex);
/* synchronize_rcu() is called from ctnetlink_exit. */
}
+#endif
EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier);
int nf_ct_expect_register_notifier(struct net *net,
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -680,13 +680,20 @@ static size_t ctnetlink_nlmsg_size(const
}
static int
+#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
+ctnetlink_conntrack_event(struct notifier_block *this, unsigned long events, void *ptr)
+#else
ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
+#endif
{
const struct nf_conntrack_zone *zone;
struct net *net;
struct nlmsghdr *nlh;
struct nfgenmsg *nfmsg;
struct nlattr *nest_parms;
+#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
+ struct nf_ct_event *item = (struct nf_ct_event *)ptr;
+#endif
struct nf_conn *ct = item->ct;
struct sk_buff *skb;
unsigned int type;
@@ -3508,9 +3515,15 @@ static int ctnetlink_stat_exp_cpu(struct
}
#ifdef CONFIG_NF_CONNTRACK_EVENTS
+#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
+static struct notifier_block ctnl_notifier = {
+ .notifier_call = ctnetlink_conntrack_event,
+};
+#else
static struct nf_ct_event_notifier ctnl_notifier = {
.fcn = ctnetlink_conntrack_event,
};
+#endif
static struct nf_exp_event_notifier ctnl_notifier_exp = {
.fcn = ctnetlink_expect_event,


@@ -0,0 +1,253 @@
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -52,6 +52,9 @@ struct br_ip_list {
extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *));
+extern void br_dev_update_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *nlstats);
+
#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING)
int br_multicast_list_adjacent(struct net_device *dev,
struct list_head *br_ip_list);
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -826,6 +826,10 @@ struct sk_buff {
#endif
__u8 gro_skip:1;
+#ifdef CONFIG_SHORTCUT_FE
+ __u8 fast_forwarded:1;
+#endif
+
#ifdef CONFIG_NET_SCHED
__u16 tc_index; /* traffic control index */
#endif
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -18,6 +18,10 @@ struct timer_list {
void (*function)(struct timer_list *);
u32 flags;
+#ifdef CONFIG_SHORTCUT_FE
+ unsigned long cust_data;
+#endif
+
#ifdef CONFIG_LOCKDEP
struct lockdep_map lockdep_map;
#endif
--- a/include/net/netfilter/nf_conntrack_ecache.h
+++ b/include/net/netfilter/nf_conntrack_ecache.h
@@ -75,6 +75,8 @@ struct nf_ct_event {
#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
extern int nf_conntrack_register_notifier(struct net *net, struct notifier_block *nb);
extern int nf_conntrack_unregister_notifier(struct net *net, struct notifier_block *nb);
+extern int nf_conntrack_register_chain_notifier(struct net *net, struct notifier_block *nb);
+extern int nf_conntrack_unregister_chain_notifier(struct net *net, struct notifier_block *nb);
#else
struct nf_ct_event_notifier {
int (*fcn)(unsigned int events, struct nf_ct_event *item);
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -763,6 +763,28 @@ void br_port_flags_change(struct net_bri
br_recalculate_neigh_suppress_enabled(br);
}
+void br_dev_update_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *nlstats)
+{
+ struct net_bridge *br;
+ struct pcpu_sw_netstats *stats;
+
+ /* Is this a bridge? */
+ if (!(dev->priv_flags & IFF_EBRIDGE))
+ return;
+
+ br = netdev_priv(dev);
+ stats = this_cpu_ptr(br->stats);
+
+ u64_stats_update_begin(&stats->syncp);
+ stats->rx_packets += nlstats->rx_packets;
+ stats->rx_bytes += nlstats->rx_bytes;
+ stats->tx_packets += nlstats->tx_packets;
+ stats->tx_bytes += nlstats->tx_bytes;
+ u64_stats_update_end(&stats->syncp);
+}
+EXPORT_SYMBOL_GPL(br_dev_update_stats);
+
bool br_port_flag_is_set(const struct net_device *dev, unsigned long flag)
{
struct net_bridge_port *p;
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3197,9 +3197,17 @@ static int xmit_one(struct sk_buff *skb,
unsigned int len;
int rc;
+#ifdef CONFIG_SHORTCUT_FE
+ /* If this skb has been fast forwarded then we don't want it to
+ * go to any taps (by definition we're trying to bypass them).
+ */
+ if (!skb->fast_forwarded) {
+#endif
if (dev_nit_active(dev))
dev_queue_xmit_nit(skb, dev);
-
+#ifdef CONFIG_SHORTCUT_FE
+ }
+#endif
#ifdef CONFIG_ETHERNET_PACKET_MANGLE
if (!dev->eth_mangle_tx ||
(skb = dev->eth_mangle_tx(dev, skb)) != NULL)
@@ -4714,6 +4722,11 @@ void netdev_rx_handler_unregister(struct
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
+#ifdef CONFIG_SHORTCUT_FE
+int (*athrs_fast_nat_recv)(struct sk_buff *skb) __rcu __read_mostly;
+EXPORT_SYMBOL_GPL(athrs_fast_nat_recv);
+#endif
+
/*
* Limit the use of PFMEMALLOC reserves to those protocols that implement
* the special handling of PFMEMALLOC skbs.
@@ -4764,6 +4777,10 @@ static int __netif_receive_skb_core(stru
int ret = NET_RX_DROP;
__be16 type;
+#ifdef CONFIG_SHORTCUT_FE
+ int (*fast_recv)(struct sk_buff *skb);
+#endif
+
net_timestamp_check(!netdev_tstamp_prequeue, skb);
trace_netif_receive_skb(skb);
@@ -4803,6 +4820,16 @@ another_round:
goto out;
}
+#ifdef CONFIG_SHORTCUT_FE
+ fast_recv = rcu_dereference(athrs_fast_nat_recv);
+ if (fast_recv) {
+ if (fast_recv(skb)) {
+ ret = NET_RX_SUCCESS;
+ goto out;
+ }
+ }
+#endif
+
if (skb_skip_tc_classify(skb))
goto skip_classify;
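
The hunks above export an RCU-protected receive hook (athrs_fast_nat_recv) that __netif_receive_skb_core consults before normal processing. A minimal sketch of a forwarding-engine module attaching to that hook (hypothetical example names, not part of this patch):

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>

/* Hook exported from net/core/dev.c by the hunk above. */
extern int (*athrs_fast_nat_recv)(struct sk_buff *skb);

static int example_fast_recv(struct sk_buff *skb)
{
	/* Return 1 when the skb was consumed by the fast path,
	 * 0 to hand it back to the normal stack. */
	return 0;
}

static int __init example_fe_init(void)
{
	rcu_assign_pointer(athrs_fast_nat_recv, example_fast_recv);
	return 0;
}

static void __exit example_fe_exit(void)
{
	rcu_assign_pointer(athrs_fast_nat_recv, NULL);
	synchronize_rcu();
}

module_init(example_fe_init);
module_exit(example_fe_exit);
MODULE_LICENSE("GPL");
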
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -473,3 +473,6 @@ config HAVE_CBPF_JIT
# Extended BPF JIT (eBPF)
config HAVE_EBPF_JIT
bool
+
+config SHORTCUT_FE
+ bool "Enables kernel network stack path for Shortcut Forwarding Engine
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -34,11 +34,19 @@
/* Do not check the TCP window for incoming packets */
static int nf_ct_tcp_no_window_check __read_mostly = 1;
+#ifdef CONFIG_SHORTCUT_FE
+EXPORT_SYMBOL_GPL(nf_ct_tcp_no_window_check);
+#endif
+
/* "Be conservative in what you do,
be liberal in what you accept from others."
If it's non-zero, we mark only out of window RST segments as INVALID. */
static int nf_ct_tcp_be_liberal __read_mostly = 0;
+#ifdef CONFIG_SHORTCUT_FE
+EXPORT_SYMBOL_GPL(nf_ct_tcp_be_liberal);
+#endif
+
/* If it is set to zero, we disable picking up already established
connections. */
static int nf_ct_tcp_loose __read_mostly = 1;
--- a/net/netfilter/nf_conntrack_ecache.c
+++ b/net/netfilter/nf_conntrack_ecache.c
@@ -158,7 +158,11 @@ int nf_conntrack_eventmask_report(unsign
rcu_read_lock();
notify = rcu_dereference(net->ct.nf_conntrack_event_cb);
+#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
+ if (!notify && !rcu_dereference_raw(net->ct.nf_conntrack_chain.head))
+#else
if (!notify)
+#endif
goto out_unlock;
e = nf_ct_ecache_find(ct);
@@ -177,7 +181,14 @@ int nf_conntrack_eventmask_report(unsign
if (!((eventmask | missed) & e->ctmask))
goto out_unlock;
+#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
+ ret = atomic_notifier_call_chain(&net->ct.nf_conntrack_chain,
+ eventmask | missed, &item);
+ if (notify)
+ ret = notify->fcn(eventmask | missed, &item);
+#else
ret = notify->fcn(eventmask | missed, &item);
+#endif
if (unlikely(ret < 0 || missed)) {
spin_lock_bh(&ct->lock);
if (ret < 0) {
@@ -259,7 +270,11 @@ void nf_ct_deliver_cached_events(struct
rcu_read_lock();
notify = rcu_dereference(net->ct.nf_conntrack_event_cb);
+#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
+ if ((notify == NULL) && !rcu_dereference_raw(net->ct.nf_conntrack_chain.head))
+#else
if (notify == NULL)
+#endif
goto out_unlock;
e = nf_ct_ecache_find(ct);
@@ -283,7 +298,15 @@ void nf_ct_deliver_cached_events(struct
item.portid = 0;
item.report = 0;
+#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
+ ret = atomic_notifier_call_chain(&net->ct.nf_conntrack_chain,
+ events | missed,
+ &item);
+ if (notify != NULL)
+ ret = notify->fcn(events | missed, &item);
+#else
ret = notify->fcn(events | missed, &item);
+#endif
if (likely(ret == 0 && !missed))
goto out_unlock;
@@ -337,6 +360,11 @@ int nf_conntrack_register_notifier(struc
{
return atomic_notifier_chain_register(&net->ct.nf_conntrack_chain, nb);
}
+int nf_conntrack_register_chain_notifier(struct net *net, struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(&net->ct.nf_conntrack_chain, nb);
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_register_chain_notifier);
#else
int nf_conntrack_register_notifier(struct net *net,
struct nf_ct_event_notifier *new)
@@ -366,6 +394,11 @@ int nf_conntrack_unregister_notifier(str
{
return atomic_notifier_chain_unregister(&net->ct.nf_conntrack_chain, nb);
}
+int nf_conntrack_unregister_chain_notifier(struct net *net, struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&net->ct.nf_conntrack_chain, nb);
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_unregister_chain_notifier);
#else
void nf_conntrack_unregister_notifier(struct net *net,
struct nf_ct_event_notifier *new)