mirror of https://github.com/Ysurac/openmptcprouter.git synced 2025-03-09 15:40:20 +00:00
suyuan 2022-11-22 14:19:14 +08:00
parent 064a17e169
commit 766555f994
4 changed files with 905 additions and 0 deletions

@@ -0,0 +1,34 @@
From ba041eb8000e4a8f556ca641335894f7e7429dbb Mon Sep 17 00:00:00 2001
From: Ailick <277498654@qq.com>
Date: Sat, 21 Aug 2021 17:12:22 +0800
Subject: [PATCH] net: patch linux kernel to support shortcut-fe-cm
---
nf_conntrack_proto_udp.c | 3 ++-
nf_conntrack_timeout.h | 2 ++
2 files changed, 4 insertions(+), 1 deletion(-)
--- a/include/net/netfilter/nf_conntrack_timeout.h
+++ b/include/net/netfilter/nf_conntrack_timeout.h
@@ -124,4 +124,6 @@ extern struct nf_ct_timeout *(*nf_ct_tim
extern void (*nf_ct_timeout_put_hook)(struct nf_ct_timeout *timeout);
#endif
+extern unsigned int *udp_get_timeouts(struct net *net);
+
#endif /* _NF_CONNTRACK_TIMEOUT_H */
--- a/net/netfilter/nf_conntrack_proto_udp.c
+++ b/net/netfilter/nf_conntrack_proto_udp.c
@@ -29,10 +29,11 @@ static const unsigned int udp_timeouts[U
[UDP_CT_REPLIED] = 120*HZ,
};
-static unsigned int *udp_get_timeouts(struct net *net)
+unsigned int *udp_get_timeouts(struct net *net)
{
return nf_udp_pernet(net)->timeouts;
}
+EXPORT_SYMBOL(udp_get_timeouts);
static void udp_error_log(const struct sk_buff *skb,
const struct nf_hook_state *state,
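The only purpose of the export above is to let an out-of-tree connection manager such as shortcut-fe-cm read the per-netns UDP conntrack timeouts instead of duplicating them. A minimal sketch of how a module might consume the symbol once this patch is applied follows; the function name sfe_cm_udp_timeout() and the exact include list are assumptions for illustration, not part of shortcut-fe or of this patch.

/* Hedged sketch: consuming the newly exported udp_get_timeouts() from an
 * out-of-tree module. Only udp_get_timeouts() and the UDP_CT_* indices come
 * from the kernel; everything else here is illustrative.
 */
#include <linux/types.h>                        /* bool */
#include <net/net_namespace.h>                  /* struct net */
#include <net/netns/conntrack.h>                /* UDP_CT_UNREPLIED, UDP_CT_REPLIED */
#include <net/netfilter/nf_conntrack_timeout.h> /* udp_get_timeouts(), per this patch */

static unsigned int sfe_cm_udp_timeout(struct net *net, bool replied)
{
        unsigned int *timeouts = udp_get_timeouts(net); /* values are in jiffies */

        return replied ? timeouts[UDP_CT_REPLIED] : timeouts[UDP_CT_UNREPLIED];
}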

@@ -0,0 +1,212 @@
From eda40b8c8c82e0f2789d6bc8bf63846dce2e8f32 Mon Sep 17 00:00:00 2001
From: Kevin Darbyshire-Bryant <ldir@darbyshire-bryant.me.uk>
Date: Sat, 23 Mar 2019 09:29:49 +0000
Subject: [PATCH] netfilter: connmark: introduce set-dscpmark
set-dscpmark is a method of storing the DSCP of an ip packet into
conntrack mark. In combination with a suitable tc filter action
(act_ctinfo) DSCP values are able to be stored in the mark on egress and
restored on ingress across links that otherwise alter or bleach DSCP.
This is useful for qdiscs such as CAKE which are able to shape according
to policies based on DSCP.
Ingress classification is traditionally a challenging task since
iptables rules haven't yet run and tc filter/eBPF programs are pre-NAT
lookups, hence are unable to see internal IPv4 addresses as used on the
typical home masquerading gateway.
x_tables CONNMARK set-dscpmark target solves the problem of storing the
DSCP to the conntrack mark in a way suitable for the new act_ctinfo tc
action to restore.
The set-dscpmark option accepts 2 parameters, a 32bit 'dscpmask' and a
32bit 'statemask'. The dscp mask must be 6 contiguous bits and
represents the area where the DSCP will be stored in the connmark. The
state mask is a minimum 1 bit length mask that must not overlap with the
dscpmask. It represents a flag which is set when the DSCP has been
stored in the conntrack mark. This is useful to implement a 'one shot'
iptables based classification where the 'complicated' iptables rules are
only run once to classify the connection on initial (egress) packet and
subsequent packets are all marked/restored with the same DSCP. A state
mask of zero disables the setting of a status bit/s.
example syntax with a suitably modified iptables user space application:
iptables -A QOS_MARK_eth0 -t mangle -j CONNMARK --set-dscpmark 0xfc000000/0x01000000
Would store the DSCP in the top 6 bits of the 32bit mark field, and use
the LSB of the top byte as the 'DSCP has been stored' marker.
|----0xFC----conntrack mark----000000---|
| Bits 31-26 | bit 25 | bit24 |~~~ Bit 0|
| DSCP       | unused | flag  | unused  |
|-----------------------0x01---000000---|
      ^                   ^
      |                   |
      ---|             Conditional flag
         |             set this when dscp
|-ip diffserv-|        stored in mark
| 6 bits      |
|-------------|
an identically configured tc action to restore looks like:
tc filter show dev eth0 ingress
filter parent ffff: protocol all pref 10 u32 chain 0
filter parent ffff: protocol all pref 10 u32 chain 0 fh 800: ht divisor 1
filter parent ffff: protocol all pref 10 u32 chain 0 fh 800::800 order 2048 key ht 800 bkt 0 flowid 1: not_in_hw
match 00000000/00000000 at 0
action order 1: ctinfo zone 0 pipe
index 2 ref 1 bind 1 dscp 0xfc000000/0x1000000
action order 2: mirred (Egress Redirect to device ifb4eth0) stolen
index 1 ref 1 bind 1
|----0xFC----conntrack mark----000000---|
| Bits 31-26 | bit 25 | bit24 |~~~ Bit 0|
| DSCP       | unused | flag  | unused  |
|-----------------------0x01---000000---|
      |                   |
      |                   |
      ---|             Conditional flag
         v             only restore if set
|-ip diffserv-|
| 6 bits      |
|-------------|
Signed-off-by: Kevin Darbyshire-Bryant <ldir@darbyshire-bryant.me.uk>
---
include/uapi/linux/netfilter/xt_connmark.h | 10 ++++
net/netfilter/xt_connmark.c | 55 ++++++++++++++++++----
2 files changed, 57 insertions(+), 8 deletions(-)
--- a/include/uapi/linux/netfilter/xt_connmark.h
+++ b/include/uapi/linux/netfilter/xt_connmark.h
@@ -20,6 +20,11 @@ enum {
};
enum {
+ XT_CONNMARK_VALUE = (1 << 0),
+ XT_CONNMARK_DSCP = (1 << 1)
+};
+
+enum {
D_SHIFT_LEFT = 0,
D_SHIFT_RIGHT,
};
@@ -34,6 +39,11 @@ struct xt_connmark_tginfo2 {
__u8 shift_dir, shift_bits, mode;
};
+struct xt_connmark_tginfo3 {
+ __u32 ctmark, ctmask, nfmask;
+ __u8 shift_dir, shift_bits, mode, func;
+};
+
struct xt_connmark_mtinfo1 {
__u32 mark, mask;
__u8 invert;
--- a/net/netfilter/xt_connmark.c
+++ b/net/netfilter/xt_connmark.c
@@ -24,12 +24,13 @@ MODULE_ALIAS("ipt_connmark");
MODULE_ALIAS("ip6t_connmark");
static unsigned int
-connmark_tg_shift(struct sk_buff *skb, const struct xt_connmark_tginfo2 *info)
+connmark_tg_shift(struct sk_buff *skb, const struct xt_connmark_tginfo3 *info)
{
enum ip_conntrack_info ctinfo;
u_int32_t new_targetmark;
struct nf_conn *ct;
u_int32_t newmark;
+ u_int8_t dscp;
ct = nf_ct_get(skb, &ctinfo);
if (ct == NULL)
@@ -37,12 +38,24 @@ connmark_tg_shift(struct sk_buff *skb, c
switch (info->mode) {
case XT_CONNMARK_SET:
- newmark = (ct->mark & ~info->ctmask) ^ info->ctmark;
- if (info->shift_dir == D_SHIFT_RIGHT)
- newmark >>= info->shift_bits;
- else
- newmark <<= info->shift_bits;
+ newmark = ct->mark;
+ if (info->func & XT_CONNMARK_VALUE) {
+ newmark = (newmark & ~info->ctmask) ^ info->ctmark;
+ if (info->shift_dir == D_SHIFT_RIGHT)
+ newmark >>= info->shift_bits;
+ else
+ newmark <<= info->shift_bits;
+ } else if (info->func & XT_CONNMARK_DSCP) {
+ if (skb->protocol == htons(ETH_P_IP))
+ dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
+ else /* protocol doesn't have diffserv */
+ break;
+ newmark = (newmark & ~info->ctmark) |
+ (info->ctmask | (dscp << info->shift_bits));
+ }
if (ct->mark != newmark) {
ct->mark = newmark;
nf_conntrack_event_cache(IPCT_MARK, ct);
@@ -81,20 +94,36 @@ static unsigned int
connmark_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct xt_connmark_tginfo1 *info = par->targinfo;
- const struct xt_connmark_tginfo2 info2 = {
+ const struct xt_connmark_tginfo3 info3 = {
.ctmark = info->ctmark,
.ctmask = info->ctmask,
.nfmask = info->nfmask,
.mode = info->mode,
+ .func = XT_CONNMARK_VALUE
};
- return connmark_tg_shift(skb, &info2);
+ return connmark_tg_shift(skb, &info3);
}
static unsigned int
connmark_tg_v2(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct xt_connmark_tginfo2 *info = par->targinfo;
+ const struct xt_connmark_tginfo3 info3 = {
+ .ctmark = info->ctmark,
+ .ctmask = info->ctmask,
+ .nfmask = info->nfmask,
+ .mode = info->mode,
+ .func = XT_CONNMARK_VALUE
+ };
+
+ return connmark_tg_shift(skb, &info3);
+}
+
+static unsigned int
+connmark_tg_v3(struct sk_buff *skb, const struct xt_action_param *par)
+{
+ const struct xt_connmark_tginfo3 *info = par->targinfo;
return connmark_tg_shift(skb, info);
}
@@ -165,6 +194,16 @@ static struct xt_target connmark_tg_reg[
.targetsize = sizeof(struct xt_connmark_tginfo2),
.destroy = connmark_tg_destroy,
.me = THIS_MODULE,
+ },
+ {
+ .name = "CONNMARK",
+ .revision = 3,
+ .family = NFPROTO_UNSPEC,
+ .checkentry = connmark_tg_check,
+ .target = connmark_tg_v3,
+ .targetsize = sizeof(struct xt_connmark_tginfo3),
+ .destroy = connmark_tg_destroy,
+ .me = THIS_MODULE,
}
};
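As a worked example of the layout described in the commit message, the sketch below (plain userspace C, not part of the patch) folds a DSCP value into the 32-bit mark for --set-dscpmark 0xfc000000/0x01000000. It follows the documented semantics: clear the DSCP field and the flag, store the 6-bit DSCP at the mask offset, then set the flag. store_dscp() is a made-up helper name, and this is not the exact expression used in connmark_tg_shift() above.

/* Hedged sketch: fold a DSCP value into the 32-bit conntrack mark per the
 * --set-dscpmark 0xfc000000/0x01000000 example above. Illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t store_dscp(uint32_t mark, uint8_t dscp,
                           uint32_t dscpmask, uint32_t statemask)
{
        unsigned int shift = __builtin_ctz(dscpmask);   /* 26 for 0xfc000000 */

        mark &= ~(dscpmask | statemask);                /* clear old DSCP and flag */
        mark |= ((uint32_t)dscp << shift) & dscpmask;   /* bits 31-26: DSCP        */
        mark |= statemask;                              /* bit 24: "DSCP stored"   */
        return mark;
}

int main(void)
{
        /* DSCP 46 (EF) -> 0xb9000000: 46 << 26 is 0xb8000000, plus the flag bit */
        printf("0x%08x\n", (unsigned)store_dscp(0, 46, 0xfc000000u, 0x01000000u));
        return 0;
}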

@@ -0,0 +1,70 @@
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -163,6 +163,8 @@ struct nf_flow_table_hw {
int nf_flow_table_hw_register(const struct nf_flow_table_hw *offload);
void nf_flow_table_hw_unregister(const struct nf_flow_table_hw *offload);
+void nf_flow_table_acct(struct flow_offload *flow, struct sk_buff *skb, int dir);
+
extern struct work_struct nf_flow_offload_hw_work;
#define MODULE_ALIAS_NF_FLOWTABLE(family) \
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -13,6 +13,7 @@
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_tuple.h>
+#include <net/netfilter/nf_conntrack_acct.h>
struct flow_offload_entry {
struct flow_offload flow;
@@ -177,6 +178,22 @@ void flow_offload_free(struct flow_offlo
}
EXPORT_SYMBOL_GPL(flow_offload_free);
+void nf_flow_table_acct(struct flow_offload *flow, struct sk_buff *skb, int dir)
+{
+ struct flow_offload_entry *entry;
+ struct nf_conn_acct *acct;
+
+ entry = container_of(flow, struct flow_offload_entry, flow);
+ acct = nf_conn_acct_find(entry->ct);
+ if (acct) {
+ struct nf_conn_counter *counter = acct->counter;
+
+ atomic64_inc(&counter[dir].packets);
+ atomic64_add(skb->len, &counter[dir].bytes);
+ }
+}
+EXPORT_SYMBOL_GPL(nf_flow_table_acct);
+
static u32 flow_offload_hash(const void *data, u32 len, u32 seed)
{
const struct flow_offload_tuple *tuple = data;
--- a/net/netfilter/nf_flow_table_ip.c
+++ b/net/netfilter/nf_flow_table_ip.c
@@ -12,6 +12,7 @@
#include <net/ip6_route.h>
#include <net/neighbour.h>
#include <net/netfilter/nf_flow_table.h>
+
/* For layer 4 checksum field offset. */
#include <linux/tcp.h>
#include <linux/udp.h>
@@ -288,6 +289,7 @@ nf_flow_offload_ip_hook(void *priv, stru
skb->dev = outdev;
nexthop = rt_nexthop(rt, flow->tuplehash[!dir].tuple.src_v4.s_addr);
skb_dst_set_noref(skb, &rt->dst);
+ nf_flow_table_acct(flow, skb, dir);
neigh_xmit(NEIGH_ARP_TABLE, outdev, &nexthop, skb);
return NF_STOLEN;
@@ -518,6 +520,7 @@ nf_flow_offload_ipv6_hook(void *priv, st
skb->dev = outdev;
nexthop = rt6_nexthop(rt, &flow->tuplehash[!dir].tuple.src_v6);
skb_dst_set_noref(skb, &rt->dst);
+ nf_flow_table_acct(flow, skb, dir);
neigh_xmit(NEIGH_ND_TABLE, outdev, nexthop, skb);
return NF_STOLEN;

@@ -0,0 +1,589 @@
From: Felix Fietkau <nbd@nbd.name>
Date: Tue, 20 Feb 2018 15:56:02 +0100
Subject: [PATCH] netfilter: add xt_OFFLOAD target
Signed-off-by: Felix Fietkau <nbd@nbd.name>
---
create mode 100644 net/netfilter/xt_OFFLOAD.c
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -56,8 +56,6 @@ config NF_TABLES_ARP
help
This option enables the ARP support for nf_tables.
-endif # NF_TABLES
-
config NF_FLOW_TABLE_IPV4
tristate "Netfilter flow table IPv4 module"
depends on NF_FLOW_TABLE
@@ -66,6 +64,8 @@ config NF_FLOW_TABLE_IPV4
To compile it as a module, choose M here.
+endif # NF_TABLES
+
config NF_DUP_IPV4
tristate "Netfilter IPv4 packet duplication to alternate destination"
depends on !NF_CONNTRACK || NF_CONNTRACK
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -45,7 +45,6 @@ config NFT_FIB_IPV6
multicast or blackhole.
endif # NF_TABLES_IPV6
-endif # NF_TABLES
config NF_FLOW_TABLE_IPV6
tristate "Netfilter flow table IPv6 module"
@@ -55,6 +54,8 @@ config NF_FLOW_TABLE_IPV6
To compile it as a module, choose M here.
+endif # NF_TABLES
+
config NF_DUP_IPV6
tristate "Netfilter IPv6 packet duplication to alternate destination"
depends on !NF_CONNTRACK || NF_CONNTRACK
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -689,8 +689,6 @@ config NFT_FIB_NETDEV
endif # NF_TABLES_NETDEV
-endif # NF_TABLES
-
config NF_FLOW_TABLE_INET
tristate "Netfilter flow table mixed IPv4/IPv6 module"
depends on NF_FLOW_TABLE
@@ -699,11 +697,12 @@ config NF_FLOW_TABLE_INET
To compile it as a module, choose M here.
+endif # NF_TABLES
+
config NF_FLOW_TABLE
tristate "Netfilter flow table module"
depends on NETFILTER_INGRESS
depends on NF_CONNTRACK
- depends on NF_TABLES
help
This option adds the flow table core infrastructure.
@@ -992,6 +991,15 @@ config NETFILTER_XT_TARGET_NOTRACK
depends on NETFILTER_ADVANCED
select NETFILTER_XT_TARGET_CT
+config NETFILTER_XT_TARGET_FLOWOFFLOAD
+ tristate '"FLOWOFFLOAD" target support'
+ depends on NF_FLOW_TABLE
+ depends on NETFILTER_INGRESS
+ help
+ This option adds a `FLOWOFFLOAD' target, which uses the nf_flow_offload
+ module to speed up processing of packets by bypassing the usual
+ netfilter chains
+
config NETFILTER_XT_TARGET_RATEEST
tristate '"RATEEST" target support'
depends on NETFILTER_ADVANCED
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -141,6 +141,7 @@ obj-$(CONFIG_NETFILTER_XT_TARGET_CLASSIF
obj-$(CONFIG_NETFILTER_XT_TARGET_CONNSECMARK) += xt_CONNSECMARK.o
obj-$(CONFIG_NETFILTER_XT_TARGET_CT) += xt_CT.o
obj-$(CONFIG_NETFILTER_XT_TARGET_DSCP) += xt_DSCP.o
+obj-$(CONFIG_NETFILTER_XT_TARGET_FLOWOFFLOAD) += xt_FLOWOFFLOAD.o
obj-$(CONFIG_NETFILTER_XT_TARGET_HL) += xt_HL.o
obj-$(CONFIG_NETFILTER_XT_TARGET_HMARK) += xt_HMARK.o
obj-$(CONFIG_NETFILTER_XT_TARGET_LED) += xt_LED.o
--- /dev/null
+++ b/net/netfilter/xt_FLOWOFFLOAD.c
@@ -0,0 +1,427 @@
+/*
+ * Copyright (C) 2018 Felix Fietkau <nbd@nbd.name>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/xt_FLOWOFFLOAD.h>
+#include <net/ip.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_extend.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_flow_table.h>
+
+static struct nf_flowtable nf_flowtable;
+static HLIST_HEAD(hooks);
+static DEFINE_SPINLOCK(hooks_lock);
+static struct delayed_work hook_work;
+
+struct xt_flowoffload_hook {
+ struct hlist_node list;
+ struct nf_hook_ops ops;
+ struct net *net;
+ bool registered;
+ bool used;
+};
+
+static unsigned int
+xt_flowoffload_net_hook(void *priv, struct sk_buff *skb,
+ const struct nf_hook_state *state)
+{
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ return nf_flow_offload_ip_hook(priv, skb, state);
+ case htons(ETH_P_IPV6):
+ return nf_flow_offload_ipv6_hook(priv, skb, state);
+ }
+
+ return NF_ACCEPT;
+}
+
+int nf_flow_table_iterate(struct nf_flowtable *flow_table,
+ void (*iter)(struct flow_offload *flow, void *data),
+ void *data);
+
+static int
+xt_flowoffload_create_hook(struct net_device *dev)
+{
+ struct xt_flowoffload_hook *hook;
+ struct nf_hook_ops *ops;
+
+ hook = kzalloc(sizeof(*hook), GFP_ATOMIC);
+ if (!hook)
+ return -ENOMEM;
+
+ ops = &hook->ops;
+ ops->pf = NFPROTO_NETDEV;
+ ops->hooknum = NF_NETDEV_INGRESS;
+ ops->priority = 10;
+ ops->priv = &nf_flowtable;
+ ops->hook = xt_flowoffload_net_hook;
+ ops->dev = dev;
+
+ hlist_add_head(&hook->list, &hooks);
+ mod_delayed_work(system_power_efficient_wq, &hook_work, 0);
+
+ return 0;
+}
+
+static struct xt_flowoffload_hook *
+flow_offload_lookup_hook(struct net_device *dev)
+{
+ struct xt_flowoffload_hook *hook;
+
+ hlist_for_each_entry(hook, &hooks, list) {
+ if (hook->ops.dev == dev)
+ return hook;
+ }
+
+ return NULL;
+}
+
+static void
+xt_flowoffload_check_device(struct net_device *dev)
+{
+ struct xt_flowoffload_hook *hook;
+
+ spin_lock_bh(&hooks_lock);
+ hook = flow_offload_lookup_hook(dev);
+ if (hook)
+ hook->used = true;
+ else
+ xt_flowoffload_create_hook(dev);
+ spin_unlock_bh(&hooks_lock);
+}
+
+static void
+xt_flowoffload_register_hooks(void)
+{
+ struct xt_flowoffload_hook *hook;
+
+restart:
+ hlist_for_each_entry(hook, &hooks, list) {
+ if (hook->registered)
+ continue;
+
+ hook->registered = true;
+ hook->net = dev_net(hook->ops.dev);
+ spin_unlock_bh(&hooks_lock);
+ nf_register_net_hook(hook->net, &hook->ops);
+ spin_lock_bh(&hooks_lock);
+ goto restart;
+ }
+
+}
+
+static void
+xt_flowoffload_cleanup_hooks(void)
+{
+ struct xt_flowoffload_hook *hook;
+
+restart:
+ hlist_for_each_entry(hook, &hooks, list) {
+ if (hook->used || !hook->registered)
+ continue;
+
+ hlist_del(&hook->list);
+ spin_unlock_bh(&hooks_lock);
+ nf_unregister_net_hook(hook->net, &hook->ops);
+ kfree(hook);
+ spin_lock_bh(&hooks_lock);
+ goto restart;
+ }
+
+}
+
+static void
+xt_flowoffload_check_hook(struct flow_offload *flow, void *data)
+{
+ struct flow_offload_tuple *tuple = &flow->tuplehash[0].tuple;
+ struct xt_flowoffload_hook *hook;
+ bool *found = data;
+ struct rtable *rt = (struct rtable *)tuple->dst_cache;
+
+ spin_lock_bh(&hooks_lock);
+ hlist_for_each_entry(hook, &hooks, list) {
+ if (hook->ops.dev->ifindex != tuple->iifidx &&
+ hook->ops.dev->ifindex != rt->dst.dev->ifindex)
+ continue;
+
+ hook->used = true;
+ *found = true;
+ }
+ spin_unlock_bh(&hooks_lock);
+}
+
+static void
+xt_flowoffload_hook_work(struct work_struct *work)
+{
+ struct xt_flowoffload_hook *hook;
+ bool found = false;
+ int err;
+
+ spin_lock_bh(&hooks_lock);
+ xt_flowoffload_register_hooks();
+ hlist_for_each_entry(hook, &hooks, list)
+ hook->used = false;
+ spin_unlock_bh(&hooks_lock);
+
+ err = nf_flow_table_iterate(&nf_flowtable, xt_flowoffload_check_hook,
+ &found);
+ if (err && err != -EAGAIN)
+ goto out;
+
+ spin_lock_bh(&hooks_lock);
+ xt_flowoffload_cleanup_hooks();
+ spin_unlock_bh(&hooks_lock);
+
+out:
+ if (found)
+ queue_delayed_work(system_power_efficient_wq, &hook_work, HZ);
+}
+
+static bool
+xt_flowoffload_skip(struct sk_buff *skb, int family)
+{
+ if (skb_sec_path(skb))
+ return true;
+
+ if (family == NFPROTO_IPV4) {
+ const struct ip_options *opt = &(IPCB(skb)->opt);
+
+ if (unlikely(opt->optlen))
+ return true;
+ }
+
+ return false;
+}
+
+static struct dst_entry *
+xt_flowoffload_dst(const struct nf_conn *ct, enum ip_conntrack_dir dir,
+ const struct xt_action_param *par, int ifindex)
+{
+ struct dst_entry *dst = NULL;
+ struct flowi fl;
+
+ memset(&fl, 0, sizeof(fl));
+ switch (xt_family(par)) {
+ case NFPROTO_IPV4:
+ fl.u.ip4.daddr = ct->tuplehash[dir].tuple.src.u3.ip;
+ fl.u.ip4.flowi4_oif = ifindex;
+ break;
+ case NFPROTO_IPV6:
+ fl.u.ip6.saddr = ct->tuplehash[dir].tuple.dst.u3.in6;
+ fl.u.ip6.daddr = ct->tuplehash[dir].tuple.src.u3.in6;
+ fl.u.ip6.flowi6_oif = ifindex;
+ break;
+ }
+
+ nf_route(xt_net(par), &dst, &fl, false, xt_family(par));
+
+ return dst;
+}
+
+static int
+xt_flowoffload_route(struct sk_buff *skb, const struct nf_conn *ct,
+ const struct xt_action_param *par,
+ struct nf_flow_route *route, enum ip_conntrack_dir dir)
+{
+ struct dst_entry *this_dst, *other_dst;
+
+ this_dst = xt_flowoffload_dst(ct, !dir, par, xt_out(par)->ifindex);
+ other_dst = xt_flowoffload_dst(ct, dir, par, xt_in(par)->ifindex);
+
+ route->tuple[dir].dst = this_dst;
+ route->tuple[!dir].dst = other_dst;
+
+ if (!this_dst || !other_dst)
+ return -ENOENT;
+
+ if (dst_xfrm(this_dst) || dst_xfrm(other_dst))
+ return -EINVAL;
+
+ return 0;
+}
+
+static unsigned int
+flowoffload_tg(struct sk_buff *skb, const struct xt_action_param *par)
+{
+ const struct xt_flowoffload_target_info *info = par->targinfo;
+ struct tcphdr _tcph, *tcph = NULL;
+ enum ip_conntrack_info ctinfo;
+ enum ip_conntrack_dir dir;
+ struct nf_flow_route route;
+ struct flow_offload *flow = NULL;
+ struct nf_conn *ct;
+ struct net *net;
+
+ if (xt_flowoffload_skip(skb, xt_family(par)))
+ return XT_CONTINUE;
+
+ ct = nf_ct_get(skb, &ctinfo);
+ if (ct == NULL)
+ return XT_CONTINUE;
+
+ switch (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum) {
+ case IPPROTO_TCP:
+ if (ct->proto.tcp.state != TCP_CONNTRACK_ESTABLISHED)
+ return XT_CONTINUE;
+
+ tcph = skb_header_pointer(skb, par->thoff,
+ sizeof(_tcph), &_tcph);
+ if (unlikely(!tcph || tcph->fin || tcph->rst))
+ return XT_CONTINUE;
+ break;
+ case IPPROTO_UDP:
+ break;
+ default:
+ return XT_CONTINUE;
+ }
+
+ if (nf_ct_ext_exist(ct, NF_CT_EXT_HELPER) ||
+ ct->status & IPS_SEQ_ADJUST)
+ return XT_CONTINUE;
+
+ if (!nf_ct_is_confirmed(ct))
+ return XT_CONTINUE;
+
+ if (!xt_in(par) || !xt_out(par))
+ return XT_CONTINUE;
+
+ if (test_and_set_bit(IPS_OFFLOAD_BIT, &ct->status))
+ return XT_CONTINUE;
+
+ dir = CTINFO2DIR(ctinfo);
+
+ if (xt_flowoffload_route(skb, ct, par, &route, dir) == 0)
+ flow = flow_offload_alloc(ct, &route);
+
+ dst_release(route.tuple[dir].dst);
+ dst_release(route.tuple[!dir].dst);
+
+ if (!flow)
+ goto err_flow_route;
+
+ if (tcph) {
+ ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
+ ct->proto.tcp.seen[1].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
+ }
+
+ if (flow_offload_add(&nf_flowtable, flow) < 0)
+ goto err_flow_add;
+
+ xt_flowoffload_check_device(xt_in(par));
+ xt_flowoffload_check_device(xt_out(par));
+
+ net = read_pnet(&nf_flowtable.ft_net);
+ if (!net)
+ write_pnet(&nf_flowtable.ft_net, xt_net(par));
+
+ if (info->flags & XT_FLOWOFFLOAD_HW)
+ nf_flow_offload_hw_add(xt_net(par), flow, ct);
+
+ return XT_CONTINUE;
+
+err_flow_add:
+ flow_offload_free(flow);
+err_flow_route:
+ clear_bit(IPS_OFFLOAD_BIT, &ct->status);
+ return XT_CONTINUE;
+}
+
+
+static int flowoffload_chk(const struct xt_tgchk_param *par)
+{
+ struct xt_flowoffload_target_info *info = par->targinfo;
+
+ if (info->flags & ~XT_FLOWOFFLOAD_MASK)
+ return -EINVAL;
+
+ return 0;
+}
+
+static struct xt_target offload_tg_reg __read_mostly = {
+ .family = NFPROTO_UNSPEC,
+ .name = "FLOWOFFLOAD",
+ .revision = 0,
+ .targetsize = sizeof(struct xt_flowoffload_target_info),
+ .usersize = sizeof(struct xt_flowoffload_target_info),
+ .checkentry = flowoffload_chk,
+ .target = flowoffload_tg,
+ .me = THIS_MODULE,
+};
+
+static int xt_flowoffload_table_init(struct nf_flowtable *table)
+{
+ table->flags = NF_FLOWTABLE_F_HW;
+ nf_flow_table_init(table);
+ return 0;
+}
+
+static void xt_flowoffload_table_cleanup(struct nf_flowtable *table)
+{
+ nf_flow_table_free(table);
+}
+
+static int flow_offload_netdev_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct xt_flowoffload_hook *hook = NULL;
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+
+ if (event != NETDEV_UNREGISTER)
+ return NOTIFY_DONE;
+
+ spin_lock_bh(&hooks_lock);
+ hook = flow_offload_lookup_hook(dev);
+ if (hook) {
+ hlist_del(&hook->list);
+ }
+ spin_unlock_bh(&hooks_lock);
+ if (hook) {
+ nf_unregister_net_hook(hook->net, &hook->ops);
+ kfree(hook);
+ }
+
+ nf_flow_table_cleanup(dev);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block flow_offload_netdev_notifier = {
+ .notifier_call = flow_offload_netdev_event,
+};
+
+static int __init xt_flowoffload_tg_init(void)
+{
+ int ret;
+
+ register_netdevice_notifier(&flow_offload_netdev_notifier);
+
+ INIT_DELAYED_WORK(&hook_work, xt_flowoffload_hook_work);
+
+ ret = xt_flowoffload_table_init(&nf_flowtable);
+ if (ret)
+ return ret;
+
+ ret = xt_register_target(&offload_tg_reg);
+ if (ret)
+ xt_flowoffload_table_cleanup(&nf_flowtable);
+
+ return ret;
+}
+
+static void __exit xt_flowoffload_tg_exit(void)
+{
+ xt_unregister_target(&offload_tg_reg);
+ xt_flowoffload_table_cleanup(&nf_flowtable);
+ unregister_netdevice_notifier(&flow_offload_netdev_notifier);
+}
+
+MODULE_LICENSE("GPL");
+module_init(xt_flowoffload_tg_init);
+module_exit(xt_flowoffload_tg_exit);
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -7,7 +7,6 @@
#include <linux/netdevice.h>
#include <net/ip.h>
#include <net/ip6_route.h>
-#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
@@ -351,8 +350,7 @@ flow_offload_lookup(struct nf_flowtable
}
EXPORT_SYMBOL_GPL(flow_offload_lookup);
-static int
-nf_flow_table_iterate(struct nf_flowtable *flow_table,
+int nf_flow_table_iterate(struct nf_flowtable *flow_table,
void (*iter)(struct flow_offload *flow, void *data),
void *data)
{
@@ -385,6 +383,7 @@ nf_flow_table_iterate(struct nf_flowtabl
return err;
}
+EXPORT_SYMBOL_GPL(nf_flow_table_iterate);
static void nf_flow_offload_gc_step(struct flow_offload *flow, void *data)
{
--- /dev/null
+++ b/include/uapi/linux/netfilter/xt_FLOWOFFLOAD.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _XT_FLOWOFFLOAD_H
+#define _XT_FLOWOFFLOAD_H
+
+#include <linux/types.h>
+
+enum {
+ XT_FLOWOFFLOAD_HW = 1 << 0,
+
+ XT_FLOWOFFLOAD_MASK = XT_FLOWOFFLOAD_HW
+};
+
+struct xt_flowoffload_target_info {
+ __u32 flags;
+};
+
+#endif /* _XT_FLOWOFFLOAD_H */
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -133,6 +133,10 @@ static inline void flow_offload_dead(str
flow->flags |= FLOW_OFFLOAD_DYING;
}
+int nf_flow_table_iterate(struct nf_flowtable *flow_table,
+ void (*iter)(struct flow_offload *flow, void *data),
+ void *data);
+
int nf_flow_snat_port(const struct flow_offload *flow,
struct sk_buff *skb, unsigned int thoff,
u8 protocol, enum flow_offload_tuple_dir dir);