
Add a directory per kernel version instead of a common root; add qnap-301w and rpi4 kernel 6.1 support

Ycarus (Yannick Chabanois) 2023-04-22 08:07:24 +02:00
parent e910436a7a
commit 46837ec4c0
9459 changed files with 362648 additions and 116345 deletions

@@ -0,0 +1,80 @@
Index: linux-5.4.147/kernel/bpf/syscall.c
===================================================================
--- linux-5.4.147.orig/kernel/bpf/syscall.c
+++ linux-5.4.147/kernel/bpf/syscall.c
@@ -593,6 +593,11 @@ static int map_create(union bpf_attr *at
err = PTR_ERR(btf);
goto free_map;
}
+ if (btf_is_kernel(btf)) {
+ btf_put(btf);
+ err = -EACCES;
+ goto free_map;
+ }
err = map_check_btf(map, btf, attr->btf_key_type_id,
attr->btf_value_type_id);
Index: linux-5.4.147/kernel/bpf/verifier.c
===================================================================
--- linux-5.4.147.orig/kernel/bpf/verifier.c
+++ linux-5.4.147/kernel/bpf/verifier.c
@@ -6959,6 +6959,11 @@ static int check_btf_info(struct bpf_ver
btf = btf_get_by_fd(attr->prog_btf_fd);
if (IS_ERR(btf))
return PTR_ERR(btf);
+ if (btf_is_kernel(btf)) {
+ btf_put(btf);
+ return -EACCES;
+ }
+
env->prog->aux->btf = btf;
err = check_btf_func(env, attr, uattr);
Index: linux-5.4.147/include/linux/btf.h
===================================================================
--- linux-5.4.147.orig/include/linux/btf.h
+++ linux-5.4.147/include/linux/btf.h
@@ -47,6 +47,7 @@ void btf_type_seq_show(const struct btf
struct seq_file *m);
int btf_get_fd_by_id(u32 id);
u32 btf_id(const struct btf *btf);
+bool btf_is_kernel(const struct btf *btf);
bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s,
const struct btf_member *m,
u32 expected_offset, u32 expected_size);
Index: linux-5.4.147/kernel/bpf/btf.c
===================================================================
--- linux-5.4.147.orig/kernel/bpf/btf.c
+++ linux-5.4.147/kernel/bpf/btf.c
@@ -212,6 +212,7 @@ struct btf {
refcount_t refcnt;
u32 id;
struct rcu_head rcu;
+ bool kernel_btf;
};
enum verifier_phase {
@@ -352,6 +353,11 @@ static bool btf_type_nosize(const struct
btf_type_is_func(t) || btf_type_is_func_proto(t);
}
+bool btf_is_kernel(const struct btf *btf)
+{
+ return btf->kernel_btf;
+}
+
static bool btf_type_nosize_or_null(const struct btf_type *t)
{
return !t || btf_type_nosize(t);
Index: linux-5.4.147/include/uapi/linux/bpf.h
===================================================================
--- linux-5.4.147.orig/include/uapi/linux/bpf.h
+++ linux-5.4.147/include/uapi/linux/bpf.h
@@ -3275,6 +3275,7 @@ struct bpf_btf_info {
__aligned_u64 btf;
__u32 btf_size;
__u32 id;
+ __u32 kernel_btf;
} __attribute__((aligned(8)));
/* User bpf_sock_addr struct to access socket fields and sockaddr struct passed

@@ -0,0 +1,48 @@
From 05acefb4872dae89e772729efb194af754c877e8 Mon Sep 17 00:00:00 2001
From: Miklos Szeredi <mszeredi@redhat.com>
Date: Tue, 2 Jun 2020 22:20:26 +0200
Subject: ovl: check permission to open real file
Call inode_permission() on real inode before opening regular file on one of
the underlying layers.
In some cases ovl_permission() already checks access to an underlying file,
but it misses the metacopy case, and possibly other ones as well.
Removing the redundant permission check from ovl_permission() should be
considered later.
Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
---
fs/overlayfs/file.c | 16 ++++++++++++++--
1 file changed, 14 insertions(+), 2 deletions(-)
Index: linux-5.4.147/fs/overlayfs/file.c
===================================================================
--- linux-5.4.147.orig/fs/overlayfs/file.c
+++ linux-5.4.147/fs/overlayfs/file.c
@@ -34,10 +34,22 @@ static struct file *ovl_open_realfile(co
struct file *realfile;
const struct cred *old_cred;
int flags = file->f_flags | OVL_OPEN_FLAGS;
+ int acc_mode = ACC_MODE(flags);
+ int err;
+
+ if (flags & O_APPEND)
+ acc_mode |= MAY_APPEND;
old_cred = ovl_override_creds(inode->i_sb);
- realfile = open_with_fake_path(&file->f_path, flags, realinode,
- current_cred());
+ err = inode_permission(realinode, MAY_OPEN | acc_mode);
+ if (err) {
+ realfile = ERR_PTR(err);
+ } else if (!inode_owner_or_capable(realinode)) {
+ realfile = ERR_PTR(-EPERM);
+ } else {
+ realfile = open_with_fake_path(&file->f_path, flags, realinode,
+ current_cred());
+ }
revert_creds(old_cred);
pr_debug("open(%p[%pD2/%c], 0%o) -> (%p, 0%o)\n",

@@ -0,0 +1,73 @@
From 48bd024b8a40d73ad6b086de2615738da0c7004f Mon Sep 17 00:00:00 2001
From: Miklos Szeredi <mszeredi@redhat.com>
Date: Tue, 2 Jun 2020 22:20:25 +0200
Subject: ovl: switch to mounter creds in readdir
In preparation for more permission checking, override credentials for
directory operations on the underlying filesystems.
Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
---
fs/overlayfs/readdir.c | 27 +++++++++++++++++++++------
1 file changed, 21 insertions(+), 6 deletions(-)
Index: linux-5.4.147/fs/overlayfs/readdir.c
===================================================================
--- linux-5.4.147.orig/fs/overlayfs/readdir.c
+++ linux-5.4.147/fs/overlayfs/readdir.c
@@ -732,8 +732,10 @@ static int ovl_iterate(struct file *file
struct ovl_dir_file *od = file->private_data;
struct dentry *dentry = file->f_path.dentry;
struct ovl_cache_entry *p;
+ const struct cred *old_cred;
int err;
+ old_cred = ovl_override_creds(dentry->d_sb);
if (!ctx->pos)
ovl_dir_reset(file);
@@ -747,17 +749,20 @@ static int ovl_iterate(struct file *file
(ovl_same_sb(dentry->d_sb) &&
(ovl_is_impure_dir(file) ||
OVL_TYPE_MERGE(ovl_path_type(dentry->d_parent))))) {
- return ovl_iterate_real(file, ctx);
+ err = ovl_iterate_real(file, ctx);
+ } else {
+ err = iterate_dir(od->realfile, ctx);
}
- return iterate_dir(od->realfile, ctx);
+ goto out;
}
if (!od->cache) {
struct ovl_dir_cache *cache;
cache = ovl_cache_get(dentry);
+ err = PTR_ERR(cache);
if (IS_ERR(cache))
- return PTR_ERR(cache);
+ goto out;
od->cache = cache;
ovl_seek_cursor(od, ctx->pos);
@@ -769,7 +774,7 @@ static int ovl_iterate(struct file *file
if (!p->ino) {
err = ovl_cache_update_ino(&file->f_path, p);
if (err)
- return err;
+ goto out;
}
if (!dir_emit(ctx, p->name, p->len, p->ino, p->type))
break;
@@ -777,7 +782,10 @@ static int ovl_iterate(struct file *file
od->cursor = p->l_node.next;
ctx->pos++;
}
- return 0;
+ err = 0;
+out:
+ revert_creds(old_cred);
+ return err;
}
static loff_t ovl_dir_llseek(struct file *file, loff_t offset, int origin)
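The control-flow churn above is mechanical: every early return becomes a goto so that revert_creds() always runs. The pairing the patch enforces, as a minimal sketch (do_work() is a placeholder):

	const struct cred *old_cred = ovl_override_creds(dentry->d_sb);
	int err;

	err = do_work();
	if (err)
		goto out;	/* not "return err": the override would never be reverted */
	err = 0;
out:
	revert_creds(old_cred);
	return err;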

@@ -0,0 +1,61 @@
From 56230d956739b9cb1cbde439d76227d77979a04d Mon Sep 17 00:00:00 2001
From: Miklos Szeredi <mszeredi@redhat.com>
Date: Tue, 2 Jun 2020 22:20:26 +0200
Subject: ovl: verify permissions in ovl_path_open()
Check permission before opening a real file.
ovl_path_open() is used by readdir and copy-up routines.
ovl_permission() theoretically already checked copy up permissions, but it
doesn't hurt to re-do these checks during the actual copy-up.
For directory reading ovl_permission() only checks access to topmost
underlying layer. Readdir on a merged directory accesses layers below the
topmost one as well. Permission wasn't checked for these layers.
Note: modifying ovl_permission() to perform this check would be far more
complex and hence more bug prone. The result is less precise permissions
returned in access(2). If this turns out to be an issue, we can revisit
this bug.
Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
---
fs/overlayfs/util.c | 27 ++++++++++++++++++++++++++-
1 file changed, 26 insertions(+), 1 deletion(-)
Index: linux-5.4.147/fs/overlayfs/util.c
===================================================================
--- linux-5.4.147.orig/fs/overlayfs/util.c
+++ linux-5.4.147/fs/overlayfs/util.c
@@ -475,7 +475,29 @@ bool ovl_is_whiteout(struct dentry *dent
struct file *ovl_path_open(struct path *path, int flags)
{
- return dentry_open(path, flags | O_NOATIME, current_cred());
+ struct inode *inode = d_inode(path->dentry);
+ int err, acc_mode;
+
+ switch (flags & O_ACCMODE) {
+ case O_RDONLY:
+ acc_mode = MAY_READ;
+ break;
+ case O_WRONLY:
+ acc_mode = MAY_WRITE;
+ break;
+ default:
+ BUG();
+ }
+
+ err = inode_permission(inode, acc_mode | MAY_OPEN);
+ if (err)
+ return ERR_PTR(err);
+
+ /* O_NOATIME is an optimization, don't fail if not permitted */
+ if (inode_owner_or_capable(inode))
+ flags |= O_NOATIME;
+
+ return dentry_open(path, flags, current_cred());
}
/* Caller should hold ovl_inode->lock */
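The BUG() branch is reachable only for O_RDWR, which neither readdir nor copy-up ever requests. A hypothetical caller, for context (after the patch, both the inode_permission() check and the best-effort O_NOATIME happen inside the helper):

	struct file *realfile;

	/* realpath points at the relevant lower/upper layer */
	realfile = ovl_path_open(&realpath, O_RDONLY);
	if (IS_ERR(realfile))
		return PTR_ERR(realfile);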

@@ -0,0 +1,46 @@
From b6650dab404c701d7fe08a108b746542a934da84 Mon Sep 17 00:00:00 2001
From: Miklos Szeredi <mszeredi@redhat.com>
Date: Mon, 14 Dec 2020 15:26:14 +0100
Subject: ovl: do not fail because of O_NOATIME
In case the file cannot be opened with O_NOATIME because of lack of
capabilities, then clear O_NOATIME instead of failing.
Remove WARN_ON(), since it would now trigger if O_NOATIME was cleared.
Noticed by Amir Goldstein.
Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
---
fs/overlayfs/file.c | 11 +++--------
1 file changed, 3 insertions(+), 8 deletions(-)
Index: linux-5.4.147/fs/overlayfs/file.c
===================================================================
--- linux-5.4.147.orig/fs/overlayfs/file.c
+++ linux-5.4.147/fs/overlayfs/file.c
@@ -44,9 +44,10 @@ static struct file *ovl_open_realfile(co
err = inode_permission(realinode, MAY_OPEN | acc_mode);
if (err) {
realfile = ERR_PTR(err);
- } else if (!inode_owner_or_capable(realinode)) {
- realfile = ERR_PTR(-EPERM);
} else {
+ if (!inode_owner_or_capable(realinode))
+ flags &= ~O_NOATIME;
+
realfile = open_with_fake_path(&file->f_path, flags, realinode,
current_cred());
}
@@ -66,12 +67,6 @@ static int ovl_change_flags(struct file
struct inode *inode = file_inode(file);
int err;
- flags |= OVL_OPEN_FLAGS;
-
- /* If some flag changed that cannot be changed then something's amiss */
- if (WARN_ON((file->f_flags ^ flags) & ~OVL_SETFL_MASK))
- return -EIO;
-
flags &= OVL_SETFL_MASK;
if (((flags ^ file->f_flags) & O_APPEND) && IS_APPEND(inode))

@@ -0,0 +1,17 @@
Index: linux-5.4.124/drivers/net/ppp/pppoe.c
===================================================================
--- linux-5.4.124.orig/drivers/net/ppp/pppoe.c
+++ linux-5.4.124/drivers/net/ppp/pppoe.c
@@ -502,6 +502,12 @@ static int pppoe_disc_rcv(struct sk_buff
if (ph->code != PADT_CODE)
goto abort;
+ // compare the dst addr to the current net device addr and ignore packet if not matching
+ // otherwise it will terminate the connection
+ if(!ether_addr_equal(eth_hdr(skb)->h_dest, dev->dev_addr)) {
+ goto abort;
+ }
+
pn = pppoe_pernet(dev_net(dev));
po = get_item(pn, ph->sid, eth_hdr(skb)->h_source, dev->ifindex);
if (po)
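A PADT (PPPoE Active Discovery Terminate) names a session id and kills the matching session, so a forged or misdirected PADT seen by the interface could otherwise tear down a live connection. ether_addr_equal() is the stock 6-byte MAC compare from <linux/etherdevice.h>; the guard simply ignores PADTs not addressed to this device, sketched in kernel comment style:

	if (!ether_addr_equal(eth_hdr(skb)->h_dest, dev->dev_addr))
		goto abort;	/* not our MAC: keep the session up */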

[5 file diffs suppressed because they are too large]

@@ -0,0 +1,319 @@
diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h
index 12d967b..c2b98b6 100644
--- a/include/net/netfilter/nf_conntrack_ecache.h
+++ b/include/net/netfilter/nf_conntrack_ecache.h
@@ -72,6 +72,10 @@ struct nf_ct_event {
int report;
};
+#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
+extern int nf_conntrack_register_notifier(struct net *net, struct notifier_block *nb);
+extern int nf_conntrack_unregister_notifier(struct net *net, struct notifier_block *nb);
+#else
struct nf_ct_event_notifier {
int (*fcn)(unsigned int events, struct nf_ct_event *item);
};
@@ -80,6 +84,7 @@ int nf_conntrack_register_notifier(struc
struct nf_ct_event_notifier *nb);
void nf_conntrack_unregister_notifier(struct net *net,
struct nf_ct_event_notifier *nb);
+#endif
void nf_ct_deliver_cached_events(struct nf_conn *ct);
int nf_conntrack_eventmask_report(unsigned int eventmask, struct nf_conn *ct,
@@ -105,11 +110,13 @@ int nf_conntrack_eventmask_report(unsign
nf_conntrack_event_cache(enum ip_conntrack_events event, struct nf_conn *ct)
{
#ifdef CONFIG_NF_CONNTRACK_EVENTS
- struct net *net = nf_ct_net(ct);
struct nf_conntrack_ecache *e;
+#ifndef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
+ struct net *net = nf_ct_net(ct);
if (!rcu_access_pointer(net->ct.nf_conntrack_event_cb))
return;
+#endif
e = nf_ct_ecache_find(ct);
if (e == NULL)
@@ -124,10 +131,12 @@ static inline int
u32 portid, int report)
{
#ifdef CONFIG_NF_CONNTRACK_EVENTS
+#ifndef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
const struct net *net = nf_ct_net(ct);
if (!rcu_access_pointer(net->ct.nf_conntrack_event_cb))
return 0;
+#endif
return nf_conntrack_eventmask_report(1 << event, ct, portid, report);
#else
@@ -139,10 +148,12 @@ nf_conntrack_event_report(enum ip_conntr
nf_conntrack_event(enum ip_conntrack_events event, struct nf_conn *ct)
{
#ifdef CONFIG_NF_CONNTRACK_EVENTS
+#ifndef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
const struct net *net = nf_ct_net(ct);
if (!rcu_access_pointer(net->ct.nf_conntrack_event_cb))
return 0;
+#endif
return nf_conntrack_eventmask_report(1 << event, ct, 0, 0);
#else
diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
index e469e85..1d31db8 100644
--- a/include/net/netns/conntrack.h
+++ b/include/net/netns/conntrack.h
@@ -112,7 +112,11 @@ struct netns_ct {
struct ct_pcpu __percpu *pcpu_lists;
struct ip_conntrack_stat __percpu *stat;
+#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
+ struct atomic_notifier_head nf_conntrack_chain;
+#else
struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb;
+#endif
struct nf_exp_event_notifier __rcu *nf_expect_event_cb;
struct nf_ip_net nf_ct_proto;
#if defined(CONFIG_NF_CONNTRACK_LABELS)
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 63073be..08d7aab 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -148,6 +148,14 @@ config NF_CONNTRACK_RTCACHE
To compile it as a module, choose M here. If unsure, say N.
The module will be called nf_conntrack_rtcache.
+config NF_CONNTRACK_CHAIN_EVENTS
+ bool "Register multiple callbacks to ct events"
+ depends on NF_CONNTRACK_EVENTS
+ help
+ Support multiple registrations.
+
+ If unsure, say `N'.
+
config NF_CONNTRACK_TIMEOUT
bool 'Connection tracking timeout'
depends on NETFILTER_ADVANCED
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 6bd1508..9b81c7c 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -2592,6 +2592,9 @@ int nf_conntrack_init_net(struct net *ne
nf_conntrack_helper_pernet_init(net);
nf_conntrack_proto_pernet_init(net);
+#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
+ ATOMIC_INIT_NOTIFIER_HEAD(&net->ct.nf_conntrack_chain);
+#endif
return 0;
err_expect:
diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
index da9df2d..e0e2a8f 100644
--- a/net/netfilter/nf_conntrack_ecache.c
+++ b/net/netfilter/nf_conntrack_ecache.c
@@ -17,6 +17,9 @@
#include <linux/stddef.h>
#include <linux/err.h>
#include <linux/percpu.h>
+#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
+#include <linux/notifier.h>
+#endif
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
@@ -117,6 +120,38 @@ static void ecache_work(struct work_stru
schedule_delayed_work(&ctnet->ecache_dwork, delay);
}
+#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
+int
+nf_conntrack_eventmask_report(unsigned int eventmask,
+ struct nf_conn *ct,
+ u32 portid,
+ int report)
+{
+ struct nf_conntrack_ecache *e;
+ struct net *net = nf_ct_net(ct);
+
+ e = nf_ct_ecache_find(ct);
+ if (e == NULL)
+ return 0;
+
+ if (nf_ct_is_confirmed(ct)) {
+ struct nf_ct_event item = {
+ .ct = ct,
+ .portid = e->portid ? e->portid : portid,
+ .report = report
+ };
+ /* This is a resent of a destroy event? If so, skip missed */
+ unsigned long missed = e->portid ? 0 : e->missed;
+
+ if (!((eventmask | missed) & e->ctmask))
+ return 0;
+
+ atomic_notifier_call_chain(&net->ct.nf_conntrack_chain, eventmask | missed, &item);
+ }
+
+ return 0;
+}
+#else
int nf_conntrack_eventmask_report(unsigned int eventmask, struct nf_conn *ct,
u32 portid, int report)
{
@@ -171,10 +206,52 @@ out_unlock:
rcu_read_unlock();
return ret;
}
+#endif
EXPORT_SYMBOL_GPL(nf_conntrack_eventmask_report);
/* deliver cached events and clear cache entry - must be called with locally
* disabled softirqs */
+#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
+void nf_ct_deliver_cached_events(struct nf_conn *ct)
+{
+ unsigned long events, missed;
+ struct nf_conntrack_ecache *e;
+ struct nf_ct_event item;
+ struct net *net = nf_ct_net(ct);
+
+ e = nf_ct_ecache_find(ct);
+ if (e == NULL)
+ return;
+
+ events = xchg(&e->cache, 0);
+
+ if (!nf_ct_is_confirmed(ct) || nf_ct_is_dying(ct) || !events)
+ return;
+
+ /* We make a copy of the missed event cache without taking
+ * the lock, thus we may send missed events twice. However,
+ * this does not harm and it happens very rarely. */
+ missed = e->missed;
+
+ if (!((events | missed) & e->ctmask))
+ return;
+
+ item.ct = ct;
+ item.portid = 0;
+ item.report = 0;
+
+ atomic_notifier_call_chain(&net->ct.nf_conntrack_chain,
+ events | missed,
+ &item);
+
+ if (likely(!missed))
+ return;
+
+ spin_lock_bh(&ct->lock);
+ e->missed &= ~missed;
+ spin_unlock_bh(&ct->lock);
+}
+#else
void nf_ct_deliver_cached_events(struct nf_conn *ct)
{
struct net *net = nf_ct_net(ct);
@@ -225,6 +302,7 @@ void nf_ct_deliver_cached_events(struct
out_unlock:
rcu_read_unlock();
}
+#endif
EXPORT_SYMBOL_GPL(nf_ct_deliver_cached_events);
void nf_ct_expect_event_report(enum ip_conntrack_expect_events event,
@@ -257,6 +335,12 @@ out_unlock:
rcu_read_unlock();
}
+#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
+int nf_conntrack_register_notifier(struct net *net, struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(&net->ct.nf_conntrack_chain, nb);
+}
+#else
int nf_conntrack_register_notifier(struct net *net,
struct nf_ct_event_notifier *new)
{
@@ -277,8 +361,15 @@ out_unlock:
mutex_unlock(&nf_ct_ecache_mutex);
return ret;
}
+#endif
EXPORT_SYMBOL_GPL(nf_conntrack_register_notifier);
+#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
+int nf_conntrack_unregister_notifier(struct net *net, struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&net->ct.nf_conntrack_chain, nb);
+}
+#else
void nf_conntrack_unregister_notifier(struct net *net,
struct nf_ct_event_notifier *new)
{
@@ -292,6 +383,7 @@ void nf_conntrack_unregister_notifier(st
mutex_unlock(&nf_ct_ecache_mutex);
/* synchronize_rcu() is called from ctnetlink_exit. */
}
+#endif
EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier);
int nf_ct_expect_register_notifier(struct net *net,
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 04111c1..8c741f7 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -32,6 +32,11 @@
#include <linux/siphash.h>
#include <linux/netfilter.h>
+
+#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
+#include <linux/notifier.h>
+#endif
+
#include <net/netlink.h>
#include <net/sock.h>
#include <net/netfilter/nf_conntrack.h>
@@ -676,14 +681,22 @@ static size_t ctnetlink_nlmsg_size(const
;
}
+#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
+static int ctnetlink_conntrack_event(struct notifier_block *this,
+ unsigned long events, void *ptr)
+#else
static int
ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
+#endif
{
const struct nf_conntrack_zone *zone;
struct net *net;
struct nlmsghdr *nlh;
struct nfgenmsg *nfmsg;
struct nlattr *nest_parms;
+#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
+ struct nf_ct_event *item = (struct nf_ct_event *)ptr;
+#endif
struct nf_conn *ct = item->ct;
struct sk_buff *skb;
unsigned int type;
@@ -3504,9 +3517,15 @@ static int ctnetlink_stat_exp_cpu(struct
}
#ifdef CONFIG_NF_CONNTRACK_EVENTS
+#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
+static struct notifier_block ctnl_notifier = {
+ .notifier_call = ctnetlink_conntrack_event,
+};
+#else
static struct nf_ct_event_notifier ctnl_notifier = {
.fcn = ctnetlink_conntrack_event,
};
+#endif
static struct nf_exp_event_notifier ctnl_notifier_exp = {
.fcn = ctnetlink_expect_event,
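With CONFIG_NF_CONNTRACK_CHAIN_EVENTS the per-netns event callback becomes an atomic notifier chain, so multiple consumers can subscribe at once. A hypothetical consumer, as a sketch (names are illustrative; the registration helpers are the ones added above):

#include <linux/notifier.h>
#include <net/netfilter/nf_conntrack_ecache.h>

static int my_ct_event(struct notifier_block *nb, unsigned long events,
		       void *ptr)
{
	struct nf_ct_event *item = ptr;

	if (events & (1 << IPCT_DESTROY))
		pr_debug("conntrack %p destroyed\n", item->ct);

	return NOTIFY_DONE;
}

static struct notifier_block my_ct_nb = {
	.notifier_call = my_ct_event,
};

/* in module init/exit, for the netns of interest:
 *	nf_conntrack_register_notifier(net, &my_ct_nb);
 *	nf_conntrack_unregister_notifier(net, &my_ct_nb);
 */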

@@ -0,0 +1,253 @@
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -52,6 +52,9 @@ struct br_ip_list {
extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *));
+extern void br_dev_update_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *nlstats);
+
#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING)
int br_multicast_list_adjacent(struct net_device *dev,
struct list_head *br_ip_list);
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -827,6 +827,10 @@ struct sk_buff {
#endif
__u8 scm_io_uring:1;
+#ifdef CONFIG_SHORTCUT_FE
+ __u8 fast_forwarded:1;
+#endif
+
#ifdef CONFIG_NET_SCHED
__u16 tc_index; /* traffic control index */
#endif
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -18,6 +18,10 @@ struct timer_list {
void (*function)(struct timer_list *);
u32 flags;
+#ifdef CONFIG_SHORTCUT_FE
+ unsigned long cust_data;
+#endif
+
#ifdef CONFIG_LOCKDEP
struct lockdep_map lockdep_map;
#endif
--- a/include/net/netfilter/nf_conntrack_ecache.h
+++ b/include/net/netfilter/nf_conntrack_ecache.h
@@ -75,6 +75,8 @@ struct nf_ct_event {
#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
extern int nf_conntrack_register_notifier(struct net *net, struct notifier_block *nb);
extern int nf_conntrack_unregister_notifier(struct net *net, struct notifier_block *nb);
+extern int nf_conntrack_register_chain_notifier(struct net *net, struct notifier_block *nb);
+extern int nf_conntrack_unregister_chain_notifier(struct net *net, struct notifier_block *nb);
#else
struct nf_ct_event_notifier {
int (*fcn)(unsigned int events, struct nf_ct_event *item);
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -763,6 +763,28 @@ void br_port_flags_change(struct net_bri
br_recalculate_neigh_suppress_enabled(br);
}
+void br_dev_update_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *nlstats)
+{
+ struct net_bridge *br;
+ struct pcpu_sw_netstats *stats;
+
+ /* Is this a bridge? */
+ if (!(dev->priv_flags & IFF_EBRIDGE))
+ return;
+
+ br = netdev_priv(dev);
+ stats = this_cpu_ptr(br->stats);
+
+ u64_stats_update_begin(&stats->syncp);
+ stats->rx_packets += nlstats->rx_packets;
+ stats->rx_bytes += nlstats->rx_bytes;
+ stats->tx_packets += nlstats->tx_packets;
+ stats->tx_bytes += nlstats->tx_bytes;
+ u64_stats_update_end(&stats->syncp);
+}
+EXPORT_SYMBOL_GPL(br_dev_update_stats);
+
bool br_port_flag_is_set(const struct net_device *dev, unsigned long flag)
{
struct net_bridge_port *p;
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3197,9 +3197,17 @@ static int xmit_one(struct sk_buff *skb,
unsigned int len;
int rc;
+#ifdef CONFIG_SHORTCUT_FE
+ /* If this skb has been fast forwarded then we don't want it to
+ * go to any taps (by definition we're trying to bypass them).
+ */
+ if (!skb->fast_forwarded) {
+#endif
if (dev_nit_active(dev))
dev_queue_xmit_nit(skb, dev);
-
+#ifdef CONFIG_SHORTCUT_FE
+ }
+#endif
#ifdef CONFIG_ETHERNET_PACKET_MANGLE
if (!dev->eth_mangle_tx ||
(skb = dev->eth_mangle_tx(dev, skb)) != NULL)
@@ -4715,6 +4723,11 @@ void netdev_rx_handler_unregister(struct
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
+#ifdef CONFIG_SHORTCUT_FE
+int (*athrs_fast_nat_recv)(struct sk_buff *skb) __rcu __read_mostly;
+EXPORT_SYMBOL_GPL(athrs_fast_nat_recv);
+#endif
+
/*
* Limit the use of PFMEMALLOC reserves to those protocols that implement
* the special handling of PFMEMALLOC skbs.
@@ -4765,6 +4778,10 @@ static int __netif_receive_skb_core(stru
int ret = NET_RX_DROP;
__be16 type;
+#ifdef CONFIG_SHORTCUT_FE
+ int (*fast_recv)(struct sk_buff *skb);
+#endif
+
net_timestamp_check(!READ_ONCE(netdev_tstamp_prequeue), skb);
trace_netif_receive_skb(skb);
@@ -4804,6 +4821,16 @@ another_round:
goto out;
}
+#ifdef CONFIG_SHORTCUT_FE
+ fast_recv = rcu_dereference(athrs_fast_nat_recv);
+ if (fast_recv) {
+ if (fast_recv(skb)) {
+ ret = NET_RX_SUCCESS;
+ goto out;
+ }
+ }
+#endif
+
if (skb_skip_tc_classify(skb))
goto skip_classify;
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -473,3 +473,6 @@ config HAVE_CBPF_JIT
# Extended BPF JIT (eBPF)
config HAVE_EBPF_JIT
bool
+
+config SHORTCUT_FE
+ bool "Enables kernel network stack path for Shortcut Forwarding Engine
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -34,11 +34,19 @@
/* Do not check the TCP window for incoming packets */
static int nf_ct_tcp_no_window_check __read_mostly = 1;
+#ifdef CONFIG_SHORTCUT_FE
+EXPORT_SYMBOL_GPL(nf_ct_tcp_no_window_check);
+#endif
+
/* "Be conservative in what you do,
be liberal in what you accept from others."
If it's non-zero, we mark only out of window RST segments as INVALID. */
static int nf_ct_tcp_be_liberal __read_mostly = 0;
+#ifdef CONFIG_SHORTCUT_FE
+EXPORT_SYMBOL_GPL(nf_ct_tcp_be_liberal);
+#endif
+
/* If it is set to zero, we disable picking up already established
connections. */
static int nf_ct_tcp_loose __read_mostly = 1;
--- a/net/netfilter/nf_conntrack_ecache.c
+++ b/net/netfilter/nf_conntrack_ecache.c
@@ -158,7 +158,11 @@ int nf_conntrack_eventmask_report(unsign
rcu_read_lock();
notify = rcu_dereference(net->ct.nf_conntrack_event_cb);
+#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
+ if (!notify && !rcu_dereference_raw(net->ct.nf_conntrack_chain.head))
+#else
if (!notify)
+#endif
goto out_unlock;
e = nf_ct_ecache_find(ct);
@@ -177,7 +181,14 @@ int nf_conntrack_eventmask_report(unsign
if (!((eventmask | missed) & e->ctmask))
goto out_unlock;
+#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
+ ret = atomic_notifier_call_chain(&net->ct.nf_conntrack_chain,
+ eventmask | missed, &item);
+ if (notify)
+ ret = notify->fcn(eventmask | missed, &item);
+#else
ret = notify->fcn(eventmask | missed, &item);
+#endif
if (unlikely(ret < 0 || missed)) {
spin_lock_bh(&ct->lock);
if (ret < 0) {
@@ -259,7 +270,11 @@ void nf_ct_deliver_cached_events(struct
rcu_read_lock();
notify = rcu_dereference(net->ct.nf_conntrack_event_cb);
+#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
+ if ((notify == NULL) && !rcu_dereference_raw(net->ct.nf_conntrack_chain.head))
+#else
if (notify == NULL)
+#endif
goto out_unlock;
e = nf_ct_ecache_find(ct);
@@ -283,7 +298,15 @@ void nf_ct_deliver_cached_events(struct
item.portid = 0;
item.report = 0;
+#ifdef CONFIG_NF_CONNTRACK_CHAIN_EVENTS
+ ret = atomic_notifier_call_chain(&net->ct.nf_conntrack_chain,
+ events | missed,
+ &item);
+ if (notify != NULL)
+ ret = notify->fcn(events | missed, &item);
+#else
ret = notify->fcn(events | missed, &item);
+#endif
if (likely(ret == 0 && !missed))
goto out_unlock;
@@ -337,6 +360,11 @@ int nf_conntrack_register_notifier(struc
{
return atomic_notifier_chain_register(&net->ct.nf_conntrack_chain, nb);
}
+int nf_conntrack_register_chain_notifier(struct net *net, struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(&net->ct.nf_conntrack_chain, nb);
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_register_chain_notifier);
#else
int nf_conntrack_register_notifier(struct net *net,
struct nf_ct_event_notifier *new)
@@ -366,6 +394,11 @@ int nf_conntrack_unregister_notifier(str
{
return atomic_notifier_chain_unregister(&net->ct.nf_conntrack_chain, nb);
}
+int nf_conntrack_unregister_chain_notifier(struct net *net, struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&net->ct.nf_conntrack_chain, nb);
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_unregister_chain_notifier);
#else
void nf_conntrack_unregister_notifier(struct net *net,
struct nf_ct_event_notifier *new)
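The SHORTCUT_FE hooks boil down to one RCU-protected function pointer, athrs_fast_nat_recv, consulted by __netif_receive_skb_core() before normal protocol delivery; a nonzero return means the skb was consumed on the fast path. A hypothetical forwarding engine attaches roughly like this (module boilerplate omitted; sketch only):

extern int (*athrs_fast_nat_recv)(struct sk_buff *skb);

static int sfe_recv(struct sk_buff *skb)
{
	/* return 1 if consumed (bypassing the stack and taps), 0 to pass on */
	return 0;
}

static int __init sfe_init(void)
{
	rcu_assign_pointer(athrs_fast_nat_recv, sfe_recv);
	return 0;
}

static void __exit sfe_exit(void)
{
	rcu_assign_pointer(athrs_fast_nat_recv, NULL);
	synchronize_rcu();	/* wait out in-flight readers */
}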

@@ -0,0 +1,43 @@
From 582774df32849d6638133a675f7502730b46e217 Mon Sep 17 00:00:00 2001
From: Jonathan Bell <jonathan@raspberrypi.com>
Date: Wed, 11 Aug 2021 15:33:57 +0100
Subject: [PATCH] usb: xhci: workaround for bogus SET_DEQ_PENDING endpoint
state
See https://github.com/raspberrypi/linux/issues/3981
An unknown unsafe memory access can result in the ep_state variable
in xhci_virt_ep being trampled with a stuck SET_DEQ_PENDING state
despite successful completion of a Set TR Deq Pointer command.
All URB enqueue/dequeue calls for the endpoint will fail in this state
so no transfers are possible until the device is reconnected.
As a workaround, clear the flag if we see it set and issue a new Set
TR Deq command anyway - this should be harmless, as a prior Set TR Deq
command will only have been issued in the Stopped state, and if the
endpoint is Running then the controller is required to ignore it and
respond with a Context State Error event TRB.
Signed-off-by: Jonathan Bell <jonathan@raspberrypi.com>
---
drivers/usb/host/xhci-ring.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 35b246aeb902a..726d8291bd31a 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -4223,9 +4223,9 @@ void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
}
ep = &xhci->devs[slot_id]->eps[ep_index];
if ((ep->ep_state & SET_DEQ_PENDING)) {
- xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
- xhci_warn(xhci, "A Set TR Deq Ptr command is pending.\n");
- return;
+ xhci_warn(xhci, "WARN A Set TR Deq Ptr command is pending for slot %u ep %u\n",
+ slot_id, ep_index);
+ ep->ep_state &= ~SET_DEQ_PENDING;
}
/* This function gets called from contexts where it cannot sleep */

@@ -0,0 +1,47 @@
--- a/net/core/dev.c 2018-08-10 10:31:41.199494561 +0200
+++ b/net/core/dev.c 2018-08-10 10:32:03.635272509 +0200
@@ -6613,9 +6613,11 @@
}
}
if (dev->flags != old_flags) {
+ /*
pr_info("device %s %s promiscuous mode\n",
dev->name,
dev->flags & IFF_PROMISC ? "entered" : "left");
+ */
if (audit_enabled) {
current_uid_gid(&uid, &gid);
audit_log(current->audit_context, GFP_ATOMIC,
--- a/drivers/net/usb/r8152.c 2020-08-13 13:11:25.866435255 +0200
+++ b/drivers/net/usb/r8152.c 2020-08-13 13:11:51.973994306 +0200
@@ -2353,7 +2353,7 @@
if (netdev->flags & IFF_PROMISC) {
/* Unconditionally log net taps. */
- netif_notice(tp, link, netdev, "Promiscuous mode enabled\n");
+ //netif_notice(tp, link, netdev, "Promiscuous mode enabled\n");
ocp_data |= RCR_AM | RCR_AAP;
mc_filter[1] = 0xffffffff;
mc_filter[0] = 0xffffffff;
--- a/drivers/net/usb/pegasus.c 2020-08-13 13:14:15.519570376 +0200
+++ b/drivers/net/usb/pegasus.c 2020-08-13 13:14:26.795380006 +0200
@@ -1031,7 +1031,7 @@
if (net->flags & IFF_PROMISC) {
pegasus->eth_regs[EthCtrl2] |= RX_PROMISCUOUS;
- netif_info(pegasus, link, net, "Promiscuous mode enabled\n");
+ //netif_info(pegasus, link, net, "Promiscuous mode enabled\n");
} else if (!netdev_mc_empty(net) || (net->flags & IFF_ALLMULTI)) {
pegasus->eth_regs[EthCtrl0] |= RX_MULTICAST;
pegasus->eth_regs[EthCtrl2] &= ~RX_PROMISCUOUS;
--- a/drivers/net/ethernet/realtek/r8169_main.c 2020-08-13 13:15:44.478068638 +0200
+++ b/drivers/net/ethernet/realtek/r8169_main.c 2020-08-13 13:15:59.181820450 +0200
@@ -4313,7 +4313,7 @@
if (dev->flags & IFF_PROMISC) {
/* Unconditionally log net taps. */
- netif_notice(tp, link, dev, "Promiscuous mode enabled\n");
+ //netif_notice(tp, link, dev, "Promiscuous mode enabled\n");
rx_mode |= AcceptAllPhys;
} else if (netdev_mc_count(dev) > MC_FILTER_LIMIT ||
dev->flags & IFF_ALLMULTI ||

@@ -0,0 +1,50 @@
Index: linux-5.4.124/drivers/mtd/spi-nor/spi-nor.c
===================================================================
--- linux-5.4.124.orig/drivers/mtd/spi-nor/spi-nor.c
+++ linux-5.4.124/drivers/mtd/spi-nor/spi-nor.c
@@ -2241,6 +2241,9 @@ static const struct flash_info spi_nor_i
.fixups = &gd25q256_fixups,
},
+ /* Zbit */
+ { "zb25vq128", INFO(0xC84018, 0, 64 * 1024, 64, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+
/* Intel/Numonyx -- xxxs33b */
{ "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) },
{ "320s33b", INFO(0x898912, 0, 64 * 1024, 64, 0) },
@@ -2405,6 +2408,7 @@ static const struct flash_info spi_nor_i
{ "m25p32", INFO(0x202016, 0, 64 * 1024, 64, 0) },
{ "m25p64", INFO(0x202017, 0, 64 * 1024, 128, 0) },
{ "m25p128", INFO(0x202018, 0, 256 * 1024, 64, 0) },
+ { "n25q032", INFO(0x20ba16, 0, 64 * 1024, 64, 0) },
{ "m25p05-nonjedec", INFO(0, 0, 32 * 1024, 2, 0) },
{ "m25p10-nonjedec", INFO(0, 0, 32 * 1024, 4, 0) },
@@ -2508,10 +2512,14 @@ static const struct flash_info spi_nor_i
{ "XM25QH128A", INFO(0x207018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
/* XTX Technology (Shenzhen) Limited */
+ { "XT25F128A", INFO(0x207018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ "xt25f128b", INFO(0x0B4018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
{ },
};
+static const struct flash_info spi_nor_unknown_id =
+ { "undefined", INFO(0x0, 0, 64 * 1024, 256, SECT_4K) };
+
static const struct flash_info *spi_nor_read_id(struct spi_nor *nor)
{
int tmp;
@@ -2542,9 +2550,12 @@ static const struct flash_info *spi_nor_
return &spi_nor_ids[tmp];
}
}
+ return &spi_nor_unknown_id; // for Teltonikia RUT devices
+ /*
dev_err(nor->dev, "unrecognized JEDEC id bytes: %*ph\n",
SPI_NOR_MAX_ID_LEN, id);
return ERR_PTR(-ENODEV);
+ */
}
static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
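For readers unfamiliar with the table: INFO(jedec_id, ext_id, sector_size, n_sectors, flags) describes one part, and capacity is sector_size * n_sectors. Worked example for the n25q032 entry added above:

	/* 64 KiB sectors x 64 sectors = 4 MiB = 32 Mbit, matching the
	 * "032" (32 Mbit) in the part name */
	{ "n25q032", INFO(0x20ba16, 0, 64 * 1024, 64, 0) },

The spi_nor_unknown_id fallback trades safety for bring-up convenience: any unrecognized JEDEC id now probes as a generic 16 MiB (64 KiB x 256) part with 4K erase support instead of failing with -ENODEV.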

@@ -0,0 +1,198 @@
From a07e31adf2753cad2fd9790db5bfc047c81e8152 Mon Sep 17 00:00:00 2001
From: Felix Matouschek <felix@matouschek.org>
Date: Fri, 2 Jul 2021 20:31:23 +0200
Subject: [PATCH] mtd: spinand: Add support for XTX XT26G0xA
Add support for XTX Technology XT26G01AXXXXX, XT26G02AXXXXX and
XT26G04AXXXXX SPI NAND.
These are 3V, 1G/2G/4Gbit serial SLC NAND flash devices with on-die ECC
(8bit strength per 512bytes).
Tested on Teltonika RUTX10 flashed with OpenWrt.
Datasheets available at
http://www.xtxtech.com/download/?AId=225
https://datasheet.lcsc.com/szlcsc/2005251034_XTX-XT26G01AWSEGA_C558841.pdf
Signed-off-by: Felix Matouschek <felix@matouschek.org>
---
drivers/mtd/nand/spi/Makefile | 2 +-
drivers/mtd/nand/spi/core.c | 1 +
drivers/mtd/nand/spi/xtx.c | 122 ++++++++++++++++++++++++++++++++++
include/linux/mtd/spinand.h | 1 +
4 files changed, 125 insertions(+), 1 deletion(-)
create mode 100644 drivers/mtd/nand/spi/xtx.c
Index: linux-5.4.132/drivers/mtd/nand/spi/Makefile
===================================================================
--- linux-5.4.132.orig/drivers/mtd/nand/spi/Makefile
+++ linux-5.4.132/drivers/mtd/nand/spi/Makefile
@@ -1,3 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
-spinand-objs := core.o gigadevice.o macronix.o micron.o paragon.o toshiba.o winbond.o
+spinand-objs := core.o gigadevice.o macronix.o micron.o paragon.o toshiba.o winbond.o xtx.o
obj-$(CONFIG_MTD_SPI_NAND) += spinand.o
Index: linux-5.4.132/drivers/mtd/nand/spi/core.c
===================================================================
--- linux-5.4.132.orig/drivers/mtd/nand/spi/core.c
+++ linux-5.4.132/drivers/mtd/nand/spi/core.c
@@ -758,6 +758,7 @@ static const struct spinand_manufacturer
&paragon_spinand_manufacturer,
&toshiba_spinand_manufacturer,
&winbond_spinand_manufacturer,
+ &xtx_spinand_manufacturer,
};
static int spinand_manufacturer_detect(struct spinand_device *spinand)
Index: linux-5.4.132/drivers/mtd/nand/spi/xtx.c
===================================================================
--- /dev/null
+++ linux-5.4.132/drivers/mtd/nand/spi/xtx.c
@@ -0,0 +1,134 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Author:
+ * Felix Matouschek <felix@matouschek.org>
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/mtd/spinand.h>
+
+#define SPINAND_MFR_XTX 0x0B
+
+#define XT26G0XA_STATUS_ECC_MASK GENMASK(5, 2)
+#define XT26G0XA_STATUS_ECC_NO_DETECTED (0 << 2)
+#define XT26G0XA_STATUS_ECC_8_CORRECTED (3 << 4)
+#define XT26G0XA_STATUS_ECC_UNCOR_ERROR (2 << 4)
+
+static SPINAND_OP_VARIANTS(read_cache_variants,
+ SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
+ SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+
+static SPINAND_OP_VARIANTS(write_cache_variants,
+ SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
+ SPINAND_PROG_LOAD(true, 0, NULL, 0));
+
+static SPINAND_OP_VARIANTS(update_cache_variants,
+ SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
+ SPINAND_PROG_LOAD(false, 0, NULL, 0));
+
+static int xt26g0xa_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section)
+ return -ERANGE;
+
+ region->offset = 8;
+ region->length = 40;
+
+ return 0;
+}
+
+static int xt26g0xa_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *region)
+{
+ if (section)
+ return -ERANGE;
+
+ region->offset = 1;
+ region->length = 7;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops xt26g0xa_ooblayout = {
+ .ecc = xt26g0xa_ooblayout_ecc,
+ .free = xt26g0xa_ooblayout_free,
+};
+
+static int xt26g0xa_ecc_get_status(struct spinand_device *spinand,
+ u8 status)
+{
+ switch (status & XT26G0XA_STATUS_ECC_MASK) {
+ case XT26G0XA_STATUS_ECC_NO_DETECTED:
+ return 0;
+ case XT26G0XA_STATUS_ECC_8_CORRECTED:
+ return 8;
+ case XT26G0XA_STATUS_ECC_UNCOR_ERROR:
+ return -EBADMSG;
+ default: /* (1 << 2) through (7 << 2) are 1-7 corrected errors */
+ return (status & XT26G0XA_STATUS_ECC_MASK) >> 2;
+ }
+
+ return -EINVAL;
+}
+
+static const struct spinand_info xtx_spinand_table[] = {
+ SPINAND_INFO("XT26G01A", 0xE1,
+ NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&xt26g0xa_ooblayout,
+ xt26g0xa_ecc_get_status)),
+ SPINAND_INFO("XT26G02A", 0xE2,
+ NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&xt26g0xa_ooblayout,
+ xt26g0xa_ecc_get_status)),
+ SPINAND_INFO("XT26G04A", 0xE3,
+ NAND_MEMORG(1, 2048, 64, 128, 2048, 40, 1, 1, 1),
+ NAND_ECCREQ(8, 512),
+ SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+ &write_cache_variants,
+ &update_cache_variants),
+ SPINAND_HAS_QE_BIT,
+ SPINAND_ECCINFO(&xt26g0xa_ooblayout,
+ xt26g0xa_ecc_get_status)),
+};
+
+static int xtx_spinand_detect(struct spinand_device *spinand)
+{
+ u8 *id = spinand->id.data;
+ int ret;
+
+ if (id[1] != SPINAND_MFR_XTX)
+ return 0;
+
+ ret = spinand_match_and_init(spinand, xtx_spinand_table,
+ ARRAY_SIZE(xtx_spinand_table), id[2]);
+ if (ret)
+ return ret;
+
+ return 1;
+}
+
+static const struct spinand_manufacturer_ops xtx_spinand_manuf_ops = {
+ .detect = xtx_spinand_detect,
+};
+
+const struct spinand_manufacturer xtx_spinand_manufacturer = {
+ .id = SPINAND_MFR_XTX,
+ .name = "XTX",
+ .ops = &xtx_spinand_manuf_ops,
+};
Index: linux-5.4.132/include/linux/mtd/spinand.h
===================================================================
--- linux-5.4.132.orig/include/linux/mtd/spinand.h
+++ linux-5.4.132/include/linux/mtd/spinand.h
@@ -230,6 +230,7 @@ extern const struct spinand_manufacturer
extern const struct spinand_manufacturer paragon_spinand_manufacturer;
extern const struct spinand_manufacturer toshiba_spinand_manufacturer;
extern const struct spinand_manufacturer winbond_spinand_manufacturer;
+extern const struct spinand_manufacturer xtx_spinand_manufacturer;
/**
* struct spinand_op_variants - SPI NAND operation variants
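A worked decode of xt26g0xa_ecc_get_status() above (GENMASK(5, 2) = 0x3C, i.e. the ECC result lives in status bits 5:2):

	/* status & 0x3C:
	 *   0x00          -> 0 bitflips detected
	 *   0x04..0x1C    -> default branch: (status & 0x3C) >> 2 = 1..7 corrected
	 *   0x30 (3 << 4) -> 8 bitflips corrected (the full ECC strength)
	 *   0x20 (2 << 4) -> -EBADMSG, uncorrectable
	 */

The trailing return -EINVAL is unreachable, since the switch covers every masked value.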

@@ -0,0 +1,554 @@
From: Pablo Neira Ayuso <pablo@netfilter.org>
Date: Thu, 11 Jan 2018 16:32:00 +0100
Subject: [PATCH] netfilter: nf_flow_table: add hardware offload support
This patch adds the infrastructure to offload flows to hardware, in case
the nic/switch comes with built-in flow tables capabilities.
If the hardware comes with no hardware flow tables or they have
limitations in terms of features, the existing infrastructure falls back
to the software flow table implementation.
The software flow table garbage collector skips entries that resides in
the hardware, so the hardware will be responsible for releasing this
flow table entry too via flow_offload_dead().
Hardware configuration, either to add or to delete entries, is done from
the hardware offload workqueue, to ensure this is done from user context
given that we may sleep when grabbing the mdio mutex.
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
---
create mode 100644 net/netfilter/nf_flow_table_hw.c
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -922,6 +922,13 @@ struct devlink;
struct tlsdev_ops;
+struct flow_offload;
+
+enum flow_offload_type {
+ FLOW_OFFLOAD_ADD = 0,
+ FLOW_OFFLOAD_DEL,
+};
+
/*
* This structure defines the management hooks for network devices.
* The following hooks can be defined; unless noted otherwise, they are
@@ -1154,6 +1161,10 @@ struct tlsdev_ops;
* int (*ndo_bridge_dellink)(struct net_device *dev, struct nlmsghdr *nlh,
* u16 flags);
*
+ * int (*ndo_flow_offload)(enum flow_offload_type type,
+ * struct flow_offload *flow);
+ * Adds/deletes flow entry to/from net device flowtable.
+ *
* int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier);
* Called to change device carrier. Soft-devices (like dummy, team, etc)
* which do not represent real hardware may define this to allow their
@@ -1401,6 +1412,8 @@ struct net_device_ops {
int (*ndo_bridge_dellink)(struct net_device *dev,
struct nlmsghdr *nlh,
u16 flags);
+ int (*ndo_flow_offload)(enum flow_offload_type type,
+ struct flow_offload *flow);
int (*ndo_change_carrier)(struct net_device *dev,
bool new_carrier);
int (*ndo_get_phys_port_id)(struct net_device *dev,
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -21,11 +21,17 @@ struct nf_flowtable_type {
struct module *owner;
};
+enum nf_flowtable_flags {
+ NF_FLOWTABLE_F_HW = 0x1,
+};
+
struct nf_flowtable {
struct list_head list;
struct rhashtable rhashtable;
const struct nf_flowtable_type *type;
+ u32 flags;
struct delayed_work gc_work;
+ possible_net_t ft_net;
};
enum flow_offload_tuple_dir {
@@ -68,6 +74,7 @@ struct flow_offload_tuple_rhash {
#define FLOW_OFFLOAD_DNAT 0x2
#define FLOW_OFFLOAD_DYING 0x4
#define FLOW_OFFLOAD_TEARDOWN 0x8
+#define FLOW_OFFLOAD_HW 0x10
struct flow_offload {
struct flow_offload_tuple_rhash tuplehash[FLOW_OFFLOAD_DIR_MAX];
@@ -120,6 +127,22 @@ unsigned int nf_flow_offload_ip_hook(voi
unsigned int nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
const struct nf_hook_state *state);
+void nf_flow_offload_hw_add(struct net *net, struct flow_offload *flow,
+ struct nf_conn *ct);
+void nf_flow_offload_hw_del(struct net *net, struct flow_offload *flow);
+
+struct nf_flow_table_hw {
+ struct module *owner;
+ void (*add)(struct net *net, struct flow_offload *flow,
+ struct nf_conn *ct);
+ void (*del)(struct net *net, struct flow_offload *flow);
+};
+
+int nf_flow_table_hw_register(const struct nf_flow_table_hw *offload);
+void nf_flow_table_hw_unregister(const struct nf_flow_table_hw *offload);
+
+extern struct work_struct nf_flow_offload_hw_work;
+
#define MODULE_ALIAS_NF_FLOWTABLE(family) \
MODULE_ALIAS("nf-flowtable-" __stringify(family))
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -1516,6 +1516,7 @@ enum nft_object_attributes {
* @NFTA_FLOWTABLE_HOOK: netfilter hook configuration(NLA_U32)
* @NFTA_FLOWTABLE_USE: number of references to this flow table (NLA_U32)
* @NFTA_FLOWTABLE_HANDLE: object handle (NLA_U64)
+ * @NFTA_FLOWTABLE_FLAGS: flags (NLA_U32)
*/
enum nft_flowtable_attributes {
NFTA_FLOWTABLE_UNSPEC,
@@ -1525,6 +1526,7 @@ enum nft_flowtable_attributes {
NFTA_FLOWTABLE_USE,
NFTA_FLOWTABLE_HANDLE,
NFTA_FLOWTABLE_PAD,
+ NFTA_FLOWTABLE_FLAGS,
__NFTA_FLOWTABLE_MAX
};
#define NFTA_FLOWTABLE_MAX (__NFTA_FLOWTABLE_MAX - 1)
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -711,6 +711,15 @@ config NF_FLOW_TABLE
To compile it as a module, choose M here.
+config NF_FLOW_TABLE_HW
+ tristate "Netfilter flow table hardware offload module"
+ depends on NF_FLOW_TABLE
+ help
+ This option adds hardware offload support for the flow table core
+ infrastructure.
+
+ To compile it as a module, choose M here.
+
config NETFILTER_XTABLES
tristate "Netfilter Xtables support (required for ip_tables)"
default m if NETFILTER_ADVANCED=n
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -123,6 +123,7 @@ obj-$(CONFIG_NF_FLOW_TABLE) += nf_flow_t
nf_flow_table-objs := nf_flow_table_core.o nf_flow_table_ip.o
obj-$(CONFIG_NF_FLOW_TABLE_INET) += nf_flow_table_inet.o
+obj-$(CONFIG_NF_FLOW_TABLE_HW) += nf_flow_table_hw.o
# generic X tables
obj-$(CONFIG_NETFILTER_XTABLES) += x_tables.o xt_tcpudp.o
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -248,10 +248,16 @@ static inline bool nf_flow_has_expired(c
return nf_flow_timeout_delta(flow->timeout) <= 0;
}
+static inline bool nf_flow_in_hw(const struct flow_offload *flow)
+{
+ return flow->flags & FLOW_OFFLOAD_HW;
+}
+
static void flow_offload_del(struct nf_flowtable *flow_table,
struct flow_offload *flow)
{
struct flow_offload_entry *e;
+ struct net *net = read_pnet(&flow_table->ft_net);
rhashtable_remove_fast(&flow_table->rhashtable,
&flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].node,
@@ -271,6 +277,9 @@ static void flow_offload_del(struct nf_f
if (!(flow->flags & FLOW_OFFLOAD_TEARDOWN))
flow_offload_fixup_ct_state(e->ct);
+ if (nf_flow_in_hw(flow))
+ nf_flow_offload_hw_del(net, flow);
+
flow_offload_free(flow);
}
@@ -490,10 +502,43 @@ int nf_flow_dnat_port(const struct flow_
}
EXPORT_SYMBOL_GPL(nf_flow_dnat_port);
+static const struct nf_flow_table_hw __rcu *nf_flow_table_hw_hook __read_mostly;
+
+static int nf_flow_offload_hw_init(struct nf_flowtable *flow_table)
+{
+ const struct nf_flow_table_hw *offload;
+
+ if (!rcu_access_pointer(nf_flow_table_hw_hook))
+ request_module("nf-flow-table-hw");
+
+ rcu_read_lock();
+ offload = rcu_dereference(nf_flow_table_hw_hook);
+ if (!offload)
+ goto err_no_hw_offload;
+
+ if (!try_module_get(offload->owner))
+ goto err_no_hw_offload;
+
+ rcu_read_unlock();
+
+ return 0;
+
+err_no_hw_offload:
+ rcu_read_unlock();
+
+ return -EOPNOTSUPP;
+}
+
int nf_flow_table_init(struct nf_flowtable *flowtable)
{
int err;
+ if (flowtable->flags & NF_FLOWTABLE_F_HW) {
+ err = nf_flow_offload_hw_init(flowtable);
+ if (err)
+ return err;
+ }
+
INIT_DEFERRABLE_WORK(&flowtable->gc_work, nf_flow_offload_work_gc);
err = rhashtable_init(&flowtable->rhashtable,
@@ -534,6 +579,8 @@ static void nf_flow_table_iterate_cleanu
{
nf_flow_table_iterate(flowtable, nf_flow_table_do_cleanup, dev);
flush_delayed_work(&flowtable->gc_work);
+ if (flowtable->flags & NF_FLOWTABLE_F_HW)
+ flush_work(&nf_flow_offload_hw_work);
}
void nf_flow_table_cleanup(struct net_device *dev)
@@ -547,6 +594,26 @@ void nf_flow_table_cleanup(struct net_de
}
EXPORT_SYMBOL_GPL(nf_flow_table_cleanup);
+struct work_struct nf_flow_offload_hw_work;
+EXPORT_SYMBOL_GPL(nf_flow_offload_hw_work);
+
+/* Give the hardware workqueue the chance to remove entries from hardware.*/
+static void nf_flow_offload_hw_free(struct nf_flowtable *flowtable)
+{
+ const struct nf_flow_table_hw *offload;
+
+ flush_work(&nf_flow_offload_hw_work);
+
+ rcu_read_lock();
+ offload = rcu_dereference(nf_flow_table_hw_hook);
+ if (!offload) {
+ rcu_read_unlock();
+ return;
+ }
+ module_put(offload->owner);
+ rcu_read_unlock();
+}
+
void nf_flow_table_free(struct nf_flowtable *flow_table)
{
mutex_lock(&flowtable_lock);
@@ -556,9 +623,58 @@ void nf_flow_table_free(struct nf_flowta
nf_flow_table_iterate(flow_table, nf_flow_table_do_cleanup, NULL);
nf_flow_table_iterate(flow_table, nf_flow_offload_gc_step, flow_table);
rhashtable_destroy(&flow_table->rhashtable);
+ if (flow_table->flags & NF_FLOWTABLE_F_HW)
+ nf_flow_offload_hw_free(flow_table);
}
EXPORT_SYMBOL_GPL(nf_flow_table_free);
+/* Must be called from user context. */
+void nf_flow_offload_hw_add(struct net *net, struct flow_offload *flow,
+ struct nf_conn *ct)
+{
+ const struct nf_flow_table_hw *offload;
+
+ rcu_read_lock();
+ offload = rcu_dereference(nf_flow_table_hw_hook);
+ if (offload)
+ offload->add(net, flow, ct);
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(nf_flow_offload_hw_add);
+
+/* Must be called from user context. */
+void nf_flow_offload_hw_del(struct net *net, struct flow_offload *flow)
+{
+ const struct nf_flow_table_hw *offload;
+
+ rcu_read_lock();
+ offload = rcu_dereference(nf_flow_table_hw_hook);
+ if (offload)
+ offload->del(net, flow);
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(nf_flow_offload_hw_del);
+
+int nf_flow_table_hw_register(const struct nf_flow_table_hw *offload)
+{
+ if (rcu_access_pointer(nf_flow_table_hw_hook))
+ return -EBUSY;
+
+ rcu_assign_pointer(nf_flow_table_hw_hook, offload);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nf_flow_table_hw_register);
+
+void nf_flow_table_hw_unregister(const struct nf_flow_table_hw *offload)
+{
+ WARN_ON(rcu_access_pointer(nf_flow_table_hw_hook) != offload);
+ rcu_assign_pointer(nf_flow_table_hw_hook, NULL);
+
+ synchronize_rcu();
+}
+EXPORT_SYMBOL_GPL(nf_flow_table_hw_unregister);
+
static int nf_flow_table_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
--- /dev/null
+++ b/net/netfilter/nf_flow_table_hw.c
@@ -0,0 +1,169 @@
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netfilter.h>
+#include <linux/rhashtable.h>
+#include <linux/netdevice.h>
+#include <net/netfilter/nf_flow_table.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack_tuple.h>
+
+static DEFINE_SPINLOCK(flow_offload_hw_pending_list_lock);
+static LIST_HEAD(flow_offload_hw_pending_list);
+
+static DEFINE_MUTEX(nf_flow_offload_hw_mutex);
+
+struct flow_offload_hw {
+ struct list_head list;
+ enum flow_offload_type type;
+ struct flow_offload *flow;
+ struct nf_conn *ct;
+ possible_net_t flow_hw_net;
+};
+
+static int do_flow_offload_hw(struct net *net, struct flow_offload *flow,
+ int type)
+{
+ struct net_device *indev;
+ int ret, ifindex;
+
+ ifindex = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.iifidx;
+ indev = dev_get_by_index(net, ifindex);
+ if (WARN_ON(!indev))
+ return 0;
+
+ mutex_lock(&nf_flow_offload_hw_mutex);
+ ret = indev->netdev_ops->ndo_flow_offload(type, flow);
+ mutex_unlock(&nf_flow_offload_hw_mutex);
+
+ dev_put(indev);
+
+ return ret;
+}
+
+static void flow_offload_hw_work_add(struct flow_offload_hw *offload)
+{
+ struct net *net;
+ int ret;
+
+ if (nf_ct_is_dying(offload->ct))
+ return;
+
+ net = read_pnet(&offload->flow_hw_net);
+ ret = do_flow_offload_hw(net, offload->flow, FLOW_OFFLOAD_ADD);
+ if (ret >= 0)
+ offload->flow->flags |= FLOW_OFFLOAD_HW;
+}
+
+static void flow_offload_hw_work_del(struct flow_offload_hw *offload)
+{
+ struct net *net = read_pnet(&offload->flow_hw_net);
+
+ do_flow_offload_hw(net, offload->flow, FLOW_OFFLOAD_DEL);
+}
+
+static void flow_offload_hw_work(struct work_struct *work)
+{
+ struct flow_offload_hw *offload, *next;
+ LIST_HEAD(hw_offload_pending);
+
+ spin_lock_bh(&flow_offload_hw_pending_list_lock);
+ list_replace_init(&flow_offload_hw_pending_list, &hw_offload_pending);
+ spin_unlock_bh(&flow_offload_hw_pending_list_lock);
+
+ list_for_each_entry_safe(offload, next, &hw_offload_pending, list) {
+ switch (offload->type) {
+ case FLOW_OFFLOAD_ADD:
+ flow_offload_hw_work_add(offload);
+ break;
+ case FLOW_OFFLOAD_DEL:
+ flow_offload_hw_work_del(offload);
+ break;
+ }
+ if (offload->ct)
+ nf_conntrack_put(&offload->ct->ct_general);
+ list_del(&offload->list);
+ kfree(offload);
+ }
+}
+
+static void flow_offload_queue_work(struct flow_offload_hw *offload)
+{
+ spin_lock_bh(&flow_offload_hw_pending_list_lock);
+ list_add_tail(&offload->list, &flow_offload_hw_pending_list);
+ spin_unlock_bh(&flow_offload_hw_pending_list_lock);
+
+ schedule_work(&nf_flow_offload_hw_work);
+}
+
+static void flow_offload_hw_add(struct net *net, struct flow_offload *flow,
+ struct nf_conn *ct)
+{
+ struct flow_offload_hw *offload;
+
+ offload = kmalloc(sizeof(struct flow_offload_hw), GFP_ATOMIC);
+ if (!offload)
+ return;
+
+ nf_conntrack_get(&ct->ct_general);
+ offload->type = FLOW_OFFLOAD_ADD;
+ offload->ct = ct;
+ offload->flow = flow;
+ write_pnet(&offload->flow_hw_net, net);
+
+ flow_offload_queue_work(offload);
+}
+
+static void flow_offload_hw_del(struct net *net, struct flow_offload *flow)
+{
+ struct flow_offload_hw *offload;
+
+ offload = kmalloc(sizeof(struct flow_offload_hw), GFP_ATOMIC);
+ if (!offload)
+ return;
+
+ offload->type = FLOW_OFFLOAD_DEL;
+ offload->ct = NULL;
+ offload->flow = flow;
+ write_pnet(&offload->flow_hw_net, net);
+
+ flow_offload_queue_work(offload);
+}
+
+static const struct nf_flow_table_hw flow_offload_hw = {
+ .add = flow_offload_hw_add,
+ .del = flow_offload_hw_del,
+ .owner = THIS_MODULE,
+};
+
+static int __init nf_flow_table_hw_module_init(void)
+{
+ INIT_WORK(&nf_flow_offload_hw_work, flow_offload_hw_work);
+ nf_flow_table_hw_register(&flow_offload_hw);
+
+ return 0;
+}
+
+static void __exit nf_flow_table_hw_module_exit(void)
+{
+ struct flow_offload_hw *offload, *next;
+ LIST_HEAD(hw_offload_pending);
+
+ nf_flow_table_hw_unregister(&flow_offload_hw);
+ cancel_work_sync(&nf_flow_offload_hw_work);
+
+ list_for_each_entry_safe(offload, next, &hw_offload_pending, list) {
+ if (offload->ct)
+ nf_conntrack_put(&offload->ct->ct_general);
+ list_del(&offload->list);
+ kfree(offload);
+ }
+}
+
+module_init(nf_flow_table_hw_module_init);
+module_exit(nf_flow_table_hw_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
+MODULE_ALIAS("nf-flow-table-hw");
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -5743,6 +5743,13 @@ static int nf_tables_flowtable_parse_hoo
if (err < 0)
return err;
+ for (i = 0; i < n; i++) {
+ if (flowtable->data.flags & NF_FLOWTABLE_F_HW &&
+ !dev_array[i]->netdev_ops->ndo_flow_offload) {
+ return -EOPNOTSUPP;
+ }
+ }
+
ops = kcalloc(n, sizeof(struct nf_hook_ops), GFP_KERNEL);
if (!ops)
return -ENOMEM;
@@ -5873,10 +5880,19 @@ static int nf_tables_newflowtable(struct
}
flowtable->data.type = type;
+ write_pnet(&flowtable->data.ft_net, net);
+
err = type->init(&flowtable->data);
if (err < 0)
goto err3;
+ if (nla[NFTA_FLOWTABLE_FLAGS]) {
+ flowtable->data.flags =
+ ntohl(nla_get_be32(nla[NFTA_FLOWTABLE_FLAGS]));
+ if (flowtable->data.flags & ~NF_FLOWTABLE_F_HW)
+ goto err4;
+ }
+
err = nf_tables_flowtable_parse_hook(&ctx, nla[NFTA_FLOWTABLE_HOOK],
flowtable);
if (err < 0)
@@ -6002,7 +6018,8 @@ static int nf_tables_fill_flowtable_info
nla_put_string(skb, NFTA_FLOWTABLE_NAME, flowtable->name) ||
nla_put_be32(skb, NFTA_FLOWTABLE_USE, htonl(flowtable->use)) ||
nla_put_be64(skb, NFTA_FLOWTABLE_HANDLE, cpu_to_be64(flowtable->handle),
- NFTA_FLOWTABLE_PAD))
+ NFTA_FLOWTABLE_PAD) ||
+ nla_put_be32(skb, NFTA_FLOWTABLE_FLAGS, htonl(flowtable->data.flags)))
goto nla_put_failure;
nest = nla_nest_start_noflag(skb, NFTA_FLOWTABLE_HOOK);
--- a/net/netfilter/nft_flow_offload.c
+++ b/net/netfilter/nft_flow_offload.c
@@ -128,6 +128,9 @@ static void nft_flow_offload_eval(const
if (ret < 0)
goto err_flow_add;
+ if (flowtable->flags & NF_FLOWTABLE_F_HW)
+ nf_flow_offload_hw_add(nft_net(pkt), flow, ct);
+
dst_release(route.tuple[!dir].dst);
return;
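On the driver side, opting in means implementing the new ndo_flow_offload() callback. A hypothetical skeleton (driver name illustrative; the callback is invoked from the offload workqueue under nf_flow_offload_hw_mutex, so it may sleep, e.g. to take an mdio lock):

static int mydrv_flow_offload(enum flow_offload_type type,
			      struct flow_offload *flow)
{
	switch (type) {
	case FLOW_OFFLOAD_ADD:
		/* program both directions of flow->tuplehash[] into the
		 * hardware flow table; a non-negative return gets the flow
		 * marked FLOW_OFFLOAD_HW by the workqueue */
		return 0;
	case FLOW_OFFLOAD_DEL:
		/* remove the entry from hardware */
		return 0;
	}

	return -EOPNOTSUPP;
}

static const struct net_device_ops mydrv_netdev_ops = {
	/* ...existing ops... */
	.ndo_flow_offload	= mydrv_flow_offload,
};

Userspace then requests offload via the new NFTA_FLOWTABLE_FLAGS attribute, i.e. by creating the nftables flowtable with the offload flag set.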

@@ -0,0 +1,26 @@
From: Felix Fietkau <nbd@nbd.name>
Date: Sun, 25 Mar 2018 21:10:55 +0200
Subject: [PATCH] netfilter: nf_flow_table: rework hardware offload timeout
handling
Some offload implementations send keepalive packets + explicit
notifications of TCP FIN/RST packets. In this case it is more convenient
to simply let the driver update flow->timeout handling and use the
regular flow offload gc step.
For drivers that manage their own lifetime, a separate flag can be set
to avoid gc timeouts.
Signed-off-by: Felix Fietkau <nbd@nbd.name>
---
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -75,6 +75,7 @@ struct flow_offload_tuple_rhash {
#define FLOW_OFFLOAD_DYING 0x4
#define FLOW_OFFLOAD_TEARDOWN 0x8
#define FLOW_OFFLOAD_HW 0x10
+#define FLOW_OFFLOAD_KEEP 0x20
struct flow_offload {
struct flow_offload_tuple_rhash tuplehash[FLOW_OFFLOAD_DIR_MAX];
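The two lifetimes this enables, as a sketch (assuming the 5.4-era layout above, where flow->timeout is a jiffies-based u32 and the core gc timeout is 30*HZ):

	/* Strategy 1: keepalive-driven. The driver refreshes the timeout and
	 * the regular gc step reclaims the flow once traffic really stops. */
	flow->timeout = (u32)jiffies + 30 * HZ;

	/* Strategy 2: driver-managed. gc skips the timeout; the driver tears
	 * the flow down itself, e.g. on a TCP FIN/RST notification. */
	flow->flags |= FLOW_OFFLOAD_KEEP;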