
Update Linux 5.4 kernel and clean some files

Ycarus (Yannick Chabanois) 2022-03-16 20:53:52 +01:00
parent 272324aa1a
commit 0a55523ea5
26 changed files with 870 additions and 4059 deletions


@@ -1,53 +0,0 @@
From: Bui Quang Minh @ 2021-01-26 8:26 UTC (permalink / raw)
To: ast, daniel, davem, kuba, hawk, john.fastabend, andrii, kafai,
songliubraving, yhs, kpsingh, jakub, lmb
Cc: netdev, bpf, linux-kernel, minhquangbui99
On a 32-bit architecture, the result of sizeof() is a 32-bit integer,
so the expression becomes a multiplication of two 32-bit integers,
which can potentially lead to integer overflow. As a result,
bpf_map_area_alloc() allocates less memory than needed.

Fix this by casting one operand to u64 (a standalone demonstration of
the overflow follows the patch below).
Signed-off-by: Bui Quang Minh <minhquangbui99@gmail.com>
---
kernel/bpf/devmap.c | 4 ++--
net/core/sock_map.c | 2 +-
2 files changed, 3 insertions(+), 3 deletions(-)
Index: linux-5.4.147/kernel/bpf/devmap.c
===================================================================
--- linux-5.4.147.orig/kernel/bpf/devmap.c
+++ linux-5.4.147/kernel/bpf/devmap.c
@@ -94,7 +94,7 @@ static struct hlist_head *dev_map_create
 	int i;
 	struct hlist_head *hash;
 
-	hash = bpf_map_area_alloc(entries * sizeof(*hash), numa_node);
+	hash = bpf_map_area_alloc((u64) entries * sizeof(*hash), numa_node);
 	if (hash != NULL)
 		for (i = 0; i < entries; i++)
 			INIT_HLIST_HEAD(&hash[i]);
@@ -159,7 +159,7 @@ static int dev_map_init_map(struct bpf_d
 
 		spin_lock_init(&dtab->index_lock);
 	} else {
-		dtab->netdev_map = bpf_map_area_alloc(dtab->map.max_entries *
+		dtab->netdev_map = bpf_map_area_alloc((u64) dtab->map.max_entries *
 						      sizeof(struct bpf_dtab_netdev *),
 						      dtab->map.numa_node);
 		if (!dtab->netdev_map)
Index: linux-5.4.147/net/core/sock_map.c
===================================================================
--- linux-5.4.147.orig/net/core/sock_map.c
+++ linux-5.4.147/net/core/sock_map.c
@@ -48,7 +48,7 @@ static struct bpf_map *sock_map_alloc(un
 	if (err)
 		goto free_stab;
 
-	stab->sks = bpf_map_area_alloc(stab->map.max_entries *
+	stab->sks = bpf_map_area_alloc((u64) stab->map.max_entries *
 				       sizeof(struct sock *),
 				       stab->map.numa_node);
 	if (stab->sks)
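
The overflow above is easy to reproduce in user space. Below is a
minimal standalone sketch (not part of the patch; the names and values
are hypothetical) showing how the 32-bit multiplication wraps before it
ever reaches the allocator, and how widening one operand fixes it:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical map parameters: 2^30 entries of 8 bytes each. */
	uint32_t entries = 1U << 30;
	uint32_t elem_size = 8;

	/* Both operands are 32-bit, so the product wraps around to 0
	 * before it can be passed to the allocator. */
	uint32_t wrapped = entries * elem_size;

	/* Casting one operand to a 64-bit type widens the whole
	 * multiplication and preserves the real 2^33-byte size. */
	uint64_t widened = (uint64_t)entries * elem_size;

	printf("wrapped: %u\n", wrapped);                       /* 0 */
	printf("widened: %llu\n", (unsigned long long)widened); /* 8589934592 */
	return 0;
}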


@@ -1,115 +0,0 @@
From: Felix Fietkau <nbd@nbd.name>
Date: Wed, 13 Jun 2018 12:33:39 +0200
Subject: [PATCH] netfilter: nf_flow_table: fix offloaded connection timeout
corner case
The full teardown of offloaded flows is deferred to a gc work item;
however, netfilter needs to process packets immediately after a
teardown is requested, because the conntrack state needs to be fixed
up.

Since the IPS_OFFLOAD_BIT is kept until the teardown is complete, the
netfilter conntrack gc can accidentally bump the timeout of a
connection whose offload was just stopped, causing a conntrack entry
leak.

Fix this by moving the conntrack timeout bumping from the conntrack
core to nf_flow_offload and adding a check to prevent bogus timeout
bumps (a standalone sketch of the new gc-step logic follows the patch
below).
Signed-off-by: Felix Fietkau <nbd@nbd.name>
---
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -1207,18 +1207,6 @@ static bool gc_worker_can_early_drop(con
 	return false;
 }
 
-#define DAY	(86400 * HZ)
-
-/* Set an arbitrary timeout large enough not to ever expire, this save
- * us a check for the IPS_OFFLOAD_BIT from the packet path via
- * nf_ct_is_expired().
- */
-static void nf_ct_offload_timeout(struct nf_conn *ct)
-{
-	if (nf_ct_expires(ct) < DAY / 2)
-		ct->timeout = nfct_time_stamp + DAY;
-}
-
 static void gc_worker(struct work_struct *work)
 {
 	unsigned long end_time = jiffies + GC_SCAN_MAX_DURATION;
@@ -1250,11 +1238,9 @@ static void gc_worker(struct work_struct
 			tmp = nf_ct_tuplehash_to_ctrack(h);
 
 			scanned++;
-			if (test_bit(IPS_OFFLOAD_BIT, &tmp->status)) {
-				nf_ct_offload_timeout(tmp);
+			if (test_bit(IPS_OFFLOAD_BIT, &tmp->status))
 				continue;
-			}
 
 			if (nf_ct_is_expired(tmp)) {
 				nf_ct_gc_expired(tmp);
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -198,10 +198,29 @@ static const struct rhashtable_params nf
 	.automatic_shrinking	= true,
 };
 
+#define DAY	(86400 * HZ)
+
+/* Set an arbitrary timeout large enough not to ever expire, this save
+ * us a check for the IPS_OFFLOAD_BIT from the packet path via
+ * nf_ct_is_expired().
+ */
+static void nf_ct_offload_timeout(struct flow_offload *flow)
+{
+	struct flow_offload_entry *entry;
+	struct nf_conn *ct;
+
+	entry = container_of(flow, struct flow_offload_entry, flow);
+	ct = entry->ct;
+
+	if (nf_ct_expires(ct) < DAY / 2)
+		ct->timeout = nfct_time_stamp + DAY;
+}
+
 int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
 {
 	int err;
 
+	nf_ct_offload_timeout(flow);
 	flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
 
 	err = rhashtable_insert_fast(&flow_table->rhashtable,
@@ -304,6 +323,7 @@ nf_flow_table_iterate(struct nf_flowtabl
 	rhashtable_walk_start(&hti);
 
 	while ((tuplehash = rhashtable_walk_next(&hti))) {
+
 		if (IS_ERR(tuplehash)) {
 			if (PTR_ERR(tuplehash) != -EAGAIN) {
 				err = PTR_ERR(tuplehash);
@@ -328,10 +348,17 @@ static void nf_flow_offload_gc_step(stru
 {
 	struct nf_flowtable *flow_table = data;
 	struct flow_offload_entry *e;
+	bool teardown;
 
 	e = container_of(flow, struct flow_offload_entry, flow);
-	if (nf_flow_has_expired(flow) || nf_ct_is_dying(e->ct) ||
-	    (flow->flags & (FLOW_OFFLOAD_DYING | FLOW_OFFLOAD_TEARDOWN)))
+
+	teardown = flow->flags & (FLOW_OFFLOAD_DYING |
+				  FLOW_OFFLOAD_TEARDOWN);
+
+	if (!teardown)
+		nf_ct_offload_timeout(flow);
+
+	if (nf_flow_has_expired(flow) || teardown)
 		flow_offload_del(flow_table, flow);
 }
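
To make the ordering concrete, here is a minimal user-space sketch of
the gc-step decision introduced above (hypothetical types, flags, and
values, not kernel code): the conntrack timeout is refreshed only while
no teardown is pending, so a flow being handed back to conntrack can
expire normally.

#include <stdbool.h>
#include <stdio.h>

#define FLOW_DYING    0x1
#define FLOW_TEARDOWN 0x2
#define DAY           86400UL	/* pretend HZ == 1 for the sketch */

struct fake_flow {
	unsigned int flags;
	unsigned long ct_timeout;	/* stands in for e->ct->timeout */
};

/* Mirrors the shape of the patched nf_flow_offload_gc_step(): bump the
 * conntrack timeout only while the flow is still fully offloaded. */
static void gc_step(struct fake_flow *f, unsigned long now)
{
	bool teardown = f->flags & (FLOW_DYING | FLOW_TEARDOWN);

	if (!teardown && f->ct_timeout - now < DAY / 2)
		f->ct_timeout = now + DAY;
}

int main(void)
{
	struct fake_flow active = { 0, 1000 };
	struct fake_flow torn   = { FLOW_TEARDOWN, 1000 };

	gc_step(&active, 900);	/* refreshed: becomes 900 + DAY */
	gc_step(&torn, 900);	/* untouched: conntrack can expire it */

	printf("active: %lu, torn down: %lu\n",
	       active.ct_timeout, torn.ct_timeout);
	return 0;
}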