openmptcprouter/root/target/linux/bcm27xx/patches-5.15/950-0820-mm-page_alloc-cma-introduce-a-customisable-threshold.patch
From 1cad2f1eef8d711c7935fb37bee31556a5d1046a Mon Sep 17 00:00:00 2001
From: David Plowman <david.plowman@raspberrypi.com>
Date: Tue, 29 Mar 2022 16:10:06 +0100
Subject: [PATCH 820/828] mm,page_alloc,cma: introduce a customisable threshold
for allocating pages in cma

On some platforms the cma area can be half the entire system memory,
meaning that allocations start happening in the cma area immediately.
This leads to fragmentation and subsequent fatal cma_alloc failures.
We introduce an "alloc_in_cma_threshold" parameter: more than this
many sixteenths of the free pages must be in cma before the allocator
will try to use them. By default this is set to 12 (i.e. 75%), but the
previous behaviour can be restored by setting it to 8 on startup.

Signed-off-by: David Plowman <david.plowman@raspberrypi.com>
---
 mm/page_alloc.c | 28 +++++++++++++++++++++++++---
 1 file changed, 25 insertions(+), 3 deletions(-)
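
As an illustration of the threshold arithmetic described above (the
figures below are examples, not part of the upstream patch): in a zone
with 1,000,000 free pages and the default alloc_in_cma_threshold of 12,
movable allocations are only taken from the CMA area once the free CMA
pages exceed 1,000,000 / 16 * 12 = 750,000, i.e. 75% of the zone's free
memory. Booting with alloc_in_cma_threshold=8 restores the old "more
than half" (8/16 = 50%) behaviour.
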
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -190,6 +190,27 @@ EXPORT_SYMBOL(init_on_alloc);
 DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free);
 EXPORT_SYMBOL(init_on_free);
 
+#define ALLOC_IN_CMA_THRESHOLD_MAX 16
+#define ALLOC_IN_CMA_THRESHOLD_DEFAULT 12
+
+static unsigned long _alloc_in_cma_threshold __read_mostly
+	= ALLOC_IN_CMA_THRESHOLD_DEFAULT;
+
+static int __init alloc_in_cma_threshold_setup(char *buf)
+{
+	unsigned long res;
+
+	if (kstrtoul(buf, 10, &res) < 0 ||
+	    res > ALLOC_IN_CMA_THRESHOLD_MAX) {
+		pr_err("Bad alloc_cma_threshold value\n");
+		return 0;
+	}
+	_alloc_in_cma_threshold = res;
+	pr_info("Setting alloc_in_cma_threshold to %lu\n", res);
+	return 0;
+}
+early_param("alloc_in_cma_threshold", alloc_in_cma_threshold_setup);
+
 static bool _init_on_alloc_enabled_early __read_mostly
 				= IS_ENABLED(CONFIG_INIT_ON_ALLOC_DEFAULT_ON);
 static int __init early_init_on_alloc(char *buf)
@@ -2980,12 +3001,13 @@ __rmqueue(struct zone *zone, unsigned in
 	if (IS_ENABLED(CONFIG_CMA)) {
 		/*
 		 * Balance movable allocations between regular and CMA areas by
-		 * allocating from CMA when over half of the zone's free memory
-		 * is in the CMA area.
+		 * allocating from CMA when over more than a given proportion of
+		 * the zone's free memory is in the CMA area.
 		 */
 		if (alloc_flags & ALLOC_CMA &&
 		    zone_page_state(zone, NR_FREE_CMA_PAGES) >
-		    zone_page_state(zone, NR_FREE_PAGES) / 2) {
+		    zone_page_state(zone, NR_FREE_PAGES) / ALLOC_IN_CMA_THRESHOLD_MAX
+		    * _alloc_in_cma_threshold) {
			page = __rmqueue_cma_fallback(zone, order);
			if (page)
				goto out;
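
To see how the patched comparison in __rmqueue() behaves, the following
is a minimal stand-alone C sketch of the check; the should_alloc_in_cma()
helper and the sample numbers are illustrative and are not part of the
patch itself.

/* Stand-alone model of the CMA-balancing check added to __rmqueue(). */
#include <stdbool.h>
#include <stdio.h>

#define ALLOC_IN_CMA_THRESHOLD_MAX 16

/* Would the allocator try the CMA area first for a movable allocation? */
static bool should_alloc_in_cma(unsigned long free_pages,
				unsigned long free_cma_pages,
				unsigned long threshold /* in sixteenths */)
{
	/*
	 * Same shape as the patched condition: divide by 16 first, then
	 * multiply by the threshold, so the integer truncation matches
	 * what the kernel computes.
	 */
	return free_cma_pages >
	       free_pages / ALLOC_IN_CMA_THRESHOLD_MAX * threshold;
}

int main(void)
{
	/* A zone with 1,000,000 free pages, 700,000 of them in CMA. */
	printf("threshold 12: %d\n",
	       should_alloc_in_cma(1000000, 700000, 12)); /* 0: 700000 <= 750000 */
	printf("threshold  8: %d\n",
	       should_alloc_in_cma(1000000, 700000, 8));  /* 1: 700000 > 500000 */
	return 0;
}

In an image carrying this patch, the threshold is selected by appending
alloc_in_cma_threshold=<0..16> to the kernel command line (on Raspberry
Pi builds this is typically cmdline.txt in the boot partition), with
alloc_in_cma_threshold=8 giving the pre-patch 50% behaviour.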