1
0
Fork 0
mirror of https://github.com/Ysurac/openmptcprouter.git synced 2025-03-09 15:40:20 +00:00
openmptcprouter/6.12/target/linux/bcm27xx/patches-6.12/950-0278-mm-page_alloc-cma-introduce-a-customisable-threshold.patch
Ycarus (Yannick Chabanois) bdb9b0046f Add bcm27xx 6.12 test support
2024-12-20 14:17:26 +01:00

67 lines
2.4 KiB
Diff

From d2c8004ece45fcaa34ae36b8e2f7918e6f0c804d Mon Sep 17 00:00:00 2001
From: David Plowman <david.plowman@raspberrypi.com>
Date: Tue, 29 Mar 2022 16:10:06 +0100
Subject: [PATCH 278/697] mm,page_alloc,cma: introduce a customisable threshold
for allocating pages in cma
On some platforms the cma area can be half the entire system memory,
meaning that allocations start happening in the cma area immediately.
This leads to fragmentation and subsequent fatal cma_alloc failures.
We introduce an "alloc_in_cma_threshold" parameter which requires that
this many sixteenths of the free pages must be in CMA before it will
try to use them. By default this is set to 12, but the previous
behaviour can be restored by setting it to 8 on startup.
Signed-off-by: David Plowman <david.plowman@raspberrypi.com>
---
mm/page_alloc.c | 28 +++++++++++++++++++++++++---
1 file changed, 25 insertions(+), 3 deletions(-)
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -207,6 +207,27 @@ EXPORT_SYMBOL(node_states);
 
 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
 
+#define ALLOC_IN_CMA_THRESHOLD_MAX 16
+#define ALLOC_IN_CMA_THRESHOLD_DEFAULT 12
+
+static unsigned long _alloc_in_cma_threshold __read_mostly
+	= ALLOC_IN_CMA_THRESHOLD_DEFAULT;
+
+static int __init alloc_in_cma_threshold_setup(char *buf)
+{
+	unsigned long res;
+
+	if (kstrtoul(buf, 10, &res) < 0 ||
+	    res > ALLOC_IN_CMA_THRESHOLD_MAX) {
+		pr_err("Bad alloc_in_cma_threshold value\n");
+		return 0;
+	}
+	_alloc_in_cma_threshold = res;
+	pr_info("Setting alloc_in_cma_threshold to %lu\n", res);
+	return 0;
+}
+early_param("alloc_in_cma_threshold", alloc_in_cma_threshold_setup);
+
 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
 unsigned int pageblock_order __read_mostly;
 #endif
@@ -2268,12 +2289,13 @@ __rmqueue(struct zone *zone, unsigned in
 	if (IS_ENABLED(CONFIG_CMA)) {
 		/*
 		 * Balance movable allocations between regular and CMA areas by
-		 * allocating from CMA when over half of the zone's free memory
-		 * is in the CMA area.
+		 * allocating from CMA when more than a given proportion of
+		 * the zone's free memory is in the CMA area.
 		 */
 		if (alloc_flags & ALLOC_CMA &&
 		    zone_page_state(zone, NR_FREE_CMA_PAGES) >
-		    zone_page_state(zone, NR_FREE_PAGES) / 2) {
+		    zone_page_state(zone, NR_FREE_PAGES) / ALLOC_IN_CMA_THRESHOLD_MAX
+		    * _alloc_in_cma_threshold) {
 			page = __rmqueue_cma_fallback(zone, order);
 			if (page)
 				return page;