aboutsummaryrefslogtreecommitdiffstats
path: root/target/linux/bcm27xx/patches-5.15/950-0800-mm-page_alloc-cma-introduce-a-customisable-threshold.patch
diff options
context:
space:
mode:
Diffstat (limited to 'target/linux/bcm27xx/patches-5.15/950-0800-mm-page_alloc-cma-introduce-a-customisable-threshold.patch')
-rw-r--r--target/linux/bcm27xx/patches-5.15/950-0800-mm-page_alloc-cma-introduce-a-customisable-threshold.patch67
1 files changed, 67 insertions, 0 deletions
diff --git a/target/linux/bcm27xx/patches-5.15/950-0800-mm-page_alloc-cma-introduce-a-customisable-threshold.patch b/target/linux/bcm27xx/patches-5.15/950-0800-mm-page_alloc-cma-introduce-a-customisable-threshold.patch
new file mode 100644
index 0000000000..91ec85a0b2
--- /dev/null
+++ b/target/linux/bcm27xx/patches-5.15/950-0800-mm-page_alloc-cma-introduce-a-customisable-threshold.patch
@@ -0,0 +1,67 @@
+From 6896ae528e4654e6f4bdff575337237f40de48b2 Mon Sep 17 00:00:00 2001
+From: David Plowman <david.plowman@raspberrypi.com>
+Date: Tue, 29 Mar 2022 16:10:06 +0100
+Subject: [PATCH] mm,page_alloc,cma: introduce a customisable threshold
+ for allocating pages in cma
+
+On some platforms the cma area can be half the entire system memory,
+meaning that allocations start happening in the cma area immediately.
+This leads to fragmentation and subsequent fatal cma_alloc failures.
+
+We introduce an "alloc_in_cma_threshold" parameter which requires that
+this many sixteenths of the free pages must be in cma before it will
+try to use them. By default this is set to 12, but the previous
+behaviour can be restored by setting it to 8 on startup.
+
+Signed-off-by: David Plowman <david.plowman@raspberrypi.com>
+---
+ mm/page_alloc.c | 28 +++++++++++++++++++++++++---
+ 1 file changed, 25 insertions(+), 3 deletions(-)
+
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -190,6 +190,27 @@ EXPORT_SYMBOL(init_on_alloc);
+ DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free);
+ EXPORT_SYMBOL(init_on_free);
+
++#define ALLOC_IN_CMA_THRESHOLD_MAX 16
++#define ALLOC_IN_CMA_THRESHOLD_DEFAULT 12
++
++static unsigned long _alloc_in_cma_threshold __read_mostly
++ = ALLOC_IN_CMA_THRESHOLD_DEFAULT;
++
++static int __init alloc_in_cma_threshold_setup(char *buf)
++{
++ unsigned long res;
++
++ if (kstrtoul(buf, 10, &res) < 0 ||
++ res > ALLOC_IN_CMA_THRESHOLD_MAX) {
+	pr_err("Bad alloc_in_cma_threshold value\n");
++ return 0;
++ }
++ _alloc_in_cma_threshold = res;
++ pr_info("Setting alloc_in_cma_threshold to %lu\n", res);
++ return 0;
++}
++early_param("alloc_in_cma_threshold", alloc_in_cma_threshold_setup);
++
+ static bool _init_on_alloc_enabled_early __read_mostly
+ = IS_ENABLED(CONFIG_INIT_ON_ALLOC_DEFAULT_ON);
+ static int __init early_init_on_alloc(char *buf)
+@@ -2980,12 +3001,13 @@ __rmqueue(struct zone *zone, unsigned in
+ if (IS_ENABLED(CONFIG_CMA)) {
+ /*
+ * Balance movable allocations between regular and CMA areas by
+- * allocating from CMA when over half of the zone's free memory
+- * is in the CMA area.
+	 * allocating from CMA when more than a given proportion of
++ * the zone's free memory is in the CMA area.
+ */
+ if (alloc_flags & ALLOC_CMA &&
+ zone_page_state(zone, NR_FREE_CMA_PAGES) >
+- zone_page_state(zone, NR_FREE_PAGES) / 2) {
++ zone_page_state(zone, NR_FREE_PAGES) / ALLOC_IN_CMA_THRESHOLD_MAX
++ * _alloc_in_cma_threshold) {
+ page = __rmqueue_cma_fallback(zone, order);
+ if (page)
+ goto out;