author		Kazuki H <kazukih0205@gmail.com>	2023-03-21 06:51:03 +0900
committer	Christian Marangi <ansuelsmth@gmail.com>	2023-03-27 14:16:10 +0200
commit		0d0928f58795e336646ad31ea96d2919b5328f39 (patch)
tree		eb321324f622f740f72233d019ef01873a4f97cf /target/linux/generic/backport-5.15/020-v6.3-25-mm-multi-gen-LRU-shuffle-should_run_aging.patch
parent		dc79b51533cfe9a7806353f6c6fd6b22cd80d536 (diff)
download	upstream-0d0928f58795e336646ad31ea96d2919b5328f39.tar.gz
		upstream-0d0928f58795e336646ad31ea96d2919b5328f39.tar.bz2
		upstream-0d0928f58795e336646ad31ea96d2919b5328f39.zip
kernel: Update MGLRU patchset
The current patches are old, update them from mainline.

Backports taken from https://github.com/yuzhaogoogle/linux/commits/mglru-5.15

Tested-by: Kazuki H <kazukih0205@gmail.com> #mt7622/Linksys E8450 UBI
Signed-off-by: Kazuki H <kazukih0205@gmail.com>
Diffstat (limited to 'target/linux/generic/backport-5.15/020-v6.3-25-mm-multi-gen-LRU-shuffle-should_run_aging.patch')
-rw-r--r--	target/linux/generic/backport-5.15/020-v6.3-25-mm-multi-gen-LRU-shuffle-should_run_aging.patch	166
1 file changed, 166 insertions(+), 0 deletions(-)
diff --git a/target/linux/generic/backport-5.15/020-v6.3-25-mm-multi-gen-LRU-shuffle-should_run_aging.patch b/target/linux/generic/backport-5.15/020-v6.3-25-mm-multi-gen-LRU-shuffle-should_run_aging.patch
new file mode 100644
index 0000000000..bb5402a3f2
--- /dev/null
+++ b/target/linux/generic/backport-5.15/020-v6.3-25-mm-multi-gen-LRU-shuffle-should_run_aging.patch
@@ -0,0 +1,166 @@
+From 107d54931df3c28d81648122e219bf0034ef4e99 Mon Sep 17 00:00:00 2001
+From: Yu Zhao <yuzhao@google.com>
+Date: Wed, 21 Dec 2022 21:19:03 -0700
+Subject: [PATCH 25/29] mm: multi-gen LRU: shuffle should_run_aging()
+
+Move should_run_aging() next to its only caller left.
+
+Link: https://lkml.kernel.org/r/20221222041905.2431096-6-yuzhao@google.com
+Signed-off-by: Yu Zhao <yuzhao@google.com>
+Cc: Johannes Weiner <hannes@cmpxchg.org>
+Cc: Jonathan Corbet <corbet@lwn.net>
+Cc: Michael Larabel <Michael@MichaelLarabel.com>
+Cc: Michal Hocko <mhocko@kernel.org>
+Cc: Mike Rapoport <rppt@kernel.org>
+Cc: Roman Gushchin <roman.gushchin@linux.dev>
+Cc: Suren Baghdasaryan <surenb@google.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+ mm/vmscan.c | 124 ++++++++++++++++++++++++++--------------------------
+ 1 file changed, 62 insertions(+), 62 deletions(-)
+
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index 7159436872ba..cb026e2714d7 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -4183,68 +4183,6 @@ static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long max_seq,
+ return true;
+ }
+
+-static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq,
+- struct scan_control *sc, bool can_swap, unsigned long *nr_to_scan)
+-{
+- int gen, type, zone;
+- unsigned long old = 0;
+- unsigned long young = 0;
+- unsigned long total = 0;
+- struct lru_gen_page *lrugen = &lruvec->lrugen;
+- struct mem_cgroup *memcg = lruvec_memcg(lruvec);
+- DEFINE_MIN_SEQ(lruvec);
+-
+- /* whether this lruvec is completely out of cold pages */
+- if (min_seq[!can_swap] + MIN_NR_GENS > max_seq) {
+- *nr_to_scan = 0;
+- return true;
+- }
+-
+- for (type = !can_swap; type < ANON_AND_FILE; type++) {
+- unsigned long seq;
+-
+- for (seq = min_seq[type]; seq <= max_seq; seq++) {
+- unsigned long size = 0;
+-
+- gen = lru_gen_from_seq(seq);
+-
+- for (zone = 0; zone < MAX_NR_ZONES; zone++)
+- size += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L);
+-
+- total += size;
+- if (seq == max_seq)
+- young += size;
+- else if (seq + MIN_NR_GENS == max_seq)
+- old += size;
+- }
+- }
+-
+- /* try to scrape all its memory if this memcg was deleted */
+- *nr_to_scan = mem_cgroup_online(memcg) ? (total >> sc->priority) : total;
+-
+- /*
+- * The aging tries to be lazy to reduce the overhead, while the eviction
+- * stalls when the number of generations reaches MIN_NR_GENS. Hence, the
+- * ideal number of generations is MIN_NR_GENS+1.
+- */
+- if (min_seq[!can_swap] + MIN_NR_GENS < max_seq)
+- return false;
+-
+- /*
+- * It's also ideal to spread pages out evenly, i.e., 1/(MIN_NR_GENS+1)
+- * of the total number of pages for each generation. A reasonable range
+- * for this average portion is [1/MIN_NR_GENS, 1/(MIN_NR_GENS+2)]. The
+- * aging cares about the upper bound of hot pages, while the eviction
+- * cares about the lower bound of cold pages.
+- */
+- if (young * MIN_NR_GENS > total)
+- return true;
+- if (old * (MIN_NR_GENS + 2) < total)
+- return true;
+-
+- return false;
+-}
+-
+ static bool lruvec_is_sizable(struct lruvec *lruvec, struct scan_control *sc)
+ {
+ int gen, type, zone;
+@@ -4828,6 +4766,68 @@ static int evict_pages(struct lruvec *lruvec, struct scan_control *sc, int swapp
+ return scanned;
+ }
+
++static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq,
++ struct scan_control *sc, bool can_swap, unsigned long *nr_to_scan)
++{
++ int gen, type, zone;
++ unsigned long old = 0;
++ unsigned long young = 0;
++ unsigned long total = 0;
++ struct lru_gen_page *lrugen = &lruvec->lrugen;
++ struct mem_cgroup *memcg = lruvec_memcg(lruvec);
++ DEFINE_MIN_SEQ(lruvec);
++
++ /* whether this lruvec is completely out of cold pages */
++ if (min_seq[!can_swap] + MIN_NR_GENS > max_seq) {
++ *nr_to_scan = 0;
++ return true;
++ }
++
++ for (type = !can_swap; type < ANON_AND_FILE; type++) {
++ unsigned long seq;
++
++ for (seq = min_seq[type]; seq <= max_seq; seq++) {
++ unsigned long size = 0;
++
++ gen = lru_gen_from_seq(seq);
++
++ for (zone = 0; zone < MAX_NR_ZONES; zone++)
++ size += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L);
++
++ total += size;
++ if (seq == max_seq)
++ young += size;
++ else if (seq + MIN_NR_GENS == max_seq)
++ old += size;
++ }
++ }
++
++ /* try to scrape all its memory if this memcg was deleted */
++ *nr_to_scan = mem_cgroup_online(memcg) ? (total >> sc->priority) : total;
++
++ /*
++ * The aging tries to be lazy to reduce the overhead, while the eviction
++ * stalls when the number of generations reaches MIN_NR_GENS. Hence, the
++ * ideal number of generations is MIN_NR_GENS+1.
++ */
++ if (min_seq[!can_swap] + MIN_NR_GENS < max_seq)
++ return false;
++
++ /*
++ * It's also ideal to spread pages out evenly, i.e., 1/(MIN_NR_GENS+1)
++ * of the total number of pages for each generation. A reasonable range
++ * for this average portion is [1/MIN_NR_GENS, 1/(MIN_NR_GENS+2)]. The
++ * aging cares about the upper bound of hot pages, while the eviction
++ * cares about the lower bound of cold pages.
++ */
++ if (young * MIN_NR_GENS > total)
++ return true;
++ if (old * (MIN_NR_GENS + 2) < total)
++ return true;
++
++ return false;
++}
++
+ /*
+ * For future optimizations:
+ * 1. Defer try_to_inc_max_seq() to workqueues to reduce latency for memcg
+--
+2.40.0
+
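
For reference, the decision that should_run_aging() encodes can be read on its own: aging should produce a new generation either when the youngest generation has grown past 1/MIN_NR_GENS of the lruvec's pages (too many hot pages concentrated in one generation) or when the oldest generations have shrunk below 1/(MIN_NR_GENS + 2) of the total (eviction is about to run out of cold pages). Below is a minimal standalone sketch of that heuristic, assuming MIN_NR_GENS is 2 as in the 5.15 MGLRU code; the page counts are invented inputs rather than values read from a real lruvec, and the sketch omits the *nr_to_scan computation (total >> sc->priority, or the whole total for an offlined memcg).

/*
 * Standalone sketch of the should_run_aging() heuristic from the patch
 * above. MIN_NR_GENS mirrors the kernel constant (2 in the 5.15 MGLRU
 * code); the inputs in main() are made up for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

#define MIN_NR_GENS 2UL

static bool should_run_aging_sketch(unsigned long young, unsigned long old,
				    unsigned long total)
{
	/*
	 * The youngest generation holds more than 1/MIN_NR_GENS of all
	 * pages: too many hot pages in one generation, so age to split
	 * them up.
	 */
	if (young * MIN_NR_GENS > total)
		return true;
	/*
	 * The oldest generations hold less than 1/(MIN_NR_GENS + 2) of
	 * all pages: too few cold pages left for eviction, so age to
	 * refill them.
	 */
	if (old * (MIN_NR_GENS + 2) < total)
		return true;
	return false;
}

int main(void)
{
	/* 600 of 1000 pages in the youngest generation: 600 * 2 > 1000. */
	printf("%d\n", should_run_aging_sketch(600, 200, 1000)); /* 1: age */
	/* Roughly even spread (~1/3 each): neither bound trips. */
	printf("%d\n", should_run_aging_sketch(330, 330, 1000)); /* 0: skip */
	return 0;
}

In the kernel itself the three counts are not parameters: as the moved function shows, they are summed from lrugen->nr_pages[gen][type][zone] across zones and generations, with young taken from the max_seq generation and old from the generation MIN_NR_GENS steps behind it.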