path: root/target/linux/generic/backport-5.15/020-v6.3-22-mm-multi-gen-LRU-rename-lrugen-lists-to-lrugen-pages.patch
author    Kazuki H <kazukih0205@gmail.com>  2023-03-21 06:51:03 +0900
committer Christian Marangi <ansuelsmth@gmail.com>  2023-03-27 14:16:10 +0200
commit    0d0928f58795e336646ad31ea96d2919b5328f39 (patch)
tree      eb321324f622f740f72233d019ef01873a4f97cf /target/linux/generic/backport-5.15/020-v6.3-22-mm-multi-gen-LRU-rename-lrugen-lists-to-lrugen-pages.patch
parent    dc79b51533cfe9a7806353f6c6fd6b22cd80d536 (diff)
kernel: Update MGLRU patchset
The current patches are old, update them from mainline. Backports taken from
https://github.com/yuzhaogoogle/linux/commits/mglru-5.15

Tested-by: Kazuki H <kazukih0205@gmail.com> #mt7622/Linksys E8450 UBI
Signed-off-by: Kazuki H <kazukih0205@gmail.com>
Diffstat (limited to 'target/linux/generic/backport-5.15/020-v6.3-22-mm-multi-gen-LRU-rename-lrugen-lists-to-lrugen-pages.patch')
-rw-r--r--  target/linux/generic/backport-5.15/020-v6.3-22-mm-multi-gen-LRU-rename-lrugen-lists-to-lrugen-pages.patch | 171
1 file changed, 171 insertions(+), 0 deletions(-)
diff --git a/target/linux/generic/backport-5.15/020-v6.3-22-mm-multi-gen-LRU-rename-lrugen-lists-to-lrugen-pages.patch b/target/linux/generic/backport-5.15/020-v6.3-22-mm-multi-gen-LRU-rename-lrugen-lists-to-lrugen-pages.patch
new file mode 100644
index 0000000000..2e1783661d
--- /dev/null
+++ b/target/linux/generic/backport-5.15/020-v6.3-22-mm-multi-gen-LRU-rename-lrugen-lists-to-lrugen-pages.patch
@@ -0,0 +1,171 @@
+From afd37e73db04c7e6b47411120ac5f6a7eca51fec Mon Sep 17 00:00:00 2001
+From: Yu Zhao <yuzhao@google.com>
+Date: Wed, 21 Dec 2022 21:19:00 -0700
+Subject: [PATCH 22/29] mm: multi-gen LRU: rename lrugen->lists[] to
+ lrugen->pages[]
+
+lru_gen_page will be chained into per-node lists by the coming
+lrugen->list.
+
+Link: https://lkml.kernel.org/r/20221222041905.2431096-3-yuzhao@google.com
+Signed-off-by: Yu Zhao <yuzhao@google.com>
+Cc: Johannes Weiner <hannes@cmpxchg.org>
+Cc: Jonathan Corbet <corbet@lwn.net>
+Cc: Michael Larabel <Michael@MichaelLarabel.com>
+Cc: Michal Hocko <mhocko@kernel.org>
+Cc: Mike Rapoport <rppt@kernel.org>
+Cc: Roman Gushchin <roman.gushchin@linux.dev>
+Cc: Suren Baghdasaryan <surenb@google.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+---
+ include/linux/mm_inline.h | 4 ++--
+ include/linux/mmzone.h | 8 ++++----
+ mm/vmscan.c | 20 ++++++++++----------
+ 3 files changed, 16 insertions(+), 16 deletions(-)
+
+diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
+index 27c4890503c5..4adc9ba59569 100644
+--- a/include/linux/mm_inline.h
++++ b/include/linux/mm_inline.h
+@@ -246,9 +246,9 @@ static inline bool lru_gen_add_page(struct lruvec *lruvec, struct page *page, bo
+ lru_gen_update_size(lruvec, page, -1, gen);
+ /* for rotate_reclaimable_page() */
+ if (reclaiming)
+- list_add_tail(&page->lru, &lrugen->lists[gen][type][zone]);
++ list_add_tail(&page->lru, &lrugen->pages[gen][type][zone]);
+ else
+- list_add(&page->lru, &lrugen->lists[gen][type][zone]);
++ list_add(&page->lru, &lrugen->pages[gen][type][zone]);
+
+ return true;
+ }
+diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
+index 5856b026c089..7b8a26aaf381 100644
+--- a/include/linux/mmzone.h
++++ b/include/linux/mmzone.h
+@@ -302,7 +302,7 @@ enum lruvec_flags {
+ * They form a sliding window of a variable size [MIN_NR_GENS, MAX_NR_GENS]. An
+ * offset within MAX_NR_GENS, i.e., gen, indexes the LRU list of the
+ * corresponding generation. The gen counter in page->flags stores gen+1 while
+- * a page is on one of lrugen->lists[]. Otherwise it stores 0.
++ * a page is on one of lrugen->pages[]. Otherwise it stores 0.
+ *
+ * A page is added to the youngest generation on faulting. The aging needs to
+ * check the accessed bit at least twice before handing this page over to the
+@@ -314,8 +314,8 @@ enum lruvec_flags {
+ * rest of generations, if they exist, are considered inactive. See
+ * lru_gen_is_active().
+ *
+- * PG_active is always cleared while a page is on one of lrugen->lists[] so that
+- * the aging needs not to worry about it. And it's set again when a page
++ * PG_active is always cleared while a page is on one of lrugen->pages[] so
++ * that the aging needs not to worry about it. And it's set again when a page
+ * considered active is isolated for non-reclaiming purposes, e.g., migration.
+ * See lru_gen_add_page() and lru_gen_del_page().
+ *
+@@ -402,7 +402,7 @@ struct lru_gen_page {
+ /* the birth time of each generation in jiffies */
+ unsigned long timestamps[MAX_NR_GENS];
+ /* the multi-gen LRU lists, lazily sorted on eviction */
+- struct list_head lists[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
++ struct list_head pages[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
+ /* the multi-gen LRU sizes, eventually consistent */
+ long nr_pages[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
+ /* the exponential moving average of refaulted */
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index 3b1b5bd9736a..2322c913aa64 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -3987,7 +3987,7 @@ static bool inc_min_seq(struct lruvec *lruvec, int type, bool can_swap)
+
+ /* prevent cold/hot inversion if force_scan is true */
+ for (zone = 0; zone < MAX_NR_ZONES; zone++) {
+- struct list_head *head = &lrugen->lists[old_gen][type][zone];
++ struct list_head *head = &lrugen->pages[old_gen][type][zone];
+
+ while (!list_empty(head)) {
+ struct page *page = lru_to_page(head);
+@@ -3998,7 +3998,7 @@ static bool inc_min_seq(struct lruvec *lruvec, int type, bool can_swap)
+ VM_WARN_ON_ONCE_PAGE(page_zonenum(page) != zone, page);
+
+ new_gen = page_inc_gen(lruvec, page, false);
+- list_move_tail(&page->lru, &lrugen->lists[new_gen][type][zone]);
++ list_move_tail(&page->lru, &lrugen->pages[new_gen][type][zone]);
+
+ if (!--remaining)
+ return false;
+@@ -4026,7 +4026,7 @@ static bool try_to_inc_min_seq(struct lruvec *lruvec, bool can_swap)
+ gen = lru_gen_from_seq(min_seq[type]);
+
+ for (zone = 0; zone < MAX_NR_ZONES; zone++) {
+- if (!list_empty(&lrugen->lists[gen][type][zone]))
++ if (!list_empty(&lrugen->pages[gen][type][zone]))
+ goto next;
+ }
+
+@@ -4491,7 +4491,7 @@ static bool sort_page(struct lruvec *lruvec, struct page *page, int tier_idx)
+
+ /* promoted */
+ if (gen != lru_gen_from_seq(lrugen->min_seq[type])) {
+- list_move(&page->lru, &lrugen->lists[gen][type][zone]);
++ list_move(&page->lru, &lrugen->pages[gen][type][zone]);
+ return true;
+ }
+
+@@ -4500,7 +4500,7 @@ static bool sort_page(struct lruvec *lruvec, struct page *page, int tier_idx)
+ int hist = lru_hist_from_seq(lrugen->min_seq[type]);
+
+ gen = page_inc_gen(lruvec, page, false);
+- list_move_tail(&page->lru, &lrugen->lists[gen][type][zone]);
++ list_move_tail(&page->lru, &lrugen->pages[gen][type][zone]);
+
+ WRITE_ONCE(lrugen->protected[hist][type][tier - 1],
+ lrugen->protected[hist][type][tier - 1] + delta);
+@@ -4512,7 +4512,7 @@ static bool sort_page(struct lruvec *lruvec, struct page *page, int tier_idx)
+ if (PageLocked(page) || PageWriteback(page) ||
+ (type == LRU_GEN_FILE && PageDirty(page))) {
+ gen = page_inc_gen(lruvec, page, true);
+- list_move(&page->lru, &lrugen->lists[gen][type][zone]);
++ list_move(&page->lru, &lrugen->pages[gen][type][zone]);
+ return true;
+ }
+
+@@ -4579,7 +4579,7 @@ static int scan_pages(struct lruvec *lruvec, struct scan_control *sc,
+ for (zone = sc->reclaim_idx; zone >= 0; zone--) {
+ LIST_HEAD(moved);
+ int skipped = 0;
+- struct list_head *head = &lrugen->lists[gen][type][zone];
++ struct list_head *head = &lrugen->pages[gen][type][zone];
+
+ while (!list_empty(head)) {
+ struct page *page = lru_to_page(head);
+@@ -4980,7 +4980,7 @@ static bool __maybe_unused state_is_valid(struct lruvec *lruvec)
+ int gen, type, zone;
+
+ for_each_gen_type_zone(gen, type, zone) {
+- if (!list_empty(&lrugen->lists[gen][type][zone]))
++ if (!list_empty(&lrugen->pages[gen][type][zone]))
+ return false;
+ }
+ }
+@@ -5025,7 +5025,7 @@ static bool drain_evictable(struct lruvec *lruvec)
+ int remaining = MAX_LRU_BATCH;
+
+ for_each_gen_type_zone(gen, type, zone) {
+- struct list_head *head = &lruvec->lrugen.lists[gen][type][zone];
++ struct list_head *head = &lruvec->lrugen.pages[gen][type][zone];
+
+ while (!list_empty(head)) {
+ bool success;
+@@ -5558,7 +5558,7 @@ void lru_gen_init_lruvec(struct lruvec *lruvec)
+ lrugen->timestamps[i] = jiffies;
+
+ for_each_gen_type_zone(gen, type, zone)
+- INIT_LIST_HEAD(&lrugen->lists[gen][type][zone]);
++ INIT_LIST_HEAD(&lrugen->pages[gen][type][zone]);
+
+ lruvec->mm_state.seq = MIN_NR_GENS;
+ init_waitqueue_head(&lruvec->mm_state.wait);
+--
+2.40.0
+
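Illustrative sketch (not part of the patch above): every hunk in this rename touches the same three-dimensional list array, indexed by generation, anon/file type, and zone, and the head/tail insertion choice in lru_gen_add_page() encodes reclaim order. The standalone C program below models that indexing in userspace under stated assumptions: the list helpers are a minimal re-implementation, not <linux/list.h>; the demo_* names are hypothetical; MAX_NR_GENS 4 and ANON_AND_FILE 2 match the kernel's definitions, while MAX_NR_ZONES is config-dependent and 5 is only a placeholder.

/* Standalone userspace sketch of the pages[gen][type][zone] indexing
 * that this patch renames from lists[gen][type][zone]. */
#include <stdio.h>

#define MAX_NR_GENS   4   /* matches the kernel's MAX_NR_GENS */
#define ANON_AND_FILE 2   /* anon vs. file LRU types */
#define MAX_NR_ZONES  5   /* config-dependent in the kernel; 5 is illustrative */

struct list_head {
	struct list_head *prev, *next;
};

static void INIT_LIST_HEAD(struct list_head *head)
{
	head->prev = head->next = head;
}

/* insert n between two known-adjacent entries */
static void __list_add(struct list_head *n, struct list_head *prev,
		       struct list_head *next)
{
	n->prev = prev;
	n->next = next;
	prev->next = n;
	next->prev = n;
}

static void list_add(struct list_head *n, struct list_head *head)
{
	__list_add(n, head, head->next);   /* at the head: youngest position */
}

static void list_add_tail(struct list_head *n, struct list_head *head)
{
	__list_add(n, head->prev, head);   /* at the tail: next to be evicted */
}

static int list_empty(const struct list_head *head)
{
	return head->next == head;
}

/* hypothetical stand-in for the renamed field in struct lru_gen_page */
struct demo_lrugen {
	struct list_head pages[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES];
};

struct demo_page {
	struct list_head lru;
};

/* mirrors the reclaiming test in lru_gen_add_page() in the patch above */
static void demo_add_page(struct demo_lrugen *lrugen, struct demo_page *page,
			  int gen, int type, int zone, int reclaiming)
{
	if (reclaiming)
		list_add_tail(&page->lru, &lrugen->pages[gen][type][zone]);
	else
		list_add(&page->lru, &lrugen->pages[gen][type][zone]);
}

int main(void)
{
	struct demo_lrugen lrugen;
	struct demo_page page;
	int gen, type, zone;

	/* mirrors the for_each_gen_type_zone init in lru_gen_init_lruvec() */
	for (gen = 0; gen < MAX_NR_GENS; gen++)
		for (type = 0; type < ANON_AND_FILE; type++)
			for (zone = 0; zone < MAX_NR_ZONES; zone++)
				INIT_LIST_HEAD(&lrugen.pages[gen][type][zone]);

	demo_add_page(&lrugen, &page, 0, 0, 0, 0);
	printf("pages[0][0][0] empty after add? %d\n",
	       list_empty(&lrugen.pages[0][0][0]));
	return 0;
}

As the patch's one-line rationale notes, the rename frees the name "list" so that a later change can chain each lru_gen_page into per-node lists via a new lrugen->list member; the array of page lists therefore becomes lrugen->pages[], with no behavioral change in any of the hunks.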