author     Christian Marangi <ansuelsmth@gmail.com>  2023-03-25 17:24:27 +0100
committer  Christian Marangi <ansuelsmth@gmail.com>  2023-03-27 14:16:10 +0200
commit     708a507af00d618fd91eadf1f2a03e2f7e86b6ea (patch)
tree       a7258fc0860ef3cb2f59bfef850e3130bab341ad /target/linux/generic/backport-5.15/020-v6.1-07-mm-multi-gen-LRU-exploit-locality-in-rmap.patch
parent     0d0928f58795e336646ad31ea96d2919b5328f39 (diff)
generic: 5.15: refresh kernel patches
Refresh kernel patches for generic kernel 5.15 due to new backport version of MGLRU patchset.

Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
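The commit does not record the exact commands used to refresh the patches; the sketch below shows the usual OpenWrt build-system workflow for regenerating kernel patches so their hunk offsets and context match an updated series. Treat it as an assumed reconstruction, not the author's documented procedure.

    # One-step refresh of all kernel patches for the configured target:
    make target/linux/refresh V=s

    # Or interactively with quilt, e.g. to inspect fuzz before refreshing:
    make target/linux/{clean,prepare} QUILT=1 V=s   # unpack kernel source with patches under quilt control
    cd build_dir/target-*/linux-*/linux-5.15.*      # enter the prepared kernel tree
    quilt push -a                                   # apply the whole patch series
    quilt refresh                                   # rewrite the topmost applied patch with fresh context
    cd -
    make target/linux/update V=s                    # copy refreshed patches back to target/linux/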
Diffstat (limited to 'target/linux/generic/backport-5.15/020-v6.1-07-mm-multi-gen-LRU-exploit-locality-in-rmap.patch')
-rw-r--r--  target/linux/generic/backport-5.15/020-v6.1-07-mm-multi-gen-LRU-exploit-locality-in-rmap.patch | 47
1 file changed, 15 insertions(+), 32 deletions(-)
diff --git a/target/linux/generic/backport-5.15/020-v6.1-07-mm-multi-gen-LRU-exploit-locality-in-rmap.patch b/target/linux/generic/backport-5.15/020-v6.1-07-mm-multi-gen-LRU-exploit-locality-in-rmap.patch
index e0c6380b5f..5cd6e03dc6 100644
--- a/target/linux/generic/backport-5.15/020-v6.1-07-mm-multi-gen-LRU-exploit-locality-in-rmap.patch
+++ b/target/linux/generic/backport-5.15/020-v6.1-07-mm-multi-gen-LRU-exploit-locality-in-rmap.patch
@@ -108,11 +108,9 @@ Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/vmscan.c | 184 +++++++++++++++++++++++++++++++++++++
7 files changed, 232 insertions(+), 2 deletions(-)
-diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
-index 4f189b17dafc..8d6a0329bc59 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
-@@ -442,6 +442,7 @@ static inline struct obj_cgroup *__page_objcg(struct page *page)
+@@ -442,6 +442,7 @@ static inline struct obj_cgroup *__page_
* - LRU isolation
* - lock_page_memcg()
* - exclusive reference
@@ -120,7 +118,7 @@ index 4f189b17dafc..8d6a0329bc59 100644
*
* For a kmem page a caller should hold an rcu read lock to protect memcg
* associated with a kmem page from being released.
-@@ -497,6 +498,7 @@ static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
+@@ -497,6 +498,7 @@ static inline struct mem_cgroup *page_me
* - LRU isolation
* - lock_page_memcg()
* - exclusive reference
@@ -128,7 +126,7 @@ index 4f189b17dafc..8d6a0329bc59 100644
*
* For a kmem page a caller should hold an rcu read lock to protect memcg
* associated with a kmem page from being released.
-@@ -953,6 +955,23 @@ void unlock_page_memcg(struct page *page);
+@@ -953,6 +955,23 @@ void unlock_page_memcg(struct page *page
void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val);
@@ -152,7 +150,7 @@ index 4f189b17dafc..8d6a0329bc59 100644
/* idx can be of type enum memcg_stat_item or node_stat_item */
static inline void mod_memcg_state(struct mem_cgroup *memcg,
int idx, int val)
-@@ -1369,6 +1388,18 @@ static inline void unlock_page_memcg(struct page *page)
+@@ -1369,6 +1388,18 @@ static inline void unlock_page_memcg(str
{
}
@@ -171,8 +169,6 @@ index 4f189b17dafc..8d6a0329bc59 100644
static inline void mem_cgroup_handle_over_high(void)
{
}
-diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
-index fce8945c507c..4db2b877fcf9 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -352,6 +352,7 @@ enum lruvec_flags {
@@ -191,7 +187,7 @@ index fce8945c507c..4db2b877fcf9 100644
#ifdef CONFIG_MEMCG
void lru_gen_init_memcg(struct mem_cgroup *memcg);
-@@ -419,6 +421,10 @@ static inline void lru_gen_init_lruvec(struct lruvec *lruvec)
+@@ -419,6 +421,10 @@ static inline void lru_gen_init_lruvec(s
{
}
@@ -202,8 +198,6 @@ index fce8945c507c..4db2b877fcf9 100644
#ifdef CONFIG_MEMCG
static inline void lru_gen_init_memcg(struct mem_cgroup *memcg)
{
-diff --git a/mm/internal.h b/mm/internal.h
-index cf3cb933eba3..5c73246a092e 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -35,6 +35,7 @@
@@ -214,11 +208,9 @@ index cf3cb933eba3..5c73246a092e 100644
void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
unsigned long floor, unsigned long ceiling);
-diff --git a/mm/memcontrol.c b/mm/memcontrol.c
-index 8b634dc72e7f..cc3431c5d9ba 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
-@@ -2798,6 +2798,7 @@ static void commit_charge(struct page *page, struct mem_cgroup *memcg)
+@@ -2798,6 +2798,7 @@ static void commit_charge(struct page *p
* - LRU isolation
* - lock_page_memcg()
* - exclusive reference
@@ -226,8 +218,6 @@ index 8b634dc72e7f..cc3431c5d9ba 100644
*/
page->memcg_data = (unsigned long)memcg;
}
-diff --git a/mm/rmap.c b/mm/rmap.c
-index 330b361a460e..22a86122732e 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -73,6 +73,7 @@
@@ -238,7 +228,7 @@ index 330b361a460e..22a86122732e 100644
#include <asm/tlbflush.h>
-@@ -793,6 +794,12 @@ static bool page_referenced_one(struct page *page, struct vm_area_struct *vma,
+@@ -793,6 +794,12 @@ static bool page_referenced_one(struct p
}
if (pvmw.pte) {
@@ -251,11 +241,9 @@ index 330b361a460e..22a86122732e 100644
if (ptep_clear_flush_young_notify(vma, address,
pvmw.pte)) {
/*
-diff --git a/mm/swap.c b/mm/swap.c
-index 5d227577b609..966ff2d83343 100644
--- a/mm/swap.c
+++ b/mm/swap.c
-@@ -325,7 +325,7 @@ static bool need_activate_page_drain(int cpu)
+@@ -325,7 +325,7 @@ static bool need_activate_page_drain(int
return pagevec_count(&per_cpu(lru_pvecs.activate_page, cpu)) != 0;
}
@@ -264,7 +252,7 @@ index 5d227577b609..966ff2d83343 100644
{
page = compound_head(page);
if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
-@@ -345,7 +345,7 @@ static inline void activate_page_drain(int cpu)
+@@ -345,7 +345,7 @@ static inline void activate_page_drain(i
{
}
@@ -273,11 +261,9 @@ index 5d227577b609..966ff2d83343 100644
{
struct lruvec *lruvec;
-diff --git a/mm/vmscan.c b/mm/vmscan.c
-index 932abd24c1b3..1d0b25ae378c 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
-@@ -1409,6 +1409,11 @@ static unsigned int shrink_page_list(struct list_head *page_list,
+@@ -1409,6 +1409,11 @@ retry:
if (!sc->may_unmap && page_mapped(page))
goto keep_locked;
@@ -289,7 +275,7 @@ index 932abd24c1b3..1d0b25ae378c 100644
may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
(PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
-@@ -2990,6 +2995,29 @@ static bool positive_ctrl_err(struct ctrl_pos *sp, struct ctrl_pos *pv)
+@@ -2990,6 +2995,29 @@ static bool positive_ctrl_err(struct ctr
* the aging
******************************************************************************/
@@ -319,7 +305,7 @@ index 932abd24c1b3..1d0b25ae378c 100644
/* protect pages accessed multiple times through file descriptors */
static int page_inc_gen(struct lruvec *lruvec, struct page *page, bool reclaiming)
{
-@@ -3001,6 +3029,11 @@ static int page_inc_gen(struct lruvec *lruvec, struct page *page, bool reclaimin
+@@ -3001,6 +3029,11 @@ static int page_inc_gen(struct lruvec *l
VM_WARN_ON_ONCE_PAGE(!(old_flags & LRU_GEN_MASK), page);
do {
@@ -331,7 +317,7 @@ index 932abd24c1b3..1d0b25ae378c 100644
new_gen = (old_gen + 1) % MAX_NR_GENS;
new_flags = old_flags & ~(LRU_GEN_MASK | LRU_REFS_MASK | LRU_REFS_FLAGS);
-@@ -3015,6 +3048,43 @@ static int page_inc_gen(struct lruvec *lruvec, struct page *page, bool reclaimin
+@@ -3015,6 +3048,43 @@ static int page_inc_gen(struct lruvec *l
return new_gen;
}
@@ -375,7 +361,7 @@ index 932abd24c1b3..1d0b25ae378c 100644
static void inc_min_seq(struct lruvec *lruvec, int type)
{
struct lru_gen_struct *lrugen = &lruvec->lrugen;
-@@ -3214,6 +3284,114 @@ static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc)
+@@ -3214,6 +3284,114 @@ static void lru_gen_age_node(struct pgli
} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)));
}
@@ -490,7 +476,7 @@ index 932abd24c1b3..1d0b25ae378c 100644
/******************************************************************************
* the eviction
******************************************************************************/
-@@ -3250,6 +3428,12 @@ static bool sort_page(struct lruvec *lruvec, struct page *page, int tier_idx)
+@@ -3250,6 +3428,12 @@ static bool sort_page(struct lruvec *lru
return true;
}
@@ -503,6 +489,3 @@ index 932abd24c1b3..1d0b25ae378c 100644
/* protected */
if (tier > tier_idx) {
int hist = lru_hist_from_seq(lrugen->min_seq[type]);
---
-2.40.0
-