From 708a507af00d618fd91eadf1f2a03e2f7e86b6ea Mon Sep 17 00:00:00 2001 From: Christian Marangi Date: Sat, 25 Mar 2023 17:24:27 +0100 Subject: generic: 5.15: refresh kernel patches Refresh kernel patches for generic kernel 5.15 due to new backport version of MGLRU patchset. Signed-off-by: Christian Marangi --- ...01-mm-x86-arm64-add-arch_has_hw_pte_young.patch | 19 +---- ...x86-add-CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG.patch | 23 ++--- ...-v6.1-03-mm-vmscan.c-refactor-shrink_node.patch | 7 +- ...ude-linux-mm_inline.h-fold-__update_lru_s.patch | 9 +- .../020-v6.1-05-mm-multi-gen-LRU-groundwork.patch | 69 ++++----------- ...6-mm-multi-gen-LRU-minimal-implementation.patch | 51 ++++-------- ...mm-multi-gen-LRU-exploit-locality-in-rmap.patch | 47 ++++------- ...mm-multi-gen-LRU-support-page-table-walks.patch | 97 +++++++++------------- ...mm-multi-gen-LRU-optimize-multiple-memcgs.patch | 19 ++--- .../020-v6.1-10-mm-multi-gen-LRU-kill-switch.patch | 41 +++------ ...-11-mm-multi-gen-LRU-thrashing-prevention.patch | 21 ++--- ...6.1-12-mm-multi-gen-LRU-debugfs-interface.patch | 33 +++----- ...glru-don-t-sync-disk-for-each-aging-cycle.patch | 7 +- ...n-LRU-retry-pages-written-back-while-isol.patch | 11 +-- ...n-LRU-move-lru_gen_add_mm-out-of-IRQ-off-.patch | 9 +- ...y-pmd_young-for-architectures-not-having-.patch | 21 +---- ...m-introduce-arch_has_hw_nonleaf_pmd_young.patch | 19 ++--- ...gen-LRU-fix-crash-during-cgroup-migration.patch | 7 +- .../020-v6.3-19-mm-add-vma_has_recency.patch | 23 ++--- ...020-v6.3-20-mm-support-POSIX_FADV_NOREUSE.patch | 17 +--- ...n-LRU-rename-lru_gen_struct-to-lru_gen_pa.patch | 51 +++++------- ...n-LRU-rename-lrugen-lists-to-lrugen-pages.patch | 31 +++---- ...en-LRU-remove-eviction-fairness-safeguard.patch | 21 ++--- ...i-gen-LRU-remove-aging-fairness-safeguard.patch | 23 ++--- ...mm-multi-gen-LRU-shuffle-should_run_aging.patch | 9 +- ...multi-gen-LRU-per-node-lru_gen_page-lists.patch | 78 +++++++---------- ...-multi-gen-LRU-clarify-scan_control-flags.patch | 31 +++---- ...n-LRU-simplify-arch_has_hw_pte_young-chec.patch | 7 +- ...-29-mm-multi-gen-LRU-avoid-futile-retries.patch | 11 +-- ...c_node_mem_map-with-ARCH_PFN_OFFSET-calcu.patch | 2 +- 30 files changed, 261 insertions(+), 553 deletions(-) diff --git a/target/linux/generic/backport-5.15/020-v6.1-01-mm-x86-arm64-add-arch_has_hw_pte_young.patch b/target/linux/generic/backport-5.15/020-v6.1-01-mm-x86-arm64-add-arch_has_hw_pte_young.patch index 3bea44d865..df854ffd3d 100644 --- a/target/linux/generic/backport-5.15/020-v6.1-01-mm-x86-arm64-add-arch_has_hw_pte_young.patch +++ b/target/linux/generic/backport-5.15/020-v6.1-01-mm-x86-arm64-add-arch_has_hw_pte_young.patch @@ -327,11 +327,9 @@ Signed-off-by: Andrew Morton mm/memory.c | 14 +------------- 4 files changed, 19 insertions(+), 28 deletions(-) -diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h -index ed57717cd004..874827fc7bc6 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h -@@ -999,23 +999,13 @@ static inline void update_mmu_cache(struct vm_area_struct *vma, +@@ -999,23 +999,13 @@ static inline void update_mmu_cache(stru * page after fork() + CoW for pfn mappings. We don't always have a * hardware-managed access flag on arm64. 
*/ @@ -357,11 +355,9 @@ index ed57717cd004..874827fc7bc6 100644 #endif /* !__ASSEMBLY__ */ -diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h -index 448cd01eb3ec..3908780fc408 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h -@@ -1397,10 +1397,10 @@ static inline bool arch_has_pfn_modify_check(void) +@@ -1397,10 +1397,10 @@ static inline bool arch_has_pfn_modify_c return boot_cpu_has_bug(X86_BUG_L1TF); } @@ -375,11 +371,9 @@ index 448cd01eb3ec..3908780fc408 100644 } #endif /* __ASSEMBLY__ */ -diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h -index d468efcf48f4..2f1188980baf 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h -@@ -259,6 +259,19 @@ static inline int pmdp_clear_flush_young(struct vm_area_struct *vma, +@@ -259,6 +259,19 @@ static inline int pmdp_clear_flush_young #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #endif @@ -399,8 +393,6 @@ index d468efcf48f4..2f1188980baf 100644 #ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long address, -diff --git a/mm/memory.c b/mm/memory.c -index a4d0f744a458..392b7326a2d2 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -121,18 +121,6 @@ int randomize_va_space __read_mostly = @@ -422,7 +414,7 @@ index a4d0f744a458..392b7326a2d2 100644 #ifndef arch_wants_old_prefaulted_pte static inline bool arch_wants_old_prefaulted_pte(void) { -@@ -2782,7 +2770,7 @@ static inline bool cow_user_page(struct page *dst, struct page *src, +@@ -2782,7 +2770,7 @@ static inline bool cow_user_page(struct * On architectures with software "accessed" bits, we would * take a double page fault, so mark it accessed here. */ @@ -431,6 +423,3 @@ index a4d0f744a458..392b7326a2d2 100644 pte_t entry; vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl); --- -2.40.0 - diff --git a/target/linux/generic/backport-5.15/020-v6.1-02-mm-x86-add-CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG.patch b/target/linux/generic/backport-5.15/020-v6.1-02-mm-x86-add-CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG.patch index 60ce9c07cc..9e0430ea2a 100644 --- a/target/linux/generic/backport-5.15/020-v6.1-02-mm-x86-add-CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG.patch +++ b/target/linux/generic/backport-5.15/020-v6.1-02-mm-x86-add-CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG.patch @@ -71,8 +71,6 @@ Signed-off-by: Andrew Morton include/linux/pgtable.h | 4 ++-- 5 files changed, 17 insertions(+), 4 deletions(-) -diff --git a/arch/Kconfig b/arch/Kconfig -index 5987363b41c2..62d55b7ccca1 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -1295,6 +1295,14 @@ config ARCH_HAS_ELFCORE_COMPAT @@ -90,8 +88,6 @@ index 5987363b41c2..62d55b7ccca1 100644 source "kernel/gcov/Kconfig" source "scripts/gcc-plugins/Kconfig" -diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig -index a08ce6360382..38e1d231d52a 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -84,6 +84,7 @@ config X86 @@ -102,11 +98,9 @@ index a08ce6360382..38e1d231d52a 100644 select ARCH_HAS_UACCESS_FLUSHCACHE if X86_64 select ARCH_HAS_COPY_MC if X86_64 select ARCH_HAS_SET_MEMORY -diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h -index 3908780fc408..01a1763123ff 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h -@@ -817,7 +817,8 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd) +@@ -817,7 +817,8 @@ static inline unsigned long pmd_page_vad static inline int pmd_bad(pmd_t pmd) { @@ -116,11 +110,9 @@ index 3908780fc408..01a1763123ff 100644 } static inline unsigned long 
pages_to_mb(unsigned long npg) -diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c -index 3481b35cb4ec..a224193d84bf 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c -@@ -550,7 +550,7 @@ int ptep_test_and_clear_young(struct vm_area_struct *vma, +@@ -550,7 +550,7 @@ int ptep_test_and_clear_young(struct vm_ return ret; } @@ -129,7 +121,7 @@ index 3481b35cb4ec..a224193d84bf 100644 int pmdp_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmdp) { -@@ -562,6 +562,9 @@ int pmdp_test_and_clear_young(struct vm_area_struct *vma, +@@ -562,6 +562,9 @@ int pmdp_test_and_clear_young(struct vm_ return ret; } @@ -139,11 +131,9 @@ index 3481b35cb4ec..a224193d84bf 100644 int pudp_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pud_t *pudp) { -diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h -index 2f1188980baf..e6889556e0bf 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h -@@ -212,7 +212,7 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, +@@ -212,7 +212,7 @@ static inline int ptep_test_and_clear_yo #endif #ifndef __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG @@ -152,7 +142,7 @@ index 2f1188980baf..e6889556e0bf 100644 static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma, unsigned long address, pmd_t *pmdp) -@@ -233,7 +233,7 @@ static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma, +@@ -233,7 +233,7 @@ static inline int pmdp_test_and_clear_yo BUILD_BUG(); return 0; } @@ -161,6 +151,3 @@ index 2f1188980baf..e6889556e0bf 100644 #endif #ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH --- -2.40.0 - diff --git a/target/linux/generic/backport-5.15/020-v6.1-03-mm-vmscan.c-refactor-shrink_node.patch b/target/linux/generic/backport-5.15/020-v6.1-03-mm-vmscan.c-refactor-shrink_node.patch index 1b9a70dbc1..b8d2917d26 100644 --- a/target/linux/generic/backport-5.15/020-v6.1-03-mm-vmscan.c-refactor-shrink_node.patch +++ b/target/linux/generic/backport-5.15/020-v6.1-03-mm-vmscan.c-refactor-shrink_node.patch @@ -50,8 +50,6 @@ Signed-off-by: Andrew Morton mm/vmscan.c | 198 +++++++++++++++++++++++++++------------------------- 1 file changed, 104 insertions(+), 94 deletions(-) -diff --git a/mm/vmscan.c b/mm/vmscan.c -index 201acea81804..dc5f0381513f 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -2497,6 +2497,109 @@ enum scan_balance { @@ -164,7 +162,7 @@ index 201acea81804..dc5f0381513f 100644 /* * Determine how aggressively the anon and file LRU lists should be * scanned. 
The relative value of each set of LRU lists is determined -@@ -2965,109 +3068,16 @@ static void shrink_node(pg_data_t *pgdat, struct scan_control *sc) +@@ -2965,109 +3068,16 @@ static void shrink_node(pg_data_t *pgdat unsigned long nr_reclaimed, nr_scanned; struct lruvec *target_lruvec; bool reclaimable = false; @@ -275,6 +273,3 @@ index 201acea81804..dc5f0381513f 100644 shrink_node_memcgs(pgdat, sc); --- -2.40.0 - diff --git a/target/linux/generic/backport-5.15/020-v6.1-04-Revert-include-linux-mm_inline.h-fold-__update_lru_s.patch b/target/linux/generic/backport-5.15/020-v6.1-04-Revert-include-linux-mm_inline.h-fold-__update_lru_s.patch index 24b5c8f797..2f277a56e1 100644 --- a/target/linux/generic/backport-5.15/020-v6.1-04-Revert-include-linux-mm_inline.h-fold-__update_lru_s.patch +++ b/target/linux/generic/backport-5.15/020-v6.1-04-Revert-include-linux-mm_inline.h-fold-__update_lru_s.patch @@ -55,11 +55,9 @@ Signed-off-by: Andrew Morton include/linux/mm_inline.h | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) -diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h -index 355ea1ee32bd..a822d6b690a5 100644 --- a/include/linux/mm_inline.h +++ b/include/linux/mm_inline.h -@@ -24,7 +24,7 @@ static inline int page_is_file_lru(struct page *page) +@@ -24,7 +24,7 @@ static inline int page_is_file_lru(struc return !PageSwapBacked(page); } @@ -68,7 +66,7 @@ index 355ea1ee32bd..a822d6b690a5 100644 enum lru_list lru, enum zone_type zid, int nr_pages) { -@@ -33,6 +33,13 @@ static __always_inline void update_lru_size(struct lruvec *lruvec, +@@ -33,6 +33,13 @@ static __always_inline void update_lru_s __mod_lruvec_state(lruvec, NR_LRU_BASE + lru, nr_pages); __mod_zone_page_state(&pgdat->node_zones[zid], NR_ZONE_LRU_BASE + lru, nr_pages); @@ -82,6 +80,3 @@ index 355ea1ee32bd..a822d6b690a5 100644 #ifdef CONFIG_MEMCG mem_cgroup_update_lru_size(lruvec, lru, zid, nr_pages); #endif --- -2.40.0 - diff --git a/target/linux/generic/backport-5.15/020-v6.1-05-mm-multi-gen-LRU-groundwork.patch b/target/linux/generic/backport-5.15/020-v6.1-05-mm-multi-gen-LRU-groundwork.patch index 5c143f3cfa..577c2817da 100644 --- a/target/linux/generic/backport-5.15/020-v6.1-05-mm-multi-gen-LRU-groundwork.patch +++ b/target/linux/generic/backport-5.15/020-v6.1-05-mm-multi-gen-LRU-groundwork.patch @@ -124,11 +124,9 @@ Signed-off-by: Andrew Morton mm/vmscan.c | 75 +++++++++++++ 16 files changed, 425 insertions(+), 14 deletions(-) -diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c -index d6b5339c56e2..4ec08f7c3e75 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c -@@ -785,7 +785,8 @@ static int fuse_check_page(struct page *page) +@@ -785,7 +785,8 @@ static int fuse_check_page(struct page * 1 << PG_active | 1 << PG_workingset | 1 << PG_reclaim | @@ -138,11 +136,9 @@ index d6b5339c56e2..4ec08f7c3e75 100644 dump_page(page, "fuse: trying to steal weird page"); return 1; } -diff --git a/include/linux/mm.h b/include/linux/mm.h -index e4e1817bb3b8..699068f39aa0 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h -@@ -1093,6 +1093,8 @@ vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf); +@@ -1093,6 +1093,8 @@ vm_fault_t finish_mkwrite_fault(struct v #define ZONES_PGOFF (NODES_PGOFF - ZONES_WIDTH) #define LAST_CPUPID_PGOFF (ZONES_PGOFF - LAST_CPUPID_WIDTH) #define KASAN_TAG_PGOFF (LAST_CPUPID_PGOFF - KASAN_TAG_WIDTH) @@ -151,11 +147,9 @@ index e4e1817bb3b8..699068f39aa0 100644 /* * Define the bit shifts to access each section. 
For non-existent -diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h -index a822d6b690a5..65320d2b8f60 100644 --- a/include/linux/mm_inline.h +++ b/include/linux/mm_inline.h -@@ -26,10 +26,13 @@ static inline int page_is_file_lru(struct page *page) +@@ -26,10 +26,13 @@ static inline int page_is_file_lru(struc static __always_inline void __update_lru_size(struct lruvec *lruvec, enum lru_list lru, enum zone_type zid, @@ -170,7 +164,7 @@ index a822d6b690a5..65320d2b8f60 100644 __mod_lruvec_state(lruvec, NR_LRU_BASE + lru, nr_pages); __mod_zone_page_state(&pgdat->node_zones[zid], NR_ZONE_LRU_BASE + lru, nr_pages); -@@ -86,11 +89,177 @@ static __always_inline enum lru_list page_lru(struct page *page) +@@ -86,11 +89,177 @@ static __always_inline enum lru_list pag return lru; } @@ -348,7 +342,7 @@ index a822d6b690a5..65320d2b8f60 100644 update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page)); list_add(&page->lru, &lruvec->lists[lru]); } -@@ -100,6 +269,9 @@ static __always_inline void add_page_to_lru_list_tail(struct page *page, +@@ -100,6 +269,9 @@ static __always_inline void add_page_to_ { enum lru_list lru = page_lru(page); @@ -358,7 +352,7 @@ index a822d6b690a5..65320d2b8f60 100644 update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page)); list_add_tail(&page->lru, &lruvec->lists[lru]); } -@@ -107,6 +279,9 @@ static __always_inline void add_page_to_lru_list_tail(struct page *page, +@@ -107,6 +279,9 @@ static __always_inline void add_page_to_ static __always_inline void del_page_from_lru_list(struct page *page, struct lruvec *lruvec) { @@ -368,8 +362,6 @@ index a822d6b690a5..65320d2b8f60 100644 list_del(&page->lru); update_lru_size(lruvec, page_lru(page), page_zonenum(page), -thp_nr_pages(page)); -diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h -index 6ba100216530..0c39f72184d0 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -294,6 +294,102 @@ enum lruvec_flags { @@ -486,8 +478,6 @@ index 6ba100216530..0c39f72184d0 100644 #ifdef CONFIG_MEMCG struct pglist_data *pgdat; #endif -diff --git a/include/linux/page-flags-layout.h b/include/linux/page-flags-layout.h -index ef1e3e736e14..240905407a18 100644 --- a/include/linux/page-flags-layout.h +++ b/include/linux/page-flags-layout.h @@ -55,7 +55,8 @@ @@ -526,11 +516,9 @@ index ef1e3e736e14..240905407a18 100644 + #endif #endif /* _LINUX_PAGE_FLAGS_LAYOUT */ -diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h -index fbfd3fad48f2..a7d7ff4c621d 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h -@@ -845,7 +845,7 @@ static inline void ClearPageSlabPfmemalloc(struct page *page) +@@ -845,7 +845,7 @@ static inline void ClearPageSlabPfmemall 1UL << PG_private | 1UL << PG_private_2 | \ 1UL << PG_writeback | 1UL << PG_reserved | \ 1UL << PG_slab | 1UL << PG_active | \ @@ -539,7 +527,7 @@ index fbfd3fad48f2..a7d7ff4c621d 100644 /* * Flags checked when a page is prepped for return by the page allocator. -@@ -856,7 +856,7 @@ static inline void ClearPageSlabPfmemalloc(struct page *page) +@@ -856,7 +856,7 @@ static inline void ClearPageSlabPfmemall * alloc-free cycle to prevent from reusing the page. 
*/ #define PAGE_FLAGS_CHECK_AT_PREP \ @@ -548,8 +536,6 @@ index fbfd3fad48f2..a7d7ff4c621d 100644 #define PAGE_FLAGS_PRIVATE \ (1UL << PG_private | 1UL << PG_private_2) -diff --git a/include/linux/sched.h b/include/linux/sched.h -index e418935f8db6..545f6b1ccd50 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -911,6 +911,10 @@ struct task_struct { @@ -563,8 +549,6 @@ index e418935f8db6..545f6b1ccd50 100644 #ifdef CONFIG_COMPAT_BRK unsigned brk_randomized:1; #endif -diff --git a/kernel/bounds.c b/kernel/bounds.c -index 9795d75b09b2..5ee60777d8e4 100644 --- a/kernel/bounds.c +++ b/kernel/bounds.c @@ -22,6 +22,11 @@ int main(void) @@ -579,8 +563,6 @@ index 9795d75b09b2..5ee60777d8e4 100644 /* End of constants */ return 0; -diff --git a/mm/Kconfig b/mm/Kconfig -index c048dea7e342..0eeb27397884 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -897,6 +897,14 @@ config IO_MAPPING @@ -598,11 +580,9 @@ index c048dea7e342..0eeb27397884 100644 source "mm/damon/Kconfig" endmenu -diff --git a/mm/huge_memory.c b/mm/huge_memory.c -index 98ff57c8eda6..f260ef82f03a 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c -@@ -2366,7 +2366,8 @@ static void __split_huge_page_tail(struct page *head, int tail, +@@ -2366,7 +2366,8 @@ static void __split_huge_page_tail(struc #ifdef CONFIG_64BIT (1L << PG_arch_2) | #endif @@ -612,11 +592,9 @@ index 98ff57c8eda6..f260ef82f03a 100644 /* ->mapping in first tail page is compound_mapcount */ VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING, -diff --git a/mm/memcontrol.c b/mm/memcontrol.c -index b68b2fe639fd..8b634dc72e7f 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c -@@ -5178,6 +5178,7 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg) +@@ -5178,6 +5178,7 @@ static void __mem_cgroup_free(struct mem static void mem_cgroup_free(struct mem_cgroup *memcg) { @@ -624,7 +602,7 @@ index b68b2fe639fd..8b634dc72e7f 100644 memcg_wb_domain_exit(memcg); __mem_cgroup_free(memcg); } -@@ -5241,6 +5242,7 @@ static struct mem_cgroup *mem_cgroup_alloc(void) +@@ -5241,6 +5242,7 @@ static struct mem_cgroup *mem_cgroup_all memcg->deferred_split_queue.split_queue_len = 0; #endif idr_replace(&mem_cgroup_idr, memcg, memcg->id.id); @@ -632,11 +610,9 @@ index b68b2fe639fd..8b634dc72e7f 100644 return memcg; fail: mem_cgroup_id_remove(memcg); -diff --git a/mm/memory.c b/mm/memory.c -index 392b7326a2d2..7d5be951de9e 100644 --- a/mm/memory.c +++ b/mm/memory.c -@@ -4778,6 +4778,27 @@ static inline void mm_account_fault(struct pt_regs *regs, +@@ -4778,6 +4778,27 @@ static inline void mm_account_fault(stru perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address); } @@ -664,7 +640,7 @@ index 392b7326a2d2..7d5be951de9e 100644 /* * By the time we get here, we already hold the mm semaphore * -@@ -4809,11 +4830,15 @@ vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address, +@@ -4809,11 +4830,15 @@ vm_fault_t handle_mm_fault(struct vm_are if (flags & FAULT_FLAG_USER) mem_cgroup_enter_user_fault(); @@ -680,11 +656,9 @@ index 392b7326a2d2..7d5be951de9e 100644 if (flags & FAULT_FLAG_USER) { mem_cgroup_exit_user_fault(); /* -diff --git a/mm/mm_init.c b/mm/mm_init.c -index 9ddaf0e1b0ab..0d7b2bd2454a 100644 --- a/mm/mm_init.c +++ b/mm/mm_init.c -@@ -65,14 +65,16 @@ void __init mminit_verify_pageflags_layout(void) +@@ -65,14 +65,16 @@ void __init mminit_verify_pageflags_layo shift = 8 * sizeof(unsigned long); width = shift - SECTIONS_WIDTH - NODES_WIDTH - ZONES_WIDTH @@ -703,8 +677,6 @@ index 9ddaf0e1b0ab..0d7b2bd2454a 100644 NR_PAGEFLAGS); 
mminit_dprintk(MMINIT_TRACE, "pageflags_layout_shifts", "Section %d Node %d Zone %d Lastcpupid %d Kasantag %d\n", -diff --git a/mm/mmzone.c b/mm/mmzone.c -index eb89d6e018e2..2ec0d7793424 100644 --- a/mm/mmzone.c +++ b/mm/mmzone.c @@ -81,6 +81,8 @@ void lruvec_init(struct lruvec *lruvec) @@ -716,8 +688,6 @@ index eb89d6e018e2..2ec0d7793424 100644 } #if defined(CONFIG_NUMA_BALANCING) && !defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS) -diff --git a/mm/swap.c b/mm/swap.c -index af3cad4e5378..0bdc96661fb6 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -446,6 +446,11 @@ void lru_cache_add(struct page *page) @@ -732,7 +702,7 @@ index af3cad4e5378..0bdc96661fb6 100644 get_page(page); local_lock(&lru_pvecs.lock); pvec = this_cpu_ptr(&lru_pvecs.lru_add); -@@ -547,7 +552,7 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec) +@@ -547,7 +552,7 @@ static void lru_deactivate_file_fn(struc static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec) { @@ -741,7 +711,7 @@ index af3cad4e5378..0bdc96661fb6 100644 int nr_pages = thp_nr_pages(page); del_page_from_lru_list(page, lruvec); -@@ -661,7 +666,8 @@ void deactivate_file_page(struct page *page) +@@ -661,7 +666,8 @@ void deactivate_file_page(struct page *p */ void deactivate_page(struct page *page) { @@ -751,11 +721,9 @@ index af3cad4e5378..0bdc96661fb6 100644 struct pagevec *pvec; local_lock(&lru_pvecs.lock); -diff --git a/mm/vmscan.c b/mm/vmscan.c -index dc5f0381513f..41826fe17eb3 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c -@@ -2821,6 +2821,81 @@ static bool can_age_anon_pages(struct pglist_data *pgdat, +@@ -2821,6 +2821,81 @@ static bool can_age_anon_pages(struct pg return can_demote(pgdat->node_id, sc); } @@ -837,6 +805,3 @@ index dc5f0381513f..41826fe17eb3 100644 static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc) { unsigned long nr[NR_LRU_LISTS]; --- -2.40.0 - diff --git a/target/linux/generic/backport-5.15/020-v6.1-06-mm-multi-gen-LRU-minimal-implementation.patch b/target/linux/generic/backport-5.15/020-v6.1-06-mm-multi-gen-LRU-minimal-implementation.patch index 1e310ae211..f8a7d9bd7f 100644 --- a/target/linux/generic/backport-5.15/020-v6.1-06-mm-multi-gen-LRU-minimal-implementation.patch +++ b/target/linux/generic/backport-5.15/020-v6.1-06-mm-multi-gen-LRU-minimal-implementation.patch @@ -208,11 +208,9 @@ Signed-off-by: Andrew Morton mm/workingset.c | 110 ++++- 8 files changed, 1025 insertions(+), 11 deletions(-) -diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h -index 65320d2b8f60..58aabb1ba020 100644 --- a/include/linux/mm_inline.h +++ b/include/linux/mm_inline.h -@@ -106,6 +106,33 @@ static inline int lru_gen_from_seq(unsigned long seq) +@@ -106,6 +106,33 @@ static inline int lru_gen_from_seq(unsig return seq % MAX_NR_GENS; } @@ -246,7 +244,7 @@ index 65320d2b8f60..58aabb1ba020 100644 static inline int page_lru_gen(struct page *page) { unsigned long flags = READ_ONCE(page->flags); -@@ -158,6 +185,15 @@ static inline void lru_gen_update_size(struct lruvec *lruvec, struct page *page, +@@ -158,6 +185,15 @@ static inline void lru_gen_update_size(s __update_lru_size(lruvec, lru, zone, -delta); return; } @@ -262,8 +260,6 @@ index 65320d2b8f60..58aabb1ba020 100644 } static inline bool lru_gen_add_page(struct lruvec *lruvec, struct page *page, bool reclaiming) -diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h -index 0c39f72184d0..fce8945c507c 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -327,6 +327,28 @@ enum lruvec_flags { @@ -328,8 +324,6 @@ index 
0c39f72184d0..fce8945c507c 100644 }; void lru_gen_init_lruvec(struct lruvec *lruvec); -diff --git a/include/linux/page-flags-layout.h b/include/linux/page-flags-layout.h -index 240905407a18..7d79818dc065 100644 --- a/include/linux/page-flags-layout.h +++ b/include/linux/page-flags-layout.h @@ -106,7 +106,10 @@ @@ -344,8 +338,6 @@ index 240905407a18..7d79818dc065 100644 #endif #endif /* _LINUX_PAGE_FLAGS_LAYOUT */ -diff --git a/kernel/bounds.c b/kernel/bounds.c -index 5ee60777d8e4..b529182e8b04 100644 --- a/kernel/bounds.c +++ b/kernel/bounds.c @@ -24,8 +24,10 @@ int main(void) @@ -359,8 +351,6 @@ index 5ee60777d8e4..b529182e8b04 100644 #endif /* End of constants */ -diff --git a/mm/Kconfig b/mm/Kconfig -index 0eeb27397884..62433f3cd7ae 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -897,6 +897,7 @@ config IO_MAPPING @@ -388,11 +378,9 @@ index 0eeb27397884..62433f3cd7ae 100644 source "mm/damon/Kconfig" endmenu -diff --git a/mm/swap.c b/mm/swap.c -index 0bdc96661fb6..5d227577b609 100644 --- a/mm/swap.c +++ b/mm/swap.c -@@ -389,6 +389,40 @@ static void __lru_cache_activate_page(struct page *page) +@@ -389,6 +389,40 @@ static void __lru_cache_activate_page(st local_unlock(&lru_pvecs.lock); } @@ -433,7 +421,7 @@ index 0bdc96661fb6..5d227577b609 100644 /* * Mark a page as having seen activity. * -@@ -403,6 +437,11 @@ void mark_page_accessed(struct page *page) +@@ -403,6 +437,11 @@ void mark_page_accessed(struct page *pag { page = compound_head(page); @@ -445,11 +433,9 @@ index 0bdc96661fb6..5d227577b609 100644 if (!PageReferenced(page)) { SetPageReferenced(page); } else if (PageUnevictable(page)) { -diff --git a/mm/vmscan.c b/mm/vmscan.c -index 41826fe17eb3..932abd24c1b3 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c -@@ -1142,9 +1142,11 @@ static int __remove_mapping(struct address_space *mapping, struct page *page, +@@ -1142,9 +1142,11 @@ static int __remove_mapping(struct addre if (PageSwapCache(page)) { swp_entry_t swap = { .val = page_private(page) }; @@ -462,7 +448,7 @@ index 41826fe17eb3..932abd24c1b3 100644 __delete_from_swap_cache(page, swap, shadow); xa_unlock_irq(&mapping->i_pages); put_swap_page(page, swap); -@@ -2502,6 +2504,9 @@ static void prepare_scan_count(pg_data_t *pgdat, struct scan_control *sc) +@@ -2502,6 +2504,9 @@ static void prepare_scan_count(pg_data_t unsigned long file; struct lruvec *target_lruvec; @@ -472,7 +458,7 @@ index 41826fe17eb3..932abd24c1b3 100644 target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat); /* -@@ -2827,6 +2832,17 @@ static bool can_age_anon_pages(struct pglist_data *pgdat, +@@ -2827,6 +2832,17 @@ static bool can_age_anon_pages(struct pg * shorthand helpers ******************************************************************************/ @@ -490,7 +476,7 @@ index 41826fe17eb3..932abd24c1b3 100644 #define for_each_gen_type_zone(gen, type, zone) \ for ((gen) = 0; (gen) < MAX_NR_GENS; (gen)++) \ for ((type) = 0; (type) < ANON_AND_FILE; (type)++) \ -@@ -2852,6 +2868,745 @@ static struct lruvec __maybe_unused *get_lruvec(struct mem_cgroup *memcg, int ni +@@ -2852,6 +2868,745 @@ static struct lruvec __maybe_unused *get return pgdat ? 
&pgdat->__lruvec : NULL; } @@ -1253,7 +1239,7 @@ index 41826fe17eb3..932abd24c1b3 100644 #endif /* CONFIG_LRU_GEN */ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc) -@@ -2907,6 +3672,11 @@ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc) +@@ -2907,6 +3672,11 @@ static void shrink_lruvec(struct lruvec bool proportional_reclaim; struct blk_plug plug; @@ -1265,7 +1251,7 @@ index 41826fe17eb3..932abd24c1b3 100644 get_scan_count(lruvec, sc, nr); /* Record the original scan target for proportional adjustments later */ -@@ -3372,6 +4142,9 @@ static void snapshot_refaults(struct mem_cgroup *target_memcg, pg_data_t *pgdat) +@@ -3372,6 +4142,9 @@ static void snapshot_refaults(struct mem struct lruvec *target_lruvec; unsigned long refaults; @@ -1275,7 +1261,7 @@ index 41826fe17eb3..932abd24c1b3 100644 target_lruvec = mem_cgroup_lruvec(target_memcg, pgdat); refaults = lruvec_page_state(target_lruvec, WORKINGSET_ACTIVATE_ANON); target_lruvec->refaults[0] = refaults; -@@ -3736,12 +4509,16 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg, +@@ -3736,12 +4509,16 @@ unsigned long try_to_free_mem_cgroup_pag } #endif @@ -1294,7 +1280,7 @@ index 41826fe17eb3..932abd24c1b3 100644 if (!can_age_anon_pages(pgdat, sc)) return; -@@ -4058,12 +4835,11 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int highest_zoneidx) +@@ -4058,12 +4835,11 @@ restart: sc.may_swap = !nr_boost_reclaim; /* @@ -1311,11 +1297,9 @@ index 41826fe17eb3..932abd24c1b3 100644 /* * If we're getting trouble reclaiming, start doing writepage -diff --git a/mm/workingset.c b/mm/workingset.c -index 880d882f3325..aeba62cebf8c 100644 --- a/mm/workingset.c +++ b/mm/workingset.c -@@ -187,7 +187,6 @@ static unsigned int bucket_order __read_mostly; +@@ -187,7 +187,6 @@ static unsigned int bucket_order __read_ static void *pack_shadow(int memcgid, pg_data_t *pgdat, unsigned long eviction, bool workingset) { @@ -1323,7 +1307,7 @@ index 880d882f3325..aeba62cebf8c 100644 eviction &= EVICTION_MASK; eviction = (eviction << MEM_CGROUP_ID_SHIFT) | memcgid; eviction = (eviction << NODES_SHIFT) | pgdat->node_id; -@@ -212,10 +211,107 @@ static void unpack_shadow(void *shadow, int *memcgidp, pg_data_t **pgdat, +@@ -212,10 +211,107 @@ static void unpack_shadow(void *shadow, *memcgidp = memcgid; *pgdat = NODE_DATA(nid); @@ -1432,7 +1416,7 @@ index 880d882f3325..aeba62cebf8c 100644 /** * workingset_age_nonresident - age non-resident entries as LRU ages * @lruvec: the lruvec that was aged -@@ -264,10 +360,14 @@ void *workingset_eviction(struct page *page, struct mem_cgroup *target_memcg) +@@ -264,10 +360,14 @@ void *workingset_eviction(struct page *p VM_BUG_ON_PAGE(page_count(page), page); VM_BUG_ON_PAGE(!PageLocked(page), page); @@ -1447,7 +1431,7 @@ index 880d882f3325..aeba62cebf8c 100644 workingset_age_nonresident(lruvec, thp_nr_pages(page)); return pack_shadow(memcgid, pgdat, eviction, PageWorkingset(page)); } -@@ -296,7 +396,13 @@ void workingset_refault(struct page *page, void *shadow) +@@ -296,7 +396,13 @@ void workingset_refault(struct page *pag bool workingset; int memcgid; @@ -1461,6 +1445,3 @@ index 880d882f3325..aeba62cebf8c 100644 rcu_read_lock(); /* --- -2.40.0 - diff --git a/target/linux/generic/backport-5.15/020-v6.1-07-mm-multi-gen-LRU-exploit-locality-in-rmap.patch b/target/linux/generic/backport-5.15/020-v6.1-07-mm-multi-gen-LRU-exploit-locality-in-rmap.patch index e0c6380b5f..5cd6e03dc6 100644 --- 
a/target/linux/generic/backport-5.15/020-v6.1-07-mm-multi-gen-LRU-exploit-locality-in-rmap.patch +++ b/target/linux/generic/backport-5.15/020-v6.1-07-mm-multi-gen-LRU-exploit-locality-in-rmap.patch @@ -108,11 +108,9 @@ Signed-off-by: Andrew Morton mm/vmscan.c | 184 +++++++++++++++++++++++++++++++++++++ 7 files changed, 232 insertions(+), 2 deletions(-) -diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h -index 4f189b17dafc..8d6a0329bc59 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h -@@ -442,6 +442,7 @@ static inline struct obj_cgroup *__page_objcg(struct page *page) +@@ -442,6 +442,7 @@ static inline struct obj_cgroup *__page_ * - LRU isolation * - lock_page_memcg() * - exclusive reference @@ -120,7 +118,7 @@ index 4f189b17dafc..8d6a0329bc59 100644 * * For a kmem page a caller should hold an rcu read lock to protect memcg * associated with a kmem page from being released. -@@ -497,6 +498,7 @@ static inline struct mem_cgroup *page_memcg_rcu(struct page *page) +@@ -497,6 +498,7 @@ static inline struct mem_cgroup *page_me * - LRU isolation * - lock_page_memcg() * - exclusive reference @@ -128,7 +126,7 @@ index 4f189b17dafc..8d6a0329bc59 100644 * * For a kmem page a caller should hold an rcu read lock to protect memcg * associated with a kmem page from being released. -@@ -953,6 +955,23 @@ void unlock_page_memcg(struct page *page); +@@ -953,6 +955,23 @@ void unlock_page_memcg(struct page *page void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val); @@ -152,7 +150,7 @@ index 4f189b17dafc..8d6a0329bc59 100644 /* idx can be of type enum memcg_stat_item or node_stat_item */ static inline void mod_memcg_state(struct mem_cgroup *memcg, int idx, int val) -@@ -1369,6 +1388,18 @@ static inline void unlock_page_memcg(struct page *page) +@@ -1369,6 +1388,18 @@ static inline void unlock_page_memcg(str { } @@ -171,8 +169,6 @@ index 4f189b17dafc..8d6a0329bc59 100644 static inline void mem_cgroup_handle_over_high(void) { } -diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h -index fce8945c507c..4db2b877fcf9 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -352,6 +352,7 @@ enum lruvec_flags { @@ -191,7 +187,7 @@ index fce8945c507c..4db2b877fcf9 100644 #ifdef CONFIG_MEMCG void lru_gen_init_memcg(struct mem_cgroup *memcg); -@@ -419,6 +421,10 @@ static inline void lru_gen_init_lruvec(struct lruvec *lruvec) +@@ -419,6 +421,10 @@ static inline void lru_gen_init_lruvec(s { } @@ -202,8 +198,6 @@ index fce8945c507c..4db2b877fcf9 100644 #ifdef CONFIG_MEMCG static inline void lru_gen_init_memcg(struct mem_cgroup *memcg) { -diff --git a/mm/internal.h b/mm/internal.h -index cf3cb933eba3..5c73246a092e 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -35,6 +35,7 @@ @@ -214,11 +208,9 @@ index cf3cb933eba3..5c73246a092e 100644 void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma, unsigned long floor, unsigned long ceiling); -diff --git a/mm/memcontrol.c b/mm/memcontrol.c -index 8b634dc72e7f..cc3431c5d9ba 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c -@@ -2798,6 +2798,7 @@ static void commit_charge(struct page *page, struct mem_cgroup *memcg) +@@ -2798,6 +2798,7 @@ static void commit_charge(struct page *p * - LRU isolation * - lock_page_memcg() * - exclusive reference @@ -226,8 +218,6 @@ index 8b634dc72e7f..cc3431c5d9ba 100644 */ page->memcg_data = (unsigned long)memcg; } -diff --git a/mm/rmap.c b/mm/rmap.c -index 330b361a460e..22a86122732e 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -73,6 +73,7 @@ @@ -238,7 +228,7 
@@ index 330b361a460e..22a86122732e 100644 #include -@@ -793,6 +794,12 @@ static bool page_referenced_one(struct page *page, struct vm_area_struct *vma, +@@ -793,6 +794,12 @@ static bool page_referenced_one(struct p } if (pvmw.pte) { @@ -251,11 +241,9 @@ index 330b361a460e..22a86122732e 100644 if (ptep_clear_flush_young_notify(vma, address, pvmw.pte)) { /* -diff --git a/mm/swap.c b/mm/swap.c -index 5d227577b609..966ff2d83343 100644 --- a/mm/swap.c +++ b/mm/swap.c -@@ -325,7 +325,7 @@ static bool need_activate_page_drain(int cpu) +@@ -325,7 +325,7 @@ static bool need_activate_page_drain(int return pagevec_count(&per_cpu(lru_pvecs.activate_page, cpu)) != 0; } @@ -264,7 +252,7 @@ index 5d227577b609..966ff2d83343 100644 { page = compound_head(page); if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) { -@@ -345,7 +345,7 @@ static inline void activate_page_drain(int cpu) +@@ -345,7 +345,7 @@ static inline void activate_page_drain(i { } @@ -273,11 +261,9 @@ index 5d227577b609..966ff2d83343 100644 { struct lruvec *lruvec; -diff --git a/mm/vmscan.c b/mm/vmscan.c -index 932abd24c1b3..1d0b25ae378c 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c -@@ -1409,6 +1409,11 @@ static unsigned int shrink_page_list(struct list_head *page_list, +@@ -1409,6 +1409,11 @@ retry: if (!sc->may_unmap && page_mapped(page)) goto keep_locked; @@ -289,7 +275,7 @@ index 932abd24c1b3..1d0b25ae378c 100644 may_enter_fs = (sc->gfp_mask & __GFP_FS) || (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO)); -@@ -2990,6 +2995,29 @@ static bool positive_ctrl_err(struct ctrl_pos *sp, struct ctrl_pos *pv) +@@ -2990,6 +2995,29 @@ static bool positive_ctrl_err(struct ctr * the aging ******************************************************************************/ @@ -319,7 +305,7 @@ index 932abd24c1b3..1d0b25ae378c 100644 /* protect pages accessed multiple times through file descriptors */ static int page_inc_gen(struct lruvec *lruvec, struct page *page, bool reclaiming) { -@@ -3001,6 +3029,11 @@ static int page_inc_gen(struct lruvec *lruvec, struct page *page, bool reclaimin +@@ -3001,6 +3029,11 @@ static int page_inc_gen(struct lruvec *l VM_WARN_ON_ONCE_PAGE(!(old_flags & LRU_GEN_MASK), page); do { @@ -331,7 +317,7 @@ index 932abd24c1b3..1d0b25ae378c 100644 new_gen = (old_gen + 1) % MAX_NR_GENS; new_flags = old_flags & ~(LRU_GEN_MASK | LRU_REFS_MASK | LRU_REFS_FLAGS); -@@ -3015,6 +3048,43 @@ static int page_inc_gen(struct lruvec *lruvec, struct page *page, bool reclaimin +@@ -3015,6 +3048,43 @@ static int page_inc_gen(struct lruvec *l return new_gen; } @@ -375,7 +361,7 @@ index 932abd24c1b3..1d0b25ae378c 100644 static void inc_min_seq(struct lruvec *lruvec, int type) { struct lru_gen_struct *lrugen = &lruvec->lrugen; -@@ -3214,6 +3284,114 @@ static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc) +@@ -3214,6 +3284,114 @@ static void lru_gen_age_node(struct pgli } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL))); } @@ -490,7 +476,7 @@ index 932abd24c1b3..1d0b25ae378c 100644 /****************************************************************************** * the eviction ******************************************************************************/ -@@ -3250,6 +3428,12 @@ static bool sort_page(struct lruvec *lruvec, struct page *page, int tier_idx) +@@ -3250,6 +3428,12 @@ static bool sort_page(struct lruvec *lru return true; } @@ -503,6 +489,3 @@ index 932abd24c1b3..1d0b25ae378c 100644 /* protected */ if (tier > tier_idx) { int hist = lru_hist_from_seq(lrugen->min_seq[type]); --- -2.40.0 - diff --git 
a/target/linux/generic/backport-5.15/020-v6.1-08-mm-multi-gen-LRU-support-page-table-walks.patch b/target/linux/generic/backport-5.15/020-v6.1-08-mm-multi-gen-LRU-support-page-table-walks.patch index 64de2c0c82..b0df223b7c 100644 --- a/target/linux/generic/backport-5.15/020-v6.1-08-mm-multi-gen-LRU-support-page-table-walks.patch +++ b/target/linux/generic/backport-5.15/020-v6.1-08-mm-multi-gen-LRU-support-page-table-walks.patch @@ -147,11 +147,9 @@ Signed-off-by: Andrew Morton mm/vmscan.c | 1010 +++++++++++++++++++++++++++++++++++- 10 files changed, 1172 insertions(+), 17 deletions(-) -diff --git a/fs/exec.c b/fs/exec.c -index 881390b44cfd..1afa15a07d26 100644 --- a/fs/exec.c +++ b/fs/exec.c -@@ -1013,6 +1013,7 @@ static int exec_mmap(struct mm_struct *mm) +@@ -1013,6 +1013,7 @@ static int exec_mmap(struct mm_struct *m active_mm = tsk->active_mm; tsk->active_mm = mm; tsk->mm = mm; @@ -159,7 +157,7 @@ index 881390b44cfd..1afa15a07d26 100644 /* * This prevents preemption while active_mm is being loaded and * it and mm are being updated, which could cause problems for -@@ -1028,6 +1029,7 @@ static int exec_mmap(struct mm_struct *mm) +@@ -1028,6 +1029,7 @@ static int exec_mmap(struct mm_struct *m tsk->mm->vmacache_seqnum = 0; vmacache_flush(tsk); task_unlock(tsk); @@ -167,8 +165,6 @@ index 881390b44cfd..1afa15a07d26 100644 if (old_mm) { mmap_read_unlock(old_mm); BUG_ON(active_mm != old_mm); -diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h -index 8d6a0329bc59..3736405cbcf6 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -348,6 +348,11 @@ struct mem_cgroup { @@ -183,8 +179,6 @@ index 8d6a0329bc59..3736405cbcf6 100644 struct mem_cgroup_per_node *nodeinfo[]; }; -diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h -index 7f8ee09c711f..33c142d31261 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -580,6 +580,22 @@ struct mm_struct { @@ -210,7 +204,7 @@ index 7f8ee09c711f..33c142d31261 100644 } __randomize_layout; /* -@@ -606,6 +622,66 @@ static inline cpumask_t *mm_cpumask(struct mm_struct *mm) +@@ -606,6 +622,66 @@ static inline cpumask_t *mm_cpumask(stru return (struct cpumask *)&mm->cpu_bitmap; } @@ -277,8 +271,6 @@ index 7f8ee09c711f..33c142d31261 100644 struct mmu_gather; extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm); extern void tlb_gather_mmu_fullmm(struct mmu_gather *tlb, struct mm_struct *mm); -diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h -index 4db2b877fcf9..659bab633bdf 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -385,7 +385,7 @@ enum { @@ -365,8 +357,6 @@ index 4db2b877fcf9..659bab633bdf 100644 ZONE_PADDING(_pad2_) /* Per-node vmstats */ -diff --git a/include/linux/swap.h b/include/linux/swap.h -index 4efd267e2937..e970fca4f178 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -137,6 +137,10 @@ union swap_header { @@ -380,11 +370,9 @@ index 4efd267e2937..e970fca4f178 100644 }; #ifdef __KERNEL__ -diff --git a/kernel/exit.c b/kernel/exit.c -index 80efdfda6662..06b477395012 100644 --- a/kernel/exit.c +++ b/kernel/exit.c -@@ -469,6 +469,7 @@ void mm_update_next_owner(struct mm_struct *mm) +@@ -469,6 +469,7 @@ assign_new_owner: goto retry; } WRITE_ONCE(mm->owner, c); @@ -392,11 +380,9 @@ index 80efdfda6662..06b477395012 100644 task_unlock(c); put_task_struct(c); } -diff --git a/kernel/fork.c b/kernel/fork.c -index 68eab6ce3085..d8f37ecdde87 100644 --- a/kernel/fork.c +++ b/kernel/fork.c -@@ -1083,6 +1083,7 @@ static struct 
mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p, +@@ -1083,6 +1083,7 @@ static struct mm_struct *mm_init(struct goto fail_nocontext; mm->user_ns = get_user_ns(user_ns); @@ -404,7 +390,7 @@ index 68eab6ce3085..d8f37ecdde87 100644 return mm; fail_nocontext: -@@ -1125,6 +1126,7 @@ static inline void __mmput(struct mm_struct *mm) +@@ -1125,6 +1126,7 @@ static inline void __mmput(struct mm_str } if (mm->binfmt) module_put(mm->binfmt->module); @@ -412,7 +398,7 @@ index 68eab6ce3085..d8f37ecdde87 100644 mmdrop(mm); } -@@ -2622,6 +2624,13 @@ pid_t kernel_clone(struct kernel_clone_args *args) +@@ -2622,6 +2624,13 @@ pid_t kernel_clone(struct kernel_clone_a get_task_struct(p); } @@ -426,11 +412,9 @@ index 68eab6ce3085..d8f37ecdde87 100644 wake_up_new_task(p); /* forking complete and child started to run, tell ptracer */ -diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index c1458fa8beb3..fe4d60474d4a 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -5007,6 +5007,7 @@ context_switch(struct rq *rq, struct task_struct *prev, +@@ -5007,6 +5007,7 @@ context_switch(struct rq *rq, struct tas * finish_task_switch()'s mmdrop(). */ switch_mm_irqs_off(prev->active_mm, next->mm, next); @@ -438,8 +422,6 @@ index c1458fa8beb3..fe4d60474d4a 100644 if (!prev->mm) { // from kernel /* will mmdrop() in finish_task_switch(). */ -diff --git a/mm/memcontrol.c b/mm/memcontrol.c -index cc3431c5d9ba..ed87d1256f0e 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -6212,6 +6212,30 @@ static void mem_cgroup_move_task(void) @@ -473,7 +455,7 @@ index cc3431c5d9ba..ed87d1256f0e 100644 static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value) { if (value == PAGE_COUNTER_MAX) -@@ -6555,6 +6579,7 @@ struct cgroup_subsys memory_cgrp_subsys = { +@@ -6555,6 +6579,7 @@ struct cgroup_subsys memory_cgrp_subsys .css_reset = mem_cgroup_css_reset, .css_rstat_flush = mem_cgroup_css_rstat_flush, .can_attach = mem_cgroup_can_attach, @@ -481,8 +463,6 @@ index cc3431c5d9ba..ed87d1256f0e 100644 .cancel_attach = mem_cgroup_cancel_attach, .post_attach = mem_cgroup_move_task, .dfl_cftypes = memory_files, -diff --git a/mm/vmscan.c b/mm/vmscan.c -index 1d0b25ae378c..a7844c689522 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -50,6 +50,8 @@ @@ -494,7 +474,7 @@ index 1d0b25ae378c..a7844c689522 100644 #include #include -@@ -2853,7 +2855,7 @@ static bool can_age_anon_pages(struct pglist_data *pgdat, +@@ -2853,7 +2855,7 @@ static bool can_age_anon_pages(struct pg for ((type) = 0; (type) < ANON_AND_FILE; (type)++) \ for ((zone) = 0; (zone) < MAX_NR_ZONES; (zone)++) @@ -503,11 +483,10 @@ index 1d0b25ae378c..a7844c689522 100644 { struct pglist_data *pgdat = NODE_DATA(nid); -@@ -2898,6 +2900,371 @@ static bool __maybe_unused seq_is_valid(struct lruvec *lruvec) - get_nr_gens(lruvec, LRU_GEN_ANON) <= MAX_NR_GENS; +@@ -2899,6 +2901,371 @@ static bool __maybe_unused seq_is_valid( } -+/****************************************************************************** + /****************************************************************************** + * mm_struct list + ******************************************************************************/ + @@ -872,10 +851,11 @@ index 1d0b25ae378c..a7844c689522 100644 + return success; +} + - /****************************************************************************** ++/****************************************************************************** * refault feedback loop ******************************************************************************/ -@@ -3048,6 
+3415,118 @@ static int page_inc_gen(struct lruvec *lruvec, struct page *page, bool reclaimin + +@@ -3048,6 +3415,118 @@ static int page_inc_gen(struct lruvec *l return new_gen; } @@ -994,7 +974,7 @@ index 1d0b25ae378c..a7844c689522 100644 static unsigned long get_pte_pfn(pte_t pte, struct vm_area_struct *vma, unsigned long addr) { unsigned long pfn = pte_pfn(pte); -@@ -3066,8 +3545,28 @@ static unsigned long get_pte_pfn(pte_t pte, struct vm_area_struct *vma, unsigned +@@ -3066,8 +3545,28 @@ static unsigned long get_pte_pfn(pte_t p return pfn; } @@ -1024,7 +1004,7 @@ index 1d0b25ae378c..a7844c689522 100644 { struct page *page; -@@ -3082,9 +3581,375 @@ static struct page *get_pfn_page(unsigned long pfn, struct mem_cgroup *memcg, +@@ -3082,9 +3581,375 @@ static struct page *get_pfn_page(unsigne if (page_memcg_rcu(page) != memcg) return NULL; @@ -1400,7 +1380,7 @@ index 1d0b25ae378c..a7844c689522 100644 static void inc_min_seq(struct lruvec *lruvec, int type) { struct lru_gen_struct *lrugen = &lruvec->lrugen; -@@ -3136,7 +4001,7 @@ static bool try_to_inc_min_seq(struct lruvec *lruvec, bool can_swap) +@@ -3136,7 +4001,7 @@ next: return success; } @@ -1409,7 +1389,7 @@ index 1d0b25ae378c..a7844c689522 100644 { int prev, next; int type, zone; -@@ -3146,9 +4011,6 @@ static void inc_max_seq(struct lruvec *lruvec, unsigned long max_seq, bool can_s +@@ -3146,9 +4011,6 @@ static void inc_max_seq(struct lruvec *l VM_WARN_ON_ONCE(!seq_is_valid(lruvec)); @@ -1419,7 +1399,7 @@ index 1d0b25ae378c..a7844c689522 100644 for (type = ANON_AND_FILE - 1; type >= 0; type--) { if (get_nr_gens(lruvec, type) != MAX_NR_GENS) continue; -@@ -3186,10 +4048,76 @@ static void inc_max_seq(struct lruvec *lruvec, unsigned long max_seq, bool can_s +@@ -3186,10 +4048,76 @@ static void inc_max_seq(struct lruvec *l /* make sure preceding modifications appear */ smp_store_release(&lrugen->max_seq, lrugen->max_seq + 1); @@ -1497,7 +1477,7 @@ index 1d0b25ae378c..a7844c689522 100644 static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq, unsigned long *min_seq, struct scan_control *sc, bool can_swap, unsigned long *nr_to_scan) { -@@ -3265,7 +4193,7 @@ static void age_lruvec(struct lruvec *lruvec, struct scan_control *sc) +@@ -3265,7 +4193,7 @@ static void age_lruvec(struct lruvec *lr need_aging = should_run_aging(lruvec, max_seq, min_seq, sc, swappiness, &nr_to_scan); if (need_aging) @@ -1506,7 +1486,7 @@ index 1d0b25ae378c..a7844c689522 100644 } static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc) -@@ -3274,6 +4202,8 @@ static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc) +@@ -3274,6 +4202,8 @@ static void lru_gen_age_node(struct pgli VM_WARN_ON_ONCE(!current_is_kswapd()); @@ -1515,7 +1495,7 @@ index 1d0b25ae378c..a7844c689522 100644 memcg = mem_cgroup_iter(NULL, NULL, NULL); do { struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat); -@@ -3282,11 +4212,16 @@ static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc) +@@ -3282,11 +4212,16 @@ static void lru_gen_age_node(struct pgli cond_resched(); } while ((memcg = mem_cgroup_iter(NULL, memcg, NULL))); @@ -1533,7 +1513,7 @@ index 1d0b25ae378c..a7844c689522 100644 */ void lru_gen_look_around(struct page_vma_mapped_walk *pvmw) { -@@ -3295,6 +4230,8 @@ void lru_gen_look_around(struct page_vma_mapped_walk *pvmw) +@@ -3295,6 +4230,8 @@ void lru_gen_look_around(struct page_vma unsigned long start; unsigned long end; unsigned long addr; @@ -1542,7 +1522,7 @@ index 1d0b25ae378c..a7844c689522 
100644 unsigned long bitmap[BITS_TO_LONGS(MIN_LRU_BATCH)] = {}; struct page *page = pvmw->page; struct mem_cgroup *memcg = page_memcg(page); -@@ -3309,6 +4246,9 @@ void lru_gen_look_around(struct page_vma_mapped_walk *pvmw) +@@ -3309,6 +4246,9 @@ void lru_gen_look_around(struct page_vma if (spin_is_contended(pvmw->ptl)) return; @@ -1552,7 +1532,7 @@ index 1d0b25ae378c..a7844c689522 100644 start = max(pvmw->address & PMD_MASK, pvmw->vma->vm_start); end = min(pvmw->address | ~PMD_MASK, pvmw->vma->vm_end - 1) + 1; -@@ -3338,13 +4278,15 @@ void lru_gen_look_around(struct page_vma_mapped_walk *pvmw) +@@ -3338,13 +4278,15 @@ void lru_gen_look_around(struct page_vma if (!pte_young(pte[i])) continue; @@ -1569,7 +1549,7 @@ index 1d0b25ae378c..a7844c689522 100644 if (pte_dirty(pte[i]) && !PageDirty(page) && !(PageAnon(page) && PageSwapBacked(page) && !PageSwapCache(page))) -@@ -3360,7 +4302,11 @@ void lru_gen_look_around(struct page_vma_mapped_walk *pvmw) +@@ -3360,7 +4302,11 @@ void lru_gen_look_around(struct page_vma arch_leave_lazy_mmu_mode(); rcu_read_unlock(); @@ -1582,7 +1562,7 @@ index 1d0b25ae378c..a7844c689522 100644 for_each_set_bit(i, bitmap, MIN_LRU_BATCH) { page = pte_page(pte[i]); activate_page(page); -@@ -3372,8 +4318,10 @@ void lru_gen_look_around(struct page_vma_mapped_walk *pvmw) +@@ -3372,8 +4318,10 @@ void lru_gen_look_around(struct page_vma if (!mem_cgroup_trylock_pages(memcg)) return; @@ -1595,7 +1575,7 @@ index 1d0b25ae378c..a7844c689522 100644 for_each_set_bit(i, bitmap, MIN_LRU_BATCH) { page = compound_head(pte_page(pte[i])); -@@ -3384,10 +4332,14 @@ void lru_gen_look_around(struct page_vma_mapped_walk *pvmw) +@@ -3384,10 +4332,14 @@ void lru_gen_look_around(struct page_vma if (old_gen < 0 || old_gen == new_gen) continue; @@ -1612,7 +1592,7 @@ index 1d0b25ae378c..a7844c689522 100644 mem_cgroup_unlock_pages(); } -@@ -3670,6 +4622,7 @@ static int evict_pages(struct lruvec *lruvec, struct scan_control *sc, int swapp +@@ -3670,6 +4622,7 @@ static int evict_pages(struct lruvec *lr struct page *page; enum vm_event_item item; struct reclaim_stat stat; @@ -1620,7 +1600,7 @@ index 1d0b25ae378c..a7844c689522 100644 struct mem_cgroup *memcg = lruvec_memcg(lruvec); struct pglist_data *pgdat = lruvec_pgdat(lruvec); -@@ -3706,6 +4659,10 @@ static int evict_pages(struct lruvec *lruvec, struct scan_control *sc, int swapp +@@ -3706,6 +4659,10 @@ static int evict_pages(struct lruvec *lr move_pages_to_lru(lruvec, &list); @@ -1631,7 +1611,7 @@ index 1d0b25ae378c..a7844c689522 100644 item = current_is_kswapd() ? PGSTEAL_KSWAPD : PGSTEAL_DIRECT; if (!cgroup_reclaim(sc)) __count_vm_events(item, reclaimed); -@@ -3722,6 +4679,11 @@ static int evict_pages(struct lruvec *lruvec, struct scan_control *sc, int swapp +@@ -3722,6 +4679,11 @@ static int evict_pages(struct lruvec *lr return scanned; } @@ -1643,7 +1623,7 @@ index 1d0b25ae378c..a7844c689522 100644 static unsigned long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc, bool can_swap) { -@@ -3747,7 +4709,8 @@ static unsigned long get_nr_to_scan(struct lruvec *lruvec, struct scan_control * +@@ -3747,7 +4709,8 @@ static unsigned long get_nr_to_scan(stru if (current_is_kswapd()) return 0; @@ -1653,7 +1633,7 @@ index 1d0b25ae378c..a7844c689522 100644 done: return min_seq[!can_swap] + MIN_NR_GENS <= max_seq ? 
nr_to_scan : 0; } -@@ -3761,6 +4724,8 @@ static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc +@@ -3761,6 +4724,8 @@ static void lru_gen_shrink_lruvec(struct blk_start_plug(&plug); @@ -1662,7 +1642,7 @@ index 1d0b25ae378c..a7844c689522 100644 while (true) { int delta; int swappiness; -@@ -3788,6 +4753,8 @@ static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc +@@ -3788,6 +4753,8 @@ static void lru_gen_shrink_lruvec(struct cond_resched(); } @@ -1671,7 +1651,7 @@ index 1d0b25ae378c..a7844c689522 100644 blk_finish_plug(&plug); } -@@ -3804,15 +4771,21 @@ void lru_gen_init_lruvec(struct lruvec *lruvec) +@@ -3804,15 +4771,21 @@ void lru_gen_init_lruvec(struct lruvec * for_each_gen_type_zone(gen, type, zone) INIT_LIST_HEAD(&lrugen->lists[gen][type][zone]); @@ -1693,7 +1673,7 @@ index 1d0b25ae378c..a7844c689522 100644 int nid; for_each_node(nid) { -@@ -3820,6 +4793,11 @@ void lru_gen_exit_memcg(struct mem_cgroup *memcg) +@@ -3820,6 +4793,11 @@ void lru_gen_exit_memcg(struct mem_cgrou VM_WARN_ON_ONCE(memchr_inv(lruvec->lrugen.nr_pages, 0, sizeof(lruvec->lrugen.nr_pages))); @@ -1705,6 +1685,3 @@ index 1d0b25ae378c..a7844c689522 100644 } } #endif --- -2.40.0 - diff --git a/target/linux/generic/backport-5.15/020-v6.1-09-mm-multi-gen-LRU-optimize-multiple-memcgs.patch b/target/linux/generic/backport-5.15/020-v6.1-09-mm-multi-gen-LRU-optimize-multiple-memcgs.patch index e47bfc36d4..b5fb195151 100644 --- a/target/linux/generic/backport-5.15/020-v6.1-09-mm-multi-gen-LRU-optimize-multiple-memcgs.patch +++ b/target/linux/generic/backport-5.15/020-v6.1-09-mm-multi-gen-LRU-optimize-multiple-memcgs.patch @@ -134,8 +134,6 @@ Signed-off-by: Andrew Morton mm/vmscan.c | 105 +++++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 96 insertions(+), 9 deletions(-) -diff --git a/mm/vmscan.c b/mm/vmscan.c -index a7844c689522..b6f6fc2585e1 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -127,6 +127,12 @@ struct scan_control { @@ -151,7 +149,7 @@ index a7844c689522..b6f6fc2585e1 100644 /* Allocation order */ s8 order; -@@ -4202,6 +4208,19 @@ static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc) +@@ -4202,6 +4208,19 @@ static void lru_gen_age_node(struct pgli VM_WARN_ON_ONCE(!current_is_kswapd()); @@ -171,7 +169,7 @@ index a7844c689522..b6f6fc2585e1 100644 set_mm_walk(pgdat); memcg = mem_cgroup_iter(NULL, NULL, NULL); -@@ -4613,7 +4632,8 @@ static int isolate_pages(struct lruvec *lruvec, struct scan_control *sc, int swa +@@ -4613,7 +4632,8 @@ static int isolate_pages(struct lruvec * return scanned; } @@ -181,7 +179,7 @@ index a7844c689522..b6f6fc2585e1 100644 { int type; int scanned; -@@ -4676,6 +4696,9 @@ static int evict_pages(struct lruvec *lruvec, struct scan_control *sc, int swapp +@@ -4676,6 +4696,9 @@ static int evict_pages(struct lruvec *lr sc->nr_reclaimed += reclaimed; @@ -191,7 +189,7 @@ index a7844c689522..b6f6fc2585e1 100644 return scanned; } -@@ -4685,9 +4708,8 @@ static int evict_pages(struct lruvec *lruvec, struct scan_control *sc, int swapp +@@ -4685,9 +4708,8 @@ static int evict_pages(struct lruvec *lr * reclaim. 
*/ static unsigned long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc, @@ -202,7 +200,7 @@ index a7844c689522..b6f6fc2585e1 100644 unsigned long nr_to_scan; struct mem_cgroup *memcg = lruvec_memcg(lruvec); DEFINE_MAX_SEQ(lruvec); -@@ -4697,8 +4719,8 @@ static unsigned long get_nr_to_scan(struct lruvec *lruvec, struct scan_control * +@@ -4697,8 +4719,8 @@ static unsigned long get_nr_to_scan(stru (mem_cgroup_below_low(memcg) && !sc->memcg_low_reclaim)) return 0; @@ -213,7 +211,7 @@ index a7844c689522..b6f6fc2585e1 100644 return nr_to_scan; /* skip the aging path at the default priority */ -@@ -4715,10 +4737,68 @@ static unsigned long get_nr_to_scan(struct lruvec *lruvec, struct scan_control * +@@ -4715,10 +4737,68 @@ done: return min_seq[!can_swap] + MIN_NR_GENS <= max_seq ? nr_to_scan : 0; } @@ -282,7 +280,7 @@ index a7844c689522..b6f6fc2585e1 100644 lru_add_drain(); -@@ -4738,21 +4818,28 @@ static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc +@@ -4738,21 +4818,28 @@ static void lru_gen_shrink_lruvec(struct else swappiness = 0; @@ -315,6 +313,3 @@ index a7844c689522..b6f6fc2585e1 100644 clear_mm_walk(); blk_finish_plug(&plug); --- -2.40.0 - diff --git a/target/linux/generic/backport-5.15/020-v6.1-10-mm-multi-gen-LRU-kill-switch.patch b/target/linux/generic/backport-5.15/020-v6.1-10-mm-multi-gen-LRU-kill-switch.patch index 0adb15f5e2..cf5b8f0e9f 100644 --- a/target/linux/generic/backport-5.15/020-v6.1-10-mm-multi-gen-LRU-kill-switch.patch +++ b/target/linux/generic/backport-5.15/020-v6.1-10-mm-multi-gen-LRU-kill-switch.patch @@ -87,11 +87,9 @@ Signed-off-by: Andrew Morton mm/vmscan.c | 228 +++++++++++++++++++++++++++++++- 6 files changed, 265 insertions(+), 9 deletions(-) -diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h -index 45cdb12243e3..f9a5d6a81101 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h -@@ -433,6 +433,18 @@ static inline void cgroup_put(struct cgroup *cgrp) +@@ -433,6 +433,18 @@ static inline void cgroup_put(struct cgr css_put(&cgrp->self); } @@ -110,7 +108,7 @@ index 45cdb12243e3..f9a5d6a81101 100644 /** * task_css_set_check - obtain a task's css_set with extra access conditions * @task: the task to obtain css_set for -@@ -447,7 +459,6 @@ static inline void cgroup_put(struct cgroup *cgrp) +@@ -447,7 +459,6 @@ static inline void cgroup_put(struct cgr * as locks used during the cgroup_subsys::attach() methods. 
*/ #ifdef CONFIG_PROVE_RCU @@ -127,11 +125,9 @@ index 45cdb12243e3..f9a5d6a81101 100644 static inline int cgroup_attach_task_all(struct task_struct *from, struct task_struct *t) { return 0; } static inline int cgroupstats_build(struct cgroupstats *stats, -diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h -index 58aabb1ba020..e095c1c24311 100644 --- a/include/linux/mm_inline.h +++ b/include/linux/mm_inline.h -@@ -91,10 +91,21 @@ static __always_inline enum lru_list page_lru(struct page *page) +@@ -91,10 +91,21 @@ static __always_inline enum lru_list pag #ifdef CONFIG_LRU_GEN @@ -154,7 +150,7 @@ index 58aabb1ba020..e095c1c24311 100644 static inline bool lru_gen_in_fault(void) { -@@ -207,7 +218,7 @@ static inline bool lru_gen_add_page(struct lruvec *lruvec, struct page *page, bo +@@ -207,7 +218,7 @@ static inline bool lru_gen_add_page(stru VM_WARN_ON_ONCE_PAGE(gen != -1, page); @@ -163,8 +159,6 @@ index 58aabb1ba020..e095c1c24311 100644 return false; /* * There are three common cases for this page: -diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h -index 659bab633bdf..edaf035503ed 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -364,6 +364,13 @@ enum { @@ -190,8 +184,6 @@ index 659bab633bdf..edaf035503ed 100644 }; enum { -diff --git a/kernel/cgroup/cgroup-internal.h b/kernel/cgroup/cgroup-internal.h -index d8fcc139ac05..28c32a01da7d 100644 --- a/kernel/cgroup/cgroup-internal.h +++ b/kernel/cgroup/cgroup-internal.h @@ -165,7 +165,6 @@ struct cgroup_mgctx { @@ -202,8 +194,6 @@ index d8fcc139ac05..28c32a01da7d 100644 extern spinlock_t css_set_lock; extern struct cgroup_subsys *cgroup_subsys[]; extern struct list_head cgroup_roots; -diff --git a/mm/Kconfig b/mm/Kconfig -index 62433f3cd7ae..4a7d0af3c39b 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -906,6 +906,12 @@ config LRU_GEN @@ -219,8 +209,6 @@ index 62433f3cd7ae..4a7d0af3c39b 100644 config LRU_GEN_STATS bool "Full stats for debugging" depends on LRU_GEN -diff --git a/mm/vmscan.c b/mm/vmscan.c -index b6f6fc2585e1..be37d996bc92 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -52,6 +52,7 @@ @@ -231,7 +219,7 @@ index b6f6fc2585e1..be37d996bc92 100644 #include #include -@@ -2841,6 +2842,14 @@ static bool can_age_anon_pages(struct pglist_data *pgdat, +@@ -2841,6 +2842,14 @@ static bool can_age_anon_pages(struct pg #ifdef CONFIG_LRU_GEN @@ -246,7 +234,7 @@ index b6f6fc2585e1..be37d996bc92 100644 /****************************************************************************** * shorthand helpers ******************************************************************************/ -@@ -3717,7 +3726,8 @@ static void walk_pmd_range_locked(pud_t *pud, unsigned long next, struct vm_area +@@ -3717,7 +3726,8 @@ static void walk_pmd_range_locked(pud_t goto next; if (!pmd_trans_huge(pmd[i])) { @@ -256,7 +244,7 @@ index b6f6fc2585e1..be37d996bc92 100644 pmdp_test_and_clear_young(vma, addr, pmd + i); goto next; } -@@ -3815,10 +3825,12 @@ static void walk_pmd_range(pud_t *pud, unsigned long start, unsigned long end, +@@ -3815,10 +3825,12 @@ restart: walk->mm_stats[MM_NONLEAF_TOTAL]++; #ifdef CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG @@ -272,7 +260,7 @@ index b6f6fc2585e1..be37d996bc92 100644 #endif if (!walk->force_scan && !test_bloom_filter(walk->lruvec, walk->max_seq, pmd + i)) continue; -@@ -4080,7 +4092,7 @@ static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long max_seq, +@@ -4080,7 +4092,7 @@ static bool try_to_inc_max_seq(struct lr * handful of PTEs. 
Spreading the work out over a period of time usually * is less efficient, but it avoids bursty page faults. */ @@ -281,11 +269,10 @@ index b6f6fc2585e1..be37d996bc92 100644 success = iterate_mm_list_nowalk(lruvec, max_seq); goto done; } -@@ -4845,6 +4857,208 @@ static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc - blk_finish_plug(&plug); +@@ -4846,6 +4858,208 @@ done: } -+/****************************************************************************** + /****************************************************************************** + * state change + ******************************************************************************/ + @@ -487,10 +474,11 @@ index b6f6fc2585e1..be37d996bc92 100644 + .attrs = lru_gen_attrs, +}; + - /****************************************************************************** ++/****************************************************************************** * initialization ******************************************************************************/ -@@ -4855,6 +5069,7 @@ void lru_gen_init_lruvec(struct lruvec *lruvec) + +@@ -4855,6 +5069,7 @@ void lru_gen_init_lruvec(struct lruvec * struct lru_gen_struct *lrugen = &lruvec->lrugen; lrugen->max_seq = MIN_NR_GENS + 1; @@ -508,6 +496,3 @@ index b6f6fc2585e1..be37d996bc92 100644 return 0; }; late_initcall(init_lru_gen); --- -2.40.0 - diff --git a/target/linux/generic/backport-5.15/020-v6.1-11-mm-multi-gen-LRU-thrashing-prevention.patch b/target/linux/generic/backport-5.15/020-v6.1-11-mm-multi-gen-LRU-thrashing-prevention.patch index 5b1ed03ca9..30e20aff6e 100644 --- a/target/linux/generic/backport-5.15/020-v6.1-11-mm-multi-gen-LRU-thrashing-prevention.patch +++ b/target/linux/generic/backport-5.15/020-v6.1-11-mm-multi-gen-LRU-thrashing-prevention.patch @@ -68,8 +68,6 @@ Signed-off-by: Andrew Morton mm/vmscan.c | 74 ++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 73 insertions(+), 3 deletions(-) -diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h -index edaf035503ed..6b85ba1f4e18 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -399,6 +399,8 @@ struct lru_gen_struct { @@ -81,11 +79,9 @@ index edaf035503ed..6b85ba1f4e18 100644 /* the multi-gen LRU lists, lazily sorted on eviction */ struct list_head lists[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES]; /* the multi-gen LRU sizes, eventually consistent */ -diff --git a/mm/vmscan.c b/mm/vmscan.c -index be37d996bc92..642ee7bef61d 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c -@@ -4064,6 +4064,7 @@ static void inc_max_seq(struct lruvec *lruvec, bool can_swap) +@@ -4064,6 +4064,7 @@ static void inc_max_seq(struct lruvec *l for (type = 0; type < ANON_AND_FILE; type++) reset_ctrl_pos(lruvec, type, false); @@ -93,7 +89,7 @@ index be37d996bc92..642ee7bef61d 100644 /* make sure preceding modifications appear */ smp_store_release(&lrugen->max_seq, lrugen->max_seq + 1); -@@ -4193,7 +4194,7 @@ static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq, unsig +@@ -4193,7 +4194,7 @@ static bool should_run_aging(struct lruv return false; } @@ -102,7 +98,7 @@ index be37d996bc92..642ee7bef61d 100644 { bool need_aging; unsigned long nr_to_scan; -@@ -4207,16 +4208,36 @@ static void age_lruvec(struct lruvec *lruvec, struct scan_control *sc) +@@ -4207,16 +4208,36 @@ static void age_lruvec(struct lruvec *lr mem_cgroup_calculate_protection(NULL, memcg); if (mem_cgroup_below_min(memcg)) @@ -140,7 +136,7 @@ index be37d996bc92..642ee7bef61d 100644 VM_WARN_ON_ONCE(!current_is_kswapd()); -@@ -4239,12 +4260,32 @@ static 
void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc) +@@ -4239,12 +4260,32 @@ static void lru_gen_age_node(struct pgli do { struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat); @@ -174,7 +170,7 @@ index be37d996bc92..642ee7bef61d 100644 } /* -@@ -5002,6 +5043,28 @@ static void lru_gen_change_state(bool enabled) +@@ -5002,6 +5043,28 @@ unlock: * sysfs interface ******************************************************************************/ @@ -203,7 +199,7 @@ index be37d996bc92..642ee7bef61d 100644 static ssize_t show_enabled(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { unsigned int caps = 0; -@@ -5050,6 +5113,7 @@ static struct kobj_attribute lru_gen_enabled_attr = __ATTR( +@@ -5050,6 +5113,7 @@ static struct kobj_attribute lru_gen_ena ); static struct attribute *lru_gen_attrs[] = { @@ -211,7 +207,7 @@ index be37d996bc92..642ee7bef61d 100644 &lru_gen_enabled_attr.attr, NULL }; -@@ -5065,12 +5129,16 @@ static struct attribute_group lru_gen_attr_group = { +@@ -5065,12 +5129,16 @@ static struct attribute_group lru_gen_at void lru_gen_init_lruvec(struct lruvec *lruvec) { @@ -228,6 +224,3 @@ index be37d996bc92..642ee7bef61d 100644 for_each_gen_type_zone(gen, type, zone) INIT_LIST_HEAD(&lrugen->lists[gen][type][zone]); --- -2.40.0 - diff --git a/target/linux/generic/backport-5.15/020-v6.1-12-mm-multi-gen-LRU-debugfs-interface.patch b/target/linux/generic/backport-5.15/020-v6.1-12-mm-multi-gen-LRU-debugfs-interface.patch index ae9d76faa4..482e714bb6 100644 --- a/target/linux/generic/backport-5.15/020-v6.1-12-mm-multi-gen-LRU-debugfs-interface.patch +++ b/target/linux/generic/backport-5.15/020-v6.1-12-mm-multi-gen-LRU-debugfs-interface.patch @@ -64,11 +64,9 @@ Signed-off-by: Andrew Morton mm/vmscan.c | 411 ++++++++++++++++++++++++++++++++++++++- 2 files changed, 402 insertions(+), 10 deletions(-) -diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h -index 0f233b76c9ce..292ec0ce0d63 100644 --- a/include/linux/nodemask.h +++ b/include/linux/nodemask.h -@@ -485,6 +485,7 @@ static inline int num_node_state(enum node_states state) +@@ -485,6 +485,7 @@ static inline int num_node_state(enum no #define first_online_node 0 #define first_memory_node 0 #define next_online_node(nid) (MAX_NUMNODES) @@ -76,8 +74,6 @@ index 0f233b76c9ce..292ec0ce0d63 100644 #define nr_node_ids 1U #define nr_online_nodes 1U -diff --git a/mm/vmscan.c b/mm/vmscan.c -index 642ee7bef61d..b74b334488d8 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -53,6 +53,7 @@ @@ -130,7 +126,7 @@ index 642ee7bef61d..b74b334488d8 100644 } static bool try_to_inc_min_seq(struct lruvec *lruvec, bool can_swap) -@@ -4019,7 +4048,7 @@ static bool try_to_inc_min_seq(struct lruvec *lruvec, bool can_swap) +@@ -4019,7 +4048,7 @@ next: return success; } @@ -139,7 +135,7 @@ index 642ee7bef61d..b74b334488d8 100644 { int prev, next; int type, zone; -@@ -4033,9 +4062,13 @@ static void inc_max_seq(struct lruvec *lruvec, bool can_swap) +@@ -4033,9 +4062,13 @@ static void inc_max_seq(struct lruvec *l if (get_nr_gens(lruvec, type) != MAX_NR_GENS) continue; @@ -155,7 +151,7 @@ index 642ee7bef61d..b74b334488d8 100644 } /* -@@ -4072,7 +4105,7 @@ static void inc_max_seq(struct lruvec *lruvec, bool can_swap) +@@ -4072,7 +4105,7 @@ static void inc_max_seq(struct lruvec *l } static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long max_seq, @@ -164,7 +160,7 @@ index 642ee7bef61d..b74b334488d8 100644 { bool success; struct lru_gen_mm_walk *walk; -@@ -4093,7 +4126,7 @@ static bool try_to_inc_max_seq(struct lruvec 
*lruvec, unsigned long max_seq, +@@ -4093,7 +4126,7 @@ static bool try_to_inc_max_seq(struct lr * handful of PTEs. Spreading the work out over a period of time usually * is less efficient, but it avoids bursty page faults. */ @@ -173,7 +169,7 @@ index 642ee7bef61d..b74b334488d8 100644 success = iterate_mm_list_nowalk(lruvec, max_seq); goto done; } -@@ -4107,7 +4140,7 @@ static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long max_seq, +@@ -4107,7 +4140,7 @@ static bool try_to_inc_max_seq(struct lr walk->lruvec = lruvec; walk->max_seq = max_seq; walk->can_swap = can_swap; @@ -182,7 +178,7 @@ index 642ee7bef61d..b74b334488d8 100644 do { success = iterate_mm_list(lruvec, walk, &mm); -@@ -4127,7 +4160,7 @@ static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long max_seq, +@@ -4127,7 +4160,7 @@ done: VM_WARN_ON_ONCE(max_seq != READ_ONCE(lrugen->max_seq)); @@ -191,7 +187,7 @@ index 642ee7bef61d..b74b334488d8 100644 /* either this sees any waiters or they will see updated max_seq */ if (wq_has_sleeper(&lruvec->mm_state.wait)) wake_up_all(&lruvec->mm_state.wait); -@@ -4225,7 +4258,7 @@ static bool age_lruvec(struct lruvec *lruvec, struct scan_control *sc, unsigned +@@ -4225,7 +4258,7 @@ static bool age_lruvec(struct lruvec *lr } if (need_aging) @@ -200,7 +196,7 @@ index 642ee7bef61d..b74b334488d8 100644 return true; } -@@ -4784,7 +4817,7 @@ static unsigned long get_nr_to_scan(struct lruvec *lruvec, struct scan_control * +@@ -4784,7 +4817,7 @@ static unsigned long get_nr_to_scan(stru if (current_is_kswapd()) return 0; @@ -209,11 +205,10 @@ index 642ee7bef61d..b74b334488d8 100644 return nr_to_scan; done: return min_seq[!can_swap] + MIN_NR_GENS <= max_seq ? nr_to_scan : 0; -@@ -5123,6 +5156,361 @@ static struct attribute_group lru_gen_attr_group = { - .attrs = lru_gen_attrs, +@@ -5124,6 +5157,361 @@ static struct attribute_group lru_gen_at }; -+/****************************************************************************** + /****************************************************************************** + * debugfs interface + ******************************************************************************/ + @@ -568,9 +563,10 @@ index 642ee7bef61d..b74b334488d8 100644 + .release = seq_release, +}; + - /****************************************************************************** ++/****************************************************************************** * initialization ******************************************************************************/ + @@ -5180,6 +5568,9 @@ static int __init init_lru_gen(void) if (sysfs_create_group(mm_kobj, &lru_gen_attr_group)) pr_err("lru_gen: failed to create sysfs group\n"); @@ -581,6 +577,3 @@ index 642ee7bef61d..b74b334488d8 100644 return 0; }; late_initcall(init_lru_gen); --- -2.40.0 - diff --git a/target/linux/generic/backport-5.15/020-v6.1-13-mm-mglru-don-t-sync-disk-for-each-aging-cycle.patch b/target/linux/generic/backport-5.15/020-v6.1-13-mm-mglru-don-t-sync-disk-for-each-aging-cycle.patch index a2318499e7..fd4aa72747 100644 --- a/target/linux/generic/backport-5.15/020-v6.1-13-mm-mglru-don-t-sync-disk-for-each-aging-cycle.patch +++ b/target/linux/generic/backport-5.15/020-v6.1-13-mm-mglru-don-t-sync-disk-for-each-aging-cycle.patch @@ -19,11 +19,9 @@ Signed-off-by: Andrew Morton mm/vmscan.c | 2 -- 1 file changed, 2 deletions(-) -diff --git a/mm/vmscan.c b/mm/vmscan.c -index b74b334488d8..1c0875e6514a 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c -@@ -4165,8 +4165,6 @@ static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned 
long max_seq, +@@ -4165,8 +4165,6 @@ done: if (wq_has_sleeper(&lruvec->mm_state.wait)) wake_up_all(&lruvec->mm_state.wait); @@ -32,6 +30,3 @@ index b74b334488d8..1c0875e6514a 100644 return true; } --- -2.40.0 - diff --git a/target/linux/generic/backport-5.15/020-v6.1-14-mm-multi-gen-LRU-retry-pages-written-back-while-isol.patch b/target/linux/generic/backport-5.15/020-v6.1-14-mm-multi-gen-LRU-retry-pages-written-back-while-isol.patch index ffdebafa2c..31b35cbc4b 100644 --- a/target/linux/generic/backport-5.15/020-v6.1-14-mm-multi-gen-LRU-retry-pages-written-back-while-isol.patch +++ b/target/linux/generic/backport-5.15/020-v6.1-14-mm-multi-gen-LRU-retry-pages-written-back-while-isol.patch @@ -43,11 +43,9 @@ Signed-off-by: Andrew Morton mm/vmscan.c | 48 +++++++++++++++++++++++++++++++++++++----------- 1 file changed, 37 insertions(+), 11 deletions(-) -diff --git a/mm/vmscan.c b/mm/vmscan.c -index 1c0875e6514a..27bc525380f9 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c -@@ -4723,10 +4723,13 @@ static int evict_pages(struct lruvec *lruvec, struct scan_control *sc, int swapp +@@ -4723,10 +4723,13 @@ static int evict_pages(struct lruvec *lr int scanned; int reclaimed; LIST_HEAD(list); @@ -61,7 +59,7 @@ index 1c0875e6514a..27bc525380f9 100644 struct mem_cgroup *memcg = lruvec_memcg(lruvec); struct pglist_data *pgdat = lruvec_pgdat(lruvec); -@@ -4743,20 +4746,37 @@ static int evict_pages(struct lruvec *lruvec, struct scan_control *sc, int swapp +@@ -4743,20 +4746,37 @@ static int evict_pages(struct lruvec *lr if (list_empty(&list)) return scanned; @@ -109,7 +107,7 @@ index 1c0875e6514a..27bc525380f9 100644 } spin_lock_irq(&lruvec->lru_lock); -@@ -4778,7 +4798,13 @@ static int evict_pages(struct lruvec *lruvec, struct scan_control *sc, int swapp +@@ -4778,7 +4798,13 @@ static int evict_pages(struct lruvec *lr mem_cgroup_uncharge_list(&list); free_unref_page_list(&list); @@ -124,6 +122,3 @@ index 1c0875e6514a..27bc525380f9 100644 if (need_swapping && type == LRU_GEN_ANON) *need_swapping = true; --- -2.40.0 - diff --git a/target/linux/generic/backport-5.15/020-v6.1-15-mm-multi-gen-LRU-move-lru_gen_add_mm-out-of-IRQ-off-.patch b/target/linux/generic/backport-5.15/020-v6.1-15-mm-multi-gen-LRU-move-lru_gen_add_mm-out-of-IRQ-off-.patch index 7dc296b5b3..5b1d378504 100644 --- a/target/linux/generic/backport-5.15/020-v6.1-15-mm-multi-gen-LRU-move-lru_gen_add_mm-out-of-IRQ-off-.patch +++ b/target/linux/generic/backport-5.15/020-v6.1-15-mm-multi-gen-LRU-move-lru_gen_add_mm-out-of-IRQ-off-.patch @@ -29,11 +29,9 @@ Signed-off-by: Andrew Morton fs/exec.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) -diff --git a/fs/exec.c b/fs/exec.c -index 1afa15a07d26..718c58947be1 100644 --- a/fs/exec.c +++ b/fs/exec.c -@@ -1013,7 +1013,6 @@ static int exec_mmap(struct mm_struct *mm) +@@ -1013,7 +1013,6 @@ static int exec_mmap(struct mm_struct *m active_mm = tsk->active_mm; tsk->active_mm = mm; tsk->mm = mm; @@ -41,7 +39,7 @@ index 1afa15a07d26..718c58947be1 100644 /* * This prevents preemption while active_mm is being loaded and * it and mm are being updated, which could cause problems for -@@ -1028,6 +1027,7 @@ static int exec_mmap(struct mm_struct *mm) +@@ -1028,6 +1027,7 @@ static int exec_mmap(struct mm_struct *m local_irq_enable(); tsk->mm->vmacache_seqnum = 0; vmacache_flush(tsk); @@ -49,6 +47,3 @@ index 1afa15a07d26..718c58947be1 100644 task_unlock(tsk); lru_gen_use_mm(mm); if (old_mm) { --- -2.40.0 - diff --git 
a/target/linux/generic/backport-5.15/020-v6.1-17-mm-add-dummy-pmd_young-for-architectures-not-having-.patch b/target/linux/generic/backport-5.15/020-v6.1-17-mm-add-dummy-pmd_young-for-architectures-not-having-.patch index 4e11e82115..c91252eb6b 100644 --- a/target/linux/generic/backport-5.15/020-v6.1-17-mm-add-dummy-pmd_young-for-architectures-not-having-.patch +++ b/target/linux/generic/backport-5.15/020-v6.1-17-mm-add-dummy-pmd_young-for-architectures-not-having-.patch @@ -28,11 +28,9 @@ Signed-off-by: Andrew Morton include/linux/pgtable.h | 7 +++++++ 6 files changed, 12 insertions(+) -diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h -index 804889b70965..89ab8b4cf971 100644 --- a/arch/mips/include/asm/pgtable.h +++ b/arch/mips/include/asm/pgtable.h -@@ -632,6 +632,7 @@ static inline pmd_t pmd_mkdirty(pmd_t pmd) +@@ -632,6 +632,7 @@ static inline pmd_t pmd_mkdirty(pmd_t pm return pmd; } @@ -40,8 +38,6 @@ index 804889b70965..89ab8b4cf971 100644 static inline int pmd_young(pmd_t pmd) { return !!(pmd_val(pmd) & _PAGE_ACCESSED); -diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h -index 39b550310ec6..4a64e03dcdd4 100644 --- a/arch/riscv/include/asm/pgtable.h +++ b/arch/riscv/include/asm/pgtable.h @@ -531,6 +531,7 @@ static inline int pmd_dirty(pmd_t pmd) @@ -52,8 +48,6 @@ index 39b550310ec6..4a64e03dcdd4 100644 static inline int pmd_young(pmd_t pmd) { return pte_young(pmd_pte(pmd)); -diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h -index b61426c9ef17..55ff4f22da2d 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -748,6 +748,7 @@ static inline int pmd_dirty(pmd_t pmd) @@ -64,11 +58,9 @@ index b61426c9ef17..55ff4f22da2d 100644 static inline int pmd_young(pmd_t pmd) { return (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0; -diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h -index 4679e45c8348..bfd10179c137 100644 --- a/arch/sparc/include/asm/pgtable_64.h +++ b/arch/sparc/include/asm/pgtable_64.h -@@ -712,6 +712,7 @@ static inline unsigned long pmd_dirty(pmd_t pmd) +@@ -712,6 +712,7 @@ static inline unsigned long pmd_dirty(pm return pte_dirty(pte); } @@ -76,8 +68,6 @@ index 4679e45c8348..bfd10179c137 100644 static inline unsigned long pmd_young(pmd_t pmd) { pte_t pte = __pte(pmd_val(pmd)); -diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h -index 01a1763123ff..c4b64ee357fd 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -136,6 +136,7 @@ static inline int pmd_dirty(pmd_t pmd) @@ -88,11 +78,9 @@ index 01a1763123ff..c4b64ee357fd 100644 static inline int pmd_young(pmd_t pmd) { return pmd_flags(pmd) & _PAGE_ACCESSED; -diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h -index e6889556e0bf..dec3d890e814 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h -@@ -164,6 +164,13 @@ static inline pte_t *virt_to_kpte(unsigned long vaddr) +@@ -164,6 +164,13 @@ static inline pte_t *virt_to_kpte(unsign return pmd_none(*pmd) ? 
NULL : pte_offset_kernel(pmd, vaddr); } @@ -106,6 +94,3 @@ index e6889556e0bf..dec3d890e814 100644 #ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address, pte_t *ptep, --- -2.40.0 - diff --git a/target/linux/generic/backport-5.15/020-v6.1-18-mm-introduce-arch_has_hw_nonleaf_pmd_young.patch b/target/linux/generic/backport-5.15/020-v6.1-18-mm-introduce-arch_has_hw_nonleaf_pmd_young.patch index 4c4a2d11f2..9a1f9bead6 100644 --- a/target/linux/generic/backport-5.15/020-v6.1-18-mm-introduce-arch_has_hw_nonleaf_pmd_young.patch +++ b/target/linux/generic/backport-5.15/020-v6.1-18-mm-introduce-arch_has_hw_nonleaf_pmd_young.patch @@ -36,11 +36,9 @@ Signed-off-by: Andrew Morton mm/vmscan.c | 10 +++++----- 3 files changed, 24 insertions(+), 5 deletions(-) -diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h -index c4b64ee357fd..d8363c676496 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h -@@ -1405,6 +1405,14 @@ static inline bool arch_has_hw_pte_young(void) +@@ -1405,6 +1405,14 @@ static inline bool arch_has_hw_pte_young return true; } @@ -55,11 +53,9 @@ index c4b64ee357fd..d8363c676496 100644 #endif /* __ASSEMBLY__ */ #endif /* _ASM_X86_PGTABLE_H */ -diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h -index dec3d890e814..562b4cc82b33 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h -@@ -266,6 +266,17 @@ static inline int pmdp_clear_flush_young(struct vm_area_struct *vma, +@@ -266,6 +266,17 @@ static inline int pmdp_clear_flush_young #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #endif @@ -77,11 +73,9 @@ index dec3d890e814..562b4cc82b33 100644 #ifndef arch_has_hw_pte_young /* * Return whether the accessed bit is supported on the local CPU. 
-diff --git a/mm/vmscan.c b/mm/vmscan.c -index d310e0b9e520..96f1af44bb77 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c -@@ -3730,7 +3730,7 @@ static void walk_pmd_range_locked(pud_t *pud, unsigned long next, struct vm_area +@@ -3727,7 +3727,7 @@ static void walk_pmd_range_locked(pud_t goto next; if (!pmd_trans_huge(pmd[i])) { @@ -90,7 +84,7 @@ index d310e0b9e520..96f1af44bb77 100644 get_cap(LRU_GEN_NONLEAF_YOUNG)) pmdp_test_and_clear_young(vma, addr, pmd + i); goto next; -@@ -3828,14 +3828,14 @@ static void walk_pmd_range(pud_t *pud, unsigned long start, unsigned long end, +@@ -3825,14 +3825,14 @@ restart: #endif walk->mm_stats[MM_NONLEAF_TOTAL]++; @@ -108,7 +102,7 @@ index d310e0b9e520..96f1af44bb77 100644 if (!walk->force_scan && !test_bloom_filter(walk->lruvec, walk->max_seq, pmd + i)) continue; -@@ -5135,7 +5135,7 @@ static ssize_t show_enabled(struct kobject *kobj, struct kobj_attribute *attr, c +@@ -5132,7 +5132,7 @@ static ssize_t show_enabled(struct kobje if (arch_has_hw_pte_young() && get_cap(LRU_GEN_MM_WALK)) caps |= BIT(LRU_GEN_MM_WALK); @@ -117,6 +111,3 @@ index d310e0b9e520..96f1af44bb77 100644 caps |= BIT(LRU_GEN_NONLEAF_YOUNG); return snprintf(buf, PAGE_SIZE, "0x%04x\n", caps); --- -2.40.0 - diff --git a/target/linux/generic/backport-5.15/020-v6.2-16-mm-multi-gen-LRU-fix-crash-during-cgroup-migration.patch b/target/linux/generic/backport-5.15/020-v6.2-16-mm-multi-gen-LRU-fix-crash-during-cgroup-migration.patch index a8cd1fca1b..e37386abdf 100644 --- a/target/linux/generic/backport-5.15/020-v6.2-16-mm-multi-gen-LRU-fix-crash-during-cgroup-migration.patch +++ b/target/linux/generic/backport-5.15/020-v6.2-16-mm-multi-gen-LRU-fix-crash-during-cgroup-migration.patch @@ -34,11 +34,9 @@ Signed-off-by: Andrew Morton mm/vmscan.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) -diff --git a/mm/vmscan.c b/mm/vmscan.c -index 27bc525380f9..d310e0b9e520 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c -@@ -3024,13 +3024,16 @@ void lru_gen_migrate_mm(struct mm_struct *mm) +@@ -3024,13 +3024,16 @@ void lru_gen_migrate_mm(struct mm_struct if (mem_cgroup_disabled()) return; @@ -56,6 +54,3 @@ index 27bc525380f9..d310e0b9e520 100644 VM_WARN_ON_ONCE(list_empty(&mm->lru_gen.list)); lru_gen_del_mm(mm); --- -2.40.0 - diff --git a/target/linux/generic/backport-5.15/020-v6.3-19-mm-add-vma_has_recency.patch b/target/linux/generic/backport-5.15/020-v6.3-19-mm-add-vma_has_recency.patch index 542bb0c3a8..6154bbe352 100644 --- a/target/linux/generic/backport-5.15/020-v6.3-19-mm-add-vma_has_recency.patch +++ b/target/linux/generic/backport-5.15/020-v6.3-19-mm-add-vma_has_recency.patch @@ -56,11 +56,9 @@ Signed-off-by: Andrew Morton mm/vmscan.c | 5 ++++- 4 files changed, 35 insertions(+), 29 deletions(-) -diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h -index e095c1c24311..e8c723053a52 100644 --- a/include/linux/mm_inline.h +++ b/include/linux/mm_inline.h -@@ -333,4 +333,13 @@ static __always_inline void del_page_from_lru_list(struct page *page, +@@ -333,4 +333,13 @@ static __always_inline void del_page_fro update_lru_size(lruvec, page_lru(page), page_zonenum(page), -thp_nr_pages(page)); } @@ -74,8 +72,6 @@ index e095c1c24311..e8c723053a52 100644 +} + #endif -diff --git a/mm/memory.c b/mm/memory.c -index 7d5be951de9e..1306b1ff0c10 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -41,6 +41,7 @@ @@ -86,7 +82,7 @@ index 7d5be951de9e..1306b1ff0c10 100644 #include #include #include -@@ -1353,8 +1354,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb, +@@ -1353,8 +1354,7 @@ again: 
force_flush = 1; set_page_dirty(page); } @@ -96,7 +92,7 @@ index 7d5be951de9e..1306b1ff0c10 100644 mark_page_accessed(page); } rss[mm_counter(page)]--; -@@ -4781,8 +4781,8 @@ static inline void mm_account_fault(struct pt_regs *regs, +@@ -4781,8 +4781,8 @@ static inline void mm_account_fault(stru #ifdef CONFIG_LRU_GEN static void lru_gen_enter_fault(struct vm_area_struct *vma) { @@ -107,11 +103,9 @@ index 7d5be951de9e..1306b1ff0c10 100644 } static void lru_gen_exit_fault(void) -diff --git a/mm/rmap.c b/mm/rmap.c -index 22a86122732e..53df47753f3c 100644 --- a/mm/rmap.c +++ b/mm/rmap.c -@@ -794,25 +794,14 @@ static bool page_referenced_one(struct page *page, struct vm_area_struct *vma, +@@ -794,25 +794,14 @@ static bool page_referenced_one(struct p } if (pvmw.pte) { @@ -140,7 +134,7 @@ index 22a86122732e..53df47753f3c 100644 } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) { if (pmdp_clear_flush_young_notify(vma, address, pvmw.pmd)) -@@ -846,7 +835,20 @@ static bool invalid_page_referenced_vma(struct vm_area_struct *vma, void *arg) +@@ -846,7 +835,20 @@ static bool invalid_page_referenced_vma( struct page_referenced_arg *pra = arg; struct mem_cgroup *memcg = pra->memcg; @@ -186,11 +180,9 @@ index 22a86122732e..53df47753f3c 100644 rmap_walk(page, &rwc); *vm_flags = pra.vm_flags; -diff --git a/mm/vmscan.c b/mm/vmscan.c -index 96f1af44bb77..4ab376abeaae 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c -@@ -3486,7 +3486,10 @@ static int should_skip_vma(unsigned long start, unsigned long end, struct mm_wal +@@ -3486,7 +3486,10 @@ static int should_skip_vma(unsigned long if (is_vm_hugetlb_page(vma)) return true; @@ -202,6 +194,3 @@ index 96f1af44bb77..4ab376abeaae 100644 return true; if (vma == get_gate_vma(vma->vm_mm)) --- -2.40.0 - diff --git a/target/linux/generic/backport-5.15/020-v6.3-20-mm-support-POSIX_FADV_NOREUSE.patch b/target/linux/generic/backport-5.15/020-v6.3-20-mm-support-POSIX_FADV_NOREUSE.patch index 75f74114c6..3bb075bf36 100644 --- a/target/linux/generic/backport-5.15/020-v6.3-20-mm-support-POSIX_FADV_NOREUSE.patch +++ b/target/linux/generic/backport-5.15/020-v6.3-20-mm-support-POSIX_FADV_NOREUSE.patch @@ -79,11 +79,9 @@ Signed-off-by: Andrew Morton mm/fadvise.c | 5 ++++- 3 files changed, 9 insertions(+), 1 deletion(-) -diff --git a/include/linux/fs.h b/include/linux/fs.h -index 23ecfecdc450..601e52991f4a 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h -@@ -167,6 +167,8 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset, +@@ -167,6 +167,8 @@ typedef int (dio_iodone_t)(struct kiocb /* File is stream-like */ #define FMODE_STREAM ((__force fmode_t)0x200000) @@ -92,11 +90,9 @@ index 23ecfecdc450..601e52991f4a 100644 /* File was opened by fanotify and shouldn't generate fanotify events */ #define FMODE_NONOTIFY ((__force fmode_t)0x4000000) -diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h -index e8c723053a52..8a6a2a23f9b6 100644 --- a/include/linux/mm_inline.h +++ b/include/linux/mm_inline.h -@@ -339,6 +339,9 @@ static inline bool vma_has_recency(struct vm_area_struct *vma) +@@ -339,6 +339,9 @@ static inline bool vma_has_recency(struc if (vma->vm_flags & (VM_SEQ_READ | VM_RAND_READ)) return false; @@ -106,11 +102,9 @@ index e8c723053a52..8a6a2a23f9b6 100644 return true; } -diff --git a/mm/fadvise.c b/mm/fadvise.c -index d6baa4f451c5..e8023c69f219 100644 --- a/mm/fadvise.c +++ b/mm/fadvise.c -@@ -80,7 +80,7 @@ int generic_fadvise(struct file *file, loff_t offset, loff_t len, int advice) +@@ -80,7 +80,7 @@ int generic_fadvise(struct file *file, l 
case POSIX_FADV_NORMAL: file->f_ra.ra_pages = bdi->ra_pages; spin_lock(&file->f_lock); @@ -119,7 +113,7 @@ index d6baa4f451c5..e8023c69f219 100644 spin_unlock(&file->f_lock); break; case POSIX_FADV_RANDOM: -@@ -107,6 +107,9 @@ int generic_fadvise(struct file *file, loff_t offset, loff_t len, int advice) +@@ -107,6 +107,9 @@ int generic_fadvise(struct file *file, l force_page_cache_readahead(mapping, file, start_index, nrpages); break; case POSIX_FADV_NOREUSE: @@ -129,6 +123,3 @@ index d6baa4f451c5..e8023c69f219 100644 break; case POSIX_FADV_DONTNEED: if (!inode_write_congested(mapping->host)) --- -2.40.0 - diff --git a/target/linux/generic/backport-5.15/020-v6.3-21-mm-multi-gen-LRU-rename-lru_gen_struct-to-lru_gen_pa.patch b/target/linux/generic/backport-5.15/020-v6.3-21-mm-multi-gen-LRU-rename-lru_gen_struct-to-lru_gen_pa.patch index 836a16b8c7..4e09173681 100644 --- a/target/linux/generic/backport-5.15/020-v6.3-21-mm-multi-gen-LRU-rename-lru_gen_struct-to-lru_gen_pa.patch +++ b/target/linux/generic/backport-5.15/020-v6.3-21-mm-multi-gen-LRU-rename-lru_gen_struct-to-lru_gen_pa.patch @@ -122,11 +122,9 @@ Signed-off-by: Andrew Morton mm/workingset.c | 4 ++-- 4 files changed, 24 insertions(+), 24 deletions(-) -diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h -index 8a6a2a23f9b6..27c4890503c5 100644 --- a/include/linux/mm_inline.h +++ b/include/linux/mm_inline.h -@@ -168,7 +168,7 @@ static inline void lru_gen_update_size(struct lruvec *lruvec, struct page *page, +@@ -168,7 +168,7 @@ static inline void lru_gen_update_size(s int zone = page_zonenum(page); int delta = thp_nr_pages(page); enum lru_list lru = type * LRU_INACTIVE_FILE; @@ -135,7 +133,7 @@ index 8a6a2a23f9b6..27c4890503c5 100644 VM_WARN_ON_ONCE(old_gen != -1 && old_gen >= MAX_NR_GENS); VM_WARN_ON_ONCE(new_gen != -1 && new_gen >= MAX_NR_GENS); -@@ -214,7 +214,7 @@ static inline bool lru_gen_add_page(struct lruvec *lruvec, struct page *page, bo +@@ -214,7 +214,7 @@ static inline bool lru_gen_add_page(stru int gen = page_lru_gen(page); int type = page_is_file_lru(page); int zone = page_zonenum(page); @@ -144,8 +142,6 @@ index 8a6a2a23f9b6..27c4890503c5 100644 VM_WARN_ON_ONCE_PAGE(gen != -1, page); -diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h -index 6b85ba1f4e18..5856b026c089 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -394,7 +394,7 @@ enum { @@ -175,11 +171,9 @@ index 6b85ba1f4e18..5856b026c089 100644 /* to concurrently iterate lru_gen_mm_list */ struct lru_gen_mm_state mm_state; #endif -diff --git a/mm/vmscan.c b/mm/vmscan.c -index 4ab376abeaae..3b1b5bd9736a 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c -@@ -2910,7 +2910,7 @@ static int get_nr_gens(struct lruvec *lruvec, int type) +@@ -2910,7 +2910,7 @@ static int get_nr_gens(struct lruvec *lr static bool __maybe_unused seq_is_valid(struct lruvec *lruvec) { @@ -197,7 +191,7 @@ index 4ab376abeaae..3b1b5bd9736a 100644 int hist = lru_hist_from_seq(lrugen->min_seq[type]); pos->refaulted = lrugen->avg_refaulted[type][tier] + -@@ -3331,7 +3331,7 @@ static void read_ctrl_pos(struct lruvec *lruvec, int type, int tier, int gain, +@@ -3331,7 +3331,7 @@ static void read_ctrl_pos(struct lruvec static void reset_ctrl_pos(struct lruvec *lruvec, int type, bool carryover) { int hist, tier; @@ -206,7 +200,7 @@ index 4ab376abeaae..3b1b5bd9736a 100644 bool clear = carryover ? NR_HIST_GENS == 1 : NR_HIST_GENS > 1; unsigned long seq = carryover ? 
lrugen->min_seq[type] : lrugen->max_seq + 1; -@@ -3408,7 +3408,7 @@ static int page_update_gen(struct page *page, int gen) +@@ -3408,7 +3408,7 @@ static int page_update_gen(struct page * static int page_inc_gen(struct lruvec *lruvec, struct page *page, bool reclaiming) { int type = page_is_file_lru(page); @@ -215,7 +209,7 @@ index 4ab376abeaae..3b1b5bd9736a 100644 int new_gen, old_gen = lru_gen_from_seq(lrugen->min_seq[type]); unsigned long new_flags, old_flags = READ_ONCE(page->flags); -@@ -3453,7 +3453,7 @@ static void update_batch_size(struct lru_gen_mm_walk *walk, struct page *page, +@@ -3453,7 +3453,7 @@ static void update_batch_size(struct lru static void reset_batch_size(struct lruvec *lruvec, struct lru_gen_mm_walk *walk) { int gen, type, zone; @@ -224,7 +218,7 @@ index 4ab376abeaae..3b1b5bd9736a 100644 walk->batched = 0; -@@ -3979,7 +3979,7 @@ static bool inc_min_seq(struct lruvec *lruvec, int type, bool can_swap) +@@ -3979,7 +3979,7 @@ static bool inc_min_seq(struct lruvec *l { int zone; int remaining = MAX_LRU_BATCH; @@ -233,7 +227,7 @@ index 4ab376abeaae..3b1b5bd9736a 100644 int new_gen, old_gen = lru_gen_from_seq(lrugen->min_seq[type]); if (type == LRU_GEN_ANON && !can_swap) -@@ -4015,7 +4015,7 @@ static bool try_to_inc_min_seq(struct lruvec *lruvec, bool can_swap) +@@ -4015,7 +4015,7 @@ static bool try_to_inc_min_seq(struct lr { int gen, type, zone; bool success = false; @@ -242,7 +236,7 @@ index 4ab376abeaae..3b1b5bd9736a 100644 DEFINE_MIN_SEQ(lruvec); VM_WARN_ON_ONCE(!seq_is_valid(lruvec)); -@@ -4036,7 +4036,7 @@ static bool try_to_inc_min_seq(struct lruvec *lruvec, bool can_swap) +@@ -4036,7 +4036,7 @@ next: ; } @@ -251,7 +245,7 @@ index 4ab376abeaae..3b1b5bd9736a 100644 if (can_swap) { min_seq[LRU_GEN_ANON] = min(min_seq[LRU_GEN_ANON], min_seq[LRU_GEN_FILE]); min_seq[LRU_GEN_FILE] = max(min_seq[LRU_GEN_ANON], lrugen->min_seq[LRU_GEN_FILE]); -@@ -4058,7 +4058,7 @@ static void inc_max_seq(struct lruvec *lruvec, bool can_swap, bool force_scan) +@@ -4058,7 +4058,7 @@ static void inc_max_seq(struct lruvec *l { int prev, next; int type, zone; @@ -260,7 +254,7 @@ index 4ab376abeaae..3b1b5bd9736a 100644 spin_lock_irq(&lruvec->lru_lock); -@@ -4116,7 +4116,7 @@ static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long max_seq, +@@ -4116,7 +4116,7 @@ static bool try_to_inc_max_seq(struct lr bool success; struct lru_gen_mm_walk *walk; struct mm_struct *mm = NULL; @@ -269,7 +263,7 @@ index 4ab376abeaae..3b1b5bd9736a 100644 VM_WARN_ON_ONCE(max_seq > READ_ONCE(lrugen->max_seq)); -@@ -4181,7 +4181,7 @@ static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq, unsig +@@ -4181,7 +4181,7 @@ static bool should_run_aging(struct lruv unsigned long old = 0; unsigned long young = 0; unsigned long total = 0; @@ -278,7 +272,7 @@ index 4ab376abeaae..3b1b5bd9736a 100644 struct mem_cgroup *memcg = lruvec_memcg(lruvec); for (type = !can_swap; type < ANON_AND_FILE; type++) { -@@ -4466,7 +4466,7 @@ static bool sort_page(struct lruvec *lruvec, struct page *page, int tier_idx) +@@ -4466,7 +4466,7 @@ static bool sort_page(struct lruvec *lru int delta = thp_nr_pages(page); int refs = page_lru_refs(page); int tier = lru_tier_from_refs(refs); @@ -287,7 +281,7 @@ index 4ab376abeaae..3b1b5bd9736a 100644 VM_WARN_ON_ONCE_PAGE(gen >= MAX_NR_GENS, page); -@@ -4566,7 +4566,7 @@ static int scan_pages(struct lruvec *lruvec, struct scan_control *sc, +@@ -4566,7 +4566,7 @@ static int scan_pages(struct lruvec *lru int scanned = 0; int isolated = 0; int remaining = MAX_LRU_BATCH; @@ -296,7 
+290,7 @@ index 4ab376abeaae..3b1b5bd9736a 100644 struct mem_cgroup *memcg = lruvec_memcg(lruvec); VM_WARN_ON_ONCE(!list_empty(list)); -@@ -4967,7 +4967,7 @@ static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc +@@ -4967,7 +4967,7 @@ done: static bool __maybe_unused state_is_valid(struct lruvec *lruvec) { @@ -305,7 +299,7 @@ index 4ab376abeaae..3b1b5bd9736a 100644 if (lrugen->enabled) { enum lru_list lru; -@@ -5247,7 +5247,7 @@ static void lru_gen_seq_show_full(struct seq_file *m, struct lruvec *lruvec, +@@ -5247,7 +5247,7 @@ static void lru_gen_seq_show_full(struct int i; int type, tier; int hist = lru_hist_from_seq(seq); @@ -314,7 +308,7 @@ index 4ab376abeaae..3b1b5bd9736a 100644 for (tier = 0; tier < MAX_NR_TIERS; tier++) { seq_printf(m, " %10d", tier); -@@ -5296,7 +5296,7 @@ static int lru_gen_seq_show(struct seq_file *m, void *v) +@@ -5296,7 +5296,7 @@ static int lru_gen_seq_show(struct seq_f unsigned long seq; bool full = !debugfs_real_fops(m->file)->write; struct lruvec *lruvec = v; @@ -323,7 +317,7 @@ index 4ab376abeaae..3b1b5bd9736a 100644 int nid = lruvec_pgdat(lruvec)->node_id; struct mem_cgroup *memcg = lruvec_memcg(lruvec); DEFINE_MAX_SEQ(lruvec); -@@ -5549,7 +5549,7 @@ void lru_gen_init_lruvec(struct lruvec *lruvec) +@@ -5549,7 +5549,7 @@ void lru_gen_init_lruvec(struct lruvec * { int i; int gen, type, zone; @@ -332,11 +326,9 @@ index 4ab376abeaae..3b1b5bd9736a 100644 lrugen->max_seq = MIN_NR_GENS + 1; lrugen->enabled = lru_gen_enabled(); -diff --git a/mm/workingset.c b/mm/workingset.c -index aeba62cebf8c..a5e1798c6d60 100644 --- a/mm/workingset.c +++ b/mm/workingset.c -@@ -223,7 +223,7 @@ static void *lru_gen_eviction(struct page *page) +@@ -223,7 +223,7 @@ static void *lru_gen_eviction(struct pag unsigned long token; unsigned long min_seq; struct lruvec *lruvec; @@ -345,7 +337,7 @@ index aeba62cebf8c..a5e1798c6d60 100644 int type = page_is_file_lru(page); int delta = thp_nr_pages(page); int refs = page_lru_refs(page); -@@ -252,7 +252,7 @@ static void lru_gen_refault(struct page *page, void *shadow) +@@ -252,7 +252,7 @@ static void lru_gen_refault(struct page unsigned long token; unsigned long min_seq; struct lruvec *lruvec; @@ -354,6 +346,3 @@ index aeba62cebf8c..a5e1798c6d60 100644 struct mem_cgroup *memcg; struct pglist_data *pgdat; int type = page_is_file_lru(page); --- -2.40.0 - diff --git a/target/linux/generic/backport-5.15/020-v6.3-22-mm-multi-gen-LRU-rename-lrugen-lists-to-lrugen-pages.patch b/target/linux/generic/backport-5.15/020-v6.3-22-mm-multi-gen-LRU-rename-lrugen-lists-to-lrugen-pages.patch index 2e1783661d..b548c1c8b3 100644 --- a/target/linux/generic/backport-5.15/020-v6.3-22-mm-multi-gen-LRU-rename-lrugen-lists-to-lrugen-pages.patch +++ b/target/linux/generic/backport-5.15/020-v6.3-22-mm-multi-gen-LRU-rename-lrugen-lists-to-lrugen-pages.patch @@ -23,11 +23,9 @@ Signed-off-by: Andrew Morton mm/vmscan.c | 20 ++++++++++---------- 3 files changed, 16 insertions(+), 16 deletions(-) -diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h -index 27c4890503c5..4adc9ba59569 100644 --- a/include/linux/mm_inline.h +++ b/include/linux/mm_inline.h -@@ -246,9 +246,9 @@ static inline bool lru_gen_add_page(struct lruvec *lruvec, struct page *page, bo +@@ -246,9 +246,9 @@ static inline bool lru_gen_add_page(stru lru_gen_update_size(lruvec, page, -1, gen); /* for rotate_reclaimable_page() */ if (reclaiming) @@ -39,8 +37,6 @@ index 27c4890503c5..4adc9ba59569 100644 return true; } -diff --git a/include/linux/mmzone.h 
b/include/linux/mmzone.h -index 5856b026c089..7b8a26aaf381 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -302,7 +302,7 @@ enum lruvec_flags { @@ -72,11 +68,9 @@ index 5856b026c089..7b8a26aaf381 100644 /* the multi-gen LRU sizes, eventually consistent */ long nr_pages[MAX_NR_GENS][ANON_AND_FILE][MAX_NR_ZONES]; /* the exponential moving average of refaulted */ -diff --git a/mm/vmscan.c b/mm/vmscan.c -index 3b1b5bd9736a..2322c913aa64 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c -@@ -3987,7 +3987,7 @@ static bool inc_min_seq(struct lruvec *lruvec, int type, bool can_swap) +@@ -3987,7 +3987,7 @@ static bool inc_min_seq(struct lruvec *l /* prevent cold/hot inversion if force_scan is true */ for (zone = 0; zone < MAX_NR_ZONES; zone++) { @@ -85,7 +79,7 @@ index 3b1b5bd9736a..2322c913aa64 100644 while (!list_empty(head)) { struct page *page = lru_to_page(head); -@@ -3998,7 +3998,7 @@ static bool inc_min_seq(struct lruvec *lruvec, int type, bool can_swap) +@@ -3998,7 +3998,7 @@ static bool inc_min_seq(struct lruvec *l VM_WARN_ON_ONCE_PAGE(page_zonenum(page) != zone, page); new_gen = page_inc_gen(lruvec, page, false); @@ -94,7 +88,7 @@ index 3b1b5bd9736a..2322c913aa64 100644 if (!--remaining) return false; -@@ -4026,7 +4026,7 @@ static bool try_to_inc_min_seq(struct lruvec *lruvec, bool can_swap) +@@ -4026,7 +4026,7 @@ static bool try_to_inc_min_seq(struct lr gen = lru_gen_from_seq(min_seq[type]); for (zone = 0; zone < MAX_NR_ZONES; zone++) { @@ -103,7 +97,7 @@ index 3b1b5bd9736a..2322c913aa64 100644 goto next; } -@@ -4491,7 +4491,7 @@ static bool sort_page(struct lruvec *lruvec, struct page *page, int tier_idx) +@@ -4491,7 +4491,7 @@ static bool sort_page(struct lruvec *lru /* promoted */ if (gen != lru_gen_from_seq(lrugen->min_seq[type])) { @@ -112,7 +106,7 @@ index 3b1b5bd9736a..2322c913aa64 100644 return true; } -@@ -4500,7 +4500,7 @@ static bool sort_page(struct lruvec *lruvec, struct page *page, int tier_idx) +@@ -4500,7 +4500,7 @@ static bool sort_page(struct lruvec *lru int hist = lru_hist_from_seq(lrugen->min_seq[type]); gen = page_inc_gen(lruvec, page, false); @@ -121,7 +115,7 @@ index 3b1b5bd9736a..2322c913aa64 100644 WRITE_ONCE(lrugen->protected[hist][type][tier - 1], lrugen->protected[hist][type][tier - 1] + delta); -@@ -4512,7 +4512,7 @@ static bool sort_page(struct lruvec *lruvec, struct page *page, int tier_idx) +@@ -4512,7 +4512,7 @@ static bool sort_page(struct lruvec *lru if (PageLocked(page) || PageWriteback(page) || (type == LRU_GEN_FILE && PageDirty(page))) { gen = page_inc_gen(lruvec, page, true); @@ -130,7 +124,7 @@ index 3b1b5bd9736a..2322c913aa64 100644 return true; } -@@ -4579,7 +4579,7 @@ static int scan_pages(struct lruvec *lruvec, struct scan_control *sc, +@@ -4579,7 +4579,7 @@ static int scan_pages(struct lruvec *lru for (zone = sc->reclaim_idx; zone >= 0; zone--) { LIST_HEAD(moved); int skipped = 0; @@ -139,7 +133,7 @@ index 3b1b5bd9736a..2322c913aa64 100644 while (!list_empty(head)) { struct page *page = lru_to_page(head); -@@ -4980,7 +4980,7 @@ static bool __maybe_unused state_is_valid(struct lruvec *lruvec) +@@ -4980,7 +4980,7 @@ static bool __maybe_unused state_is_vali int gen, type, zone; for_each_gen_type_zone(gen, type, zone) { @@ -148,7 +142,7 @@ index 3b1b5bd9736a..2322c913aa64 100644 return false; } } -@@ -5025,7 +5025,7 @@ static bool drain_evictable(struct lruvec *lruvec) +@@ -5025,7 +5025,7 @@ static bool drain_evictable(struct lruve int remaining = MAX_LRU_BATCH; for_each_gen_type_zone(gen, type, zone) { @@ -157,7 +151,7 @@ index 
3b1b5bd9736a..2322c913aa64 100644 while (!list_empty(head)) { bool success; -@@ -5558,7 +5558,7 @@ void lru_gen_init_lruvec(struct lruvec *lruvec) +@@ -5558,7 +5558,7 @@ void lru_gen_init_lruvec(struct lruvec * lrugen->timestamps[i] = jiffies; for_each_gen_type_zone(gen, type, zone) @@ -166,6 +160,3 @@ index 3b1b5bd9736a..2322c913aa64 100644 lruvec->mm_state.seq = MIN_NR_GENS; init_waitqueue_head(&lruvec->mm_state.wait); --- -2.40.0 - diff --git a/target/linux/generic/backport-5.15/020-v6.3-23-mm-multi-gen-LRU-remove-eviction-fairness-safeguard.patch b/target/linux/generic/backport-5.15/020-v6.3-23-mm-multi-gen-LRU-remove-eviction-fairness-safeguard.patch index 1490f9d4bc..2bb6e12a5a 100644 --- a/target/linux/generic/backport-5.15/020-v6.3-23-mm-multi-gen-LRU-remove-eviction-fairness-safeguard.patch +++ b/target/linux/generic/backport-5.15/020-v6.3-23-mm-multi-gen-LRU-remove-eviction-fairness-safeguard.patch @@ -35,11 +35,9 @@ Signed-off-by: Andrew Morton mm/vmscan.c | 82 +++++++++++++++-------------------------------------- 1 file changed, 23 insertions(+), 59 deletions(-) -diff --git a/mm/vmscan.c b/mm/vmscan.c -index 2322c913aa64..40e7a947c5c7 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c -@@ -443,6 +443,11 @@ static bool cgroup_reclaim(struct scan_control *sc) +@@ -443,6 +443,11 @@ static bool cgroup_reclaim(struct scan_c return sc->target_mem_cgroup; } @@ -51,7 +49,7 @@ index 2322c913aa64..40e7a947c5c7 100644 /** * writeback_throttling_sane - is the usual dirty throttling mechanism available? * @sc: scan_control in question -@@ -493,6 +498,11 @@ static bool cgroup_reclaim(struct scan_control *sc) +@@ -493,6 +498,11 @@ static bool cgroup_reclaim(struct scan_c return false; } @@ -63,7 +61,7 @@ index 2322c913aa64..40e7a947c5c7 100644 static bool writeback_throttling_sane(struct scan_control *sc) { return true; -@@ -4722,8 +4732,7 @@ static int isolate_pages(struct lruvec *lruvec, struct scan_control *sc, int swa +@@ -4722,8 +4732,7 @@ static int isolate_pages(struct lruvec * return scanned; } @@ -73,7 +71,7 @@ index 2322c913aa64..40e7a947c5c7 100644 { int type; int scanned; -@@ -4812,9 +4821,6 @@ static int evict_pages(struct lruvec *lruvec, struct scan_control *sc, int swapp +@@ -4812,9 +4821,6 @@ retry: goto retry; } @@ -83,7 +81,7 @@ index 2322c913aa64..40e7a947c5c7 100644 return scanned; } -@@ -4853,68 +4859,26 @@ static unsigned long get_nr_to_scan(struct lruvec *lruvec, struct scan_control * +@@ -4853,68 +4859,26 @@ done: return min_seq[!can_swap] + MIN_NR_GENS <= max_seq ? 
nr_to_scan : 0; } @@ -161,7 +159,7 @@ index 2322c913aa64..40e7a947c5c7 100644 lru_add_drain(); -@@ -4938,7 +4902,7 @@ static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc +@@ -4938,7 +4902,7 @@ static void lru_gen_shrink_lruvec(struct if (!nr_to_scan) goto done; @@ -170,7 +168,7 @@ index 2322c913aa64..40e7a947c5c7 100644 if (!delta) goto done; -@@ -4946,7 +4910,7 @@ static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc +@@ -4946,7 +4910,7 @@ static void lru_gen_shrink_lruvec(struct if (scanned >= nr_to_scan) break; @@ -179,7 +177,7 @@ index 2322c913aa64..40e7a947c5c7 100644 break; cond_resched(); -@@ -5393,7 +5357,7 @@ static int run_eviction(struct lruvec *lruvec, unsigned long seq, struct scan_co +@@ -5393,7 +5357,7 @@ static int run_eviction(struct lruvec *l if (sc->nr_reclaimed >= nr_to_reclaim) return 0; @@ -188,6 +186,3 @@ index 2322c913aa64..40e7a947c5c7 100644 return 0; cond_resched(); --- -2.40.0 - diff --git a/target/linux/generic/backport-5.15/020-v6.3-24-mm-multi-gen-LRU-remove-aging-fairness-safeguard.patch b/target/linux/generic/backport-5.15/020-v6.3-24-mm-multi-gen-LRU-remove-aging-fairness-safeguard.patch index 82ba77dec2..316217ed02 100644 --- a/target/linux/generic/backport-5.15/020-v6.3-24-mm-multi-gen-LRU-remove-aging-fairness-safeguard.patch +++ b/target/linux/generic/backport-5.15/020-v6.3-24-mm-multi-gen-LRU-remove-aging-fairness-safeguard.patch @@ -35,8 +35,6 @@ Signed-off-by: Andrew Morton mm/vmscan.c | 126 ++++++++++++++++++++++++---------------------------- 1 file changed, 59 insertions(+), 67 deletions(-) -diff --git a/mm/vmscan.c b/mm/vmscan.c -index 40e7a947c5c7..7159436872ba 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -131,7 +131,6 @@ struct scan_control { @@ -47,7 +45,7 @@ index 40e7a947c5c7..7159436872ba 100644 unsigned long last_reclaimed; #endif -@@ -4184,7 +4183,7 @@ static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long max_seq, +@@ -4184,7 +4183,7 @@ done: return true; } @@ -56,7 +54,7 @@ index 40e7a947c5c7..7159436872ba 100644 struct scan_control *sc, bool can_swap, unsigned long *nr_to_scan) { int gen, type, zone; -@@ -4193,6 +4192,13 @@ static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq, unsig +@@ -4193,6 +4192,13 @@ static bool should_run_aging(struct lruv unsigned long total = 0; struct lru_gen_page *lrugen = &lruvec->lrugen; struct mem_cgroup *memcg = lruvec_memcg(lruvec); @@ -70,7 +68,7 @@ index 40e7a947c5c7..7159436872ba 100644 for (type = !can_swap; type < ANON_AND_FILE; type++) { unsigned long seq; -@@ -4221,8 +4227,6 @@ static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq, unsig +@@ -4221,8 +4227,6 @@ static bool should_run_aging(struct lruv * stalls when the number of generations reaches MIN_NR_GENS. Hence, the * ideal number of generations is MIN_NR_GENS+1. 
*/ @@ -79,7 +77,7 @@ index 40e7a947c5c7..7159436872ba 100644 if (min_seq[!can_swap] + MIN_NR_GENS < max_seq) return false; -@@ -4241,40 +4245,54 @@ static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq, unsig +@@ -4241,40 +4245,54 @@ static bool should_run_aging(struct lruv return false; } @@ -155,7 +153,7 @@ index 40e7a947c5c7..7159436872ba 100644 } /* to protect the working set of the last N jiffies */ -@@ -4283,46 +4301,32 @@ static unsigned long lru_gen_min_ttl __read_mostly; +@@ -4283,46 +4301,32 @@ static unsigned long lru_gen_min_ttl __r static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc) { struct mem_cgroup *memcg; @@ -209,7 +207,7 @@ index 40e7a947c5c7..7159436872ba 100644 */ if (mutex_trylock(&oom_lock)) { struct oom_control oc = { -@@ -4830,33 +4834,27 @@ static int evict_pages(struct lruvec *lruvec, struct scan_control *sc, int swapp +@@ -4830,33 +4834,27 @@ retry: * reclaim. */ static unsigned long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc, @@ -249,7 +247,7 @@ index 40e7a947c5c7..7159436872ba 100644 } static unsigned long get_nr_to_reclaim(struct scan_control *sc) -@@ -4875,9 +4873,7 @@ static unsigned long get_nr_to_reclaim(struct scan_control *sc) +@@ -4875,9 +4873,7 @@ static unsigned long get_nr_to_reclaim(s static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc) { struct blk_plug plug; @@ -259,7 +257,7 @@ index 40e7a947c5c7..7159436872ba 100644 unsigned long nr_to_reclaim = get_nr_to_reclaim(sc); lru_add_drain(); -@@ -4898,13 +4894,13 @@ static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc +@@ -4898,13 +4894,13 @@ static void lru_gen_shrink_lruvec(struct else swappiness = 0; @@ -276,7 +274,7 @@ index 40e7a947c5c7..7159436872ba 100644 scanned += delta; if (scanned >= nr_to_scan) -@@ -4916,10 +4912,6 @@ static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc +@@ -4916,10 +4912,6 @@ static void lru_gen_shrink_lruvec(struct cond_resched(); } @@ -287,6 +285,3 @@ index 40e7a947c5c7..7159436872ba 100644 clear_mm_walk(); blk_finish_plug(&plug); --- -2.40.0 - diff --git a/target/linux/generic/backport-5.15/020-v6.3-25-mm-multi-gen-LRU-shuffle-should_run_aging.patch b/target/linux/generic/backport-5.15/020-v6.3-25-mm-multi-gen-LRU-shuffle-should_run_aging.patch index bb5402a3f2..391ee6e67c 100644 --- a/target/linux/generic/backport-5.15/020-v6.3-25-mm-multi-gen-LRU-shuffle-should_run_aging.patch +++ b/target/linux/generic/backport-5.15/020-v6.3-25-mm-multi-gen-LRU-shuffle-should_run_aging.patch @@ -19,11 +19,9 @@ Signed-off-by: Andrew Morton mm/vmscan.c | 124 ++++++++++++++++++++++++++-------------------------- 1 file changed, 62 insertions(+), 62 deletions(-) -diff --git a/mm/vmscan.c b/mm/vmscan.c -index 7159436872ba..cb026e2714d7 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c -@@ -4183,68 +4183,6 @@ static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long max_seq, +@@ -4183,68 +4183,6 @@ done: return true; } @@ -92,7 +90,7 @@ index 7159436872ba..cb026e2714d7 100644 static bool lruvec_is_sizable(struct lruvec *lruvec, struct scan_control *sc) { int gen, type, zone; -@@ -4828,6 +4766,68 @@ static int evict_pages(struct lruvec *lruvec, struct scan_control *sc, int swapp +@@ -4828,6 +4766,68 @@ retry: return scanned; } @@ -161,6 +159,3 @@ index 7159436872ba..cb026e2714d7 100644 /* * For future optimizations: * 1. 
Defer try_to_inc_max_seq() to workqueues to reduce latency for memcg --- -2.40.0 - diff --git a/target/linux/generic/backport-5.15/020-v6.3-26-mm-multi-gen-LRU-per-node-lru_gen_page-lists.patch b/target/linux/generic/backport-5.15/020-v6.3-26-mm-multi-gen-LRU-per-node-lru_gen_page-lists.patch index 2a62570346..cf01c3997a 100644 --- a/target/linux/generic/backport-5.15/020-v6.3-26-mm-multi-gen-LRU-per-node-lru_gen_page-lists.patch +++ b/target/linux/generic/backport-5.15/020-v6.3-26-mm-multi-gen-LRU-per-node-lru_gen_page-lists.patch @@ -67,11 +67,9 @@ Signed-off-by: Andrew Morton mm/vmscan.c | 373 +++++++++++++++++++++++++++++++++---- 6 files changed, 499 insertions(+), 35 deletions(-) -diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h -index 3736405cbcf6..2e405fd88846 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h -@@ -818,6 +818,11 @@ static inline void obj_cgroup_put(struct obj_cgroup *objcg) +@@ -818,6 +818,11 @@ static inline void obj_cgroup_put(struct percpu_ref_put(&objcg->refcnt); } @@ -83,7 +81,7 @@ index 3736405cbcf6..2e405fd88846 100644 static inline void mem_cgroup_put(struct mem_cgroup *memcg) { if (memcg) -@@ -1283,6 +1288,11 @@ struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css) +@@ -1283,6 +1288,11 @@ struct mem_cgroup *mem_cgroup_from_css(s return NULL; } @@ -95,11 +93,9 @@ index 3736405cbcf6..2e405fd88846 100644 static inline void mem_cgroup_put(struct mem_cgroup *memcg) { } -diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h -index 4adc9ba59569..9138c2e638ce 100644 --- a/include/linux/mm_inline.h +++ b/include/linux/mm_inline.h -@@ -112,6 +112,18 @@ static inline bool lru_gen_in_fault(void) +@@ -112,6 +112,18 @@ static inline bool lru_gen_in_fault(void return current->in_lru_fault; } @@ -118,7 +114,7 @@ index 4adc9ba59569..9138c2e638ce 100644 static inline int lru_gen_from_seq(unsigned long seq) { return seq % MAX_NR_GENS; -@@ -287,6 +299,11 @@ static inline bool lru_gen_in_fault(void) +@@ -287,6 +299,11 @@ static inline bool lru_gen_in_fault(void return false; } @@ -130,8 +126,6 @@ index 4adc9ba59569..9138c2e638ce 100644 static inline bool lru_gen_add_page(struct lruvec *lruvec, struct page *page, bool reclaiming) { return false; -diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h -index 7b8a26aaf381..4bbf191517e2 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -7,6 +7,7 @@ @@ -173,7 +167,7 @@ index 7b8a26aaf381..4bbf191517e2 100644 }; enum { -@@ -469,12 +487,87 @@ void lru_gen_init_lruvec(struct lruvec *lruvec); +@@ -469,12 +487,87 @@ void lru_gen_init_lruvec(struct lruvec * void lru_gen_look_around(struct page_vma_mapped_walk *pvmw); #ifdef CONFIG_MEMCG @@ -262,7 +256,7 @@ index 7b8a26aaf381..4bbf191517e2 100644 static inline void lru_gen_init_lruvec(struct lruvec *lruvec) { } -@@ -484,6 +577,7 @@ static inline void lru_gen_look_around(struct page_vma_mapped_walk *pvmw) +@@ -484,6 +577,7 @@ static inline void lru_gen_look_around(s } #ifdef CONFIG_MEMCG @@ -270,7 +264,7 @@ index 7b8a26aaf381..4bbf191517e2 100644 static inline void lru_gen_init_memcg(struct mem_cgroup *memcg) { } -@@ -491,7 +585,24 @@ static inline void lru_gen_init_memcg(struct mem_cgroup *memcg) +@@ -491,7 +585,24 @@ static inline void lru_gen_init_memcg(st static inline void lru_gen_exit_memcg(struct mem_cgroup *memcg) { } @@ -305,11 +299,9 @@ index 7b8a26aaf381..4bbf191517e2 100644 #endif ZONE_PADDING(_pad2_) -diff --git a/mm/memcontrol.c b/mm/memcontrol.c -index ed87d1256f0e..172adfbee06e 
100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c -@@ -549,6 +549,16 @@ static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page) +@@ -549,6 +549,16 @@ static void mem_cgroup_update_tree(struc struct mem_cgroup_per_node *mz; struct mem_cgroup_tree_per_node *mctz; @@ -326,7 +318,7 @@ index ed87d1256f0e..172adfbee06e 100644 mctz = soft_limit_tree_from_page(page); if (!mctz) return; -@@ -3433,6 +3443,9 @@ unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, +@@ -3433,6 +3443,9 @@ unsigned long mem_cgroup_soft_limit_recl unsigned long excess; unsigned long nr_scanned; @@ -336,7 +328,7 @@ index ed87d1256f0e..172adfbee06e 100644 if (order > 0) return 0; -@@ -5321,6 +5334,7 @@ static int mem_cgroup_css_online(struct cgroup_subsys_state *css) +@@ -5321,6 +5334,7 @@ static int mem_cgroup_css_online(struct if (unlikely(mem_cgroup_is_root(memcg))) queue_delayed_work(system_unbound_wq, &stats_flush_dwork, 2UL*HZ); @@ -344,7 +336,7 @@ index ed87d1256f0e..172adfbee06e 100644 return 0; } -@@ -5347,6 +5361,7 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css) +@@ -5347,6 +5361,7 @@ static void mem_cgroup_css_offline(struc memcg_offline_kmem(memcg); reparent_shrinker_deferred(memcg); wb_memcg_offline(memcg); @@ -352,7 +344,7 @@ index ed87d1256f0e..172adfbee06e 100644 drain_all_stock(memcg); -@@ -5358,6 +5373,7 @@ static void mem_cgroup_css_released(struct cgroup_subsys_state *css) +@@ -5358,6 +5373,7 @@ static void mem_cgroup_css_released(stru struct mem_cgroup *memcg = mem_cgroup_from_css(css); invalidate_reclaim_iterators(memcg); @@ -360,11 +352,9 @@ index ed87d1256f0e..172adfbee06e 100644 } static void mem_cgroup_css_free(struct cgroup_subsys_state *css) -diff --git a/mm/page_alloc.c b/mm/page_alloc.c -index c929357fbefe..6459d9c018be 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c -@@ -7645,6 +7645,7 @@ static void __init free_area_init_node(int nid) +@@ -7645,6 +7645,7 @@ static void __init free_area_init_node(i pgdat_set_deferred_range(pgdat); free_area_init_core(pgdat); @@ -372,8 +362,6 @@ index c929357fbefe..6459d9c018be 100644 } void __init free_area_init_memoryless_node(int nid) -diff --git a/mm/vmscan.c b/mm/vmscan.c -index cb026e2714d7..3d8e0665186c 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -54,6 +54,8 @@ @@ -397,7 +385,7 @@ index cb026e2714d7..3d8e0665186c 100644 /* Allocation order */ s8 order; -@@ -2880,6 +2877,9 @@ DEFINE_STATIC_KEY_ARRAY_FALSE(lru_gen_caps, NR_LRU_GEN_CAPS); +@@ -2880,6 +2877,9 @@ DEFINE_STATIC_KEY_ARRAY_FALSE(lru_gen_ca for ((type) = 0; (type) < ANON_AND_FILE; (type)++) \ for ((zone) = 0; (zone) < MAX_NR_ZONES; (zone)++) @@ -407,7 +395,7 @@ index cb026e2714d7..3d8e0665186c 100644 static struct lruvec *get_lruvec(struct mem_cgroup *memcg, int nid) { struct pglist_data *pgdat = NODE_DATA(nid); -@@ -4169,8 +4169,7 @@ static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long max_seq, +@@ -4169,8 +4169,7 @@ done: if (sc->priority <= DEF_PRIORITY - 2) wait_event_killable(lruvec->mm_state.wait, max_seq < READ_ONCE(lrugen->max_seq)); @@ -417,7 +405,7 @@ index cb026e2714d7..3d8e0665186c 100644 } VM_WARN_ON_ONCE(max_seq != READ_ONCE(lrugen->max_seq)); -@@ -4243,8 +4242,6 @@ static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc) +@@ -4243,8 +4242,6 @@ static void lru_gen_age_node(struct pgli VM_WARN_ON_ONCE(!current_is_kswapd()); @@ -426,7 +414,7 @@ index cb026e2714d7..3d8e0665186c 100644 /* check the order to exclude compaction-induced reclaim */ if (!min_ttl || sc->order || 
sc->priority == DEF_PRIORITY) return; -@@ -4833,8 +4830,7 @@ static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq, +@@ -4833,8 +4830,7 @@ static bool should_run_aging(struct lruv * 1. Defer try_to_inc_max_seq() to workqueues to reduce latency for memcg * reclaim. */ @@ -436,7 +424,7 @@ index cb026e2714d7..3d8e0665186c 100644 { unsigned long nr_to_scan; struct mem_cgroup *memcg = lruvec_memcg(lruvec); -@@ -4851,10 +4847,8 @@ static unsigned long get_nr_to_scan(struct lruvec *lruvec, struct scan_control * +@@ -4851,10 +4847,8 @@ static unsigned long get_nr_to_scan(stru if (sc->priority == DEF_PRIORITY) return nr_to_scan; @@ -448,7 +436,7 @@ index cb026e2714d7..3d8e0665186c 100644 } static unsigned long get_nr_to_reclaim(struct scan_control *sc) -@@ -4863,29 +4857,18 @@ static unsigned long get_nr_to_reclaim(struct scan_control *sc) +@@ -4863,29 +4857,18 @@ static unsigned long get_nr_to_reclaim(s if (!global_reclaim(sc)) return -1; @@ -480,7 +468,7 @@ index cb026e2714d7..3d8e0665186c 100644 if (sc->may_swap) swappiness = get_swappiness(lruvec, sc); -@@ -4895,7 +4878,7 @@ static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc +@@ -4895,7 +4878,7 @@ static void lru_gen_shrink_lruvec(struct swappiness = 0; nr_to_scan = get_nr_to_scan(lruvec, sc, swappiness); @@ -489,7 +477,7 @@ index cb026e2714d7..3d8e0665186c 100644 break; delta = evict_pages(lruvec, sc, swappiness); -@@ -4912,11 +4895,251 @@ static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc +@@ -4912,10 +4895,250 @@ static void lru_gen_shrink_lruvec(struct cond_resched(); } @@ -689,8 +677,8 @@ index cb026e2714d7..3d8e0665186c 100644 + + /* kswapd should never fail */ + pgdat->kswapd_failures = 0; - } - ++} ++ +#ifdef CONFIG_MEMCG +void lru_gen_rotate_memcg(struct lruvec *lruvec, int op) +{ @@ -735,13 +723,12 @@ index cb026e2714d7..3d8e0665186c 100644 + WRITE_ONCE(pgdat->memcg_lru.seq, pgdat->memcg_lru.seq + 1); + + spin_unlock(&pgdat->memcg_lru.lock); -+} + } +#endif -+ + /****************************************************************************** * state change - ******************************************************************************/ -@@ -5370,11 +5593,11 @@ static int run_cmd(char cmd, int memcg_id, int nid, unsigned long seq, +@@ -5370,11 +5593,11 @@ static int run_cmd(char cmd, int memcg_i if (!mem_cgroup_disabled()) { rcu_read_lock(); @@ -756,7 +743,7 @@ index cb026e2714d7..3d8e0665186c 100644 rcu_read_unlock(); if (!memcg) -@@ -5521,6 +5744,19 @@ void lru_gen_init_lruvec(struct lruvec *lruvec) +@@ -5521,6 +5744,19 @@ void lru_gen_init_lruvec(struct lruvec * } #ifdef CONFIG_MEMCG @@ -776,7 +763,7 @@ index cb026e2714d7..3d8e0665186c 100644 void lru_gen_init_memcg(struct mem_cgroup *memcg) { INIT_LIST_HEAD(&memcg->mm_list.fifo); -@@ -5544,7 +5780,69 @@ void lru_gen_exit_memcg(struct mem_cgroup *memcg) +@@ -5544,7 +5780,69 @@ void lru_gen_exit_memcg(struct mem_cgrou } } } @@ -847,7 +834,7 @@ index cb026e2714d7..3d8e0665186c 100644 static int __init init_lru_gen(void) { -@@ -5571,6 +5869,10 @@ static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc +@@ -5571,6 +5869,10 @@ static void lru_gen_shrink_lruvec(struct { } @@ -858,7 +845,7 @@ index cb026e2714d7..3d8e0665186c 100644 #endif /* CONFIG_LRU_GEN */ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc) -@@ -5584,7 +5886,7 @@ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc) +@@ -5584,7 +5886,7 @@ static void shrink_lruvec(struct 
lruvec bool proportional_reclaim; struct blk_plug plug; @@ -867,7 +854,7 @@ index cb026e2714d7..3d8e0665186c 100644 lru_gen_shrink_lruvec(lruvec, sc); return; } -@@ -5826,6 +6128,11 @@ static void shrink_node(pg_data_t *pgdat, struct scan_control *sc) +@@ -5826,6 +6128,11 @@ static void shrink_node(pg_data_t *pgdat struct lruvec *target_lruvec; bool reclaimable = false; @@ -879,6 +866,3 @@ index cb026e2714d7..3d8e0665186c 100644 target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat); again: --- -2.40.0 - diff --git a/target/linux/generic/backport-5.15/020-v6.3-27-mm-multi-gen-LRU-clarify-scan_control-flags.patch b/target/linux/generic/backport-5.15/020-v6.3-27-mm-multi-gen-LRU-clarify-scan_control-flags.patch index 59d2c82b56..882f29e989 100644 --- a/target/linux/generic/backport-5.15/020-v6.3-27-mm-multi-gen-LRU-clarify-scan_control-flags.patch +++ b/target/linux/generic/backport-5.15/020-v6.3-27-mm-multi-gen-LRU-clarify-scan_control-flags.patch @@ -33,11 +33,9 @@ Signed-off-by: Andrew Morton mm/vmscan.c | 55 +++++++++++++++++++++++++++-------------------------- 1 file changed, 28 insertions(+), 27 deletions(-) -diff --git a/mm/vmscan.c b/mm/vmscan.c -index 3d8e0665186c..4bcb93df316c 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c -@@ -2905,6 +2905,9 @@ static int get_swappiness(struct lruvec *lruvec, struct scan_control *sc) +@@ -2905,6 +2905,9 @@ static int get_swappiness(struct lruvec struct mem_cgroup *memcg = lruvec_memcg(lruvec); struct pglist_data *pgdat = lruvec_pgdat(lruvec); @@ -47,7 +45,7 @@ index 3d8e0665186c..4bcb93df316c 100644 if (!can_demote(pgdat->node_id, sc) && mem_cgroup_get_nr_swap_pages(memcg) < MIN_LRU_BATCH) return 0; -@@ -3952,7 +3955,7 @@ static void walk_mm(struct lruvec *lruvec, struct mm_struct *mm, struct lru_gen_ +@@ -3952,7 +3955,7 @@ static void walk_mm(struct lruvec *lruve } while (err == -EAGAIN); } @@ -56,7 +54,7 @@ index 3d8e0665186c..4bcb93df316c 100644 { struct lru_gen_mm_walk *walk = current->reclaim_state->mm_walk; -@@ -3960,7 +3963,7 @@ static struct lru_gen_mm_walk *set_mm_walk(struct pglist_data *pgdat) +@@ -3960,7 +3963,7 @@ static struct lru_gen_mm_walk *set_mm_wa VM_WARN_ON_ONCE(walk); walk = &pgdat->mm_walk; @@ -65,7 +63,7 @@ index 3d8e0665186c..4bcb93df316c 100644 VM_WARN_ON_ONCE(current_is_kswapd()); walk = kzalloc(sizeof(*walk), __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN); -@@ -4146,7 +4149,7 @@ static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long max_seq, +@@ -4146,7 +4149,7 @@ static bool try_to_inc_max_seq(struct lr goto done; } @@ -74,7 +72,7 @@ index 3d8e0665186c..4bcb93df316c 100644 if (!walk) { success = iterate_mm_list_nowalk(lruvec, max_seq); goto done; -@@ -4215,8 +4218,6 @@ static bool lruvec_is_reclaimable(struct lruvec *lruvec, struct scan_control *sc +@@ -4215,8 +4218,6 @@ static bool lruvec_is_reclaimable(struct struct mem_cgroup *memcg = lruvec_memcg(lruvec); DEFINE_MIN_SEQ(lruvec); @@ -83,7 +81,7 @@ index 3d8e0665186c..4bcb93df316c 100644 /* see the comment on lru_gen_page */ gen = lru_gen_from_seq(min_seq[LRU_GEN_FILE]); birth = READ_ONCE(lruvec->lrugen.timestamps[gen]); -@@ -4472,12 +4473,8 @@ static bool isolate_page(struct lruvec *lruvec, struct page *page, struct scan_c +@@ -4472,12 +4473,8 @@ static bool isolate_page(struct lruvec * { bool success; @@ -97,7 +95,7 @@ index 3d8e0665186c..4bcb93df316c 100644 (PageDirty(page) || (PageAnon(page) && !PageSwapCache(page)))) return false; -@@ -4574,9 +4571,8 @@ static int scan_pages(struct lruvec *lruvec, struct scan_control *sc, +@@ -4574,9 +4571,8 @@ 
static int scan_pages(struct lruvec *lru __count_vm_events(PGSCAN_ANON + type, isolated); /* @@ -109,7 +107,7 @@ index 3d8e0665186c..4bcb93df316c 100644 */ return isolated || !remaining ? scanned : 0; } -@@ -4836,8 +4832,7 @@ static long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc, bool +@@ -4836,8 +4832,7 @@ static long get_nr_to_scan(struct lruvec struct mem_cgroup *memcg = lruvec_memcg(lruvec); DEFINE_MAX_SEQ(lruvec); @@ -119,7 +117,7 @@ index 3d8e0665186c..4bcb93df316c 100644 return 0; if (!should_run_aging(lruvec, max_seq, sc, can_swap, &nr_to_scan)) -@@ -4865,17 +4860,14 @@ static bool try_to_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc) +@@ -4865,17 +4860,14 @@ static bool try_to_shrink_lruvec(struct long nr_to_scan; unsigned long scanned = 0; unsigned long nr_to_reclaim = get_nr_to_reclaim(sc); @@ -142,7 +140,7 @@ index 3d8e0665186c..4bcb93df316c 100644 nr_to_scan = get_nr_to_scan(lruvec, sc, swappiness); if (nr_to_scan <= 0) -@@ -5005,12 +4997,13 @@ static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc +@@ -5005,12 +4997,13 @@ static void lru_gen_shrink_lruvec(struct struct blk_plug plug; VM_WARN_ON_ONCE(global_reclaim(sc)); @@ -157,7 +155,7 @@ index 3d8e0665186c..4bcb93df316c 100644 if (try_to_shrink_lruvec(lruvec, sc)) lru_gen_rotate_memcg(lruvec, MEMCG_LRU_YOUNG); -@@ -5066,11 +5059,19 @@ static void lru_gen_shrink_node(struct pglist_data *pgdat, struct scan_control * +@@ -5066,11 +5059,19 @@ static void lru_gen_shrink_node(struct p VM_WARN_ON_ONCE(!global_reclaim(sc)); @@ -178,7 +176,7 @@ index 3d8e0665186c..4bcb93df316c 100644 set_initial_priority(pgdat, sc); -@@ -5088,7 +5089,7 @@ static void lru_gen_shrink_node(struct pglist_data *pgdat, struct scan_control * +@@ -5088,7 +5089,7 @@ static void lru_gen_shrink_node(struct p clear_mm_walk(); blk_finish_plug(&plug); @@ -187,7 +185,7 @@ index 3d8e0665186c..4bcb93df316c 100644 /* kswapd should never fail */ pgdat->kswapd_failures = 0; } -@@ -5656,7 +5657,7 @@ static ssize_t lru_gen_seq_write(struct file *file, const char __user *src, +@@ -5656,7 +5657,7 @@ static ssize_t lru_gen_seq_write(struct set_task_reclaim_state(current, &sc.reclaim_state); flags = memalloc_noreclaim_save(); blk_start_plug(&plug); @@ -196,6 +194,3 @@ index 3d8e0665186c..4bcb93df316c 100644 err = -ENOMEM; goto done; } --- -2.40.0 - diff --git a/target/linux/generic/backport-5.15/020-v6.3-28-mm-multi-gen-LRU-simplify-arch_has_hw_pte_young-chec.patch b/target/linux/generic/backport-5.15/020-v6.3-28-mm-multi-gen-LRU-simplify-arch_has_hw_pte_young-chec.patch index a6c7b019ce..38d0e5496c 100644 --- a/target/linux/generic/backport-5.15/020-v6.3-28-mm-multi-gen-LRU-simplify-arch_has_hw_pte_young-chec.patch +++ b/target/linux/generic/backport-5.15/020-v6.3-28-mm-multi-gen-LRU-simplify-arch_has_hw_pte_young-chec.patch @@ -21,11 +21,9 @@ Signed-off-by: Andrew Morton mm/vmscan.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) -diff --git a/mm/vmscan.c b/mm/vmscan.c -index 4bcb93df316c..3f6874a69886 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c -@@ -4144,7 +4144,7 @@ static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long max_seq, +@@ -4144,7 +4144,7 @@ static bool try_to_inc_max_seq(struct lr * handful of PTEs. Spreading the work out over a period of time usually * is less efficient, but it avoids bursty page faults. 
*/ @@ -34,6 +32,3 @@ index 4bcb93df316c..3f6874a69886 100644 success = iterate_mm_list_nowalk(lruvec, max_seq); goto done; } --- -2.40.0 - diff --git a/target/linux/generic/backport-5.15/020-v6.3-29-mm-multi-gen-LRU-avoid-futile-retries.patch b/target/linux/generic/backport-5.15/020-v6.3-29-mm-multi-gen-LRU-avoid-futile-retries.patch index 14e1aff177..2d19d0d7da 100644 --- a/target/linux/generic/backport-5.15/020-v6.3-29-mm-multi-gen-LRU-avoid-futile-retries.patch +++ b/target/linux/generic/backport-5.15/020-v6.3-29-mm-multi-gen-LRU-avoid-futile-retries.patch @@ -23,11 +23,9 @@ Signed-off-by: Andrew Morton mm/vmscan.c | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) -diff --git a/mm/vmscan.c b/mm/vmscan.c -index 3f6874a69886..0b76774963ff 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c -@@ -4934,18 +4934,20 @@ static int shrink_one(struct lruvec *lruvec, struct scan_control *sc) +@@ -4934,18 +4934,20 @@ static int shrink_one(struct lruvec *lru static void shrink_many(struct pglist_data *pgdat, struct scan_control *sc) { @@ -50,7 +48,7 @@ index 3f6874a69886..0b76774963ff 100644 gen = get_memcg_gen(READ_ONCE(pgdat->memcg_lru.seq)); rcu_read_lock(); -@@ -4969,14 +4971,22 @@ static void shrink_many(struct pglist_data *pgdat, struct scan_control *sc) +@@ -4969,14 +4971,22 @@ restart: op = shrink_one(lruvec, sc); @@ -76,7 +74,7 @@ index 3f6874a69886..0b76774963ff 100644 /* restart if raced with lru_gen_rotate_memcg() */ if (gen != get_nulls_value(pos)) goto restart; -@@ -4985,11 +4995,6 @@ static void shrink_many(struct pglist_data *pgdat, struct scan_control *sc) +@@ -4985,11 +4995,6 @@ restart: bin = get_memcg_bin(bin + 1); if (bin != first_bin) goto restart; @@ -88,6 +86,3 @@ index 3f6874a69886..0b76774963ff 100644 } static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc) --- -2.40.0 - diff --git a/target/linux/generic/pending-5.15/120-Fix-alloc_node_mem_map-with-ARCH_PFN_OFFSET-calcu.patch b/target/linux/generic/pending-5.15/120-Fix-alloc_node_mem_map-with-ARCH_PFN_OFFSET-calcu.patch index 4865659c04..c1313c82f4 100644 --- a/target/linux/generic/pending-5.15/120-Fix-alloc_node_mem_map-with-ARCH_PFN_OFFSET-calcu.patch +++ b/target/linux/generic/pending-5.15/120-Fix-alloc_node_mem_map-with-ARCH_PFN_OFFSET-calcu.patch @@ -71,7 +71,7 @@ Signed-off-by: Tobias Wolf --- a/mm/page_alloc.c +++ b/mm/page_alloc.c -@@ -7605,7 +7605,7 @@ static void __init alloc_node_mem_map(st +@@ -7604,7 +7604,7 @@ static void __init alloc_node_mem_map(st if (pgdat == NODE_DATA(0)) { mem_map = NODE_DATA(0)->node_mem_map; if (page_to_pfn(mem_map) != pgdat->node_start_pfn) -- cgit v1.2.3
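
Editor's note: the refreshed 020-v6.3-26 patch above builds the per-node
memcg LRU that shrink_many() and lru_gen_rotate_memcg() walk. The walk is
indexed by a generation derived from pgdat->memcg_lru.seq and by a bin
chosen to spread lock contention, both visible in the hunks above. The
following user-space sketch only illustrates that wrap-around arithmetic;
the values of MEMCG_NR_GENS and MEMCG_NR_BINS are assumptions made for
the example, and get_memcg_gen()/get_memcg_bin() here are simplified
stand-ins for the helpers the patch adds to mm/vmscan.c, not the kernel
implementation itself.

	#include <stdio.h>

	#define MEMCG_NR_GENS 2	/* assumed: an old and a young generation */
	#define MEMCG_NR_BINS 8	/* assumed: bins that spread lock contention */

	static int get_memcg_gen(unsigned long seq)
	{
		return seq % MEMCG_NR_GENS;
	}

	static int get_memcg_bin(int bin)
	{
		return bin % MEMCG_NR_BINS;
	}

	int main(void)
	{
		/* bumping the per-node seq flips which generation is scanned */
		for (unsigned long seq = 0; seq < 4; seq++)
			printf("seq=%lu scans gen %d\n", seq, get_memcg_gen(seq));

		/* a walk may start at any bin and wraps until it returns to
		 * its starting point, matching the first_bin/bin loop in
		 * shrink_many() */
		int first_bin = 5, bin = first_bin;
		do {
			printf("visit bin %d\n", bin);
			bin = get_memcg_bin(bin + 1);
		} while (bin != first_bin);
		return 0;
	}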
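
Editor's note: the refreshed 020-v6.3-29 patch changes shrink_many() so
that the rotation op returned by shrink_one() for one memcg is flushed
lazily, right before the next memcg is shrunk or once the walk ends, and
the restart path (taken when get_nulls_value(pos) shows the list raced
with lru_gen_rotate_memcg()) resets op/memcg rather than redoing finished
work. The stand-alone sketch below is only an analogue of that
deferred-flush shape under the stated simplifications; every name in it
is illustrative and none of it is the kernel API.

	#include <stdio.h>

	enum { OP_NONE, OP_YOUNG };	/* stand-in for the MEMCG_LRU_* ops */

	struct memcg { int id; };

	static void rotate(struct memcg *memcg, int op)
	{
		/* analogue of: if (op) lru_gen_rotate_memcg(lruvec, op); */
		if (memcg && op != OP_NONE)
			printf("rotate memcg %d\n", memcg->id);
	}

	static int shrink_one(struct memcg *memcg)
	{
		printf("shrink memcg %d\n", memcg->id);
		return OP_YOUNG;
	}

	int main(void)
	{
		struct memcg list[] = { {1}, {2}, {3} };
		struct memcg *memcg = NULL;	/* reset on restart, as in the patch */
		int op = OP_NONE;

		for (int i = 0; i < 3; i++) {
			rotate(memcg, op);	/* flush the previous memcg's op */
			memcg = &list[i];
			op = shrink_one(memcg);
		}
		rotate(memcg, op);		/* flush the final pending op */
		return 0;
	}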