summaryrefslogtreecommitdiff
path: root/mm
diff options
context:
space:
mode:
authorShakeel Butt <shakeel.butt@linux.dev>2025-11-10 15:20:08 -0800
committerAndrew Morton <akpm@linux-foundation.org>2025-11-24 15:08:54 -0800
commitc1bd09994c4d5b897571671bed16581335e93242 (patch)
treecf0551b78a1951b500cdd50e2ff95e22ea0d4957 /mm
parent5b3eb779a20cf30d74bb346d2a1e525bc9072685 (diff)
memcg: remove __lruvec_stat_mod_folio
__lruvec_stat_mod_folio() is already safe against irqs, so there is no need to have a separate interface (i.e. lruvec_stat_mod_folio) which wraps calls to it with irq disabling and reenabling. Let's rename __lruvec_stat_mod_folio() to lruvec_stat_mod_folio().

Link: https://lkml.kernel.org/r/20251110232008.1352063-5-shakeel.butt@linux.dev
Signed-off-by: Shakeel Butt <shakeel.butt@linux.dev>
Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
Acked-by: Roman Gushchin <roman.gushchin@linux.dev>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Qi Zheng <zhengqi.arch@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--mm/filemap.c20
-rw-r--r--mm/huge_memory.c4
-rw-r--r--mm/khugepaged.c8
-rw-r--r--mm/memcontrol.c4
-rw-r--r--mm/page-writeback.c2
-rw-r--r--mm/rmap.c4
-rw-r--r--mm/shmem.c6
7 files changed, 24 insertions, 24 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index 07634b7d9934..7d15a9c216ef 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -182,13 +182,13 @@ static void filemap_unaccount_folio(struct address_space *mapping,
nr = folio_nr_pages(folio);
- __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
+ lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
if (folio_test_swapbacked(folio)) {
- __lruvec_stat_mod_folio(folio, NR_SHMEM, -nr);
+ lruvec_stat_mod_folio(folio, NR_SHMEM, -nr);
if (folio_test_pmd_mappable(folio))
- __lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, -nr);
+ lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, -nr);
} else if (folio_test_pmd_mappable(folio)) {
- __lruvec_stat_mod_folio(folio, NR_FILE_THPS, -nr);
+ lruvec_stat_mod_folio(folio, NR_FILE_THPS, -nr);
filemap_nr_thps_dec(mapping);
}
if (test_bit(AS_KERNEL_FILE, &folio->mapping->flags))
@@ -844,13 +844,13 @@ void replace_page_cache_folio(struct folio *old, struct folio *new)
old->mapping = NULL;
/* hugetlb pages do not participate in page cache accounting. */
if (!folio_test_hugetlb(old))
- __lruvec_stat_sub_folio(old, NR_FILE_PAGES);
+ lruvec_stat_sub_folio(old, NR_FILE_PAGES);
if (!folio_test_hugetlb(new))
- __lruvec_stat_add_folio(new, NR_FILE_PAGES);
+ lruvec_stat_add_folio(new, NR_FILE_PAGES);
if (folio_test_swapbacked(old))
- __lruvec_stat_sub_folio(old, NR_SHMEM);
+ lruvec_stat_sub_folio(old, NR_SHMEM);
if (folio_test_swapbacked(new))
- __lruvec_stat_add_folio(new, NR_SHMEM);
+ lruvec_stat_add_folio(new, NR_SHMEM);
xas_unlock_irq(&xas);
if (free_folio)
free_folio(old);
@@ -933,9 +933,9 @@ noinline int __filemap_add_folio(struct address_space *mapping,
/* hugetlb pages do not participate in page cache accounting */
if (!huge) {
- __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr);
+ lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr);
if (folio_test_pmd_mappable(folio))
- __lruvec_stat_mod_folio(folio,
+ lruvec_stat_mod_folio(folio,
NR_FILE_THPS, nr);
}
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 53a8d380eab2..7af3e037d891 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3783,10 +3783,10 @@ static int __folio_freeze_and_split_unmapped(struct folio *folio, unsigned int n
if (folio_test_pmd_mappable(folio) &&
new_order < HPAGE_PMD_ORDER) {
if (folio_test_swapbacked(folio)) {
- __lruvec_stat_mod_folio(folio,
+ lruvec_stat_mod_folio(folio,
NR_SHMEM_THPS, -nr);
} else {
- __lruvec_stat_mod_folio(folio,
+ lruvec_stat_mod_folio(folio,
NR_FILE_THPS, -nr);
filemap_nr_thps_dec(mapping);
}
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 40f9d5939aa5..89c33ef7aac3 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -2195,14 +2195,14 @@ immap_locked:
}
if (is_shmem)
- __lruvec_stat_mod_folio(new_folio, NR_SHMEM_THPS, HPAGE_PMD_NR);
+ lruvec_stat_mod_folio(new_folio, NR_SHMEM_THPS, HPAGE_PMD_NR);
else
- __lruvec_stat_mod_folio(new_folio, NR_FILE_THPS, HPAGE_PMD_NR);
+ lruvec_stat_mod_folio(new_folio, NR_FILE_THPS, HPAGE_PMD_NR);
if (nr_none) {
- __lruvec_stat_mod_folio(new_folio, NR_FILE_PAGES, nr_none);
+ lruvec_stat_mod_folio(new_folio, NR_FILE_PAGES, nr_none);
/* nr_none is always 0 for non-shmem. */
- __lruvec_stat_mod_folio(new_folio, NR_SHMEM, nr_none);
+ lruvec_stat_mod_folio(new_folio, NR_SHMEM, nr_none);
}
/*
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 9a659f16af77..9b07db2cb232 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -777,7 +777,7 @@ void mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
mod_memcg_lruvec_state(lruvec, idx, val);
}
-void __lruvec_stat_mod_folio(struct folio *folio, enum node_stat_item idx,
+void lruvec_stat_mod_folio(struct folio *folio, enum node_stat_item idx,
int val)
{
struct mem_cgroup *memcg;
@@ -797,7 +797,7 @@ void __lruvec_stat_mod_folio(struct folio *folio, enum node_stat_item idx,
mod_lruvec_state(lruvec, idx, val);
rcu_read_unlock();
}
-EXPORT_SYMBOL(__lruvec_stat_mod_folio);
+EXPORT_SYMBOL(lruvec_stat_mod_folio);
void mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
{
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 757bc4d3b5b5..d6b339cc876d 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2658,7 +2658,7 @@ static void folio_account_dirtied(struct folio *folio,
inode_attach_wb(inode, folio);
wb = inode_to_wb(inode);
- __lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, nr);
+ lruvec_stat_mod_folio(folio, NR_FILE_DIRTY, nr);
__zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, nr);
__node_stat_mod_folio(folio, NR_DIRTIED, nr);
wb_stat_mod(wb, WB_RECLAIMABLE, nr);
diff --git a/mm/rmap.c b/mm/rmap.c
index d871f2eb821c..f955f02d570e 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1212,12 +1212,12 @@ static void __folio_mod_stat(struct folio *folio, int nr, int nr_pmdmapped)
if (nr) {
idx = folio_test_anon(folio) ? NR_ANON_MAPPED : NR_FILE_MAPPED;
- __lruvec_stat_mod_folio(folio, idx, nr);
+ lruvec_stat_mod_folio(folio, idx, nr);
}
if (nr_pmdmapped) {
if (folio_test_anon(folio)) {
idx = NR_ANON_THPS;
- __lruvec_stat_mod_folio(folio, idx, nr_pmdmapped);
+ lruvec_stat_mod_folio(folio, idx, nr_pmdmapped);
} else {
/* NR_*_PMDMAPPED are not maintained per-memcg */
idx = folio_test_swapbacked(folio) ?
diff --git a/mm/shmem.c b/mm/shmem.c
index fc835b3e4914..ad18172ff831 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -871,9 +871,9 @@ static unsigned int shmem_huge_global_enabled(struct inode *inode, pgoff_t index
static void shmem_update_stats(struct folio *folio, int nr_pages)
{
if (folio_test_pmd_mappable(folio))
- __lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, nr_pages);
- __lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr_pages);
- __lruvec_stat_mod_folio(folio, NR_SHMEM, nr_pages);
+ lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, nr_pages);
+ lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr_pages);
+ lruvec_stat_mod_folio(folio, NR_SHMEM, nr_pages);
}
/*