author     Matthew Wilcox (Oracle) <willy@infradead.org>  2025-06-11 16:59:05 +0100
committer  Vlastimil Babka <vbabka@suse.cz>                2025-06-18 13:06:26 +0200
commit     30908096dd8d79b66d987782df04d14e1c907c25 (patch)
tree       8101aa9257b20d6a7deb4452af61d5265c1a8bd5 /mm/slab.h
parent     262e086f93026a6633da034f270c4baae47c4706 (diff)
slab: Rename slab->__page_flags to slab->flags
Slab has its own reasons for using flag bits; they aren't just the
page bits.  Maybe this won't be the ultimate solution, but we should
be clear that these bits are in use.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Link: https://patch.msgid.link/20250611155916.2579160-3-willy@infradead.org
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Diffstat (limited to 'mm/slab.h')
-rw-r--r--  mm/slab.h  4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/mm/slab.h b/mm/slab.h
index 05a21dc796e0..32785ff3470a 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -50,7 +50,7 @@ typedef union {
 /* Reuses the bits in struct page */
 struct slab {
-	unsigned long __page_flags;
+	unsigned long flags;
 	struct kmem_cache *slab_cache;
 	union {
@@ -99,7 +99,7 @@ struct slab {
 #define SLAB_MATCH(pg, sl) \
 	static_assert(offsetof(struct page, pg) == offsetof(struct slab, sl))
-SLAB_MATCH(flags, __page_flags);
+SLAB_MATCH(flags, flags);
 SLAB_MATCH(compound_head, slab_cache);	/* Ensure bit 0 is clear */
 SLAB_MATCH(_refcount, __page_refcount);
 #ifdef CONFIG_MEMCG
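To make the SLAB_MATCH() check above concrete, here is a minimal, self-contained
userspace sketch of the same technique.  The names (fake_page, fake_slab,
fake_cache, FAKE_MATCH) are invented stand-ins for illustration, not the kernel's
actual definitions; the point is only how offsetof() plus static_assert() pins an
overlay struct's fields to the offsets of the page fields they reuse, so a rename
like __page_flags -> flags cannot silently introduce layout drift.

/*
 * Build with: cc -std=c11 -Wall match.c
 * Simplified stand-ins for struct page / struct slab; not kernel code.
 */
#include <assert.h>
#include <stddef.h>

struct fake_page {
	unsigned long flags;
	unsigned long compound_head;
};

struct fake_cache;	/* opaque, like struct kmem_cache here */

/* Overlays fake_page, the way struct slab reuses the bits in struct page. */
struct fake_slab {
	unsigned long flags;		/* was __page_flags before the rename */
	struct fake_cache *slab_cache;	/* shares the compound_head offset */
};

#define FAKE_MATCH(pg, sl) \
	static_assert(offsetof(struct fake_page, pg) == offsetof(struct fake_slab, sl), \
		      "fake_slab field offset diverged from fake_page")

FAKE_MATCH(flags, flags);
FAKE_MATCH(compound_head, slab_cache);

int main(void)
{
	return 0;	/* nothing to run; the checks happen at compile time */
}

If a field in the overlay struct is moved or resized, the build fails at the
corresponding FAKE_MATCH() line, which is exactly the guarantee the updated
SLAB_MATCH(flags, flags) assertion keeps providing after the rename.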