Files
linux-stable-mirror/include/linux/page-isolation.h
Kefeng Wang c83109e95c mm: page_isolation: introduce page_is_unmovable()
Patch series "mm: accelerate gigantic folio allocation".

Optimize pfn_range_valid_contig() and replace_free_hugepage_folios() in
alloc_contig_frozen_pages() to speed up gigantic folio allocation.  The
allocation time for 120*1G folios drops from 3.605s to 0.431s.


This patch (of 5):

Factor out the check for whether a page is unmovable into a new helper,
which will be reused in a following patch.

No functional change intended; the minor changes are as follows:
1) Avoid unnecessary calls by checking CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
2) Directly call PageCompound since PageTransCompound may be dropped
3) Using folio_test_hugetlb()

Link: https://lkml.kernel.org/r/20260112150954.1802953-1-wangkefeng.wang@huawei.com
Link: https://lkml.kernel.org/r/20260112150954.1802953-2-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Cc: Brendan Jackman <jackmanb@google.com>
Cc: David Hildenbrand <david@kernel.org>
Cc: Jane Chu <jane.chu@oracle.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
2026-01-31 14:22:42 -08:00

73 lines
2.2 KiB
C

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_PAGEISOLATION_H
#define __LINUX_PAGEISOLATION_H
#ifdef CONFIG_MEMORY_ISOLATION
/*
 * A page is "isolated" when the migratetype of its pageblock is
 * MIGRATE_ISOLATE.
 */
static inline bool is_migrate_isolate_page(struct page *page)
{
	int mt = get_pageblock_migratetype(page);

	return mt == MIGRATE_ISOLATE;
}
/* True iff @migratetype is the pageblock-isolation migratetype. */
static inline bool is_migrate_isolate(int migratetype)
{
	return migratetype == MIGRATE_ISOLATE;
}
/*
 * Test/clear/set the PB_migrate_isolate pageblock bit for @page.
 * Thin wrappers around the generic pfnblock bit helpers.
 */
#define get_pageblock_isolate(page) \
	get_pfnblock_bit(page, page_to_pfn(page), PB_migrate_isolate)
#define clear_pageblock_isolate(page) \
	clear_pfnblock_bit(page, page_to_pfn(page), PB_migrate_isolate)
#define set_pageblock_isolate(page) \
	set_pfnblock_bit(page, page_to_pfn(page), PB_migrate_isolate)
#else
/* CONFIG_MEMORY_ISOLATION=n: no pageblock is ever isolated. */
static inline bool is_migrate_isolate_page(struct page *page)
{
	return false;
}
/* CONFIG_MEMORY_ISOLATION=n: MIGRATE_ISOLATE does not exist as a state. */
static inline bool is_migrate_isolate(int migratetype)
{
	return false;
}
/* CONFIG_MEMORY_ISOLATION=n: the isolate bit is never set. */
static inline bool get_pageblock_isolate(struct page *page)
{
	return false;
}
/* CONFIG_MEMORY_ISOLATION=n: nothing to clear. */
static inline void clear_pageblock_isolate(struct page *page)
{
}
/* CONFIG_MEMORY_ISOLATION=n: nothing to set. */
static inline void set_pageblock_isolate(struct page *page)
{
}
#endif
/*
 * Pageblock isolation modes:
 * PB_ISOLATE_MODE_MEM_OFFLINE - isolate to offline (!allocate) memory
 * e.g., skip over PageHWPoison() pages and
 * PageOffline() pages. Unmovable pages will be
 * reported in this mode.
 * PB_ISOLATE_MODE_CMA_ALLOC - isolate for CMA allocations
 * PB_ISOLATE_MODE_OTHER - isolate for other purposes
 */
enum pb_isolate_mode {
	PB_ISOLATE_MODE_MEM_OFFLINE,	/* memory offlining */
	PB_ISOLATE_MODE_CMA_ALLOC,	/* CMA allocation */
	PB_ISOLATE_MODE_OTHER,		/* any other caller */
};
/*
 * Set the initial migratetype (and, if @isolate, the isolate bit) for
 * the pageblock containing @page.
 */
void __meminit init_pageblock_migratetype(struct page *page,
					  enum migratetype migratetype,
					  bool isolate);

/*
 * NOTE(review): semantics inferred from the names — presumably these mark
 * @page's pageblock (un)isolated and move its free pages to/from the
 * matching freelist, returning success; confirm in mm/page_isolation.c.
 */
bool pageblock_isolate_and_move_free_pages(struct zone *zone, struct page *page);
bool pageblock_unisolate_and_move_free_pages(struct zone *zone, struct page *page);

/* Isolate the pageblocks covering [start_pfn, end_pfn) for @mode. */
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
			     enum pb_isolate_mode mode);

/* Revert a previous start_isolate_page_range() over the same range. */
void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn);

/* Check whether [start_pfn, end_pfn) is fully isolated under @mode. */
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
			enum pb_isolate_mode mode);

/* Check whether @page is unmovable for the purposes of isolation mode @mode. */
bool page_is_unmovable(struct zone *zone, struct page *page,
		       enum pb_isolate_mode mode, unsigned long *step);
#endif