mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2026-03-03 18:28:01 +01:00
The only external caller of collapse_pte_mapped_thp() is uprobe, which ignores the return value. Change the external API to return void to simplify the interface. Introduce try_collapse_pte_mapped_thp() for internal use that preserves the return value. This prepares for future patch that will convert the return type to use enum scan_result. Link: https://lkml.kernel.org/r/20260118192253.9263-10-shivankg@amd.com Signed-off-by: Shivank Garg <shivankg@amd.com> Suggested-by: David Hildenbrand (Red Hat) <david@kernel.org> Acked-by: Lance Yang <lance.yang@linux.dev> Acked-by: David Hildenbrand (Red Hat) <david@kernel.org> Reviewed-by: Zi Yan <ziy@nvidia.com> Tested-by: Nico Pache <npache@redhat.com> Reviewed-by: Nico Pache <npache@redhat.com> Cc: Anshuman Khandual <anshuman.khandual@arm.com> Cc: Baolin Wang <baolin.wang@linux.alibaba.com> Cc: Barry Song <baohua@kernel.org> Cc: Dev Jain <dev.jain@arm.com> Cc: Liam R. Howlett <Liam.Howlett@oracle.com> Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com> Cc: Ryan Roberts <ryan.roberts@arm.com> Cc: Wei Yang <richard.weiyang@gmail.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
61 lines
1.6 KiB
C
61 lines
1.6 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
|
|
#ifndef _LINUX_KHUGEPAGED_H
|
|
#define _LINUX_KHUGEPAGED_H
|
|
|
|
#include <linux/mm.h>
|
|
|
|
extern unsigned int khugepaged_max_ptes_none __read_mostly;
|
|
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
|
|
extern struct attribute_group khugepaged_attr_group;
|
|
|
|
extern int khugepaged_init(void);
|
|
extern void khugepaged_destroy(void);
|
|
extern int start_stop_khugepaged(void);
|
|
extern void __khugepaged_enter(struct mm_struct *mm);
|
|
extern void __khugepaged_exit(struct mm_struct *mm);
|
|
extern void khugepaged_enter_vma(struct vm_area_struct *vma,
|
|
vm_flags_t vm_flags);
|
|
extern void khugepaged_min_free_kbytes_update(void);
|
|
extern bool current_is_khugepaged(void);
|
|
void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
|
|
bool install_pmd);
|
|
|
|
static inline void khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
|
|
{
|
|
if (mm_flags_test(MMF_VM_HUGEPAGE, oldmm))
|
|
__khugepaged_enter(mm);
|
|
}
|
|
|
|
static inline void khugepaged_exit(struct mm_struct *mm)
|
|
{
|
|
if (mm_flags_test(MMF_VM_HUGEPAGE, mm))
|
|
__khugepaged_exit(mm);
|
|
}
|
|
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
|
|
/* No-op stub: khugepaged is compiled out when CONFIG_TRANSPARENT_HUGEPAGE=n. */
static inline void khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
}
|
|
/* No-op stub: nothing to tear down when CONFIG_TRANSPARENT_HUGEPAGE=n. */
static inline void khugepaged_exit(struct mm_struct *mm)
{
}
|
|
/* No-op stub: VMAs are never registered when CONFIG_TRANSPARENT_HUGEPAGE=n. */
static inline void khugepaged_enter_vma(struct vm_area_struct *vma,
					vm_flags_t vm_flags)
{
}
|
|
/*
 * No-op stub: PTE-mapped THP collapse is impossible without
 * CONFIG_TRANSPARENT_HUGEPAGE. Returns void; the sole external caller
 * (uprobes) ignores any result.
 */
static inline void collapse_pte_mapped_thp(struct mm_struct *mm,
					   unsigned long addr, bool install_pmd)
{
}
|
|
|
|
/* No-op stub: no khugepaged watermark tuning when CONFIG_TRANSPARENT_HUGEPAGE=n. */
static inline void khugepaged_min_free_kbytes_update(void)
{
}
|
|
|
|
/*
 * Stub: the khugepaged kthread does not exist when
 * CONFIG_TRANSPARENT_HUGEPAGE=n, so the current task can never be it.
 */
static inline bool current_is_khugepaged(void)
{
	return false;
}
|
|
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
|
|
|
|
#endif /* _LINUX_KHUGEPAGED_H */
|