mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2026-05-05 09:57:21 +02:00
0510bdab53
free_reserved_area() is related to memblock as it frees reserved memory back to the buddy allocator, similar to what memblock_free_late() does. Move free_reserved_area() to mm/memblock.c to prepare for further consolidation of the functions that free reserved memory. No functional changes. Link: https://patch.msgid.link/20260323074836.3653702-5-rppt@kernel.org Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org> Acked-by: Vlastimil Babka (SUSE) <vbabka@kernel.org>
70 lines
1.3 KiB
C
70 lines
1.3 KiB
C
/* SPDX-License-Identifier: GPL-2.0-or-later */
|
|
#ifndef _MM_INTERNAL_H
|
|
#define _MM_INTERNAL_H
|
|
|
|
/*
 * Enable memblock_dbg() messages
 *
 * When the harness is built with MEMBLOCK_DEBUG, debug output is on
 * unconditionally.  Note: when MEMBLOCK_DEBUG is NOT defined there is no
 * definition of memblock_debug at all here — any reference to it must be
 * compiled out under the same condition.
 */
#ifdef MEMBLOCK_DEBUG
static int memblock_debug = 1;
#endif
|
|
|
|
/*
 * Route kernel rate-limited warnings straight to stdout for the userspace
 * tests.  No actual rate limiting is performed.  The ", ##__VA_ARGS__" form
 * (GNU extension) swallows the trailing comma when no variadic arguments
 * are supplied.
 */
#define pr_warn_ratelimited(fmt, ...) printf(fmt, ##__VA_ARGS__)
|
|
|
|
/*
 * Convert a page count to KiB: a page is 2^PAGE_SHIFT bytes, i.e.
 * 2^(PAGE_SHIFT - 10) KiB.  PAGE_SHIFT must be defined elsewhere before
 * this macro is expanded.
 */
#define K(x) ((x) << (PAGE_SHIFT-10))
|
|
|
|
/*
 * mm's mirrored-memory ("kernelcore=mirror") policy knob; always disabled
 * in the test harness.
 * NOTE(review): this is a definition (not an extern declaration) in a
 * header — relies on the header being included from a single translation
 * unit; confirm if more TUs ever include this file.
 */
bool mirrored_kernelcore = false;
|
|
|
|
/*
 * Minimal stand-in for the kernel's struct page.  The empty definition is
 * a GNU extension (ISO C requires at least one member); the tests never
 * look inside a page.
 */
struct page {};

/*
 * page_address() is unsupported in the userspace harness: calling it is a
 * hard error via BUG().  The return statement is unreachable and exists
 * only to satisfy the compiler (struct page * converts implicitly to
 * void *).
 */
static inline void *page_address(struct page *page)
{
	BUG();
	return page;
}
|
|
|
|
/*
 * virt_to_page() is likewise unsupported here: BUG() on any call.  The
 * unreachable return reuses the void * argument (implicit conversion to
 * struct page *) purely to keep the compiler quiet.
 */
static inline struct page *virt_to_page(void *virt)
{
	BUG();
	return virt;
}
|
|
|
|
/*
 * No-op stub: the userspace harness has no buddy allocator to hand pages
 * back to, so freeing a (pfn, order) range does nothing.
 */
void memblock_free_pages(unsigned long pfn, unsigned int order)
{
}
|
|
|
|
/*
 * No-op stub: unaccepted memory (confidential-computing guests) is not
 * modeled in the test environment, so there is never anything to accept.
 */
static inline void accept_memory(phys_addr_t start, unsigned long size)
{
}
|
|
|
|
/*
 * Implemented in mm/memblock.c (compiled into the tests): release the
 * reserved range [start, end) back to the allocator, optionally poisoning
 * it, logging under the name 's'.
 * NOTE(review): presumably returns the number of pages freed — confirm
 * against the mm/memblock.c implementation.
 */
unsigned long free_reserved_area(void *start, void *end, int poison, const char *s);
/* Free a single reserved page; also implemented outside this header. */
void free_reserved_page(struct page *page);
|
|
|
|
/*
 * Deferred struct-page initialisation never happens in the userspace
 * tests, so report it as permanently disabled.
 */
static inline bool deferred_pages_enabled(void)
{
	return false;
}
|
|
|
|
/*
 * In the test environment every pfn in [start_pfn, end_pfn) is considered
 * valid, so this collapses to a plain counting loop.
 */
#define for_each_valid_pfn(pfn, start_pfn, end_pfn) \
	for ((pfn) = (start_pfn); (pfn) < (end_pfn); (pfn)++)
|
|
|
|
/*
 * KASAN pointer tags do not exist in userspace: identity function.  The
 * cast drops const to match the kernel API's non-const return type.
 */
static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)addr;
}
|
|
|
|
/*
 * There is no kernel text/data range in the userspace build, so no
 * address ever qualifies as "kernel".
 */
static inline bool __is_kernel(unsigned long addr)
{
	return false;
}
|
|
|
|
/*
 * for_each_valid_pfn() is defined earlier in this header.  A second,
 * token-identical #define used to sit here; such a benign redefinition is
 * legal C but pure copy-paste duplication, so it has been removed.
 */
|
|
|
|
/*
 * No-op stub: with deferred page init disabled (see
 * deferred_pages_enabled()) there is nothing to initialise per pfn/node.
 */
static inline void init_deferred_page(unsigned long pfn, int nid)
{
}
|
|
|
|
/*
 * PageReserved tracking is not needed by the tests; evaluate the argument
 * once (for side effects and type checking) and discard it.
 */
#define __SetPageReserved(p) ((void)(p))
|
|
|
|
#endif
|