mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2026-05-09 21:42:09 +02:00
d49004c5f0
To initialize node, zone and memory map data structures every architecture calls free_area_init() during setup_arch() and passes it an array of zone limits. Beside code duplication it creates "interesting" ordering cases between allocation and initialization of hugetlb and the memory map. Some architectures allocate hugetlb pages very early in setup_arch() in certain cases, some only create hugetlb CMA areas in setup_arch() and sometimes hugetlb allocations happen in mm_core_init(). With arch_zone_limits_init() helper available now on all architectures it is no longer necessary to call free_area_init() from architecture setup code. Rather core MM initialization can call arch_zone_limits_init() in a single place. This makes it possible to unify ordering of hugetlb vs memory map allocation and initialization. Remove the call to free_area_init() from architecture specific code and place it in a new mm_core_init_early() function that is called immediately after setup_arch(). After this refactoring it is possible to consolidate hugetlb allocations and eliminate differences in ordering of hugetlb and memory map initialization among different architectures. As the first step of this consolidation move hugetlb_bootmem_alloc() to mm_core_init_early(). Link: https://lkml.kernel.org/r/20260111082105.290734-24-rppt@kernel.org Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org> Cc: Alexander Gordeev <agordeev@linux.ibm.com> Cc: Alex Shi <alexs@kernel.org> Cc: Andreas Larsson <andreas@gaisler.com> Cc: "Borislav Petkov (AMD)" <bp@alien8.de> Cc: Catalin Marinas <catalin.marinas@arm.com> Cc: David Hildenbrand <david@kernel.org> Cc: David S. 
Miller <davem@davemloft.net> Cc: Dinh Nguyen <dinguyen@kernel.org> Cc: Geert Uytterhoeven <geert@linux-m68k.org> Cc: Guo Ren <guoren@kernel.org> Cc: Heiko Carstens <hca@linux.ibm.com> Cc: Helge Deller <deller@gmx.de> Cc: Huacai Chen <chenhuacai@kernel.org> Cc: Ingo Molnar <mingo@redhat.com> Cc: Johannes Berg <johannes@sipsolutions.net> Cc: John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de> Cc: Jonathan Corbet <corbet@lwn.net> Cc: Klara Modin <klarasmodin@gmail.com> Cc: Liam Howlett <liam.howlett@oracle.com> Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com> Cc: Magnus Lindholm <linmag7@gmail.com> Cc: Matt Turner <mattst88@gmail.com> Cc: Max Filippov <jcmvbkbc@gmail.com> Cc: Michael Ellerman <mpe@ellerman.id.au> Cc: Michal Hocko <mhocko@suse.com> Cc: Michal Simek <monstr@monstr.eu> Cc: Muchun Song <muchun.song@linux.dev> Cc: Oscar Salvador <osalvador@suse.de> Cc: Palmer Dabbelt <palmer@dabbelt.com> Cc: Pratyush Yadav <pratyush@kernel.org> Cc: Richard Weinberger <richard@nod.at> Cc: "Ritesh Harjani (IBM)" <ritesh.list@gmail.com> Cc: Russell King <linux@armlinux.org.uk> Cc: Stafford Horne <shorne@gmail.com> Cc: Suren Baghdasaryan <surenb@google.com> Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Vasily Gorbik <gor@linux.ibm.com> Cc: Vineet Gupta <vgupta@kernel.org> Cc: Vlastimil Babka <vbabka@suse.cz> Cc: Will Deacon <will@kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
203 lines
5.2 KiB
C
203 lines
5.2 KiB
C
/*
 * arch/xtensa/mm/init.c
 *
 * Derived from MIPS, PPC.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 * Copyright (C) 2014 - 2016 Cadence Design Systems Inc.
 *
 * Chris Zankel <chris@zankel.net>
 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
 * Marc Gauthier
 * Kevin Chea
 */
|
|
|
|
#include <linux/kernel.h>
|
|
#include <linux/errno.h>
|
|
#include <linux/memblock.h>
|
|
#include <linux/gfp.h>
|
|
#include <linux/highmem.h>
|
|
#include <linux/swap.h>
|
|
#include <linux/mman.h>
|
|
#include <linux/nodemask.h>
|
|
#include <linux/mm.h>
|
|
#include <linux/of_fdt.h>
|
|
#include <linux/dma-map-ops.h>
|
|
|
|
#include <asm/bootparam.h>
|
|
#include <asm/page.h>
|
|
#include <asm/sections.h>
|
|
#include <asm/sysmem.h>
|
|
|
|
/*
|
|
* Initialize the bootmem system and give it all low memory we have available.
|
|
*/
|
|
|
|
/*
 * Initialize the bootmem system and give it all low memory we have
 * available.
 */
void __init bootmem_init(void)
{
	/*
	 * Reserve all memory below PHYS_OFFSET, as memory accounting
	 * doesn't work for pages below that address.
	 *
	 * If PHYS_OFFSET is zero, reserve the page at address 0 so that
	 * successful allocations never return NULL.
	 */
	memblock_reserve(0, PHYS_OFFSET ? PHYS_OFFSET : 1);

	early_init_fdt_scan_reserved_mem();

	if (!memblock_phys_mem_size())
		panic("No memory found!\n");

	/* Clamp the usable PFN range to [PHYS_OFFSET, end of DRAM). */
	min_low_pfn = PFN_UP(memblock_start_of_DRAM());
	min_low_pfn = max(min_low_pfn, PFN_UP(PHYS_OFFSET));
	max_pfn = PFN_DOWN(memblock_end_of_DRAM());
	max_low_pfn = min(max_pfn, MAX_LOW_PFN);

	early_memtest((phys_addr_t)min_low_pfn << PAGE_SHIFT,
		      (phys_addr_t)max_low_pfn << PAGE_SHIFT);

	/* Early memblock allocations must come from lowmem only. */
	memblock_set_current_limit(PFN_PHYS(max_low_pfn));
	dma_contiguous_reserve(PFN_PHYS(max_low_pfn));

	memblock_dump_all();
}
|
|
|
|
/*
 * Log the virtual kernel memory layout at boot: optional kasan/vmalloc/
 * highmem windows, the lowmem span, and the kernel section boundaries.
 * Format lines and argument tuples are paired under matching #ifdefs,
 * so both halves must stay in lockstep.
 */
static void __init print_vm_layout(void)
{
	pr_info("virtual kernel memory layout:\n"
#ifdef CONFIG_KASAN
		" kasan : 0x%08lx - 0x%08lx (%5lu MB)\n"
#endif
#ifdef CONFIG_MMU
		" vmalloc : 0x%08lx - 0x%08lx (%5lu MB)\n"
#endif
#ifdef CONFIG_HIGHMEM
		" pkmap : 0x%08lx - 0x%08lx (%5lu kB)\n"
		" fixmap : 0x%08lx - 0x%08lx (%5lu kB)\n"
#endif
		" lowmem : 0x%08lx - 0x%08lx (%5lu MB)\n"
		" .text : 0x%08lx - 0x%08lx (%5lu kB)\n"
		" .rodata : 0x%08lx - 0x%08lx (%5lu kB)\n"
		" .data : 0x%08lx - 0x%08lx (%5lu kB)\n"
		" .init : 0x%08lx - 0x%08lx (%5lu kB)\n"
		" .bss : 0x%08lx - 0x%08lx (%5lu kB)\n",
#ifdef CONFIG_KASAN
		KASAN_SHADOW_START, KASAN_SHADOW_START + KASAN_SHADOW_SIZE,
		KASAN_SHADOW_SIZE >> 20,
#endif
#ifdef CONFIG_MMU
		VMALLOC_START, VMALLOC_END,
		(VMALLOC_END - VMALLOC_START) >> 20,
#ifdef CONFIG_HIGHMEM
		PKMAP_BASE, PKMAP_BASE + LAST_PKMAP * PAGE_SIZE,
		(LAST_PKMAP * PAGE_SIZE) >> 10,
		FIXADDR_START, FIXADDR_END,
		(FIXADDR_END - FIXADDR_START) >> 10,
#endif
		/* With an MMU, lowmem starts at PAGE_OFFSET ... */
		PAGE_OFFSET,
		PAGE_OFFSET + (max_low_pfn - min_low_pfn) * PAGE_SIZE,
#else
		/* ... without one, it is identity-mapped physical memory. */
		min_low_pfn * PAGE_SIZE, max_low_pfn * PAGE_SIZE,
#endif
		((max_low_pfn - min_low_pfn) * PAGE_SIZE) >> 20,
		(unsigned long)_text, (unsigned long)_etext,
		(unsigned long)(_etext - _text) >> 10,
		(unsigned long)__start_rodata, (unsigned long)__end_rodata,
		(unsigned long)(__end_rodata - __start_rodata) >> 10,
		(unsigned long)_sdata, (unsigned long)_edata,
		(unsigned long)(_edata - _sdata) >> 10,
		(unsigned long)__init_begin, (unsigned long)__init_end,
		(unsigned long)(__init_end - __init_begin) >> 10,
		(unsigned long)__bss_start, (unsigned long)__bss_stop,
		(unsigned long)(__bss_stop - __bss_start) >> 10);
}
|
|
|
|
/*
 * Fill in the highest PFN of each zone for core MM initialization.
 * Lowmem goes to ZONE_NORMAL; with highmem enabled, the remaining
 * memory up to max_pfn lands in ZONE_HIGHMEM.
 */
void __init arch_zone_limits_init(unsigned long *max_zone_pfns)
{
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = max_pfn;
#endif
}
|
|
|
|
/* Zone setup hook; all that remains here is logging the VM layout. */
void __init zones_init(void)
{
	print_vm_layout();
}
|
|
|
|
/*
 * Handle one element of the "memmap=" boot option:
 *   size@start - add the region [start, start + size) as usable memory
 *   size$start - reserve the region [start, start + size)
 *   size       - reserve everything above the first @size bytes
 * Anything else is reported and ignored.
 */
static void __init parse_memmap_one(char *p)
{
	unsigned long start_at, mem_size;
	char *endp;

	if (!p)
		return;

	endp = p;
	mem_size = memparse(p, &endp);
	if (endp == p)		/* no size could be parsed */
		return;
	p = endp;

	switch (*p) {
	case '@':
		start_at = memparse(p + 1, &p);
		memblock_add(start_at, mem_size);
		break;

	case '$':
		start_at = memparse(p + 1, &p);
		memblock_reserve(start_at, mem_size);
		break;

	case 0:
		/* Bare size: reserve from mem_size to the top of memory. */
		memblock_reserve(mem_size, -mem_size);
		break;

	default:
		pr_warn("Unrecognized memmap syntax: %s\n", p);
		break;
	}
}
|
|
|
|
static int __init parse_memmap_opt(char *str)
|
|
{
|
|
while (str) {
|
|
char *k = strchr(str, ',');
|
|
|
|
if (k)
|
|
*k++ = 0;
|
|
|
|
parse_memmap_one(str);
|
|
str = k;
|
|
}
|
|
|
|
return 0;
|
|
}
|
|
early_param("memmap", parse_memmap_opt);
|
|
|
|
#ifdef CONFIG_MMU
/*
 * VM_READ/VM_WRITE/VM_EXEC/VM_SHARED -> page protection lookup table.
 * Private writable mappings get PAGE_COPY* (copy-on-write semantics);
 * only shared writable mappings get PAGE_SHARED*.
 */
static const pgprot_t protection_map[16] = {
	[VM_NONE]					= PAGE_NONE,
	[VM_READ]					= PAGE_READONLY,
	[VM_WRITE]					= PAGE_COPY,
	[VM_WRITE | VM_READ]				= PAGE_COPY,
	[VM_EXEC]					= PAGE_READONLY_EXEC,
	[VM_EXEC | VM_READ]				= PAGE_READONLY_EXEC,
	[VM_EXEC | VM_WRITE]				= PAGE_COPY_EXEC,
	[VM_EXEC | VM_WRITE | VM_READ]			= PAGE_COPY_EXEC,
	[VM_SHARED]					= PAGE_NONE,
	[VM_SHARED | VM_READ]				= PAGE_READONLY,
	[VM_SHARED | VM_WRITE]				= PAGE_SHARED,
	[VM_SHARED | VM_WRITE | VM_READ]		= PAGE_SHARED,
	[VM_SHARED | VM_EXEC]				= PAGE_READONLY_EXEC,
	[VM_SHARED | VM_EXEC | VM_READ]			= PAGE_READONLY_EXEC,
	[VM_SHARED | VM_EXEC | VM_WRITE]		= PAGE_SHARED_EXEC,
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= PAGE_SHARED_EXEC
};
DECLARE_VM_GET_PAGE_PROT
#endif
|