Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git (synced 2026-01-15 12:25:41 +01:00)
Patch series "Add and use memdesc_flags_t".

At some point struct page will be separated from struct slab and struct folio. This is a step towards that by introducing a type for the 'flags' word of all three structures. This gives us a certain amount of type safety by establishing that some of these unsigned longs are different from other unsigned longs in that they contain things like node ID, section number and zone number in the upper bits. That lets us have functions that can be easily called by anyone who has a slab, folio or page (but not easily by anyone else) to get the node or zone.

There's going to be some unusual merge problems with this as some odd bits of the kernel decide they want to print out the flags value or something similar by writing page->flags and now they'll need to write page->flags.f instead. That's most of the churn here. Maybe we should be removing these things from the debug output?

This patch (of 11):

Wrap the unsigned long flags in a typedef. In upcoming patches, this will provide a strong hint that you can't just pass a random unsigned long to functions which take this as an argument.

[willy@infradead.org: s/flags/flags.f/ in several architectures]
  Link: https://lkml.kernel.org/r/aKMgPRLD-WnkPxYm@casper.infradead.org
[nicola.vetrini@gmail.com: mips: fix compilation error]
  Link: https://lore.kernel.org/lkml/CA+G9fYvkpmqGr6wjBNHY=dRp71PLCoi2341JxOudi60yqaeUdg@mail.gmail.com/
  Link: https://lkml.kernel.org/r/20250825214245.1838158-1-nicola.vetrini@gmail.com
Link: https://lkml.kernel.org/r/20250805172307.1302730-1-willy@infradead.org
Link: https://lkml.kernel.org/r/20250805172307.1302730-2-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Zi Yan <ziy@nvidia.com>
Cc: Shakeel Butt <shakeel.butt@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
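For readers following the s/flags/flags.f/ churn described above, here is a minimal sketch of what such a wrapper typedef looks like and why callers now spell the access page->flags.f. This is an assumed shape, not the exact upstream definition, and the demo_ names are hypothetical:

/*
 * Illustrative sketch only (assumed shape, not copied from the kernel):
 * a single-member struct typedef keeps the same storage layout as a
 * bare unsigned long but can no longer be mixed up with one.
 */
typedef struct {
	unsigned long f;	/* raw flags word; node/section/zone live in the upper bits */
} memdesc_flags_t;

/* Hypothetical stand-in for struct page / struct folio / struct slab. */
struct demo_page {
	memdesc_flags_t flags;
};

/* Helpers can now require the wrapped type rather than any unsigned long. */
static inline unsigned long demo_raw_flags(const struct demo_page *page)
{
	return page->flags.f;	/* formerly page->flags */
}

In the OpenRISC header shown below, this is exactly the churn visible in flush_dcache_folio(), where the bit helper now takes &folio->flags.f.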
97 lines · 3.1 KiB · C
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * OpenRISC Linux
 *
 * Linux architectural port borrowing liberally from similar works of
 * others. All original copyrights apply as per the original source
 * declaration.
 *
 * OpenRISC implementation:
 * Copyright (C) Jan Henrik Weinstock <jan.weinstock@rwth-aachen.de>
 * et al.
 */

#ifndef __ASM_CACHEFLUSH_H
#define __ASM_CACHEFLUSH_H

#include <linux/mm.h>

/*
 * Helper function for flushing or invalidating entire pages from data
 * and instruction caches. SMP needs a little extra work, since we need
 * to flush the pages on all cpus.
 */
extern void local_dcache_page_flush(struct page *page);
extern void local_icache_page_inv(struct page *page);
extern void local_dcache_range_flush(unsigned long start, unsigned long end);
extern void local_dcache_range_inv(unsigned long start, unsigned long end);
extern void local_icache_range_inv(unsigned long start, unsigned long end);

/*
 * Data cache flushing always happen on the local cpu. Instruction cache
 * invalidations need to be broadcasted to all other cpu in the system in
 * case of SMP configurations.
 */
#ifndef CONFIG_SMP
#define dcache_page_flush(page) local_dcache_page_flush(page)
#define icache_page_inv(page) local_icache_page_inv(page)
#else /* CONFIG_SMP */
#define dcache_page_flush(page) local_dcache_page_flush(page)
#define icache_page_inv(page) smp_icache_page_inv(page)
extern void smp_icache_page_inv(struct page *page);
#endif /* CONFIG_SMP */

/*
 * Even if the actual block size is larger than L1_CACHE_BYTES, paddr
 * can be incremented by L1_CACHE_BYTES. When paddr is written to the
 * invalidate register, the entire cache line encompassing this address
 * is invalidated. Each subsequent reference to the same cache line will
 * not affect the invalidation process.
 */
#define local_dcache_block_flush(addr) \
	local_dcache_range_flush(addr, addr + L1_CACHE_BYTES)
#define local_dcache_block_inv(addr) \
	local_dcache_range_inv(addr, addr + L1_CACHE_BYTES)
#define local_icache_block_inv(addr) \
	local_icache_range_inv(addr, addr + L1_CACHE_BYTES)

/*
 * Synchronizes caches. Whenever a cpu writes executable code to memory, this
 * should be called to make sure the processor sees the newly written code.
 */
static inline void sync_icache_dcache(struct page *page)
{
	if (!IS_ENABLED(CONFIG_DCACHE_WRITETHROUGH))
		dcache_page_flush(page);
	icache_page_inv(page);
}

/*
 * Pages with this bit set need not be flushed/invalidated, since
 * they have not changed since last flush. New pages start with
 * PG_arch_1 not set and are therefore dirty by default.
 */
#define PG_dc_clean PG_arch_1

static inline void flush_dcache_folio(struct folio *folio)
{
	clear_bit(PG_dc_clean, &folio->flags.f);
}
#define flush_dcache_folio flush_dcache_folio

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
static inline void flush_dcache_page(struct page *page)
{
	flush_dcache_folio(page_folio(page));
}

#define flush_icache_user_page(vma, page, addr, len) \
do { \
	if (vma->vm_flags & VM_EXEC) \
		sync_icache_dcache(page); \
} while (0)

#include <asm-generic/cacheflush.h>

#endif /* __ASM_CACHEFLUSH_H */
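For illustration, a hypothetical caller (demo_poke_text() is not a kernel API and is not part of this header) showing where flush_icache_user_page() fits: after new instructions are written into a page that may be mapped executable, the macro flushes the d-cache and invalidates the i-cache, but only when the VMA is VM_EXEC:

#include <linux/mm.h>
#include <linux/string.h>
#include <asm/cacheflush.h>

/* Hypothetical caller sketch: patch instructions in a user page that is
 * already mapped at kaddr, then make sure the CPU will fetch the new code. */
static void demo_poke_text(struct vm_area_struct *vma, struct page *page,
			   unsigned long uaddr, void *kaddr,
			   const void *new_insns, size_t len)
{
	memcpy(kaddr, new_insns, len);			/* write via the kernel mapping */
	flush_icache_user_page(vma, page, uaddr, len);	/* no-op unless vma->vm_flags has VM_EXEC */
}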