mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2025-12-26 12:21:01 +01:00
Lots of functions in mm/highmem.c do not write to the given pointers and do not call functions that take non-const pointers and can therefore be constified. This includes functions like kunmap() which might be implemented in a way that writes to the pointer (e.g. to update reference counters or mapping fields), but currently are not. kmap() on the other hand cannot be made const because it calls set_page_address() which is non-const in some architectures/configurations. [akpm@linux-foundation.org: "fix" folio_page() build failure] Link: https://lkml.kernel.org/r/20250901205021.3573313-13-max.kellermann@ionos.com Signed-off-by: Max Kellermann <max.kellermann@ionos.com> Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com> Acked-by: David Hildenbrand <david@redhat.com> Acked-by: Vlastimil Babka <vbabka@suse.cz> Acked-by: Mike Rapoport (Microsoft) <rppt@kernel.org> Acked-by: Shakeel Butt <shakeel.butt@linux.dev> Cc: Alexander Gordeev <agordeev@linux.ibm.com> Cc: Al Viro <viro@zeniv.linux.org.uk> Cc: Andreas Larsson <andreas@gaisler.com> Cc: Andy Lutomirski <luto@kernel.org> Cc: Axel Rasmussen <axelrasmussen@google.com> Cc: Baolin Wang <baolin.wang@linux.alibaba.com> Cc: Borislav Betkov <bp@alien8.de> Cc: Christian Borntraeger <borntraeger@linux.ibm.com> Cc: Christian Brauner <brauner@kernel.org> Cc: Christian Zankel <chris@zankel.net> Cc: David Rientjes <rientjes@google.com> Cc: David S. Miller <davem@davemloft.net> Cc: Gerald Schaefer <gerald.schaefer@linux.ibm.com> Cc: Heiko Carstens <hca@linux.ibm.com> Cc: Helge Deller <deller@gmx.de> Cc: "H. 
Peter Anvin" <hpa@zytor.com> Cc: Hugh Dickins <hughd@google.com> Cc: Ingo Molnar <mingo@redhat.com> Cc: James Bottomley <james.bottomley@HansenPartnership.com> Cc: Jan Kara <jack@suse.cz> Cc: Jocelyn Falempe <jfalempe@redhat.com> Cc: Liam Howlett <liam.howlett@oracle.com> Cc: Mark Brown <broonie@kernel.org> Cc: Matthew Wilcox (Oracle) <willy@infradead.org> Cc: Max Filippov <jcmvbkbc@gmail.com> Cc: Michael Ellerman <mpe@ellerman.id.au> Cc: Michal Hocko <mhocko@suse.com> Cc: "Nysal Jan K.A" <nysal@linux.ibm.com> Cc: Oscar Salvador <osalvador@suse.de> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Russel King <linux@armlinux.org.uk> Cc: Suren Baghdasaryan <surenb@google.com> Cc: Sven Schnelle <svens@linux.ibm.com> Cc: Thomas Gleinxer <tglx@linutronix.de> Cc: Thomas Huth <thuth@redhat.com> Cc: Vasily Gorbik <gor@linux.ibm.com> Cc: Vishal Moola (Oracle) <vishal.moola@gmail.com> Cc: Wei Xu <weixugc@google.com> Cc: Yuanchu Xie <yuanchu@google.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
299 lines
7.0 KiB
C
299 lines
7.0 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HIGHMEM_INTERNAL_H
#define _LINUX_HIGHMEM_INTERNAL_H

/*
 * Outside of CONFIG_HIGHMEM to support X86 32bit iomap_atomic() cruft.
 */
#ifdef CONFIG_KMAP_LOCAL
/* Low-level kmap_local plumbing, implemented in mm/highmem.c. */
void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot);
void *__kmap_local_page_prot(const struct page *page, pgprot_t prot);
void kunmap_local_indexed(const void *vaddr);
/* Set up the kmap_ctrl state of a newly forked task. */
void kmap_local_fork(struct task_struct *tsk);
/* Scheduler hooks to unmap/remap the outgoing/incoming task's local kmaps. */
void __kmap_local_sched_out(void);
void __kmap_local_sched_in(void);

/* Warn (via lockdep debug) if the current task still holds local kmaps. */
static inline void kmap_assert_nomap(void)
{
	DEBUG_LOCKS_WARN_ON(current->kmap_ctrl.idx);
}
#else
/* Without CONFIG_KMAP_LOCAL there is no per-task kmap state to manage. */
static inline void kmap_local_fork(struct task_struct *tsk) { }
static inline void kmap_assert_nomap(void) { }
#endif
#ifdef CONFIG_HIGHMEM
#include <asm/highmem.h>

#ifndef ARCH_HAS_KMAP_FLUSH_TLB
/* Arch hook invoked after mapping; no-op unless the architecture overrides it. */
static inline void kmap_flush_tlb(unsigned long addr) { }
#endif

#ifndef kmap_prot
/* Default protection for kmap'ed pages unless the arch provides its own. */
#define kmap_prot PAGE_KERNEL
#endif

/* Implemented in mm/highmem.c. */
void *kmap_high(struct page *page);
void kunmap_high(const struct page *page);
void __kmap_flush_unused(void);
struct page *__kmap_to_page(void *addr);
/*
 * Map @page for long-lived kernel access. May sleep, so only usable
 * from preemptible context. Lowmem pages already have a linear-map
 * address; highmem pages get a pkmap slot via kmap_high().
 */
static inline void *kmap(struct page *page)
{
	void *vaddr;

	might_sleep();
	vaddr = PageHighMem(page) ? kmap_high(page) : page_address(page);
	kmap_flush_tlb((unsigned long)vaddr);
	return vaddr;
}
/*
 * Release a mapping created by kmap(). Lowmem pages need no teardown;
 * only highmem pages have pkmap bookkeeping to undo.
 */
static inline void kunmap(const struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
/* Translate a kmap'ed virtual address back to its struct page. */
static inline struct page *kmap_to_page(void *addr)
{
	return __kmap_to_page(addr);
}
/* Unmap all currently unused pkmap entries. */
static inline void kmap_flush_unused(void)
{
	__kmap_flush_unused();
}
/* Short-lived, CPU-local mapping of @page with the default kernel protection. */
static inline void *kmap_local_page(const struct page *page)
{
	return __kmap_local_page_prot(page, kmap_prot);
}
static inline void *kmap_local_page_try_from_panic(const struct page *page)
|
|
{
|
|
if (!PageHighMem(page))
|
|
return page_address(page);
|
|
/* If the page is in HighMem, it's not safe to kmap it.*/
|
|
return NULL;
|
|
}
|
|
|
|
/*
 * Map the page of @folio that contains byte @offset and return the
 * kernel address of that byte.
 */
static inline void *kmap_local_folio(const struct folio *folio, size_t offset)
{
	const struct page *page = folio_page(folio, offset / PAGE_SIZE);
	return __kmap_local_page_prot(page, kmap_prot) + offset % PAGE_SIZE;
}
/* Like kmap_local_page() but with caller-supplied page protection. */
static inline void *kmap_local_page_prot(const struct page *page, pgprot_t prot)
{
	return __kmap_local_page_prot(page, prot);
}
/* Local mapping by pfn, for memory that may lack a struct page. */
static inline void *kmap_local_pfn(unsigned long pfn)
{
	return __kmap_local_pfn_prot(pfn, kmap_prot);
}
/* Tear down the local mapping containing @vaddr. */
static inline void __kunmap_local(const void *vaddr)
{
	kunmap_local_indexed(vaddr);
}
/*
 * Legacy kmap_atomic() with explicit protection. Disables migration
 * (PREEMPT_RT) or preemption, then pagefaults, before taking the
 * mapping; __kunmap_atomic() undoes these in reverse order.
 */
static inline void *kmap_atomic_prot(const struct page *page, pgprot_t prot)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		migrate_disable();	/* RT: pin to CPU but stay preemptible */
	else
		preempt_disable();

	pagefault_disable();
	return __kmap_local_page_prot(page, prot);
}
/* Deprecated interface; new code should use kmap_local_page(). */
static inline void *kmap_atomic(const struct page *page)
{
	return kmap_atomic_prot(page, kmap_prot);
}
/* kmap_atomic() by pfn; same side effects as kmap_atomic_prot(). */
static inline void *kmap_atomic_pfn(unsigned long pfn)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		migrate_disable();
	else
		preempt_disable();

	pagefault_disable();
	return __kmap_local_pfn_prot(pfn, kmap_prot);
}
/*
 * Counterpart to kmap_atomic*(): unmap first, then re-enable
 * pagefaults and migration/preemption in reverse order of disable.
 */
static inline void __kunmap_atomic(const void *addr)
{
	kunmap_local_indexed(addr);
	pagefault_enable();
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		migrate_enable();
	else
		preempt_enable();
}
/* Implemented in mm/highmem.c. */
unsigned long __nr_free_highpages(void);
unsigned long __totalhigh_pages(void);

/* Number of free highmem pages in the system. */
static inline unsigned long nr_free_highpages(void)
{
	return __nr_free_highpages();
}

/* Total number of highmem pages in the system. */
static inline unsigned long totalhigh_pages(void)
{
	return __totalhigh_pages();
}
static inline bool is_kmap_addr(const void *x)
|
|
{
|
|
unsigned long addr = (unsigned long)x;
|
|
|
|
return (addr >= PKMAP_ADDR(0) && addr < PKMAP_ADDR(LAST_PKMAP)) ||
|
|
(addr >= __fix_to_virt(FIX_KMAP_END) &&
|
|
addr < __fix_to_virt(FIX_KMAP_BEGIN));
|
|
}
|
|
#else /* CONFIG_HIGHMEM */

/* Without highmem every page is in the linear map. */
static inline struct page *kmap_to_page(void *addr)
{
	return virt_to_page(addr);
}
/* !CONFIG_HIGHMEM: every page has a permanent linear-map address. */
static inline void *kmap(struct page *page)
{
	might_sleep();
	return page_address(page);
}
/* No highmem: nothing to unmap or flush. */
static inline void kunmap_high(const struct page *page) { }
static inline void kmap_flush_unused(void) { }
/*
 * No mapping to undo; only give the architecture a chance to flush
 * caches if it requires that on unmap.
 */
static inline void kunmap(const struct page *page)
{
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
	kunmap_flush_on_unmap(page_address(page));
#endif
}
/* No highmem: the "mapping" is just the page's linear address. */
static inline void *kmap_local_page(const struct page *page)
{
	return page_address(page);
}
/* Always safe without highmem — identical to kmap_local_page(). */
static inline void *kmap_local_page_try_from_panic(const struct page *page)
{
	return page_address(page);
}
/* Address of byte @offset inside @folio via the linear map. */
static inline void *kmap_local_folio(const struct folio *folio, size_t offset)
{
	return folio_address(folio) + offset;
}
/* @prot is ignored: the page is already mapped with kernel protection. */
static inline void *kmap_local_page_prot(const struct page *page, pgprot_t prot)
{
	return kmap_local_page(page);
}
/* Map by pfn; without highmem this is just a linear-map lookup. */
static inline void *kmap_local_pfn(unsigned long pfn)
{
	return kmap_local_page(pfn_to_page(pfn));
}
/* Only an arch cache-flush hook; there is no mapping to tear down. */
static inline void __kunmap_local(const void *addr)
{
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
	kunmap_flush_on_unmap(PTR_ALIGN_DOWN(addr, PAGE_SIZE));
#endif
}
/*
 * Keep the side effects (pagefault and migration/preemption disable)
 * of the highmem variant so callers behave identically on
 * !CONFIG_HIGHMEM kernels.
 */
static inline void *kmap_atomic(const struct page *page)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		migrate_disable();
	else
		preempt_disable();
	pagefault_disable();
	return page_address(page);
}
/* @prot is ignored without highmem. */
static inline void *kmap_atomic_prot(const struct page *page, pgprot_t prot)
{
	return kmap_atomic(page);
}
/* kmap_atomic() by pfn. */
static inline void *kmap_atomic_pfn(unsigned long pfn)
{
	return kmap_atomic(pfn_to_page(pfn));
}
/* Undo kmap_atomic() side effects in reverse order of their setup. */
static inline void __kunmap_atomic(const void *addr)
{
#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
	kunmap_flush_on_unmap(PTR_ALIGN_DOWN(addr, PAGE_SIZE));
#endif
	pagefault_enable();
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		migrate_enable();
	else
		preempt_enable();
}
/* No highmem pages exist without CONFIG_HIGHMEM. */
static inline unsigned long nr_free_highpages(void) { return 0; }
static inline unsigned long totalhigh_pages(void) { return 0; }
/* No kmap virtual ranges exist without CONFIG_HIGHMEM. */
static inline bool is_kmap_addr(const void *x)
{
	return false;
}

#endif /* CONFIG_HIGHMEM */
/**
 * kunmap_atomic - Unmap the virtual address mapped by kmap_atomic() - deprecated!
 * @__addr: Virtual address to be unmapped
 *
 * Unmaps an address previously mapped by kmap_atomic() and re-enables
 * pagefaults. Depending on PREEMPT_RT configuration, re-enables also
 * migration and preemption. Users should not count on these side effects.
 *
 * Mappings should be unmapped in the reverse order that they were mapped.
 * See kmap_local_page() for details on nesting.
 *
 * @__addr can be any address within the mapped page, so there is no need
 * to subtract any offset that has been added. In contrast to kunmap(),
 * this function takes the address returned from kmap_atomic(), not the
 * page passed to it. The compiler will warn you if you pass the page.
 */
#define kunmap_atomic(__addr)					\
do {								\
	BUILD_BUG_ON(__same_type((__addr), struct page *));	\
	__kunmap_atomic(__addr);				\
} while (0)
/**
 * kunmap_local - Unmap a page mapped via kmap_local_page().
 * @__addr: An address within the page mapped
 *
 * @__addr can be any address within the mapped page. Commonly it is the
 * address return from kmap_local_page(), but it can also include offsets.
 *
 * Unmapping should be done in the reverse order of the mapping. See
 * kmap_local_page() for details.
 */
#define kunmap_local(__addr)					\
do {								\
	BUILD_BUG_ON(__same_type((__addr), struct page *));	\
	__kunmap_local(__addr);					\
} while (0)

#endif /* _LINUX_HIGHMEM_INTERNAL_H */