linux-stable-mirror/include/linux/local_lock_internal.h
commit 14de1552a4 ("include/linux/local_lock_internal.h: Make this header file again compatible with sparse")
Author: Bart Van Assche
There are two versions of the __this_cpu_local_lock() definition in
include/linux/local_lock_internal.h: one that relies on Clang's function
overloading extension (__attribute__((overloadable))) and one that does
not. Select the latter when the header is processed by sparse. This fixes
the following errors reported by sparse:

   include/linux/local_lock_internal.h:331:40: sparse: sparse: multiple definitions for function '__this_cpu_local_lock'
   include/linux/local_lock_internal.h:325:37: sparse:  the previous one is here

Closes: https://lore.kernel.org/oe-kbuild-all/202603062334.wgI5htP0-lkp@intel.com/
Fixes: d3febf16de ("locking/local_lock: Support Clang's context analysis")
Reported-by: kernel test robot <lkp@intel.com>
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Marco Elver <elver@google.com>
Link: https://patch.msgid.link/20260311231455.1961413-1-bvanassche@acm.org
Date: 2026-03-12 11:29:11 +01:00

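As background, a minimal stand-alone sketch (hypothetical code, not from the kernel tree) of why sparse trips here: sparse does not implement Clang's overloadable attribute, so two definitions that Clang treats as distinct overloads look to sparse like the same function defined twice.

/* Clang: two overloads of ping(); sparse: "multiple definitions for function 'ping'" */
static inline __attribute__((overloadable)) int  ping(int x)  { return x; }
static inline __attribute__((overloadable)) long ping(long x) { return x; }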

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_LOCAL_LOCK_H
# error "Do not include directly, include linux/local_lock.h"
#endif

#include <linux/percpu-defs.h>
#include <linux/irqflags.h>
#include <linux/lockdep.h>
#include <linux/debug_locks.h>
#include <asm/current.h>

#ifndef CONFIG_PREEMPT_RT

context_lock_struct(local_lock) {
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        struct lockdep_map dep_map;
        struct task_struct *owner;
#endif
};
typedef struct local_lock local_lock_t;

/* local_trylock() and local_trylock_irqsave() only work with local_trylock_t */
context_lock_struct(local_trylock) {
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        struct lockdep_map dep_map;
        struct task_struct *owner;
#endif
        u8 acquired;
};
typedef struct local_trylock local_trylock_t;
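
/*
 * Illustrative usage via the linux/local_lock.h wrappers (the names below
 * are made up for the example):
 *
 *	static DEFINE_PER_CPU(local_lock_t, my_lock) = INIT_LOCAL_LOCK(my_lock);
 *
 *	local_lock(&my_lock);
 *	... access this CPU's protected data ...
 *	local_unlock(&my_lock);
 *
 * local_trylock() and local_trylock_irqsave() additionally require the lock
 * to be declared as local_trylock_t (see below).
 */
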
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define LOCAL_LOCK_DEBUG_INIT(lockname) \
        .dep_map = { \
                .name = #lockname, \
                .wait_type_inner = LD_WAIT_CONFIG, \
                .lock_type = LD_LOCK_PERCPU, \
        }, \
        .owner = NULL,

# define LOCAL_TRYLOCK_DEBUG_INIT(lockname) \
        LOCAL_LOCK_DEBUG_INIT(lockname)

static inline void local_lock_acquire(local_lock_t *l)
{
        lock_map_acquire(&l->dep_map);
        DEBUG_LOCKS_WARN_ON(l->owner);
        l->owner = current;
}

static inline void local_trylock_acquire(local_lock_t *l)
{
        lock_map_acquire_try(&l->dep_map);
        DEBUG_LOCKS_WARN_ON(l->owner);
        l->owner = current;
}

static inline void local_lock_release(local_lock_t *l)
{
        DEBUG_LOCKS_WARN_ON(l->owner != current);
        l->owner = NULL;
        lock_map_release(&l->dep_map);
}

static inline void local_lock_debug_init(local_lock_t *l)
{
        l->owner = NULL;
}
#else /* CONFIG_DEBUG_LOCK_ALLOC */
# define LOCAL_LOCK_DEBUG_INIT(lockname)
# define LOCAL_TRYLOCK_DEBUG_INIT(lockname)
static inline void local_lock_acquire(local_lock_t *l) { }
static inline void local_trylock_acquire(local_lock_t *l) { }
static inline void local_lock_release(local_lock_t *l) { }
static inline void local_lock_debug_init(local_lock_t *l) { }
#endif /* !CONFIG_DEBUG_LOCK_ALLOC */

#define INIT_LOCAL_LOCK(lockname) { LOCAL_LOCK_DEBUG_INIT(lockname) }
#define INIT_LOCAL_TRYLOCK(lockname) { LOCAL_TRYLOCK_DEBUG_INIT(lockname) }

#define __local_lock_init(lock) \
do { \
        static struct lock_class_key __key; \
        \
        debug_check_no_locks_freed((void *)lock, sizeof(*lock));\
        lockdep_init_map_type(&(lock)->dep_map, #lock, &__key, \
                              0, LD_WAIT_CONFIG, LD_WAIT_INV, \
                              LD_LOCK_PERCPU); \
        local_lock_debug_init(lock); \
} while (0)
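
/*
 * A local_trylock_t has the same leading (lockdep) fields as a local_lock_t,
 * so the trylock variant can reuse the plain local_lock initializer via the
 * cast below.
 */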
#define __local_trylock_init(lock) \
do { \
        __local_lock_init((local_lock_t *)lock); \
} while (0)

#define __spinlock_nested_bh_init(lock) \
do { \
        static struct lock_class_key __key; \
        \
        debug_check_no_locks_freed((void *)lock, sizeof(*lock));\
        lockdep_init_map_type(&(lock)->dep_map, #lock, &__key, \
                              0, LD_WAIT_CONFIG, LD_WAIT_INV, \
                              LD_LOCK_NORMAL); \
        local_lock_debug_init(lock); \
} while (0)
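
/*
 * Acquire @lock. When @lock is a local_trylock_t, additionally mark it as
 * acquired so that a later local_trylock() on the same CPU (for example
 * from an interrupting context) observes the lock as taken and fails.
 */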
#define __local_lock_acquire(lock) \
do { \
        local_trylock_t *__tl; \
        local_lock_t *__l; \
        \
        __l = (local_lock_t *)(lock); \
        __tl = (local_trylock_t *)__l; \
        _Generic((lock), \
                local_trylock_t *: ({ \
                        lockdep_assert(__tl->acquired == 0); \
                        WRITE_ONCE(__tl->acquired, 1); \
                }), \
                local_lock_t *: (void)0); \
        local_lock_acquire(__l); \
} while (0)

#define __local_lock(lock) \
do { \
        preempt_disable(); \
        __local_lock_acquire(lock); \
        __acquire(lock); \
} while (0)

#define __local_lock_irq(lock) \
do { \
        local_irq_disable(); \
        __local_lock_acquire(lock); \
        __acquire(lock); \
} while (0)

#define __local_lock_irqsave(lock, flags) \
do { \
        local_irq_save(flags); \
        __local_lock_acquire(lock); \
        __acquire(lock); \
} while (0)

#define __local_trylock(lock) \
        __try_acquire_ctx_lock(lock, ({ \
                local_trylock_t *__tl; \
                \
                preempt_disable(); \
                __tl = (lock); \
                if (READ_ONCE(__tl->acquired)) { \
                        preempt_enable(); \
                        __tl = NULL; \
                } else { \
                        WRITE_ONCE(__tl->acquired, 1); \
                        local_trylock_acquire( \
                                (local_lock_t *)__tl); \
                } \
                !!__tl; \
        }))

#define __local_trylock_irqsave(lock, flags) \
        __try_acquire_ctx_lock(lock, ({ \
                local_trylock_t *__tl; \
                \
                local_irq_save(flags); \
                __tl = (lock); \
                if (READ_ONCE(__tl->acquired)) { \
                        local_irq_restore(flags); \
                        __tl = NULL; \
                } else { \
                        WRITE_ONCE(__tl->acquired, 1); \
                        local_trylock_acquire( \
                                (local_lock_t *)__tl); \
                } \
                !!__tl; \
        }))
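
/*
 * Illustrative trylock usage via linux/local_lock.h (names are made up);
 * callers must handle failure, e.g. when the lock is already held on this
 * CPU by an interrupted context:
 *
 *	static DEFINE_PER_CPU(local_trylock_t, my_tlock) = INIT_LOCAL_TRYLOCK(my_tlock);
 *
 *	if (local_trylock(&my_tlock)) {
 *		... access this CPU's protected data ...
 *		local_unlock(&my_tlock);
 *	} else {
 *		... fall back without touching the protected data ...
 *	}
 */
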
/* preemption or migration must be disabled before calling __local_lock_is_locked */
#define __local_lock_is_locked(lock) READ_ONCE(this_cpu_ptr(lock)->acquired)

#define __local_lock_release(lock) \
do { \
        local_trylock_t *__tl; \
        local_lock_t *__l; \
        \
        __l = (local_lock_t *)(lock); \
        __tl = (local_trylock_t *)__l; \
        local_lock_release(__l); \
        _Generic((lock), \
                local_trylock_t *: ({ \
                        lockdep_assert(__tl->acquired == 1); \
                        WRITE_ONCE(__tl->acquired, 0); \
                }), \
                local_lock_t *: (void)0); \
} while (0)

#define __local_unlock(lock) \
do { \
        __release(lock); \
        __local_lock_release(lock); \
        preempt_enable(); \
} while (0)

#define __local_unlock_irq(lock) \
do { \
        __release(lock); \
        __local_lock_release(lock); \
        local_irq_enable(); \
} while (0)

#define __local_unlock_irqrestore(lock, flags) \
do { \
        __release(lock); \
        __local_lock_release(lock); \
        local_irq_restore(flags); \
} while (0)

#define __local_lock_nested_bh(lock) \
do { \
        lockdep_assert_in_softirq(); \
        local_lock_acquire((lock)); \
        __acquire(lock); \
} while (0)

#define __local_unlock_nested_bh(lock) \
do { \
        __release(lock); \
        local_lock_release((lock)); \
} while (0)

#else /* !CONFIG_PREEMPT_RT */

#include <linux/sched.h>
#include <linux/spinlock.h>

/*
 * On PREEMPT_RT local_lock maps to a per CPU spinlock, which protects the
 * critical section while staying preemptible.
 */
typedef spinlock_t local_lock_t;
typedef spinlock_t local_trylock_t;

#define INIT_LOCAL_LOCK(lockname) __LOCAL_SPIN_LOCK_UNLOCKED((lockname))
#define INIT_LOCAL_TRYLOCK(lockname) __LOCAL_SPIN_LOCK_UNLOCKED((lockname))

#define __local_lock_init(__l) \
do { \
        local_spin_lock_init((__l)); \
} while (0)

#define __local_trylock_init(__l) __local_lock_init(__l)

#define __local_lock(__lock) \
do { \
        migrate_disable(); \
        spin_lock((__lock)); \
} while (0)

#define __local_lock_irq(lock) __local_lock(lock)

#define __local_lock_irqsave(lock, flags) \
do { \
        typecheck(unsigned long, flags); \
        flags = 0; \
        __local_lock(lock); \
} while (0)

#define __local_unlock(__lock) \
do { \
        spin_unlock((__lock)); \
        migrate_enable(); \
} while (0)

#define __local_unlock_irq(lock) __local_unlock(lock)

#define __local_unlock_irqrestore(lock, flags) __local_unlock(lock)

#define __local_lock_nested_bh(lock) \
do { \
        lockdep_assert_in_softirq_func(); \
        spin_lock((lock)); \
} while (0)

#define __local_unlock_nested_bh(lock) \
do { \
        spin_unlock((lock)); \
} while (0)
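
/*
 * On PREEMPT_RT the underlying lock is a sleeping spinlock_t, so the
 * trylock variants must fail outright in NMI and hard interrupt context
 * rather than attempt to acquire a sleeping lock there.
 */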
#define __local_trylock(lock) \
        __try_acquire_ctx_lock(lock, context_unsafe(({ \
                int __locked; \
                \
                if (in_nmi() | in_hardirq()) { \
                        __locked = 0; \
                } else { \
                        migrate_disable(); \
                        __locked = spin_trylock((lock)); \
                        if (!__locked) \
                                migrate_enable(); \
                } \
                __locked; \
        })))

#define __local_trylock_irqsave(lock, flags) \
        __try_acquire_ctx_lock(lock, ({ \
                typecheck(unsigned long, flags); \
                flags = 0; \
                __local_trylock(lock); \
        }))

/* migration must be disabled before calling __local_lock_is_locked */
#define __local_lock_is_locked(__lock) \
        (rt_mutex_owner(&this_cpu_ptr(__lock)->lock) == current)
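
/*
 * Note that on PREEMPT_RT "locked" is interpreted as "owned by the current
 * task": the per-CPU lock is a sleeping lock, so it may be held by another,
 * currently preempted task on this CPU.
 */
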
#endif /* CONFIG_PREEMPT_RT */

#if defined(WARN_CONTEXT_ANALYSIS) && !defined(__CHECKER__)
/*
* Because the compiler only knows about the base per-CPU variable, use this
* helper function to make the compiler think we lock/unlock the @base variable,
* and hide the fact we actually pass the per-CPU instance to lock/unlock
* functions.
*/
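
/*
 * Illustrative call site (the real wrappers live in linux/local_lock.h and
 * may differ in detail):
 *
 *	#define local_lock(lock)	__local_lock(__this_cpu_local_lock(lock))
 *
 * The analysis tracks the per-CPU @base variable, while the per-CPU
 * instance returned by this_cpu_ptr() is what is actually locked.
 */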
static __always_inline local_lock_t *__this_cpu_local_lock(local_lock_t __percpu *base)
        __returns_ctx_lock(base) __attribute__((overloadable))
{
        return this_cpu_ptr(base);
}

#ifndef CONFIG_PREEMPT_RT
static __always_inline local_trylock_t *__this_cpu_local_lock(local_trylock_t __percpu *base)
        __returns_ctx_lock(base) __attribute__((overloadable))
{
        return this_cpu_ptr(base);
}
#endif /* CONFIG_PREEMPT_RT */

#else /* WARN_CONTEXT_ANALYSIS */

#define __this_cpu_local_lock(base) this_cpu_ptr(base)

#endif /* WARN_CONTEXT_ANALYSIS */