Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git (synced 2026-04-29 12:28:27 +02:00).
ipv4/route: Use this_cpu_inc() for stats on PREEMPT_RT
[ Upstream commit 1c0829788a ]
The statistics are incremented with raw_cpu_inc() assuming it always
happens with bottom half disabled. Without per-CPU locking in
local_bh_disable() on PREEMPT_RT this is no longer true.
Use this_cpu_inc() on PREEMPT_RT for the increment to not worry about
preemption.
Cc: David Ahern <dsahern@kernel.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Link: https://patch.msgid.link/20250512092736.229935-4-bigeasy@linutronix.de
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
This commit is contained in the stable tree; committed by Greg Kroah-Hartman.
Parent commit: 97033659fc
This commit: c1ee5f1675
@@ -200,7 +200,11 @@ const __u8 ip_tos2prio[16] = {
 EXPORT_SYMBOL(ip_tos2prio);

 static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
+#ifndef CONFIG_PREEMPT_RT
 #define RT_CACHE_STAT_INC(field) raw_cpu_inc(rt_cache_stat.field)
+#else
+#define RT_CACHE_STAT_INC(field) this_cpu_inc(rt_cache_stat.field)
+#endif

 #ifdef CONFIG_PROC_FS
 static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
Reference in New Issue
Block a user