mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2026-04-29 12:28:27 +02:00
e5ba90abb2
Similar to x86, when a VM is detected, revert to a simple test-and-set
lock to avoid the horrors of queue preemption.
Tested on a dual-way 3C5000 machine with 32 cores and 2 NUMA nodes.
The test case is kcbench on mainline kernel 6.10; the detailed command is
"kcbench --src /root/src/linux".
Performance on host machine
kernel compile time performance impact
Original 150.29 seconds
With patch 150.19 seconds almost no impact
Performance on virtual machine:
1. 1 VM with 32 vCPUs and 2 numa node, numa node pinned
kernel compile time performance impact
Original 170.87 seconds
With patch 171.73 seconds almost no impact
2. 2 VMs, each VM with 32 vCPUs and 2 numa node, numa node pinned
kernel compile time performance impact
Original 2362.04 seconds
With patch 354.73 seconds +565%
Signed-off-by: Bibo Mao <maobibo@loongson.cn>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
43 lines
773 B
C
43 lines
773 B
C
/* SPDX-License-Identifier: GPL-2.0 */
|
|
#ifndef _ASM_LOONGARCH_PARAVIRT_H
|
|
#define _ASM_LOONGARCH_PARAVIRT_H
|
|
|
|
#ifdef CONFIG_PARAVIRT
|
|
|
|
#include <linux/static_call_types.h>
|
|
struct static_key;
|
|
extern struct static_key paravirt_steal_enabled;
|
|
extern struct static_key paravirt_steal_rq_enabled;
|
|
|
|
u64 dummy_steal_clock(int cpu);
|
|
DECLARE_STATIC_CALL(pv_steal_clock, dummy_steal_clock);
|
|
|
|
static inline u64 paravirt_steal_clock(int cpu)
|
|
{
|
|
return static_call(pv_steal_clock)(cpu);
|
|
}
|
|
|
|
int __init pv_ipi_init(void);
|
|
int __init pv_time_init(void);
|
|
int __init pv_spinlock_init(void);
|
|
|
|
#else
|
|
|
|
static inline int pv_ipi_init(void)
|
|
{
|
|
return 0;
|
|
}
|
|
|
|
static inline int pv_time_init(void)
|
|
{
|
|
return 0;
|
|
}
|
|
|
|
static inline int pv_spinlock_init(void)
|
|
{
|
|
return 0;
|
|
}
|
|
|
|
#endif // CONFIG_PARAVIRT
|
|
#endif
|