mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2026-04-03 12:05:13 +02:00
[ Upstream commit d5d01b7199 ] Add ftrace_fill_perf_regs() which should be compatible with the perf_fetch_caller_regs(). In other words, the pt_regs returned from the ftrace_fill_perf_regs() must satisfy 'user_mode(regs) == false' and can be used for stack tracing. Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org> Acked-by: Will Deacon <will@kernel.org> Acked-by: Heiko Carstens <hca@linux.ibm.com> # s390 Cc: Alexei Starovoitov <alexei.starovoitov@gmail.com> Cc: Florent Revest <revest@chromium.org> Cc: Martin KaFai Lau <martin.lau@linux.dev> Cc: bpf <bpf@vger.kernel.org> Cc: Alexei Starovoitov <ast@kernel.org> Cc: Jiri Olsa <jolsa@kernel.org> Cc: Alan Maguire <alan.maguire@oracle.com> Cc: Heiko Carstens <hca@linux.ibm.com> Cc: Mark Rutland <mark.rutland@arm.com> Cc: Catalin Marinas <catalin.marinas@arm.com> Cc: Will Deacon <will@kernel.org> Cc: Michael Ellerman <mpe@ellerman.id.au> Cc: Nicholas Piggin <npiggin@gmail.com> Cc: Christophe Leroy <christophe.leroy@csgroup.eu> Cc: Naveen N Rao <naveen@kernel.org> Cc: Madhavan Srinivasan <maddy@linux.ibm.com> Cc: Vasily Gorbik <gor@linux.ibm.com> Cc: Alexander Gordeev <agordeev@linux.ibm.com> Cc: Christian Borntraeger <borntraeger@linux.ibm.com> Cc: Sven Schnelle <svens@linux.ibm.com> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Ingo Molnar <mingo@redhat.com> Cc: Borislav Petkov <bp@alien8.de> Cc: Dave Hansen <dave.hansen@linux.intel.com> Cc: x86@kernel.org Cc: "H. Peter Anvin" <hpa@zytor.com> Link: https://lore.kernel.org/173518997908.391279.15910334347345106424.stgit@devnote2 Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org> Stable-dep-of: aea2517999 ("x86/fgraph,bpf: Switch kprobe_multi program stack unwind to hw_regs path") Signed-off-by: Sasha Levin <sashal@kernel.org>
129 lines
3.8 KiB
C
129 lines
3.8 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
|
|
#ifndef _ASM_POWERPC_FTRACE
|
|
#define _ASM_POWERPC_FTRACE
|
|
|
|
#include <asm/types.h>
|
|
|
|
#ifdef CONFIG_FUNCTION_TRACER
/* Address of the low-level tracing entry stub patched into function entries. */
#define MCOUNT_ADDR ((unsigned long)(_mcount))
/* Each patched call site is a single 4-byte instruction. */
#define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */

/* Ignore unused weak functions which will have larger offsets */
#if defined(CONFIG_MPROFILE_KERNEL) || defined(CONFIG_ARCH_USING_PATCHABLE_FUNCTION_ENTRY)
#define FTRACE_MCOUNT_MAX_OFFSET 16
#elif defined(CONFIG_PPC32)
#define FTRACE_MCOUNT_MAX_OFFSET 8
#endif
|
|
|
|
#ifndef __ASSEMBLY__
/* Low-level tracing entry point, implemented in assembly. */
extern void _mcount(void);

/*
 * Function-graph return hook; returns the parent ip the traced function
 * should return to (see the C implementation for details).
 */
unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip,
				    unsigned long sp);

struct module;
struct dyn_ftrace;
/* Per-call-site arch state: the module owning the site (NULL for core kernel
 * — TODO confirm against ftrace_init_nop() users). */
struct dyn_arch_ftrace {
	struct module *mod;
};
|
|
|
|
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
/* powerpc supplies its own ftrace_init_nop() instead of the generic one. */
#define ftrace_need_init_nop() (true)
int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec);
#define ftrace_init_nop ftrace_init_nop

#include <linux/ftrace_regs.h>
|
|
|
|
static __always_inline struct pt_regs *arch_ftrace_get_regs(struct ftrace_regs *fregs)
|
|
{
|
|
/* We clear regs.msr in ftrace_call */
|
|
return arch_ftrace_regs(fregs)->regs.msr ? &arch_ftrace_regs(fregs)->regs : NULL;
|
|
}
|
|
|
|
/*
 * Populate a pt_regs for perf_fetch_caller_regs()-style stack tracing:
 * clear ->result, copy NIP and GPR1 (the stack pointer) from the ftrace
 * context, and read the live MSR with mfmsr so the regs describe kernel
 * mode (the commit introducing this requires user_mode(regs) == false).
 */
#define arch_ftrace_fill_perf_regs(fregs, _regs) do { \
		(_regs)->result = 0; \
		(_regs)->nip = arch_ftrace_regs(fregs)->regs.nip; \
		(_regs)->gpr[1] = arch_ftrace_regs(fregs)->regs.gpr[1]; \
		asm volatile("mfmsr %0" : "=r" ((_regs)->msr)); \
	} while (0)
|
|
|
|
static __always_inline void
|
|
ftrace_regs_set_instruction_pointer(struct ftrace_regs *fregs,
|
|
unsigned long ip)
|
|
{
|
|
regs_set_return_ip(&arch_ftrace_regs(fregs)->regs, ip);
|
|
}
|
|
|
|
struct ftrace_ops;

/* powerpc provides its own graph-tracer entry callback. */
#define ftrace_graph_func ftrace_graph_func
void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
		       struct ftrace_ops *op, struct ftrace_regs *fregs);
#endif
|
|
#endif /* __ASSEMBLY__ */

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
/* The arch trampolines can pass the ftrace_ops pointer to callbacks. */
#define ARCH_SUPPORTS_FTRACE_OPS 1
#endif
#endif /* CONFIG_FUNCTION_TRACER */
|
|
|
|
#ifndef __ASSEMBLY__
|
|
#ifdef CONFIG_FTRACE_SYSCALLS
|
|
/*
|
|
* Some syscall entry functions on powerpc start with "ppc_" (fork and clone,
|
|
* for instance) or ppc32_/ppc64_. We should also match the sys_ variant with
|
|
* those.
|
|
*/
|
|
#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
|
|
/*
 * Decide whether symbol @sym implements the syscall named @name.
 * Besides an exact match, accept the "__se_sys_" wrapper form and the
 * powerpc-specific ppc_/ppc32_/ppc64_ entry-point spellings, comparing
 * the part after the prefix against the part of @name after "sys_".
 */
static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
{
	/* Exact symbol name. */
	if (!strcmp(sym, name))
		return true;

	/* "__se_sys_foo" matches "sys_foo" (drop the "__se_" prefix). */
	if (!strncmp(sym, "__se_sys", 8) && !strcmp(sym + 5, name))
		return true;

	/* "ppc_foo" matches "sys_foo". */
	if (!strncmp(sym, "ppc_", 4) && !strcmp(sym + 4, name + 4))
		return true;

	/* "ppc32_foo" matches "sys_foo". */
	if (!strncmp(sym, "ppc32_", 6) && !strcmp(sym + 6, name + 4))
		return true;

	/* "ppc64_foo" matches "sys_foo". */
	return !strncmp(sym, "ppc64_", 6) && !strcmp(sym + 6, name + 4);
}
|
|
#endif /* CONFIG_FTRACE_SYSCALLS */
|
|
|
|
#if defined(CONFIG_PPC64) && defined(CONFIG_FUNCTION_TRACER)
#include <asm/paca.h>

/* Suppress ftrace on this CPU by clearing the paca's ftrace_enabled flag. */
static inline void this_cpu_disable_ftrace(void)
{
	get_paca()->ftrace_enabled = 0;
}
|
|
|
|
/* Re-allow ftrace on this CPU by setting the paca's ftrace_enabled flag. */
static inline void this_cpu_enable_ftrace(void)
{
	get_paca()->ftrace_enabled = 1;
}
|
|
|
|
/*
 * Restore this CPU's ftrace_enabled flag to a previously saved value
 * (see this_cpu_get_ftrace_enabled()).  NOTE(review): the old comment
 * here ("Disable ftrace on this CPU if possible") described the wrong
 * operation — this is a plain setter.
 */
static inline void this_cpu_set_ftrace_enabled(u8 ftrace_enabled)
{
	get_paca()->ftrace_enabled = ftrace_enabled;
}
|
|
|
|
/* Read this CPU's current ftrace_enabled flag from the paca. */
static inline u8 this_cpu_get_ftrace_enabled(void)
{
	return get_paca()->ftrace_enabled;
}
|
|
#else /* CONFIG_PPC64 */
/* No per-CPU paca flag on these configs: ftrace reads back as always enabled. */
static inline void this_cpu_disable_ftrace(void) { }
static inline void this_cpu_enable_ftrace(void) { }
static inline void this_cpu_set_ftrace_enabled(u8 ftrace_enabled) { }
static inline u8 this_cpu_get_ftrace_enabled(void) { return 1; }
#endif /* CONFIG_PPC64 */
|
|
|
|
#ifdef CONFIG_FUNCTION_TRACER
/* Trampoline code regions; the init one can be reclaimed after boot. */
extern unsigned int ftrace_tramp_text[], ftrace_tramp_init[];
void ftrace_free_init_tramp(void);
/* Map a symbol address to the actual patched call-site address. */
unsigned long ftrace_call_adjust(unsigned long addr);
#else
/* Tracer disabled: freeing is a no-op and addresses need no adjustment. */
static inline void ftrace_free_init_tramp(void) { }
static inline unsigned long ftrace_call_adjust(unsigned long addr) { return addr; }
#endif
|
|
#endif /* !__ASSEMBLY__ */
|
|
|
|
#endif /* _ASM_POWERPC_FTRACE */
|