[PATCH v2 2/2] arm64: kprobes: add support for KPROBES_ON_FTRACE
Masami Hiramatsu
mhiramat at kernel.org
Thu Dec 16 23:40:19 PST 2021
Hi,
On Tue, 2 Nov 2021 21:11:46 +0800
Janet Liu <jianhua.ljh at gmail.com> wrote:
> From: Janet Liu <janet.liu at unisoc.com>
>
> This patch allow kprobes on ftrace call sites. This optimization
> avoids use of a trap with regular kprobes.
>
> This depends on HAVE_DYNAMIC_FTRACE_WITH_REGS which depends on
> "patchable-function-entry" options which is only implemented with newer
> toolchains.
>
> Signed-off-by: Janet Liu <janet.liu at unisoc.com>
> ---
> arch/arm64/Kconfig | 1 +
> arch/arm64/kernel/probes/Makefile | 1 +
> arch/arm64/kernel/probes/ftrace.c | 73 ++++++++++++++++++++++++++++++
> arch/arm64/kernel/probes/kprobes.c | 27 +++++++++++
> 4 files changed, 102 insertions(+)
> create mode 100644 arch/arm64/kernel/probes/ftrace.c
>
> diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
> index 339130712093..f59005608976 100644
> --- a/arch/arm64/Kconfig
> +++ b/arch/arm64/Kconfig
> @@ -200,6 +200,7 @@ config ARM64
> select HAVE_SYSCALL_TRACEPOINTS
> select HAVE_KPROBES
> select HAVE_OPTPROBES
> + select HAVE_KPROBES_ON_FTRACE
> select HAVE_KRETPROBES
> select HAVE_GENERIC_VDSO
> select IOMMU_DMA if IOMMU_SUPPORT
> diff --git a/arch/arm64/kernel/probes/Makefile b/arch/arm64/kernel/probes/Makefile
> index c77c92ac95fd..d9b204f4795a 100644
> --- a/arch/arm64/kernel/probes/Makefile
> +++ b/arch/arm64/kernel/probes/Makefile
> @@ -3,5 +3,6 @@ obj-$(CONFIG_KPROBES) += kprobes.o decode-insn.o \
> kprobes_trampoline.o \
> simulate-insn.o
> obj-$(CONFIG_OPTPROBES) += opt.o opt_head.o
> +obj-$(CONFIG_KPROBES_ON_FTRACE) += ftrace.o
> obj-$(CONFIG_UPROBES) += uprobes.o decode-insn.o \
> simulate-insn.o
> diff --git a/arch/arm64/kernel/probes/ftrace.c b/arch/arm64/kernel/probes/ftrace.c
> new file mode 100644
> index 000000000000..46ea92eb552f
> --- /dev/null
> +++ b/arch/arm64/kernel/probes/ftrace.c
> @@ -0,0 +1,73 @@
> +// SPDX-License-Identifier: GPL-2.0-only
> +//
> +// Dynamic Ftrace based Kprobes Optimization
> +//
> +// Copyright (C) 2021, Unisoc Inc.
> +// Author: Janet Liu <janet.liu at unisoc.com>
> +#include <linux/kprobes.h>
> +#include <linux/ptrace.h>
> +#include <linux/hardirq.h>
> +#include <linux/preempt.h>
> +#include <linux/ftrace.h>
> +
> +
> +/* Ftrace callback handler for kprobes*/
> +void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
> + struct ftrace_ops *ops, struct ftrace_regs *fregs)
> +{
> + struct kprobe *p;
> + struct kprobe_ctlblk *kcb;
> + struct pt_regs *regs = ftrace_get_regs(fregs);
> + int bit;
> +
> + bit = ftrace_test_recursion_trylock(ip, parent_ip);
> + if (bit < 0)
> + return;
> +
> + preempt_disable_notrace();
This has already been done on the ftrace side.
> + p = get_kprobe((kprobe_opcode_t *)ip);
> + if (unlikely(!p) || kprobe_disabled(p))
> + goto end;
> +
> + kcb = get_kprobe_ctlblk();
> + if (kprobe_running()) {
> + kprobes_inc_nmissed_count(p);
> + } else {
> + unsigned long orig_ip = instruction_pointer(regs);
> +
> + instruction_pointer_set(regs, ip);
The 'ip' here is the address of the 'bl' instruction, which must be
p->addr + AARCH64_INSN_SIZE * 2. But this is a bit strange.
On aarch64, when the user probe callback is called from the breakpoint handler,
regs->pc == kp->addr; in this case, however, it is not the same.
So, what about this?
instruction_pointer_set(regs, ip - AARCH64_INSN_SIZE);
> +
> + __this_cpu_write(current_kprobe, p);
> + kcb->kprobe_status = KPROBE_HIT_ACTIVE;
> + if (!p->pre_handler || !p->pre_handler(p, regs)) {
> + /*
> + *Emulate singlestep (and also recover regs->pc)
> + *as if there is a nop
> + */
> + instruction_pointer_set(regs,
> + (unsigned long)p->addr + MCOUNT_INSN_SIZE);
And then, this will be
instruction_pointer_set(regs,
(unsigned long)p->addr + AARCH64_INSN_SIZE * 2);
So basically, kprobes on ftrace will skip 2 NOP instructions (the compiler installed
2 NOPs) and call the post handler. This means we have a virtual big NOP instruction there.
> + if (unlikely(p->post_handler)) {
> + kcb->kprobe_status = KPROBE_HIT_SSDONE;
> + p->post_handler(p, regs, 0);
> + }
> + instruction_pointer_set(regs, orig_ip);
> + }
> +
> + /*
> + * If pre_handler returns !0,it changes regs->pc. We have to
> + * skip emulating post_handler.
> + */
> + __this_cpu_write(current_kprobe, NULL);
> + }
> +end:
> + preempt_enable_notrace();
> + ftrace_test_recursion_unlock(bit);
> +}
> +NOKPROBE_SYMBOL(kprobe_ftrace_handler);
> +
> +int arch_prepare_kprobe_ftrace(struct kprobe *p)
> +{
> + p->ainsn.api.insn = NULL;
> + p->ainsn.api.restore = 0;
> + return 0;
> +}
> diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
> index 6dbcc89f6662..3d371d3e4dfa 100644
> --- a/arch/arm64/kernel/probes/kprobes.c
> +++ b/arch/arm64/kernel/probes/kprobes.c
> @@ -417,6 +417,33 @@ int __kprobes arch_trampoline_kprobe(struct kprobe *p)
> return 0;
> }
>
> +kprobe_opcode_t __kprobes *kprobe_lookup_name(const char *name, unsigned int offset)
> +{
> + kprobe_opcode_t *addr;
> +
> + addr = (kprobe_opcode_t *)kallsyms_lookup_name(name);
> +#ifdef CONFIG_KPROBES_ON_FTRACE
> + if (addr && !offset) {
> + unsigned long faddr;
> +
> + faddr = ftrace_location_range((unsigned long)addr,
> + (unsigned long)addr + 8);
this '8' must be (AARCH64_INSN_SIZE * 2). And here you may need to add
a comment explaining why the search covers 2 instructions (it is because
arm64 uses -fpatchable-function-entry=2).
> + if (faddr)
> + addr = (kprobe_opcode_t *)faddr;
> + }
> +#endif
> + return addr;
> +}
> +
> +bool __kprobes arch_kprobe_on_func_entry(unsigned long offset)
> +{
> +#ifdef CONFIG_KPROBES_ON_FTRACE
> + return offset <= 8;
Ditto.
> +#else
> + return !offset;
> +#endif
> +}
> +
> int __init arch_init_kprobes(void)
> {
> register_kernel_break_hook(&kprobes_break_hook);
> --
> 2.25.1
>
Thank you,
--
Masami Hiramatsu <mhiramat at kernel.org>
More information about the linux-arm-kernel
mailing list