s390: support KPROBES_ON_FTRACE
author Sven Schnelle <svens@linux.ibm.com>
Tue, 21 Jan 2020 11:31:47 +0000 (12:31 +0100)
committer Vasily Gorbik <gor@linux.ibm.com>
Thu, 30 Jan 2020 12:07:55 +0000 (13:07 +0100)
Instead of using our own kprobes-on-ftrace handling, convert the
code to support the generic KPROBES_ON_FTRACE infrastructure:
kprobes placed on ftrace call sites are now dispatched through the
common kprobe_ftrace_handler() instead of the s390 specific
breakpoint based handling.
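
From the user's point of view nothing changes: an ordinary kprobe
registered on an ftrace-instrumented function entry is simply armed
through ftrace now. A minimal sketch of such a probe (the probed
symbol is only an example):

  #include <linux/kprobes.h>
  #include <linux/module.h>

  static int pre(struct kprobe *p, struct pt_regs *regs)
  {
          pr_info("kprobe hit at %pS\n",
                  (void *)instruction_pointer(regs));
          return 0;
  }

  static struct kprobe kp = {
          .symbol_name = "do_sys_open",   /* example symbol only */
          .pre_handler = pre,
  };

  static int __init kp_init(void)
  {
          return register_kprobe(&kp);
  }

  static void __exit kp_exit(void)
  {
          unregister_kprobe(&kp);
  }

  module_init(kp_init);
  module_exit(kp_exit);
  MODULE_LICENSE("GPL");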

Signed-off-by: Sven Schnelle <svens@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
Documentation/features/debug/kprobes-on-ftrace/arch-support.txt
arch/s390/Kconfig
arch/s390/include/asm/kprobes.h
arch/s390/kernel/ftrace.c
arch/s390/kernel/kprobes.c
arch/s390/kernel/mcount.S

diff --git a/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt b/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt
index 4fae0464ddff27807da70c2fcc664fa2ba8f8680..32b297295fff79f5da38b7d291fa8fb1f3fe96d9 100644
--- a/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt
+++ b/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt
@@ -24,7 +24,7 @@
     |      parisc: |  ok  |
     |     powerpc: |  ok  |
     |       riscv: | TODO |
-    |        s390: | TODO |
+    |        s390: |  ok  |
     |          sh: | TODO |
     |       sparc: | TODO |
     |          um: | TODO |
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 287714d51b47754a63a1d0529cadab73133ea399..cd84125fe1268ce4d921112321b1f8d7531448d5 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -156,6 +156,7 @@ config S390
        select HAVE_KERNEL_UNCOMPRESSED
        select HAVE_KERNEL_XZ
        select HAVE_KPROBES
+       select HAVE_KPROBES_ON_FTRACE
        select HAVE_KRETPROBES
        select HAVE_KVM
        select HAVE_LIVEPATCH
diff --git a/arch/s390/include/asm/kprobes.h b/arch/s390/include/asm/kprobes.h
index b106aa29bf55c461edb59860d84b8d1b4645c7e8..09cdb632a490e3e783ed6fda4be95ad3de34c46d 100644
--- a/arch/s390/include/asm/kprobes.h
+++ b/arch/s390/include/asm/kprobes.h
@@ -54,7 +54,6 @@ typedef u16 kprobe_opcode_t;
 struct arch_specific_insn {
        /* copy of original instruction */
        kprobe_opcode_t *insn;
-       unsigned int is_ftrace_insn : 1;
 };
 
 struct prev_kprobe {
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index 1bb85f60c0dd515efcda8a3f8812aa5ebc3e3018..4cd9b1ada8340c7ea21522963dcf8e9a46294c51 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -72,15 +72,6 @@ static inline void ftrace_generate_orig_insn(struct ftrace_insn *insn)
 #endif
 }
 
-static inline int is_kprobe_on_ftrace(struct ftrace_insn *insn)
-{
-#ifdef CONFIG_KPROBES
-       if (insn->opc == BREAKPOINT_INSTRUCTION)
-               return 1;
-#endif
-       return 0;
-}
-
 static inline void ftrace_generate_kprobe_nop_insn(struct ftrace_insn *insn)
 {
 #ifdef CONFIG_KPROBES
@@ -114,16 +105,6 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
                /* Initial code replacement */
                ftrace_generate_orig_insn(&orig);
                ftrace_generate_nop_insn(&new);
-       } else if (is_kprobe_on_ftrace(&old)) {
-               /*
-                * If we find a breakpoint instruction, a kprobe has been
-                * placed at the beginning of the function. We write the
-                * constant KPROBE_ON_FTRACE_NOP into the remaining four
-                * bytes of the original instruction so that the kprobes
-                * handler can execute a nop, if it reaches this breakpoint.
-                */
-               ftrace_generate_kprobe_call_insn(&orig);
-               ftrace_generate_kprobe_nop_insn(&new);
        } else {
                /* Replace ftrace call with a nop. */
                ftrace_generate_call_insn(&orig, rec->ip);
@@ -142,21 +123,10 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 
        if (probe_kernel_read(&old, (void *) rec->ip, sizeof(old)))
                return -EFAULT;
-       if (is_kprobe_on_ftrace(&old)) {
-               /*
-                * If we find a breakpoint instruction, a kprobe has been
-                * placed at the beginning of the function. We write the
-                * constant KPROBE_ON_FTRACE_CALL into the remaining four
-                * bytes of the original instruction so that the kprobes
-                * handler can execute a brasl if it reaches this breakpoint.
-                */
-               ftrace_generate_kprobe_nop_insn(&orig);
-               ftrace_generate_kprobe_call_insn(&new);
-       } else {
-               /* Replace nop with an ftrace call. */
-               ftrace_generate_nop_insn(&orig);
-               ftrace_generate_call_insn(&new, rec->ip);
-       }
+       /* Replace nop with an ftrace call. */
+       ftrace_generate_nop_insn(&orig);
+       ftrace_generate_call_insn(&new, rec->ip);
+
        /* Verify that the to be replaced code matches what we expect. */
        if (memcmp(&orig, &old, sizeof(old)))
                return -EINVAL;
@@ -241,3 +211,45 @@ int ftrace_disable_ftrace_graph_caller(void)
 }
 
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+#ifdef CONFIG_KPROBES_ON_FTRACE
+void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
+               struct ftrace_ops *ops, struct pt_regs *regs)
+{
+       struct kprobe_ctlblk *kcb;
+       struct kprobe *p = get_kprobe((kprobe_opcode_t *)ip);
+
+       if (unlikely(!p) || kprobe_disabled(p))
+               return;
+
+       if (kprobe_running()) {
+               kprobes_inc_nmissed_count(p);
+               return;
+       }
+
+       __this_cpu_write(current_kprobe, p);
+
+       kcb = get_kprobe_ctlblk();
+       kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+
+       instruction_pointer_set(regs, ip);
+
+       if (!p->pre_handler || !p->pre_handler(p, regs)) {
+
+               instruction_pointer_set(regs, ip + MCOUNT_INSN_SIZE);
+
+               if (unlikely(p->post_handler)) {
+                       kcb->kprobe_status = KPROBE_HIT_SSDONE;
+                       p->post_handler(p, regs, 0);
+               }
+       }
+       __this_cpu_write(current_kprobe, NULL);
+}
+NOKPROBE_SYMBOL(kprobe_ftrace_handler);
+
+int arch_prepare_kprobe_ftrace(struct kprobe *p)
+{
+       p->ainsn.insn = NULL;
+       return 0;
+}
+#endif
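
For context, the generic kprobes core picks the ftrace path at
registration time, which is what makes arch_prepare_kprobe_ftrace()
above sufficient; roughly (simplified from kernel/kprobes.c, not
verbatim):

  /*
   * Simplified: a probe whose address is an ftrace call site
   * (kprobe_ftrace(p)) bypasses the arch breakpoint path entirely
   * and is prepared via arch_prepare_kprobe_ftrace() instead.
   */
  static int prepare_kprobe(struct kprobe *p)
  {
          if (!kprobe_ftrace(p))
                  return arch_prepare_kprobe(p);

          return arch_prepare_kprobe_ftrace(p);
  }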
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 6f1388391620afc84734e526c51ca8dc8319ee14..548d0ea9808d28a6f98a7541efcc8117c05a65b2 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -56,21 +56,10 @@ struct kprobe_insn_cache kprobe_s390_insn_slots = {
 
 static void copy_instruction(struct kprobe *p)
 {
-       unsigned long ip = (unsigned long) p->addr;
        s64 disp, new_disp;
        u64 addr, new_addr;
 
-       if (ftrace_location(ip) == ip) {
-               /*
-                * If kprobes patches the instruction that is morphed by
-                * ftrace make sure that kprobes always sees the branch
-                * "jg .+24" that skips the mcount block or the "brcl 0,0"
-                * in case of hotpatch.
-                */
-               ftrace_generate_nop_insn((struct ftrace_insn *)p->ainsn.insn);
-               p->ainsn.is_ftrace_insn = 1;
-       } else
-               memcpy(p->ainsn.insn, p->addr, insn_length(*p->addr >> 8));
+       memcpy(p->ainsn.insn, p->addr, insn_length(*p->addr >> 8));
        p->opcode = p->ainsn.insn[0];
        if (!probe_is_insn_relative_long(p->ainsn.insn))
                return;
@@ -136,11 +125,6 @@ int arch_prepare_kprobe(struct kprobe *p)
 }
 NOKPROBE_SYMBOL(arch_prepare_kprobe);
 
-int arch_check_ftrace_location(struct kprobe *p)
-{
-       return 0;
-}
-
 struct swap_insn_args {
        struct kprobe *p;
        unsigned int arm_kprobe : 1;
@@ -149,28 +133,11 @@ struct swap_insn_args {
 static int swap_instruction(void *data)
 {
        struct swap_insn_args *args = data;
-       struct ftrace_insn new_insn, *insn;
        struct kprobe *p = args->p;
-       size_t len;
-
-       new_insn.opc = args->arm_kprobe ? BREAKPOINT_INSTRUCTION : p->opcode;
-       len = sizeof(new_insn.opc);
-       if (!p->ainsn.is_ftrace_insn)
-               goto skip_ftrace;
-       len = sizeof(new_insn);
-       insn = (struct ftrace_insn *) p->addr;
-       if (args->arm_kprobe) {
-               if (is_ftrace_nop(insn))
-                       new_insn.disp = KPROBE_ON_FTRACE_NOP;
-               else
-                       new_insn.disp = KPROBE_ON_FTRACE_CALL;
-       } else {
-               ftrace_generate_call_insn(&new_insn, (unsigned long)p->addr);
-               if (insn->disp == KPROBE_ON_FTRACE_NOP)
-                       ftrace_generate_nop_insn(&new_insn);
-       }
-skip_ftrace:
-       s390_kernel_write(p->addr, &new_insn, len);
+       u16 opc;
+
+       opc = args->arm_kprobe ? BREAKPOINT_INSTRUCTION : p->opcode;
+       s390_kernel_write(p->addr, &opc, sizeof(opc));
        return 0;
 }
 NOKPROBE_SYMBOL(swap_instruction);
@@ -464,24 +431,6 @@ static void resume_execution(struct kprobe *p, struct pt_regs *regs)
        unsigned long ip = regs->psw.addr;
        int fixup = probe_get_fixup_type(p->ainsn.insn);
 
-       /* Check if the kprobes location is an enabled ftrace caller */
-       if (p->ainsn.is_ftrace_insn) {
-               struct ftrace_insn *insn = (struct ftrace_insn *) p->addr;
-               struct ftrace_insn call_insn;
-
-               ftrace_generate_call_insn(&call_insn, (unsigned long) p->addr);
-               /*
-                * A kprobe on an enabled ftrace call site actually single
-                * stepped an unconditional branch (ftrace nop equivalent).
-                * Now we need to fixup things and pretend that a brasl r0,...
-                * was executed instead.
-                */
-               if (insn->disp == KPROBE_ON_FTRACE_CALL) {
-                       ip += call_insn.disp * 2 - MCOUNT_INSN_SIZE;
-                       regs->gprs[0] = (unsigned long)p->addr + sizeof(*insn);
-               }
-       }
-
        if (fixup & FIXUP_PSW_NORMAL)
                ip += (unsigned long) p->addr - (unsigned long) p->ainsn.insn;
 
diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S
index f942341429b1cd30309da7e911683a62562d8ae1..7458dcfd64642cf4e2016bd8e1b4bf2ecfc4a716 100644
--- a/arch/s390/kernel/mcount.S
+++ b/arch/s390/kernel/mcount.S
@@ -42,6 +42,9 @@ ENTRY(ftrace_caller)
        .globl  ftrace_regs_caller
        .set    ftrace_regs_caller,ftrace_caller
        stg     %r14,(__SF_GPRS+8*8)(%r15)      # save traced function caller
+       lghi    %r14,0                          # save condition code
+       ipm     %r14                            # don't put any instructions
+       sllg    %r14,%r14,16                    # clobbering CC before this point
        lgr     %r1,%r15
 #if !(defined(CC_USING_HOTPATCH) || defined(CC_USING_NOP_MCOUNT))
        aghi    %r0,MCOUNT_RETURN_FIXUP
@@ -54,6 +57,9 @@ ENTRY(ftrace_caller)
        # allocate pt_regs and stack frame for ftrace_trace_function
        aghi    %r15,-STACK_FRAME_SIZE
        stg     %r1,(STACK_PTREGS_GPRS+15*8)(%r15)
+       stg     %r14,(STACK_PTREGS_PSW)(%r15)
+       lg      %r14,(__SF_GPRS+8*8)(%r1)       # restore original return address
+       stosm   (STACK_PTREGS_PSW)(%r15),0
        aghi    %r1,-TRACED_FUNC_FRAME_SIZE
        stg     %r1,__SF_BACKCHAIN(%r15)
        stg     %r0,(STACK_PTREGS_PSW+8)(%r15)