x86/xen: Make irq_enable() noinstr
author Peter Zijlstra <peterz@infradead.org>
Thu, 24 Jun 2021 09:41:19 +0000 (11:41 +0200)
committer Peter Zijlstra <peterz@infradead.org>
Fri, 17 Sep 2021 11:17:12 +0000 (13:17 +0200)
objtool's noinstr validation complains: the irq_enable() implementations reachable through pv_ops[32] are not noinstr, so a call from noinstr code such as lock_is_held_type() escapes .noinstr.text:

vmlinux.o: warning: objtool: pv_ops[32]: native_irq_enable
vmlinux.o: warning: objtool: pv_ops[32]: __raw_callee_save_xen_irq_enable
vmlinux.o: warning: objtool: pv_ops[32]: xen_irq_enable_direct
vmlinux.o: warning: objtool: lock_is_held_type()+0xfe: call to pv_ops[32]() leaves .noinstr.text section

Fix this by marking all three implementations noinstr and moving them into .noinstr.text.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Juergen Gross <jgross@suse.com>
Link: https://lore.kernel.org/r/20210624095148.872254932@infradead.org
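
For context, the rule objtool enforces here is that code placed in .noinstr.text must never call out into instrumentable .text. A minimal sketch of a violation, assuming objtool's vmlinux noinstr validation is enabled (both function names below are made up, not from this patch):

	#include <linux/compiler_types.h>	/* noinstr, noinline */

	static noinline void plain_helper(void)	/* emitted into .text */
	{
	}

	noinstr void entry_func(void)		/* emitted into .noinstr.text */
	{
		/* objtool: call to plain_helper() leaves .noinstr.text section */
		plain_helper();
	}
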
arch/x86/kernel/paravirt.c
arch/x86/xen/irq.c
arch/x86/xen/xen-asm.S

diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index cdaf8624ea1b451606d7c1db25299fbd7490825c..75f0d241752bc3d9629e911fc80e33c39f0919f4 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -238,6 +238,11 @@ static noinstr void pv_native_set_debugreg(int regno, unsigned long val)
 {
        native_set_debugreg(regno, val);
 }
+
+static noinstr void pv_native_irq_enable(void)
+{
+       native_irq_enable();
+}
 #endif
 
 enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
@@ -302,7 +307,7 @@ struct paravirt_patch_template pv_ops = {
        /* Irq ops. */
        .irq.save_fl            = __PV_IS_CALLEE_SAVE(native_save_fl),
        .irq.irq_disable        = __PV_IS_CALLEE_SAVE(native_irq_disable),
-       .irq.irq_enable         = __PV_IS_CALLEE_SAVE(native_irq_enable),
+       .irq.irq_enable         = __PV_IS_CALLEE_SAVE(pv_native_irq_enable),
        .irq.safe_halt          = native_safe_halt,
        .irq.halt               = native_halt,
 #endif /* CONFIG_PARAVIRT_XXL */
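
The wrapper is needed because the underlying primitive is an __always_inline helper rather than a symbol with a section of its own; referencing it from the pv_ops initializer left the compiler to emit its out-of-line copy in plain .text, hence the first warning above. Slightly simplified, the helper being wrapped (from arch/x86/include/asm/irqflags.h) is just the STI instruction, and it folds straight into pv_native_irq_enable():

	static __always_inline void native_irq_enable(void)
	{
		/* STI: set the interrupt flag, enabling maskable interrupts */
		asm volatile("sti" : : : "memory");
	}
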
diff --git a/arch/x86/xen/irq.c b/arch/x86/xen/irq.c
index 9c71f43ba30324219830fa26c5ca674a882fa121..7fb4cf28879e3804d9da15f412134fa6ab3101a6 100644
--- a/arch/x86/xen/irq.c
+++ b/arch/x86/xen/irq.c
@@ -53,7 +53,7 @@ asmlinkage __visible void xen_irq_disable(void)
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_irq_disable);
 
-asmlinkage __visible void xen_irq_enable(void)
+asmlinkage __visible noinstr void xen_irq_enable(void)
 {
        struct vcpu_info *vcpu;
 
@@ -76,7 +76,7 @@ asmlinkage __visible void xen_irq_enable(void)
 
        preempt_enable();
 }
-PV_CALLEE_SAVE_REGS_THUNK(xen_irq_enable);
+__PV_CALLEE_SAVE_REGS_THUNK(xen_irq_enable, ".noinstr.text");
 
 static void xen_safe_halt(void)
 {
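
The two-argument __PV_CALLEE_SAVE_REGS_THUNK() places the generated register-saving stub in a caller-chosen section, here .noinstr.text, so the stub is as call-safe as the now-noinstr xen_irq_enable() it wraps. An abridged sketch of the generator's shape (the real macro lives in arch/x86/include/asm/paravirt.h; the stub body is elided here):

	/*
	 * Emits an asm stub, __raw_callee_save_<func>, that pushes all
	 * caller-clobbered registers, calls <func>, and pops them again,
	 * so pv call sites may assume the op clobbers nothing.
	 */
	#define __PV_CALLEE_SAVE_REGS_THUNK(func, section)		\
		extern typeof(func) __raw_callee_save_##func;		\
		asm(".pushsection " section ", \"ax\";"			\
		    /* ...define __raw_callee_save_<func>: save regs,	\
		       call func, restore regs, ret (elided)... */	\
		    ".popsection")

	/* The classic one-argument form simply defaults to .text: */
	#define PV_CALLEE_SAVE_REGS_THUNK(func)				\
		__PV_CALLEE_SAVE_REGS_THUNK(func, ".text")
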
diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S
index 0883e39fee2e7755c284e1a350249a3b9ef93ce0..2225195288906290a19bcb5d52bfc1bf06d4786d 100644
--- a/arch/x86/xen/xen-asm.S
+++ b/arch/x86/xen/xen-asm.S
@@ -21,33 +21,6 @@
 #include <linux/init.h>
 #include <linux/linkage.h>
 
-/*
- * Enable events.  This clears the event mask and tests the pending
- * event status with one and operation.  If there are pending events,
- * then enter the hypervisor to get them handled.
- */
-SYM_FUNC_START(xen_irq_enable_direct)
-       FRAME_BEGIN
-       /* Unmask events */
-       movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
-
-       /*
-        * Preempt here doesn't matter because that will deal with any
-        * pending interrupts.  The pending check may end up being run
-        * on the wrong CPU, but that doesn't hurt.
-        */
-
-       /* Test for pending */
-       testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
-       jz 1f
-
-       call check_events
-1:
-       FRAME_END
-       ret
-SYM_FUNC_END(xen_irq_enable_direct)
-
-
 /*
  * Disabling events is simply a matter of making the event mask
  * non-zero.
@@ -57,6 +30,8 @@ SYM_FUNC_START(xen_irq_disable_direct)
        ret
 SYM_FUNC_END(xen_irq_disable_direct)
 
+.pushsection .noinstr.text, "ax"
+
 /*
  * Force an event check by making a hypercall, but preserve regs
  * before making the call.
@@ -86,7 +61,32 @@ SYM_FUNC_START(check_events)
        ret
 SYM_FUNC_END(check_events)
 
-.pushsection .noinstr.text, "ax"
+/*
+ * Enable events.  This clears the event mask and tests the pending
+ * event status with one and operation.  If there are pending events,
+ * then enter the hypervisor to get them handled.
+ */
+SYM_FUNC_START(xen_irq_enable_direct)
+       FRAME_BEGIN
+       /* Unmask events */
+       movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
+
+       /*
+        * Preempt here doesn't matter because that will deal with any
+        * pending interrupts.  The pending check may end up being run
+        * on the wrong CPU, but that doesn't hurt.
+        */
+
+       /* Test for pending */
+       testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
+       jz 1f
+
+       call check_events
+1:
+       FRAME_END
+       ret
+SYM_FUNC_END(xen_irq_enable_direct)
+
 /*
  * (xen_)save_fl is used to get the current interrupt enable status.
  * Callers expect the status to be in X86_EFLAGS_IF, and other bits
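
The net effect of the asm shuffle: xen_irq_enable_direct calls check_events, so the .pushsection .noinstr.text marker moves up to cover check_events and xen_irq_enable_direct is relocated below it. Had the enable path stayed in plain .text, or check_events been left outside the section, the same objtool warning would simply reappear. Rendered as a hypothetical C sketch (stand-in names, not the kernel's code), the resulting layout is:

	/* Both ends of the call sit in .noinstr.text, so the call never
	 * leaves the section. */
	static noinstr void check_events_stub(void)	/* stand-in for check_events */
	{
		/* hypercall so Xen delivers any events that are pending */
	}

	noinstr void irq_enable_stub(void)	/* stand-in for xen_irq_enable_direct */
	{
		/* unmask first, then catch up on anything that arrived */
		check_events_stub();
	}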