return -ENOSYS;
 }
 
-noinstr bool idtentry_enter_nmi(struct pt_regs *regs)
-{
-       bool irq_state = lockdep_hardirqs_enabled();
-
-       __nmi_enter();
-       lockdep_hardirqs_off(CALLER_ADDR0);
-       lockdep_hardirq_enter();
-       rcu_nmi_enter();
-
-       instrumentation_begin();
-       trace_hardirqs_off_finish();
-       ftrace_nmi_enter();
-       instrumentation_end();
-
-       return irq_state;
-}
-
-noinstr void idtentry_exit_nmi(struct pt_regs *regs, bool restore)
-{
-       instrumentation_begin();
-       ftrace_nmi_exit();
-       if (restore) {
-               trace_hardirqs_on_prepare();
-               lockdep_hardirqs_on_prepare(CALLER_ADDR0);
-       }
-       instrumentation_end();
-
-       rcu_nmi_exit();
-       lockdep_hardirq_exit();
-       if (restore)
-               lockdep_hardirqs_on(CALLER_ADDR0);
-       __nmi_exit();
-}
-
 #ifdef CONFIG_XEN_PV
 #ifndef CONFIG_PREEMPTION
 /*
 
 
 #include <asm/irq_stack.h>
 
-bool idtentry_enter_nmi(struct pt_regs *regs);
-void idtentry_exit_nmi(struct pt_regs *regs, bool irq_state);
-
 /**
  * DECLARE_IDTENTRY - Declare functions for simple IDT entry points
  *                   No error code pushed by hardware
 
 
 static __always_inline void exc_machine_check_kernel(struct pt_regs *regs)
 {
-       bool irq_state;
+       irqentry_state_t irq_state;
 
        WARN_ON_ONCE(user_mode(regs));
 
            mce_check_crashing_cpu())
                return;
 
-       irq_state = idtentry_enter_nmi(regs);
+       irq_state = irqentry_nmi_enter(regs);
        /*
         * The call targets are marked noinstr, but objtool can't figure
         * that out because it's an indirect call. Annotate it.
        if (regs->flags & X86_EFLAGS_IF)
                trace_hardirqs_on_prepare();
        instrumentation_end();
-       idtentry_exit_nmi(regs, irq_state);
+       irqentry_nmi_exit(regs, irq_state);
 }
 
 static __always_inline void exc_machine_check_user(struct pt_regs *regs)
 
 
 DEFINE_IDTENTRY_RAW(exc_nmi)
 {
-       bool irq_state;
+       irqentry_state_t irq_state;
 
        /*
         * Re-enable NMIs right here when running as an SEV-ES guest. This might
 
        this_cpu_write(nmi_dr7, local_db_save());
 
-       irq_state = idtentry_enter_nmi(regs);
+       irq_state = irqentry_nmi_enter(regs);
 
        inc_irq_stat(__nmi_count);
 
        if (!ignore_nmis)
                default_do_nmi(regs);
 
-       idtentry_exit_nmi(regs, irq_state);
+       irqentry_nmi_exit(regs, irq_state);
 
        local_db_restore(this_cpu_read(nmi_dr7));
 
 
        }
 #endif
 
-       idtentry_enter_nmi(regs);
+       irqentry_nmi_enter(regs);
        instrumentation_begin();
        notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);
 
                instrumentation_end();
                irqentry_exit_to_user_mode(regs);
        } else {
-               bool irq_state = idtentry_enter_nmi(regs);
+               irqentry_state_t irq_state = irqentry_nmi_enter(regs);
+
                instrumentation_begin();
                if (!do_int3(regs))
                        die("int3", regs, 0);
                instrumentation_end();
-               idtentry_exit_nmi(regs, irq_state);
+               irqentry_nmi_exit(regs, irq_state);
        }
 }
 
         * includes the entry stack is excluded for everything.
         */
        unsigned long dr7 = local_db_save();
-       bool irq_state = idtentry_enter_nmi(regs);
+       irqentry_state_t irq_state = irqentry_nmi_enter(regs);
        instrumentation_begin();
 
        /*
                regs->flags &= ~X86_EFLAGS_TF;
 out:
        instrumentation_end();
-       idtentry_exit_nmi(regs, irq_state);
+       irqentry_nmi_exit(regs, irq_state);
 
        local_db_restore(dr7);
 }
 
        /*
         * NB: We can't easily clear DR7 here because
-        * idtentry_exit_to_usermode() can invoke ptrace, schedule, access
+        * irqentry_exit_to_user_mode() can invoke ptrace, schedule, access
         * user memory, etc.  This means that a recursive #DB is possible.  If
         * this happens, that #DB will hit exc_debug_kernel() and clear DR7.
         * Since we're not on the IST stack right now, everything will be
 
 void irqentry_exit_to_user_mode(struct pt_regs *regs);
 
 #ifndef irqentry_state
+/**
+ * struct irqentry_state - Opaque object for exception state storage
+ * @exit_rcu: Used exclusively in the irqentry_*() calls; signals whether the
+ *            exit path has to invoke rcu_irq_exit().
+ * @lockdep: Used exclusively in the irqentry_nmi_*() calls; ensures that
+ *           lockdep state is restored correctly on exit from NMI.
+ *
+ * This opaque object is filled in by the irqentry_*_enter() functions and
+ * must be passed back into the corresponding irqentry_*_exit() functions
+ * when the exception is complete.
+ *
+ * Callers of irqentry_*_[enter|exit]() must consider this structure opaque
+ * and all members private.  Descriptions of the members are provided to aid in
+ * the maintenance of the irqentry_*() functions.
+ */
 typedef struct irqentry_state {
-       bool    exit_rcu;
+       union {
+               bool    exit_rcu;
+               bool    lockdep;
+       };
 } irqentry_state_t;
 #endif
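
For reference, a minimal sketch of how the opaque state is meant to be used (the handler name below is hypothetical and not part of this patch): the value returned by irqentry_enter(), which fills in @exit_rcu, is handed back unmodified to irqentry_exit(); an NMI-style handler does the same with the irqentry_nmi_*() pair, which uses @lockdep. Since any given handler only ever uses one of the two pairs, the two members can presumably share storage in the union.

#include <linux/entry-common.h>
#include <linux/instrumentation.h>

/* Hypothetical raw handler: the state object is opaque, enter/exit are paired. */
static noinstr void example_raw_handler(struct pt_regs *regs)
{
	irqentry_state_t state = irqentry_enter(regs);

	instrumentation_begin();
	/* instrumentable handler body goes here */
	instrumentation_end();

	irqentry_exit(regs, state);
}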
 
  */
 void noinstr irqentry_exit(struct pt_regs *regs, irqentry_state_t state);
 
+/**
+ * irqentry_nmi_enter - Handle NMI entry
+ * @regs:      Pointer to current's pt_regs
+ *
+ * Similar to irqentry_enter() but taking care of the NMI constraints.
+ */
+irqentry_state_t noinstr irqentry_nmi_enter(struct pt_regs *regs);
+
+/**
+ * irqentry_nmi_exit - Handle return from NMI handling
+ * @regs:      Pointer to pt_regs (NMI entry regs)
+ * @irq_state: Return value from matching call to irqentry_nmi_enter()
+ *
+ * Last action before returning to the low-level assembly code.
+ *
+ * Counterpart to irqentry_nmi_enter().
+ */
+void noinstr irqentry_nmi_exit(struct pt_regs *regs, irqentry_state_t irq_state);
+
 #endif
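
To make the pairing concrete, here is a hedged sketch of an NMI-style entry point built on the two new helpers; the handler name and body are hypothetical, but the shape matches the exc_machine_check_kernel(), exc_nmi(), #DF and #DB conversions above.

#include <linux/entry-common.h>
#include <linux/instrumentation.h>

/* Hypothetical NMI-style handler using the new common-code helpers. */
static noinstr void example_nmi_style_handler(struct pt_regs *regs)
{
	irqentry_state_t irq_state = irqentry_nmi_enter(regs);

	instrumentation_begin();
	/* instrumentable NMI handling (e.g. what default_do_nmi() does) */
	instrumentation_end();

	irqentry_nmi_exit(regs, irq_state);
}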
 
                        rcu_irq_exit();
        }
 }
+
+irqentry_state_t noinstr irqentry_nmi_enter(struct pt_regs *regs)
+{
+       irqentry_state_t irq_state;
+
+       irq_state.lockdep = lockdep_hardirqs_enabled();
+
+       __nmi_enter();
+       lockdep_hardirqs_off(CALLER_ADDR0);
+       lockdep_hardirq_enter();
+       rcu_nmi_enter();
+
+       instrumentation_begin();
+       trace_hardirqs_off_finish();
+       ftrace_nmi_enter();
+       instrumentation_end();
+
+       return irq_state;
+}
+
+void noinstr irqentry_nmi_exit(struct pt_regs *regs, irqentry_state_t irq_state)
+{
+       instrumentation_begin();
+       ftrace_nmi_exit();
+       if (irq_state.lockdep) {
+               trace_hardirqs_on_prepare();
+               lockdep_hardirqs_on_prepare(CALLER_ADDR0);
+       }
+       instrumentation_end();
+
+       rcu_nmi_exit();
+       lockdep_hardirq_exit();
+       if (irq_state.lockdep)
+               lockdep_hardirqs_on(CALLER_ADDR0);
+       __nmi_exit();
+}