struct timer_base {
        raw_spinlock_t          lock;
        struct timer_list       *running_timer;
+#ifdef CONFIG_PREEMPT_RT
+       /*
+        * expiry_lock is held by the softirq across timer callback
+        * execution; timer_waiters counts tasks contending on it from
+        * del_timer_wait_running() (PREEMPT_RT only).
+        */
+       spinlock_t              expiry_lock;
+       atomic_t                timer_waiters;
+#endif
        unsigned long           clk;
        unsigned long           next_expiry;
        unsigned int            cpu;
 }
 EXPORT_SYMBOL(try_to_del_timer_sync);
 
-#ifdef CONFIG_SMP
+#ifdef CONFIG_PREEMPT_RT
+/* One-time setup of the per-base expiry lock during boot. */
+static __init void timer_base_init_expiry_lock(struct timer_base *base)
+{
+       spin_lock_init(&base->expiry_lock);
+}
+
+/* Taken by the timer softirq for the duration of a timer-expiry batch. */
+static inline void timer_base_lock_expiry(struct timer_base *base)
+{
+       spin_lock(&base->expiry_lock);
+}
+
+static inline void timer_base_unlock_expiry(struct timer_base *base)
+{
+       spin_unlock(&base->expiry_lock);
+}
+
+/*
+ * The counterpart to del_timer_wait_running().
+ *
+ * If there is a waiter for base->expiry_lock, then it was waiting for the
+ * timer callback to finish. Drop expiry_lock and reacquire it. That allows
+ * the waiter to acquire the lock and make progress.
+ */
+static void timer_sync_wait_running(struct timer_base *base)
+{
+       if (atomic_read(&base->timer_waiters)) {
+               spin_unlock(&base->expiry_lock);
+               spin_lock(&base->expiry_lock);
+       }
+}
+
+/*
+ * This function is called on PREEMPT_RT kernels when the fast path
+ * deletion of a timer failed because the timer callback function was
+ * running.
+ *
+ * This prevents priority inversion, if the softirq thread on a remote CPU
+ * got preempted, and it prevents a live lock when the task which tries to
+ * delete a timer preempted the softirq thread running the timer callback
+ * function.
+ */
+static void del_timer_wait_running(struct timer_list *timer)
+{
+       u32 tf;
+
+       tf = READ_ONCE(timer->flags);
+       /*
+        * NOTE(review): presumably there is no stable base to wait on
+        * while TIMER_MIGRATING is set, so the wait is skipped — confirm
+        * against the migration path.
+        */
+       if (!(tf & TIMER_MIGRATING)) {
+               struct timer_base *base = get_timer_base(tf);
+
+               /*
+                * Mark the base as contended and grab the expiry lock,
+                * which is held by the softirq across the timer
+                * callback. Drop the lock immediately so the softirq can
+                * expire the next timer. In theory the timer could already
+                * be running again, but that's more than unlikely and just
+                * causes another wait loop.
+                */
+               atomic_inc(&base->timer_waiters);
+               spin_lock_bh(&base->expiry_lock);
+               atomic_dec(&base->timer_waiters);
+               spin_unlock_bh(&base->expiry_lock);
+       }
+}
+#else
+/* On !PREEMPT_RT kernels the expiry-lock machinery compiles away. */
+static inline void timer_base_init_expiry_lock(struct timer_base *base) { }
+static inline void timer_base_lock_expiry(struct timer_base *base) { }
+static inline void timer_base_unlock_expiry(struct timer_base *base) { }
+static inline void timer_sync_wait_running(struct timer_base *base) { }
+static inline void del_timer_wait_running(struct timer_list *timer) { }
+#endif
+
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
 /**
  * del_timer_sync - deactivate a timer and wait for the handler to finish.
  * @timer: the timer to be deactivated
  */
 int del_timer_sync(struct timer_list *timer)
 {
+       int ret;
+
 #ifdef CONFIG_LOCKDEP
        unsigned long flags;
 
         * could lead to deadlock.
         */
        WARN_ON(in_irq() && !(timer->flags & TIMER_IRQSAFE));
-       for (;;) {
-               int ret = try_to_del_timer_sync(timer);
-               if (ret >= 0)
-                       return ret;
-               cpu_relax();
-       }
+
+       /*
+        * A negative return means the timer callback is currently
+        * running. On PREEMPT_RT, del_timer_wait_running() blocks on the
+        * base's expiry lock so the softirq can finish the callback,
+        * avoiding priority inversion; elsewhere it is a no-op and this
+        * degenerates to the previous cpu_relax() spin loop.
+        */
+       do {
+               ret = try_to_del_timer_sync(timer);
+
+               if (unlikely(ret < 0)) {
+                       del_timer_wait_running(timer);
+                       cpu_relax();
+               }
+       } while (ret < 0);
+
+       return ret;
 }
 EXPORT_SYMBOL(del_timer_sync);
 #endif
                if (timer->flags & TIMER_IRQSAFE) {
                        raw_spin_unlock(&base->lock);
                        call_timer_fn(timer, fn, baseclk);
+                       base->running_timer = NULL;
                        raw_spin_lock(&base->lock);
                } else {
                        raw_spin_unlock_irq(&base->lock);
                        call_timer_fn(timer, fn, baseclk);
+                       base->running_timer = NULL;
+                       timer_sync_wait_running(base);
                        raw_spin_lock_irq(&base->lock);
                }
        }
        if (!time_after_eq(jiffies, base->clk))
                return;
 
+       timer_base_lock_expiry(base);
        raw_spin_lock_irq(&base->lock);
 
        /*
                while (levels--)
                        expire_timers(base, heads + levels);
        }
-       base->running_timer = NULL;
        raw_spin_unlock_irq(&base->lock);
+       timer_base_unlock_expiry(base);
 }
 
 /*
                base->cpu = cpu;
                raw_spin_lock_init(&base->lock);
                base->clk = jiffies;
+               timer_base_init_expiry_lock(base);
        }
 }