diff --git a/include/linux/rcupdate_trace.h b/include/linux/rcupdate_trace.h

 #ifdef CONFIG_TASKS_TRACE_RCU
 
-void rcu_read_unlock_trace_special(struct task_struct *t, int nesting);
+void rcu_read_unlock_trace_special(struct task_struct *t);
 
 /**
  * rcu_read_lock_trace - mark beginning of RCU-trace read-side critical section
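/*
 * Hedged orientation note, not part of the patch: the docbook comment
 * above is truncated hunk context; the hunk below is the tail of
 * rcu_read_unlock_trace(), the static-inline unlock fast path in this
 * same header. The common case merely decrements ->trc_reader_nesting;
 * only the outermost unlock that sees ->trc_reader_special.s set falls
 * through to the slow path being modified here.
 */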
                WRITE_ONCE(t->trc_reader_nesting, nesting);
                return;  // We assume shallow reader nesting.
        }
-       rcu_read_unlock_trace_special(t, nesting);
+       WARN_ON_ONCE(nesting != 0);
+       rcu_read_unlock_trace_special(t);
 }
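/*
 * Hedged sketch, not part of the patch: a typical trace-RCU reader built
 * on the pair above, assuming kernel context. struct my_data,
 * my_global_ptr, and example_reader() are hypothetical names for
 * illustration; the lock/unlock/dereference calls are the real API.
 */
struct my_data { int x; };
static struct my_data __rcu *my_global_ptr;	/* hypothetical pointer */

static void example_reader(void)
{
	struct my_data *p;

	rcu_read_lock_trace();		/* ->trc_reader_nesting++ */
	p = rcu_dereference_check(my_global_ptr, rcu_read_lock_trace_held());
	if (p)
		pr_info("x=%d\n", p->x);	/* safe until the unlock */
	rcu_read_unlock_trace();	/* nesting--; the outermost unlock with
					 * ->trc_reader_special.s set calls
					 * rcu_read_unlock_trace_special() */
}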
 
 void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func);
 
diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h

 static DEFINE_IRQ_WORK(rcu_tasks_trace_iw, rcu_read_unlock_iw);
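/*
 * Hedged note, not part of the patch: rcu_tasks_trace_iw is the irq_work
 * queued by rcu_read_unlock_trace_special() below; its handler,
 * rcu_read_unlock_iw(), wakes the Tasks Trace RCU grace-period kthread
 * once the last reader blocking the current grace period has finished.
 */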
 
 /* If we are the last reader, wake up the grace-period kthread. */
-void rcu_read_unlock_trace_special(struct task_struct *t, int nesting)
+void rcu_read_unlock_trace_special(struct task_struct *t)
 {
        int nq = READ_ONCE(t->trc_reader_special.b.need_qs);
 
        // Update .need_qs before ->trc_reader_nesting for irq/NMI handlers.
        if (nq)
                WRITE_ONCE(t->trc_reader_special.b.need_qs, false);
-       WRITE_ONCE(t->trc_reader_nesting, nesting);
+       WRITE_ONCE(t->trc_reader_nesting, 0);
        if (nq && atomic_dec_and_test(&trc_n_readers_need_end))
                irq_work_queue(&rcu_tasks_trace_iw);
 }
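/*
 * Hedged note, not part of the patch: the store order above is load-bearing.
 * The irq/NMI-time check (trc_read_check_handler() in this file) reads
 * ->trc_reader_nesting to decide whether the task is still in a reader.
 * Clearing .need_qs before zeroing the nesting count ensures no handler can
 * observe "outside a reader" while .need_qs is still stale, so the
 * trc_n_readers_need_end count is decremented exactly once per reader.
 */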
        WARN_ON_ONCE(READ_ONCE(t->trc_reader_nesting));
        WRITE_ONCE(t->trc_reader_nesting, 0);
        if (WARN_ON_ONCE(READ_ONCE(t->trc_reader_special.b.need_qs)))
-               rcu_read_unlock_trace_special(t, 0);
+               rcu_read_unlock_trace_special(t);
 }
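/*
 * Hedged note, not part of the patch: the hunk above is from
 * exit_tasks_rcu_finish_trace(), the task-exit path. A nonzero
 * ->trc_reader_nesting or a set .need_qs here means the task is exiting
 * from within a trace-RCU reader; the state is reset and any pending
 * quiescent state reported before the task disappears.
 */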
 
 /**