#endif
 
-static inline void rps_lock_irqsave(struct softnet_data *sd,
-                                   unsigned long *flags)
+static inline void backlog_lock_irq_save(struct softnet_data *sd,
+                                        unsigned long *flags)
 {
        if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
                spin_lock_irqsave(&sd->input_pkt_queue.lock, *flags);
        else
                local_irq_save(*flags);
 }
 
-static inline void rps_lock_irq_disable(struct softnet_data *sd)
+static inline void backlog_lock_irq_disable(struct softnet_data *sd)
 {
        if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
                spin_lock_irq(&sd->input_pkt_queue.lock);
        else
                local_irq_disable();
 }
 
-static inline void rps_unlock_irq_restore(struct softnet_data *sd,
-                                         unsigned long *flags)
+static inline void backlog_unlock_irq_restore(struct softnet_data *sd,
+                                             unsigned long *flags)
 {
        if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
                spin_unlock_irqrestore(&sd->input_pkt_queue.lock, *flags);
        else
                local_irq_restore(*flags);
 }
 
-static inline void rps_unlock_irq_enable(struct softnet_data *sd)
+static inline void backlog_unlock_irq_enable(struct softnet_data *sd)
 {
        if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
                spin_unlock_irq(&sd->input_pkt_queue.lock);
        else
                local_irq_enable();
 }
 
@@ ... @@
        unsigned long flags;
 
        if (use_backlog_threads()) {
-               rps_lock_irqsave(sd, &flags);
+               backlog_lock_irq_save(sd, &flags);
 
                if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state))
                        __napi_schedule_irqoff(&sd->backlog);
 
-               rps_unlock_irq_restore(sd, &flags);
+               backlog_unlock_irq_restore(sd, &flags);
 
        } else if (!cmpxchg(&sd->defer_ipi_scheduled, 0, 1)) {
                smp_call_function_single_async(cpu, &sd->defer_csd);
@@ ... @@
        reason = SKB_DROP_REASON_NOT_SPECIFIED;
        sd = &per_cpu(softnet_data, cpu);
 
-       rps_lock_irqsave(sd, &flags);
+       backlog_lock_irq_save(sd, &flags);
        if (!netif_running(skb->dev))
                goto drop;
        qlen = skb_queue_len(&sd->input_pkt_queue);
 enqueue:
                        __skb_queue_tail(&sd->input_pkt_queue, skb);
                        input_queue_tail_incr_save(sd, qtail);
-                       rps_unlock_irq_restore(sd, &flags);
+                       backlog_unlock_irq_restore(sd, &flags);
                        return NET_RX_SUCCESS;
                }
 
 
 drop:
        sd->dropped++;
-       rps_unlock_irq_restore(sd, &flags);
+       backlog_unlock_irq_restore(sd, &flags);
 
        dev_core_stats_rx_dropped_inc(skb->dev);
        kfree_skb_reason(skb, reason);
@@ ... @@
        local_bh_disable();
        sd = this_cpu_ptr(&softnet_data);
 
-       rps_lock_irq_disable(sd);
+       backlog_lock_irq_disable(sd);
        skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
                if (skb->dev->reg_state == NETREG_UNREGISTERING) {
                        __skb_unlink(skb, &sd->input_pkt_queue);
                        input_queue_head_incr(sd);
                }
        }
-       rps_unlock_irq_enable(sd);
+       backlog_unlock_irq_enable(sd);
 
        skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
                if (skb->dev->reg_state == NETREG_UNREGISTERING) {
@@ ... @@
        struct softnet_data *sd = &per_cpu(softnet_data, cpu);
        bool do_flush;
 
-       rps_lock_irq_disable(sd);
+       backlog_lock_irq_disable(sd);
 
        /* as insertion into process_queue happens with the rps lock held,
         * process_queue access may race only with dequeue
         */
        do_flush = !skb_queue_empty(&sd->input_pkt_queue) ||
                   !skb_queue_empty_lockless(&sd->process_queue);
-       rps_unlock_irq_enable(sd);
+       backlog_unlock_irq_enable(sd);
 
        return do_flush;
 #endif
 
@@ ... @@
                }
 
-               rps_lock_irq_disable(sd);
+               backlog_lock_irq_disable(sd);
                if (skb_queue_empty(&sd->input_pkt_queue)) {
                        /*
                         * Inline a custom version of __napi_complete().
                        skb_queue_splice_tail_init(&sd->input_pkt_queue,
                                                   &sd->process_queue);
                }
-               rps_unlock_irq_enable(sd);
+               backlog_unlock_irq_enable(sd);
        }
 
        return work;