call_single_data_t      defer_csd;
 };
 
-static inline void input_queue_head_incr(struct softnet_data *sd)
-{
-#ifdef CONFIG_RPS
-       sd->input_queue_head++;
-#endif
-}
-
-static inline void input_queue_tail_incr_save(struct softnet_data *sd,
-                                             unsigned int *qtail)
-{
-#ifdef CONFIG_RPS
-       *qtail = ++sd->input_queue_tail;
-#endif
-}
-
 DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
 
 static inline int dev_recursion_level(void)
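The helpers removed above are not dropped outright: the callers changed below switch to rps_-prefixed replacements whose definitions are not part of this excerpt (they are added to include/net/rps.h elsewhere in the series). A minimal sketch of what they plausibly look like, assuming the tail helper returns the new tail so callers can publish it after dropping the backlog lock:

/* Approximate bodies, for orientation only; the real definitions live
 * in include/net/rps.h and may differ in detail.
 */
static inline u32 rps_input_queue_tail_incr(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
        return ++sd->input_queue_tail;
#else
        return 0;
#endif
}

static inline void rps_input_queue_tail_save(u32 *dest, u32 tail)
{
#ifdef CONFIG_RPS
        /* lockless readers pair with READ_ONCE(rflow->last_qtail) */
        WRITE_ONCE(*dest, tail);
#endif
}

static inline void rps_input_queue_head_incr(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
        sd->input_queue_head++;
#endif
}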
 
                if (unlikely(tcpu != next_cpu) &&
                    (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
                     ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
-                     rflow->last_qtail)) >= 0)) {
+                     READ_ONCE(rflow->last_qtail))) >= 0)) {
                        tcpu = next_cpu;
                        rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
                }
                cpu = READ_ONCE(rflow->cpu);
                if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
                    ((int)(per_cpu(softnet_data, cpu).input_queue_head -
-                          rflow->last_qtail) <
+                          READ_ONCE(rflow->last_qtail)) <
                     (int)(10 * flow_table->mask)))
                        expire = false;
        }
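The two hunks above only annotate existing reads: rflow->last_qtail is now stored outside the backlog lock (see enqueue_to_backlog() below), so these lockless readers need READ_ONCE(). The staleness test itself is unchanged; it relies on wrap-safe arithmetic on free-running u32 counters, roughly as in this illustration (flow_drained() is a made-up name, not part of the patch):

/* head and last_qtail are free-running u32 counters, so "has the backlog
 * head caught up with the tail recorded for this flow?" is answered with
 * a signed difference, which stays correct across wrap-around.
 */
static inline bool flow_drained(u32 queue_head, u32 last_qtail)
{
        return (int)(queue_head - last_qtail) >= 0;
}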
        unsigned long flags;
        unsigned int qlen;
        int max_backlog;
+       u32 tail;
 
        reason = SKB_DROP_REASON_DEV_READY;
        if (!netif_running(skb->dev))
                                napi_schedule_rps(sd);
                }
                __skb_queue_tail(&sd->input_pkt_queue, skb);
-               input_queue_tail_incr_save(sd, qtail);
+               tail = rps_input_queue_tail_incr(sd);
                backlog_unlock_irq_restore(sd, &flags);
+
+               /* save the tail outside of the critical section */
+               rps_input_queue_tail_save(qtail, tail);
                return NET_RX_SUCCESS;
        }
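The tail counter is still advanced while the backlog lock is held, but the store through qtail, which typically points at another CPU's rflow->last_qtail, is moved after the unlock to shorten the critical section. For context, the RPS branch of netif_rx_internal() passes that pointer roughly like this (approximate excerpt, unchanged by this patch):

        struct rps_dev_flow voidflow, *rflow = &voidflow;
        int cpu;

        rcu_read_lock();
        cpu = get_rps_cpu(skb->dev, skb, &rflow);
        if (cpu < 0)
                cpu = smp_processor_id();

        /* qtail ends up pointing at rflow->last_qtail, so
         * rps_input_queue_tail_save() is the WRITE_ONCE() side that the
         * READ_ONCE(rflow->last_qtail) readers above pair with.
         */
        ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
        rcu_read_unlock();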
 
                if (skb->dev->reg_state == NETREG_UNREGISTERING) {
                        __skb_unlink(skb, &sd->input_pkt_queue);
                        dev_kfree_skb_irq(skb);
-                       input_queue_head_incr(sd);
+                       rps_input_queue_head_incr(sd);
                }
        }
        backlog_unlock_irq_enable(sd);
                if (skb->dev->reg_state == NETREG_UNREGISTERING) {
                        __skb_unlink(skb, &sd->process_queue);
                        kfree_skb(skb);
-                       input_queue_head_incr(sd);
+                       rps_input_queue_head_incr(sd);
                }
        }
        local_bh_enable();
                        rcu_read_lock();
                        __netif_receive_skb(skb);
                        rcu_read_unlock();
-                       input_queue_head_incr(sd);
+                       rps_input_queue_head_incr(sd);
                        if (++work >= quota)
                                return work;
 
        /* Process offline CPU's input_pkt_queue */
        while ((skb = __skb_dequeue(&oldsd->process_queue))) {
                netif_rx(skb);
-               input_queue_head_incr(oldsd);
+               rps_input_queue_head_incr(oldsd);
        }
        while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
                netif_rx(skb);
-               input_queue_head_incr(oldsd);
+               rps_input_queue_head_incr(oldsd);
        }
 
        return 0;
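The renamed head-side helper is used the same way in every hunk above: whether an skb leaves the backlog through normal processing in process_backlog(), is dropped by flush_backlog() for an unregistering device, or is requeued by dev_cpu_dead() when a CPU goes offline, the owning softnet_data's head counter is advanced once per skb. A hedged restatement of the invariant being preserved:

/* Illustration, not part of the patch: each enqueue bumps the tail once
 * (rps_input_queue_tail_incr), each removal bumps the head once
 * (rps_input_queue_head_incr), so (int)(input_queue_head - last_qtail)
 * eventually reaches >= 0 and the RFS checks in get_rps_cpu() and
 * rps_may_expire_flow() can migrate or expire the flow.
 */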