can: rx-offload: add skb queue for use during ISR
author    Marc Kleine-Budde <mkl@pengutronix.de>
Wed, 9 Oct 2019 04:41:08 +0000 (06:41 +0200)
committer Marc Kleine-Budde <mkl@pengutronix.de>
Sun, 25 Jul 2021 09:36:25 +0000 (11:36 +0200)
Adding an skb to the skb_queue in rx-offload requires taking a lock.

This commit avoids that by adding an unlocked skb queue (skb_irq_queue)
that is filled during the ISR and spliced onto the locked skb_queue at
the end of the ISR. Taking the lock only once at that point should be
OK, as the hardware is empty by then and not about to overflow.
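
For illustration, a driver ISR now ends with a single call that takes
the lock, splices the queue and schedules NAPI. A minimal sketch (the
"foo" driver and its private struct are made up; only the
can_rx_offload_*() helpers are part of this change):

    #include <linux/interrupt.h>
    #include <linux/can/rx-offload.h>

    /* hypothetical driver private data, for illustration only */
    struct foo_priv {
            struct can_rx_offload offload;
    };

    static irqreturn_t foo_can_irq(int irq, void *dev_id)
    {
            struct foo_priv *priv = dev_id;

            /* Read the RX FIFO; skbs are collected on the unlocked
             * skb_irq_queue, skb_queue.lock is not taken here.
             */
            can_rx_offload_irq_offload_fifo(&priv->offload);

            /* Take skb_queue.lock exactly once: splice skb_irq_queue
             * onto skb_queue and schedule the NAPI poll.
             */
            can_rx_offload_irq_finish(&priv->offload);

            return IRQ_HANDLED;
    }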

Link: https://lore.kernel.org/r/20210724204745.736053-2-mkl@pengutronix.de
Tested-by: Oleksij Rempel <o.rempel@pengutronix.de>
Co-developed-by: Kurt Van Dijck <dev.kurt@vandijck-laurijssen.be>
Signed-off-by: Kurt Van Dijck <dev.kurt@vandijck-laurijssen.be>
Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
drivers/net/can/dev/rx-offload.c
drivers/net/can/flexcan.c
drivers/net/can/m_can/m_can.c
drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
drivers/net/can/ti_hecc.c
include/linux/can/rx-offload.h

diff --git a/drivers/net/can/dev/rx-offload.c b/drivers/net/can/dev/rx-offload.c
index ab2c1543786cff7af0af07ef3c372129edf57dd1..d0bdb6db3a5759841943dc5af20856687bcad086 100644
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /* Copyright (c) 2014      Protonic Holland,
  *                         David Jander
- * Copyright (C) 2014-2017 Pengutronix,
+ * Copyright (C) 2014-2021 Pengutronix,
  *                         Marc Kleine-Budde <kernel@pengutronix.de>
  */
 
@@ -174,10 +174,8 @@ can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n)
 int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload,
                                         u64 pending)
 {
-       struct sk_buff_head skb_queue;
        unsigned int i;
-
-       __skb_queue_head_init(&skb_queue);
+       int received = 0;
 
        for (i = offload->mb_first;
             can_rx_offload_le(offload, i, offload->mb_last);
@@ -191,26 +189,12 @@ int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload,
                if (IS_ERR_OR_NULL(skb))
                        continue;
 
-               __skb_queue_add_sort(&skb_queue, skb, can_rx_offload_compare);
-       }
-
-       if (!skb_queue_empty(&skb_queue)) {
-               unsigned long flags;
-               u32 queue_len;
-
-               spin_lock_irqsave(&offload->skb_queue.lock, flags);
-               skb_queue_splice_tail(&skb_queue, &offload->skb_queue);
-               spin_unlock_irqrestore(&offload->skb_queue.lock, flags);
-
-               queue_len = skb_queue_len(&offload->skb_queue);
-               if (queue_len > offload->skb_queue_len_max / 8)
-                       netdev_dbg(offload->dev, "%s: queue_len=%d\n",
-                                  __func__, queue_len);
-
-               can_rx_offload_schedule(offload);
+               __skb_queue_add_sort(&offload->skb_irq_queue, skb,
+                                    can_rx_offload_compare);
+               received++;
        }
 
-       return skb_queue_len(&skb_queue);
+       return received;
 }
 EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_timestamp);
 
@@ -226,13 +210,10 @@ int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload)
                if (!skb)
                        break;
 
-               skb_queue_tail(&offload->skb_queue, skb);
+               __skb_queue_tail(&offload->skb_irq_queue, skb);
                received++;
        }
 
-       if (received)
-               can_rx_offload_schedule(offload);
-
        return received;
 }
 EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_fifo);
@@ -241,7 +222,6 @@ int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
                                struct sk_buff *skb, u32 timestamp)
 {
        struct can_rx_offload_cb *cb;
-       unsigned long flags;
 
        if (skb_queue_len(&offload->skb_queue) >
            offload->skb_queue_len_max) {
@@ -252,11 +232,8 @@ int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
        cb = can_rx_offload_get_cb(skb);
        cb->timestamp = timestamp;
 
-       spin_lock_irqsave(&offload->skb_queue.lock, flags);
-       __skb_queue_add_sort(&offload->skb_queue, skb, can_rx_offload_compare);
-       spin_unlock_irqrestore(&offload->skb_queue.lock, flags);
-
-       can_rx_offload_schedule(offload);
+       __skb_queue_add_sort(&offload->skb_irq_queue, skb,
+                            can_rx_offload_compare);
 
        return 0;
 }
@@ -295,13 +272,33 @@ int can_rx_offload_queue_tail(struct can_rx_offload *offload,
                return -ENOBUFS;
        }
 
-       skb_queue_tail(&offload->skb_queue, skb);
-       can_rx_offload_schedule(offload);
+       __skb_queue_tail(&offload->skb_irq_queue, skb);
 
        return 0;
 }
 EXPORT_SYMBOL_GPL(can_rx_offload_queue_tail);
 
+void can_rx_offload_irq_finish(struct can_rx_offload *offload)
+{
+       unsigned long flags;
+       int queue_len;
+
+       if (skb_queue_empty_lockless(&offload->skb_irq_queue))
+               return;
+
+       spin_lock_irqsave(&offload->skb_queue.lock, flags);
+       skb_queue_splice_tail_init(&offload->skb_irq_queue, &offload->skb_queue);
+       spin_unlock_irqrestore(&offload->skb_queue.lock, flags);
+
+       queue_len = skb_queue_len(&offload->skb_queue);
+       if (queue_len > offload->skb_queue_len_max / 8)
+               netdev_dbg(offload->dev, "%s: queue_len=%d\n",
+                          __func__, queue_len);
+
+       can_rx_offload_schedule(offload);
+}
+EXPORT_SYMBOL_GPL(can_rx_offload_irq_finish);
+
 static int can_rx_offload_init_queue(struct net_device *dev,
                                     struct can_rx_offload *offload,
                                     unsigned int weight)
@@ -312,6 +309,7 @@ static int can_rx_offload_init_queue(struct net_device *dev,
        offload->skb_queue_len_max = 2 << fls(weight);
        offload->skb_queue_len_max *= 4;
        skb_queue_head_init(&offload->skb_queue);
+       __skb_queue_head_init(&offload->skb_irq_queue);
 
        netif_napi_add(dev, &offload->napi, can_rx_offload_napi_poll, weight);
 
@@ -373,5 +371,6 @@ void can_rx_offload_del(struct can_rx_offload *offload)
 {
        netif_napi_del(&offload->napi);
        skb_queue_purge(&offload->skb_queue);
+       __skb_queue_purge(&offload->skb_irq_queue);
 }
 EXPORT_SYMBOL_GPL(can_rx_offload_del);
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 57f3635ad8d7d0e7962e43d81d55e8b75a1bf6e9..d9dcf6a8412b2fe7076a82e6c8776411cbc0efc3 100644
@@ -1198,6 +1198,9 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
                }
        }
 
+       if (handled)
+               can_rx_offload_irq_finish(&priv->offload);
+
        return handled;
 }
 
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index bba2a449ac70d8634e2fb91c2533bcaec3ab9bd0..18461982f7a13910ad7d825c97bd8a92956c12df 100644
@@ -1058,6 +1058,9 @@ static irqreturn_t m_can_isr(int irq, void *dev_id)
                }
        }
 
+       if (cdev->is_peripheral)
+               can_rx_offload_irq_finish(&cdev->offload);
+
        return IRQ_HANDLED;
 }
 
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
index 47c3f408a799a85cf212744a21c3ec21cb240807..f3b267ec22e025a9c3b7315f70c5ced9bcbf3395 100644
@@ -2195,8 +2195,10 @@ static irqreturn_t mcp251xfd_irq(int irq, void *dev_id)
                        FIELD_GET(MCP251XFD_REG_INT_IE_MASK,
                                  priv->regs_status.intf);
 
-               if (!(intf_pending))
+               if (!(intf_pending)) {
+                       can_rx_offload_irq_finish(&priv->offload);
                        return handled;
+               }
 
                /* Some interrupts must be ACKed in the
                 * MCP251XFD_REG_INT register.
@@ -2296,6 +2298,8 @@ static irqreturn_t mcp251xfd_irq(int irq, void *dev_id)
        } while (1);
 
  out_fail:
+       can_rx_offload_irq_finish(&priv->offload);
+
        netdev_err(priv->ndev, "IRQ handler returned %d (intf=0x%08x).\n",
                   err, priv->regs_status.intf);
        mcp251xfd_dump(priv);
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 73245d8836a93019e39a6c96be8b634d3eb02252..353062ead98ffa0c53c8f0d7c01e6556df8b2770 100644
@@ -786,6 +786,8 @@ static irqreturn_t ti_hecc_interrupt(int irq, void *dev_id)
                int_status = hecc_read(priv, HECC_CANGIF0);
        }
 
+       can_rx_offload_irq_finish(&priv->offload);
+
        return IRQ_HANDLED;
 }
 
diff --git a/include/linux/can/rx-offload.h b/include/linux/can/rx-offload.h
index 40882df7105e89921d95dd2d3884dd9659878fcb..d71c938e17d03e9a4b6ec525bcc624e1fb3d27ec 100644
@@ -20,6 +20,7 @@ struct can_rx_offload {
                                        bool drop);
 
        struct sk_buff_head skb_queue;
+       struct sk_buff_head skb_irq_queue;
        u32 skb_queue_len_max;
 
        unsigned int mb_first;
@@ -48,6 +49,7 @@ unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
                                         unsigned int *frame_len_ptr);
 int can_rx_offload_queue_tail(struct can_rx_offload *offload,
                              struct sk_buff *skb);
+void can_rx_offload_irq_finish(struct can_rx_offload *offload);
 void can_rx_offload_del(struct can_rx_offload *offload);
 void can_rx_offload_enable(struct can_rx_offload *offload);