spi: spi-qcom-qspi: Avoid clock setting if not needed
author: Douglas Anderson <dianders@chromium.org>
Thu, 9 Jul 2020 14:51:44 +0000 (07:51 -0700)
committer: Bjorn Andersson <bjorn.andersson@linaro.org>
Mon, 13 Jul 2020 23:17:13 +0000 (16:17 -0700)
As per recent changes to the spi-qcom-qspi, now when we set the clock
we'll call into the interconnect framework and also call the OPP API.
Those are expensive operations.  Let's avoid calling them if possible.
This has a big impact on getting transfer rates back up to where they
were (or maybe slightly better) before those patches landed.

Fixes: cff80645d6d3 ("spi: spi-qcom-qspi: Add interconnect support")
Signed-off-by: Douglas Anderson <dianders@chromium.org>
Acked-by: Mark Brown <broonie@kernel.org>
Reviewed-by: Rajendra Nayak <rnayak@codeaurora.org>
Tested-by: Rajendra Nayak <rnayak@codeaurora.org>
Reviewed-by: Mukesh Kumar Savaliya <msavaliy@codeaurora.org>
Reviewed-by: Akash Asthana <akashast@codeaurora.org>
Link: https://lore.kernel.org/r/20200709075113.v2.1.Ia7cb4f41ce93d37d0a764b47c8a453ce9e9c70ef@changeid
Signed-off-by: Bjorn Andersson <bjorn.andersson@linaro.org>
drivers/spi/spi-qcom-qspi.c

index 18a59aa23ef85a67818b8101900f222b7378d6cd..8fedc605ab7fdb29f12b781266eefb400eef4eaa 100644 (file)
@@ -144,6 +144,7 @@ struct qcom_qspi {
        struct icc_path *icc_path_cpu_to_qspi;
        struct opp_table *opp_table;
        bool has_opp_table;
+       unsigned long last_speed;
        /* Lock to protect data accessed by IRQs */
        spinlock_t lock;
 };
@@ -226,19 +227,13 @@ static void qcom_qspi_handle_err(struct spi_master *master,
        spin_unlock_irqrestore(&ctrl->lock, flags);
 }
 
-static int qcom_qspi_transfer_one(struct spi_master *master,
-                                 struct spi_device *slv,
-                                 struct spi_transfer *xfer)
+static int qcom_qspi_set_speed(struct qcom_qspi *ctrl, unsigned long speed_hz)
 {
-       struct qcom_qspi *ctrl = spi_master_get_devdata(master);
        int ret;
-       unsigned long speed_hz;
-       unsigned long flags;
        unsigned int avg_bw_cpu;
 
-       speed_hz = slv->max_speed_hz;
-       if (xfer->speed_hz)
-               speed_hz = xfer->speed_hz;
+       if (speed_hz == ctrl->last_speed)
+               return 0;
 
        /* In regular operation (SBL_EN=1) core must be 4x transfer clock */
        ret = dev_pm_opp_set_rate(ctrl->dev, speed_hz * 4);
@@ -259,6 +254,28 @@ static int qcom_qspi_transfer_one(struct spi_master *master,
                return ret;
        }
 
+       ctrl->last_speed = speed_hz;
+
+       return 0;
+}
+
+static int qcom_qspi_transfer_one(struct spi_master *master,
+                                 struct spi_device *slv,
+                                 struct spi_transfer *xfer)
+{
+       struct qcom_qspi *ctrl = spi_master_get_devdata(master);
+       int ret;
+       unsigned long speed_hz;
+       unsigned long flags;
+
+       speed_hz = slv->max_speed_hz;
+       if (xfer->speed_hz)
+               speed_hz = xfer->speed_hz;
+
+       ret = qcom_qspi_set_speed(ctrl, speed_hz);
+       if (ret)
+               return ret;
+
        spin_lock_irqsave(&ctrl->lock, flags);
 
        /* We are half duplex, so either rx or tx will be set */
@@ -602,7 +619,11 @@ static int __maybe_unused qcom_qspi_runtime_resume(struct device *dev)
                return ret;
        }
 
-       return clk_bulk_prepare_enable(QSPI_NUM_CLKS, ctrl->clks);
+       ret = clk_bulk_prepare_enable(QSPI_NUM_CLKS, ctrl->clks);
+       if (ret)
+               return ret;
+
+       return dev_pm_opp_set_rate(dev, ctrl->last_speed * 4);
 }
 
 static int __maybe_unused qcom_qspi_suspend(struct device *dev)