bus: mhi: Add inbound buffers allocation flag
authorLoic Poulain <loic.poulain@linaro.org>
Mon, 2 Aug 2021 05:12:46 +0000 (10:42 +0530)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 5 Aug 2021 12:28:14 +0000 (14:28 +0200)
Currently, the MHI controller driver defines which channels should
have their inbound buffers allocated and queued. Ideally, though, this
decision should be made by the MHI device driver instead, since it is
the one that actually deals with those buffers.

Add a flags parameter to mhi_prepare_for_transfer(), allowing the
caller to specify whether inbound buffers have to be allocated and
queued by the MHI stack.
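
For illustration, this is how the callers updated by this patch use
the new parameter (a sketch mirroring the hunks below, not additional
code):

    /* Client relying on the MHI core to allocate and queue its
     * inbound (DL) buffers, e.g. qrtr:
     */
    ret = mhi_prepare_for_transfer(mhi_dev, MHI_CH_INBOUND_ALLOC_BUFS);

    /* Client managing its own inbound buffers, e.g. mhi_net and
     * mhi_wwan_ctrl, passes no flags:
     */
    ret = mhi_prepare_for_transfer(mhi_dev, 0);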

Keep the auto_queue flag for now, but it should be removed at some point.

Link: https://lore.kernel.org/r/1624566520-20406-1-git-send-email-loic.poulain@linaro.org
Tested-by: Bhaumik Bhatt <bbhatt@codeaurora.org>
Reviewed-by: Bhaumik Bhatt <bbhatt@codeaurora.org>
Reviewed-by: Hemant Kumar <hemantk@codeaurora.org>
Reviewed-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
Acked-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Loic Poulain <loic.poulain@linaro.org>
Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
Link: https://lore.kernel.org/r/20210802051255.5771-2-manivannan.sadhasivam@linaro.org
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/bus/mhi/core/internal.h
drivers/bus/mhi/core/main.c
drivers/net/mhi/net.c
drivers/net/wwan/mhi_wwan_ctrl.c
include/linux/mhi.h
net/qrtr/mhi.c

index 5b9ea66b92dc32fb7dce14f4a4db5ea7419839af..bc239a11aa69873420d93b4cf5f175591e2a20b3 100644
@@ -682,7 +682,7 @@ void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
                      struct image_info *img_info);
 void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl);
 int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
-                       struct mhi_chan *mhi_chan);
+                       struct mhi_chan *mhi_chan, unsigned int flags);
 int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
                       struct mhi_chan *mhi_chan);
 void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
index fc9196f11cb7dbdc68756651944c42a8eded8764..26bbc812121d29239e1b8b90d88b1634f3f15f2f 100644
@@ -1430,7 +1430,7 @@ exit_unprepare_channel:
 }
 
 int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
-                       struct mhi_chan *mhi_chan)
+                       struct mhi_chan *mhi_chan, unsigned int flags)
 {
        int ret = 0;
        struct device *dev = &mhi_chan->mhi_dev->dev;
@@ -1455,6 +1455,9 @@ int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
        if (ret)
                goto error_pm_state;
 
+       if (mhi_chan->dir == DMA_FROM_DEVICE)
+               mhi_chan->pre_alloc = !!(flags & MHI_CH_INBOUND_ALLOC_BUFS);
+
        /* Pre-allocate buffer for xfer ring */
        if (mhi_chan->pre_alloc) {
                int nr_el = get_nr_avail_ring_elements(mhi_cntrl,
@@ -1610,7 +1613,7 @@ void mhi_reset_chan(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan)
 }
 
 /* Move channel to start state */
-int mhi_prepare_for_transfer(struct mhi_device *mhi_dev)
+int mhi_prepare_for_transfer(struct mhi_device *mhi_dev, unsigned int flags)
 {
        int ret, dir;
        struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
@@ -1621,7 +1624,7 @@ int mhi_prepare_for_transfer(struct mhi_device *mhi_dev)
                if (!mhi_chan)
                        continue;
 
-               ret = mhi_prepare_channel(mhi_cntrl, mhi_chan);
+               ret = mhi_prepare_channel(mhi_cntrl, mhi_chan, flags);
                if (ret)
                        goto error_open_chan;
        }
index e60e38c1f09d317750c7fd6f0bdfa2ec9ca33513..11be6bcdd551a0e16fcb9d24202633de22f1bc7b 100644
@@ -335,7 +335,7 @@ static int mhi_net_newlink(void *ctxt, struct net_device *ndev, u32 if_id,
        u64_stats_init(&mhi_netdev->stats.tx_syncp);
 
        /* Start MHI channels */
-       err = mhi_prepare_for_transfer(mhi_dev);
+       err = mhi_prepare_for_transfer(mhi_dev, 0);
        if (err)
                goto out_err;
 
index 1bc6b69aa530269630ad3af3de026be1d149d775..1e18420ce4045ca904583d5a562d0e8d6c9c4131 100644
@@ -110,7 +110,7 @@ static int mhi_wwan_ctrl_start(struct wwan_port *port)
        int ret;
 
        /* Start mhi device's channel(s) */
-       ret = mhi_prepare_for_transfer(mhiwwan->mhi_dev);
+       ret = mhi_prepare_for_transfer(mhiwwan->mhi_dev, 0);
        if (ret)
                return ret;
 
index 944aa3aa30355fe33fba8133cc8e0a2ad1653c9b..5e08468854db4998089c0cc6522baecf02f84c64 100644
@@ -719,8 +719,13 @@ void mhi_device_put(struct mhi_device *mhi_dev);
  *                            host and device execution environments match and
  *                            channels are in a DISABLED state.
  * @mhi_dev: Device associated with the channels
+ * @flags: MHI channel flags
  */
-int mhi_prepare_for_transfer(struct mhi_device *mhi_dev);
+int mhi_prepare_for_transfer(struct mhi_device *mhi_dev,
+                            unsigned int flags);
+
+/* Automatically allocate and queue inbound buffers */
+#define MHI_CH_INBOUND_ALLOC_BUFS BIT(0)
 
 /**
  * mhi_unprepare_from_transfer - Reset UL and DL channels for data transfer.
index fa611678af05260b313d12644a0ea12c49c43f0c..29b4fa3b72abfeb7fbd654f32ca88db615b8666b 100644
@@ -79,7 +79,7 @@ static int qcom_mhi_qrtr_probe(struct mhi_device *mhi_dev,
        int rc;
 
        /* start channels */
-       rc = mhi_prepare_for_transfer(mhi_dev);
+       rc = mhi_prepare_for_transfer(mhi_dev, MHI_CH_INBOUND_ALLOC_BUFS);
        if (rc)
                return rc;