* DSI support for sm8150/sm8250
* Support for per-process GPU pagetables (finally!) for a6xx.
  There are still some iommu/arm-smmu changes required to
  enable, without which it will fall back to the current single
  pgtable state.  The first part (ie. what doesn't depend on
  drm side patches) is queued up for v5.10[1].
* DisplayPort support.  Userspace DP compliance tool support
  is already merged in IGT[2]
* The usual assortment of smaller fixes/cleanups
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Rob Clark <robdclark@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/CAF6AEGvqjuzH=Po_9EzzFsp2Xq3tqJUTKfsA2g09XY7_+6Ypfw@mail.gmail.com
                case MSM_SUBMIT_CMD_IB_TARGET_BUF:
                        break;
                case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
-                       if (priv->lastctx == ctx)
+                       if (priv->lastctx == submit->queue->ctx)
                                break;
 -                      /* fall-thru */
 +                      fallthrough;
                case MSM_SUBMIT_CMD_BUF:
                        /* copy commands into RB: */
                        obj = submit->bos[submit->cmd[i].idx].obj;
                case MSM_SUBMIT_CMD_IB_TARGET_BUF:
                        break;
                case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
-                       if (priv->lastctx == ctx)
+                       if (priv->lastctx == submit->queue->ctx)
                                break;
 -                      /* fall-thru */
 +                      fallthrough;
                case MSM_SUBMIT_CMD_BUF:
                        OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
                        OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
 
                case MSM_SUBMIT_CMD_IB_TARGET_BUF:
                        break;
                case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
-                       if (priv->lastctx == ctx)
+                       if (priv->lastctx == submit->queue->ctx)
                                break;
 -                      /* fall-thru */
 +                      fallthrough;
                case MSM_SUBMIT_CMD_BUF:
                        OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
                        OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
 
  {
        struct device *dev = msm_obj->base.dev->dev;
  
-       if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) {
-               dma_sync_sgtable_for_device(dev, msm_obj->sgt,
-                                           DMA_BIDIRECTIONAL);
-       } else {
-               dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
-       }
 -      dma_map_sg(dev, msm_obj->sgt->sgl,
 -              msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
++      dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
  }
  
  static void sync_for_cpu(struct msm_gem_object *msm_obj)
  {
        struct device *dev = msm_obj->base.dev->dev;
  
-       if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) {
-               dma_sync_sgtable_for_cpu(dev, msm_obj->sgt, DMA_BIDIRECTIONAL);
-       } else {
-               dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
-       }
 -      dma_unmap_sg(dev, msm_obj->sgt->sgl,
 -              msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
++      dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
  }
  
  /* allocate pages from VRAM carveout, used when no IOMMU: */
 
        struct msm_iommu *iommu = to_msm_iommu(mmu);
        size_t ret;
  
 -      ret = iommu_map_sg(iommu->domain, iova, sgt->sgl, sgt->nents, prot);
+       /* The arm-smmu driver expects the addresses to be sign extended */
+       if (iova & BIT_ULL(48))
+               iova |= GENMASK_ULL(63, 49);
+ 
 +      ret = iommu_map_sgtable(iommu->domain, iova, sgt, prot);
        WARN_ON(!ret);
  
        return (ret == len) ? 0 : -EINVAL;