 return ret;
 }
 
-static void msm_iommu_set_platform_dma(struct device *dev)
+static int msm_iommu_identity_attach(struct iommu_domain *identity_domain,
+                                    struct device *dev)
 {
        struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
-       struct msm_priv *priv = to_msm_priv(domain);
+       struct msm_priv *priv;
        unsigned long flags;
        struct msm_iommu_dev *iommu;
        struct msm_iommu_ctx_dev *master;
-       int ret;
+       int ret = 0;
+
+       if (domain == identity_domain || !domain)
+               return 0;
 
+       priv = to_msm_priv(domain);
        free_io_pgtable_ops(priv->iop);
 
        spin_lock_irqsave(&msm_iommu_lock, flags);
@@ ... @@
        }
 fail:
        spin_unlock_irqrestore(&msm_iommu_lock, flags);
+       return ret;
 }
 
+static struct iommu_domain_ops msm_iommu_identity_ops = {
+       .attach_dev = msm_iommu_identity_attach,
+};
+
+static struct iommu_domain msm_iommu_identity_domain = {
+       .type = IOMMU_DOMAIN_IDENTITY,
+       .ops = &msm_iommu_identity_ops,
+};
+
 static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova,
                         phys_addr_t pa, size_t pgsize, size_t pgcount,
                         int prot, gfp_t gfp, size_t *mapped)
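For context, a minimal sketch of how the new identity domain gets exercised; this assumes the generic attach path in drivers/iommu/iommu.c, and the helper name below is illustrative, not part of this patch. Once .identity_domain is wired into msm_iommu_ops (next hunk), the core attaches &msm_iommu_identity_domain instead of invoking the removed set_platform_dma_ops callback, which lands in msm_iommu_identity_attach() and tears down the old paging domain's io-pgtable ops and context banks as shown above:

    /*
     * Illustrative only -- roughly what the IOMMU core does when a
     * device is moved back to identity/passthrough translation.
     */
    static int example_attach_identity(struct device *dev)
    {
            struct iommu_domain *identity = &msm_iommu_identity_domain;

            /* ends up calling msm_iommu_identity_attach(identity, dev) */
            return identity->ops->attach_dev(identity, dev);
    }
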
@@ ... @@
 }
 
 static struct iommu_ops msm_iommu_ops = {
+       .identity_domain = &msm_iommu_identity_domain,
        .domain_alloc = msm_iommu_domain_alloc,
        .probe_device = msm_iommu_probe_device,
        .device_group = generic_device_group,
-       .set_platform_dma_ops = msm_iommu_set_platform_dma,
        .pgsize_bitmap = MSM_IOMMU_PGSIZES,
        .of_xlate = qcom_iommu_of_xlate,
        .default_domain_ops = &(const struct iommu_domain_ops) {