         * Configure the DMA segment size to make sure we get contiguous IOVA
         * when importing PRIME buffers.
         */
-       if (!dma_dev->dma_parms) {
-               private->dma_parms_allocated = true;
-               dma_dev->dma_parms =
-                       devm_kzalloc(drm->dev, sizeof(*dma_dev->dma_parms),
-                                    GFP_KERNEL);
-       }
-       if (!dma_dev->dma_parms) {
-               ret = -ENOMEM;
-               goto put_dma_dev;
-       }
-
-       ret = dma_set_max_seg_size(dma_dev, (unsigned int)DMA_BIT_MASK(32));
+       ret = dma_set_max_seg_size(dma_dev, UINT_MAX);
        if (ret) {
                dev_err(dma_dev, "Failed to set DMA segment size\n");
-               goto err_unset_dma_parms;
+               goto err_component_unbind;
        }
 
        /*
         * We don't use the drm_irq_install() helpers provided by the DRM
         * core, so we need to set this manually in order to allow the
         * DRM_IOCTL_WAIT_VBLANK to operate correctly.
         */
        drm->irq_enabled = true;
        ret = drm_vblank_init(drm, MAX_CRTC);
        if (ret < 0)
-               goto err_unset_dma_parms;
+               goto err_component_unbind;
 
        drm_kms_helper_poll_init(drm);
        drm_mode_config_reset(drm);
 
        return 0;
 
-err_unset_dma_parms:
-       if (private->dma_parms_allocated)
-               dma_dev->dma_parms = NULL;
-put_dma_dev:
-       put_device(private->dma_dev);
 err_component_unbind:
        component_unbind_all(drm->dev, drm);
put_mutex_dev:
        put_device(private->mutex_dev);
        return ret;
 }
 
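For context, a minimal sketch of why the allocation can be dropped (the my_bind() helper below is hypothetical, not the driver's code): the driver core now initializes dev->dma_parms for platform devices, so a driver only has to pick the segment size. UINT_MAX and (unsigned int)DMA_BIT_MASK(32) are the same value (0xffffffff); the new spelling just drops the cast. On kernels of this vintage dma_set_max_seg_size() returns 0 on success or -EIO when dma_parms is missing.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/limits.h>

/*
 * Hypothetical bind step. For a platform device the driver core has
 * already pointed dev->dma_parms at storage it owns, so there is no
 * need to devm_kzalloc() a struct device_dma_parameters first.
 */
static int my_bind(struct device *dev)
{
        int ret;

        /*
         * Allow arbitrarily large DMA segments so imported PRIME
         * buffers map to contiguous IOVA ranges.
         */
        ret = dma_set_max_seg_size(dev, UINT_MAX);
        if (ret)
                dev_err(dev, "Failed to set DMA segment size\n");

        return ret;
}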
 static void mtk_drm_kms_deinit(struct drm_device *drm)
 {
-       struct mtk_drm_private *private = drm->dev_private;
-
        drm_kms_helper_poll_fini(drm);
        drm_atomic_helper_shutdown(drm);
 
-       if (private->dma_parms_allocated)
-               private->dma_dev->dma_parms = NULL;
-
        component_unbind_all(drm->dev, drm);
 }
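The teardown side shrinks for the same reason: with the core owning dma_parms there is nothing DMA-related to undo, leaving only the component unbind. A generic sketch of the init/unwind idiom the labels above follow (all my_* names are hypothetical stand-ins):

#include <linux/device.h>

/* Hypothetical helpers standing in for the real bind/teardown steps. */
int my_bind_components(struct device *dev);
void my_unbind_components(struct device *dev);
int my_setup_vblank(struct device *dev);

static int my_kms_init(struct device *dev)
{
        int ret;

        ret = my_bind_components(dev);      /* cf. component_bind_all() */
        if (ret)
                return ret;

        ret = my_setup_vblank(dev);         /* cf. drm_vblank_init() */
        if (ret)
                goto err_unbind;            /* undo the last successful step */

        return 0;

err_unbind:
        my_unbind_components(dev);          /* cf. component_unbind_all() */
        return ret;
}

static void my_kms_deinit(struct device *dev)
{
        /*
         * Teardown runs the successful init steps in reverse; dma_parms
         * needs no cleanup because the driver core owns it.
         */
        my_unbind_components(dev);
}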