        .sync_sg_for_device     = arm_dma_sync_sg_for_device,
        .set_dma_mask           = dmabounce_set_mask,
        .mapping_error          = dmabounce_mapping_error,
+       .dma_supported          = arm_dma_supported,
 };
 
 static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
 
        .sync_sg_for_cpu        = arm_dma_sync_sg_for_cpu,
        .sync_sg_for_device     = arm_dma_sync_sg_for_device,
        .mapping_error          = arm_dma_mapping_error,
+       .dma_supported          = arm_dma_supported,
 };
 EXPORT_SYMBOL(arm_dma_ops);
 
        .map_page               = arm_coherent_dma_map_page,
        .map_sg                 = arm_dma_map_sg,
        .mapping_error          = arm_dma_mapping_error,
+       .dma_supported          = arm_dma_supported,
 };
 EXPORT_SYMBOL(arm_coherent_dma_ops);
 
  * during bus mastering, then you would pass 0x00ffffff as the mask
  * to this function.
  */
-int dma_supported(struct device *dev, u64 mask)
+int arm_dma_supported(struct device *dev, u64 mask)
 {
        return __dma_supported(dev, mask, false);
 }
-EXPORT_SYMBOL(dma_supported);
 
 #define PREALLOC_DMA_DEBUG_ENTRIES     4096
 
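The comment retained above describes the mask semantics for callers. As an illustration that is not part of this patch, a device that can only drive the low 24 address bits during bus mastering would express that through the generic mask setters, which in turn consult the dma_map_ops ->dma_supported callback (arm_dma_supported on this platform). The probe function and its device below are hypothetical; dma_set_mask_and_coherent() and DMA_BIT_MASK() are the stock kernel helpers.

#include <linux/device.h>
#include <linux/dma-mapping.h>

/*
 * Hypothetical driver probe: the device can only drive the low 24
 * address bits while bus mastering, so ask for a 24-bit DMA mask
 * (0x00ffffff).  The request is accepted or rejected by the
 * ->dma_supported callback installed in the ops structures above.
 */
static int example_probe(struct device *dev)
{
	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(24)))
		return -ENODEV;		/* mask rejected */

	return 0;
}
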
        .unmap_resource         = arm_iommu_unmap_resource,
 
        .mapping_error          = arm_dma_mapping_error,
+       .dma_supported          = arm_dma_supported,
 };
 
 const struct dma_map_ops iommu_coherent_ops = {
        .unmap_resource         = arm_iommu_unmap_resource,
 
        .mapping_error          = arm_dma_mapping_error,
+       .dma_supported          = arm_dma_supported,
 };
 
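With the callback wired into each dma_map_ops instance above, the generic dma_supported() helper in the DMA-mapping core dispatches to it, which is why the ARM-private definition is renamed to arm_dma_supported and its EXPORT_SYMBOL dropped. A simplified sketch of that dispatch, reflecting the shape of the inline helper in include/linux/dma-mapping.h of this era rather than the verbatim code:

/*
 * Sketch of the generic helper: defer to the per-bus/per-arch
 * ->dma_supported callback when one is provided, otherwise assume
 * the mask is acceptable.
 */
static inline int dma_supported(struct device *dev, u64 mask)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops)
		return 0;
	if (!ops->dma_supported)
		return 1;		/* no restriction imposed */
	return ops->dma_supported(dev, mask);
}
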
 /**