return 0;
 }
 
+/*
+ * ccp_dma_release - tear down per-queue DMA channel state.
+ *
+ * For each command queue's channel: kill the channel's cleanup tasklet
+ * (waits for a running instance to finish and prevents re-scheduling),
+ * then unlink the dma_chan from the DMA device's channel list.
+ *
+ * NOTE(review): list_del_rcu() is used without a visible synchronize_rcu()
+ * here — presumably the callers (error path of ccp_dmaengine_register and
+ * the unregister path, after dma_async_device_unregister) guarantee no
+ * concurrent RCU readers remain; confirm against the call sites.
+ */
+static void ccp_dma_release(struct ccp_device *ccp)
+{
+       struct ccp_dma_chan *chan;
+       struct dma_chan *dma_chan;
+       unsigned int i;
+
+       for (i = 0; i < ccp->cmd_q_count; i++) {
+               chan = ccp->ccp_dma_chan + i;
+               dma_chan = &chan->dma_chan;
+               tasklet_kill(&chan->cleanup_tasklet);
+               list_del_rcu(&dma_chan->device_node);
+       }
+}
+
 int ccp_dmaengine_register(struct ccp_device *ccp)
 {
        struct ccp_dma_chan *chan;
        return 0;
 
 err_reg:
+       ccp_dma_release(ccp);
        kmem_cache_destroy(ccp->dma_desc_cache);
 
 err_cache:
                return;
 
        dma_async_device_unregister(dma_dev);
+       ccp_dma_release(ccp);
 
        kmem_cache_destroy(ccp->dma_desc_cache);
        kmem_cache_destroy(ccp->dma_cmd_cache);