source "drivers/crypto/stm32/Kconfig"
 
+config CRYPTO_DEV_SAFEXCEL
+       tristate "Inside Secure's SafeXcel cryptographic engine driver"
+       depends on HAS_DMA && OF
+       depends on (ARM64 && ARCH_MVEBU) || (COMPILE_TEST && 64BIT)
+       select CRYPTO_AES
+       select CRYPTO_BLKCIPHER
+       select CRYPTO_HASH
+       select CRYPTO_HMAC
+       select CRYPTO_SHA1
+       select CRYPTO_SHA256
+       select CRYPTO_SHA512
+       help
+         This driver interfaces with the SafeXcel EIP-197 cryptographic engine
+         designed by Inside Secure. Select this if you want to use the AES
+         cipher in ECB or CBC chaining mode and the SHA1/SHA224/SHA256/SHA512
+         hash algorithms.
+
 endif # CRYPTO_HW
 
 obj-$(CONFIG_CRYPTO_DEV_VIRTIO) += virtio/
 obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/
 obj-$(CONFIG_CRYPTO_DEV_BCM_SPU) += bcm/
+obj-$(CONFIG_CRYPTO_DEV_SAFEXCEL) += inside-secure/
 
--- /dev/null
+obj-$(CONFIG_CRYPTO_DEV_SAFEXCEL) += crypto_safexcel.o
+crypto_safexcel-objs := safexcel.o safexcel_ring.o safexcel_cipher.o safexcel_hash.o
 
--- /dev/null
+/*
+ * Copyright (C) 2017 Marvell
+ *
+ * Antoine Tenart <antoine.tenart@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/firmware.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+
+#include <crypto/internal/hash.h>
+#include <crypto/internal/skcipher.h>
+
+#include "safexcel.h"
+
+static u32 max_rings = EIP197_MAX_RINGS;
+module_param(max_rings, uint, 0644);
+MODULE_PARM_DESC(max_rings, "Maximum number of rings to use.");
+
+static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
+{
+       u32 val, htable_offset;
+       int i;
+
+       /* Enable the record cache memory access */
+       val = readl(priv->base + EIP197_CS_RAM_CTRL);
+       val &= ~EIP197_TRC_ENABLE_MASK;
+       val |= EIP197_TRC_ENABLE_0;
+       writel(val, priv->base + EIP197_CS_RAM_CTRL);
+
+       /* Clear all ECC errors */
+       writel(0, priv->base + EIP197_TRC_ECCCTRL);
+
+       /*
+        * Make sure the cache memory is accessible by putting the record
+        * cache into reset.
+        */
+       val = readl(priv->base + EIP197_TRC_PARAMS);
+       val |= EIP197_TRC_PARAMS_SW_RESET;
+       val &= ~EIP197_TRC_PARAMS_DATA_ACCESS;
+       writel(val, priv->base + EIP197_TRC_PARAMS);
+
+       /* Clear all records */
+       for (i = 0; i < EIP197_CS_RC_MAX; i++) {
+               u32 val, offset = EIP197_CLASSIFICATION_RAMS + i * EIP197_CS_RC_SIZE;
+
+               writel(EIP197_CS_RC_NEXT(EIP197_RC_NULL) |
+                      EIP197_CS_RC_PREV(EIP197_RC_NULL),
+                      priv->base + offset);
+
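+               /*
+                * Link record i into the free chain; the first and last
+                * records get a null previous/next pointer respectively.
+                */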
+               val = EIP197_CS_RC_NEXT(i + 1) | EIP197_CS_RC_PREV(i - 1);
+               if (i == 0)
+                       val |= EIP197_CS_RC_PREV(EIP197_RC_NULL);
+               else if (i == EIP197_CS_RC_MAX - 1)
+                       val |= EIP197_CS_RC_NEXT(EIP197_RC_NULL);
+               writel(val, priv->base + offset + sizeof(u32));
+       }
+
+       /* Clear the hash table entries */
+       htable_offset = EIP197_CS_RC_MAX * EIP197_CS_RC_SIZE;
+       for (i = 0; i < 64; i++)
+               writel(GENMASK(29, 0),
+                      priv->base + EIP197_CLASSIFICATION_RAMS + htable_offset + i * sizeof(u32));
+
+       /* Disable the record cache memory access */
+       val = readl(priv->base + EIP197_CS_RAM_CTRL);
+       val &= ~EIP197_TRC_ENABLE_MASK;
+       writel(val, priv->base + EIP197_CS_RAM_CTRL);
+
+       /* Write head and tail pointers of the record free chain */
+       val = EIP197_TRC_FREECHAIN_HEAD_PTR(0) |
+             EIP197_TRC_FREECHAIN_TAIL_PTR(EIP197_CS_RC_MAX - 1);
+       writel(val, priv->base + EIP197_TRC_FREECHAIN);
+
+       /* Configure the record cache #1 */
+       val = EIP197_TRC_PARAMS2_RC_SZ_SMALL(EIP197_CS_TRC_REC_WC) |
+             EIP197_TRC_PARAMS2_HTABLE_PTR(EIP197_CS_RC_MAX);
+       writel(val, priv->base + EIP197_TRC_PARAMS2);
+
+       /* Configure the record cache #2 */
+       val = EIP197_TRC_PARAMS_RC_SZ_LARGE(EIP197_CS_TRC_LG_REC_WC) |
+             EIP197_TRC_PARAMS_BLK_TIMER_SPEED(1) |
+             EIP197_TRC_PARAMS_HTABLE_SZ(2);
+       writel(val, priv->base + EIP197_TRC_PARAMS);
+}
+
+static void eip197_write_firmware(struct safexcel_crypto_priv *priv,
+                                 const struct firmware *fw, u32 ctrl,
+                                 u32 prog_en)
+{
+       const u32 *data = (const u32 *)fw->data;
+       u32 val;
+       int i;
+
+       /* Reset the engine to make its program memory accessible */
+       writel(EIP197_PE_ICE_x_CTRL_SW_RESET |
+              EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR |
+              EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR,
+              priv->base + ctrl);
+
+       /* Enable access to the program memory */
+       writel(prog_en, priv->base + EIP197_PE_ICE_RAM_CTRL);
+
+       /* Write the firmware, which is stored as big-endian 32-bit words */
+       for (i = 0; i < fw->size / sizeof(u32); i++)
+               writel(be32_to_cpu(data[i]),
+                      priv->base + EIP197_CLASSIFICATION_RAMS + i * sizeof(u32));
+
+       /* Disable access to the program memory */
+       writel(0, priv->base + EIP197_PE_ICE_RAM_CTRL);
+
+       /* Release engine from reset */
+       val = readl(priv->base + ctrl);
+       val &= ~EIP197_PE_ICE_x_CTRL_SW_RESET;
+       writel(val, priv->base + ctrl);
+}
+
+static int eip197_load_firmwares(struct safexcel_crypto_priv *priv)
+{
+       const char *fw_name[] = {"ifpp.bin", "ipue.bin"};
+       const struct firmware *fw[FW_NB];
+       int i, j, ret = 0;
+       u32 val;
+
+       for (i = 0; i < FW_NB; i++) {
+               ret = request_firmware(&fw[i], fw_name[i], priv->dev);
+               if (ret) {
+                       dev_err(priv->dev,
+                               "Failed to request firmware %s (%d)\n",
+                               fw_name[i], ret);
+                       goto release_fw;
+               }
+       }
+
+       /* Clear the scratchpad memory */
+       val = readl(priv->base + EIP197_PE_ICE_SCRATCH_CTRL);
+       val |= EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_TIMER |
+              EIP197_PE_ICE_SCRATCH_CTRL_TIMER_EN |
+              EIP197_PE_ICE_SCRATCH_CTRL_SCRATCH_ACCESS |
+              EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS;
+       writel(val, priv->base + EIP197_PE_ICE_SCRATCH_CTRL);
+
+       memset(priv->base + EIP197_PE_ICE_SCRATCH_RAM, 0,
+              EIP197_NUM_OF_SCRATCH_BLOCKS * sizeof(u32));
+
+       eip197_write_firmware(priv, fw[FW_IFPP], EIP197_PE_ICE_FPP_CTRL,
+                             EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN);
+
+       eip197_write_firmware(priv, fw[FW_IPUE], EIP197_PE_ICE_PUE_CTRL,
+                             EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN);
+
+release_fw:
+       for (j = 0; j < i; j++)
+               release_firmware(fw[j]);
+
+       return ret;
+}
+
+static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv)
+{
+       u32 hdw, cd_size_rnd, val;
+       int i;
+
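+       /*
+        * HIA_OPTIONS[27:25] gives the host data width (hdw) as a power of
+        * two of 32-bit words; descriptor sizes below are rounded up to a
+        * multiple of that width.
+        */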
+       hdw = readl(priv->base + EIP197_HIA_OPTIONS);
+       hdw &= GENMASK(27, 25);
+       hdw >>= 25;
+
+       cd_size_rnd = (priv->config.cd_size + (BIT(hdw) - 1)) >> hdw;
+
+       for (i = 0; i < priv->config.rings; i++) {
+               /* ring base address */
+               writel(lower_32_bits(priv->ring[i].cdr.base_dma),
+                      priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
+               writel(upper_32_bits(priv->ring[i].cdr.base_dma),
+                      priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
+
+               writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.cd_offset << 16) |
+                      priv->config.cd_size,
+                      priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_DESC_SIZE);
+               writel(((EIP197_FETCH_COUNT * (cd_size_rnd << hdw)) << 16) |
+                      (EIP197_FETCH_COUNT * priv->config.cd_offset),
+                      priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_CFG);
+
+               /* Configure DMA tx control */
+               val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
+               val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
+               writel(val,
+                      priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_DMA_CFG);
+
+               /* clear any pending interrupt */
+               writel(GENMASK(5, 0),
+                      priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_STAT);
+       }
+
+       return 0;
+}
+
+static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv)
+{
+       u32 hdw, rd_size_rnd, val;
+       int i;
+
+       hdw = readl(priv->base + EIP197_HIA_OPTIONS);
+       hdw &= GENMASK(27, 25);
+       hdw >>= 25;
+
+       rd_size_rnd = (priv->config.rd_size + (BIT(hdw) - 1)) >> hdw;
+
+       for (i = 0; i < priv->config.rings; i++) {
+               /* ring base address */
+               writel(lower_32_bits(priv->ring[i].rdr.base_dma),
+                      priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
+               writel(upper_32_bits(priv->ring[i].rdr.base_dma),
+                      priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
+
+               writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.rd_offset << 16) |
+                      priv->config.rd_size,
+                      priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_DESC_SIZE);
+
+               writel(((EIP197_FETCH_COUNT * (rd_size_rnd << hdw)) << 16) |
+                      (EIP197_FETCH_COUNT * priv->config.rd_offset),
+                      priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_CFG);
+
+               /* Configure DMA tx control */
+               val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
+               val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
+               val |= EIP197_HIA_xDR_WR_RES_BUF | EIP197_HIA_xDR_WR_CTRL_BUG;
+               writel(val,
+                      priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_DMA_CFG);
+
+               /* clear any pending interrupt */
+               writel(GENMASK(7, 0),
+                      priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_STAT);
+
+               /* enable ring interrupt */
+               val = readl(priv->base + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
+               val |= EIP197_RDR_IRQ(i);
+               writel(val, priv->base + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
+       }
+
+       return 0;
+}
+
+static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
+{
+       u32 version, val;
+       int i, ret;
+
+       /* Determine endianness and configure byte swapping */
+       version = readl(priv->base + EIP197_HIA_VERSION);
+       val = readl(priv->base + EIP197_HIA_MST_CTRL);
+
+       if ((version & 0xffff) == EIP197_HIA_VERSION_BE)
+               val |= EIP197_MST_CTRL_BYTE_SWAP;
+       else if (((version >> 16) & 0xffff) == EIP197_HIA_VERSION_LE)
+               val |= (EIP197_MST_CTRL_NO_BYTE_SWAP >> 24);
+
+       writel(val, priv->base + EIP197_HIA_MST_CTRL);
+
+       /* Configure wr/rd cache values */
+       writel(EIP197_MST_CTRL_RD_CACHE(RD_CACHE_4BITS) |
+              EIP197_MST_CTRL_WD_CACHE(WR_CACHE_4BITS),
+              priv->base + EIP197_MST_CTRL);
+
+       /* Interrupts reset */
+
+       /* Disable all global interrupts */
+       writel(0, priv->base + EIP197_HIA_AIC_G_ENABLE_CTRL);
+
+       /* Clear any pending interrupt */
+       writel(GENMASK(31, 0), priv->base + EIP197_HIA_AIC_G_ACK);
+
+       /* Data Fetch Engine configuration */
+
+       /* Reset all DFE threads */
+       writel(EIP197_DxE_THR_CTRL_RESET_PE,
+              priv->base + EIP197_HIA_DFE_THR_CTRL);
+
+       /* Reset HIA input interface arbiter */
+       writel(EIP197_HIA_RA_PE_CTRL_RESET,
+              priv->base + EIP197_HIA_RA_PE_CTRL);
+
+       /* DMA transfer size to use */
+       val = EIP197_HIA_DFE_CFG_DIS_DEBUG;
+       val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(5) | EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(9);
+       val |= EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(5) | EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(7);
+       val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(RD_CACHE_3BITS);
+       val |= EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(RD_CACHE_3BITS);
+       writel(val, priv->base + EIP197_HIA_DFE_CFG);
+
+       /* Take the DFE threads out of reset */
+       writel(0, priv->base + EIP197_HIA_DFE_THR_CTRL);
+
+       /* Configure the processing engine thresholds */
+       writel(EIP197_PE_IN_xBUF_THRES_MIN(5) | EIP197_PE_IN_xBUF_THRES_MAX(9),
+             priv->base + EIP197_PE_IN_DBUF_THRES);
+       writel(EIP197_PE_IN_xBUF_THRES_MIN(5) | EIP197_PE_IN_xBUF_THRES_MAX(7),
+             priv->base + EIP197_PE_IN_TBUF_THRES);
+
+       /* enable HIA input interface arbiter and rings */
+       writel(EIP197_HIA_RA_PE_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
+              priv->base + EIP197_HIA_RA_PE_CTRL);
+
+       /* Data Store Engine configuration */
+
+       /* Reset all DSE threads */
+       writel(EIP197_DxE_THR_CTRL_RESET_PE,
+              priv->base + EIP197_HIA_DSE_THR_CTRL);
+
+       /* Wait for all DSE threads to complete */
+       while ((readl(priv->base + EIP197_HIA_DSE_THR_STAT) &
+               GENMASK(15, 12)) != GENMASK(15, 12))
+               ;
+
+       /* DMA transfer size to use */
+       val = EIP197_HIA_DSE_CFG_DIS_DEBUG;
+       val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(7) | EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(8);
+       val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(RD_CACHE_3BITS);
+       writel(val, priv->base + EIP197_HIA_DSE_CFG);
+
+       /* Take the DSE threads out of reset */
+       writel(0, priv->base + EIP197_HIA_DSE_THR_CTRL);
+
+       /* Configure the processing engine thresholds */
+       writel(EIP197_PE_OUT_DBUF_THRES_MIN(7) | EIP197_PE_OUT_DBUF_THRES_MAX(8),
+              priv->base + EIP197_PE_OUT_DBUF_THRES);
+
+       /* Processing Engine configuration */
+
+       /* H/W capabilities selection */
+       val = EIP197_FUNCTION_RSVD;
+       val |= EIP197_PROTOCOL_ENCRYPT_ONLY | EIP197_PROTOCOL_HASH_ONLY;
+       val |= EIP197_ALG_AES_ECB | EIP197_ALG_AES_CBC;
+       val |= EIP197_ALG_SHA1 | EIP197_ALG_HMAC_SHA1;
+       val |= EIP197_ALG_SHA2;
+       writel(val, priv->base + EIP197_PE_EIP96_FUNCTION_EN);
+
+       /* Command Descriptor Rings prepare */
+       for (i = 0; i < priv->config.rings; i++) {
+               /* Clear interrupts for this ring */
+               writel(GENMASK(31, 0),
+                      priv->base + EIP197_HIA_AIC_R_ENABLE_CLR(i));
+
+               /* Disable external triggering */
+               writel(0, priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_CFG);
+
+               /* Clear the pending prepared counter */
+               writel(EIP197_xDR_PREP_CLR_COUNT,
+                      priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_PREP_COUNT);
+
+               /* Clear the pending processed counter */
+               writel(EIP197_xDR_PROC_CLR_COUNT,
+                      priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_PROC_COUNT);
+
+               writel(0,
+                      priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_PREP_PNTR);
+               writel(0,
+                      priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_PROC_PNTR);
+
+               writel((EIP197_DEFAULT_RING_SIZE * priv->config.cd_offset) << 2,
+                      priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_RING_SIZE);
+       }
+
+       /* Result Descriptor Ring prepare */
+       for (i = 0; i < priv->config.rings; i++) {
+               /* Disable external triggering */
+               writel(0, priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_CFG);
+
+               /* Clear the pending prepared counter */
+               writel(EIP197_xDR_PREP_CLR_COUNT,
+                      priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_PREP_COUNT);
+
+               /* Clear the pending processed counter */
+               writel(EIP197_xDR_PROC_CLR_COUNT,
+                      priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_PROC_COUNT);
+
+               writel(0,
+                      priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_PREP_PNTR);
+               writel(0,
+                      priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_PROC_PNTR);
+
+               /* Ring size */
+               writel((EIP197_DEFAULT_RING_SIZE * priv->config.rd_offset) << 2,
+                      priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_RING_SIZE);
+       }
+
+       /* Enable command descriptor rings */
+       writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
+              priv->base + EIP197_HIA_DFE_THR_CTRL);
+
+       /* Enable result descriptor rings */
+       writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
+              priv->base + EIP197_HIA_DSE_THR_CTRL);
+
+       /* Clear any HIA interrupt */
+       writel(GENMASK(30, 20), priv->base + EIP197_HIA_AIC_G_ACK);
+
+       eip197_trc_cache_init(priv);
+
+       ret = eip197_load_firmwares(priv);
+       if (ret)
+               return ret;
+
+       safexcel_hw_setup_cdesc_rings(priv);
+       safexcel_hw_setup_rdesc_rings(priv);
+
+       return 0;
+}
+
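+/*
+ * Pull requests off the crypto queue, hand each one to its context's send()
+ * callback and, once the batch is built, tell the rings how many command and
+ * result descriptors were prepared so the engine starts fetching them.
+ */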
+void safexcel_dequeue(struct safexcel_crypto_priv *priv)
+{
+       struct crypto_async_request *req, *backlog;
+       struct safexcel_context *ctx;
+       struct safexcel_request *request;
+       int i, ret, n = 0, nreq[EIP197_MAX_RINGS] = {0};
+       int cdesc[EIP197_MAX_RINGS] = {0}, rdesc[EIP197_MAX_RINGS] = {0};
+       int commands, results;
+
+       do {
+               spin_lock_bh(&priv->lock);
+               req = crypto_dequeue_request(&priv->queue);
+               backlog = crypto_get_backlog(&priv->queue);
+               spin_unlock_bh(&priv->lock);
+
+               if (!req)
+                       goto finalize;
+
+               request = kzalloc(sizeof(*request), EIP197_GFP_FLAGS(*req));
+               if (!request)
+                       goto requeue;
+
+               ctx = crypto_tfm_ctx(req->tfm);
+               ret = ctx->send(req, ctx->ring, request, &commands, &results);
+               if (ret) {
+                       kfree(request);
+requeue:
+                       spin_lock_bh(&priv->lock);
+                       crypto_enqueue_request(&priv->queue, req);
+                       spin_unlock_bh(&priv->lock);
+
+                       priv->need_dequeue = true;
+                       continue;
+               }
+
+               if (backlog)
+                       backlog->complete(backlog, -EINPROGRESS);
+
+               spin_lock_bh(&priv->ring[ctx->ring].egress_lock);
+               list_add_tail(&request->list, &priv->ring[ctx->ring].list);
+               spin_unlock_bh(&priv->ring[ctx->ring].egress_lock);
+
+               cdesc[ctx->ring] += commands;
+               rdesc[ctx->ring] += results;
+
+               nreq[ctx->ring]++;
+       } while (n++ < EIP197_MAX_BATCH_SZ);
+
+finalize:
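+       /*
+        * If the batch limit was hit there may be more queued requests, so
+        * flag that another dequeue pass is needed.
+        */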
+       if (n == EIP197_MAX_BATCH_SZ)
+               priv->need_dequeue = true;
+       else if (!n)
+               return;
+
+       for (i = 0; i < priv->config.rings; i++) {
+               if (!nreq[i])
+                       continue;
+
+               spin_lock_bh(&priv->ring[i].lock);
+
+               /* Configure when we want an interrupt */
+               writel(EIP197_HIA_RDR_THRESH_PKT_MODE |
+                      EIP197_HIA_RDR_THRESH_PROC_PKT(nreq[i]),
+                      priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_THRESH);
+
+               /* let the RDR know we have pending descriptors */
+               writel((rdesc[i] * priv->config.rd_offset) << 2,
+                      priv->base + EIP197_HIA_RDR(i) + EIP197_HIA_xDR_PREP_COUNT);
+
+               /* let the CDR know we have pending descriptors */
+               writel((cdesc[i] * priv->config.cd_offset) << 2,
+                      priv->base + EIP197_HIA_CDR(i) + EIP197_HIA_xDR_PREP_COUNT);
+
+               spin_unlock_bh(&priv->ring[i].lock);
+       }
+}
+
+void safexcel_free_context(struct safexcel_crypto_priv *priv,
+                          struct crypto_async_request *req,
+                          int result_sz)
+{
+       struct safexcel_context *ctx = crypto_tfm_ctx(req->tfm);
+
+       if (ctx->result_dma)
+               dma_unmap_single(priv->dev, ctx->result_dma, result_sz,
+                                DMA_FROM_DEVICE);
+
+       if (ctx->cache) {
+               dma_unmap_single(priv->dev, ctx->cache_dma, ctx->cache_sz,
+                                DMA_TO_DEVICE);
+               kfree(ctx->cache);
+               ctx->cache = NULL;
+               ctx->cache_sz = 0;
+       }
+}
+
+void safexcel_complete(struct safexcel_crypto_priv *priv, int ring)
+{
+       struct safexcel_command_desc *cdesc;
+
+       /* Acknowledge the command descriptors */
+       do {
+               cdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].cdr);
+               if (IS_ERR(cdesc)) {
+                       dev_err(priv->dev,
+                               "Could not retrieve the command descriptor\n");
+                       return;
+               }
+       } while (!cdesc->last_seg);
+}
+
+void safexcel_inv_complete(struct crypto_async_request *req, int error)
+{
+       struct safexcel_inv_result *result = req->data;
+
+       if (error == -EINPROGRESS)
+               return;
+
+       result->error = error;
+       complete(&result->completion);
+}
+
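+/*
+ * Queue a command/result descriptor pair asking the engine to invalidate the
+ * context (transform) record at ctxr_dma, so a stale cached copy cannot be
+ * reused.
+ */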
+int safexcel_invalidate_cache(struct crypto_async_request *async,
+                             struct safexcel_context *ctx,
+                             struct safexcel_crypto_priv *priv,
+                             dma_addr_t ctxr_dma, int ring,
+                             struct safexcel_request *request)
+{
+       struct safexcel_command_desc *cdesc;
+       struct safexcel_result_desc *rdesc;
+       int ret = 0;
+
+       spin_lock_bh(&priv->ring[ring].egress_lock);
+
+       /* Prepare command descriptor */
+       cdesc = safexcel_add_cdesc(priv, ring, true, true, 0, 0, 0, ctxr_dma);
+       if (IS_ERR(cdesc)) {
+               ret = PTR_ERR(cdesc);
+               goto unlock;
+       }
+
+       cdesc->control_data.type = EIP197_TYPE_EXTENDED;
+       cdesc->control_data.options = 0;
+       cdesc->control_data.refresh = 0;
+       cdesc->control_data.control0 = CONTEXT_CONTROL_INV_TR;
+
+       /* Prepare result descriptor */
+       rdesc = safexcel_add_rdesc(priv, ring, true, true, 0, 0);
+
+       if (IS_ERR(rdesc)) {
+               ret = PTR_ERR(rdesc);
+               goto cdesc_rollback;
+       }
+
+       request->req = async;
+       goto unlock;
+
+cdesc_rollback:
+       safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
+
+unlock:
+       spin_unlock_bh(&priv->ring[ring].egress_lock);
+       return ret;
+}
+
+static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv *priv,
+                                                    int ring)
+{
+       struct safexcel_request *sreq;
+       struct safexcel_context *ctx;
+       int ret, i, nreq, ndesc = 0;
+       bool should_complete;
+
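+       /* Bits [30:24] of xDR_PROC_COUNT hold the number of processed packets. */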
+       nreq = readl(priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_PROC_COUNT);
+       nreq >>= 24;
+       nreq &= GENMASK(6, 0);
+       if (!nreq)
+               return;
+
+       for (i = 0; i < nreq; i++) {
+               spin_lock_bh(&priv->ring[ring].egress_lock);
+               sreq = list_first_entry(&priv->ring[ring].list,
+                                       struct safexcel_request, list);
+               list_del(&sreq->list);
+               spin_unlock_bh(&priv->ring[ring].egress_lock);
+
+               ctx = crypto_tfm_ctx(sreq->req->tfm);
+               ndesc = ctx->handle_result(priv, ring, sreq->req,
+                                          &should_complete, &ret);
+               if (ndesc < 0) {
+                       dev_err(priv->dev, "failed to handle result (%d)", ndesc);
+                       return;
+               }
+
+               writel(EIP197_xDR_PROC_xD_PKT(1) |
+                      EIP197_xDR_PROC_xD_COUNT(ndesc * priv->config.rd_offset),
+                      priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_PROC_COUNT);
+
+               if (should_complete) {
+                       local_bh_disable();
+                       sreq->req->complete(sreq->req, ret);
+                       local_bh_enable();
+               }
+
+               kfree(sreq);
+       }
+}
+
+static void safexcel_handle_result_work(struct work_struct *work)
+{
+       struct safexcel_work_data *data =
+                       container_of(work, struct safexcel_work_data, work);
+       struct safexcel_crypto_priv *priv = data->priv;
+
+       safexcel_handle_result_descriptor(priv, data->ring);
+
+       if (priv->need_dequeue) {
+               priv->need_dequeue = false;
+               safexcel_dequeue(data->priv);
+       }
+}
+
+struct safexcel_ring_irq_data {
+       struct safexcel_crypto_priv *priv;
+       int ring;
+};
+
+static irqreturn_t safexcel_irq_ring(int irq, void *data)
+{
+       struct safexcel_ring_irq_data *irq_data = data;
+       struct safexcel_crypto_priv *priv = irq_data->priv;
+       int ring = irq_data->ring;
+       u32 status, stat;
+
+       status = readl(priv->base + EIP197_HIA_AIC_R_ENABLED_STAT(ring));
+       if (!status)
+               return IRQ_NONE;
+
+       /* RDR interrupts */
+       if (status & EIP197_RDR_IRQ(ring)) {
+               stat = readl(priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_STAT);
+
+               if (unlikely(stat & EIP197_xDR_ERR)) {
+                       /*
+                        * Fatal error, the RDR is unusable and must be
+                        * reinitialized. This should not happen under
+                        * normal circumstances.
+                        */
+                       dev_err(priv->dev, "RDR: fatal error.");
+               } else if (likely(stat & EIP197_xDR_THRESH)) {
+                       queue_work(priv->ring[ring].workqueue, &priv->ring[ring].work_data.work);
+               }
+
+               /* ACK the interrupts */
+               writel(stat & 0xff,
+                      priv->base + EIP197_HIA_RDR(ring) + EIP197_HIA_xDR_STAT);
+       }
+
+       /* ACK the interrupts */
+       writel(status, priv->base + EIP197_HIA_AIC_R_ACK(ring));
+
+       return IRQ_HANDLED;
+}
+
+static int safexcel_request_ring_irq(struct platform_device *pdev, const char *name,
+                                    irq_handler_t handler,
+                                    struct safexcel_ring_irq_data *ring_irq_priv)
+{
+       int ret, irq = platform_get_irq_byname(pdev, name);
+
+       if (irq < 0) {
+               dev_err(&pdev->dev, "unable to get IRQ '%s'\n", name);
+               return irq;
+       }
+
+       ret = devm_request_irq(&pdev->dev, irq, handler, 0,
+                              dev_name(&pdev->dev), ring_irq_priv);
+       if (ret) {
+               dev_err(&pdev->dev, "unable to request IRQ %d\n", irq);
+               return ret;
+       }
+
+       return irq;
+}
+
+static struct safexcel_alg_template *safexcel_algs[] = {
+       &safexcel_alg_ecb_aes,
+       &safexcel_alg_cbc_aes,
+       &safexcel_alg_sha1,
+       &safexcel_alg_sha224,
+       &safexcel_alg_sha256,
+       &safexcel_alg_hmac_sha1,
+};
+
+static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv)
+{
+       int i, j, ret = 0;
+
+       for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
+               safexcel_algs[i]->priv = priv;
+
+               if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
+                       ret = crypto_register_skcipher(&safexcel_algs[i]->alg.skcipher);
+               else
+                       ret = crypto_register_ahash(&safexcel_algs[i]->alg.ahash);
+
+               if (ret)
+                       goto fail;
+       }
+
+       return 0;
+
+fail:
+       for (j = 0; j < i; j++) {
+               if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
+                       crypto_unregister_skcipher(&safexcel_algs[j]->alg.skcipher);
+               else
+                       crypto_unregister_ahash(&safexcel_algs[j]->alg.ahash);
+       }
+
+       return ret;
+}
+
+static void safexcel_unregister_algorithms(struct safexcel_crypto_priv *priv)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
+               if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
+                       crypto_unregister_skcipher(&safexcel_algs[i]->alg.skcipher);
+               else
+                       crypto_unregister_ahash(&safexcel_algs[i]->alg.ahash);
+       }
+}
+
+static void safexcel_configure(struct safexcel_crypto_priv *priv)
+{
+       u32 val, mask;
+
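+       /*
+        * Derive the descriptor alignment mask from the host data width
+        * advertised in HIA_OPTIONS[27:25] (a power of two of 32-bit words).
+        */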
+       val = readl(priv->base + EIP197_HIA_OPTIONS);
+       val = (val & GENMASK(27, 25)) >> 25;
+       mask = BIT(val) - 1;
+
+       val = readl(priv->base + EIP197_HIA_OPTIONS);
+       priv->config.rings = min_t(u32, val & GENMASK(3, 0), max_rings);
+
+       priv->config.cd_size = (sizeof(struct safexcel_command_desc) / sizeof(u32));
+       priv->config.cd_offset = (priv->config.cd_size + mask) & ~mask;
+
+       priv->config.rd_size = (sizeof(struct safexcel_result_desc) / sizeof(u32));
+       priv->config.rd_offset = (priv->config.rd_size + mask) & ~mask;
+}
+
+static int safexcel_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct resource *res;
+       struct safexcel_crypto_priv *priv;
+       u64 dma_mask;
+       int i, ret;
+
+       priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+
+       priv->dev = dev;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       priv->base = devm_ioremap_resource(dev, res);
+       if (IS_ERR(priv->base)) {
+               dev_err(dev, "failed to get resource\n");
+               return PTR_ERR(priv->base);
+       }
+
+       priv->clk = of_clk_get(dev->of_node, 0);
+       if (!IS_ERR(priv->clk)) {
+               ret = clk_prepare_enable(priv->clk);
+               if (ret) {
+                       dev_err(dev, "unable to enable clk (%d)\n", ret);
+                       return ret;
+               }
+       } else {
+               /* The clock isn't mandatory */
+               if (PTR_ERR(priv->clk) == -EPROBE_DEFER)
+                       return -EPROBE_DEFER;
+       }
+
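+       /*
+        * Use the optional "dma-mask" DT property when present, otherwise
+        * default to a full 64-bit DMA mask.
+        */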
+       if (of_property_read_u64(dev->of_node, "dma-mask", &dma_mask))
+               dma_mask = DMA_BIT_MASK(64);
+       ret = dma_set_mask_and_coherent(dev, dma_mask);
+       if (ret)
+               goto err_clk;
+
+       priv->context_pool = dmam_pool_create("safexcel-context", dev,
+                                             sizeof(struct safexcel_context_record),
+                                             1, 0);
+       if (!priv->context_pool) {
+               ret = -ENOMEM;
+               goto err_clk;
+       }
+
+       safexcel_configure(priv);
+
+       for (i = 0; i < priv->config.rings; i++) {
+               char irq_name[6] = {0}; /* "ringX\0" */
+               char wq_name[9] = {0}; /* "wq_ringX\0" */
+               int irq;
+               struct safexcel_ring_irq_data *ring_irq;
+
+               ret = safexcel_init_ring_descriptors(priv,
+                                                    &priv->ring[i].cdr,
+                                                    &priv->ring[i].rdr);
+               if (ret)
+                       goto err_clk;
+
+               ring_irq = devm_kzalloc(dev, sizeof(*ring_irq), GFP_KERNEL);
+               if (!ring_irq) {
+                       ret = -ENOMEM;
+                       goto err_clk;
+               }
+
+               ring_irq->priv = priv;
+               ring_irq->ring = i;
+
+               snprintf(irq_name, 6, "ring%d", i);
+               irq = safexcel_request_ring_irq(pdev, irq_name, safexcel_irq_ring,
+                                               ring_irq);
+
+               if (irq < 0)
+                       goto err_clk;
+
+               priv->ring[i].work_data.priv = priv;
+               priv->ring[i].work_data.ring = i;
+               INIT_WORK(&priv->ring[i].work_data.work, safexcel_handle_result_work);
+
+               snprintf(wq_name, 9, "wq_ring%d", i);
+               priv->ring[i].workqueue = create_singlethread_workqueue(wq_name);
+               if (!priv->ring[i].workqueue) {
+                       ret = -ENOMEM;
+                       goto err_clk;
+               }
+
+               INIT_LIST_HEAD(&priv->ring[i].list);
+               spin_lock_init(&priv->ring[i].lock);
+               spin_lock_init(&priv->ring[i].egress_lock);
+       }
+
+       platform_set_drvdata(pdev, priv);
+       atomic_set(&priv->ring_used, 0);
+
+       spin_lock_init(&priv->lock);
+       crypto_init_queue(&priv->queue, EIP197_DEFAULT_RING_SIZE);
+
+       ret = safexcel_hw_init(priv);
+       if (ret) {
+               dev_err(dev, "EIP h/w init failed (%d)\n", ret);
+               goto err_clk;
+       }
+
+       ret = safexcel_register_algorithms(priv);
+       if (ret) {
+               dev_err(dev, "Failed to register algorithms (%d)\n", ret);
+               goto err_clk;
+       }
+
+       return 0;
+
+err_clk:
+       clk_disable_unprepare(priv->clk);
+       return ret;
+}
+
+static int safexcel_remove(struct platform_device *pdev)
+{
+       struct safexcel_crypto_priv *priv = platform_get_drvdata(pdev);
+       int i;
+
+       safexcel_unregister_algorithms(priv);
+       clk_disable_unprepare(priv->clk);
+
+       for (i = 0; i < priv->config.rings; i++)
+               destroy_workqueue(priv->ring[i].workqueue);
+
+       return 0;
+}
+
+static const struct of_device_id safexcel_of_match_table[] = {
+       { .compatible = "inside-secure,safexcel-eip197" },
+       {},
+};
+
+static struct platform_driver crypto_safexcel = {
+       .probe          = safexcel_probe,
+       .remove         = safexcel_remove,
+       .driver         = {
+               .name   = "crypto-safexcel",
+               .of_match_table = safexcel_of_match_table,
+       },
+};
+module_platform_driver(crypto_safexcel);
+
+MODULE_AUTHOR("Antoine Tenart <antoine.tenart@free-electrons.com>");
+MODULE_AUTHOR("Ofer Heifetz <oferh@marvell.com>");
+MODULE_AUTHOR("Igal Liberman <igall@marvell.com>");
+MODULE_DESCRIPTION("Support for SafeXcel cryptographic engine EIP197");
+MODULE_LICENSE("GPL v2");
 
--- /dev/null
+/*
+ * Copyright (C) 2017 Marvell
+ *
+ * Antoine Tenart <antoine.tenart@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __SAFEXCEL_H__
+#define __SAFEXCEL_H__
+
+#include <crypto/algapi.h>
+#include <crypto/internal/hash.h>
+#include <crypto/skcipher.h>
+
+#define EIP197_HIA_VERSION_LE                  0xca35
+#define EIP197_HIA_VERSION_BE                  0x35ca
+
+/* Static configuration */
+#define EIP197_DEFAULT_RING_SIZE               64
+#define EIP197_MAX_TOKENS                      5
+#define EIP197_MAX_RINGS                       4
+#define EIP197_FETCH_COUNT                     1
+#define EIP197_MAX_BATCH_SZ                    8
+
+#define EIP197_GFP_FLAGS(base) ((base).flags & CRYPTO_TFM_REQ_MAY_SLEEP ? \
+                                GFP_KERNEL : GFP_ATOMIC)
+
+/* CDR/RDR register offsets */
+#define EIP197_HIA_xDR_OFF(r)                  (0x80000 + (r) * 0x1000)
+#define EIP197_HIA_CDR(r)                      (EIP197_HIA_xDR_OFF(r))
+#define EIP197_HIA_RDR(r)                      (EIP197_HIA_xDR_OFF(r) + 0x800)
+#define EIP197_HIA_xDR_RING_BASE_ADDR_LO       0x0
+#define EIP197_HIA_xDR_RING_BASE_ADDR_HI       0x4
+#define EIP197_HIA_xDR_RING_SIZE               0x18
+#define EIP197_HIA_xDR_DESC_SIZE               0x1c
+#define EIP197_HIA_xDR_CFG                     0x20
+#define EIP197_HIA_xDR_DMA_CFG                 0x24
+#define EIP197_HIA_xDR_THRESH                  0x28
+#define EIP197_HIA_xDR_PREP_COUNT              0x2c
+#define EIP197_HIA_xDR_PROC_COUNT              0x30
+#define EIP197_HIA_xDR_PREP_PNTR               0x34
+#define EIP197_HIA_xDR_PROC_PNTR               0x38
+#define EIP197_HIA_xDR_STAT                    0x3c
+
+/* register offsets */
+#define EIP197_HIA_DFE_CFG                     0x8c000
+#define EIP197_HIA_DFE_THR_CTRL                        0x8c040
+#define EIP197_HIA_DFE_THR_STAT                        0x8c044
+#define EIP197_HIA_DSE_CFG                     0x8d000
+#define EIP197_HIA_DSE_THR_CTRL                        0x8d040
+#define EIP197_HIA_DSE_THR_STAT                        0x8d044
+#define EIP197_HIA_RA_PE_CTRL                  0x90010
+#define EIP197_HIA_RA_PE_STAT                  0x90014
+#define EIP197_HIA_AIC_R_OFF(r)                        ((r) * 0x1000)
+#define EIP197_HIA_AIC_R_ENABLE_CTRL(r)                (0x9e808 - EIP197_HIA_AIC_R_OFF(r))
+#define EIP197_HIA_AIC_R_ENABLED_STAT(r)       (0x9e810 - EIP197_HIA_AIC_R_OFF(r))
+#define EIP197_HIA_AIC_R_ACK(r)                        (0x9e810 - EIP197_HIA_AIC_R_OFF(r))
+#define EIP197_HIA_AIC_R_ENABLE_CLR(r)         (0x9e814 - EIP197_HIA_AIC_R_OFF(r))
+#define EIP197_HIA_AIC_G_ENABLE_CTRL           0x9f808
+#define EIP197_HIA_AIC_G_ENABLED_STAT          0x9f810
+#define EIP197_HIA_AIC_G_ACK                   0x9f810
+#define EIP197_HIA_MST_CTRL                    0x9fff4
+#define EIP197_HIA_OPTIONS                     0x9fff8
+#define EIP197_HIA_VERSION                     0x9fffc
+#define EIP197_PE_IN_DBUF_THRES                        0xa0000
+#define EIP197_PE_IN_TBUF_THRES                        0xa0100
+#define EIP197_PE_ICE_SCRATCH_RAM              0xa0800
+#define EIP197_PE_ICE_PUE_CTRL                 0xa0c80
+#define EIP197_PE_ICE_SCRATCH_CTRL             0xa0d04
+#define EIP197_PE_ICE_FPP_CTRL                 0xa0d80
+#define EIP197_PE_ICE_RAM_CTRL                 0xa0ff0
+#define EIP197_PE_EIP96_FUNCTION_EN            0xa1004
+#define EIP197_PE_EIP96_CONTEXT_CTRL           0xa1008
+#define EIP197_PE_EIP96_CONTEXT_STAT           0xa100c
+#define EIP197_PE_OUT_DBUF_THRES               0xa1c00
+#define EIP197_PE_OUT_TBUF_THRES               0xa1d00
+#define EIP197_CLASSIFICATION_RAMS             0xe0000
+#define EIP197_TRC_CTRL                                0xf0800
+#define EIP197_TRC_LASTRES                     0xf0804
+#define EIP197_TRC_REGINDEX                    0xf0808
+#define EIP197_TRC_PARAMS                      0xf0820
+#define EIP197_TRC_FREECHAIN                   0xf0824
+#define EIP197_TRC_PARAMS2                     0xf0828
+#define EIP197_TRC_ECCCTRL                     0xf0830
+#define EIP197_TRC_ECCSTAT                     0xf0834
+#define EIP197_TRC_ECCADMINSTAT                        0xf0838
+#define EIP197_TRC_ECCDATASTAT                 0xf083c
+#define EIP197_TRC_ECCDATA                     0xf0840
+#define EIP197_CS_RAM_CTRL                     0xf7ff0
+#define EIP197_MST_CTRL                                0xffff4
+
+/* EIP197_HIA_xDR_DESC_SIZE */
+#define EIP197_xDR_DESC_MODE_64BIT             BIT(31)
+
+/* EIP197_HIA_xDR_DMA_CFG */
+#define EIP197_HIA_xDR_WR_RES_BUF              BIT(22)
+#define EIP197_HIA_xDR_WR_CTRL_BUG             BIT(23)
+#define EIP197_HIA_xDR_WR_OWN_BUF              BIT(24)
+#define EIP197_HIA_xDR_CFG_WR_CACHE(n)         (((n) & 0x7) << 23)
+#define EIP197_HIA_xDR_CFG_RD_CACHE(n)         (((n) & 0x7) << 29)
+
+/* EIP197_HIA_CDR_THRESH */
+#define EIP197_HIA_CDR_THRESH_PROC_PKT(n)      (n)
+#define EIP197_HIA_CDR_THRESH_PROC_MODE                BIT(22)
+#define EIP197_HIA_CDR_THRESH_PKT_MODE         BIT(23)
+#define EIP197_HIA_CDR_THRESH_TIMEOUT(n)       ((n) << 24) /* x256 clk cycles */
+
+/* EIP197_HIA_RDR_THRESH */
+#define EIP197_HIA_RDR_THRESH_PROC_PKT(n)      (n)
+#define EIP197_HIA_RDR_THRESH_PKT_MODE         BIT(23)
+#define EIP197_HIA_RDR_THRESH_TIMEOUT(n)       ((n) << 24) /* x256 clk cycles */
+
+/* EIP197_HIA_xDR_PREP_COUNT */
+#define EIP197_xDR_PREP_CLR_COUNT              BIT(31)
+
+/* EIP197_HIA_xDR_PROC_COUNT */
+#define EIP197_xDR_PROC_xD_COUNT(n)            ((n) << 2)
+#define EIP197_xDR_PROC_xD_PKT(n)              ((n) << 24)
+#define EIP197_xDR_PROC_CLR_COUNT              BIT(31)
+
+/* EIP197_HIA_xDR_STAT */
+#define EIP197_xDR_DMA_ERR                     BIT(0)
+#define EIP197_xDR_PREP_CMD_THRES              BIT(1)
+#define EIP197_xDR_ERR                         BIT(2)
+#define EIP197_xDR_THRESH                      BIT(4)
+#define EIP197_xDR_TIMEOUT                     BIT(5)
+
+#define EIP197_HIA_RA_PE_CTRL_RESET            BIT(31)
+#define EIP197_HIA_RA_PE_CTRL_EN               BIT(30)
+
+/* EIP197_HIA_AIC_R_ENABLE_CTRL */
+#define EIP197_CDR_IRQ(n)                      BIT((n) * 2)
+#define EIP197_RDR_IRQ(n)                      BIT((n) * 2 + 1)
+
+/* EIP197_HIA_DFE/DSE_CFG */
+#define EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(n)    ((n) << 0)
+#define EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(n)  (((n) & 0x7) << 4)
+#define EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(n)    ((n) << 8)
+#define EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(n)    ((n) << 16)
+#define EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(n)  (((n) & 0x7) << 20)
+#define EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(n)    ((n) << 24)
+#define EIP197_HIA_DFE_CFG_DIS_DEBUG           (BIT(31) | BIT(29))
+#define EIP197_HIA_DSE_CFG_DIS_DEBUG           BIT(31)
+
+/* EIP197_HIA_DFE/DSE_THR_CTRL */
+#define EIP197_DxE_THR_CTRL_EN                 BIT(30)
+#define EIP197_DxE_THR_CTRL_RESET_PE           BIT(31)
+
+/* EIP197_HIA_AIC_G_ENABLED_STAT */
+#define EIP197_G_IRQ_DFE(n)                    BIT((n) << 1)
+#define EIP197_G_IRQ_DSE(n)                    BIT(((n) << 1) + 1)
+#define EIP197_G_IRQ_RING                      BIT(16)
+#define EIP197_G_IRQ_PE(n)                     BIT((n) + 20)
+
+/* EIP197_HIA_MST_CTRL */
+#define RD_CACHE_3BITS                         0x5
+#define WR_CACHE_3BITS                         0x3
+#define RD_CACHE_4BITS                         (RD_CACHE_3BITS << 1 | BIT(0))
+#define WR_CACHE_4BITS                         (WR_CACHE_3BITS << 1 | BIT(0))
+#define EIP197_MST_CTRL_RD_CACHE(n)            (((n) & 0xf) << 0)
+#define EIP197_MST_CTRL_WD_CACHE(n)            (((n) & 0xf) << 4)
+#define EIP197_MST_CTRL_BYTE_SWAP              BIT(24)
+#define EIP197_MST_CTRL_NO_BYTE_SWAP           BIT(25)
+
+/* EIP197_PE_IN_DBUF/TBUF_THRES */
+#define EIP197_PE_IN_xBUF_THRES_MIN(n)         ((n) << 8)
+#define EIP197_PE_IN_xBUF_THRES_MAX(n)         ((n) << 12)
+
+/* EIP197_PE_OUT_DBUF_THRES */
+#define EIP197_PE_OUT_DBUF_THRES_MIN(n)                ((n) << 0)
+#define EIP197_PE_OUT_DBUF_THRES_MAX(n)                ((n) << 4)
+
+/* EIP197_PE_ICE_SCRATCH_CTRL */
+#define EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_TIMER                BIT(2)
+#define EIP197_PE_ICE_SCRATCH_CTRL_TIMER_EN            BIT(3)
+#define EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS       BIT(24)
+#define EIP197_PE_ICE_SCRATCH_CTRL_SCRATCH_ACCESS      BIT(25)
+
+/* EIP197_PE_ICE_SCRATCH_RAM */
+#define EIP197_NUM_OF_SCRATCH_BLOCKS           32
+
+/* EIP197_PE_ICE_PUE/FPP_CTRL */
+#define EIP197_PE_ICE_x_CTRL_SW_RESET                  BIT(0)
+#define EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR          BIT(14)
+#define EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR              BIT(15)
+
+/* EIP197_PE_ICE_RAM_CTRL */
+#define EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN     BIT(0)
+#define EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN     BIT(1)
+
+/* EIP197_PE_EIP96_FUNCTION_EN */
+#define EIP197_FUNCTION_RSVD                   (BIT(6) | BIT(15) | BIT(20) | BIT(23))
+#define EIP197_PROTOCOL_HASH_ONLY              BIT(0)
+#define EIP197_PROTOCOL_ENCRYPT_ONLY           BIT(1)
+#define EIP197_PROTOCOL_HASH_ENCRYPT           BIT(2)
+#define EIP197_PROTOCOL_HASH_DECRYPT           BIT(3)
+#define EIP197_PROTOCOL_ENCRYPT_HASH           BIT(4)
+#define EIP197_PROTOCOL_DECRYPT_HASH           BIT(5)
+#define EIP197_ALG_ARC4                                BIT(7)
+#define EIP197_ALG_AES_ECB                     BIT(8)
+#define EIP197_ALG_AES_CBC                     BIT(9)
+#define EIP197_ALG_AES_CTR_ICM                 BIT(10)
+#define EIP197_ALG_AES_OFB                     BIT(11)
+#define EIP197_ALG_AES_CFB                     BIT(12)
+#define EIP197_ALG_DES_ECB                     BIT(13)
+#define EIP197_ALG_DES_CBC                     BIT(14)
+#define EIP197_ALG_DES_OFB                     BIT(16)
+#define EIP197_ALG_DES_CFB                     BIT(17)
+#define EIP197_ALG_3DES_ECB                    BIT(18)
+#define EIP197_ALG_3DES_CBC                    BIT(19)
+#define EIP197_ALG_3DES_OFB                    BIT(21)
+#define EIP197_ALG_3DES_CFB                    BIT(22)
+#define EIP197_ALG_MD5                         BIT(24)
+#define EIP197_ALG_HMAC_MD5                    BIT(25)
+#define EIP197_ALG_SHA1                                BIT(26)
+#define EIP197_ALG_HMAC_SHA1                   BIT(27)
+#define EIP197_ALG_SHA2                                BIT(28)
+#define EIP197_ALG_HMAC_SHA2                   BIT(29)
+#define EIP197_ALG_AES_XCBC_MAC                        BIT(30)
+#define EIP197_ALG_GCM_HASH                    BIT(31)
+
+/* EIP197_PE_EIP96_CONTEXT_CTRL */
+#define EIP197_CONTEXT_SIZE(n)                 (n)
+#define EIP197_ADDRESS_MODE                    BIT(8)
+#define EIP197_CONTROL_MODE                    BIT(9)
+
+/* Context Control */
+struct safexcel_context_record {
+       u32 control0;
+       u32 control1;
+
+       __le32 data[12];
+} __packed;
+
+/* control0 */
+#define CONTEXT_CONTROL_TYPE_NULL_OUT          0x0
+#define CONTEXT_CONTROL_TYPE_NULL_IN           0x1
+#define CONTEXT_CONTROL_TYPE_HASH_OUT          0x2
+#define CONTEXT_CONTROL_TYPE_HASH_IN           0x3
+#define CONTEXT_CONTROL_TYPE_CRYPTO_OUT                0x4
+#define CONTEXT_CONTROL_TYPE_CRYPTO_IN         0x5
+#define CONTEXT_CONTROL_TYPE_ENCRYPT_HASH_OUT  0x6
+#define CONTEXT_CONTROL_TYPE_DECRYPT_HASH_IN   0x7
+#define CONTEXT_CONTROL_TYPE_HASH_ENCRYPT_OUT  0x14
+#define CONTEXT_CONTROL_TYPE_HASH_DECRYPT_OUT  0x15
+#define CONTEXT_CONTROL_RESTART_HASH           BIT(4)
+#define CONTEXT_CONTROL_NO_FINISH_HASH         BIT(5)
+#define CONTEXT_CONTROL_SIZE(n)                        ((n) << 8)
+#define CONTEXT_CONTROL_KEY_EN                 BIT(16)
+#define CONTEXT_CONTROL_CRYPTO_ALG_AES128      (0x5 << 17)
+#define CONTEXT_CONTROL_CRYPTO_ALG_AES192      (0x6 << 17)
+#define CONTEXT_CONTROL_CRYPTO_ALG_AES256      (0x7 << 17)
+#define CONTEXT_CONTROL_DIGEST_PRECOMPUTED     (0x1 << 21)
+#define CONTEXT_CONTROL_DIGEST_HMAC            (0x3 << 21)
+#define CONTEXT_CONTROL_CRYPTO_ALG_SHA1                (0x2 << 23)
+#define CONTEXT_CONTROL_CRYPTO_ALG_SHA224      (0x4 << 23)
+#define CONTEXT_CONTROL_CRYPTO_ALG_SHA256      (0x3 << 23)
+#define CONTEXT_CONTROL_INV_FR                 (0x5 << 24)
+#define CONTEXT_CONTROL_INV_TR                 (0x6 << 24)
+
+/* control1 */
+#define CONTEXT_CONTROL_CRYPTO_MODE_ECB                (0 << 0)
+#define CONTEXT_CONTROL_CRYPTO_MODE_CBC                (1 << 0)
+#define CONTEXT_CONTROL_IV0                    BIT(5)
+#define CONTEXT_CONTROL_IV1                    BIT(6)
+#define CONTEXT_CONTROL_IV2                    BIT(7)
+#define CONTEXT_CONTROL_IV3                    BIT(8)
+#define CONTEXT_CONTROL_DIGEST_CNT             BIT(9)
+#define CONTEXT_CONTROL_COUNTER_MODE           BIT(10)
+#define CONTEXT_CONTROL_HASH_STORE             BIT(19)
+
+/* EIP197_CS_RAM_CTRL */
+#define EIP197_TRC_ENABLE_0                    BIT(4)
+#define EIP197_TRC_ENABLE_1                    BIT(5)
+#define EIP197_TRC_ENABLE_2                    BIT(6)
+#define EIP197_TRC_ENABLE_MASK                 GENMASK(6, 4)
+
+/* EIP197_TRC_PARAMS */
+#define EIP197_TRC_PARAMS_SW_RESET             BIT(0)
+#define EIP197_TRC_PARAMS_DATA_ACCESS          BIT(2)
+#define EIP197_TRC_PARAMS_HTABLE_SZ(x)         ((x) << 4)
+#define EIP197_TRC_PARAMS_BLK_TIMER_SPEED(x)   ((x) << 10)
+#define EIP197_TRC_PARAMS_RC_SZ_LARGE(n)       ((n) << 18)
+
+/* EIP197_TRC_FREECHAIN */
+#define EIP197_TRC_FREECHAIN_HEAD_PTR(p)       (p)
+#define EIP197_TRC_FREECHAIN_TAIL_PTR(p)       ((p) << 16)
+
+/* EIP197_TRC_PARAMS2 */
+#define EIP197_TRC_PARAMS2_HTABLE_PTR(p)       (p)
+#define EIP197_TRC_PARAMS2_RC_SZ_SMALL(n)      ((n) << 18)
+
+/* Cache helpers */
+#define EIP197_CS_RC_MAX                       52
+#define EIP197_CS_RC_SIZE                      (4 * sizeof(u32))
+#define EIP197_CS_RC_NEXT(x)                   (x)
+#define EIP197_CS_RC_PREV(x)                   ((x) << 10)
+#define EIP197_RC_NULL                         0x3ff
+#define EIP197_CS_TRC_REC_WC                   59
+#define EIP197_CS_TRC_LG_REC_WC                        73
+
+/* Result data */
+struct result_data_desc {
+       u32 packet_length:17;
+       u32 error_code:15;
+
+       u8 bypass_length:4;
+       u8 e15:1;
+       u16 rsvd0;
+       u8 hash_bytes:1;
+       u8 hash_length:6;
+       u8 generic_bytes:1;
+       u8 checksum:1;
+       u8 next_header:1;
+       u8 length:1;
+
+       u16 application_id;
+       u16 rsvd1;
+
+       u32 rsvd2;
+} __packed;
+
+/* Basic Result Descriptor format */
+struct safexcel_result_desc {
+       u32 particle_size:17;
+       u8 rsvd0:3;
+       u8 descriptor_overflow:1;
+       u8 buffer_overflow:1;
+       u8 last_seg:1;
+       u8 first_seg:1;
+       u16 result_size:8;
+
+       u32 rsvd1;
+
+       u32 data_lo;
+       u32 data_hi;
+
+       struct result_data_desc result_data;
+} __packed;
+
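+/*
+ * Instruction token, written into the command descriptor's control data
+ * token words.
+ */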
+struct safexcel_token {
+       u32 packet_length:17;
+       u8 stat:2;
+       u16 instructions:9;
+       u8 opcode:4;
+} __packed;
+
+#define EIP197_TOKEN_STAT_LAST_HASH            BIT(0)
+#define EIP197_TOKEN_STAT_LAST_PACKET          BIT(1)
+#define EIP197_TOKEN_OPCODE_DIRECTION          0x0
+#define EIP197_TOKEN_OPCODE_INSERT             0x2
+#define EIP197_TOKEN_OPCODE_NOOP               EIP197_TOKEN_OPCODE_INSERT
+#define EIP197_TOKEN_OPCODE_BYPASS             GENMASK(3, 0)
+
+static inline void eip197_noop_token(struct safexcel_token *token)
+{
+       token->opcode = EIP197_TOKEN_OPCODE_NOOP;
+       token->packet_length = BIT(2);
+}
+
+/* Instructions */
+#define EIP197_TOKEN_INS_INSERT_HASH_DIGEST    0x1c
+#define EIP197_TOKEN_INS_TYPE_OUTPUT           BIT(5)
+#define EIP197_TOKEN_INS_TYPE_HASH             BIT(6)
+#define EIP197_TOKEN_INS_TYPE_CRYTO            BIT(7)
+#define EIP197_TOKEN_INS_LAST                  BIT(8)
+
+/* Processing Engine Control Data  */
+struct safexcel_control_data_desc {
+       u32 packet_length:17;
+       u16 options:13;
+       u8 type:2;
+
+       u16 application_id;
+       u16 rsvd;
+
+       u8 refresh:2;
+       u32 context_lo:30;
+       u32 context_hi;
+
+       u32 control0;
+       u32 control1;
+
+       u32 token[EIP197_MAX_TOKENS];
+} __packed;
+
+#define EIP197_OPTION_MAGIC_VALUE      BIT(0)
+#define EIP197_OPTION_64BIT_CTX                BIT(1)
+#define EIP197_OPTION_CTX_CTRL_IN_CMD  BIT(8)
+#define EIP197_OPTION_4_TOKEN_IV_CMD   GENMASK(11, 9)
+
+#define EIP197_TYPE_EXTENDED           0x3
+
+/* Basic Command Descriptor format */
+struct safexcel_command_desc {
+       u32 particle_size:17;
+       u8 rsvd0:5;
+       u8 last_seg:1;
+       u8 first_seg:1;
+       u16 additional_cdata_size:8;
+
+       u32 rsvd1;
+
+       u32 data_lo;
+       u32 data_hi;
+
+       struct safexcel_control_data_desc control_data;
+} __packed;
+
+/*
+ * Internal structures & functions
+ */
+
+enum eip197_fw {
+       FW_IFPP = 0,
+       FW_IPUE,
+       FW_NB
+};
+
+struct safexcel_ring {
+       void *base;
+       void *base_end;
+       dma_addr_t base_dma;
+
+       /* write and read pointers */
+       void *write;
+       void *read;
+
+       /* number of elements used in the ring */
+       unsigned nr;
+       unsigned offset;
+};
+
+enum safexcel_alg_type {
+       SAFEXCEL_ALG_TYPE_SKCIPHER,
+       SAFEXCEL_ALG_TYPE_AHASH,
+};
+
+struct safexcel_request {
+       struct list_head list;
+       struct crypto_async_request *req;
+};
+
+struct safexcel_config {
+       u32 rings;
+
+       u32 cd_size;
+       u32 cd_offset;
+
+       u32 rd_size;
+       u32 rd_offset;
+};
+
+struct safexcel_work_data {
+       struct work_struct work;
+       struct safexcel_crypto_priv *priv;
+       int ring;
+};
+
+struct safexcel_crypto_priv {
+       void __iomem *base;
+       struct device *dev;
+       struct clk *clk;
+       struct safexcel_config config;
+
+       spinlock_t lock;
+       struct crypto_queue queue;
+
+       bool need_dequeue;
+
+       /* context DMA pool */
+       struct dma_pool *context_pool;
+
+       atomic_t ring_used;
+
+       struct {
+               spinlock_t lock;
+               spinlock_t egress_lock;
+
+               struct list_head list;
+               struct workqueue_struct *workqueue;
+               struct safexcel_work_data work_data;
+
+               /* command/result rings */
+               struct safexcel_ring cdr;
+               struct safexcel_ring rdr;
+       } ring[EIP197_MAX_RINGS];
+};
+
+struct safexcel_context {
+       int (*send)(struct crypto_async_request *req, int ring,
+                   struct safexcel_request *request, int *commands,
+                   int *results);
+       int (*handle_result)(struct safexcel_crypto_priv *priv, int ring,
+                            struct crypto_async_request *req, bool *complete,
+                            int *ret);
+       struct safexcel_context_record *ctxr;
+       dma_addr_t ctxr_dma;
+
+       int ring;
+       bool needs_inv;
+       bool exit_inv;
+
+       /* Used for ahash requests */
+       dma_addr_t result_dma;
+       void *cache;
+       dma_addr_t cache_dma;
+       unsigned int cache_sz;
+};
+
+/*
+ * Template structure to describe the algorithms in order to register them.
+ * It also holds our private structure, and is currently the only way I know
+ * of in this framework to avoid having global pointers...
+ */
+struct safexcel_alg_template {
+       struct safexcel_crypto_priv *priv;
+       enum safexcel_alg_type type;
+       union {
+               struct skcipher_alg skcipher;
+               struct ahash_alg ahash;
+       } alg;
+};
+
+struct safexcel_inv_result {
+       struct completion completion;
+       int error;
+};
+
+void safexcel_dequeue(struct safexcel_crypto_priv *priv);
+void safexcel_complete(struct safexcel_crypto_priv *priv, int ring);
+void safexcel_free_context(struct safexcel_crypto_priv *priv,
+                                 struct crypto_async_request *req,
+                                 int result_sz);
+int safexcel_invalidate_cache(struct crypto_async_request *async,
+                             struct safexcel_context *ctx,
+                             struct safexcel_crypto_priv *priv,
+                             dma_addr_t ctxr_dma, int ring,
+                             struct safexcel_request *request);
+int safexcel_init_ring_descriptors(struct safexcel_crypto_priv *priv,
+                                  struct safexcel_ring *cdr,
+                                  struct safexcel_ring *rdr);
+int safexcel_select_ring(struct safexcel_crypto_priv *priv);
+void *safexcel_ring_next_rptr(struct safexcel_crypto_priv *priv,
+                             struct safexcel_ring *ring);
+void safexcel_ring_rollback_wptr(struct safexcel_crypto_priv *priv,
+                                struct safexcel_ring *ring);
+struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *priv,
+                                                int ring_id,
+                                                bool first, bool last,
+                                                dma_addr_t data, u32 len,
+                                                u32 full_data_len,
+                                                dma_addr_t context);
+struct safexcel_result_desc *safexcel_add_rdesc(struct safexcel_crypto_priv *priv,
+                                                int ring_id,
+                                               bool first, bool last,
+                                               dma_addr_t data, u32 len);
+void safexcel_inv_complete(struct crypto_async_request *req, int error);
+
+/* available algorithms */
+extern struct safexcel_alg_template safexcel_alg_ecb_aes;
+extern struct safexcel_alg_template safexcel_alg_cbc_aes;
+extern struct safexcel_alg_template safexcel_alg_sha1;
+extern struct safexcel_alg_template safexcel_alg_sha224;
+extern struct safexcel_alg_template safexcel_alg_sha256;
+extern struct safexcel_alg_template safexcel_alg_hmac_sha1;
+
+#endif
 
--- /dev/null
+/*
+ * Copyright (C) 2017 Marvell
+ *
+ * Antoine Tenart <antoine.tenart@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+
+#include <crypto/aes.h>
+#include <crypto/skcipher.h>
+
+#include "safexcel.h"
+
+enum safexcel_cipher_direction {
+       SAFEXCEL_ENCRYPT,
+       SAFEXCEL_DECRYPT,
+};
+
+struct safexcel_cipher_ctx {
+       struct safexcel_context base;
+       struct safexcel_crypto_priv *priv;
+
+       enum safexcel_cipher_direction direction;
+       u32 mode;
+
+       __le32 key[8];
+       unsigned int key_len;
+};
+
+static void safexcel_cipher_token(struct safexcel_cipher_ctx *ctx,
+                                 struct crypto_async_request *async,
+                                 struct safexcel_command_desc *cdesc,
+                                 u32 length)
+{
+       struct skcipher_request *req = skcipher_request_cast(async);
+       struct safexcel_token *token;
+       unsigned int offset = 0;
+
+       if (ctx->mode == CONTEXT_CONTROL_CRYPTO_MODE_CBC) {
+               offset = AES_BLOCK_SIZE / sizeof(u32);
+               memcpy(cdesc->control_data.token, req->iv, AES_BLOCK_SIZE);
+
+               cdesc->control_data.options |= EIP197_OPTION_4_TOKEN_IV_CMD;
+       }
+
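+       /* The processing token is placed right after the (optional) IV words */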
+       token = (struct safexcel_token *)(cdesc->control_data.token + offset);
+
+       token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
+       token[0].packet_length = length;
+       token[0].stat = EIP197_TOKEN_STAT_LAST_PACKET;
+       token[0].instructions = EIP197_TOKEN_INS_LAST |
+                               EIP197_TOKEN_INS_TYPE_CRYTO |
+                               EIP197_TOKEN_INS_TYPE_OUTPUT;
+}
+
+static int safexcel_aes_setkey(struct crypto_skcipher *ctfm, const u8 *key,
+                              unsigned int len)
+{
+       struct crypto_tfm *tfm = crypto_skcipher_tfm(ctfm);
+       struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+       struct crypto_aes_ctx aes;
+       int ret, i;
+
+       ret = crypto_aes_expand_key(&aes, key, len);
+       if (ret) {
+               crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+               return ret;
+       }
+
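+       /* Flag the context for invalidation if the key changed since the last setkey */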
+       for (i = 0; i < len / sizeof(u32); i++) {
+               if (ctx->key[i] != cpu_to_le32(aes.key_enc[i])) {
+                       ctx->base.needs_inv = true;
+                       break;
+               }
+       }
+
+       for (i = 0; i < len / sizeof(u32); i++)
+               ctx->key[i] = cpu_to_le32(aes.key_enc[i]);
+
+       ctx->key_len = len;
+
+       memzero_explicit(&aes, sizeof(aes));
+       return 0;
+}
+
+static int safexcel_context_control(struct safexcel_cipher_ctx *ctx,
+                                   struct safexcel_command_desc *cdesc)
+{
+       struct safexcel_crypto_priv *priv = ctx->priv;
+       int ctrl_size;
+
+       if (ctx->direction == SAFEXCEL_ENCRYPT)
+               cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_CRYPTO_OUT;
+       else
+               cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_CRYPTO_IN;
+
+       cdesc->control_data.control0 |= CONTEXT_CONTROL_KEY_EN;
+       cdesc->control_data.control1 |= ctx->mode;
+
+       switch (ctx->key_len) {
+       case AES_KEYSIZE_128:
+               cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES128;
+               ctrl_size = 4;
+               break;
+       case AES_KEYSIZE_192:
+               cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES192;
+               ctrl_size = 6;
+               break;
+       case AES_KEYSIZE_256:
+               cdesc->control_data.control0 |= CONTEXT_CONTROL_CRYPTO_ALG_AES256;
+               ctrl_size = 8;
+               break;
+       default:
+               dev_err(priv->dev, "aes keysize not supported: %u\n",
+                       ctx->key_len);
+               return -EINVAL;
+       }
+       cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(ctrl_size);
+
+       return 0;
+}
+
+static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
+                                 struct crypto_async_request *async,
+                                 bool *should_complete, int *ret)
+{
+       struct skcipher_request *req = skcipher_request_cast(async);
+       struct safexcel_result_desc *rdesc;
+       int ndesc = 0;
+
+       *ret = 0;
+
+       spin_lock_bh(&priv->ring[ring].egress_lock);
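+       /* Walk the result descriptors generated for this request, up to the last segment */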
+       do {
+               rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
+               if (IS_ERR(rdesc)) {
+                       dev_err(priv->dev,
+                               "cipher: result: could not retrieve the result descriptor\n");
+                       *ret = PTR_ERR(rdesc);
+                       break;
+               }
+
+               if (rdesc->result_data.error_code) {
+                       dev_err(priv->dev,
+                               "cipher: result: result descriptor error (%d)\n",
+                               rdesc->result_data.error_code);
+                       *ret = -EIO;
+               }
+
+               ndesc++;
+       } while (!rdesc->last_seg);
+
+       safexcel_complete(priv, ring);
+       spin_unlock_bh(&priv->ring[ring].egress_lock);
+
+       if (req->src == req->dst) {
+               dma_unmap_sg(priv->dev, req->src,
+                            sg_nents_for_len(req->src, req->cryptlen),
+                            DMA_BIDIRECTIONAL);
+       } else {
+               dma_unmap_sg(priv->dev, req->src,
+                            sg_nents_for_len(req->src, req->cryptlen),
+                            DMA_TO_DEVICE);
+               dma_unmap_sg(priv->dev, req->dst,
+                            sg_nents_for_len(req->dst, req->cryptlen),
+                            DMA_FROM_DEVICE);
+       }
+
+       *should_complete = true;
+
+       return ndesc;
+}
+
+static int safexcel_aes_send(struct crypto_async_request *async,
+                            int ring, struct safexcel_request *request,
+                            int *commands, int *results)
+{
+       struct skcipher_request *req = skcipher_request_cast(async);
+       struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+       struct safexcel_crypto_priv *priv = ctx->priv;
+       struct safexcel_command_desc *cdesc;
+       struct safexcel_result_desc *rdesc;
+       struct scatterlist *sg;
+       int nr_src, nr_dst, n_cdesc = 0, n_rdesc = 0, queued = req->cryptlen;
+       int i, ret = 0;
+
+       request->req = &req->base;
+
+       if (req->src == req->dst) {
+               nr_src = dma_map_sg(priv->dev, req->src,
+                                   sg_nents_for_len(req->src, req->cryptlen),
+                                   DMA_BIDIRECTIONAL);
+               nr_dst = nr_src;
+               if (!nr_src)
+                       return -EINVAL;
+       } else {
+               nr_src = dma_map_sg(priv->dev, req->src,
+                                   sg_nents_for_len(req->src, req->cryptlen),
+                                   DMA_TO_DEVICE);
+               if (!nr_src)
+                       return -EINVAL;
+
+               nr_dst = dma_map_sg(priv->dev, req->dst,
+                                   sg_nents_for_len(req->dst, req->cryptlen),
+                                   DMA_FROM_DEVICE);
+               if (!nr_dst) {
+                       dma_unmap_sg(priv->dev, req->src,
+                                    sg_nents_for_len(req->src, req->cryptlen),
+                                    DMA_TO_DEVICE);
+                       return -EINVAL;
+               }
+       }
+
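+       /* Copy the AES key into the per-tfm context record used by the engine */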
+       memcpy(ctx->base.ctxr->data, ctx->key, ctx->key_len);
+
+       spin_lock_bh(&priv->ring[ring].egress_lock);
+
+       /* command descriptors */
+       for_each_sg(req->src, sg, nr_src, i) {
+               int len = sg_dma_len(sg);
+
+               /* Do not overflow the request */
+               if (queued - len < 0)
+                       len = queued;
+
+               cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc, !(queued - len),
+                                          sg_dma_address(sg), len, req->cryptlen,
+                                          ctx->base.ctxr_dma);
+               if (IS_ERR(cdesc)) {
+                       /* No space left in the command descriptor ring */
+                       ret = PTR_ERR(cdesc);
+                       goto cdesc_rollback;
+               }
+               n_cdesc++;
+
+               if (n_cdesc == 1) {
+                       safexcel_context_control(ctx, cdesc);
+                       safexcel_cipher_token(ctx, async, cdesc, req->cryptlen);
+               }
+
+               queued -= len;
+               if (!queued)
+                       break;
+       }
+
+       /* result descriptors */
+       for_each_sg(req->dst, sg, nr_dst, i) {
+               bool first = !i, last = (i == nr_dst - 1);
+               u32 len = sg_dma_len(sg);
+
+               rdesc = safexcel_add_rdesc(priv, ring, first, last,
+                                          sg_dma_address(sg), len);
+               if (IS_ERR(rdesc)) {
+                       /* No space left in the result descriptor ring */
+                       ret = PTR_ERR(rdesc);
+                       goto rdesc_rollback;
+               }
+               n_rdesc++;
+       }
+
+       ctx->base.handle_result = safexcel_handle_result;
+
+       spin_unlock_bh(&priv->ring[ring].egress_lock);
+
+       *commands = n_cdesc;
+       *results = nr_dst;
+       return 0;
+
+rdesc_rollback:
+       for (i = 0; i < n_rdesc; i++)
+               safexcel_ring_rollback_wptr(priv, &priv->ring[ring].rdr);
+cdesc_rollback:
+       for (i = 0; i < n_cdesc; i++)
+               safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
+
+       spin_unlock_bh(&priv->ring[ring].egress_lock);
+
+       if (req->src == req->dst) {
+               dma_unmap_sg(priv->dev, req->src,
+                            sg_nents_for_len(req->src, req->cryptlen),
+                            DMA_BIDIRECTIONAL);
+       } else {
+               dma_unmap_sg(priv->dev, req->src,
+                            sg_nents_for_len(req->src, req->cryptlen),
+                            DMA_TO_DEVICE);
+               dma_unmap_sg(priv->dev, req->dst,
+                            sg_nents_for_len(req->dst, req->cryptlen),
+                            DMA_FROM_DEVICE);
+       }
+
+       return ret;
+}
+
+static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
+                                     int ring,
+                                     struct crypto_async_request *async,
+                                     bool *should_complete, int *ret)
+{
+       struct skcipher_request *req = skcipher_request_cast(async);
+       struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+       struct safexcel_result_desc *rdesc;
+       int ndesc = 0, enq_ret;
+
+       *ret = 0;
+
+       spin_lock_bh(&priv->ring[ring].egress_lock);
+       do {
+               rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
+               if (IS_ERR(rdesc)) {
+                       dev_err(priv->dev,
+                               "cipher: invalidate: could not retrieve the result descriptor\n");
+                       *ret = PTR_ERR(rdesc);
+                       break;
+               }
+
+               if (rdesc->result_data.error_code) {
+                       dev_err(priv->dev, "cipher: invalidate: result descriptor error (%d)\n",
+                               rdesc->result_data.error_code);
+                       *ret = -EIO;
+               }
+
+               ndesc++;
+       } while (!rdesc->last_seg);
+
+       safexcel_complete(priv, ring);
+       spin_unlock_bh(&priv->ring[ring].egress_lock);
+
+       if (ctx->base.exit_inv) {
+               dma_pool_free(priv->context_pool, ctx->base.ctxr,
+                             ctx->base.ctxr_dma);
+
+               *should_complete = true;
+
+               return ndesc;
+       }
+
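+       /*
+        * The invalidation completed but the tfm is still in use: requeue the
+        * original request with the regular send handler.
+        */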
+       ctx->base.needs_inv = false;
+       ctx->base.ring = safexcel_select_ring(priv);
+       ctx->base.send = safexcel_aes_send;
+
+       spin_lock_bh(&priv->lock);
+       enq_ret = crypto_enqueue_request(&priv->queue, async);
+       spin_unlock_bh(&priv->lock);
+
+       if (enq_ret != -EINPROGRESS)
+               *ret = enq_ret;
+
+       priv->need_dequeue = true;
+       *should_complete = false;
+
+       return ndesc;
+}
+
+static int safexcel_cipher_send_inv(struct crypto_async_request *async,
+                                   int ring, struct safexcel_request *request,
+                                   int *commands, int *results)
+{
+       struct skcipher_request *req = skcipher_request_cast(async);
+       struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+       struct safexcel_crypto_priv *priv = ctx->priv;
+       int ret;
+
+       ctx->base.handle_result = safexcel_handle_inv_result;
+
+       ret = safexcel_invalidate_cache(async, &ctx->base, priv,
+                                       ctx->base.ctxr_dma, ring, request);
+       if (unlikely(ret))
+               return ret;
+
+       *commands = 1;
+       *results = 1;
+
+       return 0;
+}
+
+static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm)
+{
+       struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+       struct safexcel_crypto_priv *priv = ctx->priv;
+       struct skcipher_request req;
+       struct safexcel_inv_result result = { 0 };
+
+       memset(&req, 0, sizeof(struct skcipher_request));
+
+       /* create invalidation request */
+       init_completion(&result.completion);
+       skcipher_request_set_callback(&req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+                                       safexcel_inv_complete, &result);
+
+       skcipher_request_set_tfm(&req, __crypto_skcipher_cast(tfm));
+       ctx = crypto_tfm_ctx(req.base.tfm);
+       ctx->base.exit_inv = true;
+       ctx->base.send = safexcel_cipher_send_inv;
+
+       spin_lock_bh(&priv->lock);
+       crypto_enqueue_request(&priv->queue, &req.base);
+       spin_unlock_bh(&priv->lock);
+
+       if (!priv->need_dequeue)
+               safexcel_dequeue(priv);
+
+       wait_for_completion_interruptible(&result.completion);
+
+       if (result.error) {
+               dev_warn(priv->dev,
+                       "cipher: sync: invalidate: completion error %d\n",
+                        result.error);
+               return result.error;
+       }
+
+       return 0;
+}
+
+static int safexcel_aes(struct skcipher_request *req,
+                       enum safexcel_cipher_direction dir, u32 mode)
+{
+       struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+       struct safexcel_crypto_priv *priv = ctx->priv;
+       int ret;
+
+       ctx->direction = dir;
+       ctx->mode = mode;
+
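+       /*
+        * Reuse the existing context record if there is one (invalidating it
+        * first if needed), otherwise allocate a new one from the DMA pool.
+        */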
+       if (ctx->base.ctxr) {
+               if (ctx->base.needs_inv)
+                       ctx->base.send = safexcel_cipher_send_inv;
+       } else {
+               ctx->base.ring = safexcel_select_ring(priv);
+               ctx->base.send = safexcel_aes_send;
+
+               ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
+                                                EIP197_GFP_FLAGS(req->base),
+                                                &ctx->base.ctxr_dma);
+               if (!ctx->base.ctxr)
+                       return -ENOMEM;
+       }
+
+       spin_lock_bh(&priv->lock);
+       ret = crypto_enqueue_request(&priv->queue, &req->base);
+       spin_unlock_bh(&priv->lock);
+
+       if (!priv->need_dequeue)
+               safexcel_dequeue(priv);
+
+       return ret;
+}
+
+static int safexcel_ecb_aes_encrypt(struct skcipher_request *req)
+{
+       return safexcel_aes(req, SAFEXCEL_ENCRYPT,
+                           CONTEXT_CONTROL_CRYPTO_MODE_ECB);
+}
+
+static int safexcel_ecb_aes_decrypt(struct skcipher_request *req)
+{
+       return safexcel_aes(req, SAFEXCEL_DECRYPT,
+                           CONTEXT_CONTROL_CRYPTO_MODE_ECB);
+}
+
+static int safexcel_skcipher_cra_init(struct crypto_tfm *tfm)
+{
+       struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+       struct safexcel_alg_template *tmpl =
+               container_of(tfm->__crt_alg, struct safexcel_alg_template,
+                            alg.skcipher.base);
+
+       ctx->priv = tmpl->priv;
+
+       return 0;
+}
+
+static void safexcel_skcipher_cra_exit(struct crypto_tfm *tfm)
+{
+       struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+       struct safexcel_crypto_priv *priv = ctx->priv;
+       int ret;
+
+       memzero_explicit(ctx->key, 8 * sizeof(u32));
+
+       /* context not allocated, skip invalidation */
+       if (!ctx->base.ctxr)
+               return;
+
+       memzero_explicit(ctx->base.ctxr->data, 8 * sizeof(u32));
+
+       ret = safexcel_cipher_exit_inv(tfm);
+       if (ret)
+               dev_warn(priv->dev, "cipher: invalidation error %d\n", ret);
+}
+
+struct safexcel_alg_template safexcel_alg_ecb_aes = {
+       .type = SAFEXCEL_ALG_TYPE_SKCIPHER,
+       .alg.skcipher = {
+               .setkey = safexcel_aes_setkey,
+               .encrypt = safexcel_ecb_aes_encrypt,
+               .decrypt = safexcel_ecb_aes_decrypt,
+               .min_keysize = AES_MIN_KEY_SIZE,
+               .max_keysize = AES_MAX_KEY_SIZE,
+               .base = {
+                       .cra_name = "ecb(aes)",
+                       .cra_driver_name = "safexcel-ecb-aes",
+                       .cra_priority = 300,
+                       .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
+                                    CRYPTO_ALG_KERN_DRIVER_ONLY,
+                       .cra_blocksize = AES_BLOCK_SIZE,
+                       .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+                       .cra_alignmask = 0,
+                       .cra_init = safexcel_skcipher_cra_init,
+                       .cra_exit = safexcel_skcipher_cra_exit,
+                       .cra_module = THIS_MODULE,
+               },
+       },
+};
+
+static int safexcel_cbc_aes_encrypt(struct skcipher_request *req)
+{
+       return safexcel_aes(req, SAFEXCEL_ENCRYPT,
+                           CONTEXT_CONTROL_CRYPTO_MODE_CBC);
+}
+
+static int safexcel_cbc_aes_decrypt(struct skcipher_request *req)
+{
+       return safexcel_aes(req, SAFEXCEL_DECRYPT,
+                           CONTEXT_CONTROL_CRYPTO_MODE_CBC);
+}
+
+struct safexcel_alg_template safexcel_alg_cbc_aes = {
+       .type = SAFEXCEL_ALG_TYPE_SKCIPHER,
+       .alg.skcipher = {
+               .setkey = safexcel_aes_setkey,
+               .encrypt = safexcel_cbc_aes_encrypt,
+               .decrypt = safexcel_cbc_aes_decrypt,
+               .min_keysize = AES_MIN_KEY_SIZE,
+               .max_keysize = AES_MAX_KEY_SIZE,
+               .ivsize = AES_BLOCK_SIZE,
+               .base = {
+                       .cra_name = "cbc(aes)",
+                       .cra_driver_name = "safexcel-cbc-aes",
+                       .cra_priority = 300,
+                       .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC |
+                                    CRYPTO_ALG_KERN_DRIVER_ONLY,
+                       .cra_blocksize = AES_BLOCK_SIZE,
+                       .cra_ctxsize = sizeof(struct safexcel_cipher_ctx),
+                       .cra_alignmask = 0,
+                       .cra_init = safexcel_skcipher_cra_init,
+                       .cra_exit = safexcel_skcipher_cra_exit,
+                       .cra_module = THIS_MODULE,
+               },
+       },
+};
 
--- /dev/null
+/*
+ * Copyright (C) 2017 Marvell
+ *
+ * Antoine Tenart <antoine.tenart@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <crypto/sha.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+
+#include "safexcel.h"
+
+struct safexcel_ahash_ctx {
+       struct safexcel_context base;
+       struct safexcel_crypto_priv *priv;
+
+       u32 alg;
+       u32 digest;
+
+       u32 ipad[SHA1_DIGEST_SIZE / sizeof(u32)];
+       u32 opad[SHA1_DIGEST_SIZE / sizeof(u32)];
+};
+
+struct safexcel_ahash_req {
+       bool last_req;
+       bool finish;
+       bool hmac;
+
+       u8 state_sz;    /* expected state size, only set once */
+       u32 state[SHA256_DIGEST_SIZE / sizeof(u32)];
+
+       u64 len;
+       u64 processed;
+
+       u8 cache[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
+       u8 cache_next[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
+};
+
+struct safexcel_ahash_export_state {
+       u64 len;
+       u64 processed;
+
+       u32 state[SHA256_DIGEST_SIZE / sizeof(u32)];
+       u8 cache[SHA256_BLOCK_SIZE];
+};
+
+static void safexcel_hash_token(struct safexcel_command_desc *cdesc,
+                               u32 input_length, u32 result_length)
+{
+       struct safexcel_token *token =
+               (struct safexcel_token *)cdesc->control_data.token;
+
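+       /* The first token hashes the input data, the second inserts the resulting digest */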
+       token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
+       token[0].packet_length = input_length;
+       token[0].stat = EIP197_TOKEN_STAT_LAST_HASH;
+       token[0].instructions = EIP197_TOKEN_INS_TYPE_HASH;
+
+       token[1].opcode = EIP197_TOKEN_OPCODE_INSERT;
+       token[1].packet_length = result_length;
+       token[1].stat = EIP197_TOKEN_STAT_LAST_HASH |
+                       EIP197_TOKEN_STAT_LAST_PACKET;
+       token[1].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
+                               EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
+}
+
+static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
+                                    struct safexcel_ahash_req *req,
+                                    struct safexcel_command_desc *cdesc,
+                                    unsigned int digestsize,
+                                    unsigned int blocksize)
+{
+       int i;
+
+       cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_HASH_OUT;
+       cdesc->control_data.control0 |= ctx->alg;
+       cdesc->control_data.control0 |= ctx->digest;
+
+       if (ctx->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) {
+               if (req->processed) {
+                       if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
+                               cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(6);
+                       else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA224 ||
+                                ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA256)
+                               cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(9);
+
+                       cdesc->control_data.control1 |= CONTEXT_CONTROL_DIGEST_CNT;
+               } else {
+                       cdesc->control_data.control0 |= CONTEXT_CONTROL_RESTART_HASH;
+               }
+
+               if (!req->finish)
+                       cdesc->control_data.control0 |= CONTEXT_CONTROL_NO_FINISH_HASH;
+
+               /*
+                * Copy the input digest if needed, and set up the context
+                * fields. Do this now as we need it to set up the first
+                * command descriptor.
+                */
+               if (req->processed) {
+                       for (i = 0; i < digestsize / sizeof(u32); i++)
+                               ctx->base.ctxr->data[i] = cpu_to_le32(req->state[i]);
+
+                       if (req->finish)
+                               ctx->base.ctxr->data[i] = cpu_to_le32(req->processed / blocksize);
+               }
+       } else if (ctx->digest == CONTEXT_CONTROL_DIGEST_HMAC) {
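+               /* HMAC: load the precomputed ipad and opad digests into the context record */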
+               cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(10);
+
+               memcpy(ctx->base.ctxr->data, ctx->ipad, digestsize);
+               memcpy(ctx->base.ctxr->data + digestsize / sizeof(u32),
+                      ctx->opad, digestsize);
+       }
+}
+
+static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
+                                 struct crypto_async_request *async,
+                                 bool *should_complete, int *ret)
+{
+       struct safexcel_result_desc *rdesc;
+       struct ahash_request *areq = ahash_request_cast(async);
+       struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
+       struct safexcel_ahash_req *sreq = ahash_request_ctx(areq);
+       int cache_len, result_sz = sreq->state_sz;
+
+       *ret = 0;
+
+       spin_lock_bh(&priv->ring[ring].egress_lock);
+       rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
+       if (IS_ERR(rdesc)) {
+               dev_err(priv->dev,
+                       "hash: result: could not retrieve the result descriptor\n");
+               *ret = PTR_ERR(rdesc);
+       } else if (rdesc->result_data.error_code) {
+               dev_err(priv->dev,
+                       "hash: result: result descriptor error (%d)\n",
+                       rdesc->result_data.error_code);
+               *ret = -EINVAL;
+       }
+
+       safexcel_complete(priv, ring);
+       spin_unlock_bh(&priv->ring[ring].egress_lock);
+
+       if (sreq->finish)
+               result_sz = crypto_ahash_digestsize(ahash);
+       memcpy(sreq->state, areq->result, result_sz);
+
+       dma_unmap_sg(priv->dev, areq->src,
+                    sg_nents_for_len(areq->src, areq->nbytes), DMA_TO_DEVICE);
+
+       safexcel_free_context(priv, async, sreq->state_sz);
+
+       cache_len = sreq->len - sreq->processed;
+       if (cache_len)
+               memcpy(sreq->cache, sreq->cache_next, cache_len);
+
+       *should_complete = true;
+
+       return 1;
+}
+
+static int safexcel_ahash_send(struct crypto_async_request *async, int ring,
+                              struct safexcel_request *request, int *commands,
+                              int *results)
+{
+       struct ahash_request *areq = ahash_request_cast(async);
+       struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
+       struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+       struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+       struct safexcel_crypto_priv *priv = ctx->priv;
+       struct safexcel_command_desc *cdesc, *first_cdesc = NULL;
+       struct safexcel_result_desc *rdesc;
+       struct scatterlist *sg;
+       int i, nents, queued, len, cache_len, extra, n_cdesc = 0, ret = 0;
+
+       queued = len = req->len - req->processed;
+       if (queued < crypto_ahash_blocksize(ahash))
+               cache_len = queued;
+       else
+               cache_len = queued - areq->nbytes;
+
+       /*
+        * If this is not the last request and the queued data does not fit
+        * into full blocks, cache it for the next send() call.
+        */
+       extra = queued & (crypto_ahash_blocksize(ahash) - 1);
+       if (!req->last_req && extra) {
+               sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
+                                  req->cache_next, extra, areq->nbytes - extra);
+
+               queued -= extra;
+               len -= extra;
+       }
+
+       request->req = &areq->base;
+       ctx->base.handle_result = safexcel_handle_result;
+
+       spin_lock_bh(&priv->ring[ring].egress_lock);
+
+       /* Add a command descriptor for the cached data, if any */
+       if (cache_len) {
+               ctx->base.cache = kzalloc(cache_len, EIP197_GFP_FLAGS(*async));
+               if (!ctx->base.cache) {
+                       ret = -ENOMEM;
+                       goto unlock;
+               }
+               memcpy(ctx->base.cache, req->cache, cache_len);
+               ctx->base.cache_dma = dma_map_single(priv->dev, ctx->base.cache,
+                                                    cache_len, DMA_TO_DEVICE);
+               if (dma_mapping_error(priv->dev, ctx->base.cache_dma)) {
+                       ret = -EINVAL;
+                       goto free_cache;
+               }
+
+               ctx->base.cache_sz = cache_len;
+               first_cdesc = safexcel_add_cdesc(priv, ring, 1,
+                                                (cache_len == len),
+                                                ctx->base.cache_dma,
+                                                cache_len, len,
+                                                ctx->base.ctxr_dma);
+               if (IS_ERR(first_cdesc)) {
+                       ret = PTR_ERR(first_cdesc);
+                       goto unmap_cache;
+               }
+               n_cdesc++;
+
+               queued -= cache_len;
+               if (!queued)
+                       goto send_command;
+       }
+
+       /* Now handle the current ahash request buffer(s) */
+       nents = dma_map_sg(priv->dev, areq->src,
+                      sg_nents_for_len(areq->src, areq->nbytes),
+                      DMA_TO_DEVICE);
+       if (!nents) {
+               ret = -ENOMEM;
+               goto cdesc_rollback;
+       }
+
+       for_each_sg(areq->src, sg, nents, i) {
+               int sglen = sg_dma_len(sg);
+
+               /* Do not overflow the request */
+               if (queued - sglen < 0)
+                       sglen = queued;
+
+               cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc,
+                                          !(queued - sglen), sg_dma_address(sg),
+                                          sglen, len, ctx->base.ctxr_dma);
+               if (IS_ERR(cdesc)) {
+                       ret = PTR_ERR(cdesc);
+                       goto cdesc_rollback;
+               }
+               n_cdesc++;
+
+               if (n_cdesc == 1)
+                       first_cdesc = cdesc;
+
+               queued -= sglen;
+               if (!queued)
+                       break;
+       }
+
+send_command:
+       /* Set up the context options */
+       safexcel_context_control(ctx, req, first_cdesc, req->state_sz,
+                                crypto_ahash_blocksize(ahash));
+
+       /* Add the token */
+       safexcel_hash_token(first_cdesc, len, req->state_sz);
+
+       ctx->base.result_dma = dma_map_single(priv->dev, areq->result,
+                                             req->state_sz, DMA_FROM_DEVICE);
+       if (dma_mapping_error(priv->dev, ctx->base.result_dma)) {
+               ret = -EINVAL;
+               goto cdesc_rollback;
+       }
+
+       /* Add a result descriptor */
+       rdesc = safexcel_add_rdesc(priv, ring, 1, 1, ctx->base.result_dma,
+                                  req->state_sz);
+       if (IS_ERR(rdesc)) {
+               ret = PTR_ERR(rdesc);
+               goto cdesc_rollback;
+       }
+
+       req->processed += len;
+       spin_unlock_bh(&priv->ring[ring].egress_lock);
+
+       *commands = n_cdesc;
+       *results = 1;
+       return 0;
+
+cdesc_rollback:
+       for (i = 0; i < n_cdesc; i++)
+               safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
+unmap_cache:
+       if (ctx->base.cache_dma) {
+               dma_unmap_single(priv->dev, ctx->base.cache_dma,
+                                ctx->base.cache_sz, DMA_TO_DEVICE);
+               ctx->base.cache_sz = 0;
+       }
+free_cache:
+       if (ctx->base.cache) {
+               kfree(ctx->base.cache);
+               ctx->base.cache = NULL;
+       }
+
+unlock:
+       spin_unlock_bh(&priv->ring[ring].egress_lock);
+       return ret;
+}
+
+static inline bool safexcel_ahash_needs_inv_get(struct ahash_request *areq)
+{
+       struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+       struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+       struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
+       unsigned int state_w_sz = req->state_sz / sizeof(u32);
+       int i;
+
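+       /* An invalidation is needed if the hardware context diverged from the software state */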
+       for (i = 0; i < state_w_sz; i++)
+               if (ctx->base.ctxr->data[i] != cpu_to_le32(req->state[i]))
+                       return true;
+
+       if (ctx->base.ctxr->data[state_w_sz] !=
+           cpu_to_le32(req->processed / crypto_ahash_blocksize(ahash)))
+               return true;
+
+       return false;
+}
+
+static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
+                                     int ring,
+                                     struct crypto_async_request *async,
+                                     bool *should_complete, int *ret)
+{
+       struct safexcel_result_desc *rdesc;
+       struct ahash_request *areq = ahash_request_cast(async);
+       struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
+       struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(ahash);
+       int enq_ret;
+
+       *ret = 0;
+
+       spin_lock_bh(&priv->ring[ring].egress_lock);
+       rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
+       if (IS_ERR(rdesc)) {
+               dev_err(priv->dev,
+                       "hash: invalidate: could not retrieve the result descriptor\n");
+               *ret = PTR_ERR(rdesc);
+       } else if (rdesc->result_data.error_code) {
+               dev_err(priv->dev,
+                       "hash: invalidate: result descriptor error (%d)\n",
+                       rdesc->result_data.error_code);
+               *ret = -EINVAL;
+       }
+
+       safexcel_complete(priv, ring);
+       spin_unlock_bh(&priv->ring[ring].egress_lock);
+
+       if (ctx->base.exit_inv) {
+               dma_pool_free(priv->context_pool, ctx->base.ctxr,
+                             ctx->base.ctxr_dma);
+
+               *should_complete = true;
+               return 1;
+       }
+
+       ctx->base.ring = safexcel_select_ring(priv);
+       ctx->base.needs_inv = false;
+       ctx->base.send = safexcel_ahash_send;
+
+       spin_lock_bh(&priv->lock);
+       enq_ret = crypto_enqueue_request(&priv->queue, async);
+       spin_unlock_bh(&priv->lock);
+
+       if (enq_ret != -EINPROGRESS)
+               *ret = enq_ret;
+
+       priv->need_dequeue = true;
+       *should_complete = false;
+
+       return 1;
+}
+
+static int safexcel_ahash_send_inv(struct crypto_async_request *async,
+                                  int ring, struct safexcel_request *request,
+                                  int *commands, int *results)
+{
+       struct ahash_request *areq = ahash_request_cast(async);
+       struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+       int ret;
+
+       ctx->base.handle_result = safexcel_handle_inv_result;
+       ret = safexcel_invalidate_cache(async, &ctx->base, ctx->priv,
+                                       ctx->base.ctxr_dma, ring, request);
+       if (unlikely(ret))
+               return ret;
+
+       *commands = 1;
+       *results = 1;
+
+       return 0;
+}
+
+static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
+{
+       struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
+       struct safexcel_crypto_priv *priv = ctx->priv;
+       struct ahash_request req;
+       struct safexcel_inv_result result = { 0 };
+
+       memset(&req, 0, sizeof(struct ahash_request));
+
+       /* create invalidation request */
+       init_completion(&result.completion);
+       ahash_request_set_callback(&req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+                                  safexcel_inv_complete, &result);
+
+       ahash_request_set_tfm(&req, __crypto_ahash_cast(tfm));
+       ctx = crypto_tfm_ctx(req.base.tfm);
+       ctx->base.exit_inv = true;
+       ctx->base.send = safexcel_ahash_send_inv;
+
+       spin_lock_bh(&priv->lock);
+       crypto_enqueue_request(&priv->queue, &req.base);
+       spin_unlock_bh(&priv->lock);
+
+       if (!priv->need_dequeue)
+               safexcel_dequeue(priv);
+
+       wait_for_completion_interruptible(&result.completion);
+
+       if (result.error) {
+               dev_warn(priv->dev, "hash: completion error (%d)\n",
+                        result.error);
+               return result.error;
+       }
+
+       return 0;
+}
+
+static int safexcel_ahash_cache(struct ahash_request *areq)
+{
+       struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+       struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
+       int queued, cache_len;
+
+       cache_len = req->len - areq->nbytes - req->processed;
+       queued = req->len - req->processed;
+
+       /*
+        * If there aren't enough bytes to proceed (i.e. less than a block
+        * size), cache the data until we have enough.
+        */
+       if (cache_len + areq->nbytes <= crypto_ahash_blocksize(ahash)) {
+               sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
+                                  req->cache + cache_len,
+                                  areq->nbytes, 0);
+               return areq->nbytes;
+       }
+
+       /* We couldn't cache all the data */
+       return -E2BIG;
+}
+
+static int safexcel_ahash_enqueue(struct ahash_request *areq)
+{
+       struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+       struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+       struct safexcel_crypto_priv *priv = ctx->priv;
+       int ret;
+
+       ctx->base.send = safexcel_ahash_send;
+
+       if (req->processed && ctx->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)
+               ctx->base.needs_inv = safexcel_ahash_needs_inv_get(areq);
+
+       if (ctx->base.ctxr) {
+               if (ctx->base.needs_inv)
+                       ctx->base.send = safexcel_ahash_send_inv;
+       } else {
+               ctx->base.ring = safexcel_select_ring(priv);
+               ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
+                                                EIP197_GFP_FLAGS(areq->base),
+                                                &ctx->base.ctxr_dma);
+               if (!ctx->base.ctxr)
+                       return -ENOMEM;
+       }
+
+       spin_lock_bh(&priv->lock);
+       ret = crypto_enqueue_request(&priv->queue, &areq->base);
+       spin_unlock_bh(&priv->lock);
+
+       if (!priv->need_dequeue)
+               safexcel_dequeue(priv);
+
+       return ret;
+}
+
+static int safexcel_ahash_update(struct ahash_request *areq)
+{
+       struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+       struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+       struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
+
+       /* If the request is 0 length, do nothing */
+       if (!areq->nbytes)
+               return 0;
+
+       req->len += areq->nbytes;
+
+       safexcel_ahash_cache(areq);
+
+       /*
+        * We're not doing partial updates when performing an HMAC request.
+        * Everything will be handled by the final() call.
+        */
+       if (ctx->digest == CONTEXT_CONTROL_DIGEST_HMAC)
+               return 0;
+
+       if (req->hmac)
+               return safexcel_ahash_enqueue(areq);
+
+       if (!req->last_req &&
+           req->len - req->processed > crypto_ahash_blocksize(ahash))
+               return safexcel_ahash_enqueue(areq);
+
+       return 0;
+}
+
+static int safexcel_ahash_final(struct ahash_request *areq)
+{
+       struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+       struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+
+       req->last_req = true;
+       req->finish = true;
+
+       /* If we have an overall 0 length request */
+       if (!(req->len + areq->nbytes)) {
+               if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
+                       memcpy(areq->result, sha1_zero_message_hash,
+                              SHA1_DIGEST_SIZE);
+               else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA224)
+                       memcpy(areq->result, sha224_zero_message_hash,
+                              SHA224_DIGEST_SIZE);
+               else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA256)
+                       memcpy(areq->result, sha256_zero_message_hash,
+                              SHA256_DIGEST_SIZE);
+
+               return 0;
+       }
+
+       return safexcel_ahash_enqueue(areq);
+}
+
+static int safexcel_ahash_finup(struct ahash_request *areq)
+{
+       struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+       req->last_req = true;
+       req->finish = true;
+
+       safexcel_ahash_update(areq);
+       return safexcel_ahash_final(areq);
+}
+
+static int safexcel_ahash_export(struct ahash_request *areq, void *out)
+{
+       struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
+       struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+       struct safexcel_ahash_export_state *export = out;
+
+       export->len = req->len;
+       export->processed = req->processed;
+
+       memcpy(export->state, req->state, req->state_sz);
+       memset(export->cache, 0, crypto_ahash_blocksize(ahash));
+       memcpy(export->cache, req->cache, crypto_ahash_blocksize(ahash));
+
+       return 0;
+}
+
+static int safexcel_ahash_import(struct ahash_request *areq, const void *in)
+{
+       struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
+       struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+       const struct safexcel_ahash_export_state *export = in;
+       int ret;
+
+       ret = crypto_ahash_init(areq);
+       if (ret)
+               return ret;
+
+       req->len = export->len;
+       req->processed = export->processed;
+
+       memcpy(req->cache, export->cache, crypto_ahash_blocksize(ahash));
+       memcpy(req->state, export->state, req->state_sz);
+
+       return 0;
+}
+
+static int safexcel_ahash_cra_init(struct crypto_tfm *tfm)
+{
+       struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
+       struct safexcel_alg_template *tmpl =
+               container_of(__crypto_ahash_alg(tfm->__crt_alg),
+                            struct safexcel_alg_template, alg.ahash);
+
+       ctx->priv = tmpl->priv;
+
+       crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+                                sizeof(struct safexcel_ahash_req));
+       return 0;
+}
+
+static int safexcel_sha1_init(struct ahash_request *areq)
+{
+       struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+       struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+       memset(req, 0, sizeof(*req));
+
+       req->state[0] = SHA1_H0;
+       req->state[1] = SHA1_H1;
+       req->state[2] = SHA1_H2;
+       req->state[3] = SHA1_H3;
+       req->state[4] = SHA1_H4;
+
+       ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
+       ctx->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+       req->state_sz = SHA1_DIGEST_SIZE;
+
+       return 0;
+}
+
+static int safexcel_sha1_digest(struct ahash_request *areq)
+{
+       int ret = safexcel_sha1_init(areq);
+
+       if (ret)
+               return ret;
+
+       return safexcel_ahash_finup(areq);
+}
+
+static void safexcel_ahash_cra_exit(struct crypto_tfm *tfm)
+{
+       struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
+       struct safexcel_crypto_priv *priv = ctx->priv;
+       int ret;
+
+       /* context not allocated, skip invalidation */
+       if (!ctx->base.ctxr)
+               return;
+
+       ret = safexcel_ahash_exit_inv(tfm);
+       if (ret)
+               dev_warn(priv->dev, "hash: invalidation error %d\n", ret);
+}
+
+struct safexcel_alg_template safexcel_alg_sha1 = {
+       .type = SAFEXCEL_ALG_TYPE_AHASH,
+       .alg.ahash = {
+               .init = safexcel_sha1_init,
+               .update = safexcel_ahash_update,
+               .final = safexcel_ahash_final,
+               .finup = safexcel_ahash_finup,
+               .digest = safexcel_sha1_digest,
+               .export = safexcel_ahash_export,
+               .import = safexcel_ahash_import,
+               .halg = {
+                       .digestsize = SHA1_DIGEST_SIZE,
+                       .statesize = sizeof(struct safexcel_ahash_export_state),
+                       .base = {
+                               .cra_name = "sha1",
+                               .cra_driver_name = "safexcel-sha1",
+                               .cra_priority = 300,
+                               .cra_flags = CRYPTO_ALG_ASYNC |
+                                            CRYPTO_ALG_KERN_DRIVER_ONLY,
+                               .cra_blocksize = SHA1_BLOCK_SIZE,
+                               .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+                               .cra_init = safexcel_ahash_cra_init,
+                               .cra_exit = safexcel_ahash_cra_exit,
+                               .cra_module = THIS_MODULE,
+                       },
+               },
+       },
+};
+
+static int safexcel_hmac_sha1_init(struct ahash_request *areq)
+{
+       struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+
+       safexcel_sha1_init(areq);
+       ctx->digest = CONTEXT_CONTROL_DIGEST_HMAC;
+       return 0;
+}
+
+static int safexcel_hmac_sha1_digest(struct ahash_request *areq)
+{
+       int ret = safexcel_hmac_sha1_init(areq);
+
+       if (ret)
+               return ret;
+
+       return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_ahash_result {
+       struct completion completion;
+       int error;
+};
+
+static void safexcel_ahash_complete(struct crypto_async_request *req, int error)
+{
+       struct safexcel_ahash_result *result = req->data;
+
+       if (error == -EINPROGRESS)
+               return;
+
+       result->error = error;
+       complete(&result->completion);
+}
+
+static int safexcel_hmac_init_pad(struct ahash_request *areq,
+                                 unsigned int blocksize, const u8 *key,
+                                 unsigned int keylen, u8 *ipad, u8 *opad)
+{
+       struct safexcel_ahash_result result;
+       struct scatterlist sg;
+       int ret, i;
+       u8 *keydup;
+
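+       /* Per RFC 2104, keys longer than a block are first hashed down to the digest size */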
+       if (keylen <= blocksize) {
+               memcpy(ipad, key, keylen);
+       } else {
+               keydup = kmemdup(key, keylen, GFP_KERNEL);
+               if (!keydup)
+                       return -ENOMEM;
+
+               ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_BACKLOG,
+                                          safexcel_ahash_complete, &result);
+               sg_init_one(&sg, keydup, keylen);
+               ahash_request_set_crypt(areq, &sg, ipad, keylen);
+               init_completion(&result.completion);
+
+               ret = crypto_ahash_digest(areq);
+               if (ret == -EINPROGRESS) {
+                       wait_for_completion_interruptible(&result.completion);
+                       ret = result.error;
+               }
+
+               /* Avoid leaking */
+               memzero_explicit(keydup, keylen);
+               kfree(keydup);
+
+               if (ret)
+                       return ret;
+
+               keylen = crypto_ahash_digestsize(crypto_ahash_reqtfm(areq));
+       }
+
+       memset(ipad + keylen, 0, blocksize - keylen);
+       memcpy(opad, ipad, blocksize);
+
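+       /* XOR in the standard HMAC ipad (0x36) and opad (0x5c) constants */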
+       for (i = 0; i < blocksize; i++) {
+               ipad[i] ^= 0x36;
+               opad[i] ^= 0x5c;
+       }
+
+       return 0;
+}
+
+static int safexcel_hmac_init_iv(struct ahash_request *areq,
+                                unsigned int blocksize, u8 *pad, void *state)
+{
+       struct safexcel_ahash_result result;
+       struct safexcel_ahash_req *req;
+       struct scatterlist sg;
+       int ret;
+
+       ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_BACKLOG,
+                                  safexcel_ahash_complete, &result);
+       sg_init_one(&sg, pad, blocksize);
+       ahash_request_set_crypt(areq, &sg, pad, blocksize);
+       init_completion(&result.completion);
+
+       ret = crypto_ahash_init(areq);
+       if (ret)
+               return ret;
+
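+       /* Run a single update over the padded block, then export the intermediate state */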
+       req = ahash_request_ctx(areq);
+       req->hmac = true;
+       req->last_req = true;
+
+       ret = crypto_ahash_update(areq);
+       if (ret && ret != -EINPROGRESS)
+               return ret;
+
+       wait_for_completion_interruptible(&result.completion);
+       if (result.error)
+               return result.error;
+
+       return crypto_ahash_export(areq, state);
+}
+
+static int safexcel_hmac_setkey(const char *alg, const u8 *key,
+                               unsigned int keylen, void *istate, void *ostate)
+{
+       struct ahash_request *areq;
+       struct crypto_ahash *tfm;
+       unsigned int blocksize;
+       u8 *ipad, *opad;
+       int ret;
+
+       tfm = crypto_alloc_ahash(alg, CRYPTO_ALG_TYPE_AHASH,
+                                CRYPTO_ALG_TYPE_AHASH_MASK);
+       if (IS_ERR(tfm))
+               return PTR_ERR(tfm);
+
+       areq = ahash_request_alloc(tfm, GFP_KERNEL);
+       if (!areq) {
+               ret = -ENOMEM;
+               goto free_ahash;
+       }
+
+       crypto_ahash_clear_flags(tfm, ~0);
+       blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
+
+       ipad = kzalloc(2 * blocksize, GFP_KERNEL);
+       if (!ipad) {
+               ret = -ENOMEM;
+               goto free_request;
+       }
+
+       opad = ipad + blocksize;
+
+       ret = safexcel_hmac_init_pad(areq, blocksize, key, keylen, ipad, opad);
+       if (ret)
+               goto free_ipad;
+
+       ret = safexcel_hmac_init_iv(areq, blocksize, ipad, istate);
+       if (ret)
+               goto free_ipad;
+
+       ret = safexcel_hmac_init_iv(areq, blocksize, opad, ostate);
+
+free_ipad:
+       kfree(ipad);
+free_request:
+       ahash_request_free(areq);
+free_ahash:
+       crypto_free_ahash(tfm);
+
+       return ret;
+}
+
+static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
+                                    unsigned int keylen)
+{
+       struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
+       struct safexcel_ahash_export_state istate, ostate;
+       int ret, i;
+
+       ret = safexcel_hmac_setkey("safexcel-sha1", key, keylen, &istate, &ostate);
+       if (ret)
+               return ret;
+
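+       /* If the precomputed pads changed, the existing context record must be invalidated */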
+       for (i = 0; i < ARRAY_SIZE(istate.state); i++) {
+               if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) ||
+                   ctx->opad[i] != le32_to_cpu(ostate.state[i])) {
+                       ctx->base.needs_inv = true;
+                       break;
+               }
+       }
+
+       memcpy(ctx->ipad, &istate.state, SHA1_DIGEST_SIZE);
+       memcpy(ctx->opad, &ostate.state, SHA1_DIGEST_SIZE);
+
+       return 0;
+}
+
+struct safexcel_alg_template safexcel_alg_hmac_sha1 = {
+       .type = SAFEXCEL_ALG_TYPE_AHASH,
+       .alg.ahash = {
+               .init = safexcel_hmac_sha1_init,
+               .update = safexcel_ahash_update,
+               .final = safexcel_ahash_final,
+               .finup = safexcel_ahash_finup,
+               .digest = safexcel_hmac_sha1_digest,
+               .setkey = safexcel_hmac_sha1_setkey,
+               .export = safexcel_ahash_export,
+               .import = safexcel_ahash_import,
+               .halg = {
+                       .digestsize = SHA1_DIGEST_SIZE,
+                       .statesize = sizeof(struct safexcel_ahash_export_state),
+                       .base = {
+                               .cra_name = "hmac(sha1)",
+                               .cra_driver_name = "safexcel-hmac-sha1",
+                               .cra_priority = 300,
+                               .cra_flags = CRYPTO_ALG_ASYNC |
+                                            CRYPTO_ALG_KERN_DRIVER_ONLY,
+                               .cra_blocksize = SHA1_BLOCK_SIZE,
+                               .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+                               .cra_init = safexcel_ahash_cra_init,
+                               .cra_exit = safexcel_ahash_cra_exit,
+                               .cra_module = THIS_MODULE,
+                       },
+               },
+       },
+};
+
+static int safexcel_sha256_init(struct ahash_request *areq)
+{
+       struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+       struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+       memset(req, 0, sizeof(*req));
+
+       req->state[0] = SHA256_H0;
+       req->state[1] = SHA256_H1;
+       req->state[2] = SHA256_H2;
+       req->state[3] = SHA256_H3;
+       req->state[4] = SHA256_H4;
+       req->state[5] = SHA256_H5;
+       req->state[6] = SHA256_H6;
+       req->state[7] = SHA256_H7;
+
+       ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256;
+       ctx->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+       req->state_sz = SHA256_DIGEST_SIZE;
+
+       return 0;
+}
+
+static int safexcel_sha256_digest(struct ahash_request *areq)
+{
+       int ret = safexcel_sha256_init(areq);
+
+       if (ret)
+               return ret;
+
+       return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_sha256 = {
+       .type = SAFEXCEL_ALG_TYPE_AHASH,
+       .alg.ahash = {
+               .init = safexcel_sha256_init,
+               .update = safexcel_ahash_update,
+               .final = safexcel_ahash_final,
+               .finup = safexcel_ahash_finup,
+               .digest = safexcel_sha256_digest,
+               .export = safexcel_ahash_export,
+               .import = safexcel_ahash_import,
+               .halg = {
+                       .digestsize = SHA256_DIGEST_SIZE,
+                       .statesize = sizeof(struct safexcel_ahash_export_state),
+                       .base = {
+                               .cra_name = "sha256",
+                               .cra_driver_name = "safexcel-sha256",
+                               .cra_priority = 300,
+                               .cra_flags = CRYPTO_ALG_ASYNC |
+                                            CRYPTO_ALG_KERN_DRIVER_ONLY,
+                               .cra_blocksize = SHA256_BLOCK_SIZE,
+                               .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+                               .cra_init = safexcel_ahash_cra_init,
+                               .cra_exit = safexcel_ahash_cra_exit,
+                               .cra_module = THIS_MODULE,
+                       },
+               },
+       },
+};
+
+static int safexcel_sha224_init(struct ahash_request *areq)
+{
+       struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
+       struct safexcel_ahash_req *req = ahash_request_ctx(areq);
+
+       memset(req, 0, sizeof(*req));
+
+       req->state[0] = SHA224_H0;
+       req->state[1] = SHA224_H1;
+       req->state[2] = SHA224_H2;
+       req->state[3] = SHA224_H3;
+       req->state[4] = SHA224_H4;
+       req->state[5] = SHA224_H5;
+       req->state[6] = SHA224_H6;
+       req->state[7] = SHA224_H7;
+
+       ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224;
+       ctx->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
+       req->state_sz = SHA256_DIGEST_SIZE;
+
+       return 0;
+}
+
+static int safexcel_sha224_digest(struct ahash_request *areq)
+{
+       int ret = safexcel_sha224_init(areq);
+
+       if (ret)
+               return ret;
+
+       return safexcel_ahash_finup(areq);
+}
+
+struct safexcel_alg_template safexcel_alg_sha224 = {
+       .type = SAFEXCEL_ALG_TYPE_AHASH,
+       .alg.ahash = {
+               .init = safexcel_sha224_init,
+               .update = safexcel_ahash_update,
+               .final = safexcel_ahash_final,
+               .finup = safexcel_ahash_finup,
+               .digest = safexcel_sha224_digest,
+               .export = safexcel_ahash_export,
+               .import = safexcel_ahash_import,
+               .halg = {
+                       .digestsize = SHA224_DIGEST_SIZE,
+                       .statesize = sizeof(struct safexcel_ahash_export_state),
+                       .base = {
+                               .cra_name = "sha224",
+                               .cra_driver_name = "safexcel-sha224",
+                               .cra_priority = 300,
+                               .cra_flags = CRYPTO_ALG_ASYNC |
+                                            CRYPTO_ALG_KERN_DRIVER_ONLY,
+                               .cra_blocksize = SHA224_BLOCK_SIZE,
+                               .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
+                               .cra_init = safexcel_ahash_cra_init,
+                               .cra_exit = safexcel_ahash_cra_exit,
+                               .cra_module = THIS_MODULE,
+                       },
+               },
+       },
+};
 
--- /dev/null
+/*
+ * Copyright (C) 2017 Marvell
+ *
+ * Antoine Tenart <antoine.tenart@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/spinlock.h>
+
+#include "safexcel.h"
+
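+/*
+ * Allocate the DMA-coherent backing memory for one ring pair (command
+ * descriptor ring and result descriptor ring) using managed allocations,
+ * and reset the software read/write pointers to the start of each ring.
+ */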
+int safexcel_init_ring_descriptors(struct safexcel_crypto_priv *priv,
+                                  struct safexcel_ring *cdr,
+                                  struct safexcel_ring *rdr)
+{
+       cdr->offset = sizeof(u32) * priv->config.cd_offset;
+       cdr->base = dmam_alloc_coherent(priv->dev,
+                                       cdr->offset * EIP197_DEFAULT_RING_SIZE,
+                                       &cdr->base_dma, GFP_KERNEL);
+       if (!cdr->base)
+               return -ENOMEM;
+       cdr->write = cdr->base;
+       cdr->base_end = cdr->base + cdr->offset * EIP197_DEFAULT_RING_SIZE;
+       cdr->read = cdr->base;
+
+       rdr->offset = sizeof(u32) * priv->config.rd_offset;
+       rdr->base = dmam_alloc_coherent(priv->dev,
+                                       rdr->offset * EIP197_DEFAULT_RING_SIZE,
+                                       &rdr->base_dma, GFP_KERNEL);
+       if (!rdr->base)
+               return -ENOMEM;
+       rdr->write = rdr->base;
+       rdr->base_end = rdr->base + rdr->offset * EIP197_DEFAULT_RING_SIZE;
+       rdr->read = rdr->base;
+
+       return 0;
+}
+
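+/* Pick the ring to use for the next request, round-robin over all rings */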
+inline int safexcel_select_ring(struct safexcel_crypto_priv *priv)
+{
+       return atomic_inc_return(&priv->ring_used) % priv->config.rings;
+}
+
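+/*
+ * Reserve the descriptor slot at the current write pointer and advance it,
+ * wrapping at the end of the ring. Returns -ENOMEM once the ring is
+ * considered full.
+ */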
+static void *safexcel_ring_next_wptr(struct safexcel_crypto_priv *priv,
+                                    struct safexcel_ring *ring)
+{
+       void *ptr = ring->write;
+
+       if (ring->nr == EIP197_DEFAULT_RING_SIZE - 1)
+               return ERR_PTR(-ENOMEM);
+
+       ring->write += ring->offset;
+       if (ring->write == ring->base_end)
+               ring->write = ring->base;
+
+       ring->nr++;
+       return ptr;
+}
+
+void *safexcel_ring_next_rptr(struct safexcel_crypto_priv *priv,
+                             struct safexcel_ring *ring)
+{
+       void *ptr = ring->read;
+
+       if (!ring->nr)
+               return ERR_PTR(-ENOENT);
+
+       ring->read += ring->offset;
+       if (ring->read == ring->base_end)
+               ring->read = ring->base;
+
+       ring->nr--;
+       return ptr;
+}
+
+void safexcel_ring_rollback_wptr(struct safexcel_crypto_priv *priv,
+                                struct safexcel_ring *ring)
+{
+       if (!ring->nr)
+               return;
+
+       if (ring->write == ring->base)
+               ring->write += (EIP197_DEFAULT_RING_SIZE - 1) * ring->offset;
+       else
+               ring->write -= ring->offset;
+
+       ring->nr--;
+}
+
+struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *priv,
+                                                int ring_id,
+                                                bool first, bool last,
+                                                dma_addr_t data, u32 data_len,
+                                                u32 full_data_len,
+                                                dma_addr_t context)
+{
+       struct safexcel_command_desc *cdesc;
+       int i;
+
+       cdesc = safexcel_ring_next_wptr(priv, &priv->ring[ring_id].cdr);
+       if (IS_ERR(cdesc))
+               return cdesc;
+
+       memset(cdesc, 0, sizeof(struct safexcel_command_desc));
+
+       cdesc->first_seg = first;
+       cdesc->last_seg = last;
+       cdesc->particle_size = data_len;
+       cdesc->data_lo = lower_32_bits(data);
+       cdesc->data_hi = upper_32_bits(data);
+
+       if (first && context) {
+               struct safexcel_token *token =
+                       (struct safexcel_token *)cdesc->control_data.token;
+
+               cdesc->control_data.packet_length = full_data_len;
+               cdesc->control_data.options = EIP197_OPTION_MAGIC_VALUE |
+                                             EIP197_OPTION_64BIT_CTX |
+                                             EIP197_OPTION_CTX_CTRL_IN_CMD;
+               cdesc->control_data.context_lo =
+                       (lower_32_bits(context) & GENMASK(31, 2)) >> 2;
+               cdesc->control_data.context_hi = upper_32_bits(context);
+
+               /* TODO: large xform HMAC with SHA-384/512 uses refresh = 3 */
+               cdesc->control_data.refresh = 2;
+
+               for (i = 0; i < EIP197_MAX_TOKENS; i++)
+                       eip197_noop_token(&token[i]);
+       }
+
+       return cdesc;
+}
+
+struct safexcel_result_desc *safexcel_add_rdesc(struct safexcel_crypto_priv *priv,
+                                               int ring_id,
+                                               bool first, bool last,
+                                               dma_addr_t data, u32 len)
+{
+       struct safexcel_result_desc *rdesc;
+
+       rdesc = safexcel_ring_next_wptr(priv, &priv->ring[ring_id].rdr);
+       if (IS_ERR(rdesc))
+               return rdesc;
+
+       memset(rdesc, 0, sizeof(struct safexcel_result_desc));
+
+       rdesc->first_seg = first;
+       rdesc->last_seg = last;
+       rdesc->particle_size = len;
+       rdesc->data_lo = lower_32_bits(data);
+       rdesc->data_hi = upper_32_bits(data);
+
+       return rdesc;
+}