crypto: ixp4xx - Move driver to drivers/crypto/intel/ixp4xx
author Tom Zanussi <tom.zanussi@linux.intel.com>
Tue, 28 Mar 2023 15:39:50 +0000 (10:39 -0500)
committer Herbert Xu <herbert@gondor.apana.org.au>
Thu, 6 Apr 2023 08:38:31 +0000 (16:38 +0800)
With the growing number of Intel crypto drivers, it makes sense to
group them all into a single drivers/crypto/intel/ directory.

Create a separate drivers/crypto/intel/ixp4xx directory and move
drivers/crypto/ixp4xx_crypto.c into it, along with a new Kconfig and
Makefile holding the corresponding config and build rules.

Also add a COMPILE_TEST dependency to CRYPTO_DEV_IXP4XX so it can be
more easily compile-tested.
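
With that in place, something like the following is expected to be
enough to compile-test the driver on a non-IXP4xx host (hypothetical
example; it assumes the IXP4XX_QMGR and IXP4XX_NPE dependencies can
likewise be enabled there, e.g. via COMPILE_TEST):

  make allmodconfig
  make drivers/crypto/intel/ixp4xx/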

Signed-off-by: Tom Zanussi <tom.zanussi@linux.intel.com>
Acked-by: Corentin LABBE <clabbe@baylibre.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
MAINTAINERS
drivers/crypto/Kconfig
drivers/crypto/Makefile
drivers/crypto/intel/Kconfig
drivers/crypto/intel/Makefile
drivers/crypto/intel/ixp4xx/Kconfig [new file with mode: 0644]
drivers/crypto/intel/ixp4xx/Makefile [new file with mode: 0644]
drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c [new file with mode: 0644]
drivers/crypto/ixp4xx_crypto.c [deleted file]

diff --git a/MAINTAINERS b/MAINTAINERS
index b21027122ce8d4d4750a9bb2068c34300f4d1149..32490c179b55aaaef6ed8976b90d372d41e9e465 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2275,7 +2275,7 @@ F:     arch/arm/boot/dts/intel-ixp*
 F:     arch/arm/mach-ixp4xx/
 F:     drivers/bus/intel-ixp4xx-eb.c
 F:     drivers/clocksource/timer-ixp4xx.c
-F:     drivers/crypto/ixp4xx_crypto.c
+F:     drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c
 F:     drivers/gpio/gpio-ixp4xx.c
 F:     drivers/irqchip/irq-ixp4xx.c
 
@@ -10380,7 +10380,7 @@ INTEL IXP4XX CRYPTO SUPPORT
 M:     Corentin Labbe <clabbe@baylibre.com>
 L:     linux-crypto@vger.kernel.org
 S:     Maintained
-F:     drivers/crypto/ixp4xx_crypto.c
+F:     drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c
 
 INTEL ISHTP ECLITE DRIVER
 M:     Sumesh K Naduvalath <sumesh.k.naduvalath@intel.com>
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index fc00d31abd3cfeaabf177295b27ad6b2e40d3d2c..9c9e5a509f378f25dd0ca69b5e31f7d5e5985d23 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -240,21 +240,6 @@ config CRYPTO_DEV_TALITOS2
          Say 'Y' here to use the Freescale Security Engine (SEC)
          version 2 and following as found on MPC83xx, MPC85xx, etc ...
 
-config CRYPTO_DEV_IXP4XX
-       tristate "Driver for IXP4xx crypto hardware acceleration"
-       depends on ARCH_IXP4XX && IXP4XX_QMGR && IXP4XX_NPE
-       select CRYPTO_AES
-       select CRYPTO_DES
-       select CRYPTO_ECB
-       select CRYPTO_CBC
-       select CRYPTO_CTR
-       select CRYPTO_LIB_DES
-       select CRYPTO_AEAD
-       select CRYPTO_AUTHENC
-       select CRYPTO_SKCIPHER
-       help
-         Driver for the IXP4xx NPE crypto engine.
-
 config CRYPTO_DEV_PPC4XX
        tristate "Driver AMCC PPC4xx crypto accelerator"
        depends on PPC && 4xx
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index eb8eb80aab3b0a7b61bce06fae22e2f1876c6db4..17f323718b6ac3b32bd05e72d416923c222d9221 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -19,7 +19,6 @@ obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += caam/
 obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
 obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
 obj-$(CONFIG_CRYPTO_DEV_IMGTEC_HASH) += img-hash.o
-obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
 obj-$(CONFIG_CRYPTO_DEV_MARVELL) += marvell/
 obj-$(CONFIG_CRYPTO_DEV_MXS_DCP) += mxs-dcp.o
 obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o
diff --git a/drivers/crypto/intel/Kconfig b/drivers/crypto/intel/Kconfig
index b5ae1aa4a701bf8d43736959e3176bd489820644..420580b237cb4126b2ac64a54f8bc5cda0f9f46c 100644
--- a/drivers/crypto/intel/Kconfig
+++ b/drivers/crypto/intel/Kconfig
@@ -1,3 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0
 
 source "drivers/crypto/intel/keembay/Kconfig"
+source "drivers/crypto/intel/ixp4xx/Kconfig"
diff --git a/drivers/crypto/intel/Makefile b/drivers/crypto/intel/Makefile
index c392189fd9946717e1f5d36a052734b44a6ebaec..1cc4b6dd25561f5791fa9338b6accacdfa00a97c 100644
--- a/drivers/crypto/intel/Makefile
+++ b/drivers/crypto/intel/Makefile
@@ -1,3 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0
 
 obj-y += keembay/
+obj-y += ixp4xx/
diff --git a/drivers/crypto/intel/ixp4xx/Kconfig b/drivers/crypto/intel/ixp4xx/Kconfig
new file mode 100644
index 0000000..af3cc56
--- /dev/null
+++ b/drivers/crypto/intel/ixp4xx/Kconfig
@@ -0,0 +1,14 @@
+config CRYPTO_DEV_IXP4XX
+       tristate "Driver for IXP4xx crypto hardware acceleration"
+       depends on (ARCH_IXP4XX || COMPILE_TEST) && IXP4XX_QMGR && IXP4XX_NPE
+       select CRYPTO_AES
+       select CRYPTO_DES
+       select CRYPTO_ECB
+       select CRYPTO_CBC
+       select CRYPTO_CTR
+       select CRYPTO_LIB_DES
+       select CRYPTO_AEAD
+       select CRYPTO_AUTHENC
+       select CRYPTO_SKCIPHER
+       help
+         Driver for the IXP4xx NPE crypto engine.
diff --git a/drivers/crypto/intel/ixp4xx/Makefile b/drivers/crypto/intel/ixp4xx/Makefile
new file mode 100644
index 0000000..74ebefd
--- /dev/null
+++ b/drivers/crypto/intel/ixp4xx/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
diff --git a/drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c b/drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c
new file mode 100644
index 0000000..b63e235
--- /dev/null
+++ b/drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c
@@ -0,0 +1,1601 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel IXP4xx NPE-C crypto driver
+ *
+ * Copyright (C) 2008 Christian Hohnstaedt <chohnstaedt@innominate.com>
+ */
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/crypto.h>
+#include <linux/kernel.h>
+#include <linux/rtnetlink.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/gfp.h>
+#include <linux/module.h>
+#include <linux/of.h>
+
+#include <crypto/ctr.h>
+#include <crypto/internal/des.h>
+#include <crypto/aes.h>
+#include <crypto/hmac.h>
+#include <crypto/sha1.h>
+#include <crypto/algapi.h>
+#include <crypto/internal/aead.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/authenc.h>
+#include <crypto/scatterwalk.h>
+
+#include <linux/soc/ixp4xx/npe.h>
+#include <linux/soc/ixp4xx/qmgr.h>
+
+/* Intermittent includes, delete this after v5.14-rc1 */
+#include <linux/soc/ixp4xx/cpu.h>
+
+#define MAX_KEYLEN 32
+
+/* hash: cfgword + 2 * digestlen; crypt: keylen + cfgword */
+#define NPE_CTX_LEN 80
+#define AES_BLOCK128 16
+
+#define NPE_OP_HASH_VERIFY   0x01
+#define NPE_OP_CCM_ENABLE    0x04
+#define NPE_OP_CRYPT_ENABLE  0x08
+#define NPE_OP_HASH_ENABLE   0x10
+#define NPE_OP_NOT_IN_PLACE  0x20
+#define NPE_OP_HMAC_DISABLE  0x40
+#define NPE_OP_CRYPT_ENCRYPT 0x80
+
+#define NPE_OP_CCM_GEN_MIC   0xcc
+#define NPE_OP_HASH_GEN_ICV  0x50
+#define NPE_OP_ENC_GEN_KEY   0xc9
+
+#define MOD_ECB     0x0000
+#define MOD_CTR     0x1000
+#define MOD_CBC_ENC 0x2000
+#define MOD_CBC_DEC 0x3000
+#define MOD_CCM_ENC 0x4000
+#define MOD_CCM_DEC 0x5000
+
+#define KEYLEN_128  4
+#define KEYLEN_192  6
+#define KEYLEN_256  8
+
+#define CIPH_DECR   0x0000
+#define CIPH_ENCR   0x0400
+
+#define MOD_DES     0x0000
+#define MOD_TDEA2   0x0100
+#define MOD_3DES    0x0200
+#define MOD_AES     0x0800
+#define MOD_AES128  (0x0800 | KEYLEN_128)
+#define MOD_AES192  (0x0900 | KEYLEN_192)
+#define MOD_AES256  (0x0a00 | KEYLEN_256)
+
+#define MAX_IVLEN   16
+#define NPE_QLEN    16
+/* Space for registering when the first
+ * NPE_QLEN crypt_ctl are busy */
+#define NPE_QLEN_TOTAL 64
+
+#define CTL_FLAG_UNUSED                0x0000
+#define CTL_FLAG_USED          0x1000
+#define CTL_FLAG_PERFORM_ABLK  0x0001
+#define CTL_FLAG_GEN_ICV       0x0002
+#define CTL_FLAG_GEN_REVAES    0x0004
+#define CTL_FLAG_PERFORM_AEAD  0x0008
+#define CTL_FLAG_MASK          0x000f
+
+#define HMAC_PAD_BLOCKLEN SHA1_BLOCK_SIZE
+
+#define MD5_DIGEST_SIZE   16
+
+struct buffer_desc {
+       u32 phys_next;
+#ifdef __ARMEB__
+       u16 buf_len;
+       u16 pkt_len;
+#else
+       u16 pkt_len;
+       u16 buf_len;
+#endif
+       dma_addr_t phys_addr;
+       u32 __reserved[4];
+       struct buffer_desc *next;
+       enum dma_data_direction dir;
+};
+
+struct crypt_ctl {
+#ifdef __ARMEB__
+       u8 mode;                /* NPE_OP_*  operation mode */
+       u8 init_len;
+       u16 reserved;
+#else
+       u16 reserved;
+       u8 init_len;
+       u8 mode;                /* NPE_OP_*  operation mode */
+#endif
+       u8 iv[MAX_IVLEN];       /* IV for CBC mode or CTR IV for CTR mode */
+       dma_addr_t icv_rev_aes; /* icv or rev aes */
+       dma_addr_t src_buf;
+       dma_addr_t dst_buf;
+#ifdef __ARMEB__
+       u16 auth_offs;          /* Authentication start offset */
+       u16 auth_len;           /* Authentication data length */
+       u16 crypt_offs;         /* Cryption start offset */
+       u16 crypt_len;          /* Cryption data length */
+#else
+       u16 auth_len;           /* Authentication data length */
+       u16 auth_offs;          /* Authentication start offset */
+       u16 crypt_len;          /* Cryption data length */
+       u16 crypt_offs;         /* Cryption start offset */
+#endif
+       u32 aadAddr;            /* Additional Auth Data Addr for CCM mode */
+       u32 crypto_ctx;         /* NPE Crypto Param structure address */
+
+       /* Used by Host: 4*4 bytes*/
+       unsigned int ctl_flags;
+       union {
+               struct skcipher_request *ablk_req;
+               struct aead_request *aead_req;
+               struct crypto_tfm *tfm;
+       } data;
+       struct buffer_desc *regist_buf;
+       u8 *regist_ptr;
+};
+
+struct ablk_ctx {
+       struct buffer_desc *src;
+       struct buffer_desc *dst;
+       u8 iv[MAX_IVLEN];
+       bool encrypt;
+       struct skcipher_request fallback_req;   // keep at the end
+};
+
+struct aead_ctx {
+       struct buffer_desc *src;
+       struct buffer_desc *dst;
+       struct scatterlist ivlist;
+       /* used when the hmac is not on one sg entry */
+       u8 *hmac_virt;
+       int encrypt;
+};
+
+struct ix_hash_algo {
+       u32 cfgword;
+       unsigned char *icv;
+};
+
+struct ix_sa_dir {
+       unsigned char *npe_ctx;
+       dma_addr_t npe_ctx_phys;
+       int npe_ctx_idx;
+       u8 npe_mode;
+};
+
+struct ixp_ctx {
+       struct ix_sa_dir encrypt;
+       struct ix_sa_dir decrypt;
+       int authkey_len;
+       u8 authkey[MAX_KEYLEN];
+       int enckey_len;
+       u8 enckey[MAX_KEYLEN];
+       u8 salt[MAX_IVLEN];
+       u8 nonce[CTR_RFC3686_NONCE_SIZE];
+       unsigned int salted;
+       atomic_t configuring;
+       struct completion completion;
+       struct crypto_skcipher *fallback_tfm;
+};
+
+struct ixp_alg {
+       struct skcipher_alg crypto;
+       const struct ix_hash_algo *hash;
+       u32 cfg_enc;
+       u32 cfg_dec;
+
+       int registered;
+};
+
+struct ixp_aead_alg {
+       struct aead_alg crypto;
+       const struct ix_hash_algo *hash;
+       u32 cfg_enc;
+       u32 cfg_dec;
+
+       int registered;
+};
+
+static const struct ix_hash_algo hash_alg_md5 = {
+       .cfgword        = 0xAA010004,
+       .icv            = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
+                         "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
+};
+
+static const struct ix_hash_algo hash_alg_sha1 = {
+       .cfgword        = 0x00000005,
+       .icv            = "\x67\x45\x23\x01\xEF\xCD\xAB\x89\x98\xBA"
+                         "\xDC\xFE\x10\x32\x54\x76\xC3\xD2\xE1\xF0",
+};
+
+static struct npe *npe_c;
+
+static unsigned int send_qid;
+static unsigned int recv_qid;
+static struct dma_pool *buffer_pool;
+static struct dma_pool *ctx_pool;
+
+static struct crypt_ctl *crypt_virt;
+static dma_addr_t crypt_phys;
+
+static int support_aes = 1;
+
+static struct platform_device *pdev;
+
+static inline dma_addr_t crypt_virt2phys(struct crypt_ctl *virt)
+{
+       return crypt_phys + (virt - crypt_virt) * sizeof(struct crypt_ctl);
+}
+
+static inline struct crypt_ctl *crypt_phys2virt(dma_addr_t phys)
+{
+       return crypt_virt + (phys - crypt_phys) / sizeof(struct crypt_ctl);
+}
+
+static inline u32 cipher_cfg_enc(struct crypto_tfm *tfm)
+{
+       return container_of(tfm->__crt_alg, struct ixp_alg, crypto.base)->cfg_enc;
+}
+
+static inline u32 cipher_cfg_dec(struct crypto_tfm *tfm)
+{
+       return container_of(tfm->__crt_alg, struct ixp_alg, crypto.base)->cfg_dec;
+}
+
+static inline const struct ix_hash_algo *ix_hash(struct crypto_tfm *tfm)
+{
+       return container_of(tfm->__crt_alg, struct ixp_alg, crypto.base)->hash;
+}
+
+static int setup_crypt_desc(void)
+{
+       struct device *dev = &pdev->dev;
+
+       BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64);
+       crypt_virt = dma_alloc_coherent(dev,
+                                       NPE_QLEN * sizeof(struct crypt_ctl),
+                                       &crypt_phys, GFP_ATOMIC);
+       if (!crypt_virt)
+               return -ENOMEM;
+       return 0;
+}
+
+static DEFINE_SPINLOCK(desc_lock);
+static struct crypt_ctl *get_crypt_desc(void)
+{
+       int i;
+       static int idx;
+       unsigned long flags;
+
+       spin_lock_irqsave(&desc_lock, flags);
+
+       if (unlikely(!crypt_virt))
+               setup_crypt_desc();
+       if (unlikely(!crypt_virt)) {
+               spin_unlock_irqrestore(&desc_lock, flags);
+               return NULL;
+       }
+       i = idx;
+       if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
+               if (++idx >= NPE_QLEN)
+                       idx = 0;
+               crypt_virt[i].ctl_flags = CTL_FLAG_USED;
+               spin_unlock_irqrestore(&desc_lock, flags);
+               return crypt_virt + i;
+       } else {
+               spin_unlock_irqrestore(&desc_lock, flags);
+               return NULL;
+       }
+}
+
+static DEFINE_SPINLOCK(emerg_lock);
+static struct crypt_ctl *get_crypt_desc_emerg(void)
+{
+       int i;
+       static int idx = NPE_QLEN;
+       struct crypt_ctl *desc;
+       unsigned long flags;
+
+       desc = get_crypt_desc();
+       if (desc)
+               return desc;
+       if (unlikely(!crypt_virt))
+               return NULL;
+
+       spin_lock_irqsave(&emerg_lock, flags);
+       i = idx;
+       if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
+               if (++idx >= NPE_QLEN_TOTAL)
+                       idx = NPE_QLEN;
+               crypt_virt[i].ctl_flags = CTL_FLAG_USED;
+               spin_unlock_irqrestore(&emerg_lock, flags);
+               return crypt_virt + i;
+       } else {
+               spin_unlock_irqrestore(&emerg_lock, flags);
+               return NULL;
+       }
+}
+
+static void free_buf_chain(struct device *dev, struct buffer_desc *buf,
+                          dma_addr_t phys)
+{
+       while (buf) {
+               struct buffer_desc *buf1;
+               u32 phys1;
+
+               buf1 = buf->next;
+               phys1 = buf->phys_next;
+               dma_unmap_single(dev, buf->phys_addr, buf->buf_len, buf->dir);
+               dma_pool_free(buffer_pool, buf, phys);
+               buf = buf1;
+               phys = phys1;
+       }
+}
+
+static struct tasklet_struct crypto_done_tasklet;
+
+static void finish_scattered_hmac(struct crypt_ctl *crypt)
+{
+       struct aead_request *req = crypt->data.aead_req;
+       struct aead_ctx *req_ctx = aead_request_ctx(req);
+       struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+       int authsize = crypto_aead_authsize(tfm);
+       int decryptlen = req->assoclen + req->cryptlen - authsize;
+
+       if (req_ctx->encrypt) {
+               scatterwalk_map_and_copy(req_ctx->hmac_virt, req->dst,
+                                        decryptlen, authsize, 1);
+       }
+       dma_pool_free(buffer_pool, req_ctx->hmac_virt, crypt->icv_rev_aes);
+}
+
+static void one_packet(dma_addr_t phys)
+{
+       struct device *dev = &pdev->dev;
+       struct crypt_ctl *crypt;
+       struct ixp_ctx *ctx;
+       int failed;
+
+       failed = phys & 0x1 ? -EBADMSG : 0;
+       phys &= ~0x3;
+       crypt = crypt_phys2virt(phys);
+
+       switch (crypt->ctl_flags & CTL_FLAG_MASK) {
+       case CTL_FLAG_PERFORM_AEAD: {
+               struct aead_request *req = crypt->data.aead_req;
+               struct aead_ctx *req_ctx = aead_request_ctx(req);
+
+               free_buf_chain(dev, req_ctx->src, crypt->src_buf);
+               free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
+               if (req_ctx->hmac_virt)
+                       finish_scattered_hmac(crypt);
+
+               aead_request_complete(req, failed);
+               break;
+       }
+       case CTL_FLAG_PERFORM_ABLK: {
+               struct skcipher_request *req = crypt->data.ablk_req;
+               struct ablk_ctx *req_ctx = skcipher_request_ctx(req);
+               struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+               unsigned int ivsize = crypto_skcipher_ivsize(tfm);
+               unsigned int offset;
+
+               if (ivsize > 0) {
+                       offset = req->cryptlen - ivsize;
+                       if (req_ctx->encrypt) {
+                               scatterwalk_map_and_copy(req->iv, req->dst,
+                                                        offset, ivsize, 0);
+                       } else {
+                               memcpy(req->iv, req_ctx->iv, ivsize);
+                               memzero_explicit(req_ctx->iv, ivsize);
+                       }
+               }
+
+               if (req_ctx->dst)
+                       free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
+
+               free_buf_chain(dev, req_ctx->src, crypt->src_buf);
+               skcipher_request_complete(req, failed);
+               break;
+       }
+       case CTL_FLAG_GEN_ICV:
+               ctx = crypto_tfm_ctx(crypt->data.tfm);
+               dma_pool_free(ctx_pool, crypt->regist_ptr,
+                             crypt->regist_buf->phys_addr);
+               dma_pool_free(buffer_pool, crypt->regist_buf, crypt->src_buf);
+               if (atomic_dec_and_test(&ctx->configuring))
+                       complete(&ctx->completion);
+               break;
+       case CTL_FLAG_GEN_REVAES:
+               ctx = crypto_tfm_ctx(crypt->data.tfm);
+               *(__be32 *)ctx->decrypt.npe_ctx &= cpu_to_be32(~CIPH_ENCR);
+               if (atomic_dec_and_test(&ctx->configuring))
+                       complete(&ctx->completion);
+               break;
+       default:
+               BUG();
+       }
+       crypt->ctl_flags = CTL_FLAG_UNUSED;
+}
+
+static void irqhandler(void *_unused)
+{
+       tasklet_schedule(&crypto_done_tasklet);
+}
+
+static void crypto_done_action(unsigned long arg)
+{
+       int i;
+
+       for (i = 0; i < 4; i++) {
+               dma_addr_t phys = qmgr_get_entry(recv_qid);
+               if (!phys)
+                       return;
+               one_packet(phys);
+       }
+       tasklet_schedule(&crypto_done_tasklet);
+}
+
+static int init_ixp_crypto(struct device *dev)
+{
+       struct device_node *np = dev->of_node;
+       u32 msg[2] = { 0, 0 };
+       int ret = -ENODEV;
+       u32 npe_id;
+
+       dev_info(dev, "probing...\n");
+
+       /* Locate the NPE and queue manager to use from device tree */
+       if (IS_ENABLED(CONFIG_OF) && np) {
+               struct of_phandle_args queue_spec;
+               struct of_phandle_args npe_spec;
+
+               ret = of_parse_phandle_with_fixed_args(np, "intel,npe-handle",
+                                                      1, 0, &npe_spec);
+               if (ret) {
+                       dev_err(dev, "no NPE engine specified\n");
+                       return -ENODEV;
+               }
+               npe_id = npe_spec.args[0];
+
+               ret = of_parse_phandle_with_fixed_args(np, "queue-rx", 1, 0,
+                                                      &queue_spec);
+               if (ret) {
+                       dev_err(dev, "no rx queue phandle\n");
+                       return -ENODEV;
+               }
+               recv_qid = queue_spec.args[0];
+
+               ret = of_parse_phandle_with_fixed_args(np, "queue-txready", 1, 0,
+                                                      &queue_spec);
+               if (ret) {
+                       dev_err(dev, "no txready queue phandle\n");
+                       return -ENODEV;
+               }
+               send_qid = queue_spec.args[0];
+       } else {
+               /*
+                * Hardcoded engine when using platform data, this goes away
+                * when we switch to using DT only.
+                */
+               npe_id = 2;
+               send_qid = 29;
+               recv_qid = 30;
+       }
+
+       npe_c = npe_request(npe_id);
+       if (!npe_c)
+               return ret;
+
+       if (!npe_running(npe_c)) {
+               ret = npe_load_firmware(npe_c, npe_name(npe_c), dev);
+               if (ret)
+                       goto npe_release;
+               if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
+                       goto npe_error;
+       } else {
+               if (npe_send_message(npe_c, msg, "STATUS_MSG"))
+                       goto npe_error;
+
+               if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
+                       goto npe_error;
+       }
+
+       switch ((msg[1] >> 16) & 0xff) {
+       case 3:
+               dev_warn(dev, "Firmware of %s lacks AES support\n", npe_name(npe_c));
+               support_aes = 0;
+               break;
+       case 4:
+       case 5:
+               support_aes = 1;
+               break;
+       default:
+               dev_err(dev, "Firmware of %s lacks crypto support\n", npe_name(npe_c));
+               ret = -ENODEV;
+               goto npe_release;
+       }
+       /* buffer_pool will also be used to sometimes store the hmac,
+        * so ensure it is large enough
+        */
+       BUILD_BUG_ON(SHA1_DIGEST_SIZE > sizeof(struct buffer_desc));
+       buffer_pool = dma_pool_create("buffer", dev, sizeof(struct buffer_desc),
+                                     32, 0);
+       ret = -ENOMEM;
+       if (!buffer_pool)
+               goto err;
+
+       ctx_pool = dma_pool_create("context", dev, NPE_CTX_LEN, 16, 0);
+       if (!ctx_pool)
+               goto err;
+
+       ret = qmgr_request_queue(send_qid, NPE_QLEN_TOTAL, 0, 0,
+                                "ixp_crypto:out", NULL);
+       if (ret)
+               goto err;
+       ret = qmgr_request_queue(recv_qid, NPE_QLEN, 0, 0,
+                                "ixp_crypto:in", NULL);
+       if (ret) {
+               qmgr_release_queue(send_qid);
+               goto err;
+       }
+       qmgr_set_irq(recv_qid, QUEUE_IRQ_SRC_NOT_EMPTY, irqhandler, NULL);
+       tasklet_init(&crypto_done_tasklet, crypto_done_action, 0);
+
+       qmgr_enable_irq(recv_qid);
+       return 0;
+
+npe_error:
+       dev_err(dev, "%s not responding\n", npe_name(npe_c));
+       ret = -EIO;
+err:
+       dma_pool_destroy(ctx_pool);
+       dma_pool_destroy(buffer_pool);
+npe_release:
+       npe_release(npe_c);
+       return ret;
+}
+
+static void release_ixp_crypto(struct device *dev)
+{
+       qmgr_disable_irq(recv_qid);
+       tasklet_kill(&crypto_done_tasklet);
+
+       qmgr_release_queue(send_qid);
+       qmgr_release_queue(recv_qid);
+
+       dma_pool_destroy(ctx_pool);
+       dma_pool_destroy(buffer_pool);
+
+       npe_release(npe_c);
+
+       if (crypt_virt)
+               dma_free_coherent(dev, NPE_QLEN * sizeof(struct crypt_ctl),
+                                 crypt_virt, crypt_phys);
+}
+
+static void reset_sa_dir(struct ix_sa_dir *dir)
+{
+       memset(dir->npe_ctx, 0, NPE_CTX_LEN);
+       dir->npe_ctx_idx = 0;
+       dir->npe_mode = 0;
+}
+
+static int init_sa_dir(struct ix_sa_dir *dir)
+{
+       dir->npe_ctx = dma_pool_alloc(ctx_pool, GFP_KERNEL, &dir->npe_ctx_phys);
+       if (!dir->npe_ctx)
+               return -ENOMEM;
+
+       reset_sa_dir(dir);
+       return 0;
+}
+
+static void free_sa_dir(struct ix_sa_dir *dir)
+{
+       memset(dir->npe_ctx, 0, NPE_CTX_LEN);
+       dma_pool_free(ctx_pool, dir->npe_ctx, dir->npe_ctx_phys);
+}
+
+static int init_tfm(struct crypto_tfm *tfm)
+{
+       struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
+       int ret;
+
+       atomic_set(&ctx->configuring, 0);
+       ret = init_sa_dir(&ctx->encrypt);
+       if (ret)
+               return ret;
+       ret = init_sa_dir(&ctx->decrypt);
+       if (ret)
+               free_sa_dir(&ctx->encrypt);
+
+       return ret;
+}
+
+static int init_tfm_ablk(struct crypto_skcipher *tfm)
+{
+       struct crypto_tfm *ctfm = crypto_skcipher_tfm(tfm);
+       struct ixp_ctx *ctx = crypto_tfm_ctx(ctfm);
+       const char *name = crypto_tfm_alg_name(ctfm);
+
+       ctx->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
+       if (IS_ERR(ctx->fallback_tfm)) {
+               pr_err("ERROR: Cannot allocate fallback for %s %ld\n",
+                       name, PTR_ERR(ctx->fallback_tfm));
+               return PTR_ERR(ctx->fallback_tfm);
+       }
+
+       pr_info("Fallback for %s is %s\n",
+                crypto_tfm_alg_driver_name(&tfm->base),
+                crypto_tfm_alg_driver_name(crypto_skcipher_tfm(ctx->fallback_tfm))
+                );
+
+       crypto_skcipher_set_reqsize(tfm, sizeof(struct ablk_ctx) + crypto_skcipher_reqsize(ctx->fallback_tfm));
+       return init_tfm(crypto_skcipher_tfm(tfm));
+}
+
+static int init_tfm_aead(struct crypto_aead *tfm)
+{
+       crypto_aead_set_reqsize(tfm, sizeof(struct aead_ctx));
+       return init_tfm(crypto_aead_tfm(tfm));
+}
+
+static void exit_tfm(struct crypto_tfm *tfm)
+{
+       struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
+
+       free_sa_dir(&ctx->encrypt);
+       free_sa_dir(&ctx->decrypt);
+}
+
+static void exit_tfm_ablk(struct crypto_skcipher *tfm)
+{
+       struct crypto_tfm *ctfm = crypto_skcipher_tfm(tfm);
+       struct ixp_ctx *ctx = crypto_tfm_ctx(ctfm);
+
+       crypto_free_skcipher(ctx->fallback_tfm);
+       exit_tfm(crypto_skcipher_tfm(tfm));
+}
+
+static void exit_tfm_aead(struct crypto_aead *tfm)
+{
+       exit_tfm(crypto_aead_tfm(tfm));
+}
+
+static int register_chain_var(struct crypto_tfm *tfm, u8 xpad, u32 target,
+                             int init_len, u32 ctx_addr, const u8 *key,
+                             int key_len)
+{
+       struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
+       struct crypt_ctl *crypt;
+       struct buffer_desc *buf;
+       int i;
+       u8 *pad;
+       dma_addr_t pad_phys, buf_phys;
+
+       BUILD_BUG_ON(NPE_CTX_LEN < HMAC_PAD_BLOCKLEN);
+       pad = dma_pool_alloc(ctx_pool, GFP_KERNEL, &pad_phys);
+       if (!pad)
+               return -ENOMEM;
+       buf = dma_pool_alloc(buffer_pool, GFP_KERNEL, &buf_phys);
+       if (!buf) {
+               dma_pool_free(ctx_pool, pad, pad_phys);
+               return -ENOMEM;
+       }
+       crypt = get_crypt_desc_emerg();
+       if (!crypt) {
+               dma_pool_free(ctx_pool, pad, pad_phys);
+               dma_pool_free(buffer_pool, buf, buf_phys);
+               return -EAGAIN;
+       }
+
+       memcpy(pad, key, key_len);
+       memset(pad + key_len, 0, HMAC_PAD_BLOCKLEN - key_len);
+       for (i = 0; i < HMAC_PAD_BLOCKLEN; i++)
+               pad[i] ^= xpad;
+
+       crypt->data.tfm = tfm;
+       crypt->regist_ptr = pad;
+       crypt->regist_buf = buf;
+
+       crypt->auth_offs = 0;
+       crypt->auth_len = HMAC_PAD_BLOCKLEN;
+       crypt->crypto_ctx = ctx_addr;
+       crypt->src_buf = buf_phys;
+       crypt->icv_rev_aes = target;
+       crypt->mode = NPE_OP_HASH_GEN_ICV;
+       crypt->init_len = init_len;
+       crypt->ctl_flags |= CTL_FLAG_GEN_ICV;
+
+       buf->next = NULL;
+       buf->buf_len = HMAC_PAD_BLOCKLEN;
+       buf->pkt_len = 0;
+       buf->phys_addr = pad_phys;
+
+       atomic_inc(&ctx->configuring);
+       qmgr_put_entry(send_qid, crypt_virt2phys(crypt));
+       BUG_ON(qmgr_stat_overflow(send_qid));
+       return 0;
+}
+
+static int setup_auth(struct crypto_tfm *tfm, int encrypt, unsigned int authsize,
+                     const u8 *key, int key_len, unsigned int digest_len)
+{
+       u32 itarget, otarget, npe_ctx_addr;
+       unsigned char *cinfo;
+       int init_len, ret = 0;
+       u32 cfgword;
+       struct ix_sa_dir *dir;
+       struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
+       const struct ix_hash_algo *algo;
+
+       dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
+       cinfo = dir->npe_ctx + dir->npe_ctx_idx;
+       algo = ix_hash(tfm);
+
+       /* write cfg word to cryptinfo */
+       cfgword = algo->cfgword | (authsize << 6); /* (authsize/4) << 8 */
+#ifndef __ARMEB__
+       cfgword ^= 0xAA000000; /* change the "byte swap" flags */
+#endif
+       *(__be32 *)cinfo = cpu_to_be32(cfgword);
+       cinfo += sizeof(cfgword);
+
+       /* write ICV to cryptinfo */
+       memcpy(cinfo, algo->icv, digest_len);
+       cinfo += digest_len;
+
+       itarget = dir->npe_ctx_phys + dir->npe_ctx_idx
+                               + sizeof(algo->cfgword);
+       otarget = itarget + digest_len;
+       init_len = cinfo - (dir->npe_ctx + dir->npe_ctx_idx);
+       npe_ctx_addr = dir->npe_ctx_phys + dir->npe_ctx_idx;
+
+       dir->npe_ctx_idx += init_len;
+       dir->npe_mode |= NPE_OP_HASH_ENABLE;
+
+       if (!encrypt)
+               dir->npe_mode |= NPE_OP_HASH_VERIFY;
+
+       ret = register_chain_var(tfm, HMAC_OPAD_VALUE, otarget,
+                                init_len, npe_ctx_addr, key, key_len);
+       if (ret)
+               return ret;
+       return register_chain_var(tfm, HMAC_IPAD_VALUE, itarget,
+                                 init_len, npe_ctx_addr, key, key_len);
+}
+
+static int gen_rev_aes_key(struct crypto_tfm *tfm)
+{
+       struct crypt_ctl *crypt;
+       struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
+       struct ix_sa_dir *dir = &ctx->decrypt;
+
+       crypt = get_crypt_desc_emerg();
+       if (!crypt)
+               return -EAGAIN;
+
+       *(__be32 *)dir->npe_ctx |= cpu_to_be32(CIPH_ENCR);
+
+       crypt->data.tfm = tfm;
+       crypt->crypt_offs = 0;
+       crypt->crypt_len = AES_BLOCK128;
+       crypt->src_buf = 0;
+       crypt->crypto_ctx = dir->npe_ctx_phys;
+       crypt->icv_rev_aes = dir->npe_ctx_phys + sizeof(u32);
+       crypt->mode = NPE_OP_ENC_GEN_KEY;
+       crypt->init_len = dir->npe_ctx_idx;
+       crypt->ctl_flags |= CTL_FLAG_GEN_REVAES;
+
+       atomic_inc(&ctx->configuring);
+       qmgr_put_entry(send_qid, crypt_virt2phys(crypt));
+       BUG_ON(qmgr_stat_overflow(send_qid));
+       return 0;
+}
+
+static int setup_cipher(struct crypto_tfm *tfm, int encrypt, const u8 *key,
+                       int key_len)
+{
+       u8 *cinfo;
+       u32 cipher_cfg;
+       u32 keylen_cfg = 0;
+       struct ix_sa_dir *dir;
+       struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
+       int err;
+
+       dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
+       cinfo = dir->npe_ctx;
+
+       if (encrypt) {
+               cipher_cfg = cipher_cfg_enc(tfm);
+               dir->npe_mode |= NPE_OP_CRYPT_ENCRYPT;
+       } else {
+               cipher_cfg = cipher_cfg_dec(tfm);
+       }
+       if (cipher_cfg & MOD_AES) {
+               switch (key_len) {
+               case 16:
+                       keylen_cfg = MOD_AES128;
+                       break;
+               case 24:
+                       keylen_cfg = MOD_AES192;
+                       break;
+               case 32:
+                       keylen_cfg = MOD_AES256;
+                       break;
+               default:
+                       return -EINVAL;
+               }
+               cipher_cfg |= keylen_cfg;
+       } else {
+               err = crypto_des_verify_key(tfm, key);
+               if (err)
+                       return err;
+       }
+       /* write cfg word to cryptinfo */
+       *(__be32 *)cinfo = cpu_to_be32(cipher_cfg);
+       cinfo += sizeof(cipher_cfg);
+
+       /* write cipher key to cryptinfo */
+       memcpy(cinfo, key, key_len);
+       /* NPE wants keylen set to DES3_EDE_KEY_SIZE even for single DES */
+       if (key_len < DES3_EDE_KEY_SIZE && !(cipher_cfg & MOD_AES)) {
+               memset(cinfo + key_len, 0, DES3_EDE_KEY_SIZE - key_len);
+               key_len = DES3_EDE_KEY_SIZE;
+       }
+       dir->npe_ctx_idx = sizeof(cipher_cfg) + key_len;
+       dir->npe_mode |= NPE_OP_CRYPT_ENABLE;
+       if ((cipher_cfg & MOD_AES) && !encrypt)
+               return gen_rev_aes_key(tfm);
+
+       return 0;
+}
+
+static struct buffer_desc *chainup_buffers(struct device *dev,
+               struct scatterlist *sg, unsigned int nbytes,
+               struct buffer_desc *buf, gfp_t flags,
+               enum dma_data_direction dir)
+{
+       for (; nbytes > 0; sg = sg_next(sg)) {
+               unsigned int len = min(nbytes, sg->length);
+               struct buffer_desc *next_buf;
+               dma_addr_t next_buf_phys;
+               void *ptr;
+
+               nbytes -= len;
+               ptr = sg_virt(sg);
+               next_buf = dma_pool_alloc(buffer_pool, flags, &next_buf_phys);
+               if (!next_buf) {
+                       /* Terminate the chain built so far before bailing
+                        * out; "buf = NULL; break;" would make the
+                        * buf->next store after the loop dereference NULL.
+                        */
+                       buf->next = NULL;
+                       buf->phys_next = 0;
+                       return NULL;
+               }
+               sg_dma_address(sg) = dma_map_single(dev, ptr, len, dir);
+               buf->next = next_buf;
+               buf->phys_next = next_buf_phys;
+               buf = next_buf;
+
+               buf->phys_addr = sg_dma_address(sg);
+               buf->buf_len = len;
+               buf->dir = dir;
+       }
+       buf->next = NULL;
+       buf->phys_next = 0;
+       return buf;
+}
+
+static int ablk_setkey(struct crypto_skcipher *tfm, const u8 *key,
+                      unsigned int key_len)
+{
+       struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
+       int ret;
+
+       init_completion(&ctx->completion);
+       atomic_inc(&ctx->configuring);
+
+       reset_sa_dir(&ctx->encrypt);
+       reset_sa_dir(&ctx->decrypt);
+
+       ctx->encrypt.npe_mode = NPE_OP_HMAC_DISABLE;
+       ctx->decrypt.npe_mode = NPE_OP_HMAC_DISABLE;
+
+       ret = setup_cipher(&tfm->base, 0, key, key_len);
+       if (ret)
+               goto out;
+       ret = setup_cipher(&tfm->base, 1, key, key_len);
+out:
+       if (!atomic_dec_and_test(&ctx->configuring))
+               wait_for_completion(&ctx->completion);
+       if (ret)
+               return ret;
+       crypto_skcipher_clear_flags(ctx->fallback_tfm, CRYPTO_TFM_REQ_MASK);
+       crypto_skcipher_set_flags(ctx->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
+
+       return crypto_skcipher_setkey(ctx->fallback_tfm, key, key_len);
+}
+
+static int ablk_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
+                           unsigned int key_len)
+{
+       return verify_skcipher_des3_key(tfm, key) ?:
+              ablk_setkey(tfm, key, key_len);
+}
+
+static int ablk_rfc3686_setkey(struct crypto_skcipher *tfm, const u8 *key,
+                              unsigned int key_len)
+{
+       struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+       /* the nonce is stored in bytes at end of key */
+       if (key_len < CTR_RFC3686_NONCE_SIZE)
+               return -EINVAL;
+
+       memcpy(ctx->nonce, key + (key_len - CTR_RFC3686_NONCE_SIZE),
+              CTR_RFC3686_NONCE_SIZE);
+
+       key_len -= CTR_RFC3686_NONCE_SIZE;
+       return ablk_setkey(tfm, key, key_len);
+}
+
+static int ixp4xx_cipher_fallback(struct skcipher_request *areq, int encrypt)
+{
+       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
+       struct ixp_ctx *op = crypto_skcipher_ctx(tfm);
+       struct ablk_ctx *rctx = skcipher_request_ctx(areq);
+       int err;
+
+       skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
+       skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
+                                     areq->base.complete, areq->base.data);
+       skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
+                                  areq->cryptlen, areq->iv);
+       if (encrypt)
+               err = crypto_skcipher_encrypt(&rctx->fallback_req);
+       else
+               err = crypto_skcipher_decrypt(&rctx->fallback_req);
+       return err;
+}
+
+static int ablk_perform(struct skcipher_request *req, int encrypt)
+{
+       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+       struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
+       unsigned int ivsize = crypto_skcipher_ivsize(tfm);
+       struct ix_sa_dir *dir;
+       struct crypt_ctl *crypt;
+       unsigned int nbytes = req->cryptlen;
+       enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
+       struct ablk_ctx *req_ctx = skcipher_request_ctx(req);
+       struct buffer_desc src_hook;
+       struct device *dev = &pdev->dev;
+       unsigned int offset;
+       gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
+                               GFP_KERNEL : GFP_ATOMIC;
+
+       if (sg_nents(req->src) > 1 || sg_nents(req->dst) > 1)
+               return ixp4xx_cipher_fallback(req, encrypt);
+
+       if (qmgr_stat_full(send_qid))
+               return -EAGAIN;
+       if (atomic_read(&ctx->configuring))
+               return -EAGAIN;
+
+       dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
+       req_ctx->encrypt = encrypt;
+
+       crypt = get_crypt_desc();
+       if (!crypt)
+               return -ENOMEM;
+
+       crypt->data.ablk_req = req;
+       crypt->crypto_ctx = dir->npe_ctx_phys;
+       crypt->mode = dir->npe_mode;
+       crypt->init_len = dir->npe_ctx_idx;
+
+       crypt->crypt_offs = 0;
+       crypt->crypt_len = nbytes;
+
+       BUG_ON(ivsize && !req->iv);
+       memcpy(crypt->iv, req->iv, ivsize);
+       if (ivsize > 0 && !encrypt) {
+               offset = req->cryptlen - ivsize;
+               scatterwalk_map_and_copy(req_ctx->iv, req->src, offset, ivsize, 0);
+       }
+       if (req->src != req->dst) {
+               struct buffer_desc dst_hook;
+
+               crypt->mode |= NPE_OP_NOT_IN_PLACE;
+               /* This was never tested by Intel
+                * for more than one dst buffer, I think. */
+               req_ctx->dst = NULL;
+               if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook,
+                                    flags, DMA_FROM_DEVICE))
+                       goto free_buf_dest;
+               src_direction = DMA_TO_DEVICE;
+               req_ctx->dst = dst_hook.next;
+               crypt->dst_buf = dst_hook.phys_next;
+       } else {
+               req_ctx->dst = NULL;
+       }
+       req_ctx->src = NULL;
+       if (!chainup_buffers(dev, req->src, nbytes, &src_hook, flags,
+                            src_direction))
+               goto free_buf_src;
+
+       req_ctx->src = src_hook.next;
+       crypt->src_buf = src_hook.phys_next;
+       crypt->ctl_flags |= CTL_FLAG_PERFORM_ABLK;
+       qmgr_put_entry(send_qid, crypt_virt2phys(crypt));
+       BUG_ON(qmgr_stat_overflow(send_qid));
+       return -EINPROGRESS;
+
+free_buf_src:
+       free_buf_chain(dev, req_ctx->src, crypt->src_buf);
+free_buf_dest:
+       if (req->src != req->dst)
+               free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
+
+       crypt->ctl_flags = CTL_FLAG_UNUSED;
+       return -ENOMEM;
+}
+
+static int ablk_encrypt(struct skcipher_request *req)
+{
+       return ablk_perform(req, 1);
+}
+
+static int ablk_decrypt(struct skcipher_request *req)
+{
+       return ablk_perform(req, 0);
+}
+
+static int ablk_rfc3686_crypt(struct skcipher_request *req)
+{
+       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+       struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
+       u8 iv[CTR_RFC3686_BLOCK_SIZE];
+       u8 *info = req->iv;
+       int ret;
+
+       /* set up counter block */
+       memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
+       memcpy(iv + CTR_RFC3686_NONCE_SIZE, info, CTR_RFC3686_IV_SIZE);
+
+       /* initialize counter portion of counter block */
+       *(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
+               cpu_to_be32(1);
+
+       req->iv = iv;
+       ret = ablk_perform(req, 1);
+       req->iv = info;
+       return ret;
+}
+
+static int aead_perform(struct aead_request *req, int encrypt,
+                       int cryptoffset, int eff_cryptlen, u8 *iv)
+{
+       struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+       struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
+       unsigned int ivsize = crypto_aead_ivsize(tfm);
+       unsigned int authsize = crypto_aead_authsize(tfm);
+       struct ix_sa_dir *dir;
+       struct crypt_ctl *crypt;
+       unsigned int cryptlen;
+       struct buffer_desc *buf, src_hook;
+       struct aead_ctx *req_ctx = aead_request_ctx(req);
+       struct device *dev = &pdev->dev;
+       gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
+                               GFP_KERNEL : GFP_ATOMIC;
+       enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
+       unsigned int lastlen;
+
+       if (qmgr_stat_full(send_qid))
+               return -EAGAIN;
+       if (atomic_read(&ctx->configuring))
+               return -EAGAIN;
+
+       if (encrypt) {
+               dir = &ctx->encrypt;
+               cryptlen = req->cryptlen;
+       } else {
+               dir = &ctx->decrypt;
+               /* req->cryptlen includes the authsize when decrypting */
+               cryptlen = req->cryptlen - authsize;
+               eff_cryptlen -= authsize;
+       }
+       crypt = get_crypt_desc();
+       if (!crypt)
+               return -ENOMEM;
+
+       crypt->data.aead_req = req;
+       crypt->crypto_ctx = dir->npe_ctx_phys;
+       crypt->mode = dir->npe_mode;
+       crypt->init_len = dir->npe_ctx_idx;
+
+       crypt->crypt_offs = cryptoffset;
+       crypt->crypt_len = eff_cryptlen;
+
+       crypt->auth_offs = 0;
+       crypt->auth_len = req->assoclen + cryptlen;
+       BUG_ON(ivsize && !req->iv);
+       memcpy(crypt->iv, req->iv, ivsize);
+
+       buf = chainup_buffers(dev, req->src, crypt->auth_len,
+                             &src_hook, flags, src_direction);
+       req_ctx->src = src_hook.next;
+       crypt->src_buf = src_hook.phys_next;
+       if (!buf)
+               goto free_buf_src;
+
+       lastlen = buf->buf_len;
+       if (lastlen >= authsize)
+               crypt->icv_rev_aes = buf->phys_addr +
+                                    buf->buf_len - authsize;
+
+       req_ctx->dst = NULL;
+
+       if (req->src != req->dst) {
+               struct buffer_desc dst_hook;
+
+               crypt->mode |= NPE_OP_NOT_IN_PLACE;
+               src_direction = DMA_TO_DEVICE;
+
+               buf = chainup_buffers(dev, req->dst, crypt->auth_len,
+                                     &dst_hook, flags, DMA_FROM_DEVICE);
+               req_ctx->dst = dst_hook.next;
+               crypt->dst_buf = dst_hook.phys_next;
+
+               if (!buf)
+                       goto free_buf_dst;
+
+               if (encrypt) {
+                       lastlen = buf->buf_len;
+                       if (lastlen >= authsize)
+                               crypt->icv_rev_aes = buf->phys_addr +
+                                                    buf->buf_len - authsize;
+               }
+       }
+
+       if (unlikely(lastlen < authsize)) {
+               /* The 12 hmac bytes are scattered,
+                * we need to copy them into a safe buffer */
+               req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags,
+                                                   &crypt->icv_rev_aes);
+               if (unlikely(!req_ctx->hmac_virt))
+                       goto free_buf_dst;
+               if (!encrypt) {
+                       scatterwalk_map_and_copy(req_ctx->hmac_virt,
+                                                req->src, cryptlen, authsize, 0);
+               }
+               req_ctx->encrypt = encrypt;
+       } else {
+               req_ctx->hmac_virt = NULL;
+       }
+
+       crypt->ctl_flags |= CTL_FLAG_PERFORM_AEAD;
+       qmgr_put_entry(send_qid, crypt_virt2phys(crypt));
+       BUG_ON(qmgr_stat_overflow(send_qid));
+       return -EINPROGRESS;
+
+free_buf_dst:
+       free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
+free_buf_src:
+       free_buf_chain(dev, req_ctx->src, crypt->src_buf);
+       crypt->ctl_flags = CTL_FLAG_UNUSED;
+       return -ENOMEM;
+}
+
+static int aead_setup(struct crypto_aead *tfm, unsigned int authsize)
+{
+       struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
+       unsigned int digest_len = crypto_aead_maxauthsize(tfm);
+       int ret;
+
+       if (!ctx->enckey_len && !ctx->authkey_len)
+               return 0;
+       init_completion(&ctx->completion);
+       atomic_inc(&ctx->configuring);
+
+       reset_sa_dir(&ctx->encrypt);
+       reset_sa_dir(&ctx->decrypt);
+
+       ret = setup_cipher(&tfm->base, 0, ctx->enckey, ctx->enckey_len);
+       if (ret)
+               goto out;
+       ret = setup_cipher(&tfm->base, 1, ctx->enckey, ctx->enckey_len);
+       if (ret)
+               goto out;
+       ret = setup_auth(&tfm->base, 0, authsize, ctx->authkey,
+                        ctx->authkey_len, digest_len);
+       if (ret)
+               goto out;
+       ret = setup_auth(&tfm->base, 1, authsize,  ctx->authkey,
+                        ctx->authkey_len, digest_len);
+out:
+       if (!atomic_dec_and_test(&ctx->configuring))
+               wait_for_completion(&ctx->completion);
+       return ret;
+}
+
+static int aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
+{
+       int max = crypto_aead_maxauthsize(tfm) >> 2;
+
+       if ((authsize >> 2) < 1 || (authsize >> 2) > max || (authsize & 3))
+               return -EINVAL;
+       return aead_setup(tfm, authsize);
+}
+
+static int aead_setkey(struct crypto_aead *tfm, const u8 *key,
+                      unsigned int keylen)
+{
+       struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
+       struct crypto_authenc_keys keys;
+
+       if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
+               goto badkey;
+
+       if (keys.authkeylen > sizeof(ctx->authkey))
+               goto badkey;
+
+       if (keys.enckeylen > sizeof(ctx->enckey))
+               goto badkey;
+
+       memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
+       memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
+       ctx->authkey_len = keys.authkeylen;
+       ctx->enckey_len = keys.enckeylen;
+
+       memzero_explicit(&keys, sizeof(keys));
+       return aead_setup(tfm, crypto_aead_authsize(tfm));
+badkey:
+       memzero_explicit(&keys, sizeof(keys));
+       return -EINVAL;
+}
+
+static int des3_aead_setkey(struct crypto_aead *tfm, const u8 *key,
+                           unsigned int keylen)
+{
+       struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
+       struct crypto_authenc_keys keys;
+       int err;
+
+       err = crypto_authenc_extractkeys(&keys, key, keylen);
+       if (unlikely(err))
+               goto badkey;
+
+       err = -EINVAL;
+       if (keys.authkeylen > sizeof(ctx->authkey))
+               goto badkey;
+
+       err = verify_aead_des3_key(tfm, keys.enckey, keys.enckeylen);
+       if (err)
+               goto badkey;
+
+       memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
+       memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
+       ctx->authkey_len = keys.authkeylen;
+       ctx->enckey_len = keys.enckeylen;
+
+       memzero_explicit(&keys, sizeof(keys));
+       return aead_setup(tfm, crypto_aead_authsize(tfm));
+badkey:
+       memzero_explicit(&keys, sizeof(keys));
+       return err;
+}
+
+static int aead_encrypt(struct aead_request *req)
+{
+       return aead_perform(req, 1, req->assoclen, req->cryptlen, req->iv);
+}
+
+static int aead_decrypt(struct aead_request *req)
+{
+       return aead_perform(req, 0, req->assoclen, req->cryptlen, req->iv);
+}
+
+static struct ixp_alg ixp4xx_algos[] = {
+{
+       .crypto = {
+               .base.cra_name          = "cbc(des)",
+               .base.cra_blocksize     = DES_BLOCK_SIZE,
+
+               .min_keysize            = DES_KEY_SIZE,
+               .max_keysize            = DES_KEY_SIZE,
+               .ivsize                 = DES_BLOCK_SIZE,
+       },
+       .cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
+       .cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
+
+}, {
+       .crypto = {
+               .base.cra_name          = "ecb(des)",
+               .base.cra_blocksize     = DES_BLOCK_SIZE,
+               .min_keysize            = DES_KEY_SIZE,
+               .max_keysize            = DES_KEY_SIZE,
+       },
+       .cfg_enc = CIPH_ENCR | MOD_DES | MOD_ECB | KEYLEN_192,
+       .cfg_dec = CIPH_DECR | MOD_DES | MOD_ECB | KEYLEN_192,
+}, {
+       .crypto = {
+               .base.cra_name          = "cbc(des3_ede)",
+               .base.cra_blocksize     = DES3_EDE_BLOCK_SIZE,
+
+               .min_keysize            = DES3_EDE_KEY_SIZE,
+               .max_keysize            = DES3_EDE_KEY_SIZE,
+               .ivsize                 = DES3_EDE_BLOCK_SIZE,
+               .setkey                 = ablk_des3_setkey,
+       },
+       .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
+       .cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
+}, {
+       .crypto = {
+               .base.cra_name          = "ecb(des3_ede)",
+               .base.cra_blocksize     = DES3_EDE_BLOCK_SIZE,
+
+               .min_keysize            = DES3_EDE_KEY_SIZE,
+               .max_keysize            = DES3_EDE_KEY_SIZE,
+               .setkey                 = ablk_des3_setkey,
+       },
+       .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_ECB | KEYLEN_192,
+       .cfg_dec = CIPH_DECR | MOD_3DES | MOD_ECB | KEYLEN_192,
+}, {
+       .crypto = {
+               .base.cra_name          = "cbc(aes)",
+               .base.cra_blocksize     = AES_BLOCK_SIZE,
+
+               .min_keysize            = AES_MIN_KEY_SIZE,
+               .max_keysize            = AES_MAX_KEY_SIZE,
+               .ivsize                 = AES_BLOCK_SIZE,
+       },
+       .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
+       .cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
+}, {
+       .crypto = {
+               .base.cra_name          = "ecb(aes)",
+               .base.cra_blocksize     = AES_BLOCK_SIZE,
+
+               .min_keysize            = AES_MIN_KEY_SIZE,
+               .max_keysize            = AES_MAX_KEY_SIZE,
+       },
+       .cfg_enc = CIPH_ENCR | MOD_AES | MOD_ECB,
+       .cfg_dec = CIPH_DECR | MOD_AES | MOD_ECB,
+}, {
+       .crypto = {
+               .base.cra_name          = "ctr(aes)",
+               .base.cra_blocksize     = 1,
+
+               .min_keysize            = AES_MIN_KEY_SIZE,
+               .max_keysize            = AES_MAX_KEY_SIZE,
+               .ivsize                 = AES_BLOCK_SIZE,
+       },
+       .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
+       .cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
+}, {
+       .crypto = {
+               .base.cra_name          = "rfc3686(ctr(aes))",
+               .base.cra_blocksize     = 1,
+
+               .min_keysize            = AES_MIN_KEY_SIZE,
+               .max_keysize            = AES_MAX_KEY_SIZE,
+               .ivsize                 = AES_BLOCK_SIZE,
+               .setkey                 = ablk_rfc3686_setkey,
+               .encrypt                = ablk_rfc3686_crypt,
+               .decrypt                = ablk_rfc3686_crypt,
+       },
+       .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
+       .cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
+} };
+
+static struct ixp_aead_alg ixp4xx_aeads[] = {
+{
+       .crypto = {
+               .base = {
+                       .cra_name       = "authenc(hmac(md5),cbc(des))",
+                       .cra_blocksize  = DES_BLOCK_SIZE,
+               },
+               .ivsize         = DES_BLOCK_SIZE,
+               .maxauthsize    = MD5_DIGEST_SIZE,
+       },
+       .hash = &hash_alg_md5,
+       .cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
+       .cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
+}, {
+       .crypto = {
+               .base = {
+                       .cra_name       = "authenc(hmac(md5),cbc(des3_ede))",
+                       .cra_blocksize  = DES3_EDE_BLOCK_SIZE,
+               },
+               .ivsize         = DES3_EDE_BLOCK_SIZE,
+               .maxauthsize    = MD5_DIGEST_SIZE,
+               .setkey         = des3_aead_setkey,
+       },
+       .hash = &hash_alg_md5,
+       .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
+       .cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
+}, {
+       .crypto = {
+               .base = {
+                       .cra_name       = "authenc(hmac(sha1),cbc(des))",
+                       .cra_blocksize  = DES_BLOCK_SIZE,
+               },
+               .ivsize         = DES_BLOCK_SIZE,
+               .maxauthsize    = SHA1_DIGEST_SIZE,
+       },
+       .hash = &hash_alg_sha1,
+       .cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
+       .cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
+}, {
+       .crypto = {
+               .base = {
+                       .cra_name       = "authenc(hmac(sha1),cbc(des3_ede))",
+                       .cra_blocksize  = DES3_EDE_BLOCK_SIZE,
+               },
+               .ivsize         = DES3_EDE_BLOCK_SIZE,
+               .maxauthsize    = SHA1_DIGEST_SIZE,
+               .setkey         = des3_aead_setkey,
+       },
+       .hash = &hash_alg_sha1,
+       .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
+       .cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
+}, {
+       .crypto = {
+               .base = {
+                       .cra_name       = "authenc(hmac(md5),cbc(aes))",
+                       .cra_blocksize  = AES_BLOCK_SIZE,
+               },
+               .ivsize         = AES_BLOCK_SIZE,
+               .maxauthsize    = MD5_DIGEST_SIZE,
+       },
+       .hash = &hash_alg_md5,
+       .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
+       .cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
+}, {
+       .crypto = {
+               .base = {
+                       .cra_name       = "authenc(hmac(sha1),cbc(aes))",
+                       .cra_blocksize  = AES_BLOCK_SIZE,
+               },
+               .ivsize         = AES_BLOCK_SIZE,
+               .maxauthsize    = SHA1_DIGEST_SIZE,
+       },
+       .hash = &hash_alg_sha1,
+       .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
+       .cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
+} };
+
+#define IXP_POSTFIX "-ixp4xx"
+
+static int ixp_crypto_probe(struct platform_device *_pdev)
+{
+       struct device *dev = &_pdev->dev;
+       int num = ARRAY_SIZE(ixp4xx_algos);
+       int i, err;
+
+       pdev = _pdev;
+
+       err = init_ixp_crypto(dev);
+       if (err)
+               return err;
+
+       for (i = 0; i < num; i++) {
+               struct skcipher_alg *cra = &ixp4xx_algos[i].crypto;
+
+               if (snprintf(cra->base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
+                            "%s"IXP_POSTFIX, cra->base.cra_name) >=
+                            CRYPTO_MAX_ALG_NAME)
+                       continue;
+               if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES))
+                       continue;
+
+               /* block ciphers */
+               cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+                                     CRYPTO_ALG_ASYNC |
+                                     CRYPTO_ALG_ALLOCATES_MEMORY |
+                                     CRYPTO_ALG_NEED_FALLBACK;
+               if (!cra->setkey)
+                       cra->setkey = ablk_setkey;
+               if (!cra->encrypt)
+                       cra->encrypt = ablk_encrypt;
+               if (!cra->decrypt)
+                       cra->decrypt = ablk_decrypt;
+               cra->init = init_tfm_ablk;
+               cra->exit = exit_tfm_ablk;
+
+               cra->base.cra_ctxsize = sizeof(struct ixp_ctx);
+               cra->base.cra_module = THIS_MODULE;
+               cra->base.cra_alignmask = 3;
+               cra->base.cra_priority = 300;
+               if (crypto_register_skcipher(cra))
+                       dev_err(&pdev->dev, "Failed to register '%s'\n",
+                               cra->base.cra_name);
+               else
+                       ixp4xx_algos[i].registered = 1;
+       }
+
+       for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) {
+               struct aead_alg *cra = &ixp4xx_aeads[i].crypto;
+
+               if (snprintf(cra->base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
+                            "%s"IXP_POSTFIX, cra->base.cra_name) >=
+                   CRYPTO_MAX_ALG_NAME)
+                       continue;
+               if (!support_aes && (ixp4xx_aeads[i].cfg_enc & MOD_AES))
+                       continue;
+
+               /* authenc */
+               cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
+                                     CRYPTO_ALG_ASYNC |
+                                     CRYPTO_ALG_ALLOCATES_MEMORY;
+               cra->setkey = cra->setkey ?: aead_setkey;
+               cra->setauthsize = aead_setauthsize;
+               cra->encrypt = aead_encrypt;
+               cra->decrypt = aead_decrypt;
+               cra->init = init_tfm_aead;
+               cra->exit = exit_tfm_aead;
+
+               cra->base.cra_ctxsize = sizeof(struct ixp_ctx);
+               cra->base.cra_module = THIS_MODULE;
+               cra->base.cra_alignmask = 3;
+               cra->base.cra_priority = 300;
+
+               if (crypto_register_aead(cra))
+                       dev_err(&pdev->dev, "Failed to register '%s'\n",
+                               cra->base.cra_driver_name);
+               else
+                       ixp4xx_aeads[i].registered = 1;
+       }
+       return 0;
+}
+
+static int ixp_crypto_remove(struct platform_device *pdev)
+{
+       int num = ARRAY_SIZE(ixp4xx_algos);
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) {
+               if (ixp4xx_aeads[i].registered)
+                       crypto_unregister_aead(&ixp4xx_aeads[i].crypto);
+       }
+
+       for (i = 0; i < num; i++) {
+               if (ixp4xx_algos[i].registered)
+                       crypto_unregister_skcipher(&ixp4xx_algos[i].crypto);
+       }
+       release_ixp_crypto(&pdev->dev);
+
+       return 0;
+}
+
+static const struct of_device_id ixp4xx_crypto_of_match[] = {
+       {
+               .compatible = "intel,ixp4xx-crypto",
+       },
+       {},
+};
+
+static struct platform_driver ixp_crypto_driver = {
+       .probe = ixp_crypto_probe,
+       .remove = ixp_crypto_remove,
+       .driver = {
+               .name = "ixp4xx_crypto",
+               .of_match_table = ixp4xx_crypto_of_match,
+       },
+};
+module_platform_driver(ixp_crypto_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Christian Hohnstaedt <chohnstaedt@innominate.com>");
+MODULE_DESCRIPTION("IXP4xx hardware crypto");
+
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
deleted file mode 100644 (file)
index b63e235..0000000
+++ /dev/null
@@ -1,1601 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Intel IXP4xx NPE-C crypto driver
- *
- * Copyright (C) 2008 Christian Hohnstaedt <chohnstaedt@innominate.com>
- */
-
-#include <linux/platform_device.h>
-#include <linux/dma-mapping.h>
-#include <linux/dmapool.h>
-#include <linux/crypto.h>
-#include <linux/kernel.h>
-#include <linux/rtnetlink.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/gfp.h>
-#include <linux/module.h>
-#include <linux/of.h>
-
-#include <crypto/ctr.h>
-#include <crypto/internal/des.h>
-#include <crypto/aes.h>
-#include <crypto/hmac.h>
-#include <crypto/sha1.h>
-#include <crypto/algapi.h>
-#include <crypto/internal/aead.h>
-#include <crypto/internal/skcipher.h>
-#include <crypto/authenc.h>
-#include <crypto/scatterwalk.h>
-
-#include <linux/soc/ixp4xx/npe.h>
-#include <linux/soc/ixp4xx/qmgr.h>
-
-/* Intermittent includes, delete this after v5.14-rc1 */
-#include <linux/soc/ixp4xx/cpu.h>
-
-#define MAX_KEYLEN 32
-
-/* hash: cfgword + 2 * digestlen; crypt: keylen + cfgword */
-#define NPE_CTX_LEN 80
-#define AES_BLOCK128 16
-
-#define NPE_OP_HASH_VERIFY   0x01
-#define NPE_OP_CCM_ENABLE    0x04
-#define NPE_OP_CRYPT_ENABLE  0x08
-#define NPE_OP_HASH_ENABLE   0x10
-#define NPE_OP_NOT_IN_PLACE  0x20
-#define NPE_OP_HMAC_DISABLE  0x40
-#define NPE_OP_CRYPT_ENCRYPT 0x80
-
-#define NPE_OP_CCM_GEN_MIC   0xcc
-#define NPE_OP_HASH_GEN_ICV  0x50
-#define NPE_OP_ENC_GEN_KEY   0xc9
-
-#define MOD_ECB     0x0000
-#define MOD_CTR     0x1000
-#define MOD_CBC_ENC 0x2000
-#define MOD_CBC_DEC 0x3000
-#define MOD_CCM_ENC 0x4000
-#define MOD_CCM_DEC 0x5000
-
-#define KEYLEN_128  4
-#define KEYLEN_192  6
-#define KEYLEN_256  8
-
-#define CIPH_DECR   0x0000
-#define CIPH_ENCR   0x0400
-
-#define MOD_DES     0x0000
-#define MOD_TDEA2   0x0100
-#define MOD_3DES    0x0200
-#define MOD_AES     0x0800
-#define MOD_AES128  (0x0800 | KEYLEN_128)
-#define MOD_AES192  (0x0900 | KEYLEN_192)
-#define MOD_AES256  (0x0a00 | KEYLEN_256)
-
-#define MAX_IVLEN   16
-#define NPE_QLEN    16
-/* Extra descriptors reserved for configuration (registration) requests
- * issued while the first NPE_QLEN crypt_ctl entries are busy */
-#define NPE_QLEN_TOTAL 64
-
-#define CTL_FLAG_UNUSED                0x0000
-#define CTL_FLAG_USED          0x1000
-#define CTL_FLAG_PERFORM_ABLK  0x0001
-#define CTL_FLAG_GEN_ICV       0x0002
-#define CTL_FLAG_GEN_REVAES    0x0004
-#define CTL_FLAG_PERFORM_AEAD  0x0008
-#define CTL_FLAG_MASK          0x000f
-
-#define HMAC_PAD_BLOCKLEN SHA1_BLOCK_SIZE
-
-#define MD5_DIGEST_SIZE   16
-
-struct buffer_desc {
-       u32 phys_next;
-#ifdef __ARMEB__
-       u16 buf_len;
-       u16 pkt_len;
-#else
-       u16 pkt_len;
-       u16 buf_len;
-#endif
-       dma_addr_t phys_addr;
-       u32 __reserved[4];
-       struct buffer_desc *next;
-       enum dma_data_direction dir;
-};
-
-struct crypt_ctl {
-#ifdef __ARMEB__
-       u8 mode;                /* NPE_OP_*  operation mode */
-       u8 init_len;
-       u16 reserved;
-#else
-       u16 reserved;
-       u8 init_len;
-       u8 mode;                /* NPE_OP_*  operation mode */
-#endif
-       u8 iv[MAX_IVLEN];       /* IV for CBC mode or CTR IV for CTR mode */
-       dma_addr_t icv_rev_aes; /* icv or rev aes */
-       dma_addr_t src_buf;
-       dma_addr_t dst_buf;
-#ifdef __ARMEB__
-       u16 auth_offs;          /* Authentication start offset */
-       u16 auth_len;           /* Authentication data length */
-       u16 crypt_offs;         /* Cryption start offset */
-       u16 crypt_len;          /* Cryption data length */
-#else
-       u16 auth_len;           /* Authentication data length */
-       u16 auth_offs;          /* Authentication start offset */
-       u16 crypt_len;          /* Cryption data length */
-       u16 crypt_offs;         /* Cryption start offset */
-#endif
-       u32 aadAddr;            /* Additional Auth Data Addr for CCM mode */
-       u32 crypto_ctx;         /* NPE Crypto Param structure address */
-
-       /* Used by Host: 4*4 bytes */
-       unsigned int ctl_flags;
-       union {
-               struct skcipher_request *ablk_req;
-               struct aead_request *aead_req;
-               struct crypto_tfm *tfm;
-       } data;
-       struct buffer_desc *regist_buf;
-       u8 *regist_ptr;
-};
-
-struct ablk_ctx {
-       struct buffer_desc *src;
-       struct buffer_desc *dst;
-       u8 iv[MAX_IVLEN];
-       bool encrypt;
-       struct skcipher_request fallback_req;   // keep at the end
-};
-
-struct aead_ctx {
-       struct buffer_desc *src;
-       struct buffer_desc *dst;
-       struct scatterlist ivlist;
-       /* used when the hmac is not on one sg entry */
-       u8 *hmac_virt;
-       int encrypt;
-};
-
-struct ix_hash_algo {
-       u32 cfgword;
-       unsigned char *icv;
-};
-
-struct ix_sa_dir {
-       unsigned char *npe_ctx;
-       dma_addr_t npe_ctx_phys;
-       int npe_ctx_idx;
-       u8 npe_mode;
-};
-
-struct ixp_ctx {
-       struct ix_sa_dir encrypt;
-       struct ix_sa_dir decrypt;
-       int authkey_len;
-       u8 authkey[MAX_KEYLEN];
-       int enckey_len;
-       u8 enckey[MAX_KEYLEN];
-       u8 salt[MAX_IVLEN];
-       u8 nonce[CTR_RFC3686_NONCE_SIZE];
-       unsigned int salted;
-       atomic_t configuring;
-       struct completion completion;
-       struct crypto_skcipher *fallback_tfm;
-};
-
-struct ixp_alg {
-       struct skcipher_alg crypto;
-       const struct ix_hash_algo *hash;
-       u32 cfg_enc;
-       u32 cfg_dec;
-
-       int registered;
-};
-
-struct ixp_aead_alg {
-       struct aead_alg crypto;
-       const struct ix_hash_algo *hash;
-       u32 cfg_enc;
-       u32 cfg_dec;
-
-       int registered;
-};
-
-static const struct ix_hash_algo hash_alg_md5 = {
-       .cfgword        = 0xAA010004,
-       .icv            = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
-                         "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
-};
-
-static const struct ix_hash_algo hash_alg_sha1 = {
-       .cfgword        = 0x00000005,
-       .icv            = "\x67\x45\x23\x01\xEF\xCD\xAB\x89\x98\xBA"
-                         "\xDC\xFE\x10\x32\x54\x76\xC3\xD2\xE1\xF0",
-};
-
-static struct npe *npe_c;
-
-static unsigned int send_qid;
-static unsigned int recv_qid;
-static struct dma_pool *buffer_pool;
-static struct dma_pool *ctx_pool;
-
-static struct crypt_ctl *crypt_virt;
-static dma_addr_t crypt_phys;
-
-static int support_aes = 1;
-
-static struct platform_device *pdev;
-
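-/*
- * crypt_virt/crypt_phys describe one coherent DMA array of crypt_ctl
- * descriptors, so the two helpers below convert between virtual and bus
- * addresses by plain index arithmetic.
- */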
-static inline dma_addr_t crypt_virt2phys(struct crypt_ctl *virt)
-{
-       return crypt_phys + (virt - crypt_virt) * sizeof(struct crypt_ctl);
-}
-
-static inline struct crypt_ctl *crypt_phys2virt(dma_addr_t phys)
-{
-       return crypt_virt + (phys - crypt_phys) / sizeof(struct crypt_ctl);
-}
-
-static inline u32 cipher_cfg_enc(struct crypto_tfm *tfm)
-{
-       return container_of(tfm->__crt_alg, struct ixp_alg, crypto.base)->cfg_enc;
-}
-
-static inline u32 cipher_cfg_dec(struct crypto_tfm *tfm)
-{
-       return container_of(tfm->__crt_alg, struct ixp_alg, crypto.base)->cfg_dec;
-}
-
-static inline const struct ix_hash_algo *ix_hash(struct crypto_tfm *tfm)
-{
-       return container_of(tfm->__crt_alg, struct ixp_alg, crypto.base)->hash;
-}
-
-static int setup_crypt_desc(void)
-{
-       struct device *dev = &pdev->dev;
-
-       BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64);
-       crypt_virt = dma_alloc_coherent(dev,
-                                       NPE_QLEN * sizeof(struct crypt_ctl),
-                                       &crypt_phys, GFP_ATOMIC);
-       if (!crypt_virt)
-               return -ENOMEM;
-       return 0;
-}
-
-static DEFINE_SPINLOCK(desc_lock);
-static struct crypt_ctl *get_crypt_desc(void)
-{
-       int i;
-       static int idx;
-       unsigned long flags;
-
-       spin_lock_irqsave(&desc_lock, flags);
-
-       if (unlikely(!crypt_virt))
-               setup_crypt_desc();
-       if (unlikely(!crypt_virt)) {
-               spin_unlock_irqrestore(&desc_lock, flags);
-               return NULL;
-       }
-       i = idx;
-       if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
-               if (++idx >= NPE_QLEN)
-                       idx = 0;
-               crypt_virt[i].ctl_flags = CTL_FLAG_USED;
-               spin_unlock_irqrestore(&desc_lock, flags);
-               return crypt_virt + i;
-       } else {
-               spin_unlock_irqrestore(&desc_lock, flags);
-               return NULL;
-       }
-}
-
-static DEFINE_SPINLOCK(emerg_lock);
-static struct crypt_ctl *get_crypt_desc_emerg(void)
-{
-       int i;
-       static int idx = NPE_QLEN;
-       struct crypt_ctl *desc;
-       unsigned long flags;
-
-       desc = get_crypt_desc();
-       if (desc)
-               return desc;
-       if (unlikely(!crypt_virt))
-               return NULL;
-
-       spin_lock_irqsave(&emerg_lock, flags);
-       i = idx;
-       if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
-               if (++idx >= NPE_QLEN_TOTAL)
-                       idx = NPE_QLEN;
-               crypt_virt[i].ctl_flags = CTL_FLAG_USED;
-               spin_unlock_irqrestore(&emerg_lock, flags);
-               return crypt_virt + i;
-       } else {
-               spin_unlock_irqrestore(&emerg_lock, flags);
-               return NULL;
-       }
-}
-
-static void free_buf_chain(struct device *dev, struct buffer_desc *buf,
-                          dma_addr_t phys)
-{
-       while (buf) {
-               struct buffer_desc *buf1;
-               u32 phys1;
-
-               buf1 = buf->next;
-               phys1 = buf->phys_next;
-               dma_unmap_single(dev, buf->phys_addr, buf->buf_len, buf->dir);
-               dma_pool_free(buffer_pool, buf, phys);
-               buf = buf1;
-               phys = phys1;
-       }
-}
-
-static struct tasklet_struct crypto_done_tasklet;
-
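-/*
- * Called when the ICV lives in the bounce buffer (hmac_virt): on encrypt,
- * copy the computed ICV out to its real position in the destination
- * scatterlist; on decrypt the NPE already checked the copy made before
- * submission. Either way the bounce buffer is freed here.
- */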
-static void finish_scattered_hmac(struct crypt_ctl *crypt)
-{
-       struct aead_request *req = crypt->data.aead_req;
-       struct aead_ctx *req_ctx = aead_request_ctx(req);
-       struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-       int authsize = crypto_aead_authsize(tfm);
-       int decryptlen = req->assoclen + req->cryptlen - authsize;
-
-       if (req_ctx->encrypt) {
-               scatterwalk_map_and_copy(req_ctx->hmac_virt, req->dst,
-                                        decryptlen, authsize, 1);
-       }
-       dma_pool_free(buffer_pool, req_ctx->hmac_virt, crypt->icv_rev_aes);
-}
-
-static void one_packet(dma_addr_t phys)
-{
-       struct device *dev = &pdev->dev;
-       struct crypt_ctl *crypt;
-       struct ixp_ctx *ctx;
-       int failed;
-
-       failed = phys & 0x1 ? -EBADMSG : 0;
-       phys &= ~0x3;
-       crypt = crypt_phys2virt(phys);
-
-       switch (crypt->ctl_flags & CTL_FLAG_MASK) {
-       case CTL_FLAG_PERFORM_AEAD: {
-               struct aead_request *req = crypt->data.aead_req;
-               struct aead_ctx *req_ctx = aead_request_ctx(req);
-
-               free_buf_chain(dev, req_ctx->src, crypt->src_buf);
-               free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
-               if (req_ctx->hmac_virt)
-                       finish_scattered_hmac(crypt);
-
-               aead_request_complete(req, failed);
-               break;
-       }
-       case CTL_FLAG_PERFORM_ABLK: {
-               struct skcipher_request *req = crypt->data.ablk_req;
-               struct ablk_ctx *req_ctx = skcipher_request_ctx(req);
-               struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-               unsigned int ivsize = crypto_skcipher_ivsize(tfm);
-               unsigned int offset;
-
-               if (ivsize > 0) {
-                       offset = req->cryptlen - ivsize;
-                       if (req_ctx->encrypt) {
-                               scatterwalk_map_and_copy(req->iv, req->dst,
-                                                        offset, ivsize, 0);
-                       } else {
-                               memcpy(req->iv, req_ctx->iv, ivsize);
-                               memzero_explicit(req_ctx->iv, ivsize);
-                       }
-               }
-
-               if (req_ctx->dst)
-                       free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
-
-               free_buf_chain(dev, req_ctx->src, crypt->src_buf);
-               skcipher_request_complete(req, failed);
-               break;
-       }
-       case CTL_FLAG_GEN_ICV:
-               ctx = crypto_tfm_ctx(crypt->data.tfm);
-               dma_pool_free(ctx_pool, crypt->regist_ptr,
-                             crypt->regist_buf->phys_addr);
-               dma_pool_free(buffer_pool, crypt->regist_buf, crypt->src_buf);
-               if (atomic_dec_and_test(&ctx->configuring))
-                       complete(&ctx->completion);
-               break;
-       case CTL_FLAG_GEN_REVAES:
-               ctx = crypto_tfm_ctx(crypt->data.tfm);
-               *(__be32 *)ctx->decrypt.npe_ctx &= cpu_to_be32(~CIPH_ENCR);
-               if (atomic_dec_and_test(&ctx->configuring))
-                       complete(&ctx->completion);
-               break;
-       default:
-               BUG();
-       }
-       crypt->ctl_flags = CTL_FLAG_UNUSED;
-}
-
-static void irqhandler(void *_unused)
-{
-       tasklet_schedule(&crypto_done_tasklet);
-}
-
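-/*
- * Drain up to four completed requests per tasklet run, then reschedule;
- * this bounds the work done per invocation while irqhandler() above
- * keeps kicking the tasklet as new completions arrive.
- */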
-static void crypto_done_action(unsigned long arg)
-{
-       int i;
-
-       for (i = 0; i < 4; i++) {
-               dma_addr_t phys = qmgr_get_entry(recv_qid);
-               if (!phys)
-                       return;
-               one_packet(phys);
-       }
-       tasklet_schedule(&crypto_done_tasklet);
-}
-
-static int init_ixp_crypto(struct device *dev)
-{
-       struct device_node *np = dev->of_node;
-       u32 msg[2] = { 0, 0 };
-       int ret = -ENODEV;
-       u32 npe_id;
-
-       dev_info(dev, "probing...\n");
-
-       /* Locate the NPE and queue manager to use from device tree */
-       if (IS_ENABLED(CONFIG_OF) && np) {
-               struct of_phandle_args queue_spec;
-               struct of_phandle_args npe_spec;
-
-               ret = of_parse_phandle_with_fixed_args(np, "intel,npe-handle",
-                                                      1, 0, &npe_spec);
-               if (ret) {
-                       dev_err(dev, "no NPE engine specified\n");
-                       return -ENODEV;
-               }
-               npe_id = npe_spec.args[0];
-
-               ret = of_parse_phandle_with_fixed_args(np, "queue-rx", 1, 0,
-                                                      &queue_spec);
-               if (ret) {
-                       dev_err(dev, "no rx queue phandle\n");
-                       return -ENODEV;
-               }
-               recv_qid = queue_spec.args[0];
-
-               ret = of_parse_phandle_with_fixed_args(np, "queue-txready", 1, 0,
-                                                      &queue_spec);
-               if (ret) {
-                       dev_err(dev, "no txready queue phandle\n");
-                       return -ENODEV;
-               }
-               send_qid = queue_spec.args[0];
-       } else {
-               /*
-                * Hardcoded engine when using platform data; this goes
-                * away once we switch to using DT only.
-                */
-               npe_id = 2;
-               send_qid = 29;
-               recv_qid = 30;
-       }
-
-       npe_c = npe_request(npe_id);
-       if (!npe_c)
-               return ret;
-
-       if (!npe_running(npe_c)) {
-               ret = npe_load_firmware(npe_c, npe_name(npe_c), dev);
-               if (ret)
-                       goto npe_release;
-               if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
-                       goto npe_error;
-       } else {
-               if (npe_send_message(npe_c, msg, "STATUS_MSG"))
-                       goto npe_error;
-
-               if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
-                       goto npe_error;
-       }
-
-       switch ((msg[1] >> 16) & 0xff) {
-       case 3:
-               dev_warn(dev, "Firmware of %s lacks AES support\n", npe_name(npe_c));
-               support_aes = 0;
-               break;
-       case 4:
-       case 5:
-               support_aes = 1;
-               break;
-       default:
-               dev_err(dev, "Firmware of %s lacks crypto support\n", npe_name(npe_c));
-               ret = -ENODEV;
-               goto npe_release;
-       }
-       /* buffer_pool is also sometimes used to store the hmac,
-        * so make sure it is large enough
-        */
-       BUILD_BUG_ON(SHA1_DIGEST_SIZE > sizeof(struct buffer_desc));
-       buffer_pool = dma_pool_create("buffer", dev, sizeof(struct buffer_desc),
-                                     32, 0);
-       ret = -ENOMEM;
-       if (!buffer_pool)
-               goto err;
-
-       ctx_pool = dma_pool_create("context", dev, NPE_CTX_LEN, 16, 0);
-       if (!ctx_pool)
-               goto err;
-
-       ret = qmgr_request_queue(send_qid, NPE_QLEN_TOTAL, 0, 0,
-                                "ixp_crypto:out", NULL);
-       if (ret)
-               goto err;
-       ret = qmgr_request_queue(recv_qid, NPE_QLEN, 0, 0,
-                                "ixp_crypto:in", NULL);
-       if (ret) {
-               qmgr_release_queue(send_qid);
-               goto err;
-       }
-       qmgr_set_irq(recv_qid, QUEUE_IRQ_SRC_NOT_EMPTY, irqhandler, NULL);
-       tasklet_init(&crypto_done_tasklet, crypto_done_action, 0);
-
-       qmgr_enable_irq(recv_qid);
-       return 0;
-
-npe_error:
-       dev_err(dev, "%s not responding\n", npe_name(npe_c));
-       ret = -EIO;
-err:
-       dma_pool_destroy(ctx_pool);
-       dma_pool_destroy(buffer_pool);
-npe_release:
-       npe_release(npe_c);
-       return ret;
-}
-
-static void release_ixp_crypto(struct device *dev)
-{
-       qmgr_disable_irq(recv_qid);
-       tasklet_kill(&crypto_done_tasklet);
-
-       qmgr_release_queue(send_qid);
-       qmgr_release_queue(recv_qid);
-
-       dma_pool_destroy(ctx_pool);
-       dma_pool_destroy(buffer_pool);
-
-       npe_release(npe_c);
-
-       if (crypt_virt)
-               dma_free_coherent(dev, NPE_QLEN * sizeof(struct crypt_ctl),
-                                 crypt_virt, crypt_phys);
-}
-
-static void reset_sa_dir(struct ix_sa_dir *dir)
-{
-       memset(dir->npe_ctx, 0, NPE_CTX_LEN);
-       dir->npe_ctx_idx = 0;
-       dir->npe_mode = 0;
-}
-
-static int init_sa_dir(struct ix_sa_dir *dir)
-{
-       dir->npe_ctx = dma_pool_alloc(ctx_pool, GFP_KERNEL, &dir->npe_ctx_phys);
-       if (!dir->npe_ctx)
-               return -ENOMEM;
-
-       reset_sa_dir(dir);
-       return 0;
-}
-
-static void free_sa_dir(struct ix_sa_dir *dir)
-{
-       memset(dir->npe_ctx, 0, NPE_CTX_LEN);
-       dma_pool_free(ctx_pool, dir->npe_ctx, dir->npe_ctx_phys);
-}
-
-static int init_tfm(struct crypto_tfm *tfm)
-{
-       struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
-       int ret;
-
-       atomic_set(&ctx->configuring, 0);
-       ret = init_sa_dir(&ctx->encrypt);
-       if (ret)
-               return ret;
-       ret = init_sa_dir(&ctx->decrypt);
-       if (ret)
-               free_sa_dir(&ctx->encrypt);
-
-       return ret;
-}
-
-static int init_tfm_ablk(struct crypto_skcipher *tfm)
-{
-       struct crypto_tfm *ctfm = crypto_skcipher_tfm(tfm);
-       struct ixp_ctx *ctx = crypto_tfm_ctx(ctfm);
-       const char *name = crypto_tfm_alg_name(ctfm);
-
-       ctx->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
-       if (IS_ERR(ctx->fallback_tfm)) {
-               pr_err("ERROR: Cannot allocate fallback for %s %ld\n",
-                       name, PTR_ERR(ctx->fallback_tfm));
-               return PTR_ERR(ctx->fallback_tfm);
-       }
-
-       pr_info("Fallback for %s is %s\n",
-               crypto_tfm_alg_driver_name(&tfm->base),
-               crypto_tfm_alg_driver_name(crypto_skcipher_tfm(ctx->fallback_tfm)));
-
-       crypto_skcipher_set_reqsize(tfm, sizeof(struct ablk_ctx) + crypto_skcipher_reqsize(ctx->fallback_tfm));
-       return init_tfm(crypto_skcipher_tfm(tfm));
-}
-
-static int init_tfm_aead(struct crypto_aead *tfm)
-{
-       crypto_aead_set_reqsize(tfm, sizeof(struct aead_ctx));
-       return init_tfm(crypto_aead_tfm(tfm));
-}
-
-static void exit_tfm(struct crypto_tfm *tfm)
-{
-       struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
-
-       free_sa_dir(&ctx->encrypt);
-       free_sa_dir(&ctx->decrypt);
-}
-
-static void exit_tfm_ablk(struct crypto_skcipher *tfm)
-{
-       struct crypto_tfm *ctfm = crypto_skcipher_tfm(tfm);
-       struct ixp_ctx *ctx = crypto_tfm_ctx(ctfm);
-
-       crypto_free_skcipher(ctx->fallback_tfm);
-       exit_tfm(crypto_skcipher_tfm(tfm));
-}
-
-static void exit_tfm_aead(struct crypto_aead *tfm)
-{
-       exit_tfm(crypto_aead_tfm(tfm));
-}
-
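-/*
- * HMAC precomputation: XOR the key with the ipad/opad constant and let
- * the NPE hash the resulting block (NPE_OP_HASH_GEN_ICV), writing the
- * chaining variable back into the NPE context at 'target'. setup_auth()
- * calls this twice, once per pad, to derive the inner and outer digests.
- */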
-static int register_chain_var(struct crypto_tfm *tfm, u8 xpad, u32 target,
-                             int init_len, u32 ctx_addr, const u8 *key,
-                             int key_len)
-{
-       struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
-       struct crypt_ctl *crypt;
-       struct buffer_desc *buf;
-       int i;
-       u8 *pad;
-       dma_addr_t pad_phys, buf_phys;
-
-       BUILD_BUG_ON(NPE_CTX_LEN < HMAC_PAD_BLOCKLEN);
-       pad = dma_pool_alloc(ctx_pool, GFP_KERNEL, &pad_phys);
-       if (!pad)
-               return -ENOMEM;
-       buf = dma_pool_alloc(buffer_pool, GFP_KERNEL, &buf_phys);
-       if (!buf) {
-               dma_pool_free(ctx_pool, pad, pad_phys);
-               return -ENOMEM;
-       }
-       crypt = get_crypt_desc_emerg();
-       if (!crypt) {
-               dma_pool_free(ctx_pool, pad, pad_phys);
-               dma_pool_free(buffer_pool, buf, buf_phys);
-               return -EAGAIN;
-       }
-
-       memcpy(pad, key, key_len);
-       memset(pad + key_len, 0, HMAC_PAD_BLOCKLEN - key_len);
-       for (i = 0; i < HMAC_PAD_BLOCKLEN; i++)
-               pad[i] ^= xpad;
-
-       crypt->data.tfm = tfm;
-       crypt->regist_ptr = pad;
-       crypt->regist_buf = buf;
-
-       crypt->auth_offs = 0;
-       crypt->auth_len = HMAC_PAD_BLOCKLEN;
-       crypt->crypto_ctx = ctx_addr;
-       crypt->src_buf = buf_phys;
-       crypt->icv_rev_aes = target;
-       crypt->mode = NPE_OP_HASH_GEN_ICV;
-       crypt->init_len = init_len;
-       crypt->ctl_flags |= CTL_FLAG_GEN_ICV;
-
-       buf->next = NULL;
-       buf->buf_len = HMAC_PAD_BLOCKLEN;
-       buf->pkt_len = 0;
-       buf->phys_addr = pad_phys;
-
-       atomic_inc(&ctx->configuring);
-       qmgr_put_entry(send_qid, crypt_virt2phys(crypt));
-       BUG_ON(qmgr_stat_overflow(send_qid));
-       return 0;
-}
-
-static int setup_auth(struct crypto_tfm *tfm, int encrypt, unsigned int authsize,
-                     const u8 *key, int key_len, unsigned int digest_len)
-{
-       u32 itarget, otarget, npe_ctx_addr;
-       unsigned char *cinfo;
-       int init_len, ret = 0;
-       u32 cfgword;
-       struct ix_sa_dir *dir;
-       struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
-       const struct ix_hash_algo *algo;
-
-       dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
-       cinfo = dir->npe_ctx + dir->npe_ctx_idx;
-       algo = ix_hash(tfm);
-
-       /* write cfg word to cryptinfo */
-       cfgword = algo->cfgword | (authsize << 6); /* (authsize/4) << 8 */
-#ifndef __ARMEB__
-       cfgword ^= 0xAA000000; /* change the "byte swap" flags */
-#endif
-       *(__be32 *)cinfo = cpu_to_be32(cfgword);
-       cinfo += sizeof(cfgword);
-
-       /* write ICV to cryptinfo */
-       memcpy(cinfo, algo->icv, digest_len);
-       cinfo += digest_len;
-
-       itarget = dir->npe_ctx_phys + dir->npe_ctx_idx
-                               + sizeof(algo->cfgword);
-       otarget = itarget + digest_len;
-       init_len = cinfo - (dir->npe_ctx + dir->npe_ctx_idx);
-       npe_ctx_addr = dir->npe_ctx_phys + dir->npe_ctx_idx;
-
-       dir->npe_ctx_idx += init_len;
-       dir->npe_mode |= NPE_OP_HASH_ENABLE;
-
-       if (!encrypt)
-               dir->npe_mode |= NPE_OP_HASH_VERIFY;
-
-       ret = register_chain_var(tfm, HMAC_OPAD_VALUE, otarget,
-                                init_len, npe_ctx_addr, key, key_len);
-       if (ret)
-               return ret;
-       return register_chain_var(tfm, HMAC_IPAD_VALUE, itarget,
-                                 init_len, npe_ctx_addr, key, key_len);
-}
-
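-/*
- * AES decryption needs the reverse key schedule. The NPE derives it
- * itself via an NPE_OP_ENC_GEN_KEY operation into the decrypt context;
- * the request is tagged CTL_FLAG_GEN_REVAES so that one_packet() clears
- * the temporary CIPH_ENCR bit again on completion.
- */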
-static int gen_rev_aes_key(struct crypto_tfm *tfm)
-{
-       struct crypt_ctl *crypt;
-       struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
-       struct ix_sa_dir *dir = &ctx->decrypt;
-
-       crypt = get_crypt_desc_emerg();
-       if (!crypt)
-               return -EAGAIN;
-
-       *(__be32 *)dir->npe_ctx |= cpu_to_be32(CIPH_ENCR);
-
-       crypt->data.tfm = tfm;
-       crypt->crypt_offs = 0;
-       crypt->crypt_len = AES_BLOCK128;
-       crypt->src_buf = 0;
-       crypt->crypto_ctx = dir->npe_ctx_phys;
-       crypt->icv_rev_aes = dir->npe_ctx_phys + sizeof(u32);
-       crypt->mode = NPE_OP_ENC_GEN_KEY;
-       crypt->init_len = dir->npe_ctx_idx;
-       crypt->ctl_flags |= CTL_FLAG_GEN_REVAES;
-
-       atomic_inc(&ctx->configuring);
-       qmgr_put_entry(send_qid, crypt_virt2phys(crypt));
-       BUG_ON(qmgr_stat_overflow(send_qid));
-       return 0;
-}
-
-static int setup_cipher(struct crypto_tfm *tfm, int encrypt, const u8 *key,
-                       int key_len)
-{
-       u8 *cinfo;
-       u32 cipher_cfg;
-       u32 keylen_cfg = 0;
-       struct ix_sa_dir *dir;
-       struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
-       int err;
-
-       dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
-       cinfo = dir->npe_ctx;
-
-       if (encrypt) {
-               cipher_cfg = cipher_cfg_enc(tfm);
-               dir->npe_mode |= NPE_OP_CRYPT_ENCRYPT;
-       } else {
-               cipher_cfg = cipher_cfg_dec(tfm);
-       }
-       if (cipher_cfg & MOD_AES) {
-               switch (key_len) {
-               case 16:
-                       keylen_cfg = MOD_AES128;
-                       break;
-               case 24:
-                       keylen_cfg = MOD_AES192;
-                       break;
-               case 32:
-                       keylen_cfg = MOD_AES256;
-                       break;
-               default:
-                       return -EINVAL;
-               }
-               cipher_cfg |= keylen_cfg;
-       } else {
-               err = crypto_des_verify_key(tfm, key);
-               if (err)
-                       return err;
-       }
-       /* write cfg word to cryptinfo */
-       *(__be32 *)cinfo = cpu_to_be32(cipher_cfg);
-       cinfo += sizeof(cipher_cfg);
-
-       /* write cipher key to cryptinfo */
-       memcpy(cinfo, key, key_len);
-       /* NPE wants keylen set to DES3_EDE_KEY_SIZE even for single DES */
-       if (key_len < DES3_EDE_KEY_SIZE && !(cipher_cfg & MOD_AES)) {
-               memset(cinfo + key_len, 0, DES3_EDE_KEY_SIZE - key_len);
-               key_len = DES3_EDE_KEY_SIZE;
-       }
-       dir->npe_ctx_idx = sizeof(cipher_cfg) + key_len;
-       dir->npe_mode |= NPE_OP_CRYPT_ENABLE;
-       if ((cipher_cfg & MOD_AES) && !encrypt)
-               return gen_rev_aes_key(tfm);
-
-       return 0;
-}
-
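-/*
- * Walk a scatterlist and build the NPE's linked chain of buffer_desc
- * entries, DMA-mapping each segment. The descriptor passed in ('buf')
- * is only an on-stack hook; the real chain starts at buf->next /
- * buf->phys_next. Returns the last descriptor, or NULL if a pool
- * allocation failed part-way through.
- */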
-static struct buffer_desc *chainup_buffers(struct device *dev,
-               struct scatterlist *sg, unsigned int nbytes,
-               struct buffer_desc *buf, gfp_t flags,
-               enum dma_data_direction dir)
-{
-       for (; nbytes > 0; sg = sg_next(sg)) {
-               unsigned int len = min(nbytes, sg->length);
-               struct buffer_desc *next_buf;
-               dma_addr_t next_buf_phys;
-               void *ptr;
-
-               nbytes -= len;
-               ptr = sg_virt(sg);
-               next_buf = dma_pool_alloc(buffer_pool, flags, &next_buf_phys);
-               if (!next_buf) {
-                       buf = NULL;
-                       break;
-               }
-               sg_dma_address(sg) = dma_map_single(dev, ptr, len, dir);
-               buf->next = next_buf;
-               buf->phys_next = next_buf_phys;
-               buf = next_buf;
-
-               buf->phys_addr = sg_dma_address(sg);
-               buf->buf_len = len;
-               buf->dir = dir;
-       }
-       /* terminate the chain; buf is NULL if an allocation failed above */
-       if (buf) {
-               buf->next = NULL;
-               buf->phys_next = 0;
-       }
-       return buf;
-}
-
-static int ablk_setkey(struct crypto_skcipher *tfm, const u8 *key,
-                      unsigned int key_len)
-{
-       struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
-       int ret;
-
-       init_completion(&ctx->completion);
-       atomic_inc(&ctx->configuring);
-
-       reset_sa_dir(&ctx->encrypt);
-       reset_sa_dir(&ctx->decrypt);
-
-       ctx->encrypt.npe_mode = NPE_OP_HMAC_DISABLE;
-       ctx->decrypt.npe_mode = NPE_OP_HMAC_DISABLE;
-
-       ret = setup_cipher(&tfm->base, 0, key, key_len);
-       if (ret)
-               goto out;
-       ret = setup_cipher(&tfm->base, 1, key, key_len);
-out:
-       if (!atomic_dec_and_test(&ctx->configuring))
-               wait_for_completion(&ctx->completion);
-       if (ret)
-               return ret;
-       crypto_skcipher_clear_flags(ctx->fallback_tfm, CRYPTO_TFM_REQ_MASK);
-       crypto_skcipher_set_flags(ctx->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
-
-       return crypto_skcipher_setkey(ctx->fallback_tfm, key, key_len);
-}
-
-static int ablk_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
-                           unsigned int key_len)
-{
-       return verify_skcipher_des3_key(tfm, key) ?:
-              ablk_setkey(tfm, key, key_len);
-}
-
-static int ablk_rfc3686_setkey(struct crypto_skcipher *tfm, const u8 *key,
-                              unsigned int key_len)
-{
-       struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
-
-       /* the nonce is stored in the last CTR_RFC3686_NONCE_SIZE bytes of the key */
-       if (key_len < CTR_RFC3686_NONCE_SIZE)
-               return -EINVAL;
-
-       memcpy(ctx->nonce, key + (key_len - CTR_RFC3686_NONCE_SIZE),
-              CTR_RFC3686_NONCE_SIZE);
-
-       key_len -= CTR_RFC3686_NONCE_SIZE;
-       return ablk_setkey(tfm, key, key_len);
-}
-
-static int ixp4xx_cipher_fallback(struct skcipher_request *areq, int encrypt)
-{
-       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq);
-       struct ixp_ctx *op = crypto_skcipher_ctx(tfm);
-       struct ablk_ctx *rctx = skcipher_request_ctx(areq);
-       int err;
-
-       skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm);
-       skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags,
-                                     areq->base.complete, areq->base.data);
-       skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst,
-                                  areq->cryptlen, areq->iv);
-       if (encrypt)
-               err = crypto_skcipher_encrypt(&rctx->fallback_req);
-       else
-               err = crypto_skcipher_decrypt(&rctx->fallback_req);
-       return err;
-}
-
-static int ablk_perform(struct skcipher_request *req, int encrypt)
-{
-       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-       struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
-       unsigned int ivsize = crypto_skcipher_ivsize(tfm);
-       struct ix_sa_dir *dir;
-       struct crypt_ctl *crypt;
-       unsigned int nbytes = req->cryptlen;
-       enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
-       struct ablk_ctx *req_ctx = skcipher_request_ctx(req);
-       struct buffer_desc src_hook;
-       struct device *dev = &pdev->dev;
-       unsigned int offset;
-       gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
-                               GFP_KERNEL : GFP_ATOMIC;
-
-       if (sg_nents(req->src) > 1 || sg_nents(req->dst) > 1)
-               return ixp4xx_cipher_fallback(req, encrypt);
-
-       if (qmgr_stat_full(send_qid))
-               return -EAGAIN;
-       if (atomic_read(&ctx->configuring))
-               return -EAGAIN;
-
-       dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
-       req_ctx->encrypt = encrypt;
-
-       crypt = get_crypt_desc();
-       if (!crypt)
-               return -ENOMEM;
-
-       crypt->data.ablk_req = req;
-       crypt->crypto_ctx = dir->npe_ctx_phys;
-       crypt->mode = dir->npe_mode;
-       crypt->init_len = dir->npe_ctx_idx;
-
-       crypt->crypt_offs = 0;
-       crypt->crypt_len = nbytes;
-
-       BUG_ON(ivsize && !req->iv);
-       memcpy(crypt->iv, req->iv, ivsize);
-       if (ivsize > 0 && !encrypt) {
-               offset = req->cryptlen - ivsize;
-               scatterwalk_map_and_copy(req_ctx->iv, req->src, offset, ivsize, 0);
-       }
-       if (req->src != req->dst) {
-               struct buffer_desc dst_hook;
-
-               crypt->mode |= NPE_OP_NOT_IN_PLACE;
-               /* Apparently Intel never tested this path with more
-                * than one dst buffer. */
-               req_ctx->dst = NULL;
-               if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook,
-                                    flags, DMA_FROM_DEVICE))
-                       goto free_buf_dest;
-               src_direction = DMA_TO_DEVICE;
-               req_ctx->dst = dst_hook.next;
-               crypt->dst_buf = dst_hook.phys_next;
-       } else {
-               req_ctx->dst = NULL;
-       }
-       req_ctx->src = NULL;
-       if (!chainup_buffers(dev, req->src, nbytes, &src_hook, flags,
-                            src_direction))
-               goto free_buf_src;
-
-       req_ctx->src = src_hook.next;
-       crypt->src_buf = src_hook.phys_next;
-       crypt->ctl_flags |= CTL_FLAG_PERFORM_ABLK;
-       qmgr_put_entry(send_qid, crypt_virt2phys(crypt));
-       BUG_ON(qmgr_stat_overflow(send_qid));
-       return -EINPROGRESS;
-
-free_buf_src:
-       free_buf_chain(dev, req_ctx->src, crypt->src_buf);
-free_buf_dest:
-       if (req->src != req->dst)
-               free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
-
-       crypt->ctl_flags = CTL_FLAG_UNUSED;
-       return -ENOMEM;
-}
-
-static int ablk_encrypt(struct skcipher_request *req)
-{
-       return ablk_perform(req, 1);
-}
-
-static int ablk_decrypt(struct skcipher_request *req)
-{
-       return ablk_perform(req, 0);
-}
-
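-/*
- * RFC 3686 CTR: build the 16-byte counter block as the 4-byte nonce
- * (saved at setkey time) || the 8-byte per-request IV || a 4-byte
- * counter initialised to 1, then run a plain CTR encryption (CTR
- * decryption is the same operation).
- */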
-static int ablk_rfc3686_crypt(struct skcipher_request *req)
-{
-       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
-       struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
-       u8 iv[CTR_RFC3686_BLOCK_SIZE];
-       u8 *info = req->iv;
-       int ret;
-
-       /* set up counter block */
-       memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
-       memcpy(iv + CTR_RFC3686_NONCE_SIZE, info, CTR_RFC3686_IV_SIZE);
-
-       /* initialize counter portion of counter block */
-       *(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
-               cpu_to_be32(1);
-
-       req->iv = iv;
-       ret = ablk_perform(req, 1);
-       req->iv = info;
-       return ret;
-}
-
-static int aead_perform(struct aead_request *req, int encrypt,
-                       int cryptoffset, int eff_cryptlen, u8 *iv)
-{
-       struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-       struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
-       unsigned int ivsize = crypto_aead_ivsize(tfm);
-       unsigned int authsize = crypto_aead_authsize(tfm);
-       struct ix_sa_dir *dir;
-       struct crypt_ctl *crypt;
-       unsigned int cryptlen;
-       struct buffer_desc *buf, src_hook;
-       struct aead_ctx *req_ctx = aead_request_ctx(req);
-       struct device *dev = &pdev->dev;
-       gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
-                               GFP_KERNEL : GFP_ATOMIC;
-       enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
-       unsigned int lastlen;
-
-       if (qmgr_stat_full(send_qid))
-               return -EAGAIN;
-       if (atomic_read(&ctx->configuring))
-               return -EAGAIN;
-
-       if (encrypt) {
-               dir = &ctx->encrypt;
-               cryptlen = req->cryptlen;
-       } else {
-               dir = &ctx->decrypt;
-               /* req->cryptlen includes the authsize when decrypting */
-               cryptlen = req->cryptlen - authsize;
-               eff_cryptlen -= authsize;
-       }
-       crypt = get_crypt_desc();
-       if (!crypt)
-               return -ENOMEM;
-
-       crypt->data.aead_req = req;
-       crypt->crypto_ctx = dir->npe_ctx_phys;
-       crypt->mode = dir->npe_mode;
-       crypt->init_len = dir->npe_ctx_idx;
-
-       crypt->crypt_offs = cryptoffset;
-       crypt->crypt_len = eff_cryptlen;
-
-       crypt->auth_offs = 0;
-       crypt->auth_len = req->assoclen + cryptlen;
-       BUG_ON(ivsize && !req->iv);
-       memcpy(crypt->iv, req->iv, ivsize);
-
-       buf = chainup_buffers(dev, req->src, crypt->auth_len,
-                             &src_hook, flags, src_direction);
-       req_ctx->src = src_hook.next;
-       crypt->src_buf = src_hook.phys_next;
-       if (!buf)
-               goto free_buf_src;
-
-       lastlen = buf->buf_len;
-       if (lastlen >= authsize)
-               crypt->icv_rev_aes = buf->phys_addr +
-                                    buf->buf_len - authsize;
-
-       req_ctx->dst = NULL;
-
-       if (req->src != req->dst) {
-               struct buffer_desc dst_hook;
-
-               crypt->mode |= NPE_OP_NOT_IN_PLACE;
-               src_direction = DMA_TO_DEVICE;
-
-               buf = chainup_buffers(dev, req->dst, crypt->auth_len,
-                                     &dst_hook, flags, DMA_FROM_DEVICE);
-               req_ctx->dst = dst_hook.next;
-               crypt->dst_buf = dst_hook.phys_next;
-
-               if (!buf)
-                       goto free_buf_dst;
-
-               if (encrypt) {
-                       lastlen = buf->buf_len;
-                       if (lastlen >= authsize)
-                               crypt->icv_rev_aes = buf->phys_addr +
-                                                    buf->buf_len - authsize;
-               }
-       }
-
-       if (unlikely(lastlen < authsize)) {
-               /* The hmac bytes are scattered across sg entries,
-                * so we need to copy them into a safe buffer */
-               req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags,
-                                                   &crypt->icv_rev_aes);
-               if (unlikely(!req_ctx->hmac_virt))
-                       goto free_buf_dst;
-               if (!encrypt) {
-                       scatterwalk_map_and_copy(req_ctx->hmac_virt,
-                                                req->src, cryptlen, authsize, 0);
-               }
-               req_ctx->encrypt = encrypt;
-       } else {
-               req_ctx->hmac_virt = NULL;
-       }
-
-       crypt->ctl_flags |= CTL_FLAG_PERFORM_AEAD;
-       qmgr_put_entry(send_qid, crypt_virt2phys(crypt));
-       BUG_ON(qmgr_stat_overflow(send_qid));
-       return -EINPROGRESS;
-
-free_buf_dst:
-       free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
-free_buf_src:
-       free_buf_chain(dev, req_ctx->src, crypt->src_buf);
-       crypt->ctl_flags = CTL_FLAG_UNUSED;
-       return -ENOMEM;
-}
-
-static int aead_setup(struct crypto_aead *tfm, unsigned int authsize)
-{
-       struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
-       unsigned int digest_len = crypto_aead_maxauthsize(tfm);
-       int ret;
-
-       if (!ctx->enckey_len && !ctx->authkey_len)
-               return 0;
-       init_completion(&ctx->completion);
-       atomic_inc(&ctx->configuring);
-
-       reset_sa_dir(&ctx->encrypt);
-       reset_sa_dir(&ctx->decrypt);
-
-       ret = setup_cipher(&tfm->base, 0, ctx->enckey, ctx->enckey_len);
-       if (ret)
-               goto out;
-       ret = setup_cipher(&tfm->base, 1, ctx->enckey, ctx->enckey_len);
-       if (ret)
-               goto out;
-       ret = setup_auth(&tfm->base, 0, authsize, ctx->authkey,
-                        ctx->authkey_len, digest_len);
-       if (ret)
-               goto out;
-       ret = setup_auth(&tfm->base, 1, authsize,  ctx->authkey,
-                        ctx->authkey_len, digest_len);
-out:
-       if (!atomic_dec_and_test(&ctx->configuring))
-               wait_for_completion(&ctx->completion);
-       return ret;
-}
-
-static int aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
-{
-       int max = crypto_aead_maxauthsize(tfm) >> 2;
-
-       if ((authsize >> 2) < 1 || (authsize >> 2) > max || (authsize & 3))
-               return -EINVAL;
-       return aead_setup(tfm, authsize);
-}
-
-static int aead_setkey(struct crypto_aead *tfm, const u8 *key,
-                      unsigned int keylen)
-{
-       struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
-       struct crypto_authenc_keys keys;
-
-       if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
-               goto badkey;
-
-       if (keys.authkeylen > sizeof(ctx->authkey))
-               goto badkey;
-
-       if (keys.enckeylen > sizeof(ctx->enckey))
-               goto badkey;
-
-       memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
-       memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
-       ctx->authkey_len = keys.authkeylen;
-       ctx->enckey_len = keys.enckeylen;
-
-       memzero_explicit(&keys, sizeof(keys));
-       return aead_setup(tfm, crypto_aead_authsize(tfm));
-badkey:
-       memzero_explicit(&keys, sizeof(keys));
-       return -EINVAL;
-}
-
-static int des3_aead_setkey(struct crypto_aead *tfm, const u8 *key,
-                           unsigned int keylen)
-{
-       struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
-       struct crypto_authenc_keys keys;
-       int err;
-
-       err = crypto_authenc_extractkeys(&keys, key, keylen);
-       if (unlikely(err))
-               goto badkey;
-
-       err = -EINVAL;
-       if (keys.authkeylen > sizeof(ctx->authkey))
-               goto badkey;
-
-       err = verify_aead_des3_key(tfm, keys.enckey, keys.enckeylen);
-       if (err)
-               goto badkey;
-
-       memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
-       memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
-       ctx->authkey_len = keys.authkeylen;
-       ctx->enckey_len = keys.enckeylen;
-
-       memzero_explicit(&keys, sizeof(keys));
-       return aead_setup(tfm, crypto_aead_authsize(tfm));
-badkey:
-       memzero_explicit(&keys, sizeof(keys));
-       return err;
-}
-
-static int aead_encrypt(struct aead_request *req)
-{
-       return aead_perform(req, 1, req->assoclen, req->cryptlen, req->iv);
-}
-
-static int aead_decrypt(struct aead_request *req)
-{
-       return aead_perform(req, 0, req->assoclen, req->cryptlen, req->iv);
-}
-
-static struct ixp_alg ixp4xx_algos[] = {
-{
-       .crypto = {
-               .base.cra_name          = "cbc(des)",
-               .base.cra_blocksize     = DES_BLOCK_SIZE,
-
-               .min_keysize            = DES_KEY_SIZE,
-               .max_keysize            = DES_KEY_SIZE,
-               .ivsize                 = DES_BLOCK_SIZE,
-       },
-       .cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
-       .cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
-}, {
-       .crypto = {
-               .base.cra_name          = "ecb(des)",
-               .base.cra_blocksize     = DES_BLOCK_SIZE,
-               .min_keysize            = DES_KEY_SIZE,
-               .max_keysize            = DES_KEY_SIZE,
-       },
-       .cfg_enc = CIPH_ENCR | MOD_DES | MOD_ECB | KEYLEN_192,
-       .cfg_dec = CIPH_DECR | MOD_DES | MOD_ECB | KEYLEN_192,
-}, {
-       .crypto = {
-               .base.cra_name          = "cbc(des3_ede)",
-               .base.cra_blocksize     = DES3_EDE_BLOCK_SIZE,
-
-               .min_keysize            = DES3_EDE_KEY_SIZE,
-               .max_keysize            = DES3_EDE_KEY_SIZE,
-               .ivsize                 = DES3_EDE_BLOCK_SIZE,
-               .setkey                 = ablk_des3_setkey,
-       },
-       .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
-       .cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
-}, {
-       .crypto = {
-               .base.cra_name          = "ecb(des3_ede)",
-               .base.cra_blocksize     = DES3_EDE_BLOCK_SIZE,
-
-               .min_keysize            = DES3_EDE_KEY_SIZE,
-               .max_keysize            = DES3_EDE_KEY_SIZE,
-               .setkey                 = ablk_des3_setkey,
-       },
-       .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_ECB | KEYLEN_192,
-       .cfg_dec = CIPH_DECR | MOD_3DES | MOD_ECB | KEYLEN_192,
-}, {
-       .crypto = {
-               .base.cra_name          = "cbc(aes)",
-               .base.cra_blocksize     = AES_BLOCK_SIZE,
-
-               .min_keysize            = AES_MIN_KEY_SIZE,
-               .max_keysize            = AES_MAX_KEY_SIZE,
-               .ivsize                 = AES_BLOCK_SIZE,
-       },
-       .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
-       .cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
-}, {
-       .crypto = {
-               .base.cra_name          = "ecb(aes)",
-               .base.cra_blocksize     = AES_BLOCK_SIZE,
-
-               .min_keysize            = AES_MIN_KEY_SIZE,
-               .max_keysize            = AES_MAX_KEY_SIZE,
-       },
-       .cfg_enc = CIPH_ENCR | MOD_AES | MOD_ECB,
-       .cfg_dec = CIPH_DECR | MOD_AES | MOD_ECB,
-}, {
-       .crypto = {
-               .base.cra_name          = "ctr(aes)",
-               .base.cra_blocksize     = 1,
-
-               .min_keysize            = AES_MIN_KEY_SIZE,
-               .max_keysize            = AES_MAX_KEY_SIZE,
-               .ivsize                 = AES_BLOCK_SIZE,
-       },
-       .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
-       .cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
-}, {
-       .crypto = {
-               .base.cra_name          = "rfc3686(ctr(aes))",
-               .base.cra_blocksize     = 1,
-
-               .min_keysize            = AES_MIN_KEY_SIZE,
-               .max_keysize            = AES_MAX_KEY_SIZE,
-               .ivsize                 = AES_BLOCK_SIZE,
-               .setkey                 = ablk_rfc3686_setkey,
-               .encrypt                = ablk_rfc3686_crypt,
-               .decrypt                = ablk_rfc3686_crypt,
-       },
-       .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
-       .cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
-} };
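-
-/*
- * Note that both ctr(aes) entries above use CIPH_ENCR for cfg_enc and
- * cfg_dec alike: CTR decryption is the same keystream operation as
- * encryption.
- */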
-
-static struct ixp_aead_alg ixp4xx_aeads[] = {
-{
-       .crypto = {
-               .base = {
-                       .cra_name       = "authenc(hmac(md5),cbc(des))",
-                       .cra_blocksize  = DES_BLOCK_SIZE,
-               },
-               .ivsize         = DES_BLOCK_SIZE,
-               .maxauthsize    = MD5_DIGEST_SIZE,
-       },
-       .hash = &hash_alg_md5,
-       .cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
-       .cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
-}, {
-       .crypto = {
-               .base = {
-                       .cra_name       = "authenc(hmac(md5),cbc(des3_ede))",
-                       .cra_blocksize  = DES3_EDE_BLOCK_SIZE,
-               },
-               .ivsize         = DES3_EDE_BLOCK_SIZE,
-               .maxauthsize    = MD5_DIGEST_SIZE,
-               .setkey         = des3_aead_setkey,
-       },
-       .hash = &hash_alg_md5,
-       .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
-       .cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
-}, {
-       .crypto = {
-               .base = {
-                       .cra_name       = "authenc(hmac(sha1),cbc(des))",
-                       .cra_blocksize  = DES_BLOCK_SIZE,
-               },
-               .ivsize         = DES_BLOCK_SIZE,
-               .maxauthsize    = SHA1_DIGEST_SIZE,
-       },
-       .hash = &hash_alg_sha1,
-       .cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
-       .cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
-}, {
-       .crypto = {
-               .base = {
-                       .cra_name       = "authenc(hmac(sha1),cbc(des3_ede))",
-                       .cra_blocksize  = DES3_EDE_BLOCK_SIZE,
-               },
-               .ivsize         = DES3_EDE_BLOCK_SIZE,
-               .maxauthsize    = SHA1_DIGEST_SIZE,
-               .setkey         = des3_aead_setkey,
-       },
-       .hash = &hash_alg_sha1,
-       .cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
-       .cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
-}, {
-       .crypto = {
-               .base = {
-                       .cra_name       = "authenc(hmac(md5),cbc(aes))",
-                       .cra_blocksize  = AES_BLOCK_SIZE,
-               },
-               .ivsize         = AES_BLOCK_SIZE,
-               .maxauthsize    = MD5_DIGEST_SIZE,
-       },
-       .hash = &hash_alg_md5,
-       .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
-       .cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
-}, {
-       .crypto = {
-               .base = {
-                       .cra_name       = "authenc(hmac(sha1),cbc(aes))",
-                       .cra_blocksize  = AES_BLOCK_SIZE,
-               },
-               .ivsize         = AES_BLOCK_SIZE,
-               .maxauthsize    = SHA1_DIGEST_SIZE,
-       },
-       .hash = &hash_alg_sha1,
-       .cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
-       .cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
-} };
-
-#define IXP_POSTFIX "-ixp4xx"
-
-static int ixp_crypto_probe(struct platform_device *_pdev)
-{
-       struct device *dev = &_pdev->dev;
-       int num = ARRAY_SIZE(ixp4xx_algos);
-       int i, err;
-
-       pdev = _pdev;
-
-       err = init_ixp_crypto(dev);
-       if (err)
-               return err;
-
-       for (i = 0; i < num; i++) {
-               struct skcipher_alg *cra = &ixp4xx_algos[i].crypto;
-
-               if (snprintf(cra->base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
-                            "%s"IXP_POSTFIX, cra->base.cra_name) >=
-                   CRYPTO_MAX_ALG_NAME)
-                       continue;
-               if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES))
-                       continue;
-
-               /* block ciphers */
-               cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
-                                     CRYPTO_ALG_ASYNC |
-                                     CRYPTO_ALG_ALLOCATES_MEMORY |
-                                     CRYPTO_ALG_NEED_FALLBACK;
-               if (!cra->setkey)
-                       cra->setkey = ablk_setkey;
-               if (!cra->encrypt)
-                       cra->encrypt = ablk_encrypt;
-               if (!cra->decrypt)
-                       cra->decrypt = ablk_decrypt;
-               cra->init = init_tfm_ablk;
-               cra->exit = exit_tfm_ablk;
-
-               cra->base.cra_ctxsize = sizeof(struct ixp_ctx);
-               cra->base.cra_module = THIS_MODULE;
-               cra->base.cra_alignmask = 3;
-               cra->base.cra_priority = 300;
-               if (crypto_register_skcipher(cra))
-                       dev_err(&pdev->dev, "Failed to register '%s'\n",
-                               cra->base.cra_name);
-               else
-                       ixp4xx_algos[i].registered = 1;
-       }
-
-       for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) {
-               struct aead_alg *cra = &ixp4xx_aeads[i].crypto;
-
-               if (snprintf(cra->base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
-                            "%s"IXP_POSTFIX, cra->base.cra_name) >=
-                   CRYPTO_MAX_ALG_NAME)
-                       continue;
-               if (!support_aes && (ixp4xx_aeads[i].cfg_enc & MOD_AES))
-                       continue;
-
-               /* authenc */
-               cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
-                                     CRYPTO_ALG_ASYNC |
-                                     CRYPTO_ALG_ALLOCATES_MEMORY;
-               cra->setkey = cra->setkey ?: aead_setkey;
-               cra->setauthsize = aead_setauthsize;
-               cra->encrypt = aead_encrypt;
-               cra->decrypt = aead_decrypt;
-               cra->init = init_tfm_aead;
-               cra->exit = exit_tfm_aead;
-
-               cra->base.cra_ctxsize = sizeof(struct ixp_ctx);
-               cra->base.cra_module = THIS_MODULE;
-               cra->base.cra_alignmask = 3;
-               cra->base.cra_priority = 300;
-
-               if (crypto_register_aead(cra))
-                       dev_err(&pdev->dev, "Failed to register '%s'\n",
-                               cra->base.cra_driver_name);
-               else
-                       ixp4xx_aeads[i].registered = 1;
-       }
-       return 0;
-}
-
-static int ixp_crypto_remove(struct platform_device *pdev)
-{
-       int num = ARRAY_SIZE(ixp4xx_algos);
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) {
-               if (ixp4xx_aeads[i].registered)
-                       crypto_unregister_aead(&ixp4xx_aeads[i].crypto);
-       }
-
-       for (i = 0; i < num; i++) {
-               if (ixp4xx_algos[i].registered)
-                       crypto_unregister_skcipher(&ixp4xx_algos[i].crypto);
-       }
-       release_ixp_crypto(&pdev->dev);
-
-       return 0;
-}
-
-static const struct of_device_id ixp4xx_crypto_of_match[] = {
-       {
-               .compatible = "intel,ixp4xx-crypto",
-       },
-       {},
-};
-
-static struct platform_driver ixp_crypto_driver = {
-       .probe = ixp_crypto_probe,
-       .remove = ixp_crypto_remove,
-       .driver = {
-               .name = "ixp4xx_crypto",
-               .of_match_table = ixp4xx_crypto_of_match,
-       },
-};
-module_platform_driver(ixp_crypto_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Christian Hohnstaedt <chohnstaedt@innominate.com>");
-MODULE_DESCRIPTION("IXP4xx hardware crypto");
-