SYM_FUNC_END(aesni_set_key)
 
 /*
- * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
+ * void aesni_enc(const void *ctx, u8 *dst, const u8 *src)
  */
 SYM_FUNC_START(aesni_enc)
        FRAME_BEGIN
 SYM_FUNC_END(_aesni_enc4)
 
 /*
- * void aesni_dec (struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
+ * void aesni_dec(const void *ctx, u8 *dst, const u8 *src)
  */
 SYM_FUNC_START(aesni_dec)
        FRAME_BEGIN
        pxor CTR, IV;
 
 /*
- * void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
- *                      bool enc, u8 *iv)
+ * void aesni_xts_crypt8(const struct crypto_aes_ctx *ctx, u8 *dst,
+ *                      const u8 *src, bool enc, le128 *iv)
  */
 SYM_FUNC_START(aesni_xts_crypt8)
        FRAME_BEGIN
 
 
 asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
                             unsigned int key_len);
-asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
-                         const u8 *in);
-asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
-                         const u8 *in);
+asmlinkage void aesni_enc(const void *ctx, u8 *out, const u8 *in);
+asmlinkage void aesni_dec(const void *ctx, u8 *out, const u8 *in);
 asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len);
 asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
 asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len, u8 *iv);
 
-asmlinkage void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, u8 *out,
-                                const u8 *in, bool enc, u8 *iv);
+asmlinkage void aesni_xts_crypt8(const struct crypto_aes_ctx *ctx, u8 *out,
+                                const u8 *in, bool enc, le128 *iv);
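
All of these prototypes are being flattened to the exact shape the glue
layer dispatches through, which is what lets the tables below drop the
GLUE_*_CAST macros and keeps every indirect call prototype-exact (e.g.
under Clang CFI). For reference, a sketch of the function-pointer types
and dispatch entry they now have to match, assuming the post-patch
asm/crypto/glue_helper.h:

typedef void (*common_glue_func_t)(const void *ctx, u8 *dst, const u8 *src);
typedef void (*common_glue_cbc_func_t)(const void *ctx, u8 *dst, const u8 *src);
typedef void (*common_glue_ctr_func_t)(const void *ctx, u8 *dst, const u8 *src,
                                       le128 *iv);
typedef void (*common_glue_xts_func_t)(const void *ctx, u8 *dst, const u8 *src,
                                       le128 *iv);

struct common_glue_func_entry {
        unsigned int num_blocks;        /* blocks processed in one call */
        union {
                common_glue_func_t ecb;
                common_glue_cbc_func_t cbc;
                common_glue_ctr_func_t ctr;
                common_glue_xts_func_t xts;
        } fn_u;
};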
 
 /* asmlinkage void aesni_gcm_enc()
  * void *ctx,  AES Key schedule. Starts on a 16 byte boundary.
 }
 
 
-static void aesni_xts_tweak(void *ctx, u8 *out, const u8 *in)
+static void aesni_xts_enc(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
 {
-       aesni_enc(ctx, out, in);
+       glue_xts_crypt_128bit_one(ctx, dst, src, iv, aesni_enc);
 }
 
-static void aesni_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
+static void aesni_xts_dec(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
 {
-       glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_enc));
+       glue_xts_crypt_128bit_one(ctx, dst, src, iv, aesni_dec);
 }
 
-static void aesni_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv)
+static void aesni_xts_enc8(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
 {
-       glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_dec));
+       aesni_xts_crypt8(ctx, dst, src, true, iv);
 }
 
-static void aesni_xts_enc8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
+static void aesni_xts_dec8(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
 {
-       aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, true, (u8 *)iv);
-}
-
-static void aesni_xts_dec8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
-{
-       aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, false, (u8 *)iv);
+       aesni_xts_crypt8(ctx, dst, src, false, iv);
 }
 
 static const struct common_glue_ctx aesni_enc_xts = {
 
        .funcs = { {
                .num_blocks = 8,
-               .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc8) }
+               .fn_u = { .xts = aesni_xts_enc8 }
        }, {
                .num_blocks = 1,
-               .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc) }
+               .fn_u = { .xts = aesni_xts_enc }
        } }
 };
 
 
        .funcs = { {
                .num_blocks = 8,
-               .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec8) }
+               .fn_u = { .xts = aesni_xts_dec8 }
        }, {
                .num_blocks = 1,
-               .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec) }
+               .fn_u = { .xts = aesni_xts_dec }
        } }
 };
 
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
 
-       return glue_xts_req_128bit(&aesni_enc_xts, req,
-                                  XTS_TWEAK_CAST(aesni_xts_tweak),
+       return glue_xts_req_128bit(&aesni_enc_xts, req, aesni_enc,
                                   aes_ctx(ctx->raw_tweak_ctx),
                                   aes_ctx(ctx->raw_crypt_ctx),
                                   false);
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
 
-       return glue_xts_req_128bit(&aesni_dec_xts, req,
-                                  XTS_TWEAK_CAST(aesni_xts_tweak),
+       return glue_xts_req_128bit(&aesni_dec_xts, req, aesni_enc,
                                   aes_ctx(ctx->raw_tweak_ctx),
                                   aes_ctx(ctx->raw_crypt_ctx),
                                   true);
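
Note that the decrypt path above passes aesni_enc, not aesni_dec; that is
deliberate, since XTS always *encrypts* the IV under the tweak key no
matter which direction the payload takes. A minimal illustration
(hypothetical helper, not part of the patch):

/* T = E(K2, IV): glue_xts_req_128bit() runs the tweak function once up
 * front, and both xts_encrypt() and xts_decrypt() hand it aesni_enc.
 */
static void xts_make_tweak(const void *tweak_ctx, le128 *iv)
{
        aesni_enc(tweak_ctx, (u8 *)iv, (const u8 *)iv);
}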
 
 #define CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS 32
 
 /* 32-way AVX2/AES-NI parallel cipher functions */
-asmlinkage void camellia_ecb_enc_32way(struct camellia_ctx *ctx, u8 *dst,
-                                      const u8 *src);
-asmlinkage void camellia_ecb_dec_32way(struct camellia_ctx *ctx, u8 *dst,
-                                      const u8 *src);
+asmlinkage void camellia_ecb_enc_32way(const void *ctx, u8 *dst, const u8 *src);
+asmlinkage void camellia_ecb_dec_32way(const void *ctx, u8 *dst, const u8 *src);
 
-asmlinkage void camellia_cbc_dec_32way(struct camellia_ctx *ctx, u8 *dst,
-                                      const u8 *src);
-asmlinkage void camellia_ctr_32way(struct camellia_ctx *ctx, u8 *dst,
-                                  const u8 *src, le128 *iv);
+asmlinkage void camellia_cbc_dec_32way(const void *ctx, u8 *dst, const u8 *src);
+asmlinkage void camellia_ctr_32way(const void *ctx, u8 *dst, const u8 *src,
+                                  le128 *iv);
 
-asmlinkage void camellia_xts_enc_32way(struct camellia_ctx *ctx, u8 *dst,
-                                      const u8 *src, le128 *iv);
-asmlinkage void camellia_xts_dec_32way(struct camellia_ctx *ctx, u8 *dst,
-                                      const u8 *src, le128 *iv);
+asmlinkage void camellia_xts_enc_32way(const void *ctx, u8 *dst, const u8 *src,
+                                      le128 *iv);
+asmlinkage void camellia_xts_dec_32way(const void *ctx, u8 *dst, const u8 *src,
+                                      le128 *iv);
 
 static const struct common_glue_ctx camellia_enc = {
        .num_funcs = 4,
 
        .funcs = { {
                .num_blocks = CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS,
-               .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_ecb_enc_32way) }
+               .fn_u = { .ecb = camellia_ecb_enc_32way }
        }, {
                .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS,
-               .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_ecb_enc_16way) }
+               .fn_u = { .ecb = camellia_ecb_enc_16way }
        }, {
                .num_blocks = 2,
-               .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_enc_blk_2way) }
+               .fn_u = { .ecb = camellia_enc_blk_2way }
        }, {
                .num_blocks = 1,
-               .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_enc_blk) }
+               .fn_u = { .ecb = camellia_enc_blk }
        } }
 };
 
 
        .funcs = { {
                .num_blocks = CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS,
-               .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(camellia_ctr_32way) }
+               .fn_u = { .ctr = camellia_ctr_32way }
        }, {
                .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS,
-               .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(camellia_ctr_16way) }
+               .fn_u = { .ctr = camellia_ctr_16way }
        }, {
                .num_blocks = 2,
-               .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(camellia_crypt_ctr_2way) }
+               .fn_u = { .ctr = camellia_crypt_ctr_2way }
        }, {
                .num_blocks = 1,
-               .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(camellia_crypt_ctr) }
+               .fn_u = { .ctr = camellia_crypt_ctr }
        } }
 };
 
 
        .funcs = { {
                .num_blocks = CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS,
-               .fn_u = { .xts = GLUE_XTS_FUNC_CAST(camellia_xts_enc_32way) }
+               .fn_u = { .xts = camellia_xts_enc_32way }
        }, {
                .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS,
-               .fn_u = { .xts = GLUE_XTS_FUNC_CAST(camellia_xts_enc_16way) }
+               .fn_u = { .xts = camellia_xts_enc_16way }
        }, {
                .num_blocks = 1,
-               .fn_u = { .xts = GLUE_XTS_FUNC_CAST(camellia_xts_enc) }
+               .fn_u = { .xts = camellia_xts_enc }
        } }
 };
 
 
        .funcs = { {
                .num_blocks = CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS,
-               .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_ecb_dec_32way) }
+               .fn_u = { .ecb = camellia_ecb_dec_32way }
        }, {
                .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS,
-               .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_ecb_dec_16way) }
+               .fn_u = { .ecb = camellia_ecb_dec_16way }
        }, {
                .num_blocks = 2,
-               .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_dec_blk_2way) }
+               .fn_u = { .ecb = camellia_dec_blk_2way }
        }, {
                .num_blocks = 1,
-               .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_dec_blk) }
+               .fn_u = { .ecb = camellia_dec_blk }
        } }
 };
 
 
        .funcs = { {
                .num_blocks = CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS,
-               .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(camellia_cbc_dec_32way) }
+               .fn_u = { .cbc = camellia_cbc_dec_32way }
        }, {
                .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS,
-               .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(camellia_cbc_dec_16way) }
+               .fn_u = { .cbc = camellia_cbc_dec_16way }
        }, {
                .num_blocks = 2,
-               .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(camellia_decrypt_cbc_2way) }
+               .fn_u = { .cbc = camellia_decrypt_cbc_2way }
        }, {
                .num_blocks = 1,
-               .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(camellia_dec_blk) }
+               .fn_u = { .cbc = camellia_dec_blk }
        } }
 };
 
 
        .funcs = { {
                .num_blocks = CAMELLIA_AESNI_AVX2_PARALLEL_BLOCKS,
-               .fn_u = { .xts = GLUE_XTS_FUNC_CAST(camellia_xts_dec_32way) }
+               .fn_u = { .xts = camellia_xts_dec_32way }
        }, {
                .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS,
-               .fn_u = { .xts = GLUE_XTS_FUNC_CAST(camellia_xts_dec_16way) }
+               .fn_u = { .xts = camellia_xts_dec_16way }
        }, {
                .num_blocks = 1,
-               .fn_u = { .xts = GLUE_XTS_FUNC_CAST(camellia_xts_dec) }
+               .fn_u = { .xts = camellia_xts_dec }
        } }
 };
 
 
 static int cbc_encrypt(struct skcipher_request *req)
 {
-       return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(camellia_enc_blk),
-                                          req);
+       return glue_cbc_encrypt_req_128bit(camellia_enc_blk, req);
 }
 
 static int cbc_decrypt(struct skcipher_request *req)
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct camellia_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
 
-       return glue_xts_req_128bit(&camellia_enc_xts, req,
-                                  XTS_TWEAK_CAST(camellia_enc_blk),
+       return glue_xts_req_128bit(&camellia_enc_xts, req, camellia_enc_blk,
                                   &ctx->tweak_ctx, &ctx->crypt_ctx, false);
 }
 
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct camellia_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
 
-       return glue_xts_req_128bit(&camellia_dec_xts, req,
-                                  XTS_TWEAK_CAST(camellia_enc_blk),
+       return glue_xts_req_128bit(&camellia_dec_xts, req, camellia_enc_blk,
                                   &ctx->tweak_ctx, &ctx->crypt_ctx, true);
 }
 
 
 #define CAMELLIA_AESNI_PARALLEL_BLOCKS 16
 
 /* 16-way parallel cipher functions (avx/aes-ni) */
-asmlinkage void camellia_ecb_enc_16way(struct camellia_ctx *ctx, u8 *dst,
-                                      const u8 *src);
+asmlinkage void camellia_ecb_enc_16way(const void *ctx, u8 *dst, const u8 *src);
 EXPORT_SYMBOL_GPL(camellia_ecb_enc_16way);
 
-asmlinkage void camellia_ecb_dec_16way(struct camellia_ctx *ctx, u8 *dst,
-                                      const u8 *src);
+asmlinkage void camellia_ecb_dec_16way(const void *ctx, u8 *dst, const u8 *src);
 EXPORT_SYMBOL_GPL(camellia_ecb_dec_16way);
 
-asmlinkage void camellia_cbc_dec_16way(struct camellia_ctx *ctx, u8 *dst,
-                                      const u8 *src);
+asmlinkage void camellia_cbc_dec_16way(const void *ctx, u8 *dst, const u8 *src);
 EXPORT_SYMBOL_GPL(camellia_cbc_dec_16way);
 
-asmlinkage void camellia_ctr_16way(struct camellia_ctx *ctx, u8 *dst,
-                                  const u8 *src, le128 *iv);
+asmlinkage void camellia_ctr_16way(const void *ctx, u8 *dst, const u8 *src,
+                                  le128 *iv);
 EXPORT_SYMBOL_GPL(camellia_ctr_16way);
 
-asmlinkage void camellia_xts_enc_16way(struct camellia_ctx *ctx, u8 *dst,
-                                      const u8 *src, le128 *iv);
+asmlinkage void camellia_xts_enc_16way(const void *ctx, u8 *dst, const u8 *src,
+                                      le128 *iv);
 EXPORT_SYMBOL_GPL(camellia_xts_enc_16way);
 
-asmlinkage void camellia_xts_dec_16way(struct camellia_ctx *ctx, u8 *dst,
-                                      const u8 *src, le128 *iv);
+asmlinkage void camellia_xts_dec_16way(const void *ctx, u8 *dst, const u8 *src,
+                                      le128 *iv);
 EXPORT_SYMBOL_GPL(camellia_xts_dec_16way);
 
-void camellia_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
+void camellia_xts_enc(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
 {
-       glue_xts_crypt_128bit_one(ctx, dst, src, iv,
-                                 GLUE_FUNC_CAST(camellia_enc_blk));
+       glue_xts_crypt_128bit_one(ctx, dst, src, iv, camellia_enc_blk);
 }
 EXPORT_SYMBOL_GPL(camellia_xts_enc);
 
-void camellia_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv)
+void camellia_xts_dec(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
 {
-       glue_xts_crypt_128bit_one(ctx, dst, src, iv,
-                                 GLUE_FUNC_CAST(camellia_dec_blk));
+       glue_xts_crypt_128bit_one(ctx, dst, src, iv, camellia_dec_blk);
 }
 EXPORT_SYMBOL_GPL(camellia_xts_dec);
 
 
        .funcs = { {
                .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS,
-               .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_ecb_enc_16way) }
+               .fn_u = { .ecb = camellia_ecb_enc_16way }
        }, {
                .num_blocks = 2,
-               .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_enc_blk_2way) }
+               .fn_u = { .ecb = camellia_enc_blk_2way }
        }, {
                .num_blocks = 1,
-               .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_enc_blk) }
+               .fn_u = { .ecb = camellia_enc_blk }
        } }
 };
 
 
        .funcs = { {
                .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS,
-               .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(camellia_ctr_16way) }
+               .fn_u = { .ctr = camellia_ctr_16way }
        }, {
                .num_blocks = 2,
-               .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(camellia_crypt_ctr_2way) }
+               .fn_u = { .ctr = camellia_crypt_ctr_2way }
        }, {
                .num_blocks = 1,
-               .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(camellia_crypt_ctr) }
+               .fn_u = { .ctr = camellia_crypt_ctr }
        } }
 };
 
 
        .funcs = { {
                .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS,
-               .fn_u = { .xts = GLUE_XTS_FUNC_CAST(camellia_xts_enc_16way) }
+               .fn_u = { .xts = camellia_xts_enc_16way }
        }, {
                .num_blocks = 1,
-               .fn_u = { .xts = GLUE_XTS_FUNC_CAST(camellia_xts_enc) }
+               .fn_u = { .xts = camellia_xts_enc }
        } }
 };
 
 
        .funcs = { {
                .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS,
-               .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_ecb_dec_16way) }
+               .fn_u = { .ecb = camellia_ecb_dec_16way }
        }, {
                .num_blocks = 2,
-               .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_dec_blk_2way) }
+               .fn_u = { .ecb = camellia_dec_blk_2way }
        }, {
                .num_blocks = 1,
-               .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_dec_blk) }
+               .fn_u = { .ecb = camellia_dec_blk }
        } }
 };
 
 
        .funcs = { {
                .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS,
-               .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(camellia_cbc_dec_16way) }
+               .fn_u = { .cbc = camellia_cbc_dec_16way }
        }, {
                .num_blocks = 2,
-               .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(camellia_decrypt_cbc_2way) }
+               .fn_u = { .cbc = camellia_decrypt_cbc_2way }
        }, {
                .num_blocks = 1,
-               .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(camellia_dec_blk) }
+               .fn_u = { .cbc = camellia_dec_blk }
        } }
 };
 
 
        .funcs = { {
                .num_blocks = CAMELLIA_AESNI_PARALLEL_BLOCKS,
-               .fn_u = { .xts = GLUE_XTS_FUNC_CAST(camellia_xts_dec_16way) }
+               .fn_u = { .xts = camellia_xts_dec_16way }
        }, {
                .num_blocks = 1,
-               .fn_u = { .xts = GLUE_XTS_FUNC_CAST(camellia_xts_dec) }
+               .fn_u = { .xts = camellia_xts_dec }
        } }
 };
 
 
 static int cbc_encrypt(struct skcipher_request *req)
 {
-       return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(camellia_enc_blk),
-                                          req);
+       return glue_cbc_encrypt_req_128bit(camellia_enc_blk, req);
 }
 
 static int cbc_decrypt(struct skcipher_request *req)
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct camellia_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
 
-       return glue_xts_req_128bit(&camellia_enc_xts, req,
-                                  XTS_TWEAK_CAST(camellia_enc_blk),
+       return glue_xts_req_128bit(&camellia_enc_xts, req, camellia_enc_blk,
                                   &ctx->tweak_ctx, &ctx->crypt_ctx, false);
 }
 
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct camellia_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
 
-       return glue_xts_req_128bit(&camellia_dec_xts, req,
-                                  XTS_TWEAK_CAST(camellia_enc_blk),
+       return glue_xts_req_128bit(&camellia_dec_xts, req, camellia_enc_blk,
                                   &ctx->tweak_ctx, &ctx->crypt_ctx, true);
 }
 
 
 #include <asm/crypto/glue_helper.h>
 
 /* regular block cipher functions */
-asmlinkage void __camellia_enc_blk(struct camellia_ctx *ctx, u8 *dst,
-                                  const u8 *src, bool xor);
+asmlinkage void __camellia_enc_blk(const void *ctx, u8 *dst, const u8 *src,
+                                  bool xor);
 EXPORT_SYMBOL_GPL(__camellia_enc_blk);
-asmlinkage void camellia_dec_blk(struct camellia_ctx *ctx, u8 *dst,
-                                const u8 *src);
+asmlinkage void camellia_dec_blk(const void *ctx, u8 *dst, const u8 *src);
 EXPORT_SYMBOL_GPL(camellia_dec_blk);
 
 /* 2-way parallel cipher functions */
-asmlinkage void __camellia_enc_blk_2way(struct camellia_ctx *ctx, u8 *dst,
-                                       const u8 *src, bool xor);
+asmlinkage void __camellia_enc_blk_2way(const void *ctx, u8 *dst, const u8 *src,
+                                       bool xor);
 EXPORT_SYMBOL_GPL(__camellia_enc_blk_2way);
-asmlinkage void camellia_dec_blk_2way(struct camellia_ctx *ctx, u8 *dst,
-                                     const u8 *src);
+asmlinkage void camellia_dec_blk_2way(const void *ctx, u8 *dst, const u8 *src);
 EXPORT_SYMBOL_GPL(camellia_dec_blk_2way);
 
 static void camellia_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
        return camellia_setkey(&tfm->base, key, key_len);
 }
 
-void camellia_decrypt_cbc_2way(void *ctx, u128 *dst, const u128 *src)
+void camellia_decrypt_cbc_2way(const void *ctx, u8 *d, const u8 *s)
 {
+       u128 *dst = (u128 *)d;
+       const u128 *src = (const u128 *)s;
        u128 iv = *src;
 
        camellia_dec_blk_2way(ctx, (u8 *)dst, (u8 *)src);
 }
 EXPORT_SYMBOL_GPL(camellia_decrypt_cbc_2way);
 
-void camellia_crypt_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv)
+void camellia_crypt_ctr(const void *ctx, u8 *d, const u8 *s, le128 *iv)
 {
        be128 ctrblk;
+       u128 *dst = (u128 *)d;
+       const u128 *src = (const u128 *)s;
 
        if (dst != src)
                *dst = *src;
 }
 EXPORT_SYMBOL_GPL(camellia_crypt_ctr);
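
The elided middle of camellia_crypt_ctr() turns the counter into a
keystream block (camellia feeds it through its xor-ing block variant). A
hypothetical single-block CTR helper in the same regularized shape, with
the xor written out explicitly, makes the recurring pattern easier to see:

static void example_crypt_ctr_one(const void *ctx, u8 *d, const u8 *s,
                                  le128 *iv, common_glue_func_t enc_fn)
{
        be128 ctrblk;
        u128 *dst = (u128 *)d;
        const u128 *src = (const u128 *)s;

        le128_to_be128(&ctrblk, iv);            /* counter kept little-endian */
        le128_inc(iv);                          /* advance for the next block */

        enc_fn(ctx, (u8 *)&ctrblk, (u8 *)&ctrblk);      /* keystream block */
        u128_xor(dst, src, (u128 *)&ctrblk);            /* C = P ^ E(ctr) */
}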
 
-void camellia_crypt_ctr_2way(void *ctx, u128 *dst, const u128 *src, le128 *iv)
+void camellia_crypt_ctr_2way(const void *ctx, u8 *d, const u8 *s, le128 *iv)
 {
        be128 ctrblks[2];
+       u128 *dst = (u128 *)d;
+       const u128 *src = (const u128 *)s;
 
        if (dst != src) {
                dst[0] = src[0];
 
        .funcs = { {
                .num_blocks = 2,
-               .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_enc_blk_2way) }
+               .fn_u = { .ecb = camellia_enc_blk_2way }
        }, {
                .num_blocks = 1,
-               .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_enc_blk) }
+               .fn_u = { .ecb = camellia_enc_blk }
        } }
 };
 
 
        .funcs = { {
                .num_blocks = 2,
-               .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(camellia_crypt_ctr_2way) }
+               .fn_u = { .ctr = camellia_crypt_ctr_2way }
        }, {
                .num_blocks = 1,
-               .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(camellia_crypt_ctr) }
+               .fn_u = { .ctr = camellia_crypt_ctr }
        } }
 };
 
 
        .funcs = { {
                .num_blocks = 2,
-               .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_dec_blk_2way) }
+               .fn_u = { .ecb = camellia_dec_blk_2way }
        }, {
                .num_blocks = 1,
-               .fn_u = { .ecb = GLUE_FUNC_CAST(camellia_dec_blk) }
+               .fn_u = { .ecb = camellia_dec_blk }
        } }
 };
 
 
        .funcs = { {
                .num_blocks = 2,
-               .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(camellia_decrypt_cbc_2way) }
+               .fn_u = { .cbc = camellia_decrypt_cbc_2way }
        }, {
                .num_blocks = 1,
-               .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(camellia_dec_blk) }
+               .fn_u = { .cbc = camellia_dec_blk }
        } }
 };
 
 
 static int cbc_encrypt(struct skcipher_request *req)
 {
-       return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(camellia_enc_blk),
-                                          req);
+       return glue_cbc_encrypt_req_128bit(camellia_enc_blk, req);
 }
 
 static int cbc_decrypt(struct skcipher_request *req)
 
 
 #define CAST6_PARALLEL_BLOCKS 8
 
-asmlinkage void cast6_ecb_enc_8way(struct cast6_ctx *ctx, u8 *dst,
-                                  const u8 *src);
-asmlinkage void cast6_ecb_dec_8way(struct cast6_ctx *ctx, u8 *dst,
-                                  const u8 *src);
-
-asmlinkage void cast6_cbc_dec_8way(struct cast6_ctx *ctx, u8 *dst,
-                                  const u8 *src);
-asmlinkage void cast6_ctr_8way(struct cast6_ctx *ctx, u8 *dst, const u8 *src,
+asmlinkage void cast6_ecb_enc_8way(const void *ctx, u8 *dst, const u8 *src);
+asmlinkage void cast6_ecb_dec_8way(const void *ctx, u8 *dst, const u8 *src);
+
+asmlinkage void cast6_cbc_dec_8way(const void *ctx, u8 *dst, const u8 *src);
+asmlinkage void cast6_ctr_8way(const void *ctx, u8 *dst, const u8 *src,
                               le128 *iv);
 
-asmlinkage void cast6_xts_enc_8way(struct cast6_ctx *ctx, u8 *dst,
-                                  const u8 *src, le128 *iv);
-asmlinkage void cast6_xts_dec_8way(struct cast6_ctx *ctx, u8 *dst,
-                                  const u8 *src, le128 *iv);
+asmlinkage void cast6_xts_enc_8way(const void *ctx, u8 *dst, const u8 *src,
+                                  le128 *iv);
+asmlinkage void cast6_xts_dec_8way(const void *ctx, u8 *dst, const u8 *src,
+                                  le128 *iv);
 
 static int cast6_setkey_skcipher(struct crypto_skcipher *tfm,
                                 const u8 *key, unsigned int keylen)
        return cast6_setkey(&tfm->base, key, keylen);
 }
 
-static void cast6_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
+static void cast6_xts_enc(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
 {
-       glue_xts_crypt_128bit_one(ctx, dst, src, iv,
-                                 GLUE_FUNC_CAST(__cast6_encrypt));
+       glue_xts_crypt_128bit_one(ctx, dst, src, iv, __cast6_encrypt);
 }
 
-static void cast6_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv)
+static void cast6_xts_dec(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
 {
-       glue_xts_crypt_128bit_one(ctx, dst, src, iv,
-                                 GLUE_FUNC_CAST(__cast6_decrypt));
+       glue_xts_crypt_128bit_one(ctx, dst, src, iv, __cast6_decrypt);
 }
 
-static void cast6_crypt_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv)
+static void cast6_crypt_ctr(const void *ctx, u8 *d, const u8 *s, le128 *iv)
 {
        be128 ctrblk;
+       u128 *dst = (u128 *)d;
+       const u128 *src = (const u128 *)s;
 
        le128_to_be128(&ctrblk, iv);
        le128_inc(iv);
 
        .funcs = { {
                .num_blocks = CAST6_PARALLEL_BLOCKS,
-               .fn_u = { .ecb = GLUE_FUNC_CAST(cast6_ecb_enc_8way) }
+               .fn_u = { .ecb = cast6_ecb_enc_8way }
        }, {
                .num_blocks = 1,
-               .fn_u = { .ecb = GLUE_FUNC_CAST(__cast6_encrypt) }
+               .fn_u = { .ecb = __cast6_encrypt }
        } }
 };
 
 
        .funcs = { {
                .num_blocks = CAST6_PARALLEL_BLOCKS,
-               .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(cast6_ctr_8way) }
+               .fn_u = { .ctr = cast6_ctr_8way }
        }, {
                .num_blocks = 1,
-               .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(cast6_crypt_ctr) }
+               .fn_u = { .ctr = cast6_crypt_ctr }
        } }
 };
 
 
        .funcs = { {
                .num_blocks = CAST6_PARALLEL_BLOCKS,
-               .fn_u = { .xts = GLUE_XTS_FUNC_CAST(cast6_xts_enc_8way) }
+               .fn_u = { .xts = cast6_xts_enc_8way }
        }, {
                .num_blocks = 1,
-               .fn_u = { .xts = GLUE_XTS_FUNC_CAST(cast6_xts_enc) }
+               .fn_u = { .xts = cast6_xts_enc }
        } }
 };
 
 
        .funcs = { {
                .num_blocks = CAST6_PARALLEL_BLOCKS,
-               .fn_u = { .ecb = GLUE_FUNC_CAST(cast6_ecb_dec_8way) }
+               .fn_u = { .ecb = cast6_ecb_dec_8way }
        }, {
                .num_blocks = 1,
-               .fn_u = { .ecb = GLUE_FUNC_CAST(__cast6_decrypt) }
+               .fn_u = { .ecb = __cast6_decrypt }
        } }
 };
 
 
        .funcs = { {
                .num_blocks = CAST6_PARALLEL_BLOCKS,
-               .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(cast6_cbc_dec_8way) }
+               .fn_u = { .cbc = cast6_cbc_dec_8way }
        }, {
                .num_blocks = 1,
-               .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(__cast6_decrypt) }
+               .fn_u = { .cbc = __cast6_decrypt }
        } }
 };
 
 
        .funcs = { {
                .num_blocks = CAST6_PARALLEL_BLOCKS,
-               .fn_u = { .xts = GLUE_XTS_FUNC_CAST(cast6_xts_dec_8way) }
+               .fn_u = { .xts = cast6_xts_dec_8way }
        }, {
                .num_blocks = 1,
-               .fn_u = { .xts = GLUE_XTS_FUNC_CAST(cast6_xts_dec) }
+               .fn_u = { .xts = cast6_xts_dec }
        } }
 };
 
 
 static int cbc_encrypt(struct skcipher_request *req)
 {
-       return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(__cast6_encrypt),
-                                          req);
+       return glue_cbc_encrypt_req_128bit(__cast6_encrypt, req);
 }
 
 static int cbc_decrypt(struct skcipher_request *req)
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct cast6_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
 
-       return glue_xts_req_128bit(&cast6_enc_xts, req,
-                                  XTS_TWEAK_CAST(__cast6_encrypt),
+       return glue_xts_req_128bit(&cast6_enc_xts, req, __cast6_encrypt,
                                   &ctx->tweak_ctx, &ctx->crypt_ctx, false);
 }
 
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct cast6_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
 
-       return glue_xts_req_128bit(&cast6_dec_xts, req,
-                                  XTS_TWEAK_CAST(__cast6_encrypt),
+       return glue_xts_req_128bit(&cast6_dec_xts, req, __cast6_encrypt,
                                   &ctx->tweak_ctx, &ctx->crypt_ctx, true);
 }
 
 
                                src -= num_blocks - 1;
                                dst -= num_blocks - 1;
 
-                               gctx->funcs[i].fn_u.cbc(ctx, dst, src);
+                               gctx->funcs[i].fn_u.cbc(ctx, (u8 *)dst,
+                                                       (const u8 *)src);
 
                                nbytes -= func_bytes;
                                if (nbytes < bsize)
 
                        /* Process multi-block batch */
                        do {
-                               gctx->funcs[i].fn_u.ctr(ctx, dst, src, &ctrblk);
+                               gctx->funcs[i].fn_u.ctr(ctx, (u8 *)dst,
+                                                       (const u8 *)src,
+                                                       &ctrblk);
                                src += num_blocks;
                                dst += num_blocks;
                                nbytes -= func_bytes;
 
                be128_to_le128(&ctrblk, (be128 *)walk.iv);
                memcpy(&tmp, walk.src.virt.addr, nbytes);
-               gctx->funcs[gctx->num_funcs - 1].fn_u.ctr(ctx, &tmp, &tmp,
+               gctx->funcs[gctx->num_funcs - 1].fn_u.ctr(ctx, (u8 *)&tmp,
+                                                         (const u8 *)&tmp,
                                                          &ctrblk);
                memcpy(walk.dst.virt.addr, &tmp, nbytes);
                le128_to_be128((be128 *)walk.iv, &ctrblk);
 
                if (nbytes >= func_bytes) {
                        do {
-                               gctx->funcs[i].fn_u.xts(ctx, dst, src,
+                               gctx->funcs[i].fn_u.xts(ctx, (u8 *)dst,
+                                                       (const u8 *)src,
                                                        walk->iv);
 
                                src += num_blocks;
 }
 EXPORT_SYMBOL_GPL(glue_xts_req_128bit);
 
-void glue_xts_crypt_128bit_one(void *ctx, u128 *dst, const u128 *src, le128 *iv,
-                              common_glue_func_t fn)
+void glue_xts_crypt_128bit_one(const void *ctx, u8 *dst, const u8 *src,
+                              le128 *iv, common_glue_func_t fn)
 {
        le128 ivblk = *iv;
 
        gf128mul_x_ble(iv, &ivblk);
 
        /* CC <- T xor C */
-       u128_xor(dst, src, (u128 *)&ivblk);
+       u128_xor((u128 *)dst, (const u128 *)src, (u128 *)&ivblk);
 
        /* PP <- D(Key2,CC) */
-       fn(ctx, (u8 *)dst, (u8 *)dst);
+       fn(ctx, dst, dst);
 
        /* P <- T xor PP */
-       u128_xor(dst, dst, (u128 *)&ivblk);
+       u128_xor((u128 *)dst, (u128 *)dst, (u128 *)&ivblk);
 }
 EXPORT_SYMBOL_GPL(glue_xts_crypt_128bit_one);
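
A usage sketch (hypothetical) for the one-block primitive: the caller
seeds iv with the already-encrypted tweak T = E(K2, sector), and every
call consumes one tweak and steps it to the next one via gf128mul_x_ble()
(multiplication by x in GF(2^128)):

static void xts_two_blocks_example(const void *ctx, u8 *dst, const u8 *src,
                                   le128 *iv, common_glue_func_t fn)
{
        glue_xts_crypt_128bit_one(ctx, dst, src, iv, fn);           /* block 0 */
        glue_xts_crypt_128bit_one(ctx, dst + 16, src + 16, iv, fn); /* block 1 */
}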
 
 
 #define SERPENT_AVX2_PARALLEL_BLOCKS 16
 
 /* 16-way AVX2 parallel cipher functions */
-asmlinkage void serpent_ecb_enc_16way(struct serpent_ctx *ctx, u8 *dst,
-                                     const u8 *src);
-asmlinkage void serpent_ecb_dec_16way(struct serpent_ctx *ctx, u8 *dst,
-                                     const u8 *src);
-asmlinkage void serpent_cbc_dec_16way(void *ctx, u128 *dst, const u128 *src);
+asmlinkage void serpent_ecb_enc_16way(const void *ctx, u8 *dst, const u8 *src);
+asmlinkage void serpent_ecb_dec_16way(const void *ctx, u8 *dst, const u8 *src);
+asmlinkage void serpent_cbc_dec_16way(const void *ctx, u8 *dst, const u8 *src);
 
-asmlinkage void serpent_ctr_16way(void *ctx, u128 *dst, const u128 *src,
+asmlinkage void serpent_ctr_16way(const void *ctx, u8 *dst, const u8 *src,
                                  le128 *iv);
-asmlinkage void serpent_xts_enc_16way(struct serpent_ctx *ctx, u8 *dst,
-                                     const u8 *src, le128 *iv);
-asmlinkage void serpent_xts_dec_16way(struct serpent_ctx *ctx, u8 *dst,
-                                     const u8 *src, le128 *iv);
+asmlinkage void serpent_xts_enc_16way(const void *ctx, u8 *dst, const u8 *src,
+                                     le128 *iv);
+asmlinkage void serpent_xts_dec_16way(const void *ctx, u8 *dst, const u8 *src,
+                                     le128 *iv);
 
 static int serpent_setkey_skcipher(struct crypto_skcipher *tfm,
                                   const u8 *key, unsigned int keylen)
 
        .funcs = { {
                .num_blocks = 16,
-               .fn_u = { .ecb = GLUE_FUNC_CAST(serpent_ecb_enc_16way) }
+               .fn_u = { .ecb = serpent_ecb_enc_16way }
        }, {
                .num_blocks = 8,
-               .fn_u = { .ecb = GLUE_FUNC_CAST(serpent_ecb_enc_8way_avx) }
+               .fn_u = { .ecb = serpent_ecb_enc_8way_avx }
        }, {
                .num_blocks = 1,
-               .fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_encrypt) }
+               .fn_u = { .ecb = __serpent_encrypt }
        } }
 };
 
 
        .funcs = { {
                .num_blocks = 16,
-               .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(serpent_ctr_16way) }
+               .fn_u = { .ctr = serpent_ctr_16way }
        },  {
                .num_blocks = 8,
-               .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(serpent_ctr_8way_avx) }
+               .fn_u = { .ctr = serpent_ctr_8way_avx }
        }, {
                .num_blocks = 1,
-               .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(__serpent_crypt_ctr) }
+               .fn_u = { .ctr = __serpent_crypt_ctr }
        } }
 };
 
 
        .funcs = { {
                .num_blocks = 16,
-               .fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_enc_16way) }
+               .fn_u = { .xts = serpent_xts_enc_16way }
        }, {
                .num_blocks = 8,
-               .fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_enc_8way_avx) }
+               .fn_u = { .xts = serpent_xts_enc_8way_avx }
        }, {
                .num_blocks = 1,
-               .fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_enc) }
+               .fn_u = { .xts = serpent_xts_enc }
        } }
 };
 
 
        .funcs = { {
                .num_blocks = 16,
-               .fn_u = { .ecb = GLUE_FUNC_CAST(serpent_ecb_dec_16way) }
+               .fn_u = { .ecb = serpent_ecb_dec_16way }
        }, {
                .num_blocks = 8,
-               .fn_u = { .ecb = GLUE_FUNC_CAST(serpent_ecb_dec_8way_avx) }
+               .fn_u = { .ecb = serpent_ecb_dec_8way_avx }
        }, {
                .num_blocks = 1,
-               .fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_decrypt) }
+               .fn_u = { .ecb = __serpent_decrypt }
        } }
 };
 
 
        .funcs = { {
                .num_blocks = 16,
-               .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(serpent_cbc_dec_16way) }
+               .fn_u = { .cbc = serpent_cbc_dec_16way }
        }, {
                .num_blocks = 8,
-               .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(serpent_cbc_dec_8way_avx) }
+               .fn_u = { .cbc = serpent_cbc_dec_8way_avx }
        }, {
                .num_blocks = 1,
-               .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(__serpent_decrypt) }
+               .fn_u = { .cbc = __serpent_decrypt }
        } }
 };
 
 
        .funcs = { {
                .num_blocks = 16,
-               .fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_dec_16way) }
+               .fn_u = { .xts = serpent_xts_dec_16way }
        }, {
                .num_blocks = 8,
-               .fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_dec_8way_avx) }
+               .fn_u = { .xts = serpent_xts_dec_8way_avx }
        }, {
                .num_blocks = 1,
-               .fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_dec) }
+               .fn_u = { .xts = serpent_xts_dec }
        } }
 };
 
 
 static int cbc_encrypt(struct skcipher_request *req)
 {
-       return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(__serpent_encrypt),
-                                          req);
+       return glue_cbc_encrypt_req_128bit(__serpent_encrypt, req);
 }
 
 static int cbc_decrypt(struct skcipher_request *req)
        struct serpent_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
 
        return glue_xts_req_128bit(&serpent_enc_xts, req,
-                                  XTS_TWEAK_CAST(__serpent_encrypt),
-                                  &ctx->tweak_ctx, &ctx->crypt_ctx, false);
+                                  __serpent_encrypt, &ctx->tweak_ctx,
+                                  &ctx->crypt_ctx, false);
 }
 
 static int xts_decrypt(struct skcipher_request *req)
        struct serpent_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
 
        return glue_xts_req_128bit(&serpent_dec_xts, req,
-                                  XTS_TWEAK_CAST(__serpent_encrypt),
-                                  &ctx->tweak_ctx, &ctx->crypt_ctx, true);
+                                  __serpent_encrypt, &ctx->tweak_ctx,
+                                  &ctx->crypt_ctx, true);
 }
 
 static struct skcipher_alg serpent_algs[] = {
 
 #include <asm/crypto/serpent-avx.h>
 
 /* 8-way parallel cipher functions */
-asmlinkage void serpent_ecb_enc_8way_avx(struct serpent_ctx *ctx, u8 *dst,
+asmlinkage void serpent_ecb_enc_8way_avx(const void *ctx, u8 *dst,
                                         const u8 *src);
 EXPORT_SYMBOL_GPL(serpent_ecb_enc_8way_avx);
 
-asmlinkage void serpent_ecb_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst,
+asmlinkage void serpent_ecb_dec_8way_avx(const void *ctx, u8 *dst,
                                         const u8 *src);
 EXPORT_SYMBOL_GPL(serpent_ecb_dec_8way_avx);
 
-asmlinkage void serpent_cbc_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst,
+asmlinkage void serpent_cbc_dec_8way_avx(const void *ctx, u8 *dst,
                                         const u8 *src);
 EXPORT_SYMBOL_GPL(serpent_cbc_dec_8way_avx);
 
-asmlinkage void serpent_ctr_8way_avx(struct serpent_ctx *ctx, u8 *dst,
-                                    const u8 *src, le128 *iv);
+asmlinkage void serpent_ctr_8way_avx(const void *ctx, u8 *dst, const u8 *src,
+                                    le128 *iv);
 EXPORT_SYMBOL_GPL(serpent_ctr_8way_avx);
 
-asmlinkage void serpent_xts_enc_8way_avx(struct serpent_ctx *ctx, u8 *dst,
+asmlinkage void serpent_xts_enc_8way_avx(const void *ctx, u8 *dst,
                                         const u8 *src, le128 *iv);
 EXPORT_SYMBOL_GPL(serpent_xts_enc_8way_avx);
 
-asmlinkage void serpent_xts_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst,
+asmlinkage void serpent_xts_dec_8way_avx(const void *ctx, u8 *dst,
                                         const u8 *src, le128 *iv);
 EXPORT_SYMBOL_GPL(serpent_xts_dec_8way_avx);
 
-void __serpent_crypt_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv)
+void __serpent_crypt_ctr(const void *ctx, u8 *d, const u8 *s, le128 *iv)
 {
        be128 ctrblk;
+       u128 *dst = (u128 *)d;
+       const u128 *src = (const u128 *)s;
 
        le128_to_be128(&ctrblk, iv);
        le128_inc(iv);
 }
 EXPORT_SYMBOL_GPL(__serpent_crypt_ctr);
 
-void serpent_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
+void serpent_xts_enc(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
 {
-       glue_xts_crypt_128bit_one(ctx, dst, src, iv,
-                                 GLUE_FUNC_CAST(__serpent_encrypt));
+       glue_xts_crypt_128bit_one(ctx, dst, src, iv, __serpent_encrypt);
 }
 EXPORT_SYMBOL_GPL(serpent_xts_enc);
 
-void serpent_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv)
+void serpent_xts_dec(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
 {
-       glue_xts_crypt_128bit_one(ctx, dst, src, iv,
-                                 GLUE_FUNC_CAST(__serpent_decrypt));
+       glue_xts_crypt_128bit_one(ctx, dst, src, iv, __serpent_decrypt);
 }
 EXPORT_SYMBOL_GPL(serpent_xts_dec);
 
 
        .funcs = { {
                .num_blocks = SERPENT_PARALLEL_BLOCKS,
-               .fn_u = { .ecb = GLUE_FUNC_CAST(serpent_ecb_enc_8way_avx) }
+               .fn_u = { .ecb = serpent_ecb_enc_8way_avx }
        }, {
                .num_blocks = 1,
-               .fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_encrypt) }
+               .fn_u = { .ecb = __serpent_encrypt }
        } }
 };
 
 
        .funcs = { {
                .num_blocks = SERPENT_PARALLEL_BLOCKS,
-               .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(serpent_ctr_8way_avx) }
+               .fn_u = { .ctr = serpent_ctr_8way_avx }
        }, {
                .num_blocks = 1,
-               .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(__serpent_crypt_ctr) }
+               .fn_u = { .ctr = __serpent_crypt_ctr }
        } }
 };
 
 
        .funcs = { {
                .num_blocks = SERPENT_PARALLEL_BLOCKS,
-               .fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_enc_8way_avx) }
+               .fn_u = { .xts = serpent_xts_enc_8way_avx }
        }, {
                .num_blocks = 1,
-               .fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_enc) }
+               .fn_u = { .xts = serpent_xts_enc }
        } }
 };
 
 
        .funcs = { {
                .num_blocks = SERPENT_PARALLEL_BLOCKS,
-               .fn_u = { .ecb = GLUE_FUNC_CAST(serpent_ecb_dec_8way_avx) }
+               .fn_u = { .ecb = serpent_ecb_dec_8way_avx }
        }, {
                .num_blocks = 1,
-               .fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_decrypt) }
+               .fn_u = { .ecb = __serpent_decrypt }
        } }
 };
 
 
        .funcs = { {
                .num_blocks = SERPENT_PARALLEL_BLOCKS,
-               .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(serpent_cbc_dec_8way_avx) }
+               .fn_u = { .cbc = serpent_cbc_dec_8way_avx }
        }, {
                .num_blocks = 1,
-               .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(__serpent_decrypt) }
+               .fn_u = { .cbc = __serpent_decrypt }
        } }
 };
 
 
        .funcs = { {
                .num_blocks = SERPENT_PARALLEL_BLOCKS,
-               .fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_dec_8way_avx) }
+               .fn_u = { .xts = serpent_xts_dec_8way_avx }
        }, {
                .num_blocks = 1,
-               .fn_u = { .xts = GLUE_XTS_FUNC_CAST(serpent_xts_dec) }
+               .fn_u = { .xts = serpent_xts_dec }
        } }
 };
 
 
 static int cbc_encrypt(struct skcipher_request *req)
 {
-       return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(__serpent_encrypt),
-                                          req);
+       return glue_cbc_encrypt_req_128bit(__serpent_encrypt, req);
 }
 
 static int cbc_decrypt(struct skcipher_request *req)
        struct serpent_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
 
        return glue_xts_req_128bit(&serpent_enc_xts, req,
-                                  XTS_TWEAK_CAST(__serpent_encrypt),
-                                  &ctx->tweak_ctx, &ctx->crypt_ctx, false);
+                                  __serpent_encrypt, &ctx->tweak_ctx,
+                                  &ctx->crypt_ctx, false);
 }
 
 static int xts_decrypt(struct skcipher_request *req)
        struct serpent_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
 
        return glue_xts_req_128bit(&serpent_dec_xts, req,
-                                  XTS_TWEAK_CAST(__serpent_encrypt),
-                                  &ctx->tweak_ctx, &ctx->crypt_ctx, true);
+                                  __serpent_encrypt, &ctx->tweak_ctx,
+                                  &ctx->crypt_ctx, true);
 }
 
 static struct skcipher_alg serpent_algs[] = {
 
        return __serpent_setkey(crypto_skcipher_ctx(tfm), key, keylen);
 }
 
-static void serpent_decrypt_cbc_xway(void *ctx, u128 *dst, const u128 *src)
+static void serpent_decrypt_cbc_xway(const void *ctx, u8 *d, const u8 *s)
 {
        u128 ivs[SERPENT_PARALLEL_BLOCKS - 1];
+       u128 *dst = (u128 *)d;
+       const u128 *src = (const u128 *)s;
        unsigned int j;
 
        for (j = 0; j < SERPENT_PARALLEL_BLOCKS - 1; j++)
                u128_xor(dst + (j + 1), dst + (j + 1), ivs + j);
 }
 
-static void serpent_crypt_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv)
+static void serpent_crypt_ctr(const void *ctx, u8 *d, const u8 *s, le128 *iv)
 {
        be128 ctrblk;
+       u128 *dst = (u128 *)d;
+       const u128 *src = (const u128 *)s;
 
        le128_to_be128(&ctrblk, iv);
        le128_inc(iv);
        u128_xor(dst, src, (u128 *)&ctrblk);
 }
 
-static void serpent_crypt_ctr_xway(void *ctx, u128 *dst, const u128 *src,
+static void serpent_crypt_ctr_xway(const void *ctx, u8 *d, const u8 *s,
                                   le128 *iv)
 {
        be128 ctrblks[SERPENT_PARALLEL_BLOCKS];
+       u128 *dst = (u128 *)d;
+       const u128 *src = (const u128 *)s;
        unsigned int i;
 
        for (i = 0; i < SERPENT_PARALLEL_BLOCKS; i++) {
 
        .funcs = { {
                .num_blocks = SERPENT_PARALLEL_BLOCKS,
-               .fn_u = { .ecb = GLUE_FUNC_CAST(serpent_enc_blk_xway) }
+               .fn_u = { .ecb = serpent_enc_blk_xway }
        }, {
                .num_blocks = 1,
-               .fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_encrypt) }
+               .fn_u = { .ecb = __serpent_encrypt }
        } }
 };
 
 
        .funcs = { {
                .num_blocks = SERPENT_PARALLEL_BLOCKS,
-               .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(serpent_crypt_ctr_xway) }
+               .fn_u = { .ctr = serpent_crypt_ctr_xway }
        }, {
                .num_blocks = 1,
-               .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(serpent_crypt_ctr) }
+               .fn_u = { .ctr = serpent_crypt_ctr }
        } }
 };
 
 
        .funcs = { {
                .num_blocks = SERPENT_PARALLEL_BLOCKS,
-               .fn_u = { .ecb = GLUE_FUNC_CAST(serpent_dec_blk_xway) }
+               .fn_u = { .ecb = serpent_dec_blk_xway }
        }, {
                .num_blocks = 1,
-               .fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_decrypt) }
+               .fn_u = { .ecb = __serpent_decrypt }
        } }
 };
 
 
        .funcs = { {
                .num_blocks = SERPENT_PARALLEL_BLOCKS,
-               .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(serpent_decrypt_cbc_xway) }
+               .fn_u = { .cbc = serpent_decrypt_cbc_xway }
        }, {
                .num_blocks = 1,
-               .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(__serpent_decrypt) }
+               .fn_u = { .cbc = __serpent_decrypt }
        } }
 };
 
 
 static int cbc_encrypt(struct skcipher_request *req)
 {
-       return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(__serpent_encrypt),
-                                          req);
+       return glue_cbc_encrypt_req_128bit(__serpent_encrypt, req);
 }
 
 
 #define TWOFISH_PARALLEL_BLOCKS 8
 
 /* 8-way parallel cipher functions */
-asmlinkage void twofish_ecb_enc_8way(struct twofish_ctx *ctx, u8 *dst,
-                                    const u8 *src);
-asmlinkage void twofish_ecb_dec_8way(struct twofish_ctx *ctx, u8 *dst,
-                                    const u8 *src);
+asmlinkage void twofish_ecb_enc_8way(const void *ctx, u8 *dst, const u8 *src);
+asmlinkage void twofish_ecb_dec_8way(const void *ctx, u8 *dst, const u8 *src);
 
-asmlinkage void twofish_cbc_dec_8way(struct twofish_ctx *ctx, u8 *dst,
-                                    const u8 *src);
-asmlinkage void twofish_ctr_8way(struct twofish_ctx *ctx, u8 *dst,
-                                const u8 *src, le128 *iv);
+asmlinkage void twofish_cbc_dec_8way(const void *ctx, u8 *dst, const u8 *src);
+asmlinkage void twofish_ctr_8way(const void *ctx, u8 *dst, const u8 *src,
+                                le128 *iv);
 
-asmlinkage void twofish_xts_enc_8way(struct twofish_ctx *ctx, u8 *dst,
-                                    const u8 *src, le128 *iv);
-asmlinkage void twofish_xts_dec_8way(struct twofish_ctx *ctx, u8 *dst,
-                                    const u8 *src, le128 *iv);
+asmlinkage void twofish_xts_enc_8way(const void *ctx, u8 *dst, const u8 *src,
+                                    le128 *iv);
+asmlinkage void twofish_xts_dec_8way(const void *ctx, u8 *dst, const u8 *src,
+                                    le128 *iv);
 
 static int twofish_setkey_skcipher(struct crypto_skcipher *tfm,
                                   const u8 *key, unsigned int keylen)
        return twofish_setkey(&tfm->base, key, keylen);
 }
 
-static inline void twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst,
-                                       const u8 *src)
+static inline void twofish_enc_blk_3way(const void *ctx, u8 *dst, const u8 *src)
 {
        __twofish_enc_blk_3way(ctx, dst, src, false);
 }
 
-static void twofish_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
+static void twofish_xts_enc(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
 {
-       glue_xts_crypt_128bit_one(ctx, dst, src, iv,
-                                 GLUE_FUNC_CAST(twofish_enc_blk));
+       glue_xts_crypt_128bit_one(ctx, dst, src, iv, twofish_enc_blk);
 }
 
-static void twofish_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv)
+static void twofish_xts_dec(const void *ctx, u8 *dst, const u8 *src, le128 *iv)
 {
-       glue_xts_crypt_128bit_one(ctx, dst, src, iv,
-                                 GLUE_FUNC_CAST(twofish_dec_blk));
+       glue_xts_crypt_128bit_one(ctx, dst, src, iv, twofish_dec_blk);
 }
 
 struct twofish_xts_ctx {
 
        .funcs = { {
                .num_blocks = TWOFISH_PARALLEL_BLOCKS,
-               .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_ecb_enc_8way) }
+               .fn_u = { .ecb = twofish_ecb_enc_8way }
        }, {
                .num_blocks = 3,
-               .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_enc_blk_3way) }
+               .fn_u = { .ecb = twofish_enc_blk_3way }
        }, {
                .num_blocks = 1,
-               .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_enc_blk) }
+               .fn_u = { .ecb = twofish_enc_blk }
        } }
 };
 
 
        .funcs = { {
                .num_blocks = TWOFISH_PARALLEL_BLOCKS,
-               .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(twofish_ctr_8way) }
+               .fn_u = { .ctr = twofish_ctr_8way }
        }, {
                .num_blocks = 3,
-               .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(twofish_enc_blk_ctr_3way) }
+               .fn_u = { .ctr = twofish_enc_blk_ctr_3way }
        }, {
                .num_blocks = 1,
-               .fn_u = { .ctr = GLUE_CTR_FUNC_CAST(twofish_enc_blk_ctr) }
+               .fn_u = { .ctr = twofish_enc_blk_ctr }
        } }
 };
 
 
        .funcs = { {
                .num_blocks = TWOFISH_PARALLEL_BLOCKS,
-               .fn_u = { .xts = GLUE_XTS_FUNC_CAST(twofish_xts_enc_8way) }
+               .fn_u = { .xts = twofish_xts_enc_8way }
        }, {
                .num_blocks = 1,
-               .fn_u = { .xts = GLUE_XTS_FUNC_CAST(twofish_xts_enc) }
+               .fn_u = { .xts = twofish_xts_enc }
        } }
 };
 
 
        .funcs = { {
                .num_blocks = TWOFISH_PARALLEL_BLOCKS,
-               .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_ecb_dec_8way) }
+               .fn_u = { .ecb = twofish_ecb_dec_8way }
        }, {
                .num_blocks = 3,
-               .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_dec_blk_3way) }
+               .fn_u = { .ecb = twofish_dec_blk_3way }
        }, {
                .num_blocks = 1,
-               .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_dec_blk) }
+               .fn_u = { .ecb = twofish_dec_blk }
        } }
 };
 
 
        .funcs = { {
                .num_blocks = TWOFISH_PARALLEL_BLOCKS,
-               .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(twofish_cbc_dec_8way) }
+               .fn_u = { .cbc = twofish_cbc_dec_8way }
        }, {
                .num_blocks = 3,
-               .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(twofish_dec_blk_cbc_3way) }
+               .fn_u = { .cbc = twofish_dec_blk_cbc_3way }
        }, {
                .num_blocks = 1,
-               .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(twofish_dec_blk) }
+               .fn_u = { .cbc = twofish_dec_blk }
        } }
 };
 
 
        .funcs = { {
                .num_blocks = TWOFISH_PARALLEL_BLOCKS,
-               .fn_u = { .xts = GLUE_XTS_FUNC_CAST(twofish_xts_dec_8way) }
+               .fn_u = { .xts = twofish_xts_dec_8way }
        }, {
                .num_blocks = 1,
-               .fn_u = { .xts = GLUE_XTS_FUNC_CAST(twofish_xts_dec) }
+               .fn_u = { .xts = twofish_xts_dec }
        } }
 };
 
 
 static int cbc_encrypt(struct skcipher_request *req)
 {
-       return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(twofish_enc_blk),
-                                          req);
+       return glue_cbc_encrypt_req_128bit(twofish_enc_blk, req);
 }
 
 static int cbc_decrypt(struct skcipher_request *req)
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct twofish_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
 
-       return glue_xts_req_128bit(&twofish_enc_xts, req,
-                                  XTS_TWEAK_CAST(twofish_enc_blk),
+       return glue_xts_req_128bit(&twofish_enc_xts, req, twofish_enc_blk,
                                   &ctx->tweak_ctx, &ctx->crypt_ctx, false);
 }
 
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct twofish_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
 
-       return glue_xts_req_128bit(&twofish_dec_xts, req,
-                                  XTS_TWEAK_CAST(twofish_enc_blk),
+       return glue_xts_req_128bit(&twofish_dec_xts, req, twofish_enc_blk,
                                   &ctx->tweak_ctx, &ctx->crypt_ctx, true);
 }
 
 
        return twofish_setkey(&tfm->base, key, keylen);
 }
 
-static inline void twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst,
-                                       const u8 *src)
+static inline void twofish_enc_blk_3way(const void *ctx, u8 *dst, const u8 *src)
 {
        __twofish_enc_blk_3way(ctx, dst, src, false);
 }
 
-static inline void twofish_enc_blk_xor_3way(struct twofish_ctx *ctx, u8 *dst,
+static inline void twofish_enc_blk_xor_3way(const void *ctx, u8 *dst,
                                            const u8 *src)
 {
        __twofish_enc_blk_3way(ctx, dst, src, true);
 }
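
The xor variant exists for the CTR path: the asm body encrypts the three
counter blocks and xors the result straight into dst, saving a separate
pass. A hypothetical sketch of the final step of twofish_enc_blk_ctr_3way()
below:

static void ctr_3way_finish_example(const void *ctx, u128 *dst,
                                    be128 ctrblks[3])
{
        /* dst already holds the plaintext copy; xor in E(K, ctr[0..2]) */
        twofish_enc_blk_xor_3way(ctx, (u8 *)dst, (u8 *)ctrblks);
}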
 
-void twofish_dec_blk_cbc_3way(void *ctx, u128 *dst, const u128 *src)
+void twofish_dec_blk_cbc_3way(const void *ctx, u8 *d, const u8 *s)
 {
        u128 ivs[2];
+       u128 *dst = (u128 *)d;
+       const u128 *src = (const u128 *)s;
 
        ivs[0] = src[0];
        ivs[1] = src[1];
 }
 EXPORT_SYMBOL_GPL(twofish_dec_blk_cbc_3way);
 
-void twofish_enc_blk_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv)
+void twofish_enc_blk_ctr(const void *ctx, u8 *d, const u8 *s, le128 *iv)
 {
        be128 ctrblk;
+       u128 *dst = (u128 *)d;
+       const u128 *src = (const u128 *)s;
 
        if (dst != src)
                *dst = *src;
 }
 EXPORT_SYMBOL_GPL(twofish_enc_blk_ctr);
 
-void twofish_enc_blk_ctr_3way(void *ctx, u128 *dst, const u128 *src,
-                             le128 *iv)
+void twofish_enc_blk_ctr_3way(const void *ctx, u8 *d, const u8 *s, le128 *iv)
 {
        be128 ctrblks[3];
+       u128 *dst = (u128 *)d;
+       const u128 *src = (const u128 *)s;
 
        if (dst != src) {
                dst[0] = src[0];
 
        .funcs = { {
                .num_blocks = 3,
-               .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_enc_blk_3way) }
+               .fn_u = { .ecb = twofish_enc_blk_3way }
        }, {
                .num_blocks = 1,
-               .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_enc_blk) }
+               .fn_u = { .ecb = twofish_enc_blk }
        } }
 };
 
 
        .funcs = { {
                .num_blocks = 3,
-               .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_enc_blk_ctr_3way) }
+               .fn_u = { .ctr = twofish_enc_blk_ctr_3way }
        }, {
                .num_blocks = 1,
-               .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_enc_blk_ctr) }
+               .fn_u = { .ctr = twofish_enc_blk_ctr }
        } }
 };
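
This hunk fixes more than the cast: the old initializer stored a CTR routine in the .ecb slot of the fn_u union (the cast hid the dropped le128 *iv argument) while the CTR glue path reads .ctr. Both members are function pointers sharing one slot, so it happened to work, but it was doubly type-confused. For reference, the union being filled (see the glue_helper.h hunk later in this patch):

        union {
                common_glue_func_t ecb;
                common_glue_cbc_func_t cbc;
                common_glue_ctr_func_t ctr;
                common_glue_xts_func_t xts;
        } fn_u;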
 
 
        .funcs = { {
                .num_blocks = 3,
-               .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_dec_blk_3way) }
+               .fn_u = { .ecb = twofish_dec_blk_3way }
        }, {
                .num_blocks = 1,
-               .fn_u = { .ecb = GLUE_FUNC_CAST(twofish_dec_blk) }
+               .fn_u = { .ecb = twofish_dec_blk }
        } }
 };
 
 
        .funcs = { {
                .num_blocks = 3,
-               .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(twofish_dec_blk_cbc_3way) }
+               .fn_u = { .cbc = twofish_dec_blk_cbc_3way }
        }, {
                .num_blocks = 1,
-               .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(twofish_dec_blk) }
+               .fn_u = { .cbc = twofish_dec_blk }
        } }
 };
 
 
 static int cbc_encrypt(struct skcipher_request *req)
 {
-       return glue_cbc_encrypt_req_128bit(GLUE_FUNC_CAST(twofish_enc_blk),
-                                          req);
+       return glue_cbc_encrypt_req_128bit(twofish_enc_blk, req);
 }
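
Note that glue_cbc_encrypt_req_128bit takes only the 1-block function and no table: CBC encryption is inherently serial, since each plaintext block is XORed with the previous ciphertext before encrypting, so the N-way tables exist only on the decrypt side. A sketch of the serial loop, assuming 16-byte blocks:

        /* Hypothetical serial CBC encryption: C_i = E_K(P_i ^ C_{i-1}). */
        static void cbc_enc_sketch(common_glue_func_t fn, const void *ctx,
                                   u8 *dst, const u8 *src,
                                   unsigned int nblocks, u128 *iv)
        {
                while (nblocks--) {
                        u128_xor((u128 *)dst, (const u128 *)src, iv);
                        fn(ctx, dst, dst);      /* encrypt in place */
                        iv = (u128 *)dst;       /* chain new ciphertext */
                        dst += 16;
                        src += 16;
                }
        }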
 
 static int cbc_decrypt(struct skcipher_request *req)
 
                               unsigned int keylen);
 
 /* regular block cipher functions */
-asmlinkage void __camellia_enc_blk(struct camellia_ctx *ctx, u8 *dst,
-                                  const u8 *src, bool xor);
-asmlinkage void camellia_dec_blk(struct camellia_ctx *ctx, u8 *dst,
-                                const u8 *src);
+asmlinkage void __camellia_enc_blk(const void *ctx, u8 *dst, const u8 *src,
+                                  bool xor);
+asmlinkage void camellia_dec_blk(const void *ctx, u8 *dst, const u8 *src);
 
 /* 2-way parallel cipher functions */
-asmlinkage void __camellia_enc_blk_2way(struct camellia_ctx *ctx, u8 *dst,
-                                       const u8 *src, bool xor);
-asmlinkage void camellia_dec_blk_2way(struct camellia_ctx *ctx, u8 *dst,
-                                     const u8 *src);
+asmlinkage void __camellia_enc_blk_2way(const void *ctx, u8 *dst, const u8 *src,
+                                       bool xor);
+asmlinkage void camellia_dec_blk_2way(const void *ctx, u8 *dst, const u8 *src);
 
 /* 16-way parallel cipher functions (avx/aes-ni) */
-asmlinkage void camellia_ecb_enc_16way(struct camellia_ctx *ctx, u8 *dst,
-                                      const u8 *src);
-asmlinkage void camellia_ecb_dec_16way(struct camellia_ctx *ctx, u8 *dst,
-                                      const u8 *src);
-
-asmlinkage void camellia_cbc_dec_16way(struct camellia_ctx *ctx, u8 *dst,
-                                      const u8 *src);
-asmlinkage void camellia_ctr_16way(struct camellia_ctx *ctx, u8 *dst,
-                                  const u8 *src, le128 *iv);
-
-asmlinkage void camellia_xts_enc_16way(struct camellia_ctx *ctx, u8 *dst,
-                                      const u8 *src, le128 *iv);
-asmlinkage void camellia_xts_dec_16way(struct camellia_ctx *ctx, u8 *dst,
-                                      const u8 *src, le128 *iv);
-
-static inline void camellia_enc_blk(struct camellia_ctx *ctx, u8 *dst,
-                                   const u8 *src)
+asmlinkage void camellia_ecb_enc_16way(const void *ctx, u8 *dst, const u8 *src);
+asmlinkage void camellia_ecb_dec_16way(const void *ctx, u8 *dst, const u8 *src);
+
+asmlinkage void camellia_cbc_dec_16way(const void *ctx, u8 *dst, const u8 *src);
+asmlinkage void camellia_ctr_16way(const void *ctx, u8 *dst, const u8 *src,
+                                  le128 *iv);
+
+asmlinkage void camellia_xts_enc_16way(const void *ctx, u8 *dst, const u8 *src,
+                                      le128 *iv);
+asmlinkage void camellia_xts_dec_16way(const void *ctx, u8 *dst, const u8 *src,
+                                      le128 *iv);
+
+static inline void camellia_enc_blk(const void *ctx, u8 *dst, const u8 *src)
 {
        __camellia_enc_blk(ctx, dst, src, false);
 }
 
-static inline void camellia_enc_blk_xor(struct camellia_ctx *ctx, u8 *dst,
-                                       const u8 *src)
+static inline void camellia_enc_blk_xor(const void *ctx, u8 *dst, const u8 *src)
 {
        __camellia_enc_blk(ctx, dst, src, true);
 }
 
-static inline void camellia_enc_blk_2way(struct camellia_ctx *ctx, u8 *dst,
+static inline void camellia_enc_blk_2way(const void *ctx, u8 *dst,
                                         const u8 *src)
 {
        __camellia_enc_blk_2way(ctx, dst, src, false);
 }
 
-static inline void camellia_enc_blk_xor_2way(struct camellia_ctx *ctx, u8 *dst,
+static inline void camellia_enc_blk_xor_2way(const void *ctx, u8 *dst,
                                             const u8 *src)
 {
        __camellia_enc_blk_2way(ctx, dst, src, true);
 }
 
 /* glue helpers */
-extern void camellia_decrypt_cbc_2way(void *ctx, u128 *dst, const u128 *src);
-extern void camellia_crypt_ctr(void *ctx, u128 *dst, const u128 *src,
+extern void camellia_decrypt_cbc_2way(const void *ctx, u8 *dst, const u8 *src);
+extern void camellia_crypt_ctr(const void *ctx, u8 *dst, const u8 *src,
                               le128 *iv);
-extern void camellia_crypt_ctr_2way(void *ctx, u128 *dst, const u128 *src,
+extern void camellia_crypt_ctr_2way(const void *ctx, u8 *dst, const u8 *src,
                                    le128 *iv);
 
-extern void camellia_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv);
-extern void camellia_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv);
+extern void camellia_xts_enc(const void *ctx, u8 *dst, const u8 *src,
+                            le128 *iv);
+extern void camellia_xts_dec(const void *ctx, u8 *dst, const u8 *src,
+                            le128 *iv);
 
 #endif /* ASM_X86_CAMELLIA_H */
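
The camellia declarations can be retyped freely because the bodies are assembly: the .S code consumes three pointer registers and never sees a C type, so any prototype passing the same pointers is ABI-compatible on x86_64. The new shape is chosen purely so the symbols fit the glue typedefs without casts, e.g.:

        /* Either spelling generates identical calls; only the C-side type
         * checking differs. The asm reads %rdi (ctx), %rsi (dst), %rdx (src). */
        asmlinkage void camellia_ecb_enc_16way(const void *ctx, u8 *dst,
                                               const u8 *src);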
 
 #include <asm/fpu/api.h>
 #include <crypto/b128ops.h>
 
-typedef void (*common_glue_func_t)(void *ctx, u8 *dst, const u8 *src);
-typedef void (*common_glue_cbc_func_t)(void *ctx, u128 *dst, const u128 *src);
-typedef void (*common_glue_ctr_func_t)(void *ctx, u128 *dst, const u128 *src,
+typedef void (*common_glue_func_t)(const void *ctx, u8 *dst, const u8 *src);
+typedef void (*common_glue_cbc_func_t)(const void *ctx, u8 *dst, const u8 *src);
+typedef void (*common_glue_ctr_func_t)(const void *ctx, u8 *dst, const u8 *src,
                                       le128 *iv);
-typedef void (*common_glue_xts_func_t)(void *ctx, u128 *dst, const u128 *src,
+typedef void (*common_glue_xts_func_t)(const void *ctx, u8 *dst, const u8 *src,
                                       le128 *iv);
 
-#define GLUE_FUNC_CAST(fn) ((common_glue_func_t)(fn))
-#define GLUE_CBC_FUNC_CAST(fn) ((common_glue_cbc_func_t)(fn))
-#define GLUE_CTR_FUNC_CAST(fn) ((common_glue_ctr_func_t)(fn))
-#define GLUE_XTS_FUNC_CAST(fn) ((common_glue_xts_func_t)(fn))
-
 struct common_glue_func_entry {
        unsigned int num_blocks; /* number of blocks that @fn will process */
        union {
                               common_glue_func_t tweak_fn, void *tweak_ctx,
                               void *crypt_ctx, bool decrypt);
 
-extern void glue_xts_crypt_128bit_one(void *ctx, u128 *dst, const u128 *src,
-                                     le128 *iv, common_glue_func_t fn);
+extern void glue_xts_crypt_128bit_one(const void *ctx, u8 *dst,
+                                     const u8 *src, le128 *iv,
+                                     common_glue_func_t fn);
 
 #endif /* _CRYPTO_GLUE_HELPER_H */
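
Deleting the four *_CAST macros (and XTS_TWEAK_CAST at the end of this patch) is the point of the series: calling a function through a pointer of a mismatched type is undefined behaviour in C, and Control-Flow Integrity instrumentation verifies the callee's prototype at every indirect call, so the casts would trip it at runtime. A self-contained illustration of the difference (hypothetical names):

        struct ctx_s { int rounds; };

        typedef void (*fn_t)(const void *ctx, unsigned char *dst,
                             const unsigned char *src);

        /* Old style: compiles with a cast, but any call through f is UB
         * and aborts under a CFI-instrumented build. */
        static void old_fn(struct ctx_s *ctx, unsigned long long *dst,
                           const unsigned long long *src)
        {
                (void)ctx; (void)dst; (void)src;
        }
        static fn_t f = (fn_t)old_fn;

        /* New style: identical signature, cast the data inside instead. */
        static void new_fn(const void *ctx, unsigned char *dst,
                           const unsigned char *src)
        {
                const struct ctx_s *c = ctx;    /* cast data, not code */
                (void)c; (void)dst; (void)src;
        }
        static fn_t g = new_fn;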
 
        struct serpent_ctx crypt_ctx;
 };
 
-asmlinkage void serpent_ecb_enc_8way_avx(struct serpent_ctx *ctx, u8 *dst,
+asmlinkage void serpent_ecb_enc_8way_avx(const void *ctx, u8 *dst,
                                         const u8 *src);
-asmlinkage void serpent_ecb_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst,
+asmlinkage void serpent_ecb_dec_8way_avx(const void *ctx, u8 *dst,
                                         const u8 *src);
 
-asmlinkage void serpent_cbc_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst,
+asmlinkage void serpent_cbc_dec_8way_avx(const void *ctx, u8 *dst,
                                         const u8 *src);
-asmlinkage void serpent_ctr_8way_avx(struct serpent_ctx *ctx, u8 *dst,
-                                    const u8 *src, le128 *iv);
+asmlinkage void serpent_ctr_8way_avx(const void *ctx, u8 *dst, const u8 *src,
+                                    le128 *iv);
 
-asmlinkage void serpent_xts_enc_8way_avx(struct serpent_ctx *ctx, u8 *dst,
+asmlinkage void serpent_xts_enc_8way_avx(const void *ctx, u8 *dst,
                                         const u8 *src, le128 *iv);
-asmlinkage void serpent_xts_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst,
+asmlinkage void serpent_xts_dec_8way_avx(const void *ctx, u8 *dst,
                                         const u8 *src, le128 *iv);
 
-extern void __serpent_crypt_ctr(void *ctx, u128 *dst, const u128 *src,
+extern void __serpent_crypt_ctr(const void *ctx, u8 *dst, const u8 *src,
                                le128 *iv);
 
-extern void serpent_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv);
-extern void serpent_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv);
+extern void serpent_xts_enc(const void *ctx, u8 *dst, const u8 *src, le128 *iv);
+extern void serpent_xts_dec(const void *ctx, u8 *dst, const u8 *src, le128 *iv);
 
 extern int xts_serpent_setkey(struct crypto_skcipher *tfm, const u8 *key,
                              unsigned int keylen);
 
 
 #define SERPENT_PARALLEL_BLOCKS 4
 
-asmlinkage void __serpent_enc_blk_4way(struct serpent_ctx *ctx, u8 *dst,
+asmlinkage void __serpent_enc_blk_4way(const struct serpent_ctx *ctx, u8 *dst,
                                       const u8 *src, bool xor);
-asmlinkage void serpent_dec_blk_4way(struct serpent_ctx *ctx, u8 *dst,
+asmlinkage void serpent_dec_blk_4way(const struct serpent_ctx *ctx, u8 *dst,
                                     const u8 *src);
 
-static inline void serpent_enc_blk_xway(struct serpent_ctx *ctx, u8 *dst,
-                                       const u8 *src)
+static inline void serpent_enc_blk_xway(const void *ctx, u8 *dst, const u8 *src)
 {
        __serpent_enc_blk_4way(ctx, dst, src, false);
 }
 
-static inline void serpent_enc_blk_xway_xor(struct serpent_ctx *ctx, u8 *dst,
-                                           const u8 *src)
+static inline void serpent_enc_blk_xway_xor(const struct serpent_ctx *ctx,
+                                           u8 *dst, const u8 *src)
 {
        __serpent_enc_blk_4way(ctx, dst, src, true);
 }
 
-static inline void serpent_dec_blk_xway(struct serpent_ctx *ctx, u8 *dst,
-                                       const u8 *src)
+static inline void serpent_dec_blk_xway(const void *ctx, u8 *dst, const u8 *src)
 {
        serpent_dec_blk_4way(ctx, dst, src);
 }
 
 #define SERPENT_PARALLEL_BLOCKS 8
 
-asmlinkage void __serpent_enc_blk_8way(struct serpent_ctx *ctx, u8 *dst,
+asmlinkage void __serpent_enc_blk_8way(const struct serpent_ctx *ctx, u8 *dst,
                                       const u8 *src, bool xor);
-asmlinkage void serpent_dec_blk_8way(struct serpent_ctx *ctx, u8 *dst,
+asmlinkage void serpent_dec_blk_8way(const struct serpent_ctx *ctx, u8 *dst,
                                     const u8 *src);
 
-static inline void serpent_enc_blk_xway(struct serpent_ctx *ctx, u8 *dst,
-                                  const u8 *src)
+static inline void serpent_enc_blk_xway(const void *ctx, u8 *dst, const u8 *src)
 {
        __serpent_enc_blk_8way(ctx, dst, src, false);
 }
 
-static inline void serpent_enc_blk_xway_xor(struct serpent_ctx *ctx, u8 *dst,
-                                      const u8 *src)
+static inline void serpent_enc_blk_xway_xor(const struct serpent_ctx *ctx,
+                                           u8 *dst, const u8 *src)
 {
        __serpent_enc_blk_8way(ctx, dst, src, true);
 }
 
-static inline void serpent_dec_blk_xway(struct serpent_ctx *ctx, u8 *dst,
-                                  const u8 *src)
+static inline void serpent_dec_blk_xway(const void *ctx, u8 *dst, const u8 *src)
 {
        serpent_dec_blk_8way(ctx, dst, src);
 }
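
The serpent-sse2 header provides the same serpent_enc_blk_xway/serpent_dec_blk_xway names at two widths, with SERPENT_PARALLEL_BLOCKS selecting the 4-way or 8-way variant at build time, so glue code compiles unchanged against either:

        /* Illustrative width-agnostic call site; resolves to the 4-way or
         * 8-way routine depending on which variant the build selected. */
        static void ecb_enc_chunk(const struct serpent_ctx *ctx, u8 *dst,
                                  const u8 *src)
        {
                serpent_enc_blk_xway(ctx, dst, src);    /* xway blocks at once */
        }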
 
 #include <crypto/b128ops.h>
 
 /* regular block cipher functions from twofish_x86_64 module */
-asmlinkage void twofish_enc_blk(struct twofish_ctx *ctx, u8 *dst,
-                               const u8 *src);
-asmlinkage void twofish_dec_blk(struct twofish_ctx *ctx, u8 *dst,
-                               const u8 *src);
+asmlinkage void twofish_enc_blk(const void *ctx, u8 *dst, const u8 *src);
+asmlinkage void twofish_dec_blk(const void *ctx, u8 *dst, const u8 *src);
 
 /* 3-way parallel cipher functions */
-asmlinkage void __twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst,
-                                      const u8 *src, bool xor);
-asmlinkage void twofish_dec_blk_3way(struct twofish_ctx *ctx, u8 *dst,
-                                    const u8 *src);
+asmlinkage void __twofish_enc_blk_3way(const void *ctx, u8 *dst, const u8 *src,
+                                      bool xor);
+asmlinkage void twofish_dec_blk_3way(const void *ctx, u8 *dst, const u8 *src);
 
 /* helpers from twofish_x86_64-3way module */
-extern void twofish_dec_blk_cbc_3way(void *ctx, u128 *dst, const u128 *src);
-extern void twofish_enc_blk_ctr(void *ctx, u128 *dst, const u128 *src,
+extern void twofish_dec_blk_cbc_3way(const void *ctx, u8 *dst, const u8 *src);
+extern void twofish_enc_blk_ctr(const void *ctx, u8 *dst, const u8 *src,
                                le128 *iv);
-extern void twofish_enc_blk_ctr_3way(void *ctx, u128 *dst, const u128 *src,
+extern void twofish_enc_blk_ctr_3way(const void *ctx, u8 *dst, const u8 *src,
                                     le128 *iv);
 
 #endif /* ASM_X86_TWOFISH_H */
 
 EXPORT_SYMBOL_GPL(cast6_setkey);
 
 /*forward quad round*/
-static inline void Q(u32 *block, u8 *Kr, u32 *Km)
+static inline void Q(u32 *block, const u8 *Kr, const u32 *Km)
 {
        u32 I;
        block[2] ^= F1(block[3], Kr[0], Km[0]);
 }
 
 /*reverse quad round*/
-static inline void QBAR(u32 *block, u8 *Kr, u32 *Km)
+static inline void QBAR(u32 *block, const u8 *Kr, const u32 *Km)
 {
        u32 I;
        block[3] ^= F1(block[0], Kr[3], Km[3]);
        block[2] ^= F1(block[3], Kr[0], Km[0]);
 }
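
Constifying Q() and QBAR() is forced by the outer change: once the context arrives as const void *, the key-schedule pointers derived from it are pointers-to-const, and C will not implicitly drop the qualifier on the way into Q(). Schematically, in __cast6_encrypt below:

        const struct cast6_ctx *c = ctx;        /* const context...           */
        const u32 *Km = c->Km[0];               /* ...yields const key ptrs,  */
        const u8 *Kr = c->Kr[0];                /* so Q() must accept const   */
        Q(block, Kr, Km);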
 
-void __cast6_encrypt(struct cast6_ctx *c, u8 *outbuf, const u8 *inbuf)
+void __cast6_encrypt(const void *ctx, u8 *outbuf, const u8 *inbuf)
 {
+       const struct cast6_ctx *c = ctx;
        const __be32 *src = (const __be32 *)inbuf;
        __be32 *dst = (__be32 *)outbuf;
        u32 block[4];
-       u32 *Km;
-       u8 *Kr;
+       const u32 *Km;
+       const u8 *Kr;
 
        block[0] = be32_to_cpu(src[0]);
        block[1] = be32_to_cpu(src[1]);
        __cast6_encrypt(crypto_tfm_ctx(tfm), outbuf, inbuf);
 }
 
-void __cast6_decrypt(struct cast6_ctx *c, u8 *outbuf, const u8 *inbuf)
+void __cast6_decrypt(const void *ctx, u8 *outbuf, const u8 *inbuf)
 {
+       const struct cast6_ctx *c = ctx;
        const __be32 *src = (const __be32 *)inbuf;
        __be32 *dst = (__be32 *)outbuf;
        u32 block[4];
-       u32 *Km;
-       u8 *Kr;
+       const u32 *Km;
+       const u8 *Kr;
 
        block[0] = be32_to_cpu(src[0]);
        block[1] = be32_to_cpu(src[1]);
 
 }
 EXPORT_SYMBOL_GPL(serpent_setkey);
 
-void __serpent_encrypt(struct serpent_ctx *ctx, u8 *dst, const u8 *src)
+void __serpent_encrypt(const void *c, u8 *dst, const u8 *src)
 {
+       const struct serpent_ctx *ctx = c;
        const u32 *k = ctx->expkey;
        const __le32 *s = (const __le32 *)src;
        __le32  *d = (__le32 *)dst;
        __serpent_encrypt(ctx, dst, src);
 }
 
-void __serpent_decrypt(struct serpent_ctx *ctx, u8 *dst, const u8 *src)
+void __serpent_decrypt(const void *c, u8 *dst, const u8 *src)
 {
+       const struct serpent_ctx *ctx = c;
        const u32 *k = ctx->expkey;
        const __le32 *s = (const __le32 *)src;
        __le32  *d = (__le32 *)dst;
 
                   unsigned int keylen, u32 *flags);
 int cast6_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen);
 
-void __cast6_encrypt(struct cast6_ctx *ctx, u8 *dst, const u8 *src);
-void __cast6_decrypt(struct cast6_ctx *ctx, u8 *dst, const u8 *src);
+void __cast6_encrypt(const void *ctx, u8 *dst, const u8 *src);
+void __cast6_decrypt(const void *ctx, u8 *dst, const u8 *src);
 
 #endif
 
                     unsigned int keylen);
 int serpent_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen);
 
-void __serpent_encrypt(struct serpent_ctx *ctx, u8 *dst, const u8 *src);
-void __serpent_decrypt(struct serpent_ctx *ctx, u8 *dst, const u8 *src);
+void __serpent_encrypt(const void *ctx, u8 *dst, const u8 *src);
+void __serpent_decrypt(const void *ctx, u8 *dst, const u8 *src);
 
 #endif
 
 
 #define XTS_BLOCK_SIZE 16
 
-#define XTS_TWEAK_CAST(x) ((void (*)(void *, u8*, const u8*))(x))
-
 static inline int xts_check_key(struct crypto_tfm *tfm,
                                const u8 *key, unsigned int keylen)
 {