/*
 * Driver for EIP97 cryptographic accelerator.
 *
 * Copyright (c) 2018 AVM GmbH
 *
 * SPDX-License-Identifier: GPL-2.0-only
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This driver is heavily based on the Mediatek driver from Ryder Lee.
 */

#include <linux/atomic.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <crypto/aes.h>
#include <crypto/internal/aead.h>
#include <crypto/skcipher.h>

#include "eip97-platform.h"
#include "eip97-aead.h"
#include "eip97-aead-raw.h"
#include "eip97-aead-utester.h"

#define EIP97_AEAD_MAX_KEY_WORDS	8
#define EIP97_AES_GCM_PREHASH_WORDS	4
#define EIP97_AES_GCM_CT_CONTAINS_IV	0x1c000000

/* Loose integer overflow requirements:
 * - able to add two or three lengths without overflow
 * - spare headroom if actually adding three lengths
 * - able to store as big lengths as possible
 */
#define EIP97_AEAD_MAX_UINT_LEN (UINT_MAX/8)
#define EIP97_AEAD_MAX_U32_LEN (U32_MAX/8)

enum aead_mode {
	AES_GCM,
};

struct eip97_aead_tfm_ctx {
	struct eip97_cryp *cryp;
	enum aead_mode mode;
	u32 key_words;
	u32 key[EIP97_AEAD_MAX_KEY_WORDS];
	u32 auth_bytes;
	u32 prehash[EIP97_AES_GCM_PREHASH_WORDS];
	struct crypto_skcipher *aes_ctr;
};

struct eip97_aead_sg {
	u32 proc_bytes;
	struct scatterlist *unaligned_sg;
	u32 unaligned_nents;
	u32 nents;
};

struct eip97_aead_req_ctx {
	struct eip97_aead_sg src;
	struct eip97_aead_sg dst;
	enum crypt_direction dir;
	struct eip97_aead_info info;
	dma_addr_t info_dma;
	enum eip97_op_type type;
};

struct eip97_aes_gcm_setkey_result {
	int err;
	struct completion completion;
};

struct eip97_aead_info_dma_sync {
	struct device *dev;
	dma_addr_t addr;
};

static inline struct eip97_aead_req_ctx *req_ctx(struct aead_request *req)
{
	return (struct eip97_aead_req_ctx *)req->__ctx;
}

static void eip97_aes_gcm_setkey_done(struct crypto_async_request *req,
				      int err)
{
	struct eip97_aes_gcm_setkey_result *result = req->data;

	if (err == -EINPROGRESS)
		return;

	result->err = err;
	complete(&result->completion);
}

static int eip97_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
				u32 key_bytes)
{
	struct eip97_aead_tfm_ctx *ctx = crypto_tfm_ctx(&aead->base);
	struct crypto_skcipher *skc = ctx->aes_ctr;
	int err;
	struct {
		u32 hash[4];
		u8 iv[8];
		struct eip97_aes_gcm_setkey_result result;
		struct scatterlist sg[1];
		struct skcipher_request req;
	} *data;

	switch (key_bytes) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_192:
	case AES_KEYSIZE_256:
		break;
	default:
		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	/* For some mysterious hardware-related reason we need to precompute
	 * some kind of hash from all-zero IV, all-zero data and current key
	 * by using AES in CTR mode.
	 */
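	/* What this most likely computes is the standard GCM hash subkey
	 * H = AES_K(0^128): with a zeroed counter block, CTR mode simply
	 * encrypts the all-zero block, and the result is later loaded into
	 * the transform record as the "prehash". This interpretation is an
	 * educated guess based on how GCM works, not on EIP97 documentation.
	 */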
	crypto_skcipher_clear_flags(skc, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(skc, crypto_aead_get_flags(aead) &
				  CRYPTO_TFM_REQ_MASK);
	err = crypto_skcipher_setkey(skc, key, key_bytes);
	if (err)
		return err;

	crypto_aead_set_flags(aead, crypto_skcipher_get_flags(skc) &
			      CRYPTO_TFM_RES_MASK);

	data = kzalloc(sizeof(*data) + crypto_skcipher_reqsize(skc),
		       GFP_ATOMIC);
	if (!data)
		return -ENOMEM;

	init_completion(&data->result.completion);
	sg_init_one(data->sg, &data->hash, AES_BLOCK_SIZE);
	skcipher_request_set_tfm(&data->req, skc);
	skcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP |
				      CRYPTO_TFM_REQ_MAY_BACKLOG,
				      eip97_aes_gcm_setkey_done,
				      &data->result);
	skcipher_request_set_crypt(&data->req, data->sg, data->sg,
				   AES_BLOCK_SIZE, data->iv);

	err = crypto_skcipher_encrypt(&data->req);
	if (err == -EINPROGRESS) {
		err = wait_for_completion_interruptible(
			&data->result.completion);
		if (!err)
			err = data->result.err;
	}
	if (err)
		goto exit;

	ctx->key_words = SIZE_IN_WORDS(key_bytes);
	BUILD_BUG_ON(sizeof(ctx->key) < AES_KEYSIZE_256);
	memcpy(ctx->key, key, key_bytes);
	memcpy(ctx->prehash, data->hash, sizeof(ctx->prehash));

exit:
	kzfree(data);
	return err;
}

static int eip97_aes_gcm_setauthsize(struct crypto_aead *aead, u32 auth_bytes)
{
	struct eip97_aead_tfm_ctx *ctx = crypto_tfm_ctx(&aead->base);

	switch (auth_bytes) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	ctx->auth_bytes = auth_bytes;
	return 0;
}

static int eip97_aead_complete(struct aead_request *req, int err,
			       bool callback)
{
	struct crypto_async_request *areq = &req->base;

	if (callback)
		areq->complete(areq, err);

	return err;
}

static int eip97_aead_dma_unalign(struct aead_request *req, int err,
				  bool callback)
{
	struct eip97_aead_req_ctx *ctx = req_ctx(req);
	size_t copied = 0;
	u8 *align_buf = NULL;

	if (req->src != ctx->src.unaligned_sg) {
		align_buf = sg_virt(req->src);
		req->src = ctx->src.unaligned_sg;
		ctx->src.nents = ctx->src.unaligned_nents;
	}

	if (req->dst != ctx->dst.unaligned_sg) {
		align_buf = sg_virt(req->dst);
		req->dst = ctx->dst.unaligned_sg;
		ctx->dst.nents = ctx->dst.unaligned_nents;

		if (err)
			goto exit;

		copied = E97_HK(sg_copy_from_buffer)(req->dst, ctx->dst.nents,
						     align_buf,
						     ctx->dst.proc_bytes);
		if (copied != ctx->dst.proc_bytes)
			err = -ENODATA;
	}

exit:
	if (align_buf)
		kzfree(align_buf);

	return eip97_aead_complete(req, err, callback);
}

static int eip97_aead_dma_unmap(struct aead_request *req, int err,
				bool callback)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct eip97_aead_tfm_ctx *tfm_ctx = crypto_tfm_ctx(tfm);
	struct eip97_aead_req_ctx *ctx = req_ctx(req);
	struct device *dev = tfm_ctx->cryp->dev;

	dma_unmap_single(dev, ctx->info_dma, sizeof(ctx->info), DMA_TO_DEVICE);

	/* NOTE: Per Documentation/DMA-API.txt, we must pass ctx->src.nents */
	if (req->src == req->dst) {
		dma_unmap_sg(dev, req->src, ctx->src.nents, DMA_BIDIRECTIONAL);
	} else {
		dma_unmap_sg(dev, req->src, ctx->src.nents, DMA_TO_DEVICE);
		dma_unmap_sg(dev, req->dst, ctx->dst.nents, DMA_FROM_DEVICE);
	}

	return eip97_aead_dma_unalign(req, err, callback);
}
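
/* Completion callback for a finished AEAD operation. It is presumably
 * invoked from the platform code that services the result ring (the caller
 * lives outside this file), with op_type pointing into our request context.
 * Hardware error flags map to errno values as follows:
 * EIP97_ERR_CHECK_FAIL -> -EBADMSG (authentication failure),
 * EIP97_ERR_TIMEOUT -> -ETIMEDOUT, any other error -> -EPROTO; a result
 * length that does not match the expected output is reported as -ENODATA.
 */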
void eip97_aead_finish(enum eip97_op_type *op_type, eip97_err_t err,
		       unsigned int bytes)
{
	struct eip97_aead_req_ctx *ctx = container_of(op_type,
					struct eip97_aead_req_ctx, type);
	struct aead_request *req = container_of((void *)ctx,
					struct aead_request, __ctx);

	if (err & EIP97_ERR_CHECK_FAIL) {
		eip97_aead_dma_unmap(req, -EBADMSG, true);
		return;
	} else if (err & EIP97_ERR_TIMEOUT) {
		eip97_aead_dma_unmap(req, -ETIMEDOUT, true);
		return;
	} else if (err) {
		eip97_aead_dma_unmap(req, -EPROTO, true);
		return;
	}

	if (bytes != ctx->dst.proc_bytes)
		eip97_aead_dma_unmap(req, -ENODATA, true);
	else
		eip97_aead_dma_unmap(req, 0, true);
}

static int eip97_aes_gcm_create_tfm(struct eip97_aes_gcm_request *req)
{
	struct eip97_aead_info *info = req->info;
	u32 iv_words;

	info->tfm[0] = 0;
	info->tfm[1] = 0;

	switch (SIZE_IN_BYTES(req->key_words)) {
	case AES_KEYSIZE_128:
		info->tfm[0] |= EIP97_TFM_AES_128BITS;
		break;
	case AES_KEYSIZE_192:
		info->tfm[0] |= EIP97_TFM_AES_192BITS;
		break;
	case AES_KEYSIZE_256:
		info->tfm[0] |= EIP97_TFM_AES_256BITS;
		break;
	default:
		return -EINVAL;
	}

	/* Intentional fall-through: accumulate one IV flag per 32-bit word. */
	switch (req->iv_bytes) {
	case 16:
		info->tfm[1] |= EIP97_TFM_IV3;
		/* fall through */
	case 12:
		info->tfm[1] |= EIP97_TFM_IV2;
		/* fall through */
	case 8:
		info->tfm[1] |= EIP97_TFM_IV1;
		/* fall through */
	case 4:
		info->tfm[1] |= EIP97_TFM_IV0;
		iv_words = req->iv_bytes / 4;
		break;
	default:
		return -EINVAL;
	}

	info->tfm[0] |= EIP97_TFM_SIZE(req->key_words +
				       EIP97_AES_GCM_PREHASH_WORDS + iv_words);

	BUILD_BUG_ON(sizeof(info->state) < AES_KEYSIZE_256 +
		     SIZE_IN_BYTES(EIP97_AES_GCM_PREHASH_WORDS) + 16);
	eip97_write_state_le(info->state, req->key, req->key_words);
	info->tfm[0] |= EIP97_TFM_CONTAINS_KEY;
	eip97_write_state_be(info->state + req->key_words, req->prehash,
			     EIP97_AES_GCM_PREHASH_WORDS);
	eip97_write_state_le(info->state + req->key_words +
			     EIP97_AES_GCM_PREHASH_WORDS, (u32 *)req->iv,
			     iv_words);

	switch (req->dir) {
	case EIP97_ENCRYPT:
		info->tfm[0] |= EIP97_TFM_CRYPT_THEN_HASH_OUT;
		break;
	case EIP97_DECRYPT:
		info->tfm[0] |= EIP97_TFM_HASH_THEN_CRYPT_IN;
		break;
	default:
		return -EINVAL;
	}

	/* For whatever reason, SHA-224 flag must be set. */
	info->tfm[0] |= EIP97_TFM_AES_AEAD_DIGEST | EIP97_TFM_SHA224;
	info->tfm[1] |= EIP97_TFM_COUNTER_MODE_IV |
			EIP97_TFM_ENCRYPT_HASH_RESULT |
			EIP97_TFM_CRYPT_MODE_UNDEFINED;

	return 0;
}

static u32 eip97_aes_gcm_create_command_token(struct eip97_aes_gcm_request *req,
					      u32 text_bytes)
{
	u32 cmd_words = 0;

	BUILD_BUG_ON(sizeof(req->info->cmd) < SIZE_IN_BYTES(6));

	/* Command token instruction words; byte counts are OR'ed into the
	 * low bits.
	 */
	req->info->cmd[cmd_words++] = 0x0b000000 | req->assoc_bytes;
	req->info->cmd[cmd_words++] = 0xa0800000 | req->assoc_bytes;
	req->info->cmd[cmd_words++] = 0x25000010;
	req->info->cmd[cmd_words++] = 0x0f020000 | text_bytes;

	switch (req->dir) {
	case EIP97_ENCRYPT:
		req->info->cmd[cmd_words++] = 0x21e60000 | req->auth_bytes;
		break;
	case EIP97_DECRYPT:
		req->info->cmd[cmd_words++] = 0x40e60000 | req->auth_bytes;
		req->info->cmd[cmd_words++] = 0xd0070000 | req->auth_bytes;
		break;
	default:
		return 0;
	}

	return cmd_words;
}
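
/* Raw submission path (EXPORT_SYMBOLed, presumably declared in
 * eip97-aead-raw.h). The caller is expected to have DMA-mapped src/dst and
 * the eip97_aead_info behind req->info_dma, and to have allocated a tag;
 * in this file the caller also keeps bottom halves disabled while it owns
 * the descriptor ring, and the raw interface presumably expects the same.
 * On success the prepared command/result descriptor counts are written to
 * the ring registers and -EINPROGRESS is returned; completion is then
 * reported via eip97_aead_finish(). On failure the ring preparation
 * pointers are rolled back and nothing is submitted to the engine.
 */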
int eip97_aes_gcm_crypt(struct eip97_aes_gcm_request *req)
{
	struct eip97_ring *ring = req->ring;
	volatile struct eip97_rd *res_prep;
	volatile struct eip97_cd *cmd_prep;
	int src_nents, dst_nents;
	u32 cmd_words;
	u32 text_bytes;
	int err;

#ifdef EIP97_SAFE_MODE
	//req->key_words is checked always anyway
	if (!req->key)
		return -EINVAL;
	if (!req->prehash)
		return -EINVAL;
	//req->iv_bytes is checked always anyway
	if (!req->iv)
		return -EINVAL;
	switch (req->auth_bytes) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}
	if (!req->assoc_bytes)
		return -EINVAL;
	if (!req->src || !req->src_bytes)
		return -EINVAL;
	if (!req->dst || !req->dst_bytes)
		return -EINVAL;
	if (req->src_bytes > EIP97_AEAD_MAX_U32_LEN)
		return -EINVAL;
	if (req->dst_bytes > EIP97_AEAD_MAX_U32_LEN)
		return -EINVAL;
	if (req->dir != EIP97_ENCRYPT && req->dir != EIP97_DECRYPT)
		return -EINVAL;
	if (!req->base)
		return -EINVAL;
	if (req->ring_id < 0 || req->ring_id >= EIP97_RING_MAX)
		return -EINVAL;
	if (!req->ring)
		return -EINVAL;
	if (!req->info)
		return -EINVAL;
	if (!req->info_dma)
		return -EINVAL;
#endif

	switch (req->dir) {
	case EIP97_ENCRYPT:
		text_bytes = req->src_bytes - req->assoc_bytes;
		break;
	case EIP97_DECRYPT:
		text_bytes = req->dst_bytes - req->assoc_bytes;
		break;
	default:
		return -EINVAL;
	}

	err = eip97_aes_gcm_create_tfm(req);
	if (err)
		return err;

	cmd_words = eip97_aes_gcm_create_command_token(req, text_bytes);
	if (!cmd_words)
		return -EINVAL;

	/* Build command descriptors */
	cmd_prep = ring->cmd_prep;
	src_nents = E97_HK(eip97_create_cmd_descs)(req->src, req->src_bytes,
			req->ring, req->info_dma, 0, cmd_words, req->tag,
			req->info_dma + sizeof(req->info->cmd));
	if (src_nents < 0) {
		ring->cmd_prep = cmd_prep;
		return src_nents;
	}

	/* Build result descriptors */
	res_prep = ring->res_prep;
	dst_nents = E97_HK(eip97_create_res_descs)(req->dst, req->dst_bytes,
			req->ring, req->tag);
	if (dst_nents < 0) {
		ring->cmd_prep = cmd_prep;
		ring->res_prep = res_prep;
		return dst_nents;
	}

	/* Sync DMA, if any */
	if (req->sync)
		req->sync(req->sync_ctx);

	/* Make sure all changes to DMA ring are done before we start engine */
	wmb();

	/* Start transfer */
	writel(EIP97_CD_CNT((u32)src_nents),
	       req->base + CDR_PREP_COUNT(req->ring_id));
	writel(EIP97_RD_CNT((u32)dst_nents),
	       req->base + RDR_PREP_COUNT(req->ring_id));

	return -EINPROGRESS;
}
EXPORT_SYMBOL(eip97_aes_gcm_crypt);

static void eip97_aead_info_dma_sync(void *ctx)
{
	struct eip97_aead_info_dma_sync *sync_ctx =
		(struct eip97_aead_info_dma_sync *)ctx;

	dma_sync_single_for_device(sync_ctx->dev, sync_ctx->addr,
				   sizeof(struct eip97_aead_info),
				   DMA_TO_DEVICE);
}

static int eip97_aead_enqueue_hard(struct aead_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct eip97_aead_tfm_ctx *tfm_ctx = crypto_tfm_ctx(tfm);
	struct eip97_aead_req_ctx *ctx = req_ctx(req);
	struct eip97_aes_gcm_request aes_gcm_req;
	u32 ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
	struct eip97_aead_info_dma_sync sync_ctx;
	struct eip97_cryp *cryp = READ_ONCE(tfm_ctx->cryp);
	enum eip97_ring_id ring_id;
	struct eip97_ring *ring;
	int tag, ret;

	memset(&aes_gcm_req, 0, sizeof(aes_gcm_req));
	memset(&sync_ctx, 0, sizeof(sync_ctx));

	//early so eip97_alloc_tag() only needs spin_lock()
	local_bh_disable();

	tag = E97_HK(eip97_alloc_tag)(tfm_ctx->cryp, &(ctx->type));
	if (tag < 0) {
		local_bh_enable();
		return tag;
	}

	sync_ctx.dev = cryp->dev;
	sync_ctx.addr = ctx->info_dma;

	BUILD_BUG_ON(NR_CPUS > EIP97_RING_MAX);
	ring_id = get_cpu() % EIP97_RING_MAX;
	ring = cryp->ring[ring_id];

	if (cd_ring_empty(ring) && rd_ring_empty(ring))
		atomic_inc(&(cryp->debug.ring_empty));

	switch (tfm_ctx->mode) {
	case AES_GCM:
		aes_gcm_req.key_words = tfm_ctx->key_words;
		aes_gcm_req.key = tfm_ctx->key;
		aes_gcm_req.prehash = tfm_ctx->prehash;
		aes_gcm_req.iv_bytes = ivsize;
		aes_gcm_req.iv = req->iv;
		aes_gcm_req.auth_bytes = tfm_ctx->auth_bytes;
		aes_gcm_req.assoc_bytes = req->assoclen;
		aes_gcm_req.src = req->src;
		aes_gcm_req.src_bytes = ctx->src.proc_bytes;
		aes_gcm_req.dst = req->dst;
		aes_gcm_req.dst_bytes = ctx->dst.proc_bytes;
		aes_gcm_req.dir = ctx->dir;
		aes_gcm_req.tag = tag;
		aes_gcm_req.base = cryp->base;
		aes_gcm_req.ring_id = ring_id;
		aes_gcm_req.ring = ring;
		aes_gcm_req.info = &(ctx->info);
		aes_gcm_req.info_dma = ctx->info_dma;
		aes_gcm_req.sync = &eip97_aead_info_dma_sync;
		aes_gcm_req.sync_ctx = &sync_ctx;
		ret = eip97_aes_gcm_crypt(&aes_gcm_req);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	put_cpu();
	local_bh_enable();

	if (ret != -EINPROGRESS)
		cryp->tags[tag] = 0;
	else
		atomic_inc(&(cryp->debug.tx_ops));

	return ret;
}
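
/* Wrapper around eip97_aead_enqueue_hard(). There is no software backlog:
 * a full descriptor or tag ring surfaces as -EAGAIN, which is counted,
 * warned about once the counter passes a threshold, and then passed back
 * to the caller after unmapping the request.
 */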
static int eip97_aead_enqueue(struct aead_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct eip97_aead_tfm_ctx *tfm_ctx = crypto_tfm_ctx(tfm);
	int err;

	err = eip97_aead_enqueue_hard(req);
	if (err == -EINPROGRESS)
		return -EINPROGRESS;

	if (err == -EAGAIN)
		atomic_inc(&(tfm_ctx->cryp->debug.ring_full));
	WARN_ONCE(atomic_read(&(tfm_ctx->cryp->debug.ring_full)) > 16,
		  "Please increase EIP97 driver descriptor ring size, tag ring size or implement software backlog!");

	return eip97_aead_dma_unmap(req, err, false);
}

static int eip97_aead_dma_map(struct aead_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct eip97_aead_tfm_ctx *tfm_ctx = crypto_tfm_ctx(tfm);
	struct eip97_aead_req_ctx *ctx = req_ctx(req);
	struct device *dev = tfm_ctx->cryp->dev;
	int dma_nents;

	ctx->info_dma = E97_HK(dma_map_single)(dev, &(ctx->info),
					       sizeof(ctx->info),
					       DMA_TO_DEVICE);
	if (unlikely(E97_HK(dma_mapping_error)(dev, ctx->info_dma)))
		goto err_exit;

	if (req->src == req->dst) {
		dma_nents = E97_HK(dma_map_sg)(dev, req->src, ctx->src.nents,
					       DMA_BIDIRECTIONAL);
		if (unlikely(!dma_nents))
			goto src_map_err;
	} else {
		dma_nents = E97_HK(dma_map_sg)(dev, req->src, ctx->src.nents,
					       DMA_TO_DEVICE);
		if (unlikely(!dma_nents))
			goto src_map_err;

		dma_nents = E97_HK(dma_map_sg)(dev, req->dst, ctx->dst.nents,
					       DMA_FROM_DEVICE);
		if (unlikely(!dma_nents))
			goto dst_map_err;
	}

	return eip97_aead_enqueue(req);

dst_map_err:
	dma_unmap_sg(dev, req->src, ctx->src.nents, DMA_TO_DEVICE);
src_map_err:
	dma_unmap_single(dev, ctx->info_dma, sizeof(ctx->info), DMA_TO_DEVICE);
err_exit:
	return eip97_aead_dma_unalign(req, -EINVAL, false);
}

static bool eip97_aead_check_aligned(struct scatterlist *sg, size_t *bytes,
				     u32 *nents)
{
	bool ret = true;

	*nents = 0;
	for (*bytes = 0; sg; sg = sg_next(sg)) {
		*bytes += sg->length;
		(*nents)++;
		if (!IS_ALIGNED(sg->offset, sizeof(u32)))
			ret = false;
	}

	return ret;
}
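
/* This driver's descriptor setup assumes 4-byte aligned scatterlist
 * segments. If either list fails that check, the request is bounced
 * through a single contiguous buffer sized for the larger of the two
 * directions; on completion, eip97_aead_dma_unalign() copies the result
 * back into the caller's scatterlist and frees the bounce buffer.
 */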
static int eip97_aead_dma_align(struct aead_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct eip97_aead_tfm_ctx *tfm_ctx = crypto_tfm_ctx(tfm);
	struct eip97_aead_req_ctx *ctx = req_ctx(req);
	struct crypto_alg *alg = tfm->__crt_alg;
	struct scatterlist *aligned_sg;
	bool src_aligned, dst_aligned;
	size_t src_bytes, dst_bytes;
	size_t align_bytes;
	size_t copied;
	u8 *align_buf;

	ctx->src.unaligned_sg = req->src;
	ctx->dst.unaligned_sg = req->dst;

	src_aligned = eip97_aead_check_aligned(req->src, &src_bytes,
					       &ctx->src.unaligned_nents);
	if (req->src == req->dst) {
		dst_aligned = src_aligned;
		dst_bytes = src_bytes;
		ctx->dst.unaligned_nents = ctx->src.unaligned_nents;
	} else {
		dst_aligned = eip97_aead_check_aligned(req->dst, &dst_bytes,
						&ctx->dst.unaligned_nents);
	}
	ctx->src.nents = ctx->src.unaligned_nents;
	ctx->dst.nents = ctx->dst.unaligned_nents;

	if (src_bytes < ctx->src.proc_bytes || dst_bytes < ctx->dst.proc_bytes)
		return -EINVAL;

	if (!src_aligned || !dst_aligned) {
		align_bytes = max(ctx->src.proc_bytes, ctx->dst.proc_bytes);
		align_buf = E97_HK(kmalloc)(align_bytes +
					    sizeof(struct scatterlist),
					    GFP_ATOMIC);
		if (!align_buf)
			return -ENOMEM;

		aligned_sg = (struct scatterlist *)(align_buf + align_bytes);
		sg_set_buf(aligned_sg, align_buf, align_bytes);
		sg_mark_end(aligned_sg);

		if (!dst_aligned) {
			req->dst = aligned_sg;
			ctx->dst.nents = 1;
		}

		if (!src_aligned) {
			WARN_ONCE(alg->cra_flags & CRYPTO_ALG_TESTED,
				  "EIP97 acceleration of aead slows down because of unaligned data input!");
			atomic_inc(&(tfm_ctx->cryp->debug.unaligned_ops));

			copied = E97_HK(sg_copy_to_buffer)(req->src,
							   ctx->src.nents,
							   align_buf,
							   ctx->src.proc_bytes);
			req->src = aligned_sg;
			ctx->src.nents = 1;

			if (copied != ctx->src.proc_bytes) {
				req->dst = ctx->dst.unaligned_sg;
				req->src = ctx->src.unaligned_sg;
				kzfree(align_buf);
				return -ENODATA;
			}
		}
	}

	return eip97_aead_dma_map(req);
}

static bool eip97_aead_limits_conform(struct aead_request *req)
{
	if (!IS_ALIGNED(req->assoclen + req->cryptlen, AES_BLOCK_SIZE))
		return false;

	if (unlikely(req->assoclen > EIP97_AEAD_MAX_UINT_LEN))
		return false;

	if (unlikely(req->cryptlen > EIP97_AEAD_MAX_UINT_LEN))
		return false;

	return true;
}

static int eip97_aead_enc(struct aead_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct eip97_aead_tfm_ctx *tfm_ctx = crypto_tfm_ctx(tfm);
	struct eip97_aead_req_ctx *ctx = req_ctx(req);

	if (!eip97_aead_limits_conform(req))
		return -EINVAL;

	ctx->dir = EIP97_ENCRYPT;
	ctx->type = EIP97_AEAD;
	ctx->src.proc_bytes = req->assoclen + req->cryptlen;
	ctx->dst.proc_bytes = req->assoclen + req->cryptlen +
			      tfm_ctx->auth_bytes;

	atomic_inc(&(tfm_ctx->cryp->debug.total_ops));

	return eip97_aead_dma_align(req);
}

static int eip97_aead_dec(struct aead_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct eip97_aead_tfm_ctx *tfm_ctx = crypto_tfm_ctx(tfm);
	struct eip97_aead_req_ctx *ctx = req_ctx(req);

	if (!eip97_aead_limits_conform(req))
		return -EINVAL;

	ctx->dir = EIP97_DECRYPT;
	ctx->type = EIP97_AEAD;
	ctx->src.proc_bytes = req->assoclen + req->cryptlen;
	ctx->dst.proc_bytes = req->assoclen + req->cryptlen -
			      tfm_ctx->auth_bytes;

	atomic_inc(&(tfm_ctx->cryp->debug.total_ops));

	return eip97_aead_dma_align(req);
}

static int eip97_aead_cra_init_aes_gcm(struct crypto_aead *aead)
{
	struct eip97_aead_tfm_ctx *ctx = crypto_tfm_ctx(&aead->base);

	ctx->cryp = eip97_find_dev();
	if (!ctx->cryp) {
		pr_err("EIP97 aead: Can't find crypto device\n");
		return -ENODEV;
	}

	aead->reqsize = sizeof(struct eip97_aead_req_ctx);

	ctx->mode = AES_GCM;
	ctx->aes_ctr = crypto_alloc_skcipher("ctr(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(ctx->aes_ctr)) {
		pr_err("EIP97 aead: Error allocating skcipher 'ctr(aes)'\n");
		return PTR_ERR(ctx->aes_ctr);
	}

	return 0;
}

static void eip97_aead_cra_exit_aes_gcm(struct crypto_aead *aead)
{
	struct eip97_aead_tfm_ctx *ctx = crypto_tfm_ctx(&aead->base);

	crypto_free_skcipher(ctx->aes_ctr);
}

static struct aead_alg aead_algs[] = {
	{
		.setkey		= eip97_aes_gcm_setkey,
		.setauthsize	= eip97_aes_gcm_setauthsize,
		.encrypt	= eip97_aead_enc,
		.decrypt	= eip97_aead_dec,
		.ivsize		= 12,
		.init		= eip97_aead_cra_init_aes_gcm,
		.exit		= eip97_aead_cra_exit_aes_gcm,
		.maxauthsize	= AES_BLOCK_SIZE,
		.base = {
			.cra_name	 = "gcm(aes)",
			.cra_driver_name = "gcm-aes-eip97",
			.cra_priority	 = 400,
			.cra_flags	 = CRYPTO_ALG_ASYNC,
			.cra_blocksize	 = 1,
			.cra_ctxsize	 = sizeof(struct eip97_aead_tfm_ctx),
			.cra_alignmask	 = 0xf,
			.cra_module	 = THIS_MODULE,
		},
	},
};

int eip97_aead_register_algs(void)
{
	int err, i;

	for (i = 0; i < ARRAY_SIZE(aead_algs); i++) {
		err = crypto_register_aead(&aead_algs[i]);
		if (err)
			goto err_aead_algs;
	}

	return 0;

err_aead_algs:
	while (i--)
		crypto_unregister_aead(&aead_algs[i]);

	return err;
}

void eip97_aead_start_alg_tests(struct eip97_debug *dbg)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(aead_algs); i++)
		eip97_test_aead(&aead_algs[i], dbg);
}

void eip97_aead_unregister_algs(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(aead_algs); i++)
		crypto_unregister_aead(&aead_algs[i]);
}
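
/* Usage sketch (illustrative, not part of the driver): consumers reach the
 * accelerated transform through the regular kernel crypto API, by name
 * "gcm(aes)" or explicitly as "gcm-aes-eip97". The snippet below is a
 * minimal, untested example for a linear buffer laid out as AAD followed
 * by plaintext, with room for the 16-byte tag appended on encryption; note
 * that eip97_aead_limits_conform() additionally requires assoclen +
 * cryptlen to be a multiple of AES_BLOCK_SIZE.
 *
 *	struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
 *	struct aead_request *areq;
 *	DECLARE_CRYPTO_WAIT(wait);
 *	struct scatterlist sg;
 *	int err;
 *
 *	crypto_aead_setkey(tfm, key, 16);            // 128-bit key
 *	crypto_aead_setauthsize(tfm, 16);            // full GCM tag
 *	areq = aead_request_alloc(tfm, GFP_KERNEL);
 *	sg_init_one(&sg, buf, assoclen + ptlen + 16);
 *	aead_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				  crypto_req_done, &wait);
 *	aead_request_set_ad(areq, assoclen);
 *	aead_request_set_crypt(areq, &sg, &sg, ptlen, iv); // iv: 12-byte nonce
 *	err = crypto_wait_req(crypto_aead_encrypt(areq), &wait);
 */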