/*
 * Driver for EIP97 cryptographic accelerator.
 *
 * Copyright (c) 2018 AVM GmbH
 *
 * SPDX-License-Identifier: GPL-2.0-only
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This driver is heavily based on the Mediatek driver from Ryder Lee.
 */

#include <crypto/internal/hash.h>
#include <crypto/sha.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include "eip97-platform.h"
#include "eip97-ahash.h"
#include "eip97-ahash-raw.h"
#include "eip97-ahash-utester.h"

#define EIP97_AHASH_ALIGN_MASK		(sizeof(u32) - 1)

//TODO: In current mainline kernels these come from include/crypto/hmac.h
#define HMAC_IPAD_VALUE 0x36
#define HMAC_OPAD_VALUE 0x5C

#define EIP97_AHASH_MAX_BLOCK_SIZE	SHA512_BLOCK_SIZE
#define EIP97_HMAC_IPAD_SIZE		EIP97_AHASH_MAX_BLOCK_SIZE
#define EIP97_HMAC_OPAD_SIZE		EIP97_AHASH_MAX_BLOCK_SIZE
#define EIP97_AHASH_MAX_PADDING_SIZE	(SHA512_BLOCK_SIZE + 16)

#define EIP97_CT_CTRL_WORDS_IN_CT	BIT(25)
#define EIP97_AHASH_START		BIT(4)
#define EIP97_AHASH_CONTINUE		BIT(5)

/* Loose integer overflow requirements for lengths:
 * - able to add two or three lengths without overflow
 * - spare headroom if actually adding three lengths
 * - able to store lengths as large as possible
 */
#define EIP97_AHASH_MAX_UINT_LEN	(UINT_MAX / 8)

/* Strict integer overflow requirement for block counts:
 * - able to multiply with EIP97_AHASH_MAX_BLOCK_SIZE to gain a byte count
 */
#define EIP97_AHASH_MAX_BLOCKS		(U32_MAX / EIP97_AHASH_MAX_BLOCK_SIZE)

struct eip97_ahash_req_ctx {
	struct scatterlist *unaligned_sg;
	u32 nents;
	enum eip97_op_type type;
	struct scatterlist ipad_sg[2];
	u32 ipad_bytes;
	struct scatterlist *penultimate;
	struct scatterlist *unpadded_sg;
	u8 *padding;
	struct scatterlist padding_sg[2];
	u32 padding_bytes;
	struct scatterlist *truncated_sg;
	u32 truncated_bytes;
	unsigned long truncated_page_link;
	struct eip97_ahash_info info;
	dma_addr_t info_dma;
};

enum ahash_mode {
	HMAC_SHA512,
	HMAC_SHA1,
};

struct eip97_ahash_hmac_tfm_ctx {
	struct crypto_shash *shash;
	u8 ipad[EIP97_HMAC_IPAD_SIZE] __aligned(sizeof(u32));
	u8 opad[EIP97_HMAC_OPAD_SIZE] __aligned(sizeof(u32));
	u32 xpad_bytes;
};

struct eip97_ahash_tfm_ctx {
	struct eip97_cryp *cryp;
	bool hmac;
	enum ahash_mode mode;
	struct eip97_ahash_hmac_tfm_ctx hmac_ctx[0];
};

struct eip97_ahash_info_dma_sync {
	struct device *dev;
	dma_addr_t addr;
};

static int eip97_ahash_update(struct ahash_request *req)
{
	BUG(); //slow path for hmac-only mode not implemented
	return -ENOSYS;
}

static int eip97_ahash_final(struct ahash_request *req)
{
	BUG(); //slow path for hmac-only mode not implemented
	return -ENOSYS;
}

static int eip97_ahash_complete(struct ahash_request *req, int err,
				bool callback)
{
	struct crypto_async_request *areq = &req->base;

	if (callback)
		areq->complete(areq, err);

	return err;
}

static int eip97_ahash_untruncate(struct ahash_request *req, int err,
				  bool callback)
{
	struct eip97_ahash_req_ctx *ctx = ahash_request_ctx(req);

	if (likely(ctx->truncated_sg)) {
		ctx->truncated_sg->length += ctx->truncated_bytes;
		ctx->truncated_sg->page_link = ctx->truncated_page_link;
	}

	return eip97_ahash_complete(req, err, callback);
}

static int eip97_ahash_dma_unalign(struct ahash_request *req, int err,
				   bool callback)
{
	struct eip97_ahash_req_ctx *ctx = ahash_request_ctx(req);

	if (req->src != ctx->unaligned_sg) {
		kzfree(sg_virt(req->src));
		req->src = ctx->unaligned_sg;
	}

	return eip97_ahash_untruncate(req, err, callback);
}

static int eip97_ahash_unappend_padding(struct ahash_request *req, int err,
					bool callback)
{
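	/*
	 * Undo eip97_ahash_append_padding(): detach the padding scatterlist,
	 * restore the original last entry, drop the padding bytes from
	 * req->nbytes and free the padding buffer before continuing the
	 * unwind chain.
	 */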
	struct eip97_ahash_req_ctx *ctx = ahash_request_ctx(req);
	struct scatterlist *last;

	if (!ctx->penultimate) {
		req->src = ctx->unpadded_sg;
	} else if (sg_is_chain(ctx->penultimate)) {
		sg_chain(ctx->penultimate, 1, ctx->unpadded_sg);
	} else {
		last = ctx->penultimate + 1;
		sg_set_buf(last, sg_virt(ctx->padding_sg),
			   ctx->padding_sg->length);
		sg_mark_end(last);
	}

	req->nbytes -= ctx->padding_bytes;
	ctx->nents--;
	kzfree(ctx->padding);

	return eip97_ahash_dma_unalign(req, err, callback);
}

static int eip97_ahash_unprepend_ipad(struct ahash_request *req, int err,
				      bool callback)
{
	struct eip97_ahash_req_ctx *ctx = ahash_request_ctx(req);

	req->src = sg_next(req->src);
	req->nbytes -= ctx->ipad_bytes;
	ctx->nents--;

	return eip97_ahash_unappend_padding(req, err, callback);
}

static int eip97_ahash_dma_unmap(struct ahash_request *req, int err,
				 bool callback)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct eip97_ahash_tfm_ctx *tfm_ctx = crypto_tfm_ctx(tfm);
	struct device *dev = tfm_ctx->cryp->dev;
	struct eip97_ahash_req_ctx *ctx = ahash_request_ctx(req);

	dma_unmap_single(dev, ctx->info_dma, sizeof(ctx->info),
			 DMA_BIDIRECTIONAL);

	/* NOTE: Per Documentation/DMA-API.txt, we must pass ctx->nents */
	dma_unmap_sg(dev, req->src, ctx->nents, DMA_TO_DEVICE);

	return eip97_ahash_unprepend_ipad(req, err, callback);
}

static void eip97_ahash_gather_result(struct ahash_request *req)
{
	struct eip97_ahash_req_ctx *ctx = ahash_request_ctx(req);
	struct crypto_tfm *tfm = req->base.tfm;
	struct eip97_ahash_tfm_ctx *tfm_ctx = crypto_tfm_ctx(tfm);
	struct eip97_ahash_hmac_tfm_ctx *hmac_ctx;
	struct crypto_ahash *ahash_tfm = crypto_ahash_reqtfm(req);
	size_t digest_bytes = crypto_ahash_digestsize(ahash_tfm);
	int err;

	eip97_read_state_le((u32 *)req->result, ctx->info.state,
			    SIZE_IN_WORDS(digest_bytes));

	if (!tfm_ctx->hmac) {
		eip97_ahash_dma_unmap(req, 0, true);
		return;
	}

	hmac_ctx = tfm_ctx->hmac_ctx;

	SHASH_DESC_ON_STACK(shash, hmac_ctx->shash);
	shash->tfm = hmac_ctx->shash;
	shash->flags = 0; /* not CRYPTO_TFM_REQ_MAY_SLEEP */

	/* Finish HMAC in software: outer hash over (key ^ opad) || inner digest */
	err = crypto_shash_init(shash) ?:
	      crypto_shash_update(shash, hmac_ctx->opad,
				  hmac_ctx->xpad_bytes) ?:
	      crypto_shash_finup(shash, req->result, digest_bytes,
				 req->result);

	eip97_ahash_dma_unmap(req, err, true);
}

void eip97_ahash_finish(enum eip97_op_type *op_type, eip97_err_t err,
			unsigned int bytes)
{
	struct eip97_ahash_req_ctx *ctx =
		container_of(op_type, struct eip97_ahash_req_ctx, type);
	struct ahash_request *req =
		container_of((void *)ctx, struct ahash_request, __ctx);

	if (err & EIP97_ERR_CHECK_FAIL) {
		eip97_ahash_dma_unmap(req, -EBADMSG, true);
		return;
	} else if (err & EIP97_ERR_TIMEOUT) {
		eip97_ahash_dma_unmap(req, -ETIMEDOUT, true);
		return;
	} else if (err) {
		eip97_ahash_dma_unmap(req, -EPROTO, true);
		return;
	}

	if (bytes != req->nbytes)
		eip97_ahash_dma_unmap(req, -ENODATA, true);
	else
		eip97_ahash_gather_result(req);
}

static void eip97_ahash_info_dma_sync(void *ctx)
{
	struct eip97_ahash_info_dma_sync *sync_ctx =
		(struct eip97_ahash_info_dma_sync *)ctx;

	dma_sync_single_for_device(sync_ctx->dev, sync_ctx->addr,
				   sizeof(struct eip97_ahash_info),
				   DMA_BIDIRECTIONAL);
}

static int eip97_sha_create_tfm(struct eip97_sha_request *req,
				u32 result_words)
{
	struct eip97_ahash_info *info = req->info;

	info->tfm[0] = 0;
	info->tfm[1] = 0;

	switch (req->mode) {
	case EIP97_SHA1:
		info->tfm[0] |= EIP97_TFM_SHA1;
		break;
	case EIP97_SHA512:
		info->tfm[0] |= EIP97_TFM_SHA512;
		break;
	default:
		return -EINVAL;
	}

	info->tfm[0] |= EIP97_TFM_HASH_OUT | EIP97_TFM_SIZE(result_words);
	info->tfm[1] |= EIP97_TFM_STORE_HASH_RESULT;

	return 0;
}

static int
eip97_sha_create_command_token(struct eip97_sha_request *req, u32 src_bytes,
			       u32 result_words)
{
	struct eip97_ahash_info *info = req->info;

	info->ctrl[0] = info->tfm[0] | EIP97_AHASH_CONTINUE;
	if (req->start)
		info->ctrl[0] |= EIP97_AHASH_START;
	info->ctrl[1] = info->tfm[1];

	if (src_bytes >> 17)
		return -EINVAL;

	BUILD_BUG_ON(sizeof(req->info->cmd) < SIZE_IN_BYTES(3));

	info->cmd[0] = 0x03020000 | src_bytes;
	info->cmd[1] = 0x21060000;
	info->cmd[2] = 0xe0e63802 | ((result_words & GENMASK(3, 0)) << 24);

	/* Token length: ARRAY_SIZE(info->ctrl) ctrl words plus 3 cmd words */
	return 5;
}

int eip97_sha_crypt(struct eip97_sha_request *req)
{
	struct eip97_ring *ring = req->ring;
	volatile struct eip97_cd *cmd_prep;
	volatile struct eip97_rd *res_prep;
	dma_addr_t tfm_dma;
	u32 cmd_words;
	int src_nents, dst_nents;
	u32 block_bytes;
	u32 result_words;
	u32 src_bytes;
	int err, ret;

#ifdef EIP97_SAFE_MODE
	if (!req->src || !req->src_blocks)
		return -EINVAL;
	if (req->src_blocks > EIP97_AHASH_MAX_BLOCKS)
		return -EINVAL;
	if (!req->base)
		return -EINVAL;
	if (req->ring_id < 0 || req->ring_id >= EIP97_RING_MAX)
		return -EINVAL;
	if (!ring)
		return -EINVAL;
	if (!req->info)
		return -EINVAL;
	if (!req->info_dma)
		return -EINVAL;
#endif

	switch (req->mode) {
	case EIP97_SHA1:
		block_bytes = SHA1_BLOCK_SIZE;
		result_words = SIZE_IN_WORDS(SHA1_DIGEST_SIZE);
		break;
	case EIP97_SHA512:
		block_bytes = SHA512_BLOCK_SIZE;
		result_words = SIZE_IN_WORDS(SHA512_DIGEST_SIZE);
		break;
	default:
		return -EINVAL;
	}

	err = eip97_sha_create_tfm(req, result_words);
	if (err)
		return err;

	src_bytes = req->src_blocks * block_bytes;

	ret = eip97_sha_create_command_token(req, src_bytes, result_words);
	if (ret < 0)
		return ret;
	cmd_words = ret;

	/* Build command descriptors */
	cmd_prep = ring->cmd_prep;
	tfm_dma = req->info_dma + sizeof(req->info->ctrl) +
		  sizeof(req->info->cmd);
	src_nents = E97_HK(eip97_create_cmd_descs)(req->src, src_bytes, ring,
						   req->info_dma,
						   EIP97_CT_CTRL_WORDS_IN_CT,
						   cmd_words, req->tag,
						   tfm_dma);
	if (src_nents < 0) {
		ring->cmd_prep = cmd_prep;
		return src_nents;
	}

	/* Build result descriptors, for whatever reason */
	res_prep = ring->res_prep;
	dst_nents = E97_HK(eip97_create_res_descs)(req->src, src_bytes, ring,
						   req->tag);
	if (dst_nents < 0) {
		ring->cmd_prep = cmd_prep;
		ring->res_prep = res_prep;
		return dst_nents;
	}

	/* Sync DMA, if any */
	if (req->sync)
		req->sync(req->sync_ctx);

	/* Make sure all changes to DMA ring are done before we start engine */
	wmb();

	/* Start transfer */
	writel(EIP97_CD_CNT((u32)src_nents),
	       req->base + CDR_PREP_COUNT(req->ring_id));
	writel(EIP97_RD_CNT((u32)dst_nents),
	       req->base + RDR_PREP_COUNT(req->ring_id));

	return -EINPROGRESS;
}

static int eip97_ahash_enqueue_hard(struct ahash_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct eip97_ahash_tfm_ctx *tfm_ctx = crypto_tfm_ctx(tfm);
	struct eip97_ahash_req_ctx *ctx = ahash_request_ctx(req);
	struct eip97_sha_request sha_req;
	struct eip97_ahash_info_dma_sync sync_ctx;
	struct eip97_cryp *cryp = READ_ONCE(tfm_ctx->cryp);
	enum eip97_ring_id ring_id;
	struct eip97_ring *ring;
	int tag, ret;

	memset(&sha_req, 0, sizeof(sha_req));
	memset(&sync_ctx, 0, sizeof(sync_ctx));

	//early so eip97_alloc_tag() only needs spin_lock()
	local_bh_disable();

	tag = E97_HK(eip97_alloc_tag)(tfm_ctx->cryp, &(ctx->type));
	if (tag < 0) {
		local_bh_enable();
		return tag;
	}

	sync_ctx.dev = cryp->dev;
	sync_ctx.addr = ctx->info_dma;

	BUILD_BUG_ON(NR_CPUS > EIP97_RING_MAX);
	ring_id = get_cpu() % EIP97_RING_MAX;
	ring = cryp->ring[ring_id];

	if (cd_ring_empty(ring) && rd_ring_empty(ring))
		atomic_inc(&(cryp->debug.ring_empty));

	switch (tfm_ctx->mode) {
	case HMAC_SHA512:
		if (req->nbytes % SHA512_BLOCK_SIZE) {
			ret = -EINVAL;
			break;
		}
		sha_req.mode = EIP97_SHA512;
		sha_req.src = req->src;
		sha_req.src_blocks = req->nbytes / SHA512_BLOCK_SIZE;
		sha_req.start = true;
		sha_req.tag = tag;
		sha_req.base = cryp->base;
		sha_req.ring_id = ring_id;
		sha_req.ring = ring;
		sha_req.info = &(ctx->info);
		sha_req.info_dma = ctx->info_dma;
		sha_req.sync = eip97_ahash_info_dma_sync;
		sha_req.sync_ctx = &sync_ctx;
		ret = eip97_sha_crypt(&sha_req);
		break;
	case HMAC_SHA1:
		if (req->nbytes % SHA1_BLOCK_SIZE) {
			ret = -EINVAL;
			break;
		}
		sha_req.mode = EIP97_SHA1;
		sha_req.src = req->src;
		sha_req.src_blocks = req->nbytes / SHA1_BLOCK_SIZE;
		sha_req.start = true;
		sha_req.tag = tag;
		sha_req.base = cryp->base;
		sha_req.ring_id = ring_id;
		sha_req.ring = ring;
		sha_req.info = &(ctx->info);
		sha_req.info_dma = ctx->info_dma;
		sha_req.sync = eip97_ahash_info_dma_sync;
		sha_req.sync_ctx = &sync_ctx;
		ret = eip97_sha_crypt(&sha_req);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	put_cpu();
	local_bh_enable();

	if (ret != -EINPROGRESS)
		cryp->tags[tag] = 0;
	else
		atomic_inc(&(cryp->debug.tx_ops));

	return ret;
}

static int eip97_ahash_enqueue(struct ahash_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct eip97_ahash_tfm_ctx *tfm_ctx = crypto_tfm_ctx(tfm);
	int err;

	err = eip97_ahash_enqueue_hard(req);
	if (err == -EINPROGRESS)
		return -EINPROGRESS;

	if (err == -EAGAIN)
		atomic_inc(&(tfm_ctx->cryp->debug.ring_full));
	WARN_ONCE(atomic_read(&(tfm_ctx->cryp->debug.ring_full)) > 16,
		  "Please increase EIP97 driver descriptor ring size, tag ring size or implement software backlog!");

	return eip97_ahash_dma_unmap(req, err, false);
}

static int eip97_ahash_dma_map(struct ahash_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct eip97_ahash_tfm_ctx *tfm_ctx = crypto_tfm_ctx(tfm);
	struct device *dev = tfm_ctx->cryp->dev;
	struct eip97_ahash_req_ctx *ctx = ahash_request_ctx(req);
	int dma_nents;

	ctx->info_dma = E97_HK(dma_map_single)(dev, &(ctx->info),
					       sizeof(ctx->info),
					       DMA_BIDIRECTIONAL);
	if (unlikely(E97_HK(dma_mapping_error)(dev, ctx->info_dma)))
		goto err_exit;

	dma_nents = E97_HK(dma_map_sg)(dev, req->src, ctx->nents,
				       DMA_TO_DEVICE);
	if (unlikely(!dma_nents))
		goto src_map_error;

	return eip97_ahash_enqueue(req);

src_map_error:
	dma_unmap_single(dev, ctx->info_dma, sizeof(ctx->info),
			 DMA_BIDIRECTIONAL);
err_exit:
	return eip97_ahash_unprepend_ipad(req, -EINVAL, false);
}

static int eip97_ahash_prepend_ipad(struct ahash_request *req)
{
	struct eip97_ahash_req_ctx *ctx = ahash_request_ctx(req);
	struct eip97_ahash_tfm_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
	struct eip97_ahash_hmac_tfm_ctx *hmac_ctx = tfm_ctx->hmac_ctx;

	sg_init_table(ctx->ipad_sg, ARRAY_SIZE(ctx->ipad_sg));
	sg_set_buf(ctx->ipad_sg, hmac_ctx->ipad, hmac_ctx->xpad_bytes);
	sg_chain(ctx->ipad_sg, ARRAY_SIZE(ctx->ipad_sg), req->src);

	req->src = ctx->ipad_sg;
	req->nbytes += hmac_ctx->xpad_bytes;
	ctx->ipad_bytes = hmac_ctx->xpad_bytes;
	ctx->nents++;

	return eip97_ahash_dma_map(req);
}

/*
 * The purpose of this padding is to ensure that the padded message is a
 * multiple of 1024 bits (SHA384/SHA512).
 * The bit "1" is appended at the end of the message followed by
 * "padbits-1" zero bits. Then a 128-bit block (SHA384/SHA512) equal to the
 * message length in bits is appended.
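 *
 * Note that the length encoded here is req->nbytes plus add_len, i.e. it
 * already accounts for the HMAC ipad block that eip97_ahash_prepend_ipad()
 * will place in front of the message, so the engine ends up hashing a
 * correctly padded (ipad || message) input.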
 *
 * padbytes is calculated as follows (with index = length mod 128):
 * - if index < 112 then padbytes = 112 - index
 * - else padbytes = (128 + 112) - index
 */
static void eip97_ahash_prep_sha_128_pad(struct ahash_request *req,
					 u32 add_len)
{
	struct eip97_ahash_req_ctx *ctx = ahash_request_ctx(req);
	u32 index, padbytes;
	u64 bits[2];
	size_t size = req->nbytes + add_len;

	BUILD_BUG_ON(EIP97_AHASH_MAX_PADDING_SIZE < (128 + 16));

	bits[1] = cpu_to_be64(((u64)size) << 3);
	bits[0] = cpu_to_be64(((u64)size) >> 61);

	index = size & 0x7f;
	padbytes = (index < 112) ? (112 - index) : ((128 + 112) - index);

	*(ctx->padding) = 0x80;
	memset(ctx->padding + 1, 0, padbytes - 1);
	memcpy(ctx->padding + padbytes, bits, 16);

	ctx->padding_bytes = padbytes + 16;
}

/*
 * The purpose of this padding is to ensure that the padded message is a
 * multiple of 512 bits (SHA1/SHA224/SHA256).
 * The bit "1" is appended at the end of the message followed by
 * "padbits-1" zero bits. Then a 64-bit block (SHA1/SHA224/SHA256)
 * equal to the message length in bits is appended.
 *
 * padbytes is calculated as follows (with index = length mod 64):
 * - if index < 56 then padbytes = 56 - index
 * - else padbytes = (64 + 56) - index
 */
static void eip97_ahash_prep_sha_64_pad(struct ahash_request *req,
					u32 add_len)
{
	struct eip97_ahash_req_ctx *ctx = ahash_request_ctx(req);
	u32 index, padbytes;
	u64 bits;
	size_t size = req->nbytes + add_len;

	BUILD_BUG_ON(EIP97_AHASH_MAX_PADDING_SIZE < (64 + 8));

	bits = cpu_to_be64(((u64)size) << 3);

	index = size & 0x3f;
	padbytes = (index < 56) ? (56 - index) : ((64 + 56) - index);

	*(ctx->padding) = 0x80;
	memset(ctx->padding + 1, 0, padbytes - 1);
	memcpy(ctx->padding + padbytes, &bits, 8);

	ctx->padding_bytes = padbytes + 8;
}

static int eip97_ahash_append_padding(struct ahash_request *req)
{
	struct eip97_ahash_tfm_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);
	struct eip97_ahash_req_ctx *ctx = ahash_request_ctx(req);
	struct eip97_ahash_hmac_tfm_ctx *hmac_ctx;
	struct scatterlist *last;

	ctx->padding = E97_HK(kmalloc)(EIP97_AHASH_MAX_PADDING_SIZE,
				       GFP_ATOMIC);
	if (!ctx->padding)
		return eip97_ahash_dma_unalign(req, -ENOMEM, false);

	switch (tfm_ctx->mode) {
	case HMAC_SHA512:
		hmac_ctx = tfm_ctx->hmac_ctx;
		eip97_ahash_prep_sha_128_pad(req, hmac_ctx->xpad_bytes);
		break;
	case HMAC_SHA1:
		hmac_ctx = tfm_ctx->hmac_ctx;
		eip97_ahash_prep_sha_64_pad(req, hmac_ctx->xpad_bytes);
		break;
	default:
		kfree(ctx->padding);
		return eip97_ahash_dma_unalign(req, -EINVAL, false);
	}

	sg_init_table(ctx->padding_sg, 2);
	sg_set_buf(ctx->padding_sg + 1, ctx->padding, ctx->padding_bytes);
	sg_mark_end(ctx->padding_sg + 1);

	if (!ctx->penultimate) {
		sg_set_buf(ctx->padding_sg, sg_virt(req->src),
			   req->src->length);
		ctx->unpadded_sg = req->src;
		req->src = ctx->padding_sg;
	} else if (sg_is_chain(ctx->penultimate)) {
		last = sg_chain_ptr(ctx->penultimate);
		sg_set_buf(ctx->padding_sg, sg_virt(last), last->length);
		sg_chain(ctx->penultimate, 1, ctx->padding_sg);
		ctx->unpadded_sg = last;
	} else {
		last = ctx->penultimate + 1;
		sg_set_buf(ctx->padding_sg, sg_virt(last), last->length);
		sg_chain(last, 1, ctx->padding_sg);
		ctx->unpadded_sg = NULL;
	}

	req->nbytes += ctx->padding_bytes;
	ctx->nents++;

	return eip97_ahash_prepend_ipad(req);
}

static bool eip97_ahash_check_aligned(struct scatterlist *sg,
				      struct scatterlist **penultimate,
				      u32 *nents)
{
	bool ret = true;

	*penultimate = NULL;
	*nents = 0;

	while (sg) {
		if (!sg_is_last(sg)) {
			*penultimate = sg_is_chain(sg + 1) ? sg + 1 : sg;
		}
		(*nents)++;
		if (!IS_ALIGNED(sg->offset, sizeof(u32)))
			ret = false;
		sg = sg_next(sg);
	}

	return ret;
}

static int eip97_ahash_dma_align(struct ahash_request *req)
{
	struct eip97_ahash_req_ctx *ctx = ahash_request_ctx(req);
	struct crypto_tfm *tfm = req->base.tfm;
	struct eip97_ahash_tfm_ctx *tfm_ctx = crypto_tfm_ctx(tfm);
	struct crypto_alg *alg = tfm->__crt_alg;
	struct scatterlist *aligned_sg;
	size_t copied;
	u8 *align_buf;
	bool aligned;

	ctx->unaligned_sg = req->src;

	aligned = eip97_ahash_check_aligned(req->src, &ctx->penultimate,
					    &ctx->nents);
	if (!aligned) {
		align_buf = E97_HK(kmalloc)(req->nbytes +
					    sizeof(struct scatterlist),
					    GFP_ATOMIC);
		if (!align_buf)
			return eip97_ahash_untruncate(req, -ENOMEM, false);

		aligned_sg = (struct scatterlist *)(align_buf + req->nbytes);
		sg_set_buf(aligned_sg, align_buf, req->nbytes);
		sg_mark_end(aligned_sg);

		WARN_ONCE(alg->cra_flags & CRYPTO_ALG_TESTED,
			  "EIP97 acceleration of ahash slows down because of unaligned data input!");
		atomic_inc(&(tfm_ctx->cryp->debug.unaligned_ops));

		copied = E97_HK(sg_copy_to_buffer)(req->src, ctx->nents,
						   align_buf, req->nbytes);
		if (copied != req->nbytes) {
			kzfree(align_buf);
			return eip97_ahash_untruncate(req, -ENODATA, false);
		}

		req->src = aligned_sg;
		ctx->nents = 1;
		ctx->penultimate = NULL;
	}

	return eip97_ahash_append_padding(req);
}

static int eip97_ahash_truncate(struct ahash_request *req)
{
	struct eip97_ahash_req_ctx *ctx = ahash_request_ctx(req);
	struct scatterlist *sg = req->src;
	size_t left = req->nbytes;

	ctx->truncated_sg = NULL;
	ctx->truncated_bytes = 0;

	while (sg) {
		if (left <= sg->length) {
			ctx->truncated_sg = sg;
			ctx->truncated_bytes = sg->length - left;
			ctx->truncated_page_link = sg->page_link;
			sg_mark_end(sg);
			sg->length = left;
			left = 0;
			break;
		}
		left -= sg->length;
		sg = sg_next(sg);
	}

	if (left)
		return -EINVAL;

	return eip97_ahash_dma_align(req);
}

static int eip97_ahash_finup(struct ahash_request *req)
{
	struct eip97_ahash_tfm_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm);

	if (!tfm_ctx->hmac) //hmac-only, for now
		return -EINVAL;

	if (unlikely(req->nbytes > EIP97_AHASH_MAX_UINT_LEN))
		return -EINVAL;

	atomic_inc(&(tfm_ctx->cryp->debug.total_ops));

	return eip97_ahash_truncate(req);
}

static int eip97_ahash_init(struct ahash_request *req)
{
	struct eip97_ahash_req_ctx *ctx = ahash_request_ctx(req);

	ctx->type = EIP97_AHASH;

	return 0;
}

static int eip97_ahash_digest(struct ahash_request *req)
{
	return eip97_ahash_init(req) ?: eip97_ahash_finup(req);
}

static int eip97_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			      u32 keylen)
{
	struct eip97_ahash_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
	struct eip97_ahash_hmac_tfm_ctx *hctx = ctx->hmac_ctx;
	size_t block_bytes = crypto_shash_blocksize(hctx->shash);
	size_t digest_bytes = crypto_shash_digestsize(hctx->shash);
	int err, i;

	SHASH_DESC_ON_STACK(shash, hctx->shash);
	shash->tfm = hctx->shash;
	shash->flags = crypto_shash_get_flags(hctx->shash) &
		       CRYPTO_TFM_REQ_MAY_SLEEP;

	/* Keys longer than one block are hashed down first, as per HMAC */
	if (keylen > block_bytes) {
		err = crypto_shash_digest(shash, key, keylen, hctx->ipad);
		if (err)
			return err;
		keylen = digest_bytes;
	} else {
		memcpy(hctx->ipad, key, keylen);
	}

	memset(hctx->ipad + keylen, 0, block_bytes - keylen);
	memcpy(hctx->opad, hctx->ipad, block_bytes);

	for (i = 0; i < block_bytes; i++) {
		hctx->ipad[i] ^= HMAC_IPAD_VALUE;
		hctx->opad[i] ^= HMAC_OPAD_VALUE;
	}

	hctx->xpad_bytes = block_bytes;

	return 0;
}

static int eip97_ahash_cra_init(struct crypto_tfm *tfm, const char *hmac_base)
{
	struct eip97_ahash_tfm_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->cryp = eip97_find_dev();
	if (!ctx->cryp) {
pr_err("EIP97 ahash: Can't find crypto device\n"); return -ENODEV; } crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), sizeof(struct eip97_ahash_req_ctx)); if (hmac_base) { struct eip97_ahash_hmac_tfm_ctx *hmac_ctx = ctx->hmac_ctx; ctx->hmac = true; hmac_ctx->shash = crypto_alloc_shash(hmac_base, 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(hmac_ctx->shash)) { pr_err("EIP97 hmac ahash: Can't find base hash '%s'\n", hmac_base); return PTR_ERR(hmac_ctx->shash); } } else { ctx->hmac = false; } return 0; } static int eip97_ahash_cra_init_hmac_sha512(struct crypto_tfm *tfm) { struct eip97_ahash_tfm_ctx *ctx = crypto_tfm_ctx(tfm); ctx->mode = HMAC_SHA512; return eip97_ahash_cra_init(tfm, "sha512"); } static int eip97_ahash_cra_init_hmac_sha1(struct crypto_tfm *tfm) { struct eip97_ahash_tfm_ctx *ctx = crypto_tfm_ctx(tfm); ctx->mode = HMAC_SHA1; return eip97_ahash_cra_init(tfm, "sha1"); } static void eip97_ahash_cra_exit(struct crypto_tfm *tfm) { struct eip97_ahash_tfm_ctx *ctx = crypto_tfm_ctx(tfm); if (ctx->hmac) crypto_free_shash(ctx->hmac_ctx->shash); } static struct ahash_alg ahash_algs[] = { { .init = eip97_ahash_init, .update = eip97_ahash_update, .final = eip97_ahash_final, .finup = eip97_ahash_finup, .digest = eip97_ahash_digest, .setkey = eip97_ahash_setkey, .halg.digestsize = SHA512_DIGEST_SIZE + BUILD_BUG_ON_ZERO(SHA512_DIGEST_SIZE > EIP97_AHASH_MAX_DIGEST_BYTES), .halg.statesize = sizeof(struct eip97_ahash_req_ctx), .halg.base = { .cra_name = "hmac(sha512)", .cra_driver_name = "hmac-sha512-eip97", .cra_priority = 400, .cra_flags = CRYPTO_ALG_ASYNC, .cra_blocksize = SHA512_BLOCK_SIZE + BUILD_BUG_ON_ZERO(SHA512_BLOCK_SIZE > EIP97_AHASH_MAX_BLOCK_SIZE), .cra_ctxsize = sizeof(struct eip97_ahash_tfm_ctx) + sizeof(struct eip97_ahash_hmac_tfm_ctx), .cra_alignmask = EIP97_AHASH_ALIGN_MASK, .cra_module = THIS_MODULE, .cra_init = eip97_ahash_cra_init_hmac_sha512, .cra_exit = eip97_ahash_cra_exit, } }, { .init = eip97_ahash_init, .update = eip97_ahash_update, .final = eip97_ahash_final, .finup = eip97_ahash_finup, .digest = eip97_ahash_digest, .setkey = eip97_ahash_setkey, .halg.digestsize = SHA1_DIGEST_SIZE + BUILD_BUG_ON_ZERO(SHA1_DIGEST_SIZE > EIP97_AHASH_MAX_DIGEST_BYTES), .halg.statesize = sizeof(struct eip97_ahash_req_ctx), .halg.base = { .cra_name = "hmac(sha1)", .cra_driver_name = "hmac-sha1-eip97", .cra_priority = 400, .cra_flags = CRYPTO_ALG_ASYNC, .cra_blocksize = SHA1_BLOCK_SIZE + BUILD_BUG_ON_ZERO(SHA1_BLOCK_SIZE > EIP97_AHASH_MAX_BLOCK_SIZE), .cra_ctxsize = sizeof(struct eip97_ahash_tfm_ctx) + sizeof(struct eip97_ahash_hmac_tfm_ctx), .cra_alignmask = EIP97_AHASH_ALIGN_MASK, .cra_module = THIS_MODULE, .cra_init = eip97_ahash_cra_init_hmac_sha1, .cra_exit = eip97_ahash_cra_exit, } }, }; int eip97_ahash_register_algs(void) { int err, i; for (i = 0; i < ARRAY_SIZE(ahash_algs); i++) { err = crypto_register_ahash(&ahash_algs[i]); if (err) goto err_ahash_algs; } return 0; err_ahash_algs: while (i--) crypto_unregister_ahash(&ahash_algs[i]); return err; } void eip97_ahash_start_alg_tests(struct eip97_debug *dbg) { int i; for (i = 0; i < ARRAY_SIZE(ahash_algs); i++) eip97_test_ahash(&ahash_algs[i], dbg); } void eip97_ahash_unregister_algs(void) { int i; for (i = 0; i < ARRAY_SIZE(ahash_algs); i++) crypto_unregister_ahash(&ahash_algs[i]); }