/*
 * Driver for EIP97 cryptographic accelerator.
 *
 * Copyright (c) 2018 AVM GmbH
 *
 * SPDX-License-Identifier: GPL-2.0-only
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This driver is heavily based on the Mediatek driver from Ryder Lee.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/atomic.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <crypto/internal/skcipher.h>

#include "eip97-platform.h"
#include "eip97-skcipher.h"
#include "eip97-skcipher-raw.h"
#include "eip97-skcipher-utester.h"

#define EIP97_SKCIPHER_MAX_KEY_WORDS 8

/* Loose integer overflow requirements:
 * - able to add two or three lengths without overflow
 * - spare headroom if actually adding three lengths
 * - able to store lengths as large as possible
 */
#define EIP97_SKCIPHER_MAX_UINT_LEN (UINT_MAX/8)
#define EIP97_SKCIPHER_MAX_U32_LEN (U32_MAX/8)

enum skcipher_mode {
	AES_CBC,
};

struct eip97_skcipher_tfm_ctx {
	struct eip97_cryp *cryp;
	enum skcipher_mode mode;
	u32 key_words;
	u32 key[EIP97_SKCIPHER_MAX_KEY_WORDS];
};

struct eip97_skcipher_sg {
	struct scatterlist *unaligned_sg;
	u32 unaligned_nents;
	u32 nents;
};

struct eip97_skcipher_req_ctx {
	struct eip97_skcipher_sg src;
	struct eip97_skcipher_sg dst;
	enum crypt_direction dir;
	struct eip97_skcipher_info info;
	dma_addr_t info_dma;
	enum eip97_op_type type;
};

struct eip97_skcipher_info_dma_sync {
	struct device *dev;
	dma_addr_t addr;
};

static inline struct eip97_skcipher_req_ctx *
req_ctx(struct skcipher_request *req)
{
	return (struct eip97_skcipher_req_ctx *)req->__ctx;
}
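
/*
 * Note (added for clarity): the per-request context above lives in the
 * memory the crypto core reserves behind each skcipher_request, sized by
 * the ->reqsize set in the init callback below.  Reading req->__ctx
 * directly is therefore equivalent to the generic helper, e.g.
 * (sketch, not part of the original driver):
 *
 *	struct eip97_skcipher_req_ctx *ctx = skcipher_request_ctx(req);
 *
 * provided the request was allocated for this transform.
 */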
static int eip97_skcipher_setkey(struct crypto_skcipher *skcipher,
				 const u8 *key, u32 key_bytes)
{
	struct eip97_skcipher_tfm_ctx *ctx = crypto_tfm_ctx(&skcipher->base);

	switch (key_bytes) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_192:
	case AES_KEYSIZE_256:
		break;
	default:
		crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	ctx->key_words = SIZE_IN_WORDS(key_bytes);
	memcpy(ctx->key, key, key_bytes);

	return 0;
}

static int eip97_skcipher_complete(struct skcipher_request *req, int err,
				   bool callback)
{
	struct crypto_async_request *areq = &req->base;

	if (callback)
		areq->complete(areq, err);

	return err;
}

static int eip97_skcipher_dma_unalign(struct skcipher_request *req, int err,
				      bool callback)
{
	struct eip97_skcipher_req_ctx *ctx = req_ctx(req);
	size_t copied = 0;
	u8 *align_buf = NULL;

	if (req->src != ctx->src.unaligned_sg) {
		align_buf = sg_virt(req->src);
		req->src = ctx->src.unaligned_sg;
		ctx->src.nents = ctx->src.unaligned_nents;
	}

	if (req->dst != ctx->dst.unaligned_sg) {
		align_buf = sg_virt(req->dst);
		req->dst = ctx->dst.unaligned_sg;
		ctx->dst.nents = ctx->dst.unaligned_nents;

		if (err)
			goto exit;

		copied = E97_HK(sg_copy_from_buffer)(req->dst, ctx->dst.nents,
						     align_buf, req->cryptlen);
		if (copied != req->cryptlen)
			err = -ENODATA;
	}

exit:
	if (align_buf)
		kzfree(align_buf);

	return eip97_skcipher_complete(req, err, callback);
}

static int eip97_skcipher_dma_unmap(struct skcipher_request *req, int err,
				    bool callback)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct eip97_skcipher_tfm_ctx *tfm_ctx = crypto_tfm_ctx(tfm);
	struct eip97_skcipher_req_ctx *ctx = req_ctx(req);
	struct device *dev = tfm_ctx->cryp->dev;

	dma_unmap_single(dev, ctx->info_dma, sizeof(ctx->info), DMA_TO_DEVICE);

	/* NOTE: Per Documentation/DMA-API.txt, we must pass ctx->src.nents */
	if (req->src == req->dst)
		dma_unmap_sg(dev, req->src, ctx->src.nents, DMA_BIDIRECTIONAL);
	else {
		dma_unmap_sg(dev, req->src, ctx->src.nents, DMA_TO_DEVICE);
		dma_unmap_sg(dev, req->dst, ctx->dst.nents, DMA_FROM_DEVICE);
	}

	return eip97_skcipher_dma_unalign(req, err, callback);
}

void eip97_skcipher_finish(enum eip97_op_type *op_type, eip97_err_t err,
			   unsigned int bytes)
{
	struct eip97_skcipher_req_ctx *ctx = container_of(op_type,
			struct eip97_skcipher_req_ctx, type);
	struct skcipher_request *req = container_of((void *)ctx,
			struct skcipher_request, __ctx);

	if (err & EIP97_ERR_CHECK_FAIL) {
		eip97_skcipher_dma_unmap(req, -EBADMSG, true);
		return;
	} else if (err & EIP97_ERR_TIMEOUT) {
		eip97_skcipher_dma_unmap(req, -ETIMEDOUT, true);
		return;
	} else if (err) {
		eip97_skcipher_dma_unmap(req, -EPROTO, true);
		return;
	}

	if (bytes != req->cryptlen)
		eip97_skcipher_dma_unmap(req, -ENODATA, true);
	else
		eip97_skcipher_dma_unmap(req, 0, true);
}

static int eip97_aes_cbc_create_tfm(struct eip97_aes_cbc_request *req)
{
	struct eip97_skcipher_info *info = req->info;

	info->tfm[0] = 0;
	info->tfm[1] = 0;

	switch (SIZE_IN_BYTES(req->key_words)) {
	case AES_KEYSIZE_128:
		info->tfm[0] |= EIP97_TFM_AES_128BITS;
		break;
	case AES_KEYSIZE_192:
		info->tfm[0] |= EIP97_TFM_AES_192BITS;
		break;
	case AES_KEYSIZE_256:
		info->tfm[0] |= EIP97_TFM_AES_256BITS;
		break;
	default:
		return -EINVAL;
	}
	info->tfm[0] |= EIP97_TFM_SIZE(req->key_words +
				       SIZE_IN_WORDS(AES_BLOCK_SIZE));

	BUILD_BUG_ON(sizeof(info->state) < AES_KEYSIZE_256 + AES_BLOCK_SIZE);

	eip97_write_state_le(info->state, req->key, req->key_words);
	info->tfm[0] |= EIP97_TFM_CONTAINS_KEY;

	eip97_write_state_le(info->state + req->key_words, (u32 *)req->iv,
			     SIZE_IN_WORDS(AES_BLOCK_SIZE));
	info->tfm[1] |= EIP97_TFM_IV0 | EIP97_TFM_IV1 | EIP97_TFM_IV2 |
			EIP97_TFM_IV3;

	switch (req->dir) {
	case EIP97_ENCRYPT:
		info->tfm[0] |= EIP97_TFM_BASIC_OUT;
		break;
	case EIP97_DECRYPT:
		info->tfm[0] |= EIP97_TFM_BASIC_IN;
		break;
	default:
		return -EINVAL;
	}
	info->tfm[1] |= EIP97_TFM_CBC;

	return 0;
}

static u32 eip97_aes_cbc_create_command_token(struct eip97_aes_cbc_request *req,
					      u32 text_bytes)
{
	u32 cmd_words = 0;

	BUILD_BUG_ON(sizeof(req->info->cmd) < SIZE_IN_BYTES(3));

	req->info->cmd[cmd_words++] = 0x05000000 | text_bytes;
	req->info->cmd[cmd_words++] = 0x2d060000;
	req->info->cmd[cmd_words++] = 0xe4a63806;

	return cmd_words;
}

int eip97_aes_cbc_crypt(struct eip97_aes_cbc_request *req)
{
	struct eip97_ring *ring = req->ring;
	volatile struct eip97_rd *res_prep;
	volatile struct eip97_cd *cmd_prep;
	int src_nents, dst_nents;
	u32 cmd_words;
	int err;

#ifdef EIP97_SAFE_MODE
	//req->key_words is checked always anyway
	if (!req->key)
		return -EINVAL;
	if (!req->iv)
		return -EINVAL;
	if (!req->src || !req->dst || !req->text_bytes)
		return -EINVAL;
	if (req->text_bytes > EIP97_SKCIPHER_MAX_U32_LEN)
		return -EINVAL;
	if (req->dir != EIP97_ENCRYPT && req->dir != EIP97_DECRYPT)
		return -EINVAL;
	if (!req->base)
		return -EINVAL;
	if (req->ring_id < 0 || req->ring_id >= EIP97_RING_MAX)
		return -EINVAL;
	if (!ring)
		return -EINVAL;
	if (!req->info)
		return -EINVAL;
	if (!req->info_dma)
		return -EINVAL;
#endif

	err = eip97_aes_cbc_create_tfm(req);
	if (err)
		return err;

	cmd_words = eip97_aes_cbc_create_command_token(req, req->text_bytes);
	if (!cmd_words)
		return -EINVAL;

	/* Build command descriptors */
	cmd_prep = ring->cmd_prep;
	src_nents = E97_HK(eip97_create_cmd_descs)(req->src, req->text_bytes,
			ring, req->info_dma, 0, cmd_words, req->tag,
			req->info_dma + sizeof(req->info->cmd));
	if (src_nents < 0) {
		ring->cmd_prep = cmd_prep;
		return src_nents;
	}

	/* Build result descriptors */
	res_prep = ring->res_prep;
	dst_nents = E97_HK(eip97_create_res_descs)(req->dst, req->text_bytes,
			ring, req->tag);
	if (dst_nents < 0) {
		ring->cmd_prep = cmd_prep;
		ring->res_prep = res_prep;
		return dst_nents;
	}

	/* Sync DMA, if any */
	if (req->sync)
		req->sync(req->sync_ctx);

	/* Make sure all changes to DMA ring are done before we start engine */
	wmb();

	/* Start transfer */
	writel(EIP97_CD_CNT((u32)src_nents),
	       req->base + CDR_PREP_COUNT(req->ring_id));
	writel(EIP97_RD_CNT((u32)dst_nents),
	       req->base + RDR_PREP_COUNT(req->ring_id));

	return -EINPROGRESS;
}
EXPORT_SYMBOL(eip97_aes_cbc_crypt);
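
/*
 * Example (added sketch, not from the original driver): a kernel-side user
 * of the exported raw API would fill a struct eip97_aes_cbc_request the
 * same way eip97_skcipher_enqueue_hard() below does, and treat
 * -EINPROGRESS as success.  All values shown are illustrative placeholders.
 *
 *	struct eip97_aes_cbc_request r = {
 *		.key_words  = SIZE_IN_WORDS(AES_KEYSIZE_128),
 *		.key        = key_words,	// caller-provided key words
 *		.iv         = iv,		// 16-byte IV
 *		.src        = src_sg,		// DMA-mapped scatterlists
 *		.dst        = dst_sg,
 *		.text_bytes = len,		// multiple of AES_BLOCK_SIZE
 *		.dir        = EIP97_ENCRYPT,
 *		.tag        = tag,		// from eip97_alloc_tag()
 *		.base       = cryp->base,
 *		.ring_id    = ring_id,
 *		.ring       = cryp->ring[ring_id],
 *		.info       = info,		// DMA-mapped to info_dma
 *		.info_dma   = info_dma,
 *	};
 *
 *	ret = eip97_aes_cbc_crypt(&r);
 *	if (ret != -EINPROGRESS)
 *		// no descriptors were consumed; caller may retry or bail out
 *		...;
 */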
static void eip97_skcipher_info_dma_sync(void *ctx)
{
	struct eip97_skcipher_info_dma_sync *sync_ctx =
			(struct eip97_skcipher_info_dma_sync *)ctx;

	dma_sync_single_for_device(sync_ctx->dev, sync_ctx->addr,
				   sizeof(struct eip97_skcipher_info),
				   DMA_TO_DEVICE);
}

static int eip97_skcipher_enqueue_hard(struct skcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct eip97_skcipher_tfm_ctx *tfm_ctx = crypto_tfm_ctx(tfm);
	struct eip97_skcipher_req_ctx *ctx = req_ctx(req);
	struct eip97_aes_cbc_request aes_cbc_req;
	struct eip97_skcipher_info_dma_sync sync_ctx;
	struct eip97_cryp *cryp = READ_ONCE(tfm_ctx->cryp);
	enum eip97_ring_id ring_id;
	struct eip97_ring *ring;
	int tag, ret;

	memset(&aes_cbc_req, 0, sizeof(aes_cbc_req));
	memset(&sync_ctx, 0, sizeof(sync_ctx));

	//early so eip97_alloc_tag() only needs spin_lock()
	local_bh_disable();

	tag = E97_HK(eip97_alloc_tag)(tfm_ctx->cryp, &(ctx->type));
	if (tag < 0) {
		local_bh_enable();
		return tag;
	}

	sync_ctx.dev = cryp->dev;
	sync_ctx.addr = ctx->info_dma;

	BUILD_BUG_ON(NR_CPUS > EIP97_RING_MAX);
	ring_id = get_cpu() % EIP97_RING_MAX;
	ring = cryp->ring[ring_id];

	if (cd_ring_empty(ring) && rd_ring_empty(ring))
		atomic_inc(&(cryp->debug.ring_empty));

	switch (tfm_ctx->mode) {
	case AES_CBC:
		aes_cbc_req.key_words = tfm_ctx->key_words;
		aes_cbc_req.key = tfm_ctx->key;
		aes_cbc_req.iv = req->iv;
		aes_cbc_req.src = req->src;
		aes_cbc_req.dst = req->dst;
		aes_cbc_req.text_bytes = req->cryptlen;
		aes_cbc_req.dir = ctx->dir;
		aes_cbc_req.tag = tag;
		aes_cbc_req.base = cryp->base;
		aes_cbc_req.ring_id = ring_id;
		aes_cbc_req.ring = ring;
		aes_cbc_req.info = &(ctx->info);
		aes_cbc_req.info_dma = ctx->info_dma;
		aes_cbc_req.sync = &eip97_skcipher_info_dma_sync;
		aes_cbc_req.sync_ctx = &sync_ctx;
		ret = eip97_aes_cbc_crypt(&aes_cbc_req);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	put_cpu();
	local_bh_enable();

	if (ret != -EINPROGRESS)
		cryp->tags[tag] = 0;
	else
		atomic_inc(&(cryp->debug.tx_ops));

	return ret;
}
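
/*
 * Completion flow (added note): eip97_alloc_tag() above registers
 * &ctx->type for the chosen tag, and when the engine reports the result
 * the platform layer is expected to hand that pointer back to
 * eip97_skcipher_finish().  The request is then recovered purely via
 * container_of(), roughly (sketch):
 *
 *	ctx = container_of(op_type, struct eip97_skcipher_req_ctx, type);
 *	req = container_of((void *)ctx, struct skcipher_request, __ctx);
 *
 * so no extra per-request lookup table is needed on the completion path.
 */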
static int eip97_skcipher_enqueue(struct skcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct eip97_skcipher_tfm_ctx *tfm_ctx = crypto_tfm_ctx(tfm);
	int err;

	err = eip97_skcipher_enqueue_hard(req);
	if (err == -EINPROGRESS)
		return -EINPROGRESS;

	if (err == -EAGAIN)
		atomic_inc(&(tfm_ctx->cryp->debug.ring_full));

	WARN_ONCE(atomic_read(&(tfm_ctx->cryp->debug.ring_full)) > 16,
		  "Please increase EIP97 driver descriptor ring size, tag ring size or implement software backlog!");

	return eip97_skcipher_dma_unmap(req, err, false);
}

static int eip97_skcipher_dma_map(struct skcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct eip97_skcipher_tfm_ctx *tfm_ctx = crypto_tfm_ctx(tfm);
	struct eip97_skcipher_req_ctx *ctx = req_ctx(req);
	struct device *dev = tfm_ctx->cryp->dev;
	int dma_nents;

	ctx->info_dma = E97_HK(dma_map_single)(dev, &(ctx->info),
					       sizeof(ctx->info),
					       DMA_TO_DEVICE);
	if (unlikely(E97_HK(dma_mapping_error)(dev, ctx->info_dma)))
		goto err_exit;

	if (req->src == req->dst) {
		dma_nents = E97_HK(dma_map_sg)(dev, req->src, ctx->src.nents,
					       DMA_BIDIRECTIONAL);
		if (unlikely(!dma_nents))
			goto src_map_err;
	} else {
		dma_nents = E97_HK(dma_map_sg)(dev, req->src, ctx->src.nents,
					       DMA_TO_DEVICE);
		if (unlikely(!dma_nents))
			goto src_map_err;

		dma_nents = E97_HK(dma_map_sg)(dev, req->dst, ctx->dst.nents,
					       DMA_FROM_DEVICE);
		if (unlikely(!dma_nents))
			goto dst_map_err;
	}

	return eip97_skcipher_enqueue(req);

dst_map_err:
	dma_unmap_sg(dev, req->src, ctx->src.nents, DMA_TO_DEVICE);
src_map_err:
	dma_unmap_single(dev, ctx->info_dma, sizeof(ctx->info), DMA_TO_DEVICE);
err_exit:
	return eip97_skcipher_dma_unalign(req, -EINVAL, false);
}

static bool eip97_skcipher_check_aligned(struct scatterlist *sg, size_t *bytes,
					 u32 *nents)
{
	bool ret = true;

	*nents = 0;
	for (*bytes = 0; sg; sg = sg_next(sg)) {
		*bytes += sg->length;
		(*nents)++;
		if (!IS_ALIGNED(sg->offset, sizeof(u32)))
			ret = false;
	}

	return ret;
}

static int eip97_skcipher_dma_align(struct skcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct eip97_skcipher_tfm_ctx *tfm_ctx = crypto_tfm_ctx(tfm);
	struct eip97_skcipher_req_ctx *ctx = req_ctx(req);
	struct crypto_alg *alg = tfm->__crt_alg;
	bool src_aligned, dst_aligned;
	size_t src_bytes, dst_bytes;
	struct scatterlist *aligned_sg;
	u8 *align_buf;
	size_t copied;

	ctx->src.unaligned_sg = req->src;
	ctx->dst.unaligned_sg = req->dst;

	src_aligned = eip97_skcipher_check_aligned(req->src, &src_bytes,
						   &ctx->src.unaligned_nents);
	if (req->src == req->dst) {
		dst_aligned = src_aligned;
		dst_bytes = src_bytes;
		ctx->dst.unaligned_nents = ctx->src.unaligned_nents;
	} else {
		dst_aligned = eip97_skcipher_check_aligned(req->dst, &dst_bytes,
						&ctx->dst.unaligned_nents);
	}

	ctx->src.nents = ctx->src.unaligned_nents;
	ctx->dst.nents = ctx->dst.unaligned_nents;

	if (src_bytes < req->cryptlen || dst_bytes < req->cryptlen)
		return -EINVAL;

	if (!src_aligned || !dst_aligned) {
		align_buf = E97_HK(kmalloc)(req->cryptlen +
					    sizeof(struct scatterlist),
					    GFP_ATOMIC);
		if (!align_buf)
			return -ENOMEM;

		/* One-entry table at the end of the bounce buffer; use
		 * sg_init_one() so the entry is fully initialized.
		 */
		aligned_sg = (struct scatterlist *)(align_buf + req->cryptlen);
		sg_init_one(aligned_sg, align_buf, req->cryptlen);

		if (!dst_aligned) {
			req->dst = aligned_sg;
			ctx->dst.nents = 1;
		}

		if (!src_aligned) {
			WARN_ONCE(alg->cra_flags & CRYPTO_ALG_TESTED,
				  "EIP97 acceleration of skcipher slows down because of unaligned data input!");
			atomic_inc(&(tfm_ctx->cryp->debug.unaligned_ops));

			copied = E97_HK(sg_copy_to_buffer)(req->src,
							   ctx->src.nents,
							   align_buf,
							   req->cryptlen);
			req->src = aligned_sg;
			ctx->src.nents = 1;

			if (copied != req->cryptlen) {
				req->dst = ctx->dst.unaligned_sg;
				req->src = ctx->src.unaligned_sg;
				kzfree(align_buf);
				return -ENODATA;
			}
		}
	}

	return eip97_skcipher_dma_map(req);
}
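
/*
 * Added note on the bounce buffer above: a single GFP_ATOMIC allocation
 * carries both the aligned copy of the data and the one-entry scatterlist
 * that describes it, laid out as
 *
 *	align_buf:  [ req->cryptlen bytes of data ][ struct scatterlist ]
 *
 * kzfree() in eip97_skcipher_dma_unalign() releases both at once, and
 * sg_virt() on the single entry recovers align_buf for the copy-back to
 * the caller's destination scatterlist.
 */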
static bool eip97_skcipher_limits_conform(struct skcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct crypto_skcipher *skc_tfm = __crypto_skcipher_cast(tfm);

	if (!IS_ALIGNED(req->cryptlen, crypto_skcipher_blocksize(skc_tfm)))
		return false;

	if (unlikely(req->cryptlen > EIP97_SKCIPHER_MAX_UINT_LEN))
		return false;

	return true;
}

static int eip97_skcipher_enc(struct skcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct eip97_skcipher_tfm_ctx *tfm_ctx = crypto_tfm_ctx(tfm);
	struct eip97_skcipher_req_ctx *ctx = req_ctx(req);

	if (!eip97_skcipher_limits_conform(req))
		return -EINVAL;

	ctx->dir = EIP97_ENCRYPT;
	ctx->type = EIP97_SKCIPHER;
	atomic_inc(&(tfm_ctx->cryp->debug.total_ops));

	return eip97_skcipher_dma_align(req);
}

static int eip97_skcipher_dec(struct skcipher_request *req)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct eip97_skcipher_tfm_ctx *tfm_ctx = crypto_tfm_ctx(tfm);
	struct eip97_skcipher_req_ctx *ctx = req_ctx(req);

	if (!eip97_skcipher_limits_conform(req))
		return -EINVAL;

	ctx->dir = EIP97_DECRYPT;
	ctx->type = EIP97_SKCIPHER;
	atomic_inc(&(tfm_ctx->cryp->debug.total_ops));

	return eip97_skcipher_dma_align(req);
}

static int eip97_skcipher_init_aes_cbc(struct crypto_skcipher *skcipher)
{
	struct eip97_skcipher_tfm_ctx *ctx = crypto_tfm_ctx(&skcipher->base);

	ctx->cryp = eip97_find_dev();
	if (!ctx->cryp) {
		pr_err("EIP97 skcipher: Can't find crypto device\n");
		return -ENODEV;
	}

	skcipher->reqsize = sizeof(struct eip97_skcipher_req_ctx);
	ctx->mode = AES_CBC;

	return 0;
}

static struct skcipher_alg skcipher_algs[] = {
	{
		.setkey = eip97_skcipher_setkey,
		.encrypt = eip97_skcipher_enc,
		.decrypt = eip97_skcipher_dec,
		.init = eip97_skcipher_init_aes_cbc,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.chunksize = AES_BLOCK_SIZE,
		.base = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "cbc-aes-eip97",
			.cra_priority = 400,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct eip97_skcipher_tfm_ctx),
			.cra_alignmask = 0xf,
			.cra_module = THIS_MODULE,
		}
	},
};

int eip97_skcipher_register_algs(void)
{
	int err, i;

	for (i = 0; i < ARRAY_SIZE(skcipher_algs); i++) {
		err = crypto_register_skcipher(&skcipher_algs[i]);
		if (err)
			goto err_skcipher_algs;
	}

	return 0;

err_skcipher_algs:
	while (i--)
		crypto_unregister_skcipher(&skcipher_algs[i]);

	return err;
}

void eip97_skcipher_start_alg_tests(struct eip97_debug *dbg)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(skcipher_algs); i++)
		eip97_test_skcipher(&skcipher_algs[i], dbg);
}

void eip97_skcipher_unregister_algs(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(skcipher_algs); i++)
		crypto_unregister_skcipher(&skcipher_algs[i]);
}
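
/*
 * Added usage sketch (not part of the original driver): once
 * eip97_skcipher_register_algs() has run, in-kernel users reach this
 * engine through the generic crypto API, e.g. (error handling trimmed):
 *
 *	DECLARE_CRYPTO_WAIT(wait);
 *	struct crypto_skcipher *tfm;
 *	struct skcipher_request *sreq;
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *
 *	sreq = skcipher_request_alloc(tfm, GFP_KERNEL);
 *	skcipher_request_set_callback(sreq, CRYPTO_TFM_REQ_MAY_SLEEP,
 *				      crypto_req_done, &wait);
 *	skcipher_request_set_crypt(sreq, src_sg, dst_sg, len, iv);
 *	err = crypto_wait_req(crypto_skcipher_encrypt(sreq), &wait);
 *
 *	skcipher_request_free(sreq);
 *	crypto_free_skcipher(tfm);
 *
 * "key", "iv", "src_sg", "dst_sg" and "len" are placeholders; whether this
 * driver or another "cbc(aes)" implementation is selected depends on
 * cra_priority.
 */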