--- zzzz-none-000/linux-4.4.271/drivers/crypto/qce/ablkcipher.c 2021-06-03 06:22:09.000000000 +0000 +++ hawkeye-5590-750/linux-4.4.271/drivers/crypto/qce/ablkcipher.c 2023-04-19 10:22:28.000000000 +0000 @@ -17,9 +17,12 @@ #include #include #include +#include +#include +#include "regs-v5.h" #include "cipher.h" - +#include "core.h" static LIST_HEAD(ablkcipher_algs); static void qce_ablkcipher_done(void *data) @@ -29,11 +32,11 @@ struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req); struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm); struct qce_device *qce = tmpl->qce; + struct qce_bam_transaction *qce_bam_txn = qce->dma.qce_bam_txn; enum dma_data_direction dir_src, dir_dst; u32 status; int error; bool diff_dst; - diff_dst = (req->src != req->dst) ? true : false; dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL; dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL; @@ -49,6 +52,16 @@ sg_free_table(&rctx->dst_tbl); + if (qce->qce_cmd_desc_enable) { + if (qce_bam_txn->qce_read_sgl_cnt) + qcom_bam_unmap_sg(qce->dev, qce_bam_txn->qce_reg_read_sgl, + qce_bam_txn->qce_read_sgl_cnt, DMA_DEV_TO_MEM); + + if (qce_bam_txn->qce_write_sgl_cnt) + qcom_bam_unmap_sg(qce->dev, qce_bam_txn->qce_reg_write_sgl, + qce_bam_txn->qce_write_sgl_cnt, DMA_MEM_TO_DEV); + } + error = qce_check_status(qce, &status); if (error < 0) dev_dbg(qce->dev, "ablkcipher operation error (%x)\n", status); @@ -78,6 +91,10 @@ dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL; dir_dst = diff_dst ? 
DMA_FROM_DEVICE : DMA_BIDIRECTIONAL; + /* Get the LOCK for this request */ + if (qce->qce_cmd_desc_enable) + qce_read_dma_get_lock(qce); + rctx->src_nents = sg_nents_for_len(req->src, req->nbytes); if (diff_dst) rctx->dst_nents = sg_nents_for_len(req->dst, req->nbytes); @@ -131,9 +148,15 @@ qce_dma_issue_pending(&qce->dma); - ret = qce_start(async_req, tmpl->crypto_alg_type, req->nbytes, 0); - if (ret) - goto error_terminate; + if (qce->qce_cmd_desc_enable) { + ret = qce_start_dma(async_req, tmpl->crypto_alg_type, req->nbytes, 0); + if (ret) + goto error_terminate; + } else { + ret = qce_start(async_req, tmpl->crypto_alg_type, req->nbytes, 0); + if (ret) + goto error_terminate; + } return 0; @@ -149,6 +172,46 @@ return ret; } +static int qce_setkey_sec(unsigned int keylen) +{ + struct qce_config_key_sec *key; + struct kmem_cache *qce_cache; + int ret; + + qce_cache = kmem_cache_create("qce_sec_key", max(L1_CACHE_BYTES, + (int)sizeof(struct qce_config_key_sec)), + L1_CACHE_BYTES, SLAB_PANIC, NULL); + if (!qce_cache) { + pr_info("%s: Error in creating cache\n", __func__); + return -ENOMEM; + } + + key = kmem_cache_alloc(qce_cache, GFP_KERNEL); + if (!key) { + pr_info("%s: Error in allocating cache.\n", __func__); + ret = -ENOMEM; + goto destroy; + } + + /* update the key length value */ + key->keylen = keylen; + + ret = qcom_set_qcekey_sec(key, sizeof(struct qce_config_key_sec)); + if (ret) { + pr_err("%s: qce key configuration fail\n", __func__); + ret = -EINVAL; + goto free; + } + +free: + kmem_cache_free(qce_cache, key); + +destroy: + kmem_cache_destroy(qce_cache); + + return ret; +} + static int qce_ablkcipher_setkey(struct crypto_ablkcipher *ablk, const u8 *key, unsigned int keylen) { @@ -177,6 +240,14 @@ goto weakkey; } + if (use_fixed_key) { + ret = qce_setkey_sec(keylen); + if (ret) { + pr_err("%s: qce key configuration fail\n", __func__); + return ret; + } + } + ctx->enc_keylen = keylen; memcpy(ctx->enc_key, key, keylen); return 0;