/*
 * Driver for EIP97 cryptographic accelerator.
 *
 * Copyright (c) 2018 AVM GmbH
 *
 * SPDX-License-Identifier: GPL-2.0-only
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This driver is heavily based on the Mediatek driver from Ryder Lee.
 *
 */

#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

#include "eip97-platform.h"
#include "eip97-skcipher.h"
#include "eip97-aead.h"
#include "eip97-ahash.h"

#define EIP97_BURST_SIZE_MSK		GENMASK(7, 4)
#define EIP97_BURST_SIZE(x)		((x) << 4)
#define EIP97_DESC_SIZE(x)		((x) << 0)
#define EIP97_DESC_OFFSET(x)		((x) << 16)
#define EIP97_DESC_FETCH_SIZE(x)	((x) << 0)
#define EIP97_DESC_FETCH_THRESH(x)	((x) << 16)
#define EIP97_DESC_OVL_IRQ_EN		BIT(25)
#define EIP97_DESC_ATP_PRESENT		BIT(30)
#define EIP97_DFSE_IDLE			GENMASK(3, 0)
#define EIP97_DFSE_THR_CTRL_EN		BIT(30)
#define EIP97_DFSE_THR_CTRL_RESET	BIT(31)
#define EIP97_DFSE_RING_ID(x)		(((x) >> 12) & GENMASK(3, 0))
#define EIP97_DFSE_MIN_DATA(x)		((x) << 0)
#define EIP97_DFSE_MAX_DATA(x)		((x) << 8)
#define EIP97_DFE_MIN_CTRL(x)		((x) << 16)
#define EIP97_DFE_MAX_CTRL(x)		((x) << 24)
#define EIP97_IN_BUF_MIN_THRESH(x)	((x) << 8)
#define EIP97_IN_BUF_MAX_THRESH(x)	((x) << 12)
#define EIP97_OUT_BUF_MIN_THRESH(x)	((x) << 0)
#define EIP97_OUT_BUF_MAX_THRESH(x)	((x) << 4)
#define EIP97_IN_TBUF_SIZE(x)		(((x) >> 4) & GENMASK(3, 0))
#define EIP97_IN_DBUF_SIZE(x)		(((x) >> 8) & GENMASK(3, 0))
#define EIP97_OUT_DBUF_SIZE(x)		(((x) >> 16) & GENMASK(3, 0))
#define EIP97_CMD_FIFO_SIZE(x)		(((x) >> 8) & GENMASK(3, 0))
#define EIP97_RES_FIFO_SIZE(x)		(((x) >> 12) & GENMASK(3, 0))
#define EIP97_PE_TK_LOC_AVL		BIT(2)
#define EIP97_PE_PROC_HELD		BIT(14)
#define EIP97_PE_TK_TIMEOUT_EN		BIT(22)
#define EIP97_PE_INPUT_DMA_ERR		BIT(0)
#define EIP97_PE_OUTPUT_DMA_ERR		BIT(1)
#define EIP97_PE_PKT_PORC_ERR		BIT(2)
#define EIP97_PE_PKT_TIMEOUT		BIT(3)
#define EIP97_PE_FATAL_ERR		BIT(14)
#define EIP97_PE_INPUT_DMA_ERR_EN	BIT(16)
#define EIP97_PE_OUTPUT_DMA_ERR_EN	BIT(17)
#define EIP97_PE_PKT_PORC_ERR_EN	BIT(18)
#define EIP97_PE_PKT_TIMEOUT_EN		BIT(19)
#define EIP97_PE_FATAL_ERR_EN		BIT(30)
#define EIP97_PE_INT_OUT_EN		BIT(31)
#define EIP97_HIA_SIGNATURE		((u16)0x35ca)
#define EIP97_HIA_DATA_WIDTH(x)		(((x) >> 25) & GENMASK(1, 0))
#define EIP97_HIA_DMA_LENGTH(x)		(((x) >> 20) & GENMASK(4, 0))
#define EIP97_CDR_STAT_CLR		GENMASK(4, 0)
#define EIP97_RDR_STAT_CLR		GENMASK(7, 0)
#define EIP97_AIC_INT_MSK		GENMASK(5, 0)
#define EIP97_AIC_VER_MSK		(GENMASK(15, 0) | GENMASK(27, 20))
#define EIP97_AIC_VER11			0x011036c9
#define EIP97_AIC_VER12			0x012036c9
#define EIP97_AIC_G_CLR			GENMASK(30, 20)
#define EIP97_WR_OWN_BUF		BIT(24)
#define EIP97_DATA_SWAP			BIT(8)
#define EIP97_ACD_PROTECTION(x)		((x) << 18)
#define EIP97_PROC_COUNT(x)		(((x) >> 2) & GENMASK(21, 0))
#define EIP97_CNT_RST			BIT(31)
#define EIP97_RDR_PROC_THRESH		BIT(0)
#define EIP97_RDR_PROC_MODE		BIT(23)

/**
 * EIP97 is an integrated security subsystem to accelerate cryptographic
 * functions and protocols to offload the host processor.
 * Some important hardware modules are briefly introduced below:
 *
 * Host Interface Adapter(HIA) - the main interface between the host
 * system and the hardware subsystem. It is responsible for attaching
 * the processing engine to the specific host bus interface and provides a
 * standardized software view for offloading tasks to the engine.
 *
 * Command Descriptor Ring Manager(CDR Manager) - keeps track of how many
 * CDs the host has prepared in the CDR. It monitors the fill level of its
 * CD-FIFO and if there's sufficient space for the next block of descriptors,
 * then it fires off a DMA request to fetch a block of CDs.
 *
 * Data fetch engine(DFE) - It is responsible for parsing the CD and
 * setting up the required control and packet data DMA transfers from
 * system memory to the processing engine.
 *
 * Result Descriptor Ring Manager(RDR Manager) - same as the CDR Manager,
 * but its target is result descriptors. Moreover, it also handles the RD
 * updates under control of the DSE. For each packet data segment
 * processed, the DSE triggers the RDR Manager to write the updated RD.
 * If triggered to update, the RDR Manager sets up a DMA operation to
 * copy the RD from the DSE to the correct location in the RDR.
 *
 * Data Store Engine(DSE) - It is responsible for parsing the prepared RD
 * and setting up the required control and packet data DMA transfers from
 * the processing engine to system memory.
 *
 * Advanced Interrupt Controllers(AICs) - receive interrupt request signals
 * from various sources and combine them into one interrupt output.
 * The AICs are used as follows:
 * - One for the HIA global and processing engine interrupts.
 * - The others for the descriptor ring interrupts.
 */

/* User value only used when set on module load */
static uint eip97_desc_num = 1024;
module_param(eip97_desc_num, uint, 0644);

/* Cryptographic engine capabilities */
struct eip97_sys_cap {
	/* host interface adapter */
	u32 hia_ver;
	u32 hia_opt;
	/* packet engine */
	u32 pkt_eng_opt;
	/* global hardware */
	u32 hw_opt;
};

struct eip97_drv {
	struct list_head dev_list;
	/* Device list lock */
	spinlock_t lock;
};

struct eip97_drv eip97_devices = {
	.dev_list = LIST_HEAD_INIT(eip97_devices.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(eip97_devices.lock),
};

struct eip97_cryp *eip97_find_dev(void)
{
	struct eip97_cryp *tmp;

	spin_lock_bh(&eip97_devices.lock);
	list_for_each_entry(tmp, &eip97_devices.dev_list, dev_list) {
		spin_unlock_bh(&eip97_devices.lock);
		return tmp;
	}
	spin_unlock_bh(&eip97_devices.lock);

	return NULL;
}

static int eip97_aic_cap_check(struct eip97_cryp *cryp, int hw)
{
	u32 val;

	if (hw == EIP97_RING_MAX)
		val = readl(cryp->base + AIC_G_VERSION);
	else
		val = readl(cryp->base + AIC_VERSION(hw));

	val &= EIP97_AIC_VER_MSK;
	if (val != EIP97_AIC_VER11 && val != EIP97_AIC_VER12)
		return -ENXIO;

	if (hw == EIP97_RING_MAX)
		val = readl(cryp->base + AIC_G_OPTIONS);
	else
		val = readl(cryp->base + AIC_OPTIONS(hw));

	val &= EIP97_AIC_INT_MSK;
	if (!val || val > 32)
		return -ENXIO;

	return 0;
}

static int eip97_aic_init(struct eip97_cryp *cryp, int hw)
{
	int err;

	err = eip97_aic_cap_check(cryp, hw);
	if (err)
		return err;

	/* Disable all interrupts and set initial configuration */
	if (hw == EIP97_RING_MAX) {
		writel(0, cryp->base + AIC_G_ENABLE_CTRL);
		writel(0, cryp->base + AIC_G_POL_CTRL);
		writel(0, cryp->base + AIC_G_TYPE_CTRL);
		writel(0, cryp->base + AIC_G_ENABLE_SET);
	} else {
		writel(0, cryp->base + AIC_ENABLE_CTRL(hw));
		writel(0, cryp->base + AIC_POL_CTRL(hw));
		writel(0, cryp->base + AIC_TYPE_CTRL(hw));
		writel(0, cryp->base + AIC_ENABLE_SET(hw));
	}

	return 0;
}

static int eip97_dfe_dse_state_check(struct eip97_cryp *cryp)
{
	int ret = -EINVAL;
	u32 val;

	/* Check for completion of all DMA transfers */
	val = readl(cryp->base + DFE_THR_STAT);
	if (EIP97_DFSE_RING_ID(val) == EIP97_DFSE_IDLE) {
		val = readl(cryp->base + DSE_THR_STAT);
		if (EIP97_DFSE_RING_ID(val) == EIP97_DFSE_IDLE)
			ret = 0;
	}

	if (!ret) {
		/*
Take DFE/DSE thread out of reset */ writel(0, cryp->base + DFE_THR_CTRL); writel(0, cryp->base + DSE_THR_CTRL); } else { return -EBUSY; } return 0; } static int eip97_dfe_dse_reset(struct eip97_cryp *cryp) { int err; /* Reset DSE/DFE and correct system priorities for all rings. */ writel(EIP97_DFSE_THR_CTRL_RESET, cryp->base + DFE_THR_CTRL); writel(0, cryp->base + DFE_PRIO_0); writel(0, cryp->base + DFE_PRIO_1); writel(0, cryp->base + DFE_PRIO_2); writel(0, cryp->base + DFE_PRIO_3); writel(EIP97_DFSE_THR_CTRL_RESET, cryp->base + DSE_THR_CTRL); writel(0, cryp->base + DSE_PRIO_0); writel(0, cryp->base + DSE_PRIO_1); writel(0, cryp->base + DSE_PRIO_2); writel(0, cryp->base + DSE_PRIO_3); err = eip97_dfe_dse_state_check(cryp); if (err) return err; return 0; } static void eip97_dfe_dse_buf_setup(struct eip97_cryp *cryp, struct eip97_sys_cap *cap) { u32 width = EIP97_HIA_DATA_WIDTH(cap->hia_opt) + 2; u32 len = EIP97_HIA_DMA_LENGTH(cap->hia_opt) - 1; u32 ipbuf = min((u32)EIP97_IN_DBUF_SIZE(cap->hw_opt) + width, len); u32 opbuf = min((u32)EIP97_OUT_DBUF_SIZE(cap->hw_opt) + width, len); u32 itbuf = min((u32)EIP97_IN_TBUF_SIZE(cap->hw_opt) + width, len); writel(EIP97_DFSE_MIN_DATA(ipbuf - 1) | EIP97_DFSE_MAX_DATA(ipbuf) | EIP97_DFE_MIN_CTRL(itbuf - 1) | EIP97_DFE_MAX_CTRL(itbuf), cryp->base + DFE_CFG); writel(EIP97_DFSE_MIN_DATA(opbuf - 1) | EIP97_DFSE_MAX_DATA(opbuf), cryp->base + DSE_CFG); writel(EIP97_IN_BUF_MIN_THRESH(ipbuf - 1) | EIP97_IN_BUF_MAX_THRESH(ipbuf), cryp->base + PE_IN_DBUF_THRESH); writel(EIP97_IN_BUF_MIN_THRESH(itbuf - 1) | EIP97_IN_BUF_MAX_THRESH(itbuf), cryp->base + PE_IN_TBUF_THRESH); writel(EIP97_OUT_BUF_MIN_THRESH(opbuf - 1) | EIP97_OUT_BUF_MAX_THRESH(opbuf), cryp->base + PE_OUT_DBUF_THRESH); writel(0, cryp->base + PE_OUT_TBUF_THRESH); writel(0, cryp->base + PE_OUT_BUF_CTRL); } static void eip97_desc_ring_link(struct eip97_cryp *cryp, u32 mask) { /* Assign rings to DFE/DSE thread and enable it */ writel(EIP97_DFSE_THR_CTRL_EN | mask, cryp->base + DFE_THR_CTRL); writel(EIP97_DFSE_THR_CTRL_EN | mask, cryp->base + DSE_THR_CTRL); } static inline volatile struct eip97_cd *cd_next(volatile struct eip97_cd *cd_cur, struct eip97_ring *ring) { if (++cd_cur == ring->cmd_base + ring->cmd_num) return ring->cmd_base; else return cd_cur; } static inline volatile struct eip97_rd *rd_next(volatile struct eip97_rd *rd_cur, struct eip97_ring *ring) { if (++rd_cur == ring->res_base + ring->res_num) return ring->res_base; else return rd_cur; } static inline void proc_cd(struct eip97_ring *ring) { if (cd_ring_empty(ring)) { WARN(1, "Command descriptor ring empty although hardware reports processed descriptors!"); return; } memset((void *)ring->cmd_proc, 0, sizeof(struct eip97_cd)); ring->cmd_proc = cd_next(ring->cmd_proc, ring); } static inline void proc_rd(struct eip97_ring *ring) { if (rd_ring_empty(ring)) { WARN(1, "Result descriptor ring empty although hardware reports processed descriptors!"); return; } memset((void *)ring->res_proc, 0, sizeof(struct eip97_rd)); ring->res_proc = rd_next(ring->res_proc, ring); } static inline int cd_ring_full(struct eip97_ring *ring) { return cd_next(ring->cmd_prep, ring) == ring->cmd_proc; } static inline int rd_ring_full(struct eip97_ring *ring) { return rd_next(ring->res_prep, ring) == ring->res_proc; } static inline void prep_cd(struct eip97_ring *ring) { BUG_ON(cd_ring_full(ring)); ring->cmd_prep = cd_next(ring->cmd_prep, ring); } static inline void prep_rd(struct eip97_ring *ring) { BUG_ON(rd_ring_full(ring)); ring->res_prep = rd_next(ring->res_prep, 
ring); } int eip97_create_cmd_descs(struct scatterlist *sg, u32 sg_bytes, struct eip97_ring *ring, dma_addr_t ct, u32 ct_flags, u32 ct_len, u32 tag, dma_addr_t tfm) { volatile struct eip97_cd *cmd; int ret = 0; while (sg_bytes) { if (!sg) { ret = -EINVAL; break; } else if (cd_ring_full(ring)) { ret = -EAGAIN; break; } cmd = ring->cmd_prep; prep_cd(ring); ret++; cmd->hdr = 0; cmd->inbuf = sg_dma_address(sg); if (ret == 1) { cmd->hdr |= EIP97_DESC_FIRST | EIP97_CD_CT_LEN(ct_len); cmd->ct_hdr = EIP97_CT_USE_EIP97_MODE | ct_flags | sg_bytes; cmd->ct = ct; cmd->tag = tag; cmd->tfm = tfm; } else { cmd->ct_hdr = 0; cmd->ct = 0; cmd->tag = 0; cmd->tfm = 0; } if (sg_bytes <= sg_dma_len(sg)) { cmd->hdr |= EIP97_DESC_BUF_LEN(sg_bytes); break; } cmd->hdr |= EIP97_DESC_BUF_LEN(sg_dma_len(sg)); sg_bytes -= sg_dma_len(sg); sg = sg_next(sg); } if (ret > 0) cmd->hdr |= EIP97_DESC_LAST; return ret; } void eip97_cmd_desc_ring_set(void __iomem *base, int ring_id, struct eip97_ring *ring) { writel(0, base + CDR_BASE_ADDR_HI(ring_id)); writel(ring->cmd_dma, base + CDR_BASE_ADDR_LO(ring_id)); writel(EIP97_CD_CNT(ring->cmd_num), base + CDR_RING_SIZE(ring_id)); } EXPORT_SYMBOL(eip97_cmd_desc_ring_set); static void eip97_cmd_desc_ring_setup(struct eip97_cryp *cryp, enum eip97_ring_id ring_id, struct eip97_sys_cap *cap) { /* Full descriptor that fits FIFO minus one */ u32 count = ((1 << EIP97_CMD_FIFO_SIZE(cap->hia_opt)) / EIP97_CD_SZ) - 1; /* Disable external triggering */ writel(0, cryp->base + CDR_CFG(ring_id)); /* Clear CDR count */ writel(EIP97_CNT_RST, cryp->base + CDR_PREP_COUNT(ring_id)); writel(EIP97_CNT_RST, cryp->base + CDR_PROC_COUNT(ring_id)); writel(0, cryp->base + CDR_PREP_PNTR(ring_id)); writel(0, cryp->base + CDR_PROC_PNTR(ring_id)); writel(EIP97_WR_OWN_BUF | EIP97_DATA_SWAP, cryp->base + CDR_DMA_CFG(ring_id)); /* Configure CDR host address space */ eip97_cmd_desc_ring_set(cryp->base, ring_id, cryp->ring[ring_id]); /* Clear and disable all CDR interrupts */ writel(EIP97_CDR_STAT_CLR, cryp->base + CDR_STAT(ring_id)); /* * Set command descriptor offset and enable additional * token present in descriptor. 
*/ writel(EIP97_DESC_SIZE(EIP97_CD_SZ) | EIP97_DESC_OFFSET(EIP97_CD_OFF) | EIP97_DESC_ATP_PRESENT, cryp->base + CDR_DESC_SIZE(ring_id)); writel(EIP97_DESC_FETCH_SIZE(count * EIP97_CD_OFF) | EIP97_DESC_FETCH_THRESH(count * EIP97_CD_SZ), cryp->base + CDR_CFG(ring_id)); } int eip97_create_res_descs(struct scatterlist *sg, u32 sg_bytes, struct eip97_ring *ring, u32 tag) { volatile struct eip97_rd *res; int ret = 0; while (sg_bytes) { if (!sg) { ret = -EINVAL; break; } else if (rd_ring_full(ring)) { ret = -EAGAIN; break; } res = ring->res_prep; prep_rd(ring); ret++; res->hdr = 0; res->outbuf = sg_dma_address(sg); res->res1 = 0; if (ret == 1) { res->hdr |= EIP97_DESC_FIRST; res->tag = tag; } else { res->tag = 0; } if (sg_bytes <= sg_dma_len(sg)) { res->hdr |= EIP97_DESC_BUF_LEN(sg_bytes); break; } res->hdr |= EIP97_DESC_BUF_LEN(sg_dma_len(sg)); sg_bytes -= sg_dma_len(sg); sg = sg_next(sg); } if (ret > 0) res->hdr |= EIP97_DESC_LAST; return ret; } void eip97_res_desc_ring_set(void __iomem *base, int ring_id, struct eip97_ring *ring) { writel(0, base + RDR_BASE_ADDR_HI(ring_id)); writel(ring->res_dma, base + RDR_BASE_ADDR_LO(ring_id)); writel(EIP97_RD_CNT(ring->res_num), base + RDR_RING_SIZE(ring_id)); } EXPORT_SYMBOL(eip97_res_desc_ring_set); static void eip97_res_desc_ring_setup(struct eip97_cryp *cryp, enum eip97_ring_id ring_id, struct eip97_sys_cap *cap) { u32 rndup = 2; u32 count = ((1 << EIP97_RES_FIFO_SIZE(cap->hia_opt)) / rndup) - 1; /* Disable external triggering */ writel(0, cryp->base + RDR_CFG(ring_id)); /* Clear RDR count */ writel(EIP97_CNT_RST, cryp->base + RDR_PREP_COUNT(ring_id)); writel(EIP97_CNT_RST, cryp->base + RDR_PROC_COUNT(ring_id)); writel(0, cryp->base + RDR_PREP_PNTR(ring_id)); writel(0, cryp->base + RDR_PROC_PNTR(ring_id)); writel(EIP97_WR_OWN_BUF | EIP97_DATA_SWAP | EIP97_ACD_PROTECTION(0x20), cryp->base + RDR_DMA_CFG(ring_id)); /* Configure RDR host address space */ eip97_res_desc_ring_set(cryp->base, ring_id, cryp->ring[ring_id]); /* Clear and disable all RDR interrupts */ writel(EIP97_RDR_STAT_CLR, cryp->base + RDR_STAT(ring_id)); /* * RDR manager generates update interrupts on a per-completed-packet, * and the rd_proc_thresh_irq interrupt is fired when proc_pkt_count * for the RDR exceeds the number of packets. */ writel(EIP97_RDR_PROC_THRESH | EIP97_RDR_PROC_MODE, cryp->base + RDR_THRESH(ring_id)); /* * Configure a threshold and time-out value for the processed * result descriptors (or complete packets) that are written to * the RDR. */ writel(EIP97_DESC_SIZE(EIP97_RD_SZ) | EIP97_DESC_OFFSET(EIP97_RD_OFF), cryp->base + RDR_DESC_SIZE(ring_id)); /* * Configure HIA fetch size and fetch threshold that are used to * fetch blocks of multiple descriptors. 
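 * Both values are derived from the result FIFO size reported in HIA_OPTIONS
 * (see the calculation of 'count' above).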
*/ writel(EIP97_DESC_FETCH_SIZE(count * EIP97_RD_OFF) | EIP97_DESC_FETCH_THRESH(count * rndup) | EIP97_DESC_OVL_IRQ_EN, cryp->base + RDR_CFG(ring_id)); } static int eip97_packet_engine_setup(struct eip97_cryp *cryp) { struct eip97_sys_cap cap; enum eip97_ring_id i; int err; u32 val; cap.hia_ver = readl(cryp->base + HIA_VERSION); cap.hia_opt = readl(cryp->base + HIA_OPTIONS); cap.hw_opt = readl(cryp->base + EIP97_OPTIONS); if (!(((u16)cap.hia_ver) == EIP97_HIA_SIGNATURE)) return -EINVAL; /* Configure endianness conversion method for master (DMA) interface */ writel(0, cryp->base + EIP97_MST_CTRL); /* Set HIA burst size */ val = readl(cryp->base + HIA_MST_CTRL); val &= ~EIP97_BURST_SIZE_MSK; val |= EIP97_BURST_SIZE(5); writel(val, cryp->base + HIA_MST_CTRL); err = eip97_dfe_dse_reset(cryp); if (err) { dev_err(cryp->dev, "Failed to reset DFE and DSE.\n"); return err; } eip97_dfe_dse_buf_setup(cryp, &cap); /* Enable the 4 rings for the packet engines. */ eip97_desc_ring_link(cryp, 0xf); for (i = EIP97_RING0; i < EIP97_RING_MAX; i++) { eip97_cmd_desc_ring_setup(cryp, i, &cap); eip97_res_desc_ring_setup(cryp, i, &cap); } writel(EIP97_PE_TK_LOC_AVL | EIP97_PE_PROC_HELD | EIP97_PE_TK_TIMEOUT_EN, cryp->base + PE_TOKEN_CTRL_STAT); /* Clear all pending interrupts */ writel(EIP97_AIC_G_CLR, cryp->base + AIC_G_ACK); writel(EIP97_PE_INPUT_DMA_ERR | EIP97_PE_OUTPUT_DMA_ERR | EIP97_PE_PKT_PORC_ERR | EIP97_PE_PKT_TIMEOUT | EIP97_PE_FATAL_ERR | EIP97_PE_INPUT_DMA_ERR_EN | EIP97_PE_OUTPUT_DMA_ERR_EN | EIP97_PE_PKT_PORC_ERR_EN | EIP97_PE_PKT_TIMEOUT_EN | EIP97_PE_FATAL_ERR_EN | EIP97_PE_INT_OUT_EN, cryp->base + PE_INTERRUPT_CTRL_STAT); return 0; } static int eip97_accelerator_init(struct eip97_cryp *cryp) { int i, err; /* Initialize advanced interrupt controller(AIC) */ for (i = 0; i < EIP97_IRQ_NUM; i++) { err = eip97_aic_init(cryp, i); if (err) { dev_err(cryp->dev, "Failed to initialize AIC.\n"); return err; } } /* Initialize packet engine */ err = eip97_packet_engine_setup(cryp); if (err) { dev_err(cryp->dev, "Failed to configure packet engine.\n"); return err; } return 0; } static int eip97_desc_ring_alloc(struct eip97_cryp *cryp) { struct eip97_ring **ring = cryp->ring; u32 eip97_cur_desc_num = eip97_desc_num; enum eip97_ring_id i; int err = -ENOMEM; /* one tag per every descriptor of every ring */ cryp->tags_num = eip97_cur_desc_num * EIP97_RING_MAX; if (cryp->tags_num <= 1) return -EINVAL; /* Tag field is only 3 bytes */ if (cryp->tags_num & ((u32)0xFF000000)) return -EINVAL; cryp->tags = kcalloc(cryp->tags_num, sizeof(enum eip97_op_type *), GFP_KERNEL); if (!cryp->tags) return -ENOMEM; for (i = EIP97_RING0; i < EIP97_RING_MAX; i++) { ring[i] = kzalloc(sizeof(**ring), GFP_KERNEL); if (!ring[i]) goto err_cleanup; ring[i]->ring_id = i; ring[i]->cryp = cryp; ring[i]->cmd_base = dma_zalloc_coherent(cryp->dev, sizeof(struct eip97_cd) * eip97_cur_desc_num, &ring[i]->cmd_dma, GFP_KERNEL); if (!ring[i]->cmd_base) { kzfree(ring[i]); goto err_cleanup; } ring[i]->cmd_num = eip97_cur_desc_num; ring[i]->cmd_prep = ring[i]->cmd_base; ring[i]->cmd_proc = ring[i]->cmd_base; ring[i]->res_base = dma_zalloc_coherent(cryp->dev, sizeof(struct eip97_rd) * eip97_cur_desc_num, &ring[i]->res_dma, GFP_KERNEL); if (!ring[i]->res_base) { kzfree(ring[i]); dma_free_coherent(cryp->dev, sizeof(struct eip97_cd) * eip97_cur_desc_num, (void *)ring[i]->cmd_base, ring[i]->cmd_dma); goto err_cleanup; } ring[i]->res_num = eip97_cur_desc_num; ring[i]->res_prep = ring[i]->res_base; ring[i]->res_proc = ring[i]->res_base; } return 0; 
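/*
 * Error unwind for the ring allocations above: release the tag array and
 * then free the command/result descriptor memory of every ring that was
 * fully set up before the failure.
 */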
err_cleanup:
	kzfree(cryp->tags);
	while (i--) {
		dma_free_coherent(cryp->dev,
				  sizeof(struct eip97_rd) * ring[i]->res_num,
				  (void *)ring[i]->res_base, ring[i]->res_dma);
		dma_free_coherent(cryp->dev,
				  sizeof(struct eip97_cd) * ring[i]->cmd_num,
				  (void *)ring[i]->cmd_base, ring[i]->cmd_dma);
		kzfree(ring[i]);
	}

	return err;
}

static void eip97_desc_ring_free(struct eip97_cryp *cryp)
{
	struct eip97_ring **ring = cryp->ring;
	enum eip97_ring_id i;

	kzfree(cryp->tags);

	for (i = EIP97_RING0; i < EIP97_RING_MAX; i++) {
		dma_free_coherent(cryp->dev,
				  sizeof(struct eip97_rd) * ring[i]->res_num,
				  (void *)ring[i]->res_base, ring[i]->res_dma);
		dma_free_coherent(cryp->dev,
				  sizeof(struct eip97_cd) * ring[i]->cmd_num,
				  (void *)ring[i]->cmd_base, ring[i]->cmd_dma);
		kzfree(ring[i]);
	}
}

int eip97_alloc_tag(struct eip97_cryp *cryp, enum eip97_op_type *op_type)
{
	static u32 index;
	u32 last_index;
	int ret;

	if (!op_type)
		return -EINVAL;

	/* Bottom half already disabled by caller */
	spin_lock(&cryp->taglock);

	last_index = index;
	index = (index + 1) % cryp->tags_num;

	while (index != last_index) {
		if (cryp->tags[index] == NULL) {
			cryp->tags[index] = op_type;
			/* tag num can only hold 3 bytes, see allocation */
			ret = index;
			spin_unlock(&cryp->taglock);
			return ret;
		}
		index = (index + 1) % cryp->tags_num;
	}

	spin_unlock(&cryp->taglock);

	return -EAGAIN;
}

/* TODO: add a public function to enable the IRQ in the hardware */
static void eip97_irq_bh(unsigned long data)
{
	struct eip97_ring *ring = (struct eip97_ring *)data;
	enum eip97_ring_id ring_id = ring->ring_id;
	struct eip97_cryp *cryp = ring->cryp;
	enum eip97_op_type *op_type;
	eip97_err_t op_err;
	unsigned int bytes;
	u32 tag, val, desc_avail;

	val = readl(cryp->base + CDR_PROC_COUNT(ring_id));
	writel(val, cryp->base + CDR_PROC_COUNT(ring_id));
	desc_avail = EIP97_PROC_COUNT(val) / EIP97_RD_OFF;

	/* Simply drop all the uninteresting command descriptors */
	while (desc_avail--)
		proc_cd(ring);

	val = readl(cryp->base + RDR_PROC_COUNT(ring_id));
	writel(val, cryp->base + RDR_PROC_COUNT(ring_id));
	desc_avail = EIP97_PROC_COUNT(val) / EIP97_RD_OFF;

	/* Re-enable the result descriptor proc thresh irq
	 * that was disabled by writing RDR_PROC_COUNT
	 */
	writel(EIP97_RDR_PROC_THRESH | EIP97_RDR_PROC_MODE,
	       cryp->base + RDR_THRESH(ring_id));

	while (desc_avail) {
		/* Find end descriptor of next result */
		while (desc_avail && !(ring->res_proc->hdr & EIP97_DESC_LAST)) {
			WARN_ONCE(ring->res_proc->res1,
				  "EIP97 Operation result data in unexpected fields!");
			proc_rd(ring);
			desc_avail--;
		}
		if (!desc_avail)
			break;

		/* Collect result data from last result descriptor */
		op_err = EIP97_RD_ERR(ring->res_proc->res1);
		if (ring->res_proc->hdr & EIP97_RD_BUF_OVF)
			op_err |= EIP97_ERR_BUFFER_OVERFLOW;
		if (ring->res_proc->hdr & EIP97_RD_DESC_OVF)
			op_err |= EIP97_ERR_DESCRIPTOR_OVERFLOW;
		if (op_err)
			atomic_inc(&(cryp->debug.err_ops));
		bytes = EIP97_RD_RESULT_SIZE(ring->res_proc->res1);
		tag = EIP97_DESC_TAG(ring->res_proc->tag);

		/* Reset descriptor header so in case hardware reports result
		 * descriptors although the ring is empty we will simply discard
		 * them in the end descriptor search loop above
		 */
		ring->res_proc->hdr = 0;
		proc_rd(ring);
		desc_avail--;

		/* Lookup op_type inside original request context by tag */
		if (tag >= cryp->tags_num)
			continue;
		op_type = cryp->tags[tag];
		if (!op_type)
			continue;
		cryp->tags[tag] = NULL;

		/* Issue callback, must be inside softirq context for xfrm */
		atomic_inc(&(cryp->debug.rx_ops));
		switch (*op_type) {
		case EIP97_SKCIPHER:
			eip97_skcipher_finish(op_type, op_err, bytes);
			break;
		case EIP97_AEAD:
eip97_aead_finish(op_type, op_err, bytes); break; case EIP97_AHASH: eip97_ahash_finish(op_type, op_err, bytes); break; default: break; } } } static irqreturn_t eip97_irq_th(int irq, void *dev_id) { struct eip97_ring *ring = (struct eip97_ring *)dev_id; enum eip97_ring_id ring_id = ring->ring_id; struct eip97_cryp *cryp = ring->cryp; u32 val; /* clear all pending interrupts */ val = readl(cryp->base + RDR_STAT(ring_id)); writel(val, cryp->base + RDR_STAT(ring_id)); tasklet_schedule(&cryp->irq_bh[ring_id]); return IRQ_HANDLED; } static char *irq_name[] = { "eip97-req-from-cpu0", "eip97-req-from-cpu1", "eip97-req-from-cpu2", "eip97-req-from-cpu3" }; static int eip97_irq_init(struct eip97_cryp *cryp) { enum eip97_ring_id i; int ret; BUILD_BUG_ON(EIP97_RING_MAX > EIP97_IRQ_NUM); for (i = EIP97_RING0; i < EIP97_RING_MAX; i++) { tasklet_init(&cryp->irq_bh[i], eip97_irq_bh, (unsigned long)cryp->ring[i]); ret = devm_request_irq(cryp->dev, cryp->irq[i], eip97_irq_th, 0, irq_name[i], cryp->ring[i]); if (ret) goto err_cleanup; } for (i = EIP97_RING0; i < EIP97_RING_MAX; i++) writel(EIP97_IRQ_RDR(i), cryp->base + AIC_ENABLE_SET(i)); return 0; err_cleanup: tasklet_kill(&cryp->irq_bh[i]); while (i--) { devm_free_irq(cryp->dev, cryp->irq[i], cryp->ring[i]); tasklet_kill(&cryp->irq_bh[i]); } return ret; } static void eip97_irq_deinit(struct eip97_cryp *cryp) { enum eip97_ring_id i; for (i = EIP97_RING0; i < EIP97_RING_MAX; i++) { writel(0, cryp->base + AIC_ENABLE_SET(i)); devm_free_irq(cryp->dev, cryp->irq[i], cryp->ring[i]); tasklet_kill(&cryp->irq_bh[i]); } } static void eip97_add_device(struct eip97_cryp *cryp) { INIT_LIST_HEAD(&cryp->dev_list); spin_lock(&eip97_devices.lock); list_add_tail(&cryp->dev_list, &eip97_devices.dev_list); spin_unlock(&eip97_devices.lock); } static void eip97_del_device(struct eip97_cryp *cryp) { spin_lock(&eip97_devices.lock); list_del(&cryp->dev_list); spin_unlock(&eip97_devices.lock); } static int eip97_debug_init(struct eip97_cryp *cryp) { struct dentry *file; atomic_set(&(cryp->debug.unaligned_ops), 0); atomic_set(&(cryp->debug.total_ops), 0); atomic_set(&(cryp->debug.ring_full), 0); atomic_set(&(cryp->debug.ring_empty), 0); atomic_set(&(cryp->debug.err_ops), 0); atomic_set(&(cryp->debug.rx_ops), 0); atomic_set(&(cryp->debug.tx_ops), 0); atomic_set(&(cryp->debug.tests_total), 0); atomic_set(&(cryp->debug.tests_success), 0); atomic_set(&(cryp->debug.tests_fail), 0); cryp->debug.dir = debugfs_create_dir("eip97", NULL); if (!cryp->debug.dir) return -ENOENT; file = debugfs_create_atomic_t("eip97_unaligned_ops", 0400, cryp->debug.dir, &cryp->debug.unaligned_ops); if (!file) return -ENOENT; file = debugfs_create_atomic_t("eip97_total_ops", 0400, cryp->debug.dir, &cryp->debug.total_ops); if (!file) return -ENOENT; file = debugfs_create_atomic_t("eip97_ring_full", 0400, cryp->debug.dir, &cryp->debug.ring_full); if (!file) return -ENOENT; file = debugfs_create_atomic_t("eip97_ring_empty", 0400, cryp->debug.dir, &cryp->debug.ring_empty); if (!file) return -ENOENT; file = debugfs_create_atomic_t("eip97_err_ops", 0400, cryp->debug.dir, &cryp->debug.err_ops); if (!file) return -ENOENT; file = debugfs_create_atomic_t("eip97_rx_ops", 0400, cryp->debug.dir, &cryp->debug.rx_ops); if (!file) return -ENOENT; file = debugfs_create_atomic_t("eip97_tx_ops", 0400, cryp->debug.dir, &cryp->debug.tx_ops); if (!file) return -ENOENT; file = debugfs_create_atomic_t("eip97_tests_total", 0400, cryp->debug.dir, &cryp->debug.tests_total); if (!file) return -ENOENT; file = 
debugfs_create_atomic_t("eip97_tests_success", 0400, cryp->debug.dir, &cryp->debug.tests_success); if (!file) return -ENOENT; file = debugfs_create_atomic_t("eip97_tests_fail", 0400, cryp->debug.dir, &cryp->debug.tests_fail); if (!file) return -ENOENT; return 0; } static void eip97_debug_remove(struct eip97_cryp *cryp) { debugfs_remove_recursive(cryp->debug.dir); } static int eip97_crypto_probe(struct platform_device *pdev) { struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); struct eip97_cryp *cryp; int i, err; cryp = devm_kzalloc(&pdev->dev, sizeof(*cryp), GFP_KERNEL); if (!cryp) return -ENOMEM; cryp->base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(cryp->base)) return PTR_ERR(cryp->base); for (i = 0; i < EIP97_IRQ_NUM; i++) { cryp->irq[i] = platform_get_irq(pdev, i); if (cryp->irq[i] < 0) { dev_err(&pdev->dev, "No IRQ:%d resource info\n", i); return cryp->irq[i]; } } cryp->clk_cryp = devm_clk_get(&pdev->dev, "cryp"); if (IS_ERR(cryp->clk_cryp)) { dev_err(&pdev->dev, "No clock found\n"); return -EPROBE_DEFER; } cryp->dev = &pdev->dev; pm_runtime_enable(cryp->dev); pm_runtime_get_sync(cryp->dev); err = clk_prepare_enable(cryp->clk_cryp); if (err) goto err_clk_cryp; spin_lock_init(&cryp->taglock); /* Allocate four command/result descriptor rings */ err = eip97_desc_ring_alloc(cryp); if (err) { dev_err(cryp->dev, "Unable to allocate descriptor rings.\n"); goto err_resource; } /* Initialize hardware modules */ err = eip97_accelerator_init(cryp); if (err) { dev_err(cryp->dev, "Failed to initialize cryptographic engine.\n"); goto err_engine; } err = eip97_irq_init(cryp); if (err) { dev_err(cryp->dev, "Failed to initialize IRQs.\n"); goto err_irq; } eip97_add_device(cryp); platform_set_drvdata(pdev, cryp); err = eip97_debug_init(cryp); if (err) { dev_err(cryp->dev, "Failed to initialize debug statistics.\n"); goto err_debug; } err = eip97_aead_register_algs(); if (err) { dev_err(cryp->dev, "Unable to register aead algorithms.\n"); goto err_aead; } err = eip97_skcipher_register_algs(); if (err) { dev_err(cryp->dev, "Unable to register skcipher algorithms.\n"); goto err_skcipher; } err = eip97_ahash_register_algs(); if (err) { dev_err(cryp->dev, "Unable to register ahash algorithms.\n"); goto err_ahash; } eip97_aead_start_alg_tests(&cryp->debug); eip97_skcipher_start_alg_tests(&cryp->debug); eip97_ahash_start_alg_tests(&cryp->debug); return 0; err_ahash: eip97_skcipher_unregister_algs(); err_skcipher: eip97_aead_unregister_algs(); err_aead: eip97_dfe_dse_reset(cryp); eip97_debug_remove(cryp); err_debug: eip97_irq_deinit(cryp); err_irq: err_engine: eip97_desc_ring_free(cryp); err_resource: clk_disable_unprepare(cryp->clk_cryp); err_clk_cryp: pm_runtime_put_sync(cryp->dev); pm_runtime_disable(cryp->dev); return err; } static int eip97_crypto_remove(struct platform_device *pdev) { struct eip97_cryp *cryp = platform_get_drvdata(pdev); eip97_irq_deinit(cryp); eip97_del_device(cryp); eip97_aead_unregister_algs(); eip97_skcipher_unregister_algs(); eip97_ahash_unregister_algs(); eip97_desc_ring_free(cryp); eip97_debug_remove(cryp); clk_disable_unprepare(cryp->clk_cryp); pm_runtime_put_sync(cryp->dev); pm_runtime_disable(cryp->dev); platform_set_drvdata(pdev, NULL); return 0; } static const struct of_device_id of_crypto_id[] = { { .compatible = "lantiq,crypto-xrx500" }, {}, }; MODULE_DEVICE_TABLE(of, of_crypto_id); static struct platform_driver eip97_crypto_driver = { .probe = eip97_crypto_probe, .remove = eip97_crypto_remove, .driver = { .name = "eip97-crypto", 
.of_match_table = of_crypto_id, }, }; module_platform_driver(eip97_crypto_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Robert Hering "); MODULE_DESCRIPTION("Cryptographic accelerator driver for EIP97");