#if defined(CONFIG_BCM_KF_OPTEE_414_BACKPORTS)
/*
 * Copyright (c) 2015, Linaro Limited
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/arm-smccc.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/tee_drv.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include "optee_private.h"
#include "optee_smc.h"

#define DRIVER_NAME "optee"

#define OPTEE_SHM_NUM_PRIV_PAGES	1

#if defined(CONFIG_BCM_KF_OPTEE) && defined(CONFIG_OPTEE)
#include <linux/memblock.h>	/* for meminfo */
#include <linux/mm.h>		/* for high_memory */
#include <linux/mmzone.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/page-flags.h>
#include <linux/pfn.h>
#include <linux/sched.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

static int pte_callback(pte_t *pte, unsigned long x, unsigned long y,
			struct mm_walk *walk)
{
	const pgprot_t pte_prot = __pgprot(*pte);
	const pgprot_t req_prot = *((pgprot_t *)walk->private);
#if defined(CONFIG_ARM64)
	const pgprot_t prot_msk = PROT_DEFAULT | PTE_VALID;
#else
	const pgprot_t prot_msk = L_PTE_MT_MASK | L_PTE_VALID;
#endif
	return (((pte_prot ^ req_prot) & prot_msk) == 0) ? 0 : -1;
}

static void *page_to_virt_contig(const struct page *page, unsigned int pg_cnt,
				 pgprot_t pgprot)
{
	int rc;
	struct mm_walk walk;
	unsigned long pfn;
	unsigned long pfn_start;
	unsigned long pfn_end;
	unsigned long va_start;
	unsigned long va_end;

	if ((page == NULL) || !pg_cnt)
		return ERR_PTR(-EINVAL);

	pfn_start = page_to_pfn(page);
	pfn_end = pfn_start + pg_cnt;
	for (pfn = pfn_start; pfn < pfn_end; pfn++) {
		const struct page *cur_pg = pfn_to_page(pfn);
		phys_addr_t pa;

		/* Verify range is in low memory only */
		if (PageHighMem(cur_pg))
			return NULL;

		/* Must be mapped */
		pa = page_to_phys(cur_pg);
		if (page_address(cur_pg) == NULL)
			return NULL;
	}

	/*
	 * Aliased mappings with different cacheability attributes on ARM can
	 * lead to trouble!
	 */
	memset(&walk, 0, sizeof(walk));
	walk.pte_entry = &pte_callback;
	walk.private = (void *)&pgprot;
	walk.mm = current->mm;
	va_start = (unsigned long)page_address(page);
	va_end = (unsigned long)(page_address(page) + (pg_cnt << PAGE_SHIFT));
	rc = walk_page_range(va_start, va_end, &walk);
	if (rc)
		pr_debug("cacheability mismatch\n");

	return rc ? NULL : page_address(page);
}

static struct page **get_pages(struct page *page, int num_pages)
{
	struct page **pages;
	long pfn;
	int i;

	if (num_pages == 0) {
		pr_err("bad count\n");
		return NULL;
	}

	if (page == NULL) {
		pr_err("bad page\n");
		return NULL;
	}

	pages = vmalloc(sizeof(struct page *) * num_pages);
	if (pages == NULL)
		return NULL;

	pfn = page_to_pfn(page);
	for (i = 0; i < num_pages; i++) {
		/*
		 * pfn_to_page() should resolve to simple arithmetic for the
		 * FLATMEM memory model.
		 */
		pages[i] = pfn_to_page(pfn++);
	}

	return pages;
}
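/*
 * For reference, with FLATMEM the pfn_to_page() above boils down to plain
 * pointer arithmetic on the flat mem_map array, roughly:
 *
 *	page = mem_map + (pfn - ARCH_PFN_OFFSET);
 *
 * so walking a physically contiguous range with pfn++ stays cheap.
 */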
/*
 * Basically just vmap() without checking that count < totalram_pages,
 * since we want to be able to map pages that aren't managed by Linux
 */
static void *brcmstb_memory_vmap(struct page **pages, unsigned int count,
				 unsigned long flags, pgprot_t prot)
{
	struct vm_struct *area;

	might_sleep();

	area = get_vm_area_caller((count << PAGE_SHIFT), flags,
				  __builtin_return_address(0));
	if (!area)
		return NULL;

	if (map_vm_area(area, prot, pages)) {
		vunmap(area->addr);
		return NULL;
	}

	return area->addr;
}

/**
 * brcmstb_memory_kva_map() - Map page(s) to a kernel virtual address
 *
 * @page: A struct page * that points to the beginning of a chunk of
 *        physically contiguous memory.
 * @num_pages: Number of pages
 * @pgprot: Page protection bits
 *
 * Return: pointer to mapping, or NULL on failure
 */
void *brcmstb_memory_kva_map(struct page *page, int num_pages, pgprot_t pgprot)
{
	void *va;

	/* get the virtual address for this range if it exists */
	va = page_to_virt_contig(page, num_pages, pgprot);
	if (IS_ERR(va)) {
		pr_debug("page_to_virt_contig() failed (%ld)\n", PTR_ERR(va));
		return NULL;
	} else if (va == NULL || is_vmalloc_addr(va)) {
		struct page **pages;

		pages = get_pages(page, num_pages);
		if (pages == NULL) {
			pr_err("couldn't get pages\n");
			return NULL;
		}

		va = brcmstb_memory_vmap(pages, num_pages, 0, pgprot);
		vfree(pages);
		if (va == NULL) {
			pr_err("vmap failed (num_pgs=%d)\n", num_pages);
			return NULL;
		}
	}

	return va;
}
EXPORT_SYMBOL(brcmstb_memory_kva_map);

/**
 * brcmstb_memory_kva_map_phys() - map phys range to kernel virtual address
 *
 * @phys: physical address base
 * @size: size of range to map
 * @cached: whether to use cached or uncached mapping
 *
 * Return: pointer to the mapping on success, NULL on failure
 */
void *brcmstb_memory_kva_map_phys(phys_addr_t phys, size_t size, bool cached)
{
	void *addr = NULL;
	unsigned long pfn = PFN_DOWN(phys);

	if (!cached) {
		/*
		 * This could be supported for MIPS by using ioremap instead,
		 * but that cannot be done on ARM if you want O_DIRECT support
		 * because having multiple mappings to the same memory with
		 * different cacheability will result in undefined behavior.
		 */
		return NULL;
	}

	if (pfn_valid(pfn)) {
		addr = brcmstb_memory_kva_map(pfn_to_page(pfn),
					      size / PAGE_SIZE, PAGE_KERNEL);
	}

	return addr;
}
EXPORT_SYMBOL(brcmstb_memory_kva_map_phys);

/**
 * brcmstb_memory_kva_unmap() - Unmap a kernel virtual address associated
 *                              with physical pages mapped by
 *                              brcmstb_memory_kva_map()
 *
 * @kva: Kernel virtual address previously mapped by brcmstb_memory_kva_map()
 *
 * Return: 0 on success, negative on failure.
 */
int brcmstb_memory_kva_unmap(const void *kva)
{
	if (kva == NULL)
		return -EINVAL;

	if (!is_vmalloc_addr(kva)) {
		/* unmapping not necessary for low memory VAs */
		return 0;
	}

	vunmap(kva);
	return 0;
}
EXPORT_SYMBOL(brcmstb_memory_kva_unmap);
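/*
 * Illustrative use of the two exports above (hypothetical caller, not part
 * of this driver): map a cached, physically contiguous window and release
 * it again.
 *
 *	void *va = brcmstb_memory_kva_map_phys(pa, 2 * PAGE_SIZE, true);
 *
 *	if (va) {
 *		memset(va, 0, 2 * PAGE_SIZE);
 *		brcmstb_memory_kva_unmap(va);
 *	}
 *
 * Uncached requests (cached == false) are rejected by design; see the
 * comment in brcmstb_memory_kva_map_phys().
 */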
extern void *brcmstb_memory_kva_map_phys(phys_addr_t phys, size_t size,
					 bool cached);
extern int brcmstb_memory_kva_unmap(const void *kva);
#endif /* defined(CONFIG_BCM_KF_OPTEE) && defined(CONFIG_OPTEE) */

/**
 * optee_from_msg_param() - convert from OPTEE_MSG parameters to
 *			    struct tee_param
 * @params:	subsystem internal parameter representation
 * @num_params:	number of elements in the parameter arrays
 * @msg_params:	OPTEE_MSG parameters
 * Returns 0 on success or <0 on failure
 */
int optee_from_msg_param(struct tee_param *params, size_t num_params,
			 const struct optee_msg_param *msg_params)
{
	int rc;
	size_t n;
	struct tee_shm *shm;
	phys_addr_t pa;

	for (n = 0; n < num_params; n++) {
		struct tee_param *p = params + n;
		const struct optee_msg_param *mp = msg_params + n;
		u32 attr = mp->attr & OPTEE_MSG_ATTR_TYPE_MASK;

		switch (attr) {
		case OPTEE_MSG_ATTR_TYPE_NONE:
			p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
			memset(&p->u, 0, sizeof(p->u));
			break;
		case OPTEE_MSG_ATTR_TYPE_VALUE_INPUT:
		case OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT:
		case OPTEE_MSG_ATTR_TYPE_VALUE_INOUT:
			p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT +
				  attr - OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
			p->u.value.a = mp->u.value.a;
			p->u.value.b = mp->u.value.b;
			p->u.value.c = mp->u.value.c;
			break;
		case OPTEE_MSG_ATTR_TYPE_TMEM_INPUT:
		case OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT:
		case OPTEE_MSG_ATTR_TYPE_TMEM_INOUT:
			p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
				  attr - OPTEE_MSG_ATTR_TYPE_TMEM_INPUT;
			p->u.memref.size = mp->u.tmem.size;
			shm = (struct tee_shm *)(unsigned long)
				mp->u.tmem.shm_ref;
			if (!shm) {
				p->u.memref.shm_offs = 0;
				p->u.memref.shm = NULL;
				break;
			}
			rc = tee_shm_get_pa(shm, 0, &pa);
			if (rc)
				return rc;
			p->u.memref.shm_offs = mp->u.tmem.buf_ptr - pa;
			p->u.memref.shm = shm;
			/* Check that the memref is covered by the shm object */
			if (p->u.memref.size) {
				size_t o = p->u.memref.shm_offs +
					   p->u.memref.size - 1;

				rc = tee_shm_get_pa(shm, o, NULL);
				if (rc)
					return rc;
			}
			break;
		default:
			return -EINVAL;
		}
	}
	return 0;
}
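/*
 * Worked example for the TMEM case above (the values are illustrative only):
 * if the buffer backing the tee_shm starts at PA 0x8e000000 and secure world
 * passes buf_ptr = 0x8e000080 with size = 0x100, the converted tee_param
 * ends up with shm_offs = 0x80 and size = 0x100. The second
 * tee_shm_get_pa() call, at offset shm_offs + size - 1, merely confirms
 * that the last byte of the memref still lies inside the shm object.
 */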
/**
 * optee_to_msg_param() - convert from struct tee_param to OPTEE_MSG
 *			  parameters
 * @msg_params:	OPTEE_MSG parameters
 * @num_params:	number of elements in the parameter arrays
 * @params:	subsystem internal parameter representation
 * Returns 0 on success or <0 on failure
 */
int optee_to_msg_param(struct optee_msg_param *msg_params, size_t num_params,
		       const struct tee_param *params)
{
	int rc;
	size_t n;
	phys_addr_t pa;

	for (n = 0; n < num_params; n++) {
		const struct tee_param *p = params + n;
		struct optee_msg_param *mp = msg_params + n;

		switch (p->attr) {
		case TEE_IOCTL_PARAM_ATTR_TYPE_NONE:
			mp->attr = OPTEE_MSG_ATTR_TYPE_NONE;
			memset(&mp->u, 0, sizeof(mp->u));
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
			mp->attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT + p->attr -
				   TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
			mp->u.value.a = p->u.value.a;
			mp->u.value.b = p->u.value.b;
			mp->u.value.c = p->u.value.c;
			break;
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
			mp->attr = OPTEE_MSG_ATTR_TYPE_TMEM_INPUT + p->attr -
				   TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
			mp->u.tmem.shm_ref = (unsigned long)p->u.memref.shm;
			mp->u.tmem.size = p->u.memref.size;
			if (!p->u.memref.shm) {
				mp->u.tmem.buf_ptr = 0;
				break;
			}
			rc = tee_shm_get_pa(p->u.memref.shm,
					    p->u.memref.shm_offs, &pa);
			if (rc)
				return rc;
			mp->u.tmem.buf_ptr = pa;
			mp->attr |= OPTEE_MSG_ATTR_CACHE_PREDEFINED <<
				    OPTEE_MSG_ATTR_CACHE_SHIFT;
			break;
		default:
			return -EINVAL;
		}
	}
	return 0;
}
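/*
 * Note on the attribute arithmetic in the two converters above: it relies on
 * the INPUT/OUTPUT/INOUT members being consecutive, and in the same order,
 * in both the TEE_IOCTL_PARAM_ATTR_TYPE_* and OPTEE_MSG_ATTR_TYPE_* spaces.
 * For instance (illustrative):
 *
 *	OPTEE_MSG_ATTR_TYPE_VALUE_INPUT
 *		+ (TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT
 *		   - TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT)
 *		== OPTEE_MSG_ATTR_TYPE_VALUE_INOUT
 */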
static void optee_get_version(struct tee_device *teedev,
			      struct tee_ioctl_version_data *vers)
{
	struct tee_ioctl_version_data v = {
		.impl_id = TEE_IMPL_ID_OPTEE,
		.impl_caps = TEE_OPTEE_CAP_TZ,
		.gen_caps = TEE_GEN_CAP_GP,
	};
	*vers = v;
}

static int optee_open(struct tee_context *ctx)
{
	struct optee_context_data *ctxdata;
	struct tee_device *teedev = ctx->teedev;
	struct optee *optee = tee_get_drvdata(teedev);

	ctxdata = kzalloc(sizeof(*ctxdata), GFP_KERNEL);
	if (!ctxdata)
		return -ENOMEM;

	if (teedev == optee->supp_teedev) {
		bool busy = true;

		mutex_lock(&optee->supp.ctx_mutex);
		if (!optee->supp.ctx) {
			busy = false;
			optee->supp.ctx = ctx;
		}
		mutex_unlock(&optee->supp.ctx_mutex);
		if (busy) {
			kfree(ctxdata);
			return -EBUSY;
		}
	}

	mutex_init(&ctxdata->mutex);
	INIT_LIST_HEAD(&ctxdata->sess_list);

	ctx->data = ctxdata;
	return 0;
}

static void optee_release(struct tee_context *ctx)
{
	struct optee_context_data *ctxdata = ctx->data;
	struct tee_device *teedev = ctx->teedev;
	struct optee *optee = tee_get_drvdata(teedev);
	struct tee_shm *shm;
	struct optee_msg_arg *arg = NULL;
	phys_addr_t parg;
	struct optee_session *sess;
	struct optee_session *sess_tmp;

	if (!ctxdata)
		return;

	shm = tee_shm_alloc(ctx, sizeof(struct optee_msg_arg), TEE_SHM_MAPPED);
	if (!IS_ERR(shm)) {
		arg = tee_shm_get_va(shm, 0);
		/*
		 * If va2pa fails for some reason, we can't call into
		 * secure world, only free the memory. Secure OS will leak
		 * sessions and finally refuse more sessions, but we will
		 * at least let normal world reclaim its memory.
		 */
		if (!IS_ERR(arg))
			if (tee_shm_va2pa(shm, arg, &parg))
				arg = NULL; /* prevent usage of parg below */
	}

	list_for_each_entry_safe(sess, sess_tmp, &ctxdata->sess_list,
				 list_node) {
		list_del(&sess->list_node);
		if (!IS_ERR_OR_NULL(arg)) {
			memset(arg, 0, sizeof(*arg));
			arg->cmd = OPTEE_MSG_CMD_CLOSE_SESSION;
			arg->session = sess->session_id;
			optee_do_call_with_arg(ctx, parg);
		}
		kfree(sess);
	}
	kfree(ctxdata);

	if (!IS_ERR(shm))
		tee_shm_free(shm);

	ctx->data = NULL;

	if (teedev == optee->supp_teedev) {
		mutex_lock(&optee->supp.ctx_mutex);
		optee->supp.ctx = NULL;
		mutex_unlock(&optee->supp.ctx_mutex);
	}
}

static const struct tee_driver_ops optee_ops = {
	.get_version = optee_get_version,
	.open = optee_open,
	.release = optee_release,
	.open_session = optee_open_session,
	.close_session = optee_close_session,
	.invoke_func = optee_invoke_func,
	.cancel_req = optee_cancel_req,
};

static const struct tee_desc optee_desc = {
	.name = DRIVER_NAME "-clnt",
	.ops = &optee_ops,
	.owner = THIS_MODULE,
};

static const struct tee_driver_ops optee_supp_ops = {
	.get_version = optee_get_version,
	.open = optee_open,
	.release = optee_release,
	.supp_recv = optee_supp_recv,
	.supp_send = optee_supp_send,
};

static const struct tee_desc optee_supp_desc = {
	.name = DRIVER_NAME "-supp",
	.ops = &optee_supp_ops,
	.owner = THIS_MODULE,
	.flags = TEE_DESC_PRIVILEGED,
};
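/*
 * The two descriptors above make the TEE core register two character
 * devices: a client device (typically /dev/tee<N>) backed by optee_desc and
 * a privileged supplicant device (typically /dev/teepriv<N>) backed by
 * optee_supp_desc. optee_open() only allows a single supplicant context at
 * a time, hence the -EBUSY path.
 */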
static bool optee_msg_api_uid_is_optee_api(optee_invoke_fn *invoke_fn)
{
	struct arm_smccc_res res;

	invoke_fn(OPTEE_SMC_CALLS_UID, 0, 0, 0, 0, 0, 0, 0, &res);

	if (res.a0 == OPTEE_MSG_UID_0 && res.a1 == OPTEE_MSG_UID_1 &&
	    res.a2 == OPTEE_MSG_UID_2 && res.a3 == OPTEE_MSG_UID_3)
		return true;
	return false;
}

static bool optee_msg_api_revision_is_compatible(optee_invoke_fn *invoke_fn)
{
	union {
		struct arm_smccc_res smccc;
		struct optee_smc_calls_revision_result result;
	} res;

	invoke_fn(OPTEE_SMC_CALLS_REVISION, 0, 0, 0, 0, 0, 0, 0, &res.smccc);

	if (res.result.major == OPTEE_MSG_REVISION_MAJOR &&
	    (int)res.result.minor >= OPTEE_MSG_REVISION_MINOR)
		return true;
	return false;
}

static bool optee_msg_exchange_capabilities(optee_invoke_fn *invoke_fn,
					    u32 *sec_caps)
{
	union {
		struct arm_smccc_res smccc;
		struct optee_smc_exchange_capabilities_result result;
	} res;
	u32 a1 = 0;

	/*
	 * TODO This isn't enough to tell if it's UP system (from kernel
	 * point of view) or not, is_smp() returns the information
	 * needed, but can't be called directly from here.
	 */
	if (!IS_ENABLED(CONFIG_SMP) || nr_cpu_ids == 1)
		a1 |= OPTEE_SMC_NSEC_CAP_UNIPROCESSOR;

	invoke_fn(OPTEE_SMC_EXCHANGE_CAPABILITIES, a1, 0, 0, 0, 0, 0, 0,
		  &res.smccc);

	if (res.result.status != OPTEE_SMC_RETURN_OK)
		return false;

	*sec_caps = res.result.capabilities;

	return true;
}

static struct tee_shm_pool *
optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm)
{
	union {
		struct arm_smccc_res smccc;
		struct optee_smc_get_shm_config_result result;
	} res;
	struct tee_shm_pool *pool;
	unsigned long vaddr;
	phys_addr_t paddr;
	size_t size;
	phys_addr_t begin;
	phys_addr_t end;
	void *va;
	struct tee_shm_pool_mem_info priv_info;
	struct tee_shm_pool_mem_info dmabuf_info;

	invoke_fn(OPTEE_SMC_GET_SHM_CONFIG, 0, 0, 0, 0, 0, 0, 0, &res.smccc);
	if (res.result.status != OPTEE_SMC_RETURN_OK) {
		pr_info("shm service not available\n");
		return ERR_PTR(-ENOENT);
	}

	if (res.result.settings != OPTEE_SMC_SHM_CACHED) {
		pr_err("only normal cached shared memory supported\n");
		return ERR_PTR(-EINVAL);
	}

	begin = roundup(res.result.start, PAGE_SIZE);
	end = rounddown(res.result.start + res.result.size, PAGE_SIZE);
	paddr = begin;
	size = end - begin;

	if (size < 2 * OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE) {
		pr_err("too small shared memory area\n");
		return ERR_PTR(-EINVAL);
	}

#if defined(CONFIG_BCM_KF_OPTEE) && defined(CONFIG_OPTEE)
	va = brcmstb_memory_kva_map_phys(paddr, size, true);
#else
	va = memremap(paddr, size, MEMREMAP_WB);
#endif
	if (!va) {
		pr_err("shared memory ioremap failed\n");
		return ERR_PTR(-EINVAL);
	}
	vaddr = (unsigned long)va;

	priv_info.vaddr = vaddr;
	priv_info.paddr = paddr;
	priv_info.size = OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;
	dmabuf_info.vaddr = vaddr + OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;
	dmabuf_info.paddr = paddr + OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;
	dmabuf_info.size = size - OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;

	pool = tee_shm_pool_alloc_res_mem(&priv_info, &dmabuf_info);
	if (IS_ERR(pool)) {
#if defined(CONFIG_BCM_KF_OPTEE) && defined(CONFIG_OPTEE)
		brcmstb_memory_kva_unmap(va);
#else
		memunmap(va);
#endif
		goto out;
	}

	*memremaped_shm = va;
out:
	return pool;
}
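/*
 * Illustrative layout of the pool carved out above, assuming the default
 * OPTEE_SHM_NUM_PRIV_PAGES = 1 and a 4 KiB page size: the first page of the
 * page-aligned [begin, end) window feeds the private (driver-internal)
 * allocator and everything after it becomes the dma-buf pool:
 *
 *	priv_info:   paddr = begin,           size = 4 KiB
 *	dmabuf_info: paddr = begin + 4 KiB,   size = (end - begin) - 4 KiB
 *
 * The "size < 2 * OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE" check guarantees
 * that the dma-buf region is at least as large as the private one.
 */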
/* Simple wrapper functions to be able to use a function pointer */
static void optee_smccc_smc(unsigned long a0, unsigned long a1,
			    unsigned long a2, unsigned long a3,
			    unsigned long a4, unsigned long a5,
			    unsigned long a6, unsigned long a7,
			    struct arm_smccc_res *res)
{
#if defined(CONFIG_BCM_KF_OPTEE)
	extern char _stext[], _etext[];
#if defined(CONFIG_ARM64)
	register long ret_addr asm ("x30");

	/* Make SMC call and save x0-x3 into res */
	__asm__ volatile(
		"smc #0			\n"
		"mov x8, %0		\n"
		"stp x0, x1, [x8]	\n"
		"stp x2, x3, [x8, #0x10]\n"
		: : "r" (res) : "x8");
#else
	register long ret_addr asm ("r14");

	/* Make SMC call (opcode: 0xE1600070) and save r0-r3 into res */
	__asm__ volatile(
		".long 0xE1600070	\n"
		"mov r8, %0		\n"
		"stm r8, {r0-r3}	\n"
		: : "r" (res) : "r8");
#endif
	/* Make sure this function was called from the signed address range */
	if ((char *)ret_addr < _stext || (char *)ret_addr > _etext) {
		pr_info("ALERT!! System intrusion detected by OP-TEE\n");
		res->a0 = OPTEE_SMC_RETURN_UNKNOWN_FUNCTION;
		return;
	}
#else
	arm_smccc_smc(a0, a1, a2, a3, a4, a5, a6, a7, res);
#endif
}

static void optee_smccc_hvc(unsigned long a0, unsigned long a1,
			    unsigned long a2, unsigned long a3,
			    unsigned long a4, unsigned long a5,
			    unsigned long a6, unsigned long a7,
			    struct arm_smccc_res *res)
{
	arm_smccc_hvc(a0, a1, a2, a3, a4, a5, a6, a7, res);
}

static optee_invoke_fn *get_invoke_func(struct device_node *np)
{
	const char *method;

	pr_info("probing for conduit method from DT.\n");

	if (of_property_read_string(np, "method", &method)) {
		pr_warn("missing \"method\" property\n");
		return ERR_PTR(-ENXIO);
	}

	if (!strcmp("hvc", method))
		return optee_smccc_hvc;
	else if (!strcmp("smc", method))
		return optee_smccc_smc;

	pr_warn("invalid \"method\" property: %s\n", method);
	return ERR_PTR(-EINVAL);
}
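/*
 * Illustrative device tree fragment that get_invoke_func() (and the
 * /firmware lookup in optee_driver_init() below) expect, following the
 * upstream "linaro,optee-tz" binding; "method" may be "smc" or "hvc":
 *
 *	firmware {
 *		optee {
 *			compatible = "linaro,optee-tz";
 *			method = "smc";
 *		};
 *	};
 */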
#if defined(CONFIG_BCM_KF_OPTEE)
struct optee *optee_probe(struct device_node *np)
#else
static struct optee *optee_probe(struct device_node *np)
#endif
{
	optee_invoke_fn *invoke_fn;
	struct tee_shm_pool *pool;
	struct optee *optee = NULL;
	void *memremaped_shm = NULL;
	struct tee_device *teedev;
	u32 sec_caps;
	int rc;

	invoke_fn = get_invoke_func(np);
	if (IS_ERR(invoke_fn))
		return (void *)invoke_fn;

	if (!optee_msg_api_uid_is_optee_api(invoke_fn)) {
		pr_warn("api uid mismatch\n");
		return ERR_PTR(-EINVAL);
	}

	if (!optee_msg_api_revision_is_compatible(invoke_fn)) {
		pr_warn("api revision mismatch\n");
		return ERR_PTR(-EINVAL);
	}

	if (!optee_msg_exchange_capabilities(invoke_fn, &sec_caps)) {
		pr_warn("capabilities mismatch\n");
		return ERR_PTR(-EINVAL);
	}

	/*
	 * We have no other option for shared memory, if secure world
	 * doesn't have any reserved memory we can use we can't continue.
	 */
	if (!(sec_caps & OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM))
		return ERR_PTR(-EINVAL);

	pool = optee_config_shm_memremap(invoke_fn, &memremaped_shm);
	if (IS_ERR(pool))
		return (void *)pool;

	optee = kzalloc(sizeof(*optee), GFP_KERNEL);
	if (!optee) {
		rc = -ENOMEM;
		goto err;
	}

	optee->invoke_fn = invoke_fn;

	teedev = tee_device_alloc(&optee_desc, NULL, pool, optee);
	if (IS_ERR(teedev)) {
		rc = PTR_ERR(teedev);
		goto err;
	}
	optee->teedev = teedev;

	teedev = tee_device_alloc(&optee_supp_desc, NULL, pool, optee);
	if (IS_ERR(teedev)) {
		rc = PTR_ERR(teedev);
		goto err;
	}
	optee->supp_teedev = teedev;

	rc = tee_device_register(optee->teedev);
	if (rc)
		goto err;

	rc = tee_device_register(optee->supp_teedev);
	if (rc)
		goto err;

	mutex_init(&optee->call_queue.mutex);
	INIT_LIST_HEAD(&optee->call_queue.waiters);
	optee_wait_queue_init(&optee->wait_queue);
	optee_supp_init(&optee->supp);
	optee->memremaped_shm = memremaped_shm;
	optee->pool = pool;

	optee_enable_shm_cache(optee);

	pr_info("initialized driver\n");
	return optee;
err:
	if (optee) {
		/*
		 * tee_device_unregister() is safe to call even if the
		 * devices haven't been registered with
		 * tee_device_register() yet.
		 */
		tee_device_unregister(optee->supp_teedev);
		tee_device_unregister(optee->teedev);
		kfree(optee);
	}
	if (pool)
		tee_shm_pool_free(pool);
	if (memremaped_shm)
#if defined(CONFIG_BCM_KF_OPTEE) && defined(CONFIG_OPTEE)
		brcmstb_memory_kva_unmap(memremaped_shm);
#else
		memunmap(memremaped_shm);
#endif
	return ERR_PTR(rc);
}

static void optee_remove(struct optee *optee)
{
	/*
	 * Ask OP-TEE to free all cached shared memory objects to decrease
	 * reference counters and also avoid wild pointers in secure world
	 * into the old shared memory range.
	 */
	optee_disable_shm_cache(optee);

	/*
	 * The two devices have to be unregistered before we can free the
	 * other resources.
	 */
	tee_device_unregister(optee->supp_teedev);
	tee_device_unregister(optee->teedev);

	tee_shm_pool_free(optee->pool);
	if (optee->memremaped_shm)
#if defined(CONFIG_BCM_KF_OPTEE) && defined(CONFIG_OPTEE)
		brcmstb_memory_kva_unmap(optee->memremaped_shm);
#else
		memunmap(optee->memremaped_shm);
#endif
	optee_wait_queue_exit(&optee->wait_queue);
	optee_supp_uninit(&optee->supp);
	mutex_destroy(&optee->call_queue.mutex);

	kfree(optee);
}

static const struct of_device_id optee_match[] = {
	{ .compatible = "linaro,optee-tz" },
	{},
};

static struct optee *optee_svc;

static int __init optee_driver_init(void)
{
	struct device_node *fw_np;
	struct device_node *np;
	struct optee *optee;

	/* Node is supposed to be below /firmware */
	fw_np = of_find_node_by_name(NULL, "firmware");
	if (!fw_np)
		return -ENODEV;

	np = of_find_matching_node(fw_np, optee_match);
	of_node_put(fw_np);
	if (!np)
		return -ENODEV;

	optee = optee_probe(np);
	of_node_put(np);

	if (IS_ERR(optee))
		return PTR_ERR(optee);

	optee_svc = optee;

	return 0;
}
module_init(optee_driver_init);

static void __exit optee_driver_exit(void)
{
	struct optee *optee = optee_svc;

	optee_svc = NULL;
	if (optee)
		optee_remove(optee);
}
module_exit(optee_driver_exit);

MODULE_AUTHOR("Linaro");
MODULE_DESCRIPTION("OP-TEE driver");
MODULE_SUPPORTED_DEVICE("");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL v2");
#endif /* CONFIG_BCM_KF_OPTEE_414_BACKPORTS */