/* * * pal_cppi41.c * Description: * see below * This file is provided under a dual BSD/GPLv2 license. When using or redistributing this file, you may do so under either license. GPL LICENSE SUMMARY Copyright(c) 2015-2020 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms of version 2 of the GNU General Public License as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. The full GNU General Public License is included in this distribution in the file called LICENSE.GPL. Contact Information: Intel Corporation 2200 Mission College Blvd. Santa Clara, CA 97052 BSD LICENSE Copyright(c) 2015-2020 Intel Corporation. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. **/ /** * \file pal_cppi41.c * \brief PAL CPPI 4 Source file, contains the minimum PAL * implementation for NPCPU APPCPU datapipe. * * */ /****************/ /** Includes **/ /****************/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef CONFIG_MRPC_CPPI_CLIENT extern PAL_Result PAL_cppi4GetBpoolBsmonInfo (u32 idx, qsmonRal_t *info ); extern PAL_Result PAL_cppi4GetPpFreeQueueInfo(u32 idx, FDqueue_t *fqueue_info); extern PAL_Result PAL_cppi4GetFreeQueuesCount(u32 *fqueues_count ); extern PAL_Result PAL_cppi4GetPpDescRegionInfo(u32 idx, Cppi4DescReg *region_info); #endif /***************/ /** Defines **/ /***************/ /* uncomment to enable debug prints */ //#define PAL_CPPI4_DBG #define DBG_PROC_DIR "/proc/cppi/sr/dbg" #define PROC_CREATE_FILE(name, mode, root_directory, file_operations) \ do { \ if (!proc_create(name, mode, root_directory, file_operations)) { \ EPRINTK("Failed to create '%s' proc file\n", name); \ return 1; \ } \ } while(0) #ifdef PAL_CPPI4_DBG /* Debug print, also print 
function name and line number */ # define DPRINTK(fmt, args...) printk("%s(%d): " fmt "\n", __FUNCTION__ , __LINE__, ## args) #else # define DPRINTK(fmt, args...) #endif #ifdef PAL_CPPI4_DBG #define ACCUM_CH_PARAM_DEBUG(initCfg) printk("\n accChanNum=%d\n\ mode=%d\nqMg=%d\n qNum=%d \n pacingTickCnt=%d \n list.istBase=%p\n\ list.axPageEntry=%d\n list.acingMode=%d\n list.tallAvoidance=%d\n\ list.istCountMode=%d\n list.istEntrySize=%d\n list.axPageCnt=%d\n\ monitor.pktCountThresh=%d\n monitor.pacingMode=%d \n",\ initCfg.accChanNum, initCfg.mode, initCfg.queue.qMgr,\ initCfg.queue.qNum,initCfg.pacingTickCnt,initCfg.list.listBase\ ,initCfg.list.maxPageEntry,initCfg.list.pacingMode,\ initCfg.list.stallAvoidance,initCfg.list.listCountMode,\ initCfg.list.listEntrySize,initCfg.list.maxPageCnt,\ initCfg.monitor.pktCountThresh,initCfg.monitor.pacingMode) #else #define ACCUM_CH_PARAM_DEBUG(initCfg) #endif /* Error print, also print function name and line number */ #define EPRINTK(fmt, args...) pr_err("[CPPI ERR] %s(%d): " fmt, __FUNCTION__ , __LINE__, ## args) #define ERRSEQ(m, fmt, args...) seq_printf(m, "[CPPI ERR] %s(%d): " fmt, __FUNCTION__ , __LINE__, ## args) _Atomic Uint32 g_proc_qsm = 1; #define QUEUE_NUMBER_TO_QUEUE_ADDR_SHIFT (sizeof(CSL_Queue_Mgmt_Regs) / 4) #define QUEUE_NUM_TO_QUEUE_BASE_ADDR(qMgr_base, qNum) ((qMgr_base) + ((qNum) << QUEUE_NUMBER_TO_QUEUE_ADDR_SHIFT)) #undef PAL_CPPI_QMGR_Q_ADD #define PAL_CPPI_QMGR_Q_ADD(qName) "LO." #qName, Uint8 *PalCppiPpLqmgrNames[PAL_CPPI_PP_QMGR_LOCAL_TOTAL_Q_COUNT] = { PAL_CPPI_PP_QMGR_LOCAL_Q_LIST }; EXPORT_SYMBOL(PalCppiPpLqmgrNames); #undef PAL_CPPI_QMGR_Q_ADD #define PAL_CPPI_QMGR_Q_ADD(qName) "G0." #qName, Uint8 *PalCppiPpGqmgr0Names[PAL_CPPI_PP_QMGR_G0_TOTAL_Q_COUNT] = { PAL_CPPI_PP_QMGR_G0_Q_LIST }; EXPORT_SYMBOL(PalCppiPpGqmgr0Names); #undef PAL_CPPI_QMGR_Q_ADD #define PAL_CPPI_QMGR_Q_ADD(qName) "G1." 
#qName, Uint8 *PalCppiPpGqmgr1Names[PAL_CPPI_PP_QMGR_G1_TOTAL_Q_COUNT] = { PAL_CPPI_PP_QMGR_G1_Q_LIST }; EXPORT_SYMBOL(PalCppiPpGqmgr1Names); #undef PAL_CPPI_QMGR_Q_ADD #define PAL_CPPI_QMGR_Q_ADD(qName) "G2." #qName, Uint8 *PalCppiPpGqmgr2Names[PAL_CPPI_PP_QMGR_G2_TOTAL_Q_COUNT] = { PAL_CPPI_PP_QMGR_G2_Q_LIST }; EXPORT_SYMBOL(PalCppiPpGqmgr2Names); #undef PAL_CPPI_QMGR_Q_ADD #define PAL_CPPI_QMGR_Q_ADD(qName) "DSG0." #qName, Uint8 *PalCppiDsg0qmgrNames[PAL_CPPI_DSG_QMGR_TOTAL_Q_COUNT] = { PAL_CPPI_DSG_QMGR_Q_LIST }; EXPORT_SYMBOL(PalCppiDsg0qmgrNames); #undef PAL_CPPI_QMGR_Q_ADD #define PAL_CPPI_QMGR_Q_ADD(qName) "DSG1." #qName, Uint8 *PalCppiDsg1qmgrNames[PAL_CPPI_DSG_QMGR_TOTAL_Q_COUNT] = { PAL_CPPI_DSG_QMGR_Q_LIST }; EXPORT_SYMBOL(PalCppiDsg1qmgrNames); #undef PAL_CPPI_QMGR_Q_ADD #define PAL_CPPI_QMGR_Q_ADD(qName) "DSG2." #qName, Uint8 *PalCppiDsg2qmgrNames[PAL_CPPI_DSG_QMGR_TOTAL_Q_COUNT] = { PAL_CPPI_DSG_QMGR_Q_LIST }; EXPORT_SYMBOL(PalCppiDsg2qmgrNames); #undef PAL_CPPI_BMGR_P_ADD #define PAL_CPPI_BMGR_P_ADD(pName) #pName, Uint8 *PalCppiPpbmgrNames[PAL_CPPI41_BMGR_MAX_POOLS] = { PAL_CPPI_PP_BMGR_POOL_LIST }; EXPORT_SYMBOL(PalCppiPpbmgrNames); struct queue_mgr_regs { u32 regs; u32 desc; u32 queues; u32 stats; }; #define GQMGR_MAX_QUEUES(qMgr) \ ((qMgr == PAL_CPPI_PP_QMGR_G0) ? PAL_CPPI_PP_QMGR_G0_TOTAL_Q_COUNT : \ ((qMgr == PAL_CPPI_PP_QMGR_G1) ? PAL_CPPI_PP_QMGR_G1_TOTAL_Q_COUNT : \ ((qMgr == PAL_CPPI_PP_QMGR_G2) ? PAL_CPPI_PP_QMGR_G2_TOTAL_Q_COUNT : \ PAL_CPPI_PP_QMGR_LOCAL_TOTAL_Q_COUNT))) #define CPPI_DESC_TYPE_STR(type) \ ((type == CPPI41_DESC_TYPE_EMBEDDED) ? "Embedded " : \ ((type == CPPI41_DESC_TYPE_HOST) ? "Host " : \ ((type == CPPI41_DESC_TYPE_MONOLITHIC) ? "Monolithic" : \ ((type == CPPI41_DESC_TYPE_TEARDOWN) ? 
"Teardown " : \ "Invalid")))) #define IS_DESC_TYPE_VALID(desc) \ ((CPPI41_DESC_TYPE_EMBEDDED <= desc) && (desc <= CPPI41_DESC_TYPE_TEARDOWN)) #define IS_DESC_REGION_VALID(region) \ ((PAL_CPPI_PP_VOICE_GLOBAL_DESC_REGION <= region) && \ (region < PAL_CPPI41_MAX_DESC_REGIONS)) #define IS_QMGR_ID_VALID(qMgr) \ ((PAL_CPPI_PP_QMGR_G0 <= qMgr) && (qMgr < PAL_CPPI41_NUM_QUEUE_MGR)) #define IS_QUEUE_ID_VALID(qId, qMgr) \ ((0 <= qId) && (qId < GQMGR_MAX_QUEUES(qMgr))) #define CPPI_QMGR_MAX_DESC_SUPPORT (64*1024) /***************/ /** Globals **/ /***************/ struct queue_mgr_regs q_mgrs[PAL_CPPI41_NUM_QUEUE_MGR] = {{0}}; u32 bmgr_base = 0 ; /* buffers pools manager base address */ u32 qsmon_base[PAL_CPPI_PP_NUM_QSMON_MGRs] = { 0 }; /* queue status monitors base addresses */ static bool initialized = false; static spinlock_t init_lock; /*ADP accumulator HW Mailbox message format*/ /*******************************************/ /* ------------------------------------ * |header including Opcode -4bytes | * |----------------------------------| * |Cppi4AccumulatorCfg 56 bytes | * | | * | | * | | * | | * |----------------------------------| * | Current page (curPage). 
| * | 4bytes | * |----------------------------------| * | general purpose field | * | from ATOM->ARM: PAL_Handle | * | from ARM->ATOM: npcpu address | * | | * | 4bytes | * |----------------------------------| *-----------------------------------*/ /***************************************/ /*********************************/ /** local Functions declaration **/ /*********************************/ /*********************************/ /** Functions Implementations **/ /*********************************/ PAL_Handle PAL_cppi4Init (void * initCfg, Ptr param) { unsigned long flags; /* currently on ATOM, only pp domain is suported */ BUG_ON(param != CPPI41_DOMAIN_PP); spin_lock_irqsave(&init_lock, flags); if (initialized) goto done; /* init queue managers base addresses */ q_mgrs[PAL_CPPI_PP_QMGR_G0].queues = (u32)(PAL_CPPI_PP_QMGR_G0_QUEUES_BASE); q_mgrs[PAL_CPPI_PP_QMGR_G0].stats = (u32)(PAL_CPPI_PP_QMGR_G0_Q_STATS_BASE); q_mgrs[PAL_CPPI_PP_QMGR_G0].regs = (u32)(PAL_CPPI_PP_QMGR_G0_REGS_BASE); q_mgrs[PAL_CPPI_PP_QMGR_G0].desc = (u32)(PAL_CPPI_PP_QMGR_G0_DESC_BASE); q_mgrs[PAL_CPPI_PP_QMGR_G1].queues = (u32)(PAL_CPPI_PP_QMGR_G1_QUEUES_BASE); q_mgrs[PAL_CPPI_PP_QMGR_G1].stats = (u32)(PAL_CPPI_PP_QMGR_G1_Q_STATS_BASE); q_mgrs[PAL_CPPI_PP_QMGR_G1].regs = (u32)(PAL_CPPI_PP_QMGR_G1_REGS_BASE); q_mgrs[PAL_CPPI_PP_QMGR_G1].desc = (u32)(PAL_CPPI_PP_QMGR_G1_DESC_BASE); q_mgrs[PAL_CPPI_PP_QMGR_G2].queues = (u32)(PAL_CPPI_PP_QMGR_G2_QUEUES_BASE); q_mgrs[PAL_CPPI_PP_QMGR_G2].stats = (u32)(PAL_CPPI_PP_QMGR_G2_Q_STATS_BASE); q_mgrs[PAL_CPPI_PP_QMGR_G2].regs = (u32)(PAL_CPPI_PP_QMGR_G2_REGS_BASE); q_mgrs[PAL_CPPI_PP_QMGR_G2].desc = (u32)(PAL_CPPI_PP_QMGR_G2_DESC_BASE); q_mgrs[PAL_CPPI_PP_QMGR_LOCAL].queues = (u32)(PAL_CPPI_PP_QMGR_LOCAL_QUEUES_BASE); q_mgrs[PAL_CPPI_PP_QMGR_LOCAL].stats = (u32)(PAL_CPPI_PP_QMGR_LOCAL_Q_STATS_BASE); q_mgrs[PAL_CPPI_PP_QMGR_LOCAL].regs = (u32)(PAL_CPPI_PP_QMGR_LOCAL_REGS_BASE); q_mgrs[PAL_CPPI_PP_QMGR_LOCAL].desc = (u32)(PAL_CPPI_PP_QMGR_LOCAL_DESC_BASE); 
/* init buffer manager monitors base addresses */ bmgr_base = PAL_CPPI_PP_BUF_MGR_BASE; /* init queue status monitors base addresses */ qsmon_base[PAL_CPPI_PP_QSMON_MGR0] = PAL_CPPI_QSMON_0_CTRL_REGS_BASE; qsmon_base[PAL_CPPI_PP_QSMON_MGR1] = PAL_CPPI_QSMON_1_CTRL_REGS_BASE; qsmon_base[PAL_CPPI_PP_QSMON_MGR2] = PAL_CPPI_QSMON_2_CTRL_REGS_BASE; qsmon_base[PAL_CPPI_PP_QSMON_MGR3] = PAL_CPPI_QSMON_3_CTRL_REGS_BASE; qsmon_base[PAL_CPPI_PP_QSMON_MGR4] = PAL_CPPI_QSMON_4_CTRL_REGS_BASE; qsmon_base[PAL_CPPI_PP_BSMON_MGR] = PAL_CPPI_BSMON_CTRL_REGS_BASE ; initialized = true; /* pal handle is not used in ATOM implementation, return dummy value to avoid the null checks in shared code */ done: spin_unlock_irqrestore(&init_lock, flags); return (PAL_Handle) 0xDEADBEEF; } EXPORT_SYMBOL(PAL_cppi4Init); int PAL_cppi4Exit(PAL_Handle hnd, void *param) { return 0; } EXPORT_SYMBOL(PAL_cppi4Exit); int PAL_cppi4BufDecRefCnt (PAL_Handle hnd, Cppi4BufPool pool, Ptr bufPtr) { CSL_BufMgr_Regs *bPoolRegs = (CSL_BufMgr_Regs *)bmgr_base; bPoolRegs->Pointer_Size[pool.bPool].Buffer_Pool_Pointer = cpu_to_be32((__u32) bufPtr); return 0; } EXPORT_SYMBOL(PAL_cppi4BufDecRefCnt); Bool Pal_cppi4IsQueueValid(Cppi4Queue *q) { if (!q) return False; switch (q->qMgr) { case PAL_CPPI_PP_QMGR_G0: if (q->qNum < PAL_CPPI_PP_QMGR_G0_TOTAL_Q_COUNT) return True; case PAL_CPPI_PP_QMGR_G1: if (q->qNum < PAL_CPPI_PP_QMGR_G1_TOTAL_Q_COUNT) return True; case PAL_CPPI_PP_QMGR_G2: if (q->qNum < PAL_CPPI_PP_QMGR_G2_TOTAL_Q_COUNT) return True; case PAL_CPPI_PP_QMGR_LOCAL: if (q->qNum < PAL_CPPI_PP_QMGR_LOCAL_TOTAL_Q_COUNT) return True; } return False; } #ifdef CONFIG_MRPC_CPPI_CLIENT /** * Retrieve all cppi buffers pools information from cppi driver * on ARM * * @param pools array to fill in the pools info * * @return int 0 on success, non-zero value otherwise */ PAL_Result PAL_cppi4GetAllBufferPoolsInfo(PAL_Handle hnd, Cppi4BufPoolInfo *pools) { Cppi4BufPool pool; u32 i; if (NULL == pools) { EPRINTK("pools is null\n"); 
		return PAL_ERROR_FLAG;
	}
	pool.bMgr = 0;
	/* Get pools info */
	/* NOTE(review): the text from this loop through EXPORT_SYMBOL(PAL_cppi4BufPopBuf)
	 * is corrupted in this copy of the file - a span of the original source
	 * (the loop body, the end of this function, and the head of
	 * PAL_cppi4BufPopBuf) appears to have been lost, likely an
	 * angle-bracket-stripping artifact ("i<..." fused into "iPointer_Size").
	 * Preserved byte-for-byte; restore from the original file, do not guess. */
	for (i=0; iPointer_Size[pool.bPool].Buffer_Pool_Pointer); } EXPORT_SYMBOL(PAL_cppi4BufPopBuf);

/* Map a (qMgr, qNum) pair to the queue's push/pop register window and hand
 * that address back as the queue handle. No HW access is performed here. */
PAL_Cppi4QueueHnd PAL_cppi4QueueOpen(PAL_Handle hnd, Cppi4Queue queue)
{
	PAL_Cppi4QueueHnd qHnd;

	if (!q_mgrs[queue.qMgr].queues) {
		EPRINTK("PAL is not initialized, first call init function\n");
		return NULL;
	}
	qHnd = (PAL_Cppi4QueueHnd *) QUEUE_NUM_TO_QUEUE_BASE_ADDR(q_mgrs[queue.qMgr].queues, queue.qNum);
	DPRINTK("Open queue G%d.%d, qHnd = %p", queue.qMgr, queue.qNum, qHnd);
	return qHnd;
}
EXPORT_SYMBOL(PAL_cppi4QueueOpen);

/* On this platform opening never resets the queue, so this is an alias. */
PAL_Cppi4QueueHnd PAL_cppi4QueueOpenNoReset (PAL_Handle hnd, Cppi4Queue queue)
{
	return PAL_cppi4QueueOpen(hnd, queue);
}
EXPORT_SYMBOL(PAL_cppi4QueueOpenNoReset);

/* Queue handles are plain register addresses; nothing to release. */
int PAL_cppi4QueueClose(PAL_Handle hnd, PAL_Cppi4QueueHnd qHnd)
{
	DPRINTK("Close queue %p", qHnd);
	return 0;
}
EXPORT_SYMBOL(PAL_cppi4QueueClose);

/* Read the number of entries currently on a queue from its (big-endian)
 * status register A. Always returns 0 (success). */
int PAL_cppi4QueueGetEntryCount(PAL_Handle hnd, Cppi4Queue queue, unsigned int *entryCount)
{
	CSL_Queue_Status_Regs *qStatRegs;

	qStatRegs = (CSL_Queue_Status_Regs *) QUEUE_NUM_TO_QUEUE_BASE_ADDR(q_mgrs[queue.qMgr].stats, queue.qNum);
	DPRINTK("qStatRegs = 0x%08x", IO_VIRT2PHY(qStatRegs));
	*entryCount = be32_to_cpu(qStatRegs->Queue_Status_Reg_A);
	return 0;
}
EXPORT_SYMBOL(PAL_cppi4QueueGetEntryCount);

/* Size of an object rounded up to whole 32-bit words. */
#define SIZE_IN_WORD(p) ((sizeof(p) + 0x3) >> 2)

/* Callback used to deliver PDSP commands; installed by the transport driver
 * via PAL_cppi4PdspCmdSendRegister(). NULL until registration. */
static Int32 (*__pdsp_cmd_send)(pdsp_id_t, pdsp_cmd_t, void *, Uint32, void *, Uint32) = NULL;

/* Remove the PDSP command-send callback. BUGs if none was registered. */
PAL_Result PAL_cppi4PdspCmdSendUnregister(void)
{
	BUG_ON(!__pdsp_cmd_send);
	__pdsp_cmd_send = NULL;
	printk("%s:%d: pdsp_cmd_send unregister done.", __func__, __LINE__);
	return (PAL_SOK);
}
EXPORT_SYMBOL(PAL_cppi4PdspCmdSendUnregister);

/* Install the PDSP command-send callback. BUGs if one is already set. */
PAL_Result PAL_cppi4PdspCmdSendRegister(Int32 (*cb)(pdsp_id_t, pdsp_cmd_t, void *, Uint32, void *, Uint32))
{
	BUG_ON(__pdsp_cmd_send);
	__pdsp_cmd_send = cb;
	printk("%s:%d: pdsp_cmd_send register done.", __func__, __LINE__);
	return (PAL_SOK);
}
EXPORT_SYMBOL(PAL_cppi4PdspCmdSendRegister);

/* Pack a PDSP command word: cmd in bits 0-7, option in 8-15, index in 16-31. */
#define PDSP_PREP_CMD(cmd,
 option, index) \
    (((cmd) & 0xffu) << 0) | \
    (((option) & 0xffu) << 8) | \
    (((index) & 0xffffu) << 16)

/* Byte-swap an accumulator-channel info block to the big-endian layout the
 * PDSP expects. Channel and Command are single bytes and are not swapped. */
static inline AVALANCHE_PP_ACC_CH_INFO_t PAL_cppi4AccChInfo_cpu_to_be(AVALANCHE_PP_ACC_CH_INFO_t *src)
{
	AVALANCHE_PP_ACC_CH_INFO_t dst;

	memset(&dst, 0, sizeof(AVALANCHE_PP_ACC_CH_INFO_t));
	dst.Index = cpu_to_be16(src->Index);
	dst.Channel = src->Channel;
	dst.Command = src->Command;
	dst.Param0Ret = cpu_to_be32(src->Param0Ret);
	dst.Param1 = cpu_to_be32(src->Param1);
	dst.Param2 = cpu_to_be32(src->Param2);
	return dst;
}

/* Send a DISABLE_CH command (no parameters) for the given accumulator
 * channel through the registered PDSP transport. */
static AVALANCHE_PP_RET_e __cppi4AccChClose(AVALANCHE_PP_ACC_CH_INFO_t *ptr_ch_cfg)
{
	pdsp_cmd_t pdsp_cmd = cpu_to_be32(PDSP_PREP_CMD(PDSP_ACCUMULATOR_DISABLE_CH, ptr_ch_cfg->Channel, ptr_ch_cfg->Index));
	AVALANCHE_PP_RET_e rc;

	BUG_ON(!__pdsp_cmd_send);
	rc = __pdsp_cmd_send(PDSP_ID_Accumulator, pdsp_cmd, NULL, 0, NULL, 0);
	if (rc) {
		printk("%s:%d ERROR !!! Failed to close accumulator channel !!!\n",__FUNCTION__,__LINE__);
		return (rc + PP_RC_FAILURE);
	}
	return (PP_RC_SUCCESS);
}

/* Send an ENABLE_CH command with the (byte-swapped) channel parameters.
 * On failure, best-effort closes the channel before reporting the error. */
static AVALANCHE_PP_RET_e __cppi4AccChOpen(AVALANCHE_PP_ACC_CH_INFO_t *ptr_ch_cfg)
{
	AVALANCHE_PP_ACC_CH_INFO_t info = PAL_cppi4AccChInfo_cpu_to_be(ptr_ch_cfg);
	pdsp_cmd_t pdsp_cmd = cpu_to_be32(PDSP_PREP_CMD(PDSP_ACCUMULATOR_ENABLE_CH, ptr_ch_cfg->Channel, ptr_ch_cfg->Index));
	AVALANCHE_PP_RET_e rc;

	BUG_ON(!__pdsp_cmd_send);
	rc = __pdsp_cmd_send(PDSP_ID_Accumulator, pdsp_cmd, &(info.Param0Ret), SIZE_IN_WORD(AVALANCHE_PP_ACC_CH_INFO_t) - SIZE_IN_WORD(Int32) /* Size of the parameters = total size - command size*/, NULL, 0);
	if (rc) {
		printk("%s:%d ERROR !!! Failed to open accumulator channel !!!\n",__FUNCTION__,__LINE__);
		__cppi4AccChClose(ptr_ch_cfg);
		return (rc + PP_RC_FAILURE);
	}
	return (PP_RC_SUCCESS);
}

/* Send an ENABLE_CH_RATE_LIMIT command with the (byte-swapped) parameters. */
static AVALANCHE_PP_RET_e __cppi4AccChEnRateLimit(AVALANCHE_PP_ACC_CH_INFO_t *ptr_ch_cfg)
{
	AVALANCHE_PP_ACC_CH_INFO_t info = PAL_cppi4AccChInfo_cpu_to_be(ptr_ch_cfg);
	pdsp_cmd_t pdsp_cmd = cpu_to_be32(PDSP_PREP_CMD(PDSP_ACCUMULATOR_ENABLE_CH_RATE_LIMIT, ptr_ch_cfg->Channel, ptr_ch_cfg->Index));
	AVALANCHE_PP_RET_e rc;

	BUG_ON(!__pdsp_cmd_send);
	rc = __pdsp_cmd_send(PDSP_ID_Accumulator, pdsp_cmd, &(info.Param0Ret), SIZE_IN_WORD(AVALANCHE_PP_ACC_CH_INFO_t) - SIZE_IN_WORD(Int32) /* Size of the parameters = total size - command size*/, NULL, 0);
	if (rc) {
		return (rc + PP_RC_FAILURE);
	}
	return (PP_RC_SUCCESS);
}

#ifdef CONFIG_INTEL_KERNEL_PP_DRIVER_LOCAL
/* Open an accumulator channel. Puma7+ goes through the PDSP command
 * transport; older SoCs program the APDSP command registers directly. */
PAL_Cppi4AccChHnd PAL_cppi4AccChOpen(PAL_Handle hnd, Cppi4AccumulatorCfg* accCfg)
{
	Uint32 cookie;
	PAL_Cppi4AccChObj *accChObj;
#if PUMA7_OR_NEWER_SOC_TYPE
	AVALANCHE_PP_ACC_CH_INFO_t ptr_ch_cfg;
	AVALANCHE_PP_RET_e rc;
#else
	Uint32 i;
	Cppi4PALObj *palCppi4Obj = (Cppi4PALObj *) hnd;
	Cppi4InitCfg * initCfg = palCppi4Obj->initCfg;
	APDSP_Command_Status_RegsOvly cmdRegs = initCfg->apdspInfo.pdspCmdBase;
#endif
	if (PAL_osMemAlloc(0, sizeof(PAL_Cppi4AccChObj), 0, (Ptr *) &accChObj) != PAL_SOK) {
		EPRINTK ("\nERROR:PAL: PAL_cppi4AccChOpen: Failed to allocate Acc channel object structure.\n");
		return NULL;
	}
	PAL_osMemSet (accChObj, 0, sizeof (PAL_Cppi4AccChObj));
	PAL_osMemCopy(&accChObj->initCfg, accCfg, sizeof(Cppi4AccumulatorCfg));
	accChObj->palCppi4Obj = hnd;
	/* Need to protect the accumulator register writes. They are shared with pre-fetcher */
	PAL_osProtectEntry(PAL_OSPROTECT_INTERRUPT, &cookie);
#if PUMA7_OR_NEWER_SOC_TYPE
	ptr_ch_cfg.Index = 0;
	ptr_ch_cfg.Channel = accCfg->accChanNum;
	ptr_ch_cfg.Command = 0;
	ptr_ch_cfg.Param0Ret = (Uint32)PAL_CPPI4_VIRT_2_PHYS((void *)accCfg->list.listBase) ;
	ptr_ch_cfg.Param1 = (accCfg->queue.qNum) | (accCfg->queue.qMgr << 12) | (accCfg->list.maxPageEntry << 16);
	/* NOTE(review): the grouping "(pacingMode << 22 | (mode << 31))" differs
	 * textually from PAL_cppi4AccChOpenAppCpu's "(pacingMode << 22) | (mode << 31)",
	 * but since << binds tighter than | the computed value is identical -
	 * cosmetic inconsistency only. */
	ptr_ch_cfg.Param2 = (accCfg->pacingTickCnt) | (accCfg->list.maxPageCnt << 16) | (accCfg->list.listEntrySize << 18)| (accCfg->list.listCountMode << 20) | (accCfg->list.stallAvoidance << 21)| (accCfg->list.pacingMode << 22 | (accCfg->mode << 31));
	rc = __cppi4AccChOpen ( &ptr_ch_cfg);
	if (PP_RC_SUCCESS != rc) {
		EPRINTK("Error: Accumulator PDSP is not responding, return code: %u\n", rc);
		PAL_osProtectExit(PAL_OSPROTECT_INTERRUPT, cookie);
		PAL_osMemFree( 0, accChObj, sizeof(PAL_Cppi4AccChObj) );
		return NULL;
	}
#else
	if(accCfg->mode) {
		/* monitor mode */
		cmdRegs->Config_A = (accCfg->queue.qNum) | (accCfg->queue.qMgr << 8) | (accCfg->monitor.pktCountThresh << 16);
		cmdRegs->Config_B = (accCfg->pacingTickCnt) | (accCfg->monitor.pacingMode << 22) | (0x1 << 31);
	} else {
		/* list mode */
		cmdRegs->List_Buffer_Address = PAL_CPPI4_VIRT_2_PHYS(accCfg->list.listBase);
		cmdRegs->Config_A = (accCfg->queue.qNum) | (accCfg->queue.qMgr << 8) | (accCfg->list.maxPageEntry << 16);
		cmdRegs->Config_B = (accCfg->pacingTickCnt) | (accCfg->list.maxPageCnt << 16) | (accCfg->list.listEntrySize << 18) | (accCfg->list.listCountMode << 20) | (accCfg->list.stallAvoidance << 21)| (accCfg->list.pacingMode << 22);
	}
	cmdRegs->Command = (accCfg->accChanNum) | (APDSP_CMD_ENABLE << 8);
	dbgPrint("APDSP config @%p, value %x\n", &cmdRegs->List_Buffer_Address, cmdRegs->List_Buffer_Address);
	dbgPrint("APDSP config @%p, value %x\n", &cmdRegs->Config_A, cmdRegs->Config_A);
	dbgPrint("APDSP config @%p, value %x\n", &cmdRegs->Config_B, cmdRegs->Config_B);
	dbgPrint("APDSP config @%p, value %x\n", &cmdRegs->Command,
cmdRegs->Command);
	/* TODO: 1000000 is a magic word picked up from mike's code. Need to understand
	 * timeout values and fix the code */
	for(i=0; (i < 1000000) && (cmdRegs->Command & (0xFF << 8)); i++);
	if( i==1000000 ) {
		EPRINTK("Error: APDSP firmware not responding!, APDSP return code: 0x%02X\n", (cmdRegs->Command & (0xFF << 24)));
		PAL_osProtectExit(PAL_OSPROTECT_INTERRUPT, cookie);
		PAL_osMemFree( 0, accChObj, sizeof(PAL_Cppi4AccChObj) );
		return NULL;
	}
#endif
	accChObj->curPage = 0;
	PAL_osProtectExit(PAL_OSPROTECT_INTERRUPT, cookie);
	return (PAL_Cppi4AccChHnd) accChObj;
}
#else
/* Following API will use HW mailbox provide Accumulator fuctionalities */
/* Open an accumulator channel by marshalling the config (big-endian, with
 * listBase converted to a physical address) into a HW-mailbox message sent
 * to the NP-CPU, and unpacking the reply into a locally allocated object. */
PAL_Cppi4AccChHnd PAL_cppi4AccChOpen(PAL_Handle hnd, Cppi4AccumulatorCfg* accCfg)
{
	/*Return pointer to the caller */
	PAL_Cppi4AccChObj *accChObj = NULL;
	/* transport message over HW_MBOX */
	Cppi41HwMboxAccChOpenMsg_t openAccChObj = {0};
	/* local temporary variables */
	Cppi41HwMboxAccChOpenReplyMsg_t* tmp = NULL;
	unsigned long tmpPtr = 0;
	/* transport message over HW_MBOX */
	/* Return length of HW mailbox Op-Code channel */
	Uint32 dataLen = sizeof(Cppi41HwMboxAccChOpenMsg_t);

	if(!accCfg) {
		EPRINTK("NULL pointer reference.");
		return NULL;
	}
	/* kmalloc returns cache line aligned memory unless you are debugging the slab allocator (2.6.18) */
	accChObj = (PAL_Cppi4AccChObj *)kzalloc(sizeof(PAL_Cppi4AccChObj) ,GFP_KERNEL);
	if(!accChObj) {
		EPRINTK("could not allocate memeory for local accumulator ojbect");
		return NULL;
	}
	/*copy accCfg data to accumulator channel object */
	if(!accCfg->list.listBase) {
		EPRINTK("NULL pointer reference. for accCfg.list.base");
		kfree(accChObj);
		return NULL;
	}
	/* Copy datapipe accumulator init paramters into the message container */
	memcpy(&openAccChObj.initCfg, accCfg, sizeof(Cppi4AccumulatorCfg));
	DPRINTK(" Virtual list.listBase=%p, address received=%p before sending to HWMbox.\n", openAccChObj.initCfg.list.listBase, accCfg->list.listBase );
	/* APPCPU virtual address need to converted to Physical address before sending to HW mailbox */
	tmpPtr = (unsigned long)PAL_CPPI4_VIRT_2_PHYS(openAccChObj.initCfg.list.listBase);
	openAccChObj.initCfg.list.listBase = (void*)tmpPtr;
	DPRINTK(" Physical list.listBase=%p, Original address received=%p before sending to HWMbox.\n", openAccChObj.initCfg.list.listBase, accCfg->list.listBase );
	/* hardware mailbox implementation to open accumulator channel goes here */
	/* NOTE(review): treating a NON-ZERO hwMbox_isReady() as "not ready" looks
	 * inverted relative to the function's name - verify the return convention
	 * (0 == ready?) against the hwMbox driver before changing anything. */
	if(hwMbox_isReady()) {
		EPRINTK("HW mailbox isn't ready yet.");
		kfree(accChObj);
		return NULL;
	}
	ACCUM_CH_PARAM_DEBUG(openAccChObj.initCfg);
	/* need to convert data from cpu_to_be(); */
	/* NOTE(review): this calls Cppi41HwMboxAccChangeEndianness(), while this
	 * file defines Cppi41AccChangeEndianness() - presumably the HwMbox variant
	 * is declared in a header elsewhere; confirm they are distinct on purpose. */
	if(!Cppi41HwMboxAccChangeEndianness(&openAccChObj, endiannessBig)) {
		EPRINTK("data conversion fo endianness failed");
		kfree(accChObj);
		return NULL;
	}
	/* need to send accumulator handler as well though we are not using it right now but incase needed in future */
	/* will receive back Object address in SendReplyOp() at npcpuAddress variable */
	openAccChObj.cmd = cpu_to_be32(CPPI41_HWMBOX_CMD_ACC_CH_OPEN);
	ACCUM_CH_PARAM_DEBUG(openAccChObj.initCfg);
	/* send a message to NP-CPU and expect a 64 byte reply back using SendReplyOp()*/
	DPRINTK(" size of data length=%d.", sizeof(Cppi41HwMboxAccChOpenMsg_t));
	if(hwMbox_sendOpcode(HW_MBOX_MASTER_NP_CPU, NPCPU_APPCPU_HW_MBOX_TAG_CPPI41_MBX , (uint8_t *)&openAccChObj, sizeof(Cppi41HwMboxAccChOpenMsg_t) , sizeof(Cppi41HwMboxAccChOpenMsg_t) , &dataLen)) {
		EPRINTK("HW mailbox hwMbox_sendOpcode failed.");
		kfree(accChObj);
		return NULL;
	}
	if(dataLen != sizeof(Cppi41HwMboxAccChOpenReplyMsg_t)) {
		EPRINTK("HW mailbox hwMbox_sendOpcode reply wasnt of desire length Cppi41HwMboxAccChOpenReplyMsg=%d dataLen=%d ",sizeof(Cppi41HwMboxAccChOpenReplyMsg_t), dataLen);
		kfree(accChObj);
		return NULL;
	}
	DPRINTK("HW mailbox adpHwMboxmessageObj.msgData.initCfg.list.listBase before Endian change=%p.", openAccChObj.initCfg.list.listBase);
	/* need to conver data from be_to_cpu(); */
	DPRINTK("HW mailbox Received adpHwMboxmessageObj.msgData.initCfg.list.listBase after Endian change=%p.", openAccChObj.initCfg.list.listBase);
	DPRINTK("HW mailbox called to accumulator open successful.");
	/* copy HW_Mbox message to kmalloced object for return */
	DPRINTK("data length=%d.",dataLen );
	memcpy(&(accChObj->initCfg), accCfg, sizeof(Cppi4AccumulatorCfg));
	/* the send buffer was overwritten in place by the reply message */
	tmp = (Cppi41HwMboxAccChOpenReplyMsg_t *) &openAccChObj;
	accChObj->curPage = be32_to_cpu( tmp->curPage);
	DPRINTK("curPage=%d.", accChObj->curPage );
	/* NP-CPU-side handle, kept for the matching close call */
	accChObj->palCppi4Obj = (void *)be32_to_cpu((unsigned int)(tmp->accChHnd));
	DPRINTK("npcpuAddress=%d.", tmp->accChHnd );
	DPRINTK("HW mailbox Received accChObj->initCfg.list.listBase after phys_to_virt=%p.", accChObj->initCfg.list.listBase);
	return (PAL_Cppi4AccChHnd)accChObj;
}
#endif
EXPORT_SYMBOL(PAL_cppi4AccChOpen);

/* Open an accumulator channel whose list pages live in APPCPU memory:
 * identical to the Puma7 local-open path except listBase is passed through
 * as-is (no virtual-to-physical conversion). */
PAL_Cppi4AccChHnd PAL_cppi4AccChOpenAppCpu(PAL_Handle hnd, Cppi4AccumulatorCfg* accCfg)
{
	Uint32 cookie;
	PAL_Cppi4AccChObj *accChObj;
	AVALANCHE_PP_ACC_CH_INFO_t ptr_ch_cfg;
	AVALANCHE_PP_RET_e rc;

	if (PAL_osMemAlloc(0, sizeof(PAL_Cppi4AccChObj), 0, (Ptr *) &accChObj) != PAL_SOK) {
		EPRINTK ("\nERROR:PAL: PAL_cppi4AccChOpen: Failed to allocate Acc channel object structure.");
		return NULL;
	}
	PAL_osMemSet (accChObj, 0, sizeof (PAL_Cppi4AccChObj));
	PAL_osMemCopy(&accChObj->initCfg, accCfg, sizeof(Cppi4AccumulatorCfg));
	accChObj->palCppi4Obj = hnd;
	/* Need to protect the accumulator register writes. 
They are shared with pre-fetcher */
	PAL_osProtectEntry(PAL_OSPROTECT_INTERRUPT, &cookie);
	ptr_ch_cfg.Index = 0;
	ptr_ch_cfg.Channel = accCfg->accChanNum;
	ptr_ch_cfg.Command = 0;
	/* listBase passed through untranslated (APPCPU-side address) */
	ptr_ch_cfg.Param0Ret = (Uint32)(accCfg->list.listBase) ;
	ptr_ch_cfg.Param1 = (accCfg->queue.qNum) | (accCfg->queue.qMgr << 12) | (accCfg->list.maxPageEntry << 16);
	ptr_ch_cfg.Param2 = (accCfg->pacingTickCnt) | (accCfg->list.maxPageCnt << 16) | (accCfg->list.listEntrySize << 18)| (accCfg->list.listCountMode << 20) | (accCfg->list.stallAvoidance << 21)| (accCfg->list.pacingMode << 22) | (accCfg->mode << 31);
	rc = __cppi4AccChOpen(&ptr_ch_cfg);
	if (PP_RC_SUCCESS != rc) {
		EPRINTK("Error: Accumulator PDSP is not responding, return code: %u\n", rc);
		PAL_osProtectExit(PAL_OSPROTECT_INTERRUPT, cookie);
		PAL_osMemFree( 0, accChObj, sizeof(PAL_Cppi4AccChObj) );
		return NULL;
	}
	accChObj->curPage = 0;
	PAL_osProtectExit(PAL_OSPROTECT_INTERRUPT, cookie);
	return (PAL_Cppi4AccChHnd) accChObj;
}
EXPORT_SYMBOL(PAL_cppi4AccChOpenAppCpu);

/* Enable rate limiting on an accumulator channel via the PDSP transport.
 * Errors are silently swallowed (the PDSP helper already reported them). */
void PAL_cppi4AccChEnRateLimit(Cppi4AccumulatorRateLimitCfg* accCfg)
{
	Uint32 cookie;
	AVALANCHE_PP_RET_e rc;
	AVALANCHE_PP_ACC_CH_INFO_t ptr_ch_cfg;

	/* Need to protect the accumulator register writes. They are shared with pre-fetcher */
	PAL_osProtectEntry(PAL_OSPROTECT_INTERRUPT, &cookie);
	ptr_ch_cfg.Index = 1 ; // Enable Rate Limit
	ptr_ch_cfg.Channel = accCfg->accChanNum ;
	ptr_ch_cfg.Command = 0 ;
	ptr_ch_cfg.Param0Ret = (accCfg->retQ);
	ptr_ch_cfg.Param1 = (accCfg->upperThreshold << 16) | (accCfg->lowerThreshold);
	ptr_ch_cfg.Param2 = 0;
	rc = __cppi4AccChEnRateLimit(&ptr_ch_cfg);
	if (PP_RC_SUCCESS != rc) {
		PAL_osProtectExit(PAL_OSPROTECT_INTERRUPT, cookie);
		return;
	}
	PAL_osProtectExit(PAL_OSPROTECT_INTERRUPT, cookie);
}
EXPORT_SYMBOL(PAL_cppi4AccChEnRateLimit);

#ifdef CONFIG_INTEL_KERNEL_PP_DRIVER_LOCAL
/* Close an accumulator channel opened by the local PAL_cppi4AccChOpen():
 * Puma7+ sends a DISABLE_CH PDSP command; older SoCs program the APDSP
 * command registers directly and poll for completion. */
int PAL_cppi4AccChClose(PAL_Cppi4AccChHnd hnd, void *closeArgs)
{
	PAL_Cppi4AccChObj *accChObj = (PAL_Cppi4AccChObj *) hnd;
#if PUMA7_OR_NEWER_SOC_TYPE
	AVALANCHE_PP_ACC_CH_INFO_t ptr_ch_cfg = { 0 };
	AVALANCHE_PP_RET_e rc; //return code

	ptr_ch_cfg.Channel = accChObj->initCfg.accChanNum ;
	ptr_ch_cfg.Command = 0 ;
	ptr_ch_cfg.Param0Ret = 0 ;
	ptr_ch_cfg.Param1 = 0 ;
	ptr_ch_cfg.Param2 = 0 ;
	if ((rc = __cppi4AccChClose ( &ptr_ch_cfg)) != PP_RC_SUCCESS ) {
		DPRINTK("\nError: APDSP firmware not responding!");
		DPRINTK("APDSP return code: %d\n", rc);
		return PAL_ERROR_FLAG;
	}
#else
	Cppi4PALObj *palCppi4Obj = accChObj->palCppi4Obj;
	Cppi4InitCfg * initCfg = palCppi4Obj->initCfg;
	Uint32 i;
	APDSP_Command_Status_RegsOvly cmdRegs = initCfg->apdspInfo.pdspCmdBase;

	cmdRegs->List_Buffer_Address = 0;
	cmdRegs->Config_A = 0;
	cmdRegs->Config_B = 0;
	cmdRegs->Command = (accChObj->initCfg.accChanNum) | (APDSP_CMD_DISABLE << 8);
	/* TODO: 1000000 is a magic word picked up from mike's code. 
Need to understand
	 * timeout values and fix the code */
	for(i=0; (i < 1000000) && (cmdRegs->Command & (0xFF << 8)); i++);
	if( i==1000000 ) {
		dbgPrint("\nError: APDSP firmware not responding!");
		dbgPrint("APDSP return code: %x\n", (cmdRegs->Command & (0xFF << 24)));
		return PAL_ERROR_FLAG;
	}
#endif
	PAL_osMemFree(0, hnd, sizeof(PAL_Cppi4AccChObj));
	return PAL_SOK;
}
#else
/* HW-mailbox variant: ask the NP-CPU to close the channel identified by the
 * NP-CPU-side handle saved at open time, then free the local object.
 * NOTE(review): returns true/false although the declared return type is int
 * and the local variant returns PAL_SOK/PAL_ERROR_FLAG - callers that test
 * "== PAL_SOK" should verify which convention they get. */
int PAL_cppi4AccChClose(PAL_Cppi4AccChHnd hnd, void *closeArgs)
{
	/* local pointer to free */
	PAL_Cppi4AccChObj *accChObj;
	/* transport message over HW_MBOX */
	Cppi41HwMboxAccChCloseMsg_t adpHwMboxmessageObj;
	Uint32 dataLen = sizeof(Cppi41HwMboxAccChCloseMsg_t);

	if(!hnd) {
		EPRINTK("NULL pointer reference.");
		return false;
	}
	accChObj = (PAL_Cppi4AccChObj *)hnd;
	/*copy PAL_Cppi4AccChObj data to accumulator channel object */
	/* convert data since CPPI need ch_num for accumulator close */
	adpHwMboxmessageObj.accChHnd = (void *)cpu_to_be32((unsigned int)(accChObj->palCppi4Obj));
	DPRINTK("npcpuAddress=%d.", accChObj->palCppi4Obj );
	adpHwMboxmessageObj.cmd = cpu_to_be32(CPPI41_HWMBOX_CMD_ACC_CH_CLOSE);
	/* send a message to NP-CPU and expect to pointer get free in NPCPUaddress space make sure correct poiter by reply*/
	if(hwMbox_sendOpcode(HW_MBOX_MASTER_NP_CPU,NPCPU_APPCPU_HW_MBOX_TAG_CPPI41_MBX, (uint8_t *)&adpHwMboxmessageObj, sizeof(Cppi41HwMboxAccChCloseMsg_t), sizeof(Cppi41HwMboxAccChCloseMsg_t), &dataLen)) {
		EPRINTK("HW mailbox hwMbox_sendOpcode failed.");
		return false;
	}
	/* free local object which was created in Open call */
	kfree(accChObj);
	/* hardware mailbox implementation to close accumulator channel goes here */
	DPRINTK("HW mailbox called to free accumulator channel successful.");
	return true;
}
#endif
EXPORT_SYMBOL(PAL_cppi4AccChClose);

/* Byte-swap every 32-bit field of an accumulator-open mailbox message in
 * place: endiannessBig converts CPU -> big-endian (before sending to the
 * NP-CPU), endiannessLittle converts big-endian -> CPU (after receiving).
 * Returns false on a NULL message or an unknown direction. */
bool Cppi41AccChangeEndianness(Cppi41HwMboxAccChOpenMsg_t *destCfgData, endianness_e endianity)
{
	if( !destCfgData ) {
		EPRINTK(" null pointer reference ");
		return false;
	}
	if( (endianity != endiannessBig) && (endianity != endiannessLittle) ) {
		EPRINTK(" Endianness value pass in datapipe is not correct ");
		return false;
	}
	if(endianity == endiannessBig) {
		(*destCfgData).initCfg.accChanNum = cpu_to_be32((*destCfgData).initCfg.accChanNum);
		(*destCfgData).initCfg.mode = cpu_to_be32((*destCfgData).initCfg.mode);
		(*destCfgData).initCfg.queue.qMgr = cpu_to_be32((*destCfgData).initCfg.queue.qMgr);
		(*destCfgData).initCfg.queue.qNum = cpu_to_be32((*destCfgData).initCfg.queue.qNum);
		(*destCfgData).initCfg.pacingTickCnt = cpu_to_be32((*destCfgData).initCfg.pacingTickCnt);
		(*destCfgData).initCfg.list.listBase = (void *)cpu_to_be32((unsigned int)(*destCfgData).initCfg.list.listBase);
		(*destCfgData).initCfg.list.maxPageEntry = cpu_to_be32((*destCfgData).initCfg.list.maxPageEntry);
		(*destCfgData).initCfg.list.pacingMode = cpu_to_be32((*destCfgData).initCfg.list.pacingMode);
		(*destCfgData).initCfg.list.stallAvoidance = cpu_to_be32((*destCfgData).initCfg.list.stallAvoidance);
		(*destCfgData).initCfg.list.listCountMode = cpu_to_be32((*destCfgData).initCfg.list.listCountMode);
		(*destCfgData).initCfg.list.listEntrySize = cpu_to_be32((*destCfgData).initCfg.list.listEntrySize);
		(*destCfgData).initCfg.list.maxPageCnt = cpu_to_be32((*destCfgData).initCfg.list.maxPageCnt);
		(*destCfgData).initCfg.monitor.pktCountThresh = cpu_to_be32((*destCfgData).initCfg.monitor.pktCountThresh);
		(*destCfgData).initCfg.monitor.pacingMode = cpu_to_be32((*destCfgData).initCfg.monitor.pacingMode);
	}
	if(endianity == endiannessLittle) {
		(*destCfgData).initCfg.accChanNum = be32_to_cpu((*destCfgData).initCfg.accChanNum);
		(*destCfgData).initCfg.mode = be32_to_cpu((*destCfgData).initCfg.mode);
		(*destCfgData).initCfg.queue.qMgr = be32_to_cpu((*destCfgData).initCfg.queue.qMgr);
		(*destCfgData).initCfg.queue.qNum = be32_to_cpu((*destCfgData).initCfg.queue.qNum);
		(*destCfgData).initCfg.pacingTickCnt = be32_to_cpu((*destCfgData).initCfg.pacingTickCnt);
		(*destCfgData).initCfg.list.listBase = (void *)be32_to_cpu((*destCfgData).initCfg.list.listBase);
		(*destCfgData).initCfg.list.maxPageEntry = be32_to_cpu((*destCfgData).initCfg.list.maxPageEntry);
		(*destCfgData).initCfg.list.pacingMode = be32_to_cpu((*destCfgData).initCfg.list.pacingMode);
		(*destCfgData).initCfg.list.stallAvoidance = be32_to_cpu((*destCfgData).initCfg.list.stallAvoidance);
		(*destCfgData).initCfg.list.listCountMode = be32_to_cpu((*destCfgData).initCfg.list.listCountMode);
		(*destCfgData).initCfg.list.listEntrySize = be32_to_cpu((*destCfgData).initCfg.list.listEntrySize);
		(*destCfgData).initCfg.list.maxPageCnt = be32_to_cpu((*destCfgData).initCfg.list.maxPageCnt);
		(*destCfgData).initCfg.monitor.pktCountThresh = be32_to_cpu((*destCfgData).initCfg.monitor.pktCountThresh);
		(*destCfgData).initCfg.monitor.pacingMode = be32_to_cpu((*destCfgData).initCfg.monitor.pacingMode);
	}
	return true;
}
EXPORT_SYMBOL(Cppi41AccChangeEndianness);

/* Return the next accumulator list page for a list-mode channel (NULL in
 * monitor mode), invalidating its cache lines first, and advance curPage
 * with wrap-around at maxPageCnt. */
void* PAL_cppi4AccChGetNextList(PAL_Cppi4AccChHnd hnd)
{
	PAL_Cppi4AccChObj *accChObj = (PAL_Cppi4AccChObj *) hnd;
	Cppi4AccumulatorCfg* initCfg = &accChObj->initCfg;
	Ptr ret = 0;

	if(initCfg->mode)
		return NULL; /* no lists in monitor mode */
	/* data available at base + (current page * number of entries per page * size of each entry) */
	DPRINTK("accChObj->curPage= %d initCfg->list.listBase=%p initCfg->list.maxPageEntry=%d initCfg->list.listEntrySize=%d \n", accChObj->curPage,initCfg->list.listBase,initCfg->list.maxPageEntry,initCfg->list.listEntrySize );
	ret = initCfg->list.listBase + (accChObj->curPage * initCfg->list.maxPageEntry * (initCfg->list.listEntrySize + 1) * sizeof(Uint32));
	/* cache flush of list page */
	PAL_CPPI4_CACHE_INVALIDATE(ret, initCfg->list.maxPageEntry * (initCfg->list.listEntrySize + 1) * sizeof(Uint32));
	accChObj->curPage++;
	DPRINTK("****accChObj->curPage= %d \n", accChObj->curPage );
	if(accChObj->curPage >= initCfg->list.maxPageCnt) {
		accChObj->curPage = 0;
	}
	return ret;
}
EXPORT_SYMBOL(PAL_cppi4AccChGetNextList);

/* PAL ioctl entry point: QUEUE_DIVERT moves a queue's contents via the
 * manager's diversion register; GET_QUEUE_ENTRY_COUNT reads a queue depth. */
int PAL_cppi4Control (PAL_Handle hnd, Uint32 cmd, Ptr cmdArg, Ptr param) { 
CSL_Queue_Manager_Region_RegsOvly regs;
	PAL_CPPI_PP_QMGRs_e qMgr;
	Cppi4Queue *queue;

	switch (cmd) {
	case PAL_CPPI41_IOCTL_QUEUE_DIVERT:
	{
		/* param selects the queue manager; only G1/G2 support diversion */
		qMgr = *(Uint32 *)param;
		if (qMgr != PAL_CPPI_PP_QMGR_G1 && qMgr != PAL_CPPI_PP_QMGR_G2) {
			pr_err("%s:%d: unsupported queue manager!\n", __func__, __LINE__);
			return 1;
		}
		regs = (CSL_Queue_Manager_Region_RegsOvly)q_mgrs[qMgr].regs;
		if (!regs) {
			pr_err("%s:%d: queue manager %d not initialized!\n",__func__, __LINE__, qMgr);
			return 1;
		}
		/* cmdArg carries the raw diversion word written to hardware
		 * (big-endian MMIO register). */
		regs->Queue_Diversion = cpu_to_be32((Uint32)cmdArg);
		break;
	}
	case PAL_CPPI41_IOCTL_GET_QUEUE_ENTRY_COUNT:
	{
		queue = (Cppi4Queue*) cmdArg;
		/* result is written through param (u32 *) */
		return PAL_cppi4QueueGetEntryCount(hnd, *queue, (u32 *)param);
		break; /* NOTE(review): unreachable after the return above */
	}
	default:
	{
		pr_err("%s:%d:: Unsupported ioctl code %d",__func__, __LINE__, cmd);
		return 1;
	}
	}
	return 0;
}
EXPORT_SYMBOL(PAL_cppi4Control);

#ifdef CONFIG_MRPC_CPPI_CLIENT
/**
 * cppi_pp_dump_queues_stats - seq_file show: queues whose current
 * descriptor count differs from the configured free-queue count.
 * Expected counts are fetched over MRPC; only mismatches are printed.
 */
static ssize_t cppi_pp_dump_queues_stats(struct seq_file *m, void *v)
{
	Cppi4Queue q;
	PAL_Handle p_hnd;
	u32 max_qnum[PAL_CPPI41_NUM_QUEUE_MGR];
	u32 desc_count = 0;
	u32 expected = 0;
	u32 i;
	FDqueue_t *pp_fqueues;
	u32 fqueue_cnt = 0;

	/* NOTE(review): p_hnd is not checked for NULL before use below */
	p_hnd = PAL_cppi4Init(NULL, CPPI41_DOMAIN_PP);

	/* init max queue number for all queue managers */
	max_qnum[PAL_CPPI_PP_QMGR_G0] = PAL_CPPI_PP_QMGR_G0_TOTAL_Q_COUNT ;
	max_qnum[PAL_CPPI_PP_QMGR_G1] = PAL_CPPI_PP_QMGR_G1_TOTAL_Q_COUNT ;
	max_qnum[PAL_CPPI_PP_QMGR_G2] = PAL_CPPI_PP_QMGR_G2_TOTAL_Q_COUNT ;
	max_qnum[PAL_CPPI_PP_QMGR_LOCAL] = PAL_CPPI_PP_QMGR_LOCAL_TOTAL_Q_COUNT;

	if(PAL_cppi4GetFreeQueuesCount(&fqueue_cnt) != 0) {
		ERRSEQ(m, "Failed to get free queues count\n");
		return 0;
	}
	pp_fqueues = kzalloc(fqueue_cnt * sizeof(*pp_fqueues), GFP_ATOMIC);
	if (!pp_fqueues) {
		ERRSEQ(m, "Failed to allocate memory\n");
		return 0;
	}
	/* get free queues info via MRPC */
	for (i = 0; i < fqueue_cnt; i++)
		PAL_cppi4GetPpFreeQueueInfo(i, &pp_fqueues[i]);

	seq_printf(m, "\n");
	for (q.qMgr = 0; q.qMgr < PAL_CPPI41_NUM_QUEUE_MGR; q.qMgr++) {
		for (q.qNum = 0; q.qNum < max_qnum[q.qMgr]; q.qNum++) {
			/* get current queue count */
			PAL_cppi4Control(p_hnd, PAL_CPPI41_IOCTL_GET_QUEUE_ENTRY_COUNT, &q, &desc_count);
			/* get expected queue count*/
			for (expected = 0, i = 0; i < fqueue_cnt; i++) {
				if (pp_fqueues[i].qMgr == q.qMgr && pp_fqueues[i].qId == q.qNum) {
					expected = pp_fqueues[i].descCount;
					break;
				}
			}
			/* print only queues that drifted from their expected count */
			if (desc_count != expected)
				seq_printf(m, "%4d %-65s : %4d [%d]\n", q.qNum, PAL_CPPI_PP_QMGR_GET_Q_NAME(q.qMgr, q.qNum), desc_count, expected);
		}
	}
	seq_printf(m, "\n");
	/* NOTE(review): guard is redundant - kfree(NULL) is a no-op */
	if (pp_fqueues)
		kfree(pp_fqueues);
	return 0;
}

/**
 * cppi_bsm_dump - seq_file show: buffer-pool monitor (BSMON) table.
 * For every enabled monitor channel prints watermark, current depth and
 * the configured initial buffer count for the monitored pool.
 */
static ssize_t cppi_bsm_dump(struct seq_file *m, void *v)
{
	qsmonRal_t bsmon[PAL_CPPI41_BMGR_MAX_POOLS];
	CSL_QSMon_RegsOvly bsmon_reg;
	u32 reg, i, pool_id;
	Cppi4BufPool pool;
	Cppi4BufPoolInfo pool_info;

	if (!qsmon_base[PAL_CPPI_PP_BSMON_MGR]) {
		ERRSEQ(m, "bsmon_base isn't initialized\n");
		return 0;
	}
	/* fetch per-pool monitor config over MRPC */
	for (i = 0; i < ARRAY_SIZE(bsmon); i++) {
		if(PAL_cppi4GetBpoolBsmonInfo(i, &bsmon[i]) != 0) {
			ERRSEQ(m, "Failed to get buffers pool %u bsmon info\n", i);
			return 0;
		}
	}
	seq_printf(m, "\n| Pool | Watermark | Current | Init | Pool Name |\n");
	seq_printf(m, "|------|-----------|-----------|-----------|------------------------------------------------|\n");
	bsmon_reg = (Ptr) qsmon_base[PAL_CPPI_PP_BSMON_MGR];
	for (i = 0; i < ARRAY_SIZE(bsmon); i++) {
		if (!bsmon[i].qsmonChEn)
			continue;
		/* Register_B holds the monitored pool number (big-endian MMIO) */
		reg = be32_to_cpu(bsmon_reg->Cmds[bsmon[i].qsmonThread].Register_B);
		pool_id = reg & QSMON_CONFIG_MONITORED_QUEUE_NUM_MASK;
		pool.bPool = pool_id;
		if (PAL_cppi4GetBufferPoolInfo(NULL, pool, &pool_info)) {
			pool_info.numBuf = 0;
			ERRSEQ(m, "Unable to retrieve initial configuration for pool %d\n", pool_id);
		}
		seq_printf(m, "| %2d | %5d | %5d | %5d | %-46s |\n", pool_id,
			be32_to_cpu(bsmon_reg->Depth[bsmon[i].qsmonThread].Stats_Qdepth_Watermark),
			be32_to_cpu(bsmon_reg->Depth[bsmon[i].qsmonThread].Stats_Qdepth_Cnt),
			pool_info.numBuf, PalCppiPpbmgrNames[pool_id]);
	}
	seq_printf(m, "\n");
	return 0;
}

/**
 * cppi_qsm_dump - seq_file show: queue-depth monitor (QSMON) table for
 * both QSM managers; optionally latches watermarks first (g_proc_qsm==0).
 */
static ssize_t cppi_qsm_dump(struct seq_file *m, void *v)
{
	CSL_QSMon_RegsOvly qsmon_reg;
	PAL_CPPI_PP_QSMON_MGRs_e mngr;
Uint32 thread;
	CSL_Reg32 reg;

	if (!qsmon_base[PAL_CPPI_PP_QSMON_MGR1] || !qsmon_base[PAL_CPPI_PP_QSMON_MGR2]) {
		ERRSEQ(m, "qsmon_base isn't initialized\n");
		return 0;
	}
	// check if we want to reset counters
	if (g_proc_qsm == 0) {
		for (mngr = PAL_CPPI_PP_QSMON_MGR1; mngr <= PAL_CPPI_PP_QSMON_MGR2; mngr++) {
			qsmon_reg = (Ptr) qsmon_base[mngr];
			for (thread = 0; thread < PAL_CPPI_QSMON_MAX_THREADS; thread++) {
				reg = be32_to_cpu(qsmon_reg->Cmds[thread].Register_B);
				if (reg & (1 << QSMON_CONFIG_MONITORED_QUEUE_EN_SHIFT)) {
					/* latch watermark to the current depth */
					qsmon_reg->Depth[thread].Stats_Qdepth_Watermark = qsmon_reg->Depth[thread].Stats_Qdepth_Cnt;
				}
			}
		}
		// reset the value so that we don't reset counters unless explicitly asked
		g_proc_qsm = 1;
	}
	seq_printf(m, "| QSM | Thread | Queue | Watermark | Current | Min/Max | Qname\n");
	seq_printf(m, "|-----|--------|-------|-----------|---------|---------|------\n");
	for (mngr = PAL_CPPI_PP_QSMON_MGR1; mngr <= PAL_CPPI_PP_QSMON_MGR2; mngr++) {
		qsmon_reg = (Ptr) qsmon_base[mngr];
		for (thread = 0; thread < PAL_CPPI_QSMON_MAX_THREADS; thread++) {
			reg = be32_to_cpu(qsmon_reg->Cmds[thread].Register_B);
			if (reg & (1 << QSMON_CONFIG_MONITORED_QUEUE_EN_SHIFT)) {
				seq_printf(m, "| %d | %2d | %d.%3d | %5d | %5d | %s | %s\n", mngr, thread, mngr,
					(reg & QSMON_CONFIG_MONITORED_QUEUE_NUM_MASK),
					be32_to_cpu(qsmon_reg->Depth[thread].Stats_Qdepth_Watermark),
					be32_to_cpu(qsmon_reg->Depth[thread].Stats_Qdepth_Cnt),
					(reg & (1 << QSMON_CONFIG_MONITORED_QUEUE_DIR_SHIFT)) ? "Min" : "Max",
					PAL_CPPI_PP_QMGR_GET_Q_NAME((enum PAL_CPPI_PP_QMGRs) mngr, (reg & QSMON_CONFIG_MONITORED_QUEUE_NUM_MASK)));
			}
		}
	}
	return 0;
}

/* Parse the next whitespace-separated numeric token out of 'params' into
 * 'num'; jumps to err_lbl on malformed input. Silently leaves 'num'
 * untouched when no token remains. */
#define __get_input_num(num, params, sep, err_lbl)			\
{									\
	char * __str = NULL;						\
	unsigned long __val = 0;					\
	if (params && (NULL != (__str = strsep(&params, sep)))) {	\
		if (kstrtoul(__str, 0, &__val)) {			\
			EPRINTK("Parameter '%s' is an invalid numeric input\n", __str); \
			goto err_lbl;					\
		}							\
		num = (typeof(num))__val;				\
	}								\
}

/* seq_file show: usage text for the dbg proc write interface */
static ssize_t cppi_dbg_proc_help(struct seq_file *m, void *v)
{
	seq_printf(m, "\n");
	seq_printf(m, "pop descriptor : echo pop > %s\n", DBG_PROC_DIR);
	seq_printf(m, "push descriptor : echo push > %s\n", DBG_PROC_DIR);
	seq_printf(m, "pop buffer : echo bpop > %s\n", DBG_PROC_DIR);
	seq_printf(m, "push buffer : echo bpush > %s\n\n", DBG_PROC_DIR);
	return 0;
}

/**
 * cppi_dbg_proc - proc write handler: pop/push descriptors and buffers
 * on arbitrary queues/pools for debugging.
 *
 * Command format: "<pop|push|bpop|bpush> <args...>" (numeric args parsed
 * by __get_input_num).
 *
 * NOTE(review): error paths return positive EINVAL/EFAULT; a proc write
 * handler should return -EINVAL/-EFAULT (a positive return means "bytes
 * consumed" and can make userspace retry forever) - confirm and fix.
 */
static ssize_t cppi_dbg_proc(struct file *fp, const char __user * ubuf, size_t count, loff_t *ppos)
{
	char lbuf[128] = { 0 };
	char *token = NULL;
	char *params = NULL;
	char *sep = " ";
	u32 bp = 0;
	PAL_Handle p_hnd = NULL;
	Cppi4Queue q = { 0 };
	Cppi4BufPool p = { 0 };
	PAL_Cppi4BD bd = 0;
	PAL_Cppi4QueueHnd qh = NULL;
	unsigned int pktLen = 0;

	if (count >= 128) {
		EPRINTK("\nBuffer Overflow\n");
		return EINVAL;
	}
	if (copy_from_user(lbuf, ubuf, count)) {
		EPRINTK("\nFailed copy from user\n");
		return EFAULT;
	}
	p_hnd = PAL_cppi4Init(NULL, CPPI41_DOMAIN_PP);
	if (!p_hnd)
		return count;
	lbuf[count] = '\0';
	params = &lbuf[0];
	token = strsep(&params, " ");
	if (!token) {
		EPRINTK("Please provide arguments\n");
		goto einval;
	}
	if (!strncasecmp("pop", token, strlen("pop"))) {
		/* pop <qMgr> <qNum>: pop one descriptor and print it */
		__get_input_num(q.qMgr , params, sep, einval);
		__get_input_num(q.qNum , params, sep, einval);
		if (False == Pal_cppi4IsQueueValid(&q)) {
			EPRINTK("Invalid queue G%u.%u\n", q.qMgr, q.qNum);
			goto einval;
		}
		qh = PAL_cppi4QueueOpen(p_hnd, q);
		if (qh) {
			bd = (PAL_Cppi4BD) PAL_cppi4QueuePop(qh);
			printk("Q[G%u.%-5u] ==> BD:[0x%x]\n",q.qMgr, q.qNum, bd);
			PAL_cppi4QueueClose(p_hnd, qh);
		}
	} else if (!strncasecmp("push", token, strlen("push"))) {
		/* push <qMgr> <qNum> <bd> <pktLen>: push a descriptor */
		__get_input_num(q.qMgr, params, sep, einval);
		__get_input_num(q.qNum, params, sep, einval);
		__get_input_num(bd , params, sep, einval);
		__get_input_num(pktLen, params, sep, einval);
		if (False == Pal_cppi4IsQueueValid(&q)) {
			EPRINTK("Invalid queue G%u.%u\n", q.qMgr, q.qNum);
			goto einval;
		}
		qh = PAL_cppi4QueueOpen(p_hnd, q);
		if (qh) {
			PAL_cppi4QueuePush(qh, (PAL_Cppi4BD *)bd, 0, pktLen);
			printk("Q[G%u.%-5u] <== BD:[0x%x] len=%d\n",q.qMgr, q.qNum, bd, pktLen);
			PAL_cppi4QueueClose(p_hnd, qh);
		}
	} else if (!strncasecmp("bpop", token, strlen("bpop"))) {
		/* bpop <pool>: pop one buffer from a buffer pool */
		p.bMgr = 0;
		__get_input_num(p.bPool, params, sep, einval);
		if (p.bMgr != PAL_CPPI_PP_BUF_MGR || p.bPool >= PAL_CPPI41_BMGR_MAX_POOLS) {
			EPRINTK("Invalid buffer pool %u", p.bPool);
			goto einval;
		}
		bp = (u32) PAL_cppi4BufPopBuf(p_hnd, p);
		printk("Pool[%u] ==> BP:[0x%x]\n", p.bPool, bp);
	} else if (!strncasecmp("bpush", token, strlen("bpush"))) {
		/* bpush <pool> <buf>: drop a reference (push back) a buffer */
		p.bMgr = 0;
		__get_input_num(p.bPool, params, sep, einval);
		__get_input_num(bp , params, sep, einval);
		if (p.bMgr != PAL_CPPI_PP_BUF_MGR || p.bPool >= PAL_CPPI41_BMGR_MAX_POOLS) {
			EPRINTK("Invalid buffer pool %u", p.bPool);
			goto einval;
		}
		PAL_cppi4BufDecRefCnt(p_hnd, p, (Ptr) bp);
		printk("Pool[%u] <== BP:[0x%x]\n", p.bPool, bp);
	}
	return count;
einval:
	return EINVAL;
}

/* seq_file show: usage text for the mem_dump proc write interface.
 * NOTE(review): the "<addr> <val>" argument placeholders appear to have
 * been stripped from these strings by a text-extraction step - restore
 * them from revision history. */
static ssize_t cppi_mem_dump_proc_help(struct seq_file *m, void *v)
{
	seq_printf(m, "\n");
	seq_printf(m, "write reg : echo 1 [endian swap] > %s\n", DBG_PROC_DIR);
	seq_printf(m, "read reg : echo 2 [endian swap] > %s\n", DBG_PROC_DIR);
	seq_printf(m, "dump memory : echo 3 [endian swap] > %s\n", DBG_PROC_DIR);
	seq_printf(m, "\n");
	return 0;
}

/**
 * cppi_mem_dump_proc - proc write handler: raw NetIP MMIO read / write /
 * range dump, with optional endian swap of the accessed words.
 *
 * NOTE(review): positive EINVAL/EFAULT returns should be negative errno;
 * also count == 0 would index lbuf[-1] below - confirm and fix.
 */
static ssize_t cppi_mem_dump_proc(struct file *fp, const char __user * ubuf, size_t count, loff_t *ppos)
{
	char lbuf[128] = { 0 };
	char *params = NULL;
	char *sep = " ";
	u32 cmd = 0;
	u32 paddr = 0;
	u32 *vaddr = NULL;
	u32 val = 0;
	u32 i, endian = 1;

	if (count > 128) {
		EPRINTK("\nBuffer Overflow\n");
		return EINVAL;
	}
	if (copy_from_user(lbuf, ubuf, count)) {
		EPRINTK("\nFailed copy from user\n");
		return EFAULT;
	}
	lbuf[count - 1] = '\0';
	params = &lbuf[0];
	__get_input_num(cmd, params, sep, einval);
	switch (cmd) {
	case 1: /* write register */
		__get_input_num(paddr , params, sep, einval);
		__get_input_num(val , params, sep, einval);
		__get_input_num(endian, params, sep, einval);
		vaddr = (u32 *)netip_mmio_to_virtual((unsigned long) paddr);
		*vaddr = endian ? cpu_to_be32(val) : val;
		printk("WRITE: address = 0x%08x, value = 0x%x\n", paddr, val);
		break;
	case 2: /* read register */
		__get_input_num(paddr, params, sep, einval);
		__get_input_num(endian, params, sep, einval);
		vaddr = (u32 *)netip_mmio_to_virtual((unsigned long) paddr);
		printk("READ: 0x%08x = 0x%x\n", paddr, (endian ? be32_to_cpu(*vaddr) : *vaddr));
		break;
	case 3: /* dump memory */
		__get_input_num(paddr, params, sep, einval);
		__get_input_num(val , params, sep, einval);
		__get_input_num(endian, params, sep, einval);
		vaddr = (u32 *)netip_mmio_to_virtual((unsigned long) paddr);
		/* clamp dump length to 2 KB */
		val = min(val, 2048U);
		if (val > 0) {
			printk("\nRegister Dump, 32-byte words: length = %u, Data at 0x%08x:", val, paddr);
			for (i = 0; i < (val / 4); i++) {
				if ((i % 4) == 0)
					printk("\n 0x%08x: ", paddr + i*4);
				printk("%08x ", (endian ?
be32_to_cpu(vaddr[i]) : vaddr[i]));
			}
			printk("\n");
		}
		break;
	default:
		EPRINTK("Wrong command %u\n", cmd);
		goto einval;
	}
	return count;
einval:
	return EINVAL;
}

/* Classify a descriptor by its first word; returns a CPPI41_DESC_TYPE_*
 * value or -1 when the type bits match nothing known.
 * (Parameter f is unused - kept for symmetry with the sibling helpers.) */
static int __get_desc_type(struct seq_file *f, u32 *virt_addr)
{
	u32 dtype, dinfo;

	dinfo = be32_to_cpu(*(virt_addr)); /* get descriptor 1st word */
	dtype = dinfo & PAL_CPPI4_HOSTDESC_DESC_TYPE_MASK;
	dtype = dtype >> PAL_CPPI4_HOSTDESC_DESC_TYPE_SHIFT;
	if (dtype == PAL_CPPI4_HOSTDESC_DESC_TYPE_HOST)
		return CPPI41_DESC_TYPE_HOST;
	if (dtype == PAL_CPPI4_MONODESC_DESC_TYPE_MONO)
		return CPPI41_DESC_TYPE_MONOLITHIC;
	if (dtype == PAL_CPPI4_TDDESC_DESC_TYPE_TD)
		return CPPI41_DESC_TYPE_TEARDOWN;
	/* fall back to the embedded-descriptor type field */
	dtype = dinfo & CPPI41_EM_DESCINFO_DTYPE_MASK;
	dtype = dtype >> CPPI41_EM_DESCINFO_DTYPE_SHIFT;
	if (dtype == CPPI41_EM_DESCINFO_DTYPE_EMB)
		return CPPI41_DESC_TYPE_EMBEDDED;
	return -1;
}

/* Extract the return-queue manager from a host or embedded descriptor;
 * -1 for other descriptor types. */
static int __get_desc_ret_qmgr(struct seq_file *f, void *virt_addr, u32 dtype)
{
	u32 retqmgr, pktinfo;

	if (dtype == CPPI41_DESC_TYPE_HOST) {
		Cppi4HostDescLinux *hdesc = (Cppi4HostDescLinux *)virt_addr;
		pktinfo = be32_to_cpu(hdesc->hw.pktInfo);
		retqmgr = pktinfo & PAL_CPPI4_HOSTDESC_PKT_RETQMGR_MASK;
		return retqmgr >> PAL_CPPI4_HOSTDESC_PKT_RETQMGR_SHIFT;
	} else if (dtype == CPPI41_DESC_TYPE_EMBEDDED) {
		Cppi4EmbdDescPp* edesc = (Cppi4EmbdDescPp *)virt_addr;
		pktinfo = be32_to_cpu(edesc->pktInfo);
		retqmgr = pktinfo & CPPI41_EM_PKTINFO_RETQMGR_MASK;
		return retqmgr >> CPPI41_EM_PKTINFO_RETQMGR_SHIFT;
	} else {
		return -1;
	}
}

/* Extract the return-queue number from a host or embedded descriptor;
 * -1 for other descriptor types. */
static int __get_desc_ret_queue(struct seq_file *f, void *virt_addr, u32 dtype)
{
	u32 retq, pktinfo;

	if (dtype == CPPI41_DESC_TYPE_HOST) {
		Cppi4HostDescLinux *hdesc = (Cppi4HostDescLinux *)virt_addr;
		pktinfo = be32_to_cpu(hdesc->hw.pktInfo);
		retq = pktinfo & PAL_CPPI4_HOSTDESC_PKT_RETQNUM_MASK;
		return retq >> PAL_CPPI4_HOSTDESC_PKT_RETQNUM_SHIFT;
	} else if (dtype == CPPI41_DESC_TYPE_EMBEDDED) {
		Cppi4EmbdDescPp* edesc = (Cppi4EmbdDescPp *)virt_addr;
		pktinfo = be32_to_cpu(edesc->pktInfo);
		retq = pktinfo & CPPI41_EM_PKTINFO_RETQ_MASK;
		return retq >> CPPI41_EM_PKTINFO_RETQ_SHIFT;
	} else {
		return -1;
	}
}

/**
 * cppi_free_queues_check_dbg - verify the MRPC-provided free-queue
 * configuration: valid manager/queue/region/type ids, descriptor sizes
 * matching their regions, and per-region descriptor counts adding up.
 * Output goes to the seq_file; returns -1 only when the total descriptor
 * count exceeds the hardware maximum, 0 otherwise.
 */
static int cppi_free_queues_check_dbg(struct seq_file *f)
{
	u32 i;
	u32 fqueue_count;
	u32 total_desc_num = 0;
	u32 region_desc_cnt[PAL_CPPI41_MAX_DESC_REGIONS] = { 0 };
	Cppi4DescReg region;
	FDqueue_t fq;

	if (PAL_SOK != PAL_cppi4GetFreeQueuesCount(&fqueue_count)) {
		ERRSEQ(f, "Failed to get free queues count\n");
		return 0;
	}
	seq_printf(f, " Verifying all free queues are well configured...\n");
	seq_printf(f, "-------------------------------------------------------------------------------------------------------\n");
	seq_printf(f, " Queue name | qID | region | count | size | type \n");
	seq_printf(f, "-------------------------------------------------------------------------------------------------------\n");
	for (i = 0; i < fqueue_count; i++) {
		if (PAL_SOK != PAL_cppi4GetPpFreeQueueInfo(i, &fq)) {
			ERRSEQ(f, "Failed to get free queue info\n");
			continue;
		}
		if (!IS_QMGR_ID_VALID(fq.qMgr)) {
			seq_printf(f, " Verification Failed: Config entry %u: queue mgr %u isn't valid\n", i, fq.qMgr);
			continue;
		}
		if (!IS_QUEUE_ID_VALID(fq.qId, fq.qMgr)) {
			seq_printf(f, " Verification Failed: Config entry %u: queue %u isn't valid\n", i, fq.qId);
			continue;
		}
		if (!IS_DESC_REGION_VALID(fq.descRegion)) {
			seq_printf(f, " Verification Failed: Config entry %u %s: descRegion %u isn't valid\n", i, PAL_CPPI_PP_QMGR_GET_Q_NAME(fq.qMgr, fq.qId), fq.descRegion);
			continue;
		}
		if (!IS_DESC_TYPE_VALID(fq.descType)) {
			seq_printf(f, " Verification Failed: Config entry %u %s: descType %u isn't valid\n", i, PAL_CPPI_PP_QMGR_GET_Q_NAME(fq.qMgr, fq.qId), fq.descType);
			continue;
		}
		if (PAL_SOK != PAL_cppi4GetPpDescRegionInfo((u32)fq.descRegion, &region)) {
			ERRSEQ(f, "Failed to get descriptor region info\n");
			continue;
		}
		/* descriptor size must match the region it is carved from */
		if (fq.descSize != region.szDesc) {
			seq_printf(f, " Verification Failed: Config entry %u %s: descSize %u doesn't match region %u descSize %u\n", i, PAL_CPPI_PP_QMGR_GET_Q_NAME(fq.qMgr, fq.qId), fq.descSize, fq.descRegion, region.szDesc);
			continue;
		}
		seq_printf(f, "%-64s | %3u | %7u| %7u | %4u | %u\n", PAL_CPPI_PP_QMGR_GET_Q_NAME(fq.qMgr, fq.qId), fq.qId, fq.descRegion, fq.descCount, fq.descSize, fq.descType);
		region_desc_cnt[fq.descRegion] += fq.descCount;
	}
	seq_printf(f, "-----------------------------------------------------------------------------------------------------\n");
	seq_printf(f, " \n Verifying descriptors regions are configured according to the free queues...\n");
	for (i = PAL_CPPI_PP_VOICE_GLOBAL_DESC_REGION; i < PAL_CPPI41_MAX_DESC_REGIONS; i++) {
		if (PAL_SOK != PAL_cppi4GetPpDescRegionInfo(i, &region)) {
			ERRSEQ(f, "Failed to get descriptor region info\n");
			continue;
		}
		total_desc_num += region.numDesc;
		seq_printf(f, " Region %2u: desc num %5u, desc size %4u, on %s\n", i, region.numDesc, region.szDesc, region.isOnChip ? "chip" : "ddr");
		if (region.numDesc != region_desc_cnt[i]) {
			seq_printf(f, " Region %u desc count is %u while queues array count is %u\n", i, region.numDesc, region_desc_cnt[i]);
		}
	}
	if (total_desc_num > CPPI_QMGR_MAX_DESC_SUPPORT) {
		seq_printf(f, " Verification Failed: %u descriptors in the regions exceeds the maximum of %u\n", total_desc_num, CPPI_QMGR_MAX_DESC_SUPPORT);
		return -1;
	}
	seq_printf(f, " Total number of descriptors configured to the regions: %u\n", total_desc_num);
	seq_printf(f, "-------------------------------------------------------------------------------------------------------\n");
	return 0;
}

/**
 * cppi_descriptors_check_dbg - pop every descriptor of every host /
 * embedded free queue, sanity-check its type and return-queue fields,
 * then push it back. Non-destructive (descriptors are re-pushed) but
 * assumes the queues are quiescent while scanning.
 */
static int cppi_descriptors_check_dbg(struct seq_file *f)
{
	PAL_Cppi4QueueHnd qhnd = NULL;
	PAL_Handle phnd;
	void *desc_virt;
	FDqueue_t fq;
	Cppi4Queue retq, qinfo;
	u32 desc_type, desc_phys, q_desc_num;
	u32 i;
	Bool qOk = 1;
	u32 fqueue_count;

	seq_printf(f, "\n Verifying all descriptors within each free queue...\n");
	if (PAL_SOK != PAL_cppi4GetFreeQueuesCount(&fqueue_count)) {
		ERRSEQ(f, "Failed to get free queues count\n");
		return 0;
	}
	phnd = PAL_cppi4Init(NULL, CPPI41_DOMAIN_PP);
	if (!phnd) {
		seq_printf(f, " Failed to get PP pal handle\n");
		return -1;
	}
	for (i = 0; i < fqueue_count; i++) {
		if (PAL_SOK != PAL_cppi4GetPpFreeQueueInfo(i, &fq)) {
			ERRSEQ(f, "Failed to get free queue info\n");
			continue;
		}
		/* check only embedded and host descriptors */
		if (fq.descType != CPPI41_DESC_TYPE_EMBEDDED && fq.descType != CPPI41_DESC_TYPE_HOST)
			continue;
		if (!fq.descCount)
			continue;
		/* skip voice DSP/infra queues (owned by another processor) */
		if (fq.qId == PAL_CPPI_PP_QMGR_G2_VOICE_DSP_RX_EMB_FD_Q_NUM || fq.qId == PAL_CPPI_PP_QMGR_G2_VOICE_INFRA_RX_EMB_FD_Q_NUM)
			continue;
		/* get queue handle */
		qinfo.qMgr = fq.qMgr;
		qinfo.qNum = fq.qId;
		qhnd = PAL_cppi4QueueOpen(phnd, qinfo);
		if (!qhnd) {
			seq_printf(f, " Failed to get queue %s handle\n", PAL_CPPI_PP_QMGR_GET_Q_NAME(fq.qMgr, fq.qId));
			continue;
		}
		qOk = 1;
		PAL_cppi4QueueGetEntryCount(phnd, qinfo, &q_desc_num);
		seq_printf(f, " Scan Queue %s(%u) (descCount = %u[%u]) .... ", PAL_CPPI_PP_QMGR_GET_Q_NAME(fq.qMgr, fq.qId), fq.qId, q_desc_num, fq.descCount);
		if (fq.descCount < q_desc_num) {
			seq_printf(f, "\n Error: number of descriptors (%u) is bigger than initial one (%u)", q_desc_num, fq.descCount);
			qOk = 0;
		}
		while (q_desc_num--) {
			desc_phys = (PAL_Cppi4BD)PAL_cppi4QueuePop(qhnd);
			if (!desc_phys) {
				seq_printf(f, "\n Error: got NULL descriptor");
				qOk = 0;
				break;
			}
			/* map through lowmem when possible, MMIO window otherwise */
			desc_virt = virt_addr_valid(PAL_CPPI4_PHYS_2_VIRT(desc_phys)) ? PAL_CPPI4_PHYS_2_VIRT(desc_phys) : (void *)IO_PHY2VIRT(desc_phys);
			desc_type = __get_desc_type(f, desc_virt);
			if (desc_type == -1) {
				seq_printf(f, "\n Error: fail to get descriptor %#x type", desc_phys);
				PAL_cppi4QueuePush(qhnd, (Ptr)desc_phys, PAL_CPPI4_DESCSIZE_2_QMGRSIZE(fq.descSize), 0);
				qOk = 0;
				continue;
			}
			if (desc_type != fq.descType) {
				seq_printf(f, "\n Error: found descriptor %#x with wrong type %s(%u), expected %s", desc_phys, CPPI_DESC_TYPE_STR(desc_type), desc_type, CPPI_DESC_TYPE_STR(fq.descType));
				qOk = 0;
			}
			retq.qMgr = __get_desc_ret_qmgr(f, desc_virt, desc_type);
			if (!IS_QMGR_ID_VALID(retq.qMgr)) {
				seq_printf(f, "\n Error: found descriptor %#x with invalid qMgr %u", desc_phys, retq.qMgr);
				qOk = 0;
			}
			retq.qNum = __get_desc_ret_queue(f, desc_virt, desc_type);
			if (!IS_QUEUE_ID_VALID(retq.qNum, retq.qMgr)) {
				seq_printf(f, "\n Error: found descriptor %#x with invalid return qId %u", desc_phys, retq.qNum);
				qOk = 0;
			}
			if ((retq.qNum || retq.qMgr) && (retq.qMgr != fq.qMgr || retq.qNum != fq.qId)) {
				Uint16 qMgr_qId = PAL_CPPI_NETDEV_BUILD_Q_INFO(fq.qMgr, fq.qId);
				Uint16 ret_qMgr_qId = PAL_CPPI_NETDEV_BUILD_Q_INFO(retq.qMgr, retq.qNum);
				/* In some cases like the ones which require completion
				 * indication it is expected to find a different retQ than the
				 * queue itself. Do not count those as an error */
				if (!((qMgr_qId == PAL_CPPI_NETDEV_BUILD_Q_INFO(PAL_CPPI_PP_QMGR_LOCAL, PAL_CPPI_PP_QMGR_LOCAL_PP_INTERNAL_EMB_FD_Q_NUM)) ||
				      (qMgr_qId == PAL_CPPI_NETDEV_BUILD_Q_INFO(PAL_CPPI_PP_QMGR_G2, PAL_CPPI_PP_QMGR_G2_HOST2PP_LOW_HOST_FD_Q_NUM) && ret_qMgr_qId == PAL_CPPI_NETDEV_BUILD_Q_INFO(PAL_CPPI_PP_QMGR_G2, PAL_CPPI_PP_QMGR_G2_HOST2PP_LOW_TX_COMPLETE_Q_NUM)) ||
				      (qMgr_qId == PAL_CPPI_NETDEV_BUILD_Q_INFO(PAL_CPPI_PP_QMGR_G2, PAL_CPPI_PP_QMGR_G2_HOST2PP_HI_HOST_FD_Q_NUM) && ret_qMgr_qId == PAL_CPPI_NETDEV_BUILD_Q_INFO(PAL_CPPI_PP_QMGR_G2, PAL_CPPI_PP_QMGR_G2_HOST2PP_HI_TX_COMPLETE_Q_NUM)) ||
				      (qMgr_qId == PAL_CPPI_NETDEV_BUILD_Q_INFO(PAL_CPPI_PP_QMGR_G2, PAL_CPPI_PP_QMGR_G2_NP2APP_HOST_FD_Q_NUM) && ret_qMgr_qId == PAL_CPPI_NETDEV_BUILD_Q_INFO(PAL_CPPI_PP_QMGR_G2, PAL_CPPI_PP_QMGR_G2_NP2APP_TX_COMPLETE_Q_NUM)) ||
				      (qMgr_qId == PAL_CPPI_NETDEV_BUILD_Q_INFO(PAL_CPPI_PP_QMGR_G2, PAL_CPPI_PP_QMGR_G2_APP2NP_HOST_FD_Q_NUM) && ret_qMgr_qId == PAL_CPPI_NETDEV_BUILD_Q_INFO(PAL_CPPI_PP_QMGR_G2, PAL_CPPI_PP_QMGR_G2_APP2NP_TX_COMPLETE_Q_NUM)))) {
					seq_printf(f, "\n Error: found descriptor %#x from different free queue - %s(%u)", desc_phys, PAL_CPPI_PP_QMGR_GET_Q_NAME(retq.qMgr, retq.qNum), retq.qNum);
					qOk = 0;
				}
			}
			/* push the descriptor back to the queue */
			PAL_cppi4QueuePush(qhnd, (Ptr)desc_phys, PAL_CPPI4_DESCSIZE_2_QMGRSIZE(fq.descSize), 0);
		}
		if (qOk)
			seq_printf(f,"OK\n");
		else
			seq_printf(f,"\n");
	}
	return 0;
}

/* seq_file show: run both sanity passes (free-queue config + per-queue
 * descriptor scan) back to back. */
static ssize_t cppi_verification_dbg(struct seq_file *f, void *v)
{
	seq_printf(f, " CPPI Verification Test Start\n");
	seq_printf(f, "==============================\n");
	cppi_free_queues_check_dbg(f);
	cppi_descriptors_check_dbg(f);
	return 0;
}

static int cppi_pp_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, cppi_pp_dump_queues_stats, NULL);
}

static const struct file_operations cppi_pp_proc_fops = {
	.open = cppi_pp_proc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int
cppi_bsm_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, cppi_bsm_dump, NULL);
}

static const struct file_operations cppi_bsm_proc_fops = {
	.open = cppi_bsm_proc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int cppi_qsm_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, cppi_qsm_dump, NULL);
}

/**
 * cppi_qsm_proc_write - proc write handler: set g_proc_qsm from user
 * input (writing "0" makes the next read latch the QSMON watermarks).
 *
 * NOTE(review): kstrtoul() stores an unsigned long through a cast of
 * &g_proc_qsm - if g_proc_qsm is narrower than unsigned long this writes
 * out of bounds; confirm the declaration or use kstrtoint().
 */
static ssize_t cppi_qsm_proc_write(struct file *fp, const char __user * buf, size_t count, loff_t *ppos)
{
	unsigned char local_buf[10];
	int ret_val = 0;

	if (count >= 10) {
		printk(KERN_ERR "\n%s[%d]: Buffer Overflow\n", __FUNCTION__, __LINE__);
		return -EFAULT;
	}
	if (copy_from_user(local_buf, buf, count)) {
		return -EFAULT;
	}
	local_buf[count]='\0';
	ret_val = count;
	// set a global flag to signal to the read proc called later whether we want to reset counters
	if (kstrtoul(local_buf, 10, (unsigned long*) &g_proc_qsm)) {
		printk(KERN_ERR "\n%s[%d]: Illegal argument\n", __FUNCTION__, __LINE__);
		return -EFAULT;
	}
	return ret_val;
}

static const struct file_operations cppi_qsm_proc_fops = {
	.open = cppi_qsm_proc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = cppi_qsm_proc_write,
};

static int cppi_dbg_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, cppi_dbg_proc_help, NULL);
}

static const struct file_operations cppi_dbg_proc_fops = {
	.open = cppi_dbg_proc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = cppi_dbg_proc,
};

static int cppi_mem_dump_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, cppi_mem_dump_proc_help, NULL);
}

static const struct file_operations cppi_mem_dump_proc_fops = {
	.open = cppi_mem_dump_proc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = cppi_mem_dump_proc,
};

static int cppi_verification_open(struct inode *inode, struct file *file)
{
	return single_open(file, cppi_verification_dbg, NULL);
}

static const struct file_operations cppi_verification_proc_fops = {
	.open = cppi_verification_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static struct proc_dir_entry *cppi41_proc_root_dir = NULL;
static struct proc_dir_entry *cppi41_proc_sr_dir = NULL;
static struct proc_dir_entry *cppi41_proc_stats_dir = NULL;

/**
 * pal_cppi41_procs_init - create the /proc/cppi hierarchy and its debug
 * files. Returns 0 on success, 1 on any creation failure.
 *
 * NOTE(review): proc_mkdir() returns NULL on failure, not an ERR_PTR,
 * so "dir && IS_ERR(dir)" never detects a failed mkdir - the checks
 * should be "!dir". Confirm and fix.
 */
static int __init pal_cppi41_procs_init(void)
{
	/* create proc cppi root directory */
	cppi41_proc_root_dir = proc_mkdir("cppi", NULL);
	if (cppi41_proc_root_dir && IS_ERR(cppi41_proc_root_dir)) {
		EPRINTK("Failed to create 'cppi' proc folder\n");
		return 1;
	}
	/* create proc cppi/sr directory */
	cppi41_proc_sr_dir = proc_mkdir("sr", cppi41_proc_root_dir);
	if (cppi41_proc_sr_dir && IS_ERR(cppi41_proc_sr_dir)) {
		EPRINTK("Failed to create 'cppi/sr' proc folder\n");
		return 1;
	}
	/* create proc cppi/sr/stats directory */
	cppi41_proc_stats_dir = proc_mkdir("stats", cppi41_proc_sr_dir);
	if (cppi41_proc_stats_dir && IS_ERR(cppi41_proc_stats_dir)) {
		EPRINTK("Failed to create 'cppi/sr/stats' proc folder\n");
		return 1;
	}
	/* CPPI debug API */
#if (defined(CONFIG_CPPI_DEBUG_API) || defined(CONFIG_CPPI_VAL_DEBUG_API))
	PROC_CREATE_FILE("bsm", 0440, cppi41_proc_sr_dir, &cppi_bsm_proc_fops);
#endif
#ifdef CONFIG_CPPI_DEBUG_API
	PROC_CREATE_FILE("qsm", 0660, cppi41_proc_sr_dir, &cppi_qsm_proc_fops);
	PROC_CREATE_FILE("dbg", 0660, cppi41_proc_sr_dir, &cppi_dbg_proc_fops);
	PROC_CREATE_FILE("mem_dump", 0660, cppi41_proc_sr_dir, &cppi_mem_dump_proc_fops);
	PROC_CREATE_FILE("verifyCppiSanity", 0440, cppi41_proc_sr_dir, &cppi_verification_proc_fops);
#endif
	PROC_CREATE_FILE("all", 0440, cppi41_proc_stats_dir, &cppi_pp_proc_fops);
	return 0;
}
#endif

/* Module init: set up the PAL lock and (MRPC client builds only) the
 * /proc/cppi debug tree. */
static int __init pal_cppi41_init(void)
{
	spin_lock_init(&init_lock);
	DPRINTK("pal_cppi41_init\n");
#ifdef CONFIG_MRPC_CPPI_CLIENT
	if(pal_cppi41_procs_init())
		return 1;
#endif
	return 0;
}

/* Module exit: tear down the whole /proc/cppi subtree. */
static void __exit pal_cppi41_exit(void)
{
#ifdef CONFIG_MRPC_CPPI_CLIENT
	remove_proc_subtree("cppi", NULL);
#endif
	DPRINTK("pal_cppi41_exit\n");
}

module_init(pal_cppi41_init);
module_exit(pal_cppi41_exit);