/* * puma5_pp.c * Description: * See below. * * * Copyright (C) 2007 Texas Instruments, Inc. All rights reserved. * * This program is free software; you can distribute it and/or modify it * under the terms of the GNU General Public License (Version 2) as * published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. * */ /* Puma-5 Packet Processor initialization. * Contains Puma-5 specific initialization. The numbers (addresses etc) are * Puma-5 specific. The static structures are filled in with Puma-5 specific * data and the generic PPD init function gets called in the end * with this data. */ #include #include #include #ifdef EL /* TODO: Include LE binaries and use corresponding structures */ #else #include #include #include #include #endif #include #include #include #define TI_PP_NUM_FW 4 extern Cppi4InitCfg cppi4InitCfg_g; /* This is PPD global initialization structure. Contains static initialization * information for the whole PPD subsystem. 
*/

/* PDSP firmware images (big-endian builds) handed to PPD / PP-subsystem init.
 * One entry per PDSP: boot code, CPDSP, MPDSP and QPDSP. */
TI_PP_FIRMWARE ppFirmware_g [TI_PP_NUM_FW] =
{
    {
        .id   = TI_PP_BOOTS_FW,
        .data = (Uint32*)&PDSPcode_c_bootcode_be_0002[0],
        .size = sizeof (PDSPcode_c_bootcode_be_0002)
    },
    {
        .id   = TI_PP_CPDSP_FW,
        .data = (Uint32*)&PDSPcode_c_srp5_be_0002[0],
        .size = sizeof (PDSPcode_c_srp5_be_0002)
    },
    {
        .id   = TI_PP_MPDSP_FW,
        .data = (Uint32*)&PDSPcode_m_srp5_be_0002[0],
        .size = sizeof (PDSPcode_m_srp5_be_0002)
    },
    {
        .id   = TI_PP_QPDSP_FW,
        .data = (Uint32*)&PDSPcode_q_srp5_be_0002[0],
        .size = sizeof (PDSPcode_q_srp5_be_0002)
    }
};

/* CHK: Forcing discard for packets not matching any installed PID
 * (dflt_host_rx_q == 0 below). */
TI_PPD_CONFIG ppCfg_g =
{
    .dflt_host_rx_q       = 0 /* Discard */,
    .dflt_host_rx_dst_tag = 0,
    .host_ev_queue        = PP_HOST_EVENTQ,
    .host_q_mgr           = PP_HOST_EVENTQMGR,

/* Session-sync timeout parameters passed to the PP firmware. */
#define SYNC_TIMEOUT_MICROSEC 1000
#define SYNC_TIMEOUT_MAXPACKET 48

    .sync_max_pkt         = SYNC_TIMEOUT_MAXPACKET,
    .sync_timeout_10us    = SYNC_TIMEOUT_MICROSEC/10, /* firmware unit is 10us */
    .buff_pool_indx       = BMGR0_POOL13
};

/* On-Chip descriptor management
 * IMPORTANT NOTES:
 * ===============
 * - The following macros control how the on-chip descriptor memory is divided
 *   into different descriptors. All the base addresses must be carefully
 *   determined so that the regions do not overlap.
 * - Also note that all the on-chip descriptors *must* be of size equal to the
 *   size specified in puma5_cppi.c cppi config structure for the respective
 *   region.
 */
#define PREFETCH_DESC_BASEADDR AVALANCHE_NWSS_ONCHIPDESC_BASE
/* Replicator descriptors sit immediately after the prefetcher descriptors. */
#define REPLICATOR_DESC_BASEADDR ( PREFETCH_DESC_BASEADDR + (PPFW_PREFETCH_DESC_COUNT*PREFETCH_FD_SIZE) )

/* Prefetcher configuration: free-descriptor queues, replicator descriptors,
 * prefetch buffer block and the APDSP command register block. */
PrefchCfg prefCfg_g =
{
    .pfDescCnt    = PPFW_PREFETCH_DESC_COUNT,
    .pfDescBase   = (Ptr) IO_VIRT2PHY( PREFETCH_DESC_BASEADDR ),
    .pfFQ.qMgr    = PPFW_CPPI4x_APDSP_FD_QMGR,
    .pfFQ.qNum    = PPFW_CPPI4x_APDSP_FD_QNUM,

    .repDescCnt   = PPFW_REPLICA_DESC_COUNT,
    .repDescBase  = (Ptr) IO_VIRT2PHY( REPLICATOR_DESC_BASEADDR ),
    .repFQ.qMgr   = PPFW_CPPI4x_MPDSP_FD_QMGR,
    .repFQ.qNum   = PPFW_CPPI4x_MPDSP_FD_QNUM,

    .pfBlkCnt     = PPFW_PREFETCH_BUFF_COUNT,
    .pfBlkBase    = (Ptr) IO_VIRT2PHY( AVALANCHE_NWSS_APDSP_PREFBLK_BASE ),
    .pfFBQ.qMgr   = PPFW_CPPI4x_APDSP_PREBUF_QMGR,
    .pfFBQ.qNum   = PPFW_CPPI4x_APDSP_PREBUF_QNUM,

    .pfDataSize   = PREFETCH_DATA_SIZE,
    .pfDataOffset = PREFETCH_DATA_OFFSET,

    .pfCmdBase    = (Ptr) AVALANCHE_NWSS_APDSP_CMD_BASE
};

/* Per-block (per driver / per queue) free-descriptor division of a
 * descriptor region. */
typedef struct
{
    Uint32 numDesc;      /* number of descriptors in this block */
    Uint32 qNum;         /* free-descriptor queue the block is pushed to */
    Ptr    firstDescPtr; /* first descriptor of the block */
    Uint32 isAllocated;  /* allocation status of the block */

    /* Desc config for the group */
    Uint32 pktType;      /* packet type programmed into each descriptor */
} BDBlkInfo;

/* Embedded descriptor table:
 * - This table aggregates the embedded descriptors in the system. It contains
 *   information about the first descriptor and allocation status of each
 *   division of the region. Generally this division is based on per driver or
 *   per Q basis.
 */
typedef struct
{
    Ptr    buffDescRegionPtr; /* base of the allocated descriptor region */
    Uint32 qMgr;              /* queue manager owning the FD queues */
    Uint32 numDesc;           /* total descriptors in the region */
    Uint32 szDesc;            /* size of each descriptor (bytes) */
    Uint32 numBlks;           /* number of valid entries in BDBlk[] */

    BDBlkInfo BDBlk [PAL_CPPI41_FD_Q_LAST - PAL_CPPI41_FD_Q_BASE];
} EmbBDCfg;

/* Embedded free-descriptor layout for the whole PP system.
 * numDesc and szDesc must match the corresponding CPPI region config. */
EmbBDCfg ppEmbBDCfg_g =
{
    .qMgr       = PPFW_CPPI4x_FD_QMGR,
    .numDesc    = 2816 + PPFW_EVENT_DESC_NUM, /* Must be same as in Cppi Cgf structure */
    .szDesc     = 64,                         /* Must be same as for specific region */
    .numBlks    = 9,                          /* Change for each block added/removed */

    /* Host2PP Infra 0 */
    .BDBlk[0].numDesc = PPFW_RX_EMBEDDED_BD_NUM_LOW,
    .BDBlk[0].qNum    = PPFW_CPPI4x_FD_QNUM(0), //PPFW_CPPI4x_FD_QMGR
    .BDBlk[0].pktType = PAL_CPPI4_HOSTDESC_PKT_TYPE_ETH,

    /* Host2PP Infra 1 */
    .BDBlk[1].numDesc = PPFW_RX_EMBEDDED_BD_NUM_MED,
    .BDBlk[1].qNum    = PPFW_CPPI4x_FD_QNUM(1), //PPFW_CPPI4x_FD_QMGR
    .BDBlk[1].pktType = PAL_CPPI4_HOSTDESC_PKT_TYPE_ETH,

    /* Ethernet */
    .BDBlk[2].numDesc = CPMAC_RX_EMBEDDED_BD_NUM,
    .BDBlk[2].qNum    = CPMAC_CPPI4x_FD_QNUM(0), //CPMAC_CPPI4x_FD_QMGR
    .BDBlk[2].pktType = PAL_CPPI4_HOSTDESC_PKT_TYPE_ETH,

    /* USB */
    .BDBlk[3].numDesc = 64,
    .BDBlk[3].qNum    = USB_CPPI4x_EP0_FD_QNUM(0), // 145/146/147/148
    .BDBlk[3].pktType = PAL_CPPI4_HOSTDESC_PKT_TYPE_USB,

    /* DOCSIS */
    .BDBlk[4].numDesc = CNI_RX_EMBEDDED_BD_NUM_LOW,
    .BDBlk[4].qNum    = CNI_CPPI4x_FD_QNUM( PAL_CPPI4x_PRTY_LOW ),
    .BDBlk[4].pktType = PAL_CPPI4_HOSTDESC_PKT_TYPE_ETH,

    .BDBlk[5].numDesc = CNI_RX_EMBEDDED_BD_NUM_HIGH,
    .BDBlk[5].qNum    = CNI_CPPI4x_FD_QNUM( PAL_CPPI4x_PRTY_HIGH ),
    .BDBlk[5].pktType = PAL_CPPI4_HOSTDESC_PKT_TYPE_ETH,

    /* PP2Host Events */
    .BDBlk[6].numDesc = PPFW_EVENT_DESC_NUM,
    .BDBlk[6].qNum    = PP_HOST_EVENT_FDQ, //PP_HOST_EVENT_FDQMGR
    .BDBlk[6].pktType = PAL_CPPI4_HOSTDESC_PKT_TYPE_ETH,

    /* WLAN RX */
    .BDBlk[7].numDesc = WLAN_RX_EMBEDDED_BD_NUM,
    .BDBlk[7].qNum    = WLAN_IN_CPPI4x_FD_QNUM,
    .BDBlk[7].pktType = PAL_CPPI4_HOSTDESC_PKT_TYPE_ETH,

    /* WLAN TX */
    .BDBlk[8].numDesc = WLAN_TX_EMBEDDED_BD_NUM,
    .BDBlk[8].qNum    = WLAN_OUT_CPPI4x_FD_QNUM,
    .BDBlk[8].pktType = PAL_CPPI4_HOSTDESC_PKT_TYPE_ETH,
};
/**************************************************************************/
/*! \fn static void setupRecycleInfra (PAL_Handle hnd)
 **************************************************************************
 *  \brief Setup infrastructure DMA for recycling use resources.
 *         Opens the recycle free-BD queue, then opens and enables the
 *         Rx and Tx sides of the recycle infrastructure channel.
 *  \return none.
 **************************************************************************/
static void setupRecycleInfra (PAL_Handle hnd)
{
    Cppi4TxChInitCfg txCh;
    Cppi4RxChInitCfg rxCh;
    PAL_Cppi4TxChHnd cppi4TxChHnd;
    PAL_Cppi4RxChHnd cppi4RxChHnd;
    Cppi4Queue       tmpQ;

    /* Open the recycle free-buffer-descriptor queue (handle not kept;
     * the open keeps the queue's reference count non-zero). */
    tmpQ.qMgr = RECYCLE_CPPI4x_FBD_QMGR;
    tmpQ.qNum = RECYCLE_CPPI4x_FBD_QNUM;
    PAL_cppi4QueueOpen (hnd, tmpQ);

    /* Set up Rx channel */
    /* NOTE(review): txCh/rxCh are stack structs and only the fields below are
     * assigned before the Open calls; any other Cppi4*ChInitCfg fields are
     * passed uninitialized — confirm the PAL ignores them. */
    rxCh.chNum              = RECYCLE_INFRA_CHN(0);
    rxCh.dmaNum             = PAL_CPPI41_DMA_BLOCK1;
    rxCh.sopOffset          = 0;
    rxCh.retryOnStarvation  = 0;
    rxCh.rxCompQueue.qMgr   = 0;
    rxCh.rxCompQueue.qNum   = 0;
    rxCh.defDescType        = CPPI41_DESC_TYPE_EMBEDDED;
    rxCh.u.embeddedPktCfg.fdQueue.qMgr = RECYCLE_CPPI4x_FBD_QMGR;
    rxCh.u.embeddedPktCfg.fdQueue.qNum = RECYCLE_CPPI4x_FBD_QNUM;
    rxCh.u.embeddedPktCfg.numBufSlot = (EMSLOTCNT-1);
    rxCh.u.embeddedPktCfg.sopSlotNum = 0;
    rxCh.u.embeddedPktCfg.fBufPool[0].bMgr  = BUF_POOL_MGR0;
    rxCh.u.embeddedPktCfg.fBufPool[0].bPool = 0;
    rxCh.u.embeddedPktCfg.fBufPool[1].bMgr  = BUF_POOL_MGR0;
    rxCh.u.embeddedPktCfg.fBufPool[1].bPool = 0;
    rxCh.u.embeddedPktCfg.fBufPool[2].bMgr  = BUF_POOL_MGR0;
    rxCh.u.embeddedPktCfg.fBufPool[2].bPool = 0;
    rxCh.u.embeddedPktCfg.fBufPool[3].bMgr  = BUF_POOL_MGR0;
    rxCh.u.embeddedPktCfg.fBufPool[3].bPool = 0;
    cppi4RxChHnd = PAL_cppi4RxChOpen(hnd, &rxCh, NULL);

    /* Set up Tx channel */
    txCh.chNum        = RECYCLE_INFRA_CHN(0);
    txCh.dmaNum       = PAL_CPPI41_DMA_BLOCK1;
    txCh.tdQueue.qMgr = DMA1_CPPI4x_FTD_QMGR;
    txCh.tdQueue.qNum = DMA1_CPPI4x_FTD_QNUM;
    cppi4TxChHnd = PAL_cppi4TxChOpen(hnd, &txCh, NULL);

    if (!cppi4TxChHnd || !cppi4RxChHnd)
    {
        printk ("%s: infra channel setup failed for channel %d\n",
                __FUNCTION__, RECYCLE_INFRA_CHN(0) );
        return;
    }

    /* Enable Tx-Rx channels */
    PAL_cppi4EnableRxChannel (cppi4RxChHnd, NULL);
    PAL_cppi4EnableTxChannel (cppi4TxChHnd, NULL);

    return;
}

/* Embedded descriptor table:
 * - This table aggregates the embedded descriptors in the system. It contains
 *   information about the first descriptor and allocation status of each
 *   division of the region. Generally this division is based on per driver or
 *   per Q basis.
 */
typedef struct
{
    Ptr    buffDescRegionPtr; /* base of the allocated host-BD region */
    Uint32 qMgr;              /* queue manager owning the FDB queues */
    Uint32 numDesc;           /* total descriptors in the region */
    Uint32 szDesc;            /* size of each descriptor (bytes) */
    Uint32 numBlks;           /* number of valid entries in BDBlk[] */

    BDBlkInfo BDBlk [PAL_CPPI41_FBD_Q_LAST - PAL_CPPI41_FBD_Q_BASE];
} HostBDCfg;

/* Host free-descriptor layout for the Host->PP proxy queues (low/med/high
 * priority).  numDesc and szDesc must match the CPPI region config. */
HostBDCfg ppHostBDCfg_g =
{
    .qMgr    = PPFW_CPPI4x_FDB_QMGR,
    .numDesc = PPFW_HOST_BD_NUM,  /* Must be same as in Cppi Cgf structure */
    .szDesc  = PPFW_HOST_BD_SIZE, /* Must be same as for specific region */
    .numBlks = 3,                 /* Change for each block added/removed */

    /* Host2PP Low */
    .BDBlk[0].numDesc = PPFW_TX_HOST_BD_NUM_LOW,
    .BDBlk[0].qNum    = PPFW_CPPI4x_FDB_QNUM(0), //PPFW_CPPI4x_FD_QMGR
    .BDBlk[0].pktType = PAL_CPPI4_HOSTDESC_PKT_TYPE_ETH,

    /* Host2PP Medium */
    .BDBlk[1].numDesc = PPFW_TX_HOST_BD_NUM_MED,
    .BDBlk[1].qNum    = PPFW_CPPI4x_FDB_QNUM(1), //PPFW_CPPI4x_FD_QMGR
    .BDBlk[1].pktType = PAL_CPPI4_HOSTDESC_PKT_TYPE_ETH,

    /* Host2PP High (comment said "Medium" twice in the original) */
    .BDBlk[2].numDesc = PPFW_TX_HOST_BD_NUM_HIGH,
    .BDBlk[2].qNum    = PPFW_CPPI4x_FDB_QNUM(2), //PPFW_CPPI4x_FD_QMGR
    .BDBlk[2].pktType = PAL_CPPI4_HOSTDESC_PKT_TYPE_ETH,
};

/* Open and enable one Host->PP proxy infrastructure DMA channel pair.
 * Rx side recirculates embedded descriptors from the per-channel FD queue
 * and completes into the PP egress queue for that channel; Tx side uses the
 * shared DMA1 teardown queue. */
void setup_host2pp_infra (PAL_Handle hnd, int infra_chan)
{
    Cppi4TxChInitCfg txCh;
    Cppi4RxChInitCfg rxCh;
    PAL_Cppi4TxChHnd cppi4TxChHnd;
    PAL_Cppi4RxChHnd cppi4RxChHnd;

    /* Set up Rx channel */
    rxCh.chNum            = PPFW_CPPI4x_HOST_TO_PP_PROXY_CHNUM(infra_chan);
    rxCh.dmaNum           = PAL_CPPI41_DMA_BLOCK1;
    rxCh.defDescType      = CPPI41_DESC_TYPE_EMBEDDED;
    rxCh.sopOffset        = 0;
    rxCh.rxCompQueue.qMgr = PPFW_CPPI4x_TX_EGRESS_QMGR;
    rxCh.rxCompQueue.qNum = PPFW_CPPI4x_TX_EGRESS_EMB_QNUM(infra_chan);
    rxCh.u.embeddedPktCfg.fdQueue.qMgr = PPFW_CPPI4x_FD_QMGR;
    rxCh.u.embeddedPktCfg.fdQueue.qNum = PPFW_CPPI4x_FD_QNUM(infra_chan);
    rxCh.u.embeddedPktCfg.numBufSlot = (EMSLOTCNT-1);
    rxCh.u.embeddedPktCfg.sopSlotNum = 0;
    /* NOTE(review): unlike setupRecycleInfra, retryOnStarvation is never set
     * here before the Open call — confirm this is intentional. */
    rxCh.u.embeddedPktCfg.fBufPool[0].bMgr  = BUF_POOL_MGR0;
    rxCh.u.embeddedPktCfg.fBufPool[0].bPool = BMGR0_POOL06;
    rxCh.u.embeddedPktCfg.fBufPool[1].bMgr  = BUF_POOL_MGR0;
    rxCh.u.embeddedPktCfg.fBufPool[1].bPool = BMGR0_POOL07;
    rxCh.u.embeddedPktCfg.fBufPool[2].bMgr  = BUF_POOL_MGR0;
    rxCh.u.embeddedPktCfg.fBufPool[2].bPool = BMGR0_POOL07;
    rxCh.u.embeddedPktCfg.fBufPool[3].bMgr  = BUF_POOL_MGR0;
    rxCh.u.embeddedPktCfg.fBufPool[3].bPool = BMGR0_POOL07;
    cppi4RxChHnd = PAL_cppi4RxChOpen(hnd, &rxCh, NULL);

    /* Set up Tx channel */
    txCh.chNum        = PPFW_CPPI4x_HOST_TO_PP_PROXY_CHNUM(infra_chan);
    txCh.dmaNum       = PAL_CPPI41_DMA_BLOCK1;
    txCh.tdQueue.qMgr = DMA1_CPPI4x_FTD_QMGR;
    txCh.tdQueue.qNum = DMA1_CPPI4x_FTD_QNUM;
    cppi4TxChHnd = PAL_cppi4TxChOpen(hnd, &txCh, NULL);

    if (!cppi4TxChHnd || !cppi4RxChHnd)
    {
        printk ("%s: infra channel setup failed for channel %d\n",
                __FUNCTION__, infra_chan);
        return;
    }

    /* Enable Tx-Rx channels */
    PAL_cppi4EnableRxChannel (cppi4RxChHnd, NULL);
    PAL_cppi4EnableTxChannel (cppi4TxChHnd, NULL);

    return;
}

/* Set up the DOCSIS management Rx path: open the Rx and free-descriptor
 * queues, carve a host-BD region, attach one freshly-allocated buffer to
 * each BD, push the BDs to the free queue, then open and enable the
 * DOCSIS->host management DMA channel.
 * Returns 0 on success, -1 on any failure. */
int setup_docsisMngRxResources (PAL_Handle palHandle)
{
    Cppi4Queue         rxFreeQ;
    Cppi4Queue         rxQ;
    PAL_Cppi4QueueHnd  rxfdQueueHdl;
    int                cnt;
    Cppi4HostDesc*     currBD;
    Ptr                ptrBDregion;
    Ptr                currBuffer;
    Cppi4RxChInitCfg   rxCh;
    PAL_Cppi4RxChHnd   rxChHandle;

    /* open a queue objects one for free descriptor queue
     * and one for rx queue */
    rxQ.qMgr = CNI_MGMT_CPPI4x_RX_QMGR;
    rxQ.qNum = CNI_MGMT_CPPI4x_RX_QNUM(0);
    if (NULL == PAL_cppi4QueueOpen( palHandle, rxQ ))
    {
        return -1;
    }

    rxFreeQ.qMgr = CNI_MGMT_CPPI4x_FBD_QMGR;
    rxFreeQ.qNum = CNI_MGMT_CPPI4x_FBD_QNUM(0);
    if (NULL == (rxfdQueueHdl = PAL_cppi4QueueOpen( palHandle, rxFreeQ )))
    {
        return -1;
    }

    /*
     * get a pointer to the BD region
     * the region is already preallocated in the PAL init so just
     * get a pointer to the bd area
     */
    if ((ptrBDregion = PAL_cppi4AllocDesc( palHandle, CNI_MGMT_CPPI4x_FBD_QMGR,
                                           CMGR_CPPI4x_RX_HOST_BD_NUM,
                                           CMGR_CPPI4x_RX_HOST_BD_SIZE )) == NULL)
    {
        printk( " %s : Failed to allocate CNI Management Rx descriptors\n", __FUNCTION__);
        return -1;
    }

    currBD = (Cppi4HostDesc*) ptrBDregion;

    /* allocate a memory for the buffers */
    if (PAL_osMemAlloc(0, CMGR_CPPI4x_RX_HOST_BD_NUM * CMGR_CPPI4x_RX_HOST_BUFF_SIZE,
                       0, (Ptr *)&currBuffer) != PAL_SOK)
    {
        printk( " %s : Failed to allocate memory for RX buffer queue",__FUNCTION__);
        PAL_cppi4DeallocDesc( palHandle, CNI_MGMT_CPPI4x_FBD_QMGR, ptrBDregion );
        return -1;
    }

    PAL_osMemSet(currBuffer,0,CMGR_CPPI4x_RX_HOST_BD_NUM * CMGR_CPPI4x_RX_HOST_BUFF_SIZE);

    /* need to consider the number of actually allocated buffers !!!!*/
    /* we don't really need 32 pair for management channel, do we? */
    for (cnt = 0; cnt < CMGR_CPPI4x_RX_HOST_BD_NUM; cnt++)
    {
        /* Update the hardware descriptor: host type, linked return policy,
         * off-chip, returned to rxFreeQ when the hardware is done with it. */
        currBD->descInfo   = ((PAL_CPPI4_HOSTDESC_DESC_TYPE_HOST << PAL_CPPI4_HOSTDESC_DESC_TYPE_SHIFT));
        currBD->tagInfo    = 0;
        currBD->pktInfo    =
              (PAL_CPPI4_HOSTDESC_PKT_TYPE_ETH      << PAL_CPPI4_HOSTDESC_PKT_TYPE_SHIFT)
            | (PAL_CPPI4_HOSTDESC_PKT_RETPLCY_LINKED << PAL_CPPI4_HOSTDESC_PKT_RETPLCY_SHIFT)
            | (PAL_CPPI4_HOSTDESC_DESC_LOC_OFFCHIP   << PAL_CPPI4_HOSTDESC_DESC_LOC_SHIFT)
            | (rxFreeQ.qMgr                          << PAL_CPPI4_HOSTDESC_PKT_RETQMGR_SHIFT)
            | (rxFreeQ.qNum                          << PAL_CPPI4_HOSTDESC_PKT_RETQNUM_SHIFT);
        currBD->buffLen    = 0;
        currBD->bufPtr     = 0;
        currBD->nextBDPtr  = 0;
        currBD->orgBuffLen = CMGR_CPPI4x_RX_HOST_BUFF_SIZE;
        currBD->orgBufPtr  = PAL_CPPI4_VIRT_2_PHYS(currBuffer);

        /* Write the BD back to memory before handing it to hardware. */
        PAL_CPPI4_CACHE_WRITEBACK( currBD, CMGR_CPPI4x_RX_HOST_BD_SIZE );
        PAL_cppi4QueuePush ( rxfdQueueHdl,
                             (Ptr) PAL_CPPI4_VIRT_2_PHYS((Uint32)currBD),
                             (CMGR_CPPI4x_RX_HOST_BD_SIZE - 24)/4, 0 );

        currBD     = (Cppi4HostDesc*) ((Uint32)currBD + CMGR_CPPI4x_RX_HOST_BD_SIZE);
        currBuffer = (Ptr) ((Uint32)currBuffer + CMGR_CPPI4x_RX_HOST_BUFF_SIZE);
    }

    /* configure the DMA channel to use this queue*/
    rxCh.chNum        = CNI_CPPI4x_DOCSIS_TO_HOST_MGMT_CHNUM;
    rxCh.dmaNum       = PAL_CPPI41_DMA_BLOCK1;
    rxCh.defDescType  = CPPI41_DESC_TYPE_HOST;
    rxCh.sopOffset    = 0;
    rxCh.rxCompQueue  = rxQ;
    rxCh.u.hostPktCfg.fdbQueue[0] = rxFreeQ;
    rxCh.u.hostPktCfg.fdbQueue[1] = rxFreeQ;
    rxCh.u.hostPktCfg.fdbQueue[2] = rxFreeQ;
    rxCh.u.hostPktCfg.fdbQueue[3] = rxFreeQ;

    /* NOTE(review): on this failure path the BD region and the buffer memory
     * allocated above are not released — leak on error. */
    if (NULL == (rxChHandle = PAL_cppi4RxChOpen (palHandle, &rxCh, NULL)))
    {
        printk( " %s : Failed to open Management DMA channel",__FUNCTION__);
        return(-1);
    }

    PAL_cppi4EnableRxChannel (rxChHandle, NULL);

    return 0;
}

/* Puma-5 packet processor bring-up, run at subsys_initcall time:
 *   1. initialize PPD (or the PP subsystem when CONFIG_TI_PACKET_PROCESSOR);
 *   2. initialize the CPPI 4.1 library and all buffer pools;
 *   3. carve and queue the embedded and host free descriptors;
 *   4. set up DOCSIS management Rx, infra channels and egress queues;
 *   5. configure and enable the APDSP prefetcher.
 * Returns 0 on success, -1 on any failure (no rollback of earlier steps). */
int avalanche_ppd_init(void)
{
    int i;
    PAL_Handle hnd, preFQHnd, preFBQHnd;
    Uint32 pRcbMem;
    APDSP_Command_Buffer_RegsOvly apdsp;
    Cppi4Queue tmpQ;        /* Just used for filling Q info for opening */
    Cppi4BufPool tmpBufPool; /* Used for Init calls */

#ifndef CONFIG_TI_PACKET_PROCESSOR
    if(ti_ppd_init(TI_PP_NUM_FW, &ppFirmware_g[0], &ppCfg_g) == 0)
    {
        printk("avalanche_ppd_init: PPD initialized successfully.\n");
    }
    else
    {
        printk("avalanche_ppd_init: ERROR: PPD failed to initialize!\n");
        return -1;
    }
#else
    extern int ti_pp_sys_initialize (unsigned int num_of_fw,
                                     TI_PP_FIRMWARE *firmware,
                                     TI_PPD_CONFIG *cfg);

    /* Initialize the PP Subsystem. */
    if (ti_pp_sys_initialize (TI_PP_NUM_FW, &ppFirmware_g[0], &ppCfg_g) < 0)
    {
        printk ("Error: Failed to initialize the PP Subsystem\n");
        return -1;
    }
    else
    {
        printk("avalanche_ppd_init: PP system initialized successfully.\n");
    }
#endif

    hnd = PAL_cppi4Init (&cppi4InitCfg_g, NULL);/* added by ccb */
    /*--- hnd = PAL_cppi4Init (NULL, NULL); ---*/
    if (!hnd)
    {
        printk("avalanche_ppd_init: CPPI41 Library NOT initialized.\n");
        return -1;
    }

    /************************************************************************/
    /*********** Init buffer pools used in the system ***********************/
    /************************************************************************/
    tmpBufPool.bMgr = BUF_POOL_MGR0;

    /************************************************/
    /* Ethernet and USB shared pools                */
    /*                                              */
    tmpBufPool.bPool = CPMAC_CPPI4x_POOL_NUM(0);
    if ((PAL_cppi4BufPoolInit(hnd, tmpBufPool,
                              BMGR0_POOL10_REF_CNT,
                              BMGR0_POOL10_BUF_SIZE,
                              BMGR0_POOL10_BUF_COUNT)) == NULL)
    {
        printk ("PAL_cppi4BufPoolInit for pool %d FAILED.\n", tmpBufPool.bPool);
        return -1;
    }
    /*----------------------------------------------*/
    tmpBufPool.bPool = CPMAC_CPPI4x_POOL_NUM(1);
    if ((PAL_cppi4BufPoolInit(hnd, tmpBufPool,
                              BMGR0_POOL11_REF_CNT,
                              BMGR0_POOL11_BUF_SIZE,
                              BMGR0_POOL11_BUF_COUNT)) == NULL)
    {
        printk ("PAL_cppi4BufPoolInit for pool %d FAILED.\n", tmpBufPool.bPool);
        return -1;
    }
    /************************************************/

    /************************************************/
    /* PP Firmware                                  */
    /*                                              */
    tmpBufPool.bPool = BMGR0_POOL13;
    if ((PAL_cppi4BufPoolInit(hnd, tmpBufPool,
                              BMGR0_POOL13_REF_CNT,
                              BMGR0_POOL13_BUF_SIZE,
                              BMGR0_POOL13_BUF_COUNT )) == NULL)
    {
        printk ("PAL_cppi4BufPoolInit for pool %d FAILED.\n", tmpBufPool.bPool);
        return -1;
    }
    /************************************************/

    /************************************************/
    /* HOST to PP Proxy                             */
    /*                                              */
    tmpBufPool.bPool = BMGR0_POOL06;
    if ((PAL_cppi4BufPoolInit(hnd, tmpBufPool,
                              BMGR0_POOL06_REF_CNT,
                              BMGR0_POOL06_BUF_SIZE,
                              BMGR0_POOL06_BUF_COUNT )) == NULL)
    {
        printk ("PAL_cppi4BufPoolInit for pool %d FAILED.\n", tmpBufPool.bPool);
        return -1;
    }
    /*----------------------------------------------*/
    tmpBufPool.bPool = BMGR0_POOL07;
    if ((PAL_cppi4BufPoolInit(hnd, tmpBufPool,
                              BMGR0_POOL07_REF_CNT,
                              BMGR0_POOL07_BUF_SIZE,
                              BMGR0_POOL07_BUF_COUNT )) == NULL)
    {
        printk ("PAL_cppi4BufPoolInit for pool %d FAILED.\n", tmpBufPool.bPool);
        return -1;
    }
    /************************************************/

    /************************************************/
    /* DOCSIS                                       */
    /*                                              */
    tmpBufPool.bPool = BMGR0_POOL00;
    if ((PAL_cppi4BufPoolInit(hnd, tmpBufPool,
                              BMGR0_POOL00_REF_CNT,
                              BMGR0_POOL00_BUF_SIZE,
                              BMGR0_POOL00_BUF_COUNT )) == NULL)
    {
        printk ("PAL_cppi4BufPoolInit for pool %d FAILED.\n", tmpBufPool.bPool);
        return -1;
    }
    /*----------------------------------------------*/
    tmpBufPool.bPool = BMGR0_POOL01;
    if ((PAL_cppi4BufPoolInit(hnd, tmpBufPool,
                              BMGR0_POOL01_REF_CNT,
                              BMGR0_POOL01_BUF_SIZE,
                              BMGR0_POOL01_BUF_COUNT )) == NULL)
    {
        printk ("PAL_cppi4BufPoolInit for pool %d FAILED.\n", tmpBufPool.bPool);
        return -1;
    }
    /*----------------------------------------------*/
    tmpBufPool.bPool = BMGR0_POOL04;
    if ((PAL_cppi4BufPoolInit(hnd, tmpBufPool,
                              BMGR0_POOL04_REF_CNT,
                              BMGR0_POOL04_BUF_SIZE,
                              BMGR0_POOL04_BUF_COUNT )) == NULL)
    {
        printk ("PAL_cppi4BufPoolInit for pool %d FAILED.\n", tmpBufPool.bPool);
        return -1;
    }
    /*----------------------------------------------*/
    tmpBufPool.bPool = BMGR0_POOL05;
    if ((PAL_cppi4BufPoolInit(hnd, tmpBufPool,
                              BMGR0_POOL05_REF_CNT,
                              BMGR0_POOL05_BUF_SIZE,
                              BMGR0_POOL05_BUF_COUNT )) == NULL)
    {
        printk ("PAL_cppi4BufPoolInit for pool %d FAILED.\n", tmpBufPool.bPool);
        return -1;
    }
    /*----------------------------------------------*/
    tmpBufPool.bPool = BMGR0_POOL12;
    if ((PAL_cppi4BufPoolInit(hnd, tmpBufPool,
                              BMGR0_POOL12_REF_CNT,
                              BMGR0_POOL12_BUF_SIZE,
                              BMGR0_POOL12_BUF_COUNT )) == NULL)
    {
        printk ("PAL_cppi4BufPoolInit for pool %d FAILED.\n", tmpBufPool.bPool);
        return -1;
    }
    /************************************************/

    /************************************************/
    /* MPEG I/F                                     */
    /*                                              */
    tmpBufPool.bPool = MPEG_CPPI4x_POOL_NUM(0);
    if ((PAL_cppi4BufPoolInit(hnd, tmpBufPool,
                              BMGR0_POOL02_REF_CNT,
                              BMGR0_POOL02_BUF_SIZE,
                              BMGR0_POOL02_BUF_COUNT)) == NULL)
    {
        printk ("PAL_cppi4BufPoolInit for pool %d FAILED.\n", tmpBufPool.bPool);
        return -1;
    }
    /*----------------------------------------------*/
    tmpBufPool.bPool = MPEG_ENCAP_CPPI4x_POOL_NUM(0);
    if ((PAL_cppi4BufPoolInit(hnd, tmpBufPool,
                              BMGR0_POOL08_REF_CNT,
                              BMGR0_POOL08_BUF_SIZE,
                              BMGR0_POOL08_BUF_COUNT)) == NULL)
    {
        printk ("PAL_cppi4BufPoolInit for pool %d FAILED.\n", tmpBufPool.bPool);
        return -1;
    }
    /************************************************/

    /************************************************/
    /* WLAN                                         */
    /*                                              */
    tmpBufPool.bPool = BMGR0_POOL09;
    if ((PAL_cppi4BufPoolInit(hnd, tmpBufPool,
                              BMGR0_POOL09_REF_CNT,
                              BMGR0_POOL09_BUF_SIZE,
                              BMGR0_POOL09_BUF_COUNT )) == NULL)
    {
        printk ("PAL_cppi4BufPoolInit for pool %d FAILED.\n", tmpBufPool.bPool);
        return -1;
    }
    /************************************************/
    /******************** Buffer pool Init done *****************************/

    /************************************************************************/
    /*********** Setup Free Embedded descriptors    *************************/
    /************************************************************************/

    /************************************************/
    /* Allocate region                              */
    /*                                              */
    ppEmbBDCfg_g.buffDescRegionPtr =
        PAL_cppi4AllocDesc( hnd,ppEmbBDCfg_g.qMgr,
                            ppEmbBDCfg_g.numDesc,
                            ppEmbBDCfg_g.szDesc );
    if (!ppEmbBDCfg_g.buffDescRegionPtr)
    {
        printk ("Embedded descriptor region allocation FAILED.\n");
        return -1;
    }
    /************************************************/

    /* Initialize every embedded BD block and push its descriptors onto the
     * block's free-descriptor queue. */
    {
        Cppi4EmbdDesc* currBD;

        currBD = (Cppi4EmbdDesc*)ppEmbBDCfg_g.buffDescRegionPtr;

        for (i = 0; i < ppEmbBDCfg_g.numBlks; i++)
        {
            BDBlkInfo* BDBlk = &ppEmbBDCfg_g.BDBlk[i];
            int bd_cnt;
            PAL_Cppi4QueueHnd tmpQHnd;

            tmpQ.qMgr = ppEmbBDCfg_g.qMgr;
            tmpQ.qNum = BDBlk->qNum;
            tmpQHnd = PAL_cppi4QueueOpen (hnd, tmpQ);

            for (bd_cnt = 0; bd_cnt < BDBlk->numDesc; bd_cnt++)
            {
                PAL_osMemSet(currBD, 0, ppEmbBDCfg_g.szDesc);

                /* 4 Slots */
                currBD->descInfo = CPPI41_EM_DESCINFO_DTYPE_EMBEDDED
                                 | CPPI41_EM_DESCINFO_SLOTCNT_MYCNT;
                currBD->tagInfo  = 0;
                currBD->pktInfo  =
                      (BDBlk->pktType      << CPPI41_EM_PKTINFO_PKTTYPE_SHIFT)
                    | (1                   << CPPI41_EM_PKTINFO_RETPOLICY_SHIFT)
                    | (1                   << CPPI41_EM_PKTINFO_PROTSPEC_SHIFT)
                    | (ppEmbBDCfg_g.qMgr   << PAL_CPPI4_HOSTDESC_PKT_RETQMGR_SHIFT)
                    | (BDBlk->qNum         << PAL_CPPI4_HOSTDESC_PKT_RETQNUM_SHIFT);

                PAL_CPPI4_CACHE_WRITEBACK(currBD, ppEmbBDCfg_g.szDesc);
                PAL_cppi4QueuePush (tmpQHnd,
                                    (Ptr)PAL_CPPI4_VIRT_2_PHYS((Uint32)currBD),
                                    (ppEmbBDCfg_g.szDesc-24)/4, 0/*!@@*/);
                currBD = (Cppi4EmbdDesc*)((Uint32)currBD + ppEmbBDCfg_g.szDesc);
            }
        }
    }
    /********************** Free Embedded desc setup Done  ******************/

    /************************************************************************/
    /*********** Setup Free Host descriptors        *************************/
    /************************************************************************/

    /************************************************/
    /* Allocate region                              */
    /*                                              */
    ppHostBDCfg_g.buffDescRegionPtr =
        PAL_cppi4AllocDesc( hnd,ppHostBDCfg_g.qMgr,
                            ppHostBDCfg_g.numDesc,
                            ppHostBDCfg_g.szDesc );
    if (!ppHostBDCfg_g.buffDescRegionPtr)
    {
        printk ("Host descriptor region allocation FAILED.\n");
        return -1;
    }
    /************************************************/

    /* Same per-block init/push as above, but for host-type descriptors. */
    {
        Cppi4HostDesc* currBD;

        currBD = (Cppi4HostDesc*)ppHostBDCfg_g.buffDescRegionPtr;

        for (i = 0; i < ppHostBDCfg_g.numBlks; i++)
        {
            BDBlkInfo* BDBlk = &ppHostBDCfg_g.BDBlk[i];
            int bd_cnt;
            PAL_Cppi4QueueHnd tmpQHnd;

            tmpQ.qMgr = ppHostBDCfg_g.qMgr;
            tmpQ.qNum = BDBlk->qNum;
            tmpQHnd = PAL_cppi4QueueOpen (hnd, tmpQ);

            for (bd_cnt = 0; bd_cnt < BDBlk->numDesc; bd_cnt++)
            {
                PAL_osMemSet(currBD, 0, ppHostBDCfg_g.szDesc);

                currBD->descInfo = (PAL_CPPI4_HOSTDESC_DESC_TYPE_HOST
                                    << PAL_CPPI4_HOSTDESC_DESC_TYPE_SHIFT);
                currBD->tagInfo  = 0x3FFF;
                currBD->pktInfo  =
                      (BDBlk->pktType << PAL_CPPI4_HOSTDESC_PKT_TYPE_SHIFT)
                    | (PAL_CPPI4_HOSTDESC_PKT_RETPLCY_LINKED << PAL_CPPI4_HOSTDESC_PKT_RETPLCY_SHIFT)
                    | (PAL_CPPI4_HOSTDESC_DESC_LOC_OFFCHIP   << PAL_CPPI4_HOSTDESC_DESC_LOC_SHIFT)
                    | (ppHostBDCfg_g.qMgr << PAL_CPPI4_HOSTDESC_PKT_RETQMGR_SHIFT)
                    | (BDBlk->qNum        << PAL_CPPI4_HOSTDESC_PKT_RETQNUM_SHIFT);

                PAL_CPPI4_CACHE_WRITEBACK(currBD, ppHostBDCfg_g.szDesc);
                PAL_cppi4QueuePush (tmpQHnd,
                                    (Ptr)PAL_CPPI4_VIRT_2_PHYS((Uint32)currBD),
                                    (ppHostBDCfg_g.szDesc-24)/4, 0/*!@@*/);
                currBD = (Cppi4HostDesc*)((Uint32)currBD + ppHostBDCfg_g.szDesc);
            }
        }
    }

    /* NOTE(review): return value ignored — a failure here is not propagated. */
    setup_docsisMngRxResources( hnd );

    /********************** Free Host desc setup Done  ******************/

    /*********** Open the Queues common for devices in PP system
     * Actually the queues only need to be opened here if the handle is required
     * (for push/pop generally) and any driver depending on these Qs would do
     * so. Still opening here to provide idea of the system.
     * Also, opening these queue here would mean that any subsequent calls (from
     * drivers) to open these queues will just return the same handle to the
     * queue without resetting them
     */
    /************************************************************************/

    /* Open Tx Qs. These will be used by drivers to push for Tx. */
    /* Infra Qs */
    for (i = 0; PPFW_CPPI4x_HOST_TO_PP_PROXY_CH_COUNT > i; i++)
    {
        /* Input Queue */
        tmpQ.qMgr = PPFW_CPPI4x_HOST_TO_PP_PROXY_QMGR;
        tmpQ.qNum = PPFW_CPPI4x_HOST_TO_PP_PROXY_QNUM(i);
        PAL_cppi4QueueOpen (hnd, tmpQ);

        /* Output Queue */
        tmpQ.qMgr = PPFW_CPPI4x_TX_EGRESS_QMGR;
        tmpQ.qNum = PPFW_CPPI4x_TX_EGRESS_EMB_QNUM(i);
        PAL_cppi4QueueOpen (hnd, tmpQ);

        setup_host2pp_infra (hnd, i);
    }

    tmpQ.qMgr = RECYCLE_INFRA_RX_QMGR;
    tmpQ.qNum = RECYCLE_INFRA_RX_Q(0);
    PAL_cppi4QueueOpen(hnd, tmpQ);

    /* This is done in order to keep the reference count of these queues
       non zero so they will not be closed */
    tmpQ.qMgr = CPMAC_CPPI4x_TX_QMGR;
    tmpQ.qNum = CPMAC_CPPI4x_TX_QNUM(0);
    PAL_cppi4QueueOpen(hnd, tmpQ);
    tmpQ.qNum = CPMAC_CPPI4x_TX_QNUM(1);
    PAL_cppi4QueueOpen(hnd, tmpQ);

    setupRecycleInfra(hnd);

    /****************************************************************/
    /* Set Up the rest of PP Egress queues                          */
    /****************************************************************/
    for (i = 0; PPFW_CPPI4x_TX_EGRESS_Q_COUNT > i; i++)
    {
        tmpQ.qMgr = PPFW_CPPI4x_TX_EGRESS_QMGR;
        tmpQ.qNum = PPFW_CPPI4x_TX_EGRESS_HOST_QNUM(i);
        PAL_cppi4QueueOpen (hnd, tmpQ);
    }
    /****************************************************************/

    /*****************************************************************
     ******************* Setup Prefetcher     ***********************
     ****************************************************************/
    /* Push Prefetcher free desc */
    pRcbMem = (Uint32)prefCfg_g.pfDescBase;
    preFQHnd = PAL_cppi4QueueOpen (hnd, prefCfg_g.pfFQ);
    /* NOTE(review): the source text below is corrupted (likely lost between
     * '<' characters during extraction).  The loop that pushes the prefetch
     * free descriptors/buffers (using pRcbMem, preFQHnd, preFBQHnd and
     * prefCfg_g.pfFBQ) and the assignment that points 'apdsp' at
     * prefCfg_g.pfCmdBase appear to be missing; 'iParameter0' is not a
     * declared identifier.  Recover the original statements from version
     * control before building. */
    for( i=0; iParameter0 = (24<<8) | 96;
    apdsp->Parameter0 = (0<<8) | 128;
    apdsp->Command = ACMD_COMMAND(ACMD_CONFIG_PREFETCH);
    /* Busy-wait (bounded) for the APDSP to consume the command. */
    for( i=0; i<1000000 && ACMD_GET_COMMAND(apdsp->Command); i++);
    if( i==1000000 )
    {
        printk("%s(%d): Error - APDSP firmware not responding!\n",
               __FUNCTION__, __LINE__);
        return -1;
    }
    if (ACMD_GET_RETCODE(apdsp->Command) != 1)
        printk("%s(%d):return code 0x%02x\n",
               __FUNCTION__, __LINE__, ACMD_GET_RETCODE(apdsp->Command));

    //--------------------------------------------------------
    //
    // Setup Teardown descriptor queue
    // Note: The prefetcher is configured to route the teardown
    // descriptors back to free teardown desc queue. This avoids prefetcher
    // going into invalid state when TD is received.
    // CHI: __IMPORTANT__ This configuration constraints ALL PP related Endpoint
    // Rx channels to be configured to use SAME Teardown Queue otherwise TDs
    // will be lost (since the queue number specified in DMA configuration to
    // pick the TD and Queue number for Prefetcher to queue the TD will be
    // different).
    //
    apdsp->Parameter0 = (DMA1_CPPI4x_FTD_QMGR<<16) | DMA1_CPPI4x_FTD_QNUM;
    apdsp->Command = ACMD_COMMAND(ACMD_CONFIG_TDQ);
    for( i=0; i<1000000 && ACMD_GET_COMMAND(apdsp->Command); i++);
    if( i==1000000 )
    {
        printk("%s(%d): Error - APDSP firmware not responding!\n",
               __FUNCTION__, __LINE__);
        return -1;
    }
    if (ACMD_GET_RETCODE(apdsp->Command) != 1)
        printk("%s(%d):return code 0x%02x\n",
               __FUNCTION__, __LINE__, ACMD_GET_RETCODE(apdsp->Command));

    //--------------------------------------------------------
    // Enable Prefetcher
    //
    // Enable prefetcher
    //
    apdsp->Command = ACMD_INDEX(1) | ACMD_COMMAND(ACMD_ENABLE_PREFETCH);
    for( i=0; i<1000000 && ACMD_GET_COMMAND(apdsp->Command); i++);
    if( i==1000000 )
    {
        printk("%s(%d): Error - APDSP firmware not responding!\n",
               __FUNCTION__, __LINE__);
        return -1;
    }
    if (ACMD_GET_RETCODE(apdsp->Command) != 1)
        printk("%s(%d):return code 0x%02x\n",
               __FUNCTION__, __LINE__, ACMD_GET_RETCODE(apdsp->Command));

    /*********** Prefetcher setup done **************************/

    return 0;
}

/* Tear down the PP: release the embedded and host descriptor regions and
 * de-initialize PPD / the PP subsystem.  Returns 0, or -1 when the CPPI
 * library handle cannot be obtained. */
int avalanche_ppd_deinit (void)
{
    PAL_Handle hnd;

    /* PAL_cppi4Init(NULL, ...) returns the already-initialized handle. */
    hnd = PAL_cppi4Init (NULL, NULL);
    if (!hnd)
    {
        printk("avalanche_ppd_deinit: CPPI41 Library NOT initialized.\n");
        return -1;
    }

    if (ppEmbBDCfg_g.buffDescRegionPtr)
    {
        PAL_cppi4DeallocDesc ( hnd, ppEmbBDCfg_g.qMgr,
                               ppEmbBDCfg_g.buffDescRegionPtr );
    }

    if (ppHostBDCfg_g.buffDescRegionPtr)
    {
        PAL_cppi4DeallocDesc ( hnd, ppHostBDCfg_g.qMgr,
                               ppHostBDCfg_g.buffDescRegionPtr );
    }

#ifdef CONFIG_TI_PACKET_PROCESSOR
    ti_ppm_deinitialize ();
#else
    ti_ppd_exit ();
#endif

    return 0;
}

/* Issue a READ_STATISTICS command for the given stats index to the APDSP
 * and wait (bounded busy-wait) for completion.  Results are left in the
 * APDSP Parameter registers for the caller to read.
 * Returns 0 on success, -1 on timeout, or the negated APDSP return code. */
static Int32 pp_pref_exec_stats_cmd (int stats_index)
{
    int i, ret_val;
    APDSP_Command_Buffer_RegsOvly apdsp;

    apdsp = prefCfg_g.pfCmdBase;

    printk ("Writing %#x\n",
            ACMD_INDEX(stats_index) | ACMD_COMMAND(ACMD_READ_STATISTICS));
    apdsp->Command = ACMD_INDEX(stats_index) | ACMD_COMMAND(ACMD_READ_STATISTICS);
    for( i=0; i<1000000 && ACMD_GET_COMMAND(apdsp->Command); i++);
    if( i==1000000 )
    {
        printk("%s(%d): Error - APDSP firmware not responding!\n",
               __FUNCTION__, __LINE__);
        return -1;
    }

    if ((ret_val = ACMD_GET_RETCODE(apdsp->Command)) != 1){
        printk("%s(%d):return code 0x%02x\n",
               __FUNCTION__, __LINE__, ACMD_GET_RETCODE(apdsp->Command));
        return -(ret_val);
    }

    return 0;
}

/* Read AND CLEAR all prefetcher statistics via APDSP commands:
 * group A (index 0), group B (index 1) and the 6 input-queue stats
 * (indices 2..7).  Returns 0 on success or the first command's error. */
Int32 ti_pp_get_n_clear_pref_stats (TI_PP_PREF_STATS *stats)
{
    int i, ret_val;
    APDSP_Command_Buffer_RegsOvly apdsp;

    apdsp = prefCfg_g.pfCmdBase;

    /* Get and clear Group A stats */
    if (!(ret_val = pp_pref_exec_stats_cmd (0)))
    {
        stats->grp_a_preproc_pkts       = apdsp->Parameter0;
        stats->grp_a_pref_buf_pkts      = apdsp->Parameter1;
        stats->grp_a_pref_descbuff_pkts = apdsp->Parameter2;
        stats->grp_a_desc_starv_cnt     = apdsp->Parameter3;
    }
    else
        return ret_val;

    /* Get and clear Group B stats */
    if (!(ret_val = pp_pref_exec_stats_cmd (1)))
    {
        stats->grp_b_preproc_pkts       = apdsp->Parameter0;
        stats->grp_b_pref_buf_pkts      = apdsp->Parameter1;
        stats->grp_b_pref_descbuff_pkts = apdsp->Parameter2;
        stats->grp_b_desc_starv_cnt     = apdsp->Parameter3;
    }
    else
        return ret_val;

    /* Get and clear input Queue stats */
    for (i = 0; i < 6; i++)
    {
        if (!(ret_val = pp_pref_exec_stats_cmd (2+i)))
        {
            stats->in_q_congst_discards[i] = apdsp->Parameter0;
            stats->in_q_congst_thrsh[i]    = apdsp->Parameter1;
        }
        else
            return ret_val;
    }

    return 0;
}

/* Read (without clearing) the prefetcher statistics directly from the
 * APDSP prefetch block's stats area (offset 0xC20). Returns 0. */
Int32 ti_pp_get_pref_stats (TI_PP_PREF_STATS *stats)
{
    int i;
    volatile Uint32* pref_statsblk_base
        = (volatile Uint32*)((Uint32)(AVALANCHE_NWSS_APDSP_PREFBLK_BASE) + 0xC20);

    /* Get Group A stats */
    stats->grp_a_preproc_pkts       = *(pref_statsblk_base + 0);
    stats->grp_a_pref_buf_pkts      = *(pref_statsblk_base + 1);
    stats->grp_a_pref_descbuff_pkts = *(pref_statsblk_base + 2);
    stats->grp_a_desc_starv_cnt     = *(pref_statsblk_base + 3);

    /* Get Group B stats */
    stats->grp_b_preproc_pkts       = *(pref_statsblk_base + 4);
    stats->grp_b_pref_buf_pkts      = *(pref_statsblk_base + 5);
    stats->grp_b_pref_descbuff_pkts = *(pref_statsblk_base + 6);
    stats->grp_b_desc_starv_cnt     = *(pref_statsblk_base + 7);

    /* Get input Queue stats */
    for (i = 0; i < 6; i++)
    {
        stats->in_q_congst_discards[i] = *(pref_statsblk_base + 8 + i);
        /* NOTE(review): this reads the SAME word as in_q_congst_discards[i]
         * above — almost certainly a copy-paste bug.  The clearing variant
         * (ti_pp_get_n_clear_pref_stats) returns discards and threshold as
         * two distinct values per queue, so thrsh must live at a different
         * offset in this block; confirm the layout against the APDSP
         * firmware spec before fixing. */
        stats->in_q_congst_thrsh[i]    = *(pref_statsblk_base + 8 + i);
    }

    return 0;
}

/* Put the PP into power-save mode: command the prefetcher into PSM, put the
 * PPD PDSPs into PSM, then clock-gate the C/M/Q PDSPs.
 * Returns 0 on success, -1 on APDSP timeout, or a negated APDSP return code. */
Int32 ti_pp_enable_psm (void)
{
    int i, ret_val;
    APDSP_Command_Buffer_RegsOvly apdsp;

    apdsp = prefCfg_g.pfCmdBase;

    printk("%s: Enable prefetcher PSM mode\n", __FUNCTION__);
    /* Enable prefetcher PSM */
    apdsp->Command = ACMD_INDEX(2) | ACMD_COMMAND(ACMD_ENABLE_PREFETCH);

    /* Check APDSP responsiveness */
    for( i=0; i<1000000 && ACMD_GET_COMMAND(apdsp->Command); i++);
    if(i==1000000)
    {
        printk("%s(%d): Error - APDSP firmware not responding!\n",
               __FUNCTION__, __LINE__);
        return -1;
    }

    ret_val = ACMD_GET_RETCODE(apdsp->Command);
    /* For now, treat IDLE, NON IDLE as success */
    if ((ret_val) && (ret_val != 1) && (ret_val != 8))
    {
        printk("%s(%d):return code 0x%02x\n", __FUNCTION__, __LINE__, ret_val);
        return -(ret_val);
    }

    /*
     * CHK: Either use SETPSM command of SR or use PDSP control API to halt
     * the SR PDSPs
     */
    printk("%s: Enable PPD PSM mode\n", __FUNCTION__);
    i = 1;
    ti_ppd_pdsp_control (0, TI_PP_PDSPCTRL_PSM, (Ptr)&i);

    printk("%s: Halt PP PDSPs\n", __FUNCTION__);
    /* Set LPM for PP PDSPs - clock gated, reset DE-asserted */
    /* Note: Do not try to power down LUT (PSC_SR_CLK2) */
    // PAL_sysPowerCtrl((INT32)PSC_SR_CLK2, PSC_DISABLE); /* Power Down QPDSP */
    PAL_sysPowerCtrl((INT32)PSC_SR_CLK1, PSC_DISABLE); /* Power Down MPDSP */
    PAL_sysPowerCtrl((INT32)PSC_SR_CLK0, PSC_DISABLE); /* Power Down CPDSP */

    return 0;
}

/* Leave power-save mode: re-enable PDSP clocks, take the prefetcher out of
 * PSM and clear the PPD PDSPs' PSM flag.  Mirror image of ti_pp_enable_psm.
 * Returns 0 on success, -1 on APDSP timeout, or a negated APDSP return code. */
Int32 ti_pp_disable_psm (void)
{
    int i, ret_val;
    APDSP_Command_Buffer_RegsOvly apdsp;

    apdsp = prefCfg_g.pfCmdBase;

    printk("%s: Run PP PDSPs\n", __FUNCTION__);
    PAL_sysPowerCtrl((INT32)PSC_SR_CLK0, PSC_ENABLE); /* Power Up CPDSP */
    PAL_sysPowerCtrl((INT32)PSC_SR_CLK1, PSC_ENABLE); /* Power Up MPDSP */
    PAL_sysPowerCtrl((INT32)PSC_SR_CLK2, PSC_ENABLE); /* Power Up QPDSP */

    /* Disable prefetcher PSM */
    printk("%s: Disable prefetcher PSM mode\n", __FUNCTION__);
    apdsp->Command = ACMD_INDEX(1) | ACMD_COMMAND(ACMD_ENABLE_PREFETCH);
    for( i=0; i<1000000 && ACMD_GET_COMMAND(apdsp->Command); i++);
    if(i==1000000)
    {
        printk("%s(%d): Error - APDSP firmware not responding!\n",
               __FUNCTION__, __LINE__);
        return -1;
    }

    ret_val = ACMD_GET_RETCODE(apdsp->Command);
    /* For now, treat IDLE, NON IDLE as success */
    if ((ret_val) && (ret_val != 1) && (ret_val != 8))
    {
        printk("%s(%d):return code 0x%02x\n", __FUNCTION__, __LINE__, ret_val);
        return -(ret_val);
    }

    /*
     * CHK: Either use SETPSM command of SR or use PDSP control API to resume
     * the SR PDSPs
     */
    printk("%s: Disable PPD PSM mode\n", __FUNCTION__);
    i = 0;
    ti_ppd_pdsp_control (0, TI_PP_PDSPCTRL_PSM, (Ptr)&i);

    return 0;
}

subsys_initcall(avalanche_ppd_init);

EXPORT_SYMBOL(ti_pp_get_pref_stats);
EXPORT_SYMBOL(ti_pp_get_n_clear_pref_stats);
EXPORT_SYMBOL(ti_pp_enable_psm);
EXPORT_SYMBOL(ti_pp_disable_psm);

/*
 * Exported PPD APIs to facilitate use from modules. Could have put these in
 * avalanche_misc.c instead of here, but since ti_ppd.h depends on ti_ppm.h and
 * ti_ppm.h is not in standard path, we get build errors wherever pformCfg.h is
 * included.
 */
EXPORT_SYMBOL(ti_ppd_init);
EXPORT_SYMBOL(ti_ppd_exit);
EXPORT_SYMBOL(ti_ppd_create_session);
EXPORT_SYMBOL(ti_ppd_modify_session);
EXPORT_SYMBOL(ti_ppd_get_session_dump);
EXPORT_SYMBOL(ti_ppd_delete_session);
EXPORT_SYMBOL(ti_ppd_config_pid_range);
EXPORT_SYMBOL(ti_ppd_remove_pid_range);
EXPORT_SYMBOL(ti_ppd_create_pid);
EXPORT_SYMBOL(ti_ppd_set_pid_flags);
EXPORT_SYMBOL(ti_ppd_delete_pid);
EXPORT_SYMBOL(ti_ppd_create_vpid);
EXPORT_SYMBOL(ti_ppd_set_vpid_flags);
EXPORT_SYMBOL(ti_ppd_delete_vpid);
EXPORT_SYMBOL(ti_ppd_get_vpid_stats);
EXPORT_SYMBOL(ti_ppd_clear_vpid_stats);
EXPORT_SYMBOL(ti_ppd_get_n_clear_vpid_stats);
EXPORT_SYMBOL(ti_ppd_get_srl_pkt_stats);
EXPORT_SYMBOL(ti_ppd_clear_srl_pkt_stats);
EXPORT_SYMBOL(ti_ppd_get_n_clear_srl_pkt_stats);
EXPORT_SYMBOL(ti_ppd_get_session_pkt_stats);
EXPORT_SYMBOL(ti_ppd_clear_session_pkt_stats);
EXPORT_SYMBOL(ti_ppd_get_n_clear_session_pkt_stats);
EXPORT_SYMBOL(ti_ppd_register_event_handler);
EXPORT_SYMBOL(ti_ppd_deregister_event_handler);
EXPORT_SYMBOL(ti_ppd_health_check);
EXPORT_SYMBOL(ti_ppd_qos_cluster_setup);
EXPORT_SYMBOL(ti_ppd_qos_cluster_enable);
EXPORT_SYMBOL(ti_ppd_qos_cluster_disable);
EXPORT_SYMBOL(ti_ppd_get_qos_q_stats);
EXPORT_SYMBOL(ti_ppd_get_n_clear_qos_q_stats);
EXPORT_SYMBOL(ti_ppd_get_pdsp_status);
EXPORT_SYMBOL(ti_ppd_pdsp_control);
EXPORT_SYMBOL(ti_ppd_get_ses_age);