/*
 * ppd_test.c
 * Description:
 * Contains PPD test code.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY of any kind, whether express or implied; without even the
 * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
 */

/* NOTE: the five include targets below are missing in the source as received. */
#include
#include
#include
#include
#include

/* Match any IPv4 packet (works for all sessions except Session 0) */
#define PPD_TEST_MATCH_ANY_IPv4

/* Overrides Session 0 to be IPv6 */
#define PPD_TEST_IPv6

/* Suitable for IPERF test when it is required to run SERVER and CLIENT on SAME
 * MACHINE.
 *
 * Only operates on Session 0, disables PPD_TEST_IPv6
 */
#define PPD_TEST_IPERF

#ifdef PPD_TEST_IPERF
#undef PPD_TEST_IPv6
#endif

#if 0
static TI_PP_PID_RANGE  pid_range_eth;
static TI_PP_PID        pid_eth;
static TI_PP_VPID       ingress_vpid, egress_vpid;
#else
static TI_PP_PID        pid_usb[1];
static TI_PP_PID        pid_eth[1];
static TI_PP_VPID       vpid_eth[2];    /* Multiple egress rec */
#endif

int avalanche_ppd_proc_init(void);

static int dump_mem_words (volatile Uint32* start, int num_words)
{
    int i;

    for (i = 0; i < num_words; i++) {
        if (i%4 == 0) {
            if (i)
                printk ("\n");
            printk ("%#x: ", (Uint32)(start + i));
        }
        printk ("%08x ", *(start + i));
    }
    printk ("\n");

    return num_words;
}

static void ppdt_event_handler (unsigned short event_id,
                                unsigned int param1, unsigned int param2)
{
    static int counter;//[100];

    //if ((event_id == 3) && counter[param1] > 2) return;

    printk ("ppdt_event_handler(%d): Got event %d, index %d, aux code %d\n",
            /*(event_id == 3) ? counter[param1]+1 : 0*/ ++counter,
            (int)event_id, (int)param1, (int)param2);

    //if (event_id == 3) {
    //    counter[param1]++;
    //    if (counter[param1] == 2) {
    //        printk ("0x03100094: %08x\n\n", *((volatile Uint32*)0xd3100094));
    //        dump_mem_words ((volatile Uint32*)0xd3100900, 512/4);
    //        dump_mem_words ((volatile Uint32*)0xd3100B00, 256/4);
    //        dump_mem_words ((volatile Uint32*)0xd3167C20, 80);
    //    }
}

/********************** Configure QPDSP ********************/
int setup_qos (void)
{
    int rc;
    TI_PP_QOS_CLST_CFG  clst;
    TI_PP_QOS_QUEUE     *qcfg;

    //
    // Cluster 0
    //
    // Setup 2 QOS queues, one that gets 1Gb/s and then trickles down to the
    // other.
    //
    clst.qos_q_cnt = 2;

    // Queue 0
    qcfg = &clst.qos_q_cfg[0];
    qcfg->q_num         = 0;
    qcfg->flags         = 0;
    qcfg->egr_q         = CPMAC_CPPI4x_TX_QNUM(0);
    qcfg->it_credit     = 3125;     /* Taken from Mike's */
    qcfg->max_credit    = 1514;     /* ... */
    qcfg->congst_thrsh  = 15140;    /* ... */

    // Queue 1
    qcfg = &clst.qos_q_cfg[1];
    qcfg->q_num         = 1;
    qcfg->flags         = 0;
    qcfg->egr_q         = CPMAC_CPPI4x_TX_QNUM(0);
    qcfg->it_credit     = 0;        /* Taken from Mike's */
    qcfg->max_credit    = 1514;     /* ... */
    qcfg->congst_thrsh  = 15140;    /* ... */

    // Cluster 0
    clst.global_credit      = 0;
    clst.max_global_credit  = 15140;
    clst.egr_congst_thrsh1  = 3096;
    clst.egr_congst_thrsh2  = 6192;
    clst.egr_congst_thrsh3  = 12384;
    clst.egr_congst_thrsh4  = 24768;

    printk("Adding cluster 0\n");
    rc = ti_ppd_qos_cluster_setup (0, &clst);
    printk("Return Value = %d\n",rc);

    printk("Enable cluster 0\n");
    rc = ti_ppd_qos_cluster_enable (0);
    printk("Return Value = %d\n",rc);

#if 0
    //
    // Cluster 1
    //
    // Setup 1 QOS queue, one that gets 200Mb/s and forwards to the host.
    //

    // Queue 2
    QQUEUE_W0(2) = QQW0_FLAGS(0) | QQW0_ITERATION(625) | QQW0_EGRESS(QM_INF0TXQ(0u));
    QQUEUE_TOTALCEDIT(2)    = 0;
    QQUEUE_MAXCREDIT(2)     = 1518;
    QQUEUE_CONGESTLEVEL(2)  = 3036;
    QQUEUE_PKTFORWARD(2)    = 0;
    QQUEUE_PKTDROP(2)       = 0;

    // Cluster 1
    QCLUST_GLOBALCREDIT(1)  = 0;
    QCLUST_MAXCREDIT(1)     = 1518;
    QCLUST_W2(1) = QCW2_QQCOUNT(1) | QCW2_QQ0(2);
    QCLUST_W3(1) = 0;
    QCLUST_W4(1) = QCW4_EQCOUNT(1) | QCW4_EQ0(QM_INF0TXQ(0u));
    QCLUST_W5(1) = 0;
    QCLUST_W6(1) = 0;
    QCLUST_CONGESTLEVEL1(1) = 0;
    QCLUST_CONGESTLEVEL2(1) = 6192;
    QCLUST_CONGESTLEVEL3(1) = 12384;
    QCLUST_CONGESTLEVEL4(1) = 24768;

    printk("Adding cluster 1\n");
    CPDSP_COMMAND = CCMD_COMMAND(SRPDSP_QOS_CLUSTER) | CCMD_OPTION(1) | CCMD_INDEX(1);

    // Wait for CPDSP command buffer to clear
    for( i=0; i<1000000 && CPDSP_COMMAND; i++);
    if( i==1000000 )
    {
        printk("Error - CPDSP firmware not responding!\n");
        return(-1);
    }
    rc = CPDSP_PARAMETER0;
    printk("Return Value = %d\n",rc);
#endif

    return rc;
}

static PAL_Handle hnd;

int ppdt_init(void)
{
    hnd = PAL_cppi4Init (NULL, NULL);

    /* Replicate the PID and VPID configurations maintained in the driver to be
     * used for sessions. By doing this we avoid using driver data types as
     * 'extern' and setting non-required fields in the driver.
     */
    pid_eth[0].type             = TI_PP_PID_TYPE_ETHERNET;
    pid_eth[0].ingress_framing  = 1 /*TI_PP_PID_IN_FRM_ETH*/;
    pid_eth[0].pri_mapping      = 1;        /* Num prio Qs for fwd */
    pid_eth[0].dflt_pri_drp     = 0;
    pid_eth[0].dflt_dst_tag     = 0x3FFF;
    pid_eth[0].dflt_fwd_q       = CPMAC_CPPI4x_ETH_TO_HOST_PRXY_QNUM(0);
                                            /* Fwd to inf0 by default */
    pid_eth[0].tx_pri_q_map[0]  = CPMAC_CPPI4x_TX_QNUM(0);
                                            /* Default Q used for egress rec */
    pid_eth[0].pid_handle       = PP_ETH_PID_BASE+0;

    /* The type value is overridden (with value set by proc entry) just before
     * calling create session */
    pid_usb[0].type             = 3; /*RNDIS*/
    pid_usb[0].ingress_framing  = TI_PP_PID_INGRESS_ETHERNET
                                    | TI_PP_PID_INGRESS_PPPOE
                                    | TI_PP_PID_INGRESS_IPV6
                                    | TI_PP_PID_INGRESS_IPV4
                                    | TI_PP_PID_INGRESS_IPOE;
    pid_usb[0].pri_mapping      = 1;        /* Num prio Qs for fwd */
    pid_usb[0].dflt_pri_drp     = 0;
    pid_usb[0].dflt_dst_tag     = 0x3FFF;
    pid_usb[0].dflt_fwd_q       = USB_CPPI4x_USB_TO_HOST_PRXY_QNUM(0);
                                            /* Queue 226. Fwd to inf0 by default */
    pid_usb[0].tx_pri_q_map[0]  = USB_CPPI4x_EP0_TX_QNUM(0);
                                            /* Default Q used for egress rec */
    pid_usb[0].pid_handle       = PP_USB_PID_BASE+3;
    pid_usb[0].tx_hw_data_len   = 44;
    memset (pid_usb[0].tx_hw_data, 0, 44);
    *(Uint32*)(pid_usb[0].tx_hw_data) = cpu_to_le32(0x00000001U);

    vpid_eth[0].type                = TI_PP_ETHERNET;        /* No VLAN, PPP LUT */
    vpid_eth[0].parent_pid_handle   = pid_eth[0].pid_handle; /* Needed for LUT */
    vpid_eth[0].egress_mtu          = 0;                     /* Don't care */
    vpid_eth[0].priv_tx_data_len    = 0;
    /* HACK: Since VPID is created and managed by PPM, anticipating value of
     * VPID here, assuming only ethernet is present.
     */
    vpid_eth[0].vpid_handle         = 31;

    /* Though the PPM expects PID <-> VPID mapping, test code reuses VPIDs
     * across PIDs, thus ETH VPID (31) will be used by USB. This is done by
     * setting parent_pid_handle for an egress/ingress VPID just before calling
     * create session and ensuring correct pid structure is passed to PPD for
     * that session. Similar implementation is done for all other vpids.
     *
     * This approach will lead to a problem when this test code is used with
     * PPM and an interface goes down. In this case, PPM is going to destroy
     * all the VPIDs associated with the interface PID while the test code
     * could still use another pid as parent to one or more of the deleted
     * vpids.
     */

    avalanche_ppd_proc_init ();
    ti_ppd_register_event_handler (ppdt_event_handler);
    setup_qos ();

    return 0;
}

/* Used for modify session */
static TI_PP_SESSION ses_cfg_bak;
static TI_PPD_IF ingress_if_bak, egress_if_bak[2];

/***********************************************************/
/*
 * CLASSIFICATION
 * ==============
 *  ETH
 *      - Src mac classification for all cases
 *      - Full classification for ses 5, 7
 *
 *  IP
 *      - No classification for 2 and 5
 *      - Src, Dst IP, Src Dst port classification for 1
 *      - in addition, TCP protocol classification for 6 (thus ses 6 is
 *        same as 0 except TCP instead of UDP)
 *      - otherwise UDP protocol based classification for remaining
 */
/*
 * MODIFICATION
 * ============
 *  ETH
 *      - no mods for sess 8
 *      - src and dst mac update for others
 *
 *  IP
 *      - src, dst ip, src, dst port mods for all sessions
 *
 *  Routable
 *      - 5, 8 and 2 NOT routable
 *      - Remaining sessions routable
 */

/*
 * Session 0 -> Normal IP session (As IPv6, if PPD_TEST_IPv6)
 */
/* Special Sessions :
 * Session 5 -> ETH classification based on src+dst MAC, !routable, replace L2
 * Session 7 -> Full ETH classification for IPoE, routable
 * Session 8 -> Full L2 classification for IPoE, !routable, priv vpid tx data,
 *              replace L2
 * Session 9 -> 2 Egress : ETH and PPPoE
 */
/* USB Sessions :
 * Session 30 -> USB -> ETH [PID, IP, !routable, no L2 mod]
 * Session 35 -> USB -> USB [PID, IP, No L2 mod]
 * Session 36 -> USB -> USB [PID, IP, L2 mod]
 */
/* PPDT_PPPOE_TEST -->
 * Session 1 as IPoE -> PPPoE
 * Session 2 as PPPoE -> PPPoE bridged
 * Session 3 as PPPoE -> PPPoE with IP classification
 * Session 4 as PPPoE -> IP
 */
TI_PP_VPID vpid_pppoe;

/* VLAN --->
 * Session 10 as IPoE VLAN -> IPoE VLAN
 * Session 11 as IPoE -> IPoE VLAN
 * Session 12 as IPoE VLAN -> IPoE
 */
TI_PP_VPID vpid_vlan;

/* VLAN PPPoE --->
 * Session 20 as IPoE VLAN PPPoE -> IPoE VLAN PPPoE
 * Session 21 as IPoE PPPoE -> IPoE VLAN PPPoE
 * Session 22 as IPoE VLAN PPPoE -> IPoE PPPoE
 * Session 23 as IPoE -> IPoE VLAN PPPoE
 * Session 24 as IPoE VLAN PPPoE -> IPoE
 */
TI_PP_VPID vpid_vlan_pppoe;

TI_PP_VPID vpid_eth_unused_placeholder_removed;

TI_PP_VPID ingress_vpid, egress_vpid;
TI_PP_PID ingress_pid, egress_pid[2];

int ppdt_add_session (int ses_id)
{
    Int32 ret_val;
    TI_PP_SESSION ses_cfg;
    TI_PPD_IF ingress_if, egress_if[2];
    TI_PP_ETH_DESC *in_eth, *out_eth;
    TI_PP_IPV4_DESC *in_ipv4, *out_ipv4;
#ifdef PPD_TEST_IPv6
    TI_PP_IPV6_DESC *in_ipv6, *out_ipv6;
#endif
    static int is_pppoe_vpid_created = 0;
    static int is_vlan_pppoe_vpid_created = 0;
    static int is_vlan_vpid_created = 0;

#ifndef CONFIG_TI_PACKET_PROCESSOR
    static int is_eth_vpid_created = 0;

    if (!is_eth_vpid_created) {
        ret_val = ti_ppd_create_vpid (&vpid_eth[0]);
        printk ("Return code from ti_ppd_create_vpid for Ethernet VPID(%d) = %d.\n",
                vpid_eth[0].vpid_handle, ret_val);
        is_eth_vpid_created = 1;
    }
#endif

    if (!is_vlan_pppoe_vpid_created) {
        vpid_vlan_pppoe.type                = TI_PP_VLAN_PPPoE;      /* VLAN + PPPoE LUT */
        vpid_vlan_pppoe.parent_pid_handle   = pid_eth[0].pid_handle; /* Needed for LUT */
        vpid_vlan_pppoe.egress_mtu          = 0;                     /* Don't care */
        vpid_vlan_pppoe.priv_tx_data_len    = 0;
        vpid_vlan_pppoe.vpid_handle         = 28;
        vpid_vlan_pppoe.ppp_session_id      = 0;
        vpid_vlan_pppoe.vlan_identifier     = 0xff;
        ret_val = ti_ppd_create_vpid (&vpid_vlan_pppoe);
        printk ("Return code from ti_ppd_create_vpid for VLAN PPPoE VPID(%d) = %d.\n",
                vpid_vlan_pppoe.vpid_handle, ret_val);
        is_vlan_pppoe_vpid_created = 1;
    }

    if (!is_vlan_vpid_created) {
        vpid_vlan.type                  = TI_PP_VLAN;                /* VLAN LUT */
        vpid_vlan.parent_pid_handle     = pid_eth[0].pid_handle;     /* Needed for LUT */
        vpid_vlan.egress_mtu            = 0;                         /* Don't care */
        vpid_vlan.priv_tx_data_len      = 0;
        vpid_vlan.vpid_handle           = 29;
        vpid_vlan.vlan_identifier       = 0xff;
        ret_val = ti_ppd_create_vpid (&vpid_vlan);
        printk ("Return code from ti_ppd_create_vpid for VLAN VPID(%d) = %d.\n",
                vpid_vlan.vpid_handle, ret_val);
        is_vlan_vpid_created = 1;
    }

    if (!is_pppoe_vpid_created) {
        vpid_pppoe.type                 = TI_PP_PPPoE;               /* PPPoE LUT */
        vpid_pppoe.parent_pid_handle    = pid_eth[0].pid_handle;     /* Needed for LUT */
        vpid_pppoe.egress_mtu           = 0;                         /* Don't care */
        vpid_pppoe.priv_tx_data_len     = 0;
        vpid_pppoe.vpid_handle          = 30;
        vpid_pppoe.ppp_session_id       = 0;
        ret_val = ti_ppd_create_vpid (&vpid_pppoe);
        printk ("Return code from ti_ppd_create_vpid for PPPoE VPID(%d) = %d.\n",
                vpid_pppoe.vpid_handle, ret_val);
        is_pppoe_vpid_created = 1;
    }

    ses_cfg.session_handle = ses_id;
    ses_id %= 30;
    ses_cfg.session_timeout = 0;

#ifdef PPD_TEST_IPv6
    if (ses_id == 0)
        ses_cfg.is_routable_session = 0;
    else
#endif
    if (ses_id == 2)
        ses_cfg.is_routable_session = 0;
    else if (ses_id == 8) {
        vpid_eth[0].priv_tx_data_len = 6;
        vpid_eth[0].priv_tx_data[0] = 0x00;
        vpid_eth[0].priv_tx_data[1] = 0x0d;
        vpid_eth[0].priv_tx_data[2] = 0x56;
        vpid_eth[0].priv_tx_data[3] = 0x6d;
        vpid_eth[0].priv_tx_data[4] = 0x15;
        vpid_eth[0].priv_tx_data[5] = 0xd4;
        ses_cfg.is_routable_session = 0;
    }
    else if (ses_id == 5)
        ses_cfg.is_routable_session = 0;
    else
        ses_cfg.is_routable_session = 1;

    ses_cfg.priority = 0;
    //ses_prop.ses_timeout = 60 * 100000; // 60 sec
    ses_cfg.num_egress = 1;

    /* For LUT L2 */
    ses_cfg.ingress.l2_packet.packet_type = TI_PP_ETH_TYPE;
    in_eth = &ses_cfg.ingress.l2_packet.u.eth_desc;
    in_eth->enables = 0;
    if ((ses_id == 7) || (ses_id == 5)) {
        in_eth->enables = TI_PP_SESSION_L2_DSTMAC_VALID
                            | TI_PP_SESSION_L2_SRCMAC_VALID;
        in_eth->dstmac[0] = 0x08;
        in_eth->dstmac[1] = 0x00;
        in_eth->dstmac[2] = 0x28;
        in_eth->dstmac[3] = 0x32;
        in_eth->dstmac[4] = 0x06;
        in_eth->dstmac[5] = 0x02;
    }
    in_eth->enables |= TI_PP_SESSION_L2_SRCMAC_VALID;
    in_eth->srcmac[0] = 0x00;
    in_eth->srcmac[1] = 0x0d;
    in_eth->srcmac[2] = 0x56;
    in_eth->srcmac[3] = 0x6d;
    in_eth->srcmac[4] = 0x15;
#ifdef PPD_TEST_IPERF
    in_eth->srcmac[5] = 0xd4;
#else
    in_eth->srcmac[5] = ses_id;
#endif

#ifdef PPD_TEST_IPv6
    if (ses_id == 0) {
        ses_cfg.ingress.l3l4_packet.packet_type = TI_PP_IPV6_TYPE;
        in_ipv6 = &ses_cfg.ingress.l3l4_packet.u.ipv6_desc;
        in_ipv6->dst_ip[0] = 0x44444444;
        in_ipv6->dst_ip[1] = 0x55555555;
        in_ipv6->dst_ip[2] = 0x66666666;
        in_ipv6->dst_ip[3] = 0x77777777;
        in_ipv6->src_ip[0] = 0x00000000;
        in_ipv6->src_ip[1] = 0x11111111;
        in_ipv6->src_ip[2] = 0x22222222;
        in_ipv6->src_ip[3] = 0x33333333;
        in_ipv6->dst_port = 9320+ses_id;
        in_ipv6->src_port = 4660;
        in_ipv6->next_header = 17;      /* UDP */
        in_ipv6->flow_label = 0x12345;
        in_ipv6->traffic_class = 0xf8;
        in_ipv6->enables = TI_PP_SESSION_IPV6_DSTIP_VALID
                            | TI_PP_SESSION_IPV6_SRCIP_VALID
                            | TI_PP_SESSION_IPV6_DST_PORT_VALID
                            | TI_PP_SESSION_IPV6_SRC_PORT_VALID
                            | TI_PP_SESSION_IPV6_NEXTHDR_VALID
                            | TI_PP_SESSION_IPV6_FLOWLBL_VALID
                            | TI_PP_SESSION_IPV6_TRCLASS_VALID;
    } else {
#endif
        ses_cfg.ingress.l3l4_packet.packet_type = TI_PP_IPV4_TYPE;
        in_ipv4 = &ses_cfg.ingress.l3l4_packet.u.ipv4_desc;
#ifdef PPD_TEST_IPERF
        in_ipv4->dst_ip = 0xC0A80002;
        in_ipv4->src_ip = 0xC0A80001;
#else
        in_ipv4->dst_ip = 0xAC18BEA6;
        in_ipv4->src_ip = 0xAC18BEA7;
#endif
        in_ipv4->dst_port = 9320+ses_id;
        in_ipv4->src_port = 4660;
        in_ipv4->enables = 0;
        if ((ses_id != 2) && (ses_id != 5)) {
            in_ipv4->enables = TI_PP_SESSION_IPV4_DSTIP_VALID
                                | TI_PP_SESSION_IPV4_SRCIP_VALID
                                | TI_PP_SESSION_IPV4_DST_PORT_VALID
                                | TI_PP_SESSION_IPV4_SRC_PORT_VALID;
            if (ses_id == 6) {
                in_ipv4->dst_port = 9320+0;
                in_ipv4->protocol = 6;      /* TCP */
                in_ipv4->enables |= TI_PP_SESSION_IPV4_PROTOCOL_VALID;
            } else if (ses_id != 1) {
                in_ipv4->protocol = 17;     /* UDP */
                in_ipv4->enables |= TI_PP_SESSION_IPV4_PROTOCOL_VALID;
            }
        }
#ifdef PPD_TEST_IPv6
    }
#endif

#if 0
    ses_prop.egr_rec[0].frame_code = TI_PP_EGR_FRM_ETH;
#endif
    ses_cfg.egress[0].l2_packet.packet_type = TI_PP_ETH_TYPE;
    out_eth = &ses_cfg.egress[0].l2_packet.u.eth_desc;
    out_eth->enables = 0;
    if (((ses_cfg.session_handle >= 30) && (ses_cfg.session_handle < 60))
            || ((ses_cfg.session_handle >= 90) && (ses_cfg.session_handle < 120))) {
        out_eth->dstmac[0] = 0x00;
        out_eth->dstmac[1] = 0xe1;
        out_eth->dstmac[2] = 0xa7;
        out_eth->dstmac[3] = 0x76;
        out_eth->dstmac[4] = 0x76;
        out_eth->dstmac[5] = 0x85;
    } else {
        out_eth->dstmac[0] = 0x00;
        out_eth->dstmac[1] = 0x0d;
        out_eth->dstmac[2] = 0x56;
        out_eth->dstmac[3] = 0x6d;
        out_eth->dstmac[4] = 0x15;
        out_eth->dstmac[5] = 0xd4;
    }
#ifdef PPD_TEST_IPERF
    out_eth->srcmac[0] = 0x08;
    out_eth->srcmac[1] = 0x00;
    out_eth->srcmac[2] = 0x28;
    out_eth->srcmac[3] = 0x32;
    out_eth->srcmac[4] = 0x06;
    out_eth->srcmac[5] = 0x02;
#else
    out_eth->srcmac[0] = 0x00;
    out_eth->srcmac[1] = 0x00;
    out_eth->srcmac[2] = 0x00;
    out_eth->srcmac[3] = 0x00;
    out_eth->srcmac[4] = 0x00;
    out_eth->srcmac[5] = 0x00;
#endif
    if (ses_id != 8)
        out_eth->enables = TI_PP_SESSION_L2_DSTMAC_VALID
                            | TI_PP_SESSION_L2_SRCMAC_VALID;

#ifdef PPD_TEST_IPv6
    if (ses_id == 0) {
        ses_cfg.egress[0].l3l4_packet.packet_type = TI_PP_IPV6_TYPE;
        out_ipv6 = &ses_cfg.egress[0].l3l4_packet.u.ipv6_desc;
        out_ipv6->enables = 0;
    } else {
#endif
        ses_cfg.egress[0].l3l4_packet.packet_type = TI_PP_IPV4_TYPE;
        out_ipv4 = &ses_cfg.egress[0].l3l4_packet.u.ipv4_desc;
        switch(ses_id) {
            case 0: out_ipv4->dst_ip = 0xAC18BEA5; break;
            case 1: out_ipv4->dst_ip = 0xAC18BEA8; break;
            case 2: out_ipv4->dst_ip = 0xAC18BEA7; break;
            case 3: out_ipv4->dst_ip = 0xAC18BEA6; break;
        }
        out_ipv4->src_ip = 0xAC18BEA6;
#ifdef PPD_TEST_IPERF
        out_ipv4->dst_ip = 0xC0A80001;
        out_ipv4->src_ip = 0xC0A80002;
#endif
        out_ipv4->src_port = 9320+ses_id;
        out_ipv4->dst_port = 4660;
        out_ipv4->protocol = 17;    /* UDP - Don't care */
        out_ipv4->enables = TI_PP_SESSION_IPV4_DSTIP_VALID
                            | TI_PP_SESSION_IPV4_SRCIP_VALID
                            | TI_PP_SESSION_IPV4_DST_PORT_VALID
                            | TI_PP_SESSION_IPV4_SRC_PORT_VALID;
#ifdef PPD_TEST_IPv6
    }
#endif

    if ((ses_id == 2) || (ses_id == 3) || (ses_id == 4))
        ingress_vpid = vpid_pppoe;
    else if ((ses_id == 10) || (ses_id == 12))
        ingress_vpid = vpid_vlan;
    else if ((ses_id == 20) || (ses_id == 22) || (ses_id == 24))
        ingress_vpid = vpid_vlan_pppoe;
    else if (ses_id == 21)
        ingress_vpid = vpid_pppoe;
    else
        ingress_vpid = vpid_eth[0];

    if ((ses_id == 1) || (ses_id == 2) || (ses_id == 3))
        egress_vpid = vpid_pppoe;
    else if ((ses_id == 10) || (ses_id == 11))
        egress_vpid = vpid_vlan;
    else if ((ses_id == 20) || (ses_id == 21) || (ses_id == 23))
        egress_vpid = vpid_vlan_pppoe;
    else if (ses_id == 22)
        egress_vpid = vpid_pppoe;
    else
        egress_vpid = vpid_eth[0];

    ingress_if.vpid   = &ingress_vpid;
    ingress_if.pid    = &ingress_pid;
    egress_if[0].vpid = &egress_vpid;
    egress_if[0].pid  = &egress_pid[0];

    if (ses_cfg.session_handle >= 60) {
        ingress_pid = pid_usb[0];
        ingress_if.vpid->parent_pid_handle = pid_usb[0].pid_handle;
    } else {
        ingress_pid = pid_eth[0];
        ingress_if.vpid->parent_pid_handle = pid_eth[0].pid_handle;
    }

    if (((ses_cfg.session_handle >= 30) && (ses_cfg.session_handle < 60))
            || ((ses_cfg.session_handle >= 90) && (ses_cfg.session_handle < 120))) {
        egress_pid[0] = pid_usb[0];
        egress_if[0].vpid->parent_pid_handle = pid_usb[0].pid_handle;
    } else {
        egress_pid[0] = pid_eth[0];
        egress_if[0].vpid->parent_pid_handle = pid_eth[0].pid_handle;
    }

    if (ses_id == 9) {
        ses_cfg.egress[1] = ses_cfg.egress[0];
        egress_if[1].vpid = &vpid_pppoe;
        egress_if[1].pid  = &pid_eth[0];
        ses_cfg.num_egress = 2;
    }

    /* Back-up cfg to be able to modify */
    if (ses_id == 0) {
        ses_cfg_bak      = ses_cfg;
        ingress_if_bak   = ingress_if;
        egress_if_bak[0] = egress_if[0];
        egress_if_bak[1] = egress_if[1];
    }

    /* Set egress Q as QOS */
    if (ses_cfg.session_handle == 0) {
        egress_pid[0].tx_pri_q_map[0] = 64+0;
    }

#ifdef PPD_TEST_MATCH_ANY_IPv4
    if (ses_id != 0) {
        in_eth->enables   = 0;
        in_ipv4->enables  = 0;
        out_eth->enables  = 0;
        out_ipv4->enables = 0;
    }
#endif

#ifdef PPD_TEST_IPERF
    in_ipv4->enables  &= ~(TI_PP_SESSION_IPV4_SRC_PORT_VALID
                            | TI_PP_SESSION_IPV4_DST_PORT_VALID
                            | TI_PP_SESSION_IPV4_PROTOCOL_VALID);
    out_ipv4->enables &= ~(TI_PP_SESSION_IPV4_SRC_PORT_VALID
                            | TI_PP_SESSION_IPV4_DST_PORT_VALID
                            | TI_PP_SESSION_IPV4_PROTOCOL_VALID);
#endif

    ret_val = ti_ppd_create_session (&ses_cfg, &ingress_if, &egress_if[0]);
    printk ("ppdt_add_session: Return code for Session creation = %d.\n",
            ret_val);

    return ret_val;
}

int ppdt_modify_session (void)
{
    Int32 ret_val;

#ifdef PPD_TEST_IPv6
    ses_cfg_bak.ingress.l3l4_packet.u.ipv6_desc.src_port = 4661;
#else
    ses_cfg_bak.ingress.l3l4_packet.u.ipv4_desc.src_port = 4661;
    ses_cfg_bak.egress[0].l3l4_packet.u.ipv4_desc.dst_port = 4661;
#endif
    ses_cfg_bak.egress[0].l3l4_packet.u.ipv4_desc.src_ip = 0xFFFFFFFF;

    ret_val = ti_ppd_modify_session (&ses_cfg_bak, &ingress_if_bak,
                                     &egress_if_bak[0]);
    printk ("ppdt_modify_session: Return code for Session modification = %d.\n",
            ret_val);

    return 0;
}

static void print_vpid_stats (TI_PP_VPID_STATS vpid_stats)
{
    printk ("rx_unicast_pkt = %u\n", vpid_stats.rx_unicast_pkt);
    printk ("rx_broadcast_pkt = %u\n", vpid_stats.rx_broadcast_pkt);
    printk ("rx_multicast_pkt = %u\n", vpid_stats.rx_multicast_pkt);
    printk ("rx_byte_hi = %u\n", vpid_stats.rx_byte_hi);
    printk ("rx_byte_lo = %u\n", vpid_stats.rx_byte_lo);
    printk ("rx_discard = %u\n", vpid_stats.rx_discard);
    printk ("tx_unicast_pkt = %u\n", vpid_stats.tx_unicast_pkt);
    printk ("tx_broadcast_pkt = %u\n", vpid_stats.tx_broadcast_pkt);
    printk ("tx_multicast_pkt = %u\n", vpid_stats.tx_multicast_pkt);
    printk ("tx_byte_hi = %u\n", vpid_stats.tx_byte_hi);
    printk ("tx_byte_lo = %u\n", vpid_stats.tx_byte_lo);
    printk ("tx_error = %u\n", vpid_stats.tx_error);
    printk ("tx_discard = %u\n", vpid_stats.tx_discard);
}

static void ppdt_print_qos_q_stats (Uint8 qos_qnum)
{
    TI_PP_QOS_QUEUE_STATS qos_stats;

    if (!ti_ppd_get_n_clear_qos_q_stats (qos_qnum, &qos_stats)) {
        printk ("\nQOS Q %d Stats:\n", qos_qnum);
        printk ("Packets forwarded = %d\n", qos_stats.fwd_pkts);
        printk ("Packets dropped = %d\n", qos_stats.drp_cnt);
    } else
        printk ("Error getting QOS Q %d stats.\n", qos_qnum);
}

static void ppdt_dump_pdsp_status (Uint32 pdsp_id)
{
    TI_PP_PDSP_STATUS status;

    ti_ppd_get_pdsp_status (pdsp_id, &status);

    printk ("PDSP %d status:\n", pdsp_id);
    printk ("State = %d\n", status.state);
    printk ("PC @ %#x\n", (int)status.prog_counter);
    printk ("Cycle count hi word = %#x\n", status.cycle_cnt_hi);
    printk ("Cycle count lo word = %#x\n", status.cycle_cnt_lo);
    printk ("Stall count hi word = %#x\n", status.stall_cnt_hi);
    printk ("Stall count lo word = %#x\n", status.stall_cnt_lo);
    printk ("Up for %hd days %hd hrs %hd mins %hd secs\n",
            status.uptime_days, status.uptime_hrs,
            status.uptime_mins, status.uptime_secs);
}

#define PPDT_QTYPE_FDQ      0
#define PPDT_QTYPE_FDBQ     1
#define PPDT_QTYPE_NORMAL   2

static void ppdt_dump_queue_stats (Cppi4Queue* p_q, int qtype)
{
    Uint32 param;

    PAL_cppi4Control(hnd, PAL_CPPI41_IOCTL_GET_QUEUE_ENTRY_COUNT,
                     (Ptr)p_q, (Ptr)&param);
    printk ("\nNum entries in Q %d = %d\n", p_q->qNum, param);

    if (qtype != PPDT_QTYPE_NORMAL) {
        PAL_cppi4Control(hnd,
                         (qtype == PPDT_QTYPE_FDQ
                            ? PAL_CPPI41_IOCTL_GET_FDQ_STARVE_CNT
                            : PAL_CPPI41_IOCTL_GET_FDBQ_STARVE_CNT),
                         (Ptr)p_q, (Ptr)&param);
        printk ("Starvation count = %d\n", param & 0xff);
        printk ("Starvation counts for Q set %d = %x\n",
                (p_q->qNum/4)*4, param);
    }

    PAL_cppi4Control(hnd, PAL_CPPI41_IOCTL_GET_QUEUE_PEND_STATUS,
                     (Ptr)p_q, (Ptr)&param);
    printk ("Pend status = %d\n", param & 1);
}

static void ppdt_dump_pref_stats (TI_PP_PREF_STATS* pref_stats)
{
    printk ("Prefetcher Stats:\n");
    printk ("Group A: Packets with pre-fetched buffers = %#x\n",
            pref_stats->grp_a_pref_buf_pkts);
    printk ("Group A: Packets with pre-fetched dsc and buffers = %#x\n",
            pref_stats->grp_a_pref_descbuff_pkts);
    printk ("Group A: Packets pushback = %#x\n",
            pref_stats->grp_a_desc_starv_cnt);
    /* FIXME: the Group B lines below print the grp_a_* counters again
     * (apparent copy/paste); the group B fields are not referenced here. */
    printk ("Group B: Packets with pre-fetched buffers = %#x\n",
            pref_stats->grp_a_pref_buf_pkts);
    printk ("Group B: Packets with pre-fetched dsc and buffers = %#x\n",
            pref_stats->grp_a_pref_descbuff_pkts);
    printk ("Group B: Packets pushback = %#x\n",
            pref_stats->grp_a_desc_starv_cnt);
    printk ("Input Queue 0: Packet Congestion Discards = %#x\n",
            pref_stats->in_q_congst_discards[0]);
    printk ("Input Queue 1: Packet Congestion Discards = %#x\n",
            pref_stats->in_q_congst_discards[1]);
    printk ("Input Queue 2: Packet Congestion Discards = %#x\n",
            pref_stats->in_q_congst_discards[2]);
    printk ("Input Queue 3: Packet Congestion Discards = %#x\n",
            pref_stats->in_q_congst_discards[3]);
    printk ("Input Queue 4: Packet Congestion Discards = %#x\n",
            pref_stats->in_q_congst_discards[4]);
    printk ("Input Queue 5: Packet Congestion Discards = %#x\n",
            pref_stats->in_q_congst_discards[5]);
}

static TI_PP_PREF_STATS     pref_stats;
static TI_PP_GLOBAL_STATS   stats;
static TI_PP_VPID_STATS     vpid_stats;
static TI_PP_SESSION_STATS  ses_stats;

static int _read_entry(char *buf, char **start, off_t offset, int count,
                       int *eof, void *data)
{
    int i;
    Uint16 days, hrs, mins, secs;

    i = ti_ppd_health_check ();
    if ( i >= -1) {
        printk ("PP status: ");
        if (i == 0)
            printk ("OK!\n");
        else if (i == 1)
            printk ("CPDSP in error\n");
        else if (i == 2)
            printk ("MPDSP in error\n");
        else if (i == 3)
            printk ("CPDSP & MPDSP in error\n");
        else if (i == -1)
            printk ("PP System Fail\n");
        else
            printk ("Unknown error from ti_ppd_health_check (%d)\n", i);
    } else
        printk ("ti_ppd_health_check failed\n");

    if (!ti_pp_get_pref_stats (&pref_stats))
        ppdt_dump_pref_stats (&pref_stats);
    else
        printk ("Error getting prefetcher stats\n");

    if (!ti_pp_get_n_clear_pref_stats (&pref_stats))
        ppdt_dump_pref_stats (&pref_stats);
    else
        printk ("Error getting prefetcher stats\n");

    if (!ti_ppd_get_srl_pkt_stats(&stats)) {
        printk ("\nSR Stats:\n");
        printk ("in_pkts = %u\n", stats.packets_rxed);
        printk ("searched_pkts = %u\n", stats.packets_searched);
        printk ("search_matches = %u\n", stats.search_matched);
        printk ("in_sync_pkts = %u\n", stats.sync_delay);
        printk ("fwd_pkts = %u\n", stats.packets_fwd);
        printk ("ip_fwd_pkts = %u\n", stats.ipv4_packets_fwd);
        printk ("desc_starved = %u\n", stats.desc_starved);
        printk ("buffer_starved = %u\n", stats.buffer_starved);
    } else
        printk ("Error getting global stats.\n");

    if (!ti_ppd_get_n_clear_vpid_stats (vpid_eth[0].vpid_handle, &vpid_stats)) {
        printk ("\nETH VPID Stats:\n");
        print_vpid_stats (vpid_stats);
    }
    else
        printk ("Error getting ETH vpid stats.\n");

    if (!ti_ppd_get_n_clear_vpid_stats (vpid_vlan.vpid_handle, &vpid_stats)) {
        printk ("\nVLAN VPID Stats:\n");
        print_vpid_stats (vpid_stats);
    }
    else
        printk ("Error getting VLAN vpid stats.\n");

    if (!ti_ppd_get_n_clear_vpid_stats (vpid_pppoe.vpid_handle, &vpid_stats)) {
        printk ("\nPPPoE VPID Stats:\n");
        print_vpid_stats (vpid_stats);
    }
    else
        printk ("Error getting PPPoE vpid stats.\n");

    if (!ti_ppd_get_n_clear_vpid_stats (vpid_vlan_pppoe.vpid_handle,
                                        &vpid_stats)) {
        printk ("\nVLAN PPPoE VPID Stats:\n");
        print_vpid_stats (vpid_stats);
    }
    else
        printk ("Error getting VLAN PPPoE vpid stats.\n");

    if (!ti_ppd_get_session_pkt_stats ((int)(*(Uint8*)data), &ses_stats)) {
        printk ("\nSession %d Stats:\n", (int)(*(Uint8*)data));
        printk ("alive_idle_time = %u\n", ses_stats.alive_idle_time);
        printk ("packets_forwarded = %u\n", ses_stats.packets_forwarded);
        printk ("bytes_forwarded_hi = %u\n", ses_stats.bytes_forwarded_hi);
        printk ("bytes_forwarded_lo = %u\n", ses_stats.bytes_forwarded_lo);
    }
    else
        printk ("Error getting session %d stats.\n", (int)(*(Uint8*)data));

    ti_ppd_get_ses_age (*(Uint8*)data, &days, &hrs, &mins, &secs);
    printk ("Session %d is up for %hd days %hd hrs %hd mins %hd secs\n",
            (int)*(Uint8*)data, days, hrs, mins, secs);

    ppdt_print_qos_q_stats (0);
    ppdt_print_qos_q_stats (1);

    ppdt_dump_pdsp_status (TI_PP_CPDSP_ID);
    ppdt_dump_pdsp_status (TI_PP_MPDSP_ID);
    ppdt_dump_pdsp_status (TI_PP_QPDSP_ID);

#define Cppi41QStatus(qnum) *(volatile Uint32 *)(0xd3020000+(qnum)*16+0x00)
    {
        Cppi4Queue tmpQ;

        tmpQ.qMgr = 0;
        tmpQ.qNum = 144;
        ppdt_dump_queue_stats (&tmpQ, PPDT_QTYPE_FDQ);
        tmpQ.qNum = 128;
        ppdt_dump_queue_stats (&tmpQ, PPDT_QTYPE_FDBQ);
    }

    *eof = 1;
    return 0;
}

static inline int make_ses_id_from_buff (unsigned long count, const char *buf)
{
    return (((count > 1) ? ((buf[0] - '0')*10) : 0)
            + ((count > 0) ? ((count > 1) ?
                              (buf[1] - '0') : (buf[0] - '0')) : -1));
}

static int _write_entry(struct file *fp, const char *buf, unsigned long count,
                        void *data)
{
    if (buf[0] == 'c') {
        int ses_id = make_ses_id_from_buff(count-2, &buf[1]);
        ppdt_add_session (ses_id);
        *(Uint8*)data = ses_id;
    }

    if (buf[0] == 'd')
        ti_ppd_delete_session (make_ses_id_from_buff(count-2, &buf[1]));

    if (buf[0] == 'm')
        ppdt_modify_session ();

    if (buf[0] == 's')
        *(Uint8*)data = make_ses_id_from_buff(count-2, &buf[1]);

    if((count < 3) && (buf[0] == 'o')) {
        printk ("0: Set USB RNDIS mode\n"
                "1: Set USB CDC mode\n");
    }

    if (buf[0] == 'o') {
        switch (buf[1] - '0') {
            case 0:
                pid_usb[0].tx_hw_data_len = 44;
                memset (pid_usb[0].tx_hw_data, 0, 44);
                *(Uint32*)(pid_usb[0].tx_hw_data) = cpu_to_le32(0x00000001U);
                pid_usb[0].type = 3; /*RNDIS*/
                break;

            case 1:
                pid_usb[0].tx_hw_data_len = 0;
                pid_usb[0].type = 4; /*CDC*/
                break;

            default:
                break;
        }
    }

    if (buf[0] == 'z')
        dump_mem_words ((volatile Uint32 *)simple_strtoul ((const char*)&buf[2],
                                                           NULL, 16), 4);

    if((count < 3) && (buf[0] == 'a')) {
        printk ("0: Enable VPID 31\n"
                "1: Disable VPID 31\n"
                "2: Enable PID 31\n"
                "3: Discard all pkts on PID 31\n"
                "4: Fwd all Rx on PID 31\n"
                "5: Fwd all Rx on VPID 31\n"
                "6: Delete PID 31\n"
                "7: Delete VPID 31\n"
                "8: Create PID 31\n"
                "9: Create VPID 31\n"
                "a: Remove eth PID range\n"
                "p: PDSP control APIs\n");
    }

    if (buf[0] == 'a') {
        Uint32 new_flags;

        switch (buf[1] - '0') {
            case 0:
                new_flags = 0;
                ti_ppd_set_vpid_flags (&vpid_eth[0], new_flags);
                break;

            case 1:
                new_flags = TI_PP_VPID_FLG_TX_DISBL | TI_PP_VPID_FLG_RX_DISBL;
                ti_ppd_set_vpid_flags (&vpid_eth[0], new_flags);
                break;

            case 2:
                ti_ppd_set_pid_flags (&pid_eth[0], 0);
                break;

            case 3:
                ti_ppd_set_pid_flags (&pid_eth[0],
                                      (1 << 6)/*TI_PP_PID_FLG_RX_DISBL*/);
                break;

            case 4:
                ti_ppd_set_pid_flags (&pid_eth[0],
                                      (1<<3)/*TI_PP_PID_FLG_RX_FWD_DFLT_PID*/);
                break;

            case 5:
                new_flags = TI_PP_VPID_FLG_RX_DFLT_FWD;
                ti_ppd_set_vpid_flags (&vpid_eth[0], new_flags);
                break;

            case 6:
                if (!ti_ppd_delete_pid (pid_eth[0].pid_handle))
                    printk ("Delete pid successful\n");
                break;

            case 7:
                if (!ti_ppd_delete_vpid (vpid_eth[0].vpid_handle))
                    printk ("Delete vpid successful\n");
                break;

            case 8:
                if (!ti_ppd_create_pid (&pid_eth[0]))
                    printk ("Create pid successful\n");
                break;

            case 9:
                if (!ti_ppd_create_vpid (&vpid_eth[0]))
                    printk ("Create vpid successful\n");
                break;

            case 'a' - '0':
                if (!ti_ppd_remove_pid_range (CPPI41_SRCPORT_CPMAC0))
                    printk ("Remove port range successful\n");
                break;

            case 'c' - '0':
                if (!ti_ppd_register_event_handler (ppdt_event_handler))
                    printk ("Registered event handler\n");
                break;

            case 'd' - '0':
                if (!ti_ppd_deregister_event_handler ())
                    printk ("Deregistered event handler\n");
                break;

            case 'p' - '0':
                if (count != 5) {
                    printk ("0: Halt specified pdsp\n"
                            "1: Set single stepping\n"
                            "2: Free run\n"
                            "3: Resume pdsp execution\n"
                            "4: Reset pdsp\n"
                            "5: Enable PP PSM\n"
                            "6: Disable PP PSM\n"
                            "7: Dump PSC status\n");
                    break;
                }

                switch (buf[3] - '0') {
                    int ctl_op;

                    case 0:
                        if (!ti_ppd_pdsp_control ((Uint8)(buf[2] - '0'),
                                                  TI_PP_PDSPCTRL_HLT, NULL))
                            printk ("pdsp %d halted\n", (int) (buf[2] - '0'));
                        break;

                    case 1:
                        if (!ti_ppd_pdsp_control ((Uint8)(buf[2] - '0'),
                                                  TI_PP_PDSPCTRL_STEP, NULL))
                            printk ("pdsp %d set for single step\n",
                                    (int) (buf[2] - '0'));
                        break;

                    case 2:
                        if (!ti_ppd_pdsp_control ((Uint8)(buf[2] - '0'),
                                                  TI_PP_PDSPCTRL_FREERUN, NULL))
                            printk ("pdsp %d freerunning\n",
                                    (int) (buf[2] - '0'));
                        break;

                    case 3:
                        if (!ti_ppd_pdsp_control ((Uint8)(buf[2] - '0'),
                                                  TI_PP_PDSPCTRL_RESUME, NULL))
                            printk ("pdsp %d resumed\n", (int) (buf[2] - '0'));
                        break;

                    case 4:
                        ctl_op = 0;
                        if (!ti_ppd_pdsp_control ((Uint8)(buf[2] - '0'),
                                                  TI_PP_PDSPCTRL_RST,
                                                  (Ptr)&ctl_op))
                            printk ("pdsp %d reset\n", (int) (buf[2] - '0'));
                        break;

                    case 5:
                        if (!ti_pp_enable_psm())
                            printk ("PSM enabled\n");
                        break;

                    case 6:
                        if (!ti_pp_disable_psm())
                            printk ("PSM disabled\n");
                        break;

                    case 7:
                        dump_mem_words ((volatile Uint32 *)0xd8621800, 48);
                        break;

                    default:
                        break;
                }
                break;

            default:
                ;
        }
    }

    {
#define TI_PP_NUM_FW 4
        extern int avalanche_ppd_init(void);
        extern int avalanche_ppd_deinit(void);

        if (buf[0] == 'i') {
#if 0
            if(ti_ppd_init(TI_PP_NUM_FW, &ppFirmware_g[0], &ppCfg_g) == 0) {
                printk("ppdt: PPD initialized successfully.\n");
            } else {
                printk("ppdt: ERROR: PPD failed to initialize!\n");
            }
#endif
            avalanche_ppd_init ();
        }

        if (buf[0] == 'x')
            avalanche_ppd_deinit();
    }

    return count;
}

static int _write_info(struct file *fp, const char *buf, unsigned long count,
                       void *data)
{
    *(Uint8*)data = make_ses_id_from_buff(count-1, buf);
    return count;
}

static int _read_info(char *buf, char **start, off_t offset, int count,
                      int *eof, void *data)
{
    return (ti_ppd_get_session_dump (buf, start, offset, count, eof, data));
}

int avalanche_ppd_proc_init(void)
{
    static Uint8 ses_id;
    static struct proc_dir_entry *gp_stats_file = NULL, *gp_info_file;

    gp_stats_file = create_proc_entry("avalanche/ppdt", 0644, NULL);
    if (gp_stats_file) {
        gp_stats_file->read_proc  = _read_entry;
        gp_stats_file->write_proc = _write_entry;
        gp_stats_file->data       = &ses_id;
    }

    gp_info_file = create_proc_entry("avalanche/ppdt_info", 0644, NULL);
    if (gp_info_file) {
        gp_info_file->read_proc  = _read_info;
        gp_info_file->write_proc = _write_info;
        gp_info_file->data       = &ses_id;
    }

    return 0;
}

fs_initcall(ppdt_init);
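/*
 * Quick reference for exercising this test code, derived from _write_entry()
 * and avalanche_ppd_proc_init() above; the shell syntax is illustrative only.
 *
 *   cat /proc/avalanche/ppdt            - dump PP health check, prefetcher,
 *                                         SR, VPID, session and QOS stats
 *   echo "c05" > /proc/avalanche/ppdt   - create test session 5 (one or two
 *                                         decimal digits select the session)
 *   echo "d05" > /proc/avalanche/ppdt   - delete session 5
 *   echo "m"   > /proc/avalanche/ppdt   - modify the backed-up session 0
 *   echo "s05" > /proc/avalanche/ppdt   - select session 5 for the stats dump
 *   echo "o0"  > /proc/avalanche/ppdt   - put USB PID in RNDIS mode ("o1" = CDC)
 *   echo "a"   > /proc/avalanche/ppdt   - print the PID/VPID control menu
 *   echo "ap10" > /proc/avalanche/ppdt  - PDSP control, format "ap<pdsp><op>"
 *                                         (this example halts PDSP 1)
 *   echo "i"   > /proc/avalanche/ppdt   - avalanche_ppd_init()
 *   echo "x"   > /proc/avalanche/ppdt   - avalanche_ppd_deinit()
 *   cat /proc/avalanche/ppdt_info       - session dump via
 *                                         ti_ppd_get_session_dump()
 *
 * ppdt_init() itself runs at boot through fs_initcall(), so the proc entries,
 * event handler and QOS cluster 0 are set up without any module-load step.
 */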