/*
 * Copyright (C) 2012 Realtek Semiconductor Corp.
 * All Rights Reserved.
 *
 * This program is the proprietary software of Realtek Semiconductor
 * Corporation and/or its licensors, and only be used, duplicated,
 * modified or distributed under the authorized license from Realtek.
 *
 * ANY USE OF THE SOFTWARE OTHER THAN AS AUTHORIZED UNDER
 * THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
 */
#ifdef CONFIG_APOLLO_MODEL
#else
#ifdef __linux__
#include <linux/init.h>
#include <linux/delay.h>	//for mdelay
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/proc_fs.h>	//for create proc
#include <linux/kernel.h>
#include <linux/mm.h>
#if defined(CONFIG_DEFAULTS_KERNEL_3_18)
#include <linux/fs.h>
#else
#include <linux/config.h>
#endif
#include <linux/netdevice.h>
#include <linux/inet.h>	//for hton, in_aton
#else
#include <re8686_sim.h>
#endif
#endif

#include <rtk_rg_liteRomeDriver.h>
#include <rtk_rg_fwdEngine.h>
#include <rtk_rg_igmpsnooping.h>
//#ifdef CONFIG_RG_DEBUG
#include <rtk_rg_debug.h>
//#endif
#include <rtk_rg_callback.h>
#include <rtk_rg_apollo_liteRomeDriver.h>

#if defined(CONFIG_XDSL_NEW_HWNAT_DRIVER)	//end defined(CONFIG_APOLLO)
#include <rtk_rg_xdsl_extAPI.h>

#define STOP_PRINTK printk("Stop at %s LINE:%d \n",__func__,__LINE__); while(1){};
#define FUNC_PRINTK printk("Trace at %s LINE:%d \n",__func__,__LINE__);

//xdsl extern function
extern uint32 rtl8651_naptTcpUdpTableIndex(int8 isTCP, ipaddr_t srcAddr, uint16 srcPort, ipaddr_t destAddr, uint16 destPort);
#endif	//end defined(CONFIG_XDSL_NEW_HWNAT_DRIVER)

#include <rtk_rg_acl.h>

#if defined(CONFIG_APOLLO)
#include <common/error.h>
#include <rtk/init.h>
#include <rtk/l34_bind_config.h>
#include <rtk/svlan.h>
#include <dal/apollomp/raw/apollomp_raw_hwmisc.h>
#include <rtk/sec.h>
#include <rtk/stat.h>
#include <rtk/ponmac.h>
#if defined(CONFIG_RTL9600_SERIES)
#include <dal/apollomp/raw/apollomp_raw_flowctrl.h>
#include <dal/apollomp/raw/apollomp_raw_l2.h>
#endif
//#include <dal/apollomp/dal_apollomp_l34.h>	//FIXME: because RTK do not have binding related APIs
//#include <hal/common/halctrl.h>
//#include <hal/chipdef/apollo/apollo_reg_struct.h>
//#include <hal/mac/reg.h>
#if 0//def CONFIG_RG_LAYER2_SOFTWARE_LEARN
#include <rtk/irq.h>	//for register link-change event
#include <rtk/intr.h>	//for get and clear link-down indicator register
#endif
#endif	//end defined(CONFIG_APOLLO)

#if 0	//ysleu: it's not good to mix apollo testing code and the rome driver together; the stuff we need was redefined in rtk_rg_internal.h
#include <rtl_glue.h>
#include <rtl_utils.h>
#endif

#if defined(CONFIG_RTL9602C_SERIES)
#include <rtk_rg_apolloFE_liteRomeDriver.h>
#include <hal/chipdef/rtl9602c/rtk_rtl9602c_reg_struct.h>
#elif defined(CONFIG_RTL9607C_SERIES)
#include <rtk_rg_apolloPro_liteRomeDriver.h>
#include <hal/chipdef/rtl9607c/rtk_rtl9607c_reg_struct.h>
#include <rtk_rg_apolloPro_asicDriver.h>
#include <rtk_rg_apolloPro_internal.h>
#else
struct platform pf= {
	.rtk_rg_api_module_init= rtk_rg_apollo_api_module_init,
	.rtk_rg_driverVersion_get= rtk_rg_apollo_driverVersion_get,
	.rtk_rg_initParam_get =rtk_rg_apollo_initParam_get,
	.rtk_rg_initParam_set =rtk_rg_apollo_initParam_set,
	.rtk_rg_lanInterface_add =rtk_rg_apollo_lanInterface_add,	//5
	.rtk_rg_wanInterface_add =rtk_rg_apollo_wanInterface_add,
	.rtk_rg_staticInfo_set =rtk_rg_apollo_staticInfo_set,
	.rtk_rg_dhcpRequest_set =rtk_rg_apollo_dhcpRequest_set,
	.rtk_rg_dhcpClientInfo_set =rtk_rg_apollo_dhcpClientInfo_set,
	.rtk_rg_pppoeClientInfoBeforeDial_set
=rtk_rg_apollo_pppoeClientInfoBeforeDial_set, //10 .rtk_rg_pppoeClientInfoAfterDial_set =rtk_rg_apollo_pppoeClientInfoAfterDial_set, .rtk_rg_interface_del =rtk_rg_apollo_interface_del, .rtk_rg_intfInfo_find =rtk_rg_apollo_intfInfo_find, .rtk_rg_cvlan_add =rtk_rg_apollo_cvlan_add, .rtk_rg_cvlan_del =rtk_rg_apollo_cvlan_del, //15 .rtk_rg_cvlan_get=rtk_rg_apollo_cvlan_get, .rtk_rg_vlanBinding_add =rtk_rg_apollo_vlanBinding_add, .rtk_rg_vlanBinding_del =rtk_rg_apollo_vlanBinding_del, .rtk_rg_vlanBinding_find =rtk_rg_apollo_vlanBinding_find, .rtk_rg_algServerInLanAppsIpAddr_add =rtk_rg_apollo_algServerInLanAppsIpAddr_add, //20 .rtk_rg_algServerInLanAppsIpAddr_del =rtk_rg_apollo_algServerInLanAppsIpAddr_del, .rtk_rg_algApps_set =rtk_rg_apollo_algApps_set, .rtk_rg_algApps_get =rtk_rg_apollo_algApps_get, .rtk_rg_dmzHost_set =rtk_rg_apollo_dmzHost_set, .rtk_rg_dmzHost_get =rtk_rg_apollo_dmzHost_get, //25 .rtk_rg_virtualServer_add =rtk_rg_apollo_virtualServer_add, .rtk_rg_virtualServer_del =rtk_rg_apollo_virtualServer_del, .rtk_rg_virtualServer_find =rtk_rg_apollo_virtualServer_find, .rtk_rg_aclFilterAndQos_add =rtk_rg_apollo_aclFilterAndQos_add, .rtk_rg_aclFilterAndQos_del =rtk_rg_apollo_aclFilterAndQos_del, //30 .rtk_rg_aclFilterAndQos_find =rtk_rg_apollo_aclFilterAndQos_find, .rtk_rg_macFilter_add =rtk_rg_apollo_macFilter_add, .rtk_rg_macFilter_del =rtk_rg_apollo_macFilter_del, .rtk_rg_macFilter_find =rtk_rg_apollo_macFilter_find, .rtk_rg_urlFilterString_add =rtk_rg_apollo_urlFilterString_add, //35 .rtk_rg_urlFilterString_del =rtk_rg_apollo_urlFilterString_del, .rtk_rg_urlFilterString_find =rtk_rg_apollo_urlFilterString_find, .rtk_rg_upnpConnection_add =rtk_rg_apollo_upnpConnection_add, .rtk_rg_upnpConnection_del =rtk_rg_apollo_upnpConnection_del, .rtk_rg_upnpConnection_find =rtk_rg_apollo_upnpConnection_find, //40 .rtk_rg_naptConnection_add =rtk_rg_apollo_naptConnection_add, .rtk_rg_naptConnection_del =rtk_rg_apollo_naptConnection_del, .rtk_rg_naptConnection_find =rtk_rg_apollo_naptConnection_find, .rtk_rg_multicastFlow_add =rtk_rg_apollo_multicastFlow_add, .rtk_rg_multicastFlow_del =rtk_rg_apollo_multicastFlow_del, /* martin zhu add */ .rtk_rg_l2MultiCastFlow_add =rtk_rg_apollo_l2MultiCastFlow_add, //45 .rtk_rg_multicastFlow_find =rtk_rg_apollo_multicastFlow_find, .rtk_rg_macEntry_add =rtk_rg_apollo_macEntry_add, .rtk_rg_macEntry_del =rtk_rg_apollo_macEntry_del, .rtk_rg_macEntry_find =rtk_rg_apollo_macEntry_find, .rtk_rg_arpEntry_add =rtk_rg_apollo_arpEntry_add, //50 .rtk_rg_arpEntry_del =rtk_rg_apollo_arpEntry_del, .rtk_rg_arpEntry_find =rtk_rg_apollo_arpEntry_find, .rtk_rg_neighborEntry_add =rtk_rg_apollo_neighborEntry_add, .rtk_rg_neighborEntry_del =rtk_rg_apollo_neighborEntry_del, .rtk_rg_neighborEntry_find =rtk_rg_apollo_neighborEntry_find, //55 .rtk_rg_accessWanLimit_set =rtk_rg_apollo_accessWanLimit_set, .rtk_rg_accessWanLimit_get =rtk_rg_apollo_accessWanLimit_get, .rtk_rg_accessWanLimitCategory_set =rtk_rg_apollo_accessWanLimitCategory_set, .rtk_rg_accessWanLimitCategory_get =rtk_rg_apollo_accessWanLimitCategory_get, .rtk_rg_softwareSourceAddrLearningLimit_set =rtk_rg_apollo_softwareSourceAddrLearningLimit_set, //60 .rtk_rg_softwareSourceAddrLearningLimit_get =rtk_rg_apollo_softwareSourceAddrLearningLimit_get, .rtk_rg_dosPortMaskEnable_set =rtk_rg_apollo_dosPortMaskEnable_set, .rtk_rg_dosPortMaskEnable_get =rtk_rg_apollo_dosPortMaskEnable_get, .rtk_rg_dosType_set =rtk_rg_apollo_dosType_set, .rtk_rg_dosType_get =rtk_rg_apollo_dosType_get, //65 .rtk_rg_dosFloodType_set 
=rtk_rg_apollo_dosFloodType_set, .rtk_rg_dosFloodType_get =rtk_rg_apollo_dosFloodType_get, .rtk_rg_portMirror_set =rtk_rg_apollo_portMirror_set, .rtk_rg_portMirror_get =rtk_rg_apollo_portMirror_get, .rtk_rg_portMirror_clear =rtk_rg_apollo_portMirror_clear, //70 .rtk_rg_portEgrBandwidthCtrlRate_set =rtk_rg_apollo_portEgrBandwidthCtrlRate_set, .rtk_rg_portIgrBandwidthCtrlRate_set =rtk_rg_apollo_portIgrBandwidthCtrlRate_set, .rtk_rg_portEgrBandwidthCtrlRate_get =rtk_rg_apollo_portEgrBandwidthCtrlRate_get, .rtk_rg_portIgrBandwidthCtrlRate_get =rtk_rg_apollo_portIgrBandwidthCtrlRate_get, .rtk_rg_phyPortForceAbility_set =rtk_rg_apollo_phyPortForceAbility_set, //75 .rtk_rg_phyPortForceAbility_get =rtk_rg_apollo_phyPortForceAbility_get, .rtk_rg_cpuPortForceTrafficCtrl_set =rtk_rg_apollo_cpuPortForceTrafficCtrl_set, .rtk_rg_cpuPortForceTrafficCtrl_get =rtk_rg_apollo_cpuPortForceTrafficCtrl_get, .rtk_rg_portMibInfo_get =rtk_rg_apollo_portMibInfo_get, .rtk_rg_portMibInfo_clear =rtk_rg_apollo_portMibInfo_clear, //80 .rtk_rg_stormControl_add =rtk_rg_apollo_stormControl_add, .rtk_rg_stormControl_del =rtk_rg_apollo_stormControl_del, .rtk_rg_stormControl_find =rtk_rg_apollo_stormControl_find, .rtk_rg_shareMeter_set =rtk_rg_apollo_shareMeter_set, .rtk_rg_shareMeter_get =rtk_rg_apollo_shareMeter_get, //85 .rtk_rg_qosStrictPriorityOrWeightFairQueue_set =rtk_rg_apollo_qosStrictPriorityOrWeightFairQueue_set, .rtk_rg_qosStrictPriorityOrWeightFairQueue_get =rtk_rg_apollo_qosStrictPriorityOrWeightFairQueue_get, .rtk_rg_qosInternalPriMapToQueueId_set =rtk_rg_apollo_qosInternalPriMapToQueueId_set, .rtk_rg_qosInternalPriMapToQueueId_get =rtk_rg_apollo_qosInternalPriMapToQueueId_get, .rtk_rg_qosInternalPriDecisionByWeight_set =rtk_rg_apollo_qosInternalPriDecisionByWeight_set, //90 .rtk_rg_qosInternalPriDecisionByWeight_get =rtk_rg_apollo_qosInternalPriDecisionByWeight_get, .rtk_rg_qosDscpRemapToInternalPri_set =rtk_rg_apollo_qosDscpRemapToInternalPri_set, .rtk_rg_qosDscpRemapToInternalPri_get =rtk_rg_apollo_qosDscpRemapToInternalPri_get, .rtk_rg_qosPortBasedPriority_set =rtk_rg_apollo_qosPortBasedPriority_set, .rtk_rg_qosPortBasedPriority_get =rtk_rg_apollo_qosPortBasedPriority_get, //95 .rtk_rg_qosDot1pPriRemapToInternalPri_set =rtk_rg_apollo_qosDot1pPriRemapToInternalPri_set, .rtk_rg_qosDot1pPriRemapToInternalPri_get =rtk_rg_apollo_qosDot1pPriRemapToInternalPri_get, .rtk_rg_qosDscpRemarkEgressPortEnableAndSrcSelect_set =rtk_rg_apollo_qosDscpRemarkEgressPortEnableAndSrcSelect_set, .rtk_rg_qosDscpRemarkEgressPortEnableAndSrcSelect_get =rtk_rg_apollo_qosDscpRemarkEgressPortEnableAndSrcSelect_get, .rtk_rg_qosDscpRemarkByInternalPri_set =rtk_rg_apollo_qosDscpRemarkByInternalPri_set, //100 .rtk_rg_qosDscpRemarkByInternalPri_get =rtk_rg_apollo_qosDscpRemarkByInternalPri_get, .rtk_rg_qosDscpRemarkByDscp_set =rtk_rg_apollo_qosDscpRemarkByDscp_set, .rtk_rg_qosDscpRemarkByDscp_get =rtk_rg_apollo_qosDscpRemarkByDscp_get, .rtk_rg_qosDot1pPriRemarkByInternalPriEgressPortEnable_set =rtk_rg_apollo_qosDot1pPriRemarkByInternalPriEgressPortEnable_set, .rtk_rg_qosDot1pPriRemarkByInternalPriEgressPortEnable_get =rtk_rg_apollo_qosDot1pPriRemarkByInternalPriEgressPortEnable_get, //105 .rtk_rg_qosDot1pPriRemarkByInternalPri_set =rtk_rg_apollo_qosDot1pPriRemarkByInternalPri_set, .rtk_rg_qosDot1pPriRemarkByInternalPri_get =rtk_rg_apollo_qosDot1pPriRemarkByInternalPri_get, .rtk_rg_portBasedCVlanId_set =rtk_rg_apollo_portBasedCVlanId_set, .rtk_rg_portBasedCVlanId_get =rtk_rg_apollo_portBasedCVlanId_get, .rtk_rg_portStatus_get 
=rtk_rg_apollo_portStatus_get, //110 #ifdef CONFIG_RG_NAPT_PORT_COLLISION_PREVENTION .rtk_rg_naptExtPortGet =rtk_rg_apollo_naptExtPortGet, .rtk_rg_naptExtPortFree =rtk_rg_apollo_naptExtPortFree, #endif .rtk_rg_classifyEntry_add =rtk_rg_apollo_classifyEntry_add, .rtk_rg_classifyEntry_find =rtk_rg_apollo_classifyEntry_find, .rtk_rg_classifyEntry_del =rtk_rg_apollo_classifyEntry_del, //115 .rtk_rg_svlanTpid_get= rtk_rg_apollo_svlanTpid_get, .rtk_rg_svlanTpid_set= rtk_rg_apollo_svlanTpid_set, .rtk_rg_svlanServicePort_set=rtk_rg_apollo_svlanServicePort_set, .rtk_rg_svlanServicePort_get=rtk_rg_apollo_svlanServicePort_get, .rtk_rg_pppoeInterfaceIdleTime_get=rtk_rg_apollo_pppoeInterfaceIdleTime_get, //120 .rtk_rg_gatewayServicePortRegister_add=rtk_rg_apollo_gatewayServicePortRegister_add, .rtk_rg_gatewayServicePortRegister_del=rtk_rg_apollo_gatewayServicePortRegister_del, .rtk_rg_gatewayServicePortRegister_find=rtk_rg_apollo_gatewayServicePortRegister_find, .rtk_rg_wlanDevBasedCVlanId_set=rtk_rg_apollo_wlanDevBasedCVlanId_set, .rtk_rg_wlanDevBasedCVlanId_get=rtk_rg_apollo_wlanDevBasedCVlanId_get, //125 .rtk_rg_wlanSoftwareSourceAddrLearningLimit_set=rtk_rg_apollo_wlanSoftwareSourceAddrLearningLimit_set, .rtk_rg_wlanSoftwareSourceAddrLearningLimit_get=rtk_rg_apollo_wlanSoftwareSourceAddrLearningLimit_get, .rtk_rg_naptFilterAndQos_add=rtk_rg_apollo_naptFilterAndQos_add, .rtk_rg_naptFilterAndQos_del=rtk_rg_apollo_naptFilterAndQos_del, .rtk_rg_naptFilterAndQos_find=rtk_rg_apollo_naptFilterAndQos_find, //130 .rtk_rg_pptpClientInfoBeforeDial_set=rtk_rg_apollo_pptpClientInfoBeforeDial_set, .rtk_rg_pptpClientInfoAfterDial_set=rtk_rg_apollo_pptpClientInfoAfterDial_set, .rtk_rg_l2tpClientInfoBeforeDial_set=rtk_rg_apollo_l2tpClientInfoBeforeDial_set, .rtk_rg_l2tpClientInfoAfterDial_set=rtk_rg_apollo_l2tpClientInfoAfterDial_set, .rtk_rg_stpBlockingPortmask_set=rtk_rg_apollo_stpBlockingPortmask_set, //135 .rtk_rg_stpBlockingPortmask_get=rtk_rg_apollo_stpBlockingPortmask_get, .rtk_rg_portIsolation_set=rtk_rg_apollo_portIsolation_set, .rtk_rg_portIsolation_get=rtk_rg_apollo_portIsolation_get, .rtk_rg_dsliteInfo_set=rtk_rg_apollo_dsliteInfo_set, .rtk_rg_pppoeDsliteInfoBeforeDial_set=rtk_rg_apollo_pppoeDsliteInfoBeforeDial_set, //140 .rtk_rg_pppoeDsliteInfoAfterDial_set=rtk_rg_apollo_pppoeDsliteInfoAfterDial_set, .rtk_rg_gponDsBcFilterAndRemarking_add=rtk_rg_apollo_gponDsBcFilterAndRemarking_add, .rtk_rg_gponDsBcFilterAndRemarking_del=rtk_rg_apollo_gponDsBcFilterAndRemarking_del, .rtk_rg_gponDsBcFilterAndRemarking_find=rtk_rg_apollo_gponDsBcFilterAndRemarking_find, .rtk_rg_gponDsBcFilterAndRemarking_del_all=rtk_rg_apollo_gponDsBcFilterAndRemarking_del_all, //145 .rtk_rg_gponDsBcFilterAndRemarking_Enable=rtk_rg_apollo_gponDsBcFilterAndRemarking_Enable, .rtk_rg_dsliteMcTable_set=rtk_rg_apollo_dsliteMcTable_set, .rtk_rg_dsliteMcTable_get=rtk_rg_apollo_dsliteMcTable_get, .rtk_rg_dsliteControl_set=rtk_rg_apollo_dsliteControl_set, .rtk_rg_dsliteControl_get=rtk_rg_apollo_dsliteControl_get, //150 .rtk_rg_interfaceMibCounter_del=NULL, .rtk_rg_interfaceMibCounter_get=NULL, .rtk_rg_redirectHttpAll_set=rtk_rg_apollo_redirectHttpAll_set, .rtk_rg_redirectHttpAll_get=rtk_rg_apollo_redirectHttpAll_get, .rtk_rg_redirectHttpURL_add=rtk_rg_apollo_redirectHttpURL_add, //155 .rtk_rg_redirectHttpURL_del=rtk_rg_apollo_redirectHttpURL_del, .rtk_rg_redirectHttpWhiteList_add=rtk_rg_apollo_redirectHttpWhiteList_add, .rtk_rg_redirectHttpWhiteList_del=rtk_rg_apollo_redirectHttpWhiteList_del, 
	.rtk_rg_redirectHttpRsp_set=rtk_rg_apollo_redirectHttpRsp_set,
	.rtk_rg_redirectHttpRsp_get=rtk_rg_apollo_redirectHttpRsp_get,	//160
	.rtk_rg_svlanTpid2_get= NULL,	//supported by 9602c
	.rtk_rg_svlanTpid2_set= NULL,	//supported by 9602c
	.rtk_rg_svlanTpid2_enable_get=NULL,	//supported by 9602c
	.rtk_rg_svlanTpid2_enable_set=NULL,	//supported by 9602c
	.rtk_rg_hostPoliceControl_set=NULL,	//165
	.rtk_rg_hostPoliceControl_get=NULL,
	.rtk_rg_hostPoliceLogging_get=NULL,
	.rtk_rg_hostPoliceLogging_del=NULL,
	.rtk_rg_redirectHttpCount_set=rtk_rg_apollo_redirectHttpCount_set,
	.rtk_rg_redirectHttpCount_get=rtk_rg_apollo_redirectHttpCount_get,	//170
	.rtk_rg_staticRoute_add=rtk_rg_apollo_staticRoute_add,
	.rtk_rg_staticRoute_del=rtk_rg_apollo_staticRoute_del,
	.rtk_rg_staticRoute_find=rtk_rg_apollo_staticRoute_find,
	.rtk_rg_aclLogCounterControl_get=rtk_rg_apollo_aclLogCounterControl_get,
	.rtk_rg_aclLogCounterControl_set=rtk_rg_apollo_aclLogCounterControl_set,	//175
	.rtk_rg_aclLogCounter_get=rtk_rg_apollo_aclLogCounter_get,
	.rtk_rg_aclLogCounter_reset=rtk_rg_apollo_aclLogCounter_reset,
};
#endif

#ifdef __KERNEL__
#else	//for Model code
#define GFP_ATOMIC 0

struct sk_buff *skb_clone(struct sk_buff *skb,int gfp_type)
{
	struct sk_buff *new_skb;
	new_skb=(struct sk_buff *)(unsigned long)rtlglue_malloc(sizeof(struct sk_buff));
	if(new_skb==NULL) return NULL;
	new_skb->len = skb->len;
	new_skb->data = skb->data;
	return new_skb;
}

void dev_kfree_skb_any(struct sk_buff *skb)
{
	skb->data = NULL;
	rtlglue_free(skb);
}

int re8686_send_with_txInfo_and_mask(struct sk_buff *skb, struct tx_info* ptxInfo, int ring_num, struct tx_info* ptxInfoMask)
{
#ifdef CONFIG_APOLLO_MODEL
	struct tx_info txInfo;
	int egressPort;

	memset(&txInfo,0,sizeof(struct tx_info));
	txInfo.opts1.dw = rg_kernel.txDesc.opts1.dw & rg_kernel.txDescMask.opts1.dw;
	txInfo.addr = rg_kernel.txDesc.addr;
	txInfo.opts2.dw = rg_kernel.txDesc.opts2.dw & rg_kernel.txDescMask.opts2.dw;
	txInfo.opts3.dw = rg_kernel.txDesc.opts3.dw & rg_kernel.txDescMask.opts3.dw;
	txInfo.opts4.dw = rg_kernel.txDesc.opts4.dw & rg_kernel.txDescMask.opts4.dw;

	dump_txInfo(txInfo);
	dump_packet(skb->data, skb->len+4, "\033[1;32mForwarding Engine Tx\033[m");

	if(txInfo.opts3.bit.tx_portmask!=0)
	{
		//Direct Tx
		DEBUG("\033[1;32mDirect Tx. @ %s %d\033[m\n",__FUNCTION__,__LINE__);
		model_fwdEngine_directTx(skb,txInfo);
		for(egressPort=0;egressPort<RTK_RG_PORT_MAX;egressPort++)
		{
			if(txInfo.opts3.bit.tx_portmask & (0x1<<egressPort))	//Direct Tx packet to NIC
				virtualGMACQueuePushPkt(egressPort,skb);
		}
	}
	else
	{
		//HW. lookup
		DEBUG("\033[1;32mH/W lookup. @ %s %d\033[m\n",__FUNCTION__,__LINE__);
		struct sk_buff egressPkt[RTK_RG_PORT_MAX];
		uint8 pkt[RTK_RG_PORT_MAX][2048]={0};
		for(egressPort=0;egressPort<RTK_RG_PORT_CPU;egressPort++)
		{
			//HW. lookup packet to NIC
			egressPkt[egressPort].data=pkt[egressPort];
			egressPkt[egressPort].len=0;
		}
		model_fwdEngine_hwLookup(skb,txInfo,egressPkt);
		for(egressPort=0;egressPort<RTK_RG_PORT_CPU;egressPort++)
		{
			if(egressPkt[egressPort].len)
				virtualGMACQueuePushPkt(egressPort,&egressPkt[egressPort]);
		}
	}
	dump_packet(skb->data, skb->len, "\033[1;32mGMAC Tx\033[m");
#endif
	return 0;
}

int re8670_rx_skb (struct re_private *cp, struct sk_buff *skb, struct rx_info *pRxInfo)
{
	printf("FIXME %s %d\n",__FUNCTION__,__LINE__);
	return 0;
}

struct sk_buff *re8670_getAlloc(unsigned int size)
{
	printf("FIXME %s %d\n",__FUNCTION__,__LINE__);
	return 0;
}

struct sk_buff *rtk_rg_skbCopyToPreAllocSkb(struct sk_buff *skb)
{
	struct sk_buff *new_skb;
	printf("FIXME %s %d (skb copy not ready!)\n",__FUNCTION__,__LINE__);
	//return new_skb;
	return 0;
}

struct sk_buff *_vlan_insert_tag(rtk_rg_pktHdr_t *pPktHdr, struct sk_buff *skb,u16 outter_tagif,u16 outter_protocal,u16 outter_content,u16 inner_tagif,u16 inner_protocal,u16 inner_content)
{
	printf("FIXME %s %d\n",__FUNCTION__,__LINE__);
	return 0;
}

struct sk_buff *_vlan_remove_tag(rtk_rg_pktHdr_t *pPktHdr,struct sk_buff *skb, u16 protocal)
{
	printf("FIXME %s %d\n",__FUNCTION__,__LINE__);
	return 0;
}

struct sk_buff *_vlan_remove_doubleTag(rtk_rg_pktHdr_t *pPktHdr,struct sk_buff *skb)
{
	printf("FIXME %s %d\n",__FUNCTION__,__LINE__);
	return 0;
}

struct sk_buff *_vlan_modify_tag(rtk_rg_pktHdr_t *pPktHdr, struct sk_buff *skb, u16 ori_protocal,u16 mod_protocal,u16 mod_content)
{
	printf("FIXME %s %d\n",__FUNCTION__,__LINE__);
	return 0;
}

struct sk_buff *_vlan_modify_doubleTag(rtk_rg_pktHdr_t *pPktHdr, struct sk_buff *skb, u16 mod_outter_protocal,u16 mod_outter_content,u16 mod_inner_protocal,u16 mod_inner_content)
{
	printf("FIXME %s %d\n",__FUNCTION__,__LINE__);
	return 0;
}
#endif

//#include "types.h"

/* Module Name: System*/
//rtk_rg_intfInfo_t RG_GLB_INTF_INFO[8];	//store each interface information, LAN or WAN
//int volatile RG_GLB_ARP_REQUEST_FINISHED[MAX_NETIF_SW_TABLE_SIZE];	//used to indicate the ARP request return or not

/*the option for rtk_init setting by virtualmac or not: 0:normal, 1:virtualmac*/
int virtualmacEnable = DISABLE;

/* ALL global Variables & Tables */
//__SRAM_FWDENG rtk_rg_globalDatabase_cache_t rg_db_cache;
//__SRAM_FWDENG uint32 __rg_end_of_sram;
__SRAM_FWDENG_DATA rtk_rg_globalDatabase_t rg_db;
#if defined(CONFIG_RG_FLOW_BASED_PLATFORM)
extern rtk_rg_fbDatabase_t rgpro_db;
#endif
rtk_rg_globalKernel_t rg_kernel;
char mt_watch_tmp[512];
char StringErrName[64]={0};

int32 apollo_mac_init(void);

#include <rtk_rg_mappingAPI.h>

#ifdef CONFIG_APOLLOPRO_FPGA
extern int single_test(struct file *file, const char __user *buffer, unsigned long count, void *data);
#endif

#if defined(CONFIG_RTL9600_SERIES)
/*Apollo ACL related patch used APIs */
extern int32 _rtk_rg_AclEgressPriorityPattern_Check(void);
extern int32 _rtk_rg_acl_reserved_stag_ingressCVidFromPVID(uint32 in_pvid, uint32 in_port);
#ifdef CONFIG_DUALBAND_CONCURRENT
extern int32 _rtk_rg_acl_reserved_wifi_internalVidPriTranslateForSlave(uint32 in_cvid, uint32 in_cpri, uint32 tran_cvid, uint32 tran_pri);
#endif
#ifdef RTK_RG_INGRESS_QOS_TEST_PATCH
extern int _rtk_rg_qos_acl_patch(rtk_rg_mac_port_idx_t port, uint32 rate);
extern int _rtk_rg_qos_acl_flush(void);
#endif
#ifdef __KERNEL__	//model code skips HW patch
extern int _rtk_rg_acl_reserved_pppoeCvidIssue_svid2IngressCvid(int wan_port);
#endif
extern int _rtk_rg_acl_reserved_pppoeCvidIssue_spriRemap2InternalPri(int wan_port, int spri, int intpri);
#endif
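/*
 * Note (illustrative, not part of the original sources): the helper below,
 * _rtk_rg_switch_version_get(), wraps rtk_switch_version_get() and compensates
 * for the ApolloMP revision offset described in its inline comment, so callers
 * such as rtk_rg_apollo_driverVersion_get() can cache a revision that matches
 * the SW definitions.  A minimal usage sketch, assuming only the RT_ERR_OK and
 * DEBUG conventions already used in this file:
 *
 *	uint32 chipId, rev, subtype;
 *	if(_rtk_rg_switch_version_get(&chipId, &rev, &subtype)==RT_ERR_OK)
 *		DEBUG("chip:0x%x rev:0x%x subtype:0x%x", chipId, rev, subtype);
 */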
int32 _rtk_rg_switch_version_get(uint32 * pChipId, uint32 * pRev, uint32 * pSubtype)
{
	int ret;
	uint32 ChipId, Rev, Subtype;

	ret=rtk_switch_version_get(&ChipId, &Rev, &Subtype);
	*pChipId=ChipId;
	//20141119LUKE: fix hw version shift than sw definitions in apollomp
	if(ChipId==APOLLOMP_CHIP_ID && Rev>CHIP_REV_ID_0)
		*pRev=Rev-1;
	else
		*pRev=Rev;	//ensure *pRev is always written back (non-ApolloMP chips or rev 0 use the HW value directly)
	*pSubtype=Subtype;
	return ret;
}

rtk_rg_err_code_t rtk_rg_apollo_driverVersion_get(rtk_rg_VersionString_t *version_string)
{
	//Check the parameter
	if(version_string == NULL) RETURN_ERR(RT_ERR_RG_NULL_POINTER);

	//Clear and initialization
	memset(version_string->version_string, 0, sizeof(rtk_rg_VersionString_t));

	//Check switch chip revision
	ASSERT_EQ(_rtk_rg_switch_version_get(&rg_kernel.apolloChipId,&rg_kernel.apolloRev,&rg_kernel.apolloSubtype),RT_ERR_OK);

	//Return the version code
	snprintf(version_string->version_string,127,"Lunar:%s Switch:%s Diag:%s RG:%s User:%s (0x%x 0x%x 0x%x)",
		LUNAR_SVN_VERSION,SWITCH_SVN_VERSION,DIAG_SVN_VERSION,ROMEDRIVER_SVN_VERSION,USER_SVN_VERSION,
		rg_kernel.apolloChipId,rg_kernel.apolloRev,rg_kernel.apolloSubtype);
	//sprintf(version_string->version_string,"%s",ROMEDRIVER_VERSION);

	return (RT_ERR_RG_OK);
}

#if !defined(CONFIG_XDSL_NEW_HWNAT_DRIVER)
rtk_rg_successFailReturn_t _rtk_rg_internal_support_check(void)
{
	//=============== Module support check =========================
	uint32 off_1=0;
	uint32 off_2=0;

	MEM32_WRITE(0xbb010004,0xa0000000);
	off_1 = REG32(0xbb010004);
	MEM32_WRITE(0xbb010004,0x00000000);
	MEM32_WRITE(0xbb010008,0xb0000000);
	off_2 = REG32(0xbb010008);
	MEM32_WRITE(0xbb010008,0x00000000);
	off_2 &= 0x00007000;

#if defined(CONFIG_RTL9600_SERIES)
	switch(off_1)
	{
	case REG_SHIFT_BASE_1:
		switch(off_2)
		{
		case REG_SHIFT_3_0:
			rg_db.systemGlobal.internalSupportMask |= RTK_RG_INTERNAL_SUPPORT_BIT1;
			rg_db.systemGlobal.internalSupportMask |= RTK_RG_INTERNAL_SUPPORT_BIT2;
			rg_db.systemGlobal.internalSupportMask |= RTK_RG_INTERNAL_SUPPORT_BIT3;
			rg_db.systemGlobal.internalSupportMask |= RTK_RG_INTERNAL_SUPPORT_BIT4;
			break;
		case REG_SHIFT_3_1:
			rg_db.systemGlobal.internalSupportMask |= RTK_RG_INTERNAL_SUPPORT_BIT1;
			rg_db.systemGlobal.internalSupportMask |= RTK_RG_INTERNAL_SUPPORT_BIT2;
			rg_db.systemGlobal.internalSupportMask |= RTK_RG_INTERNAL_SUPPORT_BIT3;
			rg_db.systemGlobal.internalSupportMask |= RTK_RG_INTERNAL_SUPPORT_BIT4;
			break;
		case REG_SHIFT_3_2:
			rg_db.systemGlobal.internalSupportMask |= RTK_RG_INTERNAL_SUPPORT_BIT1;
			rg_db.systemGlobal.internalSupportMask |= RTK_RG_INTERNAL_SUPPORT_BIT2;
			rg_db.systemGlobal.internalSupportMask |= RTK_RG_INTERNAL_SUPPORT_BIT3;
			rg_db.systemGlobal.internalSupportMask |= RTK_RG_INTERNAL_SUPPORT_BIT4;
			break;
		case REG_SHIFT_3_3:
		default:
			rg_db.systemGlobal.internalSupportMask |= RTK_RG_INTERNAL_SUPPORT_BIT1;
			rg_db.systemGlobal.internalSupportMask |= RTK_RG_INTERNAL_SUPPORT_BIT2;
			rg_db.systemGlobal.internalSupportMask |= RTK_RG_INTERNAL_SUPPORT_BIT4;
			break;
		}
		break;
	case REG_SHIFT_1:
		rg_db.systemGlobal.internalSupportMask |= RTK_RG_INTERNAL_SUPPORT_BIT0;
		rg_db.systemGlobal.internalSupportMask |= RTK_RG_INTERNAL_SUPPORT_BIT3;
		rg_db.systemGlobal.internalSupportMask |= RTK_RG_INTERNAL_SUPPORT_BIT4;
		break;
	case REG_SHIFT_2:
		rg_db.systemGlobal.internalSupportMask |= RTK_RG_INTERNAL_SUPPORT_BIT0;
		rg_db.systemGlobal.internalSupportMask |= RTK_RG_INTERNAL_SUPPORT_BIT3;
		rg_db.systemGlobal.internalSupportMask |= RTK_RG_INTERNAL_SUPPORT_BIT4;
		break;
	default:
		break;
	}
#elif defined(CONFIG_RTL9602C_SERIES)
	switch(off_1)
	{
	case REG_SHIFT_BASE_2:
		rg_db.systemGlobal.internalSupportMask |= RTK_RG_INTERNAL_SUPPORT_BIT0;
		break;
case REG_SHIFT_BASE_3: default: rg_db.systemGlobal.internalSupportMask |= RTK_RG_INTERNAL_SUPPORT_BIT1; break; } #endif DEBUG("internalSupportMask=0x%x", rg_db.systemGlobal.internalSupportMask); return RT_ERR_RG_OK; } #endif void _rtk_rg_set_initState(rtk_rg_initState_t newState) { rg_lock(&rg_kernel.initLock); //========================critical region start========================= rg_kernel.init_state=newState; //========================critical region end========================= rg_unlock(&rg_kernel.initLock); } __IRAM_FWDENG rtk_rg_initState_t _rtk_rg_get_initState(void) { rtk_rg_initState_t ret; rg_lock(&rg_kernel.initLock); //========================critical region start========================= ret=rg_kernel.init_state; //========================critical region end========================= rg_unlock(&rg_kernel.initLock); return ret; } rtk_rg_err_code_t rtk_rg_apollo_initParam_get(rtk_rg_initParams_t *init_param) { //Check the parameter if(init_param == NULL) RETURN_ERR(RT_ERR_RG_NULL_POINTER); //Copy from rg_db memcpy(init_param, &rg_db.systemGlobal.initParam, sizeof(rtk_rg_initParams_t)); #if 0 //Checking for parameters initialized if(rg_db.systemGlobal.initParam.arpAddByHwCallBack == NULL && rg_db.systemGlobal.initParam.arpDelByHwCallBack == NULL && rg_db.systemGlobal.initParam.macAddByHwCallBack == NULL && rg_db.systemGlobal.initParam.macDelByHwCallBack == NULL && rg_db.systemGlobal.initParam.naptAddByHwCallBack == NULL && rg_db.systemGlobal.initParam.naptDelByHwCallBack == NULL && rg_db.systemGlobal.initParam.routingAddByHwCallBack == NULL && rg_db.systemGlobal.initParam.routingDelByHwCallBack == NULL && rg_db.systemGlobal.initParam.bindingAddByHwCallBack == NULL && rg_db.systemGlobal.initParam.bindingDelByHwCallBack == NULL && rg_db.systemGlobal.initParam.naptInboundConnLookupFirstCallBack == NULL && rg_db.systemGlobal.initParam.naptInboundConnLookupSecondCallBack == NULL && rg_db.systemGlobal.initParam.naptInboundConnLookupThirdCallBack == NULL && rg_db.systemGlobal.initParam.interfaceAddByHwCallBack == NULL && rg_db.systemGlobal.initParam.interfaceDelByHwCallBack == NULL && rg_db.systemGlobal.initParam.neighborAddByHwCallBack == NULL && rg_db.systemGlobal.initParam.neighborDelByHwCallBack == NULL && rg_db.systemGlobal.initParam.v6RoutingAddByHwCallBack == NULL && rg_db.systemGlobal.initParam.v6RoutingDelByHwCallBack == NULL && rg_db.systemGlobal.initParam.pppoeBeforeDiagByHwCallBack == NULL) RETURN_ERR(RT_ERR_RG_INITPM_UNINIT); //Return each function pointer init_param->arpAddByHwCallBack = rg_db.systemGlobal.initParam.arpAddByHwCallBack; init_param->arpDelByHwCallBack = rg_db.systemGlobal.initParam.arpDelByHwCallBack; init_param->macAddByHwCallBack = rg_db.systemGlobal.initParam.macAddByHwCallBack; init_param->macDelByHwCallBack = rg_db.systemGlobal.initParam.macDelByHwCallBack; init_param->naptAddByHwCallBack = rg_db.systemGlobal.initParam.naptAddByHwCallBack; init_param->naptDelByHwCallBack = rg_db.systemGlobal.initParam.naptDelByHwCallBack; init_param->routingAddByHwCallBack = rg_db.systemGlobal.initParam.routingAddByHwCallBack; init_param->routingDelByHwCallBack = rg_db.systemGlobal.initParam.routingDelByHwCallBack; init_param->bindingAddByHwCallBack = rg_db.systemGlobal.initParam.bindingAddByHwCallBack; init_param->bindingDelByHwCallBack = rg_db.systemGlobal.initParam.bindingDelByHwCallBack; init_param->naptInboundConnLookupFirstCallBack = rg_db.systemGlobal.initParam.naptInboundConnLookupFirstCallBack; init_param->naptInboundConnLookupSecondCallBack = 
rg_db.systemGlobal.initParam.naptInboundConnLookupSecondCallBack; init_param->naptInboundConnLookupThirdCallBack = rg_db.systemGlobal.initParam.naptInboundConnLookupThirdCallBack; init_param->interfaceAddByHwCallBack = rg_db.systemGlobal.initParam.interfaceAddByHwCallBack; init_param->interfaceDelByHwCallBack = rg_db.systemGlobal.initParam.interfaceDelByHwCallBack; init_param->neighborAddByHwCallBack = rg_db.systemGlobal.initParam.neighborAddByHwCallBack; init_param->neighborDelByHwCallBack = rg_db.systemGlobal.initParam.neighborDelByHwCallBack; init_param->v6RoutingAddByHwCallBack = rg_db.systemGlobal.initParam.v6RoutingAddByHwCallBack; init_param->v6RoutingDelByHwCallBack = rg_db.systemGlobal.initParam.v6RoutingDelByHwCallBack; init_param->pppoeBeforeDiagByHwCallBack = rg_db.systemGlobal.initParam.pppoeBeforeDiagByHwCallBack; #endif return (RT_ERR_RG_OK); } void rtk_rg_swRateLimitTimerFunc(unsigned long task_priv) { //DEBUG("rtk_rg_BCMCRateLimitTimerFunc triggered, original BCByteCount=%d IPv6MCByteCount=%d",rg_db.systemGlobal.BCByteCount,rg_db.systemGlobal.IPv6MCByteCount); //clear the accumulate pkt counter rg_db.systemGlobal.BCByteCount = 0; rg_db.systemGlobal.IPv6MCByteCount = 0; rg_db.systemGlobal.IPv4MCByteCount = 0; rg_db.systemGlobal.unKnownDAByteCount = 0; if(rg_db.systemGlobal.naptSwRateLimitTriggered) //clear if this functional is enabled to save time bzero(rg_db.systemGlobal.naptSwRateLimitByteCount,sizeof(uint32)*MAX_NAPT_FILER_SW_ENTRY_SIZE); #ifdef CONFIG_MASTER_WLAN0_ENABLE rg_db.systemGlobal.wifiIngressRateLimitDevOverMask = 0; rg_db.systemGlobal.wifiEgressRateLimitDevOverMask = 0; memset(rg_db.systemGlobal.wifiIngressByteCount,0,sizeof(int)*MAX_WLAN_DEVICE_NUM); memset(rg_db.systemGlobal.wifiEgressByteCount,0,sizeof(int)*MAX_WLAN_DEVICE_NUM); #endif #ifdef __KERNEL__ mod_timer(&rg_kernel.swRateLimitTimer, jiffies+(RTK_RG_SWRATELIMIT_SECOND*TICKTIME_PERIOD/16/*unit:(1/16)sec*/)); #endif } int _rtk_rg_igmpSnoopingOnOff(int isOn, int onlyChangeTimer, int isIVL) { int ret,i,idx=0; rtk_rg_multicastFlow_t mc; #ifdef __KERNEL__ #if defined(CONFIG_RG_IGMP_SNOOPING) || defined(CONFIG_RG_MLD_SNOOPING) extern struct timer_list mCastQuerytimer; extern struct timer_list igmpSysTimer; void rtl_mCastQueryTimerExpired(unsigned long arg); void rtl_multicastSysTimerExpired(uint32 expireDada); if(timer_pending(&mCastQuerytimer)) del_timer(&mCastQuerytimer); if(timer_pending(&igmpSysTimer)) del_timer(&igmpSysTimer); #endif #endif if(onlyChangeTimer) goto changeTimer; mc.isIPv6=0; mc.multicast_ipv4_addr=0xeffffffa; mc.port_mask.portmask=0x1<<RTK_RG_PORT_CPU; if(isOn) { rtk_rg_ipv4MulticastFlow_t ipv4Mc; DEBUG("IGMP Snooping enable..."); #if defined(CONFIG_RTL9607C_SERIES) // ipmc mode should be configured before all setting start ASSERT_EQ(rtk_l2_ipmcMode_set(LOOKUP_ON_DIP_AND_SIP),RT_ERR_RG_OK); //path3 #elif defined(CONFIG_RTL9600_SERIES) ASSERT_EQ(rtk_l2_ipmcMode_set(LOOKUP_ON_DIP_AND_SIP),RT_ERR_RG_OK); ASSERT_EQ(rtk_l2_ipmcGroupLookupMissHash_set(HASH_DIP_ONLY),RT_ERR_RG_OK); #endif for(i=0;i<RTK_RG_PORT_CPU;i++) { if(rg_db.systemGlobal.multicastProtocol!=RG_MC_MLD_ONLY) ret=RTK_L2_PORTLOOKUPMISSACTION_SET(i,DLF_TYPE_IPMC,ACTION_DROP); else ret=RTK_L2_PORTLOOKUPMISSACTION_SET(i,DLF_TYPE_IPMC,rg_kernel.layer2LookupMissFlood2CPU==RTK_RG_DISABLED?ACTION_TRAP2CPU:ACTION_FORWARD); if(ret!=RT_ERR_RG_OK) return ret; //ret=rtk_l2_portLookupMissAction_set(i,DLF_TYPE_IP6MC,ACTION_TRAP2CPU); //to fix ICMPv6 can't to ping issue.(from LAN to Gateway) 
if(rg_db.systemGlobal.multicastProtocol!=RG_MC_IGMP_ONLY) ret=RTK_L2_PORTLOOKUPMISSACTION_SET(i,DLF_TYPE_IP6MC,ACTION_DROP);//to fix IPv6 Multicast flooding without join issue. ( ICMPv6 can't to ping issue will fixed(trap) by ACL) else ret=RTK_L2_PORTLOOKUPMISSACTION_SET(i,DLF_TYPE_IP6MC,rg_kernel.layer2LookupMissFlood2CPU==RTK_RG_DISABLED?ACTION_TRAP2CPU:ACTION_FORWARD); if(ret!=RT_ERR_RG_OK) return ret; #if defined(CONFIG_RTL9600_SERIES) #else ret=RTK_L2_PORTLOOKUPMISSACTION_SET(i,DLF_TYPE_MCAST,rg_kernel.layer2LookupMissFlood2CPU==RTK_RG_DISABLED?ACTION_TRAP2CPU:ACTION_FORWARD); if(ret!=RT_ERR_RG_OK) return ret; #endif } if(rg_db.systemGlobal.multicastProtocol!=RG_MC_MLD_ONLY) { if(isIVL) //IVL, trap SSDP by ACL because VLAN is hard to decide. { assert_ok(_rtk_rg_aclAndCfReservedRuleAdd(RTK_RG_ACLANDCF_RESERVED_MULTICAST_SSDP_TRAP, NULL)); } else //SVL, saveing the ACL resource by LUT { ret=rtk_rg_apollo_multicastFlow_find(&mc,&idx); if(ret==RT_ERR_RG_NO_MORE_ENTRY_FOUND) { //Create IPMC flow for SSDP 239.255.255.250 to trap from LAN port(because if igmpSnooping is on, the LAN port's IPMC will DROP) memset(&ipv4Mc,0,sizeof(rtk_rg_ipv4MulticastFlow_t)); ipv4Mc.srcFilterMode=RTK_RG_IPV4MC_DONT_CARE_SRC; ipv4Mc.groupIp=0xeffffffa; //239.255.255.250 ipv4Mc.ipm_portmask.portmask=0x1<<RTK_RG_PORT_CPU; #if defined(CONFIG_RTL9600_SERIES) if(rg_db.systemGlobal.initParam.igmpSnoopingEnable==2) //9600sersie lut path3 not support routing ipv4Mc.routingMode = RTK_RG_IPV4MC_DIS_ROUTING; #endif ASSERT_EQ(_rtk_rg_apollo_ipv4MultiCastFlow_add(&ipv4Mc,&ret),RT_ERR_RG_OK); } } //224.0.0.0~224.0.0.255 trap assert_ok(_rtk_rg_aclAndCfReservedRuleAdd(RTK_RG_ACLANDCF_RESERVED_MULTICAST_RMA_TRAP, NULL)); } else { //make sure SSDP trap rule is deleted, no matter original is IVL by ACL or SVL by LUT. 
ret=rtk_rg_apollo_multicastFlow_find(&mc,&idx); if(ret==RT_ERR_RG_OK) { ASSERT_EQ(rtk_rg_apollo_multicastFlow_del(idx),RT_ERR_RG_OK); } assert_ok(_rtk_rg_aclAndCfReservedRuleDel(RTK_RG_ACLANDCF_RESERVED_MULTICAST_SSDP_TRAP)); } if(rg_db.systemGlobal.multicastProtocol!=RG_MC_IGMP_ONLY) { //translate IP6MC VID to DEFAULT_CPU_VLAN for passthrought IPv6 Routing Wan (Egress VLAN Filter) #if defined(CONFIG_RTL9600_SERIES) rg_db.systemGlobal.ipv6MC_translate_ingressVID_enable=RTK_RG_ENABLED; assert_ok(_rtk_rg_aclAndCfReservedRuleAdd(RTK_RG_ACLANDCF_RESERVED_MULTICAST_VID_TRANSLATE_FOR_IPV6_PASSTHROUGHT, NULL)); #else rg_db.systemGlobal.ipv6MC_translate_ingressVID_enable=RTK_RG_DISABLED; assert_ok(_rtk_rg_aclAndCfReservedRuleDel(RTK_RG_ACLANDCF_RESERVED_MULTICAST_VID_TRANSLATE_FOR_IPV6_PASSTHROUGHT)); #endif //[20140526]Permit UDP to avoid trap(avoid multicast data flooding, if multicastDA unknow will drop/trap by rtk_l2_portLookupMissAction_set), //but always trap else packet(such as RaDVD, ICMPv6 ...etc) assert_ok(_rtk_rg_aclAndCfReservedRuleAdd(RTK_RG_ACLANDCF_RESERVED_MULTICAST_TRAP_AND_GLOBAL_SCOPE_PERMIT, NULL)); } else { rg_db.systemGlobal.ipv6MC_translate_ingressVID_enable=RTK_RG_DISABLED; assert_ok(_rtk_rg_aclAndCfReservedRuleDel(RTK_RG_ACLANDCF_RESERVED_MULTICAST_VID_TRANSLATE_FOR_IPV6_PASSTHROUGHT)); assert_ok(_rtk_rg_aclAndCfReservedRuleDel(RTK_RG_ACLANDCF_RESERVED_MULTICAST_TRAP_AND_GLOBAL_SCOPE_PERMIT)); } } else { DEBUG("IGMP Snooping disable..."); #if defined(CONFIG_RG_IGMP_SNOOPING) || defined(CONFIG_RG_MLD_SNOOPING) #if defined(CONFIG_MASTER_WLAN0_ENABLE) && CONFIG_WIFI_REF_IGMP //rg_db.systemGlobal.initParam.igmpWifiRefEnable=0; rg_db.systemGlobal.igmpWifiRefEnable=0; #endif #endif for(i=0;i<RTK_RG_PORT_CPU;i++) { ret=RTK_L2_PORTLOOKUPMISSACTION_SET(i,DLF_TYPE_IPMC,rg_kernel.layer2LookupMissFlood2CPU==RTK_RG_DISABLED?ACTION_TRAP2CPU:ACTION_FORWARD); if(ret!=RT_ERR_RG_OK) return ret; ret=RTK_L2_PORTLOOKUPMISSACTION_SET(i,DLF_TYPE_IP6MC,rg_kernel.layer2LookupMissFlood2CPU==RTK_RG_DISABLED?ACTION_TRAP2CPU:ACTION_FORWARD); if(ret!=RT_ERR_RG_OK) return ret; #if defined(CONFIG_RTL9600_SERIES) #else ret=RTK_L2_PORTLOOKUPMISSACTION_SET(i,DLF_TYPE_MCAST,rg_kernel.layer2LookupMissFlood2CPU==RTK_RG_DISABLED?ACTION_TRAP2CPU:ACTION_FORWARD); if(ret!=RT_ERR_RG_OK) return ret; #endif } //delete SSDP trap of SVL ret=rtk_rg_apollo_multicastFlow_find(&mc,&idx); if(ret==RT_ERR_RG_OK) { ASSERT_EQ(rtk_rg_apollo_multicastFlow_del(idx),RT_ERR_RG_OK); } //delete SSDP trap assert_ok(_rtk_rg_aclAndCfReservedRuleDel(RTK_RG_ACLANDCF_RESERVED_MULTICAST_SSDP_TRAP)); rg_db.systemGlobal.ipv6MC_translate_ingressVID_enable=RTK_RG_DISABLED; assert_ok(_rtk_rg_aclAndCfReservedRuleDel(RTK_RG_ACLANDCF_RESERVED_MULTICAST_VID_TRANSLATE_FOR_IPV6_PASSTHROUGHT)); assert_ok(_rtk_rg_aclAndCfReservedRuleDel(RTK_RG_ACLANDCF_RESERVED_MULTICAST_TRAP_AND_GLOBAL_SCOPE_PERMIT)); } #if defined(CONFIG_RG_IGMP_SNOOPING) || defined(CONFIG_RG_MLD_SNOOPING) //Flush IGMP or MLD entry based on igmpSnoopingEnable and multicastProtocol if(isOn==0) rtl_flushAllIgmpRecord(1); else rtl_flushAllIgmpRecord(0); #endif //for Multicast PATH3 (LUT PATH3, CVID+GIP) #if RTK_RG_MULTICAST_MODE_MACFID /* path 2*/ ASSERT_EQ(rtk_l2_ipmcMode_set(LOOKUP_ON_MAC_AND_VID_FID),RT_ERR_RG_OK); #if defined(CONFIG_RTL9602C_SERIES) //ipv6 path2 hash by (mac,vid/fid) rtk_l2_ipv6mcMode_set(LOOKUP_ON_MAC_AND_VID_FID); #endif #else /* ipv4/v6 dip/sip hash (include IPM routing) */ #if defined(CONFIG_RTL9602C_SERIES) 
ASSERT_EQ(rtk_l2_ipmcMode_set(LOOKUP_ON_DIP_AND_VID_FID),RT_ERR_RG_OK); //path3 ASSERT_EQ(rtk_l2_ipv6mcMode_set(LOOKUP_ON_DIP),RT_ERR_RG_OK); //path4 //9602bvb don't have rtk_l2_ipmcGroupLookupMissHash_set #elif defined(CONFIG_RTL9607C_SERIES) ASSERT_EQ(rtk_l2_ipmcMode_set(LOOKUP_ON_DIP_AND_SIP),RT_ERR_RG_OK); //path3 #else ASSERT_EQ(rtk_l2_ipmcMode_set(LOOKUP_ON_DIP_AND_SIP),RT_ERR_RG_OK); ASSERT_EQ(rtk_l2_ipmcGroupLookupMissHash_set(HASH_DIP_ONLY),RT_ERR_RG_OK); #endif #endif //20141215LUKE: enable IVL support if(isIVL) { /* path 2*/ ASSERT_EQ(rtk_l2_ipmcMode_set(LOOKUP_ON_MAC_AND_VID_FID),RT_ERR_RG_OK); } //sync the ivl/svl Multicast state to rg_db #if 0 rg_db.systemGlobal.initParam.ivlMulticastSupport = isIVL; #else if (rg_db.systemGlobal.initParam.igmpSnoopingEnable==2 && rg_db.systemGlobal.initParam.ivlMulticastSupport!=0) { WARNING("rg init failed. if igmpSnoopingEnable(=2, care source mode), then ivlMulticastSupport force be 0\n"); rg_db.systemGlobal.initParam.ivlMulticastSupport = 0; } else { rg_db.systemGlobal.initParam.ivlMulticastSupport = isIVL; } #endif changeTimer: #ifdef __KERNEL__ #if defined(CONFIG_RG_IGMP_SNOOPING) || defined(CONFIG_RG_MLD_SNOOPING) if(isOn) { init_timer(&igmpSysTimer); igmpSysTimer.function = (void*)rtl_multicastSysTimerExpired; if(rg_db.systemGlobal.igmp_sys_timer_sec==0) rg_db.systemGlobal.igmp_sys_timer_sec=RTK_RG_DEFAULT_IGMP_SYS_TIMER_INTERVAL; mod_timer(&igmpSysTimer, jiffies+rg_db.systemGlobal.igmp_sys_timer_sec*CONFIG_HZ); if(rg_db.systemGlobal.mcast_query_sec!=0) { init_timer(&mCastQuerytimer); mCastQuerytimer.function = (void*)rtl_mCastQueryTimerExpired; //rg_db.systemGlobal.mcast_query_sec=RTK_RG_DEFAULT_MCAST_QUERY_INTERVAL; mod_timer(&mCastQuerytimer, jiffies+rg_db.systemGlobal.mcast_query_sec*CONFIG_HZ); } } #endif #endif return (RT_ERR_RG_OK); } void _rtk_rg_default_svlan_manipulate(void) { #if defined(CONFIG_APOLLO) int ret; rtk_portmask_t mbpmsk, utpmsk; //#if defined(CONFIG_GPON_FEATURE) || defined(CONFIG_EPON_FEATURE) if (rg_kernel.stag_enable==RTK_RG_ENABLED) { #if defined(CONFIG_RTL9600_SERIES) if((rg_db.systemGlobal.internalSupportMask & RTK_RG_INTERNAL_SUPPORT_BIT2)) #endif { //SVLAN initialization //Create SVID[1], member=all, untag=all //svlan set svlan-table svid fwdVLAN_CPU member all,6 //svlan set svlan-table svid fwdVLAN_CPU untag-member all,6 ret=rtk_svlan_create(rg_db.systemGlobal.initParam.fwdVLAN_CPU_SVLAN); if((ret!=RT_ERR_OK) && #if defined(CONFIG_RTL9600_SERIES) (ret!=RT_ERR_SVLAN_EXIST) #else (ret!=RT_ERR_VLAN_EXIST) #endif )WARNING("SVLAN %d create failed: %x",rg_db.systemGlobal.initParam.fwdVLAN_CPU_SVLAN,ret); mbpmsk.bits[0]=RTK_RG_ALL_MAC_PORTMASK; utpmsk.bits[0]=RTK_RG_ALL_MAC_PORTMASK; assert_ok(RTK_SVLAN_MEMBERPORT_SET(rg_db.systemGlobal.initParam.fwdVLAN_CPU_SVLAN, &mbpmsk, &utpmsk)); #if defined(CONFIG_RTL9600_SERIES) //Assign FID to WAN_FID assert_ok(rtk_svlan_fidEnable_set(rg_db.systemGlobal.initParam.fwdVLAN_CPU_SVLAN,ENABLED)); assert_ok(rtk_svlan_fid_set(rg_db.systemGlobal.initParam.fwdVLAN_CPU_SVLAN,WAN_FID)); //Set SVLAN untag action to assign SVLAN ID fwdVLAN_CPU assert_ok(rtk_svlan_untagAction_set(SVLAN_ACTION_SVLAN, rg_db.systemGlobal.initParam.fwdVLAN_CPU_SVLAN)); //Set SVLAN unmatch action to assign ingress SVLAN ID fwdVLAN_CPU, but keep original ingress SVID assert_ok(rtk_svlan_unmatchAction_set(SVLAN_ACTION_SVLAN, rg_db.systemGlobal.initParam.fwdVLAN_CPU_SVLAN)); #else //Set SVLAN untag action to assign SVLAN ID fwdVLAN_CPU assert_ok(rtk_svlan_untagAction_set(SVLAN_ACTION_PSVID, 
rg_db.systemGlobal.initParam.fwdVLAN_CPU_SVLAN)); for(ret=0;ret<RTK_RG_MAX_MAC_PORT;ret++) { assert_ok(rtk_svlan_portSvid_set(ret, rg_db.systemGlobal.initParam.fwdVLAN_CPU_SVLAN)); } #endif } } else //destroy svlan default if no needed #if defined(CONFIG_RTL9600_SERIES) if((rg_db.systemGlobal.internalSupportMask & RTK_RG_INTERNAL_SUPPORT_BIT2)) #endif { ret=rtk_svlan_destroy(rg_db.systemGlobal.initParam.fwdVLAN_CPU_SVLAN); #if defined(CONFIG_RTL9600_SERIES) if((ret!=RT_ERR_OK)&&(ret!=RT_ERR_SVLAN_NOT_EXIST))WARNING("SVLAN %d destroy failed: %x",rg_db.systemGlobal.initParam.fwdVLAN_CPU_SVLAN,ret); #else if((ret!=RT_ERR_OK)&&(ret!=RT_ERR_VLAN_ENTRY_NOT_FOUND))WARNING("SVLAN %d destroy failed: %x",rg_db.systemGlobal.initParam.fwdVLAN_CPU_SVLAN,ret); #endif } //#endif #endif } #ifdef CONFIG_MASTER_WLAN0_ENABLE void _rtk_rg_check_wlan_device_exist_or_not(void) { int i,ret; rtk_portmask_t mac_pmask, etp_pmask; //Check if WLAN0 device exist or not for(i=0;i<MAX_WLAN_DEVICE_NUM;i++) { ret=0; switch(i) { case 0: if(wlan_root_netdev!=NULL)ret=1; break; case 1: case 2: case 3: case 4: if(wlan_vap_netdev[i-1]!=NULL)ret=1; break; case 5: case 6: case 7: case 8: case 9: case 10: case 11: case 12: if(wlan_wds_netdev[i-5]!=NULL)ret=1; break; #ifdef CONFIG_RTL_CLIENT_MODE_SUPPORT case 13: if(wlan_vxd_netdev!=NULL)ret=1; break; #endif #if defined(CONFIG_RG_WLAN_HWNAT_ACCELERATION) case WLAN_DEVICE_NUM: if(wlan1_root_netdev!=NULL)ret=1; break; case WLAN_DEVICE_NUM+1: case WLAN_DEVICE_NUM+2: case WLAN_DEVICE_NUM+3: case WLAN_DEVICE_NUM+4: if(wlan1_vap_netdev[i-WLAN_DEVICE_NUM-1]!=NULL)ret=1; break; case WLAN_DEVICE_NUM+5: case WLAN_DEVICE_NUM+6: case WLAN_DEVICE_NUM+7: case WLAN_DEVICE_NUM+8: case WLAN_DEVICE_NUM+9: case WLAN_DEVICE_NUM+10: case WLAN_DEVICE_NUM+11: case WLAN_DEVICE_NUM+12: if(wlan1_wds_netdev[i-WLAN_DEVICE_NUM-5]!=NULL)ret=1; break; #ifdef CONFIG_RTL_CLIENT_MODE_SUPPORT case WLAN_DEVICE_NUM+13: if(wlan1_vxd_netdev!=NULL)ret=1; break; #endif #endif default: break; } #ifdef CONFIG_DUALBAND_CONCURRENT if(rg_db.systemGlobal.enableSlaveSSIDBind && i>=WLAN_DEVICE_NUM)ret=1; #endif //20161107LUKE: disable wifi device check by default. 
if(!rg_db.systemGlobal.checkWifiDev)ret=1; rg_db.systemGlobal.wlan0BindDecision[i].exist=ret; } //20160526LUKE: update dev-based VLAN of wifi devices mac_pmask.bits[0]=0x0; etp_pmask.bits[0]=0x2; //ext 1=>master wifi _rtk_rg_updatePortBasedVIDByLanOrder(mac_pmask, etp_pmask); if(rg_db.systemGlobal.rgInit==1 && rg_db.systemGlobal.initParam.macBasedTagDecision) { //UpdateBindInternet _rtk_rg_updateBindWanIntf(NULL); //Update non-binding _rtk_rg_updateNoneBindingPortmask(rg_db.systemGlobal.wanPortMask.portmask); //Update PVID of OtherWan-binding port to vlan specific for the WAN _rtk_rg_updateBindOtherWanPortBasedVID(NULL); } } int rtk_rg_wifiDeviceEnumerate(struct file *file, const char *buff, unsigned long len, void *data) { _rtk_rg_check_wlan_device_exist_or_not(); return len; } #endif int32 _rtk_rg_initParam_set(rtk_rg_initParams_t *init_param) { int ret,i; rtk_portmask_t mbpmsk, utpmsk, etpmsk; rtk_portmask_t srcExtPortFilterMmsk; rtk_rg_macEntry_t macEt; rtk_l34_routing_entry_t rtEntry; rtk_ipv6Routing_entry_t rtv6Entry; rtk_vlan_protoGroup_t protoGroupCfg; rtk_enable_t vlanFiltering; rtk_port_macAbility_t cpuAbility; #if defined(CONFIG_RG_WLAN_HWNAT_ACCELERATION) && defined(CONFIG_APOLLO) #ifdef CONFIG_DUALBAND_CONCURRENT rtk_rg_cvlan_info_t vlanForSlaveWifi; #endif #endif rtk_rg_portmask_t allPortMask; //rtk_classify_cfg_t cfEntry; //Checking for input parameter - if here we pass NULL, means we just reset the Global variables //if(init_param == NULL) //RETURN_ERR(RT_ERR_RG_NULL_POINTER); //=============== Clear all rg_db variables ========================= ASSERT_EQ(_rtk_rg_globalVariableReset(),RT_ERR_RG_OK); //=============== Global variables initilization ========================= if(init_param != NULL) { memcpy(&rg_db.systemGlobal.initParam,init_param,sizeof(rtk_rg_initParams_t)); if((u32)init_param->initByHwCallBack==0xffffffff) rg_db.systemGlobal.initParam.initByHwCallBack=_rtk_rg_initParameterSetByHwCallBack; if((u32)init_param->arpAddByHwCallBack==0xffffffff) rg_db.systemGlobal.initParam.arpAddByHwCallBack=_rtk_rg_arpAddByHwCallBack; if((u32)init_param->arpDelByHwCallBack==0xffffffff) rg_db.systemGlobal.initParam.arpDelByHwCallBack=_rtk_rg_arpDelByHwCallBack; if((u32)init_param->macAddByHwCallBack==0xffffffff) rg_db.systemGlobal.initParam.macAddByHwCallBack=_rtk_rg_macAddByHwCallBack; if((u32)init_param->macDelByHwCallBack==0xffffffff) rg_db.systemGlobal.initParam.macDelByHwCallBack=_rtk_rg_macDelByHwCallBack; //5 if((u32)init_param->routingAddByHwCallBack==0xffffffff) rg_db.systemGlobal.initParam.routingAddByHwCallBack=_rtk_rg_routingAddByHwCallBack; if((u32)init_param->routingDelByHwCallBack==0xffffffff) rg_db.systemGlobal.initParam.routingDelByHwCallBack=_rtk_rg_routingDelByHwCallBack; if((u32)init_param->naptAddByHwCallBack==0xffffffff) rg_db.systemGlobal.initParam.naptAddByHwCallBack=_rtk_rg_naptAddByHwCallBack; if((u32)init_param->naptDelByHwCallBack==0xffffffff) rg_db.systemGlobal.initParam.naptDelByHwCallBack=_rtk_rg_naptDelByHwCallBack; if((u32)init_param->bindingAddByHwCallBack==0xffffffff) rg_db.systemGlobal.initParam.bindingAddByHwCallBack=_rtk_rg_bindingAddByHwCallBack; //10 if((u32)init_param->bindingDelByHwCallBack==0xffffffff) rg_db.systemGlobal.initParam.bindingDelByHwCallBack=_rtk_rg_bindingDelByHwCallBack; if((u32)init_param->interfaceAddByHwCallBack==0xffffffff) rg_db.systemGlobal.initParam.interfaceAddByHwCallBack=_rtk_rg_interfaceAddByHwCallBack; if((u32)init_param->interfaceDelByHwCallBack==0xffffffff) 
rg_db.systemGlobal.initParam.interfaceDelByHwCallBack=_rtk_rg_interfaceDelByHwCallBack; if((u32)init_param->neighborAddByHwCallBack==0xffffffff) rg_db.systemGlobal.initParam.neighborAddByHwCallBack=_rtk_rg_neighborAddByHwCallBack; if((u32)init_param->neighborDelByHwCallBack==0xffffffff) rg_db.systemGlobal.initParam.neighborDelByHwCallBack=_rtk_rg_neighborDelByHwCallBack; //15 if((u32)init_param->v6RoutingAddByHwCallBack==0xffffffff) rg_db.systemGlobal.initParam.v6RoutingAddByHwCallBack=_rtk_rg_v6RoutingAddByHwCallBack; if((u32)init_param->v6RoutingDelByHwCallBack==0xffffffff) rg_db.systemGlobal.initParam.v6RoutingDelByHwCallBack=_rtk_rg_v6RoutingDelByHwCallBack; //init_param->naptInboundConnLookupFirstCallBack registered later if needed //init_param->naptInboundConnLookupSecondCallBack registered later if needed //init_param->naptInboundConnLookupThirdCallBack registered later if needed //20 if((u32)init_param->dhcpRequestByHwCallBack==0xffffffff) rg_db.systemGlobal.initParam.dhcpRequestByHwCallBack=_rtk_rg_dhcpRequestByHwCallBack; if((u32)init_param->pppoeBeforeDiagByHwCallBack==0xffffffff) rg_db.systemGlobal.initParam.pppoeBeforeDiagByHwCallBack=_rtk_rg_pppoeBeforeDialByHwCallBack; if((u32)init_param->pptpBeforeDialByHwCallBack==0xffffffff) rg_db.systemGlobal.initParam.pptpBeforeDialByHwCallBack=_rtk_rg_pptpBeforeDialByHwCallBack; if((u32)init_param->l2tpBeforeDialByHwCallBack==0xffffffff) rg_db.systemGlobal.initParam.l2tpBeforeDialByHwCallBack=_rtk_rg_l2tpBeforeDialByHwCallBack; if((u32)init_param->pppoeDsliteBeforeDialByHwCallBack==0xffffffff) rg_db.systemGlobal.initParam.pppoeDsliteBeforeDialByHwCallBack=_rtk_rg_pppoeDsliteBeforeDialByHwCallBack; //25 //rg_db.systemGlobal.initParam.ipv6NaptInboundConnLookupFirstCallBack register at init if needed //rg_db.systemGlobal.initParam.ipv6NaptInboundConnLookupSecondCallBack register at init if needed //rg_db.systemGlobal.initParam.ipv6NaptInboundConnLookupThirdCallBack register at init if needed if((u32)init_param->softwareNaptInfoAddCallBack==0xffffffff) rg_db.systemGlobal.initParam.softwareNaptInfoAddCallBack=_rtk_rg_softwareNaptInfoAddCallBack; if((u32)init_param->softwareNaptInfoDeleteCallBack==0xffffffff) rg_db.systemGlobal.initParam.softwareNaptInfoDeleteCallBack=_rtk_rg_softwareNaptInfoDeleteCallBack; //30 if((u32)init_param->naptPreRouteDPICallBack==0xffffffff) rg_db.systemGlobal.initParam.naptPreRouteDPICallBack=_rtk_rg_naptPreRouteDPICallBack; if((u32)init_param->naptForwardDPICallBack==0xffffffff) rg_db.systemGlobal.initParam.naptForwardDPICallBack=_rtk_rg_naptForwardDPICallBack; if((u32)init_param->pppoeLCPStateCallBack==0xffffffff) rg_db.systemGlobal.initParam.pppoeLCPStateCallBack=_rtk_rg_pppoeLCPStateCallBack; } else //default Init Value { rg_db.systemGlobal.initParam.igmpSnoopingEnable=1; rg_db.systemGlobal.initParam.macBasedTagDecision=0; rg_db.systemGlobal.initParam.wanPortGponMode=0; //default is non-GPON mode rg_db.systemGlobal.initParam.ivlMulticastSupport=0; } //Check and assign default VLAN value if not given by init_param if(rg_db.systemGlobal.initParam.fwdVLAN_CPU==0) rg_db.systemGlobal.initParam.fwdVLAN_CPU=DEFAULT_CPU_VLAN; #if defined(CONFIG_RTL9600_SERIES) rg_db.systemGlobal.initParam.fwdVLAN_CPU_SVLAN = rg_db.systemGlobal.initParam.fwdVLAN_CPU; #else if(rg_db.systemGlobal.initParam.fwdVLAN_CPU_SVLAN==0) rg_db.systemGlobal.initParam.fwdVLAN_CPU_SVLAN=DEFAULT_CPU_SVLAN; #endif if(rg_db.systemGlobal.initParam.fwdVLAN_Proto_Block==0) rg_db.systemGlobal.initParam.fwdVLAN_Proto_Block=DEFAULT_PROTO_BLOCK_VLAN; 
if(rg_db.systemGlobal.initParam.fwdVLAN_BIND_INTERNET==0) rg_db.systemGlobal.initParam.fwdVLAN_BIND_INTERNET=DEFAULT_BIND_INTERNET; if(rg_db.systemGlobal.initParam.fwdVLAN_BIND_OTHER==0) rg_db.systemGlobal.initParam.fwdVLAN_BIND_OTHER=DEFAULT_BIND_OTHER; if(rg_db.systemGlobal.initParam.fwdVLAN_BIND_OTHER+DEFAULT_BIND_LAN_OFFSET>=MAX_VLAN_SW_TABLE_SIZE-1) RETURN_ERR(RT_ERR_RG_VLAN_OVER_RANGE); #if defined(CONFIG_RTL9600_SERIES) #else if(rg_db.systemGlobal.initParam.fwdVLAN_CPU_SVLAN==rg_db.systemGlobal.initParam.fwdVLAN_CPU) RETURN_ERR(RT_ERR_RG_VLAN_USED_BY_SYSTEM); if(rg_db.systemGlobal.initParam.fwdVLAN_CPU_SVLAN==rg_db.systemGlobal.initParam.fwdVLAN_Proto_Block) RETURN_ERR(RT_ERR_RG_VLAN_USED_BY_SYSTEM); if(rg_db.systemGlobal.initParam.fwdVLAN_CPU_SVLAN==rg_db.systemGlobal.initParam.fwdVLAN_BIND_INTERNET) RETURN_ERR(RT_ERR_RG_VLAN_USED_BY_SYSTEM); if(rg_db.systemGlobal.initParam.fwdVLAN_CPU_SVLAN>=rg_db.systemGlobal.initParam.fwdVLAN_BIND_OTHER && rg_db.systemGlobal.initParam.fwdVLAN_CPU_SVLAN<=rg_db.systemGlobal.initParam.fwdVLAN_BIND_OTHER+DEFAULT_BIND_LAN_OFFSET) RETURN_ERR(RT_ERR_RG_VLAN_USED_BY_SYSTEM); #endif if(rg_db.systemGlobal.initParam.fwdVLAN_CPU==rg_db.systemGlobal.initParam.fwdVLAN_Proto_Block) RETURN_ERR(RT_ERR_RG_VLAN_USED_BY_SYSTEM); if(rg_db.systemGlobal.initParam.fwdVLAN_CPU==rg_db.systemGlobal.initParam.fwdVLAN_BIND_INTERNET) RETURN_ERR(RT_ERR_RG_VLAN_USED_BY_SYSTEM); if(rg_db.systemGlobal.initParam.fwdVLAN_CPU>=rg_db.systemGlobal.initParam.fwdVLAN_BIND_OTHER && rg_db.systemGlobal.initParam.fwdVLAN_CPU<=rg_db.systemGlobal.initParam.fwdVLAN_BIND_OTHER+DEFAULT_BIND_LAN_OFFSET) RETURN_ERR(RT_ERR_RG_VLAN_USED_BY_SYSTEM); if(rg_db.systemGlobal.initParam.fwdVLAN_Proto_Block==rg_db.systemGlobal.initParam.fwdVLAN_BIND_INTERNET) RETURN_ERR(RT_ERR_RG_VLAN_USED_BY_SYSTEM); if(rg_db.systemGlobal.initParam.fwdVLAN_Proto_Block>=rg_db.systemGlobal.initParam.fwdVLAN_BIND_OTHER && rg_db.systemGlobal.initParam.fwdVLAN_Proto_Block<=rg_db.systemGlobal.initParam.fwdVLAN_BIND_OTHER+DEFAULT_BIND_LAN_OFFSET) RETURN_ERR(RT_ERR_RG_VLAN_USED_BY_SYSTEM); if(rg_db.systemGlobal.initParam.fwdVLAN_BIND_INTERNET>=rg_db.systemGlobal.initParam.fwdVLAN_BIND_OTHER && rg_db.systemGlobal.initParam.fwdVLAN_BIND_INTERNET<=rg_db.systemGlobal.initParam.fwdVLAN_BIND_OTHER+DEFAULT_BIND_LAN_OFFSET) RETURN_ERR(RT_ERR_RG_VLAN_USED_BY_SYSTEM); //defaultly disable ext port filter (port range used by ps) rg_db.systemGlobal.lowerBoundPortUsedByPS = 0; rg_db.systemGlobal.upperBoundPortUsedByPS = 0; //confirm all RG platform APIs are registered ASSERT_EQ(_rtk_rg_platform_function_register_check(&pf),RT_ERR_RG_OK); //get service port from RTK API rg_db.systemGlobal.service_pmsk.portmask=0; //get timeout value from default define #if defined(CONFIG_ROME_NAPT_SHORTCUT) rg_db.systemGlobal.v4ShortCut_timeout=RTK_RG_DEFAULT_V4_SHORTCUT_TIMEOUT; #endif #if defined(CONFIG_RG_IPV6_SOFTWARE_SHORTCUT_SUPPORT) rg_db.systemGlobal.v6ShortCut_timeout=RTK_RG_DEFAULT_V6_SHORTCUT_TIMEOUT; #endif #if defined(CONFIG_RG_FLOW_AUTO_AGEOUT) rg_db.systemGlobal.flow_timeout=RTK_RG_DEFAULT_FLOW_TIMEOUT; #endif rg_db.systemGlobal.arp_timeout=RTK_RG_DEFAULT_ARP_TIMEOUT; rg_db.systemGlobal.neighbor_timeout=RTK_RG_DEFAULT_NEIGHBOR_TIMEOUT; rg_db.systemGlobal.tcp_long_timeout=RTK_RG_DEFAULT_TCP_LONG_TIMEOUT; rg_db.systemGlobal.tcp_short_timeout=RTK_RG_DEFAULT_TCP_SHORT_TIMEOUT; rg_db.systemGlobal.udp_long_timeout=RTK_RG_DEFAULT_UDP_LONG_TIMEOUT; rg_db.systemGlobal.udp_short_timeout=RTK_RG_DEFAULT_UDP_SHORT_TIMEOUT; #if defined(CONFIG_RTL9600_SERIES) 
#else //support lut traffic bit rg_db.systemGlobal.l2_timeout=RTK_RG_DEFAULT_L2_TIMEOUT; #endif rg_db.systemGlobal.house_keep_sec=RTK_RG_DEFAULT_HOUSE_KEEP_SECOND; rg_db.systemGlobal.mcast_query_sec=RTK_RG_DEFAULT_MCAST_QUERY_INTERVAL; rg_db.systemGlobal.arp_requset_interval_sec=RTK_RG_DEFAULT_ARP_REQUEST_INTERVAL_SECOND; rg_db.systemGlobal.auto_test_fail_arp_interval_sec=RTK_RG_DEFAULT_AUTO_TEST_FAIL_ARP_INTERVAL_SECOND; //check supported module #if !defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) ASSERT_EQ(_rtk_rg_internal_support_check(),RT_ERR_RG_OK); #endif //get phyPort status and store in software for(i=RTK_PORT_UTP0;i<RTK_PORT_UTP11;i++) { if(rtk_switch_phyPortId_get(i, &ret)==RT_ERR_OK)rg_db.systemGlobal.phyPortStatus|=0x1<<(RTK_RG_PORT0+ret); } if(rtk_switch_phyPortId_get(RTK_PORT_PON, &ret)==RT_ERR_OK)rg_db.systemGlobal.phyPortStatus|=0x1<<(RTK_RG_PORT0+ret); if(rtk_switch_phyPortId_get(RTK_PORT_FIBER, &ret)==RT_ERR_OK)rg_db.systemGlobal.phyPortStatus|=0x1<<(RTK_RG_PORT0+ret); if(rtk_switch_phyPortId_get(RTK_PORT_EXT0, &ret)==RT_ERR_OK)rg_db.systemGlobal.phyPortStatus|=0x1<<(RTK_RG_PORT0+ret); if(rtk_switch_phyPortId_get(RTK_PORT_EXT1, &ret)==RT_ERR_OK)rg_db.systemGlobal.phyPortStatus|=0x1<<(RTK_RG_PORT0+ret); if(rtk_switch_phyPortId_get(RTK_PORT_EXT2, &ret)==RT_ERR_OK)rg_db.systemGlobal.phyPortStatus|=0x1<<(RTK_RG_PORT0+ret); if(rtk_switch_phyPortId_get(RTK_PORT_CPU, &ret)==RT_ERR_OK)rg_db.systemGlobal.phyPortStatus|=0x1<<(RTK_RG_PORT0+ret); for(i=0;i<RTK_RG_MAC_PORT_CPU;i++) { rtk_enable_t enable; assert_ok(RTK_SVLAN_SERVICEPORT_GET(i,&enable)); if(enable==ENABLED) { rg_db.systemGlobal.service_pmsk.portmask|=(1<<i); } } DEBUG("service_pmsk=0x%x\n",rg_db.systemGlobal.service_pmsk.portmask); //Dropped jumbo frame to avoid software(NIC) buzy dropping. { for(i=0;i<RTK_RG_MAC_PORT_CPU;i++){ //reference to NIC driver SKB_BUF_SIZE #ifdef CONFIG_DUALBAND_CONCURRENT ASSERT_EQ(RTK_SWITCH_MAXPKTLENBYPORT_SET(i, 1800),RT_ERR_OK); #else ASSERT_EQ(RTK_SWITCH_MAXPKTLENBYPORT_SET(i, 1600),RT_ERR_OK); #endif } } /* Initialize SDK */ //rtlglue_printf("RTK RG initialize.....%d\n",RG_GLB_VLAN_INIT); #ifdef __KERNEL__ ASSERT_EQ(rtk_init_without_pon(),RT_ERR_OK); #else ASSERT_EQ(rtk_init(),RT_ERR_OK); #endif #if defined(CONFIG_RG_FLOW_BASED_PLATFORM) ASSERT_EQ(rtk_rg_asic_fb_init(),RT_ERR_OK); #else ASSERT_EQ(rtk_l34_init(),RT_ERR_OK); #endif ASSERT_EQ(rtk_l2_init(),RT_ERR_OK); #if defined(CONFIG_RTL9602C_SERIES) #if 0 { //default set each vlan be untag to avoid service-port to serverive-port may content stag out because no stag unmatch action. 
rtk_portmask_t memberPortmask; rtk_portmask_t untagPortmask; int32 ret; for(i=0;i<MAX_VLAN_HW_TABLE_SIZE;i++) { //ASSERT_EQ(rtk_vlan_port_get(i,&memberPortmask,&untagPortmask),RT_ERR_OK); ret = rtk_vlan_port_get(i,&memberPortmask,&untagPortmask); if(ret!=RT_ERR_OK) WARNING("rtk_vlan_port_get(vid=%d) failed!!!",i); untagPortmask.bits[0]=RTK_RG_ALL_MAC_PORTMASK; //ASSERT_EQ(RTK_VLAN_PORT_SET(i,&memberPortmask,&untagPortmask),RT_ERR_OK); ret = RTK_VLAN_PORT_SET(i,&memberPortmask,&untagPortmask); if(ret!=RT_ERR_OK) WARNING("RTK_VLAN_PORT_SET(vid=%d) failed!!!",i); } } #endif #endif // get initilized port link status ASSERT_EQ(_rtk_rg_getPortLinkupStatus(), RT_ERR_OK); #ifdef CONFIG_RG_LAYER2_SOFTWARE_LEARN //clear link-down indicator from the beginning #if 0//def __KERNEL__ ASSERT_EQ(rtk_intr_linkdownStatus_clear(),RT_ERR_OK); /*register link-change ISR bh handler*/ ASSERT_EQ(rtk_irq_isr_register(INTR_TYPE_LINK_CHANGE,_rtk_rg_switchLinkChangeHandler),RT_ERR_OK); //turn on link-change ISR mask ASSERT_EQ(rtk_intr_imr_set(INTR_TYPE_LINK_CHANGE,ENABLED),RT_ERR_OK); #endif #if defined(CONFIG_RTL9600_SERIES) || defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) #else //support lut traffic bit //Turn off Lut aging for(i=0;i<RTK_RG_MAC_PORT_MAX;i++) { ASSERT_EQ(rtk_l2_portAgingEnable_set(i, DISABLED),RT_ERR_OK); } #endif #if defined(CONFIG_RTL9600_SERIES) || defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) #else //support vid unmatched action #if defined(CONFIG_RTL9602C_SERIES) { uint32 regValue = 0x1; ASSERT_EQ(reg_field_write(RTL9602C_LUT_CFGr, RTL9602C_LUT_L34_ARP_USAGE_AS_KNOWNf, ®Value),RT_ERR_OK); } if((rg_db.systemGlobal.internalSupportMask & RTK_RG_INTERNAL_SUPPORT_BIT1)) #elif defined(CONFIG_RTL9607C_SERIES) { uint32 regValue = 0x1; ASSERT_EQ(reg_field_write(RTL9607C_LUT_CFGr, RTL9607C_LUT_L34_ARP_USAGE_AS_KNOWNf, ®Value),RT_ERR_OK); } #endif { if(rg_db.systemGlobal.initParam.macBasedTagDecision) { for(i=0;i<RTK_RG_MAC_PORT_MAX;i++) { ASSERT_EQ(rtk_l2_vidUnmatchAction_set(i, ACTION_TRAP2CPU),RT_ERR_OK); } } } #endif //Turn off Lut auto-learning for(i=0;i<RTK_RG_MAC_PORT_CPU;i++) //CPU port will use auto-learning, and do not turn on DMAC2CViD function { ASSERT_EQ(RTK_L2_PORTLIMITLEARNINGCNT_SET(i,0),RT_ERR_OK); ASSERT_EQ(RTK_L2_PORTLIMITLEARNINGCNTACTION_SET(i,LIMIT_LEARN_CNT_ACTION_TO_CPU),RT_ERR_OK); ASSERT_EQ(RTK_SVLAN_DMACVIDSELSTATE_SET(i, DISABLED), RT_ERR_OK); //FIXME: we should check global variable setting for this!! } //Turn off CPU port LUT auto-learning, and set Action to Forward ASSERT_EQ(RTK_L2_PORTLIMITLEARNINGCNT_SET(RTK_RG_MAC_PORT_CPU,0),RT_ERR_OK); ASSERT_EQ(RTK_L2_PORTLIMITLEARNINGCNTACTION_SET(RTK_RG_MAC_PORT_CPU,LIMIT_LEARN_CNT_ACTION_FORWARD),RT_ERR_OK); //Flush all unicast LUT may be learned by hardware ASSERT_EQ(rtk_l2_addr_delAll(ENABLED),RT_ERR_OK); #endif #ifdef CONFIG_APOLLO_TESTING // inited by rtk_rg_module_init, do not do rtk_init() again!!! it will make virtualmac disabled! 
if(virtualmacEnable==ENABLE) { apollo_mac_init(); } #endif #ifdef CONFIG_APOLLO_MODEL #else #if !defined(CONFIG_RG_FLOW_BASED_PLATFORM) //HSD debug assert_ok(rtk_l34_hsdState_set(ENABLED)); #endif #endif #if defined(CONFIG_RG_NAPT_AUTO_AGEOUT) && !defined(CONFIG_RG_FLOW_BASED_PLATFORM) //Clear traffic rtk_l34_hwL4TrfWrkTbl_Clear(0); rtk_l34_hwL4TrfWrkTbl_Clear(1); #endif //make sharemeter burst size larger for(i=0;i<MAX_SHAREMETER_TABLE_SIZE;i++){ ASSERT_EQ(rtk_rate_shareMeterBucket_set(i,0x3fff),RT_ERR_OK); } //igmp init for(i=0;i<RTK_RG_MAC_PORT_CPU;i++) { if(i==RTK_RG_MAC_PORT_PON) continue; ASSERT_EQ(RTK_TRAP_PORTIGMPMLDCTRLPKTACTION_SET(i,IGMPMLD_TYPE_IGMPV1,ACTION_TRAP2CPU),RT_ERR_OK); ASSERT_EQ(RTK_TRAP_PORTIGMPMLDCTRLPKTACTION_SET(i,IGMPMLD_TYPE_IGMPV2,ACTION_TRAP2CPU),RT_ERR_OK); ASSERT_EQ(RTK_TRAP_PORTIGMPMLDCTRLPKTACTION_SET(i,IGMPMLD_TYPE_IGMPV3,ACTION_TRAP2CPU),RT_ERR_OK); ASSERT_EQ(RTK_TRAP_PORTIGMPMLDCTRLPKTACTION_SET(i,IGMPMLD_TYPE_MLDV1,ACTION_TRAP2CPU),RT_ERR_OK); ASSERT_EQ(RTK_TRAP_PORTIGMPMLDCTRLPKTACTION_SET(i,IGMPMLD_TYPE_MLDV2,ACTION_TRAP2CPU),RT_ERR_OK); } #if defined(CONFIG_APOLLO) // XDSL:init acl in rtk_init_without_pon //Init ACL Template & Field Selector & RangeTable Value ASSERT_EQ(_rtk_rg_acl_asic_init(),RT_ERR_RG_OK); //Init Classify RangeTable Value ASSERT_EQ(_rtk_rg_classify_asic_init(),RT_ERR_RG_OK); #endif //Init naptFilterAndQos (pure software) ASSERT_EQ(_rtk_rg_apollo_naptFilterAndQos_init(),RT_ERR_RG_OK); //init acllFilter for SW maintain info ASSERT_EQ(_rtk_rg_aclSWEntry_init(),RT_ERR_RG_OK); //init classify Filter for SW maintain info ASSERT_EQ(_rtk_rg_classifySWEntry_init(),RT_ERR_RG_OK); //init reserved ACL SW maintain info ASSERT_EQ(_rtk_rg_aclReservedEntry_init(),RT_ERR_RG_OK); #if defined(__KERNEL__) && defined(CONFIG_APOLLO) && defined(CONFIG_RTL9600_SERIES) //#ifdef CONFIG_RG_PPPOE_AND_VALN_ISSUE_PATCH if((rg_db.systemGlobal.internalSupportMask & RTK_RG_INTERNAL_SUPPORT_BIT0)) { assert_ok(_rtk_rg_acl_reserved_pppoeCvidIssue_svid2IngressCvid(RTK_RG_PORT_PON)); } //#endif #endif #ifdef CONFIG_DUALBAND_CONCURRENT assert_ok(_rtk_rg_aclAndCfReservedRuleAdd(RTK_RG_ACLANDCF_RESERVED_IGMP_TO_SLAVE_WIFI_BLOCK, NULL)); #endif #ifdef __KERNEL__ //init stormControl for SW maintain info ASSERT_EQ(_rtk_rg_stormControlEntry_init(),RT_ERR_RG_OK); #endif //init urlFilter for SW maintain info ASSERT_EQ(_rtk_rg_urlFilter_table_init(),RT_ERR_RG_OK); //init macFilter for SW maintain info ASSERT_EQ(_rtk_rg_macFilter_table_init(),RT_ERR_RG_OK); #if defined(CONFIG_RTL9602C_SERIES) || defined(CONFIG_RTL9607C_SERIES) //reserved RTK_RG_ACLANDCF_RESERVED_EPON_DROP_AND_INTERRUPT doesn't need for apolloFE. #else #ifdef CONFIG_EPON_FEATURE if(init_param==NULL || (init_param!=NULL && !init_param->wanPortGponMode)) { if((rg_db.systemGlobal.internalSupportMask & RTK_RG_INTERNAL_SUPPORT_BIT3)){ _rtk_rg_aclAndCfReservedRuleAdd(RTK_RG_ACLANDCF_RESERVED_EPON_DROP_AND_INTERRUPT, NULL); } _rtk_rg_aclAndCfReservedRuleAdd(RTK_RG_ACLANDCF_RESERVED_EPON_ASSIGN_PRIORITY, NULL); } #endif #endif //Enable tpid1 ASSERT_EQ(RTK_SVLAN_TPIDENABLE_SET(0, ENABLED), RT_ERR_RG_OK); ASSERT_EQ(RTK_SVLAN_TPIDENTRY_SET(0, 0x88a8), RT_ERR_RG_OK); #if defined(CONFIG_RTL9600_SERIES)|| defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) #else // support tpid2 ASSERT_EQ(RTK_SVLAN_TPIDENTRY_SET(1, 0x8100), RT_ERR_RG_OK); #endif assert_ok(rtk_svlan_deiKeepState_set(ENABLED)); //keep dei format when trap to CPU //set service port from original setting. 
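	/* Note (added for clarity): the service-port mask restored below was snapshotted into
	 * rg_db.systemGlobal.service_pmsk earlier, before rtk_init()/rtk_init_without_pon()
	 * re-initialized the switch, so each port simply gets back the SVLAN service-port
	 * state it had before initialization. */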
for(i=0;i<RTK_RG_MAC_PORT_CPU;i++) { rtk_enable_t enable; if(rg_db.systemGlobal.service_pmsk.portmask&(1<<i)) { enable=ENABLED; } else { enable=DISABLED; } assert_ok(RTK_SVLAN_SERVICEPORT_SET(i,enable)); } #if defined(CONFIG_RTL9600_SERIES) if((rg_db.systemGlobal.internalSupportMask & RTK_RG_INTERNAL_SUPPORT_BIT0)) { //Patch for 6266 b-cut and before, because we use STAG to patch pppoe issue, //we have to set this otherwise unmatch ctag packet will always untag!! ASSERT_EQ(rtk_svlan_sp2cUnmatchCtagging_set(ENABLED),RT_ERR_OK); } #endif //Turn on Lut table extened 64 entries switch(rg_kernel.apolloChipId) { #if defined(CONFIG_RTL9600_SERIES) case APOLLOMP_CHIP_ID: ASSERT_EQ(apollomp_raw_l2_camEnable_set(ENABLED),RT_ERR_OK); break; #endif #if defined(CONFIG_RTL9601B_SERIES) case RTL9601B_CHIP_ID: ASSERT_EQ(rtk_l2_camState_set(ENABLED),RT_ERR_OK); break; #endif #if defined(CONFIG_RTL9602C_SERIES) case RTL9602C_CHIP_ID: ASSERT_EQ(rtk_l2_camState_set(ENABLED),RT_ERR_OK); break; #endif #if defined(CONFIG_RTL9607C_SERIES) case RTL9607B_CHIP_ID: ASSERT_EQ(rtk_l2_camState_set(ENABLED),RT_ERR_OK); break; #endif default: DEBUG("Chip Not Support.\n"); } //Port isolation initialization #if defined(CONFIG_RTL9600_SERIES) assert_ok(rtk_port_isolationCtagPktConfig_set(RTK_PORT_ISO_CFG_0)); //9602bvb and xdsl not support assert_ok(rtk_port_isolationL34PktConfig_set(RTK_PORT_ISO_CFG_0)); //9602bvb and xdsl not support allPortMask.portmask=0xfff; //all port and ext-port #elif defined(CONFIG_RTL9602C_SERIES) || defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) allPortMask.portmask=0x3ff; //all port and ext-port #elif defined(CONFIG_RTL9607C_SERIES) allPortMask.portmask=0x1fffffff; //all port and ext-port #else WARNING("FIXME"); #endif _rtk_rg_portmask_translator(allPortMask, &mbpmsk, &etpmsk); for(i=RTK_RG_PORT0;i<RTK_RG_PORT_MAX;i++) { rg_db.systemGlobal.portIsolation[i].portmask=allPortMask.portmask; if(i<RTK_RG_PORT_CPU) { assert_ok(RTK_PORT_ISOLATIONIPMCLEAKY_SET(i, DISABLED)); assert_ok(RTK_PORT_ISOLATIONENTRY_SET(RTK_PORT_ISO_CFG_0, i, &mbpmsk, &etpmsk)); #if defined(CONFIG_RTL9600_SERIES) assert_ok(RTK_PORT_ISOLATIONENTRY_SET(RTK_PORT_ISO_CFG_1, i, &mbpmsk, &etpmsk)); #endif } else if(i>RTK_RG_PORT_CPU) { #if defined(CONFIG_RTL9600_SERIES) // 9602bvb is not support port isolation from ext port. assert_ok(RTK_PORT_ISOLATIONENTRYEXT_SET(RTK_PORT_ISO_CFG_0, i-RTK_RG_PORT_CPU, &mbpmsk, &etpmsk)); assert_ok(RTK_PORT_ISOLATIONENTRYEXT_SET(RTK_PORT_ISO_CFG_1, i-RTK_RG_PORT_CPU, &mbpmsk, &etpmsk)); #endif } else { assert_ok(RTK_PORT_ISOLATIONIPMCLEAKY_SET(i, DISABLED)); assert_ok(RTK_PORT_ISOLATIONENTRY_SET(RTK_PORT_ISO_CFG_0, i, &mbpmsk, &etpmsk)); #if defined(CONFIG_RTL9600_SERIES) assert_ok(RTK_PORT_ISOLATIONENTRY_SET(RTK_PORT_ISO_CFG_1, i, &mbpmsk, &etpmsk)); #endif #if defined(CONFIG_RTL9600_SERIES) // 9602bvb is not support port isolation from ext port. 
assert_ok(RTK_PORT_ISOLATIONENTRYEXT_SET(RTK_PORT_ISO_CFG_0, i-RTK_RG_PORT_CPU, &mbpmsk, &etpmsk)); assert_ok(RTK_PORT_ISOLATIONENTRYEXT_SET(RTK_PORT_ISO_CFG_1, i-RTK_RG_PORT_CPU, &mbpmsk, &etpmsk)); #endif } } //ALG initialization rg_db.algServInLanIpMapping[RTK_RG_ALG_SIP_TCP_SRV_IN_LAN-RTK_RG_ALG_SIP_TCP_SRV_IN_LAN].algType=RTK_RG_ALG_SIP_TCP_SRV_IN_LAN_BIT; rg_db.algServInLanIpMapping[RTK_RG_ALG_SIP_UDP_SRV_IN_LAN-RTK_RG_ALG_SIP_TCP_SRV_IN_LAN].algType=RTK_RG_ALG_SIP_UDP_SRV_IN_LAN_BIT; rg_db.algServInLanIpMapping[RTK_RG_ALG_H323_TCP_SRV_IN_LAN-RTK_RG_ALG_SIP_TCP_SRV_IN_LAN].algType=RTK_RG_ALG_H323_TCP_SRV_IN_LAN_BIT; rg_db.algServInLanIpMapping[RTK_RG_ALG_H323_UDP_SRV_IN_LAN-RTK_RG_ALG_SIP_TCP_SRV_IN_LAN].algType=RTK_RG_ALG_H323_UDP_SRV_IN_LAN_BIT; rg_db.algServInLanIpMapping[RTK_RG_ALG_RTSP_TCP_SRV_IN_LAN-RTK_RG_ALG_SIP_TCP_SRV_IN_LAN].algType=RTK_RG_ALG_RTSP_TCP_SRV_IN_LAN_BIT; rg_db.algServInLanIpMapping[RTK_RG_ALG_RTSP_UDP_SRV_IN_LAN-RTK_RG_ALG_SIP_TCP_SRV_IN_LAN].algType=RTK_RG_ALG_RTSP_UDP_SRV_IN_LAN_BIT; rg_db.algServInLanIpMapping[RTK_RG_ALG_FTP_TCP_SRV_IN_LAN-RTK_RG_ALG_SIP_TCP_SRV_IN_LAN].algType=RTK_RG_ALG_FTP_TCP_SRV_IN_LAN_BIT; rg_db.algServInLanIpMapping[RTK_RG_ALG_FTP_UDP_SRV_IN_LAN-RTK_RG_ALG_SIP_TCP_SRV_IN_LAN].algType=RTK_RG_ALG_FTP_UDP_SRV_IN_LAN_BIT; for(i=0;i<MAX_NETIF_SW_TABLE_SIZE<<1;i++) { //bzero(&rg_db.systemGlobal.interfaceInfo[i], sizeof(rtk_rg_interface_info_global_t)); //rg_db.systemGlobal.interfaceInfo[i].p_lanIntfConf=NULL; //rg_db.systemGlobal.interfaceInfo[i].p_wanStaticInfo=NULL; //rg_db.systemGlobal.lanIntfGroup[i].p_intfInfo=NULL; //rg_db.systemGlobal.wanIntfGroup[i].p_intfInfo=NULL; #ifdef __KERNEL__ if(rg_db.systemGlobal.intfArpRequest[i].finished==0) { if(timer_pending(&rg_kernel.arpRequestTimer[i])) del_timer(&rg_kernel.arpRequestTimer[i]); } if(rg_db.systemGlobal.intfNeighborDiscovery[i].finished==0) { if(timer_pending(&rg_kernel.neighborDiscoveryTimer[i])) del_timer(&rg_kernel.neighborDiscoveryTimer[i]); } #endif rg_db.systemGlobal.intfArpRequest[i].finished = 1; rg_db.systemGlobal.intfNeighborDiscovery[i].finished = 1; } for(i=0;i<MAX_STATIC_ROUTE_SIZE;i++){ #ifdef __KERNEL__ if(timer_pending(&rg_kernel.staticRouteArpOrNBReqTimer[i])) del_timer(&rg_kernel.staticRouteArpOrNBReqTimer[i]); #endif rg_db.systemGlobal.staticRouteArpReq[i].finished = 1; rg_db.systemGlobal.staticRouteNBDiscovery[i].finished = 1; } /*for(i=0;i<MAX_BIND_HW_TABLE_SIZE;i++) { rg_db.systemGlobal.bindToIntf[i]=-1; rg_db.systemGlobal.bindWithVLAN[i]=-1; }*/ //for(i=0;i<RTK_RG_PORT_MAX;i++) //rg_db.systemGlobal.portBasedVID[i]=rg_db.systemGlobal.initParam.fwdVLAN_CPU; //reset port-based VLAN in rg_db #if defined(CONFIG_RTL9602C_SERIES) // init hw/sw mib interface counter for(i=0;i<MAX_NETIF_SW_TABLE_SIZE;i++) { rtk_rg_apolloFE_interfaceMibCounter_del(i); } #endif for(i=0;i<RTK_RG_PORT_MAX;i++) rg_db.systemGlobal.sourceAddrLearningLimitNumber[i]=DEF_SOFTWARE_LEARNING_LIMIT; //reset port-based SA learning limit (software learning) rg_db.systemGlobal.accessWanLimitPortMask=DEF_SOFTWARE_LEARNING_LIMIT; //reset portmask WAN access limit rg_db.systemGlobal.accessWanLimitPortMask_member.portmask=0x0; //default no port will be count for speed #ifdef CONFIG_MASTER_WLAN0_ENABLE rg_db.systemGlobal.accessWanLimitPortMask_wlan0member=0x0; #endif for(i=0;i<WanAccessCategoryNum;i++) rg_db.systemGlobal.accessWanLimitCategory[i]=DEF_SOFTWARE_LEARNING_LIMIT; #if 0 for(i=0;i<STATIC_DHCP_ALLOC_NUM;i++) _DHCP_STATIC[i].valid=0; #endif //20151218LUKE: check if we should create default svlan or 
not. _rtk_rg_default_svlan_manipulate(); /* Virtual Server initialization for(i=0;i<MAX_VIRTUAL_SERVER_SW_TABLE_SIZE;i++) { rg_db.systemGlobal.virtualServerGroup[i].p_virtualServer = NULL; }*/ /* UPNP initialization for(i=0;i<MAX_UPNP_SW_TABLE_SIZE;i++) { rg_db.systemGlobal.upnpGroup[i].p_upnp = NULL; }*/ //rg_db.systemGlobal.lanIntfTotalNum = 0; //how many LAN interface added //rg_db.systemGlobal.wanIntfTotalNum = 0; //how many WAN interface added //rg_db.systemGlobal.wanInfoSet = 0; //which WAN interface had been set //rg_db.systemGlobal.vlanBindTotalNum = 0; //rg_db.systemGlobal.pppoeBeforeCalled = 0; rg_db.systemGlobal.defaultRouteSet = -1; //keep which interface is default route rg_db.systemGlobal.defaultIPV6RouteSet = -1; //keep which interface is IPv6 default route rg_db.systemGlobal.intfIdxForReset = -1; //only use when wan need reset //rg_db.systemGlobal.virtualServerTotalNum = 0; //rg_db.systemGlobal.upnpTotalNum = 0; rg_db.p_routingArpInfoArray=rg_db.routingArpInfoArray_1; rg_db.p_tempRoutingArpInfoArray=rg_db.routingArpInfoArray_2; rg_db.p_routingVlanInfoArray=rg_db.routingVlanInfoArray_1; //store interface vlan id rg_db.p_tempRoutingVlanInfoArray=rg_db.routingVlanInfoArray_2; /* //Set up PPPoE pass through Protocol Group protoGroupCfg.frametype=FRAME_TYPE_ETHERNET; protoGroupCfg.framevalue=0x8863; //PPPoE discovery stage ret = rtk_vlan_protoGroup_set(PPPOE_DISCOVERY_GROUPID,&protoGroupCfg); if(ret!=RT_ERR_OK)return ret; protoGroupCfg.frametype=FRAME_TYPE_ETHERNET; protoGroupCfg.framevalue=0x8864; //PPPoE session stage ret = rtk_vlan_protoGroup_set(PPPOE_SESSION_GROUPID,&protoGroupCfg); if(ret!=RT_ERR_OK)return ret; */ //Set up Protocol Group for IPv4/v6 separately protoGroupCfg.frametype=FRAME_TYPE_ETHERNET; protoGroupCfg.framevalue=RG_IPV4_ETHERTYPE; //IPv4 ret = RTK_VLAN_PROTOGROUP_SET(RG_IPV4_GROUPID,&protoGroupCfg); if(ret!=RT_ERR_OK)return ret; protoGroupCfg.frametype=FRAME_TYPE_ETHERNET; protoGroupCfg.framevalue=RG_ARP_ETHERTYPE; //ARP ret = RTK_VLAN_PROTOGROUP_SET(RG_ARP_GROUPID,&protoGroupCfg); if(ret!=RT_ERR_OK)return ret; protoGroupCfg.frametype=FRAME_TYPE_ETHERNET; protoGroupCfg.framevalue=RG_IPV6_ETHERTYPE; //IPv6 ret = RTK_VLAN_PROTOGROUP_SET(RG_IPV6_GROUPID,&protoGroupCfg); if(ret!=RT_ERR_OK)return ret; //Destory VLAN 1 created by RTK ret = RTK_VLAN_DESTROY(1); if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_VLAN_SET_FAIL); //Set up CPU VLAN ret = RTK_VLAN_CREATE(rg_db.systemGlobal.initParam.fwdVLAN_CPU); if(ret == RT_ERR_NOT_INIT) { //Initialize VLAN module ret = rtk_vlan_init(); if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_NOT_INIT); ret = RTK_VLAN_CREATE(rg_db.systemGlobal.initParam.fwdVLAN_CPU); if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_VLAN_SET_FAIL); } else if (ret == RT_ERR_VLAN_EXIST) { DEBUG("fwdVLAN_CPU[%d] had created..",rg_db.systemGlobal.initParam.fwdVLAN_CPU); rg_db.vlan[rg_db.systemGlobal.initParam.fwdVLAN_CPU].valid = 1; } mbpmsk.bits[0]=RTK_RG_ALL_MAC_PORTMASK; //all port utpmsk.bits[0]=RTK_RG_ALL_MAC_PORTMASK; //all untag etpmsk.bits[0]=RTK_RG_ALL_VIRUAL_PORTMASK; //all extension port ret = RTK_VLAN_FID_SET(rg_db.systemGlobal.initParam.fwdVLAN_CPU, LAN_FID); if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_VLAN_SET_FAIL); ret = RTK_VLAN_FIDMODE_SET(rg_db.systemGlobal.initParam.fwdVLAN_CPU, VLAN_FID_SVL); //This is used for ALL LAN interface if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_VLAN_SET_FAIL); ret = RTK_VLAN_PORT_SET(rg_db.systemGlobal.initParam.fwdVLAN_CPU, &mbpmsk, &utpmsk); if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_VLAN_SET_FAIL); ret = 
RTK_VLAN_EXTPORT_SET(rg_db.systemGlobal.initParam.fwdVLAN_CPU, &etpmsk); if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_VLAN_SET_FAIL); #ifdef CONFIG_RG_WAN_PORT_ISOLATE //cxy: for wan port isolate with lan ports mbpmsk.bits[0]=(1<<RTK_RG_PORT_PON); //all port utpmsk.bits[0]=(1<<RTK_RG_PORT_PON); //all untag etpmsk.bits[0]=0x0; //all extension port ret = RTK_VLAN_CREATE(DEFAULT_WAN_VLAN); if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_VLAN_SET_FAIL); ret = RTK_VLAN_FID_SET(DEFAULT_WAN_VLAN, WAN_FID); if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_VLAN_SET_FAIL); ret = RTK_VLAN_FIDMODE_SET(DEFAULT_WAN_VLAN, VLAN_FID_SVL); //This is used for ALL LAN interface if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_VLAN_SET_FAIL); ret = RTK_VLAN_PORT_SET(DEFAULT_WAN_VLAN, &mbpmsk, &utpmsk); if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_VLAN_SET_FAIL); ret = RTK_VLAN_EXTPORT_SET(DEFAULT_WAN_VLAN, &etpmsk); if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_VLAN_SET_FAIL); #endif vlanFiltering=ENABLE; ret = rtk_vlan_vlanFunctionEnable_set(vlanFiltering); //vlan igr/egr filter switch if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_VLAN_SET_FAIL); for(i=0;i<RTK_RG_MAX_MAC_PORT;i++) { ret = RTK_VLAN_PORTIGRFILTERENABLE_SET(i, vlanFiltering); if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_VLAN_SET_FAIL); ret = RTK_VLAN_PORTPVID_SET(i, rg_db.systemGlobal.initParam.fwdVLAN_CPU); if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_VLAN_SET_FAIL); } for(i=RTK_RG_EXT_PORT0;i<RTK_RG_PORT_MAX;i++) { uint32 extPort = i-RTK_RG_PORT_CPU; #if defined (CONFIG_RTL9607C_SERIES) extPort -= 1; // extport starts form 0 #endif ret = RTK_VLAN_EXTPORTPVID_SET(extPort,rg_db.systemGlobal.initParam.fwdVLAN_CPU); if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_VLAN_SET_FAIL); } //set up PON and RGMII enable for binding ASSERT_EQ(RTK_CLASSIFY_CFSEL_SET(RTK_RG_MAC_PORT_PON, CLASSIFY_CF_SEL_ENABLE),RT_ERR_OK); #if !defined(CONFIG_RTL9602C_SERIES) #if defined(CONFIG_RGMII_RESET_PROCESS) //20150917LUKE: if RGMII_RESET_PROCESS is enable, RGMII will be used as LAN port. 
ASSERT_EQ(RTK_CLASSIFY_CFSEL_SET(RTK_RG_MAC_PORT_RGMII, CLASSIFY_CF_SEL_DISABLE),RT_ERR_OK); #else ASSERT_EQ(RTK_CLASSIFY_CFSEL_SET(RTK_RG_MAC_PORT_RGMII, CLASSIFY_CF_SEL_ENABLE),RT_ERR_OK); #endif #endif #if defined(CONFIG_RG_FLOW_BASED_PLATFORM) //init preHashPtn RTK_RG_ASIC_PREHASHPTN_SET(FB_PREHASH_PTN_SPORT, 0x9c005); RTK_RG_ASIC_PREHASHPTN_SET(FB_PREHASH_PTN_DPORT, 0x0); RTK_RG_ASIC_PREHASHPTN_SET(FB_PREHASH_PTN_SIP, 0x500000); RTK_RG_ASIC_PREHASHPTN_SET(FB_PREHASH_PTN_DIP, 0x0); ASSERT_EQ(RTK_RG_ASIC_GLOBALSTATE_SET(FB_GLOBAL_TTL_1, ENABLED), RT_ERR_OK); ASSERT_EQ(RTK_RG_ASIC_GLOBALSTATE_SET(FB_GLOBAL_TRAP_TCP_SYN_FIN_REST, ENABLED), RT_ERR_OK); ASSERT_EQ(RTK_RG_ASIC_GLOBALSTATE_SET(FB_GLOBAL_TRAP_TCP_SYN_ACK, ENABLED), RT_ERR_OK); ASSERT_EQ(RTK_RG_ASIC_GLOBALSTATE_SET(FB_GLOBAL_TRAP_FRAGMENT, ENABLED), RT_ERR_OK); ASSERT_EQ(RTK_RG_ASIC_GLOBALSTATE_SET(FB_GLOBAL_L3_CS_CHK, ENABLED), RT_ERR_OK); ASSERT_EQ(RTK_RG_ASIC_GLOBALSTATE_SET(FB_GLOBAL_L4_CS_CHK, ENABLED), RT_ERR_OK); ASSERT_EQ(RTK_RG_ASIC_GLOBALSTATE_SET(FB_GLOBAL_L2_FLOW_LOOKUP_BY_MAC, DISABLED), RT_ERR_OK); ASSERT_EQ(RTK_RG_ASIC_GLOBALSTATE_SET(FB_GLOBAL_CMP_TOS, DISABLED), RT_ERR_OK); //hash skip cvid ASSERT_EQ(RTK_RG_ASIC_GLOBALSTATE_SET(FB_GLOBAL_PATH12_SKIP_CVID, ENABLED), RT_ERR_OK); ASSERT_EQ(RTK_RG_ASIC_GLOBALSTATE_SET(FB_GLOBAL_PATH34_UCBC_SKIP_CVID, ENABLED), RT_ERR_OK); ASSERT_EQ(RTK_RG_ASIC_GLOBALSTATE_SET(FB_GLOBAL_PATH34_MC_SKIP_CVID, ENABLED), RT_ERR_OK); ASSERT_EQ(RTK_RG_ASIC_GLOBALSTATE_SET(FB_GLOBAL_PATH5_SKIP_CVID, ENABLED), RT_ERR_OK); ASSERT_EQ(RTK_RG_ASIC_GLOBALSTATE_SET(FB_GLOBAL_PATH6_SKIP_CVID, ENABLED), RT_ERR_OK); //hash skip svid ASSERT_EQ(RTK_RG_ASIC_GLOBALSTATE_SET(FB_GLOBAL_PATH12_SKIP_SVID, ENABLED), RT_ERR_OK); ASSERT_EQ(RTK_RG_ASIC_GLOBALSTATE_SET(FB_GLOBAL_PATH34_UCBC_SKIP_SVID, ENABLED), RT_ERR_OK); ASSERT_EQ(RTK_RG_ASIC_GLOBALSTATE_SET(FB_GLOBAL_PATH34_MC_SKIP_SVID, ENABLED), RT_ERR_OK); ASSERT_EQ(RTK_RG_ASIC_GLOBALSTATE_SET(FB_GLOBAL_PATH5_SKIP_SVID, ENABLED), RT_ERR_OK); ASSERT_EQ(RTK_RG_ASIC_GLOBALSTATE_SET(FB_GLOBAL_PATH6_SKIP_SVID, ENABLED), RT_ERR_OK); //path34 hash skip da idx ASSERT_EQ(RTK_RG_ASIC_GLOBALSTATE_SET(FB_GLOBAL_PATH34_SKIP_DA, ENABLED), RT_ERR_OK); //init flow ingress check //check tos rg_db.systemGlobal.flowCheckState[FB_FLOW_CHECK_PATH12_TOS] = DISABLED; rg_db.systemGlobal.flowCheckState[FB_FLOW_CHECK_PATH34_TOS] = DISABLED; rg_db.systemGlobal.flowCheckState[FB_FLOW_CHECK_PATH5_TOS] = DISABLED; rg_db.systemGlobal.flowCheckState[FB_FLOW_CHECK_PATH6_TOS] = DISABLED; //check protocol rg_db.systemGlobal.flowCheckState[FB_FLOW_CHECK_PATH12_PROTOCOL] = DISABLED; rg_db.systemGlobal.flowCheckState[FB_FLOW_CHECK_PATH6_PROTOCOL] = DISABLED; //check spa rg_db.systemGlobal.flowCheckState[FB_FLOW_CHECK_PATH12_SPA] = DISABLED; //check stream idx rg_db.systemGlobal.flowCheckState[FB_FLOW_CHECK_PATH12_STREAM_IDX] = DISABLED; //check path6 rg_db.systemGlobal.flowCheckState[FB_FLOW_CHECK_PATH6_SMAC_IDX] = ENABLED; rg_db.systemGlobal.flowCheckState[FB_FLOW_CHECK_PATH6_DMAC_IDX] = ENABLED; rg_db.systemGlobal.flowCheckState[FB_FLOW_CHECK_PATH6_SIP] = ENABLED; rg_db.systemGlobal.flowCheckState[FB_FLOW_CHECK_PATH6_DIP] = ENABLED; rg_db.systemGlobal.flowCheckState[FB_FLOW_CHECK_PATH6_SPORT] = DISABLED; rg_db.systemGlobal.flowCheckState[FB_FLOW_CHECK_PATH6_DPORT] = DISABLED; rg_db.systemGlobal.flowCheckState[FB_FLOW_CHECK_PATH6_GRE_CALL_ID] = ENABLED; rg_db.systemGlobal.flowCheckState[FB_FLOW_CHECK_PATH6_L2TP_TUNNEL_ID] = ENABLED; 
rg_db.systemGlobal.flowCheckState[FB_FLOW_CHECK_PATH6_L2TP_SID] = ENABLED; rtk_rg_asic_hsbaMode_set(L34_HSBA_LOG_ALL); //turn on all hsba log #else //not CONFIG_RG_FLOW_BASED_PLATFORM ASSERT_EQ(RTK_L34_GLOBALSTATE_SET(L34_GLOBAL_L34_STATE,ENABLED),RT_ERR_OK); ASSERT_EQ(RTK_L34_GLOBALSTATE_SET(L34_GLOBAL_L3NAT_STATE,ENABLED),RT_ERR_OK); ASSERT_EQ(RTK_L34_GLOBALSTATE_SET(L34_GLOBAL_L4NAT_STATE,ENABLED),RT_ERR_OK); ASSERT_EQ(RTK_L34_GLOBALSTATE_SET(L34_GLOBAL_L3CHKSERRALLOW_STATE,DISABLED),RT_ERR_OK); ASSERT_EQ(RTK_L34_GLOBALSTATE_SET(L34_GLOBAL_L4CHKSERRALLOW_STATE,DISABLED),RT_ERR_OK); ASSERT_EQ(RTK_L34_GLOBALSTATE_SET(L34_GLOBAL_BIND_STATE,ENABLED),RT_ERR_OK); //turn on binding for 0601 #if defined(CONFIG_RTL9602C_SERIES) //9602bvb remove this flag #else ASSERT_EQ(RTK_L34_GLOBALSTATE_SET(L34_GLOBAL_PPPKEEP_STATE,ENABLED),RT_ERR_OK); //turn off PPPoE keep in default #endif ASSERT_EQ(RTK_L34_GLOBALSTATE_SET(L34_GLOBAL_NAT2LOG_STATE,ENABLED),RT_ERR_OK); #if defined(CONFIG_RTL9600_SERIES) //20160217LUKE: for 9600, TTLMINUS_STATE will influence multicast packet also, so disable in advance here. ASSERT_EQ(RTK_L34_GLOBALSTATE_SET(L34_GLOBAL_TTLMINUS_STATE,DISABLED),RT_ERR_OK); #else //20160217LUKE: Otherwise, TTLMINUS_STATE won't influence multicast packet, so we can safely turn it on. ASSERT_EQ(RTK_L34_GLOBALSTATE_SET(L34_GLOBAL_TTLMINUS_STATE,ENABLED),RT_ERR_OK); #endif #if defined(CONFIG_RTL9602C_SERIES) ASSERT_EQ(RTK_L34_GLOBALSTATE_SET(L34_GLOBAL_FRAG2CPU_STATE,ENABLED),RT_ERR_OK); // 9602bvb remove L34_GLOBAL_KEEP_ORG_STATE.(always original when org=1) //20151001LUKE: turn on DSlite support defaultly ASSERT_EQ(rtk_l34_dsliteControl_set(L34_DSLITE_CTRL_DSLITE_STATE,ENABLED),RT_ERR_OK); //turn off flow route defaultly ASSERT_EQ(RTK_L34_GLOBALSTATE_SET(L34_GLOBAL_V4FLOW_RT_STATE,DISABLED),RT_ERR_OK); ASSERT_EQ(RTK_L34_GLOBALSTATE_SET(L34_GLOBAL_V6FLOW_RT_STATE,DISABLED),RT_ERR_OK); //only update sip arp ASSERT_EQ(RTK_L34_GLOBALSTATE_SET(L34_GLOBAL_SIP_ARP_TRF_STATE,ENABLED),RT_ERR_OK); ASSERT_EQ(RTK_L34_GLOBALSTATE_SET(L34_GLOBAL_DIP_ARP_TRF_STATE,DISABLED),RT_ERR_OK); #else ASSERT_EQ(RTK_L34_GLOBALSTATE_SET(L34_GLOBAL_FRAG2CPU_STATE,ENABLED),RT_ERR_OK); ASSERT_EQ(RTK_L34_GLOBALSTATE_SET(L34_GLOBAL_KEEP_ORG_STATE,ENABLED),RT_ERR_OK); #endif ASSERT_EQ(rtk_l34_wanRoutMode_set(L34_WANROUTE_FORWARD),RT_ERR_OK); ASSERT_EQ(rtk_l34_lookupMode_set(L34_LOOKUP_MAC_BASE),RT_ERR_OK); ASSERT_EQ(rtk_l34_hsabMode_set(L34_HSBA_LOG_ALL),RT_ERR_OK); //turn on all hsba log #endif // CONFIG_RG_FLOW_BASED_PLATFORM //initialize unmatch action register ASSERT_EQ(RTK_L34_BINDINGACTION_SET(L34_BIND_UNMATCHED_L2L3,L34_BIND_ACT_FORCE_L2BRIDGE),RT_ERR_OK); ASSERT_EQ(RTK_L34_BINDINGACTION_SET(L34_BIND_UNMATCHED_L2L34,L34_BIND_ACT_FORCE_L2BRIDGE),RT_ERR_OK); ASSERT_EQ(RTK_L34_BINDINGACTION_SET(L34_BIND_UNMATCHED_L3L2,L34_BIND_ACT_DROP),RT_ERR_OK); //20140717LUKE:these packets should be blocked ASSERT_EQ(RTK_L34_BINDINGACTION_SET(L34_BIND_UNMATCHED_L3L3,L34_BIND_ACT_FORCE_BINDL3),RT_ERR_OK); ASSERT_EQ(RTK_L34_BINDINGACTION_SET(L34_BIND_UNMATCHED_L3L34,L34_BIND_ACT_IPV4_LOOKUPL4TABLE_IPV6_TRAP),RT_ERR_OK); ASSERT_EQ(RTK_L34_BINDINGACTION_SET(L34_BIND_UNMATCHED_L34L2,L34_BIND_ACT_DROP),RT_ERR_OK); //20140717LUKE:these packets should be blocked ASSERT_EQ(RTK_L34_BINDINGACTION_SET(L34_BIND_UNMATCHED_L34L3,L34_BIND_ACT_FORCE_BINDL3_SKIP_LOOKUPL4),RT_ERR_OK); ASSERT_EQ(RTK_L34_BINDINGACTION_SET(L34_BIND_CUSTOMIZED_L2,L34_BIND_ACT_PERMIT_L2BRIDGE),RT_ERR_OK); 
ASSERT_EQ(RTK_L34_BINDINGACTION_SET(L34_BIND_CUSTOMIZED_L3,L34_BIND_ACT_FORCE_BINDL3),RT_ERR_OK); ASSERT_EQ(RTK_L34_BINDINGACTION_SET(L34_BIND_CUSTOMIZED_L34,L34_BIND_ACT_NORMAL_LOOKUPL34),RT_ERR_OK); #ifdef __KERNEL__ //Turn off CPU port flow control (2014/09/29: To turn on flow-control will cause CPU can't send packet to any port when pause frame is received.) ASSERT_EQ(RTK_PORT_MACFORCEABILITY_GET(RTK_RG_MAC_PORT_CPU,&cpuAbility),RT_ERR_OK); cpuAbility.txFc=DISABLED; cpuAbility.rxFc=DISABLED; ASSERT_EQ(RTK_PORT_MACFORCEABILITY_SET(RTK_RG_MAC_PORT_CPU,cpuAbility),RT_ERR_OK); ASSERT_EQ(RTK_PORT_MACFORCEABILITYSTATE_SET(RTK_RG_MAC_PORT_CPU,ENABLED),RT_ERR_OK); #endif //Turn on Forced_DMAC2CVID #if defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) #else if(rg_db.systemGlobal.initParam.macBasedTagDecision){ if(rg_db.systemGlobal.dmac2cvidDisabledPortmask & (1<<RTK_RG_MAC_PORT0)) ASSERT_EQ(RTK_SVLAN_DMACVIDSELSTATE_SET(RTK_RG_MAC_PORT0, DISABLED), RT_ERR_OK); else ASSERT_EQ(RTK_SVLAN_DMACVIDSELSTATE_SET(RTK_RG_MAC_PORT0, ENABLED), RT_ERR_OK); if(rg_db.systemGlobal.dmac2cvidDisabledPortmask & (1<<RTK_RG_MAC_PORT1)) ASSERT_EQ(RTK_SVLAN_DMACVIDSELSTATE_SET(RTK_RG_MAC_PORT1, DISABLED), RT_ERR_OK); else ASSERT_EQ(RTK_SVLAN_DMACVIDSELSTATE_SET(RTK_RG_MAC_PORT1, ENABLED), RT_ERR_OK); #if defined(CONFIG_RTL9600_SERIES) ASSERT_EQ(rtk_svlan_dmacVidSelForcedState_set(ENABLED), RT_ERR_OK); //9602bvb is not supported if(rg_db.systemGlobal.dmac2cvidDisabledPortmask & (1<<RTK_RG_MAC_PORT2)) ASSERT_EQ(RTK_SVLAN_DMACVIDSELSTATE_SET(RTK_RG_MAC_PORT2, DISABLED), RT_ERR_OK); else ASSERT_EQ(RTK_SVLAN_DMACVIDSELSTATE_SET(RTK_RG_MAC_PORT2, ENABLED), RT_ERR_OK); if(rg_db.systemGlobal.dmac2cvidDisabledPortmask & (1<<RTK_RG_MAC_PORT3)) ASSERT_EQ(RTK_SVLAN_DMACVIDSELSTATE_SET(RTK_RG_MAC_PORT3, DISABLED), RT_ERR_OK); else ASSERT_EQ(RTK_SVLAN_DMACVIDSELSTATE_SET(RTK_RG_MAC_PORT3, ENABLED), RT_ERR_OK); if(rg_db.systemGlobal.dmac2cvidDisabledPortmask & (1<<RTK_RG_MAC_PORT_RGMII)) ASSERT_EQ(RTK_SVLAN_DMACVIDSELSTATE_SET(RTK_RG_MAC_PORT_RGMII, DISABLED), RT_ERR_OK); else ASSERT_EQ(RTK_SVLAN_DMACVIDSELSTATE_SET(RTK_RG_MAC_PORT_RGMII, ENABLED), RT_ERR_OK); #endif if(rg_db.systemGlobal.dmac2cvidDisabledPortmask & (1<<RTK_RG_MAC_PORT_PON)) ASSERT_EQ(RTK_SVLAN_DMACVIDSELSTATE_SET(RTK_RG_MAC_PORT_PON, DISABLED), RT_ERR_OK); else ASSERT_EQ(RTK_SVLAN_DMACVIDSELSTATE_SET(RTK_RG_MAC_PORT_PON, ENABLED), RT_ERR_OK); } else { #if defined(CONFIG_RTL9600_SERIES) ASSERT_EQ(rtk_svlan_dmacVidSelForcedState_set(DISABLED), RT_ERR_OK); //9602bvb is not supported #endif } #endif #ifdef CONFIG_RG_NAPT_UPNP_SUPPORT rg_db.systemGlobal.initParam.naptInboundConnLookupFirstCallBack=_rtk_rg_fwdEngine_upnpCheck; #ifdef CONFIG_RG_IPV6_NAPT_SUPPORT rg_db.systemGlobal.initParam.ipv6NaptInboundConnLookupFirstCallBack=NULL; //reserved for future requirement #endif //end of CONFIG_RG_IPV6_NAPT_SUPPORT #else rg_db.systemGlobal.initParam.naptInboundConnLookupFirstCallBack=NULL; rg_db.systemGlobal.initParam.ipv6NaptInboundConnLookupFirstCallBack=NULL; #endif #ifdef CONFIG_RG_NAPT_VIRTUAL_SERVER_SUPPORT rg_db.systemGlobal.initParam.naptInboundConnLookupSecondCallBack=_rtk_rg_fwdEngine_virtualServerCheck; #ifdef CONFIG_RG_IPV6_NAPT_SUPPORT rg_db.systemGlobal.initParam.ipv6NaptInboundConnLookupSecondCallBack=_rtk_rg_fwdEngine_ipv6VirtualServerCheck; #endif //end of CONFIG_RG_IPV6_NAPT_SUPPORT #else rg_db.systemGlobal.initParam.naptInboundConnLookupSecondCallBack=NULL; rg_db.systemGlobal.initParam.ipv6NaptInboundConnLookupSecondCallBack=NULL; #endif #ifdef 
CONFIG_RG_NAPT_DMZ_SUPPORT rg_db.systemGlobal.initParam.naptInboundConnLookupThirdCallBack=_rtk_rg_fwdEngine_dmzCheck; #ifdef CONFIG_RG_IPV6_NAPT_SUPPORT rg_db.systemGlobal.initParam.ipv6NaptInboundConnLookupThirdCallBack=_rtk_rg_fwdEngine_ipv6DmzCheck; #endif //end of CONFIG_RG_IPV6_NAPT_SUPPORT #else rg_db.systemGlobal.initParam.naptInboundConnLookupThirdCallBack=NULL; rg_db.systemGlobal.initParam.ipv6NaptInboundConnLookupThirdCallBack=NULL; #endif //end of CONFIG_RG_NAPT_DMZ_SUPPORT #if defined(CONFIG_APOLLO) //XDSL turn off igmp ASSERT_EQ(_rtk_rg_igmpSnoopingOnOff(rg_db.systemGlobal.initParam.igmpSnoopingEnable,0,rg_db.systemGlobal.initParam.ivlMulticastSupport),RT_ERR_RG_OK); #elif defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) ASSERT_EQ(_rtk_rg_igmpSnoopingOnOff(rg_db.systemGlobal.initParam.igmpSnoopingEnable,1,rg_db.systemGlobal.initParam.ivlMulticastSupport),RT_ERR_RG_OK); #endif //default unlimited igmp max simultaneous group size //TRACE("rg_db.systemGlobal.igmp_max_simultaneous_group_size=%d"); //rg_db.systemGlobal.igmp_max_simultaneous_group_size = RTK_RG_DEFAULT_IGMP_SYS_MAX_SIMULTANEOUS_GROUP_SIZE_UNLIMIT; //0 //TRACE("rg_db.systemGlobal.igmp_simultaneous_group_size=%d"); //rg_db.systemGlobal.igmp_simultaneous_group_size = 0; //count the igmp_simultaneous_group_size #if defined(CONFIG_RG_IGMP_SNOOPING) || defined(CONFIG_RG_MLD_SNOOPING) // flush all igmp entry rtl_flushAllIgmpRecord(1); #endif // For IPMC Server in WLAN case //ret=rtk_l2_portLookupMissAction_set(RTK_RG_PORT_CPU,DLF_TYPE_IPMC,ACTION_FORWARD); ASSERT_EQ(RTK_L2_PORTLOOKUPMISSACTION_SET(RTK_RG_PORT_CPU,DLF_TYPE_IPMC,ACTION_TRAP2CPU),RT_ERR_RG_OK); //ret=rtk_l2_portLookupMissAction_set(RTK_RG_PORT_CPU,DLF_TYPE_IP6MC,ACTION_FORWARD); ASSERT_EQ(RTK_L2_PORTLOOKUPMISSACTION_SET(RTK_RG_PORT_CPU,DLF_TYPE_IP6MC,ACTION_TRAP2CPU),RT_ERR_RG_OK); #if defined(CONFIG_RTL9600_SERIES) ASSERT_EQ(RTK_L2_PORTLOOKUPMISSACTION_SET(RTK_RG_PORT_CPU,DLF_TYPE_MCAST,ACTION_DROP),RT_ERR_RG_OK); #else ASSERT_EQ(RTK_L2_PORTLOOKUPMISSACTION_SET(RTK_RG_PORT_CPU,DLF_TYPE_MCAST,ACTION_TRAP2CPU),RT_ERR_RG_OK); #endif mbpmsk.bits[0]=1<<RTK_RG_MAC_PORT_CPU; ASSERT_EQ(rtk_l2_lookupMissFloodPortMask_set(DLF_TYPE_MCAST,&mbpmsk),RT_ERR_RG_OK); //set multicast flooding port for(i=0;i<=RTK_RG_PORT_CPU;i++) { ASSERT_EQ(RTK_TRAP_PORTIGMPMLDCTRLPKTACTION_SET(i, IGMPMLD_TYPE_IGMPV1,ACTION_TRAP2CPU),RT_ERR_RG_OK) ; ASSERT_EQ(RTK_TRAP_PORTIGMPMLDCTRLPKTACTION_SET(i, IGMPMLD_TYPE_IGMPV2,ACTION_TRAP2CPU),RT_ERR_RG_OK) ; ASSERT_EQ(RTK_TRAP_PORTIGMPMLDCTRLPKTACTION_SET(i, IGMPMLD_TYPE_IGMPV3,ACTION_TRAP2CPU),RT_ERR_RG_OK) ; ASSERT_EQ(RTK_TRAP_PORTIGMPMLDCTRLPKTACTION_SET(i, IGMPMLD_TYPE_MLDV1,ACTION_TRAP2CPU),RT_ERR_RG_OK); ASSERT_EQ(RTK_TRAP_PORTIGMPMLDCTRLPKTACTION_SET(i, IGMPMLD_TYPE_MLDV2,ACTION_TRAP2CPU),RT_ERR_RG_OK) ; } rg_db.systemGlobal.vlanInit = 1; //Set Default routing to CPU bzero(&rtEntry, sizeof(rtk_l34_routing_entry_t)); rtEntry.valid=1; rtEntry.process=L34_PROCESS_CPU; //set all packets to CPU if WAN is default route ASSERT_EQ(RTK_L34_ROUTINGTABLE_SET(V4_DEFAULT_ROUTE_IDX, &rtEntry),RT_ERR_OK); //set default route #ifdef CONFIG_DUALBAND_CONCURRENT /* //Create routing to CPU for Wifi2 subnet bzero(&rtEntry, sizeof(rtk_l34_routing_entry_t)); rtEntry.ipAddr=0x0afdfd00; //10.253.253.0 rtEntry.ipMask=29; // 2bits, total has 4 addresses rtEntry.valid=1; rtEntry.process=L34_PROCESS_CPU; //set all packets to CPU if WAN is default route ASSERT_EQ(RTK_L34_ROUTINGTABLE_SET(SLAVE_WIFI_ROUTE_IDX, &rtEntry),RT_ERR_OK); //set ipc route */ //20160531Chuck: instead IPC 
routing trap by reserved ACl to save Routing entry. _rtk_rg_aclAndCfReservedRuleAdd(RTK_RG_ACLANDCF_RESERVED_WIFI2_IPC_ROUTING_TRAP, NULL); #endif //Set IPv6 Default routing to CPU bzero(&rtv6Entry, sizeof(rtk_ipv6Routing_entry_t)); rtv6Entry.valid=1; rtv6Entry.type=L34_IPV6_ROUTE_TYPE_TRAP; //set all ipv6 packets to CPU if WAN is default route ASSERT_EQ(RTK_L34_IPV6ROUTINGTABLE_SET(V6_DEFAULT_ROUTE_IDX, &rtv6Entry),RT_ERR_OK); //set default route //Create dummy LUT entry for TRAP to CPU /*i=rg_db.systemGlobal.defaultTrapLUTIdx; ret = rtk_rg_macEntry_find(&macEt, &i); if(ret==RT_ERR_RG_OK && i==rg_db.systemGlobal.defaultTrapLUTIdx) //delete old record if had rtk_rg_macEntry_del(rg_db.systemGlobal.defaultTrapLUTIdx);*/ bzero(&macEt, sizeof(rtk_rg_macEntry_t)); macEt.isIVL=1; //set to IVL macEt.fid=0; macEt.vlan_id=rg_db.systemGlobal.initParam.fwdVLAN_CPU; macEt.port_idx=RTK_RG_PORT_CPU; macEt.static_entry=1; //static for not age-out, didn't turn on ARP_USED flag for it to TRAP ASSERT_EQ(rtk_rg_apollo_macEntry_add(&macEt, &rg_db.systemGlobal.defaultTrapLUTIdx),RT_ERR_RG_OK); #if defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) //for broadcast packet to cpu memset(&macEt.mac.octet[0],0xff,sizeof(rtk_mac_t)); macEt.isIVL=1; //set to IVL macEt.fid=0; macEt.vlan_id=rg_db.systemGlobal.initParam.fwdVLAN_CPU; macEt.port_idx=RTK_RG_PORT_CPU; macEt.static_entry=1; //static for not age-out, didn't turn on ARP_USED flag for it to TRAP ASSERT_EQ(rtk_rg_apollo_macEntry_add(&macEt, &rg_db.systemGlobal.defaultTrapLUTIdx),RT_ERR_RG_OK); #endif //Set the downstream CF rule for VLAN binding /*memset(&cfEntry, 0, sizeof(cfEntry)); cfEntry.index=RG_GLB_VLAN_BINDING_CFIDX; cfEntry.direction=CLASSIFY_DIRECTION_DS; cfEntry.valid=0; //Disabled in begining, if there is Vlan binding, it will become valid cfEntry.act.dsAct.cAct=CLASSIFY_DS_CACT_ADD_CTAG_8100; cfEntry.act.dsAct.cVidAct=CLASSIFY_DS_VID_ACT_FROM_LUT; cfEntry.act.dsAct.cPriAct=CLASSIFY_DS_PRI_ACT_NOP; cfEntry.act.dsAct.uniAct=CLASSIFY_DS_UNI_ACT_NOP; //all port should be classified ret = rtk_classify_cfgEntry_add(&cfEntry); if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_CF_ENTRY_ACCESS_FAILED);*/ /* IPv4 broadcast and unicast unknown DA flooding setting: trap to CPU */ ASSERT_EQ(rtk_l2_lookupMissFloodPortMask_set(DLF_TYPE_BCAST,&mbpmsk),RT_ERR_RG_OK); //set broadcast ASSERT_EQ(rtk_l2_lookupMissFloodPortMask_set(DLF_TYPE_UCAST,&mbpmsk),RT_ERR_RG_OK); //set unicast unknown DA flooding mask //ASSERT_EQ(rtk_l2_lookupMissAction_set(DLF_TYPE_BCAST,ACTION_FORWARD),RT_ERR_RG_OK); //broadcast action can't be set, always flooding ASSERT_EQ(rtk_l2_lookupMissAction_set(DLF_TYPE_UCAST,rg_kernel.layer2LookupMissFlood2CPU==RTK_RG_DISABLED?ACTION_TRAP2CPU:ACTION_FORWARD),RT_ERR_RG_OK); //set unicast unknown DA action //add init callback to sync protocal-stack if(rg_db.systemGlobal.initParam.initByHwCallBack != NULL) { rg_db.systemGlobal.initParam.initByHwCallBack(); } //set 1Q-Base Priority ReMapping to internal Priority for(i=0;i<8;i++) { ASSERT_EQ(RTK_QOS_1PPRIREMAPGROUP_SET(0,i,i,0),SUCCESS); } // set CPU port's SA learning limit action to TRAP ASSERT_EQ(RTK_L2_PORTLIMITLEARNINGCNTACTION_SET(RTK_RG_MAC_PORT_CPU,LIMIT_LEARN_CNT_ACTION_TO_CPU),RT_ERR_OK); // disable CPU port & all extension port src filter srcExtPortFilterMmsk.bits[0]=RTK_RG_ALL_MAC_PORTMASK_WITHOUT_CPU; ASSERT_EQ(RTK_L2_SRCPORTEGRFILTERMASK_SET(&srcExtPortFilterMmsk),SUCCESS); srcExtPortFilterMmsk.bits[0]=0; // src filter ext port by software ASSERT_EQ(rtk_l2_extPortEgrFilterMask_set(&srcExtPortFilterMmsk),SUCCESS); 
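	/* Note (added for clarity): the QoS setup below remaps internal priority to CPU priority 1:1,
	 * programs the per-source priority-selection weights (chip-dependent values, see the #if
	 * branches), and points every MAC port at priority-to-queue mapping table 3. */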
	//set internal pri to CPU pri mapping
	for(i=0;i<8;i++)
	{
		ASSERT_EQ(rtk_qos_fwd2CpuPriRemap_set(i,i),SUCCESS);
	}

	//set pri weight
	{
		rtk_qos_priSelWeight_t weight;
		memset(&weight,0,sizeof(weight));
#if defined(CONFIG_RTL9602C_SERIES) || defined(CONFIG_RTL9607C_SERIES)
		weight.weight_of_portBased=1;
		weight.weight_of_dot1q=2;
		weight.weight_of_dscp=0;	//disable DSCP pri-decision
		weight.weight_of_acl=7;
		weight.weight_of_svlanBased=5;
		weight.weight_of_l4Based=6;
		ASSERT_EQ(RTK_QOS_PRISELGROUP_SET(0, &weight),SUCCESS);
		ASSERT_EQ(RTK_QOS_PRISELGROUP_SET(1, &weight),SUCCESS);
#else
		weight.weight_of_portBased=1;
		weight.weight_of_dot1q=2;
		weight.weight_of_dscp=0;	//disable DSCP pri-decision
		weight.weight_of_acl=15;
		weight.weight_of_lutFwd=14;
		weight.weight_of_saBaed=13;
		weight.weight_of_vlanBased=10;
		weight.weight_of_svlanBased=9;
		weight.weight_of_l4Based=11;
		ASSERT_EQ(RTK_QOS_PRISELGROUP_SET(0, &weight),SUCCESS);
		ASSERT_EQ(RTK_QOS_PRISELGROUP_SET(1, &weight),SUCCESS);
#endif
	}

	for(i=0;i<RTK_RG_MAC_PORT_MAX;i++)
	{
		ASSERT_EQ(RTK_QOS_PORTPRIMAP_SET(i, 3),SUCCESS);	//each port uses table 3 for internal-pri <=> queue mapping
	}

#if defined(CONFIG_RTL9602C_SERIES) || defined(CONFIG_RTL9607C_SERIES)
	//ApolloFE supports only one wifi, so no reserved-ACL patch is needed.
#else
#if defined(CONFIG_RG_WLAN_HWNAT_ACCELERATION) && defined(CONFIG_APOLLO)
	ASSERT_EQ(_rtk_rg_acl_reserved_wifi_extPMaskTranslate_add(0,(1<<(RTK_RG_EXT_PORT0-RTK_RG_PORT_CPU)),(1<<(RTK_RG_EXT_PORT2-RTK_RG_PORT_CPU))),SUCCESS);	//from EXT1, destExtPortMask=0x8 (patch: NIC RX has no EXT-SPA field)
#ifdef CONFIG_DUALBAND_CONCURRENT
	ASSERT_EQ(_rtk_rg_acl_reserved_wifi_extPMaskTranslate_add(1,(1<<(RTK_RG_EXT_PORT1-RTK_RG_PORT_CPU)),(1<<(RTK_RG_EXT_PORT3-RTK_RG_PORT_CPU))),SUCCESS);	//from EXT2, destExtPortMask=0x10 (patch: NIC RX has no EXT-SPA field)

	vlanForSlaveWifi.isIVL=0;
	vlanForSlaveWifi.memberPortMask.portmask=((1<<RTK_RG_PORT_CPU)|(1<<RTK_RG_EXT_PORT0)|(1<<RTK_RG_EXT_PORT1));
	vlanForSlaveWifi.untagPortMask.portmask=RTK_RG_ALL_MAC_PORTMASK;
	vlanForSlaveWifi.vlanId=CONFIG_DEFAULT_TO_SLAVE_GMAC_VID;
	vlanForSlaveWifi.vlan_based_pri=0;
	vlanForSlaveWifi.vlan_based_pri_enable=DISABLED;
	ASSERT_EQ(rtk_rg_apollo_cvlan_add(&vlanForSlaveWifi),SUCCESS);

	ASSERT_EQ(_rtk_rg_acl_reserved_wifi_internalVidPriTranslateForSlave(CONFIG_DEFAULT_TO_SLAVE_GMAC_VID,CONFIG_DEFAULT_TO_SLAVE_GMAC_PRI,CONFIG_DEFAULT_TO_SLAVE_GMAC_VID,CONFIG_DEFAULT_TO_SLAVE_GMAC_PRI),SUCCESS);	// patch for Slave GMAC packets received with the special 1Q VID and PRI
	if(CONFIG_DEFAULT_TO_SLAVE_GMAC_PRI==7)
	{
		ASSERT_EQ(RTK_QOS_1PPRIREMAPGROUP_SET(0,CONFIG_DEFAULT_TO_SLAVE_GMAC_PRI,6,0),SUCCESS);	//patch: Pri=4 is reserved for the Slave GMAC, so 1Q Pri=4 is remapped to Pri=5.
	}
	else
	{
		ASSERT_EQ(RTK_QOS_1PPRIREMAPGROUP_SET(0,CONFIG_DEFAULT_TO_SLAVE_GMAC_PRI,CONFIG_DEFAULT_TO_SLAVE_GMAC_PRI+1,0),SUCCESS);	//patch: Pri=4 is reserved for the Slave GMAC, so 1Q Pri=4 is remapped to Pri=5.
} // SET SLAVE CPU STATIC MAC ADDRESS { rtk_rg_macEntry_t macEntry; int entry_idx; memset(&macEntry,0,sizeof(macEntry)); macEntry.arp_used=0; macEntry.isIVL=0; _rtk_rg_str2mac(CONFIG_DEFAULT_SLAVE_IPC_MAC_ADDRESS,&macEntry.mac); //printk("######### %02x:%02x:%02x:%02x:%02x:%02x ###########\n",macEntry.mac.octet[0],macEntry.mac.octet[1] // ,macEntry.mac.octet[2],macEntry.mac.octet[3],macEntry.mac.octet[4],macEntry.mac.octet[5]); macEntry.port_idx=RTK_RG_EXT_PORT1; macEntry.static_entry=1; macEntry.fid=LAN_FID; macEntry.vlan_id=rg_db.systemGlobal.initParam.fwdVLAN_CPU; rtk_rg_apollo_macEntry_add(&macEntry,&entry_idx); } // SET MASTER CPU STATIC MAC ADDRESS { rtk_rg_macEntry_t macEntry; int entry_idx; memset(&macEntry,0,sizeof(macEntry)); macEntry.arp_used=0; macEntry.isIVL=0; _rtk_rg_str2mac(CONFIG_DEFAULT_MASTER_IPC_MAC_ADDRESS,&macEntry.mac); //printk("######### %02x:%02x:%02x:%02x:%02x:%02x ###########\n",macEntry.mac.octet[0],macEntry.mac.octet[1] // ,macEntry.mac.octet[2],macEntry.mac.octet[3],macEntry.mac.octet[4],macEntry.mac.octet[5]); macEntry.port_idx=RTK_RG_PORT_CPU; macEntry.static_entry=1; macEntry.fid=LAN_FID; macEntry.vlan_id=rg_db.systemGlobal.initParam.fwdVLAN_CPU; rtk_rg_apollo_macEntry_add(&macEntry,&entry_idx); } #endif//end of #ifdef CONFIG_DUALBAND_CONCURRENT /* //just for debug { rtk_portmask_t mirroredRxPortmask; rtk_portmask_t mirroredTxPortmask; mirroredRxPortmask.bits[0] =(1<<RTK_RG_PORT_CPU); mirroredTxPortmask.bits[0] =(1<<RTK_RG_PORT_CPU); ASSERT_EQ(rtk_mirror_portBased_set(RTK_RG_PORT0, &mirroredRxPortmask, &mirroredTxPortmask),SUCCESS); } */ #endif //end of #if defined(CONFIG_RG_WLAN_HWNAT_ACCELERATION) && defined(CONFIG_APOLLO) #endif //end of #if defined(CONFIG_RTL9602C_SERIES) #ifdef CONFIG_MASTER_WLAN0_ENABLE _rtk_rg_check_wlan_device_exist_or_not(); for(i=0;i<MAX_WLAN_DEVICE_NUM;i++) { //20140716LUKE:default CPU VLAN should contains all Wlan0's device if(rg_db.systemGlobal.wlan0BindDecision[i].exist) { assert_ok(rtk_rg_apollo_wlanDevBasedCVlanId_set(0, i, rg_db.systemGlobal.initParam.fwdVLAN_CPU)); rg_db.vlan[rg_db.systemGlobal.initParam.fwdVLAN_CPU].wlan0DevMask|=(0x1<<i); rg_db.vlan[rg_db.systemGlobal.initParam.fwdVLAN_CPU].wlan0UntagMask=0xffffffff; //all untag rg_db.systemGlobal.wlan0SourceAddrLearningLimitNumber[i]=DEF_SOFTWARE_LEARNING_LIMIT; } } #endif #if defined(__KERNEL__) && defined(CONFIG_APOLLO) { int reg; //Enable PON port bandwidth control assert_ok(ioal_mem32_read(0x2d138,®)); assert_ok(ioal_mem32_write(0x2d138,reg|0x2)); //Enable PON port bandwidth control assert_ok(ioal_mem32_read(0x2308c,®)); reg &= ~0xf0; reg |= 0xd0; assert_ok(ioal_mem32_write(0x2308c,reg)); } #endif //BC, IPv6 MC rate limit //TRACE("rg_db.systemGlobal.BCRateLimitPortMask=%d", rg_db.systemGlobal.BCRateLimitPortMask); //rg_db.systemGlobal.BCRateLimitPortMask = 0x0; rg_db.systemGlobal.BCRateLimitShareMeterIdx = -1; //disabled //TRACE("rg_db.systemGlobal.BCByteCount=%d", rg_db.systemGlobal.BCByteCount); //not set /proc/rg //rg_db.systemGlobal.BCByteCount = 0; //TRACE("rg_db.systemGlobal.IPv6MCRateLimitPortMask=%d", rg_db.systemGlobal.IPv6MCRateLimitPortMask); //rg_db.systemGlobal.IPv6MCRateLimitPortMask = 0x0; rg_db.systemGlobal.IPv6MCRateLimitShareMeterIdx = -1; //TRACE("rg_db.systemGlobal.IPv6MCByteCount=%d", rg_db.systemGlobal.IPv6MCByteCount); //rg_db.systemGlobal.IPv6MCByteCount = 0; //TRACE("rg_db.systemGlobal.IPv4MCRateLimitPortMask=%d", rg_db.systemGlobal.IPv4MCRateLimitPortMask); //rg_db.systemGlobal.IPv4MCRateLimitPortMask = 0x0; 
rg_db.systemGlobal.IPv4MCRateLimitShareMeterIdx = -1; //TRACE("rg_db.systemGlobal.IPv4MCByteCount=%d", rg_db.systemGlobal.IPv4MCByteCount); //rg_db.systemGlobal.IPv4MCByteCount = 0; //TRACE("rg_db.systemGlobal.unKnownDARateLimitPortMask=%d", rg_db.systemGlobal.unKnownDARateLimitPortMask); //rg_db.systemGlobal.unKnownDARateLimitPortMask = 0x0; rg_db.systemGlobal.unKnownDARateLimitShareMeterIdx = -1; //TRACE("rg_db.systemGlobal.unKnownDAByteCount=%d", rg_db.systemGlobal.unKnownDAByteCount); //rg_db.systemGlobal.unKnownDAByteCount = 0; //20160817LUKE: turn on TCP swap fin and delete rst connection functionality. rg_db.systemGlobal.tcpSwapFinDelRst = RG_INIT_DEFAULT_tcp_swap_fin_del_rst; #if defined(CONFIG_RG_IGMP_SNOOPING) || defined(CONFIG_RG_MLD_SNOOPING) //igmp report ingress filter portmask, default enabled all port permit rg_db.systemGlobal.igmpReportIngressPortmask= 0xffff; //igmp report egress filter portmask, default enabled all port permit rg_db.systemGlobal.igmpReportPortmask= 0xffff; //igmp query filter portmask, default enabled all port permit rg_db.systemGlobal.igmpMldQueryPortmask= 0xffff; rtl_mCastModuleArray[rg_db.systemGlobal.nicIgmpModuleIndex].enableFastLeave=0; #endif // set default short timeout = 2secs _rtk_rg_tcpShortTimeoutHouseKeep_set(2*CONFIG_HZ); //trap all IPv6 link local packet to protocal-stack //assert_ok(_rtk_rg_aclAndCfReservedRuleAdd(RTK_RG_ACLANDCF_RESERVED_IPV6_LINK_LOCAL_TRAP, NULL)); //sync proc/rg/hwnat, default enable _rtk_rg_hwnatACLManipulate(ENABLED); rg_db.systemGlobal.fwdStatistic=0; memset(&rg_db.systemGlobal.statistic,0,sizeof(rg_db.systemGlobal.statistic)); rg_db.systemGlobal.fwdStatistic=1; //Enable fwd statistic by default #if defined(CONFIG_APOLLO) if(rg_db.systemGlobal.stpBlockingPortmask.portmask) { _rtk_rg_aclAndCfReservedRuleDel(RTK_RG_ACLANDCF_RESERVED_STPBLOCKING); _rtk_rg_aclAndCfReservedRuleAdd(RTK_RG_ACLANDCF_RESERVED_STPBLOCKING,NULL); } else _rtk_rg_aclAndCfReservedRuleDel(RTK_RG_ACLANDCF_RESERVED_STPBLOCKING); #ifdef CONFIG_GPON_FEATURE if((init_param!=NULL)&&(init_param->wanPortGponMode==1)) { rg_db.systemGlobal.ponPortUnmatchCfDrop=1; rtk_classify_unmatchAction_set(CLASSIFY_UNMATCH_PERMIT_WITHOUT_PON); } else { //TRACE("rg_db.systemGlobal.ponPortUnmatchCfDrop=%d", rg_db.systemGlobal.ponPortUnmatchCfDrop); //rg_db.systemGlobal.ponPortUnmatchCfDrop=0; rtk_classify_unmatchAction_set(CLASSIFY_UNMATCH_PERMIT); } #else //TRACE("rg_db.systemGlobal.ponPortUnmatchCfDrop=%d", rg_db.systemGlobal.ponPortUnmatchCfDrop); //rg_db.systemGlobal.ponPortUnmatchCfDrop=0; rtk_classify_unmatchAction_set(CLASSIFY_UNMATCH_PERMIT); #endif #endif // end defined(CONFIG_APOLLO) //20151007LUKE: initialize dslite multicast table for(i=0;i<MAX_DSLITEMC_SW_TABLE_SIZE;i++){ rtk_l34_dsliteMc_entry_t dsliteMcEntry; dsliteMcEntry.index=i; memset(&dsliteMcEntry.ipUPrefix64,0,sizeof(rtk_ipv6_addr_t)); memset(&dsliteMcEntry.ipUPrefix64Mask,0xff,sizeof(rtk_ipv6_addr_t)); memset(&dsliteMcEntry.ipMPrefix64,0,sizeof(rtk_ipv6_addr_t)); memset(&dsliteMcEntry.ipMPrefix64Mask,0xff,sizeof(rtk_ipv6_addr_t)); //ASSERT_EQ(rtk_rg_apollo_dsliteMcTable_set(&dsliteMcEntry),RT_ERR_RG_OK); ASSERT_EQ(RTK_L34_DSLITEMULTICAST_SET(&dsliteMcEntry),RT_ERR_RG_OK); } //software unmatch trap to PS rg_db.systemGlobal.dsliteControlSet[L34_DSLITE_CTRL_MC_PREFIX_UNMATCH]=RTK_L34_DSLITE_UNMATCH_ACT_TRAP; //hardware unmatch trap to romeDriver rtk_l34_dsliteControl_set(L34_DSLITE_CTRL_MC_PREFIX_UNMATCH, RTK_L34_DSLITE_UNMATCH_ACT_TRAP); rg_db.systemGlobal.rgInit = 1; DEBUG("END!!!!!!!!!"); return 
(RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_initParam_set(rtk_rg_initParams_t *init_param) { int32 ret; //Check switch chip revision ASSERT_EQ(_rtk_rg_switch_version_get(&rg_kernel.apolloChipId,&rg_kernel.apolloRev,&rg_kernel.apolloSubtype),RT_ERR_OK); //before start, set init_state to avoid packet receive in fwdEngine may cause kernel panic _rtk_rg_set_initState(RTK_RG_DURING_INIT); ret=_rtk_rg_initParam_set(init_param); //reset state after init when success if(ret==RT_ERR_RG_OK) { _rtk_rg_set_initState(RTK_RG_INIT_FINISHED); return (RT_ERR_RG_OK); } else RETURN_ERR(ret); } int _rtk_rg_portmask_translator(rtk_rg_portmask_t in_pmask, rtk_portmask_t* out_mac_pmask, rtk_portmask_t* out_ext_pmask){ int i; #if !defined(CONFIG_RG_FLOW_BASED_PLATFORM) int EXT_CPU_PORT_flag = DISABLED; #endif if(out_mac_pmask==NULL||out_ext_pmask==NULL ) RETURN_ERR(RT_ERR_RG_NULL_POINTER); bzero(out_mac_pmask,sizeof(rtk_portmask_t)); bzero(out_ext_pmask,sizeof(rtk_portmask_t)); //set mac portmask for(i=0;i<RTK_RG_MAX_MAC_PORT;i++){ if(in_pmask.portmask & (1<<i)){ out_mac_pmask->bits[0] |= (1<<i); } } #if defined(CONFIG_RG_FLOW_BASED_PLATFORM) // in flow based architecture, the extension ports and physical cpu ports are independent //set ext portmask: Bit[5:0]: EXT 5-0 in MAC9; Bit[11:6]: EXT 11-6 in MAC10; Bit[17:12]: EXT 17-12 in MAC7 for(i=0;i<RTK_RG_MAX_EXT_PORT;i++){ if(in_pmask.portmask & (1<<(i+RTK_RG_MAX_MAC_PORT))){ out_ext_pmask->bits[0] |= (1<<i); } } DEBUG("PortMask 0x%x: physical port is 0x%x, extension port is 0x%x", in_pmask, out_mac_pmask->bits[0], out_ext_pmask->bits[0]); // remove me if work well #else //set ext portmask for(i=0;i<RTK_RG_MAX_MAC_PORT;i++){ if(in_pmask.portmask & (1<<(i+RTK_RG_MAX_MAC_PORT))){ out_ext_pmask->bits[0] |= (1<<(i+1)); EXT_CPU_PORT_flag = ENABLED;//ENABLE MAC_CPU_PORT if any EXT_PORT is ENABLED } } if(EXT_CPU_PORT_flag==ENABLED) out_mac_pmask->bits[0] |= (1<<RTK_RG_MAC_PORT_CPU); if(in_pmask.portmask & (1<<RTK_RG_MAC_PORT_CPU)){ out_ext_pmask->bits[0] |= 1; //enable extCPU port } #endif return (RT_ERR_RG_OK); } //LAN Interface/Static Route/IPv4 DHCP Server int _rtk_rg_softwareArpTableLookUp(unsigned short routingIdx, ipaddr_t ipAddr, rtk_rg_arp_linkList_t **pSoftwareArpEntry, int resetIdleTime) { rtk_rg_arp_linkList_t *pArpEntry; if(list_empty(&rg_db.softwareArpTableHead[ipAddr&0xff])) goto NOT_FOUND; list_for_each_entry(pArpEntry,&rg_db.softwareArpTableHead[ipAddr&0xff],arp_list) { if(rg_db.arp[pArpEntry->idx].routingIdx==routingIdx && rg_db.arp[pArpEntry->idx].ipv4Addr==ipAddr) { TRACE("Found! 
SW ARP[%d] is match with %x",pArpEntry->idx,ipAddr); //Reset idle time //20141009LUKE: update idleSecs and sendReqCount if(resetIdleTime) { rg_db.arp[pArpEntry->idx].idleSecs=0; rg_db.arp[pArpEntry->idx].sendReqCount=0; } *pSoftwareArpEntry=pArpEntry; return (RT_ERR_RG_OK); } } NOT_FOUND: //not found *pSoftwareArpEntry=NULL; return (RT_ERR_RG_OK); } int _rtk_rg_softwareArpTableAdd(unsigned short routingIdx, ipaddr_t ipv4Addr, int l2Idx, int staticEntry) { rtk_rg_arp_linkList_t *pNewArpEntry,*pNextArpEntry; //Check if we have not-used free arp list if(list_empty(&rg_db.softwareArpFreeListHead)) { DEBUG("all free SW ARP list are allocated..."); //Clear all recently not used entries if(_rtk_rg_freeRecentlyNotUsedArpList()!=RT_ERR_RG_OK) RETURN_ERR(RT_ERR_RG_FAILED); } //Get one from free list list_for_each_entry_safe(pNewArpEntry,pNextArpEntry,&rg_db.softwareArpFreeListHead,arp_list) //just return the first entry right behind of head { list_del_init(&pNewArpEntry->arp_list); break; } //DEBUG("the free ARP %p idx is %d, routing=%d",pNewArpEntry,pNewArpEntry->idx,pNewArpEntry->routingIdx); //Setup ARP information rg_db.arp[pNewArpEntry->idx].rtk_arp.nhIdx=l2Idx; rg_db.arp[pNewArpEntry->idx].rtk_arp.valid=1; rg_db.arp[pNewArpEntry->idx].ipv4Addr=ipv4Addr; rg_db.arp[pNewArpEntry->idx].staticEntry=staticEntry; rg_db.arp[pNewArpEntry->idx].idleSecs=0; rg_db.arp[pNewArpEntry->idx].sendReqCount=0; rg_db.arp[pNewArpEntry->idx].routingIdx=routingIdx; //DEBUG("the arp[%d] has ip=%x, static=%d, nhIdx=%d",pNewArpEntry->idx,rg_db.arp[pNewArpEntry->idx].ipv4Addr,rg_db.arp[pNewArpEntry->idx].staticEntry,rg_db.arp[pNewArpEntry->idx].rtk_arp.nhIdx); //Add to hash head list list_add(&pNewArpEntry->arp_list,&rg_db.softwareArpTableHead[ipv4Addr&0xff]); #if defined(CONFIG_RTL9600_SERIES) #else //support lut traffic bit if(rg_db.lut[l2Idx].valid) rg_db.lut[l2Idx].arp_refCount++; #endif return (RT_ERR_RG_OK); } int _rtk_rg_softwareArpTableDel(rtk_rg_arp_linkList_t *pDelArpEntry) { //20140529: l2 entry is deleted when pppoe gateway nexthop arp timeout. 
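	/* Note (added for clarity): this routine unlinks the ARP entry from its hash bucket, clears
	 * its rg_db.arp[] slot and returns the node to the software free list.  On chips that keep
	 * the LUT traffic bit it also decrements the L2 entry's arp_refCount and, once no ARP or
	 * nexthop reference remains, clears the ARP_USED flag of the corresponding ASIC L2 entry. */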
#if 0 int ret,l2Idx; //unsigned char hashIdx; //rtk_rg_macEntry_t macEntry; l2Idx=rg_db.arp[pDelArpEntry->idx].rtk_arp.nhIdx; //keep original data, only toggle arp_used to 0 if(rg_db.lut[l2Idx].valid) { #if 1 ret=rtk_rg_macEntry_del(l2Idx); #else memcpy(macEntry.mac.octet,rg_db.lut[l2Idx].rtk_lut.entry.l2UcEntry.mac.octet,ETHER_ADDR_LEN); macEntry.fid=rg_db.lut[l2Idx].rtk_lut.entry.l2UcEntry.fid; macEntry.isIVL=(rg_db.lut[l2Idx].rtk_lut.entry.l2UcEntry.flags&RTK_L2_UCAST_FLAG_IVL)>0?1:0; macEntry.port_idx=rg_db.lut[l2Idx].rtk_lut.entry.l2UcEntry.port; macEntry.vlan_id=rg_db.lut[l2Idx].rtk_lut.entry.l2UcEntry.vid; macEntry.static_entry=(rg_db.lut[l2Idx].rtk_lut.entry.l2UcEntry.flags&RTK_L2_UCAST_FLAG_STATIC)>0?1:0; DEBUG("### disable arp_used in l2[%d]=%02x:%02x:%02x:%02x:%02x:%02x ###\n",l2Idx,macEntry.mac.octet[0],macEntry.mac.octet[1],macEntry.mac.octet[2],macEntry.mac.octet[3],macEntry.mac.octet[4],macEntry.mac.octet[5]); macEntry.arp_used=0; ret=rtk_rg_macEntry_add(&macEntry,&l2Idx); #endif assert_ok(ret); } #endif #if defined(CONFIG_RTL9600_SERIES) #else //support lut traffic bit int retval=0, i, l2Idx, nxthopRefFlag; rtk_l2_addr_table_t asic_l2_entry; uint32 arpL2Idx = rg_db.arp[pDelArpEntry->idx].rtk_arp.nhIdx; #endif //Delete from head list list_del_init(&pDelArpEntry->arp_list); //Clear data memset(&rg_db.arp[pDelArpEntry->idx],0,sizeof(rtk_rg_table_arp_t)); //Add back to free list list_add(&pDelArpEntry->arp_list,&rg_db.softwareArpFreeListHead); #if defined(CONFIG_RTL9600_SERIES) #else //support lut traffic bit if(rg_db.lut[arpL2Idx].valid) { if(rg_db.lut[arpL2Idx].arp_refCount>0) rg_db.lut[arpL2Idx].arp_refCount--; if(rg_db.lut[arpL2Idx].arp_refCount==0) { nxthopRefFlag = 0; for(i=0; i<MAX_NEXTHOP_SW_TABLE_SIZE; i++) { if(arpL2Idx==rg_db.nexthop[i].rtk_nexthop.nhIdx) { nxthopRefFlag = 1; break; } } if(nxthopRefFlag==0) { //Sync to LUT l2Idx = arpL2Idx; memset(&asic_l2_entry,0,sizeof(rtk_l2_addr_table_t)); retval = rtk_l2_nextValidEntry_get(&l2Idx, &asic_l2_entry); if((retval==RT_ERR_OK) && (arpL2Idx == asic_l2_entry.entry.l2UcEntry.index) && (asic_l2_entry.entry.l2UcEntry.flags & RTK_L2_UCAST_FLAG_ARP_USED)!=0 && (asic_l2_entry.entry.l2UcEntry.flags & RTK_L2_UCAST_FLAG_STATIC)==0) { asic_l2_entry.entry.l2UcEntry.flags &= (~RTK_L2_UCAST_FLAG_ARP_USED); retval = RTK_L2_ADDR_ADD(&asic_l2_entry.entry.l2UcEntry); ASSERT_EQ(retval,RT_ERR_OK); } } } } #endif return (RT_ERR_RG_OK); } int _rtk_rg_freeRecentlyNotUsedArpList(void) { int count=0; int i; rtk_rg_arp_linkList_t *pArpEntry,*pNextEntry; for(i=0;i<MAX_ARP_SW_TABLE_HEAD;i++) { list_for_each_entry_safe(pArpEntry,pNextEntry,&rg_db.softwareArpTableHead[i],arp_list) { if(rg_db.arp[pArpEntry->idx].staticEntry==0 && rg_db.arp[pArpEntry->idx].idleSecs>=ARP_SW_TABLE_THRESHOLD) //not be accessed in ARP_SW_TABLE_THRESHOLD time { _rtk_rg_softwareArpTableDel(pArpEntry); count++; } } } if(count==0) //no recently not used ARP entry.... 
		RETURN_ERR(RT_ERR_RG_FAILED);

	return (RT_ERR_RG_OK);
}

int32 _rtk_rg_arpRearrange(rtk_rg_routing_arpInfo_t *newAddingEntry, ipaddr_t newIpAddr, int routingARPNum)
{
#if defined(CONFIG_RTL9600_SERIES) || defined(CONFIG_XDSL_NEW_HWNAT_DRIVER)
	unsigned int i,j,k,arpNum=0,lanArpTotal=0,wanArpTotal=0,arpStart=0,inRangeIdx=0,routingIdx=0,prevRoutingIdx=0;
	ipaddr_t comparingArpIP=0;

	//init
	bzero(rg_db.tempArpTable,sizeof(rtk_rg_table_arp_t)*MAX_ARP_HW_TABLE_SIZE);
	bzero(rg_db.tempL3Table,sizeof(rtk_l34_routing_entry_t)*MAX_L3_SW_TABLE_SIZE);
	bzero(rg_db.arpTableCopied,sizeof(unsigned char)*MAX_ARP_HW_TABLE_SIZE);

	// TODO: we need to stop the packet flow from here!!
	//Count the number of ARP entries all routing entries need, and calculate each entry's ARP start and end index
	for(i=0;i<routingARPNum;i++)
	{
		//the newly-adding one has not been added to the routing table yet
		arpNum=0x1<<rg_db.p_tempRoutingArpInfoArray[i].bitNum;
		routingIdx=rg_db.p_tempRoutingArpInfoArray[i].routingIdx;
		//DEBUG("i=%d, arpNum is %d, routingIdx is %d",i,arpNum,routingIdx);
		if(rg_db.l3[routingIdx].rtk_l3.valid==0)
			comparingArpIP=newIpAddr;
		else
			comparingArpIP=rg_db.l3[routingIdx].rtk_l3.ipAddr;

		for(j=0;j<i;j++)
		{
			//DEBUG("j is %d",j);
			prevRoutingIdx=rg_db.p_tempRoutingArpInfoArray[j].routingIdx;
			if(rg_db.l3[prevRoutingIdx].rtk_l3.valid)
			{
				if((comparingArpIP&rg_db.l3[prevRoutingIdx].netmask) == rg_db.l3[prevRoutingIdx].rtk_l3.ipAddr)
				{
					//DEBUG("HIT!!comparingArpIP(%x)&rg_db.l3[%d].netmask=%x, rg_db.l3[%d].rtk_l3.ipAddr=%x",comparingArpIP,prevRoutingIdx,comparingArpIP&rg_db.l3[prevRoutingIdx].netmask,prevRoutingIdx,rg_db.l3[prevRoutingIdx].rtk_l3.ipAddr);
					break;
				}
			}
			else //the routing entry has not been added to the table yet, so it must be the newAddingEntry
			{
				if((comparingArpIP&(~newAddingEntry->notMask)) == (newIpAddr&(~newAddingEntry->notMask)))
				{
					//DEBUG("HIT!!comparingArpIP(%x)&(~newAddingEntry.notMask)=%x, newIpAddr(%x)&(~newAddingEntry.notMask))=%x",comparingArpIP,comparingArpIP&(~newAddingEntry->notMask),newIpAddr,newIpAddr&(~newAddingEntry->notMask));
					break;
				}
			}
		}
		if(j==i)	//did not match any previous IP-range, create a new IP-range
		{
			//DEBUG("add new IP-range");
			if(rg_db.p_tempRoutingArpInfoArray[i].isLan)
				lanArpTotal+=arpNum;
			else
				wanArpTotal+=arpNum;
			rg_db.p_tempRoutingArpInfoArray[i].arpStart=arpStart;
			arpStart+=(arpNum>>0x2);
			rg_db.p_tempRoutingArpInfoArray[i].arpEnd=arpStart-1;	//each ARP index has four entries
		}
		else if(rg_db.p_tempRoutingArpInfoArray[i].bitNum==rg_db.p_tempRoutingArpInfoArray[j].bitNum)
		{
			//DEBUG("in the same IP-range");
			//no matter IVL or SVL, two interfaces with the same subnet are prohibited here!!
RETURN_ERR(RT_ERR_RG_INTF_OVERLAP_AND_SAME_SUBNET); #if 0 if(rg_db.vlan[rg_db.p_tempRoutingVlanInfoArray[i]].fidMode==VLAN_FID_IVL || rg_db.vlan[rg_db.p_tempRoutingVlanInfoArray[j]].fidMode==VLAN_FID_IVL) //IVL interfaces can not overlap their ip subnet, otherwise routing table will always hit first add one RETURN_ERR(RT_ERR_RG_VLAN_BASED_OVERLAP_SUBNET); //the Ith entry is in the Jth entry's IP-range and Ith entry is as big as Jth entry rg_db.p_tempRoutingArpInfoArray[i].arpStart=rg_db.p_tempRoutingArpInfoArray[j].arpStart; rg_db.p_tempRoutingArpInfoArray[i].arpEnd=rg_db.p_tempRoutingArpInfoArray[j].arpEnd; #endif } else { //DEBUG("the %d(%d) is inside %d(%d) IP-range",i,rg_db.p_tempRoutingArpInfoArray[i].bitNum,j,rg_db.p_tempRoutingArpInfoArray[j].bitNum); if(rg_db.vlan[rg_db.p_tempRoutingVlanInfoArray[i]].fidMode==VLAN_FID_IVL || rg_db.vlan[rg_db.p_tempRoutingVlanInfoArray[j]].fidMode==VLAN_FID_IVL) //IVL interfaces can not overlap their ip subnet, otherwise routing table will always hit first add one RETURN_ERR(RT_ERR_RG_VLAN_BASED_OVERLAP_SUBNET); //the Ith entry is inside the Jth entry's IP-range, so the Jth ARP start is referenced inRangeIdx=(comparingArpIP&rg_db.p_tempRoutingArpInfoArray[j].notMask)>>rg_db.p_tempRoutingArpInfoArray[i].bitNum; rg_db.p_tempRoutingArpInfoArray[i].arpStart=rg_db.p_tempRoutingArpInfoArray[j].arpStart+(inRangeIdx<<(rg_db.p_tempRoutingArpInfoArray[i].bitNum-2)); //each ARP index has four entries rg_db.p_tempRoutingArpInfoArray[i].arpEnd=rg_db.p_tempRoutingArpInfoArray[i].arpStart+(arpNum>>0x2)-1; } //Check ARP number is over hardware limitation or not //DEBUG("Lan ARP total is %d, Wan ARP total is %d",lanArpTotal,wanArpTotal); if(newIpAddr!=0) //in deleting, these check is not necessary { if((newAddingEntry->isLan && lanArpTotal>rg_kernel.arp_number_for_LAN) || (newAddingEntry->isLan==0 && wanArpTotal>rg_kernel.arp_number_for_WAN)) { //DEBUG("The hardware ARP table is not enough for the new routing entry...add to sw table!"); return (RT_ERR_RG_ADD_ARP_TO_SW_TABLE); //add to sw table when needed //RETURN_ERR(RT_ERR_RG_ARP_FULL); } } //DEBUG("add to hardware ARP table for the new routing entry!"); //Return the newArpRouting's ARP Start and End if(newIpAddr!=0 && rg_db.p_tempRoutingArpInfoArray[i].intfIdx==newAddingEntry->intfIdx) { newAddingEntry->arpStart=rg_db.p_tempRoutingArpInfoArray[i].arpStart; newAddingEntry->arpEnd=rg_db.p_tempRoutingArpInfoArray[i].arpEnd; } //Check if ARP rearrangement is needed j=rg_db.p_tempRoutingArpInfoArray[i].arpStart<<0x2; if(rg_db.l3[routingIdx].rtk_l3.valid==1) { //Keep routing table entry in tempRouting table memcpy(&rg_db.tempL3Table[routingIdx],&rg_db.l3[routingIdx].rtk_l3,sizeof(rtk_l34_routing_entry_t)); //DEBUG("j = %d, tmp start %d, routing start %d, arpNUm= %d",j,rg_db.p_tempRoutingArpInfoArray[i].arpStart,rg_db.l3[routingIdx].rtk_l3.arpStart,arpNum); if(rg_db.p_tempRoutingArpInfoArray[i].arpStart != rg_db.l3[routingIdx].rtk_l3.arpStart) { //DEBUG("rearrange!!"); //Rearrange old ARP records in tempArpTable for(k=rg_db.l3[routingIdx].rtk_l3.arpStart<<0x2;k<((rg_db.l3[routingIdx].rtk_l3.arpEnd+1)<<0x2);k++) memcpy(&rg_db.tempArpTable[j++],&rg_db.arp[k],sizeof(rtk_rg_table_arp_t)); //Modify routing table rg_db.tempL3Table[routingIdx].arpStart=rg_db.p_tempRoutingArpInfoArray[i].arpStart; rg_db.tempL3Table[routingIdx].arpEnd=rg_db.p_tempRoutingArpInfoArray[i].arpEnd; } else if(rg_db.arpTableCopied[j] == 0) { //DEBUG("no..i am not moving.."); 
				memcpy(&rg_db.tempArpTable[j],&rg_db.arp[j],sizeof(rtk_rg_table_arp_t)*arpNum);
			}
			//Mark arpTableCopied to record which ARP entries have already been copied
			memset(&rg_db.arpTableCopied[rg_db.p_tempRoutingArpInfoArray[i].arpStart<<0x2],1,sizeof(unsigned char)*arpNum);
		}
	}

	//Write the tempRouting table to the hardware L3 table
	for(i=0;i<MAX_L3_SW_TABLE_SIZE;i++)
	{
		//Only the modified routing entries need to be overwritten
		if(rg_db.tempL3Table[i].valid==1)
			ASSERT_EQ(RTK_L34_ROUTINGTABLE_SET(i,&rg_db.tempL3Table[i]),RT_ERR_OK);
	}
	//Write the tempArp table to the hardware ARP table
	for(i=0;i<MAX_ARP_HW_TABLE_SIZE;i++)
	{
		/*DEBUG("i = %d, valid = %d, l3idx = %d, nhidx= %d",i, rg_db.tempArpTable[i].rtk_arp.valid, rg_db.tempArpTable[i].rtk_arp.index, rg_db.tempArpTable[i].rtk_arp.nhIdx);*/
		ASSERT_EQ(RTK_L34_ARPTABLE_SET(i,&rg_db.tempArpTable[i].rtk_arp),RT_ERR_OK);
	}
	// TODO: we need to restart the packet flow from here!!
	//DEBUG("after arp rearrange!!");
	return (RT_ERR_RG_OK);
#elif defined(CONFIG_RTL9602C_SERIES)
	FIXME("9602BVB doesn't need to rearrange ARP");
	return (RT_ERR_RG_OK);
#elif defined(CONFIG_RTL9607C_SERIES)
	return (RT_ERR_RG_OK);
#endif
}

int32 _rtk_rg_addArpRoutingArray(rtk_rg_routing_arpInfo_t *newAddingEntry, ipaddr_t newIpAddr, int intfVlanId)
{
	unsigned int ret,i=0;
	rtk_rg_routing_arpInfo_t *pTemporary;
	int *pVlanTemporary;

	//init
	bzero(rg_db.p_tempRoutingArpInfoArray,sizeof(rtk_rg_routing_arpInfo_t)*MAX_L3_SW_TABLE_SIZE);
	bzero(rg_db.p_tempRoutingVlanInfoArray,sizeof(int)*MAX_L3_SW_TABLE_SIZE);

	//Add the new entry, together with the old ARP routing entries in routingArpInfoArray, to tempRoutingArpInfoArray, ordered by IP-range size.
	//Software ARP routings are NOT added here; the new entry is added first because _rtk_rg_arpRearrange will decide whether it can be placed in hw or not.
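	/* Note (added for clarity): the loop below is an insertion sort by descending bitNum, i.e.
	 * routings covering larger IP ranges come first.  For example (illustrative numbers only),
	 * a LAN subnet with bitNum=8 (256 addresses) is placed ahead of a WAN subnet with bitNum=2
	 * (4 addresses), so _rtk_rg_arpRearrange can lay out the ARP table from the biggest range
	 * down to the smallest one. */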
for(i=0;i<rg_db.routingArpInfoNum;i++) { if(newAddingEntry->bitNum>rg_db.p_routingArpInfoArray[i].bitNum) { memcpy(&rg_db.p_tempRoutingArpInfoArray[i],newAddingEntry,sizeof(rtk_rg_routing_arpInfo_t)); memcpy(&rg_db.p_tempRoutingArpInfoArray[i+1],&rg_db.p_routingArpInfoArray[i],sizeof(rtk_rg_routing_arpInfo_t)*(rg_db.routingArpInfoNum-i)); rg_db.p_tempRoutingVlanInfoArray[i]=intfVlanId; memcpy(&rg_db.p_tempRoutingVlanInfoArray[i+1],&rg_db.p_routingVlanInfoArray[i],sizeof(int)*(rg_db.routingArpInfoNum-i)); break; } else { memcpy(&rg_db.p_tempRoutingArpInfoArray[i],&rg_db.p_routingArpInfoArray[i],sizeof(rtk_rg_routing_arpInfo_t)); rg_db.p_tempRoutingVlanInfoArray[i]=rg_db.p_routingVlanInfoArray[i]; } } //the newAdding is the smallest one, so add it at the end of array if(i==rg_db.routingArpInfoNum) { memcpy(&rg_db.p_tempRoutingArpInfoArray[i],newAddingEntry,sizeof(rtk_rg_routing_arpInfo_t)); rg_db.p_tempRoutingVlanInfoArray[i]=intfVlanId; } //Count and check ARP table distribution ret=_rtk_rg_arpRearrange(newAddingEntry,newIpAddr,rg_db.routingArpInfoNum+1); if(ret!=RT_ERR_RG_OK) return ret; //Global variable modification rg_db.routingArpInfoNum++; pTemporary=rg_db.p_routingArpInfoArray; rg_db.p_routingArpInfoArray=rg_db.p_tempRoutingArpInfoArray; rg_db.p_tempRoutingArpInfoArray=pTemporary; pVlanTemporary=rg_db.p_routingVlanInfoArray; rg_db.p_routingVlanInfoArray=rg_db.p_tempRoutingVlanInfoArray; rg_db.p_tempRoutingVlanInfoArray=pVlanTemporary; return (RT_ERR_RG_OK); } #if defined(CONFIG_RTL9600_SERIES) || defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) static rtk_rg_routing_linkList_t swRoutingList[MAX_L3_HW_TABLE_SIZE]; #endif int32 _rtk_rg_convertSwArpToHwTable(rtk_rg_routing_arpInfo_t *deletingEntry) { #if defined(CONFIG_RTL9600_SERIES) || defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) int ret,i=0,arpNum,bitNum,arpIdx,intfVlanId; unsigned char inserted=0; struct list_head swRoutingHead; rtk_rg_routing_linkList_t *pSwRoutingList; rtk_rg_arp_linkList_t *pSwArpInfo,*pNextSwArpInfo; rtk_rg_arpEntry_t arpEntry; rtk_rg_routing_arpInfo_t newAddingRoute; ipaddr_t newIpAddr; rtk_l34_routing_entry_t rtEntry; //20140312LUKE:if this time we just set the same interface twice, bypass this convert function! if(rg_db.systemGlobal.intfIdxForReset!=-1) return (RT_ERR_RG_OK); //init INIT_LIST_HEAD(&swRoutingHead); for(i=0;i<MAX_L3_HW_TABLE_SIZE;i++) { INIT_LIST_HEAD(&swRoutingList[i].route_list); swRoutingList[i].idx=i; swRoutingList[i].bitNum=0; } //Check if the empty ARP table can accommodate the same size software ARP table routing //If so, copy from software ARP link list to hardware ARP table for(i=0;i<MAX_L3_HW_TABLE_SIZE;i++) { #ifdef CONFIG_DUALBAND_CONCURRENT if(i==SLAVE_WIFI_ROUTE_IDX) //ipc routing should not be converted at any time! continue; #endif if(rg_db.l3[i].rtk_l3.valid && rg_db.l3[i].rtk_l3.ipAddr>0 && rg_db.l3[i].rtk_l3.process==L34_PROCESS_CPU) { if(rg_db.systemGlobal.interfaceInfo[rg_db.l3[i].rtk_l3.netifIdx].storedInfo.is_wan) { //20140623LUKE:do not convert OtherWAN interface route!! if(rg_db.systemGlobal.interfaceInfo[rg_db.l3[i].rtk_l3.netifIdx].storedInfo.wan_intf.wan_intf_conf.none_internet) continue; //20150916LUKE: do not convert PPTP, L2TP, Dslite, pppoe_Dslite interface route!! 
//20140312LUKE:do not convert PPPoE interface route!!(internal is WAN) if(rg_db.systemGlobal.interfaceInfo[rg_db.l3[i].rtk_l3.netifIdx].storedInfo.wan_intf.wan_intf_conf.wan_type==RTK_RG_PPPoE|| (rg_db.systemGlobal.interfaceInfo[rg_db.l3[i].rtk_l3.netifIdx].storedInfo.wan_intf.wan_intf_conf.wan_type>=RTK_RG_PPTP && rg_db.systemGlobal.interfaceInfo[rg_db.l3[i].rtk_l3.netifIdx].storedInfo.wan_intf.wan_intf_conf.wan_type<=RTK_RG_PPPoE_DSLITE)) continue; //20140916LUKE: if STATIC ROUTE set gw_MAC_autolearn and gw not yet response, we should not convert it!! if(rg_db.systemGlobal.interfaceInfo[rg_db.l3[i].rtk_l3.netifIdx].p_wanStaticInfo->gateway_ipv4_addr && !rg_db.systemGlobal.interfaceInfo[rg_db.l3[i].rtk_l3.netifIdx].p_wanStaticInfo->ipv4_default_gateway_on) continue; } bitNum=31-rg_db.l3[i].rtk_l3.ipMask; if(rg_db.l3[i].rtk_l3.rt2waninf!=deletingEntry->isLan && bitNum<=deletingEntry->bitNum) //convert sw bitNum bigger than deleting hw entry is impossible { DEBUG("add to candidate list!!i is %d",i); swRoutingList[i].bitNum=bitNum; if(list_empty(&swRoutingHead)) { DEBUG("add to list head!"); list_add(&swRoutingList[i].route_list,&swRoutingHead); } else { //insert the new candidate entry by its bitNum, bigger is close to head, smaller is far from it. list_for_each_entry(pSwRoutingList,&swRoutingHead,route_list) { if(bitNum>pSwRoutingList->bitNum) { inserted=1; break; } } if(inserted) { DEBUG("insert new routing list(%d) before pSwRoutingList(%d)",bitNum,pSwRoutingList->bitNum); list_add_tail(&swRoutingList[i].route_list,&pSwRoutingList->route_list); } else { DEBUG("append new routing list(%d) before the Head",bitNum); list_add_tail(&swRoutingList[i].route_list,&swRoutingHead); } } } } } arpNum=0x1<<deletingEntry->bitNum; if(!list_empty(&swRoutingHead)) { DEBUG("total can add %d arp entries",arpNum); list_for_each_entry(pSwRoutingList,&swRoutingHead,route_list) { //if the sw entry size is accommodate into the deleting range, //transfer sw ARP to hw ARP, add to tempRoutingArray and rearrange ARP table //count arp range newAddingRoute.arpStart=deletingEntry->arpStart; arpNum-=0x1<<pSwRoutingList->bitNum; DEBUG("there are %d last arp entries can be add..",arpNum); if(arpNum<0) break; if(pSwRoutingList->bitNum<=2) deletingEntry->arpStart+=1; else deletingEntry->arpStart+=(0x1<<(pSwRoutingList->bitNum-2)); newAddingRoute.arpEnd=deletingEntry->arpStart-1; DEBUG(" new arpStart=%d, arpEnd=%d...deleing arpStart=%d, arpEnd=%d",newAddingRoute.arpStart,newAddingRoute.arpEnd,deletingEntry->arpStart,deletingEntry->arpEnd); newAddingRoute.routingIdx=pSwRoutingList->idx; newAddingRoute.intfIdx=rg_db.l3[pSwRoutingList->idx].rtk_l3.netifIdx; newAddingRoute.bitNum=pSwRoutingList->bitNum; //Rearrange ARP table newIpAddr=rg_db.l3[pSwRoutingList->idx].rtk_l3.ipAddr; if(rg_db.l3[pSwRoutingList->idx].rtk_l3.rt2waninf) { newAddingRoute.isLan=0; newAddingRoute.notMask=~(rg_db.systemGlobal.interfaceInfo[newAddingRoute.intfIdx].p_wanStaticInfo->ip_network_mask); intfVlanId=rg_db.systemGlobal.interfaceInfo[newAddingRoute.intfIdx].storedInfo.wan_intf.wan_intf_conf.egress_vlan_id; } else { newAddingRoute.isLan=1; newAddingRoute.notMask=~(rg_db.systemGlobal.interfaceInfo[newAddingRoute.intfIdx].p_lanIntfConf->ip_network_mask); intfVlanId=rg_db.systemGlobal.interfaceInfo[newAddingRoute.intfIdx].p_lanIntfConf->intf_vlan_id; } ret=_rtk_rg_addArpRoutingArray(&newAddingRoute,newIpAddr,intfVlanId); if(ret==RT_ERR_RG_OK) { //change routing table's process from CPU to ARP 
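//The route is switched from CPU processing to hardware ARP processing and pointed at the ARP
//block just carved out of the deleted range (arpStart/arpEnd computed above); after a
//successful hardware write, every software ARP entry belonging to this route is migrated into
//the hardware ARP table and removed from the software list.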
memcpy(&rtEntry,&rg_db.l3[newAddingRoute.routingIdx].rtk_l3,sizeof(rtk_l34_routing_entry_t)); rtEntry.process = L34_PROCESS_ARP; rtEntry.arpStart = newAddingRoute.arpStart; rtEntry.arpEnd = newAddingRoute.arpEnd; DEBUG(" Convert sw ARP to hw!! newArpStart is %d, newArpEnd is %d",rtEntry.arpStart,rtEntry.arpEnd); ret = RTK_L34_ROUTINGTABLE_SET(newAddingRoute.routingIdx, &rtEntry); if(ret!=RT_ERR_OK) break; //Convert software ARP to hw ARP table for(i=0;i<MAX_ARP_SW_TABLE_HEAD;i++) { list_for_each_entry_safe(pSwArpInfo,pNextSwArpInfo,&rg_db.softwareArpTableHead[i],arp_list) { if(rg_db.arp[pSwArpInfo->idx].routingIdx==newAddingRoute.routingIdx) //Hit,transfer to hw arp { arpIdx=(newAddingRoute.arpStart<<2); arpIdx+=rg_db.arp[pSwArpInfo->idx].ipv4Addr&newAddingRoute.notMask; arpEntry.ipv4Addr=rg_db.arp[pSwArpInfo->idx].ipv4Addr; arpEntry.macEntryIdx=rg_db.arp[pSwArpInfo->idx].rtk_arp.nhIdx; arpEntry.staticEntry=rg_db.arp[pSwArpInfo->idx].staticEntry; //keep static character DEBUG(" convert sw ARP[%d]->l2:%d from sw to hw[%d]!!",pSwArpInfo->idx,arpEntry.macEntryIdx,arpIdx); assert_ok(rtk_rg_apollo_arpEntry_add(&arpEntry,&arpIdx)); //Free software ARP list DEBUG(" free sw ARP[%d] in array[%d]!",pSwArpInfo->idx,i); _rtk_rg_softwareArpTableDel(pSwArpInfo); } } /*pSwArpInfo=rg_db.pSoftwareArpTableHead[i]; while(pSwArpInfo!=NULL) { pNextSwArpInfo=pSwArpInfo->pNext; if(pSwArpInfo->routingIdx==pSwRoutingHead->swRoutingEntry.routingIdx) //Hit,transfer to hw arp { arpIdx=(pSwRoutingHead->swRoutingEntry.arpStart<<2); arpIdx+=pSwArpInfo->ipv4Addr&pSwRoutingHead->swRoutingEntry.notMask; arpEntry.ipv4Addr=pSwArpInfo->ipv4Addr; arpEntry.macEntryIdx=pSwArpInfo->nhIdx; arpEntry.staticEntry=pSwArpInfo->staticEntry; //keep static character DEBUG("add ARP from sw to hw[%d]!!",arpIdx); assert_ok(rtk_rg_arpEntry_add(&arpEntry,&arpIdx)); //Free software ARP list DEBUG("free sw ARP Info in array[%d]!",i); _rtk_rg_softwareArpTableDel(pSwArpInfo); } pSwArpInfo=pNextSwArpInfo; }*/ } } else break; //failed to convert, just return. 
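//Reaching the end of an iteration means this candidate route was fully converted; any failure
//above breaks out of the scan and leaves the remaining candidates as software (CPU-processed)
//routes.  Note how converted neighbours were indexed above:
//arpIdx=(arpStart<<2)+(ipv4Addr&notMask), i.e. the host bits of the address select the slot
//inside the four-entries-per-index block reserved for this route.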
} /*while(pSwRoutingHead!=NULL) { //if the sw entry size is accommodate into the deleting range, //transfer sw ARP to hw ARP, add to tempRoutingArray and rearrange ARP table //count arp range pSwRoutingHead->swRoutingEntry.arpStart=deletingEntry->arpStart; arpNum-=0x1<<pSwRoutingHead->swRoutingEntry.bitNum; DEBUG("there are %d last arp entries can be add..",arpNum); if(arpNum<0) break; if(pSwRoutingHead->swRoutingEntry.bitNum<=2) deletingEntry->arpStart+=1; else deletingEntry->arpStart+=(0x1<<(pSwRoutingHead->swRoutingEntry.bitNum-2)); pSwRoutingHead->swRoutingEntry.arpEnd=deletingEntry->arpStart; DEBUG("new arpStart=%d, arpEnd=%d...deleing arpStart=%d, arpEnd=%d",pSwRoutingHead->swRoutingEntry.arpStart,pSwRoutingHead->swRoutingEntry.arpEnd, deletingEntry->arpStart,deletingEntry->arpEnd); //Rearrange ARP table newIpAddr=rg_db.l3[pSwRoutingHead->swRoutingEntry.routingIdx].rtk_l3.ipAddr; if(pSwRoutingHead->swRoutingEntry.isLan) intfVlanId=rg_db.systemGlobal.interfaceInfo[pSwRoutingHead->swRoutingEntry.intfIdx].p_lanIntfConf->intf_vlan_id; else intfVlanId=rg_db.systemGlobal.interfaceInfo[pSwRoutingHead->swRoutingEntry.intfIdx].storedInfo.wan_intf.wan_intf_conf.egress_vlan_id; ret=_rtk_rg_addArpRoutingArray(&pSwRoutingHead->swRoutingEntry,newIpAddr,intfVlanId); if(ret==RT_ERR_RG_OK) { //change routing table's process from CPU to ARP memcpy(&rtEntry,&rg_db.l3[pSwRoutingHead->swRoutingEntry.routingIdx].rtk_l3,sizeof(rtk_l34_routing_entry_t)); rtEntry.process = L34_PROCESS_ARP; rtEntry.arpStart = pSwRoutingHead->swRoutingEntry.arpStart; rtEntry.arpEnd = pSwRoutingHead->swRoutingEntry.arpEnd; DEBUG("Convert sw ARP to hw!! newArpStart is %d, newArpEnd is %d",rtEntry.arpStart,rtEntry.arpEnd); ret = RTK_L34_ROUTINGTABLE_SET(pSwRoutingHead->swRoutingEntry.routingIdx, &rtEntry); if(ret!=RT_ERR_OK) break; for(i=0;i<MAX_ARP_SW_TABLE_HEAD;i++) { pSwArpInfo=rg_db.pSoftwareArpTableHead[i]; while(pSwArpInfo!=NULL) { pNextSwArpInfo=pSwArpInfo->pNext; if(pSwArpInfo->routingIdx==pSwRoutingHead->swRoutingEntry.routingIdx) //Hit,transfer to hw arp { arpIdx=(pSwRoutingHead->swRoutingEntry.arpStart<<2); arpIdx+=pSwArpInfo->ipv4Addr&pSwRoutingHead->swRoutingEntry.notMask; arpEntry.ipv4Addr=pSwArpInfo->ipv4Addr; arpEntry.macEntryIdx=pSwArpInfo->nhIdx; arpEntry.staticEntry=pSwArpInfo->staticEntry; //keep static character DEBUG("add ARP from sw to hw[%d]!!",arpIdx); assert_ok(rtk_rg_arpEntry_add(&arpEntry,&arpIdx)); //Free software ARP list DEBUG("free sw ARP Info in array[%d]!",i); _rtk_rg_softwareArpTableDel(pSwArpInfo); } pSwArpInfo=pNextSwArpInfo; } } } else break; //failed to convert, just return. 
pSwRoutingHead=pSwRoutingHead->pNext; }*/ } return (RT_ERR_RG_OK); #elif defined(CONFIG_RTL9602C_SERIES) FIXME("9602BVB don't need to converSwArpToHW"); return (RT_ERR_RG_OK); #elif defined(CONFIG_RTL9607C_SERIES) //FIXME: return (RT_ERR_RG_OK); #endif } int32 _rtk_rg_delArpRoutingArray(rtk_rg_routing_arpInfo_t *deletingEntry) { unsigned int ret,i=0; rtk_rg_routing_arpInfo_t *pTemporary; int *pVlanTemporary; //init bzero(rg_db.p_tempRoutingArpInfoArray,sizeof(rtk_rg_routing_arpInfo_t)*MAX_L3_SW_TABLE_SIZE); bzero(rg_db.p_tempRoutingVlanInfoArray,sizeof(int)*MAX_L3_SW_TABLE_SIZE); //if the entry is deleting, we do not add it in tmpArray for(i=0;i<rg_db.routingArpInfoNum;i++) { if(deletingEntry->routingIdx==rg_db.p_routingArpInfoArray[i].routingIdx) { memcpy(&rg_db.p_tempRoutingArpInfoArray[i],&rg_db.p_routingArpInfoArray[i+1],sizeof(rtk_rg_routing_arpInfo_t)*(rg_db.routingArpInfoNum-i-1)); memcpy(&rg_db.p_tempRoutingVlanInfoArray[i],&rg_db.p_routingVlanInfoArray[i+1],sizeof(int)*(rg_db.routingArpInfoNum-i-1)); break; } else { memcpy(&rg_db.p_tempRoutingArpInfoArray[i],&rg_db.p_routingArpInfoArray[i],sizeof(rtk_rg_routing_arpInfo_t)); rg_db.p_tempRoutingVlanInfoArray[i]=rg_db.p_routingVlanInfoArray[i]; } } //Count and check ARP table distribution ret=_rtk_rg_arpRearrange(NULL,0,rg_db.routingArpInfoNum-1); if(ret!=RT_ERR_RG_OK) return ret; //Global variable modification rg_db.routingArpInfoNum--; pTemporary=rg_db.p_routingArpInfoArray; rg_db.p_routingArpInfoArray=rg_db.p_tempRoutingArpInfoArray; rg_db.p_tempRoutingArpInfoArray=pTemporary; pVlanTemporary=rg_db.p_routingVlanInfoArray; rg_db.p_routingVlanInfoArray=rg_db.p_tempRoutingVlanInfoArray; rg_db.p_tempRoutingVlanInfoArray=pVlanTemporary; //Convert sw ARP list to hw ARP table if any #ifdef CONFIG_APOLLO_MODEL rtlglue_printf("FIXME: Execute model codes _rtk_rg_convertSwArpToHwTable() segmentation fault!\n"); #else _rtk_rg_convertSwArpToHwTable(deletingEntry); #endif return (RT_ERR_RG_OK); } void _rtk_rg_refreshPPPoEPassThroughLanOrWanPortMask(void) { if((rg_db.algFunctionMask&RTK_RG_ALG_PPPOE_PASSTHROUGH_BIT) > 0) { //Turn off ACL first and re-enable Pass through to refresh the LAN or WAN port mask rg_db.algFunctionMask&=(~RTK_RG_ALG_PPPOE_PASSTHROUGH_BIT); rtk_rg_apollo_algApps_set(rg_db.algFunctionMask); rg_db.algFunctionMask|=RTK_RG_ALG_PPPOE_PASSTHROUGH_BIT; rtk_rg_apollo_algApps_set(rg_db.algFunctionMask); } } rtk_rg_successFailReturn_t _rtk_rg_createGatewayMacEntry(uint8 *gatewayMac, int vlanID, uint32 untagSet,int intfIdx) { rtk_rg_macEntry_t macEntry; int ret,l2Idx,search_index,count=0,first_invalid=-1,port_move_orig=-1,category_orig=-1,wlan0_move_orig=-1,permit_orig=0; memset(&macEntry,0,sizeof(rtk_rg_macEntry_t)); macEntry.vlan_id=vlanID; macEntry.fid=rg_db.vlan[macEntry.vlan_id].fid; //DEBUG("the internalVlanID is %d, fid is %d",macEntry.vlan_id,macEntry.fid); if(rg_db.vlan[macEntry.vlan_id].fidMode==VLAN_FID_IVL) { macEntry.isIVL=1; l2Idx=_rtk_rg_hash_mac_vid_efid(gatewayMac,macEntry.vlan_id,0); //FIXME:EFID is 0 now } else { ADD_SVL_LUT: count=0; first_invalid=-1; macEntry.isIVL=0; #if defined(CONFIG_RTL9600_SERIES) || defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) if((untagSet&(0x1<<RTK_RG_MAC_PORT_CPU))>0) //cpu is in untag set macEntry.vlan_id=0; //untag for DMAC2CVID #else // support ctag_if macEntry.ctag_if=((untagSet&(0x1<<RTK_RG_MAC_PORT_CPU))>0)?0:1; #endif l2Idx=_rtk_rg_hash_mac_fid_efid(gatewayMac,macEntry.fid,0); //FIXME:EFID is 0 now } l2Idx<<=2; do { search_index = l2Idx+count; //DEBUG("search_idx is %d\n",search_index); 
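//The L2 table is probed as 4-way buckets here: the hash above yields a bucket index,
//l2Idx<<=2 turns it into the first of four consecutive slots, and count walks the four ways
//looking for either an existing entry for this gateway MAC or the first free slot, which is
//remembered in first_invalid.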
if(rg_db.lut[search_index].valid==0) { if(first_invalid==-1) first_invalid=search_index; //break; //empty, just add count++; //search from next entry continue; } if(rg_db.lut[search_index].valid && rg_db.lut[search_index].rtk_lut.entryType==RTK_LUT_L2UC && (memcmp(rg_db.lut[search_index].rtk_lut.entry.l2UcEntry.mac.octet,gatewayMac,ETHER_ADDR_LEN)==0)) { if(((macEntry.isIVL==1) && rg_db.lut[search_index].rtk_lut.entry.l2UcEntry.vid==macEntry.vlan_id) || ((macEntry.isIVL==0) && rg_db.lut[search_index].rtk_lut.entry.l2UcEntry.fid==macEntry.fid)) { permit_orig=rg_db.lut[search_index].permit_for_l34_forward; //DEBUG("match!!"); /* //FIXME: here reserved for WiFi interface may also need to handle port-moving in the future. if(rg_db.lut[search_index].rtk_lut.entry.l2UcEntry.port==RTK_RG_MAC_PORT_CPU) { } else */if(rg_db.lut[search_index].rtk_lut.entry.l2UcEntry.port!=RTK_RG_MAC_PORT_CPU) { //Mac port-moving, update LUT table without change ARP_USED flag //------------------ Critical Section start -----------------------// //rg_lock(&rg_kernel.saLearningLimitLock); if(rg_db.lut[search_index].rtk_lut.entry.l2UcEntry.port>=RTK_RG_PORT_CPU) { if(rg_db.systemGlobal.accessWanLimitPortMask_member.portmask&(0x1<<(rg_db.lut[search_index].rtk_lut.entry.l2UcEntry.ext_port+RTK_RG_PORT_CPU))&&permit_orig) atomic_dec(&rg_db.systemGlobal.accessWanLimitPortMaskCount); atomic_dec(&rg_db.systemGlobal.sourceAddrLearningCount[rg_db.lut[search_index].rtk_lut.entry.l2UcEntry.ext_port+RTK_RG_PORT_CPU]); port_move_orig=rg_db.lut[search_index].rtk_lut.entry.l2UcEntry.ext_port+RTK_RG_PORT_CPU; } else { if(rg_db.systemGlobal.accessWanLimitPortMask_member.portmask&(0x1<<(rg_db.lut[search_index].rtk_lut.entry.l2UcEntry.port))&&permit_orig) atomic_dec(&rg_db.systemGlobal.accessWanLimitPortMaskCount); atomic_dec(&rg_db.systemGlobal.sourceAddrLearningCount[rg_db.lut[search_index].rtk_lut.entry.l2UcEntry.port]); port_move_orig=rg_db.lut[search_index].rtk_lut.entry.l2UcEntry.port; } if(_rtK_rg_checkCategoryPortmask(&rg_db.lut[search_index].rtk_lut.entry.l2UcEntry)==RT_ERR_RG_OK) { category_orig=rg_db.lut[search_index].category; atomic_dec(&rg_db.systemGlobal.accessWanLimitCategoryCount[category_orig]); } //------------------ Critical Section End -----------------------// //rg_unlock(&rg_kernel.saLearningLimitLock); //macEntry.arp_used=((rg_db.lut[search_index].rtk_lut.entry.l2UcEntry.flags&RTK_L2_UCAST_FLAG_ARP_USED)>0)?1:0; //if there is arp_used LUT equal to the gateway LUT entry, we just replace it by gateway's entry, therefore arp_used will always set to zero here. 
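//Book-keeping for the port move: the SA-learning counter of the port the gateway MAC was
//previously learned on, the access-WAN-limit count (when that port is in the limited mask and
//the entry was permitted) and the category count have all been decremented above.
//port_move_orig / category_orig keep the old values so the counters can be restored if
//replacing the entry fails later on.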
//macEntry.arp_used=1; //DEBUG("the port is moving..arp used is %d\n",macEntry.arp_used); DEBUG("froced replace LUT entry[%d] for gateway entry!!",search_index); break; } else if(rg_db.lut[search_index].rtk_lut.entry.l2UcEntry.ext_port!=0) //WLAN's entry { if(rg_db.systemGlobal.accessWanLimitPortMask_member.portmask&(0x1<<(rg_db.lut[search_index].rtk_lut.entry.l2UcEntry.ext_port+RTK_RG_PORT_CPU))&&permit_orig) atomic_dec(&rg_db.systemGlobal.accessWanLimitPortMaskCount); atomic_dec(&rg_db.systemGlobal.sourceAddrLearningCount[rg_db.lut[search_index].rtk_lut.entry.l2UcEntry.ext_port+RTK_RG_PORT_CPU]); port_move_orig=rg_db.lut[search_index].rtk_lut.entry.l2UcEntry.ext_port+RTK_RG_PORT_CPU; //decrease wlan's device count if(rg_db.lut[search_index].rtk_lut.entry.l2UcEntry.ext_port==(RTK_RG_EXT_PORT0-RTK_RG_PORT_CPU)) { #ifdef CONFIG_MASTER_WLAN0_ENABLE wlan0_move_orig=rg_db.lut[search_index].wlan_device_idx; atomic_dec(&rg_db.systemGlobal.wlan0SourceAddrLearningCount[wlan0_move_orig]); #endif } #ifdef CONFIG_DUALBAND_CONCURRENT else if(rg_db.systemGlobal.enableSlaveSSIDBind && rg_db.lut[search_index].rtk_lut.entry.l2UcEntry.ext_port==(RTK_RG_EXT_PORT1-RTK_RG_PORT_CPU)) { #ifdef CONFIG_MASTER_WLAN0_ENABLE wlan0_move_orig=rg_db.lut[search_index].wlan_device_idx; atomic_dec(&rg_db.systemGlobal.wlan0SourceAddrLearningCount[wlan0_move_orig]); #endif } #endif if(_rtK_rg_checkCategoryPortmask(&rg_db.lut[search_index].rtk_lut.entry.l2UcEntry)==SUCCESS) { category_orig=rg_db.lut[search_index].category; atomic_dec(&rg_db.systemGlobal.accessWanLimitCategoryCount[category_orig]); } //macEntry.arp_used=1; DEBUG("froced replace WLAN's LUT entry[%d] for gateway entry!!",search_index); break; } if(macEntry.isIVL)goto ADD_SVL_LUT; //check SVL,too rg_db.netif[intfIdx].l2_idx=search_index; return RG_RET_SUCCESS; //exist, do nothing } } count++; //search from next entry }while(count < 4); if(count==4) { #if defined(CONFIG_APOLLO) //xdsl no bCAM //Check bCAM LUT first, if match, just return. for(search_index=MAX_LUT_HW_TABLE_SIZE-MAX_LUT_BCAM_TABLE_SIZE;search_index<MAX_LUT_HW_TABLE_SIZE;search_index++) { if(rg_db.lut[search_index].valid && rg_db.lut[search_index].rtk_lut.entryType==RTK_LUT_L2UC) { if(memcmp(rg_db.lut[search_index].rtk_lut.entry.l2UcEntry.mac.octet,gatewayMac,ETHER_ADDR_LEN)==0) { if(((macEntry.isIVL==1) && rg_db.lut[search_index].rtk_lut.entry.l2UcEntry.vid==macEntry.vlan_id) || ((macEntry.isIVL==0) && rg_db.lut[search_index].rtk_lut.entry.l2UcEntry.fid==macEntry.fid)) { permit_orig=rg_db.lut[search_index].permit_for_l34_forward; //HIT! 
Check port if(rg_db.lut[search_index].rtk_lut.entry.l2UcEntry.port!=RTK_RG_MAC_PORT_CPU) { //Mac port-moving, update LUT table without change ARP_USED flag if(rg_db.lut[search_index].rtk_lut.entry.l2UcEntry.port>=RTK_RG_PORT_CPU) { if(rg_db.systemGlobal.accessWanLimitPortMask_member.portmask&(0x1<<(rg_db.lut[search_index].rtk_lut.entry.l2UcEntry.ext_port+RTK_RG_PORT_CPU))&&permit_orig) atomic_dec(&rg_db.systemGlobal.accessWanLimitPortMaskCount); atomic_dec(&rg_db.systemGlobal.sourceAddrLearningCount[rg_db.lut[search_index].rtk_lut.entry.l2UcEntry.ext_port+RTK_RG_PORT_CPU]); port_move_orig=rg_db.lut[search_index].rtk_lut.entry.l2UcEntry.ext_port+RTK_RG_PORT_CPU; } else { if(rg_db.systemGlobal.accessWanLimitPortMask_member.portmask&(0x1<<(rg_db.lut[search_index].rtk_lut.entry.l2UcEntry.port))&&permit_orig) atomic_dec(&rg_db.systemGlobal.accessWanLimitPortMaskCount); atomic_dec(&rg_db.systemGlobal.sourceAddrLearningCount[rg_db.lut[search_index].rtk_lut.entry.l2UcEntry.port]); port_move_orig=rg_db.lut[search_index].rtk_lut.entry.l2UcEntry.port; } if(_rtK_rg_checkCategoryPortmask(&rg_db.lut[search_index].rtk_lut.entry.l2UcEntry)==SUCCESS) { category_orig=rg_db.lut[search_index].category; atomic_dec(&rg_db.systemGlobal.accessWanLimitCategoryCount[category_orig]); } DEBUG("froced replace LUT entry[%d] for gateway entry!!",search_index); break; } else if(rg_db.lut[search_index].rtk_lut.entry.l2UcEntry.ext_port!=0) //WLAN's entry { if(rg_db.systemGlobal.accessWanLimitPortMask_member.portmask&(0x1<<(rg_db.lut[search_index].rtk_lut.entry.l2UcEntry.ext_port+RTK_RG_PORT_CPU))&&permit_orig) atomic_dec(&rg_db.systemGlobal.accessWanLimitPortMaskCount); atomic_dec(&rg_db.systemGlobal.sourceAddrLearningCount[rg_db.lut[search_index].rtk_lut.entry.l2UcEntry.ext_port+RTK_RG_PORT_CPU]); port_move_orig=rg_db.lut[search_index].rtk_lut.entry.l2UcEntry.ext_port+RTK_RG_PORT_CPU; //decrease wlan's device count if(rg_db.lut[search_index].rtk_lut.entry.l2UcEntry.ext_port==(RTK_RG_EXT_PORT0-RTK_RG_PORT_CPU)) { #ifdef CONFIG_MASTER_WLAN0_ENABLE wlan0_move_orig=rg_db.lut[search_index].wlan_device_idx; atomic_dec(&rg_db.systemGlobal.wlan0SourceAddrLearningCount[wlan0_move_orig]); #endif } #ifdef CONFIG_DUALBAND_CONCURRENT else if(rg_db.systemGlobal.enableSlaveSSIDBind && rg_db.lut[search_index].rtk_lut.entry.l2UcEntry.ext_port==(RTK_RG_EXT_PORT1-RTK_RG_PORT_CPU)) { #ifdef CONFIG_MASTER_WLAN0_ENABLE wlan0_move_orig=rg_db.lut[search_index].wlan_device_idx; atomic_dec(&rg_db.systemGlobal.wlan0SourceAddrLearningCount[wlan0_move_orig]); #endif } #endif if(_rtK_rg_checkCategoryPortmask(&rg_db.lut[search_index].rtk_lut.entry.l2UcEntry)==SUCCESS) { category_orig=rg_db.lut[search_index].category; atomic_dec(&rg_db.systemGlobal.accessWanLimitCategoryCount[category_orig]); } DEBUG("froced replace WLAN's LUT entry[%d] for gateway entry!!",search_index); break; } if(macEntry.isIVL)goto ADD_SVL_LUT; //check SVL,too rg_db.netif[intfIdx].l2_idx=search_index; return RG_RET_SUCCESS; } } } } #endif if(first_invalid==-1) count=_rtk_rg_layer2GarbageCollection(l2Idx); //check if there is asynchronus between software and hardware table } //Use the first meet valid empty index if(first_invalid>=0) { search_index=first_invalid; } else if(count==4) //Replace the least recently used entry for new entry { search_index=_rtk_rg_layer2LeastRecentlyUsedReplace(l2Idx); if(search_index==RG_RET_ENTRY_NOT_GET) { FIXME("must add software LUT entry for LUT entry full."); //port-moving fail, recovery old count if(port_move_orig>=0) { 
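//No replaceable LUT slot was found, so undo the accounting done when the old entry was taken
//over: restore the per-port SA-learning count and, where applicable, the access-WAN-limit,
//WLAN0-device and category counters before reporting failure.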
if(rg_db.systemGlobal.accessWanLimitPortMask_member.portmask&(0x1<<(port_move_orig))&&permit_orig) atomic_inc(&rg_db.systemGlobal.accessWanLimitPortMaskCount); atomic_inc(&rg_db.systemGlobal.sourceAddrLearningCount[port_move_orig]); } //wlan0-dev-moving fail, recovery old count if(wlan0_move_orig>=0) { #ifdef CONFIG_MASTER_WLAN0_ENABLE atomic_inc(&rg_db.systemGlobal.wlan0SourceAddrLearningCount[wlan0_move_orig]); #endif } //category not change, recovery old count if(category_orig>=0) { atomic_inc(&rg_db.systemGlobal.accessWanLimitCategoryCount[category_orig]); } return RG_RET_FAIL; } } else search_index=l2Idx+count; memcpy(macEntry.mac.octet,gatewayMac,ETHER_ADDR_LEN); macEntry.port_idx=RTK_RG_MAC_PORT_CPU; macEntry.static_entry=1; //since this lut entry should exist till interface dead, we have to create it statically ret=rtk_rg_apollo_macEntry_add(&macEntry,&search_index); assert_ok(ret); rg_db.netif[intfIdx].l2_idx=search_index; DEBUG("### add l2[%d]=%02x:%02x:%02x:%02x:%02x:%02x netif=%d ###\n",search_index,gatewayMac[0],gatewayMac[1],gatewayMac[2],gatewayMac[3],gatewayMac[4],gatewayMac[5],intfIdx); if(macEntry.isIVL)goto ADD_SVL_LUT; //add SVL,too return RG_RET_SUCCESS; } void _rtk_rg_deleteGatewayMacEntry(uint8 *gatewayMac, int vlanID, uint32 untagSet) { rtk_rg_macEntry_t macEntry; int ret,l2Idx,search_index,count=0,i; memset(&macEntry,0,sizeof(rtk_rg_macEntry_t)); macEntry.vlan_id=vlanID; macEntry.fid=rg_db.vlan[macEntry.vlan_id].fid; //DEBUG("the internalVlanID is %d, fid is %d",macEntry.vlan_id,macEntry.fid); if(rg_db.vlan[macEntry.vlan_id].fidMode==VLAN_FID_IVL) { macEntry.isIVL=1; l2Idx=_rtk_rg_hash_mac_vid_efid(gatewayMac,macEntry.vlan_id,0); //FIXME:EFID is 0 now } else { DEL_SVL_LUT: count=0; macEntry.isIVL=0; #if defined(CONFIG_RTL9600_SERIES) || defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) if((untagSet&(0x1<<RTK_RG_MAC_PORT_CPU))>0) //cpu is in untag set macEntry.vlan_id=0; //untag for DMAC2CVID #else // support ctag_if macEntry.ctag_if=((untagSet&(0xa<<RTK_RG_MAC_PORT_CPU))>0)?0:1; #endif l2Idx=_rtk_rg_hash_mac_fid_efid(gatewayMac,macEntry.fid,0); //FIXME:EFID is 0 now } l2Idx<<=2; do { search_index = l2Idx+count; //DEBUG("search_idx is %d\n",search_index); if(rg_db.lut[search_index].valid && rg_db.lut[search_index].rtk_lut.entryType==RTK_LUT_L2UC && (memcmp(rg_db.lut[search_index].rtk_lut.entry.l2UcEntry.mac.octet,gatewayMac,ETHER_ADDR_LEN)==0)) { if(((macEntry.isIVL==1) && rg_db.lut[search_index].rtk_lut.entry.l2UcEntry.vid==macEntry.vlan_id) || ((macEntry.isIVL==0) && rg_db.lut[search_index].rtk_lut.entry.l2UcEntry.fid==macEntry.fid)) { //DEBUG("match!!"); break; } } count++; //search from next entry }while(count < 4); if(count==4) { //Check bCAM LUT first, if match, just return. for(search_index=MAX_LUT_HW_TABLE_SIZE-MAX_LUT_BCAM_TABLE_SIZE;search_index<MAX_LUT_HW_TABLE_SIZE;search_index++) { if(rg_db.lut[search_index].valid && rg_db.lut[search_index].rtk_lut.entryType==RTK_LUT_L2UC) { if(memcmp(rg_db.lut[search_index].rtk_lut.entry.l2UcEntry.mac.octet,gatewayMac,ETHER_ADDR_LEN)==0) { if(((macEntry.isIVL==1) && rg_db.lut[search_index].rtk_lut.entry.l2UcEntry.vid==macEntry.vlan_id) || ((macEntry.isIVL==0) && rg_db.lut[search_index].rtk_lut.entry.l2UcEntry.fid==macEntry.fid)) { //HIT! 
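//The delete path mirrors the add path above: IVL entries are hashed on (MAC, VID), SVL entries
//on (MAC, FID), and the four ways of the bucket are probed for a match.  When the gateway was
//installed as IVL, the DEL_SVL_LUT label is taken afterwards so the SVL counterpart is checked
//and removed as well.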
break; } } } } if(search_index==MAX_LUT_HW_TABLE_SIZE) { DEBUG("gateway %s lut is not exist anymore...",macEntry.isIVL?"VLAN-based":"MAC-based"); if(macEntry.isIVL)goto DEL_SVL_LUT; //del SVL,too return; } } //Check if there are LAN or WAN interfaces use the same MAC and VLANID for(i=0;i<rg_db.systemGlobal.lanIntfTotalNum;i++) { //if the interface are IVL, VLAN and MAC are all the same, we just keep the IVL one, but SVL should check again if(macEntry.isIVL==1) { if(rg_db.systemGlobal.lanIntfGroup[i].p_intfInfo->p_lanIntfConf->intf_vlan_id==macEntry.vlan_id && rg_db.systemGlobal.lanIntfGroup[i].p_intfInfo->p_lanIntfConf->isIVL && memcmp(rg_db.systemGlobal.lanIntfGroup[i].p_intfInfo->p_lanIntfConf->gmac.octet,rg_db.lut[search_index].rtk_lut.entry.l2UcEntry.mac.octet,ETHER_ADDR_LEN)==0) goto DEL_SVL_LUT; } else //check the VLANID to insure that tag/untag will be same { if(macEntry.vlan_id==0) //untag { if((rg_db.systemGlobal.lanIntfGroup[i].p_intfInfo->p_lanIntfConf->untag_mask.portmask&(0x1<<RTK_RG_MAC_PORT_CPU))>0 && macEntry.vlan_id==0 && //cpu is in untag set memcmp(rg_db.systemGlobal.lanIntfGroup[i].p_intfInfo->p_lanIntfConf->gmac.octet,rg_db.lut[search_index].rtk_lut.entry.l2UcEntry.mac.octet,ETHER_ADDR_LEN)==0) goto KEEP_MAC; } else //tag: compare VID { if(rg_db.systemGlobal.lanIntfGroup[i].p_intfInfo->p_lanIntfConf->intf_vlan_id==macEntry.vlan_id && memcmp(rg_db.systemGlobal.lanIntfGroup[i].p_intfInfo->p_lanIntfConf->gmac.octet,rg_db.lut[search_index].rtk_lut.entry.l2UcEntry.mac.octet,ETHER_ADDR_LEN)==0) goto KEEP_MAC; } } } for(i=0;i<rg_db.systemGlobal.wanIntfTotalNum;i++) { //if the interface are IVL, VLAN and MAC are all the same, we just keep the IVL one, but SVL should check again if(macEntry.isIVL==1) { if(rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_id==macEntry.vlan_id && rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->isIVL && memcmp(rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->gmac.octet,rg_db.lut[search_index].rtk_lut.entry.l2UcEntry.mac.octet,ETHER_ADDR_LEN)==0) goto DEL_SVL_LUT; } else { if(macEntry.vlan_id==0) //untag { if(rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_tag_on==0 && //cpu is in untag set memcmp(rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->gmac.octet,rg_db.lut[search_index].rtk_lut.entry.l2UcEntry.mac.octet,ETHER_ADDR_LEN)==0) goto KEEP_MAC; } else //tag: compare VID { if(rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_id==macEntry.vlan_id && memcmp(rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->gmac.octet,rg_db.lut[search_index].rtk_lut.entry.l2UcEntry.mac.octet,ETHER_ADDR_LEN)==0) goto KEEP_MAC; } } } ret=rtk_rg_apollo_macEntry_del(search_index); if(ret==RT_ERR_RG_ENTRY_NOT_EXIST) //not in hardware anymore memset(&rg_db.lut[search_index],0,sizeof(rtk_rg_table_lut_t)); if(macEntry.isIVL)goto DEL_SVL_LUT; //del SVL,too KEEP_MAC: return; } int _rtk_rg_updatePortBasedVIDByLanOrder(rtk_portmask_t mac_pmask, rtk_portmask_t etp_pmask) { int i,j,setPVid,ret; char IPVerAll,IPVerAllUntag,IPVerV4,IPVerV6,IPProtoBlock; char IPVerAll_ext,IPVerAllUntag_ext; char VLANProtoBlockUsed=0; rtk_vlan_protoVlanCfg_t protoVlanCfg; rtk_portmask_t mbpmsk, utpmsk, etpmsk; //rtk_portmask_t mbpmsk,etpmsk,update_mbpmsk,update_etpmsk; //DEBUG("mac_pmsk is %x, ext_pmsk is %x",mac_pmask.bits[0],etp_pmask.bits[0]); //Union all LAN port into DEFAULT_LAN_VLAN /*bzero(&update_mbpmsk,sizeof(rtk_portmask_t)); bzero(&update_etpmsk,sizeof(rtk_portmask_t)); for(i=0;i<rg_db.systemGlobal.lanIntfTotalNum;i++) { 
_rtk_rg_portmask_translator(rg_db.systemGlobal.lanIntfGroup[i].p_intfInfo->p_lanIntfConf->port_mask,&mbpmsk,&etpmsk); update_mbpmsk.bits[0]|=mbpmsk.bits[0]; update_etpmsk.bits[0]|=etpmsk.bits[0]; } //filter WAN port update_mbpmsk.bits[0]&=0x4f; //update DEFAULT_LAN_VLAN ret = RTK_VLAN_PORT_SET(DEFAULT_LAN_VLAN, &update_mbpmsk, &rg_db.vlan[DEFAULT_LAN_VLAN].UntagPortmask); assert_ok(ret); ret = RTK_VLAN_EXTPORT_SET(DEFAULT_LAN_VLAN, &update_etpmsk); assert_ok(ret);*/ //Check member port of VLAN ID for(i=0;i<RTK_RG_MAC_PORT_MAX;i++) { if((mac_pmask.bits[0]&(0x1<<i)) > 0 /*&& i != RTK_RG_MAC_PORT_CPU*/) { //init _rtk_rg_cleanPortAndProtocolSettings(i); IPVerAll=0; IPVerAllUntag=0; IPVerV4=0; IPVerV6=0; IPProtoBlock=0; setPVid=rg_db.systemGlobal.initParam.fwdVLAN_CPU; for(j=0;j<rg_db.systemGlobal.lanIntfTotalNum;j++) { if((rg_db.systemGlobal.lanIntfGroup[j].p_intfInfo->p_lanIntfConf->port_mask.portmask&(0x1<<i)) > 0) { if(rg_db.systemGlobal.lanIntfGroup[j].p_intfInfo->p_lanIntfConf->ip_version==IPVER_V4ONLY) { if(IPVerV4==0) //first add V4 ony inteface, add to PPB setting { //DEBUG("@@@ add IPv4 PPB as %d in port %d",rg_db.systemGlobal.lanIntfGroup[j].p_intfInfo->p_lanIntfConf->intf_vlan_id,i); IPVerV4=1; //add V4 PPB protoVlanCfg.valid=1; protoVlanCfg.vid=rg_db.systemGlobal.lanIntfGroup[j].p_intfInfo->p_lanIntfConf->intf_vlan_id; protoVlanCfg.pri=0; //FIXME: should I change this? protoVlanCfg.dei=0; ret = RTK_VLAN_PORTPROTOVLAN_SET(i,RG_IPV4_GROUPID,&protoVlanCfg); assert_ok(ret); //add ARP PPB protoVlanCfg.valid=1; protoVlanCfg.vid=rg_db.systemGlobal.lanIntfGroup[j].p_intfInfo->p_lanIntfConf->intf_vlan_id; protoVlanCfg.pri=0; //FIXME: should I change this? protoVlanCfg.dei=0; ret = RTK_VLAN_PORTPROTOVLAN_SET(i,RG_ARP_GROUPID,&protoVlanCfg); assert_ok(ret); if(IPVerAll==0 && IPVerAllUntag==0) { //if no other interface set to "ALL", the PVID should set to DEFAULT_LAN_VLAN //to block all other protocol packet from going to other port. IPProtoBlock=1; setPVid=rg_db.systemGlobal.initParam.fwdVLAN_Proto_Block; } } } else if(rg_db.systemGlobal.lanIntfGroup[j].p_intfInfo->p_lanIntfConf->ip_version==IPVER_V6ONLY) { if(IPVerV6==0) { //DEBUG("@@@ add IPv6 PPB as %d in port %d",rg_db.systemGlobal.lanIntfGroup[j].p_intfInfo->p_lanIntfConf->intf_vlan_id,i); IPVerV6=1; //add V6 PPB protoVlanCfg.valid=1; protoVlanCfg.vid=rg_db.systemGlobal.lanIntfGroup[j].p_intfInfo->p_lanIntfConf->intf_vlan_id; protoVlanCfg.pri=0; //FIXME: should I change this? protoVlanCfg.dei=0; ret = RTK_VLAN_PORTPROTOVLAN_SET(i,RG_IPV6_GROUPID,&protoVlanCfg); assert_ok(ret); if(IPVerAll==0 && IPVerAllUntag==0) { //if no other interface set to "ALL", the PVID should set to DEFAULT_LAN_VLAN //to block all other protocol packet from going to other port. 
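//(The proto-block VLAN created further below has only the CPU port as member, so traffic that
//falls back to this PVID instead of matching a protocol group is effectively confined to the
//CPU and kept away from the other LAN ports.)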
IPProtoBlock=1; setPVid=rg_db.systemGlobal.initParam.fwdVLAN_Proto_Block; } } } else //all { if(IPVerAllUntag==0 && (rg_db.systemGlobal.lanIntfGroup[j].p_intfInfo->p_lanIntfConf->untag_mask.portmask&(0x1<<i))>0) { //DEBUG("@@@ set pvid of port %d as %d(first Untag LAN)",i,rg_db.systemGlobal.lanIntfGroup[j].p_intfInfo->p_lanIntfConf->intf_vlan_id); setPVid=rg_db.systemGlobal.lanIntfGroup[j].p_intfInfo->p_lanIntfConf->intf_vlan_id; IPVerAllUntag=1; } else if(IPVerAllUntag==0 && IPVerAll==0) { //DEBUG("@@@ set pvid of port %d as %d(first LAN)",i,rg_db.systemGlobal.lanIntfGroup[j].p_intfInfo->p_lanIntfConf->intf_vlan_id); setPVid=rg_db.systemGlobal.lanIntfGroup[j].p_intfInfo->p_lanIntfConf->intf_vlan_id; IPVerAll=1; } else { //Do nothing //DEBUG("@@@ set pvid of port %d as %d(already decided)",i,setPVid); } } } } #if 0 firstIPVer=-1; setPVid=DEFAULT_CPU_VLAN; for(j=0;j<rg_db.systemGlobal.lanIntfTotalNum;j++) { if((rg_db.systemGlobal.lanIntfGroup[j].p_intfInfo->p_lanIntfConf->port_mask.portmask&(0x1<<i)) > 0) { if(firstIPVer<0) //first interface's IPVersion { setPVid=rg_db.systemGlobal.lanIntfGroup[j].p_intfInfo->p_lanIntfConf->intf_vlan_id; firstIPVer=rg_db.systemGlobal.lanIntfGroup[j].p_intfInfo->p_lanIntfConf->ip_version; } else //the j is after first interface, so check if we need to add port and protocol based VLAN { if(firstIPVer==IPVER_V4V6 || rg_db.systemGlobal.lanIntfGroup[j].p_intfInfo->p_lanIntfConf->ip_version != firstIPVer) { if(firstIPVer==IPVER_V4ONLY) { //add V6 PPB protoVlanCfg.valid=1; protoVlanCfg.vid=rg_db.systemGlobal.lanIntfGroup[j].p_intfInfo->p_lanIntfConf->intf_vlan_id; protoVlanCfg.pri=0; //FIXME: should I change this? protoVlanCfg.dei=0; ret = RTK_VLAN_PORTPROTOVLAN_SET(i,RG_IPV6_GROUPID,&protoVlanCfg); assert_ok(ret); } else if(firstIPVer==IPVER_V6ONLY) { //add V4 PPB protoVlanCfg.valid=1; protoVlanCfg.vid=rg_db.systemGlobal.lanIntfGroup[j].p_intfInfo->p_lanIntfConf->intf_vlan_id; protoVlanCfg.pri=0; //FIXME: should I change this? protoVlanCfg.dei=0; ret = RTK_VLAN_PORTPROTOVLAN_SET(i,RG_IPV4_GROUPID,&protoVlanCfg); assert_ok(ret); //add ARP PPB protoVlanCfg.valid=1; protoVlanCfg.vid=rg_db.systemGlobal.lanIntfGroup[j].p_intfInfo->p_lanIntfConf->intf_vlan_id; protoVlanCfg.pri=0; //FIXME: should I change this? 
protoVlanCfg.dei=0; ret = RTK_VLAN_PORTPROTOVLAN_SET(i,RG_ARP_GROUPID,&protoVlanCfg); assert_ok(ret); } break; } } } } #endif if(IPProtoBlock && !rg_db.systemGlobal.vlan_proto_block_created) { ret = RTK_VLAN_CREATE(rg_db.systemGlobal.initParam.fwdVLAN_Proto_Block); if(ret==RT_ERR_VLAN_EXIST) { WARNING("the fwdVLAN_Proto_Block[%d] had been used...please check!",rg_db.systemGlobal.initParam.fwdVLAN_Proto_Block); } else { assert_ok(ret); } mbpmsk.bits[0]=(0x1<<RTK_RG_MAC_PORT_CPU); //CPU port utpmsk.bits[0]=RTK_RG_ALL_MAC_PORTMASK; //all untag etpmsk.bits[0]=0x1; //extension CPU port ret = RTK_VLAN_FID_SET(rg_db.systemGlobal.initParam.fwdVLAN_Proto_Block, LAN_FID); assert_ok(ret); ret = RTK_VLAN_FIDMODE_SET(rg_db.systemGlobal.initParam.fwdVLAN_Proto_Block, VLAN_FID_SVL); //This is used for ALL LAN interface assert_ok(ret); ret = RTK_VLAN_PORT_SET(rg_db.systemGlobal.initParam.fwdVLAN_Proto_Block, &mbpmsk, &utpmsk); assert_ok(ret); ret = RTK_VLAN_EXTPORT_SET(rg_db.systemGlobal.initParam.fwdVLAN_Proto_Block, &etpmsk); assert_ok(ret); rg_db.systemGlobal.vlan_proto_block_created=1; VLANProtoBlockUsed=1; } //DEBUG("MAC port %d vlan is %d",i,setPVid); ret = RTK_VLAN_PORTPVID_SET(i, setPVid); assert_ok(ret); } if(i==0)continue; //extension port 0 is CPU port, no port-based VID if((etp_pmask.bits[0]&(0x1<<i)) > 0 && i+RTK_RG_PORT_CPU < RTK_RG_PORT_MAX) { IPVerAll_ext=0; IPVerAllUntag_ext=0; setPVid=rg_db.systemGlobal.initParam.fwdVLAN_CPU; for(j=0;j<rg_db.systemGlobal.lanIntfTotalNum;j++) { if((rg_db.systemGlobal.lanIntfGroup[j].p_intfInfo->p_lanIntfConf->port_mask.portmask&(0x1<<(i+RTK_RG_PORT_CPU))) > 0) { if(IPVerAllUntag_ext==0 && (rg_db.systemGlobal.lanIntfGroup[j].p_intfInfo->p_lanIntfConf->untag_mask.portmask&(0x1<<RTK_RG_PORT_CPU))>0) { //DEBUG("@@@ set pvid of port %d as %d(first Untag LAN)",i,rg_db.systemGlobal.lanIntfGroup[j].p_intfInfo->p_lanIntfConf->intf_vlan_id); setPVid=rg_db.systemGlobal.lanIntfGroup[j].p_intfInfo->p_lanIntfConf->intf_vlan_id; IPVerAllUntag_ext=1; } else if(IPVerAllUntag_ext==0 && IPVerAll_ext==0) { //DEBUG("@@@ set pvid of port %d as %d(first LAN)",i,rg_db.systemGlobal.lanIntfGroup[j].p_intfInfo->p_lanIntfConf->intf_vlan_id); setPVid=rg_db.systemGlobal.lanIntfGroup[j].p_intfInfo->p_lanIntfConf->intf_vlan_id; IPVerAll_ext=1; } else { //Do nothing //DEBUG("@@@ set pvid of port %d as %d(already decided)",i,setPVid); } } } //DEBUG("EXT port %d vlan is %d",i,setPVid); ret = RTK_VLAN_EXTPORTPVID_SET(i, setPVid); //ext port index is 0-4, means ext port 1-5 assert_ok(ret); #ifdef CONFIG_MASTER_WLAN0_ENABLE if(i==1) { //Assign device-based VID to WLAN0's devices for(j=0;j<MAX_WLAN_DEVICE_NUM;j++) { if(rg_db.systemGlobal.wlan0BindDecision[j].exist) { #ifdef CONFIG_RTL_CLIENT_MODE_SUPPORT if(j==RG_WWAN_WLAN0_VXD || j==RG_WWAN_WLAN1_VXD)continue; #endif ret = rtk_rg_apollo_wlanDevBasedCVlanId_set(0,j,setPVid); assert_ok(ret); //Add this device to setPVid's Wlan0DevMask rg_db.vlan[setPVid].wlan0DevMask|=(0x1<<j); if(rg_db.vlan[setPVid].UntagPortmask.bits[0]&(0x1<<RTK_RG_PORT_CPU)) rg_db.vlan[setPVid].wlan0UntagMask|=(0x1<<j); else rg_db.vlan[setPVid].wlan0UntagMask&=(~(0x1<<j)); }else{ //Remove this device to setPVid's Wlan0DevMask rg_db.vlan[setPVid].wlan0DevMask&=(~(0x1<<j)); rg_db.vlan[setPVid].wlan0UntagMask|=(0x1<<j); } } } #endif //1 FIXME: in 6266, port and protocol based VLAN do not have extension port settings!! 
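//When CONFIG_MASTER_WLAN0_ENABLE is set, the PVID chosen above for extension port 1 is also
//pushed down as the device-based CVID of every existing WLAN0 device (VXD client-mode devices
//are skipped where that support is compiled in), and the VLAN's wlan0DevMask/wlan0UntagMask
//book-keeping is updated; devices that no longer exist are removed from the mask.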
} } //Clear up proto block vlan if not needed anymore if(rg_db.systemGlobal.vlan_proto_block_created==1 && VLANProtoBlockUsed==0) { ret=RTK_VLAN_DESTROY(rg_db.systemGlobal.initParam.fwdVLAN_Proto_Block); assert_ok(ret); rg_db.systemGlobal.vlan_proto_block_created=0; } return (RT_ERR_RG_OK); } #if defined(CONFIG_APOLLO) void _rtk_rg_deletingPortBindFromInterface(int intfIdx) { int i; rtk_binding_entry_t pbindEt; rtk_rg_bindingEntry_t cb_bindEt; //clear all binding rules for this intfIdx for(i=MAX_BIND_SW_TABLE_SIZE - 1;i>=0;i--) { if(rg_db.bind[i].valid && rg_db.nexthop[rg_db.wantype[rg_db.bind[i].rtk_bind.wanTypeIdx].rtk_wantype.nhIdx].rtk_nexthop.ifIdx == intfIdx) { bzero(&pbindEt, sizeof(rtk_binding_entry_t)); bzero(&cb_bindEt,sizeof(rtk_rg_bindingEntry_t)); if(rg_db.bind[i].rtk_bind.vidLan == 0) //vlan != 0 means port-vlan binding, which we don't care here { cb_bindEt.port_bind_pmask.portmask=rg_db.bind[i].rtk_bind.portMask.bits[0]; RTK_L34_BINDINGTABLE_SET(i, &pbindEt); // TODO:Call the initParam's bindingDelByHwCallBack if(rg_db.systemGlobal.initParam.bindingDelByHwCallBack != NULL) { cb_bindEt.type=BIND_TYPE_PORT; cb_bindEt.wan_intf_idx=intfIdx; rg_db.systemGlobal.initParam.bindingDelByHwCallBack(&cb_bindEt); } } } } } int32 _rtk_rg_addBindFromPortmask(unsigned int pmsk, unsigned int expmsk, int intfIdx, int wantypeIdx, int v6WantypeIdx) { int i,j,ret,errorno,count=0,v6bindIdx=-1,addV6Bind=0; rtk_binding_entry_t pbindEt; rtk_rg_bindingEntry_t cb_bindEt; unsigned int tmppmsk=pmsk, tmpexpmsk=expmsk; //MAC Port count=tmppmsk; RG_ONE_COUNT(count); //DEBUG("MAC port count is %d",count); for(j=0;j<count;j++) { addV6Bind=0; v6bindIdx=-1; ADD_PORT_FOR_V6: errorno=RT_ERR_RG_ENTRY_FULL; for(i=MAX_BIND_SW_TABLE_SIZE - 1;i>=0;i--) //Port-binding start from the bottom of Binding Table { if(rg_db.bind[i].valid == 0) break; } if(i==-1)goto RET_BINDING_ERR; if(v6bindIdx<0)v6bindIdx = i; //Keep //DEBUG("%d is available",i); //rg_db.systemGlobal.bindToIntf[bindIdx]=intfIdx; //Add port binding entry once a time errorno=RT_ERR_RG_PORT_BIND_SET_FAIL; bzero(&pbindEt, sizeof(rtk_binding_entry_t)); pbindEt.extPortMask.bits[0]=0; pbindEt.vidLan=0; if(addV6Bind) { //we should change previous bind rule to v6 and add new v4 entry!(v4 upper than v6) rg_db.bind[v6bindIdx].rtk_bind.bindProto=L34_BIND_PROTO_NOT_IPV4; //v6 and other rg_db.bind[v6bindIdx].rtk_bind.wanTypeIdx=v6WantypeIdx; ret = RTK_L34_BINDINGTABLE_SET(v6bindIdx, &rg_db.bind[v6bindIdx].rtk_bind); if(ret==RT_ERR_CHIP_NOT_SUPPORTED) { errorno=RT_ERR_RG_CHIP_NOT_SUPPORT; goto RET_BINDING_ERR; } if(ret!=RT_ERR_OK)goto RET_BINDING_ERR; pbindEt.wanTypeIdx=wantypeIdx; pbindEt.bindProto=L34_BIND_PROTO_NOT_IPV6; //v4 and other } else { pbindEt.wanTypeIdx=wantypeIdx; pbindEt.bindProto=L34_BIND_PROTO_ALL; //ALL protocol in L3, and L2 } //DEBUG("before: tmppmsk is %x",tmppmsk); if((tmppmsk&(0x1<<RTK_RG_PORT0)) >0) //PORT0 { tmppmsk&=~(0x1<<RTK_RG_PORT0); pbindEt.portMask.bits[0]=(0x1<<RTK_RG_PORT0); } else if((tmppmsk&(0x1<<RTK_RG_PORT1)) >0) //PORT1 { tmppmsk&=~(0x1<<RTK_RG_PORT1); pbindEt.portMask.bits[0]=(0x1<<RTK_RG_PORT1); } #if !defined(CONFIG_RTL9602C_SERIES) else if((tmppmsk&(0x1<<RTK_RG_PORT2)) >0) //PORT2 { tmppmsk&=~(0x1<<RTK_RG_PORT2); pbindEt.portMask.bits[0]=(0x1<<RTK_RG_PORT2); } else if((tmppmsk&(0x1<<RTK_RG_PORT3)) >0) //PORT3 { tmppmsk&=~(0x1<<RTK_RG_PORT3); pbindEt.portMask.bits[0]=(0x1<<RTK_RG_PORT3); } else if((tmppmsk&(0x1<<RTK_RG_PORT_RGMII)) >0) //PORT5 { tmppmsk&=~(0x1<<RTK_RG_PORT_RGMII); 
pbindEt.portMask.bits[0]=(0x1<<RTK_RG_PORT_RGMII); } #endif else if((tmppmsk&(0x1<<RTK_RG_PORT_PON)) >0) //PORT4 { tmppmsk&=~(0x1<<RTK_RG_PORT_PON); pbindEt.portMask.bits[0]=(0x1<<RTK_RG_PORT_PON); } else continue; ret = RTK_L34_BINDINGTABLE_SET(i, &pbindEt); if(ret==RT_ERR_CHIP_NOT_SUPPORTED) { errorno=RT_ERR_RG_CHIP_NOT_SUPPORT; goto RET_BINDING_ERR; } if(ret!=RT_ERR_OK)goto RET_BINDING_ERR; // TODO:Call the initParam's bindingAddByHwCallBack if(!addV6Bind && rg_db.systemGlobal.initParam.bindingAddByHwCallBack != NULL) { cb_bindEt.type=BIND_TYPE_PORT; cb_bindEt.port_bind_pmask.portmask=pbindEt.portMask.bits[0]; cb_bindEt.wan_intf_idx=intfIdx; rg_db.systemGlobal.initParam.bindingAddByHwCallBack(&cb_bindEt); } //20140806LUKE: if we have ipv6 wanType, we should create one more bind for it!! if(!addV6Bind && v6WantypeIdx!=FAIL) { addV6Bind=1; tmppmsk|=pbindEt.portMask.bits[0]; goto ADD_PORT_FOR_V6; } } //Extension port count=tmpexpmsk; RG_ONE_COUNT(count); for(j=0;j<count;j++) { addV6Bind=0; v6bindIdx=-1; ADD_EXTPORT_FOR_V6: errorno=RT_ERR_RG_ENTRY_FULL; for(i=MAX_BIND_SW_TABLE_SIZE - 1;i>=0;i--) //Port-binding start from the bottom of Binding Table { if(rg_db.bind[i].valid == 0) break; } if(i==-1)goto RET_BINDING_ERR; if(v6bindIdx<0)v6bindIdx = i; //Keep //DEBUG("%d is available",i); //rg_db.systemGlobal.bindToIntf[bindIdx]=intfIdx; //Add port binding entry once a time errorno=RT_ERR_RG_EXTPORT_BIND_SET_FAIL; bzero(&pbindEt, sizeof(rtk_binding_entry_t)); pbindEt.portMask.bits[0]=0; pbindEt.vidLan=0; if(addV6Bind) { //we should change previous bind rule to v6 and add new v4 entry!(v4 upper than v6) rg_db.bind[v6bindIdx].rtk_bind.bindProto=L34_BIND_PROTO_NOT_IPV4; //v6 and other rg_db.bind[v6bindIdx].rtk_bind.wanTypeIdx=v6WantypeIdx; ret = RTK_L34_BINDINGTABLE_SET(v6bindIdx, &rg_db.bind[v6bindIdx].rtk_bind); if(ret==RT_ERR_CHIP_NOT_SUPPORTED) { errorno=RT_ERR_RG_CHIP_NOT_SUPPORT; goto RET_BINDING_ERR; } if(ret!=RT_ERR_OK)goto RET_BINDING_ERR; pbindEt.wanTypeIdx=wantypeIdx; pbindEt.bindProto=L34_BIND_PROTO_NOT_IPV6; //v4 and other } else { pbindEt.wanTypeIdx=wantypeIdx; pbindEt.bindProto=L34_BIND_PROTO_ALL; //ALL protocol in L3, and L2 } //DEBUG("before: tmpexpmsk is %x",tmpexpmsk); if((tmpexpmsk&(0x1<<(RTK_RG_EXT_PORT0-RTK_RG_EXT_PORT0))) >0) //EXTPORT0 { tmpexpmsk&=~(0x1<<(RTK_RG_EXT_PORT0-RTK_RG_EXT_PORT0)); pbindEt.extPortMask.bits[0]=(0x1<<RTK_RG_BD_EXT_PORT0); } else if((tmpexpmsk&(0x1<<(RTK_RG_EXT_PORT1-RTK_RG_EXT_PORT0))) >0) //EXTPORT1 { tmpexpmsk&=~(0x1<<(RTK_RG_EXT_PORT1-RTK_RG_EXT_PORT0)); pbindEt.extPortMask.bits[0]=(0x1<<RTK_RG_BD_EXT_PORT1); } else if((tmpexpmsk&(0x1<<(RTK_RG_EXT_PORT2-RTK_RG_EXT_PORT0))) >0) //EXTPORT2 { tmpexpmsk&=~(0x1<<(RTK_RG_EXT_PORT2-RTK_RG_EXT_PORT0)); pbindEt.extPortMask.bits[0]=(0x1<<RTK_RG_BD_EXT_PORT2); } else if((tmpexpmsk&(0x1<<(RTK_RG_EXT_PORT3-RTK_RG_EXT_PORT0))) >0) //EXTPORT3 { tmpexpmsk&=~(0x1<<(RTK_RG_EXT_PORT3-RTK_RG_EXT_PORT0)); pbindEt.extPortMask.bits[0]=(0x1<<RTK_RG_BD_EXT_PORT3); } else if((tmpexpmsk&(0x1<<(RTK_RG_EXT_PORT4-RTK_RG_EXT_PORT0))) >0) //EXTPORT4 { tmpexpmsk&=~(0x1<<(RTK_RG_EXT_PORT4-RTK_RG_EXT_PORT0)); pbindEt.extPortMask.bits[0]=(0x1<<RTK_RG_BD_EXT_PORT4); } else continue; ret = RTK_L34_BINDINGTABLE_SET(i, &pbindEt); if(ret==RT_ERR_CHIP_NOT_SUPPORTED) { errorno=RT_ERR_RG_CHIP_NOT_SUPPORT; goto RET_BINDING_ERR; } if(ret!=RT_ERR_OK)goto RET_BINDING_ERR; // TODO:Call the initParam's bindingAddByHwCallBack if(!addV6Bind && rg_db.systemGlobal.initParam.bindingAddByHwCallBack != NULL) { cb_bindEt.type=BIND_TYPE_PORT; 
cb_bindEt.port_bind_pmask.portmask=pbindEt.extPortMask.bits[0]; cb_bindEt.wan_intf_idx=intfIdx; rg_db.systemGlobal.initParam.bindingAddByHwCallBack(&cb_bindEt); } //20140806LUKE: if we have ipv6 wanType, we should create one more bind for it!! if(!addV6Bind && v6WantypeIdx!=FAIL) { addV6Bind=1; tmpexpmsk|=pbindEt.extPortMask.bits[0]; goto ADD_EXTPORT_FOR_V6; } } return (RT_ERR_RG_OK); RET_BINDING_ERR: //Delete each port-binding entry in binding table (vlan=0) _rtk_rg_deletingPortBindFromInterface(intfIdx); return (errorno); } int32 _rtk_rg_updatingVlanBind(int wanIdx,int v6wanTypeIdx) { int i,j,ret,wanTypeIdx; rtk_binding_entry_t pbindEt; wanTypeIdx=rg_db.systemGlobal.interfaceInfo[wanIdx].storedInfo.wan_intf.bind_wan_type_ipv4; DEBUG("wanTypeIdx is %d ,v6wanTypeIdx is %d",wanTypeIdx,v6wanTypeIdx); if(v6wanTypeIdx>=0) //change ALL to V4andOther, create v6andOther { for(i=0;i<MAX_BIND_SW_TABLE_SIZE;i++) { if(rg_db.bind[i].valid && rg_db.bind[i].rtk_bind.vidLan!=0 && rg_db.bind[i].rtk_bind.bindProto==L34_BIND_PROTO_ALL && rg_db.bind[i].rtk_bind.wanTypeIdx==wanTypeIdx) { //find a valid one for v6bind for(j=0;j<MAX_BIND_SW_TABLE_SIZE;j++) { if(!rg_db.bind[j].valid) break; } if(j==MAX_BIND_SW_TABLE_SIZE)RETURN_ERR(RT_ERR_RG_ENTRY_FULL); memcpy(&pbindEt,&rg_db.bind[i].rtk_bind,sizeof(rtk_binding_entry_t)); pbindEt.bindProto=L34_BIND_PROTO_NOT_IPV6; ret = RTK_L34_BINDINGTABLE_SET(i, &pbindEt); if(ret==RT_ERR_CHIP_NOT_SUPPORTED)RETURN_ERR(RT_ERR_RG_CHIP_NOT_SUPPORT); if(ret!=RT_ERR_OK)RETURN_ERR(ret); DEBUG("change bind[%d] to v4andOther!",i); pbindEt.wanTypeIdx=v6wanTypeIdx; pbindEt.bindProto=L34_BIND_PROTO_NOT_IPV4; ret = RTK_L34_BINDINGTABLE_SET(j, &pbindEt); if(ret==RT_ERR_CHIP_NOT_SUPPORTED)RETURN_ERR(RT_ERR_RG_CHIP_NOT_SUPPORT); if(ret!=RT_ERR_OK)RETURN_ERR(ret); rg_db.systemGlobal.vlanBindTotalNum++; DEBUG("add bind[%d] to v6andOther!",j); } } } else //delete v6andOther, recovery v4andOther to ALL { for(i=0;i<MAX_BIND_SW_TABLE_SIZE;i++) { if(rg_db.bind[i].valid && rg_db.bind[i].rtk_bind.vidLan!=0 && rg_db.nexthop[rg_db.wantype[rg_db.bind[i].rtk_bind.wanTypeIdx].rtk_wantype.nhIdx].rtk_nexthop.ifIdx==wanIdx) { if(rg_db.bind[i].rtk_bind.bindProto==L34_BIND_PROTO_NOT_IPV4) { //delete v6bind memset(&pbindEt,0,sizeof(rtk_binding_entry_t)); ret = RTK_L34_BINDINGTABLE_SET(i, &pbindEt); if(ret==RT_ERR_CHIP_NOT_SUPPORTED)RETURN_ERR(RT_ERR_RG_CHIP_NOT_SUPPORT); if(ret!=RT_ERR_OK)RETURN_ERR(ret); if(rg_db.systemGlobal.vlanBindTotalNum>0) rg_db.systemGlobal.vlanBindTotalNum--; DEBUG("delete bind[%d] to v6andOther!",i); } else if(rg_db.bind[i].rtk_bind.bindProto==L34_BIND_PROTO_NOT_IPV6) { //recovery v4bind rg_db.bind[i].rtk_bind.bindProto=L34_BIND_PROTO_ALL; ret = RTK_L34_BINDINGTABLE_SET(i, &rg_db.bind[i].rtk_bind); if(ret==RT_ERR_CHIP_NOT_SUPPORTED)RETURN_ERR(RT_ERR_RG_CHIP_NOT_SUPPORT); if(ret!=RT_ERR_OK)RETURN_ERR(ret); DEBUG("change bind[%d] to all!",i); } } } } return (RT_ERR_RG_OK); } #endif //end defined(CONFIG_APOLLO) int32 _rtk_rg_deleteSwARP(int intfIdx, int routingIdx) { int i; rtk_rg_arp_linkList_t *pSwArpList,*pNextSwArpList; for(i=0;i<MAX_ARP_SW_TABLE_HEAD;i++) { if(!list_empty(&rg_db.softwareArpTableHead[i])) { list_for_each_entry_safe(pSwArpList,pNextSwArpList,&rg_db.softwareArpTableHead[i],arp_list) { if(rg_db.arp[pSwArpList->idx].routingIdx==routingIdx) { _rtk_rg_softwareArpTableDel(pSwArpList); } } } } return (RT_ERR_RG_OK); } int32 _rtk_rg_deleteIPv4Routing(int lan_or_wan_intf_idx) { int i,j,ret; int nh_num,match_intf=0,deleteL3Idx=-1; #if defined(CONFIG_RTL9600_SERIES) || 
defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) int count=0,l2Idx,search_index; rtk_rg_macEntry_t secondMacEt; rtk_rg_table_arp_t *pArpInfo; rtk_rg_routing_arpInfo_t deletingEntry; #endif rtk_l34_routing_entry_t rtEntry; rtk_rg_ipv4RoutingEntry_t cb_routEt; rtk_l34_ext_intip_entry_t extipEt; for(i=0; i<MAX_L3_SW_TABLE_SIZE; i++) { if(i== V4_DEFAULT_ROUTE_IDX) continue; #ifdef CONFIG_DUALBAND_CONCURRENT if(i==SLAVE_WIFI_ROUTE_IDX) //ipc routing should not be deleted at any time! continue; #endif if(rg_db.l3[i].rtk_l3.process == L34_PROCESS_ARP && rg_db.l3[i].rtk_l3.netifIdx == lan_or_wan_intf_idx) { //Delete the ARP table entries referenced by routing table #if defined(CONFIG_RTL9600_SERIES) || defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) for(j=(rg_db.l3[i].rtk_l3.arpStart<<2);j<((rg_db.l3[i].rtk_l3.arpEnd+1)<<2);j++) { //bzero(&macEt, sizeof(rtk_rg_macEntry_t)); bzero(&secondMacEt, sizeof(rtk_rg_macEntry_t)); //bzero(&arpInfo,sizeof(rtk_rg_table_arp_t)); count=0; pArpInfo=&rg_db.arp[j]; if(pArpInfo->rtk_arp.valid==0) continue; //ret = rtk_rg_arpEntry_find(&arpInfo,&j); //if(ret==RT_ERR_RG_NO_MORE_ENTRY_FOUND) //arp after j is all invalid //break; //if(j>=((rg_db.l3[i].rtk_l3.arpEnd+1)<<2)) //means this routing entry didn't has valid ARP //break; //Delete LUT table entries referenced by ARP: //If the VLAN is IVL, find if there is SVL LUT, delete it also, and vice versa. //20140728LUKE: static LUT should keep in system if(rg_db.lut[pArpInfo->rtk_arp.nhIdx].valid && rg_db.lut[pArpInfo->rtk_arp.nhIdx].rtk_lut.entryType==RTK_LUT_L2UC && (rg_db.lut[pArpInfo->rtk_arp.nhIdx].rtk_lut.entry.l2UcEntry.flags&RTK_L2_UCAST_FLAG_STATIC)==0) { //Check fidmode if(rg_db.lut[pArpInfo->rtk_arp.nhIdx].rtk_lut.entry.l2UcEntry.flags&RTK_L2_UCAST_FLAG_IVL) { //delete SVL, too secondMacEt.isIVL=0; secondMacEt.fid=rg_db.lut[pArpInfo->rtk_arp.nhIdx].rtk_lut.entry.l2UcEntry.fid; //DEBUG("the SVL(%d) lut has to be deleted,too",secondMacEt.fid); l2Idx=_rtk_rg_hash_mac_fid_efid(rg_db.lut[pArpInfo->rtk_arp.nhIdx].rtk_lut.entry.l2UcEntry.mac.octet,secondMacEt.fid,rg_db.lut[pArpInfo->rtk_arp.nhIdx].rtk_lut.entry.l2UcEntry.efid); } else { //delete IVL, too secondMacEt.isIVL=1; //because the SVL LUT will set VID as 0 if untag, therefore VLAN ID should get for interface setting if(rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.is_wan==1) //wan interface secondMacEt.vlan_id = rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.egress_vlan_id; else //lan interface secondMacEt.vlan_id = rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].p_lanIntfConf->intf_vlan_id; //DEBUG("the IVL(%d) lut has to be deleted,too",secondMacEt.vlan_id); l2Idx=_rtk_rg_hash_mac_vid_efid(rg_db.lut[pArpInfo->rtk_arp.nhIdx].rtk_lut.entry.l2UcEntry.mac.octet,secondMacEt.vlan_id,rg_db.lut[pArpInfo->rtk_arp.nhIdx].rtk_lut.entry.l2UcEntry.efid); } l2Idx<<=2; do { search_index = l2Idx+count; //DEBUG("search_idx is %d",search_index); if(rg_db.lut[search_index].valid==0) { count++; continue; //empty } if(rg_db.lut[search_index].rtk_lut.entryType==RTK_LUT_L2UC && (!memcmp(rg_db.lut[search_index].rtk_lut.entry.l2UcEntry.mac.octet,rg_db.lut[pArpInfo->rtk_arp.nhIdx].rtk_lut.entry.l2UcEntry.mac.octet,ETHER_ADDR_LEN))) { if((secondMacEt.isIVL==1 && rg_db.lut[search_index].rtk_lut.entry.l2UcEntry.vid==secondMacEt.vlan_id) || (secondMacEt.isIVL==0 && rg_db.lut[search_index].rtk_lut.entry.l2UcEntry.fid==secondMacEt.fid)) { //DEBUG("%s MAC is exist @ %d, deleting...",secondMacEt.isIVL?"IVL":"SVL",search_index); 
rtk_rg_apollo_macEntry_del(search_index); break; } } count++; //search from next entry } while(count < 4); //Delete original one ret = rtk_rg_apollo_macEntry_del(pArpInfo->rtk_arp.nhIdx); //if the MAC is not exist, continue //if(ret!=RT_ERR_RG_OK)return ret; } if(rg_db.arp[j].rtk_arp.valid!=0) { ret = rtk_rg_apollo_arpEntry_del(j); if(ret!=RT_ERR_RG_OK)RETURN_ERR(ret); } } #elif defined(CONFIG_RTL9602C_SERIES) //Delete the ARP table entries referenced by routing table _rtk_rg_deleteHwARP(lan_or_wan_intf_idx,i); _rtk_rg_deleteSwARP(lan_or_wan_intf_idx,i); #endif #if defined(CONFIG_RTL9600_SERIES) || defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) //Deleting the routing entry bzero(&deletingEntry, sizeof(rtk_rg_routing_arpInfo_t)); deletingEntry.routingIdx=i; if(rg_db.l3[i].rtk_l3.rt2waninf) //routing to WAN deletingEntry.isLan=0; else deletingEntry.isLan=1; deletingEntry.bitNum=31-rg_db.l3[i].rtk_l3.ipMask; deletingEntry.arpStart=rg_db.l3[i].rtk_l3.arpStart; deletingEntry.arpEnd=rg_db.l3[i].rtk_l3.arpEnd; #endif bzero(&rtEntry, sizeof(rtk_l34_routing_entry_t)); bzero(&cb_routEt, sizeof(rtk_rg_ipv4RoutingEntry_t)); cb_routEt.dest_ip=rg_db.l3[i].rtk_l3.ipAddr; cb_routEt.ip_mask=rg_db.l3[i].netmask; ret = RTK_L34_ROUTINGTABLE_SET(i, &rtEntry); if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_ROUTE_SET_FAIL); deleteL3Idx = i; #if defined(CONFIG_RTL9600_SERIES) || defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) //Rerrange ARP table ret=_rtk_rg_delArpRoutingArray(&deletingEntry); if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_ROUTE_SET_FAIL); #endif //2 Call the initParam's routingDelByHwCallBack if(rg_db.systemGlobal.initParam.routingDelByHwCallBack != NULL) { cb_routEt.nexthop=0; cb_routEt.wan_intf_idx=lan_or_wan_intf_idx; rg_db.systemGlobal.initParam.routingDelByHwCallBack(&cb_routEt); } } else if(rg_db.l3[i].rtk_l3.process == L34_PROCESS_CPU && rg_db.l3[i].rtk_l3.ipAddr>0 && rg_db.l3[i].rtk_l3.netifIdx == lan_or_wan_intf_idx) //delete routing which added sw ARP table { #ifdef CONFIG_APOLLO_MODEL rtlglue_printf("FIXME: Execute list_for_each_entry_safe() lead to segmentation fault @ %s %d\n",__FUNCTION__,__LINE__); continue; #endif //Delete the ARP table entries referenced by routing table _rtk_rg_deleteSwARP(lan_or_wan_intf_idx,i); #if 0 //no need to rerrange ARP table //Deleting the routing entry bzero(&deletingEntry, sizeof(rtk_rg_routing_arpInfo_t)); deletingEntry.routingIdx=i; if(rg_db.l3[i].rtk_l3.rt2waninf) //routing to WAN deletingEntry.isLan=0; else deletingEntry.isLan=1; deletingEntry.bitNum=31-rg_db.l3[i].rtk_l3.ipMask; deletingEntry.arpStart=rg_db.l3[i].rtk_l3.arpStart; deletingEntry.arpEnd=rg_db.l3[i].rtk_l3.arpEnd; #endif bzero(&rtEntry, sizeof(rtk_l34_routing_entry_t)); bzero(&cb_routEt, sizeof(rtk_rg_ipv4RoutingEntry_t)); cb_routEt.dest_ip=rg_db.l3[i].rtk_l3.ipAddr; cb_routEt.ip_mask=rg_db.l3[i].netmask; ret = RTK_L34_ROUTINGTABLE_SET(i, &rtEntry); if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_ROUTE_SET_FAIL); deleteL3Idx = i; //no need to rerrange ARP table //ret=_rtk_rg_delArpRoutingArray(&deletingEntry); //if(ret!=RT_ERR_OK)goto RET_ERR; //2 Call the initParam's routingDelByHwCallBack if(rg_db.systemGlobal.initParam.routingDelByHwCallBack != NULL) { cb_routEt.nexthop=0; cb_routEt.wan_intf_idx=lan_or_wan_intf_idx; rg_db.systemGlobal.initParam.routingDelByHwCallBack(&cb_routEt); } } else if(rg_db.l3[i].rtk_l3.process == L34_PROCESS_NH) { //20140827LUKE: till setup STATIC ROUTE, we will create sw-ARP when asking remote server, delete it now!! 
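//Nexthop-processed (gateway) routes do not own an ARP block; instead each nexthop entry in
//[nhStart, nhStart + (1<<nhNum)) that points at the interface being removed gets its reference
//count decreased below, and the route itself is cleared only when such a match was found.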
_rtk_rg_deleteSwARP(lan_or_wan_intf_idx,i); //2 FIXME:if load-balance is used, then here should modified for it!! //Lookup Nexthop table for checking related interface nh_num = rg_db.l3[i].rtk_l3.nhStart; nh_num += (0x1<<rg_db.l3[i].rtk_l3.nhNum); if(nh_num > MAX_NEXTHOP_SW_TABLE_SIZE)RETURN_ERR(RT_ERR_RG_NXP_GET_FAIL); match_intf=0; for(j=rg_db.l3[i].rtk_l3.nhStart; j<nh_num; j++) { if(rg_db.nexthop[j].rtk_nexthop.ifIdx == lan_or_wan_intf_idx) { ret=_rtk_rg_decreaseNexthopReference(j); if(ret!=RT_ERR_RG_OK)RETURN_ERR(RT_ERR_RG_NXP_SET_FAIL); match_intf=1;//match_intf++; } } //Deleting the routing entry if(match_intf == 1)//if(match_intf == (nh_num-rtEntry.nhStart)) { bzero(&rtEntry, sizeof(rtk_l34_routing_entry_t)); bzero(&cb_routEt, sizeof(rtk_rg_ipv4RoutingEntry_t)); cb_routEt.dest_ip=rg_db.l3[i].rtk_l3.ipAddr; cb_routEt.ip_mask=rg_db.l3[i].netmask; ret = RTK_L34_ROUTINGTABLE_SET(i, &rtEntry); if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_ROUTE_SET_FAIL); deleteL3Idx = i; //2 Call the initParam's routingDelByHwCallBack if(rg_db.systemGlobal.initParam.routingDelByHwCallBack != NULL) { cb_routEt.wan_intf_idx=lan_or_wan_intf_idx; cb_routEt.nexthop=rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].p_wanStaticInfo->gateway_ipv4_addr; rg_db.systemGlobal.initParam.routingDelByHwCallBack(&cb_routEt); } } } } //Delete Internal External IP table entry, if any,decrease Nexthop table ref count, if zero delete nexthop entry if(rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.is_wan==1 && rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.wan_type!=RTK_RG_BRIDGE) { //Reset Default route to CPU if ipv4_default_gateway_on is 1 if(rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].p_wanStaticInfo->ipv4_default_gateway_on == 1) { bzero(&rtEntry, sizeof(rtk_l34_routing_entry_t)); bzero(&cb_routEt, sizeof(rtk_rg_ipv4RoutingEntry_t)); rtEntry.process=L34_PROCESS_CPU; rtEntry.valid=1; //20140808LUKE: if we delete default route WAN, decrease nexthop counter here! if(rg_db.l3[V4_DEFAULT_ROUTE_IDX].rtk_l3.process==L34_PROCESS_NH) { ret=_rtk_rg_decreaseNexthopReference(rg_db.l3[V4_DEFAULT_ROUTE_IDX].rtk_l3.nhStart); if(ret!=RT_ERR_RG_OK)RETURN_ERR(RT_ERR_RG_NXP_SET_FAIL); } ret = RTK_L34_ROUTINGTABLE_SET(V4_DEFAULT_ROUTE_IDX, &rtEntry); if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_ROUTE_SET_FAIL); } if(rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].p_wanStaticInfo->napt_enable || (rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].p_wanStaticInfo->ipv4_default_gateway_on==0 && rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].p_wanStaticInfo->gateway_ipv4_addr!=0)) { //2 $$$$ Delete NAPTR, NAPT table with deleting interface by comparing NAPTR's EIP idx $$$$ int eipidx=rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.extip_idx; for(i=0;i<MAX_NAPT_OUT_SW_TABLE_SIZE;i++) if(rg_db.naptOut[i].rtk_naptOut.valid && rg_db.naptIn[rg_db.naptOut[i].rtk_naptOut.hashIdx].rtk_naptIn.extIpIdx==rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.extip_idx) rtk_rg_apollo_naptConnection_del(i); if(eipidx>=0) { ret=_rtk_rg_decreaseNexthopReference(rg_db.extip[eipidx].rtk_extip.nhIdx); if(ret!=RT_ERR_RG_OK)RETURN_ERR(RT_ERR_RG_NXP_SET_FAIL); //20150609LUKE: we have to clear interface's eipidx in rg_db, too. 
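//With the NAPT connections using this external IP already torn down and its nexthop reference
//released above, the EXTINTIP table slot is cleared below and the cached extip_idx is reset so
//the interface no longer claims it.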
bzero(&extipEt, sizeof(rtk_l34_ext_intip_entry_t)); ret = RTK_L34_EXTINTIPTABLE_SET(eipidx, &extipEt); if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_EXTIP_SET_FAIL); rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.extip_idx=-1; } //20140807LUKE: modify wanType's type when deleting if(rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.bind_wan_type_ipv4>=0 && rg_db.wantype[rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.bind_wan_type_ipv4].valid) { rg_db.wantype[rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.bind_wan_type_ipv4].rtk_wantype.wanType=L34_WAN_TYPE_L3_ROUTE; rg_db.wantype[rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.bind_wan_type_ipv4].valid=1; ret = RTK_L34_WANTYPETABLE_SET(rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.bind_wan_type_ipv4, &rg_db.wantype[rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.bind_wan_type_ipv4].rtk_wantype); if(ret==RT_ERR_CHIP_NOT_SUPPORTED)RETURN_ERR(RT_ERR_RG_CHIP_NOT_SUPPORT); if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_WANTYPE_SET_FAIL); DEBUG("change wanType[%d] to %d",rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.bind_wan_type_ipv4,rg_db.wantype[rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.bind_wan_type_ipv4].rtk_wantype.wanType); } } } if(rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].valid == SOFTWARE_ONLY_ENTRY) { //delete ipv4 reserved acl for software entry _rtk_rg_aclAndCfReservedRuleDel(RTK_RG_ACLANDCF_RESERVED_RULE0_DIP_MASK_TRAP+deleteL3Idx); } return (RT_ERR_RG_OK); } int _rtk_rg_deleteIPv6NeighborByRouting(int routeIdx, int lan_or_wan_intf_idx) { int i,ret,count,l2Idx,search_index; rtk_rg_neighborInfo_t neighborInfo; rtk_rg_macEntry_t secondMacEt; for(i=0;i<MAX_IPV6_NEIGHBOR_SW_TABLE_SIZE;i++) { bzero(&neighborInfo, sizeof(rtk_rg_neighborInfo_t)); bzero(&secondMacEt, sizeof(rtk_rg_macEntry_t)); count=0; ret = rtk_rg_apollo_neighborEntry_find(&neighborInfo,&i); if(ret==RT_ERR_RG_NO_MORE_ENTRY_FOUND) //neighbor after j is all invalid break; if(neighborInfo.neighborEntry.matchRouteIdx==routeIdx) { //Delete LUT table entries referenced by Neighbor ret = rtk_rg_apollo_neighborEntry_del(i); if(ret!=RT_ERR_RG_OK)return (ret); //If the VLAN is IVL, find if there is SVL LUT, delete it also, and vice versa. 
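// A neighbor's L2 entry can exist twice in the LUT: once hashed with the VLAN ID (IVL) and once
// hashed with the FID (SVL). Besides the entry recorded in the neighbor table, rebuild the hash for
// the other fid-mode and scan its 4-way bucket (bucket base = hash << 2, ways 0..3) so the twin
// entry is removed as well; on RTL9602C the BCAM overflow block is searched too when the bucket misses.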
if(rg_db.lut[neighborInfo.neighborEntry.l2Idx].valid && rg_db.lut[neighborInfo.neighborEntry.l2Idx].rtk_lut.entryType==RTK_LUT_L2UC) { //Check fidmode if(rg_db.lut[neighborInfo.neighborEntry.l2Idx].rtk_lut.entry.l2UcEntry.flags&RTK_L2_UCAST_FLAG_IVL) { //delete SVL, too secondMacEt.isIVL=0; secondMacEt.fid=rg_db.lut[neighborInfo.neighborEntry.l2Idx].rtk_lut.entry.l2UcEntry.fid; //DEBUG("the SVL(%d) lut has to be deleted,too",secondMacEt.fid); l2Idx=_rtk_rg_hash_mac_fid_efid(rg_db.lut[neighborInfo.neighborEntry.l2Idx].rtk_lut.entry.l2UcEntry.mac.octet,secondMacEt.fid,rg_db.lut[neighborInfo.neighborEntry.l2Idx].rtk_lut.entry.l2UcEntry.efid); } else { //delete IVL, too secondMacEt.isIVL=1; //because the SVL LUT will set VID as 0 if untag, therefore VLAN ID should get for interface setting if(rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.is_wan==1) //wan interface secondMacEt.vlan_id = rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.egress_vlan_id; else //lan interface secondMacEt.vlan_id = rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].p_lanIntfConf->intf_vlan_id; //DEBUG("the IVL(%d) lut has to be deleted,too",secondMacEt.vlan_id); l2Idx=_rtk_rg_hash_mac_vid_efid(rg_db.lut[neighborInfo.neighborEntry.l2Idx].rtk_lut.entry.l2UcEntry.mac.octet,secondMacEt.vlan_id,rg_db.lut[neighborInfo.neighborEntry.l2Idx].rtk_lut.entry.l2UcEntry.efid); } l2Idx<<=2; do { search_index = l2Idx+count; //DEBUG("search_idx is %d",search_index); if(rg_db.lut[search_index].valid==0) { count++; continue; //empty } if(rg_db.lut[search_index].rtk_lut.entryType==RTK_LUT_L2UC && (!memcmp(rg_db.lut[search_index].rtk_lut.entry.l2UcEntry.mac.octet,rg_db.lut[neighborInfo.neighborEntry.l2Idx].rtk_lut.entry.l2UcEntry.mac.octet,ETHER_ADDR_LEN))) { if((secondMacEt.isIVL==1 && rg_db.lut[search_index].rtk_lut.entry.l2UcEntry.vid==secondMacEt.vlan_id) || (secondMacEt.isIVL==0 && rg_db.lut[search_index].rtk_lut.entry.l2UcEntry.fid==secondMacEt.fid)) { //DEBUG("%s MAC is exist @ %d, deleting...",secondMacEt.isIVL?"IVL":"SVL",search_index); rtk_rg_apollo_macEntry_del(search_index); break; } } count++; //search from next entry } while(count < 4); #if defined(CONFIG_RTL9602C_SERIES) if(count==4) { for(search_index=MAX_LUT_HW_TABLE_SIZE-MAX_LUT_BCAM_TABLE_SIZE;search_index<MAX_LUT_HW_TABLE_SIZE;search_index++) { if(rg_db.lut[search_index].valid && rg_db.lut[search_index].rtk_lut.entryType==RTK_LUT_L2UC) { if(memcmp(rg_db.lut[search_index].rtk_lut.entry.l2UcEntry.mac.octet,rg_db.lut[neighborInfo.neighborEntry.l2Idx].rtk_lut.entry.l2UcEntry.mac.octet,ETHER_ADDR_LEN)==0) { if((secondMacEt.isIVL==1 && rg_db.lut[search_index].rtk_lut.entry.l2UcEntry.vid==secondMacEt.vlan_id) || (secondMacEt.isIVL==0 && rg_db.lut[search_index].rtk_lut.entry.l2UcEntry.fid==secondMacEt.fid)) { //DEBUG("%s MAC is exist @ %d, deleting...",secondMacEt.isIVL?"IVL":"SVL",search_index); rtk_rg_apollo_macEntry_del(search_index); break; } } } } } #endif //Delete original one ret = rtk_rg_apollo_macEntry_del(neighborInfo.neighborEntry.l2Idx); //if L2 is not valid, do nothing //if(ret!=RT_ERR_RG_OK)return ret; } } } return (RT_ERR_RG_OK); } int32 _rtk_rg_deleteIPv6Routing(int lan_or_wan_intf_idx) { int i,j,ret; int nh_num,match_intf=0; rtk_ipv6Routing_entry_t rtv6Entry; rtk_rg_ipv6RoutingEntry_t cb_routv6Et; rtk_wanType_entry_t wantEt; unsigned int tmppmsk,tmpexpmsk; rtk_portmask_t out_mac_pmask,out_ext_pmask; for(i=0; i<MAX_IPV6_ROUTING_SW_TABLE_SIZE; i++) { if(i == V6_HW_DEFAULT_ROUTE_IDX) continue; 
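// Three route types are handled in this loop: LOCAL/TRAP entries owned by this netif are torn down
// directly (after purging their neighbor entries), while GLOBAL entries reference a nexthop, so the
// nexthop's reference count is decreased first and the route is removed only when that nexthop
// belongs to the interface being deleted.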
if(rg_db.v6route[i].rtk_v6route.valid) { if((rg_db.v6route[i].rtk_v6route.type == L34_IPV6_ROUTE_TYPE_LOCAL || rg_db.v6route[i].rtk_v6route.type == L34_IPV6_ROUTE_TYPE_TRAP) && rg_db.v6route[i].rtk_v6route.nhOrIfidIdx == lan_or_wan_intf_idx) //FIXME:the trap routing entry need to be deleted { //Delete the Neighbor table entries referenced by routing table ret=_rtk_rg_deleteIPv6NeighborByRouting(i,lan_or_wan_intf_idx); if(ret!=RT_ERR_RG_OK)RETURN_ERR(ret); //Deleting the routing entry bzero(&rtv6Entry, sizeof(rtk_ipv6Routing_entry_t)); bzero(&cb_routv6Et, sizeof(rtk_rg_ipv6RoutingEntry_t)); memcpy(cb_routv6Et.dest_ip.ipv6_addr,rg_db.v6route[i].rtk_v6route.ipv6Addr.ipv6_addr,IPV6_ADDR_LEN); cb_routv6Et.prefix_len=rg_db.v6route[i].rtk_v6route.ipv6PrefixLen; cb_routv6Et.NhOrIntfIdx=rg_db.v6route[i].rtk_v6route.nhOrIfidIdx; cb_routv6Et.type=rg_db.v6route[i].rtk_v6route.type; ret = RTK_L34_IPV6ROUTINGTABLE_SET(i, &rtv6Entry); if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_ROUTE_SET_FAIL); //2 Call the initParam's v6routingDelByHwCallBack if(rg_db.systemGlobal.initParam.v6RoutingDelByHwCallBack != NULL) { rg_db.systemGlobal.initParam.v6RoutingDelByHwCallBack(&cb_routv6Et); } } else if(rg_db.v6route[i].rtk_v6route.type == L34_IPV6_ROUTE_TYPE_GLOBAL) //nexthop { //Lookup Nexthop table for checking related interface nh_num = rg_db.v6route[i].rtk_v6route.nhOrIfidIdx; if(nh_num > MAX_NEXTHOP_SW_TABLE_SIZE)RETURN_ERR(RT_ERR_RG_NXP_GET_FAIL); if(rg_db.nexthop[nh_num].rtk_nexthop.ifIdx == lan_or_wan_intf_idx) { ret=_rtk_rg_decreaseNexthopReference(nh_num); if(ret!=RT_ERR_RG_OK)RETURN_ERR(RT_ERR_RG_NXP_SET_FAIL); match_intf=1; } //Deleting the routing entry if(match_intf == 1) { //20140904LUKE: for STATIC ROUTE may have neighbor entry, so we should delete it right here! ret=_rtk_rg_deleteIPv6NeighborByRouting(i,lan_or_wan_intf_idx); if(ret!=RT_ERR_RG_OK)RETURN_ERR(ret); bzero(&rtv6Entry, sizeof(rtk_ipv6Routing_entry_t)); bzero(&cb_routv6Et, sizeof(rtk_rg_ipv6RoutingEntry_t)); memcpy(cb_routv6Et.dest_ip.ipv6_addr,rg_db.v6route[i].rtk_v6route.ipv6Addr.ipv6_addr,IPV6_ADDR_LEN); cb_routv6Et.prefix_len=rg_db.v6route[i].rtk_v6route.ipv6PrefixLen; cb_routv6Et.NhOrIntfIdx=rg_db.v6route[i].rtk_v6route.nhOrIfidIdx; cb_routv6Et.type=rg_db.v6route[i].rtk_v6route.type; ret = RTK_L34_IPV6ROUTINGTABLE_SET(i, &rtv6Entry); if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_ROUTE_SET_FAIL); //2 Call the initParam's v6routingDelByHwCallBack if(rg_db.systemGlobal.initParam.v6RoutingDelByHwCallBack != NULL) { rg_db.systemGlobal.initParam.v6RoutingDelByHwCallBack(&cb_routv6Et); } } } } } if(rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.is_wan==1 && rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.wan_type!=RTK_RG_BRIDGE) { //Delete wanType if added DEBUG("wan[%d].bind_wan_type_ipv6 is %d",lan_or_wan_intf_idx,rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.bind_wan_type_ipv6); if(rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.bind_wan_type_ipv6>=0) { //update binding rules _rtk_rg_portmask_translator(rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.port_binding_mask,&out_mac_pmask,&out_ext_pmask); tmppmsk=out_mac_pmask.bits[0]; tmpexpmsk=out_ext_pmask.bits[0]>>0x1; //FIXME:translator contain cpu port, but binding should not contain it, so shift it _rtk_rg_deletingPortBindFromInterface(lan_or_wan_intf_idx); 
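// Re-install the port-binding rules from the stored binding portmask, keeping the IPv4 wan-type and
// (judging by the arguments) passing FAIL in place of the IPv6 one, refresh the vlan-binding rules,
// then invalidate the IPv6 wan-type entry and release the nexthop it referenced.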
ret=_rtk_rg_addBindFromPortmask(tmppmsk,tmpexpmsk,lan_or_wan_intf_idx,rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.bind_wan_type_ipv4,FAIL); if(ret!=RT_ERR_RG_OK)RETURN_ERR(ret); //update vlan-binding ret=_rtk_rg_updatingVlanBind(lan_or_wan_intf_idx,FAIL); if(ret!=RT_ERR_RG_OK)RETURN_ERR(ret); DEBUG("deleting ipv6 wantype[%d]!!",rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.bind_wan_type_ipv6); j = rg_db.wantype[rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.bind_wan_type_ipv6].rtk_wantype.nhIdx; bzero(&wantEt, sizeof(rtk_wanType_entry_t)); rg_db.wantype[rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.bind_wan_type_ipv6].valid =0; ret = RTK_L34_WANTYPETABLE_SET(rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.bind_wan_type_ipv6, &wantEt); if(ret==RT_ERR_CHIP_NOT_SUPPORTED)RETURN_ERR(RT_ERR_RG_CHIP_NOT_SUPPORT); if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_WANTYPE_SET_FAIL); ret=_rtk_rg_decreaseNexthopReference(j); if(ret!=RT_ERR_RG_OK)RETURN_ERR(RT_ERR_RG_NXP_SET_FAIL); rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.bind_wan_type_ipv6=FAIL; } //Reset Default route to CPU if ipv6_default_gateway_on is 1 if(rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].p_wanStaticInfo->ipv6_default_gateway_on == 1) { bzero(&rtv6Entry, sizeof(rtk_ipv6Routing_entry_t)); bzero(&cb_routv6Et, sizeof(rtk_rg_ipv6RoutingEntry_t)); rtv6Entry.type=L34_IPV6_ROUTE_TYPE_TRAP; rtv6Entry.valid=1; cb_routv6Et.type=L34_IPV6_ROUTE_TYPE_TRAP; //20140807LUKE: if we delete default route WAN, decrease nexthop counter here! if(rg_db.v6route[V6_DEFAULT_ROUTE_IDX].rtk_v6route.type==L34_IPV6_ROUTE_TYPE_GLOBAL) { ret=_rtk_rg_decreaseNexthopReference(rg_db.v6route[V6_DEFAULT_ROUTE_IDX].rtk_v6route.nhOrIfidIdx); if(ret!=RT_ERR_RG_OK)RETURN_ERR(RT_ERR_RG_NXP_SET_FAIL); } ret = RTK_L34_IPV6ROUTINGTABLE_SET(V6_DEFAULT_ROUTE_IDX,&rtv6Entry); if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_ROUTE_SET_FAIL); } } return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_lanInterface_add(rtk_rg_lanIntfConf_t *lan_info,int *intf_idx) { int ret,vlan_exist=0,errorno=RT_ERR_RG_OK,tmpVid,/*tmpPVid,*/ipv4Enable=0,ipv6Enable=0,intfMatch=0/*,changeMTU=0*/; int i,intfIdx=-1,rtIdx=-1,rtv6Idx=-1,input_ipmsk; rtk_l34_netif_entry_t intfEntry; rtk_l34_routing_entry_t rtEntry; rtk_ipv6Routing_entry_t rtv6Entry; rtk_portmask_t ori_pmsk,ori_utmsk,ori_etpmsk; rtk_rg_table_vlan_t ori_vlanEntry; rtk_portmask_t out_mac_pmask,out_ext_pmask; rtk_portmask_t untag_mac_pmask; //rtk_portmask_t ori_CPU_member_mask,ori_CPU_untag_mask,ori_CPU_ext_mask; rtk_rg_ipv4RoutingEntry_t cb_routEt; rtk_rg_ipv6RoutingEntry_t cb_routv6Et; //rtk_vlan_protoVlanCfg_t protoVlanCfg; #if defined(CONFIG_RTL9600_SERIES) || defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) rtk_rg_routing_arpInfo_t newAddingEntry; #endif rtk_fidMode_t fidMode; unsigned char zeroMac[ETHER_ADDR_LEN]={0}; rtk_rg_ip_updated_t ip_update_state=NO_IP_UPDATED; rtk_rg_aclAndCf_reserved_type_t rsvType; rtk_rg_aclAndCf_reserved_intf_linkLocal_trap_t intf_link_local_trap_para; if(lan_info == NULL || intf_idx == NULL) RETURN_ERR(RT_ERR_RG_NULL_POINTER); DEBUG("ip_version=%d",lan_info->ip_version); DEBUG("gmac=%02x:%02x:%02x:%02x:%02x:%02x",lan_info->gmac.octet[0],lan_info->gmac.octet[1],lan_info->gmac.octet[2],lan_info->gmac.octet[3],lan_info->gmac.octet[4],lan_info->gmac.octet[5]); DEBUG("ip_addr=0x%x",lan_info->ip_addr); DEBUG("ip_network_mask=0x%x",lan_info->ip_network_mask); 
DEBUG("ipv6_addr=%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x",lan_info->ipv6_addr.ipv6_addr[0],lan_info->ipv6_addr.ipv6_addr[1],lan_info->ipv6_addr.ipv6_addr[2],lan_info->ipv6_addr.ipv6_addr[3],lan_info->ipv6_addr.ipv6_addr[4],lan_info->ipv6_addr.ipv6_addr[5],lan_info->ipv6_addr.ipv6_addr[6],lan_info->ipv6_addr.ipv6_addr[7] ,lan_info->ipv6_addr.ipv6_addr[8],lan_info->ipv6_addr.ipv6_addr[9],lan_info->ipv6_addr.ipv6_addr[10],lan_info->ipv6_addr.ipv6_addr[11],lan_info->ipv6_addr.ipv6_addr[12],lan_info->ipv6_addr.ipv6_addr[13],lan_info->ipv6_addr.ipv6_addr[14],lan_info->ipv6_addr.ipv6_addr[15]); DEBUG("ipv6_network_mask_length=%x",lan_info->ipv6_network_mask_length); DEBUG("port_mask=0x%x",lan_info->port_mask); DEBUG("untag_mask=0x%x",lan_info->untag_mask); DEBUG("intf_vlan_id=%d",lan_info->intf_vlan_id); DEBUG("vlan_based_pri_enable=%d",lan_info->vlan_based_pri_enable); DEBUG("vlan_based_pri=%d",lan_info->vlan_based_pri); DEBUG("mtu=%d",lan_info->mtu); DEBUG("isIVL=%d",lan_info->isIVL); DEBUG("replace_subnet=%d",lan_info->replace_subnet); //Checking for input parameter //if(rg_db.systemGlobal.wanIntfTotalNum > 0) //RETURN_ERR(RT_ERR_RG_MODIFY_LAN_AT_WAN_EXIST); //LAN intf can not add after WAN created if(rg_db.systemGlobal.initParam.macBasedTagDecision && lan_info->isIVL) //IVL can not be set when DMAC2CVID is trun on RETURN_ERR(RT_ERR_RG_INVALID_PARAM); //Check VLAN init if(rg_db.systemGlobal.vlanInit==0) RETURN_ERR(RT_ERR_RG_NOT_INIT); //Check IP version if((lan_info->ip_version==IPVER_V4ONLY || lan_info->ip_version==IPVER_V4V6) && lan_info->ip_network_mask>0) ipv4Enable=1; if((lan_info->ip_version==IPVER_V6ONLY || lan_info->ip_version==IPVER_V4V6) && lan_info->ipv6_network_mask_length>0) ipv6Enable=1; if(ipv4Enable==1 && (lan_info->ip_addr == 0 || lan_info->ip_network_mask == 0)) ipv4Enable=0;//RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if(ipv6Enable==1) { if(lan_info->ipv6_network_mask_length== 0) ipv6Enable=0;//RETURN_ERR(RT_ERR_RG_INVALID_PARAM); else { //Check for valid IPv6 address if(*(unsigned int *)lan_info->ipv6_addr.ipv6_addr == 0 && *(unsigned int *)(lan_info->ipv6_addr.ipv6_addr+4) == 0 && *(unsigned int *)(lan_info->ipv6_addr.ipv6_addr+8) == 0 && *(unsigned int *)(lan_info->ipv6_addr.ipv6_addr+12) == 0) //RETURN_ERR(RT_ERR_RG_INVALID_PARAM); ipv6Enable=0; } } if((ipv6Enable|ipv4Enable) && lan_info->gmac.octet[0] == 0 && lan_info->gmac.octet[1] == 0 && lan_info->gmac.octet[2] == 0 && lan_info->gmac.octet[3] == 0 && lan_info->gmac.octet[4] == 0 && lan_info->gmac.octet[5] == 0) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if(ipv6Enable && lan_info->ipv6_network_mask_length<64) //interface route should not less than 64bit RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if(lan_info->gmac.octet[0]&1) //interface MAC can not use multicast address RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if(lan_info->port_mask.portmask == 0 || lan_info->mtu == 0 || (lan_info->untag_mask.portmask>>RTK_RG_MAC_PORT_MAX)>0) //untag set didn't contain extension port RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if((rg_db.vlan[rg_db.systemGlobal.initParam.fwdVLAN_CPU].valid && lan_info->intf_vlan_id == rg_db.systemGlobal.initParam.fwdVLAN_CPU) || (rg_db.vlan[rg_db.systemGlobal.initParam.fwdVLAN_Proto_Block].valid && lan_info->intf_vlan_id == rg_db.systemGlobal.initParam.fwdVLAN_Proto_Block) || (rg_db.systemGlobal.initParam.macBasedTagDecision==1 && (lan_info->intf_vlan_id == rg_db.systemGlobal.initParam.fwdVLAN_BIND_INTERNET || (lan_info->intf_vlan_id >= rg_db.systemGlobal.initParam.fwdVLAN_BIND_OTHER && 
lan_info->intf_vlan_id <= rg_db.systemGlobal.initParam.fwdVLAN_BIND_OTHER+DEFAULT_BIND_LAN_OFFSET)))) RETURN_ERR(RT_ERR_RG_VLAN_USED_BY_SYSTEM); //20150309LUKE: always treat disabled pri as "-1" if(lan_info->vlan_based_pri_enable==RTK_RG_DISABLED)lan_info->vlan_based_pri=-1; //if(lan_info->intf_vlan_id >= DEFAULT_PPB_VLAN_START && lan_info->intf_vlan_id < (DEFAULT_PPB_VLAN_START+MAX_NETIF_SW_TABLE_SIZE)) //RETURN_ERR(RT_ERR_RG_INVALID_PARAM); //different Lan interface can have same port /*for(i=0;i<rg_db.systemGlobal.lanIntfTotalNum;i++) { //Check if there is any LAN interface has the same port with the adding intf if((rg_db.systemGlobal.lanIntfGroup[i].p_intfInfo->p_lanIntfConf->port_mask.portmask&lan_info->port_mask.portmask) > 0) RETURN_ERR(RT_ERR_RG_PORT_USED); }*/ tmpVid=lan_info->intf_vlan_id; for(i=0;i<rg_db.systemGlobal.lanIntfTotalNum;i++) { //Check if there is a LAN interface has the same information but IP setting with the adding intf if(rg_db.systemGlobal.lanIntfGroup[i].p_intfInfo->p_lanIntfConf->ip_version==lan_info->ip_version && !memcmp(rg_db.systemGlobal.lanIntfGroup[i].p_intfInfo->p_lanIntfConf->gmac.octet,lan_info->gmac.octet,ETHER_ADDR_LEN) && rg_db.systemGlobal.lanIntfGroup[i].p_intfInfo->p_lanIntfConf->port_mask.portmask==lan_info->port_mask.portmask && rg_db.systemGlobal.lanIntfGroup[i].p_intfInfo->p_lanIntfConf->untag_mask.portmask==lan_info->untag_mask.portmask && rg_db.systemGlobal.lanIntfGroup[i].p_intfInfo->p_lanIntfConf->intf_vlan_id==lan_info->intf_vlan_id && rg_db.systemGlobal.lanIntfGroup[i].p_intfInfo->p_lanIntfConf->vlan_based_pri_enable==lan_info->vlan_based_pri_enable && rg_db.systemGlobal.lanIntfGroup[i].p_intfInfo->p_lanIntfConf->vlan_based_pri==lan_info->vlan_based_pri && rg_db.systemGlobal.lanIntfGroup[i].p_intfInfo->p_lanIntfConf->isIVL==lan_info->isIVL) { intfMatch=1; //keep old interface index intfIdx=rg_db.systemGlobal.lanIntfGroup[i].index; //Check if IPv4_changed if(rg_db.systemGlobal.interfaceInfo[intfIdx].p_lanIntfConf->ip_addr != lan_info->ip_addr || rg_db.systemGlobal.interfaceInfo[intfIdx].p_lanIntfConf->ip_network_mask != lan_info->ip_network_mask) ip_update_state=ONLY_IPV4_UPDATED; //Check if IPv6_changed if(memcmp(rg_db.systemGlobal.interfaceInfo[intfIdx].p_lanIntfConf->ipv6_addr.ipv6_addr,lan_info->ipv6_addr.ipv6_addr,IPV6_ADDR_LEN) || rg_db.systemGlobal.interfaceInfo[intfIdx].p_lanIntfConf->ipv6_network_mask_length != lan_info->ipv6_network_mask_length) { if(ip_update_state==ONLY_IPV4_UPDATED) ip_update_state=IPV4_IPV6_UPDATED; else ip_update_state=ONLY_IPV6_UPDATED; } if(ip_update_state==NO_IP_UPDATED) { DEBUG("all information are same with interface[%d], do nothing with IP.",intfIdx); ipv4Enable=0; ipv6Enable=0; } else { //20140702LUKE:Check if we are replace subnet this time!! if(lan_info->replace_subnet) { if(ip_update_state==ONLY_IPV4_UPDATED || ip_update_state==IPV4_IPV6_UPDATED) { DEBUG("change IPv4 settings only!! IPv4 enable is %d",ipv4Enable); //just delete IPv4 related setting, do v4_only procedure later if(ip_update_state==ONLY_IPV4_UPDATED)ipv6Enable=0; //20140702LUKE:when delete ARP if need, it should not convert software ARP to HW, since we are going to reset right after! 
rg_db.systemGlobal.intfIdxForReset=intfIdx; ret=_rtk_rg_deleteIPv4Routing(intfIdx); rg_db.systemGlobal.intfIdxForReset=-1; if(ret!=RT_ERR_RG_OK) RETURN_ERR(ret); //Clear software data structure rg_db.systemGlobal.interfaceInfo[intfIdx].p_lanIntfConf->ip_addr=0; rg_db.systemGlobal.interfaceInfo[intfIdx].p_lanIntfConf->ip_network_mask=0; } if(ip_update_state==ONLY_IPV6_UPDATED || ip_update_state==IPV4_IPV6_UPDATED) { DEBUG("change IPv6 settings only!! ipv6enable is %d",ipv6Enable); //just delete IPv6 related setting, do v6_only procedure later if(ip_update_state==ONLY_IPV6_UPDATED)ipv4Enable=0; ret=_rtk_rg_deleteIPv6Routing(intfIdx); if(ret!=RT_ERR_RG_OK) RETURN_ERR(ret); //Clear software data structure bzero(rg_db.systemGlobal.interfaceInfo[intfIdx].p_lanIntfConf->ipv6_addr.ipv6_addr,IPV6_ADDR_LEN); rg_db.systemGlobal.interfaceInfo[intfIdx].p_lanIntfConf->ipv6_network_mask_length=0; } } else { //Check if we are change v4 IP only without lan_info->replace_subnet if(ip_update_state==ONLY_IPV4_UPDATED) { //when add IPv4 subnet, IPv6 won't be count. ipv6Enable=0; } else //ONLY_IPV6_UPDATED || IPV4_IPV6_UPDATED { //We don't support IPv6 for adding one more subnet with same interface right now. //Besides, add IPv4 more subnet and change IPv6 at same time is also illegal. RETURN_ERR(RT_ERR_RG_IPV6_LAN_MORE_SUBNET_FAIL); } } } break; } } #if defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) #else //Check VLAN-binding use this VLAN or not for(i=0;i<MAX_BIND_SW_TABLE_SIZE;i++) if(rg_db.bind[i].valid && rg_db.bind[i].rtk_bind.vidLan==lan_info->intf_vlan_id) RETURN_ERR(RT_ERR_RG_VLAN_USED_BY_VLANBINDING); //Check Customer VLAN use this VLAN or not if(rg_db.vlan[lan_info->intf_vlan_id].valid && rg_db.vlan[lan_info->intf_vlan_id].addedAsCustomerVLAN) RETURN_ERR(RT_ERR_RG_VLAN_USED_BY_CVLAN); #endif #ifdef CONFIG_MASTER_WLAN0_ENABLE //20160524LUKE: check wlan-device existence _rtk_rg_check_wlan_device_exist_or_not(); #endif //rtlglue_printf("RTK RG lan add.....%d\n",RG_GLB_VLAN_INIT); //Transfer RG portmask to RTK portmask _rtk_rg_portmask_translator(lan_info->port_mask,&out_mac_pmask,&out_ext_pmask); //20140702LUKE:bypass create new interface since we are replace old IP settings or add one more IPv4 subnet!! 
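// From here the normal add path is: pick a free netif slot, pick free IPv4/IPv6 routing slots,
// program the VLAN, write the netif and routing tables, fire the add callbacks and finally store the
// software copy of the configuration. When an existing interface was matched above, the free-slot
// search and the VLAN programming are skipped and the existing index is reused.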
if(intfMatch)goto CHECK_ROUTE; //Check interface table available or not for(i=0;i<MAX_NETIF_SW_TABLE_SIZE;i++) { //bzero(&intfEntry, sizeof(rtk_l34_netif_entry_t)); //ret = rtk_l34_netifTable_get(i, &intfEntry); //if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_INTF_GET_FAIL); //if(intfEntry.valid == 0) if(rg_db.systemGlobal.interfaceInfo[i].valid == 0) break; } if(i==MAX_NETIF_SW_TABLE_SIZE) RETURN_ERR(RT_ERR_RG_ENTRY_FULL); intfIdx=i; //keep CHECK_ROUTE: //Check routing table available or not if(ipv4Enable==1) { rtIdx=MAX_L3_SW_TABLE_SIZE; for(i=0;i<MAX_L3_SW_TABLE_SIZE ;i++) //because idx MAX_L3_SW_TABLE_SIZE-1 is reserved for default route { if(i== V4_DEFAULT_ROUTE_IDX) continue; //if(rg_db.l3[i].rtk_l3.valid == 0 && rtIdx==MAX_L3_SW_TABLE_SIZE) //rtIdx=i; //keep the first valid entry if(rg_db.l3[i].rtk_l3.valid == 0) { rtIdx=i; //keep the first valid entry break; } //if(rg_db.l3[i].rtk_l3.process == L34_PROCESS_ARP && rg_db.l3[i].rtk_l3.arpEnd > last_arp) //find the end ARP address //last_arp = rg_db.l3[i].rtk_l3.arpEnd; } if(rtIdx==MAX_L3_SW_TABLE_SIZE) RETURN_ERR(RT_ERR_RG_ENTRY_FULL); } if(ipv6Enable==1) { rtv6Idx=MAX_IPV6_ROUTING_SW_TABLE_SIZE; for(i=0;i<MAX_IPV6_ROUTING_SW_TABLE_SIZE;i++) //because idx 3 is reserved for default route { if(i == V6_HW_DEFAULT_ROUTE_IDX) continue; if(rg_db.v6route[i].rtk_v6route.valid == 0) { rtv6Idx=i; //keep the first valid entry break; } } if(rtv6Idx==MAX_IPV6_ROUTING_SW_TABLE_SIZE) RETURN_ERR(RT_ERR_RG_ENTRY_FULL); } //------------------ Critical Section start -----------------------// //rg_lock(&rg_kernel.interfaceLock); //Set up Interface table bzero(&intfEntry,sizeof(rtk_l34_netif_entry_t)); intfEntry.valid=1; memcpy(intfEntry.gateway_mac.octet, lan_info->gmac.octet,ETHER_ADDR_LEN); intfEntry.mac_mask=0x7; //no mask intfEntry.vlan_id=lan_info->intf_vlan_id; //for LAN DMAC2CVID to replace this SVL CVID intfEntry.enable_rounting=1; intfEntry.mtu=lan_info->mtu; #if defined(CONFIG_RTL9600_SERIES) //20141110LUKE: add for inhibiting multicast routing downstream trigger overMTU trap. if(rg_kernel.apolloChipId==APOLLOMP_CHIP_ID) intfEntry.mtu+=2; #endif //20140702LUKE:bypass set VLAN since we are replace old IP settings or add one more IPv4 subnet!! 
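// The VLAN block below creates intf_vlan_id (or reuses it, keeping the original masks for rollback),
// applies the optional vlan-based priority, selects IVL or SVL fid-mode, and merges the CPU port plus
// the LAN member/untag sets into the VLAN; non-member ports are added to the untag set because
// multicast routing reuses the ingress VLAN's untag set.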
if(intfMatch)goto BYPASS_VLAN; //Set VLAN memset(&ori_vlanEntry,0,sizeof(rtk_rg_table_vlan_t)); ori_pmsk.bits[0]=out_mac_pmask.bits[0]; //initial port mask ori_etpmsk.bits[0]=out_ext_pmask.bits[0]; //initial ext-port mask ori_utmsk.bits[0]=lan_info->untag_mask.portmask; //initial untag set mask errorno=RT_ERR_RG_VLAN_SET_FAIL; //---------------------- //---------------------- ret = RTK_VLAN_CREATE(tmpVid); if(ret==RT_ERR_VLAN_EXIST) { memcpy(&ori_pmsk, &rg_db.vlan[tmpVid].MemberPortmask,sizeof(rtk_portmask_t)); memcpy(&ori_utmsk, &rg_db.vlan[tmpVid].UntagPortmask,sizeof(rtk_portmask_t)); memcpy(&ori_etpmsk, &rg_db.vlan[tmpVid].Ext_portmask,sizeof(rtk_portmask_t)); //keep all information of original VLAN memcpy(&ori_vlanEntry, &rg_db.vlan[tmpVid],sizeof(rtk_rg_table_vlan_t)); vlan_exist=1; } else if(ret!=RT_ERR_OK) { goto RET_VLAN_ERR; } //Check vlan-based priority enable for setup vlan-priority if(lan_info->vlan_based_pri_enable==RTK_RG_ENABLED) { #ifdef CONFIG_DUALBAND_CONCURRENT if(lan_info->vlan_based_pri==CONFIG_DEFAULT_TO_SLAVE_GMAC_PRI) { errorno=RT_ERR_RG_VLAN_PRI_CONFLICT_WIFI; goto RET_VLAN_ERR; } #endif #if defined(CONFIG_RTL9602C_SERIES) //WARNING("[FIXME]for 9602C, we can't set priority for VLAN directly..."); //errorno=RT_ERR_RG_CHIP_NOT_SUPPORT; //goto RET_VLAN_ERR; { rtk_rg_aclAndCf_reserved_AssignVlanBasedPriorityForInterface_t assignVlanBasedPriorityForInterfacePara; bzero(&assignVlanBasedPriorityForInterfacePara,sizeof(rtk_rg_aclAndCf_reserved_AssignVlanBasedPriorityForInterface_t)); assignVlanBasedPriorityForInterfacePara.ingress_vlan = lan_info->intf_vlan_id; assignVlanBasedPriorityForInterfacePara.assigned_priority = lan_info->vlan_based_pri; if(intfIdx < MAX_NETIF_HW_TABLE_SIZE) _rtk_rg_aclAndCfReservedRuleAdd(RTK_RG_ACLANDCF_RESERVED_ASSIGN_VLAN_BASED_RRIORITY_FOR_INTF0+intfIdx, &assignVlanBasedPriorityForInterfacePara); } #else ret = RTK_VLAN_PRIORITYENABLE_SET(tmpVid,ENABLED); if(ret!=RT_ERR_OK)goto RET_VLAN_ERR; ret = RTK_VLAN_PRIORITY_SET(tmpVid,lan_info->vlan_based_pri); if(ret!=RT_ERR_OK)goto RET_VLAN_ERR; #endif } else { #if defined(CONFIG_RTL9602C_SERIES) #else ret = RTK_VLAN_PRIORITYENABLE_SET(tmpVid,DISABLED); if(ret!=RT_ERR_OK)goto RET_VLAN_ERR; #endif } //decide to use IVL or SVL for VLAN tag decision, IVL by untag set; SVL by DMAC2CVID if(lan_info->isIVL) fidMode=VLAN_FID_IVL; else fidMode=VLAN_FID_SVL; ret = RTK_VLAN_FIDMODE_SET(tmpVid, fidMode); //Patch 20121129 if(ret!=RT_ERR_OK)goto RET_VLAN_ERR; ret = RTK_VLAN_FID_SET(tmpVid, LAN_FID); if(ret!=RT_ERR_OK)goto RET_VLAN_ERR; if((out_mac_pmask.bits[0]&(0x1<<RTK_RG_MAC_PORT_CPU))==0) //if user didn't add CPU port, we set it to tagged { out_mac_pmask.bits[0]|=(0x1<<RTK_RG_MAC_PORT_CPU); //add CPU port to vlan, keep it tagged untag_mac_pmask.bits[0]&=(~(0x1<<RTK_RG_MAC_PORT_CPU)); //otherwise,add CPU port to vlan untag set by user himself } out_mac_pmask.bits[0]|=ori_pmsk.bits[0]; //add LAN port to vlan untag_mac_pmask.bits[0]=ori_utmsk.bits[0]; untag_mac_pmask.bits[0]|=lan_info->untag_mask.portmask; //add untag set to LAN //20140613LUKE: for multicast routing packet will use ingress's VLAN untag set, therefore set all none-member port as untag!!(ServerInLAN) untag_mac_pmask.bits[0]|=(~(out_mac_pmask.bits[0]))&RTK_RG_ALL_MAC_PORTMASK; #if 0 //add bridge WAN port, if any. 
for(i=0;i<rg_db.systemGlobal.wanIntfTotalNum;i++) { if(rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->wan_type==RTK_RG_BRIDGE) { //Check if there is any LAN equals the WAN's VLANID, because macBasedTagDecision is off(vlan-based) if(rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_id!=lan_info->intf_vlan_id) { if(rg_db.systemGlobal.initParam.macBasedTagDecision==0) continue; //Check if there is any other LAN equals the WAN's VLANID /*errorno=RT_ERR_RG_UNBIND_BDWAN_SHOULD_EQUAL_LAN_VLAN; for(j=0;j<rg_db.systemGlobal.lanIntfTotalNum;j++) { if(rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_id==rg_db.systemGlobal.lanIntfGroup[j].p_intfInfo->p_lanIntfConf->intf_vlan_id) { WARNING("egress_vlan_id==intf_vlan_id!!"); break; } } if(j==rg_db.systemGlobal.lanIntfTotalNum) //if vlan is different, just continue, not add to LAN's VLAN member { WARNING("vlan is different, just continue.."); continue;//goto RET_VLAN_ERR; }*/ } else { //If there is bridge WAN has same VLANID as the new LAN, disable broadcast to this bridge WAN!! rg_db.systemGlobal.wanIntfGroup[i].disableBroadcast=1; //Check if the CPU port's tag/untag setting match this Bridge WAN's tag/untag setting errorno=RT_ERR_RG_CPU_TAG_DIFF_BRIDGE_WAN; if(rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_tag_on) { if(untag_mac_pmask.bits[0]&(0x1<<RTK_RG_MAC_PORT_CPU)) goto RET_VLAN_ERR; } else { if((untag_mac_pmask.bits[0]&(0x1<<RTK_RG_MAC_PORT_CPU))==0) goto RET_VLAN_ERR; } } out_mac_pmask.bits[0]|=(0x1<<rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->wan_port_idx); //use the NEWEST bridge wan's egress_vlan_tag_on to set wan port's untag setting if(rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_tag_on) //egress tagged packet untag_mac_pmask.bits[0]&=(~(0x1<<rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->wan_port_idx)); //set WAN port to 0 in untag set (tagging) else untag_mac_pmask.bits[0]|=0x1<<rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->wan_port_idx; //set WAN port to 1 in untag set (untagging) } } #elif defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) //ysleu@20150313:We need to add WAN member to LAN VLAN for RTL8685S LAN-Based. for(i=0;i<rg_db.systemGlobal.wanIntfTotalNum;i++) { if(rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->wan_type==RTK_RG_BRIDGE) { rtk_portmask_t mbpmsk,utpmsk,etpmsk; int wanVid = rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_id; //Add LAN member to WAN VLAN memcpy(&mbpmsk, &rg_db.vlan[wanVid].MemberPortmask,sizeof(rtk_portmask_t)); memcpy(&utpmsk, &rg_db.vlan[wanVid].UntagPortmask,sizeof(rtk_portmask_t)); memcpy(&etpmsk, &rg_db.vlan[wanVid].Ext_portmask,sizeof(rtk_portmask_t)); mbpmsk.bits[0] |= ori_pmsk.bits[0]; //add LAN port to vlan utpmsk.bits[0] |= ori_utmsk.bits[0]; //add untag set to vlan etpmsk.bits[0] |= lan_info->untag_mask.portmask; RTK_VLAN_PORT_SET(wanVid, &out_mac_pmask, &untag_mac_pmask); //Check if there is any LAN equals the WAN's VLANID, because macBasedTagDecision is off(vlan-based) if(rg_db.systemGlobal.initParam.macBasedTagDecision) { continue; } else { //If there is bridge WAN has same VLANID as the new LAN, disable broadcast to this bridge WAN!! 
rg_db.systemGlobal.wanIntfGroup[i].disableBroadcast=1; //Check if the CPU port's tag/untag setting match this Bridge WAN's tag/untag setting errorno=RT_ERR_RG_CPU_TAG_DIFF_BRIDGE_WAN; if(rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_tag_on) { if(untag_mac_pmask.bits[0]&(0x1<<RTK_RG_MAC_PORT_CPU)) goto RET_VLAN_ERR; } else { if((untag_mac_pmask.bits[0]&(0x1<<RTK_RG_MAC_PORT_CPU))==0) goto RET_VLAN_ERR; } } out_mac_pmask.bits[0]|=(0x1<<rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->wan_port_idx); //use the NEWEST bridge wan's egress_vlan_tag_on to set wan port's untag setting if(rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_tag_on) //egress tagged packet untag_mac_pmask.bits[0]&=(~(0x1<<rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->wan_port_idx)); //set WAN port to 0 in untag set (tagging) else untag_mac_pmask.bits[0]|=0x1<<rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->wan_port_idx; //set WAN port to 1 in untag set (untagging) } } #endif errorno=RT_ERR_RG_VLAN_SET_FAIL; ret = RTK_VLAN_PORT_SET(tmpVid, &out_mac_pmask, &untag_mac_pmask); if(ret!=RT_ERR_OK)goto RET_VLAN_ERR; out_ext_pmask.bits[0]|=ori_etpmsk.bits[0]; //add LAN ext-port to vlan out_ext_pmask.bits[0]|=0x1; //add ext-CPU port to vlan #if defined(CONFIG_RTL9602C_SERIES) //if(rg_kernel.debug_level&RTK_RG_DEBUG_LEVEL_FIXME) { ret = RTK_VLAN_EXTPORT_SET(tmpVid, &out_ext_pmask); if(ret!=RT_ERR_OK)goto RET_VLAN_ERR; } #else ret = RTK_VLAN_EXTPORT_SET(tmpVid, &out_ext_pmask); if(ret!=RT_ERR_OK)goto RET_VLAN_ERR; #endif //Check for bridge WAN, add Lan member to their VLAN if under MAC-based setting if(rg_db.systemGlobal.initParam.macBasedTagDecision) { for(i=0;i<rg_db.systemGlobal.wanIntfTotalNum;i++) { if(rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->wan_type==RTK_RG_BRIDGE) { //ori_pmsk.bits[0]|=(0x1<<rg_db.systemGlobal.wanIntfGroup[i].p_intfInfo->storedInfo.wan_intf.wan_intf_conf.wan_port_idx); tmpVid=rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_id; memcpy(&ori_pmsk, &rg_db.vlan[tmpVid].MemberPortmask,sizeof(rtk_portmask_t)); memcpy(&ori_utmsk, &rg_db.vlan[tmpVid].UntagPortmask,sizeof(rtk_portmask_t)); memcpy(&ori_etpmsk, &rg_db.vlan[tmpVid].Ext_portmask,sizeof(rtk_portmask_t)); //Let LAN port become active in WAN's VLAN member port mask out_mac_pmask.bits[0]|=ori_pmsk.bits[0]; out_ext_pmask.bits[0]|=ori_etpmsk.bits[0]; //We do not add Lan port to untag set errorno=RT_ERR_RG_VLAN_SET_FAIL; ret = RTK_VLAN_PORT_SET(tmpVid, &out_mac_pmask, &ori_utmsk); if(ret!=RT_ERR_OK)goto RET_BD_WAN_VLAN_ERR; #if defined(CONFIG_RTL9602C_SERIES) //if(rg_kernel.debug_level&RTK_RG_DEBUG_LEVEL_FIXME) { ret = RTK_VLAN_EXTPORT_SET(tmpVid,&out_ext_pmask); if(ret!=RT_ERR_OK)goto RET_BD_WAN_VLAN_ERR; } #else ret = RTK_VLAN_EXTPORT_SET(tmpVid,&out_ext_pmask); if(ret!=RT_ERR_OK)goto RET_BD_WAN_VLAN_ERR; #endif } } } BYPASS_VLAN: //Set up interface table //for L2_only setting, if the MAC address is all zero, just set netif entry valid to zero!!(for interface index synchronisation) if(!memcmp(intfEntry.gateway_mac.octet,zeroMac,ETHER_ADDR_LEN)) intfEntry.valid=0; #if defined(CONFIG_RTL9602C_SERIES) || defined(CONFIG_RTL9607C_SERIES) intfEntry.isL34=intfEntry.valid; intfEntry.ipAddr=lan_info->ip_addr; intfEntry.isCtagIf=0; //FIXME: temporary setting this to zero now, it seems don't care. 
intfEntry.dslite_state=DISABLED; #endif #if defined(CONFIG_RG_FLOW_BASED_PLATFORM) if(!ipv4Enable) intfEntry.deny_ipv4 = TRUE; if(!ipv6Enable) intfEntry.deny_ipv6 = TRUE; #endif errorno=RT_ERR_RG_INTF_SET_FAIL; ret = RTK_L34_NETIFTABLE_SET(intfIdx, &intfEntry); if(ret!=RT_ERR_OK)goto RET_INTF_ERR; //reset software MTU should keep original MTU, only hardware MTU need to change!! rg_db.netif[intfIdx].rtk_netif.mtu=lan_info->mtu; //Set up Routing table if(ipv4Enable==1) { bzero(&rtEntry, sizeof(rtk_l34_routing_entry_t)); rtEntry.netifIdx=intfIdx; rtEntry.valid=1; rtEntry.process=L34_PROCESS_CPU; //default add to sw table rtEntry.internal=1; rtEntry.ipAddr=lan_info->ip_addr&lan_info->ip_network_mask; //20130301-store IP addr after masked rtEntry.rt2waninf=0; input_ipmsk=lan_info->ip_network_mask; RG_ONE_COUNT(input_ipmsk); rtEntry.ipMask=input_ipmsk-1; #if defined(CONFIG_RTL9600_SERIES) || defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) //Check for ARP table for enough entry // TODO:Check for ARP range and add to rg_db.systemGlobal.routingArpInfoArray bzero(&newAddingEntry,sizeof(rtk_rg_routing_arpInfo_t)); newAddingEntry.routingIdx=rtIdx; newAddingEntry.intfIdx=intfIdx; newAddingEntry.notMask=~lan_info->ip_network_mask; newAddingEntry.bitNum=32-input_ipmsk; newAddingEntry.isLan=1; if(newAddingEntry.bitNum <= 8) //if need more than or equal to 512 entries, recorded in fwdEngine { errorno=RT_ERR_RG_ADD_ARP_MAC_FAILED; ret=_rtk_rg_addArpRoutingArray(&newAddingEntry,lan_info->ip_addr,lan_info->intf_vlan_id); if(ret==RT_ERR_RG_OK) { rtEntry.process = L34_PROCESS_ARP; rtEntry.arpStart = newAddingEntry.arpStart; rtEntry.arpEnd = newAddingEntry.arpEnd; } else if(ret==RT_ERR_RG_ADD_ARP_TO_SW_TABLE) //for sw table, routing entry just set process to CPU { WARNING("HW table is not enough...will add LAN%d to software ARP table!",intfIdx); } else goto RET_INVALID_ARP; } else { WARNING("HW table is not enough...will add LAN%d to software ARP table!",intfIdx); } #elif defined(CONFIG_RTL9602C_SERIES) rtEntry.process = L34_PROCESS_ARP; #endif errorno=RT_ERR_RG_ROUTE_SET_FAIL; ret = RTK_L34_ROUTINGTABLE_SET(rtIdx, &rtEntry); if(ret!=RT_ERR_OK)goto RET_ROUTING_ERR; //20140703LUKE: keep original IP address for check gateway IP rg_db.l3[rtIdx].gateway_ip=lan_info->ip_addr; //20140702LUKE: we should sync software data after replace old settings!! if((ip_update_state==ONLY_IPV4_UPDATED||ip_update_state==IPV4_IPV6_UPDATED) && lan_info->replace_subnet==1) { rg_db.systemGlobal.interfaceInfo[intfIdx].p_lanIntfConf->ip_addr=lan_info->ip_addr; rg_db.systemGlobal.interfaceInfo[intfIdx].p_lanIntfConf->ip_network_mask=lan_info->ip_network_mask; } } if(ipv6Enable==1) { bzero(&rtv6Entry, sizeof(rtk_ipv6Routing_entry_t)); rtv6Entry.valid=1; rtv6Entry.type=L34_IPV6_ROUTE_TYPE_LOCAL; rtv6Entry.nhOrIfidIdx=intfIdx; rtv6Entry.ipv6PrefixLen=lan_info->ipv6_network_mask_length; memcpy(&rtv6Entry.ipv6Addr,&lan_info->ipv6_addr,sizeof(rtk_ipv6_addr_t)); rtv6Entry.rt2waninf=0; //local route, routing to LAN errorno=RT_ERR_RG_ROUTE_SET_FAIL; ret = RTK_L34_IPV6ROUTINGTABLE_SET(rtv6Idx,&rtv6Entry); if(ret!=RT_ERR_OK)goto RET_ROUTING_ERR; //20160601LUKE: keep original IPv6 address for check gateway IP memcpy(&rg_db.v6route[rtv6Idx].gateway_ipv6Addr,&lan_info->ipv6_addr,sizeof(rtk_ipv6_addr_t)); //20140702LUKE: we should sync software data after replace old settings!! 
if((ip_update_state==ONLY_IPV6_UPDATED||ip_update_state==IPV4_IPV6_UPDATED) && lan_info->replace_subnet==1) { memcpy(&rg_db.systemGlobal.interfaceInfo[intfIdx].p_lanIntfConf->ipv6_addr,&lan_info->ipv6_addr,sizeof(rtk_ipv6_addr_t)); rg_db.systemGlobal.interfaceInfo[intfIdx].p_lanIntfConf->ipv6_network_mask_length=lan_info->ipv6_network_mask_length; } } // TODO:Call the initParam's routingAddByHwCallBack if(ipv4Enable==1) { if(rg_db.systemGlobal.initParam.routingAddByHwCallBack != NULL) { bzero(&cb_routEt, sizeof(rtk_rg_ipv4RoutingEntry_t)); cb_routEt.dest_ip=lan_info->ip_addr; cb_routEt.ip_mask=lan_info->ip_network_mask; cb_routEt.nexthop=0; //interface route cb_routEt.wan_intf_idx=intfIdx; rg_db.systemGlobal.initParam.routingAddByHwCallBack(&cb_routEt); } } // TODO:Call the initParam's v6RoutingAddByHwCallBack if(ipv6Enable==1) { if(rg_db.systemGlobal.initParam.v6RoutingAddByHwCallBack != NULL) { bzero(&cb_routv6Et, sizeof(rtk_rg_ipv6RoutingEntry_t)); memcpy(&cb_routv6Et.dest_ip,&lan_info->ipv6_addr,sizeof(rtk_ipv6_addr_t)); cb_routv6Et.prefix_len=lan_info->ipv6_network_mask_length; cb_routv6Et.NhOrIntfIdx=intfIdx; cb_routv6Et.type=L34_IPV6_ROUTE_TYPE_LOCAL; rg_db.systemGlobal.initParam.v6RoutingAddByHwCallBack(&cb_routv6Et); } } //20140702LUKE:only add gateway MAC when create new interface if(!intfMatch) { rtk_rg_successFailReturn_t ret_fs; //1 FIXME: patch for DA==GatewayMac will hit layer2 unknown DA, if action is trap //Create Lan gateway STATIC MAC errorno=RT_ERR_RG_CREATE_GATEWAY_LUT_FAIL; ret_fs = _rtk_rg_createGatewayMacEntry(lan_info->gmac.octet,lan_info->intf_vlan_id,lan_info->untag_mask.portmask,intfIdx); if(ret_fs==RG_RET_FAIL)goto RET_ROUTING_ERR; } errorno=RT_ERR_RG_OK; *intf_idx = intfIdx; //20140702LUKE:bypass software data restored rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.lan_intf.mtu=lan_info->mtu; rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.lan_intf.replace_subnet=lan_info->replace_subnet; if(intfMatch)goto BYPASS_SW_DATA; //store information in Global variable rg_db.systemGlobal.lanIntfGroup[rg_db.systemGlobal.lanIntfTotalNum].index=intfIdx; rg_db.systemGlobal.lanIntfGroup[rg_db.systemGlobal.lanIntfTotalNum].p_intfInfo=&rg_db.systemGlobal.interfaceInfo[intfIdx]; rg_db.systemGlobal.interfaceInfo[intfIdx].lan_or_wan_index=rg_db.systemGlobal.lanIntfTotalNum; rg_db.systemGlobal.lanIntfTotalNum++; //update LAN port mask rg_db.systemGlobal.lanPortMask.portmask|=lan_info->port_mask.portmask; rg_db.systemGlobal.interfaceInfo[intfIdx].valid=1; bzero(rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.intf_name,32); sprintf(rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.intf_name, "LAN%d",intfIdx); rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.is_wan=0; rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.lan_intf.ip_version=lan_info->ip_version; memcpy(rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.lan_intf.gmac.octet, lan_info->gmac.octet,ETHER_ADDR_LEN); if(ipv4Enable==1) { rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.lan_intf.ip_addr=lan_info->ip_addr; rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.lan_intf.ip_network_mask=lan_info->ip_network_mask; } if(ipv6Enable==1) { memcpy(&rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.lan_intf.ipv6_addr,&lan_info->ipv6_addr,sizeof(rtk_ipv6_addr_t)); rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.lan_intf.ipv6_network_mask_length=lan_info->ipv6_network_mask_length; } rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.lan_intf.port_mask=lan_info->port_mask; 
rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.lan_intf.untag_mask.portmask=lan_info->untag_mask.portmask; //rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.lan_intf.extport_mask=lan_info->extport_mask; rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.lan_intf.intf_vlan_id=lan_info->intf_vlan_id; rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.lan_intf.vlan_based_pri_enable=lan_info->vlan_based_pri_enable; rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.lan_intf.vlan_based_pri=lan_info->vlan_based_pri; //rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.lan_intf.pppoe_passThrough=lan_info->pppoe_passThrough; #if 0 rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.lan_intf.dhcp_server_enable=lan_info->dhcp_server_enable; rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.lan_intf.lease_time=lan_info->lease_time; rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.lan_intf.dhcp_start_ip_addr=lan_info->dhcp_start_ip_addr; rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.lan_intf.dhcp_end_ip_addr=lan_info->dhcp_end_ip_addr; rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.lan_intf.dhcp_port_binding_mask=lan_info->dhcp_port_binding_mask; rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.lan_intf.dhcp_extport_binding_mask=lan_info->dhcp_extport_binding_mask; #endif rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.lan_intf.isIVL=lan_info->isIVL; rg_db.systemGlobal.interfaceInfo[intfIdx].p_lanIntfConf=&rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.lan_intf; //short-cut of lan interface structure //Check PPPoE Pass through _rtk_rg_refreshPPPoEPassThroughLanOrWanPortMask(); //Update PVID _rtk_rg_updatePortBasedVIDByLanOrder(rg_db.vlan[lan_info->intf_vlan_id].MemberPortmask, rg_db.vlan[lan_info->intf_vlan_id].Ext_portmask); //20150128LUKE: recovery pvid whild add LAN after delete one. if(rg_db.systemGlobal.initParam.macBasedTagDecision) { //UpdateBindInternet _rtk_rg_updateBindWanIntf(NULL); //Update non-binding _rtk_rg_updateNoneBindingPortmask(rg_db.systemGlobal.wanPortMask.portmask); //Update PVID of OtherWan-binding port to vlan specific for the WAN _rtk_rg_updateBindOtherWanPortBasedVID(NULL); } if(intfIdx < MAX_NETIF_HW_TABLE_SIZE) { if(lan_info->ip_version==IPVER_V6ONLY || lan_info->ip_version==IPVER_V4V6) { //20141226LUKE: add the trap link local ACL since we turn on IPv6 this interface!! memcpy(intf_link_local_trap_para.gmac.octet,lan_info->gmac.octet,ETHER_ADDR_LEN); rsvType=RTK_RG_ACLANDCF_RESERVED_IPV6_INTF0_LINK_LOCAL_TRAP+intfIdx; _rtk_rg_aclAndCfReservedRuleAdd(rsvType, &intf_link_local_trap_para); } else { //20141226LUKE: delete the trap link local ACL since we didn't support IPv6 this interface!! 
rsvType=RTK_RG_ACLANDCF_RESERVED_IPV6_INTF0_LINK_LOCAL_TRAP+intfIdx; _rtk_rg_aclAndCfReservedRuleDel(rsvType); } } if((intfIdx>=MAX_NETIF_HW_TABLE_SIZE) || rg_db.l3[rtIdx].valid==SOFTWARE_ONLY_ENTRY) { rtk_rg_aclAndCf_reserved_dip_mask_trap_t dip_mask_trap; bzero(&dip_mask_trap,sizeof(dip_mask_trap)); dip_mask_trap.dip=rg_db.l3[rtIdx].rtk_l3.ipAddr; dip_mask_trap.mask =~((1<<(31-(rg_db.l3[rtIdx].rtk_l3.ipMask)))-1); _rtk_rg_aclAndCfReservedRuleAdd(RTK_RG_ACLANDCF_RESERVED_RULE0_DIP_MASK_TRAP +rtIdx, &dip_mask_trap); rg_db.systemGlobal.interfaceInfo[intfIdx].valid=SOFTWARE_ONLY_ENTRY; WARNING("ReservedRuleAdd software data path ADD_LAN netif=%d L3Idx=%d",intfIdx,rtIdx); } #if CONFIG_ACL_EGRESS_WAN_INTF_TRANSFORM //20141225LUKE: while add LAN interface we should rearrange ACL which use the gmac address for L34 if(rg_db.systemGlobal.acl_SW_egress_intf_type_zero_num) ASSERT_EQ(_rtk_rg_aclSWEntry_and_asic_rearrange(),RT_ERR_RG_OK); #endif BYPASS_SW_DATA: //add lan-interfcae callback to sync protocal-stack if(rg_db.systemGlobal.initParam.interfaceAddByHwCallBack != NULL) { //20140704LUKE:special case, if we change MTU only but set replace_subnet to 1, callback will flush IP address including more subnet, //therefore we should let callback think we are just setting the same IP information without modify. if(ip_update_state==NO_IP_UPDATED)rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.lan_intf.replace_subnet=0; rg_db.systemGlobal.initParam.interfaceAddByHwCallBack(&rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo,&intfIdx); //recover to original setting rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.lan_intf.replace_subnet=lan_info->replace_subnet; } goto RET_SUCCESS; RET_ROUTING_ERR: //Delete the routing entry if(rtIdx>=0) { bzero(&rtEntry, sizeof(rtk_l34_routing_entry_t)); RTK_L34_ROUTINGTABLE_SET(rtIdx, &rtEntry); } if(rtv6Idx>=0) { bzero(&rtv6Entry, sizeof(rtk_ipv6Routing_entry_t)); RTK_L34_IPV6ROUTINGTABLE_SET(rtv6Idx,&rtv6Entry); } #if defined(CONFIG_RTL9600_SERIES) || defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) RET_INVALID_ARP: #endif RET_INTF_ERR: //Delete the interface entry bzero(&intfEntry, sizeof(rtk_l34_netif_entry_t)); RTK_L34_NETIFTABLE_SET(intfIdx, &intfEntry); RET_BD_WAN_VLAN_ERR: RTK_VLAN_PORT_SET(tmpVid, &ori_pmsk, &ori_utmsk); RTK_VLAN_EXTPORT_SET(tmpVid, &ori_etpmsk); RET_VLAN_ERR: //Delete the VLAN created, or recovery its member port if exist /*if(lan_info->pppoe_passThrough == 1) { bzero(&protoVlanCfg,sizeof(rtk_vlan_protoVlanCfg_t)); if(out_ext_pmask.bits[0] > 0x1) { rtk_vlan_portProtoVlan_set(RTK_RG_MAC_PORT_CPU,PPPOE_DISCOVERY_GROUPID,&protoVlanCfg); rtk_vlan_portProtoVlan_set(RTK_RG_MAC_PORT_CPU,PPPOE_SESSION_GROUPID,&protoVlanCfg); } for(i=0;i<RTK_RG_PORT_CPU;i++) { if((out_mac_pmask.bits[0]&(0x1<<i)) > 0) { rtk_vlan_portProtoVlan_set(i,PPPOE_DISCOVERY_GROUPID,&protoVlanCfg); rtk_vlan_portProtoVlan_set(i,PPPOE_SESSION_GROUPID,&protoVlanCfg); } } }*/ if(vlan_exist) { RTK_VLAN_PORT_SET(intfEntry.vlan_id, &ori_vlanEntry.MemberPortmask, &ori_vlanEntry.UntagPortmask); RTK_VLAN_EXTPORT_SET(intfEntry.vlan_id, &ori_vlanEntry.Ext_portmask); RTK_VLAN_FIDMODE_SET(intfEntry.vlan_id, ori_vlanEntry.fidMode); RTK_VLAN_FID_SET(intfEntry.vlan_id, ori_vlanEntry.fid); #if defined(CONFIG_RTL9602C_SERIES) #else RTK_VLAN_PRIORITYENABLE_SET(intfEntry.vlan_id,ori_vlanEntry.priorityEn); RTK_VLAN_PRIORITY_SET(intfEntry.vlan_id,ori_vlanEntry.priority); #endif } else { RTK_VLAN_DESTROY(intfEntry.vlan_id); } /*RET_DEF_VLAN_ERR: //Recovery Default VLAN setting 
RTK_VLAN_PORT_SET(DEFAULT_LAN_VLAN, &ori_CPU_member_mask, &ori_CPU_untag_mask);*/ RET_SUCCESS: //------------------ Critical Section End -----------------------// //rg_unlock(&rg_kernel.interfaceLock); RETURN_ERR(errorno); } #if 0 int32 rtk_rg_apollo_dhcpServerStaticAlloc_add(ipaddr_t ipaddr, rtk_mac_t *macaddr,int *static_idx) { int i; //Check input param if(static_idx == NULL || macaddr == NULL) RETURN_ERR(RT_ERR_RG_NULL_POINTER); if(ipaddr == 0) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if(macaddr->octet[0]==0 && macaddr->octet[1]==0 && macaddr->octet[2]==0 && macaddr->octet[3]==0 && macaddr->octet[4]==0 && macaddr->octet[5]==0) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); //Store ip and macaddr in structure for(i=0; i<STATIC_DHCP_ALLOC_NUM; i++) { if(_DHCP_STATIC[i].valid != 0) continue; _DHCP_STATIC[i].ip=ipaddr; memcpy(_DHCP_STATIC[i].mac.octet, macaddr->octet, 6); _DHCP_STATIC[i].valid=1; } if(i == STATIC_DHCP_ALLOC_NUM)RETURN_ERR(RT_ERR_RG_ENTRY_FULL); //Return the index of new added ip-mac structure *static_idx = i; return (RT_ERR_RG_OK); } int32 rtk_rg_apollo_dhcpServerStaticAlloc_del(int static_idx) { //Check param if(static_idx<0 || static_idx>=STATIC_DHCP_ALLOC_NUM) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if(_DHCP_STATIC[static_idx].valid == 0) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); _DHCP_STATIC[static_idx].valid=0; //we just turn off the valid bit return (RT_ERR_RG_OK); } int32 rtk_rg_apollo_dhcpServerStaticAlloc_find(ipaddr_t *ipaddr, rtk_mac_t *macaddr, int *idx) { //Check param int i; if(idx == NULL || ipaddr == NULL || macaddr == NULL) RETURN_ERR(RT_ERR_RG_NULL_POINTER); if(*idx<0 || *idx>=STATIC_DHCP_ALLOC_NUM) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); //Find the first available one from idx for(i=*idx; i<STATIC_DHCP_ALLOC_NUM; i++) { if(_DHCP_STATIC[i].valid==0) continue; *ipaddr=_DHCP_STATIC[i].ip; memcpy(macaddr->octet, _DHCP_STATIC[i].mac.octet, 6); } if(i==STATIC_DHCP_ALLOC_NUM)RETURN_ERR(RT_ERR_RG_STATIC_NOT_FOUND); //Return the first available index from idx *idx = i; return (RT_ERR_RG_OK); } #endif //WAN Interface int32 _rtk_rg_updateWANPortBasedVID(rtk_rg_port_idx_t wan_port) { #ifdef CONFIG_RG_WAN_PORT_ISOLATE int tmpPVid=DEFAULT_WAN_VLAN; #else int tmpPVid=rg_db.systemGlobal.initParam.fwdVLAN_CPU; #endif int origPVid,ret,i; int untag_RT=0,tag_WAN=0; //20160428LUKE: transform from RGMII to PON #if !defined(CONFIG_RTL9602C_SERIES) if(rg_db.systemGlobal.pppoeGponSmallbandwithControl && wan_port==RTK_RG_PORT_RGMII){ DEBUG("Special recovery WAN_PORT from RGMII to PON."); wan_port=RTK_RG_PORT_PON; } #endif rg_db.systemGlobal.wanPortMask.portmask=0x0; #if !defined(CONFIG_RTL9602C_SERIES) if(rg_db.systemGlobal.pppoeGponSmallbandwithControl){ rg_db.systemGlobal.wanPortMask.portmask|=(1<<RTK_RG_PORT_RGMII); } #endif for(i=0;i<rg_db.systemGlobal.wanIntfTotalNum;i++) { //update WAN port mask rg_db.systemGlobal.wanPortMask.portmask|=0x1<<rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->wan_port_idx; if(rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->wan_port_idx==wan_port) { if(rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->wan_type==RTK_RG_BRIDGE && !rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_tag_on) { //Untag BG WAN tmpPVid=rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_id; break; } else if(rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->wan_type!=RTK_RG_BRIDGE && !rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_tag_on) { //Untag RT WAN if(untag_RT==0) //keep first { untag_RT++; 
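// (overall PVID selection: an untagged bridge WAN on this port wins outright; otherwise the first
// untagged routed WAN is used, then the first tagged WAN, and finally the default PVID chosen at the
// top of the function)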
tmpPVid=rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_id; } } else if(untag_RT==0) { //Tagged BG or RT WAN if(tag_WAN==0) //keep first { tag_WAN++; tmpPVid=rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_id; } } } } //#if defined(CONFIG_GPON_FEATURE) || defined(CONFIG_EPON_FEATURE) //20141119LUKE: support S-tag from service port, which equals to WAN-port if (rg_kernel.stag_enable==RTK_RG_ENABLED) { #if defined(CONFIG_RTL9600_SERIES) if((rg_db.systemGlobal.internalSupportMask & RTK_RG_INTERNAL_SUPPORT_BIT2)) #endif { if(rg_db.systemGlobal.wanPortMask.portmask&(1<<RTK_RG_MAC_PORT_PON)) { assert_ok(RTK_SVLAN_SERVICEPORT_SET(RTK_RG_MAC_PORT_PON, ENABLED)); } else { assert_ok(RTK_SVLAN_SERVICEPORT_SET(RTK_RG_MAC_PORT_PON, DISABLED)); } /*for(i=0;i<RTK_RG_MAC_PORT_CPU;i++) { if(rg_db.systemGlobal.wanPortMask.portmask&(1<<i)) { assert_ok(RTK_SVLAN_SERVICEPORT_SET(i, ENABLED)); } else { assert_ok(RTK_SVLAN_SERVICEPORT_SET(i, DISABLED)); } }*/ } } //#endif origPVid=rg_db.systemGlobal.portBasedVID[wan_port]; ret = RTK_VLAN_PORTPVID_SET(wan_port, tmpPVid); if(ret!=RT_ERR_OK) { RTK_VLAN_PORTPVID_SET(wan_port, origPVid); //recovery original pvid WARNING("set Port_%d PVID[%d] fail...recovery to [%d]",wan_port,tmpPVid,origPVid); RETURN_ERR(RT_ERR_RG_VLAN_SET_FAIL); } return (RT_ERR_RG_OK); } #ifdef CONFIG_MASTER_WLAN0_ENABLE void _rtk_rg_updateOtherWanWlan0Setting(rtk_rg_wanIntfConf_t *wanintf, rtk_portmask_t *etpmsk, int vidX) { int i; if(wanintf->wlan0_dev_binding_mask) { etpmsk->bits[0]|=(0x1<<(RTK_RG_EXT_PORT0-RTK_RG_PORT_CPU)); #ifdef CONFIG_DUALBAND_CONCURRENT if(rg_db.systemGlobal.enableSlaveSSIDBind && wanintf->wlan0_dev_binding_mask>=(0x1<<WLAN_DEVICE_NUM)) etpmsk->bits[0]|=(0x1<<(RTK_RG_EXT_PORT1-RTK_RG_PORT_CPU)); #endif //Change DVID of all binding WLAN0 device to OtherWanVlan for(i=0;i<MAX_WLAN_DEVICE_NUM;i++) { if(wanintf->wlan0_dev_binding_mask&(0x1<<i)) assert_ok(rtk_rg_apollo_wlanDevBasedCVlanId_set(0,i,vidX)); } //Add binding WLAN0 device to OtherWanVlan's Wlan0DevMask rg_db.vlan[vidX].wlan0DevMask|=wanintf->wlan0_dev_binding_mask; if(rg_db.vlan[vidX].UntagPortmask.bits[0]&(0x1<<RTK_RG_PORT_CPU)) rg_db.vlan[vidX].wlan0UntagMask|=wanintf->wlan0_dev_binding_mask; else rg_db.vlan[vidX].wlan0UntagMask&=(~(wanintf->wlan0_dev_binding_mask)); } else { if(etpmsk->bits[0]&(0x1<<(RTK_RG_EXT_PORT0-RTK_RG_PORT_CPU))) { //Change DVID of all WLAN0 device to OtherWanVlan #ifdef CONFIG_DUALBAND_CONCURRENT for(i=0;i<WLAN_DEVICE_NUM;i++) #else for(i=0;i<MAX_WLAN_DEVICE_NUM;i++) #endif { if(rg_db.systemGlobal.wlan0BindDecision[i].exist) { assert_ok(rtk_rg_apollo_wlanDevBasedCVlanId_set(0,i,vidX)); //Add all WLAN0 device to OtherWanVlan's Wlan0DevMask rg_db.vlan[vidX].wlan0DevMask|=(0x1<<i); if(rg_db.vlan[vidX].UntagPortmask.bits[0]&(0x1<<RTK_RG_PORT_CPU)) rg_db.vlan[vidX].wlan0UntagMask|=(0x1<<i); else rg_db.vlan[vidX].wlan0UntagMask&=(~(0x1<<i)); } } } #ifdef CONFIG_DUALBAND_CONCURRENT if(rg_db.systemGlobal.enableSlaveSSIDBind && etpmsk->bits[0]&(0x1<<(RTK_RG_EXT_PORT1-RTK_RG_PORT_CPU))) { //Change DVID of WLAN1 device to OtherWanVlan for(i=WLAN_DEVICE_NUM;i<MAX_WLAN_DEVICE_NUM;i++) { if(rg_db.systemGlobal.wlan0BindDecision[i].exist) { assert_ok(rtk_rg_apollo_wlanDevBasedCVlanId_set(0,i,vidX)); //Add all WLAN1 device to OtherWanVlan's Wlan0DevMask rg_db.vlan[vidX].wlan0DevMask|=(0x1<<i); if(rg_db.vlan[vidX].UntagPortmask.bits[0]&(0x1<<RTK_RG_PORT_CPU)) rg_db.vlan[vidX].wlan0UntagMask|=(0x1<<i); else rg_db.vlan[vidX].wlan0UntagMask&=(~(0x1<<i)); } } } #endif } } #endif #if 
defined(CONFIG_APOLLO) int32 _rtk_rg_updateBindWanIntf(rtk_rg_wanIntfConf_t *wanintf) { int i,ret,vlanId; rtk_portmask_t mbpmsk, utpmsk, etpmsk; rtk_portmask_t out_mac_pmask,out_ext_pmask,vlan_bind_pmsk,vlan_bind_extpmsk; if(rg_db.vlan[rg_db.systemGlobal.initParam.fwdVLAN_BIND_INTERNET].valid==0) { //first use, create it ret = RTK_VLAN_CREATE(rg_db.systemGlobal.initParam.fwdVLAN_BIND_INTERNET); if (ret == RT_ERR_VLAN_EXIST) { DEBUG("fwdVLAN_BIND_INTERNET[%d] had created..",rg_db.systemGlobal.initParam.fwdVLAN_BIND_INTERNET); rg_db.vlan[rg_db.systemGlobal.initParam.fwdVLAN_BIND_INTERNET].valid = 1; } } #if defined(CONFIG_RG_FLOW_BASED_PLATFORM) mbpmsk.bits[0]=RTK_RG_ALL_LAN_PORTMASK|RTK_RG_ALL_CPU_PORTMASK; //all LAN port with CPU utpmsk.bits[0]=RTK_RG_ALL_LAN_PORTMASK|RTK_RG_ALL_CPU_PORTMASK; //all untag etpmsk.bits[0]=RTK_RG_ALL_USED_VIRUAL_PORTMASK; //cpu, extension port 0 and 1 #else mbpmsk.bits[0]=RTK_RG_ALL_LAN_PORTMASK|(1<<RTK_RG_PORT_CPU); //all LAN port with CPU utpmsk.bits[0]=RTK_RG_ALL_LAN_PORTMASK|(1<<RTK_RG_PORT_CPU); //all untag etpmsk.bits[0]=RTK_RG_ALL_USED_VIRUAL_PORTMASK; //cpu, extension port 0 and 1 #endif //Search all non-internet binding WAN, remove their member from VID fwdVLAN_BIND_INTERNET for(i=0;i<rg_db.systemGlobal.wanIntfTotalNum;i++) { if(rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->none_internet==1 && rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->port_binding_mask.portmask>0) { _rtk_rg_portmask_translator(rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->port_binding_mask,&out_mac_pmask,&out_ext_pmask); mbpmsk.bits[0]&=(~(out_mac_pmask.bits[0])); etpmsk.bits[0]&=(~(out_ext_pmask.bits[0])); //update it's VLAN member by binding mask and wlan dev mask vlanId=rg_db.systemGlobal.otherWanVlan[rg_db.systemGlobal.wanIntfGroup[i].index]; rg_db.vlan[vlanId].MemberPortmask.bits[0]=out_mac_pmask.bits[0]; //all binding LAN port with CPU rg_db.vlan[vlanId].MemberPortmask.bits[0]|=(0x1<<RTK_RG_PORT_CPU); rg_db.vlan[vlanId].UntagPortmask.bits[0]=out_mac_pmask.bits[0]; //all untag rg_db.vlan[vlanId].UntagPortmask.bits[0]|=(0x1<<RTK_RG_PORT_CPU); rg_db.vlan[vlanId].Ext_portmask.bits[0]=out_ext_pmask.bits[0]; //all binding ext LAN port rg_db.vlan[vlanId].Ext_portmask.bits[0]|=0x1; #ifdef CONFIG_MASTER_WLAN0_ENABLE rg_db.vlan[vlanId].wlan0DevMask=0x0; //clear first rg_db.vlan[vlanId].wlan0UntagMask=0x0; _rtk_rg_updateOtherWanWlan0Setting(rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf,&rg_db.vlan[vlanId].Ext_portmask,vlanId); #endif ret = RTK_VLAN_PORT_SET(vlanId, &rg_db.vlan[vlanId].MemberPortmask, &rg_db.vlan[vlanId].UntagPortmask); if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_VLAN_SET_FAIL); ret = RTK_VLAN_EXTPORT_SET(vlanId, &rg_db.vlan[vlanId].Ext_portmask); if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_VLAN_SET_FAIL); //dismiss all LAN member... 
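// Every LAN port (and LAN extension port) is now stripped from this WAN's own egress VLAN member set;
// for a bridge WAN the bound ports, the vlan-binding ports and the otherWanVlan members are then
// added back, and the WAN port itself joins the otherWanVlan.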
_rtk_rg_portmask_translator(rg_db.systemGlobal.lanPortMask,&out_mac_pmask,&out_ext_pmask); rg_db.vlan[rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_id].MemberPortmask.bits[0]&=(~(out_mac_pmask.bits[0]&(~(0x1<<RTK_RG_PORT_CPU)))); rg_db.vlan[rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_id].Ext_portmask.bits[0]&=(~(out_ext_pmask.bits[0]&(~(0x1)))); #ifdef CONFIG_MASTER_WLAN0_ENABLE //dismiss all WLAN0 Device in VLAN's Mask rg_db.vlan[rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_id].wlan0DevMask=((0x1<<RG_WWAN_WLAN0_VXD)|(0x1<<RG_WWAN_WLAN1_VXD)); rg_db.vlan[rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_id].wlan0UntagMask=((0x1<<RG_WWAN_WLAN0_VXD)|(0x1<<RG_WWAN_WLAN1_VXD)); #ifdef CONFIG_RTL_CLIENT_MODE_SUPPORT if(rg_db.systemGlobal.wanIntfGroup[i].p_intfInfo->storedInfo.wan_intf.wirelessWan==RG_WWAN_WLAN0_VXD) rg_db.vlan[rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_id].wlan0DevMask=0x1<<RG_WWAN_WLAN0_VXD; else if(rg_db.systemGlobal.wanIntfGroup[i].p_intfInfo->storedInfo.wan_intf.wirelessWan==RG_WWAN_WLAN1_VXD) rg_db.vlan[rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_id].wlan0DevMask=0x1<<RG_WWAN_WLAN1_VXD; #endif #endif //update WAN's VLAN member if(rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->wan_type == RTK_RG_BRIDGE) { //20140725LUKE: If there is vlan-binding, we should add the port to WAN's VLAN member!! _rtk_rg_portmask_translator(rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->vlan_binding_mask,&vlan_bind_pmsk,&vlan_bind_extpmsk); //assign LAN member to Other VLAN's member rg_db.vlan[rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_id].MemberPortmask.bits[0]|=rg_db.vlan[vlanId].MemberPortmask.bits[0]; rg_db.vlan[rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_id].MemberPortmask.bits[0]|=vlan_bind_pmsk.bits[0]; //all vlan-binding port rg_db.vlan[rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_id].Ext_portmask.bits[0]|=rg_db.vlan[vlanId].Ext_portmask.bits[0]; rg_db.vlan[rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_id].Ext_portmask.bits[0]|=vlan_bind_extpmsk.bits[0]; //all vlan-binding ext-port #ifdef CONFIG_MASTER_WLAN0_ENABLE //assign Other VLAN's WLAN0 Device to mask of WAN's VLAN rg_db.vlan[rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_id].wlan0DevMask|=rg_db.vlan[vlanId].wlan0DevMask; rg_db.vlan[rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_id].wlan0UntagMask|=rg_db.vlan[vlanId].wlan0UntagMask; #endif //assign WAN port to Other VLAN's member rg_db.vlan[vlanId].MemberPortmask.bits[0]|=0x1<<rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->wan_port_idx; ret = RTK_VLAN_PORT_SET(vlanId, &rg_db.vlan[vlanId].MemberPortmask, &rg_db.vlan[vlanId].UntagPortmask); if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_VLAN_SET_FAIL); } //reset WAN's VLAN ret = RTK_VLAN_PORT_SET(rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_id, &rg_db.vlan[rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_id].MemberPortmask, &rg_db.vlan[rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_id].UntagPortmask); if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_VLAN_SET_FAIL); ret = RTK_VLAN_EXTPORT_SET(rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_id, &rg_db.vlan[rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_id].Ext_portmask); if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_VLAN_SET_FAIL); } } #ifdef CONFIG_MASTER_WLAN0_ENABLE 
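/* The block below rebuilds fwdVLAN_BIND_INTERNET's WLAN device masks from scratch:
 * ext-port 0 (and ext-port 1 on concurrent dual-band builds) is dropped from the member
 * set first and re-added only if at least one WLAN device is un-bound or bound to an
 * internet WAN; each such device also has its device-based CVLAN moved to
 * fwdVLAN_BIND_INTERNET. */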
rg_db.vlan[rg_db.systemGlobal.initParam.fwdVLAN_BIND_INTERNET].wlan0DevMask=0x0; rg_db.vlan[rg_db.systemGlobal.initParam.fwdVLAN_BIND_INTERNET].wlan0UntagMask=0x0; //Check wlan-device binding only when there is no WAN port-binding ext0. if(etpmsk.bits[0]&(0x1<<(RTK_RG_EXT_PORT0-RTK_RG_PORT_CPU))) { etpmsk.bits[0]&=(~(0x1<<(RTK_RG_EXT_PORT0-RTK_RG_PORT_CPU))); //dismiss ext0-Port from member //Search all wlan0 devices, if one of them are un-binding or binding to internet WAN, keep ext0 port in member. #ifdef CONFIG_DUALBAND_CONCURRENT for(i=0;i<WLAN_DEVICE_NUM;i++) #else for(i=0;i<MAX_WLAN_DEVICE_NUM;i++) #endif { if(rg_db.systemGlobal.wlan0BindDecision[i].exist) { if(rg_db.systemGlobal.wlan0BindDecision[i].set_bind) { //internet-binding device, keep ext0 in member! if(!rg_db.systemGlobal.interfaceInfo[rg_db.systemGlobal.wlan0BindDecision[i].bind_wanIntf].storedInfo.wan_intf.wan_intf_conf.none_internet) { etpmsk.bits[0]|=(0x1<<(RTK_RG_EXT_PORT0-RTK_RG_PORT_CPU)); //Change DVID of internet-bindingl WLAN device to fwdVLAN_BIND_INTERNET assert_ok(rtk_rg_apollo_wlanDevBasedCVlanId_set(0,i,rg_db.systemGlobal.initParam.fwdVLAN_BIND_INTERNET)); //Set this device to fwdVLAN_BIND_INTERNET's DevMask rg_db.vlan[rg_db.systemGlobal.initParam.fwdVLAN_BIND_INTERNET].wlan0DevMask|=(0x1<<i); if(rg_db.vlan[rg_db.systemGlobal.initParam.fwdVLAN_BIND_INTERNET].UntagPortmask.bits[0]&(0x1<<RTK_RG_PORT_CPU)) rg_db.vlan[rg_db.systemGlobal.initParam.fwdVLAN_BIND_INTERNET].wlan0UntagMask|=(0x1<<i); else rg_db.vlan[rg_db.systemGlobal.initParam.fwdVLAN_BIND_INTERNET].wlan0UntagMask&=(~(0x1<<i)); } } else { //un-binding device, keep ext0 in member! etpmsk.bits[0]|=(0x1<<(RTK_RG_EXT_PORT0-RTK_RG_PORT_CPU)); //Change DVID of un-binding WLAN device to fwdVLAN_BIND_INTERNET assert_ok(rtk_rg_apollo_wlanDevBasedCVlanId_set(0,i,rg_db.systemGlobal.initParam.fwdVLAN_BIND_INTERNET)); //Set this device to fwdVLAN_BIND_INTERNET's DevMask rg_db.vlan[rg_db.systemGlobal.initParam.fwdVLAN_BIND_INTERNET].wlan0DevMask|=(0x1<<i); if(rg_db.vlan[rg_db.systemGlobal.initParam.fwdVLAN_BIND_INTERNET].UntagPortmask.bits[0]&(0x1<<RTK_RG_PORT_CPU)) rg_db.vlan[rg_db.systemGlobal.initParam.fwdVLAN_BIND_INTERNET].wlan0UntagMask|=(0x1<<i); else rg_db.vlan[rg_db.systemGlobal.initParam.fwdVLAN_BIND_INTERNET].wlan0UntagMask&=(~(0x1<<i)); } } } //Compare the new adding WAN,too if(wanintf != NULL) { if(wanintf->none_internet==0) { //internet-binding device, keep ext0 in member! if(wanintf->wlan0_dev_binding_mask>0) etpmsk.bits[0]|=(0x1<<(RTK_RG_EXT_PORT0-RTK_RG_PORT_CPU)); } else { //remove the otherWAN bind device from fwdVLAN_BIND_INTERNET's DevMask rg_db.vlan[rg_db.systemGlobal.initParam.fwdVLAN_BIND_INTERNET].wlan0DevMask&=(~(wanintf->wlan0_dev_binding_mask)); rg_db.vlan[rg_db.systemGlobal.initParam.fwdVLAN_BIND_INTERNET].wlan0UntagMask&=(~(wanintf->wlan0_dev_binding_mask)); } } } #ifdef CONFIG_DUALBAND_CONCURRENT if(rg_db.systemGlobal.enableSlaveSSIDBind && etpmsk.bits[0]&(0x1<<(RTK_RG_EXT_PORT1-RTK_RG_PORT_CPU))) { etpmsk.bits[0]&=(~(0x1<<(RTK_RG_EXT_PORT1-RTK_RG_PORT_CPU))); //dismiss ext1-Port from member //Search all wlan1 devices, if one of them are un-binding or binding to internet WAN, keep ext1 port in member. for(i=WLAN_DEVICE_NUM;i<MAX_WLAN_DEVICE_NUM;i++) { if(rg_db.systemGlobal.wlan0BindDecision[i].exist) { if(rg_db.systemGlobal.wlan0BindDecision[i].set_bind) { //internet-binding device, keep ext1 in member! 
if(!rg_db.systemGlobal.interfaceInfo[rg_db.systemGlobal.wlan0BindDecision[i].bind_wanIntf].storedInfo.wan_intf.wan_intf_conf.none_internet) { etpmsk.bits[0]|=(0x1<<(RTK_RG_EXT_PORT1-RTK_RG_PORT_CPU)); //Change DVID of internet-bindingl WLAN device to fwdVLAN_BIND_INTERNET assert_ok(rtk_rg_apollo_wlanDevBasedCVlanId_set(0,i,rg_db.systemGlobal.initParam.fwdVLAN_BIND_INTERNET)); //Set this device to fwdVLAN_BIND_INTERNET's DevMask rg_db.vlan[rg_db.systemGlobal.initParam.fwdVLAN_BIND_INTERNET].wlan0DevMask|=(0x1<<i); if(rg_db.vlan[rg_db.systemGlobal.initParam.fwdVLAN_BIND_INTERNET].UntagPortmask.bits[0]&(0x1<<RTK_RG_PORT_CPU)) rg_db.vlan[rg_db.systemGlobal.initParam.fwdVLAN_BIND_INTERNET].wlan0UntagMask|=(0x1<<i); else rg_db.vlan[rg_db.systemGlobal.initParam.fwdVLAN_BIND_INTERNET].wlan0UntagMask&=(~(0x1<<i)); } } else { //un-binding device, keep ext0 in member! etpmsk.bits[0]|=(0x1<<(RTK_RG_EXT_PORT1-RTK_RG_PORT_CPU)); //Change DVID of un-binding WLAN device to fwdVLAN_BIND_INTERNET assert_ok(rtk_rg_apollo_wlanDevBasedCVlanId_set(0,i,rg_db.systemGlobal.initParam.fwdVLAN_BIND_INTERNET)); //Set this device to fwdVLAN_BIND_INTERNET's DevMask rg_db.vlan[rg_db.systemGlobal.initParam.fwdVLAN_BIND_INTERNET].wlan0DevMask|=(0x1<<i); if(rg_db.vlan[rg_db.systemGlobal.initParam.fwdVLAN_BIND_INTERNET].UntagPortmask.bits[0]&(0x1<<RTK_RG_PORT_CPU)) rg_db.vlan[rg_db.systemGlobal.initParam.fwdVLAN_BIND_INTERNET].wlan0UntagMask|=(0x1<<i); else rg_db.vlan[rg_db.systemGlobal.initParam.fwdVLAN_BIND_INTERNET].wlan0UntagMask&=(~(0x1<<i)); } } } //Compare the new adding WAN,too if(wanintf != NULL) { if(wanintf->none_internet==0) { //internet-binding device, keep ext0 in member! if(wanintf->wlan0_dev_binding_mask>(0x1<<WLAN_DEVICE_NUM)) etpmsk.bits[0]|=(0x1<<(RTK_RG_EXT_PORT1-RTK_RG_PORT_CPU)); } else { //remove the otherWAN bind device from fwdVLAN_BIND_INTERNET's DevMask rg_db.vlan[rg_db.systemGlobal.initParam.fwdVLAN_BIND_INTERNET].wlan0DevMask&=(~(wanintf->wlan0_dev_binding_mask)); rg_db.vlan[rg_db.systemGlobal.initParam.fwdVLAN_BIND_INTERNET].wlan0UntagMask&=(~(wanintf->wlan0_dev_binding_mask)); } } } #endif #endif //Compare the new adding WAN,too if(wanintf != NULL && wanintf->none_internet==1 && wanintf->port_binding_mask.portmask>0) { _rtk_rg_portmask_translator(wanintf->port_binding_mask,&out_mac_pmask,&out_ext_pmask); mbpmsk.bits[0]&=(~(out_mac_pmask.bits[0])); etpmsk.bits[0]&=(~(out_ext_pmask.bits[0])); } //20140516LUKE:we should keep CPU port unless there is no ext port anymore if(etpmsk.bits[0]) mbpmsk.bits[0]|=(0x1<<RTK_RG_PORT_CPU); ret = RTK_VLAN_FID_SET(rg_db.systemGlobal.initParam.fwdVLAN_BIND_INTERNET, LAN_FID); if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_VLAN_SET_FAIL); ret = RTK_VLAN_FIDMODE_SET(rg_db.systemGlobal.initParam.fwdVLAN_BIND_INTERNET, VLAN_FID_SVL); //This is used for ALL LAN interface if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_VLAN_SET_FAIL); ret = RTK_VLAN_PORT_SET(rg_db.systemGlobal.initParam.fwdVLAN_BIND_INTERNET, &mbpmsk, &utpmsk); if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_VLAN_SET_FAIL); ret = RTK_VLAN_EXTPORT_SET(rg_db.systemGlobal.initParam.fwdVLAN_BIND_INTERNET, &etpmsk); if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_VLAN_SET_FAIL); //Change PVID of all LAN port remain in fwdVLAN_BIND_INTERNET for(i=0;i<=RTK_RG_PORT_CPU;i++) { if(mbpmsk.bits[0]&(0x1<<i)) { //20150408LUKE: check PPB first if(rg_db.systemGlobal.protoBasedVID[i].protoVLANCfg[RG_IPV4_GROUPID].valid) { rg_db.systemGlobal.protoBasedVID[i].protoVLANCfg[RG_IPV4_GROUPID].vid=rg_db.systemGlobal.initParam.fwdVLAN_BIND_INTERNET; 
ASSERT_EQ(RTK_VLAN_PORTPROTOVLAN_SET(i,RG_IPV4_GROUPID,&rg_db.systemGlobal.protoBasedVID[i].protoVLANCfg[RG_IPV4_GROUPID]),RT_ERR_OK); rg_db.systemGlobal.protoBasedVID[i].protoVLANCfg[RG_ARP_GROUPID].vid=rg_db.systemGlobal.initParam.fwdVLAN_BIND_INTERNET; ASSERT_EQ(RTK_VLAN_PORTPROTOVLAN_SET(i,RG_ARP_GROUPID,&rg_db.systemGlobal.protoBasedVID[i].protoVLANCfg[RG_ARP_GROUPID]),RT_ERR_OK); } else if(rg_db.systemGlobal.protoBasedVID[i].protoVLANCfg[RG_IPV6_GROUPID].valid) { rg_db.systemGlobal.protoBasedVID[i].protoVLANCfg[RG_IPV6_GROUPID].vid=rg_db.systemGlobal.initParam.fwdVLAN_BIND_INTERNET; ASSERT_EQ(RTK_VLAN_PORTPROTOVLAN_SET(i,RG_IPV6_GROUPID,&rg_db.systemGlobal.protoBasedVID[i].protoVLANCfg[RG_IPV6_GROUPID]),RT_ERR_OK); } else ASSERT_EQ(RTK_VLAN_PORTPVID_SET(i,rg_db.systemGlobal.initParam.fwdVLAN_BIND_INTERNET),RT_ERR_OK); } } for(i=RTK_RG_EXT_PORT0;i<RTK_RG_PORT_MAX;i++) { if(etpmsk.bits[0]&(0x1<<(i-RTK_RG_PORT_CPU))) ASSERT_EQ(RTK_VLAN_EXTPORTPVID_SET(i-RTK_RG_PORT_CPU,rg_db.systemGlobal.initParam.fwdVLAN_BIND_INTERNET),RT_ERR_OK); } //Refresh all internet WAN's VLAN member from VID fwdVLAN_BIND_INTERNET for(i=0;i<rg_db.systemGlobal.wanIntfTotalNum;i++) { if(!rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->none_internet && rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->wan_type!=RTK_RG_BRIDGE) { //dismiss all LAN member... _rtk_rg_portmask_translator(rg_db.systemGlobal.lanPortMask,&out_mac_pmask,&out_ext_pmask); rg_db.vlan[rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_id].MemberPortmask.bits[0]&=(~(out_mac_pmask.bits[0]&(~(0x1<<RTK_RG_PORT_CPU)))); rg_db.vlan[rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_id].Ext_portmask.bits[0]&=(~(out_ext_pmask.bits[0]&(~(0x1)))); #ifdef CONFIG_MASTER_WLAN0_ENABLE //dismiss all WLAN0 Device in VLAN's Mask rg_db.vlan[rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_id].wlan0DevMask=((0x1<<RG_WWAN_WLAN0_VXD)|(0x1<<RG_WWAN_WLAN1_VXD)); rg_db.vlan[rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_id].wlan0UntagMask=((0x1<<RG_WWAN_WLAN0_VXD)|(0x1<<RG_WWAN_WLAN1_VXD)); #ifdef CONFIG_RTL_CLIENT_MODE_SUPPORT if(rg_db.systemGlobal.wanIntfGroup[i].p_intfInfo->storedInfo.wan_intf.wirelessWan==RG_WWAN_WLAN0_VXD) rg_db.vlan[rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_id].wlan0DevMask=0x1<<RG_WWAN_WLAN0_VXD; else if(rg_db.systemGlobal.wanIntfGroup[i].p_intfInfo->storedInfo.wan_intf.wirelessWan==RG_WWAN_WLAN1_VXD) rg_db.vlan[rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_id].wlan0DevMask=0x1<<RG_WWAN_WLAN1_VXD; #endif #endif //reset internet WAN's VLAN ret = RTK_VLAN_PORT_SET(rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_id, &rg_db.vlan[rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_id].MemberPortmask, &rg_db.vlan[rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_id].UntagPortmask); if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_VLAN_SET_FAIL); ret = RTK_VLAN_EXTPORT_SET(rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_id, &rg_db.vlan[rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_id].Ext_portmask); if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_VLAN_SET_FAIL); } } //update all bridge WAN's VLAN member for(i=0;i<rg_db.systemGlobal.wanIntfTotalNum;i++) { if(rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->wan_type==RTK_RG_BRIDGE) { //dismiss all LAN member... 
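/* For every bridge WAN the LAN-side membership is rebuilt below: all translated LAN
 * ports are dropped first, then re-added either from fwdVLAN_BIND_INTERNET (internet
 * bridge WAN) or from the interface's otherWanVlan (none_internet bridge WAN), plus any
 * ports in its vlan_binding_mask. */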
_rtk_rg_portmask_translator(rg_db.systemGlobal.lanPortMask,&out_mac_pmask,&out_ext_pmask); rg_db.vlan[rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_id].MemberPortmask.bits[0]&=(~(out_mac_pmask.bits[0]&(~(0x1<<RTK_RG_PORT_CPU)))); rg_db.vlan[rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_id].Ext_portmask.bits[0]&=(~(out_ext_pmask.bits[0]&(~(0x1)))); #ifdef CONFIG_MASTER_WLAN0_ENABLE //dismiss all WLAN0 Device in VLAN's Mask rg_db.vlan[rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_id].wlan0DevMask=((0x1<<RG_WWAN_WLAN0_VXD)|(0x1<<RG_WWAN_WLAN1_VXD)); rg_db.vlan[rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_id].wlan0UntagMask=((0x1<<RG_WWAN_WLAN0_VXD)|(0x1<<RG_WWAN_WLAN1_VXD)); #ifdef CONFIG_RTL_CLIENT_MODE_SUPPORT if(rg_db.systemGlobal.wanIntfGroup[i].p_intfInfo->storedInfo.wan_intf.wirelessWan==RG_WWAN_WLAN0_VXD) rg_db.vlan[rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_id].wlan0DevMask=0x1<<RG_WWAN_WLAN0_VXD; else if(rg_db.systemGlobal.wanIntfGroup[i].p_intfInfo->storedInfo.wan_intf.wirelessWan==RG_WWAN_WLAN1_VXD) rg_db.vlan[rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_id].wlan0DevMask=0x1<<RG_WWAN_WLAN1_VXD; #endif #endif //internet bridge WAN's LAN member is come from fwdVLAN_BIND_INTERNET if(!rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->none_internet) { vlanId=rg_db.systemGlobal.initParam.fwdVLAN_BIND_INTERNET; } else //other bridge WAN's LAN member { vlanId=rg_db.systemGlobal.otherWanVlan[rg_db.systemGlobal.wanIntfGroup[i].index]; } if(rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->vlan_binding_mask.portmask) { _rtk_rg_portmask_translator(rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->vlan_binding_mask,&vlan_bind_pmsk,&vlan_bind_extpmsk); rg_db.vlan[rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_id].MemberPortmask.bits[0]|=vlan_bind_pmsk.bits[0]; //all vlan-binding port rg_db.vlan[rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_id].Ext_portmask.bits[0]|=vlan_bind_extpmsk.bits[0]; //all vlan-binding ext-port } //assign LAN member to fwdVLAN_BIND_INTERNET's member rg_db.vlan[rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_id].MemberPortmask.bits[0]|=rg_db.vlan[vlanId].MemberPortmask.bits[0]; rg_db.vlan[rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_id].Ext_portmask.bits[0]|=rg_db.vlan[vlanId].Ext_portmask.bits[0]; #ifdef CONFIG_MASTER_WLAN0_ENABLE //assign fwdVLAN_BIND_INTERNET's WLAN0 Device to VLAN's Mask rg_db.vlan[rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_id].wlan0DevMask|=rg_db.vlan[vlanId].wlan0DevMask; rg_db.vlan[rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_id].wlan0UntagMask|=rg_db.vlan[vlanId].wlan0UntagMask; #endif //20140516LUKE:All LAN port should be untag, CPU port will follow WAN port's setting! 
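/* i.e. RTK_RG_ALL_LAN_PORTMASK is OR'ed into the untag set below, so LAN-facing egress
 * of this bridge WAN VLAN always leaves untagged, while the CPU port keeps following the
 * WAN port's egress_vlan_tag_on setting. */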
rg_db.vlan[rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_id].UntagPortmask.bits[0]|=RTK_RG_ALL_LAN_PORTMASK; //reset WAN's VLAN ret = RTK_VLAN_PORT_SET(rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_id, &rg_db.vlan[rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_id].MemberPortmask, &rg_db.vlan[rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_id].UntagPortmask); if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_VLAN_SET_FAIL); ret = RTK_VLAN_EXTPORT_SET(rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_id, &rg_db.vlan[rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_id].Ext_portmask); if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_VLAN_SET_FAIL); } } return (RT_ERR_RG_OK); } #endif // end defined(CONFIG_APOLLO) int32 _rtk_rg_transformPortmaskToPortIdx(uint32 portmask, unsigned int *portIdx) { int i; for(i=RTK_RG_PORT0;i<RTK_RG_PORT_MAX;i++) { if(portmask&(0x1<<i)) { *portIdx=i; break; } } return (RT_ERR_RG_OK); } #if defined(CONFIG_APOLLO) int32 _rtk_rg_updateNoneBindingPortmask(uint32 wanPmsk) { int i; unsigned int portIdx=0,none_binding_wlan=0; //init rg_db.systemGlobal.non_binding_pmsk.portmask=RTK_RG_ALL_LAN_PORTMASK|RTK_RG_ALL_EXT_PORTMASK; //all LAN port, non-LAN is always non-binding!! for(i=RTK_RG_PORT0;i<RTK_RG_PORT_MAX;i++) rg_db.systemGlobal.portbinding_wan_idx[i]=-1; //search all port-binding WAN and vlan-binding rules, record non-binding port in rg_db.systemGlobal.non_binding_pmsk //record each port which port-binding to WAN interface in rg_db.systemGlobal.portbinding_wan_idx[RTK_RG_PORT_MAX] for(i=0;i<MAX_BIND_SW_TABLE_SIZE;i++) { if(rg_db.bind[i].valid) { if(rg_db.bind[i].rtk_bind.portMask.bits[0]>0) { rg_db.systemGlobal.non_binding_pmsk.portmask &= (~rg_db.bind[i].rtk_bind.portMask.bits[0]); //only record port-binding if(rg_db.bind[i].rtk_bind.vidLan==0 && _rtk_rg_transformPortmaskToPortIdx(rg_db.bind[i].rtk_bind.portMask.bits[0],&portIdx)==RT_ERR_RG_OK) rg_db.systemGlobal.portbinding_wan_idx[portIdx]=rg_db.nexthop[rg_db.wantype[rg_db.bind[i].rtk_bind.wanTypeIdx].rtk_wantype.nhIdx].rtk_nexthop.ifIdx; TABLE("rg_db.systemGlobal.portbinding_wan_idx[%d]=%d",portIdx,rg_db.systemGlobal.portbinding_wan_idx[portIdx]); } else if(rg_db.bind[i].rtk_bind.extPortMask.bits[0]>0) { rg_db.systemGlobal.non_binding_pmsk.portmask &= (~(rg_db.bind[i].rtk_bind.extPortMask.bits[0]<<RTK_RG_EXT_PORT0)); //only record port-binding if(rg_db.bind[i].rtk_bind.vidLan==0 && _rtk_rg_transformPortmaskToPortIdx((rg_db.bind[i].rtk_bind.extPortMask.bits[0]<<RTK_RG_EXT_PORT0),&portIdx)==RT_ERR_RG_OK) rg_db.systemGlobal.portbinding_wan_idx[portIdx]=rg_db.nexthop[rg_db.wantype[rg_db.bind[i].rtk_bind.wanTypeIdx].rtk_wantype.nhIdx].rtk_nexthop.ifIdx; } } } #ifdef CONFIG_MASTER_WLAN0_ENABLE if(rg_db.systemGlobal.non_binding_pmsk.portmask&(0x1<<RTK_RG_EXT_PORT0)) { //Check if there are none-binding-device in WLAN0 for(i=0;i<MAX_WLAN_DEVICE_NUM;i++) { if(rg_db.systemGlobal.wlan0BindDecision[i].exist && !rg_db.systemGlobal.wlan0BindDecision[i].set_bind) { none_binding_wlan|=0x1<<i; } } //if all WLAN0 device is binding, dismiss ext0 port in non_binding_pmsk if(!none_binding_wlan) { rg_db.systemGlobal.non_binding_pmsk.portmask&=(~(0x1<<RTK_RG_EXT_PORT0)); } } #ifdef CONFIG_DUALBAND_CONCURRENT else if(rg_db.systemGlobal.enableSlaveSSIDBind && rg_db.systemGlobal.non_binding_pmsk.portmask&(0x1<<RTK_RG_EXT_PORT1)) { //Check if there are none-binding-device in WLAN1 for(i=WLAN_DEVICE_NUM;i<MAX_WLAN_DEVICE_NUM;i++) { 
if(rg_db.systemGlobal.wlan0BindDecision[i].exist && !rg_db.systemGlobal.wlan0BindDecision[i].set_bind) { none_binding_wlan|=0x1<<i; } } //if all WLAN1 device is binding, dismiss ext1 port in non_binding_pmsk if(!none_binding_wlan) { rg_db.systemGlobal.non_binding_pmsk.portmask&=(~(0x1<<RTK_RG_EXT_PORT1)); } } #endif #endif if((rg_db.systemGlobal.non_binding_pmsk.portmask|none_binding_wlan)==0x0) { //remove WAN port from fwdVLAN_BIND_INTERNET VLAN's member rg_db.vlan[rg_db.systemGlobal.initParam.fwdVLAN_BIND_INTERNET].MemberPortmask.bits[0]&=(~wanPmsk); assert_ok(RTK_VLAN_PORT_SET(rg_db.systemGlobal.initParam.fwdVLAN_BIND_INTERNET, &rg_db.vlan[rg_db.systemGlobal.initParam.fwdVLAN_BIND_INTERNET].MemberPortmask, &rg_db.vlan[rg_db.systemGlobal.initParam.fwdVLAN_BIND_INTERNET].UntagPortmask)); //assert_ok(RTK_VLAN_EXTPORT_SET(rg_db.systemGlobal.initParam.fwdVLAN_BIND_INTERNET, &rg_db.vlan[rg_db.systemGlobal.initParam.fwdVLAN_BIND_INTERNET].Ext_portmask)); } else { //add WAN port to fwdVLAN_BIND_INTERNET VLAN's member rg_db.vlan[rg_db.systemGlobal.initParam.fwdVLAN_BIND_INTERNET].MemberPortmask.bits[0]|=wanPmsk; assert_ok(RTK_VLAN_PORT_SET(rg_db.systemGlobal.initParam.fwdVLAN_BIND_INTERNET, &rg_db.vlan[rg_db.systemGlobal.initParam.fwdVLAN_BIND_INTERNET].MemberPortmask, &rg_db.vlan[rg_db.systemGlobal.initParam.fwdVLAN_BIND_INTERNET].UntagPortmask)); //assert_ok(RTK_VLAN_EXTPORT_SET(rg_db.systemGlobal.initParam.fwdVLAN_BIND_INTERNET, &rg_db.vlan[rg_db.systemGlobal.initParam.fwdVLAN_BIND_INTERNET].Ext_portmask)); } //Clear All shortcut, otherwise shortcut decision may be different after binding!! _rtk_rg_shortCut_clear(); return (RT_ERR_RG_OK); } #endif //defined(CONFIG_APOLLO) int32 _rtk_rg_caculateOtherWanVlanID(rtk_rg_wanIntfConf_t *wanintf) { int count=0,vidX=-1; rtk_rg_portmask_t bindingPmsk; memcpy(&bindingPmsk,&wanintf->port_binding_mask,sizeof(rtk_rg_portmask_t)); //Caculate OtherWanVlan based on the lowest indes of bindingPmsk while(bindingPmsk.portmask>0) { if((bindingPmsk.portmask&0x1)>0) { //ext_port0 is 4, ext_port1 is 5 #if defined(CONFIG_RTL9600_SERIES) || defined(CONFIG_RTL9601B_SERIES) if(count>RTK_RG_PORT3) count-=3; #elif defined(CONFIG_RTL9602C_SERIES) if(count>RTK_RG_PORT1) count-=2; #endif vidX=rg_db.systemGlobal.initParam.fwdVLAN_BIND_OTHER+count; break; } count++; bindingPmsk.portmask>>=1; } #ifdef CONFIG_MASTER_WLAN0_ENABLE if(vidX==-1 && wanintf->wlan0_dev_binding_mask>0) { int dmsk=wanintf->wlan0_dev_binding_mask; vidX=rg_db.systemGlobal.initParam.fwdVLAN_BIND_OTHER; count=6; //start from wlan's root while(dmsk>0) { if((dmsk&0x1)>0) { vidX+=count; break; } count++; dmsk>>=1; } } #endif return vidX; } #if defined(CONFIG_APOLLO) int32 _rtk_rg_updateBindOtherWanPortBasedVID(rtk_rg_wanIntfConf_t *otherWanIntf) { int intfidx,i,vidX=-1; rtk_portmask_t mbpmsk, etpmsk; for(intfidx=0;intfidx<rg_db.systemGlobal.wanIntfTotalNum;intfidx++) { if(rg_db.systemGlobal.wanIntfGroup[intfidx].p_wanIntfConf->none_internet) { _rtk_rg_portmask_translator(rg_db.systemGlobal.wanIntfGroup[intfidx].p_wanIntfConf->port_binding_mask,&mbpmsk,&etpmsk); //Change PVID of all binding port to OtherWanVlan for(i=0;i<=RTK_RG_PORT_CPU;i++) { if(mbpmsk.bits[0]&(0x1<<i)) { //20150408LUKE: check PPB first if(rg_db.systemGlobal.protoBasedVID[i].protoVLANCfg[RG_IPV4_GROUPID].valid) { rg_db.systemGlobal.protoBasedVID[i].protoVLANCfg[RG_IPV4_GROUPID].vid=rg_db.systemGlobal.otherWanVlan[rg_db.systemGlobal.wanIntfGroup[intfidx].index]; 
ASSERT_EQ(RTK_VLAN_PORTPROTOVLAN_SET(i,RG_IPV4_GROUPID,&rg_db.systemGlobal.protoBasedVID[i].protoVLANCfg[RG_IPV4_GROUPID]),RT_ERR_OK); rg_db.systemGlobal.protoBasedVID[i].protoVLANCfg[RG_ARP_GROUPID].vid=rg_db.systemGlobal.otherWanVlan[rg_db.systemGlobal.wanIntfGroup[intfidx].index]; ASSERT_EQ(RTK_VLAN_PORTPROTOVLAN_SET(i,RG_ARP_GROUPID,&rg_db.systemGlobal.protoBasedVID[i].protoVLANCfg[RG_ARP_GROUPID]),RT_ERR_OK); } else if(rg_db.systemGlobal.protoBasedVID[i].protoVLANCfg[RG_IPV6_GROUPID].valid) { rg_db.systemGlobal.protoBasedVID[i].protoVLANCfg[RG_IPV6_GROUPID].vid=rg_db.systemGlobal.otherWanVlan[rg_db.systemGlobal.wanIntfGroup[intfidx].index]; ASSERT_EQ(RTK_VLAN_PORTPROTOVLAN_SET(i,RG_IPV6_GROUPID,&rg_db.systemGlobal.protoBasedVID[i].protoVLANCfg[RG_IPV6_GROUPID]),RT_ERR_OK); } else ASSERT_EQ(RTK_VLAN_PORTPVID_SET(i,rg_db.systemGlobal.otherWanVlan[rg_db.systemGlobal.wanIntfGroup[intfidx].index]),RT_ERR_OK); } } for(i=RTK_RG_EXT_PORT0;i<RTK_RG_PORT_MAX;i++) { //20140711LUKE: if we setup WLAN device binding, we can't change ext0 port's PVID here! //only change it's PVID when the ext0 port is in original out_ext_pmask. if(etpmsk.bits[0]&(0x1<<(i-RTK_RG_PORT_CPU))) ASSERT_EQ(RTK_VLAN_EXTPORTPVID_SET(i-RTK_RG_PORT_CPU,rg_db.systemGlobal.otherWanVlan[rg_db.systemGlobal.wanIntfGroup[intfidx].index]),RT_ERR_OK); } } } if(otherWanIntf!=NULL) { if(otherWanIntf->none_internet) { vidX=_rtk_rg_caculateOtherWanVlanID(otherWanIntf); if(vidX<0) RETURN_ERR(RT_ERR_RG_VLAN_OVER_RANGE); _rtk_rg_portmask_translator(otherWanIntf->port_binding_mask,&mbpmsk,&etpmsk); //Change PVID of all binding port to OtherWanVlan for(i=0;i<=RTK_RG_PORT_CPU;i++) { if(mbpmsk.bits[0]&(0x1<<i)) { //20150408LUKE: check PPB first if(rg_db.systemGlobal.protoBasedVID[i].protoVLANCfg[RG_IPV4_GROUPID].valid) { rg_db.systemGlobal.protoBasedVID[i].protoVLANCfg[RG_IPV4_GROUPID].vid=vidX; ASSERT_EQ(RTK_VLAN_PORTPROTOVLAN_SET(i,RG_IPV4_GROUPID,&rg_db.systemGlobal.protoBasedVID[i].protoVLANCfg[RG_IPV4_GROUPID]),RT_ERR_OK); rg_db.systemGlobal.protoBasedVID[i].protoVLANCfg[RG_ARP_GROUPID].vid=vidX; ASSERT_EQ(RTK_VLAN_PORTPROTOVLAN_SET(i,RG_ARP_GROUPID,&rg_db.systemGlobal.protoBasedVID[i].protoVLANCfg[RG_ARP_GROUPID]),RT_ERR_OK); } else if(rg_db.systemGlobal.protoBasedVID[i].protoVLANCfg[RG_IPV6_GROUPID].valid) { rg_db.systemGlobal.protoBasedVID[i].protoVLANCfg[RG_IPV6_GROUPID].vid=vidX; ASSERT_EQ(RTK_VLAN_PORTPROTOVLAN_SET(i,RG_IPV6_GROUPID,&rg_db.systemGlobal.protoBasedVID[i].protoVLANCfg[RG_IPV6_GROUPID]),RT_ERR_OK); } else ASSERT_EQ(RTK_VLAN_PORTPVID_SET(i,vidX),RT_ERR_OK); } } for(i=RTK_RG_EXT_PORT0;i<RTK_RG_PORT_MAX;i++) { //20140711LUKE: if we setup WLAN device binding, we can't change ext0 port's PVID here! //only change it's PVID when the ext0 port is in original out_ext_pmask. 
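/* vidX above comes from _rtk_rg_caculateOtherWanVlanID(): fwdVLAN_BIND_OTHER plus the
 * index of the lowest set bit in port_binding_mask (with a chip-specific index
 * adjustment), or fwdVLAN_BIND_OTHER + 6 + <device index> when only
 * wlan0_dev_binding_mask is set (CONFIG_MASTER_WLAN0_ENABLE builds). */
#if 0	/* worked example - values are illustrative assumptions, never compiled */
	otherWanIntf->port_binding_mask.portmask = (0x1<<RTK_RG_PORT1);	/* lowest bound port = 1 */
	vidX = _rtk_rg_caculateOtherWanVlanID(otherWanIntf);	/* == fwdVLAN_BIND_OTHER + 1 */
	otherWanIntf->port_binding_mask.portmask = 0;
	otherWanIntf->wlan0_dev_binding_mask = (0x1<<3);	/* lowest bound WLAN device = 3 */
	vidX = _rtk_rg_caculateOtherWanVlanID(otherWanIntf);	/* == fwdVLAN_BIND_OTHER + 6 + 3 */
#endif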
if(etpmsk.bits[0]&(0x1<<(i-RTK_RG_PORT_CPU))) ASSERT_EQ(RTK_VLAN_EXTPORTPVID_SET(i-RTK_RG_PORT_CPU,vidX),RT_ERR_OK); } } } return RT_ERR_RG_OK; } #endif //defined(CONFIG_APOLLO) int32 _rtk_rg_createOtherWanVlan(rtk_rg_wanIntfConf_t *wanintf, int *otherWanVlan, int reAddSameWan) { int vidX,i,ret; rtk_portmask_t out_mac_pmask,out_ext_pmask; //init _rtk_rg_portmask_translator(wanintf->port_binding_mask,&out_mac_pmask,&out_ext_pmask); //Create VID otherWanVlan based on the lowest index of bindingPmsk vidX=_rtk_rg_caculateOtherWanVlanID(wanintf); if(vidX<0) RETURN_ERR(RT_ERR_RG_VLAN_OVER_RANGE); //init DEBUG("vidX is %d , original vid is %d",vidX,reAddSameWan==FAIL?-1:rg_db.systemGlobal.otherWanVlan[reAddSameWan]); if(rg_db.vlan[vidX].valid==0) { //first use, create it ret = RTK_VLAN_CREATE(vidX); if (ret == RT_ERR_VLAN_EXIST) { DEBUG("Vid x[%d] had created..",vidX); rg_db.vlan[vidX].valid = 1; } } rg_db.vlan[vidX].MemberPortmask.bits[0]=out_mac_pmask.bits[0]; //all binding LAN port with CPU rg_db.vlan[vidX].MemberPortmask.bits[0]|=(0x1<<RTK_RG_PORT_CPU); rg_db.vlan[vidX].UntagPortmask.bits[0]=out_mac_pmask.bits[0]; //all untag rg_db.vlan[vidX].UntagPortmask.bits[0]|=(0x1<<RTK_RG_PORT_CPU); rg_db.vlan[vidX].Ext_portmask.bits[0]=out_ext_pmask.bits[0]; //all binding ext LAN port rg_db.vlan[vidX].Ext_portmask.bits[0]|=0x1; #ifdef CONFIG_MASTER_WLAN0_ENABLE rg_db.vlan[vidX].wlan0DevMask=0x0; //clear first rg_db.vlan[vidX].wlan0UntagMask=0x0; _rtk_rg_updateOtherWanWlan0Setting(wanintf,&rg_db.vlan[vidX].Ext_portmask,vidX); #endif ret = RTK_VLAN_FID_SET(vidX, LAN_FID); if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_VLAN_SET_FAIL); ret = RTK_VLAN_FIDMODE_SET(vidX, VLAN_FID_SVL); //This is used for ALL LAN interface if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_VLAN_SET_FAIL); ret = RTK_VLAN_PORT_SET(vidX, &rg_db.vlan[vidX].MemberPortmask, &rg_db.vlan[vidX].UntagPortmask); if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_VLAN_SET_FAIL); ret = RTK_VLAN_EXTPORT_SET(vidX, &rg_db.vlan[vidX].Ext_portmask); if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_VLAN_SET_FAIL); _rtk_rg_updateBindOtherWanPortBasedVID(wanintf); //20140723LUKE: delete old vlan if different if(reAddSameWan!=FAIL) { for(i=0;i<MAX_NETIF_SW_TABLE_SIZE;i++) { //if there is otherWan use the old vlan, we should not delete it!! 
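/* i.e. when the WAN at index reAddSameWan is re-added and a different vidX was just
 * calculated, its previously allocated otherWanVlan is destroyed below, unless another
 * interface still references the same VLAN id, in which case only the local
 * otherWanVlan[] entry is cleared. */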
if(i!=reAddSameWan) { if(rg_db.systemGlobal.otherWanVlan[i]==rg_db.systemGlobal.otherWanVlan[reAddSameWan]) { rg_db.systemGlobal.otherWanVlan[reAddSameWan]=0; break; } } } if(vidX!=rg_db.systemGlobal.otherWanVlan[reAddSameWan]) { if(rg_db.systemGlobal.otherWanVlan[reAddSameWan]!=0) RTK_VLAN_DESTROY(rg_db.systemGlobal.otherWanVlan[reAddSameWan]); rg_db.systemGlobal.otherWanVlan[reAddSameWan]=0; } } *otherWanVlan=vidX; return (RT_ERR_RG_OK); } void _rtk_rg_wanInterface_special_case_check(void) { int i,j; for(i=0;i<MAX_NETIF_SW_TABLE_SIZE;i++) //check is there any PPPoE Wan { if(rg_db.systemGlobal.interfaceInfo[i].valid && rg_db.systemGlobal.interfaceInfo[i].storedInfo.is_wan==1 && rg_db.systemGlobal.interfaceInfo[i].storedInfo.wan_intf.wan_intf_conf.wan_type==RTK_RG_PPPoE) { for(j=0;j<MAX_NETIF_SW_TABLE_SIZE;j++) //check is there any bridgeWan using the same interface vlan { if(rg_db.systemGlobal.interfaceInfo[j].valid && rg_db.systemGlobal.interfaceInfo[j].storedInfo.is_wan==1 && rg_db.systemGlobal.interfaceInfo[j].storedInfo.wan_intf.wan_intf_conf.wan_type==RTK_RG_BRIDGE && rg_db.systemGlobal.interfaceInfo[i].storedInfo.wan_intf.wan_intf_conf.egress_vlan_id==rg_db.systemGlobal.interfaceInfo[j].storedInfo.wan_intf.wan_intf_conf.egress_vlan_id) { //show warning message to remind the patch may need for special case: WARNING("patch may need for specail case: PPPoE(v4_only)+bridgeWan(v6_only) Or PPPoE(v6_only)+bridgeWan(v4_only) with same interface vlan\n"); WARNING("For PPPoE Wan need below patch:"); WARNING("(1) ACL:(ingress_port_mask=WAN_PORT) + (ingress_ctag_vid=PPPoE_WAN_VID) + (ingress_dmac=WAN_GMAC) => (action_acl_ingress_vid=PPPoE_WAN_VID ) [avoid bridge patch ACL tranlate the vlan] \n"); WARNING("For BridgeWan need below patch:"); WARNING("(1) ACL:(ingress_port_mask=WAN_PORT) + (ingress_ctag_vid=PPPoE_WAN_VID) + (inrgess_ethertype=permit_ipversion_type) => (action_acl_ingress_vid=LAN_VID) [avoid vlan filter]"); WARNING("(2) ACL:(ingress_port_mask=BRIDGEWAN_PORT_BINDING_MASK) => Trap [avoid using wring sessionID] "); WARNING("(3) echo 1 > proc/rg/pppoe_bc_passthrought_to_bindingWan [PADI can be pass throught from LAN to WAN] "); WARNING("(4) echo [netIfIdx] [Action] > proc/rg/bridgeWan_drop_by_protocal [discard not support ip_veriosn in bridgeWan] "); } } } } } rtk_rg_err_code_t rtk_rg_apollo_wanInterface_add(rtk_rg_wanIntfConf_t *wanintf, int *wan_intf_idx) { int ret,i,vlanID,errorno,vlan_exist=0,tmpVid; unsigned int intfIdx,tmppmsk,tmpexpmsk,nxpIdx=-1,v6nxpIdx=-1,addToStaticMAC=0,wantypeIdx=-1,v6wantypeIdx=-1,pppoeIdx=-1,extipIdx=-1,baseIntfIdx=-1; unsigned int reAddSameWan=0,old_pmsk=0,old_extpmsk=0,delete_pmsk=0,delete_extpmsk=0; unsigned int disableBroadcast=0; #ifdef CONFIG_MASTER_WLAN0_ENABLE unsigned int isMasterSSidBind=0; #ifdef CONFIG_DUALBAND_CONCURRENT unsigned int isSlaveSSidBind=0; #endif #endif rtk_rg_wirelessWan_t wirelessWan=RG_WWAN_WIRED; rtk_portmask_t mbpmsk,utpmsk,etpmsk,/*all_lan_pmsk,all_lan_etpmsk,all_lan_utagpmsk,*/wanPmsk; //member, untag, extension port masks //rtk_portmask_t ori_CPU_member_mask,ori_CPU_untag_mask;//,ori_CPU_ext_mask; rtk_l34_netif_entry_t intfEntry; //rtk_l34_routing_entry_t rtEntry,ori_rtEt; rtk_rg_table_vlan_t ori_vlanEntry; rtk_portmask_t out_mac_pmask,out_ext_pmask; rtk_l34_nexthop_entry_t nxpEt; rtk_wanType_entry_t wantEt; rtk_fidMode_t fidMode; //rtk_vlan_protoVlanCfg_t protoVlanCfg; rtk_mac_t zeroMac={{0}}; #if CONFIG_ACL_EGRESS_WAN_INTF_TRANSFORM rtk_rg_portmask_t orig_pbdmsk; unsigned int changeBindingPmsk=0; #endif 
rtk_rg_aclAndCf_reserved_type_t rsvType; rtk_rg_aclAndCf_reserved_intf_linkLocal_trap_t intf_link_local_trap_para; if(wanintf == NULL || wan_intf_idx == NULL) RETURN_ERR(RT_ERR_RG_NULL_POINTER); if(wanintf->egress_vlan_id <= 0 || wanintf->egress_vlan_id >= 4095) //invalid vid RETURN_ERR(RT_ERR_RG_INVALID_PARAM); //Check parameter //20160315LUKE: if portmask contain WAN port, return fail. if(rg_db.systemGlobal.activeLimitFunction==RG_ACCESSWAN_TYPE_PORTMASK && rg_db.systemGlobal.accessWanLimitPortMask_member.portmask&(0x1<<wanintf->wan_port_idx)) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); //if(rg_db.systemGlobal.lanIntfTotalNum==0) //Check if LAN added before WAN creation //RETURN_ERR(RT_ERR_RG_LAN_NOT_EXIST); if((rg_db.vlan[rg_db.systemGlobal.initParam.fwdVLAN_CPU].valid && wanintf->egress_vlan_id == rg_db.systemGlobal.initParam.fwdVLAN_CPU) || (rg_db.vlan[rg_db.systemGlobal.initParam.fwdVLAN_Proto_Block].valid && wanintf->egress_vlan_id == rg_db.systemGlobal.initParam.fwdVLAN_Proto_Block) || (rg_db.systemGlobal.initParam.macBasedTagDecision==1 && (wanintf->egress_vlan_id == rg_db.systemGlobal.initParam.fwdVLAN_BIND_INTERNET || (wanintf->egress_vlan_id >= rg_db.systemGlobal.initParam.fwdVLAN_BIND_OTHER && wanintf->egress_vlan_id <= rg_db.systemGlobal.initParam.fwdVLAN_BIND_OTHER+DEFAULT_BIND_LAN_OFFSET)))) RETURN_ERR(RT_ERR_RG_VLAN_USED_BY_SYSTEM); //if(wanintf->egress_vlan_id >= DEFAULT_PPB_VLAN_START && wanintf->egress_vlan_id < (DEFAULT_PPB_VLAN_START+MAX_NETIF_SW_TABLE_SIZE)) //RETURN_ERR(RT_ERR_RG_INVALID_PARAM); //20150427LUKE: check for wireless WAN setting if((wanintf->wan_port_idx)==RTK_RG_EXT_PORT2) { #ifdef CONFIG_RTL_CLIENT_MODE_SUPPORT if(!rg_db.systemGlobal.wlan0BindDecision[RG_WWAN_WLAN0_VXD].exist) RETURN_ERR(RT_ERR_RG_WWAN_NOT_EXIST); for(i=0;i<rg_db.systemGlobal.wanIntfTotalNum;i++) if(rg_db.systemGlobal.wanIntfGroup[i].p_intfInfo->storedInfo.wan_intf.wirelessWan==RG_WWAN_WLAN0_VXD) RETURN_ERR(RT_ERR_RG_WWAN_SAME_VXD); wirelessWan=RG_WWAN_WLAN0_VXD; wanintf->wan_port_idx=RTK_RG_PORT_PON; #else RETURN_ERR(RT_ERR_RG_WWAN_NOT_EXIST); #endif } else if((wanintf->wan_port_idx)==RTK_RG_EXT_PORT3) { #ifdef CONFIG_RTL_CLIENT_MODE_SUPPORT if(!rg_db.systemGlobal.wlan0BindDecision[RG_WWAN_WLAN1_VXD].exist) RETURN_ERR(RT_ERR_RG_WWAN_NOT_EXIST); for(i=0;i<rg_db.systemGlobal.wanIntfTotalNum;i++) if(rg_db.systemGlobal.wanIntfGroup[i].p_intfInfo->storedInfo.wan_intf.wirelessWan==RG_WWAN_WLAN1_VXD) RETURN_ERR(RT_ERR_RG_WWAN_SAME_VXD); wirelessWan=RG_WWAN_WLAN1_VXD; wanintf->wan_port_idx=RTK_RG_PORT_PON; #else RETURN_ERR(RT_ERR_RG_WWAN_NOT_EXIST); #endif } if(wanintf->wan_port_idx < RTK_RG_PORT0 || wanintf->wan_port_idx >= RTK_RG_EXT_PORT0) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); //20150309LUKE: always treat disabled pri as "-1" if(wanintf->vlan_based_pri_enable==RTK_RG_DISABLED)wanintf->vlan_based_pri=-1; #if 0//def CONFIG_GPON_FEATURE if(rg_db.systemGlobal.initParam.wanPortGponMode && wanintf->wan_port_idx==RTK_RG_MAC_PORT_PON && (wanintf->gponStreamID<0||wanintf->gponStreamID>127)) //Check stream ID valid or not in GPON mode RETURN_ERR(RT_ERR_RG_INVALID_PARAM); #endif if(rg_db.systemGlobal.initParam.macBasedTagDecision) { if(wanintf->isIVL) //IVL can not be set when DMAC2CVID is trun on RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if(wanintf->port_binding_mask.portmask&(0x1<<wanintf->wan_port_idx)) //wan port should not be included in port-binding portmask RETURN_ERR(RT_ERR_RG_INVALID_PARAM); }else{ if(wanintf->port_binding_mask.portmask>0) //set port-binding but global switch is off 
RETURN_ERR(RT_ERR_RG_BIND_WITH_UNBIND_WAN); #ifdef CONFIG_MASTER_WLAN0_ENABLE if(wanintf->wlan0_dev_binding_mask>0) //set ssid-binding but global switch is off RETURN_ERR(RT_ERR_RG_BIND_WITH_UNBIND_WAN); #endif } #if 0 //test chip should not restrict wan port to PON or RGMII, since it do not has binding function if(wanintf->wan_port_idx<RTK_RG_MAC_PORT_PON || wanintf->wan_port_idx>RTK_RG_MAC_PORT_RGMII) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); #endif /*if((wanintf->wan_port_idx==RTK_RG_MAC_PORT_PON && RG_GLB_WAN_PON_USED == 1) || //wan port used (wanintf->wan_port_idx==RTK_RG_MAC_PORT_RGMII && RG_GLB_WAN_RGMII_USED == 1)) RETURN_ERR(RT_ERR_RG_INVALID_PARAM);*/ #ifdef CONFIG_MASTER_WLAN0_ENABLE for(i=0;i<WLAN_DEVICE_NUM;i++) { if(wanintf->wlan0_dev_binding_mask&(0x1<<i)) { isMasterSSidBind=1; break; } } //If we or other WAN enable ext0 port in port-binding-mask, means we are binding ALL wlan0 device to single wan, //therefore wlan0-dev-binding-mask sould keep zero. if((wanintf->port_binding_mask.portmask&(0x1<<RTK_RG_EXT_PORT0)) && isMasterSSidBind==1) RETURN_ERR(RT_ERR_RG_WLAN_BINDING_CONFLICT); #ifdef CONFIG_DUALBAND_CONCURRENT for(i=WLAN_DEVICE_NUM;i<MAX_WLAN_DEVICE_NUM;i++) { if(wanintf->wlan0_dev_binding_mask&(0x1<<i)) { isSlaveSSidBind=1; break; } } //If we or other WAN enable ext1 port in port-binding-mask, means we are binding ALL wlan1 device to single wan, //therefore wlan0-dev-binding-mask with wlan1's device sould keep zero. if(rg_db.systemGlobal.enableSlaveSSIDBind && (wanintf->port_binding_mask.portmask&(0x1<<RTK_RG_EXT_PORT1)) && isSlaveSSidBind==1) RETURN_ERR(RT_ERR_RG_WLAN_BINDING_CONFLICT); #endif //Check if the WLAN0's devices are absence or already binded by other WAN separately //20140718LUKE: we need to support dynamic replace WLAN binding when add WAN, so display WARNING instead return error. 
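/* The loop below therefore "steals" WLAN devices that are already bound elsewhere: the
 * device bit is cleared from the old WAN's wlan0_dev_binding_mask and its
 * wlan0BindDecision entry is reset with only a WARNING, while devices that do not exist
 * at all still fail with RT_ERR_RG_WLAN_BINDING_ABSENCE. */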
for(i=0;i<MAX_WLAN_DEVICE_NUM;i++) { if(wanintf->wlan0_dev_binding_mask&(0x1<<i)) { if(!rg_db.systemGlobal.wlan0BindDecision[i].exist) RETURN_ERR(RT_ERR_RG_WLAN_BINDING_ABSENCE); else if(rg_db.systemGlobal.wlan0BindDecision[i].set_bind) { WARNING("WLAN0 Dev[%d] Binded from WAN[%d] to this WAN!!",i,rg_db.systemGlobal.wlan0BindDecision[i].bind_wanIntf);//RETURN_ERR(RT_ERR_RG_WLAN_BINDING_OVERLAP); //remove this WLAN device from original binding WAN rg_db.systemGlobal.interfaceInfo[rg_db.systemGlobal.wlan0BindDecision[i].bind_wanIntf].storedInfo.wan_intf.wan_intf_conf.wlan0_dev_binding_mask&=(~(0x1<<i)); rg_db.systemGlobal.wlan0BindDecision[i].set_bind=0; rg_db.systemGlobal.wlan0BindDecision[i].bind_wanIntf=0; } } } #endif if(wanintf->port_binding_mask.portmask>=(1<<RTK_RG_PORT_MAX)) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if((wanintf->port_binding_mask.portmask&(1<<RTK_RG_MAC_PORT_CPU)) > 0) //port-binding should not be CPU port RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if(wanintf->gmac.octet[0]&1) //interface MAC can not use multicast address RETURN_ERR(RT_ERR_RG_INVALID_PARAM); #if defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) if(wanintf->port_binding_mask.portmask != 0 || wanintf->wlan0_dev_binding_mask != 0) //xdsl not support binding function RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if(wanintf->vlan_based_pri_enable==RTK_RG_ENABLED) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if(wanintf->isIVL==1) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); #endif #ifdef CONFIG_MASTER_WLAN0_ENABLE //20160524LUKE: check wlan-device existence _rtk_rg_check_wlan_device_exist_or_not(); #endif //for non-zero MAC address, we should add it as static gateway mac in LUT table if(memcmp(wanintf->gmac.octet,zeroMac.octet,ETHER_ADDR_LEN)) addToStaticMAC=1; vlanID=wanintf->egress_vlan_id; wanPmsk.bits[0]=0x1<<wanintf->wan_port_idx; //pon is the 4th bit and RGMII is 5th //all_lan_pmsk.bits[0]=wanPmsk.bits[0]; //all_lan_etpmsk.bits[0]=0; //all_lan_utagpmsk.bits[0]=0; //Check if we are re-add same WAN interface //20141229LUKE: add forcedAdd option to create new interface even parameters are the same. 
(for static route) if(!wanintf->forcedAddNewIntf) { for(i=0;i<rg_db.systemGlobal.wanIntfTotalNum;i++) { if(wanintf->wan_type==rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->wan_type && !memcmp(wanintf->gmac.octet,rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->gmac.octet,ETHER_ADDR_LEN) && wanintf->wan_port_idx==rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->wan_port_idx && wanintf->egress_vlan_tag_on==rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_tag_on && wanintf->egress_vlan_id==rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_id && wanintf->vlan_based_pri_enable==rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->vlan_based_pri_enable && wanintf->vlan_based_pri==rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->vlan_based_pri && wanintf->isIVL==rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->isIVL && wanintf->none_internet==rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->none_internet) { reAddSameWan=1; intfIdx=rg_db.systemGlobal.wanIntfGroup[i].index; //keep DEBUG("reAdd WAN[%d]!!",intfIdx); //Convert RG portmask to RTK portmask _rtk_rg_portmask_translator(rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->port_binding_mask,&out_mac_pmask,&out_ext_pmask); old_pmsk=out_mac_pmask.bits[0]; old_extpmsk=out_ext_pmask.bits[0]>>0x1; //FIXME:translator contain cpu port, but binding should not contain it, so shift it DEBUG("old pmsk is %x, old extpmsk is %x",old_pmsk,old_extpmsk); _rtk_rg_portmask_translator(wanintf->port_binding_mask,&out_mac_pmask,&out_ext_pmask); delete_pmsk=old_pmsk&(~out_mac_pmask.bits[0]); delete_extpmsk=old_extpmsk&(~(out_ext_pmask.bits[0]>>0x1)); DEBUG("need to be delete: pmsk=%x, extpmsk=%x",delete_pmsk,delete_extpmsk); break; } } if(reAddSameWan) { #ifdef CONFIG_MASTER_WLAN0_ENABLE if((wanintf->port_binding_mask.portmask&(0x1<<RTK_RG_EXT_PORT0))) { //Check if the WLAN0's devices are absence or already binded by other WAN separately for(i=0;i<WLAN_DEVICE_NUM;i++) { if(rg_db.systemGlobal.wlan0BindDecision[i].exist && rg_db.systemGlobal.wlan0BindDecision[i].set_bind && rg_db.systemGlobal.wlan0BindDecision[i].bind_wanIntf!=intfIdx) RETURN_ERR(RT_ERR_RG_WLAN_BINDING_CONFLICT); } } if(isMasterSSidBind) { for(i=0;i<rg_db.systemGlobal.wanIntfTotalNum;i++) { if(rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->port_binding_mask.portmask&(0x1<<RTK_RG_EXT_PORT0) && rg_db.systemGlobal.wanIntfGroup[i].index!=intfIdx) RETURN_ERR(RT_ERR_RG_WLAN_BINDING_CONFLICT); } } #ifdef CONFIG_DUALBAND_CONCURRENT if((wanintf->port_binding_mask.portmask&(0x1<<RTK_RG_EXT_PORT1))) { //Check if the WLAN1's devices are absence or already binded by other WAN separately for(i=WLAN_DEVICE_NUM;i<MAX_WLAN_DEVICE_NUM;i++) { if(rg_db.systemGlobal.wlan0BindDecision[i].exist && rg_db.systemGlobal.wlan0BindDecision[i].set_bind && rg_db.systemGlobal.wlan0BindDecision[i].bind_wanIntf!=intfIdx) RETURN_ERR(RT_ERR_RG_WLAN_BINDING_CONFLICT); } } if(rg_db.systemGlobal.enableSlaveSSIDBind && isSlaveSSidBind) { for(i=0;i<rg_db.systemGlobal.wanIntfTotalNum;i++) { if(rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->port_binding_mask.portmask&(0x1<<RTK_RG_EXT_PORT1) && rg_db.systemGlobal.wanIntfGroup[i].index!=intfIdx) RETURN_ERR(RT_ERR_RG_WLAN_BINDING_CONFLICT); } } #endif #endif #ifdef CONFIG_MASTER_WLAN0_ENABLE for(i=0;i<WLAN_DEVICE_NUM;i++) { if(rg_db.systemGlobal.wlan0BindDecision[i].exist) { //20140723LUKE: Clear old wlan0 binding if(rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.wan_intf.wan_intf_conf.wlan0_dev_binding_mask&(0x1<<i)) { 
rg_db.systemGlobal.wlan0BindDecision[i].set_bind=0; rg_db.systemGlobal.wlan0BindDecision[i].bind_wanIntf=0; assert_ok(rtk_rg_apollo_wlanDevBasedCVlanId_set(0,i,rg_db.systemGlobal.portBasedVID[RTK_RG_EXT_PORT0])); } } } #ifdef CONFIG_DUALBAND_CONCURRENT for(i=WLAN_DEVICE_NUM;i<MAX_WLAN_DEVICE_NUM;i++) { if(rg_db.systemGlobal.wlan0BindDecision[i].exist) { //20140723LUKE: Clear old wlan0 binding if(rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.wan_intf.wan_intf_conf.wlan0_dev_binding_mask&(0x1<<i)) { rg_db.systemGlobal.wlan0BindDecision[i].set_bind=0; rg_db.systemGlobal.wlan0BindDecision[i].bind_wanIntf=0; if(rg_db.systemGlobal.enableSlaveSSIDBind) assert_ok(rtk_rg_apollo_wlanDevBasedCVlanId_set(0,i,rg_db.systemGlobal.portBasedVID[RTK_RG_EXT_PORT1])); } } } #endif #endif goto RESET_INTF; } } #ifdef CONFIG_MASTER_WLAN0_ENABLE if((wanintf->port_binding_mask.portmask&(0x1<<RTK_RG_EXT_PORT0))) { //Check if the WLAN0's devices are absence or already binded by other WAN separately for(i=0;i<WLAN_DEVICE_NUM;i++) { if(rg_db.systemGlobal.wlan0BindDecision[i].exist && rg_db.systemGlobal.wlan0BindDecision[i].set_bind) RETURN_ERR(RT_ERR_RG_WLAN_BINDING_CONFLICT); } } if(isMasterSSidBind) { for(i=0;i<rg_db.systemGlobal.wanIntfTotalNum;i++) { if(rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->port_binding_mask.portmask&(0x1<<RTK_RG_EXT_PORT0)) RETURN_ERR(RT_ERR_RG_WLAN_BINDING_CONFLICT); } } #ifdef CONFIG_DUALBAND_CONCURRENT if((wanintf->port_binding_mask.portmask&(0x1<<RTK_RG_EXT_PORT1))) { //Check if the WLAN1's devices are absence or already binded by other WAN separately for(i=WLAN_DEVICE_NUM;i<MAX_WLAN_DEVICE_NUM;i++) { if(rg_db.systemGlobal.wlan0BindDecision[i].exist && rg_db.systemGlobal.wlan0BindDecision[i].set_bind) RETURN_ERR(RT_ERR_RG_WLAN_BINDING_CONFLICT); } } if(rg_db.systemGlobal.enableSlaveSSIDBind && isSlaveSSidBind) { for(i=0;i<rg_db.systemGlobal.wanIntfTotalNum;i++) { if(rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->port_binding_mask.portmask&(0x1<<RTK_RG_EXT_PORT1)) RETURN_ERR(RT_ERR_RG_WLAN_BINDING_CONFLICT); } } #endif #endif //Check if we set two untag bridge WAN at the same port if(wanintf->wan_type==RTK_RG_BRIDGE && wanintf->egress_vlan_tag_on==0) { for(i=0;i<rg_db.systemGlobal.wanIntfTotalNum;i++) { if(rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_tag_on==0 && rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->wan_type==RTK_RG_BRIDGE && rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->wan_port_idx==wanintf->wan_port_idx) RETURN_ERR(RT_ERR_RG_UNTAG_BRIDGEWAN_TWICE); } } //Check if we didn't add LAN interface before un-binding bridge WAN //if(wanintf->wan_type==RTK_RG_BRIDGE && wanintf->port_binding_mask.portmask==0 && rg_db.systemGlobal.lanIntfTotalNum==0) //RETURN_ERR(RT_ERR_RG_LAN_NOT_EXIST); //Check if VLAN init if(rg_db.systemGlobal.vlanInit==0) RETURN_ERR(RT_ERR_RG_NOT_INIT); #if defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) #else //Check VLAN-binding use this VLAN or not for(i=0;i<MAX_BIND_SW_TABLE_SIZE;i++) if(rg_db.bind[i].valid && rg_db.bind[i].rtk_bind.vidLan==wanintf->egress_vlan_id) RETURN_ERR(RT_ERR_RG_VLAN_USED_BY_VLANBINDING); //Checl Customer VLAN use this VLAN or not if(rg_db.vlan[wanintf->egress_vlan_id].valid && rg_db.vlan[wanintf->egress_vlan_id].addedAsCustomerVLAN) RETURN_ERR(RT_ERR_RG_VLAN_USED_BY_CVLAN); #endif //Check interface table available or not if(rg_db.systemGlobal.intfIdxForReset == -1) { for(i=0;i<MAX_NETIF_SW_TABLE_SIZE;i++) { #if defined(CONFIG_RTL9600_SERIES) //PATCH_20131022:Because CF rule will use WAN intf as zero, and 
multicast will use index 7, so these indexes are avoided!! if( (i==0) || (i ==(MAX_NETIF_HW_TABLE_SIZE-1)) ) continue ; #endif if(rg_db.systemGlobal.interfaceInfo[i].valid == 0) break; } if(i==MAX_NETIF_SW_TABLE_SIZE) RETURN_ERR(RT_ERR_RG_ENTRY_FULL); intfIdx=i; //rtlglue_printf("rg_db.systemGlobal.intfIdxForReset == -1(%d): intfIdx=%d \n",__LINE__,intfIdx); } else { //we are reset the added wan interface, so give me the same index!! intfIdx = rg_db.systemGlobal.intfIdxForReset; rg_db.systemGlobal.intfIdxForReset = -1; //rtlglue_printf("else(%d): intfIdx=%d \n",__LINE__,intfIdx); } //Check pppoe table available or not if wanType=pppoe or pppoe_dslite if((wanintf->wan_type == RTK_RG_PPPoE)||(wanintf->wan_type == RTK_RG_PPPoE_DSLITE)){ for(i=0;i<MAX_PPPOE_SW_TABLE_SIZE;i++){ if(rg_db.pppoe[i].rtk_pppoe.sessionID==0) break; } if(i==MAX_PPPOE_SW_TABLE_SIZE) RETURN_ERR(RT_ERR_RG_ENTRY_FULL); pppoeIdx=i; rg_db.pppoe[i].rtk_pppoe.sessionID=0xffffffff; //prevent another interface choose same PPPoE index } #if defined(CONFIG_RTL9602C_SERIES) //20151001LUKE: lookup for invalid DSlite entry if wan_type is DSlite or PPPoE_DSlite if((wanintf->wan_type == RTK_RG_DSLITE)||(wanintf->wan_type == RTK_RG_PPPoE_DSLITE)){ for(i=0;i<MAX_DSLITE_SW_TABLE_SIZE;i++){ if(rg_db.dslite[i].rtk_dslite.valid==DISABLED) break; } if(i==MAX_DSLITE_SW_TABLE_SIZE) RETURN_ERR(RT_ERR_RG_ENTRY_FULL); rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.wan_intf.dslite_idx=i; rg_db.dslite[i].rtk_dslite.valid=ENABLED; rg_db.dslite[i].rtk_dslite.index=i; rg_db.dslite[i].intfIdx=intfIdx; } #endif RESET_INTF: //20160428LUKE: transform from RGMII to PON #if !defined(CONFIG_RTL9602C_SERIES) if(rg_db.systemGlobal.pppoeGponSmallbandwithControl && (wanintf->wan_type)==RTK_RG_PPPoE && (wanintf->wan_port_idx)==RTK_RG_PORT_PON){ DEBUG("Special change WAN_PORT from PON to RGMII."); wanintf->wan_port_idx=RTK_RG_PORT_RGMII; wanPmsk.bits[0]|=0x1<<RTK_RG_PORT_RGMII; } #endif //------------------ Critical Section start -----------------------// //rg_lock(&rg_kernel.interfaceLock); //20140723LUKE: bypass set interface if(reAddSameWan)goto DELETE_OLD_BIND; //Set up interface table errorno=RT_ERR_RG_INTF_SET_FAIL; bzero(&intfEntry, sizeof(rtk_l34_netif_entry_t)); intfEntry.valid=1; //20151002LUKE: for bridge WAN interface should not fill GMAC in interface table! if(wanintf->wan_type != RTK_RG_BRIDGE) memcpy(intfEntry.gateway_mac.octet, wanintf->gmac.octet,ETHER_ADDR_LEN); intfEntry.mac_mask=0x7; //no mask intfEntry.vlan_id=vlanID; intfEntry.enable_rounting=1; intfEntry.mtu=1500; //This dummy value should be fine since we don't care MTU in L2 bridging!! #if defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) && defined(CONFIG_RTL_8685S_HWNAT) intfEntry.v6mtu=1500; intfEntry.enable_v6Route=1; #elif defined(CONFIG_RTL9602C_SERIES) | defined(CONFIG_RTL9607C_SERIES) if(wanintf->wan_type != RTK_RG_BRIDGE) intfEntry.isL34=1; intfEntry.isCtagIf=wanintf->egress_vlan_tag_on; if((wanintf->wan_type == RTK_RG_DSLITE)||(wanintf->wan_type == RTK_RG_PPPoE_DSLITE)) { intfEntry.dslite_state=ENABLED; intfEntry.dslite_idx=rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.wan_intf.dslite_idx; } else { intfEntry.dslite_state=DISABLED; intfEntry.dslite_idx=0; } #endif #if defined(CONFIG_RTL9600_SERIES) //Patch for 0601 and 6266, when binding to interface happened, //the packet size have 2 byte would't decrease, causing TRAP reason 224. 
//therefore the hardware setting should be set as preferred value plus 2 here if(rg_kernel.apolloChipId==APOLLOMP_CHIP_ID && rg_db.systemGlobal.initParam.macBasedTagDecision) intfEntry.mtu=1502; #endif // for testing upstream(jumbo frame) binding to bridge wan if(wanintf->wan_type == RTK_RG_BRIDGE) intfEntry.mtu=16383; //14 bits //if VLAN-based and bridge WAN MAC is zero, set this netif entry as invalid!!(for interface index synchronisation) if(wanintf->wan_type==RTK_RG_BRIDGE && rg_db.systemGlobal.initParam.macBasedTagDecision==0 && !addToStaticMAC) intfEntry.valid=0; DEBUG("Add NETIF[%d] VLAN[%d]\n",intfIdx,intfEntry.vlan_id); ret = RTK_L34_NETIFTABLE_SET(intfIdx, &intfEntry); DEBUG("ret:%x\n",ret); if(ret!=RT_ERR_OK)goto RET_INTF_ERR; //reset software MTU should keep original MTU, only hardware MTU need to patch!! if(wanintf->wan_type != RTK_RG_BRIDGE) // for testing upstream(jumbo frame) binding to bridge wan rg_db.netif[intfIdx].rtk_netif.mtu=1500; DELETE_OLD_BIND: //Convert RG portmask to RTK portmask _rtk_rg_portmask_translator(wanintf->port_binding_mask,&out_mac_pmask,&out_ext_pmask); tmppmsk=out_mac_pmask.bits[0]; #if defined(CONFIG_RG_FLOW_BASED_PLATFORM) tmpexpmsk=out_ext_pmask.bits[0]; #else tmpexpmsk=out_ext_pmask.bits[0]>>0x1; //FIXME:translator contain cpu port, but binding should not contain it, so shift it #endif #if CONFIG_ACL_EGRESS_WAN_INTF_TRANSFORM orig_pbdmsk.portmask=rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.wan_intf.wan_intf_conf.port_binding_mask.portmask; //keep orig port-binding mask for ACL rearrange #endif DEBUG("the binding mask is %x, extmsk is %x, wlan0_bind_mask is %x",tmppmsk,tmpexpmsk,wanintf->wlan0_dev_binding_mask); #if defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) #else //Check if there is other interface or vlan-binding set same port as binding port if(rg_db.systemGlobal.initParam.macBasedTagDecision) { errorno=RT_ERR_RG_PORT_BIND_SET_FAIL; for(i=0;i<MAX_BIND_SW_TABLE_SIZE;i++) { if(rg_db.bind[i].valid) { //we should not assign the same port binding with different Wan interface //20140718LUKE: we need to support dynamic replace port binding when add WAN, so display WARNING instead return error. ret=0; if(reAddSameWan && rg_db.nexthop[rg_db.wantype[rg_db.bind[i].rtk_bind.wanTypeIdx].rtk_wantype.nhIdx].rtk_nexthop.ifIdx==intfIdx) { //keep the unchange port-binding rules for the same WAN! 
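/* After unchanged bindings of this same WAN are skipped (the "continue" just below),
 * overlapping entries are resolved: a binding-table entry whose port/ext-port mask
 * touches the ports this WAN now claims (or is releasing) is cleared in hardware through
 * RTK_L34_BINDINGTABLE_SET(), marked INVALID_ENTRY in software, and the overlapping
 * ports are removed from the original WAN's port_binding_mask; vlan-binding entries
 * (vidLan!=0) only get a warning and are left untouched. */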
if((rg_db.bind[i].rtk_bind.portMask.bits[0]&tmppmsk)>0 || (rg_db.bind[i].rtk_bind.extPortMask.bits[0]&tmpexpmsk)>0) continue; } if((rg_db.bind[i].rtk_bind.portMask.bits[0]&tmppmsk)>0 || (rg_db.bind[i].rtk_bind.portMask.bits[0]&delete_pmsk)>0) { //RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if(rg_db.bind[i].rtk_bind.vidLan==0) { if(!reAddSameWan)WARNING("Portmask 0x%x Binded from WAN[%d] to this WAN!!",rg_db.bind[i].rtk_bind.portMask.bits[0],rg_db.nexthop[rg_db.wantype[rg_db.bind[i].rtk_bind.wanTypeIdx].rtk_wantype.nhIdx].rtk_nexthop.ifIdx); ret=1; } else { if(!reAddSameWan)WARNING("Vlan[%d]@Portmask 0x%x Binded from WAN[%d] will add port-binding to this WAN!!(Vlan-binding is untouched)",rg_db.bind[i].rtk_bind.vidLan,rg_db.bind[i].rtk_bind.portMask.bits[0],rg_db.nexthop[rg_db.wantype[rg_db.bind[i].rtk_bind.wanTypeIdx].rtk_wantype.nhIdx].rtk_nexthop.ifIdx); } } if((rg_db.bind[i].rtk_bind.extPortMask.bits[0]&tmpexpmsk)>0 || (rg_db.bind[i].rtk_bind.extPortMask.bits[0]&delete_extpmsk)>0) { //RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if(rg_db.bind[i].rtk_bind.vidLan==0) { if(!reAddSameWan)WARNING("ExtPortmask 0x%x Binded from WAN[%d] to WAN!!",rg_db.bind[i].rtk_bind.extPortMask.bits[0],rg_db.nexthop[rg_db.wantype[rg_db.bind[i].rtk_bind.wanTypeIdx].rtk_wantype.nhIdx].rtk_nexthop.ifIdx); ret=1; } else { if(!reAddSameWan)WARNING("Vlan[%d]@ExtPortmask 0x%x Binded from WAN[%d] will add port-binding to this WAN!!(Vlan-binding is untouched)",rg_db.bind[i].rtk_bind.vidLan,rg_db.bind[i].rtk_bind.extPortMask.bits[0],rg_db.nexthop[rg_db.wantype[rg_db.bind[i].rtk_bind.wanTypeIdx].rtk_wantype.nhIdx].rtk_nexthop.ifIdx); } } if(ret) { rtk_rg_table_bind_t ori_bindEntry; memcpy(&ori_bindEntry,&rg_db.bind[i],sizeof(rtk_rg_table_bind_t)); //store software bzero(&rg_db.bind[i],sizeof(rtk_rg_table_bind_t)); //clear software ret=RTK_L34_BINDINGTABLE_SET(i,&rg_db.bind[i].rtk_bind); //clear hardware memcpy(&rg_db.bind[i],&ori_bindEntry,sizeof(rtk_rg_table_bind_t)); //recovery software if(ret==RT_ERR_CHIP_NOT_SUPPORTED) { errorno=RT_ERR_RG_CHIP_NOT_SUPPORT; goto RET_OVERLAP_BIND_ERR; } if(ret!=RT_ERR_OK)goto RET_OVERLAP_BIND_ERR; rg_db.bind[i].valid = INVALID_ENTRY; //remove the overlap port from original binding WAN if(ori_bindEntry.rtk_bind.portMask.bits[0]>0) rg_db.systemGlobal.interfaceInfo[rg_db.nexthop[rg_db.wantype[ori_bindEntry.rtk_bind.wanTypeIdx].rtk_wantype.nhIdx].rtk_nexthop.ifIdx].storedInfo.wan_intf.wan_intf_conf.port_binding_mask.portmask&=(~(ori_bindEntry.rtk_bind.portMask.bits[0])); else rg_db.systemGlobal.interfaceInfo[rg_db.nexthop[rg_db.wantype[ori_bindEntry.rtk_bind.wanTypeIdx].rtk_wantype.nhIdx].rtk_nexthop.ifIdx].storedInfo.wan_intf.wan_intf_conf.port_binding_mask.portmask&=(~(ori_bindEntry.rtk_bind.extPortMask.bits[0]<<RTK_RG_EXT_PORT0)); #if CONFIG_ACL_EGRESS_WAN_INTF_TRANSFORM changeBindingPmsk=1; #endif } } } } #endif //end defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) //Check if Bridge mode //20150303LUKE: we should not block broadcast from WAN when macBasedTagDecision is on. if((rg_db.systemGlobal.initParam.macBasedTagDecision==0)&&(wanintf->wan_type==RTK_RG_BRIDGE)) { //Get all Lan interface and add WAN port to their VLAN for(i=0;i<rg_db.systemGlobal.lanIntfTotalNum;i++) { tmpVid=rg_db.systemGlobal.lanIntfGroup[i].p_intfInfo->p_lanIntfConf->intf_vlan_id; //Check if there is LAN interface has same VLAN id with me, if so, disable broadcast to avoid re-send packet!! if(tmpVid==vlanID)disableBroadcast=1; //20140424LUKE:we don't add WAN to LAN now, since binding scenario need more complicated setting!! 
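/* The #if 0 block that follows is the retired "add WAN port into every LAN VLAN" path,
 * kept for reference; in this VLAN-based branch (macBasedTagDecision==0, bridge WAN) the
 * loop above now only flags disableBroadcast when a LAN interface shares the bridge
 * WAN's VLAN id. */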
#if 0 if(rg_db.systemGlobal.initParam.macBasedTagDecision) //only add to LAN interface if MAC-based { memcpy(&mbpmsk, &rg_db.vlan[tmpVid].MemberPortmask,sizeof(rtk_portmask_t)); memcpy(&utpmsk, &rg_db.vlan[tmpVid].UntagPortmask,sizeof(rtk_portmask_t)); memcpy(&etpmsk, &rg_db.vlan[tmpVid].Ext_portmask,sizeof(rtk_portmask_t)); //Let WAN port become active in LAN's VLAN member port mask //mbpmsk.bits[0] |= wanintf->wan_port_mask.bits[0]; mbpmsk.bits[0] |= wanPmsk.bits[0]; //add wan port to lan's untag set by egress_tag_on setting for(j=0;j<rg_db.systemGlobal.wanIntfTotalNum;j++) { if(rg_db.systemGlobal.wanIntfGroup[j].p_wanIntfConf->wan_type==RTK_RG_BRIDGE && wanPmsk.bits[0]&(0x1<<rg_db.systemGlobal.wanIntfGroup[j].p_wanIntfConf->wan_port_idx)) break; } if(j==rg_db.systemGlobal.wanIntfTotalNum) //no before bridge WAN added at this port { if(wanintf->egress_vlan_tag_on) //egress tagged packet utpmsk.bits[0]&=(~(wanPmsk.bits[0])); //set WAN port to 0 in untag set (tagging) else utpmsk.bits[0]|=wanPmsk.bits[0]; //set WAN port to 1 in untag set (untagging) } //Keep Lan's member port mask in all_lan_pmsk and all_lan_etpmsk all_lan_pmsk.bits[0] |= mbpmsk.bits[0]; all_lan_etpmsk.bits[0] |= etpmsk.bits[0]; all_lan_utagpmsk.bits[0] |= utpmsk.bits[0]; errorno=RT_ERR_RG_VLAN_SET_FAIL; ret = RTK_VLAN_PORT_SET(tmpVid, &mbpmsk, &utpmsk); if(ret!=RT_ERR_OK)goto RET_BRIDGE_ERR; } #endif } } //L3 routing interface (Static or DHCP or PPPOE) will reach here directly //Set up VLAN memset(&ori_vlanEntry,0,sizeof(rtk_rg_table_vlan_t)); mbpmsk=wanPmsk; //WAN port mask etpmsk.bits[0]=0; utpmsk.bits[0]=0; errorno=RT_ERR_RG_VLAN_SET_FAIL; ret = RTK_VLAN_CREATE(vlanID); if(ret == RT_ERR_VLAN_EXIST) { //keep all information of original VLAN memcpy(&ori_vlanEntry, &rg_db.vlan[vlanID],sizeof(rtk_rg_table_vlan_t)); vlan_exist=1; } else if(ret!=RT_ERR_OK) goto RET_VLAN_ERR; //Set up its member port, extension port set, and FID mode //decide to use IVL or SVL for VLAN tag decision, IVL by untag set; SVL by DMAC2CVID if(wanintf->isIVL) fidMode=VLAN_FID_IVL; //vlan-based else fidMode=VLAN_FID_SVL; //mac-based ret = RTK_VLAN_FIDMODE_SET(vlanID, fidMode); //Patch 20121130 if(ret!=RT_ERR_OK)goto RET_VLAN_ERR; ret = RTK_VLAN_FID_SET(vlanID, WAN_FID); //Patch 20121130 if(ret!=RT_ERR_OK)goto RET_VLAN_ERR; mbpmsk.bits[0]|=ori_vlanEntry.MemberPortmask.bits[0]; etpmsk.bits[0]|=ori_vlanEntry.Ext_portmask.bits[0]; utpmsk.bits[0]|=ori_vlanEntry.UntagPortmask.bits[0]; //20140424LUKE:we don't add LAN to WAN now, since binding scenario need more complicated setting!! 
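/* Likewise, the #if 0 block below is the retired path that added all LAN members into
 * the WAN VLAN directly; the active #else path goes through _rtk_rg_updateBindWanIntf()
 * when macBasedTagDecision is enabled, taking LAN membership from fwdVLAN_BIND_INTERNET
 * for internet WANs and from a freshly created otherWanVlan for bound none_internet
 * WANs. */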
#if 0 //only add LAN member to WAN when DMAC2CVID is turn on (mac-based) if(wanintf->wan_type == RTK_RG_BRIDGE && rg_db.systemGlobal.initParam.macBasedTagDecision==1) { //Patch for flooding 20121129, noneed, because we change to svl at all /*if(vlan_exist==0) { ret = RTK_VLAN_FIDMODE_SET(vlanID, VLAN_FID_SVL); if(ret!=RT_ERR_OK)goto RET_VLAN_ERR; ret = RTK_VLAN_FID_SET(vlanID, LAN_FID); if(ret!=RT_ERR_OK)goto RET_VLAN_ERR; }*/ mbpmsk.bits[0]|=all_lan_pmsk.bits[0]; //add LAN port into WAN VLAN member port mask etpmsk.bits[0]|=all_lan_etpmsk.bits[0]; //add LAN ext-port into WAN VLAN ext-port mask utpmsk.bits[0]|=all_lan_utagpmsk.bits[0]; //add LAN untag-port into WAN VLAN untag-port mask } #else //20140502LUKE:Only turn on this when macBasedTagDecision is set to 1 if(rg_db.systemGlobal.initParam.macBasedTagDecision) { _rtk_rg_updateBindWanIntf(wanintf); //update fwdVLAN_BIND_INTERNET member and set un-bind LAN to it //Set WAN VID's LAN member if(wanintf->port_binding_mask.portmask == 0 && wanintf->wlan0_dev_binding_mask == 0) //non-binding { //dismiss all LAN member... //mbpmsk.bits[0]&=(~(0x1<<RTK_RG_PORT0|0x1<<RTK_RG_PORT1|0x1<<RTK_RG_PORT2|0x1<<RTK_RG_PORT3)); //etpmsk.bits[0]&=(~( (0x1<<(RTK_RG_EXT_PORT0-RTK_RG_PORT_CPU))|(0x1<<(RTK_RG_EXT_PORT1-RTK_RG_PORT_CPU)))); #ifdef CONFIG_MASTER_WLAN0_ENABLE //dismiss all WLAN0 Device in VLAN's Mask //rg_db.vlan[vlanID].wlan0DevMask=0x0; //rg_db.vlan[vlanID].wlan0UntagMask=0x0; #endif //internet WAN's LAN member is come from fwdVLAN_BIND_INTERNET if(!wanintf->none_internet && wanintf->wan_type == RTK_RG_BRIDGE) { //assign LAN member to fwdVLAN_BIND_INTERNET's member mbpmsk.bits[0]|=rg_db.vlan[rg_db.systemGlobal.initParam.fwdVLAN_BIND_INTERNET].MemberPortmask.bits[0]; etpmsk.bits[0]|=rg_db.vlan[rg_db.systemGlobal.initParam.fwdVLAN_BIND_INTERNET].Ext_portmask.bits[0]; #ifdef CONFIG_MASTER_WLAN0_ENABLE //assign fwdVLAN_BIND_INTERNET's WLAN0 Device to VLAN's Mask rg_db.vlan[vlanID].wlan0DevMask|=rg_db.vlan[rg_db.systemGlobal.initParam.fwdVLAN_BIND_INTERNET].wlan0DevMask; rg_db.vlan[vlanID].wlan0UntagMask|=rg_db.vlan[rg_db.systemGlobal.initParam.fwdVLAN_BIND_INTERNET].wlan0UntagMask; #endif //2040516LUKE:All LAN port should be untag, CPU port will follow WAN port's setting! utpmsk.bits[0]|=RTK_RG_ALL_LAN_PORTMASK; } if(reAddSameWan && rg_db.systemGlobal.otherWanVlan[intfIdx]!=0) { //delete Other Wan's VLANID used for traffic isolation ret = RTK_VLAN_DESTROY(rg_db.systemGlobal.otherWanVlan[intfIdx]); errorno=RT_ERR_RG_VLAN_SET_FAIL; if(ret!=RT_ERR_OK)goto RET_VLAN_ERR; rg_db.systemGlobal.otherWanVlan[intfIdx]=0; } } else { if(wanintf->none_internet) { //create Other VLAN! if(reAddSameWan) errorno=_rtk_rg_createOtherWanVlan(wanintf,&ret,intfIdx); else errorno=_rtk_rg_createOtherWanVlan(wanintf,&ret,FAIL); if(errorno==RT_ERR_RG_OK) { //dismiss all LAN member... 
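//Note: for a none_internet WAN without port-binding, _rtk_rg_createOtherWanVlan() allocates a
//separate VLAN so its traffic stays isolated from the internet WAN's LAN members; the allocated
//ID is stored in otherWanVlan[intfIdx] below and destroyed again on interface delete or re-add.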
//mbpmsk.bits[0]&=(~(0x1<<RTK_RG_PORT0|0x1<<RTK_RG_PORT1|0x1<<RTK_RG_PORT2|0x1<<RTK_RG_PORT3)); //etpmsk.bits[0]&=(~( (0x1<<(RTK_RG_EXT_PORT0-RTK_RG_PORT_CPU))|(0x1<<(RTK_RG_EXT_PORT1-RTK_RG_PORT_CPU)))); #ifdef CONFIG_MASTER_WLAN0_ENABLE //dismiss all WLAN0 Device in VLAN's Mask //rg_db.vlan[vlanID].wlan0DevMask=0x0; //rg_db.vlan[vlanID].wlan0UntagMask=0x0; #endif if(wanintf->wan_type == RTK_RG_BRIDGE) { //assign LAN member to Other VLAN's member mbpmsk.bits[0]|=rg_db.vlan[ret].MemberPortmask.bits[0]; etpmsk.bits[0]|=rg_db.vlan[ret].Ext_portmask.bits[0]; #ifdef CONFIG_MASTER_WLAN0_ENABLE //assign Other VLAN's WLAN0 Device to mask of WAN's VLAN rg_db.vlan[vlanID].wlan0DevMask|=rg_db.vlan[ret].wlan0DevMask; rg_db.vlan[vlanID].wlan0UntagMask|=rg_db.vlan[ret].wlan0UntagMask; #endif //assign WAN port to Other VLAN's member rg_db.vlan[ret].MemberPortmask.bits[0]|=wanPmsk.bits[0]; errorno = RTK_VLAN_PORT_SET(ret, &rg_db.vlan[ret].MemberPortmask, &rg_db.vlan[ret].UntagPortmask); if(errorno!=RT_ERR_OK) goto RET_VLAN_ERR; //2040925LUKE:All LAN port would be untag utpmsk.bits[0]|=RTK_RG_ALL_LAN_PORTMASK; } //Store this VLAN ID in WAN's data structure rg_db.systemGlobal.otherWanVlan[intfIdx]=ret; } else { //fail, recovery.. _rtk_rg_updateBindWanIntf(NULL); goto RET_VLAN_ERR; } } else //internet binding WAN { //dismiss all LAN member... //mbpmsk.bits[0]&=(~(0x1<<RTK_RG_PORT0|0x1<<RTK_RG_PORT1|0x1<<RTK_RG_PORT2|0x1<<RTK_RG_PORT3)); //etpmsk.bits[0]&=(~( (0x1<<(RTK_RG_EXT_PORT0-RTK_RG_PORT_CPU))|(0x1<<(RTK_RG_EXT_PORT1-RTK_RG_PORT_CPU)))); #ifdef CONFIG_MASTER_WLAN0_ENABLE //dismiss all WLAN0 Device in VLAN's Mask //rg_db.vlan[vlanID].wlan0DevMask=0x0; //rg_db.vlan[vlanID].wlan0UntagMask=0x0; #endif if(wanintf->wan_type == RTK_RG_BRIDGE) { //assign LAN member to fwdVLAN_BIND_INTERNET's member //20141121LUKE: should not include port member which not in any LAN!! for(i=0;i<rg_db.systemGlobal.lanIntfTotalNum;i++) { _rtk_rg_portmask_translator(rg_db.systemGlobal.lanIntfGroup[i].p_intfInfo->p_lanIntfConf->port_mask, &out_mac_pmask, &out_ext_pmask); mbpmsk.bits[0]|=(rg_db.vlan[rg_db.systemGlobal.initParam.fwdVLAN_BIND_INTERNET].MemberPortmask.bits[0]&out_mac_pmask.bits[0]); etpmsk.bits[0]|=(rg_db.vlan[rg_db.systemGlobal.initParam.fwdVLAN_BIND_INTERNET].Ext_portmask.bits[0]&out_ext_pmask.bits[0]); #ifdef CONFIG_MASTER_WLAN0_ENABLE //assign fwdVLAN_BIND_INTERNET's WLAN0 Device to VLAN's Mask rg_db.vlan[vlanID].wlan0DevMask|=(rg_db.vlan[rg_db.systemGlobal.initParam.fwdVLAN_BIND_INTERNET].wlan0DevMask&rg_db.vlan[rg_db.systemGlobal.lanIntfGroup[i].p_intfInfo->p_lanIntfConf->intf_vlan_id].wlan0DevMask); rg_db.vlan[vlanID].wlan0UntagMask|=(rg_db.vlan[rg_db.systemGlobal.initParam.fwdVLAN_BIND_INTERNET].wlan0DevMask&rg_db.vlan[rg_db.systemGlobal.lanIntfGroup[i].p_intfInfo->p_lanIntfConf->intf_vlan_id].wlan0UntagMask); #endif } //2040516LUKE:All LAN port should be untag, CPU port will follow WAN port's setting! 
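//Note: for the binding internet bridge WAN above, only ports that actually belong to some LAN
//interface are inherited from fwdVLAN_BIND_INTERNET (each LAN port mask is translated and AND-ed
//in first). The OR below then puts every LAN port into the untag set so bridged-down traffic
//egresses untagged; the CPU port's tag state is decided later by egress_vlan_tag_on.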
utpmsk.bits[0]|=RTK_RG_ALL_LAN_PORTMASK; } } } } #endif //end #if 0 else #if defined(CONFIG_RG_FLOW_BASED_PLATFORM) mbpmsk.bits[0]|=0x1<<RTK_RG_MAC_PORT_MASTERCPU_CORE0; mbpmsk.bits[0]|=0x1<<RTK_RG_MAC_PORT_MASTERCPU_CORE1; DEBUG("mbr mask = 0x%x, extpmask = 0x%d", mbpmsk.bits[0], etpmsk.bits[0]); #else mbpmsk.bits[0]|=0x1<<RTK_RG_MAC_PORT_CPU; //CPUport always on, or TRAP will failed etpmsk.bits[0]|=0x1; //CPUport always on, or TRAP will failed #endif #ifdef CONFIG_RTL_CLIENT_MODE_SUPPORT //20150428LUKE: for macBased, we should broadcast to bridge WWAN by interface, not by vlan-decision if(!rg_db.systemGlobal.initParam.macBasedTagDecision) { if(wirelessWan!=RG_WWAN_WIRED) { etpmsk.bits[0]|=0x2; //enable ext_1 for wireless WAN #ifdef CONFIG_MASTER_WLAN0_ENABLE if(wirelessWan==RG_WWAN_WLAN0_VXD) { assert_ok(rtk_rg_apollo_wlanDevBasedCVlanId_set(0,RG_WWAN_WLAN0_VXD,vlanID)); rg_db.vlan[vlanID].wlan0DevMask|=(0x1<<RG_WWAN_WLAN0_VXD); if(wanintf->egress_vlan_tag_on)rg_db.vlan[vlanID].wlan0UntagMask&=(~(0x1<<RG_WWAN_WLAN0_VXD)); else rg_db.vlan[vlanID].wlan0UntagMask|=(0x1<<RG_WWAN_WLAN0_VXD); } else if(wirelessWan==RG_WWAN_WLAN1_VXD) { assert_ok(rtk_rg_apollo_wlanDevBasedCVlanId_set(0,RG_WWAN_WLAN1_VXD,vlanID)); rg_db.vlan[vlanID].wlan0DevMask|=(0x1<<RG_WWAN_WLAN1_VXD); if(wanintf->egress_vlan_tag_on)rg_db.vlan[vlanID].wlan0UntagMask&=(~(0x1<<RG_WWAN_WLAN1_VXD)); else rg_db.vlan[vlanID].wlan0UntagMask|=(0x1<<RG_WWAN_WLAN1_VXD); } #endif if(wanintf->wan_type==RTK_RG_BRIDGE) mbpmsk.bits[0]&=(~(wanPmsk.bits[0])); //dismiss wan port if we are birdge } } #endif if(wanintf->egress_vlan_tag_on) //egress tagged packet { utpmsk.bits[0]&=(~(wanPmsk.bits[0])); //set WAN port to 0 in untag set (tagging) if(wanintf->wan_type!=RTK_RG_BRIDGE) utpmsk.bits[0]&=(~(0x1<<RTK_RG_MAC_PORT_CPU)); //set CPU port to 0 in untag set (tagging) for SMUX to detag else { //20140508LUKE:add UNBIND tagged bridge WAN should set VLAN's CPU port to TAGGED!! //for BINDING bridge WAN, vlan_exist should be zero! if(vlan_exist==0) { utpmsk.bits[0]&=(~(0x1<<RTK_RG_MAC_PORT_CPU)); //set CPU port to 0 in untag set (tagging) for SMUX to detag } else if(utpmsk.bits[0]&(0x1<<RTK_RG_MAC_PORT_CPU)) { errorno=RT_ERR_RG_CPU_TAG_DIFF_BRIDGE_WAN; goto RET_VLAN_ERR; } } } else { utpmsk.bits[0]|=wanPmsk.bits[0]; //set WAN port to 1 in untag set (untagging) if(wanintf->wan_type!=RTK_RG_BRIDGE) utpmsk.bits[0]|=0x1<<RTK_RG_MAC_PORT_CPU; //set CPU port to 1 in untag set (untagging) else { //20140508LUKE:add UNBIND untag bridge WAN should set VLAN's CPU port to UNTAG!! //for BINDING bridge WAN, vlan_exist should be zero! if(vlan_exist==0) { utpmsk.bits[0]|=0x1<<RTK_RG_MAC_PORT_CPU; //set CPU port to 1 in untag set (untagging) } else if((utpmsk.bits[0]&(0x1<<RTK_RG_MAC_PORT_CPU))==0) { errorno=RT_ERR_RG_CPU_TAG_DIFF_BRIDGE_WAN; goto RET_VLAN_ERR; } } } #if defined(CONFIG_RTL9600_SERIES) //Patch:20131009, for multicast routing packet will use ingress's VLAN untag set, therefore set all none-member port as untag!! 
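//Note (illustrative values only): the statement below forces every non-member MAC port into the
//untag set, because routed multicast on this chip uses the ingress VLAN's untag set. For example,
//assuming mbpmsk 0x130 and RTK_RG_ALL_MAC_PORTMASK 0x1FF, (~0x130)&0x1FF = 0x0CF would be OR-ed
//into utpmsk.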
utpmsk.bits[0]|=(~(mbpmsk.bits[0]))&RTK_RG_ALL_MAC_PORTMASK; #endif #if defined(CONFIG_OPENWRT_RG) && defined(CONFIG_SWCONFIG) //ysleu: Always tagged to CPU on SWCONFIG mode utpmsk.bits[0]&=(~(0x1<<RTK_RG_PORT_CPU)); #endif ret = RTK_VLAN_PORT_SET(vlanID, &mbpmsk, &utpmsk); if(ret!=RT_ERR_OK)goto RET_VLAN_ERR; ret = RTK_VLAN_EXTPORT_SET(vlanID, &etpmsk); if(ret!=RT_ERR_OK)goto RET_VLAN_ERR; #if defined(CONFIG_RTL9600_SERIES) && defined(CONFIG_MASTER_WLAN0_ENABLE) //20160524LUKE: for multicast routing packet will use ingress's VLAN untag set, therefore set all none-member port as untag!! rg_db.vlan[vlanID].wlan0UntagMask|=(~(rg_db.vlan[vlanID].wlan0DevMask))&(0xffffffff>>(32-MAX_WLAN_DEVICE_NUM)); #endif if(wanintf->vlan_based_pri_enable==RTK_RG_ENABLED) { #ifdef CONFIG_DUALBAND_CONCURRENT if(wanintf->vlan_based_pri==CONFIG_DEFAULT_TO_SLAVE_GMAC_PRI) { errorno=RT_ERR_RG_VLAN_PRI_CONFLICT_WIFI; goto RET_VLAN_ERR; } #endif #if defined(CONFIG_RTL9602C_SERIES) //WARNING("[FIXME]for 9602C, we can't set priority for VLAN directly..."); //errorno=RT_ERR_RG_CHIP_NOT_SUPPORT; //goto RET_VLAN_ERR; { rtk_rg_aclAndCf_reserved_AssignVlanBasedPriorityForInterface_t assignVlanBasedPriorityForInterfacePara; bzero(&assignVlanBasedPriorityForInterfacePara,sizeof(rtk_rg_aclAndCf_reserved_AssignVlanBasedPriorityForInterface_t)); assignVlanBasedPriorityForInterfacePara.ingress_vlan = wanintf->egress_vlan_id; assignVlanBasedPriorityForInterfacePara.assigned_priority = wanintf->vlan_based_pri; _rtk_rg_aclAndCfReservedRuleAdd(RTK_RG_ACLANDCF_RESERVED_ASSIGN_VLAN_BASED_RRIORITY_FOR_INTF0+intfIdx, &assignVlanBasedPriorityForInterfacePara); } #else ret = RTK_VLAN_PRIORITY_SET(vlanID, wanintf->vlan_based_pri); if(ret!=RT_ERR_OK)goto RET_VLAN_ERR; ret = RTK_VLAN_PRIORITYENABLE_SET(vlanID, ENABLED); if(ret!=RT_ERR_OK)goto RET_VLAN_ERR; #endif } else { #if defined(CONFIG_RTL9602C_SERIES) #else ret = RTK_VLAN_PRIORITYENABLE_SET(vlanID, DISABLED); if(ret!=RT_ERR_OK)goto RET_VLAN_ERR; #endif } //20140723LUKE: bypass other tables //20150622LUKE: since we didn't turn on macBased decision, we won't have binding, then the wanType is no need, //nexthop will be setup in _rtk_rg_internal_GWMACSetup_stage2 or _rtk_rg_internal_IPV6GWMACSetup_stage2 //20150624LUKE: for PPTP and L2TP, we need nexthop index at internal_wanSet, so we have to choose one here! if(reAddSameWan || (!rg_db.systemGlobal.initParam.macBasedTagDecision && wanintf->wan_type!=RTK_RG_PPTP && wanintf->wan_type!=RTK_RG_L2TP))goto ADD_BINDING; //Set up WAN type and NXP hop table here //Check for empty entry errorno=RT_ERR_RG_ENTRY_FULL; for(i=0;i<MAX_NEXTHOP_SW_TABLE_SIZE;i++) { if(rg_db.systemGlobal.nxpRefCount[i] == 0) break; } if(i==MAX_NEXTHOP_SW_TABLE_SIZE)goto RET_VLAN_ERR; nxpIdx = i; //Keep //Setup Nexthop table in nxtidx errorno=RT_ERR_RG_NXP_SET_FAIL; nxpEt.ifIdx=intfIdx; // if WAN is PPPoE, LAN is untag. (keepPppoe=1 will send untag packet to WAN) if((wanintf->wan_type == RTK_RG_PPPoE)||(wanintf->wan_type == RTK_RG_PPPoE_DSLITE)){ nxpEt.type=L34_NH_PPPOE; #if defined(CONFIG_RTL9602C_SERIES) nxpEt.keepPppoe=2; /* If original tagged, keep. 
Otherwise add tag with PPPIDX session id */ #else nxpEt.keepPppoe=0; #endif nxpEt.pppoeIdx=pppoeIdx; }else{ nxpEt.type=L34_NH_ETHER; nxpEt.keepPppoe=1; nxpEt.pppoeIdx=0; } // FIXME: here should to use binding remote host mac index, if port-binding is set nxpEt.nhIdx=rg_db.systemGlobal.defaultTrapLUTIdx; //use this DUMMY index to force packet TRAP to CPU rg_db.nexthop[nxpIdx].valid=1; ret = RTK_L34_NEXTHOPTABLE_SET(nxpIdx, &nxpEt); if(ret!=RT_ERR_OK)goto RET_NEXTHOP_ERR; rg_db.systemGlobal.nxpRefCount[nxpIdx]++; //add for deleting it when del interface //20150618LUKE: v6 nexthop should be assigned only when needed /*if(wanintf->wan_type != RTK_RG_BRIDGE) { //Check for empty entry errorno=RT_ERR_RG_ENTRY_FULL; for(i=0;i<MAX_NEXTHOP_SW_TABLE_SIZE;i++) { if(rg_db.systemGlobal.nxpRefCount[i] == 0) break; } if(i==MAX_NEXTHOP_SW_TABLE_SIZE)goto RET_VLAN_ERR; v6nxpIdx = i; //Keep //20140623LUKE:IPv4 and IPv6 may have different remote MAC address, so we keep two nexthop respectively TRACE("ifIdx=%d keepPPPoE=%d pppoeIdx=%d type=%d index=%d",nxpEt.ifIdx,nxpEt.keepPppoe,nxpEt.nhIdx,nxpEt.pppoeIdx,nxpEt.type,v6nxpIdx); #if defined(CONFIG_RTL9602C_SERIES) FIXME("9602BVB nexthop entry number=netif entry number"); ret = RTK_L34_NEXTHOPTABLE_SET(v6nxpIdx, &nxpEt); //used for IPv6 if(ret!=RT_ERR_OK)goto RET_NEXTHOP_ERR; rg_db.systemGlobal.nxpRefCount[v6nxpIdx]++; //add for deleting it when del interface #else ret = RTK_L34_NEXTHOPTABLE_SET(v6nxpIdx, &nxpEt); //used for IPv6 if(ret!=RT_ERR_OK)goto RET_NEXTHOP_ERR; rg_db.systemGlobal.nxpRefCount[v6nxpIdx]++; //add for deleting it when del interface #endif }*/ //Add WAN type table errorno=RT_ERR_RG_WANTYPE_SET_FAIL; bzero(&wantEt, sizeof(rtk_wanType_entry_t)); wantEt.nhIdx=nxpIdx; if(wanintf->wan_type==RTK_RG_BRIDGE) wantEt.wanType=L34_WAN_TYPE_L2_BRIDGE; else wantEt.wanType=L34_WAN_TYPE_L3_ROUTE; //this value should be modified if NAPT for(i=0;i<MAX_WANTYPE_SW_TABLE_SIZE;i++) { if(rg_db.wantype[i].valid==0) { wantypeIdx=i; break; } } if(wantypeIdx<0)goto RET_WANTYPE_ERR; rg_db.wantype[wantypeIdx].valid=1; ret = RTK_L34_WANTYPETABLE_SET(wantypeIdx, &wantEt); if(ret==RT_ERR_CHIP_NOT_SUPPORTED) { errorno=RT_ERR_RG_CHIP_NOT_SUPPORT; goto RET_WANTYPE_ERR; } if(ret!=RT_ERR_OK)goto RET_WANTYPE_ERR; rg_db.systemGlobal.nxpRefCount[nxpIdx]++; //nexthop reference by WAN type table ADD_BINDING: if(reAddSameWan) { //if we add binding before, we do not readd them here! DEBUG("before: binding pmsk is %x, extpmsk is %x",tmppmsk,tmpexpmsk); tmppmsk&=(~old_pmsk); tmpexpmsk&=(~old_extpmsk); DEBUG("binding pmsk is %x, extpmsk is %x",tmppmsk,tmpexpmsk); wantypeIdx=rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.wan_intf.bind_wan_type_ipv4; v6wantypeIdx=rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.wan_intf.bind_wan_type_ipv6; nxpIdx=rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.wan_intf.nexthop_ipv4; v6nxpIdx=rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.wan_intf.nexthop_ipv6; pppoeIdx=rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.wan_intf.pppoe_idx; extipIdx=rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.wan_intf.extip_idx; baseIntfIdx=rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.wan_intf.baseIntf_idx; wirelessWan=rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.wan_intf.wirelessWan; } #if defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) #else //Set up port-binding for WAN interface //DEBUG("start"); //20140423LUKE:CPU port can't be add to binding rule!! 
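//Note: the mask-out below only clears the CPU-port bit, so a port-binding rule never covers
//CPU-originated traffic; the remaining MAC/extension port bits are then programmed through
//_rtk_rg_addBindFromPortmask() with the IPv4/IPv6 wanType indices resolved above (or restored
//from interfaceInfo[] when reAddSameWan is set).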
tmppmsk&=(~(0x1<<RTK_RG_PORT_CPU)); if(tmppmsk>0 || tmpexpmsk>0) { DEBUG("wantypeIdx is %d, v6wantypeIdx is %d",wantypeIdx,v6wantypeIdx); errorno=_rtk_rg_addBindFromPortmask(tmppmsk,tmpexpmsk,intfIdx,wantypeIdx,v6wantypeIdx); if(errorno!=RT_ERR_RG_OK)goto RET_WANTYPE_ERR; } #ifdef CONFIG_MASTER_WLAN0_ENABLE //Add WLAN0 device-binding rules for(i=0;i<MAX_WLAN_DEVICE_NUM;i++) { if(rg_db.systemGlobal.wlan0BindDecision[i].exist && (wanintf->wlan0_dev_binding_mask&(0x1<<i))) { rg_db.systemGlobal.wlan0BindDecision[i].set_bind=1; rg_db.systemGlobal.wlan0BindDecision[i].bind_wanIntf=intfIdx; } } #endif //20140502LUKE:Only turn on this when macBasedTagDecision is set to 1 if(rg_db.systemGlobal.initParam.macBasedTagDecision==1) { //Update non binding portmask, if portmask is zero, remove WAN port from fwdVLAN_BIND_INTERNET //otherwise add WAN port to fwdVLAN_BIND_INTERNET! _rtk_rg_updateNoneBindingPortmask(wanPmsk.bits[0]); } #endif //edn defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) #if CONFIG_ACL_EGRESS_WAN_INTF_TRANSFORM //20140723LUKE: bypass create gateway mac if(reAddSameWan||changeBindingPmsk){ //20141224LUKE: since port-binding may be modified, we should rearrange ACL which use the WAN interface as egress interface of the binding if(rg_db.systemGlobal.acl_SW_egress_intf_type_zero_num && (orig_pbdmsk.portmask!=wanintf->port_binding_mask.portmask||changeBindingPmsk)){ rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.wan_intf.wan_intf_conf.port_binding_mask.portmask=wanintf->port_binding_mask.portmask; ASSERT_EQ(_rtk_rg_aclSWEntry_and_asic_rearrange(),RT_ERR_RG_OK); } //20140723LUKE: bypass create gateway mac if(reAddSameWan) goto RESET_RGDB; } #else //20140723LUKE: bypass create gateway mac if(reAddSameWan) goto RESET_RGDB; #endif if(addToStaticMAC) { rtk_rg_successFailReturn_t ret_fs; //1 FIXME: patch for DA==GatewayMac will hit layer2 unknown DA, if action is trap //Create Wan gateway STATIC MAC errorno=RT_ERR_RG_CREATE_GATEWAY_LUT_FAIL; if(wanintf->egress_vlan_tag_on) //cpu tagged utpmsk.bits[0]=0; else utpmsk.bits[0]=0x1<<RTK_RG_MAC_PORT_CPU; ret_fs = _rtk_rg_createGatewayMacEntry(wanintf->gmac.octet,wanintf->egress_vlan_id,utpmsk.bits[0],intfIdx); if(ret_fs==RG_RET_FAIL)goto RET_WANTYPE_ERR; } //Set Global variables rg_db.systemGlobal.wanIntfGroup[rg_db.systemGlobal.wanIntfTotalNum].index=intfIdx; rg_db.systemGlobal.wanIntfGroup[rg_db.systemGlobal.wanIntfTotalNum].disableBroadcast=disableBroadcast; rg_db.systemGlobal.wanIntfGroup[rg_db.systemGlobal.wanIntfTotalNum].p_intfInfo=&rg_db.systemGlobal.interfaceInfo[intfIdx]; rg_db.systemGlobal.wanIntfGroup[rg_db.systemGlobal.wanIntfTotalNum].p_wanIntfConf=&rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.wan_intf.wan_intf_conf; rg_db.systemGlobal.interfaceInfo[intfIdx].lan_or_wan_index=rg_db.systemGlobal.wanIntfTotalNum; rg_db.systemGlobal.wanIntfTotalNum++; //add WAN interface number RESET_RGDB: *wan_intf_idx = intfIdx; //rtlglue_printf("wan_intf_idx get = %d",*wan_intf_idx); //store information in Global variable rg_db.systemGlobal.interfaceInfo[intfIdx].valid=1; bzero(rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.intf_name,32); rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.is_wan=1; //20140806LUKE: for binding WAN, wanType is set to v4 and bind rule protocol is v4v6_all first!! 
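//Note: the stores below mirror every resolved table index (wanType, nexthop, PPPoE, external IP,
//base interface) plus a copy of the caller's configuration into
//rg_db.systemGlobal.interfaceInfo[intfIdx], so interface delete/re-add and the registered
//callbacks can work from this software database alone.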
rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.wan_intf.bind_wan_type_ipv4=wantypeIdx; rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.wan_intf.bind_wan_type_ipv6=v6wantypeIdx; rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.wan_intf.wirelessWan=wirelessWan; rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.wan_intf.nexthop_ipv4=nxpIdx; rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.wan_intf.nexthop_ipv6=v6nxpIdx; rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.wan_intf.pppoe_idx=pppoeIdx; rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.wan_intf.extip_idx=extipIdx; rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.wan_intf.baseIntf_idx=baseIntfIdx; rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.wan_intf.wan_intf_conf.wan_type=wanintf->wan_type; rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.wan_intf.wan_intf_conf.isIVL=wanintf->isIVL; rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.wan_intf.wan_intf_conf.none_internet=wanintf->none_internet; #ifdef CONFIG_MASTER_WLAN0_ENABLE rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.wan_intf.wan_intf_conf.wlan0_dev_binding_mask=wanintf->wlan0_dev_binding_mask; #endif if(wanintf->wan_type == RTK_RG_STATIC) rg_db.systemGlobal.interfaceInfo[intfIdx].p_wanStaticInfo=&rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.wan_intf.static_info; //short-cut of wan static info structure else if(wanintf->wan_type == RTK_RG_DHCP) rg_db.systemGlobal.interfaceInfo[intfIdx].p_wanStaticInfo=&rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.wan_intf.dhcp_client_info.hw_info; //short-cut of wan static info structure else if(wanintf->wan_type == RTK_RG_PPPoE) rg_db.systemGlobal.interfaceInfo[intfIdx].p_wanStaticInfo=&rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.wan_intf.pppoe_info.after_dial.hw_info; //short-cut of wan static info structure else if(wanintf->wan_type == RTK_RG_PPTP) rg_db.systemGlobal.interfaceInfo[intfIdx].p_wanStaticInfo=&rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.wan_intf.pptp_info.after_dial.hw_info; //short-cut of wan static info structure else if(wanintf->wan_type == RTK_RG_L2TP) rg_db.systemGlobal.interfaceInfo[intfIdx].p_wanStaticInfo=&rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.wan_intf.l2tp_info.after_dial.hw_info; //short-cut of wan static info structure else if(wanintf->wan_type == RTK_RG_DSLITE) rg_db.systemGlobal.interfaceInfo[intfIdx].p_wanStaticInfo=&rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.wan_intf.dslite_info.static_info; //short-cut of wan static info structure else if(wanintf->wan_type == RTK_RG_PPPoE_DSLITE) rg_db.systemGlobal.interfaceInfo[intfIdx].p_wanStaticInfo=&rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.wan_intf.pppoe_dslite_info.after_dial.dslite_hw_info.static_info; //short-cut of wan static info structure else //bridge WAN rg_db.systemGlobal.interfaceInfo[intfIdx].p_wanStaticInfo=NULL; rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.wan_intf.wan_intf_conf.wan_port_idx=wanintf->wan_port_idx; //20160428LUKE: transform from RGMII to PON #if !defined(CONFIG_RTL9602C_SERIES) if(rg_db.systemGlobal.pppoeGponSmallbandwithControl && (wanintf->wan_type)==RTK_RG_PPPoE && (wanintf->wan_port_idx)==RTK_RG_PORT_RGMII){ DEBUG("Special recovery WAN_PORT from RGMII to PON."); rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.wan_intf.wan_intf_conf.wan_port_idx=RTK_RG_PORT_PON; } #endif rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.wan_intf.wan_intf_conf.port_binding_mask.portmask=wanintf->port_binding_mask.portmask; 
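//Note: p_wanStaticInfo set above is only a shortcut into the union member matching wan_type
//(static/DHCP/PPPoE/PPTP/L2TP/DS-Lite); it stays NULL for bridge WANs, so callers must NULL-check
//it before dereferencing. A minimal usage sketch, intentionally compiled out:
#if 0
	{
		//Illustrative only: read napt_enable through the shortcut when it exists.
		if(rg_db.systemGlobal.interfaceInfo[intfIdx].p_wanStaticInfo != NULL)
			DEBUG("wan[%d] napt_enable=%d", intfIdx, rg_db.systemGlobal.interfaceInfo[intfIdx].p_wanStaticInfo->napt_enable);
		else
			DEBUG("wan[%d] is a bridge WAN, no static info", intfIdx);
	}
#endif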
//rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.wan_intf.wan_intf_conf.extport_binding_mask=wanintf->extport_binding_mask; rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.wan_intf.wan_intf_conf.egress_vlan_tag_on=wanintf->egress_vlan_tag_on; rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.wan_intf.wan_intf_conf.egress_vlan_id=wanintf->egress_vlan_id; rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.wan_intf.wan_intf_conf.vlan_based_pri_enable=wanintf->vlan_based_pri_enable; rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.wan_intf.wan_intf_conf.vlan_based_pri=wanintf->vlan_based_pri; //_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.wan_intf.wan_intf_conf.default_gateway_on=wanintf->default_gateway_on; memcpy(rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.wan_intf.wan_intf_conf.gmac.octet, wanintf->gmac.octet,ETHER_ADDR_LEN); //if(rg_db.systemGlobal.initParam.wanPortGponMode && wanintf->wan_port_idx==RTK_RG_MAC_PORT_PON) //rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.wan_intf.wan_intf_conf.gponStreamID=wanintf->gponStreamID; //20140723LUKE: bypass unnecessary parts if(reAddSameWan) { errorno=RT_ERR_RG_OK; goto RET_SUCCESS; } //Update PVID _rtk_rg_updateWANPortBasedVID(wanintf->wan_port_idx); //Check PPPoE Pass through _rtk_rg_refreshPPPoEPassThroughLanOrWanPortMask(); //20141208LUKE: setup ACL for traping DHCP packets if(wanintf->wan_type==RTK_RG_DHCP && intfIdx<MAX_NETIF_HW_TABLE_SIZE) { rtk_rg_aclAndCf_reserved_intf_dhcp_trap_t intf_dhcp_trap_para; bzero(&intf_dhcp_trap_para,sizeof(intf_dhcp_trap_para)); memcpy(intf_dhcp_trap_para.gmac.octet,wanintf->gmac.octet,ETHER_ADDR_LEN); _rtk_rg_aclAndCfReservedRuleAdd(RTK_RG_ACLANDCF_RESERVED_INTF0_DHCP_TRAP+intfIdx,&intf_dhcp_trap_para); } //20150312LUKE: for OMCI wanInfo, we should call callback for every WAN. //if(rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.wan_intf.wan_intf_conf.wan_type==RTK_RG_BRIDGE){ //add wan-interfcae callback to sync protocal-stack if(rg_db.systemGlobal.initParam.interfaceAddByHwCallBack != NULL) { //rtk_rg_intfInfo_t intfInfo; //bzero(&intfInfo,sizeof(intfInfo)); //memcpy(&intfInfo, &rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo, sizeof(intfInfo)); rg_db.systemGlobal.initParam.interfaceAddByHwCallBack(&rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo,&intfIdx); } //} #if defined(CONFIG_RTL9600_SERIES) if((rg_db.systemGlobal.internalSupportMask & RTK_RG_INTERNAL_SUPPORT_BIT0)){ // Multicast packets must change cvid to PVID if they're from PON port and un-Ctag. (MC CVID is not from internalVID when VSPMSK enabled) #if 0 { rtk_rg_aclAndCf_reserved_multicastVidTranslateForIpv4_t multicastVidTranslateForIpv4; rtk_rg_aclAndCf_reserved_multicastVidTranslateForIpv6_t multicastVidTranslateForIpv6; //int pvid; //rtk_vlan_portPvid_get(RTK_RG_MAC_PORT_PON,&pvid); multicastVidTranslateForIpv4.vid=rg_db.systemGlobal.portBasedVID[RTK_RG_MAC_PORT_PON]; multicastVidTranslateForIpv6.vid=rg_db.systemGlobal.portBasedVID[RTK_RG_MAC_PORT_PON]; assert_ok(_rtk_rg_aclAndCfReservedRuleAdd(RTK_RG_ACLANDCF_RESERVED_MULTICAST_VID_TRANSLATE_FOR_IPV4, &multicastVidTranslateForIpv4)); assert_ok(_rtk_rg_aclAndCfReservedRuleAdd(RTK_RG_ACLANDCF_RESERVED_MULTICAST_VID_TRANSLATE_FOR_IPV6, &multicastVidTranslateForIpv6)); } #endif } #endif //20150315CHUCK: detect if RTK_RG_ACLANDCF_RESERVED_PPPoE_LCP_PACKET_ASSIGN_PRIORITY is needed. 
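//Note: the detect routine below appears to check whether any PPPoE WAN still needs the reserved
//LCP rule; if so, PPPoE LCP packets are assigned priority 7 (stepped down by one when that would
//collide with CONFIG_DEFAULT_TO_SLAVE_GMAC_PRI on dual-band builds), otherwise the reserved rule
//is removed again.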
if(_rtk_rg_add_pppoe_lcp_reserved_acl_detect()){ rtk_rg_aclAndCf_reserved_ack_packet_assign_priority_t ack_packet_assign_priority; bzero(&ack_packet_assign_priority,sizeof(ack_packet_assign_priority)); ack_packet_assign_priority.priority=7; #ifdef CONFIG_DUALBAND_CONCURRENT if(ack_packet_assign_priority.priority==CONFIG_DEFAULT_TO_SLAVE_GMAC_PRI) ack_packet_assign_priority.priority = (CONFIG_DEFAULT_TO_SLAVE_GMAC_PRI-1); #endif _rtk_rg_aclAndCfReservedRuleAdd(RTK_RG_ACLANDCF_RESERVED_PPPoE_LCP_PACKET_ASSIGN_PRIORITY, &ack_packet_assign_priority); DEBUG("Add reserved ACL RTK_RG_ACLANDCF_RESERVED_PPPoE_LCP_PACKET_ASSIGN_PRIORITY"); }else{ _rtk_rg_aclAndCfReservedRuleDel(RTK_RG_ACLANDCF_RESERVED_PPPoE_LCP_PACKET_ASSIGN_PRIORITY); DEBUG("Del reserved ACL RTK_RG_ACLANDCF_RESERVED_PPPoE_LCP_PACKET_ASSIGN_PRIORITY"); } //20160427CHUCK: move the per L34 ipv6 interfcae link-local trap to add wan stage, otherwise the DHCPv6 and PPPoEv6 may not be diag up. if((*wan_intf_idx) < MAX_NETIF_HW_TABLE_SIZE) { if(wanintf->wan_type==RTK_RG_BRIDGE || wanintf->wan_type==RTK_RG_PPTP|| wanintf->wan_type==RTK_RG_L2TP) { //Bridge or IPv4 interfcae do not need to trap ipv6 link-local } else { //20141226LUKE: add the trap link local ACL since we turn on IPv6 this interface!! memcpy(intf_link_local_trap_para.gmac.octet,wanintf->gmac.octet,ETHER_ADDR_LEN); rsvType=RTK_RG_ACLANDCF_RESERVED_IPV6_INTF0_LINK_LOCAL_TRAP+(*wan_intf_idx); _rtk_rg_aclAndCfReservedRuleAdd(rsvType, &intf_link_local_trap_para); } } _rtk_rg_wanInterface_special_case_check(); errorno=RT_ERR_RG_OK; goto RET_SUCCESS; #if 0 RET_DEF_ROUTE_ERR: //Recovery default setting RTK_L34_ROUTINGTABLE_SET(RTK_L34_ROUTINGTABLE_SET, &ori_rtEt); #endif RET_WANTYPE_ERR: if(wantypeIdx>=0) { //Delete WAN type entry bzero(&wantEt, sizeof(rtk_wanType_entry_t)); rg_db.wantype[wantypeIdx].valid=0; RTK_L34_WANTYPETABLE_SET(wantypeIdx, &wantEt); } RET_NEXTHOP_ERR: //Delete nexthop entry bzero(&nxpEt, sizeof(rtk_l34_nexthop_entry_t)); rg_db.nexthop[nxpIdx].valid=0; RTK_L34_NEXTHOPTABLE_SET(nxpIdx, &nxpEt); RET_VLAN_ERR: //Recovery VLAN setting if(vlan_exist) { RTK_VLAN_PORT_SET(vlanID, &ori_vlanEntry.MemberPortmask, &ori_vlanEntry.UntagPortmask); RTK_VLAN_EXTPORT_SET(vlanID, &ori_vlanEntry.Ext_portmask); RTK_VLAN_FIDMODE_SET(vlanID, ori_vlanEntry.fidMode); RTK_VLAN_FID_SET(vlanID, ori_vlanEntry.fid); #if defined(CONFIG_RTL9602C_SERIES) #else RTK_VLAN_PRIORITYENABLE_SET(vlanID, ori_vlanEntry.priorityEn); RTK_VLAN_PRIORITY_SET(vlanID, ori_vlanEntry.priority); #endif } else { RTK_VLAN_DESTROY(vlanID); } //RET_BRIDGE_ERR: //Recovery all Lan interface's VLAN member port mask if(wanintf->wan_type == RTK_RG_BRIDGE) { for(i=0;i<rg_db.systemGlobal.lanIntfTotalNum;i++) { tmpVid=rg_db.systemGlobal.lanIntfGroup[i].p_intfInfo->p_lanIntfConf->intf_vlan_id; //rtk_vlan_port_get(rg_db.systemGlobal.interfaceInfo[i].storedInfo.lan_intf.intf_vlan_id, &mbpmsk, &utpmsk); memcpy(&mbpmsk, &rg_db.vlan[tmpVid].MemberPortmask,sizeof(rtk_portmask_t)); memcpy(&utpmsk, &rg_db.vlan[tmpVid].UntagPortmask,sizeof(rtk_portmask_t)); mbpmsk.bits[0] &= (~(wanPmsk.bits[0])); //negative the WAN PORT in LAN's VLAN member port mask RTK_VLAN_PORT_SET(tmpVid, &mbpmsk, &utpmsk); } } #if 0 RET_DEFALT_VLAN_ERR: //Recovery DEFAULT LAN VLAN RTK_VLAN_PORT_SET(DEFAULT_LAN_VLAN, &ori_CPU_member_mask, &ori_CPU_untag_mask); #endif /*RET_PPPOE_PASS_ERR: ret=0; for(i=0;i<rg_db.systemGlobal.wanIntfTotalNum;i++) { if(rg_db.systemGlobal.wanIntfGroup[i].p_intfInfo->storedInfo.wan_intf.wan_intf_conf.wan_port_idx == 
wanintf->wan_port_idx) ret++; } if(ret==0) //only this WAN used the wan port { for(i=0;i<rg_db.systemGlobal.lanIntfTotalNum;i++) { if(rg_db.systemGlobal.lanIntfGroup[i].p_intfInfo->p_lanIntfConf->pppoe_passThrough == 1) { //remove WAN port to their PPB VLAN tmpVid=DEFAULT_PPB_VLAN_START+rg_db.systemGlobal.lanIntfGroup[i].index; memcpy(&ori_pmsk, &rg_db.vlan[tmpVid].MemberPortmask,sizeof(rtk_portmask_t)); memcpy(&ori_utmsk, &rg_db.vlan[tmpVid].UntagPortmask,sizeof(rtk_portmask_t)); memcpy(&ori_etpmsk, &rg_db.vlan[tmpVid].Ext_portmask,sizeof(rtk_portmask_t)); if(wanintf->wan_port_idx <= RTK_RG_PORT_CPU) { ori_pmsk.bits[0]&=~(0x1<<wanintf->wan_port_idx); ori_utmsk.bits[0]&=~(0x1<<wanintf->wan_port_idx); } else { ori_etpmsk.bits[0]&=~(0x1<<(wanintf->wan_port_idx-RTK_RG_EXT_PORT0)); ori_utmsk.bits[0]&=~(0x1<<RTK_RG_PORT_CPU); } RTK_VLAN_PORT_SET(tmpVid, &ori_pmsk, &ori_utmsk); RTK_VLAN_EXTPORT_SET(tmpVid, &ori_etpmsk); } } } */ RET_INTF_ERR: //Delete interface entry bzero(&intfEntry, sizeof(rtk_l34_netif_entry_t)); RTK_L34_NETIFTABLE_SET(intfIdx, &intfEntry); #if defined(CONFIG_APOLLO) RET_OVERLAP_BIND_ERR: //re-Sync binding table by software database for(i=0;i<MAX_BIND_SW_TABLE_SIZE;i++) { RTK_L34_BINDINGTABLE_SET(i,&rg_db.bind[i].rtk_bind); } #endif RET_SUCCESS: //------------------ Critical Section End -----------------------// //rg_unlock(&rg_kernel.interfaceLock); RETURN_ERR(errorno); } int _rtk_rg_decreaseNexthopReference(int nexthopIdx) { int ret; rtk_l34_pppoe_entry_t pppEt; rtk_l34_nexthop_entry_t nxpEt; rtk_l2_ucastAddr_t l2UcEntry; if(rg_db.systemGlobal.nxpRefCount[nexthopIdx] <= 0) { rg_db.systemGlobal.nxpRefCount[nexthopIdx]=0; return (RT_ERR_RG_OK); } rg_db.systemGlobal.nxpRefCount[nexthopIdx]--; //20140724LUKE: because the last one ref count is interface, so we should delete LUT here. //when add IPv4's nexthop, interface and wanType table are counted, so two means the LUT is no needed //for IPv6's nexthop, one means the LUT is no needed if((rg_db.systemGlobal.nxpRefCount[nexthopIdx] == 2 && nexthopIdx == rg_db.systemGlobal.interfaceInfo[rg_db.nexthop[nexthopIdx].rtk_nexthop.ifIdx].storedInfo.wan_intf.nexthop_ipv4) || rg_db.systemGlobal.nxpRefCount[nexthopIdx] == 1) { //if there is another nexthop use the same LUT, we can't delete it!! for(ret=0;ret<MAX_NEXTHOP_SW_TABLE_SIZE;ret++) { //20150916LUKE: if the decrease nexthop is used as base WAN of PPTP/L2TP, delete LUT would be OK. if(ret!=nexthopIdx && rg_db.nexthop[ret].rtk_nexthop.nhIdx==rg_db.nexthop[nexthopIdx].rtk_nexthop.nhIdx && rg_db.systemGlobal.interfaceInfo[rg_db.nexthop[ret].rtk_nexthop.ifIdx].valid && (rg_db.systemGlobal.interfaceInfo[rg_db.nexthop[ret].rtk_nexthop.ifIdx].storedInfo.is_wan==0 || rg_db.systemGlobal.interfaceInfo[rg_db.nexthop[ret].rtk_nexthop.ifIdx].storedInfo.wan_intf.baseIntf_idx!=rg_db.nexthop[nexthopIdx].rtk_nexthop.ifIdx)) break; } //Delete LUT referenced by Nexthop and no other nexthop used if(ret==MAX_NEXTHOP_SW_TABLE_SIZE && rg_db.lut[rg_db.nexthop[nexthopIdx].rtk_nexthop.nhIdx].valid) { memcpy(&l2UcEntry,&rg_db.lut[rg_db.nexthop[nexthopIdx].rtk_nexthop.nhIdx].rtk_lut.entry.l2UcEntry,sizeof(rtk_l2_ucastAddr_t)); #if defined(CONFIG_RTL9600_SERIES) //20160329LUKE: fix untag LUT won't be deleted issue. 
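//Note: on RTL9600 an auto-learned untagged LUT entry is stored with vid 0, so the deletion below
//substitutes fwdVLAN_CPU first; without this the gateway MAC lookup would miss and the stale LUT
//entry would outlive the released nexthop.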
if(l2UcEntry.vid==0) //auto-learned as untag l2UcEntry.vid=rg_db.systemGlobal.initParam.fwdVLAN_CPU; #endif _rtk_rg_deleteGatewayMacEntry(l2UcEntry.mac.octet, l2UcEntry.vid, rg_db.vlan[l2UcEntry.vid].UntagPortmask.bits[0]); } //Set to default trap LUT index rg_db.nexthop[nexthopIdx].rtk_nexthop.nhIdx=rg_db.systemGlobal.defaultTrapLUTIdx; rg_db.nexthop[nexthopIdx].valid=1; ret = RTK_L34_NEXTHOPTABLE_SET(nexthopIdx, &rg_db.nexthop[nexthopIdx].rtk_nexthop); if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_NXP_SET_FAIL); } else if(rg_db.systemGlobal.nxpRefCount[nexthopIdx] == 0) { //Delete Nexthop entry, since nobody use it if(rg_db.nexthop[nexthopIdx].rtk_nexthop.type==L34_NH_PPPOE) { //Delete PPPoE table bzero(&pppEt, sizeof(rtk_l34_pppoe_entry_t)); ret = RTK_L34_PPPOETABLE_SET(rg_db.nexthop[nexthopIdx].rtk_nexthop.pppoeIdx, &pppEt); if(ret!=RT_ERR_OK) RETURN_ERR(RT_ERR_RG_PPPOE_SET_FAIL); } bzero(&nxpEt, sizeof(rtk_l34_nexthop_entry_t)); rg_db.nexthop[nexthopIdx].valid=0; ret = RTK_L34_NEXTHOPTABLE_SET(nexthopIdx, &nxpEt); if(ret!=RT_ERR_OK) RETURN_ERR(RT_ERR_RG_NXP_SET_FAIL); } return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_interface_del(int lan_or_wan_intf_idx) { #if defined(CONFIG_RTL9602C_SERIES) || defined(CONFIG_RTL9607C_SERIES) #else int addRsvPPPoEBridgeMcTrap=0; #endif int i,ret,tmpVid,errorno; unsigned int wan_set_mask; rtk_l34_netif_entry_t intfEt; rtk_portmask_t utpmsk; rtk_portmask_t pvid_mac_pmask,pvid_ext_pmask; rtk_portmask_t all_lan_pmsk,all_lan_etpmsk,all_lan_utagpmsk; rtk_portmask_t wanPmsk; rtk_wanType_entry_t wantEt; rtk_l34_pppoe_entry_t pppoeEt; #if defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) #else rtk_binding_entry_t pbindEt; rtk_rg_bindingEntry_t cb_bindEt; #endif rtk_rg_intfInfo_t keep_store_info; //for callback rtk_rg_virtualServer_t virtual_server; rtk_rg_upnpConnection_t upnp; rtk_rg_dmzInfo_t dmz_info; rtk_rg_port_idx_t deleting_wan_port=RTK_RG_MAC_PORT_MAX; rtk_rg_aclAndCf_reserved_type_t rsvType; //Check parameter if(lan_or_wan_intf_idx<0 || lan_or_wan_intf_idx>=MAX_NETIF_SW_TABLE_SIZE){ rtlglue_printf("intf lan_or_wan_intf_idx=%d\n",lan_or_wan_intf_idx); RETURN_ERR(RT_ERR_RG_INVALID_PARAM); } //Check the interface had created or not if(rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].valid == 0){ rtlglue_printf("intf valid=%d\n",rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].valid); RETURN_ERR(RT_ERR_RG_INVALID_PARAM); } #if defined(CONFIG_RTL9602C_SERIES) //patch for mismatching mib ipv6 netif problem if((rg_db.systemGlobal.internalSupportMask & RTK_RG_INTERNAL_SUPPORT_BIT0)) { if(rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.is_wan==1 && rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.wan_type!=RTK_RG_BRIDGE) { if(rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].p_wanStaticInfo->ip_version==IPVER_V4V6 && rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].p_wanStaticInfo->napt_enable==1 && lan_or_wan_intf_idx>=(MAX_NETIF_SW_TABLE_SIZE/2) ) { rtlglue_printf("Can not delete ipv6 wan netif[%d].\n", lan_or_wan_intf_idx); RETURN_ERR(RT_ERR_RG_INVALID_PARAM); } } } #endif //bzero(&intfEt, sizeof(rtk_l34_netif_entry_t)); //ret = rtk_l34_netifTable_get(lan_or_wan_intf_idx, &intfEt); //if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_INTF_GET_FAIL); //if(intfEt.valid != 1)RETURN_ERR(RT_ERR_RG_INVALID_PARAM); //------------------ Critical Section start -----------------------// //rg_lock(&rg_kernel.interfaceLock); //20161031LUKE: move this from the end of API to avoid reflash ACL will recreate inexistence 
VLAN ID. //20141226LUKE: delete the trap link local ACL since we are deleting this interface!! if(lan_or_wan_intf_idx < MAX_NETIF_HW_TABLE_SIZE) { rsvType=RTK_RG_ACLANDCF_RESERVED_IPV6_INTF0_LINK_LOCAL_TRAP+lan_or_wan_intf_idx; _rtk_rg_aclAndCfReservedRuleDel(rsvType); } //Reset all Lan interface to delete WAN port to their VLAN (only for Bridge mode WAN) if(rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.is_wan) { deleting_wan_port=rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.wan_port_idx; wanPmsk.bits[0]=0x1<<deleting_wan_port; //20160428LUKE: transform from RGMII to PON #if !defined(CONFIG_RTL9602C_SERIES) if(rg_db.systemGlobal.pppoeGponSmallbandwithControl && (rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.wan_type == RTK_RG_PPPoE) && deleting_wan_port==RTK_RG_PORT_PON){ DEBUG("Special add RGMII to WAN_PORT_MASK."); wanPmsk.bits[0]|=0x1<<RTK_RG_PORT_RGMII; } #endif #if 0//def CONFIG_GPON_FEATURE //Clear Classfication for GPON stream ID based on interface index if(rg_db.systemGlobal.initParam.wanPortGponMode && rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.wan_port_idx==RTK_RG_MAC_PORT_PON) { if(rg_db.systemGlobal.untagBridgeGponWanIdx==lan_or_wan_intf_idx) { _rtk_rg_cf_reserved_pon_intfSSIDRemap_del(-1); rg_db.systemGlobal.untagBridgeGponWanIdx=-1; } else _rtk_rg_cf_reserved_pon_intfSSIDRemap_del(lan_or_wan_intf_idx); } #endif if(rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].p_wanStaticInfo!=NULL) //not bridge WAN { //Stop ARP request timer if this WAN interface is ipv4 default route if(rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].p_wanStaticInfo->ipv4_default_gateway_on == 1) rg_db.systemGlobal.intfArpRequest[lan_or_wan_intf_idx].finished = 1; //Stop Neighbor Discovery request timer if this WAN interface is ipv6 default route if(rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].p_wanStaticInfo->ipv6_default_gateway_on == 1) rg_db.systemGlobal.intfNeighborDiscovery[lan_or_wan_intf_idx].finished = 1; } #ifdef CONFIG_RG_PPPOE_PASSTHROUGH #if 1 //pppoe_passthru acl disable //Delete ACL setting for PPPoE Pass through if(rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.wan_type == RTK_RG_PPPoE) _rtk_rg_acl_reserved_pppoePassthrough_IntfisPppoewan_del(lan_or_wan_intf_idx); #endif #endif #if defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) #else //20140620LUKE:if we set same interface twice, we just want to reset it's staticInfo without change it's binding rules and related data!! 
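//Note: when intfIdxForReset matches the deleting interface this is an in-place reset, so the block
//below is skipped and the existing binding rules survive. Otherwise every binding entry whose
//wanType nexthop points at this interface is torn down: VLAN bindings through
//rtk_rg_apollo_vlanBinding_del(), port bindings by zeroing the hardware entry and notifying
//bindingDelByHwCallBack().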
if(rg_db.systemGlobal.intfIdxForReset!=lan_or_wan_intf_idx) { //Delete the VLAN-Binding entries which binding to the deleting interface first for(i=0;i<MAX_BIND_SW_TABLE_SIZE;i++) { //if(rg_db.systemGlobal.bindToIntf[i]==lan_or_wan_intf_idx && rg_db.systemGlobal.bindWithVLAN[i]!=-1) //the binding rule points to the deleting interface if(rg_db.bind[i].valid && rg_db.nexthop[rg_db.wantype[rg_db.bind[i].rtk_bind.wanTypeIdx].rtk_wantype.nhIdx].rtk_nexthop.ifIdx==lan_or_wan_intf_idx) { if(rg_db.bind[i].rtk_bind.vidLan!=0) //the binding rule points to the deleting interface { errorno = rtk_rg_apollo_vlanBinding_del(i); if(errorno!=RT_ERR_RG_OK)goto RET_ERR; DEBUG("del vlan-binding[%d]",i); } else { //Delete Binding table for port-binding entries cb_bindEt.type=BIND_TYPE_PORT; cb_bindEt.port_bind_pmask.portmask=rg_db.bind[i].rtk_bind.portMask.bits[0]; cb_bindEt.port_bind_pmask.portmask|=rg_db.bind[i].rtk_bind.extPortMask.bits[0]<<RTK_RG_EXT_PORT0; cb_bindEt.wan_intf_idx=lan_or_wan_intf_idx; #if defined(CONFIG_RTL9600_SERIES) if(rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].valid == SOFTWARE_ONLY_ENTRY) { //delete pure software netif for port binding int iterPort=0; for(iterPort=0 ;iterPort <RTK_RG_PORT_CPU ; iterPort++ ) { if(rg_db.bind[i].rtk_bind.portMask.bits[0] & (1<<iterPort)) { _rtk_rg_aclAndCfReservedRuleDel(RTK_RG_ACLANDCF_RESERVED_PORT0_TRAP + iterPort); } } } #endif bzero(&pbindEt, sizeof(rtk_binding_entry_t)); //ret = dal_apollomp_l34_bindingTable_set(i, &pbindEt); //FIXME:no RTK APIs ret = RTK_L34_BINDINGTABLE_SET(i, &pbindEt); errorno=RT_ERR_RG_CHIP_NOT_SUPPORT; if(ret==RT_ERR_CHIP_NOT_SUPPORTED)goto RET_ERR; errorno=RT_ERR_RG_PORT_BIND_SET_FAIL; if(ret!=RT_ERR_OK)goto RET_ERR; //rg_db.systemGlobal.bindToIntf[i]=-1; //reset to unused DEBUG("del port-binding[%d], mask is %x",i,cb_bindEt.port_bind_pmask.portmask); //20140807LUKE: dismiss the portmask from WAN's binding_mask rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.port_binding_mask.portmask&=(~(cb_bindEt.port_bind_pmask.portmask)); DEBUG("wan[%d] port-binding-mask is %x..",lan_or_wan_intf_idx,rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.port_binding_mask.portmask); //2 Call the initParam's bindingDelByHwCallBack if(rg_db.systemGlobal.initParam.bindingDelByHwCallBack != NULL) { rg_db.systemGlobal.initParam.bindingDelByHwCallBack(&cb_bindEt); } } } } } #endif //end defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) //20140424LUKE:we don't add LAN to WAN now, since binding scenario need more complicated setting!! 
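//Note: teardown order for a WAN interface from here on: release the PPPoE session entry
//(PPPoE/PPPoE-DS-Lite only), delete the gateway MAC LUT entry, then drop the IPv4 and IPv6 nexthop
//references so _rtk_rg_decreaseNexthopReference() can retire the shared LUT/nexthop/PPPoE
//resources once nothing else references them.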
switch(rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.wan_type) { case RTK_RG_PPPoE: case RTK_RG_PPPoE_DSLITE: //delete the pppoe table entry errorno=RT_ERR_RG_PPPOE_SET_FAIL; pppoeEt.sessionID=0; ret=RTK_L34_PPPOETABLE_SET(rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.pppoe_idx,&pppoeEt); if(ret!=RT_ERR_OK)goto RET_ERR; break; default: break; } //1 patch for DA==GatewayMac will hit layer2 unknown DA, if action is trap errorno=RT_ERR_RG_DELETE_GATEWAY_LUT_FAIL; if(rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.egress_vlan_tag_on) //cpu tagged utpmsk.bits[0]=0; else utpmsk.bits[0]=0x1<<RTK_RG_MAC_PORT_CPU; //Delete the interface MAC entry _rtk_rg_deleteGatewayMacEntry(rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.gmac.octet,rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.egress_vlan_id,utpmsk.bits[0]); //20160625LUKE: decrease nexthop reference count when del interface if(rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.nexthop_ipv4>=0){ ret=_rtk_rg_decreaseNexthopReference(rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.nexthop_ipv4); if(ret!=RT_ERR_RG_OK)goto RET_ERR; } if(rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.nexthop_ipv6>=0){ ret=_rtk_rg_decreaseNexthopReference(rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.nexthop_ipv6); if(ret!=RT_ERR_RG_OK)goto RET_ERR; } } else //LAN interface { //The LAN interface can not be deleted when WAN interface had added //errorno=RT_ERR_RG_MODIFY_LAN_AT_WAN_EXIST; //if(rg_db.systemGlobal.wanIntfTotalNum>0) //goto RET_ERR; //re-gather all Lan's VLAN information all_lan_pmsk.bits[0]=0; all_lan_etpmsk.bits[0]=0; all_lan_utagpmsk.bits[0]=0; for(i=0;i<rg_db.systemGlobal.lanIntfTotalNum;i++) { if(rg_db.systemGlobal.lanIntfGroup[i].index==lan_or_wan_intf_idx) continue; tmpVid=rg_db.systemGlobal.lanIntfGroup[i].p_intfInfo->p_lanIntfConf->intf_vlan_id; //DEBUG("i = %d, vid = %d\n",i,tmpVid); all_lan_pmsk.bits[0] |= rg_db.vlan[tmpVid].MemberPortmask.bits[0]; all_lan_etpmsk.bits[0] |= rg_db.vlan[tmpVid].Ext_portmask.bits[0]; all_lan_utagpmsk.bits[0] |= rg_db.vlan[tmpVid].UntagPortmask.bits[0]; } //reset bridge WAN's VLAN setting, since there is LAN deleting for(i=0;i<rg_db.systemGlobal.wanIntfTotalNum;i++) { if(rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->wan_type==RTK_RG_BRIDGE) { //add untag set by wan setting utpmsk.bits[0]=rg_db.vlan[rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_id].UntagPortmask.bits[0]|all_lan_utagpmsk.bits[0]; if(rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_tag_on) //egress tagged packet utpmsk.bits[0]&=(~(0x1<<rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->wan_port_idx)); //set WAN port to 0 in untag set (tagging) else { utpmsk.bits[0]|=0x1<<rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->wan_port_idx; //set WAN port to 1 in untag set (untagging) //utpmsk.bits[0]|=0x1<<RTK_RG_MAC_PORT_CPU; //set CPU port to 1 in untag set (untagging) } if(all_lan_pmsk.bits[0]==0 && all_lan_etpmsk.bits[0]==0) //the last LAN intf is deleting { //reset to WAN port only all_lan_pmsk.bits[0]|=0x1<<rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->wan_port_idx; errorno=RT_ERR_RG_VLAN_SET_FAIL; ret = RTK_VLAN_PORT_SET(rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_id, &all_lan_pmsk, &utpmsk); 
if(ret!=RT_ERR_OK)goto RET_ERR; ret = RTK_VLAN_EXTPORT_SET(rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_id, &all_lan_etpmsk); if(ret!=RT_ERR_OK)goto RET_ERR; } else { errorno=RT_ERR_RG_VLAN_SET_FAIL; ret = RTK_VLAN_PORT_SET(rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_id, &all_lan_pmsk, &utpmsk); if(ret!=RT_ERR_OK)goto RET_ERR; ret = RTK_VLAN_EXTPORT_SET(rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_id, &all_lan_etpmsk); if(ret!=RT_ERR_OK)goto RET_ERR; } } } #if 0 //If this is the last LAN interface, reset DEFAULT LAN VLAN to default value if(rg_db.systemGlobal.lanIntfTotalNum == 1) { mbpmsk.bits[0]=RTK_RG_ALL_MAC_PORTMASK; //all port utpmsk.bits[0]=RTK_RG_ALL_MAC_PORTMASK; //all untag etpmsk.bits[0]=RTK_RG_ALL_VIRUAL_PORTMASK; //all extension port ret = RTK_VLAN_PORT_SET(DEFAULT_LAN_VLAN, &mbpmsk, &utpmsk); errorno=RT_ERR_RG_VLAN_SET_FAIL; if(ret!=RT_ERR_OK)goto RET_ERR; ret = RTK_VLAN_EXTPORT_SET(DEFAULT_LAN_VLAN, &etpmsk); if(ret!=RT_ERR_OK)goto RET_ERR; } else { //take off this LAN interface's member port and extension port from DEFAULT LAN VLAN //mbpmsk.bits[0]=0; //utpmsk.bits[0]=0; //etpmsk.bits[0]=0; //ret = rtk_vlan_port_get(DEFAULT_LAN_VLAN, &mbpmsk, &utpmsk); //errorno=RT_ERR_RG_VLAN_GET_FAIL; //if(ret!=RT_ERR_OK)goto RET_ERR; memcpy(&mbpmsk, &rg_db.vlan[DEFAULT_LAN_VLAN].MemberPortmask,sizeof(rtk_portmask_t)); memcpy(&utpmsk, &rg_db.vlan[DEFAULT_LAN_VLAN].UntagPortmask,sizeof(rtk_portmask_t)); //ret = rtk_vlan_extPort_get(DEFAULT_LAN_VLAN, &etpmsk); //if(ret!=RT_ERR_OK)goto RET_ERR; memcpy(&etpmsk, &rg_db.vlan[DEFAULT_LAN_VLAN].Ext_portmask,sizeof(rtk_portmask_t)); //Transfer RG portmask to RTK portmask _rtk_rg_portmask_translator(rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].p_lanIntfConf->port_mask,&out_mac_pmask,&out_ext_pmask); mbpmsk.bits[0]&=(~(out_mac_pmask.bits[0])); etpmsk.bits[0]&=(~(out_ext_pmask.bits[0])); ret = RTK_VLAN_PORT_SET(DEFAULT_LAN_VLAN, &mbpmsk, &utpmsk); errorno=RT_ERR_RG_VLAN_SET_FAIL; if(ret!=RT_ERR_OK)goto RET_ERR; ret = RTK_VLAN_EXTPORT_SET(DEFAULT_LAN_VLAN, &etpmsk); if(ret!=RT_ERR_OK)goto RET_ERR; } //Reset PPB setting, if any /*errorno=RT_ERR_RG_PPB_SET_FAILED; if(rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].p_lanIntfConf->pppoe_passThrough == 1) { bzero(&protoVlanCfg,sizeof(rtk_vlan_protoVlanCfg_t)); protoVlanCfg.vid=DEFAULT_CPU_VLAN; //fixme: rtk api won't accept vid=0, so we assign a dummy one if(out_ext_pmask.bits[0] > 0x1) { ret = rtk_vlan_portProtoVlan_set(RTK_RG_MAC_PORT_CPU,PPPOE_DISCOVERY_GROUPID,&protoVlanCfg); if(ret!=RT_ERR_OK)goto RET_ERR; ret = rtk_vlan_portProtoVlan_set(RTK_RG_MAC_PORT_CPU,PPPOE_SESSION_GROUPID,&protoVlanCfg); if(ret!=RT_ERR_OK)goto RET_ERR; } for(i=0;i<RTK_RG_PORT_CPU;i++) { if((out_mac_pmask.bits[0]&(0x1<<i)) > 0) { ret = rtk_vlan_portProtoVlan_set(i,PPPOE_DISCOVERY_GROUPID,&protoVlanCfg); if(ret!=RT_ERR_OK)goto RET_ERR; ret = rtk_vlan_portProtoVlan_set(i,PPPOE_SESSION_GROUPID,&protoVlanCfg); if(ret!=RT_ERR_OK)goto RET_ERR; } } errorno=RT_ERR_RG_VLAN_SET_FAIL; ret = RTK_VLAN_DESTROY(DEFAULT_PPB_VLAN_START+lan_or_wan_intf_idx); if(ret!=RT_ERR_OK)goto RET_ERR; }*/ //Set back port-based and extport-based VLAN ID errorno=RT_ERR_RG_VLAN_SET_FAIL; for(i=0;i<RTK_RG_PORT_MAX;i++) { if((rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].p_lanIntfConf->port_mask.portmask&(0x1<<i)) > 0) { if(i<=RTK_RG_PORT_CPU) { ret = rtk_vlan_portPvid_set(i, DEFAULT_CPU_VLAN); if(ret!=RT_ERR_OK) goto RET_ERR; } else { ret = rtk_vlan_extPortPvid_set(i-RTK_RG_PORT_CPU, 
DEFAULT_CPU_VLAN); if(ret!=RT_ERR_OK) goto RET_ERR; } rg_db.systemGlobal.portBasedVID[i]=DEFAULT_CPU_VLAN; //reset port-based and ext-port-based VLAN in rg_db } } #endif //1 patch for DA==GatewayMac will hit layer2 unknown DA, if action is trap errorno=RT_ERR_RG_DELETE_GATEWAY_LUT_FAIL; //Delete the interface MAC entry _rtk_rg_deleteGatewayMacEntry(rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].p_lanIntfConf->gmac.octet,rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].p_lanIntfConf->intf_vlan_id,rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].p_lanIntfConf->untag_mask.portmask); } //Delete routing table entry, decrease Nexthop table ref count, if zero, delete nexthop entry errorno=_rtk_rg_deleteIPv4Routing(lan_or_wan_intf_idx); if(errorno!=RT_ERR_RG_OK)goto RET_ERR; //Delete ipv6 routing table entry, decrease Nexthop table ref count, if zero delete nexthop entry errorno=_rtk_rg_deleteIPv6Routing(lan_or_wan_intf_idx); if(errorno!=RT_ERR_RG_OK)goto RET_ERR; if(rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.is_wan==1) { if(rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.bind_wan_type_ipv4>=0) { errorno = RT_ERR_RG_NXP_SET_FAIL; ret=_rtk_rg_decreaseNexthopReference(rg_db.wantype[rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.bind_wan_type_ipv4].rtk_wantype.nhIdx); if(ret!=RT_ERR_RG_OK)goto RET_ERR; bzero(&wantEt, sizeof(rtk_wanType_entry_t)); rg_db.wantype[rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.bind_wan_type_ipv4].valid=0; ret = RTK_L34_WANTYPETABLE_SET(rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.bind_wan_type_ipv4, &wantEt); errorno=RT_ERR_RG_CHIP_NOT_SUPPORT; if(ret==RT_ERR_CHIP_NOT_SUPPORTED)goto RET_ERR; errorno=RT_ERR_RG_WANTYPE_SET_FAIL; if(ret!=RT_ERR_OK)goto RET_ERR; } if(rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.bind_wan_type_ipv6>=0) { errorno = RT_ERR_RG_NXP_SET_FAIL; ret=_rtk_rg_decreaseNexthopReference(rg_db.wantype[rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.bind_wan_type_ipv6].rtk_wantype.nhIdx); if(ret!=RT_ERR_RG_OK)goto RET_ERR; bzero(&wantEt, sizeof(rtk_wanType_entry_t)); rg_db.wantype[rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.bind_wan_type_ipv6].valid=0; ret = RTK_L34_WANTYPETABLE_SET(rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.bind_wan_type_ipv6, &wantEt); errorno=RT_ERR_RG_CHIP_NOT_SUPPORT; if(ret==RT_ERR_CHIP_NOT_SUPPORTED)goto RET_ERR; errorno=RT_ERR_RG_WANTYPE_SET_FAIL; if(ret!=RT_ERR_OK)goto RET_ERR; errorno = RT_ERR_RG_NXP_SET_FAIL; } #if defined(CONFIG_RTL9602C_SERIES) //9602bvb not support vlan based pri #else //Reset VLAN priority if(rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.vlan_based_pri_enable==RTK_RG_ENABLED && rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.vlan_based_pri!=0) { ret = RTK_VLAN_PRIORITY_SET(rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.egress_vlan_id, 0); errorno=RT_ERR_RG_VLAN_SET_FAIL; if(ret!=RT_ERR_OK)goto RET_ERR; ret = RTK_VLAN_PRIORITYENABLE_SET(rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.egress_vlan_id, DISABLED); errorno=RT_ERR_RG_VLAN_SET_FAIL; if(ret!=RT_ERR_OK)goto RET_ERR; } #endif } //Delete the deleting interface's VLAN setting, if there is no other interface or binding rule use it ret = 
0; if(rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.is_wan==1) //wan interface { tmpVid = rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.egress_vlan_id; if(rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.none_internet && rg_db.systemGlobal.otherWanVlan[lan_or_wan_intf_idx]!=0) { //delete Other Wan's VLANID used for traffic isolation ret = RTK_VLAN_DESTROY(rg_db.systemGlobal.otherWanVlan[lan_or_wan_intf_idx]); errorno=RT_ERR_RG_VLAN_SET_FAIL; if(ret!=RT_ERR_OK)goto RET_ERR; rg_db.systemGlobal.otherWanVlan[lan_or_wan_intf_idx]=0; } } else //lan interface { tmpVid = rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].p_lanIntfConf->intf_vlan_id; memcpy(&pvid_mac_pmask,&rg_db.vlan[tmpVid].MemberPortmask,sizeof(rtk_portmask_t)); memcpy(&pvid_ext_pmask,&rg_db.vlan[tmpVid].Ext_portmask,sizeof(rtk_portmask_t)); } for(i=0;i<MAX_NETIF_SW_TABLE_SIZE;i++) { if(i != lan_or_wan_intf_idx && rg_db.systemGlobal.interfaceInfo[i].valid == 1) { if(rg_db.systemGlobal.interfaceInfo[i].storedInfo.is_wan==1) //wan interface { if(rg_db.systemGlobal.interfaceInfo[i].storedInfo.wan_intf.wan_intf_conf.egress_vlan_id==tmpVid) ret++; //20150915LUKE: clear PPTP/L2TP's baseIntf_idx if this is the base WAN of them, and re-initialize request for MAC if(rg_db.systemGlobal.interfaceInfo[i].storedInfo.wan_intf.baseIntf_idx==lan_or_wan_intf_idx) { if(rg_db.systemGlobal.interfaceInfo[i].storedInfo.wan_intf.wan_intf_conf.wan_type==RTK_RG_PPTP) _rtk_rg_PPTPLearningTimerInitialize(i); else _rtk_rg_L2TPLearningTimerInitialize(i); rg_db.systemGlobal.interfaceInfo[i].storedInfo.wan_intf.baseIntf_idx=-1; rg_db.nexthop[rg_db.systemGlobal.interfaceInfo[i].storedInfo.wan_intf.nexthop_ipv4].rtk_nexthop.nhIdx=rg_db.systemGlobal.defaultTrapLUTIdx; //use this DUMMY index to force packet TRAP to CPU rg_db.nexthop[rg_db.systemGlobal.interfaceInfo[i].storedInfo.wan_intf.nexthop_ipv4].valid=1; RTK_L34_NEXTHOPTABLE_SET(rg_db.systemGlobal.interfaceInfo[i].storedInfo.wan_intf.nexthop_ipv4, &rg_db.nexthop[rg_db.systemGlobal.interfaceInfo[i].storedInfo.wan_intf.nexthop_ipv4].rtk_nexthop); } } else //lan interface { if(rg_db.systemGlobal.interfaceInfo[i].p_lanIntfConf->intf_vlan_id==tmpVid) ret++; } } } for(i=0;i<MAX_BIND_SW_TABLE_SIZE;i++) { //if(rg_db.systemGlobal.bindWithVLAN[i]==tmpVid) if(rg_db.bind[i].valid && rg_db.bind[i].rtk_bind.vidLan==tmpVid) ret++; } if(ret==0) { ret = RTK_VLAN_DESTROY(tmpVid); errorno=RT_ERR_RG_VLAN_SET_FAIL; if(ret!=RT_ERR_OK)goto RET_ERR; } #if defined(CONFIG_RTL9602C_SERIES) { //distroyed vlan based prioirty. 
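//Note: on RTL9602C the VLAN-based priority was installed as a per-interface reserved ACL/CF rule
//(see the add path), so deleting the interface removes
//RTK_RG_ACLANDCF_RESERVED_ASSIGN_VLAN_BASED_RRIORITY_FOR_INTF0 plus the interface index rather
//than clearing a per-VLAN priority setting.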
DEBUG("del vlanbased priority"); if(lan_or_wan_intf_idx < MAX_NETIF_HW_TABLE_SIZE) _rtk_rg_aclAndCfReservedRuleDel(RTK_RG_ACLANDCF_RESERVED_ASSIGN_VLAN_BASED_RRIORITY_FOR_INTF0+lan_or_wan_intf_idx); } #endif #if defined(CONFIG_RTL9602C_SERIES) //patch for mismatching mib ipv6 netif problem if((rg_db.systemGlobal.internalSupportMask & RTK_RG_INTERNAL_SUPPORT_BIT0)) { if(rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.is_wan==1 && rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.wan_type!=RTK_RG_BRIDGE) { if(rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].p_wanStaticInfo->ip_version==IPVER_V4V6 && rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].p_wanStaticInfo->napt_enable==1) { bzero(&intfEt, sizeof(rtk_l34_netif_entry_t)); ret = RTK_L34_NETIFTABLE_SET(lan_or_wan_intf_idx+(MAX_NETIF_SW_TABLE_SIZE/2), &intfEt); errorno=RT_ERR_RG_INTF_SET_FAIL; if(ret!=RT_ERR_OK)goto RET_ERR; //bzero(&rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx+(MAX_NETIF_SW_TABLE_SIZE/2)], sizeof(rtk_rg_interface_info_global_t)); } } } #endif //Delete interface table entry bzero(&intfEt, sizeof(rtk_l34_netif_entry_t)); ret = RTK_L34_NETIFTABLE_SET(lan_or_wan_intf_idx, &intfEt); errorno=RT_ERR_RG_INTF_SET_FAIL; if(ret!=RT_ERR_OK)goto RET_ERR; #if defined(CONFIG_RTL9602C_SERIES) // errorno=RT_ERR_RG_INTF_SET_FAIL; ret = rtk_rg_apolloFE_interfaceMibCounter_del(lan_or_wan_intf_idx); if(ret!=RT_ERR_OK)goto RET_ERR; #endif /*for(i=0;i<8;i++) { //Lookup IP table for checking related interface memset(&extipEt, 0, sizeof(extipEt)); ret = rtk_l34_extIntIPTable_get(i, &extipEt); if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_EXTIP_FAIL); //Lookup nexthop table memset(&nxpEt, 0, sizeof(nxpEt)); ret = rtk_l34_nexthopTable_get(extipEt.nhIdx, &nxpEt); if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_NXP_FAIL); if(nxpEt.ifIdx == lan_or_wan_intf_idx) { rg_db.systemGlobal.nxpRefCount[j]--; match_intf = 1; if(rg_db.systemGlobal.nxpRefCount[j]==0) { //Delete Nexthop entry, since nobody use it if(nxpEt.type==L34_NH_PPPOE) { //Delete PPPoE table memset(&pppEt, 0, sizeof(pppEt)); RTK_L34_PPPOETABLE_SET(nxpEt.pppoeIdx, &pppEt); if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_PPPOE_FAIL); } memset(&nxpEt, 0, sizeof(nxpEt)); ret = RTK_L34_NEXTHOPTABLE_SET(j, &nxpEt); if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_NXP_FAIL); } } }*/ #ifdef CONFIG_MASTER_WLAN0_ENABLE //Check if WLAN binding to this WAN, clear it! 
if(rg_db.systemGlobal.initParam.macBasedTagDecision) { for(i=0;i<MAX_WLAN_DEVICE_NUM;i++) { if(rg_db.systemGlobal.wlan0BindDecision[i].exist && rg_db.systemGlobal.wlan0BindDecision[i].set_bind && rg_db.systemGlobal.wlan0BindDecision[i].bind_wanIntf==lan_or_wan_intf_idx) { rg_db.systemGlobal.wlan0BindDecision[i].set_bind=0; rg_db.systemGlobal.wlan0BindDecision[i].bind_wanIntf=0; } } } #endif #if defined(CONFIG_RTL9602C_SERIES) || defined(CONFIG_RTL9607C_SERIES) //9602c PPPoE Action is normal #else if(rg_db.systemGlobal.internalSupportMask&RTK_RG_INTERNAL_SUPPORT_BIT4) { //removed the H/W intf PPPoE Multicast trap rule if( rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.is_wan && rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.wan_type==RTK_RG_PPPoE && rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.pppoe_idx >=0 && rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].valid == SOFTWARE_HARDWARE_SYNC_ENTRY){ _rtk_rg_aclAndCfReservedRuleDel(RTK_RG_ACLANDCF_RESERVED_PPPoE_MULTICAST_INTF0_PERMIT+lan_or_wan_intf_idx); } //check if default bridge PPPoE Multicast trap rule is need or not. addRsvPPPoEBridgeMcTrap=0; for(i=0;i<MAX_NETIF_SW_TABLE_SIZE;i++) { if( rg_db.systemGlobal.interfaceInfo[i].storedInfo.is_wan && rg_db.systemGlobal.interfaceInfo[i].storedInfo.wan_intf.wan_intf_conf.wan_type==RTK_RG_PPPoE && rg_db.systemGlobal.interfaceInfo[i].storedInfo.wan_intf.pppoe_idx >=0 && rg_db.systemGlobal.interfaceInfo[i].valid == SOFTWARE_HARDWARE_SYNC_ENTRY) { //exist any PPPoE Wan in H/W, still need reserved addRsvPPPoEBridgeMcTrap=1; break; } } if(addRsvPPPoEBridgeMcTrap==0){ _rtk_rg_aclAndCfReservedRuleDel(RTK_RG_ACLANDCF_RESERVED_PPPoE_MULTICAST_DEFAULT_TRAP); } } #endif //keep Global variable structure for callback function bzero(&keep_store_info,sizeof(rtk_rg_intfInfo_t)); memcpy(&keep_store_info,&rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo,sizeof(rtk_rg_intfInfo_t)); //Reset Global variable structure rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].valid=0; bzero(rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.intf_name, 32); if(rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.is_wan == 1) { //Reset WAN set mask wan_set_mask=0x1<<lan_or_wan_intf_idx; rg_db.systemGlobal.wanInfoSet &= ~(wan_set_mask); //Decrease Global WAN interface count rg_db.systemGlobal.wanIntfTotalNum--; //decrease WAN interface number #if 1 //Reset WAN group entry, defragmentation if needed if(rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].lan_or_wan_index != rg_db.systemGlobal.wanIntfTotalNum) { for(i=rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].lan_or_wan_index;i<rg_db.systemGlobal.wanIntfTotalNum;i++) { memcpy(&rg_db.systemGlobal.wanIntfGroup[i],&rg_db.systemGlobal.wanIntfGroup[i+1],sizeof(rtk_rg_wan_interface_group_info_t)); //rg_db.systemGlobal.wanIntfGroup[i].index = rg_db.systemGlobal.wanIntfGroup[i+1].index; //rg_db.systemGlobal.wanIntfGroup[i].p_intfInfo = rg_db.systemGlobal.wanIntfGroup[i+1].p_intfInfo; //rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf = rg_db.systemGlobal.wanIntfGroup[i+1].p_wanIntfConf; rg_db.systemGlobal.wanIntfGroup[i].p_intfInfo->lan_or_wan_index--; } } #else //Reset WAN group entry, defragmentation if needed //since interface order is not important, we just move the last one to the deleting index if(rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].lan_or_wan_index != rg_db.systemGlobal.wanIntfTotalNum) { 
rg_db.systemGlobal.wanIntfGroup[rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].lan_or_wan_index].index = rg_db.systemGlobal.wanIntfGroup[rg_db.systemGlobal.wanIntfTotalNum].index; rg_db.systemGlobal.wanIntfGroup[rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].lan_or_wan_index].p_intfInfo= rg_db.systemGlobal.wanIntfGroup[rg_db.systemGlobal.wanIntfTotalNum].p_intfInfo; } #endif bzero(&rg_db.systemGlobal.wanIntfGroup[rg_db.systemGlobal.wanIntfTotalNum],sizeof(rtk_rg_wan_interface_group_info_t)); //rg_db.systemGlobal.wanIntfGroup[rg_db.systemGlobal.wanIntfTotalNum].index=0; //rg_db.systemGlobal.wanIntfGroup[rg_db.systemGlobal.wanIntfTotalNum].disableBroadcast=0; //rg_db.systemGlobal.wanIntfGroup[rg_db.systemGlobal.wanIntfTotalNum].p_intfInfo=NULL; //rg_db.systemGlobal.wanIntfGroup[rg_db.systemGlobal.wanIntfTotalNum].p_wanIntfConf=NULL; //Clear WAN structure rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.bind_wan_type_ipv4=-1; rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.bind_wan_type_ipv6=-1; rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.wirelessWan=RG_WWAN_WIRED; rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.pppoe_idx=-1; rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.extip_idx=-1; rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.baseIntf_idx=-1; bzero(&rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.gmac, sizeof(rtk_mac_t)); rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.wan_port_idx=0; rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.port_binding_mask.portmask=0; //rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.extport_binding_mask.bits[0]=0; rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.egress_vlan_tag_on=0; rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.egress_vlan_id=0; rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.vlan_based_pri_enable=RTK_RG_DISABLED; rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.vlan_based_pri=0; rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.isIVL=0; rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.none_internet=0; if(rg_db.systemGlobal.defaultRouteSet == lan_or_wan_intf_idx) rg_db.systemGlobal.defaultRouteSet=-1; if(rg_db.systemGlobal.defaultIPV6RouteSet == lan_or_wan_intf_idx) rg_db.systemGlobal.defaultIPV6RouteSet=-1; if(rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].p_wanStaticInfo!=NULL) //not bridge WAN { //20140620LUKE:if we set same interface twice, we just want to reset it's staticInfo without change it's binding rules and related data!! 
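	/* For a routed (non-bridge) WAN that is really being deleted (not just re-configured via
	 * intfIdxForReset), walk the software virtual-server and UPnP tables with the _find()/_del()
	 * pair and drop every entry bound to this WAN index, then reset its DMZ host with a zeroed
	 * rtk_rg_dmzInfo_t. */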
if(rg_db.systemGlobal.intfIdxForReset!=lan_or_wan_intf_idx) { //Check for L4 WAN to clean all virtualServer, UPnP, and DMZ for the WAN index //if(rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].p_wanStaticInfo->napt_enable) //napt_enable is zero while intf is up but not getting ip { //Clear Virtual Server i=0; ret=RT_ERR_RG_OK; while(i<MAX_VIRTUAL_SERVER_SW_TABLE_SIZE && ret==RT_ERR_RG_OK) { ret=rtk_rg_apollo_virtualServer_find(&virtual_server,&i); if(ret==RT_ERR_RG_OK && virtual_server.wan_intf_idx==lan_or_wan_intf_idx) rtk_rg_apollo_virtualServer_del(i); i++; } //Clear UPnP i=0; ret=RT_ERR_RG_OK; while(i<MAX_UPNP_SW_TABLE_SIZE && ret==RT_ERR_RG_OK) { ret=rtk_rg_apollo_upnpConnection_find(&upnp,&i); if(ret==RT_ERR_RG_OK && upnp.wan_intf_idx==lan_or_wan_intf_idx) rtk_rg_apollo_upnpConnection_del(i); i++; } //Clear DMZ bzero(&dmz_info,sizeof(rtk_rg_dmzInfo_t)); rtk_rg_apollo_dmzHost_set(lan_or_wan_intf_idx,&dmz_info); } } rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].p_wanStaticInfo->ip_version=0; //default ipv4 rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].p_wanStaticInfo->napt_enable=0; rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].p_wanStaticInfo->ip_addr=0; rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].p_wanStaticInfo->ip_network_mask=0; bzero(rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].p_wanStaticInfo->ipv6_addr.ipv6_addr,IPV6_ADDR_LEN); rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].p_wanStaticInfo->ipv6_mask_length=0; rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].p_wanStaticInfo->ipv4_default_gateway_on=0; rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].p_wanStaticInfo->gateway_ipv4_addr=0; rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].p_wanStaticInfo->ipv6_default_gateway_on=0; bzero(rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].p_wanStaticInfo->gateway_ipv6_addr.ipv6_addr,IPV6_ADDR_LEN); rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].p_wanStaticInfo->mtu=0; rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].p_wanStaticInfo->gw_mac_auto_learn_for_ipv4=0; bzero(&rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].p_wanStaticInfo->gateway_mac_addr_for_ipv4,sizeof(rtk_mac_t)); rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].p_wanStaticInfo->gw_mac_auto_learn_for_ipv6=0; bzero(&rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].p_wanStaticInfo->gateway_mac_addr_for_ipv6,sizeof(rtk_mac_t)); } if(rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.wan_type==RTK_RG_DHCP) { rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.dhcp_client_info.stauts=0; } else if(rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.wan_type==RTK_RG_PPPoE) { rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.pppoe_info.after_dial.sessionId=0; bzero(rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.pppoe_info.before_dial.username, 4); bzero(rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.pppoe_info.before_dial.password, 4); rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.pppoe_info.before_dial.auth_type=0; rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.pppoe_info.before_dial.pppoe_proxy_enable=0; rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.pppoe_info.before_dial.max_pppoe_proxy_num=0; 
rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.pppoe_info.before_dial.auto_reconnect=0; rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.pppoe_info.before_dial.dial_on_demond=0; rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.pppoe_info.before_dial.idle_timeout_secs=0; rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.pppoe_info.before_dial.stauts=0; rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.pppoe_info.before_dial.dialOnDemondCallBack=NULL; rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.pppoe_info.before_dial.idleTimeOutCallBack=NULL; } else if(rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.wan_type==RTK_RG_DSLITE) { bzero(&rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.dslite_info.rtk_dslite,sizeof(rtk_l34_dsliteInf_entry_t)); rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.dslite_info.aftr_mac_auto_learn=0; bzero(&rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.dslite_info.aftr_mac_addr,sizeof(rtk_mac_t)); #if defined(CONFIG_RTL9602C_SERIES) bzero(&rg_db.dslite[rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.dslite_idx].rtk_dslite,sizeof(rtk_l34_dsliteInf_entry_t)); ASSERT_EQ(RTK_L34_DSLITEINFTABLE_SET(&rg_db.dslite[rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.dslite_idx].rtk_dslite),RT_ERR_OK); rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.dslite_idx=-1; #else //disable reserve ACL if(lan_or_wan_intf_idx < MAX_NETIF_HW_TABLE_SIZE) _rtk_rg_aclAndCfReservedRuleDel(RTK_RG_ACLANDCF_RESERVED_INTF0_DSLITE_TRAP+lan_or_wan_intf_idx); #endif } else if(rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.wan_type==RTK_RG_PPPoE_DSLITE) { bzero(&rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.pppoe_dslite_info.after_dial.dslite_hw_info.rtk_dslite,sizeof(rtk_l34_dsliteInf_entry_t)); rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.pppoe_dslite_info.after_dial.dslite_hw_info.aftr_mac_auto_learn=0; bzero(&rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.pppoe_dslite_info.after_dial.dslite_hw_info.aftr_mac_addr,sizeof(rtk_mac_t)); #if defined(CONFIG_RTL9602C_SERIES) bzero(&rg_db.dslite[rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.dslite_idx].rtk_dslite,sizeof(rtk_l34_dsliteInf_entry_t)); ASSERT_EQ(RTK_L34_DSLITEINFTABLE_SET(&rg_db.dslite[rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.dslite_idx].rtk_dslite),RT_ERR_OK); rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.dslite_idx=-1; #else //disable reserve ACL if(lan_or_wan_intf_idx < MAX_NETIF_HW_TABLE_SIZE) _rtk_rg_aclAndCfReservedRuleDel(RTK_RG_ACLANDCF_RESERVED_INTF0_DSLITE_TRAP+lan_or_wan_intf_idx); #endif } rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].p_wanStaticInfo=NULL; rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.wan_type=0; //Update PVID _rtk_rg_updateWANPortBasedVID(deleting_wan_port); //20140502LUKE:Only turn on this when macBasedTagDecision is set to 1 if(rg_db.systemGlobal.initParam.macBasedTagDecision==1) { //UpdateBindInternet _rtk_rg_updateBindWanIntf(NULL); //Update non-binding _rtk_rg_updateNoneBindingPortmask(wanPmsk.bits[0]); } 
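	/* The calls above recompute the WAN port's port-based VID and, when macBasedTagDecision is on,
	 * refresh the binding and non-binding portmasks, since the VLAN/binding state contributed by
	 * this WAN no longer applies. */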
	//20141208LUKE: remove the reserved ACL used for trapping DHCP packets on this WAN
	if(keep_store_info.wan_intf.wan_intf_conf.wan_type==RTK_RG_DHCP && lan_or_wan_intf_idx<MAX_NETIF_HW_TABLE_SIZE)
		_rtk_rg_aclAndCfReservedRuleDel(RTK_RG_ACLANDCF_RESERVED_INTF0_DHCP_TRAP+lan_or_wan_intf_idx);
	}
	else
	{
		//Decrease Global LAN interface count
		rg_db.systemGlobal.lanIntfTotalNum--;	//decrease LAN interface number
#if 1
		//Reset LAN group entry, defragmentation if needed
		if(rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].lan_or_wan_index != rg_db.systemGlobal.lanIntfTotalNum)
		{
			for(i=rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].lan_or_wan_index;i<rg_db.systemGlobal.lanIntfTotalNum;i++)
			{
				rg_db.systemGlobal.lanIntfGroup[i].index = rg_db.systemGlobal.lanIntfGroup[i+1].index;
				rg_db.systemGlobal.lanIntfGroup[i].p_intfInfo = rg_db.systemGlobal.lanIntfGroup[i+1].p_intfInfo;
				rg_db.systemGlobal.lanIntfGroup[i].p_intfInfo->lan_or_wan_index--;
			}
		}
#else
		//Reset LAN group entry, defragmentation if needed
		//since interface order is not important, we just move the last one to the deleting index
		if(rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].lan_or_wan_index != rg_db.systemGlobal.lanIntfTotalNum)
		{
			rg_db.systemGlobal.lanIntfGroup[rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].lan_or_wan_index].index = rg_db.systemGlobal.lanIntfGroup[rg_db.systemGlobal.lanIntfTotalNum].index;
			rg_db.systemGlobal.lanIntfGroup[rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].lan_or_wan_index].p_intfInfo= rg_db.systemGlobal.lanIntfGroup[rg_db.systemGlobal.lanIntfTotalNum].p_intfInfo;
		}
#endif
		rg_db.systemGlobal.lanIntfGroup[rg_db.systemGlobal.lanIntfTotalNum].index=0;
		rg_db.systemGlobal.lanIntfGroup[rg_db.systemGlobal.lanIntfTotalNum].p_intfInfo=NULL;

		//Remove LAN's portmask from the global one.
rg_db.systemGlobal.lanPortMask.portmask&=(~(rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].p_lanIntfConf->port_mask.portmask)); //Clear LAN structure bzero(&rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].p_lanIntfConf->gmac, sizeof(rtk_mac_t)); rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].p_lanIntfConf->ip_version=0; rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].p_lanIntfConf->ip_addr=0; rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].p_lanIntfConf->ip_network_mask=0; bzero(rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].p_lanIntfConf->ipv6_addr.ipv6_addr,IPV6_ADDR_LEN); rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].p_lanIntfConf->ipv6_network_mask_length=0; rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].p_lanIntfConf->port_mask.portmask=0; //rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.lan_intf.extport_mask.bits[0]=0; rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].p_lanIntfConf->intf_vlan_id=0; //rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].p_lanIntfConf->pppoe_passThrough=0; #if 0 rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.lan_intf.dhcp_server_enable=0; rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.lan_intf.lease_time=0; rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.lan_intf.dhcp_start_ip_addr=0; rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.lan_intf.dhcp_end_ip_addr=0; rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.lan_intf.dhcp_port_binding_mask.bits[0]=0; rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo.lan_intf.dhcp_extport_binding_mask.bits[0]=0; #endif rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].p_lanIntfConf->mtu=0; rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].p_lanIntfConf=NULL; //Update PVID _rtk_rg_updatePortBasedVIDByLanOrder(pvid_mac_pmask, pvid_ext_pmask); } #if defined(CONFIG_RTL9600_SERIES) if((rg_db.systemGlobal.internalSupportMask & RTK_RG_INTERNAL_SUPPORT_BIT0)){ // Multicast packets must change cvid to PVID if they're from PON port and un-Ctag. (MC CVID is not from internalVID when VSPMSK enabled) #if 0 { rtk_rg_aclAndCf_reserved_multicastVidTranslateForIpv4_t multicastVidTranslateForIpv4; rtk_rg_aclAndCf_reserved_multicastVidTranslateForIpv6_t multicastVidTranslateForIpv6; //int pvid; //rtk_vlan_portPvid_get(RTK_RG_MAC_PORT_PON,&pvid); multicastVidTranslateForIpv4.vid=rg_db.systemGlobal.portBasedVID[RTK_RG_MAC_PORT_PON]; multicastVidTranslateForIpv6.vid=rg_db.systemGlobal.portBasedVID[RTK_RG_MAC_PORT_PON]; assert_ok(_rtk_rg_aclAndCfReservedRuleAdd(RTK_RG_ACLANDCF_RESERVED_MULTICAST_VID_TRANSLATE_FOR_IPV4, &multicastVidTranslateForIpv4)); assert_ok(_rtk_rg_aclAndCfReservedRuleAdd(RTK_RG_ACLANDCF_RESERVED_MULTICAST_VID_TRANSLATE_FOR_IPV6, &multicastVidTranslateForIpv6)); } #endif } #endif errorno=RT_ERR_RG_OK; //Check PPPoE Pass through _rtk_rg_refreshPPPoEPassThroughLanOrWanPortMask(); #if defined(CONFIG_RTL9602C_SERIES) //20160418LUKE: for DSlite routing mode, we should trap TCP SYN for MSS clamping. 
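	/* DS-Lite encapsulation adds a 40-byte outer IPv6 header, so routed DS-Lite traffic has a
	 * smaller effective MTU; trapping TCP SYN packets lets software clamp the MSS option
	 * accordingly (decided by the reserved-ACL helper below). */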
	_rtk_rg_dslite_routing_reserved_acl_decision();
#endif

#if CONFIG_ACL_EGRESS_WAN_INTF_TRANSFORM
	//20141225LUKE: since the LAN or WAN is gone, we should release hw ACL rules which use the WAN interface as egress interface or use LANGMAC as DMAC for L34
	if(rg_db.systemGlobal.acl_SW_egress_intf_type_zero_num)
		ASSERT_EQ(_rtk_rg_aclSWEntry_and_asic_rearrange(),RT_ERR_RG_OK);
#endif

	//call the WAN-interface-delete callback to sync the protocol stack
	if(rg_db.systemGlobal.initParam.interfaceDelByHwCallBack != NULL)
	{
		//rg_db.systemGlobal.interfaceInfo[lan_or_wan_intf_idx].storedInfo has been reset! so, use keep_store_info
		rg_db.systemGlobal.initParam.interfaceDelByHwCallBack(&keep_store_info,&lan_or_wan_intf_idx);
	}

	//20150315CHUCK: detect if RTK_RG_ACLANDCF_RESERVED_PPPoE_LCP_PACKET_ASSIGN_PRIORITY is needed.
	if(_rtk_rg_add_pppoe_lcp_reserved_acl_detect()){
		rtk_rg_aclAndCf_reserved_ack_packet_assign_priority_t ack_packet_assign_priority;
		bzero(&ack_packet_assign_priority,sizeof(ack_packet_assign_priority));
		ack_packet_assign_priority.priority=7;
#ifdef CONFIG_DUALBAND_CONCURRENT
		if(ack_packet_assign_priority.priority==CONFIG_DEFAULT_TO_SLAVE_GMAC_PRI)
			ack_packet_assign_priority.priority = (CONFIG_DEFAULT_TO_SLAVE_GMAC_PRI-1);
#endif
		_rtk_rg_aclAndCfReservedRuleAdd(RTK_RG_ACLANDCF_RESERVED_PPPoE_LCP_PACKET_ASSIGN_PRIORITY, &ack_packet_assign_priority);
		DEBUG("Add reserved ACL RTK_RG_ACLANDCF_RESERVED_PPPoE_LCP_PACKET_ASSIGN_PRIORITY");
	}else{
		_rtk_rg_aclAndCfReservedRuleDel(RTK_RG_ACLANDCF_RESERVED_PPPoE_LCP_PACKET_ASSIGN_PRIORITY);
		DEBUG("Del reserved ACL RTK_RG_ACLANDCF_RESERVED_PPPoE_LCP_PACKET_ASSIGN_PRIORITY");
	}

	//if no DS-Lite WAN remains, clear the dsliteMc entry
	{
		uint32 delDsliteMcEntry=1;
		rtk_l34_dsliteMc_entry_t dsliteMcEntry;
		for(i=0;i<MAX_NETIF_SW_TABLE_SIZE;i++)
		{
			if( rg_db.systemGlobal.interfaceInfo[i].storedInfo.is_wan &&
				rg_db.systemGlobal.interfaceInfo[i].valid &&
				(rg_db.systemGlobal.interfaceInfo[i].storedInfo.wan_intf.wan_intf_conf.wan_type==RTK_RG_DSLITE ||
				rg_db.systemGlobal.interfaceInfo[i].storedInfo.wan_intf.wan_intf_conf.wan_type==RTK_RG_PPPoE_DSLITE))
			{
				delDsliteMcEntry=0;
				break;
			}
		}
		bzero(&dsliteMcEntry,sizeof(dsliteMcEntry));
		memset(&dsliteMcEntry.ipUPrefix64Mask,0xff,sizeof(rtk_ipv6_addr_t));
		memset(&dsliteMcEntry.ipMPrefix64Mask,0xff,sizeof(rtk_ipv6_addr_t));
		ASSERT_EQ(RTK_L34_DSLITEMULTICAST_SET(&dsliteMcEntry),RT_ERR_RG_OK);
	}

RET_ERR:
	//------------------ Critical Section End -----------------------//
	//rg_unlock(&rg_kernel.interfaceLock);
	RETURN_ERR(errorno);
}

/*
	if (valid_lan_or_wan_intf_idx == -1) find the interface by IP (WAN first, then LAN)
	else find the first valid interface starting from valid_lan_or_wan_intf_idx
*/
rtk_rg_err_code_t rtk_rg_apollo_intfInfo_find(rtk_rg_intfInfo_t *intf_info, int *valid_lan_or_wan_intf_idx)
{
	int i,/*ret,*/valid_idx;
	//ipaddr_t search_ip, search_mask;
	//rtk_l34_netif_entry_t intfEt;
	rtk_ipv6_addr_t zeroV6Addr;

	//Check parameter
	if(intf_info == NULL || valid_lan_or_wan_intf_idx == NULL) RETURN_ERR(RT_ERR_RG_NULL_POINTER);
	valid_idx = *valid_lan_or_wan_intf_idx;
	if(valid_idx<-1 || valid_idx>=MAX_NETIF_SW_TABLE_SIZE) RETURN_ERR(RT_ERR_RG_INVALID_PARAM);

	if(valid_idx == -1)
	{
		//Check IP address valid or not
		bzero(zeroV6Addr.ipv6_addr,IPV6_ADDR_LEN);
		if(intf_info->lan_intf.ip_addr == 0 && memcmp(intf_info->lan_intf.ipv6_addr.ipv6_addr,zeroV6Addr.ipv6_addr,IPV6_ADDR_LEN) == 0) RETURN_ERR(RT_ERR_RG_INVALID_PARAM);
		//search_ip=intf_info->lan_intf.ip_addr;
		//search_mask=intf_info->lan_intf.ip_network_mask;

		//Find interface by ip
		//Search Wan first, then Lan
		for(i=0;
i<rg_db.systemGlobal.wanIntfTotalNum; i++) { //Bridge WAN won't be compared with if(rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->wan_type==RTK_RG_BRIDGE) continue; if(rg_db.systemGlobal.wanIntfGroup[i].p_intfInfo->p_wanStaticInfo->ip_addr == intf_info->lan_intf.ip_addr || memcmp(rg_db.systemGlobal.wanIntfGroup[i].p_intfInfo->p_wanStaticInfo->ipv6_addr.ipv6_addr,intf_info->lan_intf.ipv6_addr.ipv6_addr,IPV6_ADDR_LEN)==0) { valid_idx=rg_db.systemGlobal.wanIntfGroup[i].index; goto MATCHING_IDX; } } for(i=0; i<rg_db.systemGlobal.lanIntfTotalNum; i++) { if(rg_db.systemGlobal.lanIntfGroup[i].p_intfInfo->p_lanIntfConf->ip_addr == intf_info->lan_intf.ip_addr || memcmp(rg_db.systemGlobal.lanIntfGroup[i].p_intfInfo->p_lanIntfConf->ipv6_addr.ipv6_addr,intf_info->lan_intf.ipv6_addr.ipv6_addr,IPV6_ADDR_LEN)==0) { valid_idx=rg_db.systemGlobal.lanIntfGroup[i].index; goto MATCHING_IDX; } } return (RT_ERR_RG_ENTRY_NOT_EXIST); } else { //Find the first valid interface from valid_lan_or_wan_intf_idx for(i=valid_idx; i<MAX_NETIF_SW_TABLE_SIZE; i++) { //bzero(&intfEt, sizeof(rtk_l34_netif_entry_t)); //ret = rtk_l34_netifTable_get(i, &intfEt); //if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_INTF_GET_FAIL); //if(intfEt.valid != 0) if(rg_db.systemGlobal.interfaceInfo[i].valid != 0) { valid_idx = i; break; } } if(i==MAX_NETIF_SW_TABLE_SIZE) return (RT_ERR_RG_ENTRY_NOT_EXIST); } MATCHING_IDX: bzero(intf_info,sizeof(rtk_rg_intfInfo_t)); memcpy(intf_info,&rg_db.systemGlobal.interfaceInfo[valid_idx].storedInfo,sizeof(rtk_rg_intfInfo_t)); #if 0 memcpy(intf_info->intf_name, rg_db.systemGlobal.interfaceInfo[valid_idx].storedInfo.intf_name, 32); intf_info->is_wan=rg_db.systemGlobal.interfaceInfo[valid_idx].storedInfo.is_wan; if(intf_info->is_wan) { intf_info->wan_intf.wan_intf_conf.wan_type=rg_db.systemGlobal.interfaceInfo[valid_idx].storedInfo.wan_intf.wan_intf_conf.wan_type; memcpy(&intf_info->wan_intf.wan_intf_conf.gmac, &rg_db.systemGlobal.interfaceInfo[valid_idx].storedInfo.wan_intf.wan_intf_conf.gmac, sizeof(rtk_mac_t)); intf_info->wan_intf.wan_intf_conf.wan_port_idx=rg_db.systemGlobal.interfaceInfo[valid_idx].storedInfo.wan_intf.wan_intf_conf.wan_port_idx; intf_info->wan_intf.wan_intf_conf.port_binding_mask.portmask=rg_db.systemGlobal.interfaceInfo[valid_idx].storedInfo.wan_intf.wan_intf_conf.port_binding_mask.portmask; intf_info->wan_intf.wan_intf_conf.egress_vlan_tag_on=rg_db.systemGlobal.interfaceInfo[valid_idx].storedInfo.wan_intf.wan_intf_conf.egress_vlan_tag_on; intf_info->wan_intf.wan_intf_conf.egress_vlan_id=rg_db.systemGlobal.interfaceInfo[valid_idx].storedInfo.wan_intf.wan_intf_conf.egress_vlan_id; intf_info->wan_intf.wan_intf_conf.vlan_based_pri=rg_db.systemGlobal.interfaceInfo[valid_idx].storedInfo.wan_intf.wan_intf_conf.vlan_based_pri; if(intf_info->wan_intf.wan_intf_conf.wan_type != RTK_RG_BRIDGE) { intf_info->wan_intf.static_info.ip_version=rg_db.systemGlobal.interfaceInfo[valid_idx].p_wanStaticInfo->ip_version; intf_info->wan_intf.static_info.napt_enable=rg_db.systemGlobal.interfaceInfo[valid_idx].p_wanStaticInfo->napt_enable; intf_info->wan_intf.static_info.ip_addr=rg_db.systemGlobal.interfaceInfo[valid_idx].p_wanStaticInfo->ip_addr; intf_info->wan_intf.static_info.ip_network_mask=rg_db.systemGlobal.interfaceInfo[valid_idx].p_wanStaticInfo->ip_network_mask; memcpy(intf_info->wan_intf.static_info.ipv6_addr.ipv6_addr,rg_db.systemGlobal.interfaceInfo[valid_idx].p_wanStaticInfo->ipv6_addr.ipv6_addr,IPV6_ADDR_LEN); 
intf_info->wan_intf.static_info.ipv6_mask_length=rg_db.systemGlobal.interfaceInfo[valid_idx].p_wanStaticInfo->ipv6_mask_length; intf_info->wan_intf.static_info.ipv4_default_gateway_on=rg_db.systemGlobal.interfaceInfo[valid_idx].p_wanStaticInfo->ipv4_default_gateway_on; intf_info->wan_intf.static_info.gateway_ipv4_addr=rg_db.systemGlobal.interfaceInfo[valid_idx].p_wanStaticInfo->gateway_ipv4_addr; intf_info->wan_intf.static_info.ipv6_default_gateway_on=rg_db.systemGlobal.interfaceInfo[valid_idx].p_wanStaticInfo->ipv6_default_gateway_on; intf_info->wan_intf.static_info.gateway_ipv6_addr=rg_db.systemGlobal.interfaceInfo[valid_idx].p_wanStaticInfo->gateway_ipv6_addr; intf_info->wan_intf.static_info.dns_ip_addr1=rg_db.systemGlobal.interfaceInfo[valid_idx].p_wanStaticInfo->dns_ip_addr1; intf_info->wan_intf.static_info.dns_ip_addr2=rg_db.systemGlobal.interfaceInfo[valid_idx].p_wanStaticInfo->dns_ip_addr2; intf_info->wan_intf.static_info.dns_ip_addr3=rg_db.systemGlobal.interfaceInfo[valid_idx].p_wanStaticInfo->dns_ip_addr3; intf_info->wan_intf.static_info.mtu=rg_db.systemGlobal.interfaceInfo[valid_idx].p_wanStaticInfo->mtu; intf_info->wan_intf.static_info.gw_mac_auto_learn_for_ipv4=rg_db.systemGlobal.interfaceInfo[valid_idx].p_wanStaticInfo->gw_mac_auto_learn_for_ipv4; memcpy(&intf_info->wan_intf.static_info.gateway_mac_addr_for_ipv4,&rg_db.systemGlobal.interfaceInfo[valid_idx].p_wanStaticInfo->gateway_mac_addr_for_ipv4,sizeof(rtk_mac_t)); intf_info->wan_intf.static_info.gw_mac_auto_learn_for_ipv4=rg_db.systemGlobal.interfaceInfo[valid_idx].p_wanStaticInfo->gw_mac_auto_learn_for_ipv4; memcpy(&intf_info->wan_intf.static_info.gateway_mac_addr_for_ipv6,&rg_db.systemGlobal.interfaceInfo[valid_idx].p_wanStaticInfo->gateway_mac_addr_for_ipv6,sizeof(rtk_mac_t)); if(intf_info->wan_intf.wan_intf_conf.wan_type == RTK_RG_DHCP) { intf_info->wan_intf.dhcp_client_info.stauts=rg_db.systemGlobal.interfaceInfo[valid_idx].storedInfo.wan_intf.dhcp_client_info.stauts; } else if(intf_info->wan_intf.wan_intf_conf.wan_type == RTK_RG_PPPoE) { intf_info->wan_intf.pppoe_info.after_dial.sessionId=rg_db.systemGlobal.interfaceInfo[valid_idx].storedInfo.wan_intf.pppoe_info.after_dial.sessionId; bzero(intf_info->wan_intf.pppoe_info.before_dial.username, 4); memcpy(intf_info->wan_intf.pppoe_info.before_dial.username, rg_db.systemGlobal.interfaceInfo[valid_idx].storedInfo.wan_intf.pppoe_info.before_dial.username, 4); bzero(intf_info->wan_intf.pppoe_info.before_dial.password, 4); memcpy(intf_info->wan_intf.pppoe_info.before_dial.password, rg_db.systemGlobal.interfaceInfo[valid_idx].storedInfo.wan_intf.pppoe_info.before_dial.password, 4); intf_info->wan_intf.pppoe_info.before_dial.auth_type=rg_db.systemGlobal.interfaceInfo[valid_idx].storedInfo.wan_intf.pppoe_info.before_dial.auth_type; intf_info->wan_intf.pppoe_info.before_dial.pppoe_proxy_enable=rg_db.systemGlobal.interfaceInfo[valid_idx].storedInfo.wan_intf.pppoe_info.before_dial.pppoe_proxy_enable; intf_info->wan_intf.pppoe_info.before_dial.max_pppoe_proxy_num=rg_db.systemGlobal.interfaceInfo[valid_idx].storedInfo.wan_intf.pppoe_info.before_dial.max_pppoe_proxy_num; intf_info->wan_intf.pppoe_info.before_dial.auto_reconnect=rg_db.systemGlobal.interfaceInfo[valid_idx].storedInfo.wan_intf.pppoe_info.before_dial.auto_reconnect; intf_info->wan_intf.pppoe_info.before_dial.dial_on_demond=rg_db.systemGlobal.interfaceInfo[valid_idx].storedInfo.wan_intf.pppoe_info.before_dial.dial_on_demond; 
intf_info->wan_intf.pppoe_info.before_dial.idle_timeout_secs=rg_db.systemGlobal.interfaceInfo[valid_idx].storedInfo.wan_intf.pppoe_info.before_dial.idle_timeout_secs; intf_info->wan_intf.pppoe_info.before_dial.stauts=rg_db.systemGlobal.interfaceInfo[valid_idx].storedInfo.wan_intf.pppoe_info.before_dial.stauts; intf_info->wan_intf.pppoe_info.before_dial.dialOnDemondCallBack=rg_db.systemGlobal.interfaceInfo[valid_idx].storedInfo.wan_intf.pppoe_info.before_dial.dialOnDemondCallBack; intf_info->wan_intf.pppoe_info.before_dial.idleTimeOutCallBack=rg_db.systemGlobal.interfaceInfo[valid_idx].storedInfo.wan_intf.pppoe_info.before_dial.idleTimeOutCallBack; } } } else { intf_info->lan_intf.ip_version=rg_db.systemGlobal.interfaceInfo[valid_idx].p_lanIntfConf->ip_version; memcpy(&intf_info->lan_intf.gmac, &rg_db.systemGlobal.interfaceInfo[valid_idx].p_lanIntfConf->gmac, sizeof(rtk_mac_t)); intf_info->lan_intf.ip_addr=rg_db.systemGlobal.interfaceInfo[valid_idx].p_lanIntfConf->ip_addr; intf_info->lan_intf.ip_network_mask=rg_db.systemGlobal.interfaceInfo[valid_idx].p_lanIntfConf->ip_network_mask; memcpy(intf_info->lan_intf.ipv6_addr.ipv6_addr,rg_db.systemGlobal.interfaceInfo[valid_idx].p_lanIntfConf->ipv6_addr.ipv6_addr,IPV6_ADDR_LEN); intf_info->lan_intf.ipv6_network_mask_length=rg_db.systemGlobal.interfaceInfo[valid_idx].p_lanIntfConf->ipv6_network_mask_length; intf_info->lan_intf.port_mask.portmask=rg_db.systemGlobal.interfaceInfo[valid_idx].p_lanIntfConf->port_mask.portmask; intf_info->lan_intf.untag_mask.portmask=rg_db.systemGlobal.interfaceInfo[valid_idx].p_lanIntfConf->untag_mask.portmask; intf_info->lan_intf.intf_vlan_id=rg_db.systemGlobal.interfaceInfo[valid_idx].p_lanIntfConf->intf_vlan_id; #if 0 intf_info->lan_intf.dhcp_server_enable=rg_db.systemGlobal.interfaceInfo[valid_idx].storedInfo.lan_intf.dhcp_server_enable; intf_info->lan_intf.lease_time=rg_db.systemGlobal.interfaceInfo[valid_idx].storedInfo.lan_intf.lease_time; intf_info->lan_intf.dhcp_start_ip_addr=rg_db.systemGlobal.interfaceInfo[valid_idx].storedInfo.lan_intf.dhcp_start_ip_addr; intf_info->lan_intf.dhcp_end_ip_addr=rg_db.systemGlobal.interfaceInfo[valid_idx].storedInfo.lan_intf.dhcp_end_ip_addr; intf_info->lan_intf.dhcp_port_binding_mask.bits[0]=rg_db.systemGlobal.interfaceInfo[valid_idx].storedInfo.lan_intf.dhcp_port_binding_mask.bits[0]; intf_info->lan_intf.dhcp_extport_binding_mask.bits[0]=rg_db.systemGlobal.interfaceInfo[valid_idx].storedInfo.lan_intf.dhcp_extport_binding_mask.bits[0]; #endif intf_info->lan_intf.mtu=rg_db.systemGlobal.interfaceInfo[valid_idx].p_lanIntfConf->mtu; intf_info->lan_intf.pppoe_passThrough=rg_db.systemGlobal.interfaceInfo[valid_idx].p_lanIntfConf->pppoe_passThrough; } intf_info->ingress_packet_count=rg_db.systemGlobal.interfaceInfo[valid_idx].storedInfo.ingress_packet_count; intf_info->ingress_byte_count=rg_db.systemGlobal.interfaceInfo[valid_idx].storedInfo.ingress_byte_count; intf_info->egress_packet_count=rg_db.systemGlobal.interfaceInfo[valid_idx].storedInfo.egress_packet_count; intf_info->egress_byte_count=rg_db.systemGlobal.interfaceInfo[valid_idx].storedInfo.egress_byte_count; #endif //Return the valid index *valid_lan_or_wan_intf_idx = valid_idx; return (RT_ERR_RG_OK); } #if defined(CONFIG_APOLLO) __IRAM_FWDENG_SLOWPATH unsigned int _rtk_rg_NAPTRemoteHash_get(unsigned int ip, unsigned int port) { unsigned short hash_value=0; hash_value = ((ip&0xffff) ^ ((ip>>16)&0xffff) ^ (port)); return hash_value; } #elif defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) //end defined(CONFIG_APOLLO) //return 10bits 
absolute index unsigned int _rtk_rg_NAPTRemoteHash_get( uint16 isTCP,unsigned int ip, unsigned int port) { return rtl8651_naptTcpUdpTableIndex(((uint8)isTCP) |HASH_FOR_VERI , ip,port, 0, 0); } #endif //end defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) unsigned char _rtk_rg_IPv6NeighborHash(const uint8 *ifid, unsigned char rtidx) { /* nb_8bhash_idx[7:0] = pkt.dip[7:0] ^ pkt.dip[15:8] ^ pkt.dip[23:16] ^ pkt.dip[31:24] ^ pkt.dip[39:32] ^ pkt.dip[47:40] ^ pkt.dip[55:48] ^ pkt.dip[63:56] ; nb_hash_idx[3:0] = nb_8bhash_idx[3:0] ^ nb_8bhash_idx[7:4] ^ { Ipv6_rtidx[0], Ipv6_rtidx[1], Ipv6_rtidx[1:0] } ; */ unsigned char pre_idx; unsigned char rightHalf; unsigned char leftHalf; unsigned char rtidx_0; unsigned char rtidx_1; unsigned char newrtidx; unsigned char hash_idx; pre_idx = ifid[7]^ifid[6]^ifid[5]^ifid[4]^ifid[3]^ifid[2]^ifid[1]^ifid[0]; rightHalf = pre_idx&0xf; leftHalf = (pre_idx>>4)&0xf; rtidx_0 = (rtidx&0x1)<<3; rtidx_1 = (rtidx&0x2)<<1; newrtidx = rtidx_0 | rtidx_1 | rtidx; hash_idx = rightHalf^leftHalf^newrtidx; return hash_idx; } void txinfo_debug(struct tx_info *pTxInfo); int rtk_rg_congestionCtrlQueuePkt(struct sk_buff *skb, struct tx_info* ptxInfo,struct tx_info* ptxInfoMask,int isHighQueue) { int firstPortIdx=0; rtk_rg_congestionCtrlRing_t *ccr; if(ptxInfo->opts3.bit.tx_portmask&0x1) { firstPortIdx=0; } else if(ptxInfo->opts3.bit.tx_portmask&0x2) { firstPortIdx=1; } else if(ptxInfo->opts3.bit.tx_portmask&0x4) { firstPortIdx=2; } #if defined(CONFIG_RTL9600_SERIES) || defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) || defined(CONFIG_RTL9607C_SERIES) else if(ptxInfo->opts3.bit.tx_portmask&0x8) { firstPortIdx=3; } else if(ptxInfo->opts3.bit.tx_portmask&0x10) { firstPortIdx=4; } #endif #if defined(CONFIG_RTL9607C_SERIES) else if(ptxInfo->opts3.bit.tx_portmask&0x20) { firstPortIdx=5; } #endif else { if(rg_db.systemGlobal.fwdStatistic) { rg_db.systemGlobal.statistic.perPortCnt_NIC_TX[rg_db.pktHdr->ingressPort]++; } return re8686_send_with_txInfo_and_mask(skb,ptxInfo,0,ptxInfoMask); } if((rg_db.congestionCtrlQueueCounter[isHighQueue][firstPortIdx]+1)==MAX_CONGESTION_CTRL_RING_SIZE) { rg_db.congestionCtrlFullDrop[isHighQueue][firstPortIdx]++; _rtk_rg_dev_kfree_skb_any(skb); return 0; } ccr=&rg_db.congestionCtrlRing[isHighQueue][firstPortIdx][rg_db.congestionCtrlQueueIdx[isHighQueue][firstPortIdx]]; memcpy(&ccr->ptxInfo,ptxInfo,sizeof(struct tx_info)); memcpy(&ccr->ptxInfoMsk,ptxInfoMask,sizeof(struct tx_info)); ccr->pSkb=skb; ccr->ingressPort=rg_db.pktHdr->ingressPort; rg_db.congestionCtrlQueueCounter[isHighQueue][firstPortIdx]++; if(rg_db.congestionCtrlQueueCounter[isHighQueue][firstPortIdx]>rg_db.congestionCtrlMaxQueueCounter[isHighQueue][firstPortIdx]) rg_db.congestionCtrlMaxQueueCounter[isHighQueue][firstPortIdx]=rg_db.congestionCtrlQueueCounter[isHighQueue][firstPortIdx]; rg_db.congestionCtrlQueueIdx[isHighQueue][firstPortIdx]++; if(rg_db.congestionCtrlQueueIdx[isHighQueue][firstPortIdx]==MAX_CONGESTION_CTRL_RING_SIZE) rg_db.congestionCtrlQueueIdx[isHighQueue][firstPortIdx]=0; return 0; } __IRAM_FWDENG static void _rtk_rg_congestionCtrlTimerFunc(struct re_private *cp) { uint32 bytesSended[MAX_CONGESTION_CTRL_PORTS]={0}; uint32 sendFinished[2][MAX_CONGESTION_CTRL_PORTS]={{0},{0}}; int i,j,k; for(j=(rg_db.systemGlobal.congestionCtrlInboundAckToHighQueue?1:0);j>=0;j--) { int finishedPorts=0; while(1) { for(i=0;i<MAX_CONGESTION_CTRL_PORTS;i++) { int sendTimesPerPort=0; if(sendFinished[j][i]==0) { while(1) { int len; rtk_rg_congestionCtrlRing_t *ccr; if(rg_db.congestionCtrlQueueCounter[j][i]==0) { 
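	/* Nothing is queued for this port/priority: mark it finished so the outer loop can stop once
	 * every port has either drained its ring or used up its byte budget for this interval. */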
sendFinished[j][i]=1; finishedPorts++; break; } ccr=&rg_db.congestionCtrlRing[j][i][rg_db.congestionCtrlSendIdx[j][i]]; len=ccr->pSkb->len; if(len<60) len=60; len+=24; //preamble+CRC if((rg_db.systemGlobal.congestionCtrlSendBytesInterval[i]+rg_db.congestionCtrlSendedRemainder[i])<(bytesSended[i]+len)) { sendFinished[j][i]=1; if(rg_db.systemGlobal.congestionCtrlSendRemainderInNextGap) { rg_db.congestionCtrlSendedRemainder[i]=(rg_db.systemGlobal.congestionCtrlSendBytesInterval[i]+rg_db.congestionCtrlSendedRemainder[i]-bytesSended[i]); } finishedPorts++; break; } if(rg_db.systemGlobal.fwdStatistic) { rg_db.systemGlobal.statistic.perPortCnt_NIC_TX[ccr->ingressPort]++; } re8686_send_with_txInfo_and_mask(ccr->pSkb,&ccr->ptxInfo,0,&ccr->ptxInfoMsk); sendTimesPerPort++; rg_db.congestionCtrlQueueCounter[j][i]--; rg_db.congestionCtrlSendIdx[j][i]++; if(rg_db.congestionCtrlSendIdx[j][i]==MAX_CONGESTION_CTRL_RING_SIZE) rg_db.congestionCtrlSendIdx[j][i]=0; bytesSended[i]+=len; if((1<<i)!=ccr->ptxInfo.opts3.bit.tx_portmask) //send to multi-ports { for(k=i+1;k<MAX_CONGESTION_CTRL_PORTS;k++) { if((1<<k)&ccr->ptxInfo.opts3.bit.tx_portmask) { bytesSended[k]+=len; } } } if(sendTimesPerPort>=rg_db.systemGlobal.congestionCtrlSendTimesPerPort) { break; } } } } if(finishedPorts>=MAX_CONGESTION_CTRL_PORTS) break; } } } __IRAM_FWDENG static irqreturn_t rtk_rg_timer_interrupt(int irq, void * dev_instance, struct pt_regs *regs) { //_rtk_rg_congestionCtrlTimerSubFunc(); if(REG32(TC2INT)&0x10000) { REG32(TC2INT)|=0x110000; //enable interrupt & clear isr tasklet_hi_schedule(&rg_db.systemGlobal.congestionCtrlTasklets); } else { return IRQ_RETVAL(IRQ_NONE); } return IRQ_RETVAL(IRQ_HANDLED); } #define JUMBO_SKB_BUF_SIZE (9216+18+2) //#define JUBMO_SPLIT_LEN (1514) __IRAM_FWDENG int _rtk_rg_send_with_txInfo_and_mask(rtk_rg_pktHdr_t *pPktHdr, struct sk_buff *skb, struct tx_info* ptxInfo, int ring_num, struct tx_info* ptxInfoMask) { #if defined(CONFIG_RTL9602C_SERIES) //20151027LUKE: from protocol stack(local-out) should not be counted!! if(pPktHdr->ingressLocation!=RG_IGR_PROTOCOL_STACK){ //20151026LUKE: count MIB for downstream skb only once. if(pPktHdr->mibDirect==RTK_RG_CLASSIFY_DIRECTION_DOWNSTREAM){ rg_db.netif[pPktHdr->mibNetifIdx].rtk_mib.ifInOctets+=(pPktHdr->pRxDesc->rx_data_length); //original packet length minus CRC rg_db.netif[pPktHdr->mibNetifIdx].rtk_mib.ifInUcstPkts++; pPktHdr->mibDirect=RTK_RG_CLASSIFY_DIRECTION_END; //in case _rtk_rg_splitJumbo will count more than once. }else if(pPktHdr->mibDirect==RTK_RG_CLASSIFY_DIRECTION_UPSTREAM){ //20151026LUKE: count MIB for each split upstream skb. rg_db.netif[pPktHdr->mibNetifIdx].rtk_mib.ifOutOctets+=(skb->len+pPktHdr->mibTagDelta+4); // add crc rg_db.netif[pPktHdr->mibNetifIdx].rtk_mib.ifOutUcstPkts++; } } //20160323LUKE: update TX mib count if DMAC match, padding and crc will be counted as hw. 
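	/* Frames shorter than RG_MIN_MRU are accounted as RG_MIN_MRU+4 bytes so the software TX
	 * counter matches what the hardware actually emits after minimum-frame padding plus CRC. */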
if(pPktHdr->dmacL2Idx!=FAIL && rg_db.lut[pPktHdr->dmacL2Idx].host_idx_valid){ if(skb->len+pPktHdr->mibTagDelta<=RG_MIN_MRU) rg_db.hostPoliceList[rg_db.lut[pPktHdr->dmacL2Idx].host_idx].count.tx_count+=RG_MIN_MRU+4; //add crc else rg_db.hostPoliceList[rg_db.lut[pPktHdr->dmacL2Idx].host_idx].count.tx_count+=(skb->len+pPktHdr->mibTagDelta+4); //add crc DEBUG("update TX host mib, dmacL2=%d, tx=>%llu",pPktHdr->dmacL2Idx,rg_db.hostPoliceList[rg_db.lut[pPktHdr->dmacL2Idx].host_idx].count.tx_count); } #endif if((rg_db.systemGlobal.congestionCtrlIntervalMicroSecs!=0)&&(ptxInfo->opts3.bit.tx_portmask&rg_db.systemGlobal.congestionCtrlPortMask)) { int isHighQueue=0,ret; if((skb->len<=100)&&(rg_db.systemGlobal.congestionCtrlInboundAckToHighQueue)&&(pPktHdr->tcpFlags.ack==1)) isHighQueue=1; ret=rtk_rg_congestionCtrlQueuePkt(skb,ptxInfo,ptxInfoMask,isHighQueue); return ret; } else { if(rg_db.systemGlobal.fwdStatistic) { rg_db.systemGlobal.statistic.perPortCnt_NIC_TX[pPktHdr->ingressPort]++; } #if defined(CONFIG_APOLLO_FPGA_PHY_TEST) { struct sk_buff *new_skb=NULL; _rtk_rg_fpgaTest_txProcess(skb, &new_skb, (void *)ptxInfo, (void *)ptxInfoMask, pPktHdr); return re8686_send_with_txInfo_and_mask(new_skb,ptxInfo,ring_num,ptxInfoMask); } #else //TRACE("Kernel Panic!!!"); //xdsl:Magic TRACE cause 8685S kernel panic //TRACE("DIRECTX to portmask 0x%x (txDescMask=0x%x)",ptxInfo->opts3.bit.tx_portmask,ptxInfoMask->opts3.bit.tx_portmask); return re8686_send_with_txInfo_and_mask(skb,ptxInfo,ring_num,ptxInfoMask); #endif } } int _rtk_rg_splitJumbo(rtk_rg_pktHdr_t *pPktHdr, struct sk_buff *skb) { unsigned char *skbdata=(unsigned char *)(((u32)skb->data)|0xa0000000); int remainLen=skb->len; uint16 l3Offset=pPktHdr->l3Offset; int l3HdrLen=((skbdata[l3Offset]&0xf)<<2); int l23Len=l3Offset+l3HdrLen; int offset=pPktHdr->ipv4FragmentOffset<<3; int eachPayloadLen; #if defined(CONFIG_APOLLO_FPGA_PHY_TEST) unsigned int ori_tx_portmask = rg_kernel.txDesc.tx_tx_portmask; #endif if(pPktHdr->netifIdx==FAIL) { WARNING("pPktHdr->netifIdx==FAIL\n"); goto send_error; } eachPayloadLen=rg_db.netif[pPktHdr->netifIdx].rtk_netif.mtu-l3HdrLen; eachPayloadLen=(eachPayloadLen>>3)<<3; //align to multiple of 8, because each fragment offset is based on 8 //DEBUG("Jumbo Frame send to NIC len=%d tagif=0x%x l23Len=%d\n",skb->len,pPktHdr->tagif,l23Len); if(remainLen>JUMBO_SKB_BUF_SIZE) { goto send_error; } //20151030LUKE: we just don't support DS-Lite fragment now. 
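	/* The loop below splits an over-MTU IPv4 packet into fragments by hand: the fragment offset
	 * is stored in 8-byte units across bytes 6..7 of the IPv4 header, bit 0x20 of byte 6 is the
	 * MF (More Fragments) flag, and the total-length field (bytes 2..3) plus the optional PPPoE
	 * payload length are rewritten per fragment. A minimal illustrative sketch (not compiled,
	 * values assumed) of the header math: */
#if 0
	{
		unsigned char iph[20];          /* hypothetical IPv4 header buffer */
		int off = 1480, more = 1;       /* e.g. a second fragment, with further fragments to follow */
		iph[6] = (off >> 3) >> 8;       /* high 5 bits of the offset, in 8-byte units */
		iph[7] = (off >> 3) & 0xff;     /* low 8 bits of the offset, in 8-byte units */
		if (more)
			iph[6] |= 0x20;             /* set More Fragments */
	}
#endif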
if((pPktHdr->tagif&IPV4_TAGIF)==0 || (pPktHdr->egressTagif&DSLITE_TAGIF)) { TRACE("OverMTU and can't be fragment...DROP!"); goto send_error; } remainLen-=l23Len; rg_kernel.txDescMask.tx_l4cs=1; rg_kernel.txDesc.tx_l4cs=0; while(remainLen>0) { struct sk_buff *allocSkb; unsigned char *new_data; allocSkb=_rtk_rg_getAlloc(skb->len); if(allocSkb==NULL) goto send_error; new_data=(unsigned char *)(((u32)allocSkb->data)|0xa0000000); //new_data=((u32)allocSkb->data); memcpy(new_data,skbdata,l23Len); memcpy(new_data+l23Len,skbdata+(skb->len-remainLen),eachPayloadLen); allocSkb->len=((remainLen>=eachPayloadLen)?eachPayloadLen:remainLen)+l23Len; //ACL("Send %x len=%d remainLen=%d l23Len=%d l4sum=%04x eachPayloadLen=%d (skb->len-remainLen)=%d\n",(u32)allocSkb,allocSkb->len,remainLen,l23Len,*pPktHdr->pL4Checksum,eachPayloadLen,(skb->len-remainLen)); //memDump(allocSkb->data+l23Len,8,"l4"); remainLen-=eachPayloadLen; if(offset==0) { new_data[l3Offset+6]|=0x20; //set MF } else { new_data[l3Offset+6]=offset>>3>>8; //H_offset new_data[l3Offset+7]=((offset>>3)&0xff); //L_offset if((remainLen>0)||(pPktHdr->ipv4MoreFragment)) new_data[l3Offset+6]|=0x20; //set MF(when org packet have already MF bit) } new_data[l3Offset+2]=(allocSkb->len-l3Offset)>>8; //IP hdr total H_length new_data[l3Offset+3]=(allocSkb->len-l3Offset)&0xff; //IP hdr total L_length if(pPktHdr->egressTagif&PPPOE_TAGIF){//PPPoE Hdr len new_data[l3Offset-4]=(allocSkb->len-l3Offset+2)>>8; new_data[l3Offset-3]=(allocSkb->len-l3Offset+2)&0xff; } #if defined(CONFIG_RTL9600_SERIES) //20160331LUKE: checksum by sw offload // L3 checksum re-calculate *(u16*)((u32)new_data+l3Offset+10)=0; *(u16*)((u32)new_data+l3Offset+10)=htons(inet_chksum(new_data+l3Offset,l3HdrLen)); #endif #if defined(CONFIG_APOLLO_FPGA_PHY_TEST) #if defined(CONFIG_RTL9602C_SERIES) rg_kernel.txDesc.tx_tx_portmask = ori_tx_portmask&0x3f; #elif defined(CONFIG_RTL9607C_SERIES) rg_kernel.txDesc.tx_tx_portmask = ori_tx_portmask&0x3ff; #endif #endif TRACE("SPLIT VID=0x%x(0x%x) VID_ACT=%d(%d) L34KEEP=%d(%d) PMSK=0x%x(0x%x) KEEP=%d(%d) frag_off=%d" ,rg_kernel.txDesc.tx_cvlan_vidh<<8|rg_kernel.txDesc.tx_cvlan_vidl ,rg_kernel.txDescMask.tx_cvlan_vidh<<8|rg_kernel.txDescMask.tx_cvlan_vidl ,rg_kernel.txDesc.tx_tx_cvlan_action ,rg_kernel.txDescMask.tx_tx_cvlan_action ,rg_kernel.txDesc.tx_l34_keep ,rg_kernel.txDescMask.tx_l34_keep ,rg_kernel.txDesc.tx_tx_portmask ,rg_kernel.txDescMask.tx_tx_portmask ,rg_kernel.txDesc.tx_keep ,rg_kernel.txDescMask.tx_keep ,offset); offset+=eachPayloadLen; _rtk_rg_send_with_txInfo_and_mask(pPktHdr,allocSkb,(struct tx_info*)&rg_kernel.txDesc,0,(struct tx_info*)&rg_kernel.txDescMask); } if(skb) _rtk_rg_dev_kfree_skb_any(skb); return RG_FWDENGINE_RET_DIRECT_TX; send_error: //if(skb) _rtk_rg_dev_kfree_skb_any(skb); return RG_FWDENGINE_RET_DROP; } __IRAM_FWDENG int _rtk_rg_splitJumboSendToNicWithTxInfoAndMask(rtk_rg_pktHdr_t *pPktHdr,struct sk_buff *skb, struct tx_info* ptxInfo, int ring_num, struct tx_info* ptxInfoMask) { if(skb->len<=1522 && (!pPktHdr->overMTU)) { #if defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) if(pPktHdr->ingressLocation==RG_IGR_PROTOCOL_STACK){ //keep packet ptxInfo->tx_l4cs=0; ptxInfo->tx_ipcs=0; ptxInfo->tx_tx_cvlan_action=0; ptxInfo->tx_tx_pppoe_action=0; } #endif _rtk_rg_send_with_txInfo_and_mask(pPktHdr,skb,ptxInfo,ring_num,ptxInfoMask); } else //skb->len > 1522 || pPktHdr->overMTU == 1 { if(pPktHdr->netifIdx==FAIL || (pPktHdr->netifIdx!=FAIL && rg_db.systemGlobal.interfaceInfo[pPktHdr->netifIdx].storedInfo.is_wan && 
rg_db.systemGlobal.interfaceInfo[pPktHdr->netifIdx].storedInfo.wan_intf.wan_intf_conf.wan_type==RTK_RG_BRIDGE)) { if(skb->len>SKB_BUF_SIZE) { WARNING("Bridge packet's size is larger than SKB_BUF_SIZE(%d), drop.", SKB_BUF_SIZE); return RG_FWDENGINE_RET_DROP; } TRACE("Forward jumbo bridge packet directly, length=%d.", skb->len); _rtk_rg_send_with_txInfo_and_mask(pPktHdr,skb,ptxInfo,ring_num,ptxInfoMask); } else if(!pPktHdr->overMTU) // not over mtu { if(skb->len>SKB_BUF_SIZE) { WARNING("Routing packet's size is larger than SKB_BUF_SIZE(%d), drop.", SKB_BUF_SIZE); return RG_FWDENGINE_RET_DROP; } TRACE("Forward jumbo routing packet directly, length=%d.", skb->len); _rtk_rg_send_with_txInfo_and_mask(pPktHdr,skb,ptxInfo,ring_num,ptxInfoMask); } else { return _rtk_rg_splitJumbo(pPktHdr, skb); } } return RG_FWDENGINE_RET_DIRECT_TX; } void _rtk_rg_psRxMirrorToPort0(struct sk_buff *skb, struct net_device *dev) { struct sk_buff *new_skb; new_skb=_rtk_rg_getAlloc(RG_FWDENGINE_PKT_LEN); TRACE("Rx Mirror to Port0"); if(new_skb!=NULL) { memcpy(new_skb->data,"\xff\xff\xff\xff\xff\xff\x00\x00\x00\x00\x00\x01\x88\x88",14); memcpy(new_skb->data+14,dev->name,16); memcpy(new_skb->data+30,skb->data,skb->len); new_skb->len=skb->len+30; //clear old value rg_kernel.txDescMask.opts1.dw=0; rg_kernel.txDescMask.opts2.dw=0; rg_kernel.txDescMask.opts3.dw=0; rg_kernel.txDesc.opts1.dw=0; rg_kernel.txDesc.opts2.dw=0; rg_kernel.txDesc.opts3.dw=0; //turn on txInfo mask, otherwise value won't be add rg_kernel.txDescMask.tx_ipcs=1; rg_kernel.txDescMask.tx_l4cs=1; rg_kernel.txDescMask.tx_dislrn=1; rg_kernel.txDescMask.tx_keep=1; rg_kernel.txDescMask.tx_l34_keep=1; rg_kernel.txDescMask.tx_tx_portmask=RTK_RG_ALL_MAC_PORTMASK_WITHOUT_CPU; #if defined(CONFIG_RTL9600_SERIES) || defined(CONFIG_RTL9602C_SERIES) || defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) rg_kernel.txDescMask.tx_cputag_ipcs=1; rg_kernel.txDescMask.tx_cputag_l4cs=1; rg_kernel.txDesc.tx_cputag_ipcs=0; rg_kernel.txDesc.tx_cputag_l4cs=0; #endif rg_kernel.txDesc.tx_ipcs=0; rg_kernel.txDesc.tx_l4cs=0; rg_kernel.txDesc.tx_dislrn=1; rg_kernel.txDesc.tx_keep=1; rg_kernel.txDesc.tx_l34_keep=1; if(rg_db.systemGlobal.phyPortStatus&(0x1<<RTK_RG_PORT0)) rg_kernel.txDesc.tx_tx_portmask=0x1; else rg_kernel.txDesc.tx_tx_portmask=0x4; _rtk_rg_send_with_txInfo_and_mask(rg_db.pktHdr,new_skb,(struct tx_info*)&rg_kernel.txDesc,0,(struct tx_info*)&rg_kernel.txDescMask); } else { TRACE("skb alloc failed, skip mirror!"); } return; } #ifdef CONFIG_MASTER_WLAN0_ENABLE void _rtk_rg_wifiTxRedirect(struct sk_buff *skb,struct net_device *netdev) { struct sk_buff *new_skb; new_skb=_rtk_rg_getAlloc(skb->len+30); if(new_skb!=NULL) { memcpy(new_skb->data,"\xff\xff\xff\xff\xff\xff\x00\x00\x00\x00\x00\x01\x88\x88",14); memcpy(new_skb->data+14,netdev->name,16); memcpy(new_skb->data+30,skb->data,skb->len); new_skb->len=skb->len+30; _rtk_rg_dev_kfree_skb_any(skb); //clear old value rg_kernel.txDescMask.opts1.dw=0; rg_kernel.txDescMask.opts2.dw=0; rg_kernel.txDescMask.opts3.dw=0; rg_kernel.txDesc.opts1.dw=0; rg_kernel.txDesc.opts2.dw=0; rg_kernel.txDesc.opts3.dw=0; //turn on txInfo mask, otherwise value won't be add rg_kernel.txDescMask.tx_ipcs=1; rg_kernel.txDescMask.tx_l4cs=1; rg_kernel.txDescMask.tx_dislrn=1; rg_kernel.txDescMask.tx_keep=1; rg_kernel.txDescMask.tx_l34_keep=1; rg_kernel.txDescMask.tx_tx_portmask=RTK_RG_ALL_MAC_PORTMASK_WITHOUT_CPU; #if defined(CONFIG_RTL9600_SERIES) || defined(CONFIG_RTL9602C_SERIES) || defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) rg_kernel.txDescMask.tx_cputag_ipcs=1; 
rg_kernel.txDescMask.tx_cputag_l4cs=1; rg_kernel.txDesc.tx_cputag_ipcs=0; rg_kernel.txDesc.tx_cputag_l4cs=0; #endif rg_kernel.txDesc.tx_ipcs=0; rg_kernel.txDesc.tx_l4cs=0; rg_kernel.txDesc.tx_dislrn=1; rg_kernel.txDesc.tx_keep=1; rg_kernel.txDesc.tx_l34_keep=1; rg_kernel.txDesc.tx_tx_portmask=0x1; if((rg_db.systemGlobal.congestionCtrlIntervalMicroSecs!=0)&&(rg_kernel.txDesc.tx_tx_portmask&rg_db.systemGlobal.congestionCtrlPortMask)) { } else { if(rg_db.systemGlobal.fwdStatistic) { rg_db.systemGlobal.statistic.perPortCnt_NIC_TX[rg_db.pktHdr->ingressPort]--; //already count in wifi_tx. } } _rtk_rg_send_with_txInfo_and_mask(rg_db.pktHdr,new_skb,(struct tx_info*)&rg_kernel.txDesc,0,(struct tx_info*)&rg_kernel.txDescMask); } else { TRACE("new skb alloc failed!"); } } rtk_rg_fwdEngineReturn_t _rtk_rg_splitJumboSendToMasterWifi_afterLimit(rtk_rg_pktHdr_t *pPktHdr,struct sk_buff *skb, struct net_device *dev) { unsigned char *skbdata=(unsigned char *)(((u32)skb->data)|0xa0000000); if(skb->len<=1522 && (!pPktHdr->overMTU)) { if(rg_db.systemGlobal.fwdStatistic) { rg_db.systemGlobal.statistic.perPortCnt_WIFI_TX[pPktHdr->ingressPort]++; } if(rg_db.systemGlobal.wifiTxRedirect==1) { _rtk_rg_wifiTxRedirect(skb,dev); } else { #if defined(CONFIG_DEFAULTS_KERNEL_3_18) || defined(CONFIG_OPENWRT_RG) dev->netdev_ops->ndo_start_xmit(skb,dev); #else dev->hard_start_xmit(skb,dev); #endif } return RG_FWDENGINE_RET_SEND_TO_WIFI; } else //skb->len > 1522 || pPktHdr->overMTU == 1 { if(pPktHdr->netifIdx==FAIL || (pPktHdr->netifIdx!=FAIL && rg_db.systemGlobal.interfaceInfo[pPktHdr->netifIdx].storedInfo.is_wan && rg_db.systemGlobal.interfaceInfo[pPktHdr->netifIdx].storedInfo.wan_intf.wan_intf_conf.wan_type==RTK_RG_BRIDGE)) { if(skb->len>SKB_BUF_SIZE) { WARNING("Bridge packet's size is larger than SKB_BUF_SIZE(%d), drop.", SKB_BUF_SIZE); return RG_FWDENGINE_RET_DROP; } TRACE("Forward jumbo bridge packet directly, length=%d.", skb->len); if(rg_db.systemGlobal.fwdStatistic) { rg_db.systemGlobal.statistic.perPortCnt_WIFI_TX[pPktHdr->ingressPort]++; } if(rg_db.systemGlobal.wifiTxRedirect==1) { _rtk_rg_wifiTxRedirect(skb,dev); } else { #if defined(CONFIG_DEFAULTS_KERNEL_3_18) || defined(CONFIG_OPENWRT_RG) dev->netdev_ops->ndo_start_xmit(skb,dev); #else dev->hard_start_xmit(skb,dev); #endif } return RG_FWDENGINE_RET_SEND_TO_WIFI; } else if(!pPktHdr->overMTU) // not over mtu { if(skb->len>SKB_BUF_SIZE) { WARNING("Routing packet's size is larger than SKB_BUF_SIZE(%d), drop.", SKB_BUF_SIZE); return RG_FWDENGINE_RET_DROP; } TRACE("Forward jumbo routing packet directly, length=%d.", skb->len); if(rg_db.systemGlobal.fwdStatistic) { rg_db.systemGlobal.statistic.perPortCnt_WIFI_TX[pPktHdr->ingressPort]++; } if(rg_db.systemGlobal.wifiTxRedirect==1) { _rtk_rg_wifiTxRedirect(skb,dev); } else { #if defined(CONFIG_DEFAULTS_KERNEL_3_18) || defined(CONFIG_OPENWRT_RG) dev->netdev_ops->ndo_start_xmit(skb,dev); #else dev->hard_start_xmit(skb,dev); #endif } return RG_FWDENGINE_RET_SEND_TO_WIFI; } else { int remainLen=skb->len; uint16 l3Offset=pPktHdr->l3Offset; int l3HdrLen=((skbdata[l3Offset]&0xf)<<2); int l23Len=l3Offset+l3HdrLen; int offset=pPktHdr->ipv4FragmentOffset<<3; int eachPayloadLen; if(pPktHdr->netifIdx==FAIL) { WARNING("pPktHdr->netifIdx==FAIL\n"); goto send_error; } eachPayloadLen=(rg_db.netif[pPktHdr->netifIdx].rtk_netif.mtu+14)-l23Len; eachPayloadLen=(eachPayloadLen>>3)<<3; //align to multiple of 8, because each fragment offset is based on 8 //printk("Jumbo Frame send to NIC len=%d tagif=0x%x 
l23Len=%d\n",skb->len,pPktHdr->tagif,l23Len); if(remainLen>JUMBO_SKB_BUF_SIZE) { goto send_error; } if((pPktHdr->tagif&IPV4_TAGIF)==0) { goto send_error; } remainLen-=l23Len; while(remainLen>0) { struct sk_buff *allocSkb; unsigned char *new_data; allocSkb=_rtk_rg_getAlloc(skb->len); if(allocSkb==NULL) goto send_error; new_data=(unsigned char *)(((u32)allocSkb->data)|0xa0000000); memcpy(new_data,skbdata,l23Len); memcpy(new_data+l23Len,skbdata+(skb->len-remainLen),eachPayloadLen); allocSkb->len=((remainLen>=eachPayloadLen)?eachPayloadLen:remainLen)+l23Len; //printk("Send %x len=%d remainLen=%d l23Len=%d l4sum=%04x\n",(u32)allocSkb,allocSkb->len,remainLen,l23Len,*pPktHdr->pL4Checksum); //memDump(new_data+l23Len,8,"l4_start"); //memDump(new_data+allocSkb->len-8,8,"l4_end"); remainLen-=eachPayloadLen; if(eachPayloadLen<=0) printk("looping....r=%d e=%d\n",remainLen,eachPayloadLen); if(offset==0) { new_data[l3Offset+6]|=0x20; //set MF } else { new_data[l3Offset+6]=offset>>3>>8; //H_offset new_data[l3Offset+7]=((offset>>3)&0xff); //L_offset if((remainLen>0)||(pPktHdr->ipv4MoreFragment)) new_data[l3Offset+6]|=0x20; //set MF(when org packet have already MF bit) } new_data[l3Offset+2]=(allocSkb->len-l3Offset)>>8; //IP hdr total H_length new_data[l3Offset+3]=(allocSkb->len-l3Offset)&0xff; //IP hdr total L_length if(pPktHdr->egressTagif&PPPOE_TAGIF){//PPPoE Hdr len new_data[l3Offset-4]=(allocSkb->len-l3Offset+2)>>8; new_data[l3Offset-3]=(allocSkb->len-l3Offset+2)&0xff; } // L3 checksum re-calculate *(u16*)((u32)new_data+l3Offset+10)=0; *(u16*)((u32)new_data+l3Offset+10)=htons(inet_chksum(new_data+l3Offset,l3HdrLen)); TRACE("SPLIT frag_off=%d",offset); offset+=eachPayloadLen; if(rg_db.systemGlobal.fwdStatistic) { rg_db.systemGlobal.statistic.perPortCnt_WIFI_TX[pPktHdr->ingressPort]++; } if(rg_db.systemGlobal.wifiTxRedirect==1) { _rtk_rg_wifiTxRedirect(allocSkb,dev); } else { #if defined(CONFIG_DEFAULTS_KERNEL_3_18) || defined(CONFIG_OPENWRT_RG) dev->netdev_ops->ndo_start_xmit(allocSkb,dev); #else dev->hard_start_xmit(allocSkb,dev); #endif } } if(skb) _rtk_rg_dev_kfree_skb_any(skb); } } return RG_FWDENGINE_RET_SEND_TO_WIFI; send_error: if(skb) _rtk_rg_dev_kfree_skb_any(skb); return RG_FWDENGINE_RET_ERROR; } rtk_rg_fwdEngineReturn_t _rtk_rg_splitJumboSendToMasterWifi(rtk_rg_pktHdr_t *pPktHdr,struct sk_buff *skb, struct net_device *dev) { //20141002LUKE: remove outter IP header, GRE header, PPP header //20141017LUKE: remove outter IP header, UDP header, L2TP header, PPP header //20150206LUKE: remove outter IPv6 header if(pPktHdr->tagif&PPTP_INNER_TAGIF||pPktHdr->tagif&L2TP_INNER_TAGIF||pPktHdr->tagif&DSLITE_INNER_TAGIF){ TRACE("Remove Tunnel tag"); _rtk_rg_removeTunnelTag(pPktHdr); } //Ctag/Stag translate in pktbuff _rtk_rg_TranslateVlanSvlan2Packet(skb,pPktHdr,1); //20151210LUKE: wifi egress rate limie check here if(rg_db.systemGlobal.wifiEgressRateLimitMeter[pPktHdr->egressWlanDevIdx]){ if(rg_db.systemGlobal.wifiEgressRateLimitDevOverMask&(0x1<<pPktHdr->egressWlanDevIdx))goto OVERLIMIT_DROP; rg_db.systemGlobal.wifiEgressByteCount[pPktHdr->egressWlanDevIdx] += skb->len; if(rg_db.systemGlobal.wifiEgressByteCount[pPktHdr->egressWlanDevIdx]<<3/*flow bits in time period*/ > (rg_db.systemGlobal.wifiEgressRateLimitMeter[pPktHdr->egressWlanDevIdx]<<6/*Kbps*/*RTK_RG_SWRATELIMIT_SECOND/*unit:(1/16)sec*/)/*rate limit bits in time period*/){ rg_db.systemGlobal.wifiEgressRateLimitDevOverMask|=(0x1<<pPktHdr->egressWlanDevIdx); goto OVERLIMIT_DROP; } } //Send it out!! 
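	/* Note on the rate-limit comparison above: wifiEgressByteCount<<3 converts bytes to bits, and
	 * meter(kbps)<<6 is kbits-to-bits per 1/16-second tick (1024/16 = 64), so multiplying by
	 * RTK_RG_SWRATELIMIT_SECOND (expressed in 1/16-second units) gives the bit budget for the
	 * accounting window. This reading follows the unit annotations already in the expression. */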
	return _rtk_rg_splitJumboSendToMasterWifi_afterLimit(pPktHdr, skb, dev);

OVERLIMIT_DROP:
	TRACE("Drop! Wlan[%d] egress packet rate higher than %d kbps.",pPktHdr->egressWlanDevIdx,rg_db.systemGlobal.wifiEgressRateLimitMeter[pPktHdr->egressWlanDevIdx]);
	if(skb) _rtk_rg_dev_kfree_skb_any(skb);
	return RG_FWDENGINE_RET_RATE_LIMIT_DROP;
}
#endif

int _rtk_rg_qosDscpRemarking(rtk_rg_mac_port_idx_t egressPort,rtk_rg_pktHdr_t *pPktHdr,struct sk_buff *skb){
	int dscp_off;
	dscp_off = (pPktHdr->pTos)-(skb->data);
	if(rg_db.systemGlobal.qosInternalDecision.qosDscpRemarkEgressPortEnableAndSrcSelect[egressPort]){
		TRACE("QoS dscp Remarking by port[%d]:%s",egressPort,rg_db.systemGlobal.qosInternalDecision.qosDscpRemarkEgressPortEnableAndSrcSelect[egressPort]?"ENABLED":"DISABLED");
		if(rg_db.systemGlobal.qosInternalDecision.qosDscpRemarkEgressPortEnableAndSrcSelect[egressPort]==ENABLED_DSCP_REMARK_AND_SRC_FROM_INTERNALPRI){
			_rtk_rg_dscpRemarkToSkb(ENABLED_DSCP_REMARK_AND_SRC_FROM_INTERNALPRI,pPktHdr,skb,dscp_off);
		}else if(rg_db.systemGlobal.qosInternalDecision.qosDscpRemarkEgressPortEnableAndSrcSelect[egressPort]==ENABLED_DSCP_REMARK_AND_SRC_FROM_DSCP){
			_rtk_rg_dscpRemarkToSkb(ENABLED_DSCP_REMARK_AND_SRC_FROM_DSCP,pPktHdr,skb,dscp_off);
		}
	}
	return (RT_ERR_RG_OK);
}

__IRAM_FWDENG int _rtk_rg_portIsolationFilter(struct sk_buff *skb, rtk_rg_pktHdr_t *pPktHdr)
{
	if(pPktHdr->ingressLocation==RG_IGR_PHY_PORT)
		rg_kernel.txDesc.tx_tx_portmask&=rg_db.systemGlobal.portIsolation[pPktHdr->ingressPort].portmask;
	else
		rg_kernel.txDesc.tx_tx_portmask&=rg_db.systemGlobal.portIsolation[RTK_RG_PORT_CPU].portmask;

	if(rg_kernel.txDesc.tx_tx_portmask) return (RG_FWDENGINE_RET_CONTINUE);

	pPktHdr->fwdDecision=RG_FWD_DECISION_PORT_ISO;
	if(skb) _rtk_rg_dev_kfree_skb_any(skb);
	return (RG_FWDENGINE_RET_DIRECT_TX);
}

__IRAM_FWDENG int _rtk_rg_egressPacketSend(struct sk_buff *skb, rtk_rg_pktHdr_t *pPktHdr)
{
#if defined(CONFIG_RTL9600_SERIES)
	uint32 pon_is_cfport, rgmii_is_cfport;
	pon_is_cfport = rg_db.systemGlobal.pon_is_cfport;
	rgmii_is_cfport = rg_db.systemGlobal.rgmii_is_cfport;
#endif

	//Port isolation
	if(rg_kernel.txDesc.tx_tx_portmask)	//directTx
		if(_rtk_rg_portIsolationFilter(skb, pPktHdr)==RG_FWDENGINE_RET_DIRECT_TX) return RG_FWDENGINE_RET_DIRECT_TX;

	//20140528LUKE: for ipv4 we recalculate the Layer4 checksum if NAPT; for ipv6 we just keep the original value
	//20140902LUKE: packets from the shortcut path won't be fragmented, so let hw do the layer4 checksum.
	//20141203LUKE: for fragmented packets, we should disable hw L4 checksum offload.
	//20141211LUKE: for IPv6 stateful fragments, shortcutStatus will be RG_SC_MATCH, so we can just check the packet itself.
rg_kernel.txDescMask.tx_l4cs=1; if( #if defined(CONFIG_RTL9600_SERIES) || defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) //20160331LUKE: checksum by sw offload (pPktHdr->egressTagif&PPPOE_TAGIF)||(pPktHdr->egressTagif&PPTP_TAGIF)|| #endif (pPktHdr->ipv6FragPacket)||(pPktHdr->ipv4FragPacket)) rg_kernel.txDesc.tx_l4cs=0; else rg_kernel.txDesc.tx_l4cs=1; #if defined(CONFIG_RTL9602C_SERIES) //patch for gmac udp checksum issue if((pPktHdr->tagif&IPV6_TAGIF)&&(pPktHdr->tagif&UDP_TAGIF)&&(pPktHdr->l4Len==10)) { rg_kernel.txDesc.tx_l4cs=0; if(pPktHdr->algAction==RG_ALG_ACT_TO_FWDENGINE #ifdef CONFIG_RG_IPV6_NAPT_SUPPORT || pPktHdr->fwdDecision==RG_FWD_DECISION_V6NAPT || pPktHdr->fwdDecision==RG_FWD_DECISION_V6NAPTR #endif ) { uint16 *pL4Checksum; TRACE("recalculate ipv6+udp checksum"); pL4Checksum=(uint16*)&skb->data[pPktHdr->l4Offset+6]; *pL4Checksum=0; *pL4Checksum=htons(inet_chksum_pseudoV6(skb->data+pPktHdr->l4Offset, skb->len-pPktHdr->l4Offset, pPktHdr->pIpv6Sip, pPktHdr->pIpv6Dip, pPktHdr->ipProtocol)); } } #endif #if defined(CONFIG_RTL9600_SERIES) || defined(CONFIG_RTL9602C_SERIES) || defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) #else //switch support pppoe tag offload rg_kernel.txDescMask.tx_tx_pppoe_action=0x3; rg_kernel.txDescMask.tx_tx_pppoe_idx=0xf; if((pPktHdr->tagif&PPPOE_TAGIF)==0 && (pPktHdr->egressTagif&PPPOE_TAGIF)) //untag => tag { rg_kernel.txDesc.tx_tx_pppoe_action=RTK_RG_CPUTAG_PPPOEACT_ADD; //keep or add (always tag) rg_kernel.txDesc.tx_tx_pppoe_idx=pPktHdr->netifIdx&0xf; } else if((pPktHdr->tagif&PPPOE_TAGIF) && (pPktHdr->egressTagif&PPPOE_TAGIF)==0) //tag => untag { rg_kernel.txDesc.tx_tx_pppoe_action=RTK_RG_CPUTAG_PPPOEACT_REMOVE; //remove rg_kernel.txDesc.tx_tx_pppoe_idx=0; } else //untag => untag, tag => tag { rg_kernel.txDesc.tx_tx_pppoe_action=RTK_RG_CPUTAG_PPPOEACT_KEEP; //keep rg_kernel.txDesc.tx_tx_pppoe_idx=0; } #endif //20151023LUKE: for packet to non-PON port, we suppose cputag_psel should be zero for disable NIC padding if necessary. //And if CF unmatch action is DROP or Permit_without_PON, we should always set psel to 1. #if defined(CONFIG_RTL9600_SERIES) if((((rg_kernel.txDesc.tx_tx_portmask & (1<<RTK_RG_MAC_PORT_PON))&&pon_is_cfport) || ((rg_kernel.txDesc.tx_tx_portmask & (1<<RTK_RG_MAC_PORT_RGMII)) && rgmii_is_cfport)) &&(rg_db.systemGlobal.initParam.wanPortGponMode)) { //20160506CHUCK: patch pppoe gpon little-bandwidth issue. Direct-Tx to RGMII will effect by H/W CF-unmatch-drop (because RGMII doesn't have streamID action, not the same as PON port), so we force change the port back to PON directly (skip loop back directly in software) if(rg_db.systemGlobal.pppoeGponSmallbandwithControl){ if((rg_kernel.txDesc.tx_tx_portmask & (1<<RTK_RG_MAC_PORT_RGMII)))//hit the loop-back patch, translate the port direct to PON. { rg_kernel.txDesc.tx_tx_portmask &= ~(1<<RTK_RG_MAC_PORT_RGMII); rg_kernel.txDesc.tx_tx_portmask |= (1<<RTK_RG_MAC_PORT_PON); TRACE("Direct TX translate egress port from RGMII back to PON."); } } #else if((rg_kernel.txDesc.tx_tx_portmask & (1<<RTK_RG_MAC_PORT_PON))&& (rg_db.systemGlobal.initParam.wanPortGponMode)){ #endif #ifdef CONFIG_GPON_FEATURE //20151023LUKE: if we are arrived here means we are hit CF rulte to decide stream or LLID. 
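/* On platforms where the switch offloads the PPPoE header (the #else branch of the
 * chip conditional above), the TX-descriptor PPPoE action reduces to a small
 * ingress/egress matrix: untag->tag programs RTK_RG_CPUTAG_PPPOEACT_ADD, tag->untag
 * programs REMOVE, everything else KEEP.  A sketch with a hypothetical enum; the
 * real code also programs the PPPoE index from the netif. */
#if 0
enum sketch_pppoe_action { SKETCH_PPPOE_KEEP, SKETCH_PPPOE_ADD, SKETCH_PPPOE_REMOVE };

static enum sketch_pppoe_action sketch_pppoe_tx_action(int ingressHasPppoe, int egressHasPppoe)
{
	if (!ingressHasPppoe && egressHasPppoe)
		return SKETCH_PPPOE_ADD;        /* untag -> tag   */
	if (ingressHasPppoe && !egressHasPppoe)
		return SKETCH_PPPOE_REMOVE;     /* tag -> untag   */
	return SKETCH_PPPOE_KEEP;               /* tag -> tag or untag -> untag */
}
#endif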
#if defined(CONFIG_RTL9600_SERIES) || defined(CONFIG_RTL9602C_SERIES) || defined(CONFIG_RTL9607C_SERIES) rg_kernel.txDescMask.tx_cputag_psel=1; rg_kernel.txDescMask.tx_tx_dst_stream_id=0x7f; rg_kernel.txDesc.tx_cputag_psel=1; //20140902LUKE: remarking stream ID only when ponMacMode is GPON if(rg_db.systemGlobal.initParam.wanPortGponMode){ //for GPON, set StreamID from CF rg_kernel.txDesc.tx_tx_dst_stream_id=pPktHdr->streamID; TRACE("remarking streamID %d!!",pPktHdr->streamID); }else{ //for EPON, always set LLID to zero. rg_kernel.txDesc.tx_tx_dst_stream_id=0; //set LLID to zero } #else rg_kernel.txDescMask.tx_cputag_psel=1; rg_kernel.txDescMask.tx_tx_dst_stream_id=0x7f; rg_kernel.txDesc.tx_cputag_psel=1; //for PON, set StreamID or LLID from CF rg_kernel.txDesc.tx_tx_dst_stream_id=pPktHdr->streamID; TRACE("remarking streamID or LLID %d!!",pPktHdr->streamID); #endif #else //20151023LUKE: directTX should bypass HW cf unmatch action, since HW can't decide interface index! rg_kernel.txDescMask.tx_cputag_psel=1; rg_kernel.txDescMask.tx_tx_dst_stream_id=0x7f; rg_kernel.txDesc.tx_cputag_psel=1; rg_kernel.txDesc.tx_tx_dst_stream_id=0; //don't care #endif } //Assign CPUtag priority rg_kernel.txDescMask.tx_aspri=0x1; rg_kernel.txDescMask.tx_cputag_pri=0x7; rg_kernel.txDesc.tx_aspri=0x1; rg_kernel.txDesc.tx_cputag_pri=pPktHdr->internalPriority; if(pPktHdr->egressUniPortmask!=0) { rg_kernel.txDescMask.tx_tx_portmask=0x3f; rg_kernel.txDesc.tx_tx_portmask=pPktHdr->egressUniPortmask; } #if defined(CONFIG_APOLLO_FPGA_PHY_TEST) _rtk_rg_fpgaTest_txPPPoEUpdate(skb, pPktHdr); _rtk_rg_fpgaTest_txChecksumUpdate(skb, pPktHdr); #endif #if defined(CONFIG_APOLLO_ROMEDRIVER) || defined(CONFIG_XDSL_ROMEDRIVER) //VLAN remarking for shortcut packets _rtk_rg_TranslateVlanSvlan2Packet(skb,pPktHdr,0); #endif #if defined(CONFIG_RTL9602C_SERIES) //re-calculate the L3 or L4 checksum if using TPID2, because gamc can not offload TPID2 checksum recount. 
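/* The TPID2 patch below re-derives the IPv4 and L4 checksums with the driver's
 * incremental helpers (_rtk_rg_fwdengine_L3checksumUpdate / _L4checksumUpdate).
 * A minimal sketch of the underlying RFC 1624 update, HC' = ~(~HC + ~m + m'), for a
 * single 16-bit field; the helper name is an assumption and host-order values are
 * used for clarity. */
#if 0
static unsigned short sketch_checksum_update16(unsigned short oldCsum,
                                               unsigned short oldField,
                                               unsigned short newField)
{
	unsigned long sum;

	sum  = (unsigned short)~oldCsum;        /* ~HC */
	sum += (unsigned short)~oldField;       /* +~m */
	sum += newField;                        /* +m' */
	while (sum >> 16)                       /* fold carries back into 16 bits */
		sum = (sum & 0xffff) + (sum >> 16);
	return (unsigned short)~sum;            /* HC' */
}
#endif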
if(pPktHdr->egressServiceVlanTagif==0x2 || pPktHdr->egressServiceVlanTagif==0x3)//the CF action could use stag tpid without 0x8100 { if(pPktHdr->tagif&IPV4_TAGIF && (pPktHdr->l3Modify||(pPktHdr->fwdDecision==RG_FWD_DECISION_ROUTING)) && pPktHdr->ipv4Checksum==*pPktHdr->pIpv4Checksum) //update checksum only when checksum is not change { TRACE("recount L3 checksum"); //could be outbound *pPktHdr->pIpv4Checksum=htons(_rtk_rg_fwdengine_L3checksumUpdate(*pPktHdr->pIpv4Checksum,pPktHdr->ipv4Sip,pPktHdr->ipv4TTL,pPktHdr->ipProtocol,ntohl(*pPktHdr->pIpv4Sip),*pPktHdr->pIpv4TTL)); //could be inbound *pPktHdr->pIpv4Checksum=htons(_rtk_rg_fwdengine_L3checksumUpdate(*pPktHdr->pIpv4Checksum,pPktHdr->ipv4Dip,0,pPktHdr->ipProtocol,ntohl(*pPktHdr->pIpv4Dip),0)); //TTL should not count again,assign zero if(pPktHdr->l4Modify && pPktHdr->fwdDecision!=RG_FWD_DECISION_ROUTING && pPktHdr->ipv4FragPacket==0 && pPktHdr->l4Checksum==*pPktHdr->pL4Checksum) //update checksum only when checksum is not change { if(pPktHdr->l4Direction==RG_NAPT_OUTBOUND_FLOW){//outbound if(pPktHdr->tagif&TCP_TAGIF){ TRACE("recount L4 checksum(outbound)"); *pPktHdr->pL4Checksum = htons(_rtk_rg_fwdengine_L4checksumUpdate(pPktHdr->tcpFlags.ack,*pPktHdr->pL4Checksum,pPktHdr->ipv4Sip,pPktHdr->sport,pPktHdr->tcpSeq,pPktHdr->tcpAck,ntohl(*pPktHdr->pIpv4Sip),ntohs(*pPktHdr->pSport),ntohl(*pPktHdr->pTcpSeq),ntohl(*pPktHdr->pTcpAck))); }else if(pPktHdr->tagif&UDP_TAGIF){ TRACE("recount L4 checksum(outbound)"); *pPktHdr->pL4Checksum = htons(_rtk_rg_fwdengine_L4checksumUpdate(0,*pPktHdr->pL4Checksum,pPktHdr->ipv4Sip,pPktHdr->sport,0,0,ntohl(*pPktHdr->pIpv4Sip),ntohs(*pPktHdr->pSport),0,0)); } }else if(pPktHdr->l4Direction==RG_NAPTR_INBOUND_FLOW){//inbound if(pPktHdr->tagif&TCP_TAGIF){ TRACE("recount L4 checksum(inbound)"); *pPktHdr->pL4Checksum = htons(_rtk_rg_fwdengine_L4checksumUpdate(pPktHdr->tcpFlags.ack,*pPktHdr->pL4Checksum,pPktHdr->ipv4Dip,pPktHdr->dport,pPktHdr->tcpSeq,pPktHdr->tcpAck,ntohl(*pPktHdr->pIpv4Dip),ntohs(*pPktHdr->pDport),ntohl(*pPktHdr->pTcpSeq),ntohl(*pPktHdr->pTcpAck))); } else if(pPktHdr->tagif&UDP_TAGIF){ TRACE("recount L4 checksum(inbound)"); *pPktHdr->pL4Checksum = htons(_rtk_rg_fwdengine_L4checksumUpdate(0,*pPktHdr->pL4Checksum,pPktHdr->ipv4Dip,pPktHdr->dport,0,0,ntohl(*pPktHdr->pIpv4Dip),ntohs(*pPktHdr->pDport),0,0)); } } } } } #endif //ring num -> Master: 0, 2, 4 slave: 1, 3 if((pPktHdr->etherType==0x8864)&& (pPktHdr->tagif&IPV4_TAGIF)==0x0 && (pPktHdr->tagif&IPV6_TAGIF)==0x0)//garentee PPPoE LCP can send with higher ring to avoid PPPoE dis-connection. 
{ //Call NIC driver api to send packet with higher ring_num DEBUG("PPPoE LCP packet, send with higher NIC ring_num."); return _rtk_rg_splitJumboSendToNicWithTxInfoAndMask(pPktHdr,skb,(struct tx_info*)&rg_kernel.txDesc,4,(struct tx_info*)&rg_kernel.txDescMask); } else { //Call NIC driver api to send packet return _rtk_rg_splitJumboSendToNicWithTxInfoAndMask(pPktHdr,skb,(struct tx_info*)&rg_kernel.txDesc,0,(struct tx_info*)&rg_kernel.txDescMask); } } #if 0 int _rtk_rg_broadcastForward(struct sk_buff *skb, unsigned int internalVlanID, unsigned int srcPort,unsigned extSpa) { return _rtk_rg_broadcastForwardWithPkthdr(NULL,skb,internalVlanID,srcPort,extSpa); } #endif #if defined(CONFIG_APOLLO) __SRAM_FWDENG_SLOWPATH rtk_rg_fwdEngineReturn_t _rtk_rg_bindingRuleCheck(rtk_rg_pktHdr_t *pPktHdr, int *wanGroupIdx) { //unsigned int srcPort=pPktHdr->pRxDesc->rx_src_port_num; //unsigned int extSpa=pPktHdr->ingressPort; rtk_rg_vbind_linkList_t *pVbdEntry; #ifdef CONFIG_MASTER_WLAN0_ENABLE //20140707LUKE:Check if the WLAN0 devices are binded! //TRACE("pPktHdr->ingressPort is %d, pkthdr->wlan_dev_idx is %d",pPktHdr->ingressPort,pPktHdr->wlan_dev_idx); if(pPktHdr->ingressPort==RTK_RG_EXT_PORT0 && rg_db.systemGlobal.wlan0BindDecision[pPktHdr->wlan_dev_idx].set_bind) { TRACE("Hit!! WLAN-Device-bind to WAN%d",rg_db.systemGlobal.wlan0BindDecision[pPktHdr->wlan_dev_idx].bind_wanIntf); *wanGroupIdx=rg_db.systemGlobal.interfaceInfo[rg_db.systemGlobal.wlan0BindDecision[pPktHdr->wlan_dev_idx].bind_wanIntf].lan_or_wan_index; return RG_FWDENGINE_RET_HIT_BINDING; } #ifdef CONFIG_DUALBAND_CONCURRENT else if(rg_db.systemGlobal.enableSlaveSSIDBind && pPktHdr->ingressPort==RTK_RG_EXT_PORT1 && rg_db.systemGlobal.wlan0BindDecision[pPktHdr->wlan_dev_idx].set_bind) { TRACE("Hit!! WLAN1-Device-bind to WAN%d",rg_db.systemGlobal.wlan0BindDecision[pPktHdr->wlan_dev_idx].bind_wanIntf); *wanGroupIdx=rg_db.systemGlobal.interfaceInfo[rg_db.systemGlobal.wlan0BindDecision[pPktHdr->wlan_dev_idx].bind_wanIntf].lan_or_wan_index; return RG_FWDENGINE_RET_HIT_BINDING; } #endif #endif //Check port-binding if((rg_db.systemGlobal.non_binding_pmsk.portmask&(0x1<<pPktHdr->ingressPort))==0) { //Check Vlan-Binding if has 1Q tag if(pPktHdr->tagif&CVLAN_TAGIF) { #if 0 for(i=0;i<MAX_BIND_SW_TABLE_SIZE;i++) { if(rg_db.bind[i].valid && rg_db.bind[i].rtk_bind.vidLan==pPktHdr->ctagVid) { if((rg_db.bind[i].rtk_bind.portMask.bits[0]&(0x1<<srcPort)) || (srcPort==RTK_RG_PORT_CPU && (rg_db.bind[i].rtk_bind.extPortMask.bits[0]&(0x1<<(extSpa-RTK_RG_PORT_CPU))))) { //Vlan-Binding!! TRACE("Hit!! Vlan-bind to WAN%d",rg_db.nexthop[rg_db.wantype[rg_db.bind[i].rtk_bind.wanTypeIdx].rtk_wantype.nhIdx].rtk_nexthop.ifIdx); *wanGroupIdx=rg_db.systemGlobal.interfaceInfo[rg_db.nexthop[rg_db.wantype[rg_db.bind[i].rtk_bind.wanTypeIdx].rtk_wantype.nhIdx].rtk_nexthop.ifIdx].lan_or_wan_index; return (RG_FWDENGINE_RET_HIT_BINDING); } } } #else if(!list_empty(&rg_db.vlanBindingListHead[pPktHdr->ingressPort])) { list_for_each_entry(pVbdEntry,&rg_db.vlanBindingListHead[pPktHdr->ingressPort],vbd_list) { if(pVbdEntry->vlanId==pPktHdr->ctagVid) { //Vlan-Binding!! TRACE("Hit!! Vlan-bind to WAN%d",pVbdEntry->wanIdx); *wanGroupIdx=rg_db.systemGlobal.interfaceInfo[pVbdEntry->wanIdx].lan_or_wan_index; return RG_FWDENGINE_RET_HIT_BINDING; } } } #endif } if(rg_db.systemGlobal.portbinding_wan_idx[pPktHdr->ingressPort]>=0) { //Port-Binding!! TRACE("Hit!! 
Port-bind to WAN%d",rg_db.systemGlobal.portbinding_wan_idx[pPktHdr->ingressPort]); *wanGroupIdx=rg_db.systemGlobal.interfaceInfo[(int)rg_db.systemGlobal.portbinding_wan_idx[pPktHdr->ingressPort]].lan_or_wan_index; return RG_FWDENGINE_RET_HIT_BINDING; } } return RG_FWDENGINE_RET_CONTINUE; } #endif // defined(CONFIG_APOLLO) /* Cheney: Code Refactoring, per port to handle broadcast packet, 20151014 */ int _rtk_rg_broadcastForwardWithPkthdr(rtk_rg_pktHdr_t *pPktHdr, struct sk_buff *skb, unsigned int internalVlanID, unsigned int srcPort,unsigned int extSpa) { struct sk_buff *bcSkb=NULL; int i,j,isGatewayMac=0; int groupIdx; int aclRet, aclRetPON = 0; unsigned char *pSourceMac; unsigned int dpMask,wanPortMask=0,wanSentPortMask=0,bindWanPortMask=0,vlanBindTagPortMask=0,vlanBindUntagPortMask=0, psTxPortMask=0; //rtk_rg_port_idx_t egressPortIdx = RTK_RG_PORT_CPU;//initial with a useless value(none-CF port). int gw_netifIdx=FAIL; unsigned int outputPortIdx = 0, allDestPortMask = 0; unsigned char byPassWAN = 0; unsigned char runPONPortAclCheck = FALSE; // setup a flag to make sure run PON port ACL checking only one time. int orinetifIdx = pPktHdr->netifIdx; uint16 l3Offset_ori = pPktHdr->l3Offset; uint16 l4Offset_ori = pPktHdr->l4Offset; #if defined(CONFIG_RG_IGMP_SNOOPING) uint32 egress_filter_portmask=0x0; #endif //Return RT_ERR_RG_FAILED to protocol stack, return RT_ERR_RG_OK after packets sended pPktHdr->egressMACPort=7; //for cf check //TRACE("srcPort=%d, extSpa=%d",srcPort,extSpa); #if 1 if(pPktHdr->ingressLocation==RG_IGR_PHY_PORT) { if(memcmp(skb->data,"\x01\x80\xc2",3)==0) { FIXME("trap 01:80:C2:XX:XX:XX!"); return RG_FWDENGINE_RET_TO_PS; } } #endif //control by proc/rg/unknownDA_Trap_to_PS : trap unknownDA packet originally to PS if(rg_db.systemGlobal.unknownDA_Trap_to_PS_enable){ //if((pPktHdr->pDmac[0]&0x1)==0x0){ if(((pPktHdr->pDmac[0]&0x1)==0x0)&&(pPktHdr->ingressPort==RTK_RG_PORT_PON)) { TRACE("Trap unknownDA to PS"); return RG_FWDENGINE_RET_ACL_TO_PS; } } //cpSkb=rtk_rg_skbCopyToPreAllocSkb(skb); //if(cpSkb==NULL) goto OUT_OF_MEM; // clear old configed fields. rg_kernel.txDescMask.opts1.dw=0; rg_kernel.txDescMask.opts2.dw=0; rg_kernel.txDescMask.opts3.dw=0; //rg_kernel.txDescMask.opts4.dw=0; //not used now! rg_kernel.txDesc.opts1.dw=0; rg_kernel.txDesc.opts2.dw=0; rg_kernel.txDesc.opts3.dw=0; //rg_kernel.txDesc.opts4.dw=0; //not used now! rg_kernel.txDescMask.tx_ipcs=1; rg_kernel.txDescMask.tx_l4cs=1; rg_kernel.txDescMask.tx_keep=1; rg_kernel.txDescMask.tx_dislrn=1; //rg_kernel.txDescMask.tx_tx_portmask=RTK_RG_ALL_MAC_PORTMASK_WITHOUT_CPU; rg_kernel.txDesc.tx_ipcs=1; rg_kernel.txDesc.tx_l4cs=1; rg_kernel.txDesc.tx_keep=1; rg_kernel.txDesc.tx_dislrn=1; //DEBUG("in %s, the internalVlanId is %d, srcport is %d",__FUNCTION__,internalVlanID,srcPort); //Check for internalVLAN contains WAN port or not pSourceMac=pPktHdr->pSmac; for(i=0;i<MAX_NETIF_SW_TABLE_SIZE;i++) { if(rg_db.systemGlobal.interfaceInfo[i].valid) { if(rg_db.systemGlobal.interfaceInfo[i].storedInfo.is_wan) { //WAN interface if(memcmp(rg_db.systemGlobal.interfaceInfo[i].storedInfo.wan_intf.wan_intf_conf.gmac.octet,pSourceMac,ETHER_ADDR_LEN)==0) { isGatewayMac=1; gw_netifIdx = pPktHdr->netifIdx; //keep the gateway index, for broadcast to normalWan the pPktHdr->netifIdx will be changed. 
break; } } #if 0 //BC packet from LAN should not send to WAN else { //LAN interface if(memcmp(rg_db.systemGlobal.interfaceInfo[i].storedInfo.lan_intf.gmac.octet,pSourceMac,ETHER_ADDR_LEN)==0) { isGatewayMac=1; break; } } #endif } } //Do ACL egress pattern check for PON port, cheney. //Use correct wan intf index if hit binding rule if(rg_db.systemGlobal.initParam.macBasedTagDecision && _rtk_rg_bindingRuleCheck(pPktHdr, &groupIdx)==RG_FWDENGINE_RET_HIT_BINDING && rg_db.systemGlobal.wanIntfGroup[groupIdx].p_wanIntfConf != NULL && rg_db.systemGlobal.wanIntfGroup[groupIdx].p_wanIntfConf->wan_port_idx == RTK_RG_PORT_PON) // focus on PON port { unsigned char sendToBindindWAN = FALSE; dpMask=0x1<<rg_db.systemGlobal.wanIntfGroup[groupIdx].p_wanIntfConf->wan_port_idx; bindWanPortMask|=dpMask; pPktHdr->netifIdx=rg_db.systemGlobal.wanIntfGroup[groupIdx].index; if(rg_db.systemGlobal.port_binding_by_protocal==1){//IPv4 Routing, IPv6 Bridge if((rg_db.systemGlobal.wanIntfGroup[groupIdx].p_wanIntfConf->wan_type==RTK_RG_BRIDGE)|| ((pPktHdr->internalVlanID==4005)&&(pPktHdr->tagif&IPV6_TAGIF)&&(rg_db.systemGlobal.wanIntfGroup[groupIdx].p_wanIntfConf->none_internet==0))|| ((pPktHdr->tagif&PPPOE_TAGIF))) { sendToBindindWAN = TRUE; } //patch for PPPoE routing + passthrough hybrid mode. else if( rg_db.systemGlobal.pppoe_bc_passthrought_to_bindingWan_enable==RTK_RG_ENABLED && rg_db.systemGlobal.wanIntfGroup[groupIdx].p_wanIntfConf->wan_type==RTK_RG_PPPoE && (pPktHdr->tagif&PPPOE_TAGIF)) { sendToBindindWAN = TRUE; }//patch for PPPoE routing + passthrough hybrid mode. end }else if(rg_db.systemGlobal.port_binding_by_protocal==2){//IPv6 Routing, IPv4 Bridge if((rg_db.systemGlobal.wanIntfGroup[groupIdx].p_wanIntfConf->wan_type==RTK_RG_BRIDGE)|| ((pPktHdr->internalVlanID==4005)&&((pPktHdr->tagif&IPV4_TAGIF)||(pPktHdr->tagif&ARP_TAGIF))&&(rg_db.systemGlobal.wanIntfGroup[groupIdx].p_wanIntfConf->none_internet==0))|| ((pPktHdr->tagif&PPPOE_TAGIF))) { sendToBindindWAN = TRUE; } //patch for PPPoE routing + passthrough hybrid mode. else if( rg_db.systemGlobal.pppoe_bc_passthrought_to_bindingWan_enable==RTK_RG_ENABLED && rg_db.systemGlobal.wanIntfGroup[groupIdx].p_wanIntfConf->wan_type==RTK_RG_PPPoE && (pPktHdr->tagif&PPPOE_TAGIF)) { sendToBindindWAN = TRUE; }//patch for PPPoE routing + passthrough hybrid mode. end }else{// IPv4+IPv6 both Bridge if(rg_db.systemGlobal.wanIntfGroup[groupIdx].p_wanIntfConf->wan_type==RTK_RG_BRIDGE) { sendToBindindWAN = TRUE; } //patch for PPPoE routing + passthrough hybrid mode. else if( rg_db.systemGlobal.pppoe_bc_passthrought_to_bindingWan_enable==RTK_RG_ENABLED && rg_db.systemGlobal.wanIntfGroup[groupIdx].p_wanIntfConf->wan_type==RTK_RG_PPPoE && (pPktHdr->tagif&PPPOE_TAGIF)) { sendToBindindWAN = TRUE; }//patch for PPPoE routing + passthrough hybrid mode. end } // If packet needs to transmit, process it here. 
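/* The three port_binding_by_protocal branches above (and repeated per egress port
 * below) share one shape: a bridge WAN always gets the broadcast, in the split modes
 * the "other" protocol family is bridged to an internet WAN on the default VLAN, and
 * PPPoE-tagged frames can pass through to the binding WAN.  A condensed sketch; the
 * mode numbers and the 4005 default VLAN come from the code, everything else is an
 * assumption standing in for the driver's tables. */
#if 0
static int sketch_send_to_binding_wan(int bindingMode /* 0, 1 or 2 */, int wanIsBridge,
                                      int wanIsNoneInternet, int vlanId,
                                      int isIpv4OrArp, int isIpv6, int hasPppoeTag,
                                      int pppoePassthroughToBindingWan)
{
	if (wanIsBridge)
		return 1;                               /* bridge WAN always receives it */
	if (bindingMode == 1)                           /* IPv4 routed, IPv6 bridged */
		return hasPppoeTag ||
		       ((vlanId == 4005) && isIpv6 && !wanIsNoneInternet);
	if (bindingMode == 2)                           /* IPv6 routed, IPv4 bridged */
		return hasPppoeTag ||
		       ((vlanId == 4005) && isIpv4OrArp && !wanIsNoneInternet);
	/* both families bridged: only the PPPoE passthrough patch applies */
	return hasPppoeTag && pppoePassthroughToBindingWan;
}
#endif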
if (sendToBindindWAN){ #if defined(CONFIG_RG_IGMP_SNOOPING) || defined(CONFIG_RG_MLD_SNOOPING) _rtk_rg_igmpReport_portmask_check_and_limit(pPktHdr, &dpMask); _rtk_rg_igmpMldQuery_portmask_check_and_limit(pPktHdr, &dpMask); if(dpMask == 0x0) goto BYPASS_WAN; #endif pPktHdr->internalVlanID = rg_db.netif[pPktHdr->netifIdx].rtk_netif.vlan_id; } } DEBUG("Run Egresss ACL checking and decision for PON port"); assert_ok(_rtk_rg_egressACLPatternCheck(RG_FWD_DECISION_BRIDGING,0,pPktHdr,skb,0,0,RTK_RG_PORT_PON)); //egressPort use RTK_RG_PORT_PON to pass updtream direction check aclRetPON = _rtk_rg_egressACLAction(RG_FWD_DECISION_BRIDGING,pPktHdr); memcpy(&rg_db.aclDecisionBackup, &pPktHdr->aclDecision, sizeof(rtk_rg_aclHitAndAction_t)); // backup PON port ACL decision and return value. if(aclRetPON==RG_FWDENGINE_RET_ACL_TO_PS){ DEBUG("Follow ACL Action: TRAP to PS"); return RG_FWDENGINE_RET_ACL_TO_PS; } //Reset pattern pPktHdr->netifIdx = orinetifIdx; pPktHdr->internalVlanID = internalVlanID; //Do ACL egress pattern check for non-PON port. DEBUG("Run Egresss ACL checking and decision for Non-PON port"); assert_ok(_rtk_rg_egressACLPatternCheck(RG_FWD_DECISION_BRIDGING,0,pPktHdr,skb,0,0,RTK_RG_PORT_MAX)); //egressPort use RTK_RG_PORT_PON to pass updtream direction check aclRet = _rtk_rg_egressACLAction(RG_FWD_DECISION_BRIDGING,pPktHdr); if(aclRet==RG_FWDENGINE_RET_DROP) goto BC_PROCESS_END; if(aclRet==RG_FWDENGINE_RET_ACL_TO_PS){ DEBUG("Follow ACL Action: TRAP to PS"); return RG_FWDENGINE_RET_ACL_TO_PS; } //3/** Broadcast - Path 1. From PS **/ if(pPktHdr->ingressLocation==RG_IGR_PROTOCOL_STACK) { //WARNING("srcPort is %d, isGatewayMac is %d, rg_kernel.protocolStackTxPortMask is %x",srcPort,isGatewayMac,rg_kernel.protocolStackTxPortMask); //20140505LUKE:Check txPortMask, if equals to LAN port, send by VLAN (eth0, eth0.2, eth0.3, eth0.4, eth0.5) TRACE("rg_kernel.protocolStackTxPortMask is %x, rg_db.systemGlobal.wanPortMask.portmask is %x",rg_kernel.protocolStackTxPortMask,rg_db.systemGlobal.wanPortMask.portmask); if((rg_kernel.protocolStackTxPortMask&rg_db.systemGlobal.wanPortMask.portmask)==0x0) goto PERPORT_SCAN; #if 0 for(i=0;i<=RTK_RG_PORT_CPU;i++){//reverse portmask to portIdx. if((rg_kernel.protocolStackTxPortMask&(1<<i))!=0x0 ){ egressPortIdx = i; break; } } if((egressPortIdx!=RTK_RG_PORT_PON) && (egressPortIdx!=RTK_RG_PORT_RGMII)){//make sure rg_kernel.protocolStackTxPortMask include CF port WARNING("Form PS packet broadcast to non-CF port, egressPortIdx=%d rg_kernel.protocolStackTxPortMask=0x%x",egressPortIdx,rg_kernel.protocolStackTxPortMask); } #endif //20140505LUKE:Otherwise, send to single port here(WAN) //20140509LUKE:From WAN and can not decide interface, we just directTx it! if(pPktHdr->netifIdx==FAIL) { // per port send to rg_kernel.protocolStackTxPortMask only! psTxPortMask = rg_kernel.protocolStackTxPortMask; DEBUG("Path 1 - srcPort is %d, isGatewayMac is %d, rg_kernel.protocolStackTxPortMask is 0x%x",srcPort,isGatewayMac,rg_kernel.protocolStackTxPortMask); } } PERPORT_SCAN: /* Start to check per port output */ for(outputPortIdx = RTK_RG_MAC_PORT0; outputPortIdx < RTK_RG_MAC_PORT_CPU; outputPortIdx++) { pPktHdr->egressMACPort=outputPortIdx; //for ACL/CF UNI action of _rtk_rg_modifyPacketByACLAction() #if !defined(CONFIG_RTL9602C_SERIES) //20160506CHUCK: patch pppoe gpon little-bandwidth issue. RGMII is using for loop-back patch, all case should not egress send to RGMII. 
if(rg_db.systemGlobal.pppoeGponSmallbandwithControl){ if(outputPortIdx==RTK_RG_MAC_PORT_RGMII)//hit the loop-back patch, translate the port direct to PON. { TRACE("BROADCAST egress port skip RGMII."); continue; } } #endif if( rg_db.portLinkStatusInitDone==TRUE && (rg_db.portLinkupMask & (1<<outputPortIdx))== 0){ // If port status is link down, skip checkcing this port. If port status init fail, keep broadcast to all ports TRACE("Portmask [0x%x] **STOP** sending packet because port is Link Down, linkup mask = 0x%x ", 1<<outputPortIdx, rg_db.portLinkupMask); continue; } if(outputPortIdx == srcPort){ // do source port filter in the beginning TRACE("Portmask [0x%x] **STOP** sending packet because of src filter", 1<<outputPortIdx); continue; } if(psTxPortMask){ // If path1 [from PS and netifidx==-1], skip those ports which not belong to protocolStackTxPortMask. if((psTxPortMask & (0x1<<outputPortIdx)) == 0){ TRACE("Portmask [0x%x] **STOP** sending packet because of psTxPortMask = 0x%x", 1<<outputPortIdx, psTxPortMask); continue; } } //Cheney: re-init 1p priority. No necessary to init DSCP because original value is saved in skb(-tos). pPktHdr->egressPriority=(pPktHdr->tagif&CVLAN_TAGIF?pPktHdr->ctagPri:(rg_db.systemGlobal.qosInternalDecision.qosPortBasedPriority[pPktHdr->pRxDesc->rx_src_port_num]&0x7)); rg_kernel.txDesc.tx_cvlan_prio = pPktHdr->egressPriority; pPktHdr->netifIdx = orinetifIdx; pPktHdr->l3Offset = l3Offset_ori; pPktHdr->l4Offset = l4Offset_ori; pPktHdr->internalVlanID = internalVlanID; DEBUG("BC - process port[%d] *****************", outputPortIdx); //3/** Broadcast - Path 2. LAN to Binding WAN **/ //Check if hit binding rule if(rg_db.systemGlobal.initParam.macBasedTagDecision && _rtk_rg_bindingRuleCheck(pPktHdr, &groupIdx)==RG_FWDENGINE_RET_HIT_BINDING && rg_db.systemGlobal.wanIntfGroup[groupIdx].p_wanIntfConf != NULL && rg_db.systemGlobal.wanIntfGroup[groupIdx].p_wanIntfConf->wan_port_idx == outputPortIdx) // output per port { unsigned char sendToBindindWAN = FALSE; DEBUG("Path 2 - LAN to binding wan, groupIdx = %d, netifIdx = %d, wan port id = %d", groupIdx, rg_db.systemGlobal.wanIntfGroup[groupIdx].index, rg_db.systemGlobal.wanIntfGroup[groupIdx].p_wanIntfConf->wan_port_idx); dpMask=0x1<<rg_db.systemGlobal.wanIntfGroup[groupIdx].p_wanIntfConf->wan_port_idx; bindWanPortMask|=dpMask; pPktHdr->netifIdx=rg_db.systemGlobal.wanIntfGroup[groupIdx].index; if(rg_db.systemGlobal.port_binding_by_protocal==1){//IPv4 Routing, IPv6 Bridge TRACE("IPv4 Routing, IPv6 Bridge"); if((rg_db.systemGlobal.wanIntfGroup[groupIdx].p_wanIntfConf->wan_type==RTK_RG_BRIDGE)|| ((pPktHdr->internalVlanID==4005)&&(pPktHdr->tagif&IPV6_TAGIF)&&(rg_db.systemGlobal.wanIntfGroup[groupIdx].p_wanIntfConf->none_internet==0))|| ((pPktHdr->tagif&PPPOE_TAGIF))) { TRACE("Broadcast to Binding WAN[%d]!!",pPktHdr->netifIdx); sendToBindindWAN = TRUE; } //patch for PPPoE routing + passthrough hybrid mode. else if( rg_db.systemGlobal.pppoe_bc_passthrought_to_bindingWan_enable==RTK_RG_ENABLED //&& rg_db.systemGlobal.wanIntfGroup[groupIdx].p_wanIntfConf->wan_type==RTK_RG_PPPoE && (pPktHdr->tagif&PPPOE_TAGIF)) { TRACE("PPPoE Passthrough to Wan[%d]",pPktHdr->netifIdx); sendToBindindWAN = TRUE; }//patch for PPPoE routing + passthrough hybrid mode. 
end }else if(rg_db.systemGlobal.port_binding_by_protocal==2){//IPv6 Routing, IPv4 Bridge TRACE("IPv6 Routing, IPv4 Bridge"); if((rg_db.systemGlobal.wanIntfGroup[groupIdx].p_wanIntfConf->wan_type==RTK_RG_BRIDGE)|| ((pPktHdr->internalVlanID==4005)&&((pPktHdr->tagif&IPV4_TAGIF)||(pPktHdr->tagif&ARP_TAGIF))&&(rg_db.systemGlobal.wanIntfGroup[groupIdx].p_wanIntfConf->none_internet==0))|| ((pPktHdr->tagif&PPPOE_TAGIF))) { TRACE("Broadcast to Binding WAN[%d]!!",pPktHdr->netifIdx); sendToBindindWAN = TRUE; } //patch for PPPoE routing + passthrough hybrid mode. else if( rg_db.systemGlobal.pppoe_bc_passthrought_to_bindingWan_enable==RTK_RG_ENABLED //&& rg_db.systemGlobal.wanIntfGroup[groupIdx].p_wanIntfConf->wan_type==RTK_RG_PPPoE && (pPktHdr->tagif&PPPOE_TAGIF)) { TRACE("PPPoE Passthrough to Wan[%d]",pPktHdr->netifIdx); sendToBindindWAN = TRUE; }//patch for PPPoE routing + passthrough hybrid mode. end }else{// IPv4+IPv6 both Bridge TRACE("IPv4+IPv6 both Bridge"); if(rg_db.systemGlobal.wanIntfGroup[groupIdx].p_wanIntfConf->wan_type==RTK_RG_BRIDGE) { TRACE("Broadcast to Binding WAN[%d]!!",pPktHdr->netifIdx); sendToBindindWAN = TRUE; } //patch for PPPoE routing + passthrough hybrid mode. else if( rg_db.systemGlobal.pppoe_bc_passthrought_to_bindingWan_enable==RTK_RG_ENABLED //&& rg_db.systemGlobal.wanIntfGroup[groupIdx].p_wanIntfConf->wan_type==RTK_RG_PPPoE && (pPktHdr->tagif&PPPOE_TAGIF)) { TRACE("PPPoE Passthrough to Wan[%d]",pPktHdr->netifIdx); sendToBindindWAN = TRUE; }//patch for PPPoE routing + passthrough hybrid mode. end } // If packet needs to transmit, process it here. if (sendToBindindWAN){ #if defined(CONFIG_RG_IGMP_SNOOPING) || defined(CONFIG_RG_MLD_SNOOPING) _rtk_rg_igmpReport_portmask_check_and_limit(pPktHdr, &dpMask); _rtk_rg_igmpMldQuery_portmask_check_and_limit(pPktHdr, &dpMask); if(dpMask == 0x0) goto BYPASS_WAN; #endif pPktHdr->internalVlanID = rg_db.netif[pPktHdr->netifIdx].rtk_netif.vlan_id; TRACE("Replace internalVlanID to %d", pPktHdr->internalVlanID); bcSkb=rtk_rg_skbCopyToPreAllocSkb(skb); if(bcSkb==NULL) goto OUT_OF_MEM; if(outputPortIdx == RTK_RG_PORT_PON && !runPONPortAclCheck){ // Check PON ACL result and use pkthdr for PON if egress port is PON port memcpy(&pPktHdr->aclDecision, &rg_db.aclDecisionBackup, sizeof(rtk_rg_aclHitAndAction_t)); // PON port: recover ACL decision and check return value. runPONPortAclCheck = TRUE; if(aclRetPON==RG_FWDENGINE_RET_DROP){// 20151228 Cheney: CF drop, skip transmitting packet to this wan interface TRACE("CF_DROP (skip port mask 0x%x)", dpMask); if(bcSkb) _rtk_rg_dev_kfree_skb_any(bcSkb); byPassWAN = TRUE; goto BYPASS_WAN; } } aclRet=_rtk_rg_sendBroadcastToWan(pPktHdr,bcSkb,pPktHdr->netifIdx,dpMask); } byPassWAN = TRUE; goto BYPASS_WAN; } //3/** Broadcast - Path 3. to Bridge WAN **/ //Normal WAN Check for none-binding packet!! 
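/* Path 3 below forwards a non-binding broadcast to a bridge WAN when one of three
 * conditions holds: PPPoE passthrough is globally enabled, the frame comes from the
 * CPU port with the gateway's source MAC, or the WAN is an internet bridge WAN whose
 * port belongs to the packet's VLAN and has broadcast enabled.  A condensed sketch of
 * that predicate; all parameters are assumptions standing in for the driver's tables. */
#if 0
static int sketch_bridge_wan_gets_broadcast(int pppoePassthroughEnabled,
                                            int srcIsCpu, int srcIsGatewayMac,
                                            int wanIsNoneInternet, int wanIsBridge,
                                            unsigned int vlanMemberMask,
                                            unsigned int wanPortBit,
                                            int wanBroadcastDisabled)
{
	if (pppoePassthroughEnabled)
		return 1;                       /* passthrough: every WAN gets a copy  */
	if (srcIsCpu && srcIsGatewayMac)
		return 1;                       /* locally generated gateway traffic   */
	return !wanIsNoneInternet && wanIsBridge &&
	       (vlanMemberMask & wanPortBit) != 0 &&
	       !wanBroadcastDisabled;           /* VLAN-matched internet bridge WAN    */
}
#endif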
if(byPassWAN) goto BYPASS_WAN; // Cheney: hit binding port, so skip bridge WAN broadcast for(i=0;i<rg_db.systemGlobal.wanIntfTotalNum;i++) { pPktHdr->l3Offset = l3Offset_ori; pPktHdr->l4Offset = l4Offset_ori; //if((WANMask&(0x1<<(rg_db.systemGlobal.wanIntfGroup[i].index)))>0) if( rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf != NULL && srcPort!=rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->wan_port_idx && /*src block*/ outputPortIdx == rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->wan_port_idx) // output per port { dpMask=0x1<<rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->wan_port_idx; wanPortMask|=dpMask; if(((rg_db.algFunctionMask & RTK_RG_ALG_PPPOE_PASSTHROUGH_BIT) > 0) || //if pppoe pass through is turn on, all WAN has to be sended (srcPort==RTK_RG_MAC_PORT_CPU && isGatewayMac ) || //if src port is CPU and SA==GMAC, send it (!rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->none_internet && rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->wan_type==RTK_RG_BRIDGE && (rg_db.vlan[internalVlanID].MemberPortmask.bits[0]&dpMask)>0 && !rg_db.systemGlobal.wanIntfGroup[i].disableBroadcast)) //otherwise only boardcast to internet bridge WAN, and this WAN's VLANID didn't to any LAN intf's VLANID { DEBUG("Path 3 - check bridge wan[%d]", rg_db.systemGlobal.wanIntfGroup[i].index); if(srcPort==RTK_RG_MAC_PORT_CPU && isGatewayMac ) { if(pPktHdr->tagif&CVLAN_TAGIF) { if(rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_id!=pPktHdr->internalVlanID) continue; } else { if(rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_tag_on==1) continue; } //20140508LUKE:from procotol stack we should send to only one interface, so compare if this is the one we want! if(gw_netifIdx!=rg_db.systemGlobal.wanIntfGroup[i].index) continue; } if((rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->wan_type==RTK_RG_BRIDGE) && (rg_db.systemGlobal.initParam.macBasedTagDecision==0) && (rg_db.vlan[internalVlanID].fidMode==VLAN_FID_IVL)) { // prevent sending broadcast to the bridge WAN which was configured as different VLAN. if(rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf && (rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_id!=pPktHdr->internalVlanID)) { DEBUG("The bridge wan egress vlan %d doesn't match with internalVID - skip sending", rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_id); continue; } } if( pPktHdr->ingressLocation==RG_IGR_PROTOCOL_STACK && rg_db.systemGlobal.initParam.macBasedTagDecision==1){ //if broadcast from PS, we should let it compared by egress ACL and assigned sid/cvlan. pPktHdr->netifIdx = rg_db.systemGlobal.wanIntfGroup[i].index; } TRACE("Broadcast to WAN[%d]!!",rg_db.systemGlobal.wanIntfGroup[i].index); #if defined(CONFIG_RG_IGMP_SNOOPING) || defined(CONFIG_RG_MLD_SNOOPING) _rtk_rg_igmpReport_portmask_check_and_limit(pPktHdr, &dpMask); _rtk_rg_igmpMldQuery_portmask_check_and_limit(pPktHdr, &dpMask); if(dpMask == 0x0) goto BYPASS_WAN; #endif bcSkb=rtk_rg_skbCopyToPreAllocSkb(skb); if(bcSkb==NULL) goto OUT_OF_MEM; if(outputPortIdx == RTK_RG_PORT_PON && !runPONPortAclCheck){ // Check PON ACL result and use pkthdr for PON if egress port is PON port memcpy(&pPktHdr->aclDecision, &rg_db.aclDecisionBackup, sizeof(rtk_rg_aclHitAndAction_t)); // PON port: recover ACL decision and check return value. 
runPONPortAclCheck = TRUE; if(aclRetPON==RG_FWDENGINE_RET_DROP){// 20151224 Cheney: CF drop, skip transmitting packet to this wan interface wanSentPortMask|=(0x1<<rg_db.systemGlobal.wanIntfGroup[i].p_intfInfo->storedInfo.wan_intf.wan_intf_conf.wan_port_idx); TRACE("CF_DROP (skip port mask 0x%x)", wanSentPortMask); if(bcSkb) _rtk_rg_dev_kfree_skb_any(bcSkb); continue; } } aclRet=_rtk_rg_sendBroadcastToWan(pPktHdr,bcSkb,rg_db.systemGlobal.wanIntfGroup[i].index,dpMask); //20140505LUKE:from protocol stack should send to WAN only once!! if(pPktHdr->ingressLocation==RG_IGR_PROTOCOL_STACK) return RG_FWDENGINE_RET_DROP; //20150123LUKE: should not send broadcast to same VLAN more than once!! if(rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_id==pPktHdr->internalVlanID) wanSentPortMask|=(0x1<<rg_db.systemGlobal.wanIntfGroup[i].p_intfInfo->storedInfo.wan_intf.wan_intf_conf.wan_port_idx); } } /*else { //handle broadcast packet from WAN if((rg_db.vlan[internalVlanID].MemberPortmask.bits[0]&(0x1<<RTK_RG_PORT_CPU))>0) //contain CPU port, return to PS { DEBUG("WAN to WAN broadcast, trap to protocol stack..."); return RG_FWDENGINE_RET_TO_PS; } }*/ } BYPASS_WAN: //3/** Broadcast - Path 4. from VLAN Binding WAN to LAN **/ //reflash binding Wan decision pPktHdr->netifIdx = FAIL; //if this broadcast from bridge WAN, send to all Vlan-binding LAN port if(rg_db.systemGlobal.initParam.macBasedTagDecision==1 && pPktHdr->ingressLocation!=RG_IGR_PROTOCOL_STACK) { for(i=0;i<rg_db.systemGlobal.wanIntfTotalNum;i++) { pPktHdr->l3Offset = l3Offset_ori; pPktHdr->l4Offset = l4Offset_ori; //if((WANMask&(0x1<<(rg_db.systemGlobal.wanIntfGroup[i].index)))>0) if( rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf != NULL && srcPort==rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->wan_port_idx && rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->wan_type==RTK_RG_BRIDGE && rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_id==internalVlanID) { //Check VLAN-binding, if any port binding to this WAN, send it back! for(j=0;j<MAX_BIND_SW_TABLE_SIZE;j++) { if(rg_db.bind[j].valid && rg_db.bind[j].rtk_bind.vidLan!=0 && rg_db.nexthop[rg_db.wantype[rg_db.bind[j].rtk_bind.wanTypeIdx].rtk_wantype.nhIdx].rtk_nexthop.ifIdx==rg_db.systemGlobal.wanIntfGroup[i].index) { //Hit!!Send packet to binding port with tag //20140424LUKE:FIXME:directTX can not send to extension port, so ext_port binding won't get packet!! if(rg_db.bind[j].rtk_bind.portMask.bits[0]>0 && ((rg_db.bind[j].rtk_bind.portMask.bits[0] & (0x1<<outputPortIdx)) > 0)) // output per port { DEBUG("Path 4 - VLAN binding to LAN"); dpMask = 0x1<<outputPortIdx; vlanBindTagPortMask|=dpMask; //20140519LUKE:If get BC packet from Other WAN, it should not receive untag packet for the binding port! if(rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->none_internet) vlanBindUntagPortMask|=dpMask; #if defined(CONFIG_RG_IGMP_SNOOPING) || defined(CONFIG_RG_MLD_SNOOPING) _rtk_rg_igmpReport_portmask_check_and_limit(pPktHdr, &dpMask); _rtk_rg_igmpMldQuery_portmask_check_and_limit(pPktHdr, &dpMask); if(dpMask == 0x0) continue; #endif // Start handle packet content! 
bcSkb=rtk_rg_skbCopyToPreAllocSkb(skb); if(bcSkb==NULL) goto OUT_OF_MEM; allDestPortMask = dpMask; // If vlan binding path hit force forward, now SW still do filter process: _rtk_rg_egressPortMaskCheck() // Force being vlan tag and assign vlanID by binding rule pPktHdr->egressVlanTagif=1; pPktHdr->egressVlanID=rg_db.bind[j].rtk_bind.vidLan; _rtk_rg_egressPacketDoQosRemarkingDecision(pPktHdr, skb, bcSkb, dpMask, rg_db.bind[j].rtk_bind.vidLan); rg_kernel.txDescMask.tx_tx_portmask=RTK_RG_ALL_MAC_PORTMASK_WITHOUT_CPU; rg_kernel.txDesc.tx_tx_portmask=dpMask; aclRet = _rtk_rg_BroadcastPacketToLanWithEgressACLModification(RG_FWD_DECISION_BRIDGING, 0, pPktHdr,bcSkb,0,0, dpMask, allDestPortMask, -1); if(aclRet==RG_FWDENGINE_RET_DROP) { //free the copied skb if(bcSkb) _rtk_rg_dev_kfree_skb_any(bcSkb); } } } } } } } //3/** Broadcast - Path 5 port is belong to Tag or Untag set **/ dpMask = (0x1 << outputPortIdx); //Cheney: output per port if(pPktHdr->fwdDecision==RG_FWD_DECISION_PPPOE_MC || pPktHdr->fwdDecision==RG_FWD_DECISION_DSLITE_MC) { dpMask &= pPktHdr->multicastMacPortMask.portmask; } #if defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) else if (pPktHdr->fwdDecision==RG_FWD_DECISION_FLOW_MC) { dpMask &= pPktHdr->multicastMacPortMask.portmask; allDestPortMask = pPktHdr->multicastMacPortMask.portmask; } #endif else { dpMask&=(~(0x1<<srcPort)); #if defined(CONFIG_RTL9607C_SERIES) dpMask&= (~(RTK_RG_ALL_CPU_PORTMASK)); #else dpMask&=(~(0x1<<RTK_RG_PORT_CPU)); //FIXME:not sending to CPU, but how to WLAN? #endif allDestPortMask = rg_db.vlan[internalVlanID].MemberPortmask.bits[0]; //Cheney //20140516LUKE:if macBasedDecision is on, all WAN port should be mask here! if(rg_db.systemGlobal.initParam.macBasedTagDecision) { //20140515LUKE:hit binding should mask WAN port!! dpMask&=(~bindWanPortMask); //20140516LUKE:from Other WAN's packet should not send to vlan-binding port! dpMask&=(~vlanBindUntagPortMask); //If vlan is not added by cvlan, filter dpMask to prevent redundant packet. e.g. vid 4005. if(rg_db.vlan[pPktHdr->internalVlanID].addedAsCustomerVLAN==0) dpMask&=(~wanPortMask); } else { //20150123LUKE: should not send broadcast to same VLAN more than once!! dpMask&=(~wanSentPortMask); } //20140506LUKE:Check if we are sending packet from eth0.2, eth0.3, eth0.4, eth0.5 //if so, we should send to tx port only!! if(pPktHdr->ingressLocation==RG_IGR_PROTOCOL_STACK) { if(rg_kernel.protocolStackTxPortMask>0) dpMask&=rg_kernel.protocolStackTxPortMask; else dpMask&=(~(rg_db.systemGlobal.wanPortMask.portmask)); //filter all WAN port if send to eth0 } } //20140408LUKE:add LAN should not include WAN port, if WAN port is needed to receive packet, just add WAN instead! //20140424LUKE:broadcast should follow VLAN member port setting, we should not have any presumptions //dpMask&=(~wanPortMask); #if defined(CONFIG_RG_IGMP_SNOOPING) || defined(CONFIG_RG_MLD_SNOOPING) _rtk_rg_igmpReport_portmask_check_and_limit(pPktHdr, &dpMask); _rtk_rg_igmpMldQuery_portmask_check_and_limit(pPktHdr, &dpMask); #endif if(dpMask == 0) { TRACE("Portmask [0x%x] **STOP** sending packet because of no needed", 1<<outputPortIdx); } else{ unsigned char doTAG = FALSE; // with CVLAN TAG or not // Start handle packet content! 
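/* Path 5 below decides between the VLAN's tag set and untag set: a destination port
 * is treated as tagged when it is a VLAN member but not in the untag port mask.  A
 * one-line sketch of that classification; the mask parameters are assumptions for
 * the sketch. */
#if 0
static int sketch_egress_needs_ctag(unsigned int destPortMask,
                                    unsigned int vlanMemberMask,
                                    unsigned int vlanUntagMask)
{
	/* non-zero when at least one destination port is in the tag set */
	return (destPortMask & (vlanMemberMask & ~vlanUntagMask)) != 0;
}
#endif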
bcSkb=rtk_rg_skbCopyToPreAllocSkb(skb); if(bcSkb==NULL) goto OUT_OF_MEM; // Decide: destport is belongs to Tag or Untag set if(dpMask & (rg_db.vlan[internalVlanID].MemberPortmask.bits[0]&(~(rg_db.vlan[internalVlanID].UntagPortmask.bits[0])))){ //port is in Tag set doTAG = TRUE; //20140516LUKE:from other WAN's packet should not send to vlan-binding port! if(vlanBindTagPortMask>0) dpMask&=(~vlanBindTagPortMask); //if dpMask == 0x0, call continue to skip remaining process. if (dpMask == 0x0){ if(bcSkb) _rtk_rg_dev_kfree_skb_any(bcSkb); continue; } } if(outputPortIdx == RTK_RG_PORT_PON && !runPONPortAclCheck){ // Check PON ACL result and use pkthdr for PON if egress port is PON port memcpy(&pPktHdr->aclDecision, &rg_db.aclDecisionBackup, sizeof(rtk_rg_aclHitAndAction_t)); // PON port: recover ACL decision and check return value. runPONPortAclCheck = TRUE; if(aclRetPON==RG_FWDENGINE_RET_DROP){// 20151224 Cheney: CF drop, skip transmitting packet to this wan interface TRACE("CF_DROP (skip port mask 0x%x)", dpMask); if(bcSkb) _rtk_rg_dev_kfree_skb_any(bcSkb); continue; } } pPktHdr->egressVlanTagif=doTAG; pPktHdr->egressVlanID=internalVlanID; DEBUG("Path 5 - in [%s] set", doTAG?"Tag":"Untag"); _rtk_rg_egressPacketDoQosRemarkingDecision(pPktHdr, skb, bcSkb, dpMask, internalVlanID); rg_kernel.txDescMask.tx_tx_portmask=RTK_RG_ALL_MAC_PORTMASK_WITHOUT_CPU; rg_kernel.txDesc.tx_tx_portmask=dpMask; //dump_packet(bcSkb->data,bcSkb->len,"broadcast packet"); aclRet = _rtk_rg_BroadcastPacketToLanWithEgressACLModification(RG_FWD_DECISION_BRIDGING, 0, pPktHdr,bcSkb,0,0, dpMask, allDestPortMask, 0); if(aclRet==RG_FWDENGINE_RET_DROP) { //free the copied skb if(bcSkb) _rtk_rg_dev_kfree_skb_any(bcSkb); } } // end of dpMask != 0 }// end of outputPort for loop //3 Wireless: pPktHdr->egressMACPort=RTK_RG_MAC_PORT_CPU; //for ACL/CF UNI action of _rtk_rg_modifyPacketByACLAction() //20160309LUKE:reassign pktHdr value for consistent. pPktHdr->internalVlanID = internalVlanID; pPktHdr->egressVlanID = internalVlanID; //20140505LUKE:from protocol stack should send to physical LAN only!! if(pPktHdr->ingressLocation==RG_IGR_PROTOCOL_STACK) return RG_FWDENGINE_RET_DROP; #ifdef CONFIG_RG_WLAN_HWNAT_ACCELERATION if(((rg_db.vlan[internalVlanID].MemberPortmask.bits[0]&(1<<RTK_RG_PORT_CPU))&& (rg_db.vlan[internalVlanID].Ext_portmask.bits[0]&(1<<(RTK_RG_EXT_PORT0-RTK_RG_PORT_CPU)))) || ((pPktHdr->fwdDecision==RG_FWD_DECISION_PPPOE_MC || pPktHdr->fwdDecision==RG_FWD_DECISION_DSLITE_MC) && (pPktHdr->multicastExtPortMask.portmask&(1<<(RTK_RG_EXT_PORT0-RTK_RG_PORT_CPU))))) { //1 FIXME: from Master to Master, this should not be filtered!! 
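/* The GPON downstream broadcast filter used further below (and again in the to-CPU
 * path) only runs for frames whose destination MAC is the broadcast address or an
 * IPv4 multicast MAC with the 01:00:5e prefix.  A standalone sketch of that
 * classification; the helper name is an assumption. */
#if 0
static int sketch_is_bc_or_v4mc_dmac(const unsigned char *dmac)
{
	if ((dmac[0] & dmac[1] & dmac[2] & dmac[3] & dmac[4] & dmac[5]) == 0xff)
		return 1;                       /* ff:ff:ff:ff:ff:ff broadcast     */
	if (dmac[0] == 0x01 && dmac[1] == 0x00 && dmac[2] == 0x5e)
		return 1;                       /* 01:00:5e IPv4 multicast range   */
	return 0;
}
#endif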
//if((srcPort!=RTK_RG_PORT_CPU)||((srcPort==RTK_RG_PORT_CPU)&&(extSpa!=RTK_RG_EXT_PORT0))) //from phyiscal port OR ext1,2,3,4 //{ rtk_rg_mbssidDev_t intf_idx; #if defined(CONFIG_RG_IGMP_SNOOPING) || defined(CONFIG_RG_MLD_SNOOPING) //egress_filter_portmask = (1<<(RTK_RG_EXT_PORT0+MAX_WLAN_DEVICE_NUM))-(1<<(RTK_RG_EXT_PORT0)); //egress_filter_portmask = (1<<RTK_RG_PORT_MAX)-(1<<RTK_RG_EXT_PORT0); egress_filter_portmask = 0xF8; //TRACE("WLAN0 egress_filter_portmask[0x%x]",egress_filter_portmask); _rtk_rg_igmpReport_portmask_check_and_limit(pPktHdr, &egress_filter_portmask); //TRACE("WLAN0 egress_filter_portmask[0x%x]",egress_filter_portmask); _rtk_rg_igmpMldQuery_portmask_check_and_limit(pPktHdr, &egress_filter_portmask); //TRACE("WLAN0 egress_filter_portmask[0x%x]",egress_filter_portmask); if(egress_filter_portmask==0x0) goto send_to_master_wifi_end; #endif bcSkb=rtk_rg_skbCopyToPreAllocSkb(skb); if(bcSkb==NULL) goto OUT_OF_MEM; //do aclMidify assert_ok(_rtk_rg_egressACLPatternCheck(RG_FWD_DECISION_BRIDGING,0,pPktHdr,bcSkb,0,0,RTK_RG_EXT_PORT0)); aclRet = _rtk_rg_egressACLAction(RG_FWD_DECISION_BRIDGING,pPktHdr); if(aclRet==RG_FWDENGINE_RET_DROP){ if(bcSkb) _rtk_rg_dev_kfree_skb_any(bcSkb); goto send_to_master_wifi_end; } aclRet = _rtk_rg_modifyPacketByACLAction(bcSkb,pPktHdr,RTK_RG_EXT_PORT0); if(aclRet==RG_FWDENGINE_RET_DROP){ if(bcSkb) _rtk_rg_dev_kfree_skb_any(bcSkb); goto send_to_master_wifi_end; } //20150618LUKE: for multicast packet to wifi, we should check CPU port is in vlan's untag set or not. if(rg_db.vlan[pPktHdr->internalVlanID].valid && rg_db.vlan[pPktHdr->internalVlanID].UntagPortmask.bits[0]&(0x1<<RTK_RG_PORT_CPU)) pPktHdr->egressVlanTagif=0; else pPktHdr->egressVlanTagif=1; if(rg_db.systemGlobal.gponDsBCModuleEnable && (pPktHdr->ingressPort==RTK_RG_PORT_PON) && (((pPktHdr->pDmac[0]&pPktHdr->pDmac[1]&pPktHdr->pDmac[2]&pPktHdr->pDmac[3]&pPktHdr->pDmac[4]&pPktHdr->pDmac[5])==0xff)||(pPktHdr->pDmac[0]==0x01 && pPktHdr->pDmac[1]==0x00 && pPktHdr->pDmac[2]==0x5e)) && (rg_db.systemGlobal.initParam.wanPortGponMode==1))//must be GPON, BC, from PON { _rtk_rg_egressPacketSend_for_gponDsBcFilterAndRemarking(bcSkb,pPktHdr,1); } else{ intf_idx=_rtk_master_wlan_mbssid_tx(pPktHdr,bcSkb); if(intf_idx==RG_RET_MBSSID_NOT_FOUND) { if(bcSkb) _rtk_rg_dev_kfree_skb_any(bcSkb); } if(intf_idx==RG_RET_MBSSID_FLOOD_ALL_INTF) { TRACE("Broadcast to master WLAN(flooding)"); } else { TRACE("Broadcast to master WLAN(intf=%d)",intf_idx); } }//end of if((pPktHdr->ingressPort==RTK_RG_PORT_PON) && ((pPktHdr->pDmac[0]&pPktHdr->pDmac[1]&pPktHdr->pDmac[2]&pPktHdr->pDmac[3]&pPktHdr->pDmac[4]&pPktHdr->pDmac[5])==0xff) && (rg_db.systemGlobal.initParam.wanPortGponMode==1))//must be GPON, BC, from PON //} } send_to_master_wifi_end: #ifdef CONFIG_DUALBAND_CONCURRENT // send broadcast to slave wifi if(((rg_db.vlan[internalVlanID].MemberPortmask.bits[0]&(1<<RTK_RG_PORT_CPU))&& (rg_db.vlan[internalVlanID].Ext_portmask.bits[0]&(1<<(RTK_RG_EXT_PORT1-RTK_RG_PORT_CPU)))) || ((pPktHdr->fwdDecision==RG_FWD_DECISION_PPPOE_MC || pPktHdr->fwdDecision==RG_FWD_DECISION_DSLITE_MC) && (pPktHdr->multicastExtPortMask.portmask&(1<<(RTK_RG_EXT_PORT1-RTK_RG_PORT_CPU))))) { if((srcPort!=RTK_RG_PORT_CPU)||((srcPort==RTK_RG_PORT_CPU)&&(extSpa!=RTK_RG_EXT_PORT1))) //from phyiscal port OR ext0,2,3,4 { #if defined(CONFIG_RG_IGMP_SNOOPING) || defined(CONFIG_RG_MLD_SNOOPING) #if 0 egress_filter_portmask = (1<<RTK_RG_EXT_PORT1); #else #define NIPQUAD(addr) \ ((unsigned char *)&addr)[0], \ ((unsigned char *)&addr)[1], \ ((unsigned char *)&addr)[2], 
\ ((unsigned char *)&addr)[3] #define NIP4QUAD(addr) \ NIPQUAD((addr[0])) #define NIP6QUAD(addr) \ NIPQUAD((addr[0])), \ NIPQUAD((addr[1])), \ NIPQUAD((addr[2])), \ NIPQUAD((addr[3])) #define IP4D "%d.%d.%d.%d" #define IP4H "%X:%X:%X:%X" #define IP6D IP4D" "IP4D" "IP4D" "IP4D #define IP6H IP4H" "IP4H" "IP4H" "IP4H if (!rg_db.systemGlobal.initParam.igmpSnoopingEnable) { SNOOPING_DIS: egress_filter_portmask = (1<<RTK_RG_EXT_PORT1); TRACE("Flood to all VLAN-matched Slave WIFI intf!"); } else { TRACE("Flood to IGMP indicated Slave WIFI intf!"); { struct rtl_multicastDataInfo multicastDataInfo; struct rtl_multicastFwdInfo multicastFwdInfo; //unsigned int wifi0_full_mask=0x0; //unsigned int wifi1_full_mask=0x0; int retVal; bzero(&multicastDataInfo, sizeof(struct rtl_multicastDataInfo)); multicastDataInfo.vlanId = rg_db.pktHdr->internalVlanID; if((pPktHdr->tagif & IPV4_TAGIF && rg_db.systemGlobal.multicastProtocol == RG_MC_MLD_ONLY)|| (pPktHdr->tagif & IPV6_TAGIF && rg_db.systemGlobal.multicastProtocol == RG_MC_IGMP_ONLY)) goto SNOOPING_DIS; if (pPktHdr->tagif & IPV4_TAGIF) { multicastDataInfo.ipVersion = IP_VERSION4; multicastDataInfo.sourceIp[0] = pPktHdr->ipv4Sip; multicastDataInfo.groupAddr[0] = pPktHdr->ipv4Dip; DEBUG("MC Data SrcIP(" IP4D ")", NIPQUAD(multicastDataInfo.sourceIp)); DEBUG("MC Data GrpIP(" IP4D ")", NIPQUAD(multicastDataInfo.groupAddr)); } else if (pPktHdr->tagif & IPV6_TAGIF) { multicastDataInfo.ipVersion=IP_VERSION6; multicastDataInfo.groupAddr[0] = (pPktHdr->pIpv6Dip[0] <<12)+(pPktHdr->pIpv6Dip[1] <<8)+(pPktHdr->pIpv6Dip[2] <<4)+(pPktHdr->pIpv6Dip[3]); multicastDataInfo.groupAddr[1] = (pPktHdr->pIpv6Dip[4] <<12)+(pPktHdr->pIpv6Dip[5] <<8)+(pPktHdr->pIpv6Dip[6] <<4)+(pPktHdr->pIpv6Dip[7]); multicastDataInfo.groupAddr[2] = (pPktHdr->pIpv6Dip[8] <<12)+(pPktHdr->pIpv6Dip[9] <<8)+(pPktHdr->pIpv6Dip[10]<<4)+(pPktHdr->pIpv6Dip[11]); multicastDataInfo.groupAddr[3] = (pPktHdr->pIpv6Dip[12]<<12)+(pPktHdr->pIpv6Dip[13]<<8)+(pPktHdr->pIpv6Dip[14]<<4)+(pPktHdr->pIpv6Dip[15]); multicastDataInfo.sourceIp[0] = (pPktHdr->pIpv6Sip[0] <<12)+(pPktHdr->pIpv6Sip[1] <<8)+(pPktHdr->pIpv6Sip[2] <<4)+(pPktHdr->pIpv6Sip[3]); multicastDataInfo.sourceIp[1] = (pPktHdr->pIpv6Sip[4] <<12)+(pPktHdr->pIpv6Sip[5] <<8)+(pPktHdr->pIpv6Sip[6] <<4)+(pPktHdr->pIpv6Sip[7]); multicastDataInfo.sourceIp[2] = (pPktHdr->pIpv6Sip[8] <<12)+(pPktHdr->pIpv6Sip[9] <<8)+(pPktHdr->pIpv6Sip[10]<<4)+(pPktHdr->pIpv6Sip[11]); multicastDataInfo.sourceIp[3] = (pPktHdr->pIpv6Sip[12]<<12)+(pPktHdr->pIpv6Sip[13]<<8)+(pPktHdr->pIpv6Sip[14]<<4)+(pPktHdr->pIpv6Sip[15]); DEBUG("MC Data SrcIP(" IP6H ")", NIP6QUAD(multicastDataInfo.sourceIp)); DEBUG("MC Data GrpIP(" IP6H ")", NIP6QUAD(multicastDataInfo.groupAddr)); } else { IGMP("ignore non-IPv4 or non-IPv6 MC packet"); } retVal = rtl_getMulticastDataFwdInfo(rg_db.systemGlobal.nicIgmpModuleIndex, &multicastDataInfo, &multicastFwdInfo); if (retVal!=SUCCESS) { DEBUG("FAIL: rtl_getMulticastDataFwdInfo\n"); } #if 0 if (multicastFwdInfo.srcFilterMode==RTK_RG_IPV4MC_EXCLUDE) { #if defined(CONFIG_RTL9600_SERIES) mapping_entry->fwdmembr=multicastFwdInfo.fwdPortMask & ((1<<RTK_RG_MAC_PORT_MAX) - 1); #else mapping_entry->fwdmembr=multicastFwdInfo.fwdPortMask; #endif } else { mapping_entry->fwdmembr=multicastFwdInfo.fwdPortMask; } #endif #if 0 DEBUG("Mode:%d l2PortMask :0x%X, fwdPortMask: 0x%X", multicastFwdInfo.srcFilterMode, multicastFwdInfo.l2PortMask, multicastFwdInfo.fwdPortMask); wifi0_full_mask=((1<<RG_RET_MBSSID_MASTER_CLIENT_INTF)-1); wifi1_full_mask=((1<<RG_RET_MBSSID_SLAVE_CLIENT_INTF)-1) & 
(~((1<<WLAN_DEVICE_NUM)-1));
DEBUG("wifi0_full_mask :0x%X, wifi1_full_mask: 0x%X", wifi0_full_mask, wifi1_full_mask);
if ((multicastFwdInfo.srcFilterMode==RTK_RG_IPV4MC_INCLUDE) && (multicastFwdInfo.srcFilterMode==RTK_RG_IPV4MC_DONT_CARE_SRC))
{
	if (!(multicastFwdInfo.fwdPortMask & 0x80))
	{
		DEBUG("in-mo, before wifi0 capable_dev_mask=%X", capable_dev_mask);
		capable_dev_mask &= (~wifi0_full_mask);
		DEBUG("in-mo, after wifi0 capable_dev_mask=%X", capable_dev_mask);
	}
	if (!(multicastFwdInfo.fwdPortMask & 0x100))
	{
		DEBUG("in-mo, before wifi1 capable_dev_mask=%X", capable_dev_mask);
		capable_dev_mask &= (~wifi1_full_mask);
		DEBUG("in-mo, after wifi1 capable_dev_mask=%X", capable_dev_mask);
	}
}
else if (multicastFwdInfo.srcFilterMode==RTK_RG_IPV4MC_EXCLUDE)
{
	if (!(multicastFwdInfo.l2PortMask & 0x80)) //if (!(multicastFwdInfo.fwdPortMask & 0x80))
	{
		DEBUG("ex-mo, before wifi0 capable_dev_mask=%X", capable_dev_mask);
		capable_dev_mask &= (~wifi0_full_mask);
		DEBUG("ex-mo, after wifi0 capable_dev_mask=%X", capable_dev_mask);
	}
	if (!(multicastFwdInfo.l2PortMask & 0x100)) //if (!(multicastFwdInfo.fwdPortMask & 0x100))
	{
		DEBUG("ex-mo, before wifi1 capable_dev_mask=%X", capable_dev_mask);
		capable_dev_mask &= (~wifi1_full_mask);
		DEBUG("ex-mo, after wifi1 capable_dev_mask=%X", capable_dev_mask);
	}
}
else
{
	DEBUG("Do not care igmp");
	DEBUG("capable_dev_mask=%X", capable_dev_mask);
}
#else
if ((multicastFwdInfo.srcFilterMode==RTK_RG_IPV4MC_INCLUDE) || (multicastFwdInfo.srcFilterMode==RTK_RG_IPV4MC_DONT_CARE_SRC))
{
	if (!(multicastFwdInfo.fwdPortMask & (0x1<<RTK_RG_EXT_PORT1))) //0x100
	{
		DEBUG("in/dc-mode IGMP indicates slave wifi port");
		egress_filter_portmask = (1<<RTK_RG_EXT_PORT1);
	}
}
else if (multicastFwdInfo.srcFilterMode==RTK_RG_IPV4MC_EXCLUDE)
{
	if (!(multicastFwdInfo.l2PortMask & (0x1<<RTK_RG_EXT_PORT1))) //0x100
	{
		DEBUG("ex-mode IGMP indicates slave wifi port");
		egress_filter_portmask = (1<<RTK_RG_EXT_PORT1);
	}
}
else
{
	DEBUG("IGMP does not indicate slave wifi port");
}
#endif
}
}
#if 0
#define MACH "%02X:%02X:%02X:%02X:%02X:%02X"
#define NMAC(addr) \
	((unsigned char *)(addr))[0], \
	((unsigned char *)(addr))[1], \
	((unsigned char *)(addr))[2], \
	((unsigned char *)(addr))[3], \
	((unsigned char *)(addr))[4], \
	((unsigned char *)(addr))[5]
DEBUG("DA("MACH") SA("MACH")", NMAC(skb->data), NMAC(skb->data+6) );
#endif
#endif
_rtk_rg_igmpReport_portmask_check_and_limit(pPktHdr, &egress_filter_portmask);
_rtk_rg_igmpMldQuery_portmask_check_and_limit(pPktHdr, &egress_filter_portmask);
if(egress_filter_portmask==0x0) goto send_to_slave_wifi_end;
#endif
// WLAN port (tag & untag)
bcSkb=rtk_rg_skbCopyToPreAllocSkb(skb);
if(bcSkb==NULL) goto OUT_OF_MEM;
pPktHdr->egressVlanID=CONFIG_DEFAULT_TO_SLAVE_GMAC_VID;
pPktHdr->egressVlanTagif=1;
pPktHdr->egressPriority=CONFIG_DEFAULT_TO_SLAVE_GMAC_PRI;
rg_kernel.txDescMask.tx_dislrn=1;
rg_kernel.txDescMask.tx_keep=1;
rg_kernel.txDescMask.tx_l34_keep=1;
rg_kernel.txDescMask.tx_tx_portmask=RTK_RG_ALL_MAC_PORTMASK_WITHOUT_CPU; //no cpu port
rg_kernel.txDesc.tx_dislrn=1; // patch for reason 192
rg_kernel.txDesc.tx_keep=1;//20141104LUKE: when L34Keep is on, Keep is also needed for gpon.
rg_kernel.txDesc.tx_l34_keep=1; rg_kernel.txDesc.tx_tx_portmask=0; //HWLOOKUP (because: HW do not have extension port & CPU port bit) //txinfo_debug(&rg_kernel.txDesc); //memDump(bcSkb->data,bcSkb->len,"BC-to-WIFI2"); TRACE("Broadcast to slave WLAN by GMAC(VID=%d,PRI=%d,HWLOOKUP)",CONFIG_DEFAULT_TO_SLAVE_GMAC_VID,CONFIG_DEFAULT_TO_SLAVE_GMAC_PRI); assert_ok(_rtk_rg_egressACLPatternCheck(RG_FWD_DECISION_BRIDGING,0,pPktHdr,bcSkb,0,0,RTK_RG_EXT_PORT1)); aclRet = _rtk_rg_egressACLAction(RG_FWD_DECISION_BRIDGING,pPktHdr); if(aclRet==RG_FWDENGINE_RET_DROP){ if(bcSkb) _rtk_rg_dev_kfree_skb_any(bcSkb); //goto send_to_master_wifi_end; }else{ aclRet = _rtk_rg_modifyPacketByACLAction(bcSkb,pPktHdr,RTK_RG_EXT_PORT1); if(aclRet==RG_FWDENGINE_RET_DROP){ _rtk_rg_dev_kfree_skb_any(bcSkb); }else{ _rtk_rg_egressPacketSend(bcSkb,pPktHdr); } } } } else //debug { //debug if ( ((rg_db.vlan[internalVlanID].MemberPortmask.bits[0]&(1<<RTK_RG_PORT_CPU))&& (rg_db.vlan[internalVlanID].Ext_portmask.bits[0]&(1<<(RTK_RG_EXT_PORT1-RTK_RG_PORT_CPU)))) ) TRACE("No BC to slave WLAN 1"); // || if ( (pPktHdr->fwdDecision==RG_FWD_DECISION_PPPOE_MC && (pPktHdr->multicastExtPortMask.portmask&(1<<(RTK_RG_EXT_PORT1-RTK_RG_PORT_CPU)))) ) TRACE("No BC to slave WLAN 2"); } //debug #if defined(CONFIG_RG_IGMP_SNOOPING) || defined(CONFIG_RG_MLD_SNOOPING) send_to_slave_wifi_end: #endif #endif #endif //Check VLAN contain CPU port or not dpMask=rg_db.vlan[internalVlanID].MemberPortmask.bits[0]&(~(0x1<<srcPort)); if(((dpMask&(0x1<<RTK_RG_PORT_CPU))>0)||(extSpa>RTK_RG_PORT_CPU)) //from EXT Port shall not be src block. { #if defined(CONFIG_RG_IGMP_SNOOPING) || defined(CONFIG_RG_MLD_SNOOPING) //last packet to CPU, if no need to send just return DROP. egress_filter_portmask = (1<<RTK_RG_PORT_CPU); _rtk_rg_igmpReport_portmask_check_and_limit(pPktHdr, &egress_filter_portmask); _rtk_rg_igmpMldQuery_portmask_check_and_limit(pPktHdr, &egress_filter_portmask); if(egress_filter_portmask==0x0) return RG_FWDENGINE_RET_DROP; #endif //the original packet continue to protocol stack //DEBUG("the original packet continue to protocol stack!"); //if(cpSkb) _rtk_rg_dev_kfree_skb_any(cpSkb); if(pPktHdr->fwdDecision==RG_FWD_DECISION_NO_PS_BC) return RG_FWDENGINE_RET_DROP; //20150616LUKE: sometimes pppeo proxy only allow pppoe-wan-binding packet. if so, we just drop here. if(rg_db.systemGlobal.initParam.macBasedTagDecision && rg_db.systemGlobal.pppoeProxyAllowBindingOnly && (pPktHdr->etherType==0x8863)){ if((_rtk_rg_bindingRuleCheck(pPktHdr, &groupIdx)!=RG_FWDENGINE_RET_HIT_BINDING)|| (rg_db.systemGlobal.wanIntfGroup[groupIdx].p_wanIntfConf->wan_type!=RTK_RG_PPPoE)) return RG_FWDENGINE_RET_DROP; } //REC20131224:recovery egressVlanID which replaced by slave Wifi settings pPktHdr->egressVlanID=internalVlanID; pPktHdr->egressVlanTagif=((rg_db.vlan[internalVlanID].UntagPortmask.bits[0]&(0x1<<RTK_RG_PORT_CPU))==0); if(rg_db.systemGlobal.qosInternalDecision.qosDot1pPriRemarkByInternalPriEgressPortEnable[RTK_RG_MAC_PORT_CPU]==RTK_RG_ENABLED)//Qos dotip remarking by internal pPktHdr->egressPriority=pPktHdr->internalPriority; else//no Qos dot1p remarking pPktHdr->egressPriority=pPktHdr->ctagPri; //special case: to CPU, check the gponDsBcFilter by original skb, not copied skb. 
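/* Before handing the original skb back to the protocol stack, the code above picks
 * the CVLAN tagging and 802.1p priority for the CPU copy: tag unless the CPU port is
 * in the VLAN's untag set, and take the priority from the internal priority only when
 * dot1p remarking is enabled on the CPU port.  A compact sketch; parameters are
 * assumptions standing in for the driver's globals. */
#if 0
static void sketch_cpu_copy_tag_decision(unsigned int vlanUntagMask, unsigned int cpuPortBit,
                                         int dot1pRemarkByInternalPri,
                                         int internalPri, int ctagPri,
                                         int *tagged, int *egressPri)
{
	*tagged = (vlanUntagMask & cpuPortBit) == 0;            /* untag set wins          */
	*egressPri = dot1pRemarkByInternalPri ? internalPri : ctagPri; /* remark or keep   */
}
#endif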
if(rg_db.systemGlobal.gponDsBCModuleEnable && (pPktHdr->ingressPort==RTK_RG_PORT_PON) && (((pPktHdr->pDmac[0]&pPktHdr->pDmac[1]&pPktHdr->pDmac[2]&pPktHdr->pDmac[3]&pPktHdr->pDmac[4]&pPktHdr->pDmac[5])==0xff)||(pPktHdr->pDmac[0]==0x01 && pPktHdr->pDmac[1]==0x00 && pPktHdr->pDmac[2]==0x5e)) && (rg_db.systemGlobal.initParam.wanPortGponMode==1))//must be GPON, BC, from PON { _rtk_rg_egressPacketSend_for_gponDsBcFilterAndRemarking(skb,pPktHdr,2); } return RG_FWDENGINE_RET_TO_PS; } else { //_rtk_rg_dev_kfree_skb_any(skb); TRACE("the original packet stop here...internalVlanID=%d dpMask=%x srcPort=%x vlan[%x]",internalVlanID,dpMask,srcPort,rg_db.vlan[internalVlanID].MemberPortmask.bits[0]); //if(cpSkb) _rtk_rg_dev_kfree_skb_any(cpSkb); return RG_FWDENGINE_RET_DROP; } OUT_OF_MEM: #if RTK_RG_SKB_PREALLOCATE FIXME("Out of pre-alloc memory(%s:%d)\n",__FUNCTION__,__LINE__); #else FIXME("Out of memory(%s:%d)\n",__FUNCTION__,__LINE__); #endif BC_PROCESS_END: if(bcSkb) _rtk_rg_dev_kfree_skb_any(bcSkb); //if(cpSkb) _rtk_rg_dev_kfree_skb_any(cpSkb); return RG_FWDENGINE_RET_DROP; //return RG_FWDENGINE_RET_DIRECT_TX; } int _rtk_rg_layer2GarbageCollection(int l2Idx) { int search_idx,count=0,ret,i,invalidNum=0,smallestIdx=4; rtk_l2_addr_table_t l2Entry; DEBUG("_rtk_rg_layer2GarbageCollection, l2Idx is %d",l2Idx); //Compare from l2Idx, if the entry in software table is not in hardware table anymore, the entry will be used for new entry do { search_idx=l2Idx+count; ret=rtk_l2_nextValidEntry_get(&search_idx,&l2Entry); if(ret!=RT_ERR_OK)return 4; if(search_idx>=l2Idx+4 || search_idx<l2Idx) //no valid entry in this 4-way hased address { //reset lasting software LUT table to invalid invalidNum=4-count; DEBUG("the index after %d is invalid in hw table, so invalid the lasting %d software entries!!",l2Idx+count,invalidNum); //------------------ Critical Section start -----------------------// //rg_lock(&rg_kernel.saLearningLimitLock); if(rg_db.lut[l2Idx+count].rtk_lut.entry.l2UcEntry.port>=RTK_RG_MAC_PORT_CPU) { if(rg_db.systemGlobal.accessWanLimitPortMask_member.portmask&(0x1<<(rg_db.lut[l2Idx+count].rtk_lut.entry.l2UcEntry.ext_port+RTK_RG_PORT_CPU))&&rg_db.lut[l2Idx+count].permit_for_l34_forward) atomic_dec(&rg_db.systemGlobal.accessWanLimitPortMaskCount); atomic_dec(&rg_db.systemGlobal.sourceAddrLearningCount[rg_db.lut[l2Idx+count].rtk_lut.entry.l2UcEntry.ext_port+RTK_RG_PORT_CPU]); //decrease wlan's device count if(rg_db.lut[l2Idx+count].rtk_lut.entry.l2UcEntry.ext_port==(RTK_RG_EXT_PORT0-RTK_RG_PORT_CPU)) { #ifdef CONFIG_MASTER_WLAN0_ENABLE atomic_dec(&rg_db.systemGlobal.wlan0SourceAddrLearningCount[(int)rg_db.lut[l2Idx+count].wlan_device_idx]); #endif } #ifdef CONFIG_DUALBAND_CONCURRENT else if(rg_db.systemGlobal.enableSlaveSSIDBind && rg_db.lut[l2Idx+count].rtk_lut.entry.l2UcEntry.ext_port==(RTK_RG_EXT_PORT1-RTK_RG_PORT_CPU)) { #ifdef CONFIG_MASTER_WLAN0_ENABLE atomic_dec(&rg_db.systemGlobal.wlan0SourceAddrLearningCount[(int)rg_db.lut[l2Idx+count].wlan_device_idx]); #endif } #endif } else { if(rg_db.systemGlobal.accessWanLimitPortMask_member.portmask&(0x1<<(rg_db.lut[l2Idx+count].rtk_lut.entry.l2UcEntry.port))&&rg_db.lut[l2Idx+count].permit_for_l34_forward) atomic_dec(&rg_db.systemGlobal.accessWanLimitPortMaskCount); atomic_dec(&rg_db.systemGlobal.sourceAddrLearningCount[rg_db.lut[l2Idx+count].rtk_lut.entry.l2UcEntry.port]); } if(_rtK_rg_checkCategoryPortmask(&rg_db.lut[l2Idx+count].rtk_lut.entry.l2UcEntry)==RT_ERR_RG_OK) atomic_dec(&rg_db.systemGlobal.accessWanLimitCategoryCount[(unsigned 
int)rg_db.lut[l2Idx+count].category]); //------------------ Critical Section End -----------------------// //rg_unlock(&rg_kernel.saLearningLimitLock); memset(&rg_db.lut[l2Idx+count],0,(sizeof(rtk_rg_table_lut_t)*invalidNum)); if(smallestIdx>count) smallestIdx=count; break; } invalidNum=search_idx-(l2Idx+count); //how many invalid entries between start_idx and return_idx for(i=0;i<invalidNum;i++) { if(smallestIdx>(count+i)) smallestIdx=count+i; DEBUG("invalid the index %d in software table",l2Idx+count); //------------------ Critical Section start -----------------------// //rg_lock(&rg_kernel.saLearningLimitLock); if(rg_db.lut[l2Idx+count].rtk_lut.entry.l2UcEntry.port>=RTK_RG_PORT_CPU) { if(rg_db.systemGlobal.accessWanLimitPortMask_member.portmask&(0x1<<(rg_db.lut[l2Idx+count].rtk_lut.entry.l2UcEntry.ext_port+RTK_RG_PORT_CPU))&&rg_db.lut[l2Idx+count].permit_for_l34_forward) atomic_dec(&rg_db.systemGlobal.accessWanLimitPortMaskCount); atomic_dec(&rg_db.systemGlobal.sourceAddrLearningCount[rg_db.lut[l2Idx+count].rtk_lut.entry.l2UcEntry.ext_port+RTK_RG_PORT_CPU]); //decrease wlan's device count if(rg_db.lut[l2Idx+count].rtk_lut.entry.l2UcEntry.ext_port==(RTK_RG_EXT_PORT0-RTK_RG_PORT_CPU)) { #ifdef CONFIG_MASTER_WLAN0_ENABLE atomic_dec(&rg_db.systemGlobal.wlan0SourceAddrLearningCount[(int)rg_db.lut[l2Idx+count].wlan_device_idx]); #endif } #ifdef CONFIG_DUALBAND_CONCURRENT else if(rg_db.systemGlobal.enableSlaveSSIDBind && rg_db.lut[l2Idx+count].rtk_lut.entry.l2UcEntry.ext_port==(RTK_RG_EXT_PORT1-RTK_RG_PORT_CPU)) { #ifdef CONFIG_MASTER_WLAN0_ENABLE atomic_dec(&rg_db.systemGlobal.wlan0SourceAddrLearningCount[(int)rg_db.lut[l2Idx+count].wlan_device_idx]); #endif } #endif } else { if(rg_db.systemGlobal.accessWanLimitPortMask_member.portmask&(0x1<<(rg_db.lut[l2Idx+count].rtk_lut.entry.l2UcEntry.port))&&rg_db.lut[l2Idx+count].permit_for_l34_forward) atomic_dec(&rg_db.systemGlobal.accessWanLimitPortMaskCount); atomic_dec(&rg_db.systemGlobal.sourceAddrLearningCount[rg_db.lut[l2Idx+count].rtk_lut.entry.l2UcEntry.port]); } if(_rtK_rg_checkCategoryPortmask(&rg_db.lut[l2Idx+count].rtk_lut.entry.l2UcEntry)==SUCCESS) atomic_dec(&rg_db.systemGlobal.accessWanLimitCategoryCount[(unsigned int)rg_db.lut[l2Idx+count].category]); //------------------ Critical Section End -----------------------// //rg_unlock(&rg_kernel.saLearningLimitLock); memset(&rg_db.lut[l2Idx+count],0,sizeof(rtk_rg_table_lut_t)); //clean invalid entries } count+=(invalidNum+1); }while(count<4); DEBUG("the return of smallestIdx is %d",smallestIdx); return smallestIdx; } void _rtk_rg_layer2CleanL34ReferenceTable(int l2Idx) { int i,k; ipaddr_t victim_ip; //may be more than one #ifdef CONFIG_RG_IPV6_STATEFUL_ROUTING_SUPPORT rtk_rg_ipv6_layer4_linkList_t *pV6L4List,*nextEntry; #endif #ifdef CONFIG_RG_IPV6_SOFTWARE_SHORTCUT_SUPPORT rtk_ipv6_addr_t zeroV6Ip; bzero(zeroV6Ip.ipv6_addr, IPV6_ADDR_LEN); #endif #ifdef CONFIG_ROME_NAPT_SHORTCUT for(i=0; i<MAX_NAPT_SHORTCUT_SIZE; i++) { #if defined(CONFIG_RTL9600_SERIES) if(rg_db.naptShortCut[i].sip!=0 && rg_db.naptShortCut[i].new_lut_idx==l2Idx) #else //support lut traffic bit if(rg_db.naptShortCut[i].sip!=0 && (rg_db.naptShortCut[i].new_lut_idx==l2Idx || rg_db.naptShortCut[i].smacL2Idx==l2Idx)) #endif { TABLE("del v4 shortcut[%d].", i); _rtk_rg_v4ShortCut_delete(i); } } #endif #ifdef CONFIG_RG_IPV6_SOFTWARE_SHORTCUT_SUPPORT for(i=0; i<MAX_NAPT_V6_SHORTCUT_SIZE; i++) { #if defined(CONFIG_RTL9600_SERIES) if(memcmp(rg_db.naptv6ShortCut[i].sip.ipv6_addr, zeroV6Ip.ipv6_addr, IPV6_ADDR_LEN)!=0 && 
rg_db.naptv6ShortCut[i].new_lut_idx==l2Idx) #else //support lut traffic bit if(memcmp(rg_db.naptv6ShortCut[i].sip.ipv6_addr, zeroV6Ip.ipv6_addr, IPV6_ADDR_LEN)!=0 && (rg_db.naptv6ShortCut[i].new_lut_idx==l2Idx || rg_db.naptv6ShortCut[i].smacL2Idx==l2Idx)) #endif { TABLE("del v6 shortcut[%d].", i); _rtk_rg_v6ShortCut_delete(i); } } #endif #ifdef CONFIG_RG_IPV6_STATEFUL_ROUTING_SUPPORT for(i=0; i<MAX_IPV6_STATEFUL_HASH_HEAD_SIZE; i++) { list_for_each_entry_safe(pV6L4List,nextEntry,&rg_db.ipv6Layer4HashListHead[i],layer4_list) { #if defined(CONFIG_RTL9600_SERIES) if(pV6L4List->dmacL2Idx==l2Idx) #else //support lut traffic bit if(pV6L4List->dmacL2Idx==l2Idx || pV6L4List->smacL2Idx==l2Idx) #endif { TABLE("del v6 stateful shortcut."); //------------------ Critical Section start -----------------------// rg_lock(&rg_kernel.ipv6StatefulLock); _rtk_rg_fwdEngine_ipv6ConnList_del(pV6L4List); //------------------ Critical Section End -----------------------// rg_unlock(&rg_kernel.ipv6StatefulLock); } } } #endif //WARNING(" choose victim from arp_used entry:%d",first_dynArp); for(i=0;i<MAX_ARP_SW_TABLE_SIZE;i++) { if(rg_db.arp[i].rtk_arp.valid && rg_db.arp[i].rtk_arp.nhIdx==l2Idx) { //look up for napt DIP are same with victim victim_ip=rg_db.arp[i].ipv4Addr; //WARNING(" Found ARP!! ip is %x",victim_ip); for(k=0;k<MAX_NAPT_OUT_SW_TABLE_SIZE;k++) { int inIdx=rg_db.naptOut[k].rtk_naptOut.hashIdx; if(rg_db.naptOut[k].state>0 && ((rg_db.naptOut[k].remoteIp==victim_ip)||(rg_db.naptIn[inIdx].rtk_naptIn.intIp==victim_ip))) { //Delete arp entry!! assert_ok(rtk_rg_apollo_naptConnection_del(k)); } } //Deleting the ARP and the dynamic bCAM LUT assert_ok(rtk_rg_apollo_arpEntry_del(i)); } } //scan NXP for deleting entries reference to this LUT entry //20140806LUKE: nexthop always point to static LUT which won't be deleted here! /*for(i=0;i<MAX_NEXTHOP_HW_TABLE_SIZE;i++) { if(rg_db.nexthop[i].rtk_nexthop.nhIdx==l2Idx) { //WARNING(" Found NXP!! idx is %d",i); //Delete nexthop entry!! WARNING("nexthop[%d] count is %d",j,rg_db.systemGlobal.nxpRefCount[j]); assert_ok(_rtk_rg_decreaseNexthopReference(i)); } }*/ //scan neighbor table for deleting entries reference to this LUT entry for(i=0;i<MAX_IPV6_NEIGHBOR_SW_TABLE_SIZE;i++) { if(rg_db.v6neighbor[i].rtk_v6neighbor.valid && rg_db.v6neighbor[i].staticEntry==0 && rg_db.v6neighbor[i].rtk_v6neighbor.l2Idx==l2Idx) { //WARNING(" Found Neighbor!! idx is %d",i); //Detele neighbor entry!! assert_ok(rtk_rg_apollo_neighborEntry_del(i)); } } } rtk_rg_entryGetReturn_t _rtk_rg_layer2LeastRecentlyUsedReplace(int l2Idx) { #if defined(CONFIG_RG_LAYER2_SOFTWARE_LEARN) #if defined(CONFIG_RTL9600_SERIES) //when the 4-way is full, check the bCAM list for free to add, //if the bCAM is also full, look for victim which dynamic unicast first, //when there is no more dynamic unicast victim could choose, find a arp-used entry for the victim, //first scan all arp-used entries in ARP table, choose no arp entries first, if there is no such l2 entry, //choose arp-used and really referenced by ARP entry, //scan all ARP, NXP, NAPT, shortcut for referencing this entry and delete it. //arpState=0, means no arp entry referenced //arpState=1, means arp entry referenced, but arp is dynamic //arpState=2, means arp entry referenced, and arp is static //arpState=4, means no neighbor entry referenced //arpState=8, means neighbor entry referenced, but neighbor is dynamic //arpState=16, means neighbor entry referenced, and neighbor is static //first_dyn: first L2 entry which it's not static and not arp-used. 
//first_noArp: first L2 entry which it's arp-used(non-static) and not referenced by arp or neighbor entry. //first_dynArp: first L2 entry which it's arp-used(non-static) and referenced by arp or neighbor entry. int ret,first_dyn=0,first_noArp=0,first_dynArp=0,victim_idx=FAIL; #if defined(CONFIG_RTL9600_SERIES) || defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) #else int i,lut_arpState=0; #endif rtk_rg_lut_linkList_t *pLutBCAMList,*pNextLutBCAMList,*pDynBCAMList=NULL,*pNoArpBCAMList=NULL,*pDynArpBCAMList=NULL; rtk_l2_ucastAddr_t *pL2Addr; rtk_mac_t first_dynArp_mac; RECHOOSE_BYPASS: list_for_each_entry(pLutBCAMList,&rg_db.lutBCAMLinkListHead,lut_list) { if(rg_db.lut[pLutBCAMList->idx].valid) { if(rg_db.lut[pLutBCAMList->idx].rtk_lut.entryType==RTK_LUT_L2UC) { if(first_dyn==0 && (rg_db.lut[pLutBCAMList->idx].rtk_lut.entry.l2UcEntry.flags&(RTK_L2_UCAST_FLAG_STATIC|RTK_L2_UCAST_FLAG_ARP_USED))==0) //dynamic { first_dyn=pLutBCAMList->idx; pDynBCAMList=pLutBCAMList; } //Since 6266's ARP, neighbor, nexthop only have 11 bits for l2Idx, they can never pointer to bCAM adress which after 2048 #if defined(CONFIG_RTL9600_SERIES) || defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) else if((first_noArp==0) && (rg_db.lut[pLutBCAMList->idx].rtk_lut.entry.l2UcEntry.flags&RTK_L2_UCAST_FLAG_STATIC)==0) //non-static and arp-used { first_noArp=pLutBCAMList->idx; pNoArpBCAMList=pLutBCAMList; } #else else if((first_noArp==0||first_dynArp==0) && (rg_db.lut[pLutBCAMList->idx].rtk_lut.entry.l2UcEntry.flags&RTK_L2_UCAST_FLAG_STATIC)==0) //non-static and arp-used { lut_arpState=0; //Check if this arp-used lut did not really referenced by ARP for(i=0;i<MAX_ARP_SW_TABLE_SIZE;i++) { if(rg_db.arp[i].rtk_arp.valid && rg_db.arp[i].rtk_arp.nhIdx==pLutBCAMList->idx) { if(rg_db.arp[i].staticEntry) lut_arpState=2; else lut_arpState=1; break; } } lut_arpState+=4; for(i=0;i<MAX_IPV6_NEIGHBOR_SW_TABLE_SIZE;i++) { if(rg_db.v6neighbor[i].rtk_v6neighbor.valid && rg_db.v6neighbor[i].rtk_v6neighbor.l2Idx==pLutBCAMList->idx) { if(rg_db.v6neighbor[i].staticEntry) lut_arpState+=16; else lut_arpState+=8; break; } } if(lut_arpState==4 && first_noArp==0) { first_noArp=pLutBCAMList->idx; pNoArpBCAMList=pLutBCAMList; } else if((lut_arpState==5||lut_arpState==12||lut_arpState==13) && first_dynArp==0) //this bCAM entry can be deleted because it is not referenced by static ARP or neighbor!! { first_dynArp=pLutBCAMList->idx; pDynArpBCAMList=pLutBCAMList; } } #endif } } else { //WARNING("add to bCAM idx %d!!",pLutBCAMList->idx); return pLutBCAMList->idx; //find empty, use it } } //WARNING("bCAM is full...."); //if there is bCAM had been deleted, move from chosen list back if(!list_empty(&rg_db.lutBCAMChosenLinkListHead)) { list_for_each_entry_safe(pLutBCAMList,pNextLutBCAMList,&rg_db.lutBCAMChosenLinkListHead,lut_list) { if(rg_db.lut[pLutBCAMList->idx].valid==0) { //WARNING("ADD BACK chosen victim[%d] before return!",pLutBCAMList->idx); list_move(&pLutBCAMList->lut_list,&rg_db.lutBCAMLinkListHead); return pLutBCAMList->idx; //find empty, use it } } } if(first_dyn!=0) { //choose this dynamic unicast entry as victim!! 
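	/*
	 * Victim priority for the bCAM LRU (decoded from lut_arpState gathered in the scan above):
	 *   4       : arp-used entry with no ARP and no neighbor reference          -> first_noArp
	 *   5/12/13 : referenced only by dynamic ARP and/or dynamic neighbor entry  -> first_dynArp
	 *   any state containing a static ARP (+2) or static neighbor (+16) reference is never evicted.
	 * The cascade starting here tries first_dyn first, then first_noArp, then first_dynArp.
	 */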
//invalid the LRU entry, otherwise the new entry won't add pL2Addr=&rg_db.lut[first_dyn].rtk_lut.entry.l2UcEntry; ret=RTK_L2_ADDR_DEL(pL2Addr); if(ret!=RT_ERR_OK) { WARNING("Layer2LRU failed when deleting Dynamic victim[%d]...ret=%x",first_dyn,ret); } else { list_move(&pDynBCAMList->lut_list,&rg_db.lutBCAMChosenLinkListHead); victim_idx=first_dyn; DEBUG(" choose victim from dynamic entry:%d",victim_idx); } } else if(first_noArp!=0) { //choose this arp-used but no ARP unicast entry as victim!! //invalid the LRU entry, otherwise the new entry won't add pL2Addr=&rg_db.lut[first_noArp].rtk_lut.entry.l2UcEntry; ret=RTK_L2_ADDR_DEL(pL2Addr); if(ret!=RT_ERR_OK) { WARNING("Layer2LRU failed when deleting noArp victim[%d]...ret=%x",first_noArp,ret); } else { list_move(&pNoArpBCAMList->lut_list,&rg_db.lutBCAMChosenLinkListHead); victim_idx=first_noArp; DEBUG(" choose victim from noArp entry:%d",victim_idx); } } else { //WARNING("check if the bCAM is referenced by static ARP..."); //scan ARP, NAPT and shortcut for deleting entries reference to this LUT entry /*list_for_each_entry(pLutBCAMList,&rg_db.lutBCAMLinkListHead,lut_list) { if(rg_db.lut[pLutBCAMList->idx].valid) { if(rg_db.lut[pLutBCAMList->idx].rtk_lut.entryType==RTK_LUT_L2UC) { if((rg_db.lut[pLutBCAMList->idx].rtk_lut.entry.l2UcEntry.flags&RTK_L2_UCAST_FLAG_STATIC)==0 && (rg_db.lut[pLutBCAMList->idx].rtk_lut.entry.l2UcEntry.flags&RTK_L2_UCAST_FLAG_ARP_USED)>0) //arp_used, dynamic { arp_static=0; //Check if this lut referenced by static ARP for(i=0;i<MAX_ARP_SW_TABLE_SIZE;i++) { if(rg_db.arp[i].rtk_arp.valid && rg_db.arp[i].rtk_arp.nhIdx==pLutBCAMList->idx && rg_db.arp[i].staticEntry) { arp_static=1; break; } } if(arp_static==0) //this bCAM entry can be deleted because it is not referenced by static ARP!! { first_dynArp=pLutBCAMList->idx; break; } } } } }*/ //WARNING(" the arp_static is %d, first_dynArp is %d",arp_static,first_dynArp); if(first_dynArp!=0) { memcpy(&first_dynArp_mac,&rg_db.lut[first_dynArp].rtk_lut.entry.l2UcEntry.mac,sizeof(rtk_mac_t)); //invalid the LRU entry, otherwise the new entry won't add if(rg_db.lut[first_dynArp].valid && rg_db.lut[first_dynArp].rtk_lut.entryType==RTK_LUT_L2UC && memcmp(&rg_db.lut[first_dynArp].rtk_lut.entry.l2UcEntry.mac,&first_dynArp_mac,sizeof(rtk_mac_t))==0) { pL2Addr=&rg_db.lut[first_dynArp].rtk_lut.entry.l2UcEntry; ret=RTK_L2_ADDR_DEL(pL2Addr); if(ret!=RT_ERR_OK) { WARNING("Layer2LRU failed when deleting Arp victim[%d]...ret=%x",first_dynArp,ret); } else { list_move(&pDynArpBCAMList->lut_list,&rg_db.lutBCAMChosenLinkListHead); victim_idx=first_dynArp; //DEBUG(" choose victim from arp entry:%d",victim_idx); } } else { _rtk_rg_layer2CleanL34ReferenceTable(first_dynArp); list_move(&pDynArpBCAMList->lut_list,&rg_db.lutBCAMChosenLinkListHead); victim_idx=first_dynArp; //DEBUG(" choose victim from arp entry:%d",victim_idx); } } else { //re-add before victim, and rechoose again! 
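		/*
		 * No candidate was found in this pass (first_dyn, first_noArp and first_dynArp are all 0):
		 * every entry parked on rg_db.lutBCAMChosenLinkListHead is moved back onto
		 * rg_db.lutBCAMLinkListHead and the scan restarts from RECHOOSE_BYPASS with the full
		 * bCAM list visible again.
		 */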
list_for_each_entry_safe(pLutBCAMList,pNextLutBCAMList,&rg_db.lutBCAMChosenLinkListHead,lut_list) { //DEBUG("ADD BACK chosen victim[%d] before...rechoose again",pLutBCAMList->idx); list_move(&pLutBCAMList->lut_list,&rg_db.lutBCAMLinkListHead); } goto RECHOOSE_BYPASS; } } //re-add all before victim back for next time choose if(list_empty(&rg_db.lutBCAMLinkListHead)) { list_for_each_entry_safe(pLutBCAMList,pNextLutBCAMList,&rg_db.lutBCAMChosenLinkListHead,lut_list) { //DEBUG("ADD BACK chosen victim[%d] before return!",pLutBCAMList->idx); list_move(&pLutBCAMList->lut_list,&rg_db.lutBCAMLinkListHead); } } return victim_idx; #else //support lut traffic bit //when the 4-way is full, check the bCAM list for free to add, //if the bCAM is also full, look for victim. //choose longest idle time entry from non-static unicast entries(same L2 hash index) as victim. int ret, search_index, longestIdx=FAIL, victim_idx=FAIL; uint32 longestIdleTime=0; short count=0; rtk_l2_ucastAddr_t *pL2Addr; rtk_rg_lut_linkList_t *pLutCamEntry, *pNextLutCamEntry; if(!list_empty(&rg_db.lutBCAMFreeListHead)) { list_for_each_entry_safe(pLutCamEntry,pNextLutCamEntry,&rg_db.lutBCAMFreeListHead,lut_list) //just return the first entry right behind of head { victim_idx = pLutCamEntry->idx; break; } return victim_idx; } else // 4-way and bcam are full, do LRU { DEBUG("LutCam is full, do LRU!\n"); l2Idx=l2Idx&0xfffffffc; do { search_index = l2Idx+count; if(rg_db.lut[search_index].valid==0) //find empty lut entry { WARNING("Expect that 4-way lut should not exist empty entry!, empty idx:%d", search_index); return RG_RET_ENTRY_NOT_GET; } else { if(rg_db.lut[search_index].rtk_lut.entryType==RTK_LUT_L2UC && (rg_db.lut[search_index].rtk_lut.entry.l2UcEntry.flags&RTK_L2_UCAST_FLAG_STATIC)==0 && (rg_db.lut[search_index].rtk_lut.entry.l2UcEntry.flags&RTK_L2_UCAST_FLAG_ARP_USED)==0) { if(rg_db.lut[search_index].idleSecs > longestIdleTime) { longestIdx = search_index; longestIdleTime = rg_db.lut[search_index].idleSecs; } } } count++; //search from next entry } while(count < 4); list_for_each_entry_safe(pLutCamEntry,pNextLutCamEntry,&rg_db.lutBCAMTableHead[l2Idx>>2],lut_list) { search_index = pLutCamEntry->idx; if(rg_db.lut[search_index].valid==0) //find empty bcam entry { WARNING("Expect that lutCam should not exist empty entry!, empty idx:%d", search_index); return RG_RET_ENTRY_NOT_GET; } else { if(rg_db.lut[search_index].rtk_lut.entryType==RTK_LUT_L2UC && (rg_db.lut[search_index].rtk_lut.entry.l2UcEntry.flags&RTK_L2_UCAST_FLAG_STATIC)==0 && (rg_db.lut[search_index].rtk_lut.entry.l2UcEntry.flags&RTK_L2_UCAST_FLAG_ARP_USED)==0) { if(rg_db.lut[search_index].idleSecs > longestIdleTime) { longestIdx = search_index; longestIdleTime = rg_db.lut[search_index].idleSecs; } } } } if(longestIdx!=FAIL) { pL2Addr = &rg_db.lut[longestIdx].rtk_lut.entry.l2UcEntry; ret = RTK_L2_ADDR_DEL(pL2Addr); if(ret!=RT_ERR_OK) { WARNING("Layer2LRU failed when deleting victim[%d]...ret=%x", longestIdx, ret); } else { victim_idx = longestIdx; DEBUG("Layer2LRU choose victim[%d]", victim_idx); } } else { WARNING("Layer2LRU failed because longestIdx can not be chosen !!"); } return victim_idx; } #endif #else // end CONFIG_RG_LAYER2_SOFTWARE_LEARN return RG_RET_ENTRY_NOT_GET; #endif } //Since 6266's ARP, neighbor, nexthop only have 11 bits for l2Idx, they can never pointer to bCAM adress which after 2048 #if defined(CONFIG_RTL9600_SERIES) int _rtk_rg_layer2HashedReplace(int l2Idx) { //check each LUT entry which hashed to same index, if the age is smallest, the entry is least 
recently used one //we choose the LRU entry to be replace for the new entry;if the age are all the same, we choose the biggest index, //because the smallest one always means it is first add into the table, since they have the same age value, it indicate //that first added one has traffic as well as the last added one, therefore we choose the last added one. //If all 4-way are ARP_USED, choose from "next of the newest" index to be our victim. //arpState=0, means no arp entry referenced //arpState=1, means arp entry referenced, but arp is dynamic //arpState=2, means arp entry referenced, and arp is static //arpState=4, means no neighbor entry referenced //arpState=8, means neighbor entry referenced, but neighbor is dynamic //arpState=16, means neighbor entry referenced, and neighbor is static int search_idx,LRU_age=8,LRU_index=-1,victim_idx; int first_noArp=-1,first_dynArp=-1,lut_arpState=0; int ret,i,j; rtk_l2_ucastAddr_t *pL2Addr,*pL2NewAddr; rtk_mac_t first_dynArp_mac; victim_idx=rg_db.layer2NextOfNewestCountIdx[l2Idx>>2]; search_idx=l2Idx+victim_idx; for(i=0;i<4;i++) { if(rg_db.lut[search_idx].valid==0) { //free, use it return LRU_index; } else if(rg_db.lut[search_idx].rtk_lut.entryType==RTK_LUT_L2UC) { if((rg_db.lut[search_idx].rtk_lut.entry.l2UcEntry.flags&(RTK_L2_UCAST_FLAG_STATIC|RTK_L2_UCAST_FLAG_ARP_USED))==0) { //Dynamic if(rg_db.lut[search_idx].rtk_lut.entry.l2UcEntry.age<LRU_age) { LRU_age=rg_db.lut[search_idx].rtk_lut.entry.l2UcEntry.age; LRU_index=search_idx; } } else if(i!=4 && (first_noArp==-1||first_dynArp==-1) && (rg_db.lut[search_idx].rtk_lut.entry.l2UcEntry.flags&RTK_L2_UCAST_FLAG_STATIC)==0) //only check non-newest 3 entries, to ensure the newest insert entry won't be chosen this time { //ARP-USED, check if there is ARP or neighbor reference this entry //Check if this arp-used lut did not really referenced by ARP lut_arpState=0; for(j=0;j<MAX_ARP_SW_TABLE_SIZE;j++) { if(rg_db.arp[j].rtk_arp.valid && rg_db.arp[j].rtk_arp.nhIdx==search_idx) { if(rg_db.arp[j].staticEntry) lut_arpState=2; else lut_arpState=1; break; } } lut_arpState+=4; for(j=0;j<MAX_IPV6_NEIGHBOR_SW_TABLE_SIZE;j++) { if(rg_db.v6neighbor[j].rtk_v6neighbor.valid && rg_db.v6neighbor[j].rtk_v6neighbor.l2Idx==search_idx) { if(rg_db.v6neighbor[j].staticEntry) lut_arpState+=16; else lut_arpState+=8; break; } } if(first_noArp==-1 && lut_arpState==4) { first_noArp=search_idx; } else if(first_dynArp==-1 && (lut_arpState==5||lut_arpState==12||lut_arpState==13)) //this bCAM entry can be deleted because it is not referenced by static ARP or neighbor!! 
{ first_dynArp=search_idx; } } } victim_idx+=1; victim_idx%=4; search_idx=l2Idx+victim_idx; } victim_idx=FAIL; if(LRU_index>=0) { //choose dynamic to replace, swap to bCAM victim_idx=_rtk_rg_layer2LeastRecentlyUsedReplace(l2Idx); if(victim_idx>=0) { pL2NewAddr=&rg_db.lut[LRU_index].rtk_lut.entry.l2UcEntry; pL2NewAddr->index=victim_idx; //swap ret=RTK_L2_ADDR_ADD(pL2NewAddr); } //invalid the LRU entry, otherwise the new entry won't add pL2Addr=&rg_db.lut[LRU_index].rtk_lut.entry.l2UcEntry; ret=RTK_L2_ADDR_DEL(pL2Addr); //WARNING(" choose victim from dynamic entry:%d",victim_idx); victim_idx=LRU_index; } else if(first_noArp>=0)//Choose ARP-USED but no ARP referenced first { //choose dynamic to replace, swap to bCAM victim_idx=_rtk_rg_layer2LeastRecentlyUsedReplace(l2Idx); if(victim_idx>=0) { pL2NewAddr=&rg_db.lut[first_noArp].rtk_lut.entry.l2UcEntry; pL2NewAddr->index=victim_idx; //swap ret=RTK_L2_ADDR_ADD(pL2NewAddr); } //invalid the LRU entry, otherwise the new entry won't add pL2Addr=&rg_db.lut[first_noArp].rtk_lut.entry.l2UcEntry; ret=RTK_L2_ADDR_DEL(pL2Addr); //WARNING(" choose victim from dynamicARP entry:%d",victim_idx); victim_idx=first_noArp; } else if(first_dynArp>=0)//if no such entry, choose first ARP as the victim and clean ARP, NEXTHOP, neighbor, napt, shortcut { memcpy(&first_dynArp_mac,&rg_db.lut[first_dynArp].rtk_lut.entry.l2UcEntry.mac,sizeof(rtk_mac_t)); //invalid the LRU entry, otherwise the new entry won't add if(rg_db.lut[first_dynArp].valid && rg_db.lut[first_dynArp].rtk_lut.entryType==RTK_LUT_L2UC && memcmp(&rg_db.lut[first_dynArp].rtk_lut.entry.l2UcEntry.mac,&first_dynArp_mac,sizeof(rtk_mac_t))==0) { pL2Addr=&rg_db.lut[first_dynArp].rtk_lut.entry.l2UcEntry; ret=RTK_L2_ADDR_DEL(pL2Addr); if(ret!=RT_ERR_OK) { WARNING("Layer2LRU failed when deleting Arp victim[%d]...ret=%x",first_dynArp,ret); } else { //WARNING(" choose victim from arp entry:%d",victim_idx); victim_idx=first_dynArp; } } else { //WARNING(" choose victim from arp entry:%d",victim_idx); _rtk_rg_layer2CleanL34ReferenceTable(first_dynArp); victim_idx=first_dynArp; } } return victim_idx; } #endif #if !defined(__KERNEL__) || defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) void _rtk_rg_interfaceVlanIDPriority_directTX(rtk_rg_pktHdr_t *pPktHdr,rtk_rg_intfInfo_t *pStoredInfo,struct tx_info *ptxInfo,struct tx_info *ptxInfoMask) #else void _rtk_rg_interfaceVlanIDPriority_directTX(rtk_rg_pktHdr_t *pPktHdr,rtk_rg_intfInfo_t *pStoredInfo,rtk_rg_txdesc_t *ptxInfo,rtk_rg_txdesc_t *ptxInfoMask) #endif { if(pPktHdr->netifIdx==FAIL){ //from Layer2 bridge, the egress vlan should be the ingress vlan //and the egress priority should be by tagged ctag pri or internal pri //printk("->netifIdx==FAIL\n"); //memDump(skb->data,skb->len,"fail"); pPktHdr->egressVlanID = pPktHdr->internalVlanID; if(pPktHdr->tagif&CVLAN_TAGIF) pPktHdr->egressPriority=pPktHdr->ctagPri&0x7; else pPktHdr->egressPriority=rg_db.systemGlobal.qosInternalDecision.qosPortBasedPriority[pPktHdr->pRxDesc->rx_src_port_num]&0x7; }else _rtk_rg_interfaceVlanIDPriority(pPktHdr, pStoredInfo, ptxInfo, ptxInfoMask); } #if !defined(__KERNEL__) || defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) void _rtk_rg_interfaceVlanIDPriority(rtk_rg_pktHdr_t *pPktHdr,rtk_rg_intfInfo_t *pStoredInfo,struct tx_info *ptxInfo,struct tx_info *ptxInfoMask) #else void _rtk_rg_interfaceVlanIDPriority(rtk_rg_pktHdr_t *pPktHdr,rtk_rg_intfInfo_t *pStoredInfo,rtk_rg_txdesc_t *ptxInfo,rtk_rg_txdesc_t *ptxInfoMask) #endif { //DEBUG("%s pStoredInfo->is_wan==%d",__FUNCTION__,pStoredInfo->is_wan); //Here we just decide 
VLANID and priority, tag or untag will postpone to _rtk_rg_fwdEngineDMAC2CVIDTransfer if(pStoredInfo->is_wan==0){ //FIXME: here should consider both LAN and WAN interface //ptxInfoMask->opts2.bit.tx_vlan_action=0x3; /*if(rg_db.vlan[pStoredInfo->lan_intf.intf_vlan_id].UntagPortmask&) { ptxInfo->opts2.bit.tx_vlan_action = 0x3; //remarking tag DEBUG("tagged with %d",pStoredInfo->wan_intf.wan_intf_conf.egress_vlan_id); }*/ //set up priority if(pPktHdr!=NULL){ pPktHdr->egressVlanID = pStoredInfo->lan_intf.intf_vlan_id; pPktHdr->internalVlanID = pStoredInfo->lan_intf.intf_vlan_id; if(pPktHdr->tagif&CVLAN_TAGIF){ pPktHdr->egressPriority=pPktHdr->ctagPri&0x7; }else{ pPktHdr->egressPriority=rg_db.systemGlobal.qosInternalDecision.qosPortBasedPriority[pPktHdr->pRxDesc->rx_src_port_num]&0x7; } } }else{ //ptxInfoMask->opts2.bit.tx_vlan_action=0x3; //DEBUG("wanVID is%d, the vidl is %x, vidh is %x",pStoredInfo->wan_intf.wan_intf_conf.egress_vlan_id,ptxInfo->opts2.bit.vidl,ptxInfo->opts2.bit.vidh); /*if(pStoredInfo->wan_intf.wan_intf_conf.egress_vlan_tag_on) { ptxInfo->opts2.bit.tx_vlan_action = 0x3; //remarking tag DEBUG("tagged with %d",pStoredInfo->wan_intf.wan_intf_conf.egress_vlan_id); }*/ //set up priority if(pPktHdr!=NULL){ pPktHdr->egressVlanID = pStoredInfo->wan_intf.wan_intf_conf.egress_vlan_id; pPktHdr->internalVlanID = pStoredInfo->wan_intf.wan_intf_conf.egress_vlan_id; if(pPktHdr->tagif&CVLAN_TAGIF){ pPktHdr->egressPriority=pPktHdr->ctagPri&0x7; }else{ pPktHdr->egressPriority=rg_db.systemGlobal.qosInternalDecision.qosPortBasedPriority[pPktHdr->pRxDesc->rx_src_port_num]&0x7; } } } } int _rtk_rg_arpGeneration(uint8 netIfIdx,ipaddr_t gwIpAddr,rtk_rg_arp_request_t *arpReq) { #ifdef __KERNEL__ #if defined(CONFIG_APOLLO_ROMEDRIVER) || defined(CONFIG_XDSL_ROMEDRIVER) //struct tx_info txInfo,txInfoMask; struct sk_buff *skb; unsigned char *bufptr; int ret_code=0; int vlan_offset=0; int i; uint32 bitmask=0; rg_kernel.tracefilterShow =0; //disable tracefilter show DEBUG("arp send: request ip=%x\n",arpReq->reqIp); //backup original pkthdr rg_db.pktHdr=&rg_db.systemGlobal.pktHeader_2; skb=_rtk_rg_getAlloc(RG_FWDENGINE_PKT_LEN); if(skb==NULL){ TRACE("alloc skb failed..return"); return 0; } //call fwdEngineInput, the alloc counter will be added. 
so don't need to add again if((skb)&&(rg_db.systemGlobal.fwdStatistic)) { #if RTK_RG_SKB_PREALLOCATE rg_db.systemGlobal.statistic.perPortCnt_skb_pre_alloc_for_uc[rg_db.pktHdr->ingressPort]--; #else rg_db.systemGlobal.statistic.perPortCnt_skb_alloc[rg_db.pktHdr->ingressPort]--; #endif } for(i=0;i<rg_db.systemGlobal.wanIntfTotalNum;i++) { //DEBUG("the wan type is %d, ip is %x",rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->wan_type,rg_db.systemGlobal.wanIntfGroup[i].p_intfInfo->p_wanStaticInfo->ip_addr); if(rg_db.systemGlobal.wanIntfGroup[i].p_intfInfo!=NULL && rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf!=NULL && rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->wan_type!=RTK_RG_BRIDGE) { if(((rg_db.systemGlobal.wanIntfGroup[i].p_intfInfo->p_wanStaticInfo->ip_addr& rg_db.systemGlobal.wanIntfGroup[i].p_intfInfo->p_wanStaticInfo->ip_network_mask) ==(arpReq->reqIp&rg_db.systemGlobal.wanIntfGroup[i].p_intfInfo->p_wanStaticInfo->ip_network_mask))&& (rg_db.systemGlobal.wanIntfGroup[i].p_intfInfo->p_wanStaticInfo->ip_network_mask>bitmask)) { gwIpAddr=rg_db.systemGlobal.wanIntfGroup[i].p_intfInfo->p_wanStaticInfo->ip_addr; netIfIdx=rg_db.systemGlobal.wanIntfGroup[i].index; bitmask=rg_db.systemGlobal.wanIntfGroup[i].p_intfInfo->p_wanStaticInfo->ip_network_mask; } } } TRACE("ARP requestIp(%x): gwIpAddr=%x netIfIdx=%d!",arpReq->reqIp,gwIpAddr,netIfIdx); //use rsvd to save the netif idx for Layer2 forward rg_kernel.rxInfoFromARPND.rx_netIfIdx=netIfIdx; bzero(skb->data,RG_FWDENGINE_PKT_LEN);//clean ptk buffer skb_reserve(skb, RX_OFFSET); bufptr=skb->data; /* Construct destination MAC */ memset(bufptr,0xff, ETHER_ADDR_LEN); /* Construct source MAC */ if(rg_db.systemGlobal.interfaceInfo[netIfIdx].storedInfo.is_wan==1) memcpy(bufptr + 6,rg_db.systemGlobal.interfaceInfo[netIfIdx].storedInfo.wan_intf.wan_intf_conf.gmac.octet,ETHER_ADDR_LEN); else memcpy(bufptr + 6,rg_db.systemGlobal.interfaceInfo[netIfIdx].storedInfo.lan_intf.gmac.octet,ETHER_ADDR_LEN); if(rg_db.systemGlobal.interfaceInfo[netIfIdx].storedInfo.is_wan==1) { vlan_offset=4; *(uint16 *)(bufptr + 12)= htons(0x8100); *(uint16 *)(bufptr + 14)= htons(rg_db.systemGlobal.interfaceInfo[netIfIdx].storedInfo.wan_intf.wan_intf_conf.egress_vlan_id); } /* construct Ethtype+ARP header */ *(uint32 *)(bufptr + 12 + vlan_offset) = htonl(0x08060001); *(uint32 *)(bufptr + 16 + vlan_offset) = htonl(0x08000604); *(uint16 *)(bufptr + 20 + vlan_offset) = htons(0x0001); if(rg_db.systemGlobal.interfaceInfo[netIfIdx].storedInfo.is_wan==1) memcpy(bufptr + 22 + vlan_offset,rg_db.systemGlobal.interfaceInfo[netIfIdx].storedInfo.wan_intf.wan_intf_conf.gmac.octet,ETHER_ADDR_LEN); else memcpy(bufptr + 22 + vlan_offset,rg_db.systemGlobal.interfaceInfo[netIfIdx].storedInfo.lan_intf.gmac.octet,ETHER_ADDR_LEN); *(uint32 *)(bufptr + 28 + vlan_offset) = htonl(gwIpAddr); *(uint32 *)(bufptr + 38 + vlan_offset) = htonl(arpReq->reqIp); skb_put(skb, 60); /*memset(&txInfo,0,sizeof(txInfo)); memset(&txInfoMask,0,sizeof(txInfoMask)); //arp request with vlan tag _rtk_rg_interfaceVlanTagged(pStoredInfo,&txInfo,&txInfoMask); //FIXME: lookup by vlan table txInfoMask.tx_cputag=1; txInfo.tx_cputag=1; txInfoMask.tx_l34_keep=1; txInfoMask.tx_tx_portmask=RTK_RG_ALL_MAC_PORTMASK_WITHOUT_CPU; txInfo.tx_l34_keep=1; txInfo.tx_tx_portmask=0; //let hardware to auto look up //txInfo.tx_tx_portmask=(1<<RTK_RG_MAC_PORT0)|(1<<RTK_RG_MAC_PORT_RGMII); */ *(u32*)(skb->data+skb->len)=0; //save null point into end of skb data.(for trace filter debug) #if defined(CONFIG_APOLLO_ROMEDRIVER) || 
defined(CONFIG_XDSL_ROMEDRIVER) ret_code = rtk_rg_fwdEngineInput(NULL,skb,(void*)&rg_kernel.rxInfoFromARPND); //Processing packets if(ret_code == RG_FWDENGINE_RET_TO_PS) { //FIXME:iPhone 5 change wireless connection from master to slave will send strange unicast ARP request for LAN gateway IP, and forwarded by protocol stack TRACE("ARP_GEN[%x]: To Protocol-Stack...FREE SKB!!",(unsigned int)skb&0xffff); //dump_packet(skb->data,skb->len,"dump_back_to_PS"); _rtk_rg_dev_kfree_skb_any(skb); } else if (ret_code == RG_FWDENGINE_RET_DROP) { TRACE("ARP_GEN[%x]: Drop...FREE SKB!!",(unsigned int)skb&0xffff); _rtk_rg_dev_kfree_skb_any(skb); } else { TRACE("ARP_GEN[%x]: Forward",(unsigned int)skb&0xffff); } //point back to original pkthdr rg_db.pktHdr=&rg_db.systemGlobal.pktHeader_1; #else ret_code=_rtk_rg_broadcastForward(skb,rg_db.systemGlobal.interfaceInfo[netIfIdx].storedInfo.wan_intf.wan_intf_conf.egress_vlan_id,RTK_RG_MAC_PORT_CPU,0); if(ret_code==RG_FWDENGINE_RET_DROP) _rtk_rg_dev_kfree_skb_any(skb); #endif //memDump(bufptr,skb->len,"ARPGEN"); //rtk_rg_fwdEngine_xmit(skb,&txInfo,NULL); //re8686_send_with_txInfo(skb,&txInfo,0); //re8686_send_with_txInfo_and_mask(skb,&txInfo,0,&txInfoMask); #else //FIXME:in module code, we need some other api to send packets #endif #endif return 0; } unsigned short _rtk_rg_checkSumICMPv6(unsigned short *sip,unsigned short *dip,unsigned short payloadLength,unsigned short nextHeader,unsigned short *buffer, int size) { unsigned long cksum=0; int ipSize; while(size >1) { cksum+=*buffer++; size -=sizeof(unsigned short); } if(size) cksum += *(unsigned char*)buffer; //caculate IPv6 pseudo header ipSize=IPV6_ADDR_LEN; //bytes while(ipSize >1) { cksum+=*sip++; ipSize -=sizeof(unsigned short); } if(ipSize) cksum += *(unsigned char*)sip; ipSize=IPV6_ADDR_LEN; //bytes while(ipSize >1) { cksum+=*dip++; ipSize -=sizeof(unsigned short); } if(ipSize) cksum += *(unsigned char*)dip; cksum+=payloadLength; cksum+=nextHeader; while (cksum >> 16) { cksum = (cksum & 0xffffUL) + (cksum >> 16); } return ~(cksum&0xffff); } int _rtk_rg_NDGeneration(uint8 netIfIdx,rtk_ipv6_addr_t gwIpAddr,rtk_rg_neighbor_discovery_t *neighborDisc) { #ifdef __KERNEL__ #if defined(CONFIG_APOLLO_ROMEDRIVER) || defined(CONFIG_XDSL_ROMEDRIVER) //struct tx_info txInfo,txInfoMask; unsigned short tmpChksum; int ret_code=0; struct sk_buff *skb; unsigned char *bufptr; int vlan_offset=0; rg_kernel.tracefilterShow =0; //disable tracefilter show DEBUG("neighbor send: request ip=%08x:%08x:%08x:%08x\n",*(unsigned int *)neighborDisc->reqIp.ipv6_addr, *(unsigned int *)(neighborDisc->reqIp.ipv6_addr+4), *(unsigned int *)(neighborDisc->reqIp.ipv6_addr+8), *(unsigned int *)(neighborDisc->reqIp.ipv6_addr+12)); //use rsvd to save the netif idx for Layer2 forward rg_kernel.rxInfoFromARPND.rx_netIfIdx=netIfIdx; //backup original pkthdr rg_db.pktHdr=&rg_db.systemGlobal.pktHeader_2; skb=_rtk_rg_getAlloc(RG_FWDENGINE_PKT_LEN); if(skb==NULL){ TRACE("alloc skb failed..return"); return 0; } //call fwdEngineInput, the alloc counter will be added. 
so don't need to add again if((skb)&&(rg_db.systemGlobal.fwdStatistic)) { #if RTK_RG_SKB_PREALLOCATE rg_db.systemGlobal.statistic.perPortCnt_skb_pre_alloc_for_uc[rg_db.pktHdr->ingressPort]--; #else rg_db.systemGlobal.statistic.perPortCnt_skb_alloc[rg_db.pktHdr->ingressPort]--; #endif } bzero(skb->data,RG_FWDENGINE_PKT_LEN);//clear pkt buffer skb_reserve(skb, RX_OFFSET); bufptr=skb->data; // Construct destination MAC: //33:33:ff plus IPv6 address last 24bits *(unsigned char *)(bufptr)=0x33; *(unsigned char *)(bufptr+1)=0x33; *(unsigned char *)(bufptr+2)=0xff; memcpy(bufptr + 3,&neighborDisc->reqIp.ipv6_addr[13],3); // Construct source MAC memcpy(bufptr + 6,rg_db.systemGlobal.interfaceInfo[netIfIdx].storedInfo.wan_intf.wan_intf_conf.gmac.octet,ETHER_ADDR_LEN); if(rg_db.systemGlobal.interfaceInfo[netIfIdx].storedInfo.is_wan==1) { vlan_offset=4; *(uint16 *)(bufptr + 12)= htons(0x8100); *(uint16 *)(bufptr + 14)= htons(rg_db.systemGlobal.interfaceInfo[netIfIdx].storedInfo.wan_intf.wan_intf_conf.egress_vlan_id); } // construct IPv6 header *(unsigned int *)(bufptr + 12+ vlan_offset) = htonl(0x86dd6000); //etherType, ipv6 version *(unsigned int *)(bufptr + 16+ vlan_offset) = htonl(0x00000020); //payload length: 32bytes *(unsigned short *)(bufptr + 20+ vlan_offset) = htons(0x3aff); //next header: ICMPv6(58), Hop limit: 255 memcpy(bufptr + 22+ vlan_offset,&gwIpAddr.ipv6_addr,16); //source IP *(unsigned int *)(bufptr + 38+ vlan_offset) = htonl(0xff020000); //Destination IP: ff02::1:ff00/104 + ipv6 last 24bits *(unsigned int *)(bufptr + 42+ vlan_offset) = htonl(0x00000000); //Destination IP: ff02::1:ff00/104 + ipv6 last 24bits *(unsigned int *)(bufptr + 46+ vlan_offset) = htonl(0x00000001); //Destination IP: ff02::1:ff00/104 + ipv6 last 24bits *(unsigned char *)(bufptr+50+ vlan_offset)=0xff; //Destination IP: ff02::1:ff00/104 + ipv6 last 24bits memcpy(bufptr + 51+ vlan_offset,&neighborDisc->reqIp.ipv6_addr[13],3); //Destination IP: ff02::1:ff00/104 + ipv6 last 24bits // construct ICMPv6 for Neighbor Solicitation *(unsigned int *)(bufptr + 54+ vlan_offset) = htonl(0x87000000); //type:neighbor solicitation(135), code:0, checksum: 0 at first *(unsigned int *)(bufptr + 58+ vlan_offset) = htonl(0x00000000); //reserved:0 memcpy(bufptr + 62+ vlan_offset,&neighborDisc->reqIp,16); //Destination IP *(unsigned short *)(bufptr + 78+ vlan_offset) = htons(0x0101); //ICMPv6 optional:type=1,source link-layer address, length=1(8 bytes) memcpy(bufptr + 80+ vlan_offset,rg_db.systemGlobal.interfaceInfo[netIfIdx].storedInfo.wan_intf.wan_intf_conf.gmac.octet,ETHER_ADDR_LEN); // Caculate checksum tmpChksum=_rtk_rg_checkSumICMPv6((unsigned short *)(bufptr+22+ vlan_offset),(unsigned short *)(bufptr+38+ vlan_offset),0x0020,0x003a,(unsigned short *)(bufptr+54+ vlan_offset),32); *(unsigned short *)(bufptr + 56+ vlan_offset) = htons(tmpChksum); skb_put(skb, 86+vlan_offset); //plus CRC length or not? 
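	/*
	 * Illustrative sketch (disabled): _rtk_rg_checkSumICMPv6() returns the one's complement of the
	 * accumulated sum, so re-running it after the checksum field has been written returns 0 for a
	 * consistent message on the big-endian targets this code assumes. Offsets and constants mirror
	 * the frame built above.
	 */
#if 0
	{
		unsigned short verify;
		verify = _rtk_rg_checkSumICMPv6((unsigned short *)(bufptr+22+vlan_offset),
						(unsigned short *)(bufptr+38+vlan_offset),
						0x0020, 0x003a,
						(unsigned short *)(bufptr+54+vlan_offset), 32);
		/* verify==0: the sum now also covers the checksum stored at bufptr+56+vlan_offset */
	}
#endif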
/*memset(&txInfo,0,sizeof(txInfo)); memset(&txInfoMask,0,sizeof(txInfoMask)); //neighbor discovery with vlan tag _rtk_rg_interfaceVlanTagged(pStoredInfo,&txInfo,&txInfoMask); txInfoMask.tx_cputag=1; txInfoMask.tx_l34_keep=1; txInfoMask.tx_tx_portmask=RTK_RG_ALL_MAC_PORTMASK_WITHOUT_CPU; txInfo.tx_cputag=1; txInfoMask.tx_ipcs=1; txInfoMask.tx_l4cs=1; txInfoMask.tx_cputag_ipcs=1; txInfoMask.tx_cputag_l4cs=1; txInfo.tx_l34_keep=1; txInfo.tx_tx_portmask=0x1<<pStoredInfo->wan_intf.wan_intf_conf.wan_port_idx; //since we are multicast packet, we should use DirectTX //txInfo.tx_tx_portmask=(1<<RTK_RG_MAC_PORT0)|(1<<RTK_RG_MAC_PORT_RGMII); txInfo.tx_ipcs=1; txInfo.tx_l4cs=1; txInfo.tx_cputag_ipcs=1; txInfo.tx_cputag_l4cs=1; */ //memDump(bufptr,skb->len,"ARPGEN"); //rtk_rg_fwdEngine_xmit(skb,&txInfo,NULL); //re8686_send_with_txInfo(skb,&txInfo,0); //re8686_send_with_txInfo_and_mask(skb,&txInfo,0,&txInfoMask); *(u32*)(skb->data+skb->len)=0; //save null point into end of skb data.(for trace filter debug) #if defined(CONFIG_APOLLO_ROMEDRIVER) || defined(CONFIG_XDSL_ROMEDRIVER) ret_code = rtk_rg_fwdEngineInput(NULL,skb,(void*)&rg_kernel.rxInfoFromARPND); //Processing packets if(ret_code == RG_FWDENGINE_RET_TO_PS) { //FIXME:iPhone 5 change wireless connection from master to slave will send strange unicast ARP request for LAN gateway IP, and forwarded by protocol stack TRACE("ND_GEN[%x]: To Protocol-Stack...FREE SKB!!",(unsigned int)skb&0xffff); //dump_packet(skb->data,skb->len,"dump_back_to_PS"); _rtk_rg_dev_kfree_skb_any(skb); } else if (ret_code == RG_FWDENGINE_RET_DROP) { TRACE("ND_GEN[%x]: Drop...FREE SKB!!",(unsigned int)skb&0xffff); _rtk_rg_dev_kfree_skb_any(skb); } else { TRACE("ND_GEN[%x]: Forward",(unsigned int)skb&0xffff); } //point back to original pkthdr rg_db.pktHdr=&rg_db.systemGlobal.pktHeader_1; #else ret_code=_rtk_rg_broadcastForward(skb,rg_db.systemGlobal.interfaceInfo[netIfIdx].storedInfo.wan_intf.wan_intf_conf.egress_vlan_id,RTK_RG_MAC_PORT_CPU,0); if(ret_code==RG_FWDENGINE_RET_DROP) _rtk_rg_dev_kfree_skb_any(skb); #endif #else //FIXME:in module code, we need some other api to send packets #endif #endif return 0; } rtk_rg_err_code_t _rtk_rg_decCountSetStaticForGWMAC(ipaddr_t ipAddr, int l2Idx) { int errorno=RT_ERR_RG_OK; rtk_rg_macEntry_t macEt; int valid_macIdx; rtk_rg_arpInfo_t arpInfo; int arp_valid_idx; //Get Mac address valid_macIdx=l2Idx; errorno=rtk_rg_apollo_macEntry_find(&macEt, &valid_macIdx); if(errorno!=RT_ERR_RG_OK || valid_macIdx!=l2Idx)goto RET_ERR; if((rg_db.lut[valid_macIdx].rtk_lut.entry.l2UcEntry.flags&RTK_L2_UCAST_FLAG_STATIC)==0){ //remove from limit count if(rg_db.systemGlobal.accessWanLimitPortMask_member.portmask&(0x1<<(macEt.port_idx))&&rg_db.lut[valid_macIdx].permit_for_l34_forward) atomic_dec(&rg_db.systemGlobal.accessWanLimitPortMaskCount); atomic_dec(&rg_db.systemGlobal.sourceAddrLearningCount[macEt.port_idx]); //decrease wlan's device count if(macEt.port_idx==RTK_RG_EXT_PORT0 #ifdef CONFIG_DUALBAND_CONCURRENT ||(rg_db.systemGlobal.enableSlaveSSIDBind && macEt.port_idx==RTK_RG_EXT_PORT1) #endif ) { #ifdef CONFIG_MASTER_WLAN0_ENABLE _rtk_rg_wlanDeviceCount_dec(0,macEt.mac.octet,NULL); #endif } if(_rtK_rg_checkCategoryPortmask_spa(macEt.port_idx)==SUCCESS) atomic_dec(&rg_db.systemGlobal.accessWanLimitCategoryCount[(unsigned int)rg_db.lut[valid_macIdx].category]); //Set mac as static for gw macEt.static_entry=1; #if defined(CONFIG_RTL9602C_SERIES) //20160907LUKE: for 9602c hw would check arp_used for L34 linked lut entry. 
macEt.arp_used=1; #endif errorno=rtk_rg_apollo_macEntry_add(&macEt,&valid_macIdx); if(errorno!=RT_ERR_RG_OK || valid_macIdx!=l2Idx)goto RET_ERR; //20141013LUKE: find ARP and set it to static, too arpInfo.arpEntry.ipv4Addr=ipAddr; arp_valid_idx=-1; //find by IP if(rtk_rg_apollo_arpEntry_find(&arpInfo, &arp_valid_idx)==RT_ERR_RG_OK){ arpInfo.arpEntry.staticEntry=1; errorno=rtk_rg_apollo_arpEntry_add(&arpInfo.arpEntry, &arp_valid_idx); } } RET_ERR: return (errorno); } rtk_rg_err_code_t _rtk_rg_decCountSetStaticForV6GWMAC(unsigned char *ipv6Addr, int l2Idx) { int errorno=RT_ERR_RG_OK; rtk_rg_macEntry_t macEt; int valid_macIdx; rtk_rg_neighborInfo_t neighborInfo; int neighbor_valid_idx; int rtidx; //Get Mac address valid_macIdx=l2Idx; errorno=rtk_rg_apollo_macEntry_find(&macEt, &valid_macIdx); if(errorno!=RT_ERR_RG_OK || valid_macIdx!=l2Idx)goto RET_ERR; if((rg_db.lut[valid_macIdx].rtk_lut.entry.l2UcEntry.flags&RTK_L2_UCAST_FLAG_STATIC)==0){ //remove from limit count if(rg_db.systemGlobal.accessWanLimitPortMask_member.portmask&(0x1<<(macEt.port_idx))&&rg_db.lut[valid_macIdx].permit_for_l34_forward) atomic_dec(&rg_db.systemGlobal.accessWanLimitPortMaskCount); atomic_dec(&rg_db.systemGlobal.sourceAddrLearningCount[macEt.port_idx]); //decrease wlan's device count if(macEt.port_idx==RTK_RG_EXT_PORT0 #ifdef CONFIG_DUALBAND_CONCURRENT ||(rg_db.systemGlobal.enableSlaveSSIDBind && macEt.port_idx==RTK_RG_EXT_PORT1) #endif ) { #ifdef CONFIG_MASTER_WLAN0_ENABLE _rtk_rg_wlanDeviceCount_dec(0,macEt.mac.octet,NULL); #endif } if(_rtK_rg_checkCategoryPortmask_spa(macEt.port_idx)==SUCCESS) atomic_dec(&rg_db.systemGlobal.accessWanLimitCategoryCount[(unsigned int)rg_db.lut[valid_macIdx].category]); //Set mac as static for gw macEt.static_entry=1; #if defined(CONFIG_RTL9602C_SERIES) //20160907LUKE: for 9602c hw would check arp_used for L34 linked lut entry. 
macEt.arp_used=1; #endif errorno=rtk_rg_apollo_macEntry_add(&macEt,&valid_macIdx); if(errorno!=RT_ERR_RG_OK || valid_macIdx!=l2Idx)goto RET_ERR; //20160601LUKE: find Neighbor and set it to static, too rtidx=_rtk_rg_v6L3lookup(ipv6Addr); if(rtidx>=0){ memcpy(neighborInfo.neighborEntry.interfaceId,ipv6Addr+8,8); neighborInfo.neighborEntry.matchRouteIdx=rtidx; neighbor_valid_idx=-1; //find by IFID if(rtk_rg_apollo_neighborEntry_find(&neighborInfo,&neighbor_valid_idx)==RT_ERR_RG_OK){ neighborInfo.neighborEntry.staticEntry=1; errorno=rtk_rg_apollo_neighborEntry_add(&neighborInfo.neighborEntry,&neighbor_valid_idx); } } } RET_ERR: return (errorno); } int _rtk_rg_internal_GWMACSetup_stage2(int matchIdx, int l2Idx) { int i,nxtidx,eipIdx=-1,rtidx=-1,ret,errorno,netmask,remote_gw_autolearn,softl3RtIdx=-1; int ori_l2Idx=rg_db.systemGlobal.defaultTrapLUTIdx; int napt_enable=0, default_route=0, static_route_by_arp=0; rtk_l34_routing_entry_t rtEntry; rtk_l34_nexthop_entry_t nxpEt; rtk_l34_ext_intip_entry_t extipEntry; //rtk_l34_pppoe_entry_t pppoeEt; ipaddr_t wan_ext_ip=0,wan_ext_ip_mask=0,remote_gw_ip=0,remote_host_ip=0; rtk_rg_ipv4RoutingEntry_t cb_routEt; rtk_rg_wan_type_t wan_type; //int valid_macIdx; //int ori_wantype=0; //rtk_wanType_entry_t wantEt; wan_ext_ip=rg_db.systemGlobal.interfaceInfo[matchIdx].p_wanStaticInfo->ip_addr; wan_ext_ip_mask=rg_db.systemGlobal.interfaceInfo[matchIdx].p_wanStaticInfo->ip_network_mask; remote_gw_ip=rg_db.systemGlobal.interfaceInfo[matchIdx].p_wanStaticInfo->gateway_ipv4_addr; remote_gw_autolearn=rg_db.systemGlobal.interfaceInfo[matchIdx].p_wanStaticInfo->gw_mac_auto_learn_for_ipv4; napt_enable=rg_db.systemGlobal.interfaceInfo[matchIdx].p_wanStaticInfo->napt_enable; default_route=rg_db.systemGlobal.interfaceInfo[matchIdx].p_wanStaticInfo->ipv4_default_gateway_on; static_route_by_arp=rg_db.systemGlobal.interfaceInfo[matchIdx].p_wanStaticInfo->static_route_with_arp; wan_type=rg_db.systemGlobal.interfaceInfo[matchIdx].storedInfo.wan_intf.wan_intf_conf.wan_type; netmask=rg_db.systemGlobal.interfaceInfo[matchIdx].p_wanStaticInfo->ip_network_mask; remote_host_ip=rg_db.systemGlobal.interfaceInfo[matchIdx].p_wanStaticInfo->host_ip_addr; //nxtidx=RG_GLB_WAN_TYPE[matchIdx]; nxtidx=rg_db.systemGlobal.interfaceInfo[matchIdx].storedInfo.wan_intf.nexthop_ipv4; //20150622LUKE: if the nxtidx is not ready, choose one and use it! if(nxtidx<0) { //Check for empty entry errorno=RT_ERR_RG_ENTRY_FULL; for(i=0;i<MAX_NEXTHOP_SW_TABLE_SIZE;i++) { if(rg_db.systemGlobal.nxpRefCount[i] == 0) break; } if(i==MAX_NEXTHOP_SW_TABLE_SIZE)goto RET_NEXTHOP_ERR; nxtidx = i; //Keep //Setup Nexthop table in nxtidx errorno=RT_ERR_RG_NXP_SET_FAIL; bzero(&nxpEt,sizeof(rtk_l34_nexthop_entry_t)); nxpEt.ifIdx=matchIdx; // if WAN is PPPoE, LAN is untag. (keepPppoe=1 will send untag packet to WAN) if((wan_type == RTK_RG_PPPoE)||(wan_type == RTK_RG_PPPoE_DSLITE)){ nxpEt.type=L34_NH_PPPOE; #if defined(CONFIG_RTL9602C_SERIES) nxpEt.keepPppoe=2; /* If original tagged, keep. 
Otherwise add tag with PPPIDX session id */ #else nxpEt.keepPppoe=0; #endif nxpEt.pppoeIdx=rg_db.systemGlobal.interfaceInfo[matchIdx].storedInfo.wan_intf.pppoe_idx; }else{ nxpEt.type=L34_NH_ETHER; nxpEt.keepPppoe=1; nxpEt.pppoeIdx=0; } // FIXME: here should to use binding remote host mac index, if port-binding is set nxpEt.nhIdx=rg_db.systemGlobal.defaultTrapLUTIdx; //use this DUMMY index to force packet TRAP to CPU rg_db.nexthop[nxtidx].valid=1; ret = RTK_L34_NEXTHOPTABLE_SET(nxtidx, &nxpEt); if(ret!=RT_ERR_OK)goto RET_NEXTHOP_ERR; rg_db.systemGlobal.nxpRefCount[nxtidx]++; //add for deleting it when del interface rg_db.systemGlobal.interfaceInfo[matchIdx].storedInfo.wan_intf.nexthop_ipv4=nxtidx; } //Check if we had set nexthop before //20140926LUKE: for PPTP and L2TP, we will never add nexthop. if(rg_db.nexthop[nxtidx].rtk_nexthop.nhIdx!=l2Idx) { //errorno=RT_ERR_RG_NXP_GET_FAIL; bzero(&nxpEt, sizeof(rtk_l34_nexthop_entry_t)); memcpy(&nxpEt, &rg_db.nexthop[nxtidx].rtk_nexthop,sizeof(rtk_l34_nexthop_entry_t)); //ret = rtk_l34_nexthopTable_get(nxtidx, &nxpEt); //if(ret!=RT_ERR_OK)goto RET_ERR; //Check for routing table //errorno=RT_ERR_RG_ROUTE_GET_FAIL; //ret = rtk_l34_routingTable_get(MAX_L3_SW_TABLE_SIZE-1, &rtEntry); //get default route setting //if(ret!=RT_ERR_OK)goto RET_ERR; //errorno=RT_ERR_RG_DEF_ROUTE_EXIST; //Setup Nexthop table in nxtidx errorno=RT_ERR_RG_NXP_SET_FAIL; ori_l2Idx = nxpEt.nhIdx; //Keep nxpEt.nhIdx = l2Idx; // TODO:LUT table index point to Gateway rg_db.nexthop[nxtidx].valid=1; ret = RTK_L34_NEXTHOPTABLE_SET(nxtidx, &nxpEt); if(ret!=RT_ERR_OK)goto RET_NEXTHOP_ERR; } //20160524LUKE: check if static route's nexthop point to same interface index for(i=0;i<MAX_NEXTHOP_SW_TABLE_SIZE;i++){ if(rg_db.nexthop[i].rtk_nexthop.ifIdx==matchIdx && rg_db.nexthop[i].rtk_nexthop.nhIdx==rg_db.systemGlobal.defaultTrapLUTIdx) rg_db.nexthop[i].rtk_nexthop.nhIdx=l2Idx; } //20150610LUKE: Lookup for available extip table index //20160128LUKE: for static host route, we do not need extIP entry. //20160329LUKE: for routing mode, we should never add extIP entry, even static route. if(napt_enable/* || (default_route==0 && remote_gw_ip!=0 && netmask!=0xffffffff)*/) { if(rg_db.systemGlobal.interfaceInfo[matchIdx].storedInfo.wan_intf.extip_idx<0) { errorno=RT_ERR_RG_ENTRY_FULL; for(i=0;i<MAX_EXTIP_SW_TABLE_SIZE;i++) { if(!rg_db.extip[i].rtk_extip.valid) break; } if(i==MAX_EXTIP_SW_TABLE_SIZE)goto RET_NEXTHOP_ERR; eipIdx=i; //keep //Set up Internal External IP table for NAPT or STATIC ROUTE //20140328LUKE:STATIC ROUTE should always add IP table, even napt_enable is 0!! //20141001LUKE: PPTP should add EXTIP in pptpClientInfoAfterDial_set //20141020LUKE: L2TP should add EXTIP in l2tpClientInfoAfterDial_set errorno=RT_ERR_RG_EXTIP_SET_FAIL; bzero(&extipEntry,sizeof(rtk_l34_ext_intip_entry_t)); extipEntry.intIpAddr=0; //napt special extipEntry.extIpAddr=wan_ext_ip; //20150107LUKE: update EIP from primitive WAN interface, not from STATIC ROUTE's WAN. 
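	/*
	 * Illustrative sketch (disabled): the loop below and the similar one in _rtk_rg_arpGeneration()
	 * both pick, among the routed WAN interfaces, the one whose subnet covers a target IPv4 address,
	 * preferring the longest netmask. _example_lpm_wan_intf_find() is a hypothetical name and would
	 * live at file scope; it only condenses that longest-prefix-match pattern.
	 */
#if 0
	static int _example_lpm_wan_intf_find(ipaddr_t target, ipaddr_t *pIntfIp)
	{
		int i, bestIdx = -1;
		ipaddr_t ip, mask, bestMask = 0;
		for (i = 0; i < rg_db.systemGlobal.wanIntfTotalNum; i++) {
			if (rg_db.systemGlobal.wanIntfGroup[i].p_intfInfo == NULL ||
			    rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf == NULL ||
			    rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->wan_type == RTK_RG_BRIDGE)
				continue; /* bridge WANs carry no L3 address */
			ip = rg_db.systemGlobal.wanIntfGroup[i].p_intfInfo->p_wanStaticInfo->ip_addr;
			mask = rg_db.systemGlobal.wanIntfGroup[i].p_intfInfo->p_wanStaticInfo->ip_network_mask;
			if (((ip & mask) == (target & mask)) && (mask > bestMask)) { /* longer prefix wins */
				bestMask = mask;
				bestIdx = rg_db.systemGlobal.wanIntfGroup[i].index;
				if (pIntfIp != NULL)
					*pIntfIp = ip;
			}
		}
		return bestIdx; /* -1 when no routed WAN covers the target address */
	}
#endif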
if(((wan_ext_ip_mask<=0xfffffff8)&&((wan_ext_ip&wan_ext_ip_mask)==wan_ext_ip)) || //subnet IP (wan_ext_ip_mask==0xffffffff&&remote_gw_autolearn&&wan_type!=RTK_RG_PPPoE&&wan_type!=RTK_RG_PPPoE_DSLITE&&wan_type!=RTK_RG_PPTP&&wan_type!=RTK_RG_L2TP)) //static host route with napt mode { for(i=0,ret=0;i<rg_db.systemGlobal.wanIntfTotalNum;i++) { //DEBUG("the wan type is %d, ip is %x",rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->wan_type,rg_db.systemGlobal.wanIntfGroup[i].p_intfInfo->p_wanStaticInfo->ip_addr); if((rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->wan_type!=RTK_RG_BRIDGE)&& ((rg_db.systemGlobal.wanIntfGroup[i].p_intfInfo->p_wanStaticInfo->ip_addr&rg_db.systemGlobal.wanIntfGroup[i].p_intfInfo->p_wanStaticInfo->ip_network_mask) ==(remote_gw_ip&rg_db.systemGlobal.wanIntfGroup[i].p_intfInfo->p_wanStaticInfo->ip_network_mask))&& (rg_db.systemGlobal.wanIntfGroup[i].p_intfInfo->p_wanStaticInfo->ip_network_mask>ret)) { extipEntry.extIpAddr=rg_db.systemGlobal.wanIntfGroup[i].p_intfInfo->p_wanStaticInfo->ip_addr; ret=rg_db.systemGlobal.wanIntfGroup[i].p_intfInfo->p_wanStaticInfo->ip_network_mask; } } if(ret==0)goto RET_EIP_ERR; } extipEntry.nhIdx=nxtidx; extipEntry.prival=0; extipEntry.pri=0; extipEntry.type=L34_EXTIP_TYPE_NAPT; extipEntry.valid=1; ret = RTK_L34_EXTINTIPTABLE_SET(eipIdx, &extipEntry); if(ret!=RT_ERR_OK)goto RET_EIP_ERR; rg_db.systemGlobal.interfaceInfo[matchIdx].storedInfo.wan_intf.extip_idx=eipIdx; //keep in interface structure //DEBUG("set ext ip table %d as %x, nexthop is %d",matchIdx, wan_ext_ip,nxtidx); rg_db.systemGlobal.nxpRefCount[nxtidx]++; //nexthop reference by IP table } } //DEBUG("the wan_ip is %x, napt_enable is %d, default_route is %d nxtidx is %d static_route_arp is %d...",wan_ext_ip,napt_enable,default_route,nxtidx,static_route_by_arp); //Check and setup Routing table for default route //WARNING("the default_route is %d, the defaultRouteSet is %d, matchidx is %d",default_route,rg_db.systemGlobal.defaultRouteSet,matchIdx); if(default_route == 1) { if(rg_db.systemGlobal.defaultRouteSet == matchIdx) { if(wan_type!=RTK_RG_PPTP && wan_type!=RTK_RG_L2TP #if defined(CONFIG_RTL9600_SERIES) || defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) && wan_type!=RTK_RG_DSLITE && wan_type!=RTK_RG_PPPoE_DSLITE #endif ) { errorno=RT_ERR_RG_ROUTE_SET_FAIL; bzero(&rtEntry, sizeof(rtk_l34_routing_entry_t)); rtEntry.valid=1; rtEntry.process=L34_PROCESS_NH; if(napt_enable == 1) rtEntry.internal=0; //external interface else rtEntry.internal=1; //internal interface rtEntry.ipAddr=0; rtEntry.ipMask=0; // TODO:if load-balance is needed, here should be changed rtEntry.nhStart=nxtidx; /*exact index*/ rtEntry.nhNxt=nxtidx; rtEntry.nhNum=0; //exect Next hop number 1,2,4,8,16 rtEntry.nhAlgo=0; //PER-PACKET rtEntry.ipDomain=6; //Entry 0~7 rtEntry.rt2waninf=1; } else { //for PPTP and L2TP, we should set default route to TRAP! //for DSLITE, too. 
errorno=RT_ERR_RG_ROUTE_SET_FAIL; bzero(&rtEntry, sizeof(rtk_l34_routing_entry_t)); rtEntry.valid=1; rtEntry.process=L34_PROCESS_CPU; if(napt_enable == 1) rtEntry.internal=0; //external interface else rtEntry.internal=1; //internal interface rtEntry.ipAddr=0; rtEntry.ipMask=0; rtEntry.netifIdx=matchIdx; // TODO:if load-balance is needed, here should be changed rtEntry.nhStart=nxtidx; /*exact index*/ rtEntry.nhNxt=nxtidx; rtEntry.nhNum=0; //exect Next hop number 1,2,4,8,16 rtEntry.nhAlgo=0; //PER-PACKET rtEntry.ipDomain=6; //Entry 0~7 rtEntry.rt2waninf=1; } //WARNING("the added default routing's nexthop is %d, internal is %d, defaultRouteSet is %d, dhcpWAN_sem is %d", //rtEntry.nhStart,rtEntry.internal,rg_db.systemGlobal.defaultRouteSet,rg_db.systemGlobal.dhcpWAN_sem); //callback information bzero(&cb_routEt,sizeof(rtk_rg_ipv4RoutingEntry_t)); cb_routEt.dest_ip=0; cb_routEt.ip_mask=0; cb_routEt.nexthop=remote_gw_ip; cb_routEt.wan_intf_idx=matchIdx; //WARNING("set to hardware table directly!!"); ret = RTK_L34_ROUTINGTABLE_SET(V4_DEFAULT_ROUTE_IDX, &rtEntry); //set default route if(ret!=RT_ERR_OK)goto RET_DEF_ROUTE_ERR; // TODO:Call the initParam's routngAddByHwCallBack if(rg_db.systemGlobal.initParam.routingAddByHwCallBack != NULL) { rg_db.systemGlobal.initParam.routingAddByHwCallBack(&cb_routEt); } if(wan_type!=RTK_RG_PPTP && wan_type!=RTK_RG_L2TP && wan_type!=RTK_RG_DSLITE && wan_type!=RTK_RG_PPPoE_DSLITE)rg_db.systemGlobal.nxpRefCount[nxtidx]++; //nexthop reference by routing table } } if((default_route != 1)||(wan_type==RTK_RG_DSLITE)||(wan_type==RTK_RG_PPPoE_DSLITE)) { //set STATIC ROUTE for nexthop //20150109LUKE: for dslite we should set nexthop information if((remote_gw_ip!=0 && !static_route_by_arp)||(wan_type==RTK_RG_DSLITE)||(wan_type==RTK_RG_PPPoE_DSLITE)) { //check which routing entry we are setting bzero(&rtEntry, sizeof(rtk_l34_routing_entry_t)); for(i=0; i<MAX_L3_SW_TABLE_SIZE ; i++) { if(i== V4_DEFAULT_ROUTE_IDX) continue; if(rg_db.l3[i].rtk_l3.valid && rg_db.l3[i].rtk_l3.process==L34_PROCESS_CPU && ((wan_ext_ip_mask!=0xffffffff && (wan_ext_ip&wan_ext_ip_mask)==rg_db.l3[i].rtk_l3.ipAddr && wan_ext_ip_mask==rg_db.l3[i].netmask) || ((wan_ext_ip_mask==0xffffffff && remote_host_ip==0) && remote_gw_ip==rg_db.l3[i].rtk_l3.ipAddr && wan_ext_ip_mask==rg_db.l3[i].netmask) || ((wan_ext_ip_mask==0xffffffff && remote_host_ip!=0) && remote_host_ip==rg_db.l3[i].rtk_l3.ipAddr && wan_ext_ip_mask==rg_db.l3[i].netmask))) { DEBUG("Match! 
%d",i); rtidx=i; break; } } if(rtidx>=0) { errorno=RT_ERR_RG_ROUTE_SET_FAIL; memcpy(&rtEntry, &rg_db.l3[rtidx].rtk_l3,sizeof(rtk_l34_routing_entry_t)); #if defined(CONFIG_RTL9600_SERIES) || defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) if((wan_type!=RTK_RG_DSLITE)&&(wan_type!=RTK_RG_PPPoE_DSLITE)) #endif rtEntry.process=L34_PROCESS_NH; // TODO:if load-balance is needed, here should be changed rtEntry.nhStart=nxtidx; /*exact index*/ rtEntry.nhNxt=nxtidx; rtEntry.nhNum=0; //exect Next hop number 1,2,4,8,16 rtEntry.nhAlgo=0; //PER-PACKET rtEntry.ipDomain=6; //Entry 0~7 rtEntry.rt2waninf=1; //callback information bzero(&cb_routEt,sizeof(rtk_rg_ipv4RoutingEntry_t)); cb_routEt.dest_ip=rtEntry.ipAddr; cb_routEt.ip_mask=wan_ext_ip_mask; cb_routEt.nexthop=remote_gw_ip; cb_routEt.wan_intf_idx=matchIdx; ret = RTK_L34_ROUTINGTABLE_SET(rtidx, &rtEntry); //set default route if(ret!=RT_ERR_OK)goto RET_DEF_ROUTE_ERR; // TODO:Call the initParam's routngAddByHwCallBack if(rg_db.systemGlobal.initParam.routingAddByHwCallBack != NULL) { rg_db.systemGlobal.initParam.routingAddByHwCallBack(&cb_routEt); } if((wan_type!=RTK_RG_DSLITE)&&(wan_type!=RTK_RG_PPPoE_DSLITE))rg_db.systemGlobal.nxpRefCount[nxtidx]++; //nexthop reference by routing table } } } for(i=0;i<MAX_L3_SW_TABLE_SIZE;i++) { if(i == V4_DEFAULT_ROUTE_IDX) continue; if((rg_db.l3[i].valid) && (((rg_db.l3[i].rtk_l3.process==L34_PROCESS_ARP) && (rg_db.l3[i].rtk_l3.netifIdx==matchIdx)) || ((rg_db.l3[i].rtk_l3.process==L34_PROCESS_NH)&&(rg_db.nexthop[rg_db.l3[i].rtk_l3.nhStart].rtk_nexthop.ifIdx==matchIdx))) ) { softl3RtIdx=i; break; } } if((((eipIdx >=0) && (eipIdx <MAX_EXTIP_SW_TABLE_SIZE)) && rg_db.extip[eipIdx].valid==SOFTWARE_ONLY_ENTRY) || (((nxtidx>=0)&& (nxtidx <MAX_NEXTHOP_SW_TABLE_SIZE)) && rg_db.nexthop[nxtidx].valid == SOFTWARE_ONLY_ENTRY) || ((matchIdx>=MAX_NETIF_HW_TABLE_SIZE)&&(matchIdx<MAX_NETIF_SW_TABLE_SIZE)) || (rg_db.l3[softl3RtIdx].valid==SOFTWARE_ONLY_ENTRY)) { if(rg_db.systemGlobal.interfaceInfo[matchIdx].valid !=SOFTWARE_ONLY_ENTRY) { rtk_rg_aclAndCf_reserved_dip_mask_trap_t dip_mask_trap; bzero(&dip_mask_trap,sizeof(dip_mask_trap)); dip_mask_trap.dip=rg_db.l3[softl3RtIdx].rtk_l3.ipAddr; dip_mask_trap.mask =~((1<<(31-(rg_db.l3[softl3RtIdx].rtk_l3.ipMask)))-1); _rtk_rg_aclAndCfReservedRuleAdd(RTK_RG_ACLANDCF_RESERVED_RULE0_DIP_MASK_TRAP +softl3RtIdx, &dip_mask_trap); rg_db.systemGlobal.interfaceInfo[matchIdx].valid=SOFTWARE_ONLY_ENTRY; WARNING("ReservedRuleAdd software data path eipIdx=%d nxtidx=%d netif=%d L3Idx=%d",eipIdx,nxtidx,matchIdx,softl3RtIdx); } } if (rg_db.systemGlobal.interfaceInfo[matchIdx].valid == SOFTWARE_ONLY_ENTRY)// pure software netif { int iterPort=0; //Port bind , trap spa=bindPort packet for(iterPort=0 ;iterPort <RTK_RG_PORT_CPU ; iterPort++ ) { if( rg_db.systemGlobal.interfaceInfo[matchIdx].storedInfo.wan_intf.wan_intf_conf.port_binding_mask.portmask & (1<<iterPort)) { _rtk_rg_aclAndCfReservedRuleAdd(RTK_RG_ACLANDCF_RESERVED_PORT0_TRAP + iterPort, NULL); WARNING("ReservedRuleAdd Port Bind trap Port=%d",iterPort); } } } return (RT_ERR_RG_OK); RET_DEF_ROUTE_ERR: //Delete the default route entry bzero(&rtEntry, sizeof(rtk_l34_routing_entry_t)); RTK_L34_ROUTINGTABLE_SET(V4_DEFAULT_ROUTE_IDX, &rtEntry); RET_EIP_ERR: //Delete the eip entry if(eipIdx>=0) { bzero(&extipEntry,sizeof(rtk_l34_ext_intip_entry_t)); RTK_L34_EXTINTIPTABLE_SET(eipIdx, &extipEntry); } RET_NEXTHOP_ERR: //Recover original L2 idx nxpEt.nhIdx=ori_l2Idx; rg_db.nexthop[nxtidx].valid=1; RTK_L34_NEXTHOPTABLE_SET(nxtidx, &nxpEt); return (errorno); } int 
_rtk_rg_internal_GWMACSetup(ipaddr_t ipAddr, int l2Idx) { int i,matchIdx=-1,errorno; // TODO:After the Gateway mac is learned, we can finish add routing entry // TODO:and modify nexthop entry to correct LUT index //Check l2Idx for success or timeout errorno=RT_ERR_RG_ARP_NOT_FOUND; if(l2Idx == -1)goto END; //Check each wan interface for matching IPaddr for(i=0; i<rg_db.systemGlobal.wanIntfTotalNum; i++) { //Bridge WAN won't be compared with if(rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->wan_type==RTK_RG_BRIDGE) continue; if(rg_db.systemGlobal.wanIntfGroup[i].p_intfInfo->p_wanStaticInfo->gateway_ipv4_addr == ipAddr) { matchIdx=rg_db.systemGlobal.wanIntfGroup[i].index; errorno=_rtk_rg_decCountSetStaticForGWMAC(ipAddr, l2Idx); if(errorno!=RT_ERR_RG_OK)goto RET_ERR; DEBUG("before GWMACsetup_stage 2 !!! ip is %x matchidx is %d, l2idx is %d",ipAddr,matchIdx,l2Idx); errorno = _rtk_rg_internal_GWMACSetup_stage2(matchIdx, l2Idx); if(errorno!=RT_ERR_RG_OK)goto RET_ERR; rg_db.systemGlobal.intfArpRequest[matchIdx].finished = 1; //break; } } errorno=RT_ERR_RG_INVALID_PARAM; if(matchIdx == -1)goto END; return (RT_ERR_RG_OK); RET_ERR: rg_db.systemGlobal.intfArpRequest[matchIdx].finished = -1; END: RETURN_ERR(errorno); } int _rtk_rg_internal_PPTPMACSetup(ipaddr_t ipAddr, int l2Idx) { int i,matchIdx=-1,errorno; // TODO:After the Gateway mac is learned, we can finish add routing entry // TODO:and modify nexthop entry to correct LUT index //Check l2Idx for success or timeout errorno=RT_ERR_RG_ARP_NOT_FOUND; if(l2Idx == -1)goto RET_ERR; //Check each wan interface for matching IPaddr for(i=0; i<rg_db.systemGlobal.wanIntfTotalNum; i++) { if(rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->wan_type!=RTK_RG_PPTP) continue; DEBUG("[%d] gateway %x ipaddr %x",i,rg_db.systemGlobal.wanIntfGroup[i].p_intfInfo->p_wanStaticInfo->gateway_ipv4_addr,ipAddr); if(rg_db.systemGlobal.wanIntfGroup[i].p_intfInfo->storedInfo.wan_intf.pptp_info.before_dial.pptp_ipv4_addr == ipAddr) { matchIdx=rg_db.systemGlobal.wanIntfGroup[i].index; errorno=_rtk_rg_decCountSetStaticForGWMAC(ipAddr, l2Idx); if(errorno!=RT_ERR_RG_OK)goto RET_ERR; DEBUG("PPTP before GWMACsetup_stage 2 !!! 
ip is %x matchidx is %d, l2idx is %d",ipAddr,matchIdx,l2Idx); errorno = _rtk_rg_internal_GWMACSetup_stage2(matchIdx, l2Idx); if(errorno!=RT_ERR_RG_OK)goto RET_ERR; rg_db.systemGlobal.intfArpRequest[matchIdx+MAX_NETIF_SW_TABLE_SIZE].finished = 1; //break; } } errorno=RT_ERR_RG_INVALID_PARAM; if(matchIdx == -1)goto RET_ERR; return (RT_ERR_RG_OK); RET_ERR: rg_db.systemGlobal.intfArpRequest[matchIdx+MAX_NETIF_SW_TABLE_SIZE].finished = -1; RETURN_ERR(errorno); } int _rtk_rg_internal_L2TPMACSetup(ipaddr_t ipAddr, int l2Idx) { int i,matchIdx=-1,errorno; // TODO:After the Gateway mac is learned, we can finish add routing entry // TODO:and modify nexthop entry to correct LUT index //Check l2Idx for success or timeout errorno=RT_ERR_RG_ARP_NOT_FOUND; if(l2Idx == -1)goto RET_ERR; //Check each wan interface for matching IPaddr for(i=0; i<rg_db.systemGlobal.wanIntfTotalNum; i++) { if(rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->wan_type!=RTK_RG_L2TP) continue; DEBUG("[%d] gateway %x ipaddr %x",i,rg_db.systemGlobal.wanIntfGroup[i].p_intfInfo->p_wanStaticInfo->gateway_ipv4_addr,ipAddr); if(rg_db.systemGlobal.wanIntfGroup[i].p_intfInfo->storedInfo.wan_intf.l2tp_info.before_dial.l2tp_ipv4_addr == ipAddr) { matchIdx=rg_db.systemGlobal.wanIntfGroup[i].index; errorno=_rtk_rg_decCountSetStaticForGWMAC(ipAddr, l2Idx); if(errorno!=RT_ERR_RG_OK)goto RET_ERR; DEBUG("L2TP before GWMACsetup_stage 2 !!! ip is %x matchidx is %d, l2idx is %d",ipAddr,matchIdx,l2Idx); errorno = _rtk_rg_internal_GWMACSetup_stage2(matchIdx, l2Idx); if(errorno!=RT_ERR_RG_OK)goto RET_ERR; rg_db.systemGlobal.intfArpRequest[matchIdx+MAX_NETIF_SW_TABLE_SIZE].finished = 1; //break; } } errorno=RT_ERR_RG_INVALID_PARAM; if(matchIdx == -1)goto RET_ERR; return (RT_ERR_RG_OK); RET_ERR: rg_db.systemGlobal.intfArpRequest[matchIdx+MAX_NETIF_SW_TABLE_SIZE].finished = -1; RETURN_ERR(errorno); } int _rtk_rg_internal_STATICROUTEMACSetup(ipaddr_t ipAddr, int l2Idx) { int i,nxtidx,errorno; for(i=0;i<MAX_STATIC_ROUTE_SIZE;i++){ if(rg_db.staticRoute[i].valid && !rg_db.staticRoute[i].info.ip_version && rg_db.staticRoute[i].info.ipv4.nexthop==ipAddr){ nxtidx=rg_db.l3[rg_db.staticRoute[i].route_idx].rtk_l3.nhStart; errorno=_rtk_rg_decCountSetStaticForGWMAC(ipAddr, l2Idx); if(errorno!=RT_ERR_RG_OK)goto RET_ERR; rg_db.nexthop[nxtidx].rtk_nexthop.nhIdx=l2Idx; rg_db.nexthop[nxtidx].valid=1; ASSERT_EQ(RTK_L34_NEXTHOPTABLE_SET(nxtidx, &rg_db.nexthop[nxtidx].rtk_nexthop),RT_ERR_OK); TRACE("set staticRoute[%d]'s nexthop[%d] to Lut[%d]",i,nxtidx,l2Idx); //break; } } return (RT_ERR_RG_OK); RET_ERR: rg_db.systemGlobal.staticRouteArpReq[i].finished = -1; RETURN_ERR(errorno); } int _rtk_rg_internal_STATICROUTEV6MACSetup(unsigned char *ipv6Addr, int l2Idx) { int i,nxtidx,errorno; for(i=0;i<MAX_STATIC_ROUTE_SIZE;i++){ if(rg_db.staticRoute[i].valid && rg_db.staticRoute[i].info.ip_version && !memcmp(&rg_db.staticRoute[i].info.ipv6.nexthop.ipv6_addr,ipv6Addr,IPV6_ADDR_LEN)){ nxtidx=rg_db.v6route[rg_db.staticRoute[i].route_idx].rtk_v6route.nhOrIfidIdx; errorno=_rtk_rg_decCountSetStaticForV6GWMAC(ipv6Addr, l2Idx); if(errorno!=RT_ERR_RG_OK)goto RET_ERR; rg_db.nexthop[nxtidx].rtk_nexthop.nhIdx=l2Idx; rg_db.nexthop[nxtidx].valid=1; ASSERT_EQ(RTK_L34_NEXTHOPTABLE_SET(nxtidx, &rg_db.nexthop[nxtidx].rtk_nexthop),RT_ERR_OK); TRACE("set staticRoute[%d]'s nexthop[%d] to Lut[%d]",i,nxtidx,l2Idx); //break; } } return (RT_ERR_RG_OK); RET_ERR: rg_db.systemGlobal.staticRouteNBDiscovery[i].finished = -1; RETURN_ERR(errorno); } int _rtk_rg_internal_IPV6GWMACSetup_stage2(int matchIdx, int l2Idx) 
{ int i,nxtidx,rtidx=-1,ret,errorno; int ori_l2Idx=rg_db.systemGlobal.defaultTrapLUTIdx; int default_route=0; int wan_ext_ip_mask; rtk_ipv6Routing_entry_t rtv6Entry; rtk_l34_nexthop_entry_t nxpEt; //rtk_l34_pppoe_entry_t pppoeEt; rtk_ipv6_addr_t wan_ext_ip,remote_gw_ip,zeroIP={{0}}; rtk_rg_ipv6RoutingEntry_t cb_routv6Et; rtk_wanType_entry_t wantEt; unsigned int tmppmsk,tmpexpmsk; rtk_portmask_t out_mac_pmask,out_ext_pmask; rtk_rg_wan_type_t wan_type; //int valid_macIdx; //int ori_wantype=0; //rtk_wanType_entry_t wantEt; DEBUG("in _rtk_rg_internal_IPV6GWMACSetup_stage2, matchIdx is %d, l2Idx is %d",matchIdx,l2Idx); memcpy(&wan_ext_ip,&rg_db.systemGlobal.interfaceInfo[matchIdx].p_wanStaticInfo->ipv6_addr,sizeof(rtk_ipv6_addr_t)); wan_ext_ip_mask=rg_db.systemGlobal.interfaceInfo[matchIdx].p_wanStaticInfo->ipv6_mask_length; memcpy(&remote_gw_ip,&rg_db.systemGlobal.interfaceInfo[matchIdx].p_wanStaticInfo->gateway_ipv6_addr,sizeof(rtk_ipv6_addr_t)); default_route=rg_db.systemGlobal.interfaceInfo[matchIdx].p_wanStaticInfo->ipv6_default_gateway_on; wan_type=rg_db.systemGlobal.interfaceInfo[matchIdx].storedInfo.wan_intf.wan_intf_conf.wan_type; if(rg_db.systemGlobal.interfaceInfo[matchIdx].storedInfo.wan_intf.bind_wan_type_ipv6<0) { nxtidx=rg_db.systemGlobal.interfaceInfo[matchIdx].storedInfo.wan_intf.nexthop_ipv6; if(nxtidx<0){ //Check for empty entry errorno=RT_ERR_RG_ENTRY_FULL; for(i=0;i<MAX_NEXTHOP_SW_TABLE_SIZE;i++) { if(rg_db.systemGlobal.nxpRefCount[i] == 0) break; } if(i==MAX_NEXTHOP_SW_TABLE_SIZE)RETURN_ERR(errorno); nxtidx=i; //keep //Setup Nexthop table in nxtidx errorno=RT_ERR_RG_NXP_SET_FAIL; bzero(&nxpEt,sizeof(rtk_l34_nexthop_entry_t)); nxpEt.ifIdx=matchIdx; // if WAN is PPPoE, LAN is untag. (keepPppoe=1 will send untag packet to WAN) if((wan_type == RTK_RG_PPPoE)||(wan_type == RTK_RG_PPPoE_DSLITE)){ nxpEt.type=L34_NH_PPPOE; #if defined(CONFIG_RTL9602C_SERIES) nxpEt.keepPppoe=2; /* If original tagged, keep. 
				   Otherwise add tag with PPPIDX session id */
#else
				nxpEt.keepPppoe=0;
#endif
				nxpEt.pppoeIdx=rg_db.systemGlobal.interfaceInfo[matchIdx].storedInfo.wan_intf.pppoe_idx;
			}else{
				nxpEt.type=L34_NH_ETHER;
				nxpEt.keepPppoe=1;
				nxpEt.pppoeIdx=0;
			}

			// FIXME: should use the bound remote host MAC index here when port-binding is set
			nxpEt.nhIdx=rg_db.systemGlobal.defaultTrapLUTIdx;	//use this DUMMY index to force packet TRAP to CPU
			rg_db.nexthop[nxtidx].valid=1;
			ret = RTK_L34_NEXTHOPTABLE_SET(nxtidx, &nxpEt);
			if(ret!=RT_ERR_OK)RETURN_ERR(errorno);
			rg_db.systemGlobal.nxpRefCount[nxtidx]++;	//count the reference so it can be released when the interface is deleted
			rg_db.systemGlobal.interfaceInfo[matchIdx].storedInfo.wan_intf.nexthop_ipv6=nxtidx;
		}

		errorno=RT_ERR_RG_WANTYPE_SET_FAIL;
		bzero(&wantEt, sizeof(rtk_wanType_entry_t));
		wantEt.nhIdx=nxtidx;
		wantEt.wanType=L34_WAN_TYPE_L3_ROUTE;	//IPv6 routing-only mode
#ifdef CONFIG_RG_IPV6_NAPT_SUPPORT
		//DEBUG("ipv6_napt_enable=%d",rg_db.systemGlobal.interfaceInfo[matchIdx].p_wanStaticInfo->ipv6_napt_enable);
		if(rg_db.systemGlobal.interfaceInfo[matchIdx].p_wanStaticInfo->ipv6_napt_enable){
			wantEt.wanType=L34_WAN_TYPE_L34NAT_ROUTE;	//IPv6 NAPT
		}
#endif
		for(i=0;i<MAX_WANTYPE_SW_TABLE_SIZE;i++)
		{
			if(rg_db.wantype[i].valid==0) break;
		}
		if(i==MAX_WANTYPE_SW_TABLE_SIZE)RETURN_ERR(errorno);
		rg_db.wantype[i].valid=1;
		ret = RTK_L34_WANTYPETABLE_SET(i, &wantEt);
		if(ret==RT_ERR_CHIP_NOT_SUPPORTED)
		{
			errorno=RT_ERR_RG_CHIP_NOT_SUPPORT;
			RETURN_ERR(errorno);
		}
		if(ret!=RT_ERR_OK)RETURN_ERR(errorno);
		DEBUG("### Add WANTYPE[%d]:(wanType=%d, nhIdx=%d) ",i,wantEt.wanType,wantEt.nhIdx);
		rg_db.systemGlobal.nxpRefCount[wantEt.nhIdx]++;	//nexthop referenced by WAN type table
		rg_db.systemGlobal.interfaceInfo[matchIdx].storedInfo.wan_intf.bind_wan_type_ipv6=i;
		DEBUG("create IPv6 wantype[%d]!!",rg_db.systemGlobal.interfaceInfo[matchIdx].storedInfo.wan_intf.bind_wan_type_ipv6);

		//update binding rules
		_rtk_rg_portmask_translator(rg_db.systemGlobal.interfaceInfo[matchIdx].storedInfo.wan_intf.wan_intf_conf.port_binding_mask,&out_mac_pmask,&out_ext_pmask);
		tmppmsk=out_mac_pmask.bits[0];
		tmpexpmsk=out_ext_pmask.bits[0]>>0x1;	//FIXME: the translated mask contains the CPU port, but binding should not include it, so shift it out
		_rtk_rg_deletingPortBindFromInterface(matchIdx);
		_rtk_rg_addBindFromPortmask(tmppmsk,tmpexpmsk,matchIdx,rg_db.systemGlobal.interfaceInfo[matchIdx].storedInfo.wan_intf.bind_wan_type_ipv4,rg_db.systemGlobal.interfaceInfo[matchIdx].storedInfo.wan_intf.bind_wan_type_ipv6);

		//update vlan-binding
		ret=_rtk_rg_updatingVlanBind(matchIdx,rg_db.systemGlobal.interfaceInfo[matchIdx].storedInfo.wan_intf.bind_wan_type_ipv6);
		if(ret!=RT_ERR_RG_OK)RETURN_ERR(ret);
	}

	nxtidx=rg_db.systemGlobal.interfaceInfo[matchIdx].storedInfo.wan_intf.nexthop_ipv6;
	//DEBUG("nxtidx=%d (matchIdx=%d bind_wan_type_ipv6=%d)",nxtidx,matchIdx,rg_db.systemGlobal.interfaceInfo[matchIdx].storedInfo.wan_intf.bind_wan_type_ipv6);
	bzero(&nxpEt, sizeof(rtk_l34_nexthop_entry_t));
	//20140623LUKE:copy from original nexthop for netifIdx and pppoeIdx
	memcpy(&nxpEt, &rg_db.nexthop[nxtidx].rtk_nexthop,sizeof(rtk_l34_nexthop_entry_t));

	//Setup Nexthop table in nxtidx
	errorno=RT_ERR_RG_NXP_SET_FAIL;
	ori_l2Idx=nxpEt.nhIdx;	//keep the original L2 index for error recovery
	nxpEt.nhIdx=l2Idx;	//point the nexthop's LUT index to the learned gateway MAC
	//20140623LUKE:IPv6 should use 8~MAX_NEXTHOP_HW_TABLE_SIZE range
	//nxtidx+=MAX_NETIF_HW_TABLE_SIZE;
	rg_db.nexthop[nxtidx].valid=1;
	ret = RTK_L34_NEXTHOPTABLE_SET(nxtidx, &nxpEt);
	if(ret!=RT_ERR_OK)goto RET_NEXTHOP_ERR;
	DEBUG("### Add NEXTHOP[%d]:(ifIdx=%d, l2Idx=%d) ",nxtidx,nxpEt.ifIdx,nxpEt.nhIdx);
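	/*
	 * The nexthop now resolves to the learned gateway MAC. The code below picks which IPv6
	 * route to program: the hardware default route when this WAN carries the default gateway,
	 * otherwise a WAN-subnet route toward the remote gateway.
	 */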
//DEBUG("default_route = %d",default_route); //Setup Routing table for default route if(default_route == 1) { //DEBUG("rg_db.systemGlobal.defaultIPV6RouteSet = %d, matchIdx=%d",rg_db.systemGlobal.defaultIPV6RouteSet,matchIdx); if(rg_db.systemGlobal.defaultIPV6RouteSet == matchIdx) { errorno=RT_ERR_RG_ROUTE_SET_FAIL; bzero(&rtv6Entry, sizeof(rtk_ipv6Routing_entry_t)); rtv6Entry.valid=1; rtv6Entry.type=L34_IPV6_ROUTE_TYPE_GLOBAL; rtv6Entry.nhOrIfidIdx=nxtidx; rtv6Entry.rt2waninf=1; //callback information bzero(&cb_routv6Et,sizeof(rtk_rg_ipv6RoutingEntry_t)); cb_routv6Et.NhOrIntfIdx=nxtidx; cb_routv6Et.type=rtv6Entry.type; //WARNING("set default route to real ipv6 hw table of %d, next is %d",V6_DEFAULT_ROUTE_IDX,nxtidx); ret=RTK_L34_IPV6ROUTINGTABLE_SET(V6_DEFAULT_ROUTE_IDX, &rtv6Entry); //set default route if(ret!=RT_ERR_OK)goto RET_DEF_ROUTE_ERR; // TODO:Call the initParam's v6RoutingAddByHwCallBack if(rg_db.systemGlobal.initParam.v6RoutingAddByHwCallBack != NULL) { rg_db.systemGlobal.initParam.v6RoutingAddByHwCallBack(&cb_routv6Et); } rg_db.systemGlobal.nxpRefCount[nxtidx]++; //nexthop reference by v6 routing table } } else { //set STATIC ROUTE for nexthop if(memcmp(&remote_gw_ip,&zeroIP,sizeof(rtk_ipv6_addr_t))) { WARNING("IPv6 STATIC ROUTE is set, all WANIP subnet will be transfer to remote gateway!! If packets head for this WAN ip, please add ACL rule for trapping it!!"); //check which routing entry we are setting bzero(&rtv6Entry, sizeof(rtk_ipv6Routing_entry_t)); for(i=0; i<MAX_IPV6_ROUTING_HW_TABLE_SIZE - 1; i++) { if(rg_db.v6route[i].rtk_v6route.valid && rg_db.v6route[i].rtk_v6route.type==L34_IPV6_ROUTE_TYPE_LOCAL && !memcmp(&wan_ext_ip,&rg_db.v6route[i].rtk_v6route.ipv6Addr,sizeof(rtk_ipv6_addr_t)) && (wan_ext_ip_mask==rg_db.v6route[i].rtk_v6route.ipv6PrefixLen)) { //Match! 
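					/* Found the LOCAL route covering this WAN's IPv6 prefix; it is rewritten below
					 * as a GLOBAL route whose nexthop points at the learned gateway. */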
rtidx=i; break; } } if(rtidx>=0) { errorno=RT_ERR_RG_ROUTE_SET_FAIL; rtv6Entry.valid=1; rtv6Entry.type=L34_IPV6_ROUTE_TYPE_GLOBAL; rtv6Entry.nhOrIfidIdx=nxtidx; rtv6Entry.ipv6PrefixLen=wan_ext_ip_mask; memcpy(&rtv6Entry.ipv6Addr,&wan_ext_ip,sizeof(rtk_ipv6_addr_t)); rtv6Entry.rt2waninf=1; //callback information bzero(&cb_routv6Et,sizeof(rtk_rg_ipv6RoutingEntry_t)); cb_routv6Et.NhOrIntfIdx=nxtidx; cb_routv6Et.type=rtv6Entry.type; ret=RTK_L34_IPV6ROUTINGTABLE_SET(rtidx, &rtv6Entry); //set default route if(ret!=RT_ERR_OK)goto RET_DEF_ROUTE_ERR; // TODO:Call the initParam's v6RoutingAddByHwCallBack if(rg_db.systemGlobal.initParam.v6RoutingAddByHwCallBack != NULL) { rg_db.systemGlobal.initParam.v6RoutingAddByHwCallBack(&cb_routv6Et); } rg_db.systemGlobal.nxpRefCount[nxtidx]++; //nexthop reference by v6 routing table } } } return (RT_ERR_RG_OK); RET_DEF_ROUTE_ERR: TRACE("Set default route error"); //Delete the default route entry bzero(&rtv6Entry, sizeof(rtk_ipv6Routing_entry_t)); RTK_L34_IPV6ROUTINGTABLE_SET(3, &rtv6Entry); RET_NEXTHOP_ERR: //Recover original L2 idx nxpEt.nhIdx=ori_l2Idx; rg_db.nexthop[nxtidx].valid=1; RTK_L34_NEXTHOPTABLE_SET(nxtidx, &nxpEt); return (errorno); } int _rtk_rg_internal_IPV6GWMACSetup(unsigned char *ipv6Addr, int l2Idx) { int i,matchIdx=-1,errorno; // TODO:After the Gateway mac is learned, we can finish add routing entry // TODO:and modify nexthop entry to correct LUT index DEBUG("in _rtk_rg_internal_IPV6GWMACSetup, l2idx is %d",l2Idx); //Check l2Idx for success or timeout errorno=RT_ERR_RG_ARP_NOT_FOUND; if(l2Idx == -1)goto END; //Check each wan interface for matching IPaddr for(i=0; i<rg_db.systemGlobal.wanIntfTotalNum; i++) { //Bridge WAN won't be compared with if(rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->wan_type==RTK_RG_BRIDGE) continue; if(memcmp(rg_db.systemGlobal.wanIntfGroup[i].p_intfInfo->p_wanStaticInfo->gateway_ipv6_addr.ipv6_addr,ipv6Addr,IPV6_ADDR_LEN)==0) { matchIdx=rg_db.systemGlobal.wanIntfGroup[i].index; errorno=_rtk_rg_decCountSetStaticForV6GWMAC(ipv6Addr, l2Idx); if(errorno!=RT_ERR_RG_OK)goto RET_ERR; DEBUG("before IPV6GWMACsetup_stage 2 !!! matchidx is %d, l2idx is %d",matchIdx,l2Idx); errorno = _rtk_rg_internal_IPV6GWMACSetup_stage2(matchIdx, l2Idx); if(errorno!=RT_ERR_RG_OK)goto RET_ERR; rg_db.systemGlobal.intfNeighborDiscovery[matchIdx].finished = 1; //break; } } errorno=RT_ERR_RG_INVALID_PARAM; if(matchIdx == -1)goto END; return (RT_ERR_RG_OK); RET_ERR: rg_db.systemGlobal.intfNeighborDiscovery[matchIdx].finished = -1; END: return (errorno); } int _rtk_rg_internal_IPV6AFTRMACSetup(unsigned char *ipv6Addr, int l2Idx) { int i,matchIdx=-1,errorno; #if defined(CONFIG_RTL9602C_SERIES) rtk_l34_dsliteInf_entry_t *dsliteHw; #else rtk_rg_aclAndCf_reserved_intf_dslite_trap_t intf_dslite_trap_para; #endif DEBUG("in _rtk_rg_internal_IPV6AFTRMACSetup, l2idx is %d",l2Idx); //Check l2Idx for success or timeout errorno=RT_ERR_RG_ARP_NOT_FOUND; if(l2Idx == -1)goto RET_ERR; //Check each wan interface for matching IPaddr for(i=0; i<rg_db.systemGlobal.wanIntfTotalNum; i++) { if(rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->wan_type!=RTK_RG_DSLITE) continue; if(memcmp(rg_db.systemGlobal.wanIntfGroup[i].p_intfInfo->storedInfo.wan_intf.dslite_info.rtk_dslite.ipAftr.ipv6_addr,ipv6Addr,IPV6_ADDR_LEN)==0) { matchIdx=rg_db.systemGlobal.wanIntfGroup[i].index; errorno=_rtk_rg_decCountSetStaticForV6GWMAC(ipv6Addr, l2Idx); if(errorno!=RT_ERR_RG_OK)goto RET_ERR; DEBUG("before AFTR GWMACsetup_stage 2 !!! 
matchidx is %d, l2idx is %d",matchIdx,l2Idx);
			errorno = _rtk_rg_internal_GWMACSetup_stage2(matchIdx, l2Idx);
			if(errorno!=RT_ERR_RG_OK)goto RET_ERR;
			rg_db.systemGlobal.intfNeighborDiscovery[matchIdx+MAX_NETIF_SW_TABLE_SIZE].finished = 1;
#if defined(CONFIG_RTL9602C_SERIES)
			dsliteHw=&rg_db.systemGlobal.interfaceInfo[matchIdx].storedInfo.wan_intf.dslite_info.rtk_dslite;
			dsliteHw->index=rg_db.systemGlobal.interfaceInfo[matchIdx].storedInfo.wan_intf.dslite_idx;
			dsliteHw->valid=1;
			ASSERT_EQ(RTK_L34_DSLITEINFTABLE_SET(dsliteHw),RT_ERR_OK);
#else
			//enable the reserved ACL trap rule for this DS-Lite interface
			if(matchIdx < MAX_NETIF_HW_TABLE_SIZE)
			{
				memcpy(intf_dslite_trap_para.ipv6_dip.ipv6_addr, rg_db.systemGlobal.interfaceInfo[matchIdx].storedInfo.wan_intf.dslite_info.rtk_dslite.ipB4.ipv6_addr, IPV6_ADDR_LEN);
				memcpy(intf_dslite_trap_para.smac.octet, rg_db.lut[l2Idx].rtk_lut.entry.l2UcEntry.mac.octet, ETHER_ADDR_LEN);
				_rtk_rg_aclAndCfReservedRuleAdd(RTK_RG_ACLANDCF_RESERVED_INTF0_DSLITE_TRAP+matchIdx,&intf_dslite_trap_para);
			}
#endif
			//break;
		}
	}
	errorno=RT_ERR_RG_INVALID_PARAM;
	if(matchIdx == -1)goto RET_ERR;
	return (RT_ERR_RG_OK);

RET_ERR:
	rg_db.systemGlobal.intfNeighborDiscovery[matchIdx+MAX_NETIF_SW_TABLE_SIZE].finished = -1;
	return (errorno);
}

void _rtk_rg_arpRequestTimerFunc(unsigned long netIfIdx)
{
#ifdef __KERNEL__
	ipaddr_t ipAddr=0;
	rtk_l34_routing_entry_t rtEntry;
	rtk_rg_ipv4RoutingEntry_t cb_routEt;
	int i;

	if(netIfIdx>=MAX_NETIF_SW_TABLE_SIZE) return;

	if(rg_db.systemGlobal.intfArpRequest[netIfIdx].finished==0)
	{
		if(rg_db.systemGlobal.interfaceInfo[netIfIdx].valid == 1 && rg_db.systemGlobal.interfaceInfo[netIfIdx].storedInfo.is_wan == 1)
			ipAddr=rg_db.systemGlobal.interfaceInfo[netIfIdx].p_wanStaticInfo->ip_addr;
		else
			return;

		_rtk_rg_arpGeneration(netIfIdx,ipAddr,&rg_db.systemGlobal.intfArpRequest[netIfIdx]);
		rg_kernel.arpRequestTimerCounter[netIfIdx]++;

		//if(rg_kernel.arpRequestTimerCounter[netIfIdx]<10)
		if(1)	//never time out (keep re-sending ARP) until finished==1
		{
			mod_timer(&rg_kernel.arpRequestTimer[netIfIdx], jiffies+(rg_db.systemGlobal.arp_requset_interval_sec*TICKTIME_PERIOD));
		}
		else
		{
			//an error happened; recover what we did before
			//Check which ARP routing entry we added
			for(i=0; i<MAX_L3_SW_TABLE_SIZE ; i++)	//because idx MAX_L3_SW_TABLE_SIZE-1 is reserved for default route
			{
				if(i== V4_DEFAULT_ROUTE_IDX) continue;
				//bzero(&rtEntry, sizeof(rtk_l34_routing_entry_t));
				//rtk_l34_routingTable_get(i, &rtEntry);
				//if(rtEntry.ipAddr == ipAddr && rtEntry.process == L34_PROCESS_ARP)
				if(rg_db.l3[i].rtk_l3.ipAddr == ipAddr && rg_db.l3[i].rtk_l3.process == L34_PROCESS_ARP)
				{
					//Delete the routing entry we added and call the callback function
					bzero(&rtEntry, sizeof(rtk_l34_routing_entry_t));
					bzero(&cb_routEt, sizeof(rtk_rg_ipv4RoutingEntry_t));
					cb_routEt.dest_ip=rg_db.l3[i].rtk_l3.ipAddr;
					cb_routEt.ip_mask=rg_db.l3[i].netmask;
					RTK_L34_ROUTINGTABLE_SET(i, &rtEntry);
					if(rg_db.systemGlobal.initParam.routingDelByHwCallBack != NULL)
					{
						cb_routEt.nexthop=0;
						cb_routEt.wan_intf_idx=netIfIdx;
						rg_db.systemGlobal.initParam.routingDelByHwCallBack(&cb_routEt);
					}
					break;
				}
			}
			//reset global state for this interface
			bzero(rg_db.systemGlobal.interfaceInfo[netIfIdx].p_wanStaticInfo, sizeof(rtk_rg_ipStaticInfo_t));
			rg_db.systemGlobal.intfArpRequest[netIfIdx].finished=-1;
			rtlglue_printf("the ARP request failed when setting up the WAN interface..\n");
		}
	}
#endif
}

void _rtk_rg_staticRouteArpOrNbReqTimerFunc(unsigned long sridx)
{
#ifdef __KERNEL__
	if(sridx>=MAX_STATIC_ROUTE_SIZE) return;

	if(rg_db.systemGlobal.staticRouteArpReq[sridx].finished==0){
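		/* IPv4 static-route nexthop not resolved yet: re-send an ARP request for its gateway
		 * IP (the else-if below does the same with a Neighbor Solicitation for IPv6). */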
_rtk_rg_arpGeneration(rg_db.staticRoute[sridx].nxtip_intfidx,rg_db.l3[rg_db.staticRoute[sridx].nxtip_rtidx].gateway_ip,&rg_db.systemGlobal.staticRouteArpReq[sridx]); }else if(rg_db.systemGlobal.staticRouteNBDiscovery[sridx].finished==0){ _rtk_rg_NDGeneration(rg_db.staticRoute[sridx].nxtip_intfidx,rg_db.v6route[rg_db.staticRoute[sridx].nxtip_rtidx].gateway_ipv6Addr,&rg_db.systemGlobal.staticRouteNBDiscovery[sridx]); } rg_kernel.staticRouteArpOrNBTimerCounter[sridx]++; //if(rg_kernel.staticRouteArpOrNBReqTimer[netIfIdx]<10) if(1) //nerver timeout (always send arp): until finished=1 { mod_timer(&rg_kernel.staticRouteArpOrNBReqTimer[sridx], jiffies+(rg_db.systemGlobal.arp_requset_interval_sec*TICKTIME_PERIOD)); } #endif } void _rtk_rg_PPTPL2TPDiscoveryTimerFunc(unsigned long netIfIdx) { #ifdef __KERNEL__ ipaddr_t ipAddr=0; int realIfIdx=netIfIdx-MAX_NETIF_SW_TABLE_SIZE; int matchIdx; if(netIfIdx>=(MAX_NETIF_SW_TABLE_SIZE<<1)) return; if(rg_db.systemGlobal.intfArpRequest[netIfIdx].finished==0){ if(rg_db.systemGlobal.interfaceInfo[realIfIdx].valid == 1 && rg_db.systemGlobal.interfaceInfo[realIfIdx].storedInfo.is_wan == 1){ matchIdx=_rtk_rg_l3lookup(rg_db.systemGlobal.intfArpRequest[netIfIdx].reqIp); if(rg_db.l3[matchIdx].rtk_l3.process==L34_PROCESS_NH && realIfIdx!=rg_db.nexthop[rg_db.l3[matchIdx].rtk_l3.nhStart].rtk_nexthop.ifIdx){ //use the NH to get MAC idx!! if(rg_db.systemGlobal.interfaceInfo[realIfIdx].storedInfo.wan_intf.wan_intf_conf.wan_type==RTK_RG_PPTP) _rtk_rg_internal_PPTPMACSetup(rg_db.systemGlobal.intfArpRequest[netIfIdx].reqIp, rg_db.nexthop[rg_db.l3[matchIdx].rtk_l3.nhStart].rtk_nexthop.nhIdx); else _rtk_rg_internal_L2TPMACSetup(rg_db.systemGlobal.intfArpRequest[netIfIdx].reqIp, rg_db.nexthop[rg_db.l3[matchIdx].rtk_l3.nhStart].rtk_nexthop.nhIdx); rg_db.systemGlobal.interfaceInfo[realIfIdx].storedInfo.wan_intf.baseIntf_idx=rg_db.nexthop[rg_db.l3[matchIdx].rtk_l3.nhStart].rtk_nexthop.ifIdx; return; }else if(rg_db.l3[matchIdx].rtk_l3.process==L34_PROCESS_CPU){ if((rg_db.systemGlobal.interfaceInfo[rg_db.l3[matchIdx].rtk_l3.netifIdx].storedInfo.wan_intf.wan_intf_conf.wan_type==RTK_RG_PPTP || rg_db.systemGlobal.interfaceInfo[rg_db.l3[matchIdx].rtk_l3.netifIdx].storedInfo.wan_intf.wan_intf_conf.wan_type==RTK_RG_L2TP || rg_db.systemGlobal.interfaceInfo[rg_db.l3[matchIdx].rtk_l3.netifIdx].storedInfo.wan_intf.wan_intf_conf.wan_type==RTK_RG_DSLITE || rg_db.systemGlobal.interfaceInfo[rg_db.l3[matchIdx].rtk_l3.netifIdx].storedInfo.wan_intf.wan_intf_conf.wan_type==RTK_RG_PPPoE_DSLITE)&& realIfIdx!=rg_db.nexthop[rg_db.l3[matchIdx].rtk_l3.nhStart].rtk_nexthop.ifIdx){ //use the NH to get MAC idx!! 
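					/* The tunnel server's route traps to CPU but belongs to another tunnel-type WAN:
					 * reuse the L2 index recorded in that route's nexthop to finish the PPTP/L2TP
					 * MAC setup instead of waiting for a fresh ARP reply. */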
if(rg_db.systemGlobal.interfaceInfo[realIfIdx].storedInfo.wan_intf.wan_intf_conf.wan_type==RTK_RG_PPTP) _rtk_rg_internal_PPTPMACSetup(rg_db.systemGlobal.intfArpRequest[netIfIdx].reqIp, rg_db.nexthop[rg_db.l3[matchIdx].rtk_l3.nhStart].rtk_nexthop.nhIdx); else _rtk_rg_internal_L2TPMACSetup(rg_db.systemGlobal.intfArpRequest[netIfIdx].reqIp, rg_db.nexthop[rg_db.l3[matchIdx].rtk_l3.nhStart].rtk_nexthop.nhIdx); rg_db.systemGlobal.interfaceInfo[realIfIdx].storedInfo.wan_intf.baseIntf_idx=rg_db.nexthop[rg_db.l3[matchIdx].rtk_l3.nhStart].rtk_nexthop.ifIdx; return; } } ipAddr=rg_db.systemGlobal.interfaceInfo[realIfIdx].p_wanStaticInfo->ip_addr; }else return; _rtk_rg_arpGeneration(realIfIdx,ipAddr,&rg_db.systemGlobal.intfArpRequest[netIfIdx]); rg_kernel.arpRequestTimerCounter[netIfIdx]++; //if(rg_kernel.arpRequestTimerCounter[netIfIdx]<10) if(1){ //nerver timeout (always send arp): until finished=1 mod_timer(&rg_kernel.arpRequestTimer[netIfIdx], jiffies+(rg_db.systemGlobal.arp_requset_interval_sec*TICKTIME_PERIOD)); } } #endif } void _rtk_rg_PPTPLearningTimerInitialize(int wan_intf_idx) { int arp_req_idx=wan_intf_idx+MAX_NETIF_SW_TABLE_SIZE; rg_db.systemGlobal.intfArpRequest[arp_req_idx].finished=0; rg_db.systemGlobal.intfArpRequest[arp_req_idx].reqIp=rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.pptp_info.before_dial.pptp_ipv4_addr; rg_db.systemGlobal.intfArpRequest[arp_req_idx].gwMacReqCallBack=_rtk_rg_internal_PPTPMACSetup; #ifdef __KERNEL__ if(timer_pending(&rg_kernel.arpRequestTimer[arp_req_idx])) del_timer(&rg_kernel.arpRequestTimer[arp_req_idx]); init_timer(&rg_kernel.arpRequestTimer[arp_req_idx]); rg_kernel.arpRequestTimer[arp_req_idx].data = (unsigned long)(arp_req_idx); rg_kernel.arpRequestTimer[arp_req_idx].function = _rtk_rg_PPTPL2TPDiscoveryTimerFunc; rg_kernel.arpRequestTimerCounter[arp_req_idx]=0; DEBUG("PPTP miss, request arp=%x\n",rg_db.systemGlobal.intfArpRequest[arp_req_idx].reqIp); mod_timer(&rg_kernel.arpRequestTimer[arp_req_idx], jiffies+(rg_db.systemGlobal.arp_requset_interval_sec*TICKTIME_PERIOD)); #endif } void _rtk_rg_L2TPLearningTimerInitialize(int wan_intf_idx) { int arp_req_idx=wan_intf_idx+MAX_NETIF_SW_TABLE_SIZE; rg_db.systemGlobal.intfArpRequest[arp_req_idx].finished=0; rg_db.systemGlobal.intfArpRequest[arp_req_idx].reqIp=rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.l2tp_info.before_dial.l2tp_ipv4_addr; rg_db.systemGlobal.intfArpRequest[arp_req_idx].gwMacReqCallBack=_rtk_rg_internal_L2TPMACSetup; #ifdef __KERNEL__ if(timer_pending(&rg_kernel.arpRequestTimer[arp_req_idx])) del_timer(&rg_kernel.arpRequestTimer[arp_req_idx]); init_timer(&rg_kernel.arpRequestTimer[arp_req_idx]); rg_kernel.arpRequestTimer[arp_req_idx].data = (unsigned long)(arp_req_idx); rg_kernel.arpRequestTimer[arp_req_idx].function = _rtk_rg_PPTPL2TPDiscoveryTimerFunc; rg_kernel.arpRequestTimerCounter[arp_req_idx]=0; DEBUG("L2TP miss, request arp=%x\n",rg_db.systemGlobal.intfArpRequest[arp_req_idx].reqIp); mod_timer(&rg_kernel.arpRequestTimer[arp_req_idx], jiffies+(rg_db.systemGlobal.arp_requset_interval_sec*TICKTIME_PERIOD)); #endif } __SRAM_FWDENG_SLOWPATH rtk_rg_successFailReturn_t _rtk_rg_skipNeighborLearningOrNot(int l3Idx, uint8 *sip, int srcPortIdx, uint8 *smac) { int hashValue, neighborIdx, i, l2Idx; uint8 *interfaceId; if(rg_db.systemGlobal.antiMacSpoofStatus==RTK_RG_ENABLED) { interfaceId = sip+8; hashValue=_rtk_rg_IPv6NeighborHash(interfaceId, l3Idx); neighborIdx=(hashValue<<3); for(i=0; i<8; i++) { //8-way hash //find the same entry first. 
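			/* Scan the 8 entries of this neighbor hash bucket: if a valid entry already maps
			 * this route index + interface ID to a LUT entry with a different source MAC,
			 * the update is treated as spoofing and learning is rejected. */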
if((rg_db.v6neighbor[neighborIdx+i].rtk_v6neighbor.valid==1)&& (rg_db.v6neighbor[neighborIdx+i].rtk_v6neighbor.ipv6RouteIdx==l3Idx)&& (memcmp(&rg_db.v6neighbor[neighborIdx+i].rtk_v6neighbor.ipv6Ifid, interfaceId, 8)==0)) { l2Idx = rg_db.v6neighbor[neighborIdx+i].rtk_v6neighbor.l2Idx; if(rg_db.lut[l2Idx].rtk_lut.entryType==RTK_LUT_L2UC && memcmp(rg_db.lut[l2Idx].rtk_lut.entry.l2UcEntry.mac.octet, smac,ETHER_ADDR_LEN)) { TRACE("Anti spoofing: neighbor entry already exist (%d), discard learning !", neighborIdx+i); return RG_RET_FAIL; } } } } return RG_RET_SUCCESS; } __SRAM_FWDENG_SLOWPATH rtk_rg_successFailReturn_t _rtk_rg_skipARPLearningOrNot(int l3Idx, ipaddr_t sip, int srcPortIdx, uint8 *smac) { // 1. Gateway IP should not add to ARP table // 2. if anti spoofing is enabled, discard arp update from same IP with diff MAC. int arpIdx, arpValid=0, l2Idx; if(rg_db.l3[l3Idx].rtk_l3.valid && rg_db.systemGlobal.interfaceInfo[rg_db.l3[l3Idx].rtk_l3.netifIdx].valid) { if(rg_db.systemGlobal.interfaceInfo[rg_db.l3[l3Idx].rtk_l3.netifIdx].storedInfo.is_wan) { if(rg_db.l3[l3Idx].gateway_ip==sip) { TRACE("source IP equals to WAN gateway's IP...skip learning"); return RG_RET_FAIL; } #if 0 //support multiple WAN using the same IP/subnet, but different VLAN/MAC/Port. else { //Check Src port in interface's VLAN member or not if(srcPortIdx>=RTK_RG_PORT_CPU) { if((rg_db.vlan[rg_db.systemGlobal.interfaceInfo[rg_db.l3[l3Idx].rtk_l3.netifIdx].storedInfo.wan_intf.wan_intf_conf.egress_vlan_id].Ext_portmask.bits[0]&(0x1<<(srcPortIdx-RTK_RG_PORT_CPU)))==0) //extension port not exist { TRACE("source Extension Port %d is not in WAN interface[%d]'s VLAN[%d]...skip learning",srcPortIdx,rg_db.l3[l3Idx].rtk_l3.netifIdx,rg_db.systemGlobal.interfaceInfo[rg_db.l3[l3Idx].rtk_l3.netifIdx].storedInfo.wan_intf.wan_intf_conf.egress_vlan_id); return RG_RET_FAIL; } } /* else if((rg_db.vlan[rg_db.systemGlobal.interfaceInfo[rg_db.l3[l3Idx].rtk_l3.netifIdx].storedInfo.wan_intf.wan_intf_conf.egress_vlan_id].MemberPortmask.bits[0]&(0x1<<srcPortIdx))==0) //utp port not exist { TRACE("source Port %d is not in WAN interface[%d]'s VLAN...skip learning",srcPortIdx,rg_db.systemGlobal.interfaceInfo[rg_db.l3[l3Idx].rtk_l3.netifIdx].storedInfo.wan_intf.wan_intf_conf.egress_vlan_id); return RG_RET_FAIL; } */ } #endif } else { if(rg_db.systemGlobal.antiMacSpoofStatus==RTK_RG_ENABLED) { #if defined(CONFIG_RTL9600_SERIES) || defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) arpIdx = (rg_db.l3[l3Idx].rtk_l3.arpStart<<2)+(sip & ((1<<(31-rg_db.l3[l3Idx].rtk_l3.ipMask))-1)); arpValid = rg_db.arp[arpIdx].rtk_arp.valid; #elif defined(CONFIG_RTL9602C_SERIES) rtk_rg_arp_linkList_t *pSwArpList; rtk_rg_arp_linkList_t *pHwArpList; _rtk_rg_softwareArpTableLookUp(l3Idx,sip,&pSwArpList,1); if(pSwArpList) { arpValid = 1; arpIdx = pSwArpList->idx; } else { _rtk_rg_hardwareArpTableLookUp(l3Idx,sip,&pHwArpList,1); if(pHwArpList) { arpValid = 1; arpIdx = pHwArpList->idx; } else { arpValid = 0; } } #elif defined(CONFIG_RTL9607C_SERIES) rtk_rg_arp_linkList_t *pSwArpList; _rtk_rg_softwareArpTableLookUp(l3Idx,sip,&pSwArpList,1); if(pSwArpList) { arpValid = 1; arpIdx = pSwArpList->idx; } #else #error #endif if(arpValid) { l2Idx = rg_db.arp[arpIdx].rtk_arp.nhIdx; if(rg_db.lut[l2Idx].rtk_lut.entryType==RTK_LUT_L2UC && memcmp(rg_db.lut[l2Idx].rtk_lut.entry.l2UcEntry.mac.octet, smac,ETHER_ADDR_LEN)) { TRACE("Anti MAC spoofing: ARP entry already exist, discard learning !"); return RG_RET_FAIL; } } } if(rg_db.l3[l3Idx].gateway_ip==sip) { TRACE("source IP equals to LAN gateway's IP...skip 
learning"); return RG_RET_FAIL; } else { //Check Src port in interface's VLAN member or not if(_rtk_rg_fwdEngineVLANFiltering(rg_db.systemGlobal.interfaceInfo[rg_db.l3[l3Idx].rtk_l3.netifIdx].storedInfo.lan_intf.intf_vlan_id, rg_db.pktHdr->ingressMacPort, rg_db.pktHdr->ingressMacExtPort)==RG_FWDENGINE_RET_DROP) { TRACE("source Port %d is not in LAN interface[%d]'s VLAN[%d]...skip learning", srcPortIdx, rg_db.l3[l3Idx].rtk_l3.netifIdx,rg_db.systemGlobal.interfaceInfo[rg_db.l3[l3Idx].rtk_l3.netifIdx].storedInfo.lan_intf.intf_vlan_id); return RG_RET_FAIL; } } } return RG_RET_SUCCESS; } return RG_RET_FAIL; } rtk_rg_successFailReturn_t _rtk_rg_arpAndMacEntryAdd(ipaddr_t sip, int sipL3Idx, uint8 *pSmac, int srcPortIdx, int srcWlanDevIdx, int *pL2Idx, int cvid, int cvidForceAdd, int arpEntryForceAdd) { rtk_rg_arpEntry_t arpEntry; rtk_rg_macEntry_t macEntry; rtk_rg_arp_linkList_t *pSwArpList; #if defined(CONFIG_RTL9602C_SERIES) rtk_rg_arp_linkList_t *pHwArpList; #endif int arpIdx,l2Idx; rtk_rg_successFailReturn_t ret; short l3Idx,search_index,sw_lut_vlan=-1; short count=0,first_invalid=-1; char arp_valid=0,mac_exist=0,addArp=1;//,dmac2CVID_Untag=0; rtk_rg_lut_linkList_t *pSoftLut,*pSoftLutNext; if(sipL3Idx==FAIL) l3Idx=_rtk_rg_l3lookup(sip); else l3Idx=sipL3Idx; //init memset(&macEntry,0,sizeof(rtk_rg_macEntry_t)); if(rg_db.l3[l3Idx].rtk_l3.process==L34_PROCESS_ARP) { //Check if we skip ARP learning or not ret=_rtk_rg_skipARPLearningOrNot(l3Idx,sip,srcPortIdx, pSmac); if(ret!=RG_RET_SUCCESS)return ret; #if defined(CONFIG_RG_FLOW_BASED_PLATFORM) goto check_sw_arp; #elif defined(CONFIG_RTL9600_SERIES) || defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) arpIdx=(rg_db.l3[l3Idx].rtk_l3.arpStart<<2)+(sip & ((1<<(31-rg_db.l3[l3Idx].rtk_l3.ipMask))-1)); arp_valid = rg_db.arp[arpIdx].rtk_arp.valid; if(arp_valid) { //20141013LUKE: update ARP idle time rg_db.arp[arpIdx].idleSecs=0; rg_db.arp[arpIdx].sendReqCount=0; if(arpEntryForceAdd) { if(rg_db.arp[arpIdx].staticEntry) //the static entry should not be replaced { TRACE("ARP entry is valid and STATIC...skip add ARP"); //don't add ARP, but create new MAC entry addArp=0; if(pL2Idx!=NULL)*pL2Idx=rg_db.arp[arpIdx].rtk_arp.nhIdx; //return the l2 idx which pointed by arp entry } } else { //don't add ARP, but create new MAC entry addArp=0; if(pL2Idx!=NULL)*pL2Idx=rg_db.arp[arpIdx].rtk_arp.nhIdx; //return the l2 idx which pointed by arp entry } } #elif defined(CONFIG_RTL9602C_SERIES) _rtk_rg_softwareArpTableLookUp(l3Idx,sip,&pSwArpList,1); if(pSwArpList!=NULL) //sw arp is found { arp_valid=1; if(arpEntryForceAdd) //need to replace ARP link-list { if(rg_db.arp[pSwArpList->idx].staticEntry==0) { //Delete old link-list first TRACE("delete the old dynamic ARP link-list...since arpEntryForceAdd==1"); _rtk_rg_softwareArpTableDel(pSwArpList); addArp=2; //add to sw link-list } else { TRACE("software ARP link-list is valid and STATIC...skip add ARP"); addArp=0; if(pL2Idx!=NULL)*pL2Idx=rg_db.arp[pSwArpList->idx].rtk_arp.nhIdx; //return the l2 idx which pointed by arp entry } } else //did not add to software ARP table { TRACE("software ARP entry is added...skip add ARP"); addArp=0; if(pL2Idx!=NULL)*pL2Idx=rg_db.arp[pSwArpList->idx].rtk_arp.nhIdx; //return the l2 idx which pointed by arp entry } } else // sw arp is not found { _rtk_rg_hardwareArpTableLookUp(l3Idx,sip,&pHwArpList,1); if(pHwArpList!=NULL) //hw arp is found { arp_valid = 1; if(arpEntryForceAdd) { if(rg_db.arp[pHwArpList->idx].staticEntry==0) { TRACE("HW ARP entry is added again...since arpEntryForceAdd==1"); addArp=1; } else 
//the static entry should not be replaced { TRACE("HW ARP entry is valid and STATIC...skip add ARP"); //don't add ARP, but create new MAC entry addArp=0; if(pL2Idx!=NULL)*pL2Idx=rg_db.arp[pHwArpList->idx].rtk_arp.nhIdx; //return the l2 idx which pointed by arp entry } } else { TRACE("HW ARP entry is added...skip add ARP"); //don't add ARP, but create new MAC entry addArp=0; if(pL2Idx!=NULL)*pL2Idx=rg_db.arp[pHwArpList->idx].rtk_arp.nhIdx; //return the l2 idx which pointed by arp entry } } else // sw arp and hw arp are not found { arp_valid=0; if(list_empty(&rg_db.hardwareArpFreeListHead)) addArp=2; //add to sw link-list else addArp=1; //add to hw table } } #endif //get VID macEntry.vlan_id=rg_db.netif[rg_db.l3[l3Idx].rtk_l3.netifIdx].rtk_netif.vlan_id; } else if(rg_db.l3[l3Idx].rtk_l3.process==L34_PROCESS_CPU) { if(rg_db.l3[l3Idx].rtk_l3.ipAddr>0) //non-default route's TRAP routing should add to sw ARP table { //Check if we skip ARP learning or not ret=_rtk_rg_skipARPLearningOrNot(l3Idx,sip,srcPortIdx, pSmac); if(ret!=RG_RET_SUCCESS)return ret; #if defined(CONFIG_RG_FLOW_BASED_PLATFORM) check_sw_arp: #endif //FIXME:till now default route to CPU is to protocol, if sw routing link-list is implement, //the default route in hw will means routing should check sw routing link-list, too. _rtk_rg_softwareArpTableLookUp(l3Idx,sip,&pSwArpList,1); if(pSwArpList!=NULL) { if(arpEntryForceAdd) //need to replace ARP link-list { if(rg_db.arp[pSwArpList->idx].staticEntry==0) { //Delete old link-list first TRACE("delete the old dynamic ARP link-list since arpEntryForceAdd==1"); _rtk_rg_softwareArpTableDel(pSwArpList); addArp=2; //add to sw link-list } else { TRACE("software ARP link-list is valid and STATIC...skip add ARP"); arp_valid=1; addArp=0; if(pL2Idx!=NULL)*pL2Idx=rg_db.arp[pSwArpList->idx].rtk_arp.nhIdx; //return the l2 idx which pointed by arp entry } } else //did not add to software ARP table { TRACE("software ARP entry is added...skip add ARP"); arp_valid=1; addArp=0; if(pL2Idx!=NULL)*pL2Idx=rg_db.arp[pSwArpList->idx].rtk_arp.nhIdx; //return the l2 idx which pointed by arp entry } } else addArp=2; //add to sw link-list //get VID macEntry.vlan_id=rg_db.netif[rg_db.l3[l3Idx].rtk_l3.netifIdx].rtk_netif.vlan_id; } else { TRACE("%x from default route with ingress VID %d!!...skip add ARP",sip,cvid); //from default route, do not add ARP addArp=0; //get VID from ingress VID if(rg_db.vlan[cvid].valid) macEntry.vlan_id=cvid; else return RG_RET_FAIL; //VLAN not exist } } else if(rg_db.l3[l3Idx].rtk_l3.process==L34_PROCESS_NH) //default route should not add to ARP table, and non-default gateway host should not enable arp_used field { addArp=0; TRACE("from NH interface!!"); if(rg_db.lut[rg_db.nexthop[rg_db.l3[l3Idx].rtk_l3.nhStart].rtk_nexthop.nhIdx].valid && rg_db.lut[rg_db.nexthop[rg_db.l3[l3Idx].rtk_l3.nhStart].rtk_nexthop.nhIdx].rtk_lut.entryType==RTK_LUT_L2UC && memcmp(rg_db.lut[rg_db.nexthop[rg_db.l3[l3Idx].rtk_l3.nhStart].rtk_nexthop.nhIdx].rtk_lut.entry.l2UcEntry.mac.octet,pSmac,ETHER_ADDR_LEN)==0) { //20141013LUKE: update software ARP idle time if match!! 
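		/* Source MAC matches the gateway LUT entry referenced by this route's nexthop:
		 * the lookup below refreshes the software ARP idle time, then the existing L2
		 * index is returned without programming anything new. */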
_rtk_rg_softwareArpTableLookUp(l3Idx, sip, &pSwArpList, 1); if(pL2Idx!=NULL)*pL2Idx=rg_db.nexthop[rg_db.l3[l3Idx].rtk_l3.nhStart].rtk_nexthop.nhIdx; return RG_RET_SUCCESS; //default already added } TRACE("non-default gateway host....add MAC without arp_used!!"); //get VID macEntry.vlan_id=rg_db.netif[rg_db.nexthop[rg_db.l3[l3Idx].rtk_l3.nhStart].rtk_nexthop.ifIdx].rtk_netif.vlan_id; } //if((macEntryForceAdd==0) && (arp_valid==1)) return RG_RET_SUCCESS; //Find interface and check VLAN mode //DEBUG("macEntry.vlan_id is %d, l3Idx is %d",macEntry.vlan_id,l3Idx); macEntry.isIVL=rg_db.vlan[macEntry.vlan_id].fidMode==VLAN_FID_IVL?1:0; //fidMode is IVL, isIVL should be 1 macEntry.fid=rg_db.vlan[macEntry.vlan_id].fid; if(macEntry.isIVL) { l2Idx=_rtk_rg_hash_mac_vid_efid(pSmac,macEntry.vlan_id,0); //FIXME;current efid is always 0 } else { ADD_SVL_LUT: macEntry.isIVL=0; macEntry.arp_used=0; count=0; mac_exist=0; arp_valid=0; //forced to add SVL MAC first_invalid=-1; l2Idx=_rtk_rg_hash_mac_fid_efid(pSmac,macEntry.fid,0); //FIXME;current efid is always 0 } l2Idx<<=2; do { search_index = l2Idx+count; //DEBUG("search_idx is %d",search_index); if(rg_db.lut[search_index].valid==0) { if(first_invalid==-1) first_invalid=search_index; //break; //empty count++; //search from next entry continue; } if(rg_db.lut[search_index].rtk_lut.entryType==RTK_LUT_L2UC && (!memcmp(rg_db.lut[search_index].rtk_lut.entry.l2UcEntry.mac.octet,pSmac,ETHER_ADDR_LEN))) { if((macEntry.isIVL==1 && rg_db.lut[search_index].rtk_lut.entry.l2UcEntry.vid==macEntry.vlan_id) || (macEntry.isIVL==0 && rg_db.lut[search_index].rtk_lut.entry.l2UcEntry.fid==macEntry.fid)) { //DEBUG("MAC is exist!"); mac_exist=1; #if defined(CONFIG_RTL9600_SERIES) #else //support lut traffic bit rg_db.lut[search_index].idleSecs = 0; #endif break; } } count++; //search from next entry } while(count < 4); if(count==4) //no enough space for new lut entry { //Check bCAM LUT first, if match, just return. for(search_index=MAX_LUT_HW_TABLE_SIZE-MAX_LUT_BCAM_TABLE_SIZE;search_index<MAX_LUT_HW_TABLE_SIZE;search_index++) { if(rg_db.lut[search_index].valid && rg_db.lut[search_index].rtk_lut.entryType==RTK_LUT_L2UC) { if(memcmp(rg_db.lut[search_index].rtk_lut.entry.l2UcEntry.mac.octet,pSmac,ETHER_ADDR_LEN)==0) { if((macEntry.isIVL==1 && rg_db.lut[search_index].rtk_lut.entry.l2UcEntry.vid==macEntry.vlan_id) || (macEntry.isIVL==0 && rg_db.lut[search_index].rtk_lut.entry.l2UcEntry.fid==macEntry.fid)) { //HIT! 
						//Since 6266's ARP, neighbor and nexthop entries only have 11 bits for l2Idx, they can never point to a bCAM address beyond 2048
#if defined(CONFIG_RTL9600_SERIES) || defined(CONFIG_XDSL_NEW_HWNAT_DRIVER)
						rtk_l2_ucastAddr_t *pL2Addr;
						pL2Addr=&rg_db.lut[search_index].rtk_lut.entry.l2UcEntry;
						TRACE("### delete l2[%d]=%02x:%02x:%02x:%02x:%02x:%02x SPA=%d ###\n",search_index,pL2Addr->mac.octet[0],pL2Addr->mac.octet[1],pL2Addr->mac.octet[2],pL2Addr->mac.octet[3],pL2Addr->mac.octet[4],pL2Addr->mac.octet[5],pL2Addr->port);
						assert_ok(RTK_L2_ADDR_DEL(pL2Addr));
#else
						mac_exist=1;
						//support lut traffic bit
						rg_db.lut[search_index].idleSecs = 0;
#endif
						break;
					}
				}
			}
		}

		//Check the software LUT; if the entry already exists there, return without adding ARP and MAC
		if(!list_empty(&rg_db.softwareLutTableHead[l2Idx>>2]))
		{
			list_for_each_entry_safe(pSoftLut,pSoftLutNext,&rg_db.softwareLutTableHead[l2Idx>>2],lut_list)
			{
				if(memcmp(rg_db.lut[pSoftLut->idx].rtk_lut.entry.l2UcEntry.mac.octet,pSmac,ETHER_ADDR_LEN)==0)
				{
					if(((macEntry.isIVL==1) && rg_db.lut[pSoftLut->idx].rtk_lut.entry.l2UcEntry.vid==macEntry.vlan_id) || ((macEntry.isIVL==0) && rg_db.lut[pSoftLut->idx].rtk_lut.entry.l2UcEntry.fid==macEntry.fid))
					{
						//HIT!
#ifdef CONFIG_RTL_CLIENT_MODE_SUPPORT
						//20150429LUKE: from WWAN, if we are using ARP and NEXTHOP to point to L2, we should add it to the hw lut table!
						if(rg_db.lut[pSoftLut->idx].wlan_device_idx==RG_WWAN_WLAN0_VXD || rg_db.lut[pSoftLut->idx].wlan_device_idx==RG_WWAN_WLAN1_VXD)
						{
							sw_lut_vlan=rg_db.lut[pSoftLut->idx].rtk_lut.entry.l2UcEntry.vid;
							//Delete from the head list
							list_del_init(&pSoftLut->lut_list);
							//mark the lut entry invalid (quicker than zeroing all of its data)
							rg_db.lut[pSoftLut->idx].valid=0;
							//Add it back to the free list
							list_add(&pSoftLut->lut_list,&rg_db.softwareLutFreeListHead);
							MACLN("remove sw lut for WWAN, keep vlan as %d",sw_lut_vlan);
							break;
						}
						else
#endif
						{
							rtk_rg_arp_linkList_t *pSoftwareArpEntry;
							MACLN("this LUT had been added to software!! return without add hwARP...");
							//20160615LUKE: for sw-lut, we need a sw-arp entry for returning lanNetInfo.
_rtk_rg_softwareArpTableLookUp(MAX_L3_SW_TABLE_SIZE,sip,&pSoftwareArpEntry,1); if(pSoftwareArpEntry==NULL){ assert_ok(_rtk_rg_softwareArpTableAdd(MAX_L3_SW_TABLE_SIZE,sip,pSoftLut->idx,(rg_db.lut[pSoftLut->idx].rtk_lut.entry.l2UcEntry.flags&RTK_L2_UCAST_FLAG_STATIC)>0?1:0)); rg_db.lut[pSoftLut->idx].rtk_lut.entry.l2UcEntry.flags|=RTK_L2_UCAST_FLAG_ARP_USED; } if(macEntry.isIVL)goto ADD_SVL_LUT; //add svl lut, too return RG_RET_SUCCESS; } } } } } if(mac_exist==0 && first_invalid==-1) { count=_rtk_rg_layer2GarbageCollection(l2Idx); //check if there is asynchronus between software and hardware table if(count==4) { //Check per port SA learning limit if(rg_db.systemGlobal.sourceAddrLearningLimitNumber[srcPortIdx]>=0 && rg_db.systemGlobal.sourceAddrLearningLimitNumber[srcPortIdx]<=atomic_read(&rg_db.systemGlobal.sourceAddrLearningCount[srcPortIdx])) //no way to learn { TRACE("Port %d SA learning limit is reached(%d)...won't add MAC!!",srcPortIdx,rg_db.systemGlobal.sourceAddrLearningLimitNumber[srcPortIdx]); return RG_RET_SUCCESS; } //Check Wlan limit if set if( #ifdef CONFIG_DUALBAND_CONCURRENT (srcPortIdx==RTK_RG_EXT_PORT0 || (rg_db.systemGlobal.enableSlaveSSIDBind && srcPortIdx==RTK_RG_EXT_PORT1)) #else srcPortIdx==RTK_RG_EXT_PORT0 #endif && srcWlanDevIdx!=FAIL) { #ifdef CONFIG_MASTER_WLAN0_ENABLE if(rg_db.systemGlobal.wlan0SourceAddrLearningLimitNumber[srcWlanDevIdx]>=0 && rg_db.systemGlobal.wlan0SourceAddrLearningLimitNumber[srcWlanDevIdx]<=atomic_read(&rg_db.systemGlobal.wlan0SourceAddrLearningCount[srcWlanDevIdx])) //no way to learn { DEBUG("Wlan dev %d SA learning limit is reached(%d)...won't add MAC!!",srcWlanDevIdx,rg_db.systemGlobal.wlan0SourceAddrLearningLimitNumber[srcWlanDevIdx]); return RG_RET_SUCCESS; } #endif } //Since 6266's ARP, neighbor, nexthop only have 11 bits for l2Idx, they can never pointer to bCAM adress which after 2048 #if defined(CONFIG_RTL9600_SERIES) search_index=_rtk_rg_layer2HashedReplace(l2Idx);//_rtk_rg_layer2LeastRecentlyUsedReplace(l2Idx); //replace the least recently used entry for new entry #else search_index=_rtk_rg_layer2LeastRecentlyUsedReplace(l2Idx); //replace the least recently used entry for new entry #endif if(search_index==RG_RET_ENTRY_NOT_GET) { FIXME("must add software LUT entry for LUT entry full."); return RG_RET_FAIL; } } else search_index=l2Idx+count; } } l2Idx=search_index; //DEBUG("l2Idx is %d, first_invalid is %d, arp_valid %d, addArp %d, arpEntryForceAdd %d, mac_exist %d",l2Idx,first_invalid,arp_valid,addArp,arpEntryForceAdd,mac_exist); if(arp_valid==0 || arpEntryForceAdd==1) { if(mac_exist==0) { //Use the first meet valid empty index if(first_invalid>=0) { l2Idx=first_invalid; } //Check per port SA learning limit if(rg_db.systemGlobal.sourceAddrLearningLimitNumber[srcPortIdx]>=0 && rg_db.systemGlobal.sourceAddrLearningLimitNumber[srcPortIdx]<=atomic_read(&rg_db.systemGlobal.sourceAddrLearningCount[srcPortIdx])) //no way to learn { TRACE("Port %d SA learning limit is reached(%d)...won't add MAC!!",srcPortIdx,rg_db.systemGlobal.sourceAddrLearningLimitNumber[srcPortIdx]); return RG_RET_SUCCESS; } //Check Wlan limit if set if( #ifdef CONFIG_DUALBAND_CONCURRENT (srcPortIdx==RTK_RG_EXT_PORT0 || (rg_db.systemGlobal.enableSlaveSSIDBind && srcPortIdx==RTK_RG_EXT_PORT1)) #else srcPortIdx==RTK_RG_EXT_PORT0 #endif && srcWlanDevIdx!=FAIL) { #ifdef CONFIG_MASTER_WLAN0_ENABLE if(rg_db.systemGlobal.wlan0SourceAddrLearningLimitNumber[srcWlanDevIdx]>=0 && 
rg_db.systemGlobal.wlan0SourceAddrLearningLimitNumber[srcWlanDevIdx]<=atomic_read(&rg_db.systemGlobal.wlan0SourceAddrLearningCount[srcWlanDevIdx])) //no way to learn { DEBUG("Wlan dev %d SA learning limit is reached(%d)...won't add MAC!!",srcWlanDevIdx,rg_db.systemGlobal.wlan0SourceAddrLearningLimitNumber[srcWlanDevIdx]); return RG_RET_SUCCESS; } #endif } memcpy(macEntry.mac.octet,pSmac,ETHER_ADDR_LEN); //Use interface infomation set MAC entry macEntry.port_idx=srcPortIdx; macEntry.static_entry=0; if(cvidForceAdd==1) { macEntry.vlan_id=cvid; } else if(macEntry.isIVL==0) { #if defined(CONFIG_RTL9600_SERIES) || defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) if(srcPortIdx>=RTK_RG_PORT_CPU) { if((rg_db.vlan[macEntry.vlan_id].UntagPortmask.bits[0]&(0x1<<RTK_RG_PORT_CPU))>0) macEntry.vlan_id=0; } else { if((rg_db.vlan[macEntry.vlan_id].UntagPortmask.bits[0]&(0x1<<srcPortIdx))>0) macEntry.vlan_id=0; } #else // support ctag_if if(srcPortIdx>=RTK_RG_PORT_CPU) macEntry.ctag_if=((rg_db.vlan[macEntry.vlan_id].UntagPortmask.bits[0]&(0x1<<RTK_RG_PORT_CPU))>0)?0:1; else macEntry.ctag_if=((rg_db.vlan[macEntry.vlan_id].UntagPortmask.bits[0]&(0x1<<srcPortIdx))>0)?0:1; #endif //20150512LUKE: for lut move from sw to hw, keep the vid without change if(sw_lut_vlan>=0)macEntry.vlan_id=sw_lut_vlan; /*for(i=0;i<rg_db.systemGlobal.lanIntfTotalNum;i++) { if((rg_db.systemGlobal.lanIntfGroup->p_intfInfo->p_lanIntfConf->ip_addr& rg_db.systemGlobal.lanIntfGroup->p_intfInfo->p_lanIntfConf->ip_network_mask)== rg_db.l3[l3Idx].rtk_l3.ipAddr) //20130301-store IP addr after masked { macEntry.vlan_id=rg_db.systemGlobal.lanIntfGroup->p_intfInfo->p_lanIntfConf->intf_vlan_id; //Because DMAC2CVID is enabled, therefore non-zero CVID will always tagged, ignored untag set!! if((rg_db.vlan[macEntry.vlan_id].UntagPortmask.bits[0]&(0x1<<srcPortIdx))>0) dmac2CVID_Untag=1; //FIXME("vid get from lan config=%d",macEntry.vlan_id); break; } } if(macEntry.vlan_id==0) { for(i=0;i<rg_db.systemGlobal.wanIntfTotalNum;i++) { //Bridge WAN won't be compared with if(rg_db.systemGlobal.wanIntfGroup[i].p_intfInfo->p_wanStaticInfo==NULL) continue; if((rg_db.systemGlobal.wanIntfGroup[i].p_intfInfo->p_wanStaticInfo->ip_addr& rg_db.systemGlobal.wanIntfGroup[i].p_intfInfo->p_wanStaticInfo->ip_network_mask)== rg_db.l3[l3Idx].rtk_l3.ipAddr) //20130301-store IP addr after masked { macEntry.vlan_id=rg_db.systemGlobal.wanIntfGroup[i].p_intfInfo->storedInfo.wan_intf.wan_intf_conf.egress_vlan_id; //Because DMAC2CVID is enabled, therefore non-zero CVID will always tagged, ignored untag set!! if(rg_db.systemGlobal.wanIntfGroup[i].p_intfInfo->storedInfo.wan_intf.wan_intf_conf.egress_vlan_tag_on==0) dmac2CVID_Untag=1; //FIXME("vid get from wan config=%d",macEntry.vlan_id); break; } } }*/ } //if(dmac2CVID_Untag==1) //macEntry.vlan_id=0; //FIXME("vid=%d\n",macEntry.vlan_id); TRACE("### add l2[%d]=%02x:%02x:%02x:%02x:%02x:%02x ###\n",l2Idx,pSmac[0],pSmac[1],pSmac[2],pSmac[3],pSmac[4],pSmac[5]); macEntry.arp_used=addArp; ret=rtk_rg_apollo_macEntry_add(&macEntry,&l2Idx); assert_ok(ret); //20150430LUKE: for WWAN learning rg_db.lut[l2Idx].wlan_device_idx=rg_db.pktHdr->wlan_dev_idx; //20161004LUKE: if we need to add MAC here, we should always permit for L34. 
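			//(permit_for_l34_forward marks this LUT entry as allowed for L3/L4 forwarding; when
			// access-WAN limiting is active, ARP_USED and ARP entries are only added for entries
			// with this flag set or for static entries.)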
rg_db.lut[l2Idx].permit_for_l34_forward=1; //add to SA learning count atomic_inc(&rg_db.systemGlobal.sourceAddrLearningCount[srcPortIdx]); if( #ifdef CONFIG_DUALBAND_CONCURRENT (srcPortIdx==RTK_RG_EXT_PORT0 || (rg_db.systemGlobal.enableSlaveSSIDBind && srcPortIdx==RTK_RG_EXT_PORT1)) #else srcPortIdx==RTK_RG_EXT_PORT0 #endif && srcWlanDevIdx!=FAIL) { #ifdef CONFIG_MASTER_WLAN0_ENABLE atomic_inc(&rg_db.systemGlobal.wlan0SourceAddrLearningCount[srcWlanDevIdx]); #endif } } else if((rg_db.lut[l2Idx].rtk_lut.entry.l2UcEntry.flags&RTK_L2_UCAST_FLAG_ARP_USED)==0 && addArp && (rg_db.lut[l2Idx].permit_for_l34_forward||rg_db.systemGlobal.activeLimitFunction==RG_ACCESSWAN_TYPE_UNLIMIT)) //20160929LUKE: only enable ARP_USED when this LUT is permited for L34 or wanAccessLimit is off. { //keep original data, only toggle arp_used to 1 memcpy(macEntry.mac.octet,rg_db.lut[l2Idx].rtk_lut.entry.l2UcEntry.mac.octet,ETHER_ADDR_LEN); //set SVL for lanIntf, patched in 201221203 macEntry.fid=rg_db.lut[l2Idx].rtk_lut.entry.l2UcEntry.fid; macEntry.isIVL=(rg_db.lut[l2Idx].rtk_lut.entry.l2UcEntry.flags&RTK_L2_UCAST_FLAG_IVL)>0?1:0; macEntry.port_idx=rg_db.lut[l2Idx].rtk_lut.entry.l2UcEntry.port; if(rg_db.lut[l2Idx].rtk_lut.entry.l2UcEntry.port==RTK_RG_PORT_CPU) macEntry.port_idx+=rg_db.lut[l2Idx].rtk_lut.entry.l2UcEntry.ext_port; macEntry.vlan_id=rg_db.lut[l2Idx].rtk_lut.entry.l2UcEntry.vid; macEntry.static_entry=(rg_db.lut[l2Idx].rtk_lut.entry.l2UcEntry.flags&RTK_L2_UCAST_FLAG_STATIC)>0?1:0; #if defined(CONFIG_RTL9600_SERIES) || defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) #else // support ctag_if macEntry.ctag_if=(rg_db.lut[l2Idx].rtk_lut.entry.l2UcEntry.flags&RTK_L2_UCAST_FLAG_CTAG_IF)>0?1:0; #endif TRACE("### enable arp_used in l2[%d]=%02x:%02x:%02x:%02x:%02x:%02x at port=%d ###\n",l2Idx,pSmac[0],pSmac[1],pSmac[2],pSmac[3],pSmac[4],pSmac[5],macEntry.port_idx); macEntry.arp_used=1; ret=rtk_rg_apollo_macEntry_add(&macEntry,&l2Idx); assert_ok(ret); } if(addArp==1) //arp will point to IVL one, or SVL { #if defined(CONFIG_RTL9600_SERIES) || defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) //20160615LUKE: remove software ARP for sw-lut if any. rtk_rg_arp_linkList_t *pSoftwareArpEntry=NULL; _rtk_rg_softwareArpTableLookUp(MAX_L3_SW_TABLE_SIZE,sip,&pSoftwareArpEntry,0); if(pSoftwareArpEntry)_rtk_rg_softwareArpTableDel(pSoftwareArpEntry); #endif //DEBUG("add ARP %x! l2Idx is %d",sip,l2Idx); arpEntry.ipv4Addr=sip; arpEntry.macEntryIdx=l2Idx; arpEntry.staticEntry=(rg_db.lut[l2Idx].rtk_lut.entry.l2UcEntry.flags&RTK_L2_UCAST_FLAG_STATIC)>0?1:0; if(pL2Idx!=NULL)*pL2Idx=l2Idx; //return the l2 idx which pointed by arp entry //20160929LUKE: only add ARP when this LUT is permited for L34. //20161004LUKE: check this only when wanAccessLimit is turn on! if(rg_db.systemGlobal.activeLimitFunction==RG_ACCESSWAN_TYPE_UNLIMIT || rg_db.lut[l2Idx].permit_for_l34_forward || rg_db.lut[l2Idx].rtk_lut.entry.l2UcEntry.flags&RTK_L2_UCAST_FLAG_STATIC) assert_ok(rtk_rg_apollo_arpEntry_add(&arpEntry,&arpIdx)); if(macEntry.isIVL) { addArp=0; //arp already added goto ADD_SVL_LUT; //add svl lut, too } } else if(addArp==2) //add arp entry to sw link-list { TRACE("add software ARP %x! l2Idx is %d",sip,l2Idx); if(pL2Idx!=NULL)*pL2Idx=l2Idx; //return the l2 idx which pointed by arp entry //20160929LUKE: only add ARP when this LUT is permited for L34. //20161004LUKE: check this only when wanAccessLimit is turn on! 
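		//i.e. add the software ARP entry when access-WAN limiting is disabled, or this LUT
		//entry is already permitted for L3/L4 forwarding, or the LUT entry is static.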
if(rg_db.systemGlobal.activeLimitFunction==RG_ACCESSWAN_TYPE_UNLIMIT || rg_db.lut[l2Idx].permit_for_l34_forward || rg_db.lut[l2Idx].rtk_lut.entry.l2UcEntry.flags&RTK_L2_UCAST_FLAG_STATIC) assert_ok(_rtk_rg_softwareArpTableAdd(l3Idx,sip,l2Idx,(rg_db.lut[l2Idx].rtk_lut.entry.l2UcEntry.flags&RTK_L2_UCAST_FLAG_STATIC)>0?1:0)); if(macEntry.isIVL) { addArp=0; //sw-arp already added goto ADD_SVL_LUT; //add svl lut, too } } else if(pL2Idx!=NULL)*pL2Idx=l2Idx; //return the l2 idx only return RG_RET_SUCCESS; } return RG_RET_SUCCESS; } void _rtk_rg_neighborDiscoveryTimerFunc(unsigned long netIfIdx) { #ifdef __KERNEL__ #if defined(CONFIG_APOLLO_ROMEDRIVER) || defined(CONFIG_XDSL_ROMEDRIVER) rtk_ipv6_addr_t ipAddr; //rtk_l34_routing_entry_t rtEntry; //rtk_rg_ipv4RoutingEntry_t cb_routEt; //int i; if(netIfIdx>=MAX_NETIF_SW_TABLE_SIZE) return; bzero(ipAddr.ipv6_addr,IPV6_ADDR_LEN); if(rg_db.systemGlobal.intfNeighborDiscovery[netIfIdx].finished==0) { if(rg_db.systemGlobal.interfaceInfo[netIfIdx].valid == 1 && rg_db.systemGlobal.interfaceInfo[netIfIdx].storedInfo.is_wan == 1) memcpy(&ipAddr,&rg_db.systemGlobal.interfaceInfo[netIfIdx].p_wanStaticInfo->ipv6_addr,sizeof(rtk_ipv6_addr_t)); else return; _rtk_rg_NDGeneration(netIfIdx,ipAddr,&rg_db.systemGlobal.intfNeighborDiscovery[netIfIdx]); rg_kernel.neighborDiscoveryTimerCounter[netIfIdx]++; //if(rg_kernel.arpRequestTimerCounter[netIfIdx]<10) if(1) //nerver timeout (always send arp): until finished=1 { mod_timer(&rg_kernel.neighborDiscoveryTimer[netIfIdx], jiffies+(rg_db.systemGlobal.arp_requset_interval_sec*TICKTIME_PERIOD)); } else { //error happen..recovery what we did before rtlglue_printf("the Neighbor Discovery failed when set up WAN interface..\n"); } } #endif #endif } void _rtk_rg_AFTRDiscoveryTimerFunc(unsigned long netIfIdx) { #ifdef __KERNEL__ #if defined(CONFIG_APOLLO_ROMEDRIVER) || defined(CONFIG_XDSL_ROMEDRIVER) int l3Idx; rtk_ipv6_addr_t ipAddr; int realIfIdx=netIfIdx-MAX_NETIF_SW_TABLE_SIZE; if(netIfIdx>=(MAX_NETIF_SW_TABLE_SIZE<<1)) return; bzero(ipAddr.ipv6_addr,IPV6_ADDR_LEN); if(rg_db.systemGlobal.intfNeighborDiscovery[netIfIdx].finished==0) { l3Idx=_rtk_rg_v6L3lookup(rg_db.systemGlobal.intfNeighborDiscovery[netIfIdx].reqIp.ipv6_addr); DEBUG("l3Idx is %d",l3Idx); if(l3Idx>=0) { if(rg_db.v6route[l3Idx].rtk_v6route.type==L34_IPV6_ROUTE_TYPE_GLOBAL) { DEBUG("set AFTR from nexthop[%d]",rg_db.v6route[l3Idx].rtk_v6route.nhOrIfidIdx); _rtk_rg_internal_IPV6AFTRMACSetup(rg_db.systemGlobal.intfNeighborDiscovery[netIfIdx].reqIp.ipv6_addr, rg_db.nexthop[rg_db.v6route[l3Idx].rtk_v6route.nhOrIfidIdx].rtk_nexthop.nhIdx); return; } else if(rg_db.v6route[l3Idx].rtk_v6route.type==L34_IPV6_ROUTE_TYPE_LOCAL) { if(rg_db.systemGlobal.interfaceInfo[realIfIdx].valid && rg_db.systemGlobal.interfaceInfo[realIfIdx].storedInfo.is_wan) memcpy(&ipAddr,&rg_db.systemGlobal.interfaceInfo[realIfIdx].storedInfo.wan_intf.dslite_info.rtk_dslite.ipB4,sizeof(rtk_ipv6_addr_t)); else return; _rtk_rg_NDGeneration(realIfIdx,ipAddr,&rg_db.systemGlobal.intfNeighborDiscovery[netIfIdx]); rg_kernel.neighborDiscoveryTimerCounter[netIfIdx]++; } } //if(rg_kernel.arpRequestTimerCounter[netIfIdx]<10) if(1) //nerver timeout (always send arp): until finished=1 { mod_timer(&rg_kernel.neighborDiscoveryTimer[netIfIdx], jiffies+(rg_db.systemGlobal.arp_requset_interval_sec*TICKTIME_PERIOD)); } else { //error happen..recovery what we did before rtlglue_printf("the Neighbor Discovery failed when set up WAN interface..\n"); } } #endif #endif } uint8 _rtk_rg_CompareIFID(uint8* dip, uint64 interfaceid) 
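/*
 * Compare the interface identifier (low 64 bits) of the IPv6 address in dip[0..7] against
 * the packed 64-bit interfaceid, where dip[0] corresponds to the most significant byte.
 * Returns 1 on a full match, 0 otherwise.
 */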
{ uint8 res = 0; if(dip[0] != ((interfaceid>>56)&0xff)) { return res; } else if(dip[1] != ((interfaceid>>48)&0xff)) { return res; } else if(dip[2] != ((interfaceid>>40)&0xff)) { return res; } else if(dip[3] != ((interfaceid>>32)&0xff)) { return res; } else if(dip[4] != ((interfaceid>>24)&0xff)) { return res; } else if(dip[5] != ((interfaceid>>16)&0xff)) { return res; } else if(dip[6] != ((interfaceid>>8)&0xff)) { return res; } else if(dip[7] != (interfaceid&0xff)) { return res; } return 1; } rtk_rg_successFailReturn_t _rtk_rg_neighborAndMacEntryAdd(unsigned char *sip,int sipOrDipL3Idx,uint8 *pSmac,int srcPortIdx,int srcWlanDevIdx,int *pNeighborOrMacIdx) { rtk_rg_neighborEntry_t neighborEntry; rtk_rg_macEntry_t macEntry; int l2Idx,ret,i; int l3Idx; int count=0; int neighbor_valid_idx; int mac_exist=0,search_index,addNeighbor=1,first_invalid=-1,sw_lut_vlan=-1;//,dmac2CVID_Untag=0; #if defined(CONFIG_RTL9602C_SERIES) || defined(CONFIG_RG_FLOW_BASED_PLATFORM) int dslite=0; #endif rtk_rg_lut_linkList_t *pSoftLut,*pSoftLutNext; //int prefix; //unsigned short idx,bitMask; //init memset(&macEntry,0,sizeof(rtk_rg_macEntry_t)); //20140829LUKE: if SIP and DIP are both fe80::/64, we treat it as normal global ip address! //20140826LUKE: handle link-local address if(*((unsigned int *)sip)==0xfe800000 && *((unsigned int *)(sip+4))==0x0 && _rtk_rg_v6L3lookup(sip)!=sipOrDipL3Idx) { //DEBUG("Link-local address.. add macEntry only!!%d",sipOrDipL3Idx); l3Idx=sipOrDipL3Idx; //use DIP to find VLAN if(l3Idx==-1) //look up fail return RG_RET_FAIL; //20140904LUKE: for STATIC ROUTE, we add one TRAP before LOCAL route, so we can use it to find out interface! if(rg_db.v6route[l3Idx].rtk_v6route.type==L34_IPV6_ROUTE_TYPE_LOCAL || rg_db.v6route[l3Idx].rtk_v6route.type==L34_IPV6_ROUTE_TYPE_TRAP) macEntry.vlan_id=rg_db.netif[rg_db.v6route[l3Idx].rtk_v6route.nhOrIfidIdx].rtk_netif.vlan_id; else if(rg_db.v6route[l3Idx].rtk_v6route.type==L34_IPV6_ROUTE_TYPE_GLOBAL) macEntry.vlan_id=rg_db.netif[rg_db.nexthop[rg_db.v6route[l3Idx].rtk_v6route.nhOrIfidIdx].rtk_nexthop.ifIdx].rtk_netif.vlan_id; else return RG_RET_FAIL; addNeighbor=0; goto LINK_LOCAL; } else { for(i=0;i<rg_db.systemGlobal.wanIntfTotalNum;i++) { if(rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->wan_type==RTK_RG_DSLITE && memcmp(sip,rg_db.systemGlobal.wanIntfGroup[i].p_intfInfo->storedInfo.wan_intf.dslite_info.rtk_dslite.ipAftr.ipv6_addr,IPV6_ADDR_LEN)==0) { DEBUG("Match DSLITE AFTR ipaddr!! 
add mac without neighbor..."); macEntry.vlan_id=rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_id; l3Idx=sipOrDipL3Idx; addNeighbor=0; #if defined(CONFIG_RTL9602C_SERIES) || defined(CONFIG_RG_FLOW_BASED_PLATFORM) dslite=1; #endif goto LINK_LOCAL; } } } if(sipOrDipL3Idx==FAIL) l3Idx=_rtk_rg_v6L3lookup(sip); else l3Idx=sipOrDipL3Idx; if(l3Idx==-1) //look up fail return RG_RET_FAIL; if(rg_db.v6route[l3Idx].rtk_v6route.type==L34_IPV6_ROUTE_TYPE_LOCAL) { ret=_rtk_rg_skipNeighborLearningOrNot(l3Idx,sip,srcPortIdx, pSmac); if(ret!=RG_RET_SUCCESS)return ret; //Find interface and check VLAN mode //memset(&macEntry,0,sizeof(rtk_rg_macEntry_t)); macEntry.vlan_id=rg_db.netif[rg_db.v6route[l3Idx].rtk_v6route.nhOrIfidIdx].rtk_netif.vlan_id; LINK_LOCAL: macEntry.isIVL=rg_db.vlan[macEntry.vlan_id].fidMode==VLAN_FID_IVL?1:0; //fidMode is IVL, isIVL should be 1 macEntry.fid=rg_db.vlan[macEntry.vlan_id].fid; if(macEntry.isIVL) { l2Idx=_rtk_rg_hash_mac_vid_efid(pSmac,macEntry.vlan_id,0); //FIXME;current efid is always 0 } else { ADD_SVL_LUT: macEntry.isIVL=0; count=0; mac_exist=0; first_invalid=-1; l2Idx=_rtk_rg_hash_mac_fid_efid(pSmac,macEntry.fid,0); //FIXME;current efid is always 0 } l2Idx<<=2; do { search_index = l2Idx+count; //rtlglue_printf("search_idx is %d\n",search_index); if(rg_db.lut[search_index].valid==0) { if(first_invalid==-1) first_invalid=search_index; //break; //empty count++; //search from next entry continue; } if(rg_db.lut[search_index].rtk_lut.entryType==RTK_LUT_L2UC && (!memcmp(rg_db.lut[search_index].rtk_lut.entry.l2UcEntry.mac.octet,pSmac,ETHER_ADDR_LEN))) { if((macEntry.isIVL==1 && rg_db.lut[search_index].rtk_lut.entry.l2UcEntry.vid==macEntry.vlan_id) || (macEntry.isIVL==0 && rg_db.lut[search_index].rtk_lut.entry.l2UcEntry.fid==macEntry.fid)) { mac_exist=1; #if defined(CONFIG_RTL9600_SERIES) #else //support lut traffic bit rg_db.lut[search_index].idleSecs = 0; #endif break; } } count++; //search from next entry } while(count < 4); if(count==4) //no enough space for new lut entry { //Check bCAM LUT first, if match, just return. for(search_index=MAX_LUT_HW_TABLE_SIZE-MAX_LUT_BCAM_TABLE_SIZE;search_index<MAX_LUT_HW_TABLE_SIZE;search_index++) { if(rg_db.lut[search_index].valid && rg_db.lut[search_index].rtk_lut.entryType==RTK_LUT_L2UC) { if(memcmp(rg_db.lut[search_index].rtk_lut.entry.l2UcEntry.mac.octet,pSmac,ETHER_ADDR_LEN)==0) { if((macEntry.isIVL==1 && rg_db.lut[search_index].rtk_lut.entry.l2UcEntry.vid==macEntry.vlan_id) || (macEntry.isIVL==0 && rg_db.lut[search_index].rtk_lut.entry.l2UcEntry.fid==macEntry.fid)) { //HIT! 
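						/* Same bCAM handling as the IPv4 path in _rtk_rg_arpAndMacEntryAdd() above. */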
//Since 6266's ARP, neighbor, nexthop only have 11 bits for l2Idx, they can never pointer to bCAM adress which after 2048 #if defined(CONFIG_RTL9600_SERIES) || defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) { rtk_l2_ucastAddr_t *pL2Addr; pL2Addr=&rg_db.lut[search_index].rtk_lut.entry.l2UcEntry; TRACE("### delete l2[%d]=%02x:%02x:%02x:%02x:%02x:%02x SPA=%d ###\n",search_index,pL2Addr->mac.octet[0],pL2Addr->mac.octet[1],pL2Addr->mac.octet[2],pL2Addr->mac.octet[3],pL2Addr->mac.octet[4],pL2Addr->mac.octet[5],pL2Addr->port); assert_ok(RTK_L2_ADDR_DEL(pL2Addr)); } #else mac_exist=1; //support lut traffic bit rg_db.lut[search_index].idleSecs = 0; #endif break; } } } } //Check software LUT, if exist, return without add ARP and MAC if(!list_empty(&rg_db.softwareLutTableHead[l2Idx>>2])) { list_for_each_entry_safe(pSoftLut,pSoftLutNext,&rg_db.softwareLutTableHead[l2Idx>>2],lut_list) { if(memcmp(rg_db.lut[pSoftLut->idx].rtk_lut.entry.l2UcEntry.mac.octet,pSmac,ETHER_ADDR_LEN)==0) { if(((macEntry.isIVL==1) && rg_db.lut[pSoftLut->idx].rtk_lut.entry.l2UcEntry.vid==macEntry.vlan_id) || ((macEntry.isIVL==0) && rg_db.lut[pSoftLut->idx].rtk_lut.entry.l2UcEntry.fid==macEntry.fid)) { //HIT! //20150429LUKE: from WWAN, if we are using ARP and NEXTHOP to point to L2, we should add it to hw lut table! if(rg_db.lut[pSoftLut->idx].wlan_device_idx==RG_WWAN_WLAN0_VXD || rg_db.lut[pSoftLut->idx].wlan_device_idx==RG_WWAN_WLAN1_VXD) { //Delete from head list sw_lut_vlan=rg_db.lut[pSoftLut->idx].rtk_lut.entry.l2UcEntry.vid; list_del_init(&pSoftLut->lut_list); //set lut invalid(quicker than set all data to zero) rg_db.lut[pSoftLut->idx].valid=0; //Add back to free list list_add(&pSoftLut->lut_list,&rg_db.softwareLutFreeListHead); MACLN("remove sw lut for WWAN, keep vlan as %d",sw_lut_vlan); break; } else { MACLN("this LUT had been added to software!! 
return without add Neighbor..."); return RG_RET_SUCCESS; } } } } } if(mac_exist==0 && first_invalid==-1) { count=_rtk_rg_layer2GarbageCollection(l2Idx); //check if there is asynchronus between software and hardware table if(count==4) { //Check per port SA learning limit if(rg_db.systemGlobal.sourceAddrLearningLimitNumber[srcPortIdx]>=0 && rg_db.systemGlobal.sourceAddrLearningLimitNumber[srcPortIdx]<=atomic_read(&rg_db.systemGlobal.sourceAddrLearningCount[srcPortIdx])) //no way to learn { TRACE("Port %d SA learning limit is reached(%d)...won't add MAC!!",srcPortIdx,rg_db.systemGlobal.sourceAddrLearningLimitNumber[srcPortIdx]); return RG_RET_SUCCESS; } //Check Wlan limit if set if( #ifdef CONFIG_DUALBAND_CONCURRENT (srcPortIdx==RTK_RG_EXT_PORT0 || (rg_db.systemGlobal.enableSlaveSSIDBind && srcPortIdx==RTK_RG_EXT_PORT1)) #else srcPortIdx==RTK_RG_EXT_PORT0 #endif && srcWlanDevIdx!=FAIL) { #ifdef CONFIG_MASTER_WLAN0_ENABLE if(rg_db.systemGlobal.wlan0SourceAddrLearningLimitNumber[srcWlanDevIdx]>=0 && rg_db.systemGlobal.wlan0SourceAddrLearningLimitNumber[srcWlanDevIdx]<=atomic_read(&rg_db.systemGlobal.wlan0SourceAddrLearningCount[srcWlanDevIdx])) //no way to learn { DEBUG("Wlan dev %d SA learning limit is reached(%d)...won't add MAC!!",srcWlanDevIdx,rg_db.systemGlobal.wlan0SourceAddrLearningLimitNumber[srcWlanDevIdx]); return RG_RET_SUCCESS; } #endif } //Since 6266's ARP, neighbor, nexthop only have 11 bits for l2Idx, they can never pointer to bCAM adress which after 2048 #if defined(CONFIG_RTL9600_SERIES) search_index=_rtk_rg_layer2HashedReplace(l2Idx);//_rtk_rg_layer2LeastRecentlyUsedReplace(l2Idx); //replace the least recently used entry for new entry #else search_index=_rtk_rg_layer2LeastRecentlyUsedReplace(l2Idx); //replace the least recently used entry for new entry #endif if(search_index==RG_RET_ENTRY_NOT_GET) { FIXME("must add software LUT entry for LUT entry full."); return RG_RET_FAIL; } } else search_index=l2Idx+count; } } l2Idx=search_index; if(mac_exist==0) { //Use the first meet valid empty index if(first_invalid>=0) { l2Idx=first_invalid; } //Check per port SA learning limit if(rg_db.systemGlobal.sourceAddrLearningLimitNumber[srcPortIdx]>=0 && rg_db.systemGlobal.sourceAddrLearningLimitNumber[srcPortIdx]<=atomic_read(&rg_db.systemGlobal.sourceAddrLearningCount[srcPortIdx])) //no way to learn { TRACE("Port %d SA learning limit is reached(%d)...won't add MAC!!",srcPortIdx,rg_db.systemGlobal.sourceAddrLearningLimitNumber[srcPortIdx]); return RG_RET_SUCCESS; } //Check Wlan limit if set if( #ifdef CONFIG_DUALBAND_CONCURRENT (srcPortIdx==RTK_RG_EXT_PORT0 || (rg_db.systemGlobal.enableSlaveSSIDBind && srcPortIdx==RTK_RG_EXT_PORT1)) #else srcPortIdx==RTK_RG_EXT_PORT0 #endif && srcWlanDevIdx!=FAIL) { #ifdef CONFIG_MASTER_WLAN0_ENABLE if(rg_db.systemGlobal.wlan0SourceAddrLearningLimitNumber[srcWlanDevIdx]>=0 && rg_db.systemGlobal.wlan0SourceAddrLearningLimitNumber[srcWlanDevIdx]<=atomic_read(&rg_db.systemGlobal.wlan0SourceAddrLearningCount[srcWlanDevIdx])) //no way to learn { TRACE("Wlan dev %d SA learning limit is reached(%d)...won't add MAC!!",srcWlanDevIdx,rg_db.systemGlobal.wlan0SourceAddrLearningLimitNumber[srcWlanDevIdx]); return RG_RET_SUCCESS; } #endif } memcpy(macEntry.mac.octet,pSmac,ETHER_ADDR_LEN); //Use interface infomation set MAC entry macEntry.port_idx=srcPortIdx; macEntry.static_entry=0; /*for(i=0;i<rg_db.systemGlobal.lanIntfTotalNum;i++) { prefix=rg_db.v6route[l3Idx].rtk_v6route.ipv6PrefixLen; 
if(prefix==128&&memcmp(rg_db.v6route[l3Idx].rtk_v6route.ipv6Addr.ipv6_addr,rg_db.systemGlobal.lanIntfGroup->p_intfInfo->p_lanIntfConf->ipv6_addr.ipv6_addr,IPV6_ADDR_LEN)) { //host route macEntry.vlan_id=rg_db.systemGlobal.lanIntfGroup->p_intfInfo->p_lanIntfConf->intf_vlan_id; DEBUG("host route, vid get from lan config=%d",macEntry.vlan_id); if((rg_db.vlan[macEntry.vlan_id].UntagPortmask.bits[0]&(0x1<<srcPortIdx))>0) dmac2CVID_Untag=1; break; } idx=(prefix>>3)&0xff; if((prefix&0x7)==0) bitMask=0xff; else bitMask=(0xff<<(8-(prefix&0x7)))&0xff; //DEBUG("prefix = %d, idx = %d, bitMask =%02x",prefix,idx,bitMask); //DEBUG("ip=%x mask=%d iplookup=%x\n",rg_db.l3[i].rtk_l3.ipAddr,rg_db.l3[i].rtk_l3.ipMask,ip); if(memcmp(rg_db.v6route[l3Idx].rtk_v6route.ipv6Addr.ipv6_addr,rg_db.systemGlobal.lanIntfGroup->p_intfInfo->p_lanIntfConf->ipv6_addr.ipv6_addr,(idx-1))==0 && ((rg_db.v6route[l3Idx].rtk_v6route.ipv6Addr.ipv6_addr[idx]&bitMask)==(rg_db.systemGlobal.lanIntfGroup->p_intfInfo->p_lanIntfConf->ipv6_addr.ipv6_addr[idx]&bitMask))) { macEntry.vlan_id=rg_db.systemGlobal.lanIntfGroup->p_intfInfo->p_lanIntfConf->intf_vlan_id; DEBUG("vid get from lan config=%d",macEntry.vlan_id); if((rg_db.vlan[macEntry.vlan_id].UntagPortmask.bits[0]&(0x1<<srcPortIdx))>0) dmac2CVID_Untag=1; break; } } if(macEntry.vlan_id==0) { for(i=0;i<rg_db.systemGlobal.wanIntfTotalNum;i++) { //Bridge WAN won't be compared with if(rg_db.systemGlobal.wanIntfGroup[i].p_intfInfo->p_wanStaticInfo==NULL) continue; prefix=rg_db.v6route[l3Idx].rtk_v6route.ipv6PrefixLen; if(prefix==128&&memcmp(rg_db.v6route[l3Idx].rtk_v6route.ipv6Addr.ipv6_addr,rg_db.systemGlobal.wanIntfGroup[i].p_intfInfo->p_wanStaticInfo->ipv6_addr.ipv6_addr,IPV6_ADDR_LEN)) { //host route macEntry.vlan_id=rg_db.systemGlobal.wanIntfGroup[i].p_intfInfo->storedInfo.wan_intf.wan_intf_conf.egress_vlan_id; DEBUG("host route, vid get from wan config=%d",macEntry.vlan_id); if(rg_db.systemGlobal.wanIntfGroup[i].p_intfInfo->storedInfo.wan_intf.wan_intf_conf.egress_vlan_tag_on==0) dmac2CVID_Untag=1; break; } idx=(prefix>>3)&0xff; if((prefix&0x7)==0) bitMask=0xff; else bitMask=(0xff<<(8-(prefix&0x7)))&0xff; //DEBUG("prefix = %d, idx = %d, bitMask =%02x",prefix,idx,bitMask); //DEBUG("ip=%x mask=%d iplookup=%x\n",rg_db.l3[i].rtk_l3.ipAddr,rg_db.l3[i].rtk_l3.ipMask,ip); if(memcmp(rg_db.v6route[l3Idx].rtk_v6route.ipv6Addr.ipv6_addr,rg_db.systemGlobal.wanIntfGroup[i].p_intfInfo->p_wanStaticInfo->ipv6_addr.ipv6_addr,(idx-1))==0 && ((rg_db.v6route[l3Idx].rtk_v6route.ipv6Addr.ipv6_addr[idx]&bitMask)==(rg_db.systemGlobal.wanIntfGroup[i].p_intfInfo->p_wanStaticInfo->ipv6_addr.ipv6_addr[idx]&bitMask))) { macEntry.vlan_id=rg_db.systemGlobal.wanIntfGroup[i].p_intfInfo->storedInfo.wan_intf.wan_intf_conf.egress_vlan_id; DEBUG("vid get from wan config=%d",macEntry.vlan_id); if(rg_db.systemGlobal.wanIntfGroup[i].p_intfInfo->storedInfo.wan_intf.wan_intf_conf.egress_vlan_tag_on==0) dmac2CVID_Untag=1; break; } } } if(dmac2CVID_Untag==1) macEntry.vlan_id=0;*/ if(macEntry.isIVL==0) { #if defined(CONFIG_RTL9600_SERIES) || defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) if(srcPortIdx>=RTK_RG_PORT_CPU) { if((rg_db.vlan[macEntry.vlan_id].UntagPortmask.bits[0]&(0x1<<RTK_RG_PORT_CPU))>0) macEntry.vlan_id=0; } else { if((rg_db.vlan[macEntry.vlan_id].UntagPortmask.bits[0]&(0x1<<srcPortIdx))>0) macEntry.vlan_id=0; } #else // support ctag_if if(srcPortIdx>=RTK_RG_PORT_CPU) macEntry.ctag_if=((rg_db.vlan[macEntry.vlan_id].UntagPortmask.bits[0]&(0x1<<RTK_RG_PORT_CPU))>0)?0:1; else 
macEntry.ctag_if=((rg_db.vlan[macEntry.vlan_id].UntagPortmask.bits[0]&(0x1<<srcPortIdx))>0)?0:1; #endif //20150512LUKE: for lut move from sw to hw, keep the vid without change if(sw_lut_vlan>=0)macEntry.vlan_id=sw_lut_vlan; } //FIXME("vid=%d\n",macEntry.vlan_id); TRACE("### add l2[%d]=%02x:%02x:%02x:%02x:%02x:%02x ###\n",l2Idx,pSmac[0],pSmac[1],pSmac[2],pSmac[3],pSmac[4],pSmac[5]); if(addNeighbor #if defined(CONFIG_RTL9602C_SERIES) || defined(CONFIG_RG_FLOW_BASED_PLATFORM) ||dslite #endif )macEntry.arp_used=1; ret=rtk_rg_apollo_macEntry_add(&macEntry,&l2Idx); assert_ok(ret); //20150430LUKE: for WWAN learning rg_db.lut[l2Idx].wlan_device_idx=rg_db.pktHdr->wlan_dev_idx; //20161004LUKE: if we need to add MAC here, we should always permit for L34. rg_db.lut[l2Idx].permit_for_l34_forward=1; //add to SA learning count atomic_inc(&rg_db.systemGlobal.sourceAddrLearningCount[srcPortIdx]); if( #ifdef CONFIG_DUALBAND_CONCURRENT (srcPortIdx==RTK_RG_EXT_PORT0 || (rg_db.systemGlobal.enableSlaveSSIDBind && srcPortIdx==RTK_RG_EXT_PORT1)) #else srcPortIdx==RTK_RG_EXT_PORT0 #endif && srcWlanDevIdx!=FAIL) { #ifdef CONFIG_MASTER_WLAN0_ENABLE atomic_inc(&rg_db.systemGlobal.wlan0SourceAddrLearningCount[srcWlanDevIdx]); #endif } } else if((rg_db.lut[l2Idx].rtk_lut.entry.l2UcEntry.flags&RTK_L2_UCAST_FLAG_ARP_USED)==0 && ((addNeighbor && (rg_db.lut[l2Idx].permit_for_l34_forward||rg_db.systemGlobal.activeLimitFunction==RG_ACCESSWAN_TYPE_UNLIMIT)) //20160929LUKE: only enable ARP_USED when this LUT is permited for L34 or wanAccessLimit is off. #if defined(CONFIG_RTL9602C_SERIES) || defined(CONFIG_RG_FLOW_BASED_PLATFORM) ||dslite #endif )) //Check if this LUT entry has not yet enable arp_used { //keep original data, only toggle arp_used to 1 memcpy(macEntry.mac.octet,rg_db.lut[l2Idx].rtk_lut.entry.l2UcEntry.mac.octet,ETHER_ADDR_LEN); //set SVL for lanIntf, patched in 201221203 macEntry.fid=rg_db.lut[l2Idx].rtk_lut.entry.l2UcEntry.fid; macEntry.isIVL=(rg_db.lut[l2Idx].rtk_lut.entry.l2UcEntry.flags&RTK_L2_UCAST_FLAG_IVL)>0?1:0; macEntry.port_idx=rg_db.lut[l2Idx].rtk_lut.entry.l2UcEntry.port; if(macEntry.port_idx==RTK_RG_PORT_CPU) macEntry.port_idx+=rg_db.lut[l2Idx].rtk_lut.entry.l2UcEntry.ext_port; macEntry.vlan_id=rg_db.lut[l2Idx].rtk_lut.entry.l2UcEntry.vid; macEntry.static_entry=(rg_db.lut[l2Idx].rtk_lut.entry.l2UcEntry.flags&RTK_L2_UCAST_FLAG_STATIC)>0?1:0; #if defined(CONFIG_RTL9600_SERIES) || defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) #else // support ctag_if macEntry.ctag_if=(rg_db.lut[l2Idx].rtk_lut.entry.l2UcEntry.flags&RTK_L2_UCAST_FLAG_CTAG_IF)>0?1:0; #endif TRACE("### enable arp_used in l2[%d]=%02x:%02x:%02x:%02x:%02x:%02x ###\n",l2Idx,pSmac[0],pSmac[1],pSmac[2],pSmac[3],pSmac[4],pSmac[5]); macEntry.arp_used=1; ret=rtk_rg_apollo_macEntry_add(&macEntry,&l2Idx); assert_ok(ret); if(macEntry.isIVL)goto ADD_SVL_LUT; //add ARP_USED to svl lut, too } *pNeighborOrMacIdx=l2Idx; if(addNeighbor) { #if 0 hashIdx=_rtk_rg_IPv6NeighborHash(sip,l3Idx); hashIdx<<=3; for(i=0;i<8;i++) //8-way hash { //check for matching neighbor_valid_idx = hashIdx+i; if((rg_db.v6neighbor[neighbor_valid_idx].rtk_v6neighbor.valid==1) && (rg_db.v6neighbor[neighbor_valid_idx].rtk_v6neighbor.ipv6RouteIdx==l3Idx) && (_rtk_rg_CompareIFID(sip+8, rg_db.v6neighbor[neighbor_valid_idx].rtk_v6neighbor.ipv6Ifid)==1)) break; //added before if(rg_db.v6neighbor[neighbor_valid_idx].rtk_v6neighbor.valid==0) { bzero(&neighborEntry,sizeof(rtk_rg_neighborEntry_t)); neighborEntry.l2Idx=l2Idx; neighborEntry.matchRouteIdx=l3Idx; memcpy(neighborEntry.interfaceId,sip+8,8); 
//interface ID is the 64~127bits of IPv6 ip address neighborEntry.valid=1; neighborEntry.staticEntry=0; ret=rtk_rg_neighborEntry_add(&neighborEntry,&neighbor_valid_idx); assert_ok(ret); *pNeighborIdx=neighbor_valid_idx; break; } } if(i==8) //no enough space for new neighbor entry return RG_RET_FAIL; if(macEntry.isIVL) { addNeighbor=0; //already added goto ADD_SVL_LUT; } #else neighborEntry.l2Idx=l2Idx; neighborEntry.matchRouteIdx=l3Idx; memcpy(neighborEntry.interfaceId,sip+8,8); neighborEntry.valid=1; neighborEntry.staticEntry=(rg_db.lut[l2Idx].rtk_lut.entry.l2UcEntry.flags&RTK_L2_UCAST_FLAG_STATIC)>0?1:0; //20160929LUKE: only add ARP when this LUT is permited for L34. //20161004LUKE: check this only when wanAccessLimit is turn on! if(rg_db.systemGlobal.activeLimitFunction==RG_ACCESSWAN_TYPE_UNLIMIT || rg_db.lut[l2Idx].permit_for_l34_forward || rg_db.lut[l2Idx].rtk_lut.entry.l2UcEntry.flags&RTK_L2_UCAST_FLAG_STATIC) assert_ok(rtk_rg_apollo_neighborEntry_add(&neighborEntry,&neighbor_valid_idx)); //20140904LUKE: if we are link-local, we always return MAC index, otherwise we return neighbor Idx. if(*((unsigned int *)sip)!=0xfe800000 || *((unsigned int *)(sip+4))!=0x0)*pNeighborOrMacIdx=neighbor_valid_idx; #endif } } return RG_RET_SUCCESS; } rtk_rg_ip_updated_t _rtk_rg_wanStaticInfoReaddCheck(int orig_wan_intf_idx, rtk_rg_ipStaticInfo_t *new_static_info) { rtk_rg_ip_updated_t ip_updated=NO_IP_UPDATED; //if any of these information changed, tag IPv4_changed if((!(rg_db.systemGlobal.interfaceInfo[orig_wan_intf_idx].p_wanStaticInfo->ip_version==IPVER_V6ONLY&&new_static_info->ip_version==IPVER_V6ONLY))&&( rg_db.systemGlobal.interfaceInfo[orig_wan_intf_idx].p_wanStaticInfo->static_route_with_arp != new_static_info->static_route_with_arp || rg_db.systemGlobal.interfaceInfo[orig_wan_intf_idx].p_wanStaticInfo->napt_enable != new_static_info->napt_enable || rg_db.systemGlobal.interfaceInfo[orig_wan_intf_idx].p_wanStaticInfo->ip_addr != new_static_info->ip_addr || rg_db.systemGlobal.interfaceInfo[orig_wan_intf_idx].p_wanStaticInfo->ip_network_mask != new_static_info->ip_network_mask || rg_db.systemGlobal.interfaceInfo[orig_wan_intf_idx].p_wanStaticInfo->ipv4_default_gateway_on != new_static_info->ipv4_default_gateway_on || rg_db.systemGlobal.interfaceInfo[orig_wan_intf_idx].p_wanStaticInfo->gateway_ipv4_addr != new_static_info->gateway_ipv4_addr || rg_db.systemGlobal.interfaceInfo[orig_wan_intf_idx].p_wanStaticInfo->gw_mac_auto_learn_for_ipv4 != new_static_info->gw_mac_auto_learn_for_ipv4 || (rg_db.systemGlobal.interfaceInfo[orig_wan_intf_idx].p_wanStaticInfo->gw_mac_auto_learn_for_ipv4==0 && memcmp(rg_db.systemGlobal.interfaceInfo[orig_wan_intf_idx].p_wanStaticInfo->gateway_mac_addr_for_ipv4.octet,new_static_info->gateway_mac_addr_for_ipv4.octet,ETHER_ADDR_LEN)))) //check mac only when the autoLearn is off ip_updated=ONLY_IPV4_UPDATED; //if any of these information changed, tag IPv6_changed if((!(rg_db.systemGlobal.interfaceInfo[orig_wan_intf_idx].p_wanStaticInfo->ip_version==IPVER_V4ONLY&&new_static_info->ip_version==IPVER_V4ONLY))&&( memcmp(rg_db.systemGlobal.interfaceInfo[orig_wan_intf_idx].p_wanStaticInfo->ipv6_addr.ipv6_addr,new_static_info->ipv6_addr.ipv6_addr,IPV6_ADDR_LEN) || rg_db.systemGlobal.interfaceInfo[orig_wan_intf_idx].p_wanStaticInfo->ipv6_mask_length != new_static_info->ipv6_mask_length || rg_db.systemGlobal.interfaceInfo[orig_wan_intf_idx].p_wanStaticInfo->ipv6_default_gateway_on != new_static_info->ipv6_default_gateway_on || 
memcmp(rg_db.systemGlobal.interfaceInfo[orig_wan_intf_idx].p_wanStaticInfo->gateway_ipv6_addr.ipv6_addr,new_static_info->gateway_ipv6_addr.ipv6_addr,IPV6_ADDR_LEN) || rg_db.systemGlobal.interfaceInfo[orig_wan_intf_idx].p_wanStaticInfo->gw_mac_auto_learn_for_ipv6 != new_static_info->gw_mac_auto_learn_for_ipv6 || (rg_db.systemGlobal.interfaceInfo[orig_wan_intf_idx].p_wanStaticInfo->gw_mac_auto_learn_for_ipv6==0 && memcmp(rg_db.systemGlobal.interfaceInfo[orig_wan_intf_idx].p_wanStaticInfo->gateway_mac_addr_for_ipv6.octet,new_static_info->gateway_mac_addr_for_ipv6.octet,ETHER_ADDR_LEN)))) //check mac only when the autoLearn is off { if(ip_updated==ONLY_IPV4_UPDATED) ip_updated=IPV4_IPV6_UPDATED; else ip_updated=ONLY_IPV6_UPDATED; } return ip_updated; } int32 _rtk_rg_internal_wanSet(int wan_intf_idx, rtk_rg_ipStaticInfo_t *hw_static_info) { int ret,i,rtidx=-1,static_rtidx=-1,rtv6idx=-1,static_rtv6idx=-1,errorno,routingAdded=0,v6RoutingAdd=0,arpMissed=0,neighborMissed=0,ipv4Enable=0,ipv6Enable=0; int neighbor_valid_idx,l2Idx;//,subnet_same_idx; unsigned int input_ipmsk,wan_set_mask; rtk_rg_ip_updated_t ip_update_state=IPV4_IPV6_UPDATED; // rtk_l34_netif_entry_t intfEt; rtk_l34_routing_entry_t rtEntry; rtk_ipv6Routing_entry_t rtv6Entry; rtk_rg_ipv4RoutingEntry_t cb_routEt; rtk_rg_ipv6RoutingEntry_t cb_routv6Et; rtk_rg_macEntry_t macEntry; rtk_mac_t zeroMAC={{0}}; rtk_ipv6_addr_t zeroIPv6={{0}}; #if 0 rtk_rg_macEntry_t defaultGatewayMAC; int defaultGatewayMAC_idx; rtk_rg_arpEntry_t defaultGatewayARP; #endif int gateway_NeighborOrMac_idx=FAIL; //unsigned short ipv6HashIdx; //rtk_rg_wanIntfConf_t wanConfiguration; rtk_wanType_entry_t wantEt; rtk_rg_neighborInfo_t neighborInfo; #if defined(CONFIG_RTL9600_SERIES) || defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) int arp_valid_idx; rtk_rg_routing_arpInfo_t newAddingEntry; #endif rtk_l34_ext_intip_entry_t extipEntry; rtk_l34_nexthop_entry_t nxpEt; rtk_rg_arp_linkList_t *pSoftwareArpEntry; rtk_rg_wan_type_t wan_type; if(hw_static_info == NULL) RETURN_ERR(RT_ERR_RG_NULL_POINTER); DEBUG("wan_intf_idx:%d",wan_intf_idx); DEBUG("IP version:%d",hw_static_info->ip_version); DEBUG("napt_enable:%d",hw_static_info->napt_enable); DEBUG("ipv6_napt_enable:%d",hw_static_info->ipv6_napt_enable); DEBUG("ip addr:%08x",hw_static_info->ip_addr); DEBUG("host ip addr:%08x",hw_static_info->host_ip_addr); DEBUG("ip addr mask:%08x",hw_static_info->ip_network_mask); DEBUG("ipv4 default gateway:%d",hw_static_info->ipv4_default_gateway_on); DEBUG("ipv4 gateway addr:%08x",hw_static_info->gateway_ipv4_addr); DEBUG("ipv6 addr:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x", hw_static_info->ipv6_addr.ipv6_addr[0],hw_static_info->ipv6_addr.ipv6_addr[1],hw_static_info->ipv6_addr.ipv6_addr[2],hw_static_info->ipv6_addr.ipv6_addr[3], hw_static_info->ipv6_addr.ipv6_addr[4],hw_static_info->ipv6_addr.ipv6_addr[5],hw_static_info->ipv6_addr.ipv6_addr[6],hw_static_info->ipv6_addr.ipv6_addr[7], hw_static_info->ipv6_addr.ipv6_addr[8],hw_static_info->ipv6_addr.ipv6_addr[9],hw_static_info->ipv6_addr.ipv6_addr[10],hw_static_info->ipv6_addr.ipv6_addr[11], hw_static_info->ipv6_addr.ipv6_addr[12],hw_static_info->ipv6_addr.ipv6_addr[13],hw_static_info->ipv6_addr.ipv6_addr[14],hw_static_info->ipv6_addr.ipv6_addr[15]); DEBUG("ipv6 mask length:%d",hw_static_info->ipv6_mask_length); DEBUG("ipv6 default gateway on:%d",hw_static_info->ipv6_default_gateway_on); DEBUG("ipv6 gateway addr:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x", 
hw_static_info->gateway_ipv6_addr.ipv6_addr[0],hw_static_info->gateway_ipv6_addr.ipv6_addr[1],hw_static_info->gateway_ipv6_addr.ipv6_addr[2],hw_static_info->gateway_ipv6_addr.ipv6_addr[3], hw_static_info->gateway_ipv6_addr.ipv6_addr[4],hw_static_info->gateway_ipv6_addr.ipv6_addr[5],hw_static_info->gateway_ipv6_addr.ipv6_addr[6],hw_static_info->gateway_ipv6_addr.ipv6_addr[7], hw_static_info->gateway_ipv6_addr.ipv6_addr[8],hw_static_info->gateway_ipv6_addr.ipv6_addr[9],hw_static_info->gateway_ipv6_addr.ipv6_addr[10],hw_static_info->gateway_ipv6_addr.ipv6_addr[11], hw_static_info->gateway_ipv6_addr.ipv6_addr[12],hw_static_info->gateway_ipv6_addr.ipv6_addr[13],hw_static_info->gateway_ipv6_addr.ipv6_addr[14],hw_static_info->gateway_ipv6_addr.ipv6_addr[15]); DEBUG("mtu:%d",hw_static_info->mtu); DEBUG("ipv4 gw mac auto learn:%d",hw_static_info->gw_mac_auto_learn_for_ipv4); DEBUG("ipv4 gmac:%02x-%02x-%02x-%02x-%02x-%02x", hw_static_info->gateway_mac_addr_for_ipv4.octet[0],hw_static_info->gateway_mac_addr_for_ipv4.octet[1] ,hw_static_info->gateway_mac_addr_for_ipv4.octet[2],hw_static_info->gateway_mac_addr_for_ipv4.octet[3] ,hw_static_info->gateway_mac_addr_for_ipv4.octet[4],hw_static_info->gateway_mac_addr_for_ipv4.octet[5]); DEBUG("ipv6 gw mac auto learn:%d",hw_static_info->gw_mac_auto_learn_for_ipv6); DEBUG("ipv6 gmac:%02x-%02x-%02x-%02x-%02x-%02x", hw_static_info->gateway_mac_addr_for_ipv6.octet[0],hw_static_info->gateway_mac_addr_for_ipv6.octet[1] ,hw_static_info->gateway_mac_addr_for_ipv6.octet[2],hw_static_info->gateway_mac_addr_for_ipv6.octet[3] ,hw_static_info->gateway_mac_addr_for_ipv6.octet[4],hw_static_info->gateway_mac_addr_for_ipv6.octet[5]); DEBUG("static route with arp:%d",hw_static_info->static_route_with_arp); //Check input parameters if((wan_intf_idx >= MAX_NETIF_HW_TABLE_SIZE) && (hw_static_info->ipv4_default_gateway_on || hw_static_info->ipv6_default_gateway_on)) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); // we expect default_gateway netif < MAX_NETIF_HW_TABLE_SIZE if(wan_intf_idx < 0 || wan_intf_idx >= MAX_NETIF_SW_TABLE_SIZE) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if(!rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.is_wan) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if(hw_static_info->mtu == 0) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if(hw_static_info->static_route_with_arp && hw_static_info->host_ip_addr!=0) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if(hw_static_info->static_route_with_arp && hw_static_info->ipv4_default_gateway_on) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if(hw_static_info->host_ip_addr!=0 && (hw_static_info->ip_network_mask!=0xffffffff || hw_static_info->ipv4_default_gateway_on || hw_static_info->gateway_ipv4_addr==0)) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); #if defined(CONFIG_RTL9602C_SERIES) //patch for mismatching mib ipv6 netif problem if((rg_db.systemGlobal.internalSupportMask & RTK_RG_INTERNAL_SUPPORT_BIT0)) { if(rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.wan_type!=RTK_RG_BRIDGE && hw_static_info->ip_version==IPVER_V4V6 && hw_static_info->napt_enable==1 && wan_intf_idx>=(MAX_NETIF_SW_TABLE_SIZE/2)) RETURN_ERR(RT_ERR_RG_ENTRY_FULL); } #endif //Check Wan type wan_type=rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.wan_type; if(hw_static_info->static_route_with_arp && (wan_type!=RTK_RG_STATIC && wan_type!=RTK_RG_DHCP)) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if(hw_static_info->host_ip_addr!=0 && (wan_type!=RTK_RG_STATIC && wan_type!=RTK_RG_DHCP && wan_type!=RTK_RG_PPPoE)) 
		RETURN_ERR(RT_ERR_RG_INVALID_PARAM);

	//Check IP version
	if(hw_static_info->ip_version==IPVER_V4ONLY || hw_static_info->ip_version==IPVER_V4V6)
	{
		ipv4Enable=1;
		//Check parameters
		//20140620LUKE: if we set the IP as zero, we mean the IP address should be invalidated!!
		if(hw_static_info->ip_addr == 0 || hw_static_info->ip_network_mask == 0)
			ipv4Enable=0;//RETURN_ERR(RT_ERR_RG_INVALID_PARAM);
	}
	if(hw_static_info->ip_version==IPVER_V6ONLY || hw_static_info->ip_version==IPVER_V4V6)
	{
		ipv6Enable=1;
		//Check parameters
		//20140620LUKE: if we set the IP as zero, we mean the IP address should be invalidated!!
		if((*(unsigned int *)hw_static_info->ipv6_addr.ipv6_addr == 0 &&
			*(unsigned int *)(hw_static_info->ipv6_addr.ipv6_addr+4) == 0 &&
			*(unsigned int *)(hw_static_info->ipv6_addr.ipv6_addr+8) == 0 &&
			*(unsigned int *)(hw_static_info->ipv6_addr.ipv6_addr+12) == 0) ||
			hw_static_info->ipv6_mask_length == 0)
			ipv6Enable=0;//RETURN_ERR(RT_ERR_RG_INVALID_PARAM);

		//20140924LUKE: we don't support IPv6 over PPTP/L2TP now.
		if(rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.wan_type==RTK_RG_PPTP ||
			rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.wan_type==RTK_RG_L2TP)
			RETURN_ERR(RT_ERR_RG_INVALID_PARAM);
	}

	//Check if default gateway is on, gateway ip should be valid
	//if(hw_static_info->default_gateway_on == 1 && hw_static_info->gateway_ipv4_addr == 0) //without gw ip, we can't set default route
	//	RETURN_ERR(RT_ERR_RG_INVALID_PARAM);

#ifdef CONFIG_APOLLO_LITEROMEDRIVER
	//for lite-romeDriver, auto_learn must not be turned on, and the MAC address must not be invalid
	if(hw_static_info->gw_mac_auto_learn_for_ipv4 == 1 || (ipv4Enable==1&&memcmp(&hw_static_info->gateway_mac_addr_for_ipv4,&zeroMAC,sizeof(rtk_mac_t))==0))
		RETURN_ERR(RT_ERR_RG_GW_MAC_NOT_SET);
	if(hw_static_info->gw_mac_auto_learn_for_ipv6 == 1 || (ipv6Enable==1&&memcmp(&hw_static_info->gateway_mac_addr_for_ipv6,&zeroMAC,sizeof(rtk_mac_t))==0))
		RETURN_ERR(RT_ERR_RG_GW_MAC_NOT_SET);
#else
	//for romeDriver, if you do not want to send ARP automatically, you must assign a valid MAC address for the valid IP address
	if(hw_static_info->gw_mac_auto_learn_for_ipv4 == 0)
	{
		if((ipv4Enable==1&&hw_static_info->gateway_ipv4_addr>0&&memcmp(&hw_static_info->gateway_mac_addr_for_ipv4,&zeroMAC,sizeof(rtk_mac_t))==0))
			RETURN_ERR(RT_ERR_RG_GW_MAC_NOT_SET);
	}
	//20160128LUKE: support static host route auto-learn now.
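	//Editor's note: the checks above boil down to a simple contract. For the lite-romeDriver build the caller
	//must supply the gateway MAC itself (auto-learn off); the full romeDriver may auto-learn it, except that an
	//IPv6 host route (/128) still requires a manually assigned MAC. A minimal caller-side sketch of the
	//rtk_rg_ipStaticInfo_t setup this function expects is kept below under #if 0; it is illustrative only and
	//every numeric value is a made-up example.
#if 0
	{
		rtk_rg_ipStaticInfo_t staticInfo;
		bzero(&staticInfo,sizeof(rtk_rg_ipStaticInfo_t));
		staticInfo.ip_version=IPVER_V4ONLY;
		staticInfo.napt_enable=1;
		staticInfo.mtu=1500;
		staticInfo.ip_addr=0xc0a86402;            //192.168.100.2 (example address)
		staticInfo.ip_network_mask=0xffffff00;    //255.255.255.0 (example mask)
		staticInfo.ipv4_default_gateway_on=1;
		staticInfo.gateway_ipv4_addr=0xc0a86401;  //192.168.100.1 (example gateway)
		staticInfo.gw_mac_auto_learn_for_ipv4=0;  //mandatory for lite-romeDriver
		staticInfo.gateway_mac_addr_for_ipv4.octet[0]=0x00; //example gateway MAC 00:11:22:33:44:55
		staticInfo.gateway_mac_addr_for_ipv4.octet[1]=0x11;
		staticInfo.gateway_mac_addr_for_ipv4.octet[2]=0x22;
		staticInfo.gateway_mac_addr_for_ipv4.octet[3]=0x33;
		staticInfo.gateway_mac_addr_for_ipv4.octet[4]=0x44;
		staticInfo.gateway_mac_addr_for_ipv4.octet[5]=0x55;
		//...then handed to _rtk_rg_internal_wanSet(wan_intf_idx,&staticInfo) by the caller
	}
#endif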
	/*else
	{
		//turn on auto-learn, but we are unable to get gateway mac without ARP routing
		if(ipv4Enable==1&&hw_static_info->ip_network_mask==0xffffffff) //host route
			RETURN_ERR(RT_ERR_RG_GW_MAC_NOT_SET);
	}*/

	if(hw_static_info->gw_mac_auto_learn_for_ipv6 == 0)
	{
		if((ipv6Enable==1&&memcmp(hw_static_info->gateway_ipv6_addr.ipv6_addr,zeroIPv6.ipv6_addr,IPV6_ADDR_LEN)&&memcmp(&hw_static_info->gateway_mac_addr_for_ipv6,&zeroMAC,sizeof(rtk_mac_t))==0))
			RETURN_ERR(RT_ERR_RG_GW_MAC_NOT_SET);
	}
	else
	{
		//turn on auto-learn, but we are unable to get gateway mac without ARP routing
		if((ipv6Enable==1&&hw_static_info->ipv6_mask_length==128)) //host route
			RETURN_ERR(RT_ERR_RG_GW_MAC_NOT_SET);
	}
#endif

	//Check if we are setting the same WAN interface twice
	if((rg_db.systemGlobal.wanInfoSet & (0x1<<wan_intf_idx)) > 0)
	{
#if 0
		//Get, Del, Re-add wan interface before re-setting it
		bzero(&wanConfiguration,sizeof(rtk_rg_wanIntfConf_t));
		memcpy(&wanConfiguration,&rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf,sizeof(rtk_rg_wanIntfConf_t));
		//Don't disconnect the PPP interface in the protocol stack, since we just reset the IP and SessionID into the hw table
		if(wanConfiguration.wan_type==RTK_RG_PPPoE)
			rg_db.systemGlobal.not_disconnect_ppp=1;
		rg_db.systemGlobal.intfIdxForReset=wan_intf_idx;
		ret=rtk_rg_interface_del(wan_intf_idx);
		if(ret!=RT_ERR_RG_OK)return ret;
		//Keep wan interface index before add
		ret=rtk_rg_wanInterface_add(&wanConfiguration,&i);
		if(ret!=RT_ERR_RG_OK)return ret;
#else
#if defined(CONFIG_RTL9602C_SERIES)
		//patch for mismatching mib ipv6 netif problem
		if((rg_db.systemGlobal.internalSupportMask & RTK_RG_INTERNAL_SUPPORT_BIT0))
		{
			if(rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.wan_type!=RTK_RG_BRIDGE &&
				rg_db.systemGlobal.interfaceInfo[wan_intf_idx].p_wanStaticInfo->ip_version==IPVER_V4V6 &&
				rg_db.systemGlobal.interfaceInfo[wan_intf_idx].p_wanStaticInfo->napt_enable==1 &&
				!(hw_static_info->ip_version==IPVER_V4V6 && hw_static_info->napt_enable==1) )
			{
				rtk_l34_netif_entry_t intfV6Entry;
				bzero(&intfV6Entry, sizeof(rtk_l34_netif_entry_t));
				ret = RTK_L34_NETIFTABLE_SET(wan_intf_idx+(MAX_NETIF_SW_TABLE_SIZE/2), &intfV6Entry);
				if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_INTF_SET_FAIL);
				//bzero(&rg_db.systemGlobal.interfaceInfo[wan_intf_idx+(MAX_NETIF_SW_TABLE_SIZE/2)], sizeof(rtk_rg_interface_info_global_t));
			}
		}
#endif
		//20140613LUKE: Check if we are only changing the IPv4 or IPv6 settings;
		//if so, just reset that one protocol without touching the other protocol's settings.
		ip_update_state=_rtk_rg_wanStaticInfoReaddCheck(wan_intf_idx,hw_static_info);
		if(ip_update_state==ONLY_IPV4_UPDATED || ip_update_state==IPV4_IPV6_UPDATED)
		{
			DEBUG("change IPv4 settings only!! IPv4 enable is %d",ipv4Enable);
			//just delete the IPv4 related settings, do the v4_only procedure later
			if(ip_update_state==ONLY_IPV4_UPDATED)ipv6Enable=0;

			//Stop the ARP request timer if this WAN interface is the ipv4 default route
			if(rg_db.systemGlobal.interfaceInfo[wan_intf_idx].p_wanStaticInfo->ipv4_default_gateway_on == 1)
				rg_db.systemGlobal.intfArpRequest[wan_intf_idx].finished = 1;
			//20140623LUKE: when deleting the ARP if needed, it should not convert software ARP to HW, since we are going to reset right after!
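			//Editor's note: the re-add path is intentionally asymmetric. _rtk_rg_wanStaticInfoReaddCheck() only
			//reports which address family actually changed, and the code below tears down just that family
			//(its routing entries, ARP/neighbor state, and the cached p_wanStaticInfo fields) before the normal
			//setup path re-creates it, so an IPv4-only renumbering never disturbs the existing IPv6 state and
			//vice versa.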
rg_db.systemGlobal.intfIdxForReset=wan_intf_idx; ret=_rtk_rg_deleteIPv4Routing(wan_intf_idx); rg_db.systemGlobal.intfIdxForReset=-1; if(ret!=RT_ERR_RG_OK)return ret; //Clear software data structure if(rg_db.systemGlobal.defaultRouteSet == wan_intf_idx) rg_db.systemGlobal.defaultRouteSet=-1; rg_db.systemGlobal.interfaceInfo[wan_intf_idx].p_wanStaticInfo->napt_enable=0; rg_db.systemGlobal.interfaceInfo[wan_intf_idx].p_wanStaticInfo->ip_addr=0; rg_db.systemGlobal.interfaceInfo[wan_intf_idx].p_wanStaticInfo->ip_network_mask=0; rg_db.systemGlobal.interfaceInfo[wan_intf_idx].p_wanStaticInfo->ipv4_default_gateway_on=0; rg_db.systemGlobal.interfaceInfo[wan_intf_idx].p_wanStaticInfo->gateway_ipv4_addr=0; rg_db.systemGlobal.interfaceInfo[wan_intf_idx].p_wanStaticInfo->gw_mac_auto_learn_for_ipv4=0; bzero(&rg_db.systemGlobal.interfaceInfo[wan_intf_idx].p_wanStaticInfo->gateway_mac_addr_for_ipv4,sizeof(rtk_mac_t)); } if(ip_update_state==ONLY_IPV6_UPDATED || ip_update_state==IPV4_IPV6_UPDATED) { DEBUG("change IPv6 settings only!! ipv6enable is %d",ipv6Enable); //just delete IPv6 related setting, do v6_only procedure later if(ip_update_state==ONLY_IPV6_UPDATED)ipv4Enable=0; //Stop Neighbor Discovery request timer if this WAN interface is ipv6 default route if(rg_db.systemGlobal.interfaceInfo[wan_intf_idx].p_wanStaticInfo->ipv6_default_gateway_on == 1) rg_db.systemGlobal.intfNeighborDiscovery[wan_intf_idx].finished = 1; ret=_rtk_rg_deleteIPv6Routing(wan_intf_idx); if(ret!=RT_ERR_RG_OK)return ret; //Clear software data structure if(rg_db.systemGlobal.defaultIPV6RouteSet == wan_intf_idx) rg_db.systemGlobal.defaultIPV6RouteSet=-1; bzero(rg_db.systemGlobal.interfaceInfo[wan_intf_idx].p_wanStaticInfo->ipv6_addr.ipv6_addr,IPV6_ADDR_LEN); rg_db.systemGlobal.interfaceInfo[wan_intf_idx].p_wanStaticInfo->ipv6_mask_length=0; rg_db.systemGlobal.interfaceInfo[wan_intf_idx].p_wanStaticInfo->ipv6_default_gateway_on=0; bzero(rg_db.systemGlobal.interfaceInfo[wan_intf_idx].p_wanStaticInfo->gateway_ipv6_addr.ipv6_addr,IPV6_ADDR_LEN); rg_db.systemGlobal.interfaceInfo[wan_intf_idx].p_wanStaticInfo->gw_mac_auto_learn_for_ipv6=0; bzero(&rg_db.systemGlobal.interfaceInfo[wan_intf_idx].p_wanStaticInfo->gateway_mac_addr_for_ipv6,sizeof(rtk_mac_t)); } if(ip_update_state==NO_IP_UPDATED) goto SETUP_HW_MTU; #endif } //Check if there is default route setup before if(ipv4Enable && hw_static_info->ipv4_default_gateway_on && rg_db.systemGlobal.defaultRouteSet != -1) RETURN_ERR(RT_ERR_RG_DEF_ROUTE_EXIST); if(ipv6Enable && hw_static_info->ipv6_default_gateway_on && rg_db.systemGlobal.defaultIPV6RouteSet != -1) RETURN_ERR(RT_ERR_RG_DEF_ROUTE_EXIST); //Setup WAN type table if(hw_static_info->napt_enable==1 && rg_db.systemGlobal.initParam.macBasedTagDecision) //L4 { errorno=RT_ERR_RG_WANTYPE_SET_FAIL; memcpy(&wantEt, &rg_db.wantype[rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.bind_wan_type_ipv4].rtk_wantype, sizeof(rtk_wanType_entry_t)); wantEt.wanType=L34_WAN_TYPE_L34NAT_ROUTE; rg_db.wantype[rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.bind_wan_type_ipv4].valid=1; ret = RTK_L34_WANTYPETABLE_SET(rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.bind_wan_type_ipv4, &wantEt); if(ret!=RT_ERR_OK) return (errorno); } SETUP_HW_MTU: DEBUG("set up MTU to %d",hw_static_info->mtu); rg_db.netif[wan_intf_idx].rtk_netif.mtu=hw_static_info->mtu; #if defined(CONFIG_RTL9600_SERIES) if(rg_kernel.apolloChipId==APOLLOMP_CHIP_ID) { //Patch for 0601 and 6266, when WAN is pppoe, the L34 will minus extra 8 
bytes, //therefore we have to patch hardware here to add more 8 bytes for it to subtract if(wan_type==RTK_RG_PPPoE) rg_db.netif[wan_intf_idx].rtk_netif.mtu += PATCH_6266_MTU_PPPOE; //Patch for 0601 and 6266, when binding to interface happened, //the packet size have 2 byte wouldn't decrease, causing TRAP reason 224. //therefore the hardware setting should be set as preferred value plus 2 here if(rg_db.systemGlobal.initParam.macBasedTagDecision) rg_db.netif[wan_intf_idx].rtk_netif.mtu += PATCH_6266_MTU_BINDING; } #endif #if defined(CONFIG_RTL9602C_SERIES) if((rg_db.systemGlobal.internalSupportMask & RTK_RG_INTERNAL_SUPPORT_BIT0)) { //20151014LUKE: setup interface table's L4 IP address if napt, otherwise set it to zero. if(hw_static_info->napt_enable) rg_db.netif[wan_intf_idx].rtk_netif.ipAddr=hw_static_info->ip_addr; else rg_db.netif[wan_intf_idx].rtk_netif.ipAddr=0; } else if((rg_db.systemGlobal.internalSupportMask & RTK_RG_INTERNAL_SUPPORT_BIT1)) { //init rg_db.netif[wan_intf_idx].rtk_netif.ipAddr=0; rg_db.netif[wan_intf_idx].rtk_netif.isIpv6=0; if(hw_static_info->ip_version==IPVER_V4ONLY || hw_static_info->ip_version==IPVER_V4V6) { if(hw_static_info->napt_enable) rg_db.netif[wan_intf_idx].rtk_netif.ipAddr=hw_static_info->ip_addr; else rg_db.netif[wan_intf_idx].rtk_netif.ipAddr=0; } if(hw_static_info->ip_version==IPVER_V6ONLY || hw_static_info->ip_version==IPVER_V4V6) { rg_db.netif[wan_intf_idx].rtk_netif.isIpv6=1; //special case: ipv6 only and not-dslite WAN if(hw_static_info->ip_version==IPVER_V6ONLY && wan_type!=RTK_RG_DSLITE && wan_type!=RTK_RG_PPPoE_DSLITE) rg_db.netif[wan_intf_idx].rtk_netif.ipAddr=0xffffffff; } } #elif defined(CONFIG_RTL9607C_SERIES) if(hw_static_info->napt_enable) rg_db.netif[wan_intf_idx].rtk_netif.ipAddr=hw_static_info->ip_addr; else rg_db.netif[wan_intf_idx].rtk_netif.ipAddr=0; if(!ipv4Enable) rg_db.netif[wan_intf_idx].rtk_netif.deny_ipv4 = TRUE; if(!ipv6Enable) rg_db.netif[wan_intf_idx].rtk_netif.deny_ipv6 = TRUE; #endif ret = RTK_L34_NETIFTABLE_SET(wan_intf_idx, &rg_db.netif[wan_intf_idx].rtk_netif); if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_INTF_SET_FAIL); //reset software MTU should keep original MTU, only hardware MTU need to patch!! 
rg_db.netif[wan_intf_idx].rtk_netif.mtu=hw_static_info->mtu; rg_db.systemGlobal.interfaceInfo[wan_intf_idx].p_wanStaticInfo->mtu=hw_static_info->mtu; //Since we do not need to change IPv4 or IPv6 settings for the already-created WAN, we just return here if(ip_update_state==NO_IP_UPDATED)goto DO_INTF_CALLBACK; //if 255.255.255.255, we don't need to add routing entry if(ipv4Enable==1) { //Check routing table available or not rtidx = MAX_L3_SW_TABLE_SIZE; bzero(&rtEntry, sizeof(rtk_l34_routing_entry_t)); //subnet_same_idx = MAX_L3_SW_TABLE_SIZE; for(i=0; i<MAX_L3_SW_TABLE_SIZE ; i++) //because idx MAX_L3_SW_TABLE_SIZE-1 is reserved for default route { if(i== V4_DEFAULT_ROUTE_IDX) continue; if(rg_db.l3[i].rtk_l3.valid == 0 && rtidx == MAX_L3_SW_TABLE_SIZE) { rtidx = i; //keep first invalid entry //break; } else { //Check if there is any same IP-range routing entry added, //and the routing entry must point to the netif entry which VLAN is THE SAME if((hw_static_info->ip_network_mask!=0xffffffff && (hw_static_info->ip_addr&hw_static_info->ip_network_mask)==rg_db.l3[i].rtk_l3.ipAddr && hw_static_info->ip_network_mask==rg_db.l3[i].netmask) || ((hw_static_info->ip_network_mask==0xffffffff && hw_static_info->host_ip_addr==0) && hw_static_info->gateway_ipv4_addr==rg_db.l3[i].rtk_l3.ipAddr && hw_static_info->ip_network_mask==rg_db.l3[i].netmask) || ((hw_static_info->ip_network_mask==0xffffffff && hw_static_info->host_ip_addr!=0) && hw_static_info->host_ip_addr==rg_db.l3[i].rtk_l3.ipAddr && hw_static_info->ip_network_mask==rg_db.l3[i].netmask)) { //20150226LUKE: return fail if same subnet with different type //20160329LUKE: consider routing_type also errorno=RT_ERR_RG_SUBNET_INTERFACE_ASYMMETRIC; if(rg_db.l3[i].rtk_l3.process==L34_PROCESS_ARP){ if((hw_static_info->gateway_ipv4_addr>0)&&(hw_static_info->static_route_with_arp==0))//NH goto RET_CHECK_ERR; }else if(rg_db.l3[i].rtk_l3.process==L34_PROCESS_NH){ if((hw_static_info->gateway_ipv4_addr==0)||(hw_static_info->static_route_with_arp==1))//ARP goto RET_CHECK_ERR; } /* if(rg_db.netif[wan_intf_idx].rtk_netif.vlan_id != rg_db.netif[rg_db.l3[i].rtk_l3.netifIdx].rtk_netif.vlan_id) { errorno=RT_ERR_RG_SUBNET_INTERFACE_ASYMMETRIC; goto RET_CHECK_ERR; } */ routingAdded=1; rtidx=i; memcpy(&rtEntry,&rg_db.l3[i].rtk_l3,sizeof(rtk_l34_routing_entry_t)); break; } } } if(routingAdded==1) { if(!hw_static_info->static_route_with_arp&& (hw_static_info->ip_network_mask==0xffffffff || (hw_static_info->ipv4_default_gateway_on==0 && hw_static_info->gateway_ipv4_addr!=0 && hw_static_info->napt_enable==0 && (hw_static_info->ip_network_mask!=0xffffffff && ((hw_static_info->ip_addr&hw_static_info->ip_network_mask)!=hw_static_info->ip_addr))))) { int rtTrapIdx = MAX_L3_SW_TABLE_SIZE; rtk_l34_routing_entry_t rtTrapEntry; uint8 rtTrapAdd = 0; for(i=0; i<MAX_L3_SW_TABLE_SIZE ; i++) //because idx MAX_L3_SW_TABLE_SIZE-1 is reserved for default route { if(i== V4_DEFAULT_ROUTE_IDX) continue; if(rg_db.l3[i].rtk_l3.valid==0) { if(rtTrapIdx==MAX_L3_SW_TABLE_SIZE) rtTrapIdx = i; //keep first invalid entry } else { if(rg_db.l3[i].rtk_l3.ipAddr==hw_static_info->ip_addr && rg_db.l3[i].netmask==0xffffffff && rg_db.l3[i].rtk_l3.process==L34_PROCESS_CPU) rtTrapAdd = 1; } } if(rtTrapAdd==0) { errorno=RT_ERR_RG_ENTRY_FULL; if(rtTrapIdx == MAX_L3_SW_TABLE_SIZE) goto RET_CHECK_ERR; static_rtidx = rtTrapIdx; bzero(&rtTrapEntry, sizeof(rtk_l34_routing_entry_t)); //Set up Routing table -- TRAP rtTrapEntry.netifIdx=wan_intf_idx; rtTrapEntry.valid=1; rtTrapEntry.process=L34_PROCESS_CPU; //default to 
sw table rtTrapEntry.ipAddr=hw_static_info->ip_addr; //20140820: here should equal to gateway IP if(hw_static_info->napt_enable) rtTrapEntry.internal=0; //external host from outside, NAPT or NAT else rtTrapEntry.internal=1; //pure routing rtTrapEntry.rt2waninf=0; input_ipmsk=0xffffffff; RG_ONE_COUNT(input_ipmsk); rtTrapEntry.ipMask=input_ipmsk-1; errorno=RT_ERR_RG_ROUTE_SET_FAIL; ret = RTK_L34_ROUTINGTABLE_SET(static_rtidx, &rtTrapEntry); if(ret!=RT_ERR_OK)goto RET_STATIC_ROUTE_ERR; //20140703LUKE: keep original IP address for check gateway IP rg_db.l3[static_rtidx].gateway_ip=hw_static_info->ip_addr; } } } if(routingAdded==0) { errorno=RT_ERR_RG_ENTRY_FULL; if(rtidx == MAX_L3_SW_TABLE_SIZE)goto RET_CHECK_ERR; //20140528LUKE:when setup STATIC ROUTE for routing mode, we need one more routing entry for traping WAN gateway ip to CPU!! //20140925LUKE: PPTP and L2TP wan should add trap rule for gateway IP! //20141107LUKE: when static_route_with_arp is enable, we should add ARP routing. //20160321LUKE: for domain IP in routing mode, we can omit this routing entry //20160824: when ip mask is 255.255.255.255(point to point connection), we need one more routing entry for traping WAN gateway ip to CPU(it includes napt wan, but it can be delete after learning gw mac). if(!hw_static_info->static_route_with_arp&& (hw_static_info->ip_network_mask==0xffffffff || (hw_static_info->ipv4_default_gateway_on==0 && hw_static_info->gateway_ipv4_addr!=0 && hw_static_info->napt_enable==0 && (hw_static_info->ip_network_mask!=0xffffffff && ((hw_static_info->ip_addr&hw_static_info->ip_network_mask)!=hw_static_info->ip_addr))) || wan_type==RTK_RG_PPTP || wan_type==RTK_RG_L2TP #if defined(CONFIG_RTL9602C_SERIES) || (hw_static_info->napt_enable==0&&(wan_type==RTK_RG_DSLITE||wan_type==RTK_RG_PPPoE_DSLITE)) //20160901LUKE: for dslite routing mode, we should add one more trap routing entry for gateway IP. 
#endif )) { static_rtidx = rtidx; //Set up Routing table -- TRAP rtEntry.netifIdx=wan_intf_idx; rtEntry.valid=1; rtEntry.process=L34_PROCESS_CPU; //default to sw table rtEntry.ipAddr=hw_static_info->ip_addr; //20140820: here should equal to gateway IP if(wan_type==RTK_RG_PPTP || wan_type==RTK_RG_L2TP) { if(hw_static_info->napt_enable) rtEntry.internal=0; //external host from outside, NAPT or NAT else rtEntry.internal=1; //pure routing rtEntry.rt2waninf=1; input_ipmsk=hw_static_info->ip_network_mask; // TODO:if load-balance is needed, here should be changed rtEntry.nhStart=rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.nexthop_ipv4; /*exact index*/ rtEntry.nhNxt=rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.nexthop_ipv4; rtEntry.nhNum=0; //exect Next hop number 1,2,4,8,16 rtEntry.nhAlgo=0; //PER-PACKET rtEntry.ipDomain=6; //Entry 0~7 rtEntry.rt2waninf=1; } else { if(hw_static_info->napt_enable) rtEntry.internal=0; //external host from outside, NAPT or NAT else rtEntry.internal=1; //pure routing rtEntry.rt2waninf=0; input_ipmsk=0xffffffff; } RG_ONE_COUNT(input_ipmsk); rtEntry.ipMask=input_ipmsk-1; errorno=RT_ERR_RG_ROUTE_SET_FAIL; ret = RTK_L34_ROUTINGTABLE_SET(static_rtidx, &rtEntry); if(ret!=RT_ERR_OK)goto RET_STATIC_ROUTE_ERR; //20140703LUKE: keep original IP address for check gateway IP rg_db.l3[static_rtidx].gateway_ip=hw_static_info->ip_addr; rtidx = MAX_L3_SW_TABLE_SIZE; bzero(&rtEntry, sizeof(rtk_l34_routing_entry_t)); for(i=0; i<MAX_L3_SW_TABLE_SIZE ; i++) //because idx MAX_L3_SW_TABLE_SIZE-1 is reserved for default route { if(i== V4_DEFAULT_ROUTE_IDX) continue; if(rg_db.l3[i].rtk_l3.valid == 0 && rtidx == MAX_L3_SW_TABLE_SIZE) { rtidx = i; //keep first invalid entry break; } } errorno=RT_ERR_RG_ENTRY_FULL; if(rtidx == MAX_L3_SW_TABLE_SIZE)goto RET_STATIC_ROUTE_ERR; } //20140925LUKE: PPTP and L2TP should not occupy ARP table since it just need to trap gateway IP //20150108LUKE: dSlite should add trap rule for gateway IP, too. //20160824: when ip mask is 255.255.255.255(point to point connection) and it's static wan, we need setup a route entry for remote point. if(wan_type!=RTK_RG_PPTP && wan_type!=RTK_RG_L2TP) { if(hw_static_info->ip_network_mask==0xffffffff && hw_static_info->host_ip_addr==0) { if(hw_static_info->ipv4_default_gateway_on==0 && hw_static_info->gateway_ipv4_addr!=0) { //Set up Routing table -- ARP rtEntry.netifIdx=wan_intf_idx; rtEntry.valid=1; rtEntry.process=L34_PROCESS_CPU; //default to sw table if(hw_static_info->napt_enable) rtEntry.internal=0; //external host from outside, NAPT or NAT else rtEntry.internal=1; //pure routing rtEntry.ipAddr=hw_static_info->gateway_ipv4_addr; //20130301-store IP addr after masked rtEntry.rt2waninf=1; input_ipmsk=0xffffffff; RG_ONE_COUNT(input_ipmsk); rtEntry.ipMask=input_ipmsk-1; errorno=RT_ERR_RG_ROUTE_SET_FAIL; ret = RTK_L34_ROUTINGTABLE_SET(rtidx, &rtEntry); if(ret!=RT_ERR_OK)goto RET_ROUTE_ERR; //20140703LUKE: keep original IP address for check gateway IP rg_db.l3[rtidx].gateway_ip=hw_static_info->ip_addr; } else { rtidx = static_rtidx; memcpy(&rtEntry, &rg_db.l3[static_rtidx].rtk_l3, sizeof(rtk_l34_routing_entry_t)); } } else #if defined(CONFIG_RTL9602C_SERIES) //20160901LUKE: for non-default-route dslite or other WAN would we need this routing entry. 
if((wan_type!=RTK_RG_DSLITE && wan_type!=RTK_RG_PPPoE_DSLITE)||((wan_type==RTK_RG_DSLITE || wan_type==RTK_RG_PPPoE_DSLITE)&&hw_static_info->ipv4_default_gateway_on==0)) #endif { //Set up Routing table -- ARP rtEntry.netifIdx=wan_intf_idx; rtEntry.valid=1; rtEntry.process=L34_PROCESS_CPU; //default to sw table if(hw_static_info->napt_enable) rtEntry.internal=0; //external host from outside, NAPT or NAT else rtEntry.internal=1; //pure routing if(hw_static_info->host_ip_addr==0) rtEntry.ipAddr=hw_static_info->ip_addr&hw_static_info->ip_network_mask; //20130301-store IP addr after masked else rtEntry.ipAddr=hw_static_info->host_ip_addr&hw_static_info->ip_network_mask; //20130301-store IP addr after masked rtEntry.rt2waninf=1; input_ipmsk=hw_static_info->ip_network_mask; RG_ONE_COUNT(input_ipmsk); rtEntry.ipMask=input_ipmsk-1; //for PPPoE WAN 255.255.255.255, we need one host route to trap gateway packets to CPU!!! //20140328LUKE: If we add non-default route WAN with gateway address, create STATIC ROUTE! //20141107LUKE: when static_route_with_arp is enable, we should add ARP routing here. //20150108LUKE: dSlite should not occupy ARP table since it just need to trap gateway IP if(wan_type!=RTK_RG_DSLITE && wan_type!=RTK_RG_PPPoE_DSLITE && hw_static_info->ip_network_mask!=0xffffffff && ((hw_static_info->ipv4_default_gateway_on==0 && (hw_static_info->gateway_ipv4_addr==0||hw_static_info->static_route_with_arp)) || hw_static_info->ipv4_default_gateway_on==1)) { //20140617LUKE:if add Other WAN, default add to software ARP table if(rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.none_internet) { WARNING("Other-WAN[%d] will be added to software ARP table...",wan_intf_idx); } else { #if defined(CONFIG_RTL9600_SERIES) || defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) //Check for ARP table for enough entry // TODO:Check for ARP range and add to rg_db.systemGlobal.routingArpInfoArray bzero(&newAddingEntry,sizeof(rtk_rg_routing_arpInfo_t)); newAddingEntry.routingIdx=rtidx; newAddingEntry.intfIdx=wan_intf_idx; newAddingEntry.notMask=~hw_static_info->ip_network_mask; //20140827LUKE: solve corner case: if mask=255.255.255.254 if(input_ipmsk>=0x1e) newAddingEntry.bitNum=2; else newAddingEntry.bitNum=32-input_ipmsk; newAddingEntry.isLan=0; //WAN if(newAddingEntry.bitNum <= 8) //if need more than or equal to 512 entries, recorded in fwdEngine { errorno=_rtk_rg_addArpRoutingArray(&newAddingEntry,hw_static_info->ip_addr,rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.egress_vlan_id); if(errorno==RT_ERR_RG_OK) { rtEntry.arpStart = newAddingEntry.arpStart; rtEntry.arpEnd = newAddingEntry.arpEnd; rtEntry.process=L34_PROCESS_ARP; } else if(errorno==RT_ERR_RG_ADD_ARP_TO_SW_TABLE) //for sw table, routing entry just set process to CPU { WARNING("HW table is not enough...will add WAN[%d] to software ARP table!",wan_intf_idx); } else goto RET_CHECK_ERR; } else { WARNING("HW table is not enough...will add WAN[%d] to software ARP table!",wan_intf_idx); } #elif defined(CONFIG_RTL9602C_SERIES) rtEntry.process=L34_PROCESS_ARP; #endif } } errorno=RT_ERR_RG_ROUTE_SET_FAIL; ret = RTK_L34_ROUTINGTABLE_SET(rtidx, &rtEntry); if(ret!=RT_ERR_OK)goto RET_ROUTE_ERR; //20140703LUKE: keep original IP address for check gateway IP rg_db.l3[rtidx].gateway_ip=hw_static_info->ip_addr; } } routingAdded=1; } } if(ipv6Enable==1/* && hw_static_info->ipv6_mask_length!=128*/) { //20140904LUKE: when setup IPv6 STATIC ROUTE for routing, we need one more routing entry for traping WAN gateway ip to 
CPU!! #if 0 if(hw_static_info->ipv6_default_gateway_on==0 && memcmp(&hw_static_info->gateway_ipv6_addr,&zeroIPv6,sizeof(rtk_ipv6_addr_t)) && hw_static_info->ipv6_mask_length!=128) { //Check routing table available or not static_rtv6idx = MAX_IPV6_ROUTING_SW_TABLE_SIZE; for(i=0; i<MAX_IPV6_ROUTING_SW_TABLE_SIZE - 1; i++) //because idx 3 is reserved for default route { if(rg_db.v6route[i].rtk_v6route.valid == 0) { static_rtv6idx = i; //keep break; } } errorno=RT_ERR_RG_ENTRY_FULL; if(static_rtv6idx == MAX_IPV6_ROUTING_SW_TABLE_SIZE)goto RET_ROUTE_ERR; //Set up Routing table -- TRAP bzero(&rtv6Entry, sizeof(rtk_ipv6Routing_entry_t)); rtv6Entry.valid=1; rtv6Entry.type=L34_IPV6_ROUTE_TYPE_TRAP; rtv6Entry.nhOrIfidIdx=wan_intf_idx; rtv6Entry.ipv6PrefixLen=128; memcpy(&rtv6Entry.ipv6Addr,&hw_static_info->ipv6_addr,sizeof(rtk_ipv6_addr_t)); errorno=RT_ERR_RG_ROUTE_SET_FAIL; ret = RTK_L34_IPV6ROUTINGTABLE_SET(static_rtv6idx,&rtv6Entry); if(ret!=RT_ERR_OK)goto RET_ROUTE_ERR; } #endif //Check routing table available or not rtv6idx = MAX_IPV6_ROUTING_SW_TABLE_SIZE; for(i=0; i<MAX_IPV6_ROUTING_SW_TABLE_SIZE ; i++) //because idx 3 is reserved for default route { if(i == V6_HW_DEFAULT_ROUTE_IDX) continue; if(rg_db.v6route[i].rtk_v6route.valid == 0) { rtv6idx = i; //keep break; } } errorno=RT_ERR_RG_ENTRY_FULL; if(rtv6idx == MAX_IPV6_ROUTING_SW_TABLE_SIZE)goto RET_CHECK_ERR; //Set up Routing table -- NEIGHBOR bzero(&rtv6Entry, sizeof(rtk_ipv6Routing_entry_t)); rtv6Entry.valid=1; rtv6Entry.type=L34_IPV6_ROUTE_TYPE_LOCAL; rtv6Entry.nhOrIfidIdx=wan_intf_idx; rtv6Entry.ipv6PrefixLen=hw_static_info->ipv6_mask_length; memcpy(&rtv6Entry.ipv6Addr,&hw_static_info->ipv6_addr,sizeof(rtk_ipv6_addr_t)); rtv6Entry.rt2waninf=1; //local route, routing to WAN errorno=RT_ERR_RG_ROUTE_SET_FAIL; ret = RTK_L34_IPV6ROUTINGTABLE_SET(rtv6idx,&rtv6Entry); if(ret!=RT_ERR_OK)goto RET_ROUTE_ERR; //20160601LUKE: keep original IPv6 address for check gateway IP memcpy(&rg_db.v6route[rtv6idx].gateway_ipv6Addr,&hw_static_info->ipv6_addr,sizeof(rtk_ipv6_addr_t)); DEBUG("###add v6Route[%d]: %s prefixLen(%d) DIP(%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x) ###", rtv6idx, (rtv6Entry.rt2waninf==TRUE? 
"RT2WAN": "RT2LAN"), rtv6Entry.ipv6PrefixLen, rtv6Entry.ipv6Addr.ipv6_addr[0],rtv6Entry.ipv6Addr.ipv6_addr[1],rtv6Entry.ipv6Addr.ipv6_addr[2],rtv6Entry.ipv6Addr.ipv6_addr[3], rtv6Entry.ipv6Addr.ipv6_addr[4],rtv6Entry.ipv6Addr.ipv6_addr[5],rtv6Entry.ipv6Addr.ipv6_addr[6],rtv6Entry.ipv6Addr.ipv6_addr[7], rtv6Entry.ipv6Addr.ipv6_addr[8],rtv6Entry.ipv6Addr.ipv6_addr[9],rtv6Entry.ipv6Addr.ipv6_addr[10],rtv6Entry.ipv6Addr.ipv6_addr[11], rtv6Entry.ipv6Addr.ipv6_addr[12],rtv6Entry.ipv6Addr.ipv6_addr[13],rtv6Entry.ipv6Addr.ipv6_addr[14],rtv6Entry.ipv6Addr.ipv6_addr[15]); v6RoutingAdd=1; } //store information in Global variable if(ip_update_state==IPV4_IPV6_UPDATED)memset(rg_db.systemGlobal.interfaceInfo[wan_intf_idx].p_wanStaticInfo,0,sizeof(rtk_rg_ipStaticInfo_t)); rg_db.systemGlobal.interfaceInfo[wan_intf_idx].p_wanStaticInfo->ip_version=hw_static_info->ip_version; rg_db.systemGlobal.interfaceInfo[wan_intf_idx].p_wanStaticInfo->napt_enable=hw_static_info->napt_enable; rg_db.systemGlobal.interfaceInfo[wan_intf_idx].p_wanStaticInfo->mtu=hw_static_info->mtu; rg_db.systemGlobal.interfaceInfo[wan_intf_idx].p_wanStaticInfo->gw_mac_auto_learn_for_ipv4=hw_static_info->gw_mac_auto_learn_for_ipv4; rg_db.systemGlobal.interfaceInfo[wan_intf_idx].p_wanStaticInfo->gw_mac_auto_learn_for_ipv6=hw_static_info->gw_mac_auto_learn_for_ipv6; rg_db.systemGlobal.interfaceInfo[wan_intf_idx].p_wanStaticInfo->static_route_with_arp=hw_static_info->static_route_with_arp; //Save default gateway wan interface to this index if(hw_static_info->ipv4_default_gateway_on == 1) rg_db.systemGlobal.defaultRouteSet = wan_intf_idx; rg_db.systemGlobal.interfaceInfo[wan_intf_idx].p_wanStaticInfo->ip_addr=hw_static_info->ip_addr; rg_db.systemGlobal.interfaceInfo[wan_intf_idx].p_wanStaticInfo->host_ip_addr=hw_static_info->host_ip_addr; rg_db.systemGlobal.interfaceInfo[wan_intf_idx].p_wanStaticInfo->ip_network_mask=hw_static_info->ip_network_mask; rg_db.systemGlobal.interfaceInfo[wan_intf_idx].p_wanStaticInfo->ipv4_default_gateway_on=hw_static_info->ipv4_default_gateway_on; rg_db.systemGlobal.interfaceInfo[wan_intf_idx].p_wanStaticInfo->gateway_ipv4_addr=hw_static_info->gateway_ipv4_addr; memcpy(&rg_db.systemGlobal.interfaceInfo[wan_intf_idx].p_wanStaticInfo->gateway_mac_addr_for_ipv4,&hw_static_info->gateway_mac_addr_for_ipv4, sizeof(rtk_mac_t)); //Save default gateway wan interface to this index if(hw_static_info->ipv6_default_gateway_on == 1) rg_db.systemGlobal.defaultIPV6RouteSet = wan_intf_idx; memcpy(&rg_db.systemGlobal.interfaceInfo[wan_intf_idx].p_wanStaticInfo->ipv6_addr,&hw_static_info->ipv6_addr,sizeof(rtk_ipv6_addr_t)); rg_db.systemGlobal.interfaceInfo[wan_intf_idx].p_wanStaticInfo->ipv6_mask_length=hw_static_info->ipv6_mask_length; rg_db.systemGlobal.interfaceInfo[wan_intf_idx].p_wanStaticInfo->ipv6_default_gateway_on=hw_static_info->ipv6_default_gateway_on; memcpy(&rg_db.systemGlobal.interfaceInfo[wan_intf_idx].p_wanStaticInfo->gateway_ipv6_addr,&hw_static_info->gateway_ipv6_addr,sizeof(rtk_ipv6_addr_t)); memcpy(&rg_db.systemGlobal.interfaceInfo[wan_intf_idx].p_wanStaticInfo->gateway_mac_addr_for_ipv6,&hw_static_info->gateway_mac_addr_for_ipv6, sizeof(rtk_mac_t)); bzero(rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.intf_name,32); sprintf(rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.intf_name,"%d_%s_%s_%c", wan_intf_idx, hw_static_info->ipv4_default_gateway_on==1?"V4INTERNET":"V4OTHER", hw_static_info->ipv6_default_gateway_on==1?"V6INTERNET":"V6OTHER", wan_type==RTK_RG_BRIDGE?'B':'R' ); //Set WAN set mask 
rg_db.systemGlobal.wanInfoSet |= (0x1<<wan_intf_idx); //Check ARP table first if we add route before, otherwise call ARP request after that //20160329LUKE: add nexthop entry with gw_mac assigned, for napt mode, add IP table also. if(ipv4Enable==1 || (((hw_static_info->napt_enable==0)||(hw_static_info->gateway_ipv4_addr!=0))&&(hw_static_info->gw_mac_auto_learn_for_ipv4==0))) { bzero(&macEntry,sizeof(rtk_rg_macEntry_t)); if(routingAdded==1) { if(((hw_static_info->ipv4_default_gateway_on)||(hw_static_info->gateway_ipv4_addr!=0))&&(hw_static_info->gw_mac_auto_learn_for_ipv4==0)) { if(memcmp(&hw_static_info->gateway_mac_addr_for_ipv4, &zeroMAC, sizeof(rtk_mac_t))) //MAC != 0, otherwise do nothing { memcpy(macEntry.mac.octet,hw_static_info->gateway_mac_addr_for_ipv4.octet,ETHER_ADDR_LEN); //set SVL for lanIntf, patched in 20121203 //20150527LUKE: for WWAN we should add remote gateway mac at ext-port0 if(rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wirelessWan!=RG_WWAN_WIRED) macEntry.port_idx=RTK_RG_EXT_PORT0; else macEntry.port_idx=rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.wan_port_idx; //20160428LUKE: transform from RGMII to PON #if !defined(CONFIG_RTL9602C_SERIES) if(rg_db.systemGlobal.pppoeGponSmallbandwithControl && (rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.wan_type)==RTK_RG_PPPoE && (rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.wan_port_idx)==RTK_RG_PORT_PON){ DEBUG("Special change WAN_PORT from PON to RGMII."); macEntry.port_idx=RTK_RG_PORT_RGMII; } #endif macEntry.fid=rg_db.vlan[rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.egress_vlan_id].fid; //set mac's vlanid by egress tagif setting macEntry.vlan_id=rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.egress_vlan_id; if(rg_db.vlan[macEntry.vlan_id].fidMode==VLAN_FID_IVL){ macEntry.isIVL=1; //IVL should refer VLAN's untag setting to decide tag or not }else{ macEntry.isIVL=0; #if defined(CONFIG_RTL9600_SERIES) || defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) //Because Forced_DMAC2CVID is turn on, the LUT's VLANID should enter zero if untag!! 
if(!rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.egress_vlan_tag_on) macEntry.vlan_id=0; #else // support ctag_if macEntry.ctag_if=(rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.egress_vlan_tag_on)?1:0; #endif } //macEntry.vlan_id=rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.egress_vlan_id; //if(rg_db.vlan[macEntry.vlan_id].fidMode) // macEntry.isIVL=0; //else //macEntry.isIVL=1; //macEntry.fid=rg_db.vlan[macEntry.vlan_id].fid; macEntry.static_entry=1; //won't age out macEntry.arp_used=1; //pointed by nexthop entry ret=rtk_rg_apollo_macEntry_add(&macEntry,&l2Idx); if(ret!=RT_ERR_RG_OK) goto RET_GLB_ERR; DEBUG("### add l2[%d]=%02x:%02x:%02x:%02x:%02x:%02x ###\n",l2Idx,macEntry.mac.octet[0],macEntry.mac.octet[1],macEntry.mac.octet[2], macEntry.mac.octet[3],macEntry.mac.octet[4],macEntry.mac.octet[5]); //20150527LUKE: keep WWAN wlan-dev index in rg_db.lut if(rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wirelessWan!=RG_WWAN_WIRED) rg_db.lut[l2Idx].wlan_device_idx=rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wirelessWan; if(hw_static_info->gateway_ipv4_addr == 0) { ret = _rtk_rg_internal_GWMACSetup_stage2(wan_intf_idx,l2Idx); errorno=ret; } else { DEBUG("add ipv4 gateway ARP and MAC entry.."); errorno=RT_ERR_RG_ADD_ARP_MAC_FAILED; //20160428LUKE: transform from RGMII to PON #if !defined(CONFIG_RTL9602C_SERIES) if(rg_db.systemGlobal.pppoeGponSmallbandwithControl && (rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.wan_type)==RTK_RG_PPPoE && (rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.wan_port_idx)==RTK_RG_PORT_PON){ DEBUG("Special change WAN_PORT from PON to RGMII."); ret = _rtk_rg_arpAndMacEntryAdd(hw_static_info->gateway_ipv4_addr,rtidx,hw_static_info->gateway_mac_addr_for_ipv4.octet,RTK_RG_PORT_RGMII,FAIL,NULL,0,0,1); }else #endif ret = _rtk_rg_arpAndMacEntryAdd(hw_static_info->gateway_ipv4_addr,rtidx,hw_static_info->gateway_mac_addr_for_ipv4.octet,rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.wan_port_idx,FAIL,NULL,0,0,1); } if(ret!=RT_ERR_RG_OK) goto RET_GLB_ERR; } } //Check if ARP and MAC had auto learned before if(hw_static_info->gateway_ipv4_addr != 0) { arpMissed=1; if(rtEntry.process==L34_PROCESS_ARP) { #if defined(CONFIG_RG_FLOW_BASED_PLATFORM) goto check_sw_arp; #elif defined(CONFIG_RTL9600_SERIES) || defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) arp_valid_idx=rtEntry.arpStart<<0x2; while(arp_valid_idx < ((rtEntry.arpEnd+1)<<0x2)) { if(rg_db.arp[arp_valid_idx].rtk_arp.valid && (rg_db.arp[arp_valid_idx].ipv4Addr==hw_static_info->gateway_ipv4_addr)) { errorno = _rtk_rg_internal_GWMACSetup(rg_db.arp[arp_valid_idx].ipv4Addr, rg_db.arp[arp_valid_idx].rtk_arp.nhIdx); if(errorno!=RT_ERR_RG_OK) { goto RET_GLB_ERR; } else { arpMissed=0; break; //arpMissed=0 } } arp_valid_idx++; } #elif defined(CONFIG_RTL9602C_SERIES) { //Check for hardware ARP _rtk_rg_hardwareArpTableLookUp(rtidx,hw_static_info->gateway_ipv4_addr,&pSoftwareArpEntry,0); if(pSoftwareArpEntry==NULL) { //Check for software ARP _rtk_rg_softwareArpTableLookUp(rtidx,hw_static_info->gateway_ipv4_addr,&pSoftwareArpEntry,0); } if(pSoftwareArpEntry!=NULL) { arpMissed=0; //errorno = _rtk_rg_internal_GWMACSetup_stage2(wan_intf_idx,rg_db.arp[pArpEntry->idx].rtk_arp.nhIdx); errorno = 
_rtk_rg_internal_GWMACSetup(rg_db.arp[pSoftwareArpEntry->idx].ipv4Addr,rg_db.arp[pSoftwareArpEntry->idx].rtk_arp.nhIdx); if(errorno!=RT_ERR_RG_OK) goto RET_GLB_ERR; } } #endif } else if(rtEntry.process==L34_PROCESS_CPU) //for pppoe WAN { #if defined(CONFIG_RG_FLOW_BASED_PLATFORM) check_sw_arp: #endif //Check for software ARP _rtk_rg_softwareArpTableLookUp(rtidx,hw_static_info->gateway_ipv4_addr,&pSoftwareArpEntry,0); if(pSoftwareArpEntry!=NULL) { arpMissed=0; //20140529LUKE:fix STATIC ROUTE nexthop won't be set as static LUT //errorno = _rtk_rg_internal_GWMACSetup_stage2(wan_intf_idx,rg_db.arp[pSoftwareArpEntry->idx].rtk_arp.nhIdx); errorno = _rtk_rg_internal_GWMACSetup(rg_db.arp[pSoftwareArpEntry->idx].ipv4Addr,rg_db.arp[pSoftwareArpEntry->idx].rtk_arp.nhIdx); if(errorno!=RT_ERR_RG_OK) goto RET_GLB_ERR; } } else if(rtEntry.process==L34_PROCESS_NH&&hw_static_info->gw_mac_auto_learn_for_ipv4==0) { //20160329LUKE: for two exactly same WAN with routing process as nexthop, we can add nexthop without waste routing entry. arpMissed=0; DEBUG("original process is nexthop, and gw mac != zero, add to nexthop directly!"); errorno = _rtk_rg_internal_GWMACSetup(hw_static_info->gateway_ipv4_addr, l2Idx); if(errorno!=RT_ERR_RG_OK) goto RET_GLB_ERR; } } //Set up Internal External IP table for NAPT or STATIC ROUTE //20140328LUKE:STATIC ROUTE should always add IP table, even napt_enable is 0!! //20141001LUKE: PPTP should add EXTIP in pptpClientInfoAfterDial_set //20141020LUKE: L2TP should add EXTIP in l2tpClientInfoAfterDial_set //20150625LUKE: if we are napt, we need eip table setup, either here or _rtk_rg_internal_GWMACSetup_stage2 if(hw_static_info->napt_enable) { if(rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.extip_idx<0) { errorno=RT_ERR_RG_ENTRY_FULL; for(i=0;i<MAX_EXTIP_SW_TABLE_SIZE;i++) { if(!rg_db.extip[i].rtk_extip.valid) break; } if(i==MAX_EXTIP_SW_TABLE_SIZE)goto RET_GLB_ERR; rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.extip_idx=i; //keep bzero(&extipEntry,sizeof(rtk_l34_ext_intip_entry_t)); if(rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.nexthop_ipv4<0) { //Check for empty entry errorno=RT_ERR_RG_ENTRY_FULL; for(i=0;i<MAX_NEXTHOP_SW_TABLE_SIZE;i++) { if(rg_db.systemGlobal.nxpRefCount[i] == 0) break; } if(i==MAX_NEXTHOP_SW_TABLE_SIZE)goto RET_GLB_ERR; extipEntry.nhIdx = i; //Keep //Setup Nexthop table in nxtidx errorno=RT_ERR_RG_NXP_SET_FAIL; bzero(&nxpEt,sizeof(rtk_l34_nexthop_entry_t)); nxpEt.ifIdx=wan_intf_idx; // if WAN is PPPoE, LAN is untag. (keepPppoe=1 will send untag packet to WAN) if((wan_type == RTK_RG_PPPoE)||(wan_type == RTK_RG_PPPoE_DSLITE)){ nxpEt.type=L34_NH_PPPOE; #if defined(CONFIG_RTL9602C_SERIES) nxpEt.keepPppoe=2; /* If original tagged, keep. 
Otherwise add tag with PPPIDX session id */ #else nxpEt.keepPppoe=0; #endif nxpEt.pppoeIdx=rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.pppoe_idx; }else{ nxpEt.type=L34_NH_ETHER; nxpEt.keepPppoe=1; nxpEt.pppoeIdx=0; } // FIXME: here should to use binding remote host mac index, if port-binding is set nxpEt.nhIdx=rg_db.systemGlobal.defaultTrapLUTIdx; //use this DUMMY index to force packet TRAP to CPU rg_db.nexthop[extipEntry.nhIdx].valid=1; ret = RTK_L34_NEXTHOPTABLE_SET(extipEntry.nhIdx, &nxpEt); if(ret!=RT_ERR_OK)goto RET_GLB_ERR; rg_db.systemGlobal.nxpRefCount[extipEntry.nhIdx]++; //add for deleting it when del interface rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.nexthop_ipv4=extipEntry.nhIdx; } extipEntry.nhIdx=rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.nexthop_ipv4; extipEntry.intIpAddr=0; //napt special extipEntry.extIpAddr=hw_static_info->ip_addr; //20150107LUKE: update EIP from primitive WAN interface, not from STATIC ROUTE's WAN. if(((hw_static_info->ip_network_mask<=0xfffffff8)&&(hw_static_info->ip_addr&hw_static_info->ip_network_mask)==hw_static_info->ip_addr)|| //subnet IP (hw_static_info->ip_network_mask==0xffffffff&&wan_type!=RTK_RG_PPPoE&&wan_type!=RTK_RG_PPPoE_DSLITE&&wan_type!=RTK_RG_PPTP&&wan_type!=RTK_RG_L2TP)) //static host route with napt mode { for(i=0,ret=0;i<rg_db.systemGlobal.wanIntfTotalNum;i++) { //DEBUG("the wan type is %d, ip is %x",rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->wan_type,rg_db.systemGlobal.wanIntfGroup[i].p_intfInfo->p_wanStaticInfo->ip_addr); if((rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->wan_type!=RTK_RG_BRIDGE)&& ((rg_db.systemGlobal.wanIntfGroup[i].p_intfInfo->p_wanStaticInfo->ip_addr&rg_db.systemGlobal.wanIntfGroup[i].p_intfInfo->p_wanStaticInfo->ip_network_mask) ==(hw_static_info->gateway_ipv4_addr&rg_db.systemGlobal.wanIntfGroup[i].p_intfInfo->p_wanStaticInfo->ip_network_mask))&& (rg_db.systemGlobal.wanIntfGroup[i].p_intfInfo->p_wanStaticInfo->ip_network_mask>ret)) { extipEntry.extIpAddr=rg_db.systemGlobal.wanIntfGroup[i].p_intfInfo->p_wanStaticInfo->ip_addr; ret=rg_db.systemGlobal.wanIntfGroup[i].p_intfInfo->p_wanStaticInfo->ip_network_mask; } } if(ret==0)goto RET_GLB_ERR; } extipEntry.prival=0; extipEntry.pri=0; extipEntry.type=L34_EXTIP_TYPE_NAPT; extipEntry.valid=1; errorno=RT_ERR_RG_EXTIP_SET_FAIL; ret = RTK_L34_EXTINTIPTABLE_SET(rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.extip_idx, &extipEntry); if(ret!=RT_ERR_OK)goto RET_GLB_ERR; //DEBUG("set ext ip table %d as %x, nexthop is %d",wan_intf_idx, hw_static_info->ip_addr,rg_db.wantype[wan_intf_idx].rtk_wantype.nhIdx); rg_db.systemGlobal.nxpRefCount[rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.nexthop_ipv4]++; //nexthop reference by IP table } } } else { //if we are host route(255.255.255.255), must be manual add MAC //only add IP table, NEXTHOP table, and l2 table bzero(&macEntry,sizeof(rtk_rg_macEntry_t)); memcpy(macEntry.mac.octet,hw_static_info->gateway_mac_addr_for_ipv4.octet,ETHER_ADDR_LEN); macEntry.fid=rg_db.vlan[rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.egress_vlan_id].fid; macEntry.vlan_id=rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.egress_vlan_id; if(rg_db.vlan[macEntry.vlan_id].fidMode==VLAN_FID_IVL){ macEntry.isIVL=1; //IVL should refer VLAN's untag setting to decide tag or not }else{ macEntry.isIVL=0; #if defined(CONFIG_RTL9600_SERIES) || defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) 
//Because Forced_DMAC2CVID is turn on, the LUT's VLANID should enter zero if untag!! if(!rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.egress_vlan_tag_on) macEntry.vlan_id=0; #else // support ctag_if macEntry.ctag_if=(rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.egress_vlan_tag_on)?1:0; #endif } //20150527LUKE: for WWAN we should add remote gateway mac at ext-port0 if(rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wirelessWan!=RG_WWAN_WIRED) macEntry.port_idx=RTK_RG_EXT_PORT0; else macEntry.port_idx=rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.wan_port_idx; //20160428LUKE: transform from RGMII to PON #if !defined(CONFIG_RTL9602C_SERIES) if(rg_db.systemGlobal.pppoeGponSmallbandwithControl && (rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.wan_type)==RTK_RG_PPPoE && (rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.wan_port_idx)==RTK_RG_PORT_PON){ DEBUG("Special change WAN_PORT from PON to RGMII."); macEntry.port_idx=RTK_RG_PORT_RGMII; } #endif macEntry.static_entry=1; //won't age out macEntry.arp_used=1; //pointed by nexthop entry ret=rtk_rg_apollo_macEntry_add(&macEntry,&l2Idx); if(ret!=RT_ERR_RG_OK) goto RET_GLB_ERR; DEBUG("### add l2[%d]=%02x:%02x:%02x:%02x:%02x:%02x ###",l2Idx,macEntry.mac.octet[0],macEntry.mac.octet[1],macEntry.mac.octet[2],macEntry.mac.octet[3],macEntry.mac.octet[4],macEntry.mac.octet[5]); //20150527LUKE: keep WWAN wlan-dev index in rg_db.lut if(rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wirelessWan!=RG_WWAN_WIRED) rg_db.lut[l2Idx].wlan_device_idx=rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wirelessWan; errorno = _rtk_rg_internal_GWMACSetup_stage2(wan_intf_idx,l2Idx); if(errorno!=RT_ERR_RG_OK) goto RET_GLB_ERR; } } // TODO:IPv6 Neighbor Discovery has to be done here!!!! 
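//Added note (hedged): the IPv4 host-route branch above installs the manually supplied gateway
//MAC as a static, arp_used L2 entry and then binds it to this WAN interface's nexthop via
//_rtk_rg_internal_GWMACSetup_stage2(). A minimal caller-side sketch for this path, using only
//fields referenced in this file (wan_ip, gw_mac and wan_intf_idx are placeholders; the interface
//is assumed to have been added by rtk_rg_wanInterface_add with wan_type RTK_RG_STATIC):
//	rtk_rg_ipStaticInfo_t info;
//	bzero(&info, sizeof(info));
//	info.ip_addr = wan_ip;
//	info.ip_network_mask = 0xffffffff;	//host route: gateway MAC must be supplied manually
//	info.mtu = 1500;			//placeholder
//	info.gw_mac_auto_learn_for_ipv4 = 0;
//	memcpy(info.gateway_mac_addr_for_ipv4.octet, gw_mac, ETHER_ADDR_LEN);
//	rtk_rg_apollo_staticInfo_set(wan_intf_idx, &info);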
//Check Neighbor table first if we add them before, otherwise call Neighbor Discovery after that if(ipv6Enable==1 || (hw_static_info->ipv6_default_gateway_on&&(hw_static_info->ipv6_napt_enable==0)&&(hw_static_info->gw_mac_auto_learn_for_ipv6==0))) { bzero(&macEntry,sizeof(rtk_rg_macEntry_t)); if(v6RoutingAdd==1) { bzero(&zeroIPv6,sizeof(rtk_ipv6_addr_t)); if(hw_static_info->gw_mac_auto_learn_for_ipv6==0) { if(memcmp(&hw_static_info->gateway_mac_addr_for_ipv6, &zeroMAC, sizeof(rtk_mac_t))) //MAC != 0, otherwise do nothing { //Add gateway mac and Default route memcpy(macEntry.mac.octet,hw_static_info->gateway_mac_addr_for_ipv6.octet,ETHER_ADDR_LEN); //set SVL for lanIntf, patched in 20121203 //macEntry.fid=LAN_FID; //macEntry.isIVL=0; //20150527LUKE: for WWAN we should add remote gateway mac at ext-port0 if(rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wirelessWan!=RG_WWAN_WIRED) macEntry.port_idx=RTK_RG_EXT_PORT0; else macEntry.port_idx=rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.wan_port_idx; //20160428LUKE: transform from RGMII to PON #if !defined(CONFIG_RTL9602C_SERIES) if(rg_db.systemGlobal.pppoeGponSmallbandwithControl && (rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.wan_type)==RTK_RG_PPPoE && (rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.wan_port_idx)==RTK_RG_PORT_PON){ DEBUG("Special change WAN_PORT from PON to RGMII."); macEntry.port_idx=RTK_RG_PORT_RGMII; } #endif //macEntry.vlan_id=rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.egress_vlan_id; macEntry.fid=rg_db.vlan[rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.egress_vlan_id].fid; //set mac's vlanid by egress tagif setting macEntry.vlan_id=rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.egress_vlan_id; if(rg_db.vlan[macEntry.vlan_id].fidMode==VLAN_FID_IVL){ macEntry.isIVL=1; //IVL should refer VLAN's untag setting to decide tag or not }else{ macEntry.isIVL=0; #if defined(CONFIG_RTL9600_SERIES) || defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) //Because Forced_DMAC2CVID is turn on, the LUT's VLANID should enter zero if untag!! 
if(!rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.egress_vlan_tag_on) macEntry.vlan_id=0; #else // support ctag_if macEntry.ctag_if=(rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.egress_vlan_tag_on)?1:0; #endif } macEntry.static_entry=1; //won't age out macEntry.arp_used=1; //pointed by nexthop entry ret=rtk_rg_apollo_macEntry_add(&macEntry,&l2Idx); if(ret!=RT_ERR_RG_OK) goto RET_GLB_ERR; DEBUG("### add l2[%d]=%02x:%02x:%02x:%02x:%02x:%02x ###\n",l2Idx,macEntry.mac.octet[0],macEntry.mac.octet[1], macEntry.mac.octet[2],macEntry.mac.octet[3],macEntry.mac.octet[4],macEntry.mac.octet[5]); //20150527LUKE: keep WWAN wlan-dev index in rg_db.lut if(rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wirelessWan!=RG_WWAN_WIRED) rg_db.lut[l2Idx].wlan_device_idx=rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wirelessWan; if(memcmp(&hw_static_info->gateway_ipv6_addr,&zeroIPv6,sizeof(rtk_ipv6_addr_t))==0) //ipv6 == 0 { errorno = _rtk_rg_internal_IPV6GWMACSetup_stage2(wan_intf_idx,l2Idx); } else{ //20160428LUKE: transform from RGMII to PON #if !defined(CONFIG_RTL9602C_SERIES) if(rg_db.systemGlobal.pppoeGponSmallbandwithControl && (rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.wan_type)==RTK_RG_PPPoE && (rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.wan_port_idx)==RTK_RG_PORT_PON){ DEBUG("Special change WAN_PORT from PON to RGMII."); errorno = _rtk_rg_neighborAndMacEntryAdd(hw_static_info->gateway_ipv6_addr.ipv6_addr,rtv6idx,hw_static_info->gateway_mac_addr_for_ipv6.octet,RTK_RG_PORT_RGMII,FAIL,&gateway_NeighborOrMac_idx); } else #endif errorno = _rtk_rg_neighborAndMacEntryAdd(hw_static_info->gateway_ipv6_addr.ipv6_addr,rtv6idx,hw_static_info->gateway_mac_addr_for_ipv6.octet,rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.wan_port_idx,FAIL,&gateway_NeighborOrMac_idx); } if(errorno!=RT_ERR_RG_OK) goto RET_GLB_ERR; } } //Check if Neighbor and MAC had auto learned before if(memcmp(&hw_static_info->gateway_ipv6_addr,&zeroIPv6,sizeof(rtk_ipv6_addr_t))!=0) //ipv6 != 0 { //get hash index //ipv6HashIdx = _rtk_rg_IPv6NeighborHash(hw_static_info->gateway_ipv6_addr.ipv6_addr+8, rtv6idx); memcpy(neighborInfo.neighborEntry.interfaceId,hw_static_info->gateway_ipv6_addr.ipv6_addr+8,8); neighborInfo.neighborEntry.matchRouteIdx=rtv6idx; neighbor_valid_idx=-1; ret=rtk_rg_apollo_neighborEntry_find(&neighborInfo,&neighbor_valid_idx); if(ret==RT_ERR_RG_OK) { //Found DEBUG("Found old neighbor!! 
neighbor_valid_idx=%d",neighbor_valid_idx); errorno = _rtk_rg_internal_IPV6GWMACSetup(hw_static_info->gateway_ipv6_addr.ipv6_addr, rg_db.v6neighbor[neighbor_valid_idx].rtk_v6neighbor.l2Idx); if(errorno!=RT_ERR_RG_OK) goto RET_GLB_ERR; #ifdef CONFIG_RG_IPV6_NAPT_SUPPORT //DEBUG("hw_static_info->ipv6_napt_enable=%d, hw_static_info->ipv6_default_gateway_on=%d",hw_static_info->ipv6_napt_enable,hw_static_info->ipv6_default_gateway_on); if(hw_static_info->ipv6_napt_enable==1){//IPv6 NAPT Wan //Set software IPv6 Routing info rg_db.v6route[rtv6idx].internal = 1; //Routing is External (pure software information) //Set software IPv6 ExtIp info rg_db.v6Extip[wan_intf_idx].valid =1; memcpy(rg_db.v6Extip[wan_intf_idx].externalIp.ipv6_addr,hw_static_info->ipv6_addr.ipv6_addr,sizeof(rtk_ipv6_addr_t)); rg_db.v6Extip[wan_intf_idx].nextHopIdx = rg_db.wantype[rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.bind_wan_type_ipv6].rtk_wantype.nhIdx; DEBUG("### add v6ExtIp[%d] (NextHop=%d EXT_IP %02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x) ###",rtv6idx,rg_db.v6Extip[wan_intf_idx].nextHopIdx, rg_db.v6Extip[wan_intf_idx].externalIp.ipv6_addr[0],rg_db.v6Extip[wan_intf_idx].externalIp.ipv6_addr[1],rg_db.v6Extip[wan_intf_idx].externalIp.ipv6_addr[2],rg_db.v6Extip[wan_intf_idx].externalIp.ipv6_addr[3], rg_db.v6Extip[wan_intf_idx].externalIp.ipv6_addr[4],rg_db.v6Extip[wan_intf_idx].externalIp.ipv6_addr[5],rg_db.v6Extip[wan_intf_idx].externalIp.ipv6_addr[6],rg_db.v6Extip[wan_intf_idx].externalIp.ipv6_addr[7], rg_db.v6Extip[wan_intf_idx].externalIp.ipv6_addr[8],rg_db.v6Extip[wan_intf_idx].externalIp.ipv6_addr[9],rg_db.v6Extip[wan_intf_idx].externalIp.ipv6_addr[10],rg_db.v6Extip[wan_intf_idx].externalIp.ipv6_addr[11], rg_db.v6Extip[wan_intf_idx].externalIp.ipv6_addr[12],rg_db.v6Extip[wan_intf_idx].externalIp.ipv6_addr[13],rg_db.v6Extip[wan_intf_idx].externalIp.ipv6_addr[14],rg_db.v6Extip[wan_intf_idx].externalIp.ipv6_addr[15]); if(hw_static_info->ipv6_default_gateway_on){//default gateway routing/naptWan following the Wan setting. 
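//Added note (assumption): forcing internal=1 on the default IPv6 route entry here appears to keep
//default-route traffic on the software forwarding path so IPv6 NAPT can be applied (see the
//existing "NaptWan" remark); the link-local branch further below repeats the same pattern.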
TRACE("Set ipv6 defaulte route as external"); rg_db.v6route[V6_DEFAULT_ROUTE_IDX].internal=1; //NaptWan } } #endif } else if(gateway_NeighborOrMac_idx!=FAIL) //20140904LUKE: link-local case, therefore no neighbor, just return MAC idx { errorno = _rtk_rg_internal_IPV6GWMACSetup_stage2(wan_intf_idx,gateway_NeighborOrMac_idx); if(errorno!=RT_ERR_RG_OK) goto RET_GLB_ERR; #ifdef CONFIG_RG_IPV6_NAPT_SUPPORT //DEBUG("hw_static_info->ipv6_napt_enable=%d, hw_static_info->ipv6_default_gateway_on=%d",hw_static_info->ipv6_napt_enable,hw_static_info->ipv6_default_gateway_on); if(hw_static_info->ipv6_napt_enable==1){//IPv6 NAPT Wan //Set software IPv6 Routing info rg_db.v6route[rtv6idx].internal = 1; //Routing is External (pure software information) //Set software IPv6 ExtIp info rg_db.v6Extip[wan_intf_idx].valid =1; memcpy(rg_db.v6Extip[wan_intf_idx].externalIp.ipv6_addr,hw_static_info->ipv6_addr.ipv6_addr,sizeof(rtk_ipv6_addr_t)); //DEBUG("hw_static_info->ipv6_addr=(%02X%02X) ExtIp=(%02X%02X)",hw_static_info->ipv6_addr.ipv6_addr[0],hw_static_info->ipv6_addr.ipv6_addr[1],rg_db.v6Extip[rtv6idx].externalIp.ipv6_addr[0],rg_db.v6Extip[rtv6idx].externalIp.ipv6_addr[1]); //DEBUG("wan_intf_idx=%d bind_wan_type_ipv6=%d rtv6idx=%d rg_db.v6Extip[rtv6idx].nextHopIdx=%d",wan_intf_idx,rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.bind_wan_type_ipv6,rtv6idx,rg_db.v6Extip[rtv6idx].nextHopIdx); rg_db.v6Extip[wan_intf_idx].nextHopIdx = rg_db.wantype[rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.bind_wan_type_ipv6].rtk_wantype.nhIdx; if(hw_static_info->ipv6_default_gateway_on){//default gateway routing/naptWan following the Wan setting. TRACE("Set ipv6 defaulte route as external"); rg_db.v6route[V6_DEFAULT_ROUTE_IDX].internal=1; //NaptWan } } #endif } else neighborMissed=1; } } else { //if we are IPv6 host route(128), do nothing //only add NEXTHOP table, and l2 table bzero(&macEntry,sizeof(rtk_rg_macEntry_t)); memcpy(macEntry.mac.octet,hw_static_info->gateway_mac_addr_for_ipv6.octet,ETHER_ADDR_LEN); //set SVL for lanIntf, patched in 20121203 //macEntry.fid=LAN_FID; //macEntry.isIVL=0; //20150527LUKE: for WWAN we should add remote gateway mac at ext-port0 if(rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wirelessWan!=RG_WWAN_WIRED) macEntry.port_idx=RTK_RG_EXT_PORT0; else macEntry.port_idx=rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.wan_port_idx; //20160428LUKE: transform from RGMII to PON #if !defined(CONFIG_RTL9602C_SERIES) if(rg_db.systemGlobal.pppoeGponSmallbandwithControl && (rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.wan_type)==RTK_RG_PPPoE && (rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.wan_port_idx)==RTK_RG_PORT_PON){ DEBUG("Special change WAN_PORT from PON to RGMII."); macEntry.port_idx=RTK_RG_PORT_RGMII; } #endif macEntry.fid=rg_db.vlan[rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.egress_vlan_id].fid; //set mac's vlanid by egress tagif setting macEntry.vlan_id=rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.egress_vlan_id; if(rg_db.vlan[macEntry.vlan_id].fidMode==VLAN_FID_IVL){ macEntry.isIVL=1; //IVL should refer VLAN's untag setting to decide tag or not }else{ macEntry.isIVL=0; #if defined(CONFIG_RTL9600_SERIES) || defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) //Because Forced_DMAC2CVID is turn on, the LUT's VLANID should enter zero if untag!! 
if(!rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.egress_vlan_tag_on) macEntry.vlan_id=0; #else // support ctag_if macEntry.ctag_if=(rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.egress_vlan_tag_on)?1:0; #endif } //macEntry.vlan_id=rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.egress_vlan_id; macEntry.static_entry=1; //won't age out macEntry.arp_used=1; //pointed by nexthop entry ret=rtk_rg_apollo_macEntry_add(&macEntry,&l2Idx); if(ret!=RT_ERR_RG_OK) goto RET_GLB_ERR; DEBUG("### add l2[%d]=%02x:%02x:%02x:%02x:%02x:%02x ###\n",l2Idx,macEntry.mac.octet[0],macEntry.mac.octet[1], macEntry.mac.octet[2],macEntry.mac.octet[3],macEntry.mac.octet[4],macEntry.mac.octet[5]); //20150527LUKE: keep WWAN wlan-dev index in rg_db.lut if(rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wirelessWan!=RG_WWAN_WIRED) rg_db.lut[l2Idx].wlan_device_idx=rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wirelessWan; errorno = _rtk_rg_internal_IPV6GWMACSetup_stage2(wan_intf_idx,l2Idx); if(errorno!=RT_ERR_RG_OK) goto RET_GLB_ERR; } } // TODO:Call the initParam's routngAddByHwCallBack if(routingAdded==1 && rg_db.systemGlobal.initParam.routingAddByHwCallBack!=NULL) { bzero(&cb_routEt, sizeof(rtk_rg_ipv4RoutingEntry_t)); cb_routEt.dest_ip=hw_static_info->ip_addr; cb_routEt.ip_mask=hw_static_info->ip_network_mask; cb_routEt.nexthop=0; cb_routEt.wan_intf_idx=wan_intf_idx; rg_db.systemGlobal.initParam.routingAddByHwCallBack(&cb_routEt); } // TODO:Call the initParam's v6RoutingAddByHwCallBack if(v6RoutingAdd==1 && rg_db.systemGlobal.initParam.v6RoutingAddByHwCallBack!=NULL) { bzero(&cb_routv6Et, sizeof(rtk_rg_ipv6RoutingEntry_t)); memcpy(&cb_routv6Et.dest_ip,&hw_static_info->ipv6_addr,sizeof(rtk_ipv6_addr_t)); cb_routv6Et.prefix_len=hw_static_info->ipv6_mask_length; cb_routv6Et.NhOrIntfIdx=wan_intf_idx; cb_routv6Et.type=rtv6Entry.type; rg_db.systemGlobal.initParam.v6RoutingAddByHwCallBack(&cb_routv6Et); } // TODO:Use ARP protocol to find out default gateway's mac save in L2idx //20150109LUKE: for dslite, ipv4 nexthop equal to ipv6 if(arpMissed==1 && wan_type!=RTK_RG_DSLITE && wan_type!=RTK_RG_PPPoE_DSLITE) { #if defined(CONFIG_APOLLO_ROMEDRIVER) || defined(CONFIG_XDSL_ROMEDRIVER) rg_db.systemGlobal.intfArpRequest[wan_intf_idx].finished=0; rg_db.systemGlobal.intfArpRequest[wan_intf_idx].reqIp=hw_static_info->gateway_ipv4_addr; rg_db.systemGlobal.intfArpRequest[wan_intf_idx].gwMacReqCallBack=_rtk_rg_internal_GWMACSetup; #ifdef __KERNEL__ if(timer_pending(&rg_kernel.arpRequestTimer[wan_intf_idx])) del_timer(&rg_kernel.arpRequestTimer[wan_intf_idx]); init_timer(&rg_kernel.arpRequestTimer[wan_intf_idx]); rg_kernel.arpRequestTimer[wan_intf_idx].data = (unsigned long)wan_intf_idx; rg_kernel.arpRequestTimer[wan_intf_idx].function = _rtk_rg_arpRequestTimerFunc; rg_kernel.arpRequestTimerCounter[wan_intf_idx]=0; DEBUG("arp miss, request arp=%x\n",rg_db.systemGlobal.intfArpRequest[wan_intf_idx].reqIp); mod_timer(&rg_kernel.arpRequestTimer[wan_intf_idx], jiffies+(rg_db.systemGlobal.arp_requset_interval_sec*TICKTIME_PERIOD)); #endif #else //for lite romeDriver, here should not be reached errorno=RT_ERR_RG_GW_MAC_NOT_SET; goto RET_GLB_ERR; #endif } // TODO:IPv6 Neighbor Discovery has to be done here!!!! 
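//Added note: when the IPv4 gateway MAC is still unknown (arpMissed above), resolution is deferred
//to a periodic kernel timer (rg_kernel.arpRequestTimer[]); once the ARP reply arrives,
//_rtk_rg_internal_GWMACSetup() is expected to be invoked through the registered gwMacReqCallBack
//to finish the nexthop/L2 programming. The neighborMissed block below mirrors this for IPv6, using
//rg_kernel.neighborDiscoveryTimer[] and _rtk_rg_internal_IPV6GWMACSetup().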
if(neighborMissed==1) { #if defined(CONFIG_APOLLO_ROMEDRIVER) || defined(CONFIG_XDSL_ROMEDRIVER) rg_db.systemGlobal.intfNeighborDiscovery[wan_intf_idx].finished=0; memcpy(&rg_db.systemGlobal.intfNeighborDiscovery[wan_intf_idx].reqIp,&hw_static_info->gateway_ipv6_addr,sizeof(rtk_ipv6_addr_t)); rg_db.systemGlobal.intfNeighborDiscovery[wan_intf_idx].ipv6GwMacReqCallBack=_rtk_rg_internal_IPV6GWMACSetup; //DEBUG("wan intf is %d, function pointer is %p",wan_intf_idx,rg_db.systemGlobal.intfNeighborDiscovery[wan_intf_idx].ipv6GwMacReqCallBack); #ifdef __KERNEL__ if(timer_pending(&rg_kernel.neighborDiscoveryTimer[wan_intf_idx])) del_timer(&rg_kernel.neighborDiscoveryTimer[wan_intf_idx]); init_timer(&rg_kernel.neighborDiscoveryTimer[wan_intf_idx]); rg_kernel.neighborDiscoveryTimer[wan_intf_idx].data = (unsigned long)wan_intf_idx; rg_kernel.neighborDiscoveryTimer[wan_intf_idx].function = _rtk_rg_neighborDiscoveryTimerFunc; rg_kernel.neighborDiscoveryTimerCounter[wan_intf_idx]=0; DEBUG("neighbor miss, discovery neighbor =%08x:%08x:%08x:%08x\n",*(unsigned int *)(rg_db.systemGlobal.intfNeighborDiscovery[wan_intf_idx].reqIp.ipv6_addr), *(unsigned int *)(rg_db.systemGlobal.intfNeighborDiscovery[wan_intf_idx].reqIp.ipv6_addr+4), *(unsigned int *)(rg_db.systemGlobal.intfNeighborDiscovery[wan_intf_idx].reqIp.ipv6_addr+8), *(unsigned int *)(rg_db.systemGlobal.intfNeighborDiscovery[wan_intf_idx].reqIp.ipv6_addr+12)); mod_timer(&rg_kernel.neighborDiscoveryTimer[wan_intf_idx], jiffies+(rg_db.systemGlobal.arp_requset_interval_sec*TICKTIME_PERIOD)); #endif #else //for lite romeDriver, here should not be reached errorno=RT_ERR_RG_GW_MAC_NOT_SET; goto RET_GLB_ERR; #endif } DO_INTF_CALLBACK: //add wan-interfcae callback to sync protocal-stack if(rg_db.systemGlobal.initParam.interfaceAddByHwCallBack != NULL) { //rtk_rg_intfInfo_t intfInfo; //bzero(&intfInfo,sizeof(intfInfo)); //memcpy(&intfInfo, &rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo, sizeof(intfInfo)); rg_db.systemGlobal.initParam.interfaceAddByHwCallBack(&rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo,&wan_intf_idx); } #if CONFIG_ACL_EGRESS_WAN_INTF_TRANSFORM //20141224LUKE: since IP subnet is modified, we should rearrange ACL which use the WAN interface as egress interface if(rg_db.systemGlobal.acl_SW_egress_intf_type_zero_num && ip_update_state!=NO_IP_UPDATED) ASSERT_EQ(_rtk_rg_aclSWEntry_and_asic_rearrange(),RT_ERR_RG_OK); #endif assert_ok(_rtk_rg_shortCut_clear()); #if defined(CONFIG_RTL9602C_SERIES) //patch for mismatching mib ipv6 netif problem if((rg_db.systemGlobal.internalSupportMask & RTK_RG_INTERNAL_SUPPORT_BIT0)) { if(wan_type!=RTK_RG_BRIDGE && hw_static_info->ip_version==IPVER_V4V6 && hw_static_info->napt_enable==1) { rtk_l34_netif_entry_t intfV6Entry; memcpy(&intfV6Entry, &rg_db.netif[wan_intf_idx].rtk_netif, sizeof(rtk_l34_netif_entry_t)); intfV6Entry.ipAddr = 0; ret = RTK_L34_NETIFTABLE_SET(wan_intf_idx+(MAX_NETIF_SW_TABLE_SIZE/2), &intfV6Entry); if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_INTF_SET_FAIL); //memcpy(&rg_db.systemGlobal.interfaceInfo[wan_intf_idx+(MAX_NETIF_SW_TABLE_SIZE/2)], &rg_db.systemGlobal.interfaceInfo[wan_intf_idx], sizeof(rtk_rg_interface_info_global_t)); } } #endif #if defined(CONFIG_RTL9602C_SERIES) //20160418LUKE: for DSlite routing mode, we should trap TCP SYN for MSS clamping. 
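//Added note (assumption): trapping TCP SYN on DS-Lite WANs presumably lets the software path clamp
//the TCP MSS so the extra outer IPv6 header does not push frames past the tunnel MTU (see also the
//MAX_DSLITE_MTU_SIZE warning in rtk_rg_apollo_dsliteInfo_set below).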
if(wan_type==RTK_RG_PPPoE_DSLITE || wan_type==RTK_RG_DSLITE) _rtk_rg_dslite_routing_reserved_acl_decision(); #endif { int32 tmpNexthopv4Idx,tmpExtipIdx,tmpPPPoEIdx; tmpNexthopv4Idx =rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.nexthop_ipv4; tmpExtipIdx=rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.extip_idx; tmpPPPoEIdx =rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.pppoe_idx; //tmpNexthopv6Idx = rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.nexthop_ipv6 ; if((((tmpExtipIdx >=0) && (tmpExtipIdx <MAX_EXTIP_SW_TABLE_SIZE)) && rg_db.extip[tmpExtipIdx].valid==SOFTWARE_ONLY_ENTRY) || (((tmpNexthopv4Idx>=0)&& (tmpNexthopv4Idx <MAX_NEXTHOP_SW_TABLE_SIZE)) && rg_db.nexthop[tmpNexthopv4Idx].valid == SOFTWARE_ONLY_ENTRY) || ((tmpPPPoEIdx>=MAX_PPPOE_HW_TABLE_SIZE)&&(tmpPPPoEIdx<MAX_PPPOE_SW_TABLE_SIZE)) || ((wan_intf_idx>=MAX_NETIF_HW_TABLE_SIZE)&&(wan_intf_idx<MAX_NETIF_SW_TABLE_SIZE)) || (rg_db.l3[rtidx].valid==SOFTWARE_ONLY_ENTRY)) { if(rg_db.systemGlobal.interfaceInfo[wan_intf_idx].valid !=SOFTWARE_ONLY_ENTRY) { rtk_rg_aclAndCf_reserved_dip_mask_trap_t dip_mask_trap; bzero(&dip_mask_trap,sizeof(dip_mask_trap)); dip_mask_trap.dip=rg_db.l3[rtidx].rtk_l3.ipAddr; dip_mask_trap.mask =~((1<<(31-(rg_db.l3[rtidx].rtk_l3.ipMask)))-1); _rtk_rg_aclAndCfReservedRuleAdd(RTK_RG_ACLANDCF_RESERVED_RULE0_DIP_MASK_TRAP +rtidx, &dip_mask_trap); rg_db.systemGlobal.interfaceInfo[wan_intf_idx].valid=SOFTWARE_ONLY_ENTRY; WARNING("ReservedRuleAdd software data path ExtipIdx=%d Nexthopv4Idx=%d PPPoEIdx=%d netifIdx=%d L3Idx=%d",tmpExtipIdx,tmpNexthopv4Idx,tmpPPPoEIdx,wan_intf_idx,rtidx); } } if (rg_db.systemGlobal.interfaceInfo[wan_intf_idx].valid == SOFTWARE_ONLY_ENTRY)// pure software netif { int iterPort=0; //Port bind , trap spa=bindPort packet for(iterPort=0 ;iterPort <RTK_RG_PORT_CPU ; iterPort++ ) { if( rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.port_binding_mask.portmask & (1<<iterPort)) { _rtk_rg_aclAndCfReservedRuleAdd(RTK_RG_ACLANDCF_RESERVED_PORT0_TRAP + iterPort, NULL); WARNING("ReservedRuleAdd Port Bind trap Port=%d",iterPort); } } } } return (RT_ERR_RG_OK); RET_GLB_ERR: //Clear global variable wan_set_mask=0x1<<wan_intf_idx; rg_db.systemGlobal.wanInfoSet &= ~(wan_set_mask); bzero(rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.intf_name, 32); if(hw_static_info->ipv4_default_gateway_on == 1) //recovery default route interface index rg_db.systemGlobal.defaultRouteSet=-1; if(hw_static_info->ipv6_default_gateway_on == 1) //recovery default route interface index rg_db.systemGlobal.defaultIPV6RouteSet=-1; bzero(rg_db.systemGlobal.interfaceInfo[wan_intf_idx].p_wanStaticInfo, sizeof(rtk_rg_ipStaticInfo_t)); RET_ROUTE_ERR: //Delete the routing table entry if(rtidx>=0) { bzero(&rtEntry, sizeof(rtk_l34_routing_entry_t)); RTK_L34_ROUTINGTABLE_SET(rtidx, &rtEntry); } if(rtv6idx>=0) { bzero(&rtv6Entry, sizeof(rtk_ipv6Routing_entry_t)); RTK_L34_IPV6ROUTINGTABLE_SET(rtv6idx, &rtv6Entry); } RET_STATIC_ROUTE_ERR: //Delete the static routing table TRAP entry if(static_rtidx>=0) { bzero(&rtEntry, sizeof(rtk_l34_routing_entry_t)); RTK_L34_ROUTINGTABLE_SET(static_rtidx, &rtEntry); } if(static_rtv6idx>=0) { bzero(&rtv6Entry, sizeof(rtk_ipv6Routing_entry_t)); RTK_L34_IPV6ROUTINGTABLE_SET(static_rtv6idx, &rtv6Entry); } RET_CHECK_ERR: return (errorno); } int32 rtk_rg_ipv6_externalIp_set(int index, rtk_rg_table_v6ExtIp_t v6ExtIp_entry){ if(index<0 || index>MAX_IPV6_ROUTING_SW_TABLE_SIZE) return 
RT_ERR_RG_INVALID_PARAM; rg_db.v6Extip[index] = v6ExtIp_entry; return RT_ERR_RG_OK; } int32 _rtk_rg_ipv6_externalIp_get(int index, rtk_rg_table_v6ExtIp_t *v6ExtIp_entry){ if(index<0 || index>=MAX_NETIF_SW_TABLE_SIZE) return RT_ERR_RG_INVALID_PARAM; if(v6ExtIp_entry==NULL) return RT_ERR_RG_NULL_POINTER; *v6ExtIp_entry = rg_db.v6Extip[index] ; return RT_ERR_RG_OK; } rtk_rg_err_code_t rtk_rg_apollo_staticInfo_set(int wan_intf_idx, rtk_rg_ipStaticInfo_t *static_info) { int errorno;//,i,rtidx=-1,errorno,routingAdd=0,arp_valid_idx,arpMissed=0; // unsigned int input_ipmsk,last_arp=0; // rtk_l34_netif_entry_t intfEt; // rtk_l34_routing_entry_t rtEntry; //rtk_l34_ext_intip_entry_t extipEt; // rtk_rg_ipv4RoutingEntry_t cb_routEt; //Call after rtk_rg_wanInterface_add, so check if we had already add interface if(rg_db.systemGlobal.interfaceInfo[wan_intf_idx].valid == 0 || rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.is_wan != 1 || rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.wan_type != RTK_RG_STATIC) RETURN_ERR(RT_ERR_RG_ENTRY_NOT_EXIST); //Check if we are reentried //rg_lock(&rg_kernel.wanStaticCalled); errorno = _rtk_rg_internal_wanSet(wan_intf_idx, static_info); //Set up Internal External IP table for NAPT - we will do this at _rtk_rg_internal_GWMACSetup #if 0 if(static_info->napt_enable) { //interface table is 1-by-1 mapping to iP table //therefore we do not need to go through whole table //Checking table for availability /*for(i=0;i<8;i++) { memset(&extipEt, 0, sizeof(extipEt)); ret = rtk_l34_extIntIPTable_get(i, &extipEt); if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_EXTIP_FAIL); if(extipEt.valid == 0) break; } if(i==8 && extipEt.valid == 1) RETURN_ERR(RT_ERR_RG_ENTRY_FULL);*/ errorno=RT_ERR_RG_EXTIP_GET_FAIL; bzero(&extipEt, sizeof(rtk_l34_ext_intip_entry_t)); ret = rtk_l34_extIntIPTable_get(wan_intf_idx, &extipEt); if(ret!=RT_ERR_OK || extipEt.valid==1)goto RET_ROUTE_ERR; extipEt.intIpAddr=0; //napt special extipEt.extIpAddr=static_info->ip_addr; extipEt.nhIdx=0; //used 0 as reserved entry extipEt.prival=0; extipEt.pri=0; extipEt.type=L34_EXTIP_TYPE_NAPT; extipEt.valid=1; errorno=RT_ERR_RG_EXTIP_SET_FAIL; ret = RTK_L34_EXTINTIPTABLE_SET(wan_intf_idx, &extipEt); if(ret!=RT_ERR_OK)goto RET_IPTABLE_ERR; } #endif //RG_GLB_STATIC_CALLED=0; //clear the lock flag //rg_unlock(&RG_GLB_STATIC_CALLED); //return (RT_ERR_RG_OK); #if 0 RET_IPTABLE_ERR: //Delete the ip table entry if(static_info->napt_enable) { bzero(&extipEt, sizeof(rtk_l34_ext_intip_entry_t)); RTK_L34_EXTINTIPTABLE_SET(wan_intf_idx, &extipEt); } #endif //RG_GLB_STATIC_CALLED=0; //clear the lock flag //rg_unlock(&rg_kernel.wanStaticCalled); RETURN_ERR(errorno); } rtk_rg_err_code_t rtk_rg_apollo_dsliteInfo_set(int wan_intf_idx, rtk_rg_ipDslitStaticInfo_t *dslite_info) { int errorno,l2Idx; rtk_rg_macEntry_t macEntry; rtk_mac_t zeroMAC={{0}}; rtk_ipv6_addr_t zeroIPv6={{0}}; rtk_l34_dsliteMc_entry_t dsliteMcEntry; #if defined(CONFIG_RTL9602C_SERIES) int dsliteIdx=rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.dslite_idx; #else rtk_rg_aclAndCf_reserved_intf_dslite_trap_t intf_dslite_trap_para; #endif //Call after rtk_rg_wanInterface_add, so check if we had already add interface if(rg_db.systemGlobal.interfaceInfo[wan_intf_idx].valid == 0 || rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.is_wan != 1 || rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.wan_type != RTK_RG_DSLITE) RETURN_ERR(RT_ERR_RG_ENTRY_NOT_EXIST); 
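//Added note (illustrative sketch, not a verified example): the rest of rtk_rg_apollo_dsliteInfo_set
//validates the B4/AFTR IPv6 addresses and the AFTR MAC (or enables auto-learn), stores them in
//interfaceInfo[], runs _rtk_rg_internal_wanSet() with the embedded static_info, and then either
//starts AFTR neighbor discovery or programs the AFTR MAC directly. A caller-side sketch using only
//fields referenced below (b4_ip, aftr_ip and wan_intf_idx are placeholders):
//	rtk_rg_ipDslitStaticInfo_t ds;
//	bzero(&ds, sizeof(ds));
//	memcpy(ds.rtk_dslite.ipB4.ipv6_addr,   b4_ip,   IPV6_ADDR_LEN);
//	memcpy(ds.rtk_dslite.ipAftr.ipv6_addr, aftr_ip, IPV6_ADDR_LEN);
//	ds.aftr_mac_auto_learn = 1;		//or 0 with ds.aftr_mac_addr filled in
//	ds.static_info.mtu = 1460;		//placeholder; keep below MAX_DSLITE_MTU_SIZE
//	rtk_rg_apollo_dsliteInfo_set(wan_intf_idx, &ds);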
if(!memcmp(dslite_info->rtk_dslite.ipB4.ipv6_addr,zeroIPv6.ipv6_addr,IPV6_ADDR_LEN))RETURN_ERR(RT_ERR_RG_B4_IP_NOT_SET); if(!memcmp(dslite_info->rtk_dslite.ipAftr.ipv6_addr,zeroIPv6.ipv6_addr,IPV6_ADDR_LEN))RETURN_ERR(RT_ERR_RG_AFTR_IP_NOT_SET); if(dslite_info->aftr_mac_auto_learn==0 && !memcmp(dslite_info->aftr_mac_addr.octet,zeroMAC.octet,ETHER_ADDR_LEN)) RETURN_ERR(RT_ERR_RG_AFTR_MAC_NOT_SET); #if defined(CONFIG_RTL9602C_SERIES) if(dslite_info->rtk_dslite.hopLimit<=1)RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if(dslite_info->rtk_dslite.tcOpt>=RTK_L34_DSLITE_TC_OPT_END)RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if((dsliteIdx<0)||(!rg_db.dslite[dsliteIdx].rtk_dslite.valid))RETURN_ERR(RT_ERR_RG_DSLITE_UNINIT); if(rg_db.dslite[dsliteIdx].intfIdx!=wan_intf_idx)RETURN_ERR(RT_ERR_RG_DSLITE_UNMATCH); #endif //20160617LUKE: check if mtu too large for dual header packet if(dslite_info->static_info.mtu>MAX_DSLITE_MTU_SIZE) WARNING("Caution! MTU too large may cause outer IPv6 header fragmentation."); //Check if we are reentried //rg_lock(&rg_kernel.wanDsliteCalled); //store aftr in global variable memcpy(rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.dslite_info.rtk_dslite.ipB4.ipv6_addr, dslite_info->rtk_dslite.ipB4.ipv6_addr, IPV6_ADDR_LEN); memcpy(rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.dslite_info.rtk_dslite.ipAftr.ipv6_addr, dslite_info->rtk_dslite.ipAftr.ipv6_addr, IPV6_ADDR_LEN); rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.dslite_info.aftr_mac_auto_learn=dslite_info->aftr_mac_auto_learn; memcpy(rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.dslite_info.aftr_mac_addr.octet,dslite_info->aftr_mac_addr.octet,ETHER_ADDR_LEN); #if defined(CONFIG_RTL9602C_SERIES) rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.dslite_info.rtk_dslite.hopLimit=dslite_info->rtk_dslite.hopLimit&0xff; rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.dslite_info.rtk_dslite.flowLabel=dslite_info->rtk_dslite.flowLabel&0xfffff; rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.dslite_info.rtk_dslite.tcOpt=dslite_info->rtk_dslite.tcOpt; rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.dslite_info.rtk_dslite.tc=dslite_info->rtk_dslite.tc&0xff; #endif errorno = _rtk_rg_internal_wanSet(wan_intf_idx, &dslite_info->static_info); if(errorno!=RT_ERR_RG_OK) goto OUT; //add a dsliteMc entry hit all dsliteMc packet , remove dsliteMc ipv6 header bzero(&dsliteMcEntry,sizeof(dsliteMcEntry)); RTK_L34_DSLITEMULTICAST_SET(&dsliteMcEntry); //query aftr mac address if(dslite_info->aftr_mac_auto_learn) { rg_db.systemGlobal.intfNeighborDiscovery[wan_intf_idx+MAX_NETIF_SW_TABLE_SIZE].finished=0; memcpy(&rg_db.systemGlobal.intfNeighborDiscovery[wan_intf_idx+MAX_NETIF_SW_TABLE_SIZE].reqIp,&dslite_info->rtk_dslite.ipAftr,sizeof(rtk_ipv6_addr_t)); rg_db.systemGlobal.intfNeighborDiscovery[wan_intf_idx+MAX_NETIF_SW_TABLE_SIZE].ipv6GwMacReqCallBack=_rtk_rg_internal_IPV6AFTRMACSetup; DEBUG("wan intf is %d, function pointer is %p",wan_intf_idx,rg_db.systemGlobal.intfNeighborDiscovery[wan_intf_idx+MAX_NETIF_SW_TABLE_SIZE].ipv6GwMacReqCallBack); #ifdef __KERNEL__ if(timer_pending(&rg_kernel.neighborDiscoveryTimer[wan_intf_idx+MAX_NETIF_SW_TABLE_SIZE])) del_timer(&rg_kernel.neighborDiscoveryTimer[wan_intf_idx+MAX_NETIF_SW_TABLE_SIZE]); init_timer(&rg_kernel.neighborDiscoveryTimer[wan_intf_idx+MAX_NETIF_SW_TABLE_SIZE]); rg_kernel.neighborDiscoveryTimer[wan_intf_idx+MAX_NETIF_SW_TABLE_SIZE].data = (unsigned 
long)(wan_intf_idx+MAX_NETIF_SW_TABLE_SIZE); rg_kernel.neighborDiscoveryTimer[wan_intf_idx+MAX_NETIF_SW_TABLE_SIZE].function = _rtk_rg_AFTRDiscoveryTimerFunc; rg_kernel.neighborDiscoveryTimerCounter[wan_intf_idx+MAX_NETIF_SW_TABLE_SIZE]=0; DEBUG("aftr miss, discovery neighbor =%08x:%08x:%08x:%08x\n",*(unsigned int *)(rg_db.systemGlobal.intfNeighborDiscovery[wan_intf_idx+MAX_NETIF_SW_TABLE_SIZE].reqIp.ipv6_addr), *(unsigned int *)(rg_db.systemGlobal.intfNeighborDiscovery[wan_intf_idx+MAX_NETIF_SW_TABLE_SIZE].reqIp.ipv6_addr+4), *(unsigned int *)(rg_db.systemGlobal.intfNeighborDiscovery[wan_intf_idx+MAX_NETIF_SW_TABLE_SIZE].reqIp.ipv6_addr+8), *(unsigned int *)(rg_db.systemGlobal.intfNeighborDiscovery[wan_intf_idx+MAX_NETIF_SW_TABLE_SIZE].reqIp.ipv6_addr+12)); mod_timer(&rg_kernel.neighborDiscoveryTimer[wan_intf_idx+MAX_NETIF_SW_TABLE_SIZE], jiffies+(rg_db.systemGlobal.arp_requset_interval_sec*TICKTIME_PERIOD)); #endif } else { //Add gateway mac and Default route bzero(&macEntry,sizeof(rtk_rg_macEntry_t)); memcpy(macEntry.mac.octet,dslite_info->aftr_mac_addr.octet,ETHER_ADDR_LEN); //set SVL for lanIntf, patched in 20121203 //macEntry.fid=LAN_FID; //macEntry.isIVL=0; macEntry.port_idx=rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.wan_port_idx; //20160428LUKE: transform from RGMII to PON #if !defined(CONFIG_RTL9602C_SERIES) if(rg_db.systemGlobal.pppoeGponSmallbandwithControl && (rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.wan_type)==RTK_RG_PPPoE && (rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.wan_port_idx)==RTK_RG_PORT_PON){ DEBUG("Special change WAN_PORT from PON to RGMII."); macEntry.port_idx=RTK_RG_PORT_RGMII; } #endif //macEntry.vlan_id=rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.egress_vlan_id; macEntry.fid=rg_db.vlan[rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.egress_vlan_id].fid; //set mac's vlanid by egress tagif setting macEntry.vlan_id=rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.egress_vlan_id; if(rg_db.vlan[macEntry.vlan_id].fidMode==VLAN_FID_IVL){ macEntry.isIVL=1; //IVL should refer VLAN's untag setting to decide tag or not }else{ macEntry.isIVL=0; #if defined(CONFIG_RTL9600_SERIES) || defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) //Because Forced_DMAC2CVID is turn on, the LUT's VLANID should enter zero if untag!! 
if(!rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.egress_vlan_tag_on) macEntry.vlan_id=0; #else // support ctag_if macEntry.ctag_if=(rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.egress_vlan_tag_on)?1:0; #endif } macEntry.static_entry=1; //won't age out macEntry.arp_used=1; //pointed by nexthop entry errorno=rtk_rg_apollo_macEntry_add(&macEntry,&l2Idx); DEBUG("### add AFTR l2[%d]=%02x:%02x:%02x:%02x:%02x:%02x ###\n",l2Idx,macEntry.mac.octet[0],macEntry.mac.octet[1], macEntry.mac.octet[2],macEntry.mac.octet[3],macEntry.mac.octet[4],macEntry.mac.octet[5]); if(errorno!=RT_ERR_RG_OK)goto OUT; errorno=_rtk_rg_internal_GWMACSetup_stage2(wan_intf_idx, l2Idx); if(errorno!=RT_ERR_RG_OK)goto OUT; #if defined(CONFIG_RTL9602C_SERIES) dslite_info->rtk_dslite.index=dsliteIdx; dslite_info->rtk_dslite.valid=1; ASSERT_EQ(RTK_L34_DSLITEINFTABLE_SET(&dslite_info->rtk_dslite),RT_ERR_OK); #else //enable reserve ACL trap if(wan_intf_idx < MAX_NETIF_HW_TABLE_SIZE) { memcpy(intf_dslite_trap_para.ipv6_dip.ipv6_addr, dslite_info->rtk_dslite.ipB4.ipv6_addr, IPV6_ADDR_LEN); memcpy(intf_dslite_trap_para.smac.octet, dslite_info->aftr_mac_addr.octet, ETHER_ADDR_LEN); _rtk_rg_aclAndCfReservedRuleAdd(RTK_RG_ACLANDCF_RESERVED_INTF0_DSLITE_TRAP+wan_intf_idx,&intf_dslite_trap_para); } #endif } OUT: //rg_unlock(&rg_kernel.wanDsliteCalled); RETURN_ERR(errorno); } rtk_rg_err_code_t rtk_rg_apollo_dhcpRequest_set(int wan_intf_idx) { DEBUG("%s is called!!!",__func__); //add dhcpRequestByHwCallBack callback to call dhcpc { if(rg_db.systemGlobal.initParam.dhcpRequestByHwCallBack != NULL) { rg_db.systemGlobal.initParam.dhcpRequestByHwCallBack(&wan_intf_idx); } } return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_dhcpClientInfo_set(int wan_intf_idx, rtk_rg_ipDhcpClientInfo_t *dhcpClient_info) { #if 0 int i,ret,rtidx=-1,errorno,routingAdd=0,arp_valid_idx,arpMissed=0; unsigned int input_ipmsk,last_arp=0; rtk_l34_routing_entry_t rtEntry; //rtk_l34_ext_intip_entry_t extipEt; rtk_rg_ipv4RoutingEntry_t cb_routEt; #endif //rtk_l34_netif_entry_t intfEntry; int errorno; rtk_rg_ip_version_t ip_version=dhcpClient_info->hw_info.ip_version; rtk_ipv6_addr_t zeroV6={{0}}; //Check parameter //if(dhcpClient_info == NULL) //RETURN_ERR(RT_ERR_RG_NULL_POINTER); //if(wan_intf_idx<0 || wan_intf_idx>MAX_NETIF_HW_TABLE_SIZE-1) //RETURN_ERR(RT_ERR_RG_INVALID_PARAM); //if(dhcpClient_info->hw_info.ip_addr == 0 || dhcpClient_info->hw_info.ip_network_mask == 0 || dhcpClient_info->hw_info.mtu == 0) //RETURN_ERR(RT_ERR_RG_INVALID_PARAM); //Call after rtk_rg_wanInterface_add, so check if we had already add interface //bzero(&intfEntry, sizeof(rtk_l34_netif_entry_t)); //ret = rtk_l34_netifTable_get(wan_intf_idx, &intfEntry); if(rg_db.systemGlobal.interfaceInfo[wan_intf_idx].valid == 0 || rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.is_wan != 1 || rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.wan_type != RTK_RG_DHCP) RETURN_ERR(RT_ERR_RG_ENTRY_NOT_EXIST); if(dhcpClient_info->stauts==DHCP_STATUS_LEASED) { if(ip_version!=IPVER_V6ONLY && dhcpClient_info->hw_info.ip_addr==0)RETURN_ERR(RT_ERR_RG_DHCP_LEASED_INVALID_IP); if(ip_version!=IPVER_V4ONLY && !memcmp(&dhcpClient_info->hw_info.ipv6_addr,&zeroV6,sizeof(rtk_ipv6_addr_t)))RETURN_ERR(RT_ERR_RG_DHCP_LEASED_INVALID_IP); } //Check if we are reentried //rg_lock(&rg_kernel.wanDHCPCalled); //20141107LUKE: if we get status=DHCP_STATUS_RELEASED, we will check ip_version and clear it!! 
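//Added note: on DHCP_STATUS_RELEASED the stored per-interface static info is copied back and the
//IPv4 and/or IPv6 fields are zeroed according to hw_info.ip_version, so the following
//_rtk_rg_internal_wanSet() call re-applies the interface without the released address family.
//A lease-side sketch (illustrative only; leased_ip/leased_mask are placeholders, and the field
//really is spelled 'stauts' in this SDK):
//	rtk_rg_ipDhcpClientInfo_t dhcp;
//	bzero(&dhcp, sizeof(dhcp));
//	dhcp.stauts = DHCP_STATUS_LEASED;
//	dhcp.hw_info.ip_version = IPVER_V4ONLY;
//	dhcp.hw_info.ip_addr = leased_ip;		//must be non-zero when leased
//	dhcp.hw_info.ip_network_mask = leased_mask;
//	rtk_rg_apollo_dhcpClientInfo_set(wan_intf_idx, &dhcp);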
if(dhcpClient_info->stauts==DHCP_STATUS_RELEASED && ((rg_db.systemGlobal.wanInfoSet & (0x1<<wan_intf_idx)) > 0)) { memcpy(&dhcpClient_info->hw_info,rg_db.systemGlobal.interfaceInfo[wan_intf_idx].p_wanStaticInfo,sizeof(rtk_rg_ipStaticInfo_t)); if(ip_version!=IPVER_V6ONLY) { //Clear V4 setting!! dhcpClient_info->hw_info.napt_enable=0; dhcpClient_info->hw_info.ip_addr=0; dhcpClient_info->hw_info.ip_network_mask=0; dhcpClient_info->hw_info.ipv4_default_gateway_on=0; dhcpClient_info->hw_info.gateway_ipv4_addr=0; dhcpClient_info->hw_info.gw_mac_auto_learn_for_ipv4=0; bzero(&dhcpClient_info->hw_info.gateway_mac_addr_for_ipv4,sizeof(rtk_mac_t)); } if(ip_version!=IPVER_V4ONLY) { //Clear V6 setting!! bzero(dhcpClient_info->hw_info.ipv6_addr.ipv6_addr,IPV6_ADDR_LEN); dhcpClient_info->hw_info.ipv6_mask_length=0; dhcpClient_info->hw_info.ipv6_default_gateway_on=0; bzero(dhcpClient_info->hw_info.gateway_ipv6_addr.ipv6_addr,IPV6_ADDR_LEN); dhcpClient_info->hw_info.gw_mac_auto_learn_for_ipv6=0; bzero(&dhcpClient_info->hw_info.gateway_mac_addr_for_ipv6,sizeof(rtk_mac_t)); } //WARNING("the dhcp status is RELEASED!!"); } errorno = _rtk_rg_internal_wanSet(wan_intf_idx, &dhcpClient_info->hw_info); if(errorno==RT_ERR_RG_OK) rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.dhcp_client_info.stauts=dhcpClient_info->stauts; //RG_GLB_DHCP_CALLED=0; //clear the lock flag //rg_unlock(&rg_kernel.wanDHCPCalled); RETURN_ERR(errorno); } rtk_rg_err_code_t rtk_rg_apollo_pppoeClientInfoBeforeDial_set(int wan_intf_idx, rtk_rg_pppoeClientInfoBeforeDial_t *app_info) { //int ret; //rtk_l34_netif_entry_t intfEntry; //rtk_l34_routing_entry_t rtEntry; //Check parameter if(app_info == NULL) RETURN_ERR(RT_ERR_RG_NULL_POINTER); if(wan_intf_idx<0 || wan_intf_idx>(MAX_NETIF_SW_TABLE_SIZE-1)) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); //Call after rtk_rg_wanInterface_add, so check if we had already add interface //bzero(&intfEntry, sizeof(rtk_l34_netif_entry_t)); //ret = rtk_l34_netifTable_get(wan_intf_idx, &intfEntry); if(rg_db.systemGlobal.interfaceInfo[wan_intf_idx].valid == 0 || rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.is_wan != 1 || rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.wan_type != RTK_RG_PPPoE) RETURN_ERR(RT_ERR_RG_ENTRY_NOT_EXIST); //Check if there is default route set up before, if so, we have to keep original setting here and set //default route to TRAP /*bzero(&rtEntry,sizeof(rtk_l34_routing_entry_t)); rtEntry.process=L34_PROCESS_CPU; rtEntry.valid=1; //turn on ret = RTK_L34_ROUTINGTABLE_SET(RTK_L34_ROUTINGTABLE_SET, &rtEntry); //set default route setting to TRAP, keep other setting if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_ROUTE_SET_FAIL);*/ //Save in global variable //bzero(rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.pppoe_info.before_dial.username, 4); //memcpy(rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.pppoe_info.before_dial.username, app_info->username, 4); bzero(rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.pppoe_info.before_dial.username, 32); memcpy(rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.pppoe_info.before_dial.username, app_info->username, 32); //bzero(rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.pppoe_info.before_dial.password, 4); //memcpy(rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.pppoe_info.before_dial.password, app_info->password, 4); 
bzero(rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.pppoe_info.before_dial.password, 32); memcpy(rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.pppoe_info.before_dial.password, app_info->password, 32); rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.pppoe_info.before_dial.auth_type=app_info->auth_type; rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.pppoe_info.before_dial.pppoe_proxy_enable=app_info->pppoe_proxy_enable; rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.pppoe_info.before_dial.max_pppoe_proxy_num=app_info->max_pppoe_proxy_num; rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.pppoe_info.before_dial.auto_reconnect=app_info->auto_reconnect; rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.pppoe_info.before_dial.dial_on_demond=app_info->dial_on_demond; rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.pppoe_info.before_dial.idle_timeout_secs=app_info->idle_timeout_secs; rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.pppoe_info.before_dial.stauts=app_info->stauts; rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.pppoe_info.before_dial.dialOnDemondCallBack=app_info->dialOnDemondCallBack; rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.pppoe_info.before_dial.idleTimeOutCallBack=app_info->idleTimeOutCallBack; //rg_db.systemGlobal.pppoeBeforeCalled = 1; //add pppoeBeforeDiagByHwCCallBack callback to call spppdctl { if(rg_db.systemGlobal.initParam.pppoeBeforeDiagByHwCallBack != NULL) { rtk_rg_pppoeClientInfoBeforeDial_t before_dial; bzero(&before_dial,sizeof(rtk_rg_pppoeClientInfoBeforeDial_t)); memcpy(&before_dial, &rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.pppoe_info.before_dial, sizeof(rtk_rg_pppoeClientInfoBeforeDial_t)); rg_db.systemGlobal.initParam.pppoeBeforeDiagByHwCallBack(&before_dial,&wan_intf_idx); } } return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_pppoeClientInfoAfterDial_set(int wan_intf_idx, rtk_rg_pppoeClientInfoAfterDial_t *clientPppoe_info) { #if 0 int i,ret,rtidx=-1,errorno,routingAdd=0,arp_valid_idx,arpMissed=0; unsigned int input_ipmsk,last_arp=0; rtk_l34_netif_entry_t intfEntry; rtk_l34_routing_entry_t rtEntry; //rtk_l34_ext_intip_entry_t extipEt; rtk_rg_ipv4RoutingEntry_t cb_routEt; #endif //rtk_l34_netif_entry_t intfEntry; rtk_l34_pppoe_entry_t pppoeEt; rtk_rg_aclAndCf_reserved_pppoe_intf_multicast_routing_trap_t intf_gmac; int ret,errorno; if(clientPppoe_info->sessionId==0 ) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); //Check parameter // if(rg_db.systemGlobal.pppoeBeforeCalled == 0) //did not call pppoe_before in the past period // RETURN_ERR(RT_ERR_RG_PPPOE_UNINIT); //if(clientPppoe_info == NULL) //RETURN_ERR(RT_ERR_RG_NULL_POINTER); //if(wan_intf_idx<0 || wan_intf_idx>MAX_NETIF_HW_TABLE_SIZE-1) //RETURN_ERR(RT_ERR_RG_INVALID_PARAM); //if(clientPppoe_info->hw_info.ip_addr == 0 || clientPppoe_info->hw_info.ip_network_mask == 0 || clientPppoe_info->hw_info.mtu == 0) //RETURN_ERR(RT_ERR_RG_INVALID_PARAM); //Call after rtk_rg_wanInterface_add, so check if we had already add interface //bzero(&intfEntry, sizeof(rtk_l34_netif_entry_t)); //ret = rtk_l34_netifTable_get(wan_intf_idx, &intfEntry); if(rg_db.systemGlobal.interfaceInfo[wan_intf_idx].valid == 0 || rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.is_wan != 1 || rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.wan_type != RTK_RG_PPPoE) 
RETURN_ERR(RT_ERR_RG_ENTRY_NOT_EXIST); //Check if we are reentried //rg_lock(&rg_kernel.wanPPPOEAfterCalled); if((clientPppoe_info->hw_info.ip_network_mask>0 && clientPppoe_info->hw_info.ip_network_mask!=0xffffffff) || (clientPppoe_info->hw_info.ipv6_mask_length>0 &&clientPppoe_info->hw_info.ipv6_mask_length!=128) ) WARNING("PPPoE WAN SUBNET WARNING , point-to-point connection should mask all (v4mask=255.255.255.255 v6mask=128)"); errorno = _rtk_rg_internal_wanSet(wan_intf_idx, &clientPppoe_info->hw_info); if(errorno==RT_ERR_RG_OK) { rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.pppoe_info.after_dial.sessionId=clientPppoe_info->sessionId; //Set up PPPoE table and next hop table for this interface errorno=RT_ERR_RG_PPPOE_SET_FAIL; bzero(&pppoeEt, sizeof(rtk_l34_pppoe_entry_t)); pppoeEt.sessionID=clientPppoe_info->sessionId; ret = RTK_L34_PPPOETABLE_SET(rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.pppoe_idx, &pppoeEt); if(ret!=RT_ERR_OK)goto RET_PPPOE_ERR; #if defined(CONFIG_RG_FLOW_BASED_PLATFORM) // flow based: pppoe session id was saved in netif table ret = _rtk_rg_netifPPPoESession_set(wan_intf_idx, pppoeEt.sessionID); if(ret!=RT_ERR_OK)goto RET_PPPOE_ERR; #endif #ifdef CONFIG_RG_PPPOE_PASSTHROUGH #if 1 //pppoe_passthru acl disable //Check if LAN has PPPoE Pass Through /*if(rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.egress_vlan_tag_on == 0) { for(i=0;i<rg_db.systemGlobal.lanIntfTotalNum;i++) { if(rg_db.systemGlobal.lanIntfGroup[i].p_intfInfo->p_lanIntfConf->pppoe_passThrough == 1) break; } if(i<rg_db.systemGlobal.lanIntfTotalNum) {*/ //add ACL to transfer WAN to LAN packet with WAN's VLAN //1 FIXME: Patch for 201305171900, pppoe pass through has to be disabled!! //errorno=RT_ERR_OK; //errorno = _rtk_rg_aclFilterSessionID_and_VIDRemarking_add(wan_intf_idx, rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.egress_vlan_id); errorno = _rtk_rg_acl_reserved_pppoePassthrough_IntfisPppoewan_add(wan_intf_idx,rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.gmac); if(errorno!=RT_ERR_OK)goto RET_PPPOE_ERR; DEBUG("add VID logging success!! 
to Gmac_%02x:%02x:%02x:%02x:%02x:%02x will be logging(NOP)", rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.gmac.octet[0], rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.gmac.octet[1], rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.gmac.octet[2], rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.gmac.octet[3], rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.gmac.octet[4], rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.gmac.octet[5]); //} //} #endif #endif } else RETURN_ERR(errorno); //20150429LUKE: create reserved ACL for multicast packet if(wan_intf_idx < MAX_NETIF_HW_TABLE_SIZE) { bzero(&intf_gmac,sizeof(intf_gmac)); memcpy(&intf_gmac.gmac.octet,rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.gmac.octet,ETHER_ADDR_LEN); _rtk_rg_aclAndCfReservedRuleAdd(RTK_RG_ACLANDCF_RESERVED_PPPOE_INTF0_MC_ROUTING_TRAP+wan_intf_idx,&intf_gmac); } #if defined(CONFIG_RTL9602C_SERIES) || defined(CONFIG_RTL9607C_SERIES) //9602c PPPoE Action is normal #else if(rg_db.systemGlobal.internalSupportMask&RTK_RG_INTERNAL_SUPPORT_BIT4) { rtk_rg_aclAndCf_reserved_pppoe_multicast_intf_permit_t pppoe_multicast_permit; bzero(&pppoe_multicast_permit,sizeof(pppoe_multicast_permit)); if( rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.is_wan && rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.wan_type==RTK_RG_PPPoE && rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.pppoe_idx >=0 && rg_db.systemGlobal.interfaceInfo[wan_intf_idx].valid == SOFTWARE_HARDWARE_SYNC_ENTRY){ memcpy(&pppoe_multicast_permit.gmac.octet,rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.gmac.octet,ETHER_ADDR_LEN); //add theintf PPPoE Multicast permit rule if the Interface can be add to H/W _rtk_rg_aclAndCfReservedRuleAdd(RTK_RG_ACLANDCF_RESERVED_PPPoE_MULTICAST_INTF0_PERMIT+wan_intf_idx,&pppoe_multicast_permit); _rtk_rg_aclAndCfReservedRuleAdd(RTK_RG_ACLANDCF_RESERVED_PPPoE_MULTICAST_DEFAULT_TRAP, NULL); } } #endif errorno=RT_ERR_RG_OK; goto RET_SUCCESS; RET_PPPOE_ERR: //Delete the pppoe etnry bzero(&pppoeEt, sizeof(rtk_l34_pppoe_entry_t)); RTK_L34_PPPOETABLE_SET(rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.pppoe_idx, &pppoeEt); #if defined(CONFIG_RG_FLOW_BASED_PLATFORM) _rtk_rg_netifPPPoESession_set(wan_intf_idx, 0); #endif RET_SUCCESS: //RG_GLB_PPPOE_AFTER_CALLED=0; //clear the lock flag //rg_unlock(&rg_kernel.wanPPPOEAfterCalled); RETURN_ERR(errorno); } rtk_rg_err_code_t rtk_rg_apollo_pppoeInterfaceIdleTime_get(int intfIdx,uint32 *idleSec){ if(rg_db.systemGlobal.interfaceInfo[intfIdx].valid == 0 || rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.is_wan != 1 || rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.wan_intf.wan_intf_conf.wan_type != RTK_RG_PPPoE || rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.wan_intf.pppoe_idx<0) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); *idleSec = rg_db.pppoe[rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.wan_intf.pppoe_idx].idleSecs; return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_pptpClientInfoBeforeDial_set(int wan_intf_idx, rtk_rg_pptpClientInfoBeforeDial_t *app_info) { //Check parameter if(app_info == NULL) RETURN_ERR(RT_ERR_RG_NULL_POINTER); if(wan_intf_idx<0 || wan_intf_idx>(MAX_NETIF_SW_TABLE_SIZE-1)) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); //Call 
after rtk_rg_wanInterface_add, so check if we had already add interface if(rg_db.systemGlobal.interfaceInfo[wan_intf_idx].valid == 0 || rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.is_wan != 1 || rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.wan_type != RTK_RG_PPTP) RETURN_ERR(RT_ERR_RG_ENTRY_NOT_EXIST); //Save in global variable bzero(&rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.pptp_info.before_dial,sizeof(rtk_rg_pptpClientInfoBeforeDial_t)); memcpy(&rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.pptp_info.before_dial,app_info,sizeof(rtk_rg_pptpClientInfoBeforeDial_t)); //add pptpBeforeDiagByHwCCallBack callback to call spppctl if(rg_db.systemGlobal.initParam.pptpBeforeDialByHwCallBack != NULL) { rtk_rg_pptpClientInfoBeforeDial_t before_dial; bzero(&before_dial,sizeof(rtk_rg_pptpClientInfoBeforeDial_t)); memcpy(&before_dial, &rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.pptp_info.before_dial, sizeof(rtk_rg_pptpClientInfoBeforeDial_t)); rg_db.systemGlobal.initParam.pptpBeforeDialByHwCallBack(&before_dial,&wan_intf_idx); } return RT_ERR_RG_OK; } rtk_rg_err_code_t rtk_rg_apollo_pptpClientInfoAfterDial_set(int wan_intf_idx, rtk_rg_pptpClientInfoAfterDial_t *clientPptp_info) { int errorno,l2Idx; rtk_mac_t zeroMAC={{0}}; rtk_rg_macEntry_t macEntry; rtk_rg_ipStaticInfo_t hw_info; //Check parameter if(clientPptp_info == NULL) RETURN_ERR(RT_ERR_RG_NULL_POINTER); if(wan_intf_idx<0 || wan_intf_idx> (MAX_NETIF_SW_TABLE_SIZE-1)) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); //Call after rtk_rg_wanInterface_add, so check if we had already add interface if(rg_db.systemGlobal.interfaceInfo[wan_intf_idx].valid == 0 || rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.is_wan != 1 || rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.wan_type != RTK_RG_PPTP) RETURN_ERR(RT_ERR_RG_ENTRY_NOT_EXIST); if(clientPptp_info->hw_info.gw_mac_auto_learn_for_ipv4==0 && !memcmp(clientPptp_info->hw_info.gateway_mac_addr_for_ipv4.octet,zeroMAC.octet,ETHER_ADDR_LEN)) RETURN_ERR(RT_ERR_RG_PPTP_MAC_NOT_SET); //Check if we are reentried //rg_lock(&rg_kernel.wanPPTPAfterCalled); memcpy(&hw_info,&clientPptp_info->hw_info,sizeof(rtk_rg_ipStaticInfo_t)); hw_info.gateway_ipv4_addr=0; errorno = _rtk_rg_internal_wanSet(wan_intf_idx, &hw_info); if(errorno==RT_ERR_RG_OK) { memcpy(&rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.pptp_info.after_dial,clientPptp_info,sizeof(rtk_rg_pptpClientInfoAfterDial_t)); rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.pptp_info.sw_gre_header_server_sequence=0; rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.pptp_info.sw_gre_header_server_sequence_started=0; if(clientPptp_info->hw_info.gw_mac_auto_learn_for_ipv4) { _rtk_rg_PPTPLearningTimerInitialize(wan_intf_idx); } else { //Add gateway mac and Default route bzero(&macEntry,sizeof(rtk_rg_macEntry_t)); memcpy(macEntry.mac.octet,clientPptp_info->hw_info.gateway_mac_addr_for_ipv4.octet,ETHER_ADDR_LEN); //set SVL for lanIntf, patched in 20121203 //macEntry.fid=LAN_FID; //macEntry.isIVL=0; macEntry.port_idx=rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.wan_port_idx; //20160428LUKE: transform from RGMII to PON #if !defined(CONFIG_RTL9602C_SERIES) if(rg_db.systemGlobal.pppoeGponSmallbandwithControl && (rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.wan_type)==RTK_RG_PPPoE && 
(rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.wan_port_idx)==RTK_RG_PORT_PON){ DEBUG("Special change WAN_PORT from PON to RGMII."); macEntry.port_idx=RTK_RG_PORT_RGMII; } #endif //macEntry.vlan_id=rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.egress_vlan_id; macEntry.fid=rg_db.vlan[rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.egress_vlan_id].fid; //set mac's vlanid by egress tagif setting macEntry.vlan_id=rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.egress_vlan_id; if(rg_db.vlan[macEntry.vlan_id].fidMode==VLAN_FID_IVL){ macEntry.isIVL=1; //IVL should refer VLAN's untag setting to decide tag or not }else{ macEntry.isIVL=0; #if defined(CONFIG_RTL9600_SERIES) || defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) //Because Forced_DMAC2CVID is turn on, the LUT's VLANID should enter zero if untag!! if(!rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.egress_vlan_tag_on) macEntry.vlan_id=0; #else // support ctag_if macEntry.ctag_if=(rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.egress_vlan_tag_on)?1:0; #endif } macEntry.static_entry=1; //won't age out macEntry.arp_used=1; //pointed by nexthop entry errorno=rtk_rg_apollo_macEntry_add(&macEntry,&l2Idx); DEBUG("### add PPTP l2[%d]=%02x:%02x:%02x:%02x:%02x:%02x ###\n",l2Idx,macEntry.mac.octet[0],macEntry.mac.octet[1], macEntry.mac.octet[2],macEntry.mac.octet[3],macEntry.mac.octet[4],macEntry.mac.octet[5]); if(errorno!=RT_ERR_RG_OK)goto OUT; errorno=_rtk_rg_internal_GWMACSetup_stage2(wan_intf_idx, l2Idx); if(errorno!=RT_ERR_RG_OK)goto OUT; } } OUT: //rg_unlock(&rg_kernel.wanPPTPAfterCalled); return (errorno); } rtk_rg_err_code_t rtk_rg_apollo_l2tpClientInfoBeforeDial_set(int wan_intf_idx, rtk_rg_l2tpClientInfoBeforeDial_t *app_info) { //Check parameter if(app_info == NULL) RETURN_ERR(RT_ERR_RG_NULL_POINTER); if(wan_intf_idx<0 || wan_intf_idx>MAX_NETIF_SW_TABLE_SIZE-1) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); //Call after rtk_rg_wanInterface_add, so check if we had already add interface if(rg_db.systemGlobal.interfaceInfo[wan_intf_idx].valid == 0 || rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.is_wan != 1 || rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.wan_type != RTK_RG_L2TP) RETURN_ERR(RT_ERR_RG_ENTRY_NOT_EXIST); //Save in global variable bzero(&rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.l2tp_info.before_dial, sizeof(rtk_rg_l2tpClientInfoBeforeDial_t)); memcpy(&rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.l2tp_info.before_dial, app_info, sizeof(rtk_rg_l2tpClientInfoBeforeDial_t)); //add l2tpBeforeDiagByHwCCallBack callback to call spppctl if(rg_db.systemGlobal.initParam.l2tpBeforeDialByHwCallBack != NULL) { rtk_rg_l2tpClientInfoBeforeDial_t before_dial; bzero(&before_dial,sizeof(rtk_rg_l2tpClientInfoBeforeDial_t)); memcpy(&before_dial, &rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.l2tp_info.before_dial, sizeof(rtk_rg_l2tpClientInfoBeforeDial_t)); rg_db.systemGlobal.initParam.l2tpBeforeDialByHwCallBack(&before_dial,&wan_intf_idx); } return RT_ERR_RG_OK; } rtk_rg_err_code_t rtk_rg_apollo_l2tpClientInfoAfterDial_set(int wan_intf_idx, rtk_rg_l2tpClientInfoAfterDial_t *clientL2tp_info) { int errorno,l2Idx; rtk_mac_t zeroMAC={{0}}; rtk_rg_macEntry_t macEntry; rtk_rg_ipStaticInfo_t hw_info; //Check parameter if(clientL2tp_info == NULL) 
RETURN_ERR(RT_ERR_RG_NULL_POINTER); if(wan_intf_idx<0 || wan_intf_idx> (MAX_NETIF_SW_TABLE_SIZE-1)) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); //Call after rtk_rg_wanInterface_add, so check if we had already add interface if(rg_db.systemGlobal.interfaceInfo[wan_intf_idx].valid == 0 || rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.is_wan != 1 || rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.wan_type != RTK_RG_L2TP) RETURN_ERR(RT_ERR_RG_ENTRY_NOT_EXIST); if(clientL2tp_info->hw_info.gw_mac_auto_learn_for_ipv4==0 && !memcmp(clientL2tp_info->hw_info.gateway_mac_addr_for_ipv4.octet,zeroMAC.octet,ETHER_ADDR_LEN)) RETURN_ERR(RT_ERR_RG_L2TP_MAC_NOT_SET); //Check if we are reentried //rg_lock(&rg_kernel.wanL2TPAfterCalled); memcpy(&hw_info,&clientL2tp_info->hw_info,sizeof(rtk_rg_ipStaticInfo_t)); hw_info.gateway_ipv4_addr=0; errorno = _rtk_rg_internal_wanSet(wan_intf_idx, &hw_info); if(errorno==RT_ERR_RG_OK) { memcpy(&rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.l2tp_info.after_dial,clientL2tp_info,sizeof(rtk_rg_l2tpClientInfoAfterDial_t)); if(clientL2tp_info->hw_info.gw_mac_auto_learn_for_ipv4) { _rtk_rg_L2TPLearningTimerInitialize(wan_intf_idx); } else { //Add gateway mac and Default route bzero(&macEntry,sizeof(rtk_rg_macEntry_t)); memcpy(macEntry.mac.octet,clientL2tp_info->hw_info.gateway_mac_addr_for_ipv4.octet,ETHER_ADDR_LEN); //set SVL for lanIntf, patched in 20121203 //macEntry.fid=LAN_FID; //macEntry.isIVL=0; macEntry.port_idx=rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.wan_port_idx; //macEntry.vlan_id=rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.egress_vlan_id; macEntry.fid=rg_db.vlan[rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.egress_vlan_id].fid; //set mac's vlanid by egress tagif setting macEntry.vlan_id=rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.egress_vlan_id; if(rg_db.vlan[macEntry.vlan_id].fidMode==VLAN_FID_IVL){ macEntry.isIVL=1; //IVL should refer VLAN's untag setting to decide tag or not }else{ macEntry.isIVL=0; #if defined(CONFIG_RTL9600_SERIES) || defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) //Because Forced_DMAC2CVID is turn on, the LUT's VLANID should enter zero if untag!! 
if(!rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.egress_vlan_tag_on) macEntry.vlan_id=0; #else // support ctag_if macEntry.ctag_if=(rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.egress_vlan_tag_on)?1:0; #endif } macEntry.static_entry=1; //won't age out macEntry.arp_used=1; //pointed by nexthop entry errorno=rtk_rg_apollo_macEntry_add(&macEntry,&l2Idx); DEBUG("### add l2tp l2[%d]=%02x:%02x:%02x:%02x:%02x:%02x ###\n",l2Idx,macEntry.mac.octet[0],macEntry.mac.octet[1], macEntry.mac.octet[2],macEntry.mac.octet[3],macEntry.mac.octet[4],macEntry.mac.octet[5]); if(errorno!=RT_ERR_RG_OK)goto OUT; errorno=_rtk_rg_internal_GWMACSetup_stage2(wan_intf_idx, l2Idx); if(errorno!=RT_ERR_RG_OK)goto OUT; } } OUT: //rg_unlock(&rg_kernel.wanL2TPAfterCalled); return (errorno); } rtk_rg_err_code_t rtk_rg_apollo_pppoeDsliteInfoBeforeDial_set(int wan_intf_idx, rtk_rg_pppoeClientInfoBeforeDial_t *app_info) { //Check parameter if(app_info == NULL) RETURN_ERR(RT_ERR_RG_NULL_POINTER); if(wan_intf_idx<0 || wan_intf_idx> (MAX_NETIF_SW_TABLE_SIZE-1)) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); //Call after rtk_rg_wanInterface_add, so check if we had already add interface if(rg_db.systemGlobal.interfaceInfo[wan_intf_idx].valid == 0 || rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.is_wan != 1 || rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.wan_type != RTK_RG_PPPoE_DSLITE) RETURN_ERR(RT_ERR_RG_ENTRY_NOT_EXIST); //Save in global variable bzero(rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.pppoe_dslite_info.before_dial.username, 32); memcpy(rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.pppoe_dslite_info.before_dial.username, app_info->username, 32); bzero(rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.pppoe_dslite_info.before_dial.password, 32); memcpy(rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.pppoe_dslite_info.before_dial.password, app_info->password, 32); rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.pppoe_dslite_info.before_dial.auth_type=app_info->auth_type; rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.pppoe_dslite_info.before_dial.pppoe_proxy_enable=app_info->pppoe_proxy_enable; rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.pppoe_dslite_info.before_dial.max_pppoe_proxy_num=app_info->max_pppoe_proxy_num; rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.pppoe_dslite_info.before_dial.auto_reconnect=app_info->auto_reconnect; rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.pppoe_dslite_info.before_dial.dial_on_demond=app_info->dial_on_demond; rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.pppoe_dslite_info.before_dial.idle_timeout_secs=app_info->idle_timeout_secs; rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.pppoe_dslite_info.before_dial.stauts=app_info->stauts; rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.pppoe_dslite_info.before_dial.dialOnDemondCallBack=app_info->dialOnDemondCallBack; rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.pppoe_dslite_info.before_dial.idleTimeOutCallBack=app_info->idleTimeOutCallBack; //add pppoeDsliteBeforeDiagByHwCallBack callback to call spppdctl if(rg_db.systemGlobal.initParam.pppoeDsliteBeforeDialByHwCallBack != NULL) { rtk_rg_pppoeClientInfoBeforeDial_t before_dial; 
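	/* The stored before-dial settings are copied onto the stack and handed to the
	 * user-registered pppoeDsliteBeforeDialByHwCallBack together with the WAN interface index,
	 * so the callback never has to reach into rg_db directly (per the comment above, it is
	 * used to call spppdctl). Illustrative registration sketch -- the hook name, return type
	 * and the init-time assignment are assumptions; only the callback pointer and its two
	 * arguments are taken from the call below:
	 *
	 *   int my_dslite_dial_hook(rtk_rg_pppoeClientInfoBeforeDial_t *info, int *wan_idx)
	 *   {
	 *       // e.g. hand info->username / info->password to the PPP dialer for *wan_idx
	 *       return 0;
	 *   }
	 *   ...
	 *   initParam.pppoeDsliteBeforeDialByHwCallBack = my_dslite_dial_hook;
	 */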
		bzero(&before_dial,sizeof(rtk_rg_pppoeClientInfoBeforeDial_t));
		memcpy(&before_dial, &rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.pppoe_dslite_info.before_dial, sizeof(rtk_rg_pppoeClientInfoBeforeDial_t));
		rg_db.systemGlobal.initParam.pppoeDsliteBeforeDialByHwCallBack(&before_dial,&wan_intf_idx);
	}

	return (RT_ERR_RG_OK);
}

rtk_rg_err_code_t rtk_rg_apollo_pppoeDsliteInfoAfterDial_set(int wan_intf_idx, rtk_rg_pppoeDsliteInfoAfterDial_t *pppoeDslite_info)
{
	rtk_l34_pppoe_entry_t pppoeEt;
	int ret,errorno,l2Idx;
	rtk_rg_macEntry_t macEntry;
	rtk_mac_t zeroMAC={{0}};
	rtk_ipv6_addr_t zeroIPv6={{0}};
	rtk_l34_dsliteMc_entry_t dsliteMcEntry;
#if defined(CONFIG_RTL9602C_SERIES)
	int dsliteIdx=rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.dslite_idx;
#else
	rtk_rg_aclAndCf_reserved_intf_dslite_trap_t intf_dslite_trap_para;
#endif

	if(pppoeDslite_info->sessionId==0 ) RETURN_ERR(RT_ERR_RG_INVALID_PARAM);

	//Call after rtk_rg_wanInterface_add, so check that the interface has already been added
	if(rg_db.systemGlobal.interfaceInfo[wan_intf_idx].valid == 0 ||
		rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.is_wan != 1 ||
		rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.wan_type != RTK_RG_PPPoE_DSLITE)
		RETURN_ERR(RT_ERR_RG_ENTRY_NOT_EXIST);

	if(!memcmp(pppoeDslite_info->dslite_hw_info.rtk_dslite.ipB4.ipv6_addr,zeroIPv6.ipv6_addr,IPV6_ADDR_LEN))RETURN_ERR(RT_ERR_RG_B4_IP_NOT_SET);
	if(!memcmp(pppoeDslite_info->dslite_hw_info.rtk_dslite.ipAftr.ipv6_addr,zeroIPv6.ipv6_addr,IPV6_ADDR_LEN))RETURN_ERR(RT_ERR_RG_AFTR_IP_NOT_SET);
	if(pppoeDslite_info->dslite_hw_info.aftr_mac_auto_learn==0 && !memcmp(pppoeDslite_info->dslite_hw_info.aftr_mac_addr.octet,zeroMAC.octet,ETHER_ADDR_LEN)) RETURN_ERR(RT_ERR_RG_AFTR_MAC_NOT_SET);
#if defined(CONFIG_RTL9602C_SERIES)
	if(pppoeDslite_info->dslite_hw_info.rtk_dslite.tcOpt>=RTK_L34_DSLITE_TC_OPT_END)RETURN_ERR(RT_ERR_RG_INVALID_PARAM);
	if(pppoeDslite_info->dslite_hw_info.rtk_dslite.hopLimit <=1)RETURN_ERR(RT_ERR_RG_INVALID_PARAM);
	if((dsliteIdx<0)||(!rg_db.dslite[dsliteIdx].rtk_dslite.valid))RETURN_ERR(RT_ERR_RG_DSLITE_UNINIT);
	if(rg_db.dslite[dsliteIdx].intfIdx!=wan_intf_idx)RETURN_ERR(RT_ERR_RG_DSLITE_UNMATCH);
#endif

	//20160617LUKE: check if the MTU is too large for a dual-header packet
	if(pppoeDslite_info->dslite_hw_info.static_info.mtu>MAX_PPPOEDSLITE_MTU_SIZE)
		WARNING("Caution! MTU too large may cause outer IPv6 header fragmentation.");
	if((pppoeDslite_info->dslite_hw_info.static_info.ip_network_mask>0 && pppoeDslite_info->dslite_hw_info.static_info.ip_network_mask!=0xffffffff) ||
		(pppoeDslite_info->dslite_hw_info.static_info.ipv6_mask_length>0 && pppoeDslite_info->dslite_hw_info.static_info.ipv6_mask_length!=128))
		WARNING("PPPoE WAN SUBNET WARNING , point-to-point connection should mask all (v4mask=255.255.255.255 v6mask=128)");

	//Check if we are re-entered
	//rg_lock(&rg_kernel.wanPPPOEDSLITEAfterCalled);

	//store aftr in global variable
	memcpy(rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.pppoe_dslite_info.after_dial.dslite_hw_info.rtk_dslite.ipB4.ipv6_addr, pppoeDslite_info->dslite_hw_info.rtk_dslite.ipB4.ipv6_addr, IPV6_ADDR_LEN);
	memcpy(rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.pppoe_dslite_info.after_dial.dslite_hw_info.rtk_dslite.ipAftr.ipv6_addr, pppoeDslite_info->dslite_hw_info.rtk_dslite.ipAftr.ipv6_addr, IPV6_ADDR_LEN);
	rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.pppoe_dslite_info.after_dial.dslite_hw_info.aftr_mac_auto_learn=pppoeDslite_info->dslite_hw_info.aftr_mac_auto_learn;
	memcpy(rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.pppoe_dslite_info.after_dial.dslite_hw_info.aftr_mac_addr.octet,pppoeDslite_info->dslite_hw_info.aftr_mac_addr.octet,ETHER_ADDR_LEN);
#if defined(CONFIG_RTL9602C_SERIES)
	rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.pppoe_dslite_info.after_dial.dslite_hw_info.rtk_dslite.hopLimit=pppoeDslite_info->dslite_hw_info.rtk_dslite.hopLimit&0xff;
	rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.pppoe_dslite_info.after_dial.dslite_hw_info.rtk_dslite.flowLabel=pppoeDslite_info->dslite_hw_info.rtk_dslite.flowLabel&0xfffff;
	rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.pppoe_dslite_info.after_dial.dslite_hw_info.rtk_dslite.tcOpt=pppoeDslite_info->dslite_hw_info.rtk_dslite.tcOpt;
	rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.pppoe_dslite_info.after_dial.dslite_hw_info.rtk_dslite.tc=pppoeDslite_info->dslite_hw_info.rtk_dslite.tc&0xff;
#endif

	errorno = _rtk_rg_internal_wanSet(wan_intf_idx, &pppoeDslite_info->dslite_hw_info.static_info);
	if(errorno==RT_ERR_RG_OK)
	{
		//Add gateway mac and Default route
		bzero(&macEntry,sizeof(rtk_rg_macEntry_t));
		memcpy(macEntry.mac.octet,pppoeDslite_info->dslite_hw_info.aftr_mac_addr.octet,ETHER_ADDR_LEN);
		//set SVL for lanIntf, patched in 20121203
		//macEntry.fid=LAN_FID;
		//macEntry.isIVL=0;
		macEntry.port_idx=rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.wan_port_idx;
		//macEntry.vlan_id=rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.egress_vlan_id;
		macEntry.fid=rg_db.vlan[rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.egress_vlan_id].fid;
		//set mac's vlanid by egress tagif setting
		macEntry.vlan_id=rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.egress_vlan_id;
		if(rg_db.vlan[macEntry.vlan_id].fidMode==VLAN_FID_IVL){
			macEntry.isIVL=1;	//IVL should refer to the VLAN's untag setting to decide tag or not
		}else{
			macEntry.isIVL=0;
#if defined(CONFIG_RTL9600_SERIES) || defined(CONFIG_XDSL_NEW_HWNAT_DRIVER)
			//Because Forced_DMAC2CVID is turned on, the LUT's VLANID should enter zero if untag!!
			if(!rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.egress_vlan_tag_on)
				macEntry.vlan_id=0;
#else
			// support ctag_if
			macEntry.ctag_if=(rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.egress_vlan_tag_on)?1:0;
#endif
		}
		macEntry.static_entry=1;	//won't age out
		macEntry.arp_used=1;	//pointed by nexthop entry
		errorno=rtk_rg_apollo_macEntry_add(&macEntry,&l2Idx);
		DEBUG("### add AFTR l2[%d]=%02x:%02x:%02x:%02x:%02x:%02x ###\n",l2Idx,macEntry.mac.octet[0],macEntry.mac.octet[1], macEntry.mac.octet[2],macEntry.mac.octet[3],macEntry.mac.octet[4],macEntry.mac.octet[5]);
		if(errorno!=RT_ERR_RG_OK)goto RET_PPPOE_ERR;
		errorno=_rtk_rg_internal_GWMACSetup_stage2(wan_intf_idx, l2Idx);
		if(errorno!=RT_ERR_RG_OK)goto RET_PPPOE_ERR;

		rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.pppoe_dslite_info.after_dial.sessionId=pppoeDslite_info->sessionId;

		//Set up PPPoE table and next hop table for this interface
		errorno=RT_ERR_RG_PPPOE_SET_FAIL;
		bzero(&pppoeEt, sizeof(rtk_l34_pppoe_entry_t));
		pppoeEt.sessionID=pppoeDslite_info->sessionId;
		ret = RTK_L34_PPPOETABLE_SET(rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.pppoe_idx, &pppoeEt);
		if(ret!=RT_ERR_OK)goto RET_PPPOE_ERR;
#if defined(CONFIG_RG_FLOW_BASED_PLATFORM)
		// flow based: pppoe session id was saved in netif table
		ret = _rtk_rg_netifPPPoESession_set(wan_intf_idx, pppoeEt.sessionID);
		if(ret!=RT_ERR_OK)goto RET_PPPOE_ERR;
#endif
	}
	else
		RETURN_ERR(errorno);

	//add a dsliteMc entry to hit all dsliteMc packets and remove the dsliteMc ipv6 header
	bzero(&dsliteMcEntry,sizeof(dsliteMcEntry));
	RTK_L34_DSLITEMULTICAST_SET(&dsliteMcEntry);

#if defined(CONFIG_RTL9602C_SERIES)
	pppoeDslite_info->dslite_hw_info.rtk_dslite.index=dsliteIdx;
	pppoeDslite_info->dslite_hw_info.rtk_dslite.valid=1;
	ASSERT_EQ(RTK_L34_DSLITEINFTABLE_SET(&pppoeDslite_info->dslite_hw_info.rtk_dslite),RT_ERR_OK);
#else
	//enable reserved ACL trap
	if(wan_intf_idx < MAX_NETIF_HW_TABLE_SIZE)
	{
		memcpy(intf_dslite_trap_para.ipv6_dip.ipv6_addr, pppoeDslite_info->dslite_hw_info.rtk_dslite.ipB4.ipv6_addr, IPV6_ADDR_LEN);
		memcpy(intf_dslite_trap_para.smac.octet, pppoeDslite_info->dslite_hw_info.aftr_mac_addr.octet, ETHER_ADDR_LEN);
		_rtk_rg_aclAndCfReservedRuleAdd(RTK_RG_ACLANDCF_RESERVED_INTF0_DSLITE_TRAP+wan_intf_idx,&intf_dslite_trap_para);
	}
#endif

	errorno=RT_ERR_RG_OK;
	goto RET_SUCCESS;

RET_PPPOE_ERR:
	//Delete the pppoe entry
	bzero(&pppoeEt, sizeof(rtk_l34_pppoe_entry_t));
	RTK_L34_PPPOETABLE_SET(rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.pppoe_idx, &pppoeEt);
#if defined(CONFIG_RG_FLOW_BASED_PLATFORM)
	_rtk_rg_netifPPPoESession_set(wan_intf_idx, 0);
#endif

RET_SUCCESS:
	//rg_unlock(&rg_kernel.wanPPPOEDSLITEAfterCalled);
	RETURN_ERR(errorno);
}

rtk_rg_err_code_t rtk_rg_apollo_svlanTpid_set(uint32 svlan_tag_id){
	assert_ok(RTK_SVLAN_TPIDENTRY_SET(0, svlan_tag_id));
	return (RT_ERR_RG_OK);
}

rtk_rg_err_code_t rtk_rg_apollo_svlanTpid_get(uint32 *pSvlanTagId){
	assert_ok(rtk_svlan_tpidEntry_get(0, pSvlanTagId));
	if(*pSvlanTagId!=rg_db.systemGlobal.tpid)WARNING("Svlan TPID2 is different between software and hardware. Please check if RG APIs is mixed with RTK APIs!");
	return (RT_ERR_RG_OK);
}

rtk_rg_err_code_t rtk_rg_apollo_svlanServicePort_set(rtk_port_t port, rtk_enable_t enable)
{
	//DEBUG("\ndebug: rtk_rg_apollo_svlanServicePort_set port=0x%x enable=%d\n", port, enable);
	assert_ok(RTK_SVLAN_SERVICEPORT_SET(port, enable));
	if (rg_db.systemGlobal.service_pmsk.portmask)
		rg_kernel.stag_enable=RTK_RG_ENABLED;
	else
		rg_kernel.stag_enable=RTK_RG_DISABLED;
	_rtk_rg_default_svlan_manipulate();
	return (RT_ERR_RG_OK);
}

rtk_rg_err_code_t rtk_rg_apollo_svlanServicePort_get(rtk_port_t port, rtk_enable_t *pEnable)
{
	if(pEnable==NULL) RETURN_ERR(RT_ERR_RG_NULL_POINTER);
	assert_ok(RTK_SVLAN_SERVICEPORT_GET(port, pEnable));
	if((*pEnable==ENABLED && ((rg_db.systemGlobal.service_pmsk.portmask&(0x1<<port))==0))|| (*pEnable==DISABLED && (rg_db.systemGlobal.service_pmsk.portmask&(0x1<<port))))
		WARNING("Svlan service port is different between software and hardware. Please check if RG APIs is mixed with RTK APIs!");
	return (RT_ERR_RG_OK);
}

rtk_rg_err_code_t rtk_rg_apollo_stpBlockingPortmask_set(rtk_rg_portmask_t portMask)
{
	rtk_rg_port_isolation_t isolation_entry;
	int i;
	rg_db.systemGlobal.stpBlockingPortmask.portmask = (portMask.portmask & RTK_RG_ALL_MAC_PORTMASK);	//CPU, LAN and WAN port
	//printk("\nrtk_rg_apollo_stpBlockingPortmask_set Mask=0x%x\n",Mask);
	if(rg_db.systemGlobal.stpBlockingPortmask.portmask)
	{
		_rtk_rg_aclAndCfReservedRuleDel(RTK_RG_ACLANDCF_RESERVED_STPBLOCKING);
		_rtk_rg_aclAndCfReservedRuleAdd(RTK_RG_ACLANDCF_RESERVED_STPBLOCKING,NULL);
		if(rg_db.systemGlobal.storedInfo.valid == 0){
			rg_db.systemGlobal.storedInfo.mask.portmask = rg_db.systemGlobal.stpBlockingPortmask.portmask;
			for(i=0;i<=RTK_RG_PORT_CPU;i++){
				rg_db.systemGlobal.storedInfo.portmask[i].portmask = rg_db.systemGlobal.portIsolation[i].portmask;
				isolation_entry.port = i;
				isolation_entry.portmask.portmask = ((~portMask.portmask) & rg_db.systemGlobal.portIsolation[i].portmask);
				rtk_rg_apollo_portIsolation_set(isolation_entry);
			}
			rg_db.systemGlobal.storedInfo.valid = 1;
		}
	}
	else //portmask is zero
	{
		_rtk_rg_aclAndCfReservedRuleDel(RTK_RG_ACLANDCF_RESERVED_STPBLOCKING);
		if(rg_db.systemGlobal.storedInfo.valid == 1){
			rg_db.systemGlobal.storedInfo.valid = 0;
			for(i=0;i<=RTK_RG_PORT_CPU;i++){
				isolation_entry.port = i;
				isolation_entry.portmask.portmask = rg_db.systemGlobal.storedInfo.portmask[i].portmask;
				rtk_rg_apollo_portIsolation_set(isolation_entry);
			}
		}
	}
	return (RT_ERR_RG_OK);
}

rtk_rg_err_code_t rtk_rg_apollo_stpBlockingPortmask_get(rtk_rg_portmask_t *pportMask)
{
	*pportMask = rg_db.systemGlobal.stpBlockingPortmask;
	//printk("\nrtk_rg_apollo_stpBlockingPortmask_get Mask=0x%x\n",*pMask);
	return (RT_ERR_RG_OK);
}

//VLAN function
rtk_rg_err_code_t rtk_rg_apollo_cvlan_add(rtk_rg_cvlan_info_t *cvlan_info)
{
	int i,ret,errorno,VLAN_USED_BY_INTERFACE=0;
	rtk_vlan_t vlanID;
	rtk_fidMode_t fidMode;
	rtk_portmask_t mac_pmask,ext_pmask,untag_pmask;

	//Check parameter
	if(rg_db.systemGlobal.initParam.macBasedTagDecision && cvlan_info->isIVL)	//IVL can not be set when DMAC2CVID is turned on
		RETURN_ERR(RT_ERR_RG_INVALID_PARAM);

	//Check if VLAN init
	if(rg_db.systemGlobal.vlanInit==0)
		RETURN_ERR(RT_ERR_RG_NOT_INIT);

#ifdef CONFIG_MASTER_WLAN0_ENABLE
	//20160524LUKE: check wlan-device existence
	_rtk_rg_check_wlan_device_exist_or_not();
#endif

	vlanID=cvlan_info->vlanId;

#if !defined(CONFIG_RTL9602C_SERIES)
	if(rg_db.systemGlobal.pppoeGponSmallbandwithControl && (cvlan_info->memberPortMask.portmask & (1<<RTK_RG_PORT_PON))){
		DEBUG("patch for pppoeGponSmallbandwithControl,so add RGMII to memberPortMask.");
		cvlan_info->memberPortMask.portmask |= (1<<RTK_RG_PORT_RGMII);
	}
#endif

	//Check the VLAN ID, it can not be used in interface setting or vlan-binding
	//20150615LUKE: for more flexible usage, CVLAN add can forcibly override the interface's VLAN setting. MUST BE CAREFUL WHEN USING!!
	for(i=0;i<rg_db.systemGlobal.lanIntfTotalNum;i++)
		if(vlanID==rg_db.systemGlobal.lanIntfGroup[i].p_intfInfo->p_lanIntfConf->intf_vlan_id){ VLAN_USED_BY_INTERFACE=1;break;}//RETURN_ERR(RT_ERR_RG_VLAN_USED_BY_INTERFACE);
	for(i=0;i<rg_db.systemGlobal.wanIntfTotalNum;i++)
		if(vlanID==rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_id){ VLAN_USED_BY_INTERFACE=1;break;}//RETURN_ERR(RT_ERR_RG_VLAN_USED_BY_INTERFACE);
	if((rg_db.vlan[rg_db.systemGlobal.initParam.fwdVLAN_CPU].valid && vlanID == rg_db.systemGlobal.initParam.fwdVLAN_CPU) ||
		(rg_db.vlan[rg_db.systemGlobal.initParam.fwdVLAN_Proto_Block].valid && vlanID == rg_db.systemGlobal.initParam.fwdVLAN_Proto_Block) ||
		(rg_db.systemGlobal.initParam.macBasedTagDecision==1 &&
		(vlanID == rg_db.systemGlobal.initParam.fwdVLAN_BIND_INTERNET ||
		(vlanID >= rg_db.systemGlobal.initParam.fwdVLAN_BIND_OTHER && vlanID <= rg_db.systemGlobal.initParam.fwdVLAN_BIND_OTHER+DEFAULT_BIND_LAN_OFFSET))))
		RETURN_ERR(RT_ERR_RG_VLAN_USED_BY_SYSTEM);
	if(rg_db.vlan[vlanID].valid)
	{
		if(rg_db.vlan[vlanID].addedAsCustomerVLAN || VLAN_USED_BY_INTERFACE)	//created before
			DEBUG("vlanID[%d] is %s..",vlanID,VLAN_USED_BY_INTERFACE==1?"used by intf":"exist");//RETURN_ERR(RT_ERR_RG_CVLAN_CREATED);
		else
			RETURN_ERR(RT_ERR_RG_VLAN_USED_BY_VLANBINDING);	//used in vlan-binding
	}

	//Transfer RG portmask to RTK portmask
	_rtk_rg_portmask_translator(cvlan_info->memberPortMask,&mac_pmask,&ext_pmask);
	memset(&untag_pmask,0,sizeof(rtk_portmask_t));
	untag_pmask.bits[0]|=cvlan_info->untagPortMask.portmask;
#if defined(CONFIG_RTL9600_SERIES)
	//20160524LUKE: multicast routing packets will use the ingress VLAN's untag set, therefore set all non-member ports as untag!!
	untag_pmask.bits[0]|=(~(mac_pmask.bits[0]))&RTK_RG_ALL_MAC_PORTMASK;
#endif

	ret = RTK_VLAN_CREATE(vlanID);
	//if(ret==RT_ERR_VLAN_EXIST)
	//RETURN_ERR(RT_ERR_RG_CVLAN_RESERVED);	//the vlan had been created for system use
	//20150615LUKE: if we are changing an existing VLAN, do not touch addedAsCustomerVLAN
	if(ret==RT_ERR_OK)
	{
		//Indicate that this vlan is created by customer vlan APIs
		rg_db.vlan[vlanID].addedAsCustomerVLAN=1;
	}
	else if(ret!=RT_ERR_VLAN_EXIST)
		RETURN_ERR(RT_ERR_RG_VLAN_SET_FAIL);

	//Setting VLAN
	errorno=RT_ERR_RG_VLAN_SET_FAIL;
	if(cvlan_info->isIVL)
		fidMode=VLAN_FID_IVL;
	else
		fidMode=VLAN_FID_SVL;
	ret = RTK_VLAN_FIDMODE_SET(vlanID, fidMode);
	if(ret!=RT_ERR_OK)goto RET_VLAN_ERR;
	ret = RTK_VLAN_FID_SET(vlanID, LAN_FID);
	if(ret!=RT_ERR_OK)goto RET_VLAN_ERR;
	ret = RTK_VLAN_PORT_SET(vlanID, &mac_pmask, &untag_pmask);
	if(ret!=RT_ERR_OK)goto RET_VLAN_ERR;
	ret = RTK_VLAN_EXTPORT_SET(vlanID, &ext_pmask);
	if(ret!=RT_ERR_OK)goto RET_VLAN_ERR;

#ifdef CONFIG_MASTER_WLAN0_ENABLE
	//20160308LUKE: assign CVLAN's wlan0 devMask and untagMask for filter.
	rg_db.vlan[vlanID].wlan0DevMask=cvlan_info->wlan0DevMask;
	rg_db.vlan[vlanID].wlan0UntagMask=cvlan_info->wlan0UntagMask;
#if defined(CONFIG_RTL9600_SERIES)
	//20160524LUKE: multicast routing packets will use the ingress VLAN's untag set, therefore set all non-member ports as untag!!
rg_db.vlan[vlanID].wlan0UntagMask|=(~(rg_db.vlan[vlanID].wlan0DevMask))&(0xffffffff>>(32-MAX_WLAN_DEVICE_NUM)); #endif #endif //Setting up priority, if enable if(cvlan_info->vlan_based_pri_enable==RTK_RG_ENABLED) { #ifdef CONFIG_DUALBAND_CONCURRENT if(cvlan_info->vlan_based_pri==CONFIG_DEFAULT_TO_SLAVE_GMAC_PRI) { errorno=RT_ERR_RG_VLAN_PRI_CONFLICT_WIFI; goto RET_VLAN_ERR; } #endif #if defined(CONFIG_RTL9602C_SERIES) WARNING("[FIXME]for 9602C, we can't set priority for VLAN directly..."); //errorno=RT_ERR_RG_CHIP_NOT_SUPPORT; //goto RET_VLAN_ERR; #else ret = RTK_VLAN_PRIORITYENABLE_SET(vlanID,ENABLED); if(ret!=RT_ERR_OK)goto RET_VLAN_ERR; ret = RTK_VLAN_PRIORITY_SET(vlanID,cvlan_info->vlan_based_pri); if(ret!=RT_ERR_OK)goto RET_VLAN_ERR; #endif } else { #if defined(CONFIG_RTL9602C_SERIES) #else ret = RTK_VLAN_PRIORITYENABLE_SET(vlanID,DISABLED); if(ret!=RT_ERR_OK)goto RET_VLAN_ERR; #endif } return (RT_ERR_RG_OK); RET_VLAN_ERR: //Delete the customer VLAN created RTK_VLAN_DESTROY(vlanID); RETURN_ERR(errorno); } rtk_rg_err_code_t rtk_rg_apollo_cvlan_del(int cvlan_id) { //Check if the VLAN ID is created by customer vlan API before if(rg_db.vlan[cvlan_id].addedAsCustomerVLAN==0) RETURN_ERR(RT_ERR_RG_VLAN_NOT_CREATED_BY_CVLAN); //Delete the customer VLAN created RTK_VLAN_DESTROY(cvlan_id); return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_cvlan_get(rtk_rg_cvlan_info_t *cvlan_info) { rtk_vlan_t vlanID; //Check parameter if(rg_db.systemGlobal.initParam.macBasedTagDecision && cvlan_info->isIVL) //IVL can not be set when DMAC2CVID is trun on RETURN_ERR(RT_ERR_RG_INVALID_PARAM); //Check if VLAN init if(rg_db.systemGlobal.vlanInit==0) RETURN_ERR(RT_ERR_RG_NOT_INIT); vlanID=cvlan_info->vlanId; //Check if the vlanID valid if(vlanID <= 0 || vlanID >= 4095) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); else if(!rg_db.vlan[vlanID].valid) //invalid vid return (RT_ERR_RG_CVLAN_INVALID); cvlan_info->memberPortMask.portmask=rg_db.vlan[vlanID].MemberPortmask.bits[0]; if(rg_db.vlan[vlanID].Ext_portmask.bits[0]){ cvlan_info->memberPortMask.portmask|=1<<RTK_RG_PORT_CPU; cvlan_info->memberPortMask.portmask|=(rg_db.vlan[vlanID].Ext_portmask.bits[0]<<RTK_RG_PORT_CPU); } cvlan_info->untagPortMask.portmask=rg_db.vlan[vlanID].UntagPortmask.bits[0]; #ifdef CONFIG_MASTER_WLAN0_ENABLE cvlan_info->wlan0DevMask=rg_db.vlan[vlanID].wlan0DevMask; cvlan_info->wlan0UntagMask=rg_db.vlan[vlanID].wlan0UntagMask; #endif cvlan_info->isIVL=(rg_db.vlan[vlanID].fidMode==VLAN_FID_IVL?1:0); cvlan_info->vlan_based_pri_enable=rg_db.vlan[vlanID].priorityEn; cvlan_info->vlan_based_pri=rg_db.vlan[vlanID].priority; return (RT_ERR_RG_OK); } //VLAN Binding int _rtk_rg_vbdLinkListAdd(rtk_rg_port_idx_t portIdx, int wanIdx, int vlanId) { rtk_rg_vbind_linkList_t *pVbdEntry,*pNextEntry; //Check if we have not-used free list if(list_empty(&rg_db.vlanBindingFreeListHead)) { DEBUG("all free VLan-bind list are allocated..."); RETURN_ERR(RT_ERR_RG_FAILED); } //Get one from free list list_for_each_entry_safe(pVbdEntry,pNextEntry,&rg_db.vlanBindingFreeListHead,vbd_list) //just return the first entry right behind of head { list_del_init(&pVbdEntry->vbd_list); break; } //Setup information pVbdEntry->wanIdx=wanIdx; pVbdEntry->vlanId=vlanId; //Add to hash head list list_add(&pVbdEntry->vbd_list,&rg_db.vlanBindingListHead[portIdx]); return (RT_ERR_RG_OK); } int _rtk_rg_vbdLinkListDel(rtk_rg_port_idx_t portIdx, int wanIdx, int vlanId) { rtk_rg_vbind_linkList_t *pVbdEntry,*pNextEntry; 
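	/* Bookkeeping note: _rtk_rg_vbdLinkListAdd() above pops a free node from
	 * rg_db.vlanBindingFreeListHead, records the (wanIdx, vlanId) pair and chains it onto the
	 * per-port head rg_db.vlanBindingListHead[portIdx]; this function walks that per-port list
	 * and moves every matching node back to the free list, so nodes are recycled instead of
	 * being allocated and freed per binding. */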
list_for_each_entry_safe(pVbdEntry,pNextEntry,&rg_db.vlanBindingListHead[portIdx],vbd_list) { if(pVbdEntry->vlanId==vlanId && pVbdEntry->wanIdx==wanIdx) { //Delete from head list list_del_init(&pVbdEntry->vbd_list); //Add back to free list list_add(&pVbdEntry->vbd_list,&rg_db.vlanBindingFreeListHead); } } return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_vlanBinding_add(rtk_rg_vlanBinding_t *vlan_binding_info, int *vlan_binding_idx) { int errorno=RT_ERR_RG_OK; #if defined(CONFIG_APOLLO) int i,ret,bdIdx=0,vlan_exist=0,vlanID,intfIdx,bridge_wan_vlan=0;//,wantype_exist=0,nxpIdx=0; //unsigned int tmppmsk,tmpexpmsk; //rtk_l34_netif_entry_t intfEntry; rtk_binding_entry_t vbindEt; //rtk_classify_cfg_t cfEntry; rtk_portmask_t mbpmsk,etpmsk; rtk_portmask_t port_mask,ext_port_mask; rtk_portmask_t wanPmsk; rtk_rg_table_vlan_t ori_vlanEntry,ori_wanVlanEntry; rtk_rg_bindingEntry_t cb_bindEt; rtk_rg_wanIntfConf_t *bindWanConf; //rtk_wanType_entry_t wantEt; //rtk_l34_nexthop_entry_t nxpEt; //Check parameter if(vlan_binding_info == NULL || vlan_binding_idx == NULL) RETURN_ERR(RT_ERR_RG_NULL_POINTER); //20140423LUKE:at time, we just can't return VLAN-tagged packet to ext-port, therefore we prohibited such setting! if(vlan_binding_info->port_idx == RTK_RG_PORT_CPU) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); //if(vlan_binding_info->port_idx < RTK_RG_PORT0 || vlan_binding_info->port_idx > RTK_RG_EXT_PORT4 || //vlan_binding_info->port_idx == RTK_RG_PORT_CPU) //RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if(rg_db.systemGlobal.lanIntfTotalNum==0) RETURN_ERR(RT_ERR_RG_LAN_NOT_EXIST); if(vlan_binding_info->wan_intf_idx>=MAX_NETIF_SW_TABLE_SIZE) RETURN_ERR(RT_ERR_RG_INDEX_OUT_OF_RANGE); if(rg_db.systemGlobal.interfaceInfo[vlan_binding_info->wan_intf_idx].valid==0 || rg_db.systemGlobal.interfaceInfo[vlan_binding_info->wan_intf_idx].storedInfo.is_wan==0) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if(vlan_binding_info->ingress_vid==0) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if(rg_db.systemGlobal.initParam.macBasedTagDecision==0) //binding is turn off RETURN_ERR(RT_ERR_RG_BIND_WITH_UNBIND_WAN); if(rg_db.systemGlobal.vlanBindTotalNum==MAX_BIND_SW_TABLE_SIZE) RETURN_ERR(RT_ERR_RG_ENTRY_FULL); //Check if VLAN init if(rg_db.systemGlobal.vlanInit==0) RETURN_ERR(RT_ERR_RG_NOT_INIT); vlanID=vlan_binding_info->ingress_vid; if((rg_db.vlan[rg_db.systemGlobal.initParam.fwdVLAN_CPU].valid && vlanID == rg_db.systemGlobal.initParam.fwdVLAN_CPU) || (rg_db.vlan[rg_db.systemGlobal.initParam.fwdVLAN_Proto_Block].valid && vlanID == rg_db.systemGlobal.initParam.fwdVLAN_Proto_Block) || vlanID == rg_db.systemGlobal.initParam.fwdVLAN_BIND_INTERNET || (vlanID >= rg_db.systemGlobal.initParam.fwdVLAN_BIND_OTHER && vlanID <= rg_db.systemGlobal.initParam.fwdVLAN_BIND_OTHER+DEFAULT_BIND_LAN_OFFSET)) RETURN_ERR(RT_ERR_RG_VLAN_USED_BY_SYSTEM); //Check if vlanID has been used in LAN, WAN or 1Q VLAN for(i=0;i<rg_db.systemGlobal.lanIntfTotalNum;i++) if(vlanID==rg_db.systemGlobal.lanIntfGroup[i].p_intfInfo->p_lanIntfConf->intf_vlan_id) RETURN_ERR(RT_ERR_RG_VLAN_USED_BY_INTERFACE); for(i=0;i<rg_db.systemGlobal.wanIntfTotalNum;i++) { if(vlan_binding_info->port_idx==(rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->wan_port_idx)) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if(vlanID==rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->egress_vlan_id) { if(rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->wan_type==RTK_RG_BRIDGE) { WARNING("This VLAN used by Bridge-WAN!!"); } else RETURN_ERR(RT_ERR_RG_VLAN_USED_BY_INTERFACE); } } if(rg_db.vlan[vlanID].valid && 
rg_db.vlan[vlanID].addedAsCustomerVLAN) RETURN_ERR(RT_ERR_RG_VLAN_USED_BY_CVLAN); intfIdx=vlan_binding_info->wan_intf_idx; bindWanConf=&rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.wan_intf.wan_intf_conf; //Turn port index to mask if(vlan_binding_info->port_idx < RTK_RG_PORT_CPU) //MAC port { port_mask.bits[0]=0x1<<(vlan_binding_info->port_idx-RTK_RG_PORT0); ext_port_mask.bits[0]=0x0; } else //EXT port { ext_port_mask.bits[0]=0x1<<(vlan_binding_info->port_idx-RTK_RG_EXT_PORT0); port_mask.bits[0]=0x0; } for(i=0;i<MAX_BIND_SW_TABLE_SIZE;i++) { //bzero(&vbindEt, sizeof(rtk_binding_entry_t)); //ret = dal_apollomp_l34_bindingTable_get(i, &vbindEt); //FIXME:no RTK APIs //ret = rtk_l34_bindingTable_get(i, &vbindEt); //if(ret==RT_ERR_CHIP_NOT_SUPPORTED)RETURN_ERR(RT_ERR_RG_CHIP_NOT_SUPPORT); //if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_PORT_BIND_GET_FAIL); //we should not assign the same port binding and vlan with same port' //20140718LUKE: we need to support dynamic replace port binding when add WAN, so display WARNING instead return error. if((rg_db.bind[i].rtk_bind.portMask.bits[0]&port_mask.bits[0])||(rg_db.bind[i].rtk_bind.extPortMask.bits[0]&ext_port_mask.bits[0])) { if(rg_db.bind[i].rtk_bind.vidLan==0){ WARNING("%sPortmask 0x%x is setup port-binding to WAN[%d], make sure this situation is correct!",port_mask.bits[0]==0x0?"":"Ext", rg_db.bind[i].rtk_bind.portMask.bits[0],rg_db.nexthop[rg_db.wantype[rg_db.bind[i].rtk_bind.wanTypeIdx].rtk_wantype.nhIdx].rtk_nexthop.ifIdx);//RETURN_ERR(RT_ERR_RG_INVALID_PARAM); }else if(rg_db.bind[i].rtk_bind.vidLan==vlanID){ RETURN_ERR(RT_ERR_RG_ENTRY_EXIST); } } } //Have to call after rtk_rg_wanInterface_add, so check if we had already add interface and it's binding WAN /*bzero(&intfEntry, sizeof(rtk_l34_netif_entry_t)); ret = rtk_l34_netifTable_get(intfIdx, &intfEntry); if(ret!=RT_ERR_OK || intfEntry.valid == 0 || */ if(rg_db.systemGlobal.interfaceInfo[intfIdx].valid==0 || rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.is_wan!=1) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); //Patch for binding L2 bug //unnecessary here, since we had patched when WAN interface added. 
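	/* What follows (see the code below): the ingress VLAN is created, or extended if it already
	 * exists (the original entry is kept in ori_vlanEntry so it can be restored on error), the
	 * binding port is added to a bridge WAN's egress VLAN when needed, a free L34 binding-table
	 * entry is programmed for the (port, VID) pair, a companion IPv6 binding rule is added via
	 * _rtk_rg_updatingVlanBind() when needed, and the WAN's vlan_binding_mask and the
	 * none-binding portmask are updated.
	 * Illustrative caller sketch -- the values are examples only; the fields are the ones this
	 * function validates above:
	 *
	 *   rtk_rg_vlanBinding_t vb;
	 *   int vb_idx;
	 *   bzero(&vb, sizeof(vb));
	 *   vb.port_idx = RTK_RG_PORT0;    // LAN port to bind
	 *   vb.ingress_vid = 100;          // upstream tag that selects this WAN
	 *   vb.wan_intf_idx = 1;           // a valid, already-added WAN interface index
	 *   if (rtk_rg_apollo_vlanBinding_add(&vb, &vb_idx) != RT_ERR_RG_OK)
	 *       ;  // handle error
	 */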
	/*if(rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.wan_intf.wan_intf_conf.wan_type==RTK_RG_BRIDGE)
	{
		intfEntry.mtu=1502;
		ret = RTK_L34_NETIFTABLE_SET(intfIdx, &intfEntry);
		if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_INTF_SET_FAIL);
	}
	*/

	//Set up VLAN
	memset(&ori_vlanEntry,0,sizeof(rtk_rg_table_vlan_t));
	errorno=RT_ERR_RG_VLAN_SET_FAIL;
	ret = RTK_VLAN_CREATE(vlanID);
	if(ret == RT_ERR_VLAN_EXIST)
	{
		//keep all information of the original VLAN
		memcpy(&ori_vlanEntry, &rg_db.vlan[vlanID],sizeof(rtk_rg_table_vlan_t));
		vlan_exist=1;
	}
	else if(ret!=RT_ERR_OK)
		goto RET_VLAN_ERR;

	//Set up its member port, extension port set, and FID mode
	ret = RTK_VLAN_FIDMODE_SET(vlanID, VLAN_FID_SVL);
	if(ret!=RT_ERR_OK)goto RET_VLAN_ERR;
	mbpmsk.bits[0]=port_mask.bits[0]|ori_vlanEntry.MemberPortmask.bits[0];
	etpmsk.bits[0]=(ext_port_mask.bits[0]<<1)|ori_vlanEntry.Ext_portmask.bits[0];	//vlan's extPmsk begins at CPU
	mbpmsk.bits[0]|=0x1<<RTK_RG_MAC_PORT_CPU;	//CPU port always on
	etpmsk.bits[0]|=0x1;	//CPU port always on
	ret = RTK_VLAN_PORT_SET(vlanID, &mbpmsk, &ori_vlanEntry.UntagPortmask);	//don't touch untag set
	if(ret!=RT_ERR_OK)goto RET_VLAN_ERR;
	ret = RTK_VLAN_EXTPORT_SET(vlanID, &etpmsk);
	if(ret!=RT_ERR_OK)goto RET_VLAN_ERR;
	ret = RTK_VLAN_FID_SET(vlanID,LAN_FID);
	if(ret!=RT_ERR_OK)goto RET_VLAN_ERR;

	if(bindWanConf->wan_type==RTK_RG_BRIDGE)
	{
		bridge_wan_vlan=bindWanConf->egress_vlan_id;
		memcpy(&ori_wanVlanEntry,&rg_db.vlan[bridge_wan_vlan],sizeof(rtk_rg_table_vlan_t));
		rg_db.vlan[bridge_wan_vlan].MemberPortmask.bits[0]|=port_mask.bits[0];
		rg_db.vlan[bridge_wan_vlan].Ext_portmask.bits[0]|=ext_port_mask.bits[0];
		//add the binding port to WAN's VLAN member
		ret = RTK_VLAN_PORT_SET(bridge_wan_vlan, &rg_db.vlan[bridge_wan_vlan].MemberPortmask, &ori_wanVlanEntry.UntagPortmask);	//don't touch untag set
		if(ret!=RT_ERR_OK)goto RET_VLAN_ERR;
		ret = RTK_VLAN_EXTPORT_SET(bridge_wan_vlan, &rg_db.vlan[bridge_wan_vlan].Ext_portmask);
		if(ret!=RT_ERR_OK)goto RET_VLAN_ERR;
	}

	//Pick one binding entry that is not occupied yet
	for(i=0; i<MAX_BIND_SW_TABLE_SIZE; i++)	//Port-vlan binding starts from the top of the Binding Table
	{
		//if(rg_db.systemGlobal.bindToIntf[i] == -1)
		if(rg_db.bind[i].valid == 0)
			break;
	}
	if(i==MAX_BIND_SW_TABLE_SIZE)goto RET_BINDING_ERR;
	bdIdx=i;	//Keep
	//rg_db.systemGlobal.bindToIntf[bdIdx]=intfIdx;
	//rg_db.systemGlobal.bindWithVLAN[bdIdx]=vlanID;	//save the vlan
	//RG_GLB_VLANBD_IDX[bdIdx]=i;

	//Add one binding entry at a time
	errorno=RT_ERR_RG_PORT_BIND_SET_FAIL;
	bzero(&vbindEt, sizeof(rtk_binding_entry_t));
	vbindEt.vidLan=vlanID;
	vbindEt.wanTypeIdx=rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.wan_intf.bind_wan_type_ipv4;
	vbindEt.bindProto=L34_BIND_PROTO_ALL;	//ALL protocol in L3, and L2
	vbindEt.portMask=port_mask;
	vbindEt.extPortMask=ext_port_mask;
	//ret = dal_apollomp_l34_bindingTable_set(i, &vbindEt);	//FIXME:no RTK APIs
	ret = RTK_L34_BINDINGTABLE_SET(bdIdx, &vbindEt);
	if(ret==RT_ERR_CHIP_NOT_SUPPORTED)
	{
		errorno=RT_ERR_RG_CHIP_NOT_SUPPORT;
		goto RET_BINDING_ERR;
	}
	if(ret!=RT_ERR_OK)goto RET_BINDING_ERR;
	DEBUG("add binding(%d) vid=%d portMas=0x%x \n",bdIdx,rg_db.bind[bdIdx].rtk_bind.vidLan,rg_db.bind[bdIdx].rtk_bind.portMask.bits[0]);

	//20140807LUKE: add one more binding rule for v6 if needed
	errorno=_rtk_rg_updatingVlanBind(intfIdx,rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.wan_intf.bind_wan_type_ipv6);
	if(errorno!=RT_ERR_RG_OK)goto RET_BINDING_ERR;

	//20140725LUKE: update to WAN's vlan-binding-mask
	bindWanConf->vlan_binding_mask.portmask|=(0x1<<vlan_binding_info->port_idx);

	//Update the none-binding portmask: if the portmask is zero, remove the WAN port from fwdVLAN_BIND_INTERNET,
	//otherwise add the WAN port to fwdVLAN_BIND_INTERNET!
	//20160428LUKE: transform from RGMII to PON
	wanPmsk.bits[0]=0x1<<bindWanConf->wan_port_idx;
#if !defined(CONFIG_RTL9602C_SERIES)
	if(rg_db.systemGlobal.pppoeGponSmallbandwithControl && (bindWanConf->wan_type == RTK_RG_PPPoE) && bindWanConf->wan_port_idx==RTK_RG_PORT_PON){
		DEBUG("Special add RGMII to WAN_PORT_MASK.");
		wanPmsk.bits[0]|=0x1<<RTK_RG_PORT_RGMII;
	}
#endif
	_rtk_rg_updateNoneBindingPortmask(wanPmsk.bits[0]);

	//add to link-list for bindingRuleCheck
	_rtk_rg_vbdLinkListAdd(vlan_binding_info->port_idx,vlan_binding_info->wan_intf_idx,vlan_binding_info->ingress_vid);

	//add vlan-bind for pure software netif
	if( rg_db.systemGlobal.interfaceInfo[intfIdx].valid ==SOFTWARE_ONLY_ENTRY)
	{
		rtk_rg_aclAndCf_reserved_vlan_bind_trap_t vlan_bind_trap;
		bzero(&vlan_bind_trap,sizeof(vlan_bind_trap));
		vlan_bind_trap.portmask=port_mask.bits[0];
		vlan_bind_trap.vid=vlanID;
		_rtk_rg_aclAndCfReservedRuleAdd(RTK_RG_ACLANDCF_RESERVED_VLANBIND0_TRAP+ bdIdx, &vlan_bind_trap);	//(n=0~31, 1-to-1 mapping binding table)
		WARNING("ReservedRuleAdd Vlan binding bdIdx=%d vid=%d pmask=%x",bdIdx,vlan_bind_trap.vid,vlan_bind_trap.portmask);
	}

#if CONFIG_ACL_EGRESS_WAN_INTF_TRANSFORM
	//20141224LUKE: since vlan-binding is modified, we should rearrange ACLs which use the WAN interface as the egress interface of the binding
	if(rg_db.systemGlobal.acl_SW_egress_intf_type_zero_num)
		ASSERT_EQ(_rtk_rg_aclSWEntry_and_asic_rearrange(),RT_ERR_RG_OK);
#endif

	// TODO:Call the initParam's bindingAddByHwCallBack
	if(rg_db.systemGlobal.initParam.bindingAddByHwCallBack != NULL)
	{
		cb_bindEt.type=BIND_TYPE_VLAN;
		cb_bindEt.vlan.vlan_bind_port_idx=vlan_binding_info->port_idx;
		cb_bindEt.vlan.vlan_bind_vlan_id=vlanID;
		cb_bindEt.wan_intf_idx=intfIdx;
		rg_db.systemGlobal.initParam.bindingAddByHwCallBack(&cb_bindEt);
	}

#if 0
	tmppmsk=port_mask.bits[0];
	tmpexpmsk=ext_port_mask.bits[0];
	//Add port-binding we have to set
	count=tmppmsk;
	RG_ONE_COUNT(count);
	for(j=0; j<count; j++)
	{
		errorno=RT_ERR_RG_ENTRY_FULL;
		for(i=0; i<32; i++)	//Port-vlan binding start from the top of Binding Table
		{
			/*memset(&pbindEt, 0, sizeof(pbindEt));
			ret = dal_apollomp_l34_bindingTable_get(i, &pbindEt);	//FIXME:no RTK APIs
			if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_PORT_BIND_FAIL);
			if(pbindEt.portMask.bits[0]==0 && pbindEt.extPortMask.bits[0]==0) break;*/
			if(rg_db.systemGlobal.bindToIntf[i] == -1)
				break;
		}
		if(i==32)goto RET_VLAN_ERR;
		rg_db.systemGlobal.bindToIntf[i]=intfIdx;
		RG_GLB_VLANBD_IDX[i]=bdIdx;
		//Add binding entry once a time
		errorno=RT_ERR_RG_PORT_BIND_SET_FAIL;
		memset(&vbindEt, 0, sizeof(vbindEt));
		vbindEt.extPortMask.bits[0]=0;
		vbindEt.vidLan=vlanID;
		vbindEt.wanTypeIdx=intfIdx;
		vbindEt.bindProto=L34_BIND_PROTO_NOT_IPV6;	//IPv4 only
		if((tmppmsk&0x1)==1) //PORT0
		{ tmppmsk&=0xfffffffe; vbindEt.portMask.bits[0]=0x1; }
		else if((tmppmsk&0x2)==1) //PORT1
		{ tmppmsk&=0xfffffffd; vbindEt.portMask.bits[0]=0x2; }
		else if((tmppmsk&0x4)==1) //PORT2
		{ tmppmsk&=0xfffffffb; vbindEt.portMask.bits[0]=0x4; }
		else if((tmppmsk&0x8)==1) //PORT3
		{ tmppmsk&=0xfffffff7; vbindEt.portMask.bits[0]=0x8; }
		else if((tmppmsk&0x10)==1) //PORT4
		{ tmppmsk&=0xffffffef; vbindEt.portMask.bits[0]=0x10; }
		else if((tmppmsk&0x20)==1) //PORT5
		{ tmppmsk&=0xffffffdf; vbindEt.portMask.bits[0]=0x20; }
		ret = dal_apollomp_l34_bindingTable_set(i, &vbindEt);	//FIXME:no RTK APIs
		if(ret!=RT_ERR_OK)goto RET_BINDING_ERR;
	}
	//Add extport-binding we have to set
	count=tmpexpmsk;
	RG_ONE_COUNT(count);
	for(j=0;
j<count; j++) { errorno=RT_ERR_RG_ENTRY_FULL; for(i=0; i<32; i++) //Port-vlan binding start from the top of Binding Table { /*memset(&pbindEt, 0, sizeof(pbindEt)); ret = dal_apollomp_l34_bindingTable_get(i, &pbindEt); //FIXME:no RTK APIs if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_PORT_BIND_FAIL); if(pbindEt.portMask.bits[0]==0 && pbindEt.extPortMask.bits[0]==0) break;*/ if(rg_db.systemGlobal.bindToIntf[i] == -1) break; } if(i==32)goto RET_BINDING_ERR; rg_db.systemGlobal.bindToIntf[i]=intfIdx; RG_GLB_VLANBD_IDX[i]=bdIdx; //Add binding entry once a time errorno=RT_ERR_RG_EXTPORT_BIND_SET_FAIL; memset(&vbindEt, 0, sizeof(vbindEt)); vbindEt.portMask.bits[0]=0; vbindEt.vidLan=vlanID; vbindEt.wanTypeIdx=intfIdx; vbindEt.bindProto=L34_BIND_PROTO_NOT_IPV6; //IPv4 only if((tmpexpmsk&0x1)==1) //EXTPORT1 { tmpexpmsk&=0xfffffffe; vbindEt.extPortMask.bits[0]=0x1; } else if((tmpexpmsk&0x2)==1) //EXTPORT2 { tmpexpmsk&=0xfffffffd; vbindEt.extPortMask.bits[0]=0x2; } else if((tmpexpmsk&0x4)==1) //EXTPORT3 { tmpexpmsk&=0xfffffffb; vbindEt.extPortMask.bits[0]=0x4; } else if((tmpexpmsk&0x8)==1) //EXTPORT4 { tmpexpmsk&=0xfffffff7; vbindEt.extPortMask.bits[0]=0x8; } else if((tmpexpmsk&0x10)==1) //EXTPORT5 { tmpexpmsk&=0xfffffff7; vbindEt.extPortMask.bits[0]=0x10; } ret = dal_apollomp_l34_bindingTable_set(i, &vbindEt); //FIXME:no RTK APIs if(ret!=RT_ERR_OK)goto RET_BINDING_ERR; } #endif //Return the binding index *vlan_binding_idx = bdIdx; //Turn on CF rule for downstream packets to tag as it outbound vlan tag /*if(rg_db.systemGlobal.vlanBindTotalNum == 0) { memset(&cfEntry, 0, sizeof(cfEntry)); cfEntry.index=RG_GLB_VLAN_BINDING_CFIDX; errorno=RT_ERR_RG_CF_ENTRY_ACCESS_FAILED; ret = rtk_classify_cfgEntry_get(&cfEntry); if(ret!=RT_ERR_OK)goto RET_BINDING_ERR; cfEntry.valid=1; ret = rtk_classify_cfgEntry_add(&cfEntry); if(ret!=RT_ERR_OK)goto RET_BINDING_ERR; memset(&cfEntry, 0, sizeof(cfEntry)); cfEntry.index=RESERVED_CF_VLANBINDING_MAC_LEARN_ENTRY; //defined in internal.h cfEntry.direction=CLASSIFY_DIRECTION_DS; cfEntry.valid=1; cfEntry.act.dsAct.cAct=CLASSIFY_DS_CACT_ADD_CTAG_8100; cfEntry.act.dsAct.cVidAct=CLASSIFY_DS_VID_ACT_FROM_LUT; cfEntry.act.dsAct.cPriAct=CLASSIFY_DS_PRI_ACT_NOP; cfEntry.act.dsAct.uniAct=CLASSIFY_DS_UNI_ACT_NOP; //all port should be classified ret = rtk_classify_cfgEntry_add(&cfEntry); if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_CF_ENTRY_ACCESS_FAILED); }*/ rg_db.systemGlobal.vlanBindTotalNum++; //count how many vlan-binding index we have #ifdef CONFIG_MASTER_WLAN0_ENABLE //20151112LUKE: for extension port, all SSID will be enabled here! if(vlan_binding_info->port_idx == RTK_RG_EXT_PORT0){ rg_db.vlan[vlanID].wlan0DevMask = 0xffffffff; rg_db.vlan[vlanID].wlan0UntagMask = 0x0; //all tag for vlan-binding!! } #ifdef CONFIG_DUALBAND_CONCURRENT else if(rg_db.systemGlobal.enableSlaveSSIDBind && vlan_binding_info->port_idx == RTK_RG_EXT_PORT1){ for(i=WLAN_DEVICE_NUM;i<MAX_WLAN_DEVICE_NUM;i++){ rg_db.vlan[vlanID].wlan0DevMask |= (0x1<<i); rg_db.vlan[vlanID].wlan0UntagMask &= ~(0x1<<i); //all tag for vlan-binding!! 
} } #endif #endif return (RT_ERR_RG_OK); RET_BINDING_ERR: //Delete all set up binding rule for(i=0; i<MAX_BIND_SW_TABLE_SIZE; i++) { //if(rg_db.systemGlobal.bindToIntf[i]==intfIdx && rg_db.systemGlobal.bindWithVLAN[i]==vlanID) if(rg_db.bind[i].valid&& rg_db.nexthop[rg_db.wantype[rg_db.bind[i].rtk_bind.wanTypeIdx].rtk_wantype.nhIdx].rtk_nexthop.ifIdx==intfIdx&& rg_db.bind[i].rtk_bind.vidLan==vlanID) { bzero(&vbindEt, sizeof(rtk_binding_entry_t)); //dal_apollomp_l34_bindingTable_get(i, &vbindEt); //FIXME:no RTK APIs /*rtk_l34_bindingTable_get(i, &vbindEt); if(vbindEt.vidLan==vlanID) {*/ //dal_apollomp_l34_bindingTable_set(i, &vbindEt); //FIXME:no RTK APIs RTK_L34_BINDINGTABLE_SET(i, &vbindEt); //rg_db.systemGlobal.bindToIntf[i]=-1; //rg_db.systemGlobal.bindWithVLAN[i]=-1; //} } //Reset the "index" /*if(RG_GLB_VLANBD_IDX[i]==bdIdx) { RG_GLB_VLANBD_IDX[i]=-1; }*/ } #if 0 RET_WANTYPE_ERR: //Delete WAN type entry if(wantype_exist==0) { bzero(&wantEt, sizeof(rtk_wanType_entry_t)); //dal_apollomp_l34_wanTypeTable_set(intfIdx, &wantEt); //FIXME:no RTK APIs rtk_l34_wanTypeTable_set(intfIdx, &wantEt); } RET_NEXTHOP_ERR: //Delete nexthop entry if(wantype_exist==0) { bzero(&nxpEt, sizeof(rtk_l34_nexthop_entry_t)); RTK_L34_NEXTHOPTABLE_SET(nxpIdx, &nxpEt); } #endif RET_VLAN_ERR: //recovery vlan setting if(vlan_exist) { RTK_VLAN_PORT_SET(vlanID, &ori_vlanEntry.MemberPortmask, &ori_vlanEntry.UntagPortmask); RTK_VLAN_EXTPORT_SET(vlanID, &ori_vlanEntry.Ext_portmask); RTK_VLAN_FIDMODE_SET(vlanID, ori_vlanEntry.fidMode); RTK_VLAN_FID_SET(vlanID, ori_vlanEntry.fid); #if defined(CONFIG_RTL9602C_SERIES) #else RTK_VLAN_PRIORITYENABLE_SET(vlanID, ori_vlanEntry.priorityEn); RTK_VLAN_PRIORITY_SET(vlanID, ori_vlanEntry.priority); #endif } else { RTK_VLAN_DESTROY(vlanID); } if(bridge_wan_vlan>0) { RTK_VLAN_PORT_SET(bridge_wan_vlan, &ori_wanVlanEntry.MemberPortmask, &ori_wanVlanEntry.UntagPortmask); RTK_VLAN_EXTPORT_SET(bridge_wan_vlan, &ori_wanVlanEntry.Ext_portmask); } #endif RETURN_ERR(errorno); } rtk_rg_err_code_t rtk_rg_apollo_vlanBinding_del(int vlan_binding_idx) { #if defined(CONFIG_APOLLO) int i,idx,ret; //rtk_l34_netif_entry_t intfEntry; rtk_binding_entry_t vbindEt; rtk_rg_bindingEntry_t cb_bindEt; rtk_rg_wanIntfConf_t *bindWanConf; rtk_portmask_t bind_mac_pmask,bind_ext_pmask; rtk_portmask_t wanPmsk; unsigned int vlan_bind_pmsk=0,vlan_bind_extpmsk=0; //rtk_classify_cfg_t cfEntry; //Check parameter if(vlan_binding_idx<0 || vlan_binding_idx>=MAX_BIND_SW_TABLE_SIZE) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if(rg_db.systemGlobal.vlanBindTotalNum==0 || rg_db.bind[vlan_binding_idx].valid==0 || rg_db.bind[vlan_binding_idx].rtk_bind.vidLan==0) //never delete vlan-binding before add one return (RT_ERR_RG_VLAN_BIND_UNINIT); //Check if VLAN init if(rg_db.systemGlobal.vlanInit==0) RETURN_ERR(RT_ERR_RG_NOT_INIT); //Check for matching index in RG_GLB_VLANBD_IDX array, delete HW binding table, //and reset RG_GLB_VLANBD_IDX #if 0 for(i=0; i<BINDING_MAX_IDX; i++) { if(RG_GLB_VLANBD_IDX[i]==vlan_binding_idx) { memset(&vbindEt, 0, sizeof(vbindEt)); ret = dal_apollomp_l34_bindingTable_set(i, &vbindEt); //FIXME:no RTK APIs if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_VLAN_BIND_SET_FAIL); //Reset the index RG_GLB_VLANBD_IDX[i]=-1; } } #endif /*for(i=vlan_binding_idx;i<BINDING_MAX_IDX;i++) { if(rg_db.systemGlobal.bindToIntf[i]!=-1) //not valid break; } if(i==BINDING_MAX_IDX)RETURN_ERR(RT_ERR_RG_VLAN_BIND_UNINIT); idx=i; //Keep*/ idx=vlan_binding_idx; 
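	/* Deletion flow (matches the code below): resolve the owning WAN configuration through the
	 * bind -> wantype -> nexthop -> netif chain, rebuild the bound port index from the stored
	 * port/ext-port masks for the bindingDelByHwCallBack, remove any companion IPv6-only binding
	 * that shares the same VID, portmask and interface, destroy the VLAN only when no other
	 * binding or bridge WAN still references it, then clear the hardware binding entry, the
	 * reserved trap rule, the link-list node and the none-binding portmask bookkeeping. */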
bindWanConf=&rg_db.systemGlobal.interfaceInfo[rg_db.nexthop[rg_db.wantype[rg_db.bind[vlan_binding_idx].rtk_bind.wanTypeIdx].rtk_wantype.nhIdx].rtk_nexthop.ifIdx].storedInfo.wan_intf.wan_intf_conf; //Patch for binding L2 problem //unnecessary here, since we had patched when WAN interface added. /*if(rg_db.systemGlobal.interfaceInfo[rg_db.bind[idx].rtk_bind.wanTypeIdx].storedInfo.wan_intf.wan_intf_conf.wan_type==RTK_RG_BRIDGE) { //Check binding table if there is any other entry has the same interface idx, including vlan-binding and port-binding //P.S. wanTypeIdx is equal to interface idx here for(i=0;i<MAX_BIND_SW_TABLE_SIZE;i++) { if(i!=idx && rg_db.bind[i].rtk_bind.wanTypeIdx==rg_db.bind[idx].rtk_bind.wanTypeIdx) break; } if(i==MAX_BIND_SW_TABLE_SIZE) //no other vlan-binding using same interface { bzero(&intfEntry, sizeof(rtk_l34_netif_entry_t)); ret = rtk_l34_netifTable_get(rg_db.bind[idx].rtk_bind.wanTypeIdx, &intfEntry); if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_INTF_GET_FAIL); intfEntry.mtu=1500; ret = RTK_L34_NETIFTABLE_SET(rg_db.bind[idx].rtk_bind.wanTypeIdx, &intfEntry); if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_INTF_SET_FAIL); } }*/ //idx=RG_GLB_VLANBD_IDX[vlan_binding_idx]; //bzero(&vbindEt, sizeof(rtk_binding_entry_t)); //ret = rtk_l34_bindingTable_get(idx, &vbindEt); //if(ret==RT_ERR_CHIP_NOT_SUPPORTED)RETURN_ERR(RT_ERR_RG_CHIP_NOT_SUPPORT); //if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_VLAN_BIND_GET_FAIL); //Set up for callback function cb_bindEt.type=BIND_TYPE_VLAN; if(rg_db.bind[idx].rtk_bind.portMask.bits[0]>0) { if((rg_db.bind[idx].rtk_bind.portMask.bits[0]&(0x1<<RTK_RG_PORT0)) > 0) //RTK_RG_PORT0 cb_bindEt.vlan.vlan_bind_port_idx=RTK_RG_PORT0; else if((rg_db.bind[idx].rtk_bind.portMask.bits[0]&(0x1<<RTK_RG_PORT1)) > 0) //RTK_RG_PORT1 cb_bindEt.vlan.vlan_bind_port_idx=RTK_RG_PORT1; #if !defined(CONFIG_RTL9602C_SERIES) else if((rg_db.bind[idx].rtk_bind.portMask.bits[0]&(0x1<<RTK_RG_PORT2)) > 0) //RTK_RG_PORT2 cb_bindEt.vlan.vlan_bind_port_idx=RTK_RG_PORT2; else if((rg_db.bind[idx].rtk_bind.portMask.bits[0]&(0x1<<RTK_RG_PORT3)) > 0) //RTK_RG_PORT3 cb_bindEt.vlan.vlan_bind_port_idx=RTK_RG_PORT3; else if((rg_db.bind[idx].rtk_bind.portMask.bits[0]&(0x1<<RTK_RG_PORT_RGMII)) > 0) //RTK_RG_PORT_RGMII cb_bindEt.vlan.vlan_bind_port_idx=RTK_RG_PORT_RGMII; #endif else if((rg_db.bind[idx].rtk_bind.portMask.bits[0]&(0x1<<RTK_RG_PORT_PON)) > 0) //RTK_RG_PORT_PON cb_bindEt.vlan.vlan_bind_port_idx=RTK_RG_PORT_PON; else if((rg_db.bind[idx].rtk_bind.portMask.bits[0]&(0x1<<RTK_RG_PORT_CPU)) > 0) //RTK_RG_PORT_CPU cb_bindEt.vlan.vlan_bind_port_idx=RTK_RG_PORT_CPU; } else { if((rg_db.bind[idx].rtk_bind.extPortMask.bits[0]&(0x1<<RTK_RG_BD_EXT_PORT0)) > 0) //RTK_RG_EXT_PORT0 cb_bindEt.vlan.vlan_bind_port_idx=RTK_RG_EXT_PORT0; else if((rg_db.bind[idx].rtk_bind.extPortMask.bits[0]&(0x1<<RTK_RG_BD_EXT_PORT1)) > 0) //RTK_RG_EXT_PORT1 cb_bindEt.vlan.vlan_bind_port_idx=RTK_RG_EXT_PORT1; else if((rg_db.bind[idx].rtk_bind.extPortMask.bits[0]&(0x1<<RTK_RG_BD_EXT_PORT2)) > 0) //RTK_RG_EXT_PORT2 cb_bindEt.vlan.vlan_bind_port_idx=RTK_RG_EXT_PORT2; else if((rg_db.bind[idx].rtk_bind.extPortMask.bits[0]&(0x1<<RTK_RG_BD_EXT_PORT3)) > 0) //RTK_RG_EXT_PORT3 cb_bindEt.vlan.vlan_bind_port_idx=RTK_RG_EXT_PORT3; else if((rg_db.bind[idx].rtk_bind.extPortMask.bits[0]&(0x1<<RTK_RG_BD_EXT_PORT4)) > 0) //RTK_RG_EXT_PORT4 cb_bindEt.vlan.vlan_bind_port_idx=RTK_RG_EXT_PORT4; } cb_bindEt.vlan.vlan_bind_vlan_id=rg_db.bind[idx].rtk_bind.vidLan; 
cb_bindEt.wan_intf_idx=rg_db.nexthop[rg_db.wantype[rg_db.bind[idx].rtk_bind.wanTypeIdx].rtk_wantype.nhIdx].rtk_nexthop.ifIdx; //20140807LUKE: for deleting, check if we add v6 before bzero(&vbindEt, sizeof(rtk_binding_entry_t)); if(rg_db.bind[idx].rtk_bind.bindProto!=L34_BIND_PROTO_ALL) //means that we have v6bind added { for(i=0;i<MAX_BIND_SW_TABLE_SIZE;i++) { if(i!=idx && rg_db.bind[i].valid && rg_db.bind[i].rtk_bind.bindProto==L34_BIND_PROTO_NOT_IPV4 && rg_db.bind[i].rtk_bind.vidLan==rg_db.bind[idx].rtk_bind.vidLan && !memcmp(&rg_db.bind[i].rtk_bind.portMask,&rg_db.bind[idx].rtk_bind.portMask,sizeof(rtk_portmask_t)) && !memcmp(&rg_db.bind[i].rtk_bind.extPortMask,&rg_db.bind[idx].rtk_bind.extPortMask,sizeof(rtk_portmask_t)) && rg_db.nexthop[rg_db.wantype[rg_db.bind[i].rtk_bind.wanTypeIdx].rtk_wantype.nhIdx].rtk_nexthop.ifIdx==rg_db.nexthop[rg_db.wantype[rg_db.bind[idx].rtk_bind.wanTypeIdx].rtk_wantype.nhIdx].rtk_nexthop.ifIdx) { //delete it first!! DEBUG("delete v6bind[%d]",i); ret = RTK_L34_BINDINGTABLE_SET(i, &vbindEt); if(ret==RT_ERR_CHIP_NOT_SUPPORTED)RETURN_ERR(RT_ERR_RG_CHIP_NOT_SUPPORT); if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_VLAN_BIND_SET_FAIL); if(rg_db.systemGlobal.vlanBindTotalNum>0) rg_db.systemGlobal.vlanBindTotalNum--; if(rg_db.systemGlobal.interfaceInfo[rg_db.nexthop[rg_db.wantype[rg_db.bind[idx].rtk_bind.wanTypeIdx].rtk_wantype.nhIdx].rtk_nexthop.ifIdx].valid == SOFTWARE_ONLY_ENTRY) _rtk_rg_aclAndCfReservedRuleDel(RTK_RG_ACLANDCF_RESERVED_VLANBIND0_TRAP+i); } } } ret=0; for(i=0;i<MAX_BIND_SW_TABLE_SIZE;i++) { if(i!=idx && rg_db.bind[i].valid) { if(rg_db.bind[i].rtk_bind.vidLan==rg_db.bind[idx].rtk_bind.vidLan) ret++; else if(rg_db.bind[i].rtk_bind.vidLan!=0 && rg_db.bind[i].rtk_bind.wanTypeIdx==rg_db.bind[idx].rtk_bind.wanTypeIdx && ((rg_db.bind[i].rtk_bind.portMask.bits[0]==rg_db.bind[idx].rtk_bind.portMask.bits[0]) || (rg_db.bind[i].rtk_bind.extPortMask.bits[0]==rg_db.bind[idx].rtk_bind.extPortMask.bits[0]))) { //if there is other VLAN-binding with same port of same WAN, we shoud keep the port in WAN's VLAN! vlan_bind_pmsk|=rg_db.bind[i].rtk_bind.portMask.bits[0]; vlan_bind_extpmsk|=rg_db.bind[i].rtk_bind.extPortMask.bits[0]; } } } //20140916LUKE: we should check WAN interface also! if(rg_db.bind[idx].rtk_bind.vidLan==bindWanConf->egress_vlan_id && bindWanConf->wan_type==RTK_RG_BRIDGE) { ret++; //if this binding port located in Other WAN's port-binding member, we should keep it in WAN's VLAN! if(bindWanConf->none_internet) { _rtk_rg_portmask_translator(bindWanConf->port_binding_mask,&bind_mac_pmask,&bind_ext_pmask); vlan_bind_pmsk|=bind_mac_pmask.bits[0]; vlan_bind_extpmsk|=bind_ext_pmask.bits[0]; } else { //if this binding port located in fmember, we should keep it in WAN's VLAN! 
vlan_bind_pmsk|=rg_db.vlan[rg_db.systemGlobal.initParam.fwdVLAN_BIND_INTERNET].MemberPortmask.bits[0]; vlan_bind_extpmsk|=rg_db.vlan[rg_db.systemGlobal.initParam.fwdVLAN_BIND_INTERNET].Ext_portmask.bits[0]; } } if(ret==0) //no other used this VLAN right now { ret = RTK_VLAN_DESTROY(rg_db.bind[idx].rtk_bind.vidLan); DEBUG("####RTK_VLAN_DESTROY ret=0x%x idx=%d vid=%d portMask=0x%x wanTypeIdx=%d\n",ret,idx,rg_db.bind[idx].rtk_bind.vidLan,rg_db.bind[idx].rtk_bind.portMask.bits[0],rg_db.bind[idx].rtk_bind.wanTypeIdx); if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_VLAN_SET_FAIL); } //20140725LUKE: update to WAN's vlan-binding-mask if no other same-port vlan-binding to this WAN if(vlan_bind_pmsk==0 && vlan_bind_extpmsk==0) bindWanConf->vlan_binding_mask.portmask&=(~(0x1<<cb_bindEt.vlan.vlan_bind_port_idx)); //remove the binding port from WAN's VLAN member if(bindWanConf->wan_type==RTK_RG_BRIDGE) { //20140725LUKE: remove this port only if the WAN didn't have other vlan-binding or port-binding from this port _rtk_rg_portmask_translator(bindWanConf->port_binding_mask,&bind_mac_pmask,&bind_ext_pmask); vlan_bind_pmsk|=bind_mac_pmask.bits[0]; vlan_bind_extpmsk|=bind_ext_pmask.bits[0]; if(!(rg_db.bind[idx].rtk_bind.portMask.bits[0]&vlan_bind_pmsk)) { rg_db.vlan[bindWanConf->egress_vlan_id].MemberPortmask.bits[0]&=(~(rg_db.bind[idx].rtk_bind.portMask.bits[0])); } if(!(rg_db.bind[idx].rtk_bind.extPortMask.bits[0]&vlan_bind_extpmsk)) { rg_db.vlan[bindWanConf->egress_vlan_id].Ext_portmask.bits[0]&=(~(rg_db.bind[idx].rtk_bind.extPortMask.bits[0])); } ret = RTK_VLAN_PORT_SET(bindWanConf->egress_vlan_id, &rg_db.vlan[bindWanConf->egress_vlan_id].MemberPortmask, &rg_db.vlan[bindWanConf->egress_vlan_id].UntagPortmask); //don't touch untag set if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_VLAN_SET_FAIL); ret = RTK_VLAN_EXTPORT_SET(bindWanConf->egress_vlan_id, &rg_db.vlan[bindWanConf->egress_vlan_id].Ext_portmask); if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_VLAN_SET_FAIL); } ret = RTK_L34_BINDINGTABLE_SET(idx, &vbindEt); if(ret==RT_ERR_CHIP_NOT_SUPPORTED)RETURN_ERR(RT_ERR_RG_CHIP_NOT_SUPPORT); if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_VLAN_BIND_SET_FAIL); if(rg_db.systemGlobal.interfaceInfo[rg_db.nexthop[rg_db.wantype[rg_db.bind[idx].rtk_bind.wanTypeIdx].rtk_wantype.nhIdx].rtk_nexthop.ifIdx].valid == SOFTWARE_ONLY_ENTRY) _rtk_rg_aclAndCfReservedRuleDel(RTK_RG_ACLANDCF_RESERVED_VLANBIND0_TRAP+idx); //remove from link-list _rtk_rg_vbdLinkListDel(cb_bindEt.vlan.vlan_bind_port_idx,cb_bindEt.wan_intf_idx,cb_bindEt.vlan.vlan_bind_vlan_id); //Update non binding portmask, if portmask is zero, remove WAN port from fwdVLAN_BIND_INTERNET //otherwise add WAN port to fwdVLAN_BIND_INTERNET! 
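	/* Note: the wanPmsk computed below mirrors the one built in rtk_rg_apollo_vlanBinding_add(),
	 * including the pppoeGponSmallbandwithControl case that ORs in the RGMII port for a PPPoE
	 * WAN on the PON port, so _rtk_rg_updateNoneBindingPortmask() sees the same WAN port set on
	 * both add and delete. */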
//20160428LUKE: transform from RGMII to PON wanPmsk.bits[0]=0x1<<bindWanConf->wan_port_idx; #if !defined(CONFIG_RTL9602C_SERIES) if(rg_db.systemGlobal.pppoeGponSmallbandwithControl && (bindWanConf->wan_type == RTK_RG_PPPoE) && bindWanConf->wan_port_idx==RTK_RG_PORT_PON){ DEBUG("Special add RGMII to WAN_PORT_MASK."); wanPmsk.bits[0]|=0x1<<RTK_RG_PORT_RGMII; } #endif _rtk_rg_updateNoneBindingPortmask(wanPmsk.bits[0]); #if CONFIG_ACL_EGRESS_WAN_INTF_TRANSFORM //20141224LUKE: since vlan-binding is modified, we should rearrange ACL which use the WAN interface as egress interface of the binding if(rg_db.systemGlobal.acl_SW_egress_intf_type_zero_num) ASSERT_EQ(_rtk_rg_aclSWEntry_and_asic_rearrange(),RT_ERR_RG_OK); #endif // TODO:Call the initParam's bindingDelByHwCallBack if(rg_db.systemGlobal.initParam.bindingDelByHwCallBack != NULL) { rg_db.systemGlobal.initParam.bindingDelByHwCallBack(&cb_bindEt); } //Reset the index //rg_db.systemGlobal.bindWithVLAN[idx]=-1; //rg_db.systemGlobal.bindToIntf[idx]=-1; if(rg_db.systemGlobal.vlanBindTotalNum>0) rg_db.systemGlobal.vlanBindTotalNum--; //Turn off CF rule for downstream packets to tag as it outbound vlan tag if there is no vlan-binding if(rg_db.systemGlobal.vlanBindTotalNum==0) { /*memset(&cfEntry, 0, sizeof(cfEntry)); cfEntry.index=VLAN_BINDING_CFIDX; ret = rtk_classify_cfgEntry_get(&cfEntry); if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_CF_ENTRY_ACCESS_FAILED); cfEntry.valid=0; ret = rtk_classify_cfgEntry_add(&cfEntry); if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_CF_ENTRY_ACCESS_FAILED);*/ /*ret = rtk_classify_cfgEntry_del(RESERVED_CF_VLANBINDING_MAC_LEARN_ENTRY); if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_CF_ENTRY_ACCESS_FAILED);*/ } #endif return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_vlanBinding_find(rtk_rg_vlanBinding_t *vlan_binding_info, int *valid_idx) { #if defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) #else int i,/*ret,*/vbindIdx,/*vlanID,wtIdx,*/portIdx=-1; //rtk_binding_entry_t vbindEt; //rtk_portmask_t mbpmsk,etpmsk; //Check parameter if(vlan_binding_info==NULL || valid_idx==NULL) RETURN_ERR(RT_ERR_RG_NULL_POINTER); if(rg_db.systemGlobal.vlanBindTotalNum==0) //never find vlan-binding before add one return (RT_ERR_RG_VLAN_BIND_UNINIT); //Check if VLAN init if(rg_db.systemGlobal.vlanInit==0) RETURN_ERR(RT_ERR_RG_NOT_INIT); vbindIdx=*valid_idx; if(vbindIdx<0 || vbindIdx>=MAX_BIND_SW_TABLE_SIZE) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); #if 0 for(i=vbindIdx; i<BINDING_MAX_IDX; i++) { for(j=0; j<32; j++) { if(RG_GLB_VLANBD_IDX[i]==vbindIdx) //this index is valid { isValid=1; break; } } if(isValid==1) break; } if(i==32)RETURN_ERR(RT_ERR_RG_VLAN_BIND_GET_FAIL); #endif for(i=vbindIdx; i<MAX_BIND_SW_TABLE_SIZE; i++) { //if(rg_db.systemGlobal.bindWithVLAN[i]!=-1) //this index is valid if(rg_db.bind[i].valid ) break; } if(i==MAX_BIND_SW_TABLE_SIZE)return (RT_ERR_RG_VLAN_BIND_UNINIT); vbindIdx=i; //Keep //now vbindIdx is contain a valid index for binding rules //Get binding rule with valid_idx, and save port and extport mask in info //bzero(&vbindEt, sizeof(rtk_binding_entry_t)); //memcpy(&vbindEt, &rg_db.bind[vbindIdx].rtk_bind, sizeof(rtk_binding_entry_t)); //ret = dal_apollomp_l34_bindingTable_get(vbindIdx, &vbindEt); //FIXME:no RTK APIs //ret = rtk_l34_bindingTable_get(vbindIdx, &vbindEt); //if(ret==RT_ERR_CHIP_NOT_SUPPORTED)RETURN_ERR(RT_ERR_RG_CHIP_NOT_SUPPORT); //if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_VLAN_BIND_GET_FAIL); #if 0 mbpmsk.bits[0]=0x0; etpmsk.bits[0]=0x0; for(i=0; i<32; i++) { if(RG_GLB_VLANBD_IDX[i]==vbindIdx) { memset(&vbindEt, 0, 
sizeof(vbindEt)); ret = dal_apollomp_l34_bindingTable_get(i, &vbindEt); //FIXME:no RTK APIs if(ret!=RT_ERR_OK)RETURN_ERR(RT_ERR_RG_VLAN_BIND_GET_FAIL); //Union all port and extension port mask mbpmsk.bits[0]|=vbindEt.portMask.bits[0]; etpmsk.bits[0]|=vbindEt.extPortMask.bits[0]; } } #endif //vlanID=rg_db.bind[vbindIdx].rtk_bind.vidLan;//vbindEt.vidLan; //wtIdx=rg_db.bind[vbindIdx].rtk_bind.wanTypeIdx;//vbindEt.wanTypeIdx; if(rg_db.bind[vbindIdx].rtk_bind.portMask.bits[0]>0) { //mac port is set up if((rg_db.bind[vbindIdx].rtk_bind.portMask.bits[0]&(0x1<<RTK_RG_PORT0)) > 0) portIdx=RTK_RG_PORT0; else if((rg_db.bind[vbindIdx].rtk_bind.portMask.bits[0]&(0x1<<RTK_RG_PORT1)) > 0) portIdx=RTK_RG_PORT1; #if !defined(CONFIG_RTL9602C_SERIES) else if((rg_db.bind[vbindIdx].rtk_bind.portMask.bits[0]&(0x1<<RTK_RG_PORT2)) > 0) portIdx=RTK_RG_PORT2; else if((rg_db.bind[vbindIdx].rtk_bind.portMask.bits[0]&(0x1<<RTK_RG_PORT3)) > 0) portIdx=RTK_RG_PORT3; else if((rg_db.bind[vbindIdx].rtk_bind.portMask.bits[0]&(0x1<<RTK_RG_PORT_RGMII)) > 0) portIdx=RTK_RG_PORT_RGMII; #endif else if((rg_db.bind[vbindIdx].rtk_bind.portMask.bits[0]&(0x1<<RTK_RG_PORT_PON)) > 0) portIdx=RTK_RG_PORT_PON; else if((rg_db.bind[vbindIdx].rtk_bind.portMask.bits[0]&(0x1<<RTK_RG_PORT_CPU)) > 0) portIdx=RTK_RG_PORT_CPU; } else { //extension port is set up if((rg_db.bind[vbindIdx].rtk_bind.extPortMask.bits[0]&(0x1<<RTK_RG_BD_EXT_PORT0)) > 0) portIdx=RTK_RG_EXT_PORT0; else if((rg_db.bind[vbindIdx].rtk_bind.extPortMask.bits[0]&(0x1<<RTK_RG_BD_EXT_PORT1)) > 0) portIdx=RTK_RG_EXT_PORT1; else if((rg_db.bind[vbindIdx].rtk_bind.extPortMask.bits[0]&(0x1<<RTK_RG_BD_EXT_PORT2)) > 0) portIdx=RTK_RG_EXT_PORT2; else if((rg_db.bind[vbindIdx].rtk_bind.extPortMask.bits[0]&(0x1<<RTK_RG_BD_EXT_PORT3)) > 0) portIdx=RTK_RG_EXT_PORT3; else if((rg_db.bind[vbindIdx].rtk_bind.extPortMask.bits[0]&(0x1<<RTK_RG_BD_EXT_PORT4)) > 0) portIdx=RTK_RG_EXT_PORT4; } if(portIdx==-1) RETURN_ERR(RT_ERR_RG_VLAN_BIND_GET_FAIL); //Return the valid index *valid_idx = vbindIdx; vlan_binding_info->port_idx=portIdx; vlan_binding_info->ingress_vid=rg_db.bind[vbindIdx].rtk_bind.vidLan; vlan_binding_info->wan_intf_idx=rg_db.nexthop[rg_db.wantype[rg_db.bind[vbindIdx].rtk_bind.wanTypeIdx].rtk_wantype.nhIdx].rtk_nexthop.ifIdx; #endif return (RT_ERR_RG_OK); } //ALG void _rtk_rg_alg_setPort(unsigned short int portNum, int TCP, int enable) { int algIdx; unsigned int algBitValue; if(portNum<=0)return; if(TCP==1) { algIdx=portNum>>5; algBitValue=0x1<<(portNum&0x1f); //DEBUG("before TCP enable[%d] is %x, algbitvalue = %x",algIdx,rg_db.algTcpExternPortEnabled[algIdx],algBitValue); if(enable) rg_db.algTcpExternPortEnabled[algIdx]|=algBitValue; else rg_db.algTcpExternPortEnabled[algIdx]&=(~algBitValue); //DEBUG("after TCP enable[%d] is %x, algbitvalue = %x",algIdx,rg_db.algTcpExternPortEnabled[algIdx],algBitValue); } else { algIdx=portNum>>5; algBitValue=0x1<<(portNum&0x1f); //DEBUG("before UDP enable[%d] is %x, algbitvalue = %x",algIdx,rg_db.algUdpExternPortEnabled[algIdx],algBitValue); if(enable) rg_db.algUdpExternPortEnabled[algIdx]|=algBitValue; else rg_db.algUdpExternPortEnabled[algIdx]&=(~algBitValue); //DEBUG("after UDP enable[%d] is %x, algbitvalue = %x",algIdx,rg_db.algUdpExternPortEnabled[algIdx],algBitValue); } } void _rtk_rg_alg_setSrvInLanPortWithIntIP(unsigned short int portNum, int TCP, int enable, ipaddr_t srvInLanIP) { int algIdx,i,virtualSrvIdx,ret; unsigned int algBitValue; rtk_rg_virtualServer_t virtualSrv; if(portNum<=0)return; algIdx=portNum>>5; algBitValue=0x1<<(portNum&0x1f); 
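	/* Port-to-bitmap indexing used here and in _rtk_rg_alg_setPort() above: an L4 port number
	 * maps to array word portNum>>5 and bit portNum&0x1f, i.e. 32 port numbers per 32-bit word.
	 * Worked example: portNum=5060 gives algIdx=158 and algBitValue=0x1<<4 (0x10). The
	 * dimensions of the algTcp/Udp enable bitmaps themselves are declared elsewhere. */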
//DEBUG("before %s enable[%d]@SrvInLAN is %x, algbitvalue = %x",TCP==1?"TCP":"UDP",portNum,rg_db.algUdpExternPortEnabled_SrvInLan[algIdx],algBitValue); if(enable) { if(TCP==1) rg_db.algTcpExternPortEnabled_SrvInLan[algIdx]|=algBitValue; else rg_db.algUdpExternPortEnabled_SrvInLan[algIdx]|=algBitValue; if(srvInLanIP) { //Create VirtualServer for this ip and port, for each L4 WAN for(i=0;i<rg_db.systemGlobal.wanIntfTotalNum;i++) { if(rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->wan_type!=RTK_RG_BRIDGE && rg_db.systemGlobal.wanIntfGroup[i].p_intfInfo->p_wanStaticInfo->napt_enable) //L4 WAN { bzero(&virtualSrv,sizeof(rtk_rg_virtualServer_t)); virtualSrv.is_tcp=TCP; virtualSrv.wan_intf_idx=rg_db.systemGlobal.wanIntfGroup[i].index; virtualSrv.gateway_port_start=portNum; virtualSrv.local_ip=srvInLanIP; virtualSrv.local_port_start=portNum; virtualSrv.mappingPortRangeCnt=1; ret=rtk_rg_apollo_virtualServer_add(&virtualSrv,&virtualSrvIdx); if(ret!=RT_ERR_RG_OK) { FIXME("Error when add VirtualServer for new Server In LAN.."); } else { DEBUG("Create virtualServer[%d] at WAN%d for localIP %08x:%d", virtualSrvIdx, rg_db.systemGlobal.wanIntfGroup[i].index, srvInLanIP, portNum); } } } } } else { if(TCP==1) rg_db.algTcpExternPortEnabled_SrvInLan[algIdx]&=(~algBitValue); else rg_db.algUdpExternPortEnabled_SrvInLan[algIdx]&=(~algBitValue); if(srvInLanIP) { //delete VirtualServer virtualSrvIdx=0; for(i=0;i<rg_db.systemGlobal.virtualServerTotalNum;i++) { ret=rtk_rg_apollo_virtualServer_find(&virtualSrv,&virtualSrvIdx); if(ret==RT_ERR_RG_OK && virtualSrv.is_tcp==TCP && virtualSrv.local_ip==srvInLanIP && virtualSrv.gateway_port_start==portNum && virtualSrv.local_port_start==portNum && virtualSrv.mappingPortRangeCnt==1) { assert_ok(rtk_rg_apollo_virtualServer_del(virtualSrvIdx)); DEBUG("Delete virtualServer[%d] at WAN%d for localIP %08x:%d", virtualSrvIdx, rg_db.systemGlobal.wanIntfGroup[i].index, srvInLanIP, portNum); } } } } //DEBUG("after %s enable[%d]@SrvInLAN is %x, algbitvalue = %x",TCP==1?"TCP":"UDP",portNum,rg_db.algTcpExternPortEnabled_SrvInLan[algIdx],algBitValue); } void _rtk_rg_alg_setSrvInLanPort(unsigned short int portNum, int TCP, int enable, int srvInLanIndex) { srvInLanIndex-=RTK_RG_ALG_SIP_TCP_SRV_IN_LAN; _rtk_rg_alg_setSrvInLanPortWithIntIP(portNum, TCP, enable, rg_db.algServInLanIpMapping[srvInLanIndex].serverAddress); } rtk_rg_err_code_t rtk_rg_apollo_algServerInLanAppsIpAddr_add(rtk_rg_alg_serverIpMapping_t *srvIpMapping) { int i; //Check param if((rg_db.algFunctionMask&srvIpMapping->algType)>0) //you can not add or change serverIP when the ALG function is enabled... 
RETURN_ERR(RT_ERR_RG_ALG_SRV_IN_LAN_ENABLED); if(srvIpMapping->algType<RTK_RG_ALG_SIP_TCP_SRV_IN_LAN_BIT || srvIpMapping->algType>RTK_RG_ALG_FTP_UDP_SRV_IN_LAN_BIT) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if(srvIpMapping->serverAddress==0x0) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); //Setup service/IP mapping in rg_db for(i=0;i<MAX_ALG_SERV_IN_LAN_NUM;i++) { //DEBUG("algType is %x, input is %x",rg_db.algServInLanIpMapping[i].algType,srvIpMapping->algType); if((rg_db.algServInLanIpMapping[i].algType&srvIpMapping->algType)>0) { DEBUG("add server address %08x to algType %x",srvIpMapping->serverAddress,srvIpMapping->algType); rg_db.algServInLanIpMapping[i].serverAddress=srvIpMapping->serverAddress; rg_db.algServInLanIpMask|=srvIpMapping->algType; } } return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_algServerInLanAppsIpAddr_del(rtk_rg_alg_type_t delServerMapping) { int i; //Check Param if((rg_db.algFunctionMask&delServerMapping)>0) //you can not delete serverIP when the ALG function is enabled... RETURN_ERR(RT_ERR_RG_ALG_SRV_IN_LAN_ENABLED); //Delete service/IP mapping in rg_db for(i=0;i<MAX_ALG_SERV_IN_LAN_NUM;i++) { if((delServerMapping&rg_db.algServInLanIpMapping[i].algType)>0) { DEBUG("delete server address %08x from algType %x",rg_db.algServInLanIpMapping[i].serverAddress,rg_db.algServInLanIpMapping[i].algType); rg_db.algServInLanIpMapping[i].serverAddress=0; rg_db.algServInLanIpMask&=(~rg_db.algServInLanIpMapping[i].algType); } } return (RT_ERR_RG_OK); } void _rtk_rg_algDynamicPort_delete(unsigned int serverInLan, ipaddr_t intIP, unsigned short int portNum, int isTCP) { rtk_rg_alg_dynamicPort_t *pList,*pNext; //Lookup for this Port add before or not if(!list_empty(&rg_db.algDynamicCheckListHead)) { list_for_each_entry_safe(pList,pNext,&rg_db.algDynamicCheckListHead,alg_list) { if(pList->portNum==portNum && pList->serverInLan==serverInLan && pList->intIP==intIP && pList->isTCP==isTCP) { if(pList->serverInLan) _rtk_rg_alg_setSrvInLanPortWithIntIP(pList->portNum,pList->isTCP,0,pList->intIP); //delete virtual-server if intIP is valid else _rtk_rg_alg_setPort(pList->portNum,pList->isTCP,0); list_del_init(&pList->alg_list); list_add(&pList->alg_list,&rg_db.algDynamicFreeListHead); break; } } } } rtk_rg_successFailReturn_t _rtk_rg_algDynamicPort_set(p_algRegisterFunction registerFunction, unsigned int serverInLan, ipaddr_t intIP, unsigned short int portNum, int isTCP, int timeout) { rtk_rg_alg_dynamicPort_t *pList,*pNext; //------------------ Critical Section start -----------------------// rg_lock(&rg_kernel.algDynamicLock); _rtk_rg_algDynamicPort_delete(serverInLan, intIP, portNum, isTCP); //Check if we have not-used free arp list if(list_empty(&rg_db.algDynamicFreeListHead)) { WARNING("all free ALG dynamic list are allocated...LRU is needed!!"); //------------------ Critical Section End -----------------------// rg_unlock(&rg_kernel.algDynamicLock); return RG_RET_FAIL; } //Get one from free list list_for_each_entry_safe(pList,pNext,&rg_db.algDynamicFreeListHead,alg_list) //just return the first entry right behind of head { list_del_init(&pList->alg_list); break; } DEBUG("the free ALG Dynamic List %p",pList); pList->algFun=registerFunction; pList->portNum=portNum; pList->isTCP=isTCP; pList->serverInLan=serverInLan; pList->intIP=intIP; //turn on ALG function for the dynamic port if(serverInLan) _rtk_rg_alg_setSrvInLanPortWithIntIP(portNum,isTCP,1,intIP); //create virtual-server if intIP is valid else _rtk_rg_alg_setPort(portNum,isTCP,1); pList->timeout=timeout; 
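	/* Entry fully populated: link it onto the active check list (it was unlinked from the
	 * free list above); the matching removal path is _rtk_rg_algDynamicPort_delete(). */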
	list_add(&pList->alg_list,&rg_db.algDynamicCheckListHead);
	//------------------ Critical Section End -----------------------//
	rg_unlock(&rg_kernel.algDynamicLock);
	return RG_RET_SUCCESS;
}

rtk_rg_err_code_t rtk_rg_apollo_algApps_set(rtk_rg_alg_type_t alg_app)
{
#ifdef CONFIG_RG_PPPOE_PASSTHROUGH
	int i,lan_pmsk=0,wan_pmask=0;
#endif
	int isTCP=1,isUDP=0;
	rtk_rg_alg_type_t serverInLanMask,checkServerInLanMask,algStateChange;

	checkServerInLanMask = (alg_app>>ALG_SRV_IN_LAN_IDX)&0xff;	//only care about serverInLan settings
	serverInLanMask = rg_db.algServInLanIpMask;
	//DEBUG("the checkServerInLanMask is %x, serverInLanMask is %x",checkServerInLanMask,serverInLanMask);

	//Check param
	//the in-LAN server's IP must be set up before the ALG service is enabled
	if(((checkServerInLanMask<<ALG_SRV_IN_LAN_IDX)|serverInLanMask)!=rg_db.algServInLanIpMask)
		RETURN_ERR(RT_ERR_RG_ALG_SRV_IN_LAN_NO_IP);

#ifdef CONFIG_RG_PPPOE_PASSTHROUGH
#else
	if((alg_app & RTK_RG_ALG_PPPOE_PASSTHROUGH_BIT) > 0){	//Check whether the PPPoE passthrough function is enabled
		rtlglue_printf("PPPoE Passthrough not supported!\n");
		RETURN_ERR(RT_ERR_RG_PPPOEPASSTHROUGHT_NOT_SUPPORTED);
	}
#endif

	//Check ServerInLAN settings: only act on enable-from-disabled or disable-from-enabled transitions, to avoid adding virtualServer entries redundantly
	//20150123LUKE: only prevent from disable to enable while virtualServer is setup.
	algStateChange=alg_app^rg_db.algFunctionMask;
	if((algStateChange & RTK_RG_ALG_SIP_TCP_SRV_IN_LAN_BIT) > 0)
		if(((alg_app & RTK_RG_ALG_SIP_TCP_SRV_IN_LAN_BIT) > 0)&&(_rtk_rg_algSrvInLanCheckEnable(isTCP, rg_db.algUserDefinedPort[RTK_RG_ALG_SIP_TCP_SRV_IN_LAN])==SUCCESS))
			RETURN_ERR(RT_ERR_RG_ALG_SRV_IN_LAN_EXIST);
	if((algStateChange & RTK_RG_ALG_SIP_UDP_SRV_IN_LAN_BIT) > 0)
		if(((alg_app & RTK_RG_ALG_SIP_UDP_SRV_IN_LAN_BIT) > 0)&&(_rtk_rg_algSrvInLanCheckEnable(isUDP, rg_db.algUserDefinedPort[RTK_RG_ALG_SIP_UDP_SRV_IN_LAN])==SUCCESS))
			RETURN_ERR(RT_ERR_RG_ALG_SRV_IN_LAN_EXIST);
	if((algStateChange & RTK_RG_ALG_H323_TCP_SRV_IN_LAN_BIT) > 0)
		if(((alg_app & RTK_RG_ALG_H323_TCP_SRV_IN_LAN_BIT) > 0)&&(_rtk_rg_algSrvInLanCheckEnable(isTCP, rg_db.algUserDefinedPort[RTK_RG_ALG_H323_TCP_SRV_IN_LAN])==SUCCESS))
			RETURN_ERR(RT_ERR_RG_ALG_SRV_IN_LAN_EXIST);
	if((algStateChange & RTK_RG_ALG_H323_UDP_SRV_IN_LAN_BIT) > 0)
		if(((alg_app & RTK_RG_ALG_H323_UDP_SRV_IN_LAN_BIT) > 0)&&(_rtk_rg_algSrvInLanCheckEnable(isUDP, rg_db.algUserDefinedPort[RTK_RG_ALG_H323_UDP_SRV_IN_LAN])==SUCCESS))
			RETURN_ERR(RT_ERR_RG_ALG_SRV_IN_LAN_EXIST);
	if((algStateChange & RTK_RG_ALG_RTSP_TCP_SRV_IN_LAN_BIT) > 0)
		if(((alg_app & RTK_RG_ALG_RTSP_TCP_SRV_IN_LAN_BIT) > 0)&&(_rtk_rg_algSrvInLanCheckEnable(isTCP, rg_db.algUserDefinedPort[RTK_RG_ALG_RTSP_TCP_SRV_IN_LAN])==SUCCESS))
			RETURN_ERR(RT_ERR_RG_ALG_SRV_IN_LAN_EXIST);
	if((algStateChange & RTK_RG_ALG_RTSP_UDP_SRV_IN_LAN_BIT) > 0)
		if(((alg_app & RTK_RG_ALG_RTSP_UDP_SRV_IN_LAN_BIT) > 0)&&(_rtk_rg_algSrvInLanCheckEnable(isUDP, rg_db.algUserDefinedPort[RTK_RG_ALG_RTSP_UDP_SRV_IN_LAN])==SUCCESS))
			RETURN_ERR(RT_ERR_RG_ALG_SRV_IN_LAN_EXIST);
	if((algStateChange & RTK_RG_ALG_FTP_TCP_SRV_IN_LAN_BIT) > 0)
		if(((alg_app & RTK_RG_ALG_FTP_TCP_SRV_IN_LAN_BIT) > 0)&&(_rtk_rg_algSrvInLanCheckEnable(isTCP, rg_db.algUserDefinedPort[RTK_RG_ALG_FTP_TCP_SRV_IN_LAN])==SUCCESS))
			RETURN_ERR(RT_ERR_RG_ALG_SRV_IN_LAN_EXIST);
	if((algStateChange & RTK_RG_ALG_FTP_UDP_SRV_IN_LAN_BIT) > 0)
		if(((alg_app & RTK_RG_ALG_FTP_UDP_SRV_IN_LAN_BIT) > 0)&&(_rtk_rg_algSrvInLanCheckEnable(isUDP, rg_db.algUserDefinedPort[RTK_RG_ALG_FTP_UDP_SRV_IN_LAN])==SUCCESS))
RETURN_ERR(RT_ERR_RG_ALG_SRV_IN_LAN_EXIST); //Check ServerInWAN settings _rtk_rg_alg_setPort(rg_db.algUserDefinedPort[RTK_RG_ALG_SIP_TCP],isTCP,(alg_app & RTK_RG_ALG_SIP_TCP_BIT)); //enable or disable SIP TCP port _rtk_rg_alg_setPort(rg_db.algUserDefinedPort[RTK_RG_ALG_SIP_UDP],isUDP,(alg_app & RTK_RG_ALG_SIP_UDP_BIT)); //enable or disable SIP UDP port _rtk_rg_alg_setPort(rg_db.algUserDefinedPort[RTK_RG_ALG_H323_TCP],isTCP,(alg_app & RTK_RG_ALG_H323_TCP_BIT)); //enable or disable H323 TCP port _rtk_rg_alg_setPort(rg_db.algUserDefinedPort[RTK_RG_ALG_H323_UDP],isUDP,(alg_app & RTK_RG_ALG_H323_UDP_BIT)); //enable or disable H323 UDP port _rtk_rg_alg_setPort(rg_db.algUserDefinedPort[RTK_RG_ALG_RTSP_TCP],isTCP,(alg_app & RTK_RG_ALG_RTSP_TCP_BIT)); //enable or disable RTSP TCP port _rtk_rg_alg_setPort(rg_db.algUserDefinedPort[RTK_RG_ALG_RTSP_UDP],isUDP,(alg_app & RTK_RG_ALG_RTSP_UDP_BIT)); //enable or disable RTSP UDP port _rtk_rg_alg_setPort(rg_db.algUserDefinedPort[RTK_RG_ALG_FTP_TCP],isTCP,(alg_app & RTK_RG_ALG_FTP_TCP_BIT)); //enable or disable FTP TCP port _rtk_rg_alg_setPort(rg_db.algUserDefinedPort[RTK_RG_ALG_FTP_UDP],isUDP,(alg_app & RTK_RG_ALG_FTP_UDP_BIT)); //enable or disable FTP UDP port #ifdef CONFIG_RG_ROMEDRIVER_ALG_BATTLENET_SUPPORT _rtk_rg_alg_setPort(rg_db.algUserDefinedPort[RTK_RG_ALG_BATTLENET_TCP],1,(rg_db.algFunctionMask & RTK_RG_ALG_BATTLENET_TCP_BIT)); //enable or disable FTP UDP port #endif //Check ServerInLAN settings, only enable from disable or diable from enable state, to avoid adding virtualServer redundantly if((algStateChange & RTK_RG_ALG_SIP_TCP_SRV_IN_LAN_BIT) > 0) _rtk_rg_alg_setSrvInLanPort(rg_db.algUserDefinedPort[RTK_RG_ALG_SIP_TCP],isTCP,(alg_app & RTK_RG_ALG_SIP_TCP_SRV_IN_LAN_BIT),RTK_RG_ALG_SIP_TCP_SRV_IN_LAN); //enable or disable SIP Server in LAN TCP port if((algStateChange & RTK_RG_ALG_SIP_UDP_SRV_IN_LAN_BIT) > 0) _rtk_rg_alg_setSrvInLanPort(rg_db.algUserDefinedPort[RTK_RG_ALG_SIP_UDP],isUDP,(alg_app & RTK_RG_ALG_SIP_UDP_SRV_IN_LAN_BIT),RTK_RG_ALG_SIP_UDP_SRV_IN_LAN); //enable or disable UDP SIP Server in LAN UDP port if((algStateChange & RTK_RG_ALG_H323_TCP_SRV_IN_LAN_BIT) > 0) _rtk_rg_alg_setSrvInLanPort(rg_db.algUserDefinedPort[RTK_RG_ALG_H323_TCP],isTCP,(alg_app & RTK_RG_ALG_H323_TCP_SRV_IN_LAN_BIT),RTK_RG_ALG_H323_TCP_SRV_IN_LAN); //enable or disable H323 Server in LAN TCP port if((algStateChange & RTK_RG_ALG_H323_UDP_SRV_IN_LAN_BIT) > 0) _rtk_rg_alg_setSrvInLanPort(rg_db.algUserDefinedPort[RTK_RG_ALG_H323_UDP],isUDP,(alg_app & RTK_RG_ALG_H323_UDP_SRV_IN_LAN_BIT),RTK_RG_ALG_H323_UDP_SRV_IN_LAN); //enable or disable UDP H323 Server in LAN UDP port if((algStateChange & RTK_RG_ALG_RTSP_TCP_SRV_IN_LAN_BIT) > 0) _rtk_rg_alg_setSrvInLanPort(rg_db.algUserDefinedPort[RTK_RG_ALG_RTSP_TCP],isTCP,(alg_app & RTK_RG_ALG_RTSP_TCP_SRV_IN_LAN_BIT),RTK_RG_ALG_RTSP_TCP_SRV_IN_LAN); //enable or disable RTSP Server in LAN TCP port if((algStateChange & RTK_RG_ALG_RTSP_UDP_SRV_IN_LAN_BIT) > 0) _rtk_rg_alg_setSrvInLanPort(rg_db.algUserDefinedPort[RTK_RG_ALG_RTSP_UDP],isUDP,(alg_app & RTK_RG_ALG_RTSP_UDP_SRV_IN_LAN_BIT),RTK_RG_ALG_RTSP_UDP_SRV_IN_LAN); //enable or disable UDP RTSP Server in LAN UDP port if((algStateChange & RTK_RG_ALG_FTP_TCP_SRV_IN_LAN_BIT) > 0) _rtk_rg_alg_setSrvInLanPort(rg_db.algUserDefinedPort[RTK_RG_ALG_FTP_TCP],isTCP,(alg_app & RTK_RG_ALG_FTP_TCP_SRV_IN_LAN_BIT),RTK_RG_ALG_FTP_TCP_SRV_IN_LAN); //enable or disable FTP Server in LAN TCP port if((algStateChange & RTK_RG_ALG_FTP_UDP_SRV_IN_LAN_BIT) > 0) 
		_rtk_rg_alg_setSrvInLanPort(rg_db.algUserDefinedPort[RTK_RG_ALG_FTP_UDP],isUDP,(alg_app & RTK_RG_ALG_FTP_UDP_SRV_IN_LAN_BIT),RTK_RG_ALG_FTP_UDP_SRV_IN_LAN);	//enable or disable FTP Server-in-LAN UDP port
#ifdef CONFIG_RG_ROMEDRIVER_ALG_BATTLENET_SUPPORT
	_rtk_rg_alg_setSrvInLanPort(rg_db.algUserDefinedPort[RTK_RG_ALG_BATTLENET_TCP],1,(rg_db.algFunctionMask & RTK_RG_ALG_BATTLENET_TCP_BIT),8);	//enable or disable Battle.net Server-in-LAN TCP port
#endif

	//Check passthrough settings
	_rtk_rg_alg_setPort(rg_db.algUserDefinedPort[RTK_RG_ALG_PPTP_TCP_PASSTHROUGH],isTCP,(alg_app & RTK_RG_ALG_PPTP_TCP_PASSTHROUGH_BIT));	//enable or disable PPTP TCP port
	_rtk_rg_alg_setPort(rg_db.algUserDefinedPort[RTK_RG_ALG_PPTP_UDP_PASSTHROUGH],isUDP,(alg_app & RTK_RG_ALG_PPTP_UDP_PASSTHROUGH_BIT));	//enable or disable PPTP UDP port
	_rtk_rg_alg_setPort(rg_db.algUserDefinedPort[RTK_RG_ALG_L2TP_TCP_PASSTHROUGH],isTCP,(alg_app & RTK_RG_ALG_L2TP_TCP_PASSTHROUGH_BIT));	//enable or disable L2TP TCP port
	_rtk_rg_alg_setPort(rg_db.algUserDefinedPort[RTK_RG_ALG_L2TP_UDP_PASSTHROUGH],isUDP,(alg_app & RTK_RG_ALG_L2TP_UDP_PASSTHROUGH_BIT));	//enable or disable L2TP UDP port
	_rtk_rg_alg_setPort(rg_db.algUserDefinedPort[RTK_RG_ALG_IPSEC_TCP_PASSTHROUGH],isTCP,(alg_app & RTK_RG_ALG_IPSEC_TCP_PASSTHROUGH_BIT));	//enable or disable IPSEC TCP port
	_rtk_rg_alg_setPort(rg_db.algUserDefinedPort[RTK_RG_ALG_IPSEC_UDP_PASSTHROUGH],isUDP,(alg_app & RTK_RG_ALG_IPSEC_UDP_PASSTHROUGH_BIT));	//enable or disable IPSEC UDP port

#ifdef CONFIG_RG_PPPOE_PASSTHROUGH
	//PPPoE passthrough is a special case: it has no Layer 4 port, so reserved ACL rules are used instead
	if((algStateChange & RTK_RG_ALG_PPPOE_PASSTHROUGH_BIT) > 0)
	{
		//1 FIXME: Patch for 201305171900, pppoe pass through has to be disabled!!
		if((alg_app & RTK_RG_ALG_PPPOE_PASSTHROUGH_BIT) > 0)
		{
			//Gather all LAN ports
			for(i=0;i<rg_db.systemGlobal.lanIntfTotalNum;i++)
				lan_pmsk|=rg_db.systemGlobal.lanIntfGroup[i].p_intfInfo->p_lanIntfConf->port_mask.portmask;
			//Gather all WAN ports
			for(i=0;i<rg_db.systemGlobal.wanIntfTotalNum;i++)
				wan_pmask|=(0x1<<rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->wan_port_idx);
#if 1 //pppoe_passthru acl disable
			_rtk_rg_acl_reserved_pppoePassthroughDefaultRule_add(lan_pmsk,wan_pmask,rg_db.systemGlobal.initParam.fwdVLAN_CPU);
#endif
			//_rtk_rg_aclFilterSessionID_and_VIDRemarking_add(-1,DEFAULT_PASSTHROUGH);
		}
		else
		{
#if 1 //pppoe_passthru acl disable
			//1 FIXME: Patch for 201305171900, pppoe pass through has to be disabled!!
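			//PPPoE passthrough is being turned off: remove the reserved passthrough trap/remark ACL rules again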
_rtk_rg_acl_reserved_pppoePassthroughDefaultRule_del(); #endif //_rtk_rg_acl_pppoe_passthrough_for_wanIntf_del(-1); } } #endif //Save input parameter into rg_db global variable rg_db.algFunctionMask = alg_app; assert_ok(_rtk_rg_shortCut_clear()); return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_algApps_get(rtk_rg_alg_type_t *alg_app) { *alg_app=rg_db.algFunctionMask; return (RT_ERR_RG_OK); } //DMZ rtk_rg_err_code_t rtk_rg_apollo_dmzHost_set(int wan_intf_idx, rtk_rg_dmzInfo_t *dmz_info) { #ifdef CONFIG_RG_NAPT_DMZ_SUPPORT DEBUG("Set DMZ[%d] IP_VERSION[%d] IP[0x%x] IPv6[%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x]\n", wan_intf_idx,dmz_info->ipversion,dmz_info->private_ip, dmz_info->private_ipv6.ipv6_addr[0],dmz_info->private_ipv6.ipv6_addr[1],dmz_info->private_ipv6.ipv6_addr[2],dmz_info->private_ipv6.ipv6_addr[3], dmz_info->private_ipv6.ipv6_addr[4],dmz_info->private_ipv6.ipv6_addr[5],dmz_info->private_ipv6.ipv6_addr[6],dmz_info->private_ipv6.ipv6_addr[7], dmz_info->private_ipv6.ipv6_addr[8],dmz_info->private_ipv6.ipv6_addr[9],dmz_info->private_ipv6.ipv6_addr[10],dmz_info->private_ipv6.ipv6_addr[11], dmz_info->private_ipv6.ipv6_addr[12],dmz_info->private_ipv6.ipv6_addr[13],dmz_info->private_ipv6.ipv6_addr[14],dmz_info->private_ipv6.ipv6_addr[15]); if((wan_intf_idx < 0) || (wan_intf_idx > MAX_NETIF_SW_TABLE_SIZE)) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if(rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.is_wan == 0) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if(rg_db.systemGlobal.interfaceInfo[wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.wan_type == RTK_RG_BRIDGE) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); rg_db.dmzInfo[wan_intf_idx].enabled = dmz_info->enabled; rg_db.dmzInfo[wan_intf_idx].mac_mapping_enabled = dmz_info->mac_mapping_enabled; rg_db.dmzInfo[wan_intf_idx].ipversion = dmz_info->ipversion; if(dmz_info->mac_mapping_enabled) memcpy(&rg_db.dmzInfo[wan_intf_idx].mac.octet[0],&dmz_info->mac.octet[0],6); else rg_db.dmzInfo[wan_intf_idx].private_ip = dmz_info->private_ip; memcpy(rg_db.dmzInfo[wan_intf_idx].private_ipv6.ipv6_addr,dmz_info->private_ipv6.ipv6_addr,IPV6_ADDR_LEN); #endif return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_dmzHost_get(int wan_intf_idx, rtk_rg_dmzInfo_t *dmz_info) { #ifdef CONFIG_RG_NAPT_DMZ_SUPPORT if((wan_intf_idx < 0) || (wan_intf_idx > MAX_NETIF_SW_TABLE_SIZE)) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); dmz_info->enabled = rg_db.dmzInfo[wan_intf_idx].enabled; memcpy(&dmz_info->mac.octet[0],&rg_db.dmzInfo[wan_intf_idx].mac.octet[0],6); dmz_info->mac_mapping_enabled = rg_db.dmzInfo[wan_intf_idx].mac_mapping_enabled; dmz_info->private_ip = rg_db.dmzInfo[wan_intf_idx].private_ip; dmz_info->ipversion = rg_db.dmzInfo[wan_intf_idx].ipversion; memcpy(dmz_info->private_ipv6.ipv6_addr,rg_db.dmzInfo[wan_intf_idx].private_ipv6.ipv6_addr,IPV6_ADDR_LEN); #endif return (RT_ERR_RG_OK); } //VirtualServer(PortForward) rtk_rg_err_code_t rtk_rg_apollo_virtualServer_add(rtk_rg_virtualServer_t *virtual_server, int *virtual_server_idx) { int i=0,j=0,count=0; unsigned int tmpmask; //Check rg has been init if(rg_db.systemGlobal.vlanInit==0) RETURN_ERR(RT_ERR_RG_NOT_INIT); if(!virtual_server->disable_wan_check){ if(rg_db.systemGlobal.interfaceInfo[virtual_server->wan_intf_idx].storedInfo.is_wan == 0) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if(rg_db.systemGlobal.interfaceInfo[virtual_server->wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.wan_type == RTK_RG_BRIDGE) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if(virtual_server->hookAlgType!=0) 
RETURN_ERR(RT_ERR_RG_CHIP_NOT_SUPPORT); } // Get free entry for(i=0;i<MAX_VIRTUAL_SERVER_SW_TABLE_SIZE;i++) { if(rg_db.virtualServer[i].valid) continue; else break; } if(i==MAX_VIRTUAL_SERVER_SW_TABLE_SIZE) RETURN_ERR(RT_ERR_RG_ENTRY_FULL); //20150114LUKE: hook server-in-lan ALG if(virtual_server->hookAlgType>=RTK_RG_ALG_SIP_TCP_SRV_IN_LAN_BIT && virtual_server->hookAlgType<=RTK_RG_ALG_FTP_UDP_SRV_IN_LAN_BIT) { tmpmask=virtual_server->hookAlgType; while(tmpmask>>=1)count++; if(virtual_server->mappingType==VS_MAPPING_N_TO_N) { //20160126LUKE: we should add gateway port as ALG also for NAPTR traffic! //Check if we are using enabled port before we enable it for(j=0;j<virtual_server->mappingPortRangeCnt;j++){ if(_rtk_rg_naptExtPortInUsedCheck(FALSE, virtual_server->is_tcp, virtual_server->local_port_start+j, FALSE, FALSE)==1) //used RETURN_ERR(RT_ERR_RG_ALG_SRV_IN_LAN_EXIST); if(_rtk_rg_naptExtPortInUsedCheck(FALSE, virtual_server->is_tcp, virtual_server->gateway_port_start+j, FALSE, FALSE)==1) //used RETURN_ERR(RT_ERR_RG_ALG_SRV_IN_LAN_EXIST); } for(j=0;j<virtual_server->mappingPortRangeCnt;j++){ _rtk_rg_algDynamicPort_set(rg_db.algTcpFunctionMapping[count].registerFunction, 1, 0, virtual_server->local_port_start+j, virtual_server->is_tcp, -1); _rtk_rg_algDynamicPort_set(rg_db.algTcpFunctionMapping[count].registerFunction, 1, 0, virtual_server->gateway_port_start+j, virtual_server->is_tcp, -1); } } else { //Check if we are using enabled port before we enable it if(_rtk_rg_naptExtPortInUsedCheck(FALSE, virtual_server->is_tcp, virtual_server->local_port_start, FALSE, FALSE)==1) //used RETURN_ERR(RT_ERR_RG_ALG_SRV_IN_LAN_EXIST); _rtk_rg_algDynamicPort_set(rg_db.algTcpFunctionMapping[count].registerFunction, 1, 0, virtual_server->local_port_start, virtual_server->is_tcp, -1); //20160126LUKE: we should add gateway port as ALG also for NAPTR traffic! 
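			/* intIP is passed as 0 in these calls, so _rtk_rg_alg_setSrvInLanPortWithIntIP() only flips the
			 * server-in-LAN port bitmap and skips its own virtual-server creation (its if(srvInLanIP) branch);
			 * the virtual server being added right here already provides the port mapping. */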
if(_rtk_rg_naptExtPortInUsedCheck(FALSE, virtual_server->is_tcp, virtual_server->gateway_port_start, FALSE, FALSE)==1) //used RETURN_ERR(RT_ERR_RG_ALG_SRV_IN_LAN_EXIST); _rtk_rg_algDynamicPort_set(rg_db.algTcpFunctionMapping[count].registerFunction, 1, 0, virtual_server->gateway_port_start, virtual_server->is_tcp, -1); } } // Add virtual server setting *virtual_server_idx = i; rg_db.virtualServer[i].ipversion = virtual_server->ipversion; rg_db.virtualServer[i].is_tcp = virtual_server->is_tcp; rg_db.virtualServer[i].local_ip = virtual_server->local_ip; memcpy(rg_db.virtualServer[i].local_ipv6.ipv6_addr,virtual_server->local_ipv6.ipv6_addr,IPV6_ADDR_LEN); rg_db.virtualServer[i].local_port_start = virtual_server->local_port_start; rg_db.virtualServer[i].gateway_port_start = virtual_server->gateway_port_start; rg_db.virtualServer[i].mappingPortRangeCnt = virtual_server->mappingPortRangeCnt; rg_db.virtualServer[i].mappingType = virtual_server->mappingType; rg_db.virtualServer[i].wan_intf_idx = virtual_server->wan_intf_idx; rg_db.virtualServer[i].valid = 1; rg_db.virtualServer[i].hookAlgType = virtual_server->hookAlgType; rg_db.virtualServer[i].disable_wan_check = virtual_server->disable_wan_check; rg_db.virtualServer[i].remote_ip = virtual_server->remote_ip; rg_db.systemGlobal.virtualServerGroup[rg_db.systemGlobal.virtualServerTotalNum].index = *virtual_server_idx; rg_db.systemGlobal.virtualServerGroup[rg_db.systemGlobal.virtualServerTotalNum].p_virtualServer = &rg_db.virtualServer[i]; rg_db.systemGlobal.virtualServerTotalNum++; return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_virtualServer_del(int virtual_server_idx) { int i=0,j=0; //Check rg has been init if(rg_db.systemGlobal.vlanInit==0) RETURN_ERR(RT_ERR_RG_NOT_INIT); if( (virtual_server_idx<0) || (virtual_server_idx>=MAX_VIRTUAL_SERVER_SW_TABLE_SIZE)) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); //20150114LUKE: hook server-in-lan ALG deleting if(rg_db.virtualServer[virtual_server_idx].hookAlgType>=RTK_RG_ALG_SIP_TCP_SRV_IN_LAN_BIT && rg_db.virtualServer[virtual_server_idx].hookAlgType<=RTK_RG_ALG_FTP_UDP_SRV_IN_LAN_BIT) { if(rg_db.virtualServer[virtual_server_idx].mappingType==VS_MAPPING_N_TO_N) { //20160126LUKE: we should del gateway port also! for(j=0;j<rg_db.virtualServer[virtual_server_idx].mappingPortRangeCnt;j++){ _rtk_rg_algDynamicPort_delete(1, 0, rg_db.virtualServer[virtual_server_idx].local_port_start+j, rg_db.virtualServer[virtual_server_idx].is_tcp); _rtk_rg_algDynamicPort_delete(1, 0, rg_db.virtualServer[virtual_server_idx].gateway_port_start+j, rg_db.virtualServer[virtual_server_idx].is_tcp); } }else{ _rtk_rg_algDynamicPort_delete(1, 0, rg_db.virtualServer[virtual_server_idx].local_port_start, rg_db.virtualServer[virtual_server_idx].is_tcp); //20160126LUKE: we should del gateway port also! 
_rtk_rg_algDynamicPort_delete(1, 0, rg_db.virtualServer[virtual_server_idx].gateway_port_start, rg_db.virtualServer[virtual_server_idx].is_tcp); } } memset(&rg_db.virtualServer[virtual_server_idx],0,sizeof(rtk_rg_virtualServer_t)); /* Virtual Server Group table */ for(i=0;i<rg_db.systemGlobal.virtualServerTotalNum;i++) { if(rg_db.systemGlobal.virtualServerGroup[i].index == virtual_server_idx) { if(i==(rg_db.systemGlobal.virtualServerTotalNum-1)) { //The last entry rg_db.systemGlobal.virtualServerGroup[i].index = 0; rg_db.systemGlobal.virtualServerGroup[i].p_virtualServer = NULL; } else { //Replace the deleteing entry by last one rg_db.systemGlobal.virtualServerGroup[i].index = rg_db.systemGlobal.virtualServerGroup[rg_db.systemGlobal.virtualServerTotalNum-1].index; rg_db.systemGlobal.virtualServerGroup[i].p_virtualServer = rg_db.systemGlobal.virtualServerGroup[rg_db.systemGlobal.virtualServerTotalNum-1].p_virtualServer; rg_db.systemGlobal.virtualServerGroup[rg_db.systemGlobal.virtualServerTotalNum-1].index = 0; rg_db.systemGlobal.virtualServerGroup[rg_db.systemGlobal.virtualServerTotalNum-1].p_virtualServer = NULL; } rg_db.systemGlobal.virtualServerTotalNum--; return (RT_ERR_RG_OK); } } return (RT_ERR_RG_ENTRY_NOT_EXIST); } rtk_rg_err_code_t rtk_rg_apollo_virtualServer_find(rtk_rg_virtualServer_t *virtual_server, int *valid_idx) { int idx=0; //Check rg has been init if(rg_db.systemGlobal.vlanInit==0) RETURN_ERR(RT_ERR_RG_NOT_INIT); if(virtual_server==NULL || valid_idx==NULL) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if(*valid_idx<0 || *valid_idx>=MAX_VIRTUAL_SERVER_SW_TABLE_SIZE) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); //Find server port mapping for(idx=*valid_idx;idx<MAX_VIRTUAL_SERVER_SW_TABLE_SIZE;idx++) { if(rg_db.virtualServer[idx].valid) { *valid_idx=idx; memcpy(virtual_server,&rg_db.virtualServer[idx],sizeof(rtk_rg_virtualServer_t)); return RT_ERR_RG_OK; } } return RT_ERR_RG_SVRPORT_SW_ENTRY_NOT_FOUND; } int _rtk_rg_is_aclSWEntry_init(void) { if(rg_db.systemGlobal.acl_SW_table_entry==NULL || rg_db.systemGlobal.acl_filter_temp==NULL) RETURN_ERR(RT_ERR_RG_NULL_POINTER); else return (RT_ERR_RG_OK); } int _rtk_rg_aclSWEntry_init(void) { int j; for(j=0;j<MAX_ACL_SW_ENTRY_SIZE;j++){ rg_db.systemGlobal.acl_SWindex_sorting_by_weight[j]=-1; rg_db.systemGlobal.acl_SWindex_sorting_by_weight_and_ingress_cvid_action[j]=-1; } bzero(rg_db.systemGlobal.acl_SW_table_entry, sizeof(rtk_rg_aclFilterEntry_t)*MAX_ACL_SW_ENTRY_SIZE); bzero(rg_db.systemGlobal.acl_filter_temp, sizeof(rtk_rg_aclFilterAndQos_t)*MAX_ACL_SW_ENTRY_SIZE); rg_db.systemGlobal.acl_SW_table_entry_size=0; return (RT_ERR_RG_OK); } int _rtk_rg_aclSWEntry_dump(void) { int i; rtk_rg_aclFilterEntry_t aclSWEntry, empty_aclSWEntry; bzero(&aclSWEntry,sizeof(aclSWEntry)); bzero(&empty_aclSWEntry,sizeof(empty_aclSWEntry)); DEBUG("dump aclSWEntry"); for(i=0; i<MAX_ACL_SW_ENTRY_SIZE; i++) { _rtk_rg_aclSWEntry_get(i,&aclSWEntry); if(memcmp(&aclSWEntry,&empty_aclSWEntry,sizeof(rtk_rg_aclFilterEntry_t))) DEBUG("aclSWEntry[%d]: aclstart=%d aclsize=%d cfstart=%d cfsize=%d type=%d\n",i,aclSWEntry.hw_aclEntry_start,aclSWEntry.hw_aclEntry_size,aclSWEntry.hw_cfEntry_start,aclSWEntry.hw_cfEntry_size,aclSWEntry.type); } return (RT_ERR_RG_OK); } int _rtk_rg_aclSWEntry_get(int index, rtk_rg_aclFilterEntry_t* aclSWEntry) { //check the acl_SW_Entry has been allocate if(_rtk_rg_is_aclSWEntry_init()) { if(_rtk_rg_aclSWEntry_init()) RETURN_ERR(RT_ERR_RG_NULL_POINTER); } *aclSWEntry = rg_db.systemGlobal.acl_SW_table_entry[index]; return (RT_ERR_RG_OK); } int 
_rtk_rg_aclSWEntry_set(int index, rtk_rg_aclFilterEntry_t aclSWEntry) { //check the acl_SW_Entry has been allocate if(_rtk_rg_is_aclSWEntry_init()) { if(_rtk_rg_aclSWEntry_init()) RETURN_ERR(RT_ERR_RG_NULL_POINTER); } rg_db.systemGlobal.acl_SW_table_entry[index] = aclSWEntry; return (RT_ERR_RG_OK); } int _rtk_rg_aclSWEntry_empty_find(int* index) { int i; rtk_rg_aclFilterEntry_t aclSWEntry, empty_aclSWEntry; bzero(&aclSWEntry, sizeof(aclSWEntry)); bzero(&empty_aclSWEntry, sizeof(empty_aclSWEntry)); //check the acl_SW_Entry has been allocate if(_rtk_rg_is_aclSWEntry_init()) { if(_rtk_rg_aclSWEntry_init()) RETURN_ERR(RT_ERR_RG_NULL_POINTER); } for(i=0; i<MAX_ACL_SW_ENTRY_SIZE; i++) { if(_rtk_rg_aclSWEntry_get(i,&aclSWEntry)) RETURN_ERR(RT_ERR_RG_ACL_SW_ENTRY_ACCESS_FAILED); if(!memcmp(&aclSWEntry,&empty_aclSWEntry,sizeof(rtk_rg_aclFilterEntry_t))) { *index = i; break; } //not found empty entry if(i==(MAX_ACL_SW_ENTRY_SIZE-1)) return (RT_ERR_RG_ACL_SW_ENTRY_FULL); } return (RT_ERR_RG_OK); } int _rtk_rg_classifySWEntry_init(void) { bzero(rg_db.systemGlobal.classify_SW_table_entry, sizeof(rtk_rg_classifyEntry_t)*TOTAL_CF_ENTRY_SIZE); return (RT_ERR_RG_OK); } int _rtk_rg_classifySWEntry_set(int index, rtk_rg_classifyEntry_t cfSWEntry) { rg_db.systemGlobal.classify_SW_table_entry[index] = cfSWEntry; return (RT_ERR_RG_OK); } int _rtk_rg_classifySWEntry_get(int index, rtk_rg_classifyEntry_t* cfSWEntry) { *cfSWEntry = rg_db.systemGlobal.classify_SW_table_entry[index]; return (RT_ERR_RG_OK); } int _rtk_rg_dot1pPriRemarking2InternalPri_search(int target_rmk_dot1p, int *duplicateSize,int* intPri){ /*search which IntPri(size & which Intpri) is remarking to rmkPri*/ int int_pri; int rmk_dot1p; int size=0; for(int_pri=0;int_pri<8;int_pri++){ assert_ok(rtk_rg_apollo_qosDot1pPriRemarkByInternalPri_get(int_pri,&rmk_dot1p)); if(rmk_dot1p == target_rmk_dot1p){ *(intPri+size) = int_pri; size++; } } *duplicateSize= size; return (RT_ERR_RG_OK); } int rtk_rg_reserved_acl_arrange_show(struct seq_file *s, void *v) { int len=0; if(rg_kernel.debug_level&RTK_RG_DEBUG_LEVEL_ACL_RRESERVED){ _rtk_rg_aclAndCfReservedRuleAdd(RTK_RG_ACLANDCF_RESERVED_TAIL_END, NULL); }else{ PROC_PRINTF("The arrange message is showed in rg debug_level! please enable by cmd: echo 0x1000 > proc/rg/debug_level \n"); } return len; } int _rtk_rg_acl_skip_hw_rearrange_get(struct seq_file *s, void *v) { int len=0; if(rg_db.systemGlobal.aclSkipRearrangeHWAclCf){ PROC_PRINTF("ACL skip HW rearrange Enabled. (packet will be trap to software)\n"); }else{ PROC_PRINTF("ACL skip HW rearrange Disabled.\n"); } return len; } int _rtk_rg_acl_skip_hw_rearrange_set( struct file *filp, const char *buff,unsigned long len, void *data ) { int enabled,ret; enabled = _rtk_rg_pasring_proc_string_to_integer(buff,len); if(enabled==0){ rg_db.systemGlobal.aclSkipRearrangeHWAclCf=0; _rtk_rg_aclAndCfReservedRuleDel(RTK_RG_ACLANDCF_RESERVED_ACL_SKIP_HW_REARRANGE_PERMIT_AND_TRAP_RESERV); //rearaange the HW ASIC when disabled the proc ret = _rtk_rg_aclSWEntry_and_asic_rearrange(); if(ret!=RT_ERR_RG_OK) WARNING("ACL rearrange Failed!!! 
Asic could be not right!"); _rtk_rg_acl_skip_hw_rearrange_get(NULL,NULL); }else if(enabled==1){ if(rg_db.systemGlobal.aclSkipRearrangeHWAclCf==0)//rearrange first before record to rg_db.systemGlobal.aclSkipRearrangeHWAclCf , and avoid rearrange twice to avoid H/W user ACL be clean _rtk_rg_aclAndCfReservedRuleAdd(RTK_RG_ACLANDCF_RESERVED_ACL_SKIP_HW_REARRANGE_PERMIT_AND_TRAP_RESERV,NULL); rg_db.systemGlobal.aclSkipRearrangeHWAclCf=1; _rtk_rg_acl_skip_hw_rearrange_get(NULL,NULL); }else{ rtlglue_printf("invalid parameter\n"); } return len; } #if defined(CONFIG_RTL9602C_SERIES) || defined(CONFIG_RTL9607C_SERIES) //ApolloFE suppport only one wifi, no need reserved ACL to patch. #else int32 _rtk_rg_acl_reserved_wifi_extPMaskTranslate_add(int patchFor,unsigned int igr_extPmsk,unsigned int egr_extPmsk){ //patchFor = 0 is patch for master rtk_rg_aclAndCf_reserved_WifiMasterExtportPatch_t masterPatch; rtk_rg_aclAndCf_reserved_WifiSlaveExtportPatch_t slavePatch; if(patchFor==0){ bzero(&masterPatch,sizeof(masterPatch)); masterPatch.igrPmsk=igr_extPmsk; masterPatch.egrPmsk=egr_extPmsk; assert_ok(_rtk_rg_aclAndCfReservedRuleAdd(RTK_RG_ACLANDCF_RESERVED_WIFI_MASTER_EXTPORT_PATCH, &masterPatch)); } //patchFor =1 is patch for slave if(patchFor==1){ bzero(&slavePatch,sizeof(slavePatch)); slavePatch.igrPmsk=igr_extPmsk; slavePatch.egrPmsk=egr_extPmsk; assert_ok(_rtk_rg_aclAndCfReservedRuleAdd(RTK_RG_ACLANDCF_RESERVED_WIFI_SLAVE_EXTPORT_PATCH, &slavePatch)); } return (RT_ERR_RG_OK); } int32 _rtk_rg_acl_reserved_wifi_extPMaskTranslate_del(int patchFor){ if(patchFor==0) assert_ok(_rtk_rg_aclAndCfReservedRuleDel(RTK_RG_ACLANDCF_RESERVED_WIFI_MASTER_EXTPORT_PATCH)); if(patchFor==1) assert_ok(_rtk_rg_aclAndCfReservedRuleDel(RTK_RG_ACLANDCF_RESERVED_WIFI_SLAVE_EXTPORT_PATCH)); return (RT_ERR_RG_OK); } #endif #if 0 int32 _rtk_rg_cf_reserved_pon_intfSSIDRemap_add(int intfIdx,int ssid, int vid){ rtk_rg_aclAndCf_reserved_ponIntfSsidRemapPatch_t ponIntfSsidRemapPatchPara; bzero(&ponIntfSsidRemapPatchPara,sizeof(ponIntfSsidRemapPatchPara)); ponIntfSsidRemapPatchPara.intfIdx=intfIdx; ponIntfSsidRemapPatchPara.ssid=ssid; ponIntfSsidRemapPatchPara.vid=vid; if(intfIdx==-1){ assert_ok(_rtk_rg_aclAndCfReservedRuleAdd(RTK_RG_ACLANDCF_RESERVED_PON_INTFDEFAULTSSIDREMAP_PATCH, &ponIntfSsidRemapPatchPara)); }else if(intfIdx==0){ assert_ok(_rtk_rg_aclAndCfReservedRuleAdd(RTK_RG_ACLANDCF_RESERVED_PON_INTF0SSIDREMAP_PATCH, &ponIntfSsidRemapPatchPara)); }else if(intfIdx==1){ assert_ok(_rtk_rg_aclAndCfReservedRuleAdd(RTK_RG_ACLANDCF_RESERVED_PON_INTF1SSIDREMAP_PATCH, &ponIntfSsidRemapPatchPara)); }else if(intfIdx==2){ assert_ok(_rtk_rg_aclAndCfReservedRuleAdd(RTK_RG_ACLANDCF_RESERVED_PON_INTF2SSIDREMAP_PATCH, &ponIntfSsidRemapPatchPara)); }else if(intfIdx==3){ assert_ok(_rtk_rg_aclAndCfReservedRuleAdd(RTK_RG_ACLANDCF_RESERVED_PON_INTF3SSIDREMAP_PATCH, &ponIntfSsidRemapPatchPara)); }else if(intfIdx==4){ assert_ok(_rtk_rg_aclAndCfReservedRuleAdd(RTK_RG_ACLANDCF_RESERVED_PON_INTF4SSIDREMAP_PATCH, &ponIntfSsidRemapPatchPara)); }else if(intfIdx==5){ assert_ok(_rtk_rg_aclAndCfReservedRuleAdd(RTK_RG_ACLANDCF_RESERVED_PON_INTF5SSIDREMAP_PATCH, &ponIntfSsidRemapPatchPara)); }else if(intfIdx==6){ assert_ok(_rtk_rg_aclAndCfReservedRuleAdd(RTK_RG_ACLANDCF_RESERVED_PON_INTF6SSIDREMAP_PATCH, &ponIntfSsidRemapPatchPara)); }else if(intfIdx==7){ assert_ok(_rtk_rg_aclAndCfReservedRuleAdd(RTK_RG_ACLANDCF_RESERVED_PON_INTF7SSIDREMAP_PATCH, &ponIntfSsidRemapPatchPara)); } return (RT_ERR_RG_OK); } int32 _rtk_rg_cf_reserved_pon_intfSSIDRemap_del(int intfIdx){ 
int index; if(intfIdx==-1){ index=RESERVED_CF_INTF_DEFAULT_SSID_REMAPPING; }else if(intfIdx==0){ index=RESERVED_CF_INTF0_SSID_REMAPPING; }else if(intfIdx==1){ index=RESERVED_CF_INTF1_SSID_REMAPPING; }else if(intfIdx==2){ index=RESERVED_CF_INTF2_SSID_REMAPPING; }else if(intfIdx==3){ index=RESERVED_CF_INTF3_SSID_REMAPPING; }else if(intfIdx==4){ index=RESERVED_CF_INTF4_SSID_REMAPPING; }else if(intfIdx==5){ index=RESERVED_CF_INTF5_SSID_REMAPPING; }else if(intfIdx==6){ index=RESERVED_CF_INTF6_SSID_REMAPPING; }else if(intfIdx==7){ index=RESERVED_CF_INTF7_SSID_REMAPPING; }else { RETURN_ERR(RT_ERR_RG_CF_ENTRY_ACCESS_FAILED); } assert_ok(rtk_classify_cfgEntry_del(index)); return (RT_ERR_RG_OK); } #endif #ifdef CONFIG_RG_PPPOE_PASSTHROUGH #if 1 //pppoe_passthru acl disable void _rtk_rg_acl_reserved_pppoePassthroughDefaultRule_add(unsigned int lan_pmask, unsigned int wan_pmsk, int remark_vid) { rtk_rg_aclAndCf_reserved_pppoepassthroughtDefaulTrapRuletPatch_t pppoepassthroughtDefaulTrapRulePatchpara; bzero(&pppoepassthroughtDefaulTrapRulePatchpara,sizeof(pppoepassthroughtDefaulTrapRulePatchpara)); pppoepassthroughtDefaulTrapRulePatchpara.lanPmsk=lan_pmask; pppoepassthroughtDefaulTrapRulePatchpara.wanPmsk=wan_pmsk; pppoepassthroughtDefaulTrapRulePatchpara.remarkVid=remark_vid; assert_ok(_rtk_rg_aclAndCfReservedRuleAdd(RTK_RG_ACLANDCF_RESERVED_PPPoEPASSTHROUGHT_TRAP_ALL8863_US8864_DMAC2CVID_DS8864, &pppoepassthroughtDefaulTrapRulePatchpara)); } void _rtk_rg_acl_reserved_pppoePassthroughDefaultRule_del(void) { assert_ok(_rtk_rg_aclAndCfReservedRuleDel(RTK_RG_ACLANDCF_RESERVED_PPPoEPASSTHROUGHT_TRAP_ALL8863_US8864_DMAC2CVID_DS8864)); } int32 _rtk_rg_acl_reserved_pppoePassthrough_IntfisPppoewan_add(int intf_idx, rtk_mac_t gmac) { rtk_rg_aclAndCf_reserved_pppoepassthroughtIntfIsPppoewanPatch_t pppoepassthroughtIntfIsPppoewanpara; bzero(&pppoepassthroughtIntfIsPppoewanpara,sizeof(pppoepassthroughtIntfIsPppoewanpara)); memcpy(&pppoepassthroughtIntfIsPppoewanpara.gmac.octet[0],&gmac.octet[0],ETHER_ADDR_LEN); if(intf_idx==0){assert_ok(_rtk_rg_aclAndCfReservedRuleAdd(RTK_RG_ACLANDCF_RESERVED_PPPoEPASSTHROUGHT_INTF0ISPPPOEWAN, &pppoepassthroughtIntfIsPppoewanpara));} else if(intf_idx==1){assert_ok(_rtk_rg_aclAndCfReservedRuleAdd(RTK_RG_ACLANDCF_RESERVED_PPPoEPASSTHROUGHT_INTF1ISPPPOEWAN, &pppoepassthroughtIntfIsPppoewanpara));} else if(intf_idx==2){assert_ok(_rtk_rg_aclAndCfReservedRuleAdd(RTK_RG_ACLANDCF_RESERVED_PPPoEPASSTHROUGHT_INTF2ISPPPOEWAN, &pppoepassthroughtIntfIsPppoewanpara));} else if(intf_idx==3){assert_ok(_rtk_rg_aclAndCfReservedRuleAdd(RTK_RG_ACLANDCF_RESERVED_PPPoEPASSTHROUGHT_INTF3ISPPPOEWAN, &pppoepassthroughtIntfIsPppoewanpara));} else if(intf_idx==4){assert_ok(_rtk_rg_aclAndCfReservedRuleAdd(RTK_RG_ACLANDCF_RESERVED_PPPoEPASSTHROUGHT_INTF4ISPPPOEWAN, &pppoepassthroughtIntfIsPppoewanpara));} else if(intf_idx==5){assert_ok(_rtk_rg_aclAndCfReservedRuleAdd(RTK_RG_ACLANDCF_RESERVED_PPPoEPASSTHROUGHT_INTF5ISPPPOEWAN, &pppoepassthroughtIntfIsPppoewanpara));} else if(intf_idx==6){assert_ok(_rtk_rg_aclAndCfReservedRuleAdd(RTK_RG_ACLANDCF_RESERVED_PPPoEPASSTHROUGHT_INTF6ISPPPOEWAN, &pppoepassthroughtIntfIsPppoewanpara));} else if(intf_idx==7){assert_ok(_rtk_rg_aclAndCfReservedRuleAdd(RTK_RG_ACLANDCF_RESERVED_PPPoEPASSTHROUGHT_INTF7ISPPPOEWAN, &pppoepassthroughtIntfIsPppoewanpara));} return (RT_ERR_RG_OK); } int32 _rtk_rg_acl_reserved_pppoePassthrough_IntfisPppoewan_del(int intf_idx){ if(intf_idx==0){assert_ok(_rtk_rg_aclAndCfReservedRuleDel(RTK_RG_ACLANDCF_RESERVED_PPPoEPASSTHROUGHT_INTF0ISPPPOEWAN));} 
else if(intf_idx==1){assert_ok(_rtk_rg_aclAndCfReservedRuleDel(RTK_RG_ACLANDCF_RESERVED_PPPoEPASSTHROUGHT_INTF1ISPPPOEWAN));} else if(intf_idx==2){assert_ok(_rtk_rg_aclAndCfReservedRuleDel(RTK_RG_ACLANDCF_RESERVED_PPPoEPASSTHROUGHT_INTF2ISPPPOEWAN));} else if(intf_idx==3){assert_ok(_rtk_rg_aclAndCfReservedRuleDel(RTK_RG_ACLANDCF_RESERVED_PPPoEPASSTHROUGHT_INTF3ISPPPOEWAN));} else if(intf_idx==4){assert_ok(_rtk_rg_aclAndCfReservedRuleDel(RTK_RG_ACLANDCF_RESERVED_PPPoEPASSTHROUGHT_INTF4ISPPPOEWAN));} else if(intf_idx==5){assert_ok(_rtk_rg_aclAndCfReservedRuleDel(RTK_RG_ACLANDCF_RESERVED_PPPoEPASSTHROUGHT_INTF5ISPPPOEWAN));} else if(intf_idx==6){assert_ok(_rtk_rg_aclAndCfReservedRuleDel(RTK_RG_ACLANDCF_RESERVED_PPPoEPASSTHROUGHT_INTF6ISPPPOEWAN));} else if(intf_idx==7){assert_ok(_rtk_rg_aclAndCfReservedRuleDel(RTK_RG_ACLANDCF_RESERVED_PPPoEPASSTHROUGHT_INTF7ISPPPOEWAN));} return (RT_ERR_RG_OK); } #endif #endif int32 _rtk_rg_apollo_naptFilterAndQos_init(void){ int i; rg_db.systemGlobal.pValidUsNaptPriorityRuleStart = NULL; rg_db.systemGlobal.pValidDsNaptPriorityRuleStart = NULL; for(i=0;i<MAX_NAPT_FILER_SW_ENTRY_SIZE;i++){ bzero(&rg_db.systemGlobal.napt_SW_table_entry[i],sizeof(rtk_rg_sw_naptFilterAndQos_t)); rg_db.systemGlobal.napt_SW_table_entry[i].pNextValid = NULL; } return (RT_ERR_RG_OK); } int32 _rtk_rg_apollo_naptFilterAndQos_rule_sorting_by_weight(void){ int i,j; rtk_rg_sw_naptFilterAndQos_t *pCurrenSortedtRule; rtk_rg_sw_naptFilterAndQos_t *pLastSortedtRule; rtk_rg_sw_naptFilterAndQos_t *pCurrentCheckingRule; int any_rule_enabled_rate_limit = 0; //resort the valid entry list (for speedup fwdEngine) rg_db.systemGlobal.pValidUsNaptPriorityRuleStart = NULL; rg_db.systemGlobal.pValidDsNaptPriorityRuleStart = NULL; for(i=0;i<MAX_NAPT_FILER_SW_ENTRY_SIZE;i++){ rg_db.systemGlobal.napt_SW_table_entry[i].pNextValid = NULL; } //Seperate US/DS rule and sort by weight into pValidUsNaptPriorityRuleStartpValidDsNaptPriorityRuleStart for(i=0;i<MAX_NAPT_FILER_SW_ENTRY_SIZE;i++){ if(rg_db.systemGlobal.napt_SW_table_entry[i].valid==RTK_RG_ENABLED){ pCurrentCheckingRule = (&rg_db.systemGlobal.napt_SW_table_entry[i]); //check is any rule enabled the rate limit in this loop to saving time if(pCurrentCheckingRule->naptFilter.action_fields & NAPT_SW_RATE_LIMIT_BIT){ any_rule_enabled_rate_limit = 1; } if(pCurrentCheckingRule->naptFilter.direction==RTK_RG_NAPT_FILTER_OUTBOUND){ if(rg_db.systemGlobal.pValidUsNaptPriorityRuleStart==NULL){//first US valid rule get rg_db.systemGlobal.pValidUsNaptPriorityRuleStart = pCurrentCheckingRule; DEBUG("US first is rule[%d]\n",pCurrentCheckingRule->sw_index); }else{ pCurrenSortedtRule = rg_db.systemGlobal.pValidUsNaptPriorityRuleStart; pLastSortedtRule = rg_db.systemGlobal.pValidUsNaptPriorityRuleStart; j=0;//avoid cycle while(pCurrenSortedtRule!=NULL && j<MAX_NAPT_FILER_SW_ENTRY_SIZE){ if(pCurrentCheckingRule->naptFilter.weight > pCurrenSortedtRule->naptFilter.weight){ //insert pCurrentCheckingRule if((pCurrenSortedtRule==rg_db.systemGlobal.pValidUsNaptPriorityRuleStart) && (pLastSortedtRule==rg_db.systemGlobal.pValidUsNaptPriorityRuleStart)){//insert in head case DEBUG("US insert rule[%d] in head, so replace rg_db.systemGlobal.pValidUsNaptPriorityRuleStart.\n",pCurrentCheckingRule->sw_index); rg_db.systemGlobal.pValidUsNaptPriorityRuleStart = pCurrentCheckingRule; }else{//normal none-insert in head case DEBUG("US insert rule[%d] between rule[%d] & rule[%d]\n",pCurrentCheckingRule->sw_index,pLastSortedtRule->sw_index,pCurrenSortedtRule->sw_index); 
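							//descending-weight insert: link the new rule after pLastSortedtRule and (just below) in front of pCurrenSortedtRule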
pLastSortedtRule->pNextValid = pCurrentCheckingRule; } pCurrentCheckingRule->pNextValid = pCurrenSortedtRule; break; }else if(pCurrenSortedtRule->pNextValid==NULL){//insert in the tail DEBUG("US insert rule[%d] in tail\n",pCurrentCheckingRule->sw_index); pCurrenSortedtRule->pNextValid = pCurrentCheckingRule; pCurrentCheckingRule->pNextValid = NULL; break; }else{ //move to next checking DEBUG("US checking next... pCurrenSortedtRule.sw_index=%d pCurrenSortedtRule=%p pLastSortedtRule.sw_index=%d pLastSortedtRule=%p\n",pCurrenSortedtRule->sw_index,pCurrenSortedtRule,pLastSortedtRule->sw_index,pLastSortedtRule); if(pLastSortedtRule!=pCurrenSortedtRule)//only first checking that pLastSortedtRule equal to pCurrenSortedtRule pLastSortedtRule=pLastSortedtRule->pNextValid; pCurrenSortedtRule = pCurrenSortedtRule->pNextValid; } j++; } } }else{//inboud rule if(rg_db.systemGlobal.pValidDsNaptPriorityRuleStart==NULL){//first US valid rule get rg_db.systemGlobal.pValidDsNaptPriorityRuleStart = pCurrentCheckingRule; DEBUG("DS first is rule[%d]\n",pCurrentCheckingRule->sw_index); }else{ pCurrenSortedtRule = rg_db.systemGlobal.pValidDsNaptPriorityRuleStart; pLastSortedtRule = rg_db.systemGlobal.pValidDsNaptPriorityRuleStart; j=0;//avoid cycle while(pCurrenSortedtRule!=NULL && j<MAX_NAPT_FILER_SW_ENTRY_SIZE){ if(pCurrentCheckingRule->naptFilter.weight > pCurrenSortedtRule->naptFilter.weight){ //insert pCurrentCheckingRule if((pCurrenSortedtRule==rg_db.systemGlobal.pValidDsNaptPriorityRuleStart) && (pLastSortedtRule==rg_db.systemGlobal.pValidDsNaptPriorityRuleStart)){//insert in head case DEBUG("DS insert rule[%d] in head, so replace rg_db.systemGlobal.pValidUsNaptPriorityRuleStart.\n",pCurrentCheckingRule->sw_index); rg_db.systemGlobal.pValidDsNaptPriorityRuleStart = pCurrentCheckingRule; }else{//normal none-insert in head case DEBUG("DS insert rule[%d] between rule[%d] & rule[%d]\n",pCurrentCheckingRule->sw_index,pLastSortedtRule->sw_index,pCurrenSortedtRule->sw_index); pLastSortedtRule->pNextValid = pCurrentCheckingRule; } pCurrentCheckingRule->pNextValid = pCurrenSortedtRule; break; }else if(pCurrenSortedtRule->pNextValid==NULL){//insert in the tail DEBUG("DS insert rule[%d] in tail\n",pCurrentCheckingRule->sw_index); pCurrenSortedtRule->pNextValid = pCurrentCheckingRule; pCurrentCheckingRule->pNextValid = NULL; break; }else{ //move to next checking DEBUG("DS checking next... 
pCurrenSortedtRule.sw_index=%d(pCurrenSortedtRule=%p) pLastSortedtRule.sw_index=%d(pLastSortedtRule=%p)\n",pCurrenSortedtRule->sw_index,pCurrenSortedtRule,pLastSortedtRule->sw_index,pLastSortedtRule); if(pLastSortedtRule!=pCurrenSortedtRule)//only first checking that pLastSortedtRule equal to pCurrenSortedtRule pLastSortedtRule=pLastSortedtRule->pNextValid; pCurrenSortedtRule = pCurrenSortedtRule->pNextValid; } j++; } } } } } //check any rule enabled the rate limit if(any_rule_enabled_rate_limit==1) { //enabled the packet count clean timer rg_db.systemGlobal.naptSwRateLimitTriggered = 1; } else { //disabled the packet count clean timer rg_db.systemGlobal.naptSwRateLimitTriggered = 0; } #ifdef __KERNEL__ if(timer_pending(&rg_kernel.swRateLimitTimer)) del_timer(&rg_kernel.swRateLimitTimer); if((rg_db.systemGlobal.BCRateLimitPortMask)||(rg_db.systemGlobal.IPv6MCRateLimitPortMask)||(rg_db.systemGlobal.IPv4MCRateLimitPortMask)||(rg_db.systemGlobal.unKnownDARateLimitPortMask) #ifdef CONFIG_MASTER_WLAN0_ENABLE ||(rg_db.systemGlobal.wifiIngressRateLimitDevMask)||(rg_db.systemGlobal.wifiEgressRateLimitDevMask) #endif ||rg_db.systemGlobal.naptSwRateLimitTriggered){ init_timer(&rg_kernel.swRateLimitTimer); rg_kernel.swRateLimitTimer.function = rtk_rg_swRateLimitTimerFunc; mod_timer(&rg_kernel.swRateLimitTimer, jiffies+(RTK_RG_SWRATELIMIT_SECOND*(TICKTIME_PERIOD>>4)/*unit:(1/16)sec*/)); } #endif return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_gponDsBcFilterAndRemarking_Enable(rtk_rg_enable_t enable) { if(enable >= RTK_RG_ENABLE_END) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); rg_db.systemGlobal.gponDsBCModuleEnable = enable; return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_gponDsBcFilterAndRemarking_add(rtk_rg_gpon_ds_bc_vlanfilterAndRemarking_t *filterRule,int *index) { int i; //Check rg has been init if(rg_db.systemGlobal.vlanInit==0) RETURN_ERR(RT_ERR_RG_NOT_INIT); if(filterRule==NULL) RETURN_ERR(RT_ERR_RG_NULL_POINTER); if(index==NULL) RETURN_ERR(RT_ERR_RG_NULL_POINTER); if(*index==-1){//add to first empty entry, and bring the index back for(i=0;i<MAX_GPON_DS_BC_FILTER_SW_ENTRY_SIZE;i++){ if(rg_db.systemGlobal.gpon_SW_ds_bc_filter_table_entry[i].valid == RTK_RG_DISABLED) break; } rg_db.systemGlobal.gpon_SW_ds_bc_filter_table_entry[i].valid = RTK_RG_ENABLED; memcpy(&rg_db.systemGlobal.gpon_SW_ds_bc_filter_table_entry[i].filterRule,filterRule,sizeof(rtk_rg_gpon_ds_bc_vlanfilterAndRemarking_t)); *index = i; }else if((*index>=0) && (*index<MAX_GPON_DS_BC_FILTER_SW_ENTRY_SIZE)){ //force assign the rule to the specific index if(rg_db.systemGlobal.gpon_SW_ds_bc_filter_table_entry[*index].valid == RTK_RG_ENABLED)//check the rule is empty return(RT_ERR_RG_ENTRY_EXIST); rg_db.systemGlobal.gpon_SW_ds_bc_filter_table_entry[*index].valid = RTK_RG_ENABLED; memcpy(&rg_db.systemGlobal.gpon_SW_ds_bc_filter_table_entry[*index].filterRule,filterRule,sizeof(rtk_rg_gpon_ds_bc_vlanfilterAndRemarking_t)); }else{//index out of range RETURN_ERR(RT_ERR_RG_INVALID_PARAM); } return RT_ERR_RG_OK; } rtk_rg_err_code_t rtk_rg_apollo_gponDsBcFilterAndRemarking_del(int index) { if(rg_db.systemGlobal.vlanInit==0) RETURN_ERR(RT_ERR_RG_NOT_INIT); if(!((index>=0) && (index<MAX_GPON_DS_BC_FILTER_SW_ENTRY_SIZE))) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); rg_db.systemGlobal.gpon_SW_ds_bc_filter_table_entry[index].valid = RTK_RG_DISABLED; memset(&rg_db.systemGlobal.gpon_SW_ds_bc_filter_table_entry[index].filterRule,0,sizeof(rtk_rg_gpon_ds_bc_vlanfilterAndRemarking_t)); return RT_ERR_RG_OK; } rtk_rg_err_code_t 
rtk_rg_apollo_gponDsBcFilterAndRemarking_del_all(void) { bzero(rg_db.systemGlobal.gpon_SW_ds_bc_filter_table_entry,sizeof(rtk_rg_sw_gpon_ds_bc_vlanfilterAndRemarking_t)*MAX_GPON_DS_BC_FILTER_SW_ENTRY_SIZE); return RT_ERR_RG_OK; } rtk_rg_err_code_t rtk_rg_apollo_gponDsBcFilterAndRemarking_find(int *index,rtk_rg_gpon_ds_bc_vlanfilterAndRemarking_t *filterRule) { int i; if(rg_db.systemGlobal.vlanInit==0) RETURN_ERR(RT_ERR_RG_NOT_INIT); if(filterRule==NULL) RETURN_ERR(RT_ERR_RG_NULL_POINTER); if(index==NULL) RETURN_ERR(RT_ERR_RG_NULL_POINTER); if(*index==-1){//search the index of the specific filterRule for(i=0;i<MAX_GPON_DS_BC_FILTER_SW_ENTRY_SIZE;i++){ if((rg_db.systemGlobal.gpon_SW_ds_bc_filter_table_entry[i].valid==RTK_RG_ENABLED) && !(memcmp(filterRule,&rg_db.systemGlobal.gpon_SW_ds_bc_filter_table_entry[i].filterRule,sizeof(rtk_rg_gpon_ds_bc_vlanfilterAndRemarking_t)))) { memcpy(filterRule,&rg_db.systemGlobal.gpon_SW_ds_bc_filter_table_entry[i].filterRule,sizeof(rtk_rg_gpon_ds_bc_vlanfilterAndRemarking_t)); return (RT_ERR_RG_OK); } } }else if((*index>=0) && (*index<MAX_GPON_DS_BC_FILTER_SW_ENTRY_SIZE)){//search the filterRule by index if(rg_db.systemGlobal.gpon_SW_ds_bc_filter_table_entry[*index].valid==RTK_RG_ENABLED) { memcpy(filterRule,&rg_db.systemGlobal.gpon_SW_ds_bc_filter_table_entry[*index].filterRule,sizeof(rtk_rg_gpon_ds_bc_vlanfilterAndRemarking_t)); return (RT_ERR_RG_OK); } }else{//index out of range RETURN_ERR(RT_ERR_RG_INVALID_PARAM); } return (RT_ERR_RG_ENTRY_NOT_FOUND); } void _rtk_rg_macCleanNaptAndShortcut(int l2Idx) { int i,k,retval; int inIdx=FAIL,outIdx=FAIL; ipaddr_t victim_ip; //may be more than one rtk_l34_naptInbound_entry_t asic_naptr; rtk_l34_naptOutbound_entry_t asic_napt; DEBUG("l2Idx=%d",l2Idx); #ifdef CONFIG_ROME_NAPT_SHORTCUT for(i=0; i<MAX_NAPT_SHORTCUT_SIZE; i++) { #if defined(CONFIG_RTL9600_SERIES) if(rg_db.naptShortCut[i].sip!=0 && rg_db.naptShortCut[i].new_lut_idx==l2Idx) #else //support lut traffic bit if(rg_db.naptShortCut[i].sip!=0 && (rg_db.naptShortCut[i].new_lut_idx==l2Idx || rg_db.naptShortCut[i].smacL2Idx==l2Idx)) #endif { TABLE("del v4 shortcut[%d].", i); _rtk_rg_v4ShortCut_delete(i); } } #endif //WARNING(" choose victim from arp_used entry:%d",first_dynArp); for(i=0;i<MAX_ARP_SW_TABLE_SIZE;i++) { if(rg_db.arp[i].rtk_arp.valid && rg_db.arp[i].rtk_arp.nhIdx==l2Idx) { //look up for napt DIP are same with victim victim_ip=rg_db.arp[i].ipv4Addr; DEBUG("Found ARP[%d]!! ip is %x",i,victim_ip); for(k=0;k<MAX_NAPT_OUT_SW_TABLE_SIZE;k++) { inIdx = rg_db.naptOut[k].rtk_naptOut.hashIdx; if(rg_db.naptOut[k].state>0 && ((rg_db.naptOut[k].remoteIp==victim_ip)||(rg_db.naptIn[inIdx].rtk_naptIn.intIp==victim_ip))) { //Delete H/W NAPT entry!! DEBUG("naptConnection[%d] IP match, need to remove NAPT/NAPTr",k); //assert_ok(rtk_rg_apollo_naptConnection_del(k)); outIdx = k; if(outIdx<MAX_NAPT_OUT_SW_TABLE_SIZE) { DEBUG("del HW NAPT_OUT[%d]",outIdx); memset(&asic_napt,0,sizeof(rtk_l34_naptOutbound_entry_t)); retval = RTK_L34_NAPTOUTBOUNDTABLE_SET(1,outIdx,&asic_napt); if(retval!=RT_ERR_OK) { WARNING("del NAPT_OUT[%d] failed! retval=0x%x",outIdx,retval); } } if(inIdx<MAX_NAPT_IN_SW_TABLE_SIZE) { DEBUG("del HW NAPT_IN[%d]",inIdx); memset(&asic_naptr,0,sizeof(rtk_l34_naptInbound_entry_t)); retval = RTK_L34_NAPTINBOUNDTABLE_SET(1,inIdx,&asic_naptr); if(retval!=RT_ERR_OK) { WARNING("del NAPT_IN[%d] failed! retval=0x%x",inIdx,retval); } } } } } } } void _rtk_rg_macCleanHwNaptAndShortcut(int l2Idx) { //keep slow path NAPT alive. 
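	/* Unlike _rtk_rg_macCleanNaptAndShortcut() above, this variant only invalidates the H/W NAPT
	 * in/out entries that reference the given L2 index (and drops the software shortcuts); the
	 * software NAPT state is marked valid again afterwards so the connection keeps flowing
	 * through the slow path. */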
int i,k,retval; int inIdx=FAIL,outIdx=FAIL; ipaddr_t victim_ip; //may be more than one rtk_l34_naptInbound_entry_t asic_naptr; rtk_l34_naptOutbound_entry_t asic_napt; DEBUG("l2Idx=%d",l2Idx); #ifdef CONFIG_ROME_NAPT_SHORTCUT for(i=0; i<MAX_NAPT_SHORTCUT_SIZE; i++) { #if defined(CONFIG_RTL9600_SERIES) if(rg_db.naptShortCut[i].sip!=0 && rg_db.naptShortCut[i].new_lut_idx==l2Idx) #else //support lut traffic bit if(rg_db.naptShortCut[i].sip!=0 && (rg_db.naptShortCut[i].new_lut_idx==l2Idx || rg_db.naptShortCut[i].smacL2Idx==l2Idx)) #endif { TABLE("del v4 shortcut[%d].", i); _rtk_rg_v4ShortCut_delete(i); } } #endif //WARNING(" choose victim from arp_used entry:%d",first_dynArp); for(i=0;i<MAX_ARP_SW_TABLE_SIZE;i++) { if(rg_db.arp[i].rtk_arp.valid && rg_db.arp[i].rtk_arp.nhIdx==l2Idx) { //look up for napt DIP are same with victim victim_ip=rg_db.arp[i].ipv4Addr; DEBUG("Found ARP[%d]!! ip is %x",i,victim_ip); for(k=0;k<MAX_NAPT_OUT_SW_TABLE_SIZE;k++) { inIdx = rg_db.naptOut[k].rtk_naptOut.hashIdx; if(rg_db.naptOut[k].state>0 && ((rg_db.naptOut[k].remoteIp==victim_ip)||(rg_db.naptIn[inIdx].rtk_naptIn.intIp==victim_ip))) { //Delete H/W NAPT entry!! DEBUG("naptConnection[%d] IP match, need to remove H/W NAPT/NAPTr",k); //assert_ok(rtk_rg_apollo_naptConnection_del(k)); outIdx = k; if(outIdx<MAX_NAPT_OUT_SW_TABLE_SIZE) { DEBUG("del HW NAPT_OUT[%d]",outIdx); //delete the H/W NAPT, but keep the slow path NAPT memset(&asic_napt,0,sizeof(rtk_l34_naptOutbound_entry_t)); retval = rtk_l34_naptOutboundTable_get(outIdx,&asic_napt); if(retval!=RT_ERR_OK) { WARNING("get HW NAPT_OUT[%d] failed! retval=0x%x",outIdx,retval); } asic_napt.valid = 0; retval = RTK_L34_NAPTOUTBOUNDTABLE_SET(1,outIdx,&asic_napt); if(retval!=RT_ERR_OK) { WARNING("del HW NAPT_OUT[%d] failed! retval=0x%x",outIdx,retval); } //restore the slow path NAPT rg_db.naptOut[outIdx].rtk_naptOut.valid=1; } if(inIdx<MAX_NAPT_IN_SW_TABLE_SIZE) { DEBUG("del HW NAPT_IN[%d]",inIdx); memset(&asic_naptr,0,sizeof(rtk_l34_naptInbound_entry_t)); retval = rtk_l34_naptInboundTable_get(inIdx,&asic_naptr); if(retval!=RT_ERR_OK) { WARNING("get HW NAPT_IN[%d] failed! retval=0x%x",inIdx,retval); } asic_naptr.valid=0; retval = RTK_L34_NAPTINBOUNDTABLE_SET(1,inIdx,&asic_naptr); if(retval!=RT_ERR_OK) { WARNING("del HW NAPT_IN[%d] failed! retval=0x%x",inIdx,retval); } //restore the slow path NAPT rg_db.naptIn[inIdx].rtk_naptIn.valid=1; } } } } } } rtk_rg_err_code_t rtk_rg_apollo_naptFilterAndQos_add(int *index,rtk_rg_naptFilterAndQos_t *napt_filter){ int i,ret; rtk_rg_macEntry_t macEntry; int valid_idx; //Check rg has been init if(rg_db.systemGlobal.vlanInit==0) RETURN_ERR(RT_ERR_RG_NOT_INIT); if(napt_filter->filter_fields==0x0) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if(napt_filter->direction>=RTK_RG_NAPT_FILTER_DIRECTION_END) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if((napt_filter->assign_priority<0) || (napt_filter->assign_priority>7)) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); //for rate limit or trap, limit pattern to ingres_smac or egress_dmac only , and clear related NAPT, shortcut //Note: in rate limit case, TCP connection will be disconnect. it must relay the PC Host to reset the connection. 
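	/* Illustrative caller-side sketch (assumption: the values below are examples only, not taken
	 * from this file). A per-host upstream software rate-limit rule keyed on the LAN host's
	 * source MAC could look like:
	 *
	 *     rtk_rg_naptFilterAndQos_t rule;
	 *     int idx;
	 *     bzero(&rule, sizeof(rule));
	 *     rule.filter_fields = INGRESS_SMAC;                  //pattern: match the LAN host MAC
	 *     memcpy(rule.ingress_smac.octet, host_mac, ETHER_ADDR_LEN);  //host_mac: placeholder
	 *     rule.direction     = RTK_RG_NAPT_FILTER_OUTBOUND;   //upstream traffic
	 *     rule.action_fields = NAPT_SW_RATE_LIMIT_BIT;        //software rate-limit action
	 *     rule.assign_rate   = some_rate;                     //placeholder; units as defined by the driver
	 *     rule.weight        = 100;                           //higher weight sorts ahead of lower-weight rules
	 *     rtk_rg_apollo_naptFilterAndQos_add(&idx, &rule);
	 *
	 * The validation below requires INGRESS_SMAC or EGRESS_DMAC for rate-limit/trap actions. */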
if((napt_filter->action_fields & NAPT_SW_RATE_LIMIT_BIT) || (napt_filter->action_fields & NAPT_SW_TRAP_TO_PS)){ #if 0 if(napt_filter->filter_fields & (~(INGRESS_SMAC|EGRESS_DMAC)) ) { DEBUG("rate_limit and trap_to_ps only support with pattern INGRESS_SMAC or EGRESS_DMAC"); RETURN_ERR(RT_ERR_RG_INVALID_PARAM); } #endif if((napt_filter->filter_fields & INGRESS_SMAC)==0x0 && (napt_filter->filter_fields & EGRESS_DMAC)==0x0) { DEBUG("rate_limit and trap_to_ps must support with pattern INGRESS_SMAC or EGRESS_DMAC"); RETURN_ERR(RT_ERR_RG_INVALID_PARAM); } if((napt_filter->action_fields & NAPT_SW_TRAP_TO_PS) && (napt_filter->filter_fields & (EGRESS_SIP|EGRESS_DIP|EGRESS_SPORT|EGRESS_DPORT|EGRESS_SIP_RANGE|EGRESS_DIP_RANGE|EGRESS_SPORT_RANGE|EGRESS_DPORT_RANGE))) { DEBUG("NAPT_SW_TRAP_TO_PS can not support egress pattern(except EGRESS_DMAC when downstream) "); RETURN_ERR(RT_ERR_RG_INVALID_PARAM); } //delete INGRESS_SMAC related entry if(napt_filter->filter_fields & INGRESS_SMAC){ bzero(&macEntry,sizeof(macEntry)); memcpy(macEntry.mac.octet,napt_filter->ingress_smac.octet,ETHER_ADDR_LEN); valid_idx=FAIL; ret = rtk_rg_apollo_macEntry_find(&macEntry,&valid_idx); if(ret==RT_ERR_RG_OK){ if(napt_filter->action_fields & NAPT_SW_TRAP_TO_PS) //clear all learned entry { DEBUG("Delete Lut[%d] related Napt and shortcut",valid_idx); //delete H/W NAPT and shortcut related entries which refer to this l2 index _rtk_rg_macCleanNaptAndShortcut(valid_idx); } else if(napt_filter->action_fields & NAPT_SW_RATE_LIMIT_BIT) //clear H/W NAPT and shortcut, but keep slow path connection alive { DEBUG("Delete Lut[%d] related H/W Napt and shortcut",valid_idx); _rtk_rg_macCleanHwNaptAndShortcut(valid_idx); } } } //delete EGRESS_DMAC related entry if(napt_filter->filter_fields & EGRESS_DMAC){ bzero(&macEntry,sizeof(macEntry)); memcpy(macEntry.mac.octet,napt_filter->egress_dmac.octet,ETHER_ADDR_LEN); valid_idx=FAIL; ret = rtk_rg_apollo_macEntry_find(&macEntry,&valid_idx); if(ret==RT_ERR_RG_OK){ if(napt_filter->action_fields & NAPT_SW_TRAP_TO_PS) //clear all learned entry { DEBUG("Delete Lut[%d] related Napt and shortcut",valid_idx); //delete H/W NAPT and shortcut related entries which refer to this l2 index _rtk_rg_macCleanNaptAndShortcut(valid_idx); } else if(napt_filter->action_fields & NAPT_SW_RATE_LIMIT_BIT) //clear H/W NAPT and shortcut, but keep slow path connection alive { DEBUG("Delete Lut[%d] related H/W Napt and shortcut",valid_idx); _rtk_rg_macCleanHwNaptAndShortcut(valid_idx); } } } } //add into a empty entry for(i=0;i<MAX_NAPT_FILER_SW_ENTRY_SIZE;i++){ if(rg_db.systemGlobal.napt_SW_table_entry[i].valid == RTK_RG_DISABLED) break; } if(i==MAX_NAPT_FILER_SW_ENTRY_SIZE){ RETURN_ERR(RT_ERR_RG_NAPTFILTERANDQOS_SW_ENTRY_FULL); } rg_db.systemGlobal.napt_SW_table_entry[i].valid = RTK_RG_ENABLED; rg_db.systemGlobal.napt_SW_table_entry[i].sw_index = i; memcpy(&rg_db.systemGlobal.napt_SW_table_entry[i].naptFilter,napt_filter,sizeof(rtk_rg_naptFilterAndQos_t)); *index=i; //record the rate limit counter into global if((napt_filter->action_fields) & NAPT_SW_RATE_LIMIT_BIT) { rg_db.systemGlobal.naptSwRateLimitSpeed[i]=napt_filter->assign_rate; } //resort by weight assert_ok(_rtk_rg_apollo_naptFilterAndQos_rule_sorting_by_weight()); //clear shortcut assert_ok(_rtk_rg_shortCut_clear()); return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_naptFilterAndQos_del(int index){ //Check rg has been init if(rg_db.systemGlobal.vlanInit==0) RETURN_ERR(RT_ERR_RG_NOT_INIT); if((index<0)||(index>MAX_NAPT_FILER_SW_ENTRY_SIZE)) 
		RETURN_ERR(RT_ERR_RG_INVALID_PARAM);

	bzero(&rg_db.systemGlobal.napt_SW_table_entry[index],sizeof(rtk_rg_sw_naptFilterAndQos_t));
	rg_db.systemGlobal.napt_SW_table_entry[index].pNextValid=NULL;

	//clear the rate limit
	rg_db.systemGlobal.naptSwRateLimitSpeed[index]=0;

	//resort by weight
	assert_ok(_rtk_rg_apollo_naptFilterAndQos_rule_sorting_by_weight());

	//clear shortcut
	assert_ok(_rtk_rg_shortCut_clear());

	return (RT_ERR_RG_OK);
}

rtk_rg_err_code_t rtk_rg_apollo_naptFilterAndQos_find(int *index,rtk_rg_naptFilterAndQos_t *napt_filter){
	//Check rg has been init
	if(rg_db.systemGlobal.vlanInit==0)
		RETURN_ERR(RT_ERR_RG_NOT_INIT);

	if((*index<0)||(*index>MAX_NAPT_FILER_SW_ENTRY_SIZE))
		RETURN_ERR(RT_ERR_RG_INVALID_PARAM);

	if(rg_db.systemGlobal.napt_SW_table_entry[*index].valid == RTK_RG_ENABLED)
		memcpy(napt_filter,&rg_db.systemGlobal.napt_SW_table_entry[*index].naptFilter,sizeof(rtk_rg_naptFilterAndQos_t));
	else
		return (RT_ERR_RG_NAPTFILTERANDQOS_SW_ENTRY_NOT_FOUND);

	return (RT_ERR_RG_OK);
}

#ifdef CONFIG_ROME_NAPT_SHORTCUT
int32 _rtk_rg_v4ShortCut_delete(int deleteIdx)
{
	rg_db.naptShortCut[deleteIdx].sip = 0;
	rg_db.v4ShortCutValidSet[deleteIdx>>5] &= ~(0x1<<(deleteIdx&0x1f));
	TABLE("del v4SC:[idx:%d]", deleteIdx);
	return (RT_ERR_RG_OK);
}
#endif

#ifdef CONFIG_RG_IPV6_SOFTWARE_SHORTCUT_SUPPORT
int32 _rtk_rg_v6ShortCut_delete(int deleteIdx)
{
	bzero(rg_db.naptv6ShortCut[deleteIdx].sip.ipv6_addr, IPV6_ADDR_LEN);
	rg_db.v6ShortCutValidSet[deleteIdx>>5] &= ~(0x1<<(deleteIdx&0x1f));
	TABLE("del v6SC:[idx:%d]", deleteIdx);
	return (RT_ERR_RG_OK);
}
#endif

int32 _rtk_rg_shortCut_clear(void)
{
#ifdef CONFIG_ROME_NAPT_SHORTCUT
	bzero(rg_db.naptShortCut,MAX_NAPT_SHORTCUT_SIZE*sizeof(rtk_rg_napt_shortcut_t));
	bzero(rg_db.v4ShortCutValidSet,(MAX_NAPT_SHORTCUT_SIZE/32)*sizeof(uint32));
#endif
#ifdef CONFIG_RG_IPV6_SOFTWARE_SHORTCUT_SUPPORT
	bzero(rg_db.naptv6ShortCut,MAX_NAPT_V6_SHORTCUT_SIZE*sizeof(rtk_rg_naptv6_shortcut_t));
	bzero(rg_db.v6ShortCutValidSet,(MAX_NAPT_V6_SHORTCUT_SIZE/32)*sizeof(uint32));
#endif
	TABLE("clear all SC");
	return (RT_ERR_RG_OK);
}

rtk_rg_err_code_t rtk_rg_apollo_aclFilterAndQos_add(rtk_rg_aclFilterAndQos_t *acl_filter, int *acl_filter_idx)
{
	return _rtk_rg_apollo_aclFilterAndQos_add(acl_filter,acl_filter_idx);
}

rtk_rg_err_code_t rtk_rg_apollo_aclFilterAndQos_del(int acl_filter_idx)
{
	return _rtk_rg_apollo_aclFilterAndQos_del(acl_filter_idx);
}

rtk_rg_err_code_t rtk_rg_apollo_aclFilterAndQos_find(rtk_rg_aclFilterAndQos_t *acl_filter, int *valid_idx)
{
	return _rtk_rg_apollo_aclFilterAndQos_find(acl_filter,valid_idx);
}

/*API for L2 access CF64-511 directly */
rtk_rg_err_code_t rtk_rg_apollo_classifyEntry_add(rtk_rg_classifyEntry_t *classifyFilter)
{
	return _rtk_rg_apollo_classifyEntry_add(classifyFilter);
}

rtk_rg_err_code_t rtk_rg_apollo_classifyEntry_del(int index)
{
	return _rtk_rg_apollo_classifyEntry_del(index);
}

rtk_rg_err_code_t rtk_rg_apollo_classifyEntry_find(int index, rtk_rg_classifyEntry_t *classifyFilter)
{
	return _rtk_rg_apollo_classifyEntry_find(index,classifyFilter);
}

//MAC Filter
//static rtk_rg_macFilterSWEntry_t* macFilter_table_Entry=NULL;
int _rtk_rg_is_macFilter_table_init(void)
{
	if(rg_db.systemGlobal.macFilter_table_Entry==NULL)
		RETURN_ERR(RT_ERR_RG_NULL_POINTER);
	else
		return (RT_ERR_RG_OK);
}

int _rtk_rg_macFilter_table_init(void)
{
	bzero(rg_db.systemGlobal.macFilter_table_Entry, sizeof(rtk_rg_macFilterSWEntry_t)*MAX_MAC_FILTER_ENTRY_SIZE);
	return (RT_ERR_RG_OK);
}

rtk_rg_err_code_t rtk_rg_apollo_macFilter_add(rtk_rg_macFilterEntry_t *macFilterEntry,int
*mac_filter_idx) { int i=0,j=0; int macFilter_index=-1, free_index=-1; int index, search_index; int isLearnBefore=0; //uint32 default_fid[INTERFACE_END]={2,2}; //current lan/wan use SVL mode, both fid are 2. //uint32 default_efid[INTERFACE_END]={0,0}; uint32 default_efid = 0; //current default efid is always zero, compatible with 9602C uint32 fid; uint32 efid; rtk_l2_addr_table_t lut; bzero(&lut,sizeof(lut)); //Check rg has been init if(rg_db.systemGlobal.vlanInit==0) RETURN_ERR(RT_ERR_RG_NOT_INIT); if(macFilterEntry==NULL || mac_filter_idx==NULL) RETURN_ERR(RT_ERR_RG_NULL_POINTER); //not support multicast mac filter if((macFilterEntry->mac.octet[0]&1)==1) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if(_rtk_rg_is_macFilter_table_init()) ASSERT_EQ(_rtk_rg_macFilter_table_init(),RT_ERR_RG_OK); //search empty macFilterEntry for(i=0; i<MAX_MAC_FILTER_ENTRY_SIZE; i++) { if(rg_db.systemGlobal.macFilter_table_Entry[i].valid==DISABLE) { if(free_index==-1) free_index=i; continue; } if(!memcmp(rg_db.systemGlobal.macFilter_table_Entry[i].macFilterEntry.mac.octet, macFilterEntry->mac.octet, ETHER_ADDR_LEN)) { if((macFilterEntry->isIVL==rg_db.systemGlobal.macFilter_table_Entry[i].macFilterEntry.isIVL) && (macFilterEntry->isIVL==0 || (macFilterEntry->isIVL==1 && macFilterEntry->vlan_id==rg_db.systemGlobal.macFilter_table_Entry[i].macFilterEntry.vlan_id))) { macFilter_index=i; break; } } } if(macFilter_index==-1) { if(free_index==-1) RETURN_ERR(RT_ERR_RG_L2_MACFILTER_ENTRY_FULL); else macFilter_index = free_index; } if(macFilterEntry->isIVL){/*IVL*/ index=_rtk_rg_hash_mac_vid_efid(macFilterEntry->mac.octet,macFilterEntry->vlan_id,default_efid); //FIXME:EFID is 0 now index<<=2; search_index = index; j=0; do { bzero(&lut,sizeof(lut)); if(rtk_l2_nextValidEntry_get(&search_index,&lut)){ //RETURN_ERR(RT_ERR_RG_L2_ENTRY_ACCESS_FAILED); break; } if( RTK_LUT_L2UC == lut.entryType) { if((!memcmp(&lut.entry.l2UcEntry.mac, &(macFilterEntry->mac), sizeof(rtk_mac_t) )) && lut.entry.l2UcEntry.vid==macFilterEntry->vlan_id){ break; }else{ search_index = lut.entry.l2UcEntry.index +1; //search from next entry } } else if(RTK_LUT_L2MC == lut.entryType) { search_index = lut.entry.l2McEntry.index +1; //search from next entry } else /* RTK_LUT_L3MC*/ { search_index = lut.entry.ipmcEntry.index +1; //search from next entry } j++; } while(j < 4);//at most search 4 times. if((!memcmp(&lut.entry.l2UcEntry.mac, &(macFilterEntry->mac), sizeof(rtk_mac_t) )) && lut.entry.l2UcEntry.vid==macFilterEntry->vlan_id) { //the l2 entry is found, change some flags & auth lut.entry.l2UcEntry.auth=1; if((lut.entry.l2UcEntry.flags&RTK_L2_UCAST_FLAG_STATIC)==0) isLearnBefore=1; //reset sa/da block flag lut.entry.l2UcEntry.flags &= ~(RTK_L2_UCAST_FLAG_SA_BLOCK|RTK_L2_UCAST_FLAG_DA_BLOCK); if(macFilterEntry->direct==RTK_RG_MACFILTER_FILTER_SRC_MAC_ONLY){ lut.entry.l2UcEntry.flags|=RTK_L2_UCAST_FLAG_STATIC|RTK_L2_UCAST_FLAG_SA_BLOCK; }else if(macFilterEntry->direct==RTK_RG_MACFILTER_FILTER_DEST_MAC_ONLY){ lut.entry.l2UcEntry.flags|=RTK_L2_UCAST_FLAG_STATIC|RTK_L2_UCAST_FLAG_DA_BLOCK; }else if(macFilterEntry->direct==RTK_RG_MACFILTER_FILTER_SRC_DEST_MAC_BOTH){ lut.entry.l2UcEntry.flags|=RTK_L2_UCAST_FLAG_STATIC|RTK_L2_UCAST_FLAG_SA_BLOCK|RTK_L2_UCAST_FLAG_DA_BLOCK; }else{//not defined value } } else { //the l2 entry is not found, set the l2 entry. 
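			//build a static unicast entry toward the CPU port: authorized, with the SA/DA block flags chosen by the requested filter direction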
bzero(&lut,sizeof(lut)); lut.entryType = RTK_LUT_L2UC; lut.entry.l2UcEntry.port=RTK_RG_PORT_CPU; lut.entry.l2UcEntry.vid=macFilterEntry->vlan_id; //lut.entry.l2UcEntry.fid=2;//default fid is defined to 2, but usless here. lut.entry.l2UcEntry.fid=LAN_FID; //LAN_FID should be the same as WAN_FID #if !defined(CONFIG_RTL9602C_SERIES) lut.entry.l2UcEntry.efid=default_efid; #endif lut.entry.l2UcEntry.mac = macFilterEntry->mac; lut.entry.l2UcEntry.auth=1; if(macFilterEntry->direct==RTK_RG_MACFILTER_FILTER_SRC_MAC_ONLY){ lut.entry.l2UcEntry.flags|=RTK_L2_UCAST_FLAG_STATIC|RTK_L2_UCAST_FLAG_SA_BLOCK; }else if(macFilterEntry->direct==RTK_RG_MACFILTER_FILTER_DEST_MAC_ONLY){ lut.entry.l2UcEntry.flags|=RTK_L2_UCAST_FLAG_STATIC|RTK_L2_UCAST_FLAG_DA_BLOCK; }else if(macFilterEntry->direct==RTK_RG_MACFILTER_FILTER_SRC_DEST_MAC_BOTH){ lut.entry.l2UcEntry.flags|=RTK_L2_UCAST_FLAG_STATIC|RTK_L2_UCAST_FLAG_SA_BLOCK|RTK_L2_UCAST_FLAG_DA_BLOCK; }else{//not defined value } } lut.entry.l2UcEntry.flags|=RTK_L2_UCAST_FLAG_IVL; ASSERT_EQ(RTK_L2_ADDR_ADD(&lut.entry.l2UcEntry),RT_ERR_OK); DEBUG("add mac filter at lut[%d]",lut.entry.l2UcEntry.index); //setup software macFilterEntry *mac_filter_idx = macFilter_index; rg_db.systemGlobal.macFilter_table_Entry[macFilter_index].valid=ENABLE; rg_db.systemGlobal.macFilter_table_Entry[macFilter_index].macFilterEntry = *macFilterEntry; rg_db.systemGlobal.macFilter_table_Entry[macFilter_index].l2_table_entry_index = lut.entry.l2UcEntry.index; }else{/*SVL*/ //for(i=0;i<INTERFACE_END;i++){/* i==0 for lan_intf, i==1 for wan_intf*/ for(i=0;i<INTERFACE_FOR_WAN;i++){ /*Lan/Wan use same fid, efid, vid and both are svl in current architecture(same lut index)=>just add/del once */ switch(i){ case INTERFACE_FOR_LAN: //fid = default_fid[INTERFACE_FOR_LAN]; fid = LAN_FID; //efid = default_efid[INTERFACE_FOR_LAN]; efid = default_efid; break; case INTERFACE_FOR_WAN: //fid = default_fid[INTERFACE_FOR_WAN]; fid = WAN_FID; //efid = default_efid[INTERFACE_FOR_WAN]; efid = default_efid; break; default: RETURN_ERR(RT_ERR_RG_L2_MACFILTER_ENTRY_ACCESS_FAILED); break; } //find lut index index=_rtk_rg_hash_mac_fid_efid(macFilterEntry->mac.octet,fid,efid); index<<=2; search_index = index; j=0; do { bzero(&lut,sizeof(lut)); if(rtk_l2_nextValidEntry_get(&search_index,&lut)){ //RETURN_ERR(RT_ERR_RG_L2_ENTRY_ACCESS_FAILED); break; } if( RTK_LUT_L2UC == lut.entryType) { if((!memcmp(&lut.entry.l2UcEntry.mac, &(macFilterEntry->mac), sizeof(rtk_mac_t) )) && lut.entry.l2UcEntry.fid==fid){ break; }else{ search_index = lut.entry.l2UcEntry.index +1; //search from next entry } } else if(RTK_LUT_L2MC == lut.entryType) { search_index = lut.entry.l2McEntry.index +1; //search from next entry } else /* RTK_LUT_L3MC*/ { search_index = lut.entry.ipmcEntry.index +1; //search from next entry } j++; } while(j < 4);//at most search 4 times. 
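			/*
			 * SVL case, same handling as the IVL branch above: if the 4-entry hash
			 * bucket already holds this MAC the entry is updated in place, otherwise
			 * a new static CPU-port entry is created below.
			 */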
if((!memcmp(&lut.entry.l2UcEntry.mac, &(macFilterEntry->mac), sizeof(rtk_mac_t) )) && lut.entry.l2UcEntry.fid==fid) { //the l2 entry is found, change some flags & auth lut.entry.l2UcEntry.auth=1; if((lut.entry.l2UcEntry.flags&RTK_L2_UCAST_FLAG_STATIC)==0) isLearnBefore=1; //reset sa/da block flag lut.entry.l2UcEntry.flags &= ~(RTK_L2_UCAST_FLAG_SA_BLOCK|RTK_L2_UCAST_FLAG_DA_BLOCK); if(macFilterEntry->direct==RTK_RG_MACFILTER_FILTER_SRC_MAC_ONLY){ lut.entry.l2UcEntry.flags|=RTK_L2_UCAST_FLAG_STATIC|RTK_L2_UCAST_FLAG_SA_BLOCK; }else if(macFilterEntry->direct==RTK_RG_MACFILTER_FILTER_DEST_MAC_ONLY){ lut.entry.l2UcEntry.flags|=RTK_L2_UCAST_FLAG_STATIC|RTK_L2_UCAST_FLAG_DA_BLOCK; }else if(macFilterEntry->direct==RTK_RG_MACFILTER_FILTER_SRC_DEST_MAC_BOTH){ lut.entry.l2UcEntry.flags|=RTK_L2_UCAST_FLAG_STATIC|RTK_L2_UCAST_FLAG_SA_BLOCK|RTK_L2_UCAST_FLAG_DA_BLOCK; }else{//not defined value } } else { //the l2 entry is not found, set the l2 entry. bzero(&lut,sizeof(lut)); lut.entryType = RTK_LUT_L2UC; lut.entry.l2UcEntry.port=RTK_RG_PORT_CPU; lut.entry.l2UcEntry.vid=0; lut.entry.l2UcEntry.fid=fid; #if !defined(CONFIG_RTL9602C_SERIES) lut.entry.l2UcEntry.efid=efid; #endif lut.entry.l2UcEntry.mac = macFilterEntry->mac; lut.entry.l2UcEntry.auth=1; if(macFilterEntry->direct==RTK_RG_MACFILTER_FILTER_SRC_MAC_ONLY){ lut.entry.l2UcEntry.flags|=RTK_L2_UCAST_FLAG_STATIC|RTK_L2_UCAST_FLAG_SA_BLOCK; }else if(macFilterEntry->direct==RTK_RG_MACFILTER_FILTER_DEST_MAC_ONLY){ lut.entry.l2UcEntry.flags|=RTK_L2_UCAST_FLAG_STATIC|RTK_L2_UCAST_FLAG_DA_BLOCK; }else if(macFilterEntry->direct==RTK_RG_MACFILTER_FILTER_SRC_DEST_MAC_BOTH){ lut.entry.l2UcEntry.flags|=RTK_L2_UCAST_FLAG_STATIC|RTK_L2_UCAST_FLAG_SA_BLOCK|RTK_L2_UCAST_FLAG_DA_BLOCK; }else{//not defined value } } ASSERT_EQ(RTK_L2_ADDR_ADD(&lut.entry.l2UcEntry),RT_ERR_OK); //memcpy(rg_db.mac[lut.entry.l2UcEntry.index].macAddr.octet,lut.entry.l2UcEntry.mac.octet,6); DEBUG("add mac filter at lut[%d]",lut.entry.l2UcEntry.index); //setup software macFilterEntry *mac_filter_idx = macFilter_index; rg_db.systemGlobal.macFilter_table_Entry[macFilter_index].valid=ENABLE; rg_db.systemGlobal.macFilter_table_Entry[macFilter_index].macFilterEntry = *macFilterEntry; rg_db.systemGlobal.macFilter_table_Entry[macFilter_index].l2_table_entry_index = lut.entry.l2UcEntry.index; //if(i==INTERFACE_FOR_LAN) //rg_db.systemGlobal.macFilter_table_Entry[macFilter_index].l2_table_entry_index_for_lan= lut.entry.l2UcEntry.index;//not used //if(i==INTERFACE_FOR_WAN) //rg_db.systemGlobal.macFilter_table_Entry[macFilter_index].l2_table_entry_index_for_wan= lut.entry.l2UcEntry.index;//not used } } if(isLearnBefore==1) { //------------------ Critical Section start -----------------------// //rg_lock(&rg_kernel.saLearningLimitLock); if(lut.entry.l2UcEntry.port>=RTK_RG_PORT_CPU) { if(rg_db.systemGlobal.accessWanLimitPortMask_member.portmask&(0x1<<(lut.entry.l2UcEntry.ext_port+RTK_RG_PORT_CPU))&&rg_db.lut[lut.entry.l2UcEntry.index].permit_for_l34_forward) atomic_dec(&rg_db.systemGlobal.accessWanLimitPortMaskCount); atomic_dec(&rg_db.systemGlobal.sourceAddrLearningCount[lut.entry.l2UcEntry.ext_port+RTK_RG_PORT_CPU]); //decrease wlan's device count if(lut.entry.l2UcEntry.ext_port==(RTK_RG_EXT_PORT0-RTK_RG_PORT_CPU) #ifdef CONFIG_DUALBAND_CONCURRENT ||(rg_db.systemGlobal.enableSlaveSSIDBind && lut.entry.l2UcEntry.ext_port==(RTK_RG_EXT_PORT1-RTK_RG_PORT_CPU)) #endif ) { #ifdef CONFIG_MASTER_WLAN0_ENABLE 
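			//per-WLAN0-device learning counters are only maintained when master WLAN0 support is compiled in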
if(rg_db.systemGlobal.accessWanLimitPortMask_wlan0member&(0x1<<(rg_db.lut[lut.entry.l2UcEntry.index].wlan_device_idx))&&rg_db.lut[lut.entry.l2UcEntry.index].permit_for_l34_forward) atomic_dec(&rg_db.systemGlobal.accessWanLimitPortMaskCount); atomic_dec(&rg_db.systemGlobal.wlan0SourceAddrLearningCount[(int)rg_db.lut[lut.entry.l2UcEntry.index].wlan_device_idx]); #endif } if(_rtK_rg_checkCategoryPortmask_spa(lut.entry.l2UcEntry.port+lut.entry.l2UcEntry.ext_port)==SUCCESS) atomic_dec(&rg_db.systemGlobal.accessWanLimitCategoryCount[(unsigned int)rg_db.lut[lut.entry.l2UcEntry.index].category]); } else { if(rg_db.systemGlobal.accessWanLimitPortMask_member.portmask&(0x1<<(lut.entry.l2UcEntry.port))&&rg_db.lut[lut.entry.l2UcEntry.index].permit_for_l34_forward) atomic_dec(&rg_db.systemGlobal.accessWanLimitPortMaskCount); atomic_dec(&rg_db.systemGlobal.sourceAddrLearningCount[lut.entry.l2UcEntry.port]); if(_rtK_rg_checkCategoryPortmask_spa(lut.entry.l2UcEntry.port)==SUCCESS) atomic_dec(&rg_db.systemGlobal.accessWanLimitCategoryCount[(unsigned int)rg_db.lut[lut.entry.l2UcEntry.index].category]); } //------------------ Critical Section End -----------------------// //rg_unlock(&rg_kernel.saLearningLimitLock); } assert_ok(_rtk_rg_shortCut_clear()); return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_macFilter_del(int mac_filter_idx) { uint32 default_fid[INTERFACE_END]={2,2}; //uint32 default_efid[INTERFACE_END]={0,0}; #if !defined(CONFIG_RTL9602C_SERIES) uint32 default_efid = 0; //current default efid is always zero, compatilbe with 9602C #endif uint32 default_vid[INTERFACE_END]={0,0}; rtk_l2_ucastAddr_t lut; bzero(&lut,sizeof(lut)); //Check rg has been init if(rg_db.systemGlobal.vlanInit==0) RETURN_ERR(RT_ERR_RG_NOT_INIT); if(_rtk_rg_is_macFilter_table_init()) ASSERT_EQ(_rtk_rg_macFilter_table_init(),RT_ERR_RG_OK); if((mac_filter_idx > MAX_MAC_FILTER_ENTRY_SIZE) || (rg_db.systemGlobal.macFilter_table_Entry[mac_filter_idx].valid==DISABLE)) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); //delete index for lan bzero(&lut,sizeof(lut)); lut.mac = rg_db.systemGlobal.macFilter_table_Entry[mac_filter_idx].macFilterEntry.mac; lut.fid = default_fid[INTERFACE_FOR_LAN]; //lut.efid = default_efid[INTERFACE_FOR_LAN]; #if !defined(CONFIG_RTL9602C_SERIES) lut.efid = default_efid; #endif lut.vid = default_vid[INTERFACE_FOR_LAN]; //lut.index = rg_db.systemGlobal.macFilter_table_Entry[mac_filter_idx].l2_table_entry_index_for_lan; lut.index = rg_db.systemGlobal.macFilter_table_Entry[mac_filter_idx].l2_table_entry_index; DEBUG("del lut[%d]",lut.index); //ASSERT_EQ(RTK_L2_ADDR_DEL(&lut),RT_ERR_OK); ASSERT_EQ(rtk_rg_apollo_macEntry_del(lut.index),RT_ERR_OK); #if 0 //Lan/Wan use same fid, efid, vid and both are svl in current architecture (same lut index)=>just add/del once //delete index for wan bzero(&lut,sizeof(lut)); lut.mac = rg_db.systemGlobal.macFilter_table_Entry[mac_filter_idx].macFilterEntry.mac; lut.fid = default_fid[INTERFACE_FOR_WAN]; lut.efid = default_efid[INTERFACE_FOR_WAN]; lut.vid = default_vid[INTERFACE_FOR_WAN]; //lut.index = rg_db.systemGlobal.macFilter_table_Entry[mac_filter_idx].l2_table_entry_index_for_wan; lut.index = rg_db.systemGlobal.macFilter_table_Entry[mac_filter_idx].l2_table_entry_index; DEBUG("del lut[%d]",lut.index); ASSERT_EQ(RTK_L2_ADDR_DEL(&lut),RT_ERR_OK); #endif rg_db.systemGlobal.macFilter_table_Entry[mac_filter_idx].valid =DISABLE; bzero(&rg_db.systemGlobal.macFilter_table_Entry[mac_filter_idx].macFilterEntry,sizeof(rtk_rg_macFilterEntry_t)); 
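	//also reset the cached L2 table index so this software mac-filter slot is fully released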
rg_db.systemGlobal.macFilter_table_Entry[mac_filter_idx].l2_table_entry_index = 0; //rg_db.systemGlobal.macFilter_table_Entry[mac_filter_idx].l2_table_entry_index_for_lan= 0;//not used //rg_db.systemGlobal.macFilter_table_Entry[mac_filter_idx].l2_table_entry_index_for_wan= 0;//not used return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_macFilter_find(rtk_rg_macFilterEntry_t *macFilterEntry, int *valid_idx) { int macFilter_index; //Check rg has been init if(rg_db.systemGlobal.vlanInit==0) RETURN_ERR(RT_ERR_RG_NOT_INIT); if(macFilterEntry==NULL || valid_idx==NULL) RETURN_ERR(RT_ERR_RG_NULL_POINTER); if(_rtk_rg_is_macFilter_table_init()) ASSERT_EQ(_rtk_rg_macFilter_table_init(),RT_ERR_RG_OK); //search empty macFilterEntry for(macFilter_index=*valid_idx; macFilter_index<MAX_MAC_FILTER_ENTRY_SIZE; macFilter_index++) { if(rg_db.systemGlobal.macFilter_table_Entry[macFilter_index].valid==ENABLE) { *valid_idx = macFilter_index; *macFilterEntry = rg_db.systemGlobal.macFilter_table_Entry[macFilter_index].macFilterEntry; return (RT_ERR_RG_OK); } } return (RT_ERR_RG_L2_MACFILTER_ENTRY_NOT_FOUND); } //URL Filter #if 1 //rtk_rg_urlFilterEntry_t *urlFilter_table_entry; int _rtk_rg_is_urlFilter_table_init(void) { if(rg_db.systemGlobal.urlFilter_table_entry==NULL) RETURN_ERR(RT_ERR_RG_NULL_POINTER); else return (RT_ERR_RG_OK); } int _rtk_rg_urlFilter_table_init(void) { int i; bzero(rg_db.systemGlobal.urlFilter_table_entry, sizeof(rtk_rg_urlFilterEntry_t)*MAX_URL_FILTER_ENTRY_SIZE); for(i=0;i<MAX_URL_FILTER_ENTRY_SIZE;i++){ rg_db.systemGlobal.urlFilter_valid_entry[i] = -1; } rg_db.systemGlobal.urlFilter_totalNum=0; return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_urlFilterString_add(rtk_rg_urlFilterString_t *filter,int *url_idx) { int i,urlFilter_index; //Check rg has been init if(rg_db.systemGlobal.vlanInit==0) RETURN_ERR(RT_ERR_RG_NOT_INIT); if(filter==NULL || url_idx==NULL) RETURN_ERR(RT_ERR_RG_NULL_POINTER); if(_rtk_rg_is_urlFilter_table_init()) ASSERT_EQ(_rtk_rg_urlFilter_table_init(),RT_ERR_RG_OK); //search empty urlFilterEntry for(urlFilter_index=0;urlFilter_index<MAX_URL_FILTER_ENTRY_SIZE;urlFilter_index++){ if(rg_db.systemGlobal.urlFilter_table_entry[urlFilter_index].valid==DISABLE){ rg_db.systemGlobal.urlFilter_table_entry[urlFilter_index].urlFilter= *filter; rg_db.systemGlobal.urlFilter_table_entry[urlFilter_index].valid = ENABLE; break; } if(urlFilter_index == (MAX_URL_FILTER_ENTRY_SIZE-1)) RETURN_ERR(RT_ERR_RG_URLFILTER_ENTRY_FULL); } *url_idx = urlFilter_index; for(i=0;i<MAX_URL_FILTER_ENTRY_SIZE;i++){ if(rg_db.systemGlobal.urlFilter_valid_entry[i]==-1){ rg_db.systemGlobal.urlFilter_valid_entry[i]=urlFilter_index; break; } } rg_db.systemGlobal.urlFilter_totalNum++; return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_urlFilterString_del(int url_idx) { int i,index=0; int empty_flag = DISABLED; // avoid access urlFilter_valid_entry[-1] makes kernal panic while call rtk_rg_urlFilterString_del() when no any rule has been set. 
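	/*
	 * urlFilterString_del: the filter entry itself is simply zeroed; the compact
	 * urlFilter_valid_entry[] index list is then repaired by moving the last
	 * valid slot into the freed position, guarded by empty_flag so an empty
	 * list never touches index -1.
	 */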
//Check rg has been init if(rg_db.systemGlobal.vlanInit==0) RETURN_ERR(RT_ERR_RG_NOT_INIT); if(_rtk_rg_is_urlFilter_table_init()) ASSERT_EQ(_rtk_rg_urlFilter_table_init(),RT_ERR_RG_OK); /*delete the assigned Entry*/ bzero(&rg_db.systemGlobal.urlFilter_table_entry[url_idx],sizeof(rg_db.systemGlobal.urlFilter_table_entry[url_idx])); /*delete urlFilter_valid_entry record*/ //search the relate index for(i=0;i<MAX_URL_FILTER_ENTRY_SIZE;i++){ if(rg_db.systemGlobal.urlFilter_valid_entry[i]==url_idx){ index=i;//record the valid entry index break; } } //if index is not found, do not access urlFilter_valid_entry[index] if(i==MAX_URL_FILTER_ENTRY_SIZE){ empty_flag = ENABLED; //index not found DEBUG("%s: valid index not found",__func__); } if(empty_flag != ENABLED){ for(i=0;i<MAX_URL_FILTER_ENTRY_SIZE;i++){ if(rg_db.systemGlobal.urlFilter_valid_entry[i]==-1){ DEBUG("%s: index=%d i=%d",__func__,index,i); rg_db.systemGlobal.urlFilter_valid_entry[index]=rg_db.systemGlobal.urlFilter_valid_entry[i-1];//replace by the last valid entry rg_db.systemGlobal.urlFilter_valid_entry[i-1]=-1;//clean last valid entry rg_db.systemGlobal.urlFilter_totalNum--; break; } } } return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_urlFilterString_find(rtk_rg_urlFilterString_t *filter, int *valid_idx) { int urlFilter_index; //Check rg has been init if(rg_db.systemGlobal.vlanInit==0) RETURN_ERR(RT_ERR_RG_NOT_INIT); if(filter==NULL || valid_idx==NULL) RETURN_ERR(RT_ERR_RG_NULL_POINTER); if(_rtk_rg_is_urlFilter_table_init()) ASSERT_EQ(_rtk_rg_urlFilter_table_init(),RT_ERR_RG_OK); for(urlFilter_index=*valid_idx;urlFilter_index<MAX_URL_FILTER_ENTRY_SIZE;urlFilter_index++){ if(rg_db.systemGlobal.urlFilter_table_entry[urlFilter_index].valid==ENABLE){ *valid_idx = urlFilter_index; *filter = rg_db.systemGlobal.urlFilter_table_entry[urlFilter_index].urlFilter; return (RT_ERR_RG_OK); } } return (RT_ERR_RG_URLFILTER_ENTRY_NOT_FOUND); } #endif rtk_rg_err_code_t rtk_rg_apollo_redirectHttpAll_set(rtk_rg_redirectHttpAll_t *pRedirectHttpAll) { int i,portidx; //Check input parameter if(pRedirectHttpAll==NULL)RETURN_ERR(RT_ERR_RG_NULL_POINTER); rg_db.redirectHttpAll.enable=pRedirectHttpAll->enable; //save data in rg_db if(rg_db.redirectHttpAll.enable){ memcpy(&rg_db.redirectHttpAll,pRedirectHttpAll,sizeof(rtk_rg_redirectHttpAll_t)); //reset all non-type mac as default type for(i=0;i<MAX_LUT_SW_TABLE_SIZE;i++){ if(rg_db.lut[i].valid && rg_db.lut[i].rtk_lut.entryType==RTK_LUT_L2UC && rg_db.lut[i].redirect_http_req==0){ if(rg_db.lut[i].rtk_lut.entry.l2UcEntry.port==RTK_RG_PORT_CPU)portidx=RTK_RG_PORT_CPU+rg_db.lut[i].rtk_lut.entry.l2UcEntry.ext_port; else portidx=rg_db.lut[i].rtk_lut.entry.l2UcEntry.port; if(portidx!=RTK_RG_PORT_CPU && rg_db.systemGlobal.lanPortMask.portmask&(0x1<<portidx)) rg_db.lut[i].redirect_http_req=1; } } //20160517LUKE: clear all shortcut in case bridge and pure-routing mode failed. 
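		//clearing the software shortcuts sends established flows back through the forwarding engine, so the redirect flag set above also applies to them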
_rtk_rg_shortCut_clear(); }else if(!rg_db.systemGlobal.forcePortal_url_list[0].valid){ //clear all same type mac for(i=0;i<MAX_LUT_SW_TABLE_SIZE;i++) if(rg_db.lut[i].valid && rg_db.lut[i].redirect_http_req==1) rg_db.lut[i].redirect_http_req=0; } return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_redirectHttpAll_get(rtk_rg_redirectHttpAll_t *pRedirectHttpAll) { //Check input parameter if(pRedirectHttpAll==NULL)RETURN_ERR(RT_ERR_RG_NULL_POINTER); memcpy(pRedirectHttpAll,&rg_db.redirectHttpAll,sizeof(rtk_rg_redirectHttpAll_t)); return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_redirectHttpURL_add(rtk_rg_redirectHttpURL_t *pRedirectHttpURL) { rtk_rg_redirectHttpURL_linkList_t *pRedEntry,*pNextEntry; //Check input parameter if(pRedirectHttpURL==NULL)RETURN_ERR(RT_ERR_RG_NULL_POINTER); if(pRedirectHttpURL->count<=-2)RETURN_ERR(RT_ERR_RG_INVALID_PARAM); //Check if we were added same URL before if(!list_empty(&rg_db.redirectHttpURLListHead)){ list_for_each_entry(pRedEntry,&rg_db.redirectHttpURLListHead,url_list){ if(!strncmp(pRedEntry->url_data.url_str,pRedirectHttpURL->url_str,pRedEntry->url_len)){ //set count and return DEBUG("exist URL!! reset count from %d to %d...",atomic_read(&pRedEntry->count),pRedirectHttpURL->count); atomic_set(&pRedEntry->count,pRedirectHttpURL->count); pRedEntry->url_data.count=pRedirectHttpURL->count; return (RT_ERR_RG_OK); } } } //if not exist, create new one from free list if(!list_empty(&rg_db.redirectHttpURLFreeListHead)){ list_for_each_entry_safe(pRedEntry,pNextEntry,&rg_db.redirectHttpURLFreeListHead,url_list){ //Get one from free list, just return the first entry right behind of head list_del_init(&pRedEntry->url_list); //Setup information atomic_set(&pRedEntry->count,pRedirectHttpURL->count); memcpy(&pRedEntry->url_data,pRedirectHttpURL,sizeof(rtk_rg_redirectHttpURL_t)); pRedEntry->url_len=strlen(pRedirectHttpURL->url_str); //Add to hash head list list_add_tail(&pRedEntry->url_list,&rg_db.redirectHttpURLListHead); return (RT_ERR_RG_OK); } } DEBUG("all free redirect URL list are allocated..."); RETURN_ERR(RT_ERR_RG_ENTRY_FULL); } rtk_rg_err_code_t rtk_rg_apollo_redirectHttpURL_del(rtk_rg_redirectHttpURL_t *pRedirectHttpURL) { rtk_rg_redirectHttpURL_linkList_t *pRedEntry,*pNextEntry; //Check input parameter if(pRedirectHttpURL==NULL)RETURN_ERR(RT_ERR_RG_NULL_POINTER); if(!list_empty(&rg_db.redirectHttpURLListHead)){ list_for_each_entry_safe(pRedEntry,pNextEntry,&rg_db.redirectHttpURLListHead,url_list){ if(!strncmp(pRedEntry->url_data.url_str,pRedirectHttpURL->url_str,pRedEntry->url_len)){ //Delete from head list list_del_init(&pRedEntry->url_list); //Add back to free list list_add(&pRedEntry->url_list,&rg_db.redirectHttpURLFreeListHead); return (RT_ERR_RG_OK); } } } DEBUG("the redirect URL \"%s\" could not be found...",pRedirectHttpURL->url_str); RETURN_ERR(RT_ERR_RG_ENTRY_NOT_EXIST); } rtk_rg_err_code_t rtk_rg_apollo_redirectHttpWhiteList_add(rtk_rg_redirectHttpWhiteList_t *pRedirectHttpWhiteList) { rtk_rg_redirectHttpWhiteList_linkList_t *pRedEntry,*pNextEntry; //Check input parameter if(pRedirectHttpWhiteList==NULL)RETURN_ERR(RT_ERR_RG_NULL_POINTER); //Check if we were added same URL before if(!list_empty(&rg_db.redirectHttpWhiteListListHead)){ list_for_each_entry(pRedEntry,&rg_db.redirectHttpWhiteListListHead,white_list){ if(!strncmp(pRedEntry->white_data.url_str,pRedirectHttpWhiteList->url_str,pRedEntry->url_len) && !strncmp(pRedEntry->white_data.keyword_str,pRedirectHttpWhiteList->keyword_str,pRedEntry->keyword_len)){ //just return DEBUG("exist 
URL and keyword..."); return (RT_ERR_RG_OK); } } } //if not exist, create new one from free list if(!list_empty(&rg_db.redirectHttpWhiteListFreeListHead)){ list_for_each_entry_safe(pRedEntry,pNextEntry,&rg_db.redirectHttpWhiteListFreeListHead,white_list){ //Get one from free list, just return the first entry right behind of head list_del_init(&pRedEntry->white_list); //Setup information memcpy(&pRedEntry->white_data,pRedirectHttpWhiteList,sizeof(rtk_rg_redirectHttpWhiteList_t)); pRedEntry->url_len=strlen(pRedirectHttpWhiteList->url_str); pRedEntry->keyword_len=strlen(pRedirectHttpWhiteList->keyword_str); //Add to hash head list list_add_tail(&pRedEntry->white_list,&rg_db.redirectHttpWhiteListListHead); return (RT_ERR_RG_OK); } } DEBUG("all free redirect White list are allocated..."); RETURN_ERR(RT_ERR_RG_ENTRY_FULL); } rtk_rg_err_code_t rtk_rg_apollo_redirectHttpWhiteList_del(rtk_rg_redirectHttpWhiteList_t *pRedirectHttpWhiteList) { rtk_rg_redirectHttpWhiteList_linkList_t *pRedEntry,*pNextEntry; //Check input parameter if(pRedirectHttpWhiteList==NULL)RETURN_ERR(RT_ERR_RG_NULL_POINTER); if(!list_empty(&rg_db.redirectHttpWhiteListListHead)){ list_for_each_entry_safe(pRedEntry,pNextEntry,&rg_db.redirectHttpWhiteListListHead,white_list){ if(!strncmp(pRedEntry->white_data.url_str,pRedirectHttpWhiteList->url_str,pRedEntry->url_len) && !strncmp(pRedEntry->white_data.keyword_str,pRedirectHttpWhiteList->keyword_str,pRedEntry->keyword_len)){ //Delete from head list list_del_init(&pRedEntry->white_list); //Add back to free list list_add(&pRedEntry->white_list,&rg_db.redirectHttpWhiteListFreeListHead); return (RT_ERR_RG_OK); } } } DEBUG("the redirect WhiteList \"%s\" keyword \"%s\" could not be found...",pRedirectHttpWhiteList->url_str,pRedirectHttpWhiteList->keyword_str); RETURN_ERR(RT_ERR_RG_ENTRY_NOT_EXIST); } rtk_rg_err_code_t rtk_rg_apollo_redirectHttpRsp_set(rtk_rg_redirectHttpRsp_t *pRedirectHttpRsp) { //Check input parameter if(pRedirectHttpRsp==NULL)RETURN_ERR(RT_ERR_RG_NULL_POINTER); //save data in rg_db memcpy(&rg_db.redirectHttpRsp,pRedirectHttpRsp,sizeof(rtk_rg_redirectHttpRsp_t)); return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_redirectHttpRsp_get(rtk_rg_redirectHttpRsp_t *pRedirectHttpRsp) { //Check input parameter if(pRedirectHttpRsp==NULL)RETURN_ERR(RT_ERR_RG_NULL_POINTER); memcpy(pRedirectHttpRsp,&rg_db.redirectHttpRsp,sizeof(rtk_rg_redirectHttpRsp_t)); return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_redirectHttpCount_set(rtk_rg_redirectHttpCount_t *pRedirectHttpCount) { //Check input parameter if(pRedirectHttpCount==NULL)RETURN_ERR(RT_ERR_RG_NULL_POINTER); memcpy(&rg_db.redirectHttpCount,pRedirectHttpCount,sizeof(rtk_rg_redirectHttpCount_t)); //20160517LUKE: clear all shortcut in case bridge and pure-routing mode failed. 
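	//as with redirectHttpAll, enabling the counted redirection flushes the software shortcuts so already-accelerated flows are redirected as well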
if(pRedirectHttpCount->enable)_rtk_rg_shortCut_clear(); return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_redirectHttpCount_get(rtk_rg_redirectHttpCount_t *pRedirectHttpCount) { //Check input parameter if(pRedirectHttpCount==NULL)RETURN_ERR(RT_ERR_RG_NULL_POINTER); memcpy(pRedirectHttpCount,&rg_db.redirectHttpCount,sizeof(rtk_rg_redirectHttpCount_t)); return (RT_ERR_RG_OK); } //UPnP rtk_rg_err_code_t rtk_rg_apollo_upnpConnection_add(rtk_rg_upnpConnection_t *upnp, int *upnp_idx) { int i=0; //Check rg has been init if(rg_db.systemGlobal.vlanInit==0) RETURN_ERR(RT_ERR_RG_NOT_INIT); if(rg_db.systemGlobal.interfaceInfo[upnp->wan_intf_idx].storedInfo.is_wan == 0) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if(rg_db.systemGlobal.interfaceInfo[upnp->wan_intf_idx].storedInfo.wan_intf.wan_intf_conf.wan_type == RTK_RG_BRIDGE) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); // Get free entry for(i=0;i<MAX_UPNP_SW_TABLE_SIZE;i++) { if(rg_db.upnp[i].valid) continue; else break; } if(i==MAX_UPNP_SW_TABLE_SIZE) RETURN_ERR(RT_ERR_RG_ENTRY_FULL); if(_rtk_rg_naptExtPortInUsedCheck(FALSE,upnp->is_tcp,upnp->gateway_port,TRUE,TRUE)==1) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); atomic_inc(&rg_db.naptForwardEngineEntryNumber[upnp->is_tcp]); // Add UPNP mapping *upnp_idx = i; rg_db.upnp[i].is_tcp = upnp->is_tcp; rg_db.upnp[i].wan_intf_idx = upnp->wan_intf_idx; rg_db.upnp[i].gateway_port = upnp->gateway_port; rg_db.upnp[i].local_ip = upnp->local_ip; rg_db.upnp[i].local_port = upnp->local_port; rg_db.upnp[i].limit_remote_ip = upnp->limit_remote_ip; rg_db.upnp[i].limit_remote_port = upnp->limit_remote_port; rg_db.upnp[i].remote_ip = upnp->remote_ip; rg_db.upnp[i].remote_port = upnp->remote_port; rg_db.upnp[i].type = upnp->type; rg_db.upnp[i].timeout = upnp->timeout; rg_db.upnp[i].valid = 1; rg_db.upnp[i].idle = 0; rg_db.systemGlobal.upnpGroup[rg_db.systemGlobal.upnpTotalNum].index = *upnp_idx; rg_db.systemGlobal.upnpGroup[rg_db.systemGlobal.upnpTotalNum].p_upnp = &rg_db.upnp[i]; rg_db.systemGlobal.upnpTotalNum++; DEBUG("Add UPNP[%d]",i); return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_upnpConnection_del(int upnp_idx) { int i=0; //Check rg has been init if(rg_db.systemGlobal.vlanInit==0) RETURN_ERR(RT_ERR_RG_NOT_INIT); if( (upnp_idx<0) || (upnp_idx>=MAX_UPNP_SW_TABLE_SIZE)) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); _rtk_rg_naptExtPortFree(FALSE,rg_db.upnp[upnp_idx].is_tcp,rg_db.upnp[upnp_idx].gateway_port); memset(&rg_db.upnp[upnp_idx],0,sizeof(rtk_rg_upnpConnection_t)); /* UPNP Group table */ for(i=0;i<rg_db.systemGlobal.upnpTotalNum;i++) { if(rg_db.systemGlobal.upnpGroup[i].index == upnp_idx) { if(i==(rg_db.systemGlobal.upnpTotalNum-1)) { //The last entry rg_db.systemGlobal.upnpGroup[i].index = 0; rg_db.systemGlobal.upnpGroup[i].p_upnp = NULL; } else { //Replace the deleteing entry by last one rg_db.systemGlobal.upnpGroup[i].index = rg_db.systemGlobal.upnpGroup[rg_db.systemGlobal.upnpTotalNum-1].index; rg_db.systemGlobal.upnpGroup[i].p_upnp = rg_db.systemGlobal.upnpGroup[rg_db.systemGlobal.upnpTotalNum-1].p_upnp; rg_db.systemGlobal.upnpGroup[rg_db.systemGlobal.upnpTotalNum-1].index = 0; rg_db.systemGlobal.upnpGroup[rg_db.systemGlobal.upnpTotalNum-1].p_upnp = NULL; } rg_db.systemGlobal.upnpTotalNum--; break; } } DEBUG("Del UPNP[%d]",upnp_idx); return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_upnpConnection_find(rtk_rg_upnpConnection_t *upnp, int *valid_idx) { int idx=0; //Check rg has been init if(rg_db.systemGlobal.vlanInit==0) RETURN_ERR(RT_ERR_RG_NOT_INIT); if((upnp==NULL) || (valid_idx==NULL)) 
RETURN_ERR(RT_ERR_RG_NULL_POINTER); //Find UPNP mapping for(idx=*valid_idx;idx<MAX_UPNP_SW_TABLE_SIZE;idx++) { if(rg_db.upnp[idx].valid) { *valid_idx=idx; memcpy(upnp,&rg_db.upnp[idx],sizeof(rtk_rg_upnpConnection_t)); return (RT_ERR_RG_OK); } } return (RT_ERR_RG_UPNP_SW_ENTRY_NOT_FOUND); } rtk_rg_err_code_t _rtk_rg_portToMacPort_translator(rtk_rg_port_idx_t in_Port, rtk_rg_mac_port_idx_t* out_mac_port, rtk_rg_mac_ext_port_idx_t* out_mac_extPort) { if(out_mac_port==NULL || out_mac_extPort==NULL) RETURN_ERR(RT_ERR_RG_NULL_POINTER); #if defined(CONFIG_RTL9600_SERIES) || defined(CONFIG_RTL9602C_SERIES) || defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) if(in_Port<=RTK_RG_PORT_CPU) { *out_mac_port = in_Port; *out_mac_extPort = 0; } else //from ext port { *out_mac_port = RTK_RG_MAC_PORT_CPU; *out_mac_extPort = in_Port-RTK_RG_PORT_CPU; } #elif defined(CONFIG_RTL9607C_SERIES) if(in_Port<=RTK_RG_PORT_MASTERCPU_CORE1) { *out_mac_port = in_Port; *out_mac_extPort = 0; } else //from ext port { if(in_Port>=RTK_RG_EXT_PORT0 && in_Port<=RTK_RG_EXT_PORT5) { *out_mac_port = RTK_RG_MAC_PORT_MASTERCPU_CORE0; *out_mac_extPort = in_Port-RTK_RG_EXT_PORT0+1; } else if(in_Port>=RTK_RG_MAC10_EXT_PORT0 && in_Port<=RTK_RG_MAC10_EXT_PORT5) { *out_mac_port = RTK_RG_MAC_PORT_MASTERCPU_CORE1; *out_mac_extPort = in_Port-RTK_RG_MAC10_EXT_PORT0+1; } else //(in_Port>=RTK_RG_MAC7_EXT_PORT0 && in_Port<=RTK_RG_MAC7_EXT_PORT5) { *out_mac_port = RTK_RG_MAC_PORT_SLAVECPU; *out_mac_extPort = in_Port-RTK_RG_MAC7_EXT_PORT0+1; } } #else #error #endif return (RT_ERR_RG_OK); } rtk_rg_err_code_t _rtk_rg_macPortToPort_translator(rtk_rg_port_idx_t* out_Port, rtk_rg_mac_port_idx_t in_mac_port, rtk_rg_mac_ext_port_idx_t in_mac_extPort) { if(out_Port==NULL) RETURN_ERR(RT_ERR_RG_NULL_POINTER); #if defined(CONFIG_RTL9600_SERIES) || defined(CONFIG_RTL9602C_SERIES) || defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) if(in_mac_port==RTK_RG_MAC_PORT_CPU) { *out_Port = RTK_RG_PORT_CPU+(in_mac_extPort-RTK_RG_MAC_EXT_CPU); } else { *out_Port = in_mac_port; } #elif defined(CONFIG_RTL9607C_SERIES) if(in_mac_port==RTK_RG_MAC_PORT_MASTERCPU_CORE0) { if(in_mac_extPort==RTK_RG_MAC_EXT_CPU) *out_Port = RTK_RG_PORT_MASTERCPU_CORE0; else //extspa is not 0 *out_Port = RTK_RG_EXT_PORT0+(in_mac_extPort-RTK_RG_MAC_EXT_PORT0); } else if(in_mac_port==RTK_RG_MAC_PORT_MASTERCPU_CORE1) { if(in_mac_extPort==RTK_RG_MAC_EXT_CPU) *out_Port = RTK_RG_PORT_MASTERCPU_CORE1; else //extspa is not 0 *out_Port = RTK_RG_MAC10_EXT_PORT0+(in_mac_extPort-RTK_RG_MAC_EXT_PORT0); } else if(in_mac_port==RTK_RG_MAC_PORT_SLAVECPU) { if(in_mac_extPort==RTK_RG_MAC_EXT_CPU) *out_Port = RTK_RG_PORT_SLAVECPU; else //extspa is not 0 *out_Port = RTK_RG_MAC7_EXT_PORT0+(in_mac_extPort-RTK_RG_MAC_EXT_PORT0); } else { *out_Port = in_mac_port; } #else #error #endif return (RT_ERR_RG_OK); } uint32 _rtk_rg_isVlanMember(uint32 vlanId, rtk_rg_mac_port_idx_t mac_port, rtk_rg_mac_ext_port_idx_t mac_extPort) { if(rg_db.vlan[vlanId].valid) { #if defined(CONFIG_RTL9600_SERIES) || defined(CONFIG_RTL9602C_SERIES) || defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) if(mac_port==RTK_RG_MAC_PORT_CPU) { if((rg_db.vlan[vlanId].MemberPortmask.bits[0]&(0x1<<mac_port)) && (rg_db.vlan[vlanId].Ext_portmask.bits[0]&(0x1<<mac_extPort))) return TRUE; } else { if(rg_db.vlan[vlanId].MemberPortmask.bits[0]&(0x1<<mac_port)) return TRUE; } #elif defined(CONFIG_RTL9607C_SERIES) if(mac_extPort!=0) { if(mac_port==RTK_RG_MAC_PORT_MASTERCPU_CORE0) { if(rg_db.vlan[vlanId].Ext_portmask.bits[0]&(0x1<<(mac_extPort-RTK_RG_MAC_EXT_PORT0))) return TRUE; } else 
if(mac_port==RTK_RG_MAC_PORT_MASTERCPU_CORE1) { if(rg_db.vlan[vlanId].Ext_portmask.bits[0]&(0x1<<(mac_extPort-RTK_RG_MAC_EXT_PORT0+6))) return TRUE; } else if(mac_port==RTK_RG_MAC_PORT_SLAVECPU) { if(rg_db.vlan[vlanId].Ext_portmask.bits[0]&(0x1<<(mac_extPort-RTK_RG_MAC_EXT_PORT0+12))) return TRUE; } } else { if(rg_db.vlan[vlanId].MemberPortmask.bits[0]&(0x1<<mac_port)) return TRUE; } #else #error #endif } return FALSE; } #if defined(CONFIG_RG_FLOW_BASED_PLATFORM) uint32 _rtk_rg_decideNetIfIdx(rtk_rg_port_idx_t checkPort, rtk_rg_mac_port_idx_t checkMacPort, uint8 checkCtagIf, uint16 checkCvid) { uint8 i, netIf_ctagif; uint16 netIf_cvid; uint32 netIfIdx=FAIL; if((0x1<<checkPort) & rg_db.systemGlobal.wanPortMask.portmask) //from wan { for(i=0;i<rg_db.systemGlobal.wanIntfTotalNum;i++) { //skip L34 wan interface if(rg_db.netif[rg_db.systemGlobal.wanIntfGroup[i].index].rtk_netif.isL34!=0) continue; if(checkPort!=rg_db.systemGlobal.wanIntfGroup[i].p_wanIntfConf->wan_port_idx) continue; netIf_ctagif = (rg_db.netif[rg_db.systemGlobal.wanIntfGroup[i].index].rtk_netif.isCtagIf)?1:0; netIf_cvid = rg_db.netif[rg_db.systemGlobal.wanIntfGroup[i].index].rtk_netif.vlan_id; if((checkCtagIf==0 && netIf_ctagif==0) || ((checkCtagIf==1 && netIf_ctagif==1) && checkCvid==netIf_cvid)) { netIfIdx = rg_db.systemGlobal.wanIntfGroup[i].index; break; } } if(netIfIdx==FAIL) { WARNING("Can not decide wan interface(default: %d)", DEFAULT_L2_WANIF_IDX); netIfIdx = DEFAULT_L2_WANIF_IDX; } } else //from lan { int32 firstHitIdx=FAIL; //record the first index of interface that ingress port matches its portmask. for(i=0;i<rg_db.systemGlobal.lanIntfTotalNum;i++) { if(((0x1<<checkPort) & rg_db.systemGlobal.lanIntfGroup[i].p_intfInfo->p_lanIntfConf->port_mask.portmask)==0) continue; if(firstHitIdx==FAIL) firstHitIdx = rg_db.systemGlobal.lanIntfGroup[i].index; netIf_ctagif = ((0x1<<checkMacPort) & rg_db.systemGlobal.lanIntfGroup[i].p_intfInfo->p_lanIntfConf->untag_mask.portmask)?0:1; netIf_cvid = rg_db.netif[rg_db.systemGlobal.lanIntfGroup[i].index].rtk_netif.vlan_id; if((checkCtagIf==0 && netIf_ctagif==0) || ((checkCtagIf==1 && netIf_ctagif==1) && checkCvid==netIf_cvid)) { netIfIdx = rg_db.systemGlobal.lanIntfGroup[i].index; break; } } if(netIfIdx==FAIL) { WARNING("Can not decide lan interface(firstHitIdx: %d, default: %d)", firstHitIdx, DEFAULT_L2_WANIF_IDX); netIfIdx = (firstHitIdx!=FAIL)?firstHitIdx:DEFAULT_L2_LANIF_IDX; } } return netIfIdx; } #if defined(CONFIG_RG_FLOW_4K_MODE) int _rtk_rg_flowTcamListAdd(uint32 flowHashIdx, uint32 addFlowIdx) { rtk_rg_flowTcam_linkList_t *pFlowTcamEntry; pFlowTcamEntry = &rg_db.flowTcamList[addFlowIdx-MAX_FLOW_TABLE_SIZE]; //Delete from head list list_del_init(&pFlowTcamEntry->flowTcam_list); //Add to hash head list list_add_tail(&pFlowTcamEntry->flowTcam_list, &rg_db.flowTcamListHead[flowHashIdx]); DEBUG("add flow[%d] to flowTcamListHead[%d]\n", addFlowIdx, flowHashIdx); return (RT_ERR_RG_OK); } int _rtk_rg_flowTcamListDel(uint32 delFlowIdx) { rtk_rg_flowTcam_linkList_t *pDelFlowTcamEntry; rtk_rg_flowTcam_linkList_t *pFlowTcamEntry, *pNextFlowTcamEntry; pDelFlowTcamEntry = &rg_db.flowTcamList[delFlowIdx-MAX_FLOW_TABLE_SIZE]; //Delete from head list list_del_init(&pDelFlowTcamEntry->flowTcam_list); if(list_empty(&rg_db.flowTcamFreeListHead)) { list_add(&pDelFlowTcamEntry->flowTcam_list, &rg_db.flowTcamFreeListHead); } else { list_for_each_entry_safe(pFlowTcamEntry, pNextFlowTcamEntry, &rg_db.flowTcamFreeListHead, flowTcam_list) { if(pDelFlowTcamEntry->idx < pFlowTcamEntry->idx) { 
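			//keep the TCAM free list sorted by entry index: insert the freed entry right before the first entry with a larger index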
list_add_tail(&pDelFlowTcamEntry->flowTcam_list, &pFlowTcamEntry->flowTcam_list); break; } if(&pNextFlowTcamEntry->flowTcam_list == &rg_db.flowTcamFreeListHead) { list_add(&pDelFlowTcamEntry->flowTcam_list, &pFlowTcamEntry->flowTcam_list); break; } } } return (RT_ERR_RG_OK); } #endif uint32 _rtk_rg_sw_flowHashPath34ExtraItem_get(void *pFlowData, uint16 igrSVID, uint16 igrCVID, uint16 lutDaIdx) { u32 extraItem = 0; rtk_rg_asic_path3_entry_t *pP3Data = pFlowData; u8 isMulticast = FALSE; if(pP3Data->in_ipv4_or_ipv6 == 1){ /* IPv6 */ isMulticast = (pP3Data->in_dst_ipv6_addr_hash& FLOW_V6HASHADDR_MC_BIT)?TRUE:FALSE; }else{ /* IPv4 */ if((pP3Data->in_dst_ipv4_addr > FLOW_V4ADDR_MC_LO_BOUND) && (pP3Data->in_dst_ipv4_addr < FLOW_V4ADDR_MC_UP_BOUND)) isMulticast = TRUE; } if(isMulticast){ if(rg_db.systemGlobal.fbGlobalState[FB_GLOBAL_PATH34_MC_SKIP_SVID]==DISABLED) extraItem |= (igrSVID<<12); else extraItem &= 0x000fff; if(rg_db.systemGlobal.fbGlobalState[FB_GLOBAL_PATH34_MC_SKIP_CVID]==DISABLED) extraItem |= igrCVID; else extraItem &= 0xfff000; }else{ if(rg_db.systemGlobal.fbGlobalState[FB_GLOBAL_PATH34_UCBC_SKIP_SVID]==DISABLED) extraItem |= (igrSVID<<12); else extraItem &= 0x000fff; if(rg_db.systemGlobal.fbGlobalState[FB_GLOBAL_PATH34_UCBC_SKIP_CVID]==DISABLED) extraItem |= igrCVID; else extraItem &= 0xfff000; } /* Extraitem: Consider L4 protocol only for path 3/4/5 */ if(rg_db.systemGlobal.fbGlobalState[FB_GLOBAL_PATH34_SKIP_DA]==DISABLED) extraItem = (pP3Data->in_l4proto<<23 | (lutDaIdx&0xfff)) ^ extraItem; else extraItem = (pP3Data->in_l4proto<<23) ^ extraItem; return extraItem; } uint32 _rtk_rg_sw_flowHashPath5ExtraItem_get(void *pFlowData, uint16 igrSVID, uint16 igrCVID) { u32 extraItem = 0; rtk_rg_asic_path5_entry_t *pP5Data = pFlowData; if(rg_db.systemGlobal.fbGlobalState[FB_GLOBAL_PATH5_SKIP_SVID]==DISABLED) extraItem |= (igrSVID<<12); else extraItem &= 0x000fff; if(rg_db.systemGlobal.fbGlobalState[FB_GLOBAL_PATH5_SKIP_CVID]==DISABLED) extraItem |= igrCVID; else extraItem &= 0xfff000; /* Extraitem: Consider L4 protocol only for path 3/4/5 */ extraItem = (pP5Data->in_l4proto<<23) ^ extraItem; return extraItem; } uint32 _rtk_rg_sw_flowHashIndexStep1_get(uint16 param1, uint16 param2, uint32 param3, uint32 param4, uint32 extraItem) { uint32 sum1=0, sum2=0, sum=0, sum_nk=0, hashIdx=0; uint32 sport, dport, sip, dip; rtk_rg_asic_fbMode_t fbMode; uint32 preHashPtn; //ASSERT_EQ(rtk_rg_asic_fbModeCtrl_get(FB_MODE_FB_MOD, &fbMode), SUCCESS); fbMode = rgpro_db.fbMode; preHashPtn = rg_db.systemGlobal.preHashPtn[FB_PREHASH_PTN_SPORT]; sport = _rtk_rg_flowHashPreProcessPort(param1, preHashPtn); preHashPtn = rg_db.systemGlobal.preHashPtn[FB_PREHASH_PTN_DPORT]; dport = _rtk_rg_flowHashPreProcessPort(param2, preHashPtn); preHashPtn = rg_db.systemGlobal.preHashPtn[FB_PREHASH_PTN_SIP]; sip = _rtk_rg_flowHashPreProcessIP(param3, preHashPtn); preHashPtn = rg_db.systemGlobal.preHashPtn[FB_PREHASH_PTN_DIP]; dip = _rtk_rg_flowHashPreProcessIP(param4, preHashPtn); sum1 = ((sip&0xfffff) + (sip>>20) + (dip&0xfffff) + (dip>>20) + sport + dport) & 0x7fffff; // sum1[22:0] sum2 = ((sum1&0xfffff) + (sum1>>20)) & 0x1fffff; // sum2[20:0] sum = ((sum2&0xfffff) + (sum>>20)) & 0x1fffff; // sum[20:0] switch(fbMode) { case FB_MODE_4K: // 4k mode: 4-way (10 bits index) sum_nk = ((sum&0x3ff) + ((sum>>10)&0x3ff) + ((sum>>20)&0x1))&0x3ff; // sum_4k[9:0] hashIdx = sum_nk ^ (extraItem&0x3ff) ^ ((extraItem>>10)&0x3ff) ^ ((extraItem>>20)&0xf); hashIdx = hashIdx<<2; // Get base entry index break; case FB_MODE_8K: // 8k mode: 1-way (13 
bits index) sum1 = (sum&0x1fff) + ((sum>>13)&0xff); sum2 = (sum1&0xfff) ^ ((sum1>>12)&0x1); sum_nk = (sum1&0x1000) | (sum2&0xfff); // sum_8k[12:0] hashIdx = sum_nk ^ (extraItem&0x1fff) ^ ((extraItem>>13)&0x7ff); break; case FB_MODE_16K: // 16k mode: 1-way (14 bits index) sum1 = (sum&0x3fff) + ((sum>>14)&0x7f); sum2 = (sum1&0xfff) ^ ((sum1>>12)&0x3); sum_nk = (sum1&0x3000) | (sum2&0xfff); // sum_8k[13:0] hashIdx = sum_nk ^ (extraItem&0x3fff) ^ ((extraItem>>14)&0x3ff); break; case FB_MODE_32K: // 32k mode: 1-way (15 bits index) sum1 = (sum&0x7fff) + ((sum>>15)&0x3f); sum2 = (sum1&0xfff) ^ ((sum1>>12)&0x7); sum_nk = (sum1&0x7000) | (sum2&0xfff); // sum_8k[14:0] hashIdx = sum_nk ^ (extraItem&0x7fff) ^ ((extraItem>>15)&0x1ff); break; } DEBUG("[FLOWHASH] index=%d, fbMode=%d, params: [0x%x,0x%x,0x%x,0x%x], extra:0x%x", hashIdx, fbMode, param1, param2, param3, param4, extraItem); //DEBUG("[FLOWHASH] index=%d, fbMode=%d, after preprocess: [0x%x,0x%x,0x%x,0x%x], sum1:0x%x", hashIdx, fbMode, sport, dport, sip, dip, sum); return hashIdx; } uint32 _rtk_rg_sw_flowHashIndexStep2_get(uint32 step1Idx) { uint32 hashidx = 0; uint32 mask = 0x7E03; // Hid_s2[14:0] = {~hid[14:10], ~hid[9], hid[8:2], ~hid[1:0]} 0x7E03= 111_1110_0000_0011 rtk_rg_asic_fbMode_t fbMode; //ASSERT_EQ(rtk_rg_asic_fbModeCtrl_get(FB_MODE_FB_MOD, &fbMode), SUCCESS); fbMode = rgpro_db.fbMode; switch(fbMode) { case FB_MODE_4K: // 4k mode: 4-way (10 bits index) hashidx = step1Idx >> 2; // shift back to 10 bits mask = mask & 0x3ff; // Hid_s2[9:0] = { ~hid[9], hid[8:2], ~hid[1:0]} => 10 bits mask hashidx = hashidx ^ mask; hashidx = hashidx << 2; // shift to 12 bits 4 way entry index break; case FB_MODE_8K: // 8k mode: 1-way (13 bits index) mask = mask & 0x1fff; hashidx = step1Idx ^ mask; break; case FB_MODE_16K: // 16k mode: 1-way (14 bits index) mask = mask & 0x3fff; hashidx = step1Idx ^ mask; break; case FB_MODE_32K: // 32k mode: 1-way (15 bits index) mask = mask & 0x7fff; hashidx = step1Idx ^ mask; break; } DEBUG("[FLOWHASH] index=%d (Step2), fbMode=%d", hashidx, fbMode); return hashidx; } uint32 _rtk_rg_flow_hashIndex(rtk_rg_table_flowPath_t flowPathEntry, uint16 igrSVID, uint16 igrCVID, uint16 lutDaIdx /*used by path3 & path4*/) { uint32 flowHashIdx; uint16 param1=0, param2=0; uint32 param3=0, param4=0; uint32 extraItem=0; if(flowPathEntry.path1.in_path==FB_PATH_12) { if(flowPathEntry.path1.in_multiple_act==0) //path 1 { param1 = (rg_db.systemGlobal.fbGlobalState[FB_GLOBAL_PATH12_SKIP_SVID]==DISABLED)?igrSVID:0; param2 = (rg_db.systemGlobal.fbGlobalState[FB_GLOBAL_PATH12_SKIP_CVID]==DISABLED)?igrCVID:0; param3 = flowPathEntry.path1.in_smac_lut_idx; param4 = flowPathEntry.path1.in_dmac_lut_idx; extraItem = 0; } else //path 2 { param1 = (rg_db.systemGlobal.fbGlobalState[FB_GLOBAL_PATH12_SKIP_SVID]==DISABLED)?igrSVID:0; param2 = (rg_db.systemGlobal.fbGlobalState[FB_GLOBAL_PATH12_SKIP_CVID]==DISABLED)?igrCVID:0; param3 = flowPathEntry.path2.in_smac_lut_idx; param4 = flowPathEntry.path2.in_dmac_lut_idx; extraItem = 0; } } else if(flowPathEntry.path1.in_path==FB_PATH_34) { if(flowPathEntry.path1.in_multiple_act==0) //path 3 { param1 = flowPathEntry.path3.in_l4_src_port; param2 = flowPathEntry.path3.in_l4_dst_port; param3 = flowPathEntry.path3.in_src_ipv4_addr; param4 = flowPathEntry.path3.in_dst_ipv4_addr; extraItem = _rtk_rg_sw_flowHashPath34ExtraItem_get(&flowPathEntry.path3, igrSVID, igrCVID, lutDaIdx); } else //path 4 { param1 = flowPathEntry.path4.in_l4_src_port; param2 = flowPathEntry.path4.in_l4_dst_port; param3 = 
flowPathEntry.path4.in_src_ipv4_addr; param4 = flowPathEntry.path4.in_dst_ipv4_addr; extraItem = _rtk_rg_sw_flowHashPath34ExtraItem_get(&flowPathEntry.path4, igrSVID, igrCVID, lutDaIdx); } } else if(flowPathEntry.path1.in_path==FB_PATH_5) //path 5 { if(flowPathEntry.path5.in_ipv4_or_ipv6==0 && flowPathEntry.path5.out_l4_act==1 && flowPathEntry.path5.out_l4_direction==0) { param1 = flowPathEntry.path5.in_l4_src_port; param2 = flowPathEntry.path5.in_l4_dst_port; param3 = flowPathEntry.path5.in_src_ipv4_addr; param4 = rg_db.systemGlobal.interfaceInfo[flowPathEntry.path5.in_intf_idx].p_wanStaticInfo->ip_addr; extraItem = _rtk_rg_sw_flowHashPath5ExtraItem_get(&flowPathEntry.path5, igrSVID, igrCVID); } else { param1 = flowPathEntry.path5.in_l4_src_port; param2 = flowPathEntry.path5.in_l4_dst_port; param3 = flowPathEntry.path5.in_src_ipv4_addr; param4 = flowPathEntry.path5.in_dst_ipv4_addr; extraItem = _rtk_rg_sw_flowHashPath5ExtraItem_get(&flowPathEntry.path5, igrSVID, igrCVID); } } else if(flowPathEntry.path1.in_path==FB_PATH_6) //path 6 { param1 = flowPathEntry.path6.in_smac_lut_idx; param2 = flowPathEntry.path6.in_dmac_lut_idx; param3 = flowPathEntry.path6.in_src_ipv4_addr; param4 = flowPathEntry.path6.in_dst_ipv4_addr; if(rg_db.systemGlobal.fbGlobalState[FB_GLOBAL_PATH6_SKIP_SVID]==DISABLED) extraItem |= (igrSVID<<12); else extraItem &= 0x000fff; if(rg_db.systemGlobal.fbGlobalState[FB_GLOBAL_PATH6_SKIP_CVID]==DISABLED) extraItem |= igrCVID; else extraItem &= 0xfff000; } flowHashIdx = _rtk_rg_sw_flowHashIndexStep1_get(param1, param2, param3, param4, extraItem); if(flowPathEntry.path1.in_multiple_act==1 && (flowPathEntry.path1.in_path==0 || flowPathEntry.path1.in_path==1)) flowHashIdx = _rtk_rg_sw_flowHashIndexStep2_get(flowHashIdx); return flowHashIdx; } uint32 _rtk_rg_flow_matchOrNot(uint32 flow_idx, rtk_rg_table_flowPath_t flowPathEntry) { if(rg_db.flow[flow_idx].flowPath.path1.valid==0) { WARNING("flow invalid!!"); return FALSE; } if(rg_db.flow[flow_idx].flowPath.path1.in_path!=flowPathEntry.path1.in_path) return FALSE; if(flowPathEntry.path1.in_path==FB_PATH_12) { if(rg_db.flow[flow_idx].flowPath.path1.in_multiple_act!=flowPathEntry.path1.in_multiple_act) return FALSE; if(flowPathEntry.path1.in_multiple_act==0) //path 1 { if(rg_db.flow[flow_idx].flowPath.path1.in_smac_lut_idx!=flowPathEntry.path1.in_smac_lut_idx) return FALSE; if(rg_db.flow[flow_idx].flowPath.path1.in_dmac_lut_idx!=flowPathEntry.path1.in_dmac_lut_idx) return FALSE; if(rg_db.systemGlobal.fbGlobalState[FB_GLOBAL_PATH12_SKIP_SVID]==DISABLED) { if(rg_db.flow[flow_idx].flowPath.path1.in_stagif!=flowPathEntry.path1.in_stagif) return FALSE; if(rg_db.flow[flow_idx].flowPath.path1.in_svlan_id!=flowPathEntry.path1.in_svlan_id) return FALSE; } if(rg_db.systemGlobal.fbGlobalState[FB_GLOBAL_PATH12_SKIP_CVID]==DISABLED) { if(rg_db.flow[flow_idx].flowPath.path1.in_ctagif!=flowPathEntry.path1.in_ctagif) return FALSE; if(rg_db.flow[flow_idx].flowPath.path1.in_cvlan_id!=flowPathEntry.path1.in_cvlan_id) return FALSE; } if(rg_db.flow[flow_idx].flowPath.path1.in_pppoeif!=flowPathEntry.path1.in_pppoeif) return FALSE; if(rg_db.flow[flow_idx].flowPath.path1.in_pppoe_sid_check!=flowPathEntry.path1.in_pppoe_sid_check) return FALSE; if(flowPathEntry.path1.in_pppoeif && flowPathEntry.path1.in_pppoe_sid_check) { if(rg_db.flow[flow_idx].flowPath.path1.in_pppoe_sid!=flowPathEntry.path1.in_pppoe_sid) return FALSE; } if(rg_db.systemGlobal.flowCheckState[FB_FLOW_CHECK_PATH12_TOS]==ENABLED) { 
if(rg_db.flow[flow_idx].flowPath.path1.in_tos!=flowPathEntry.path1.in_tos) return FALSE; } if(rg_db.systemGlobal.flowCheckState[FB_FLOW_CHECK_PATH12_PROTOCOL]==ENABLED) { if(rg_db.flow[flow_idx].flowPath.path1.in_protocol!=flowPathEntry.path1.in_protocol) return FALSE; } if(rg_db.systemGlobal.flowCheckState[FB_FLOW_CHECK_PATH12_SPA]==ENABLED) { if(rg_db.flow[flow_idx].flowPath.path1.in_spa!=flowPathEntry.path1.in_spa) return FALSE; if(rg_db.flow[flow_idx].flowPath.path1.in_ext_spa!=flowPathEntry.path1.in_ext_spa) return FALSE; } if( rg_db.systemGlobal.flowCheckState[FB_FLOW_CHECK_PATH12_STREAM_IDX]==ENABLED && rg_db.systemGlobal.initParam.wanPortGponMode && rg_db.flow[flow_idx].flowPath.path1.in_spa==RTK_RG_MAC_PORT_PON && flowPathEntry.path1.in_spa==RTK_RG_MAC_PORT_PON) { if(rg_db.flow[flow_idx].flowPath.path1.in_out_stream_idx!=flowPathEntry.path1.in_out_stream_idx) return FALSE; } } else //path 2 { if(rg_db.flow[flow_idx].flowPath.path2.in_smac_lut_idx!=flowPathEntry.path2.in_smac_lut_idx) return FALSE; if(rg_db.flow[flow_idx].flowPath.path2.in_dmac_lut_idx!=flowPathEntry.path2.in_dmac_lut_idx) return FALSE; if(rg_db.systemGlobal.fbGlobalState[FB_GLOBAL_PATH12_SKIP_SVID]==DISABLED) { if(rg_db.flow[flow_idx].flowPath.path2.in_stagif!=flowPathEntry.path2.in_stagif) return FALSE; if(rg_db.flow[flow_idx].flowPath.path2.in_svlan_id!=flowPathEntry.path2.in_svlan_id) return FALSE; } if(rg_db.systemGlobal.fbGlobalState[FB_GLOBAL_PATH12_SKIP_CVID]==DISABLED) { if(rg_db.flow[flow_idx].flowPath.path2.in_ctagif!=flowPathEntry.path2.in_ctagif) return FALSE; if(rg_db.flow[flow_idx].flowPath.path2.in_cvlan_id!=flowPathEntry.path2.in_cvlan_id) return FALSE; } if(rg_db.flow[flow_idx].flowPath.path2.in_pppoeif!=flowPathEntry.path2.in_pppoeif) return FALSE; if(rg_db.flow[flow_idx].flowPath.path2.in_pppoe_sid_check!=flowPathEntry.path2.in_pppoe_sid_check) return FALSE; if(flowPathEntry.path2.in_pppoeif && flowPathEntry.path2.in_pppoe_sid_check) { if(rg_db.flow[flow_idx].flowPath.path2.in_pppoe_sid!=flowPathEntry.path2.in_pppoe_sid) return FALSE; } if(rg_db.systemGlobal.flowCheckState[FB_FLOW_CHECK_PATH12_TOS]==ENABLED) { if(rg_db.flow[flow_idx].flowPath.path2.in_tos!=flowPathEntry.path2.in_tos) return FALSE; } if(rg_db.systemGlobal.flowCheckState[FB_FLOW_CHECK_PATH12_PROTOCOL]==ENABLED) { if(rg_db.flow[flow_idx].flowPath.path2.in_protocol!=flowPathEntry.path2.in_protocol) return FALSE; } if(rg_db.systemGlobal.flowCheckState[FB_FLOW_CHECK_PATH12_SPA]==ENABLED) { if(rg_db.flow[flow_idx].flowPath.path2.in_spa!=flowPathEntry.path2.in_spa) return FALSE; if(rg_db.flow[flow_idx].flowPath.path2.in_ext_spa!=flowPathEntry.path2.in_ext_spa) return FALSE; } if( rg_db.systemGlobal.flowCheckState[FB_FLOW_CHECK_PATH12_STREAM_IDX]==ENABLED && rg_db.systemGlobal.initParam.wanPortGponMode && rg_db.flow[flow_idx].flowPath.path2.in_spa==RTK_RG_MAC_PORT_PON && flowPathEntry.path2.in_spa==RTK_RG_MAC_PORT_PON) { if(rg_db.flow[flow_idx].flowPath.path2.in_stream_idx!=flowPathEntry.path2.in_stream_idx) return FALSE; } } } else if(flowPathEntry.path1.in_path==FB_PATH_34) { u8 isMulticast = FALSE; if(rg_db.flow[flow_idx].flowPath.path1.in_multiple_act!=flowPathEntry.path1.in_multiple_act) return FALSE; if(flowPathEntry.path1.in_multiple_act==0) //path 3 { if(rg_db.flow[flow_idx].flowPath.path3.in_ipv4_or_ipv6!=flowPathEntry.path3.in_ipv4_or_ipv6) return FALSE; if(rg_db.flow[flow_idx].flowPath.path3.in_src_ipv4_addr!=flowPathEntry.path3.in_src_ipv4_addr) return FALSE; 
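			//the remaining path-3 checks compare the rest of the 5-tuple, then the S/C-tag presence (subject to the multicast/unicast SVID/CVID skip switches) and the PPPoE/ToS options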
if(rg_db.flow[flow_idx].flowPath.path3.in_dst_ipv4_addr!=flowPathEntry.path3.in_dst_ipv4_addr) return FALSE; if(rg_db.flow[flow_idx].flowPath.path3.in_l4_src_port!=flowPathEntry.path3.in_l4_src_port) return FALSE; if(rg_db.flow[flow_idx].flowPath.path3.in_l4_dst_port!=flowPathEntry.path3.in_l4_dst_port) return FALSE; if(rg_db.flow[flow_idx].flowPath.path3.in_l4proto!=flowPathEntry.path3.in_l4proto) return FALSE; //check multicast or not if(flowPathEntry.path3.in_ipv4_or_ipv6==1) isMulticast = (flowPathEntry.path3.in_dst_ipv6_addr_hash & FLOW_V6HASHADDR_MC_BIT)?TRUE:FALSE; else isMulticast = ((flowPathEntry.path3.in_dst_ipv4_addr > FLOW_V4ADDR_MC_LO_BOUND) && (flowPathEntry.path3.in_dst_ipv4_addr < FLOW_V4ADDR_MC_UP_BOUND))?TRUE:FALSE; if((isMulticast && rg_db.systemGlobal.fbGlobalState[FB_GLOBAL_PATH34_MC_SKIP_SVID]==DISABLED) || (!isMulticast && rg_db.systemGlobal.fbGlobalState[FB_GLOBAL_PATH34_UCBC_SKIP_SVID]==DISABLED)) { if(rg_db.flow[flow_idx].flowPath.path3.in_stagif!=flowPathEntry.path3.in_stagif) return FALSE; } if((isMulticast && rg_db.systemGlobal.fbGlobalState[FB_GLOBAL_PATH34_MC_SKIP_CVID]==DISABLED) || (!isMulticast && rg_db.systemGlobal.fbGlobalState[FB_GLOBAL_PATH34_UCBC_SKIP_CVID]==DISABLED)) { if(rg_db.flow[flow_idx].flowPath.path3.in_ctagif!=flowPathEntry.path3.in_ctagif) return FALSE; } if(rg_db.flow[flow_idx].flowPath.path3.in_pppoeif!=flowPathEntry.path3.in_pppoeif) return FALSE; if(rg_db.flow[flow_idx].flowPath.path3.in_pppoe_sid_check!=flowPathEntry.path3.in_pppoe_sid_check) return FALSE; if(flowPathEntry.path3.in_pppoeif && flowPathEntry.path3.in_pppoe_sid_check) { if(rg_db.flow[flow_idx].flowPath.path3.in_intf_idx!=flowPathEntry.path3.in_intf_idx) return FALSE; } if(rg_db.systemGlobal.flowCheckState[FB_FLOW_CHECK_PATH34_TOS]==ENABLED) { if(rg_db.flow[flow_idx].flowPath.path3.in_tos!=flowPathEntry.path3.in_tos) return FALSE; } } else //path 4 { if(rg_db.flow[flow_idx].flowPath.path4.in_ipv4_or_ipv6!=flowPathEntry.path4.in_ipv4_or_ipv6) return FALSE; if(rg_db.flow[flow_idx].flowPath.path4.in_src_ipv4_addr!=flowPathEntry.path4.in_src_ipv4_addr) return FALSE; if(rg_db.flow[flow_idx].flowPath.path4.in_dst_ipv4_addr!=flowPathEntry.path4.in_dst_ipv4_addr) return FALSE; if(rg_db.flow[flow_idx].flowPath.path4.in_l4_src_port!=flowPathEntry.path4.in_l4_src_port) return FALSE; if(rg_db.flow[flow_idx].flowPath.path4.in_l4_dst_port!=flowPathEntry.path4.in_l4_dst_port) return FALSE; if(rg_db.flow[flow_idx].flowPath.path4.in_l4proto!=flowPathEntry.path4.in_l4proto) return FALSE; //check multicast or not if(flowPathEntry.path4.in_ipv4_or_ipv6==1) isMulticast = (flowPathEntry.path4.in_dst_ipv6_addr_hash & FLOW_V6HASHADDR_MC_BIT)?TRUE:FALSE; else isMulticast = ((flowPathEntry.path4.in_dst_ipv4_addr > FLOW_V4ADDR_MC_LO_BOUND) && (flowPathEntry.path4.in_dst_ipv4_addr < FLOW_V4ADDR_MC_UP_BOUND))?TRUE:FALSE; if((isMulticast && rg_db.systemGlobal.fbGlobalState[FB_GLOBAL_PATH34_MC_SKIP_SVID]==DISABLED) || (!isMulticast && rg_db.systemGlobal.fbGlobalState[FB_GLOBAL_PATH34_UCBC_SKIP_SVID]==DISABLED)) { if(rg_db.flow[flow_idx].flowPath.path4.in_stagif!=flowPathEntry.path4.in_stagif) return FALSE; } if((isMulticast && rg_db.systemGlobal.fbGlobalState[FB_GLOBAL_PATH34_MC_SKIP_CVID]==DISABLED) || (!isMulticast && rg_db.systemGlobal.fbGlobalState[FB_GLOBAL_PATH34_UCBC_SKIP_CVID]==DISABLED)) { if(rg_db.flow[flow_idx].flowPath.path4.in_ctagif!=flowPathEntry.path4.in_ctagif) return FALSE; } if(rg_db.flow[flow_idx].flowPath.path4.in_pppoeif!=flowPathEntry.path4.in_pppoeif) return FALSE; 
if(rg_db.flow[flow_idx].flowPath.path4.in_pppoe_sid_check!=flowPathEntry.path4.in_pppoe_sid_check) return FALSE; if(flowPathEntry.path4.in_pppoeif && flowPathEntry.path4.in_pppoe_sid_check) { if(rg_db.flow[flow_idx].flowPath.path4.in_intf_idx!=flowPathEntry.path4.in_intf_idx) return FALSE; } if(rg_db.systemGlobal.flowCheckState[FB_FLOW_CHECK_PATH34_TOS]==ENABLED) { if(rg_db.flow[flow_idx].flowPath.path4.in_tos!=flowPathEntry.path4.in_tos) return FALSE; } } } else if(flowPathEntry.path1.in_path==FB_PATH_5) //path 5 { if(rg_db.flow[flow_idx].flowPath.path5.in_ipv4_or_ipv6!=flowPathEntry.path5.in_ipv4_or_ipv6) return FALSE; if(rg_db.flow[flow_idx].flowPath.path5.out_l4_act!=flowPathEntry.path5.out_l4_act) // Routing: 0, NAPT: 1 return FALSE; if(flowPathEntry.path5.out_l4_act) { if(rg_db.flow[flow_idx].flowPath.path5.out_l4_direction!=flowPathEntry.path5.out_l4_direction) // outbound: 1, inbound: 0 return FALSE; } if(rg_db.flow[flow_idx].flowPath.path5.in_src_ipv4_addr!=flowPathEntry.path5.in_src_ipv4_addr) return FALSE; if(flowPathEntry.path5.in_ipv4_or_ipv6==0 && flowPathEntry.path5.out_l4_act==1 && flowPathEntry.path5.out_l4_direction==0) //naptr { if(rg_db.flow[flow_idx].flowPath.path5.in_intf_idx!=flowPathEntry.path5.in_intf_idx) return FALSE; } else //napt, routing { if(rg_db.flow[flow_idx].flowPath.path5.in_dst_ipv4_addr!=flowPathEntry.path5.in_dst_ipv4_addr) return FALSE; } if(rg_db.flow[flow_idx].flowPath.path5.in_l4_src_port!=flowPathEntry.path5.in_l4_src_port) return FALSE; if(rg_db.flow[flow_idx].flowPath.path5.in_l4_dst_port!=flowPathEntry.path5.in_l4_dst_port) return FALSE; if(rg_db.flow[flow_idx].flowPath.path5.in_l4proto!=flowPathEntry.path5.in_l4proto) return FALSE; if(rg_db.systemGlobal.fbGlobalState[FB_GLOBAL_PATH5_SKIP_SVID]==DISABLED) { if(rg_db.flow[flow_idx].flowPath.path5.in_stagif!=flowPathEntry.path5.in_stagif) return FALSE; } if(rg_db.systemGlobal.fbGlobalState[FB_GLOBAL_PATH5_SKIP_CVID]==DISABLED) { if(rg_db.flow[flow_idx].flowPath.path5.in_ctagif!=flowPathEntry.path5.in_ctagif) return FALSE; } if(rg_db.flow[flow_idx].flowPath.path5.in_pppoeif!=flowPathEntry.path5.in_pppoeif) return FALSE; if(flowPathEntry.path5.in_pppoeif) { if(rg_db.flow[flow_idx].flowPath.path5.in_intf_idx!=flowPathEntry.path5.in_intf_idx) return FALSE; } if(rg_db.systemGlobal.flowCheckState[FB_FLOW_CHECK_PATH5_TOS]==ENABLED) { if(rg_db.flow[flow_idx].flowPath.path5.in_tos!=flowPathEntry.path5.in_tos) return FALSE; } } else if(flowPathEntry.path1.in_path==FB_PATH_6) //path 6 { if(rg_db.systemGlobal.flowCheckState[FB_FLOW_CHECK_PATH6_SMAC_IDX]==ENABLED) { if(rg_db.flow[flow_idx].flowPath.path6.in_smac_lut_idx!=flowPathEntry.path6.in_smac_lut_idx) return FALSE; } if(rg_db.systemGlobal.flowCheckState[FB_FLOW_CHECK_PATH6_DMAC_IDX]==ENABLED) { if(rg_db.flow[flow_idx].flowPath.path6.in_dmac_lut_idx!=flowPathEntry.path6.in_dmac_lut_idx) return FALSE; } if(rg_db.systemGlobal.fbGlobalState[FB_GLOBAL_PATH6_SKIP_SVID]==DISABLED) { if(rg_db.flow[flow_idx].flowPath.path6.in_stagif!=flowPathEntry.path6.in_stagif) return FALSE; } if(rg_db.systemGlobal.fbGlobalState[FB_GLOBAL_PATH6_SKIP_CVID]==DISABLED) { if(rg_db.flow[flow_idx].flowPath.path6.in_ctagif!=flowPathEntry.path6.in_ctagif) return FALSE; } if(rg_db.flow[flow_idx].flowPath.path6.in_pppoeif!=flowPathEntry.path6.in_pppoeif) return FALSE; if(rg_db.flow[flow_idx].flowPath.path6.in_pppoe_sid_check!=flowPathEntry.path6.in_pppoe_sid_check) return FALSE; if(flowPathEntry.path6.in_pppoeif && flowPathEntry.path6.in_pppoe_sid_check) { 
if(rg_db.flow[flow_idx].flowPath.path6.in_pppoe_sid!=flowPathEntry.path6.in_pppoe_sid) return FALSE; } if(rg_db.flow[flow_idx].flowPath.path6.in_dsliteif!=flowPathEntry.path6.in_dsliteif) //check ipv6 or not return FALSE; if(rg_db.systemGlobal.flowCheckState[FB_FLOW_CHECK_PATH6_SIP]==ENABLED) { if(rg_db.flow[flow_idx].flowPath.path6.in_src_ipv4_addr!=flowPathEntry.path6.in_src_ipv4_addr) return FALSE; } if(rg_db.systemGlobal.flowCheckState[FB_FLOW_CHECK_PATH6_DIP]==ENABLED) { if(rg_db.flow[flow_idx].flowPath.path6.in_dst_ipv4_addr!=flowPathEntry.path6.in_dst_ipv4_addr) return FALSE; } if(rg_db.systemGlobal.flowCheckState[FB_FLOW_CHECK_PATH6_SPORT]==ENABLED) { if(rg_db.flow[flow_idx].flowPath.path6.in_l4_src_port!=flowPathEntry.path6.in_l4_src_port) return FALSE; } if(rg_db.systemGlobal.flowCheckState[FB_FLOW_CHECK_PATH6_DPORT]==ENABLED) { if(rg_db.flow[flow_idx].flowPath.path6.in_l4_dst_port!=flowPathEntry.path6.in_l4_dst_port) return FALSE; } if(rg_db.systemGlobal.flowCheckState[FB_FLOW_CHECK_PATH6_PROTOCOL]==ENABLED) { if(rg_db.flow[flow_idx].flowPath.path6.in_protocol!=flowPathEntry.path6.in_protocol) return FALSE; } if(rg_db.flow[flow_idx].flowPath.path6.in_pptpif!=flowPathEntry.path6.in_pptpif) return FALSE; if(rg_db.systemGlobal.flowCheckState[FB_FLOW_CHECK_PATH6_GRE_CALL_ID]==ENABLED) { if(rg_db.flow[flow_idx].flowPath.path6.in_gre_call_id!=flowPathEntry.path6.in_gre_call_id) return FALSE; } if(rg_db.flow[flow_idx].flowPath.path6.in_l2tpif!=flowPathEntry.path6.in_l2tpif) return FALSE; if(rg_db.systemGlobal.flowCheckState[FB_FLOW_CHECK_PATH6_L2TP_TUNNEL_ID]==ENABLED) { if(rg_db.flow[flow_idx].flowPath.path6.in_l2tp_tunnel_id!=flowPathEntry.path6.in_l2tp_tunnel_id) return FALSE; } if(rg_db.systemGlobal.flowCheckState[FB_FLOW_CHECK_PATH6_L2TP_SID]==ENABLED) { if(rg_db.flow[flow_idx].flowPath.path6.in_l2tp_session_id!=flowPathEntry.path6.in_l2tp_session_id) return FALSE; } } return TRUE; } uint32 _rtk_rg_checkGwMac(rtk_mac_t checkMac) { int i; for(i=0;i<MAX_NETIF_SW_TABLE_SIZE;i++) { if(rg_db.systemGlobal.interfaceInfo[i].valid==0) continue; if(rg_db.systemGlobal.interfaceInfo[i].storedInfo.is_wan) { if(memcmp(rg_db.systemGlobal.interfaceInfo[i].storedInfo.wan_intf.wan_intf_conf.gmac.octet, checkMac.octet, ETHER_ADDR_LEN)==0) return TRUE; } else { if(memcmp(rg_db.systemGlobal.interfaceInfo[i].storedInfo.lan_intf.gmac.octet, checkMac.octet, ETHER_ADDR_LEN)==0) return TRUE; } } return FALSE; } rtk_rg_err_code_t _rtk_rg_flow_entryCheckAndInit(rtk_rg_table_flowPath_t *flowPathEntry, uint16 lutDaIdx /*used by path3 & path4*/) { flowPathEntry->path1.valid = TRUE; if(flowPathEntry->path1.in_path==FB_PATH_12) { if(flowPathEntry->path1.in_multiple_act==0) //path 1 { if(rg_db.lut[flowPathEntry->path1.in_dmac_lut_idx].valid==0) return (RT_ERR_RG_L2_ENTRY_NOT_FOUND); //init vlan if(flowPathEntry->path1.in_stagif==0) flowPathEntry->path1.in_svlan_id = 0; if(flowPathEntry->path1.in_ctagif==0) flowPathEntry->path1.in_cvlan_id = 0; //init pppoe if(rg_db.lut[flowPathEntry->path1.in_dmac_lut_idx].rtk_lut.entryType==RTK_LUT_L2UC) flowPathEntry->path1.in_pppoe_sid_check = _rtk_rg_checkGwMac(rg_db.lut[flowPathEntry->path1.in_dmac_lut_idx].rtk_lut.entry.l2UcEntry.mac); else flowPathEntry->path1.in_pppoe_sid_check = FALSE; if(flowPathEntry->path1.in_pppoeif==0) flowPathEntry->path1.in_pppoe_sid = 0; //init tos check flowPathEntry->path1.in_tos_check = rg_db.systemGlobal.flowCheckState[FB_FLOW_CHECK_PATH12_TOS]; //init spa check flowPathEntry->path1.in_spa_check = 
rg_db.systemGlobal.flowCheckState[FB_FLOW_CHECK_PATH12_SPA]; //init protocol check if(rg_db.systemGlobal.flowCheckState[FB_FLOW_CHECK_PATH12_PROTOCOL]==DISABLED) flowPathEntry->path1.in_protocol = FB_INPROTOCOL_ALL_ACCEPT; //init ingerss stream idx check if(flowPathEntry->path1.in_spa==RTK_RG_MAC_PORT_PON) flowPathEntry->path1.in_out_stream_idx_check_act = rg_db.systemGlobal.flowCheckState[FB_FLOW_CHECK_PATH12_STREAM_IDX]; } else //path 2 { if(rg_db.lut[flowPathEntry->path2.in_dmac_lut_idx].valid==0) return (RT_ERR_RG_L2_ENTRY_NOT_FOUND); //init vlan if(flowPathEntry->path2.in_stagif==0) flowPathEntry->path2.in_svlan_id = 0; if(flowPathEntry->path2.in_ctagif==0) flowPathEntry->path2.in_cvlan_id = 0; //init pppoe if(rg_db.lut[flowPathEntry->path2.in_dmac_lut_idx].rtk_lut.entryType==RTK_LUT_L2UC) flowPathEntry->path2.in_pppoe_sid_check = _rtk_rg_checkGwMac(rg_db.lut[flowPathEntry->path2.in_dmac_lut_idx].rtk_lut.entry.l2UcEntry.mac); else flowPathEntry->path2.in_pppoe_sid_check = FALSE; if(flowPathEntry->path2.in_pppoeif==0) flowPathEntry->path2.in_pppoe_sid = 0; //init tos check flowPathEntry->path2.in_tos_check = rg_db.systemGlobal.flowCheckState[FB_FLOW_CHECK_PATH12_TOS]; //init spa check flowPathEntry->path2.in_spa_check = rg_db.systemGlobal.flowCheckState[FB_FLOW_CHECK_PATH12_SPA]; //init protocol check if(rg_db.systemGlobal.flowCheckState[FB_FLOW_CHECK_PATH12_PROTOCOL]==DISABLED) flowPathEntry->path2.in_protocol = FB_INPROTOCOL_ALL_ACCEPT; //init ingerss stream idx check if(flowPathEntry->path2.in_spa==RTK_RG_MAC_PORT_PON) flowPathEntry->path2.in_stream_idx_check= rg_db.systemGlobal.flowCheckState[FB_FLOW_CHECK_PATH12_STREAM_IDX]; } } else if(flowPathEntry->path1.in_path==FB_PATH_34) { if(rg_db.lut[lutDaIdx].valid==0) return (RT_ERR_RG_L2_ENTRY_NOT_FOUND); if(flowPathEntry->path1.in_multiple_act==0) //path 3 { //init pppoe check if(rg_db.lut[lutDaIdx].rtk_lut.entryType==RTK_LUT_L2UC) flowPathEntry->path3.in_pppoe_sid_check = _rtk_rg_checkGwMac(rg_db.lut[lutDaIdx].rtk_lut.entry.l2UcEntry.mac); else flowPathEntry->path3.in_pppoe_sid_check = FALSE; //init tos check flowPathEntry->path3.in_tos_check = rg_db.systemGlobal.flowCheckState[FB_FLOW_CHECK_PATH34_TOS]; } else //path 4 { //init pppoe check if(rg_db.lut[lutDaIdx].rtk_lut.entryType==RTK_LUT_L2UC) flowPathEntry->path4.in_pppoe_sid_check = _rtk_rg_checkGwMac(rg_db.lut[lutDaIdx].rtk_lut.entry.l2UcEntry.mac); else flowPathEntry->path4.in_pppoe_sid_check = FALSE; //init tos check flowPathEntry->path4.in_tos_check = rg_db.systemGlobal.flowCheckState[FB_FLOW_CHECK_PATH34_TOS]; } } else if(flowPathEntry->path1.in_path==FB_PATH_5) //path 5 { //init tos check flowPathEntry->path5.in_tos_check = rg_db.systemGlobal.flowCheckState[FB_FLOW_CHECK_PATH5_TOS]; } else if(flowPathEntry->path1.in_path==FB_PATH_6) //path 6 { //init mac idx check flowPathEntry->path6.in_src_mac_check = rg_db.systemGlobal.flowCheckState[FB_FLOW_CHECK_PATH6_SMAC_IDX]; flowPathEntry->path6.in_dst_mac_check = rg_db.systemGlobal.flowCheckState[FB_FLOW_CHECK_PATH6_DMAC_IDX]; //init ip check flowPathEntry->path6.in_src_ip_check = rg_db.systemGlobal.flowCheckState[FB_FLOW_CHECK_PATH6_SIP]; flowPathEntry->path6.in_dst_ip_check = rg_db.systemGlobal.flowCheckState[FB_FLOW_CHECK_PATH6_DIP]; //init port check flowPathEntry->path6.in_l4_src_port_check = rg_db.systemGlobal.flowCheckState[FB_FLOW_CHECK_PATH6_SPORT]; flowPathEntry->path6.in_l4_dst_port_check = rg_db.systemGlobal.flowCheckState[FB_FLOW_CHECK_PATH6_DPORT]; //init pppoe flowPathEntry->path6.in_pppoe_sid_check = TRUE; 
if(flowPathEntry->path6.in_pppoeif==0) flowPathEntry->path6.in_pppoe_sid = 0; //init tos check flowPathEntry->path6.in_tos_check = rg_db.systemGlobal.flowCheckState[FB_FLOW_CHECK_PATH6_TOS]; //init protocol check if(rg_db.systemGlobal.flowCheckState[FB_FLOW_CHECK_PATH6_PROTOCOL]==DISABLED) flowPathEntry->path6.in_protocol = FB_INPROTOCOL_ALL_ACCEPT; //init gre call id check flowPathEntry->path6.in_gre_call_id_check = rg_db.systemGlobal.flowCheckState[FB_FLOW_CHECK_PATH6_GRE_CALL_ID]; //init l2tp tunnel id check flowPathEntry->path6.in_l2tp_tunnel_id_check = rg_db.systemGlobal.flowCheckState[FB_FLOW_CHECK_PATH6_L2TP_TUNNEL_ID]; //init l2tp tunnel sid check flowPathEntry->path6.in_l2tp_session_id_check = rg_db.systemGlobal.flowCheckState[FB_FLOW_CHECK_PATH6_L2TP_SID]; } return (RT_ERR_RG_OK); } rtk_rg_err_code_t _rtk_rg_flow_add(uint32 *flow_idx, rtk_rg_table_flowPath_t *flowPathEntry, uint16 igrSVID, uint16 igrCVID, uint16 lutDaIdx /*used by path3 & path4*/, int32 naptIdx, int32 arpOrNeighborIdx, uint32 staticEntry) { rtk_rg_err_code_t ret; uint32 flowHashIdx, i, search_index; int32 first_invalid=FAIL, flowHitIdx=FAIL, longestIdleTimeFlowIdx=FAIL; uint32 longestIdleTime=0; //Check input parameters if(rg_db.systemGlobal.rgInit!=1) RETURN_ERR(RT_ERR_RG_NOT_INIT); ret = _rtk_rg_flow_entryCheckAndInit(flowPathEntry, lutDaIdx); if(ret!=RT_ERR_RG_OK) RETURN_ERR(ret); flowHashIdx = _rtk_rg_flow_hashIndex(*flowPathEntry, igrSVID, igrCVID, lutDaIdx); DEBUG("flowHashIdx=%d", flowHashIdx); for(i=0; i<MAX_FLOW_WAYS; i++) { search_index = flowHashIdx+i; if(rg_db.flow[search_index].flowPath.path1.valid==0) { if(first_invalid==FAIL) first_invalid = search_index; continue; } if(_rtk_rg_flow_matchOrNot(search_index, *flowPathEntry)) { flowHitIdx = search_index; break; } if(rg_db.flow[search_index].canBeReplaced) { if(first_invalid==FAIL) first_invalid = search_index; continue; } if(rg_db.flow[search_index].staticEntry==0 && rg_db.flow[search_index].idleSecs>=longestIdleTime) { longestIdleTimeFlowIdx = search_index; longestIdleTime = rg_db.flow[search_index].idleSecs; } } #if defined(CONFIG_RG_FLOW_4K_MODE) if(i==MAX_FLOW_WAYS && !(flowPathEntry->path1.in_path==FB_PATH_34 && rg_db.systemGlobal.fbGlobalState[FB_GLOBAL_PATH34_SKIP_DA]==DISABLED)) //if path34 da hash is enabled, path34 can not be added to TCAM. 
{ rtk_rg_flowTcam_linkList_t *pFlowTcamEntry, *pNextFlowTcamEntry; if(first_invalid==FAIL) { if(!list_empty(&rg_db.flowTcamFreeListHead)) { list_for_each_entry_safe(pFlowTcamEntry, pNextFlowTcamEntry, &rg_db.flowTcamFreeListHead, flowTcam_list) //just return the first entry right behind of head { first_invalid = pFlowTcamEntry->idx; break; } } } if(!list_empty(&rg_db.flowTcamListHead[flowHashIdx>>MAX_FLOW_WAYS_SHIFT])) { list_for_each_entry_safe(pFlowTcamEntry, pNextFlowTcamEntry, &rg_db.flowTcamListHead[flowHashIdx>>MAX_FLOW_WAYS_SHIFT], flowTcam_list) //just return the first entry right behind of head { search_index = pFlowTcamEntry->idx; if(_rtk_rg_flow_matchOrNot(search_index, *flowPathEntry)) { flowHitIdx = search_index; break; } if(rg_db.flow[search_index].canBeReplaced) { if(first_invalid==FAIL) first_invalid = search_index; continue; } if(rg_db.flow[search_index].staticEntry==0 && rg_db.flow[search_index].idleSecs>=longestIdleTime) { longestIdleTimeFlowIdx = search_index; longestIdleTime = rg_db.flow[search_index].idleSecs; } } } } #endif if(flowHitIdx!=FAIL) //hit flow entry { TRACE("Match flow[%d]", flowHitIdx); *flow_idx = flowHitIdx; } else //do not hit any flow entry { if(first_invalid!=FAIL) { TRACE("Found invalid flow[%d]", first_invalid); *flow_idx = first_invalid; } else { if(staticEntry) //force add { if(longestIdleTimeFlowIdx!=FAIL) { TRACE("forcely add static entry, replace longest idel time flow %d", longestIdleTimeFlowIdx); ASSERT_EQ(_rtk_rg_flow_del(longestIdleTimeFlowIdx, 0), RT_ERR_RG_OK); *flow_idx = longestIdleTimeFlowIdx; } else { WARNING("HW flow entry full and all entries are static!"); *flow_idx = flowHashIdx; return (RT_ERR_RG_FLOW_FULL); } } else { TRACE("HW flow entry full."); *flow_idx = flowHashIdx; return (RT_ERR_RG_FLOW_FULL); } } } if(flowPathEntry->path1.in_path==FB_PATH_12) { if(flowPathEntry->path1.in_multiple_act==0) //path 1 { ASSERT_EQ(RTK_RG_ASIC_FLOWPATH1_SET(*flow_idx, &flowPathEntry->path1), RT_ERR_RG_OK); } else //path 2 { ASSERT_EQ(RTK_RG_ASIC_FLOWPATH2_SET(*flow_idx, &flowPathEntry->path2), RT_ERR_RG_OK); } } else if(flowPathEntry->path1.in_path==FB_PATH_34) { if(flowPathEntry->path1.in_multiple_act==0) //path 3 { ASSERT_EQ(RTK_RG_ASIC_FLOWPATH3_SET(*flow_idx, &flowPathEntry->path3), RT_ERR_RG_OK); } else //path 4 { ASSERT_EQ(RTK_RG_ASIC_FLOWPATH4_SET(*flow_idx, &flowPathEntry->path4), RT_ERR_RG_OK); } } else if(flowPathEntry->path1.in_path==FB_PATH_5) //path 5 { ASSERT_EQ(RTK_RG_ASIC_FLOWPATH5_SET(*flow_idx, &flowPathEntry->path5), RT_ERR_RG_OK); } else if(flowPathEntry->path1.in_path==FB_PATH_6) //path 6 { ASSERT_EQ(RTK_RG_ASIC_FLOWPATH6_SET(*flow_idx, &flowPathEntry->path6), RT_ERR_RG_OK); } TABLE("Add flow[%d]", *flow_idx); #if defined(CONFIG_RG_FLOW_4K_MODE) //update TCAM list if(*flow_idx>=MAX_FLOW_TABLE_SIZE && *flow_idx<MAX_FLOW_HW_TABLE_SIZE) { _rtk_rg_flowTcamListAdd(flowHashIdx>>MAX_FLOW_WAYS_SHIFT, *flow_idx); } #endif rg_db.flow[*flow_idx].naptIdx = (naptIdx>=0)?naptIdx:FAIL; rg_db.flow[*flow_idx].arpOrNeighborIdx = (arpOrNeighborIdx>=0)?arpOrNeighborIdx:FAIL; rg_db.flow[*flow_idx].idleSecs = 0; rg_db.flow[*flow_idx].staticEntry = staticEntry; rg_db.flowValidSet[*flow_idx>>5] |= (0x1<<(*flow_idx&31)); return (RT_ERR_RG_OK); } rtk_rg_err_code_t _rtk_rg_flow_del(uint32 flow_idx, uint8 addFlowFromCandidates) { //Check input parameters if(rg_db.systemGlobal.rgInit!=1) RETURN_ERR(RT_ERR_RG_NOT_INIT); if(flow_idx>=MAX_FLOW_SW_TABLE_SIZE) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if(rg_db.flow[flow_idx].flowPath.path1.valid==0) 
		RETURN_ERR(RT_ERR_RG_FLOW_NOT_FOUND);

	TABLE("Del flow[%d]", flow_idx);
	ASSERT_EQ(RTK_RG_ASIC_FLOWPATH_DEL(flow_idx), RT_ERR_RG_OK);

#if defined(CONFIG_RG_FLOW_4K_MODE)
	//update TCAM list
	if(flow_idx>=MAX_FLOW_TABLE_SIZE && flow_idx<MAX_FLOW_HW_TABLE_SIZE)
		_rtk_rg_flowTcamListDel(flow_idx);
#endif

	if(rg_db.flow[flow_idx].naptIdx>=0)
	{
		if(rg_db.flow[flow_idx].flowPath.path5.out_l4_direction) //outbound
		{
			rg_db.naptOut[rg_db.flow[flow_idx].naptIdx].outFlowExist = 0;
			rg_db.naptOut[rg_db.flow[flow_idx].naptIdx].outFlowIdx = 0;
		}
		else
		{
			rg_db.naptOut[rg_db.flow[flow_idx].naptIdx].inFlowExist = 0;
			rg_db.naptOut[rg_db.flow[flow_idx].naptIdx].inFlowIdx = 0;
		}
	}
	rg_db.flow[flow_idx].naptIdx = FAIL;
	rg_db.flow[flow_idx].arpOrNeighborIdx = FAIL;
	rg_db.flow[flow_idx].idleSecs = 0;
	rg_db.flow[flow_idx].staticEntry = 0;
	rg_db.flowValidSet[flow_idx>>5] &= ~(0x1<<(flow_idx&31));

	if(addFlowFromCandidates)
	{
		TRACE("add flow from list of candidates.");
		//1 FIXME: add flow from list of candidates.
	}
	return (RT_ERR_RG_OK);
}

rtk_rg_err_code_t _rtk_rg_flow_find(uint32 *flow_idx, rtk_rg_table_flowPath_t *flowPathEntry, uint16 igrSVID, uint16 igrCVID, uint16 lutDaIdx /*used by path3 & path4*/)
{
	//Check input parameters
	if(rg_db.systemGlobal.rgInit!=1)
		RETURN_ERR(RT_ERR_RG_NOT_INIT);

	return (RT_ERR_RG_OK);
}

rtk_rg_err_code_t _rtk_rg_flow_del_by_naptOutIdx(uint32 naptOutIdx, uint8 addFlowFromCandidates)
{
	//Check input parameters
	if(rg_db.systemGlobal.rgInit!=1)
		RETURN_ERR(RT_ERR_RG_NOT_INIT);
	if((naptOutIdx<0) || (naptOutIdx>=MAX_NAPT_OUT_SW_TABLE_SIZE))
		RETURN_ERR(RT_ERR_RG_INVALID_PARAM);

	//Delete HW flow entry
	if(rg_db.naptOut[naptOutIdx].outFlowExist)
	{
		ASSERT_EQ(_rtk_rg_flow_del(rg_db.naptOut[naptOutIdx].outFlowIdx, addFlowFromCandidates), RT_ERR_RG_OK);
	}
	if(rg_db.naptOut[naptOutIdx].inFlowExist)
	{
		ASSERT_EQ(_rtk_rg_flow_del(rg_db.naptOut[naptOutIdx].inFlowIdx, addFlowFromCandidates), RT_ERR_RG_OK);
	}
	return (RT_ERR_RG_OK);
}

uint32 _rtk_rg_flow_canAddNaptInboundInHw(uint32 sip, uint32 dip, uint16 sport, uint16 dport, uint32 isTcp)
{
	uint32 flowHashIdx, i, search_index, extraItem=0; //only support path5 skip svid&cvid hash.
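	/* Summary (inferred from the checks below): this helper scans the hashed flow bucket
	 * (and, in 4K mode, the TCAM free/overflow lists) for an invalid or replaceable way,
	 * so a caller can ask whether an inbound NAPT flow for a given 5-tuple could still be
	 * installed in hardware before spending effort building the entry.
	 * A minimal caller-side sketch, kept under #if 0 so it is never built; the variable
	 * names and values below are illustrative only and imply no particular sip/dip mapping. */
#if 0
	{
		uint32 exSip = 0x08080808, exDip = 0x0a000001;	/* example addresses only */
		uint16 exSport = 443, exDport = 50000;			/* example L4 ports only */
		if(_rtk_rg_flow_canAddNaptInboundInHw(exSip, exDip, exSport, exDport, 1 /*isTcp*/))
		{
			/* a free or replaceable HW slot exists in this hash bucket */
		}
	}
#endif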
extraItem &= 0x000fff; extraItem &= 0xfff000; /* Extraitem: Consider L4 protocol only for path 3/4/5 */ extraItem = (isTcp<<23) ^ extraItem; flowHashIdx = _rtk_rg_sw_flowHashIndexStep1_get(sport, dport, sip, dip, extraItem); for(i=0; i<MAX_FLOW_WAYS; i++) { search_index = flowHashIdx+i; if(rg_db.flow[search_index].flowPath.path1.valid==0 || rg_db.flow[search_index].canBeReplaced) return TRUE; } #if defined(CONFIG_RG_FLOW_4K_MODE) if(!list_empty(&rg_db.flowTcamFreeListHead)) return TRUE; if(!list_empty(&rg_db.flowTcamListHead[flowHashIdx>>MAX_FLOW_WAYS_SHIFT])) { rtk_rg_flowTcam_linkList_t *pFlowTcamEntry, *pNextFlowTcamEntry; list_for_each_entry_safe(pFlowTcamEntry, pNextFlowTcamEntry, &rg_db.flowTcamListHead[flowHashIdx>>MAX_FLOW_WAYS_SHIFT], flowTcam_list) { search_index = pFlowTcamEntry->idx; if(rg_db.flow[search_index].canBeReplaced) return TRUE; } } #endif return FALSE; } __SRAM_FWDENG_SLOWPATH void _rtk_rg_flowUpdate(rtk_rg_pktHdr_t *pPktHdr) { rtk_rg_err_code_t ret; rtk_rg_table_flowPath_t flowPathEntry; uint32 flow_idx; uint16 igrSVID, igrCVID, lutDaIdx/*used by path3 & path4*/; int32 naptIdx, arpOrNeighborIdx; int8 shareMeterIdx; uint16 egressVlanID; uint8 egressVlanTagif; //0:untag 1:tagged uint8 isBridge, isNapt, isIpv6, isTcp, dmac2cvlanTagif, vlanTagif, serviceVlanTagif; uint16 sport, dport; uint32 vlanID, priority, serviceVlanID, servicePriority, internalCFPri, streamID; int32 smacL2Idx, dmacL2Idx, new_intf_idx, dmac2cvlanID, dscp; int32 indMacIdx=0; ipaddr_t sip, dip; rtk_ipv6_addr_t v6Sip, v6Dip; rtk_rg_naptDirection_t naptDirection; rtk_rg_port_idx_t ingressPort; #ifdef CONFIG_ROME_NAPT_SHORTCUT rtk_rg_napt_shortcut_t *pNaptSc=NULL; #endif #ifdef CONFIG_RG_IPV6_SOFTWARE_SHORTCUT_SUPPORT rtk_rg_naptv6_shortcut_t *pNaptV6Sc=NULL; #endif #ifdef CONFIG_RG_IPV6_STATEFUL_ROUTING_SUPPORT rtk_rg_ipv6_layer4_linkList_t *pV6StatefulList=NULL; #endif if(pPktHdr->algAction==RG_ALG_ACT_TO_FWDENGINE) { TRACE("ALG enable port...does not add to hw flow!"); return; } if(pPktHdr->ingressLocation!=RG_IGR_PHY_PORT) { TRACE("Ingress location is not from physical port...does not add to hw flow!"); return; } //init flow path memset(&flowPathEntry, 0, sizeof(rtk_rg_table_flowPath_t)); igrSVID = (pPktHdr->tagif&SVLAN_TAGIF)?pPktHdr->stagVid:0; igrCVID = (pPktHdr->tagif&CVLAN_TAGIF)?pPktHdr->ctagVid:0; lutDaIdx = pPktHdr->dmacL2Idx; naptIdx = FAIL; arpOrNeighborIdx = FAIL; shareMeterIdx = (pPktHdr->aclDecision.qos_actions&ACL_ACTION_SHARE_METER_BIT)?pPktHdr->aclDecision.action_share_meter:FAIL; switch(pPktHdr->shortcutStatus) { #ifdef CONFIG_ROME_NAPT_SHORTCUT case RG_SC_NEED_UPDATE_BEFORE_SEND: pNaptSc=pPktHdr->pCurrentShortcutEntry; if(pNaptSc->notFinishUpdated) { TRACE("Shortcut is notFinishUpdated."); return; } if(pNaptSc->isHairpinNat) { TRACE("Hw flow does not support hairpin nat."); return; } { ingressPort = pNaptSc->spa; isIpv6 = FALSE; isBridge = pNaptSc->isBridge; isNapt = pNaptSc->isNapt; if(isNapt) naptIdx = pPktHdr->naptOutboundIndx; arpOrNeighborIdx= pNaptSc->arpIdx; smacL2Idx = pNaptSc->smacL2Idx; dmacL2Idx = pNaptSc->new_lut_idx; sip = pNaptSc->sip; dip = pNaptSc->dip; memset(&v6Sip, 0, sizeof(rtk_ipv6_addr_t)); memset(&v6Dip, 0, sizeof(rtk_ipv6_addr_t)); sport = pNaptSc->sport; dport = pNaptSc->dport; isTcp = pNaptSc->isTcp; naptDirection = pNaptSc->direction; new_intf_idx = pNaptSc->new_intf_idx; dmac2cvlanTagif = pNaptSc->dmac2cvlanTagif; dmac2cvlanID = pNaptSc->dmac2cvlanID; vlanTagif = pNaptSc->vlanTagif; vlanID = pNaptSc->vlanID; priority = pNaptSc->priority; serviceVlanTagif= 
pNaptSc->serviceVlanTagif; serviceVlanID = pNaptSc->serviceVlanID; servicePriority = pNaptSc->servicePriority; internalCFPri = pNaptSc->internalCFPri; dscp = pNaptSc->dscp; streamID = pNaptSc->streamID; } break; #endif #ifdef CONFIG_RG_IPV6_SOFTWARE_SHORTCUT_SUPPORT case RG_SC_V6_NEED_UPDATE_BEFORE_SEND: pNaptV6Sc=pPktHdr->pCurrentV6ShortcutEntry; if(pNaptV6Sc->notFinishUpdated) { TRACE("Shortcut is notFinishUpdated."); return; } { ingressPort = pNaptV6Sc->spa; isIpv6 = TRUE; isBridge = pNaptV6Sc->isBridge; isNapt = FALSE; arpOrNeighborIdx= pNaptV6Sc->neighborIdx; smacL2Idx = pNaptV6Sc->smacL2Idx; dmacL2Idx = pNaptV6Sc->new_lut_idx; sip = 0; dip = 0; memcpy(&v6Sip, &pNaptV6Sc->sip, sizeof(rtk_ipv6_addr_t)); memcpy(&v6Dip, &pNaptV6Sc->dip, sizeof(rtk_ipv6_addr_t)); sport = pNaptV6Sc->sport; dport = pNaptV6Sc->dport; isTcp = pNaptV6Sc->isTcp; naptDirection = IPV6_ROUTE_OUTBOUND; // routing don't care new_intf_idx = pNaptV6Sc->new_intf_idx; dmac2cvlanTagif = pNaptV6Sc->dmac2cvlanTagif; dmac2cvlanID = pNaptV6Sc->dmac2cvlanID; vlanTagif = pNaptV6Sc->vlanTagif; vlanID = pNaptV6Sc->vlanID; priority = pNaptV6Sc->priority; serviceVlanTagif= pNaptV6Sc->serviceVlanTagif; serviceVlanID = pNaptV6Sc->serviceVlanID; servicePriority = pNaptV6Sc->servicePriority; internalCFPri = pNaptV6Sc->internalCFPri; dscp = pNaptV6Sc->dscp; streamID = pNaptV6Sc->streamID; } break; #endif #ifdef CONFIG_RG_IPV6_STATEFUL_ROUTING_SUPPORT case RG_SC_STATEFUL_NEED_UPDATE_BEFORE_SEND: pV6StatefulList=pPktHdr->pIPv6StatefulList; #ifdef CONFIG_RG_IPV6_NAPT_SUPPORT if(pV6StatefulList->direction!=IPV6_ROUTE_OUTBOUND && pV6StatefulList->direction!=IPV6_ROUTE_INBOUND) { TRACE("HW flow do not support IPv6 Napt."); return; } #endif if(pV6StatefulList->notFinishUpdated) { TRACE("Shortcut is notFinishUpdated."); return; } { ingressPort = pV6StatefulList->spa; isIpv6 = TRUE; isBridge = pV6StatefulList->isBridge; isNapt = FALSE; arpOrNeighborIdx= pV6StatefulList->neighborIdx; smacL2Idx = pV6StatefulList->smacL2Idx; dmacL2Idx = pV6StatefulList->new_lut_idx; sip = 0; dip = 0; memcpy(&v6Sip, &pV6StatefulList->srcIP, sizeof(rtk_ipv6_addr_t)); memcpy(&v6Dip, &pV6StatefulList->destIP, sizeof(rtk_ipv6_addr_t)); sport = pV6StatefulList->sport; dport = pV6StatefulList->dport; isTcp = pV6StatefulList->isTcp; naptDirection = IPV6_ROUTE_OUTBOUND; // routing don't care new_intf_idx = pV6StatefulList->new_intf_idx; dmac2cvlanTagif = pV6StatefulList->dmac2cvlanTagif; dmac2cvlanID = pV6StatefulList->dmac2cvlanID; vlanTagif = pV6StatefulList->vlanTagif; vlanID = pV6StatefulList->vlanID; priority = pV6StatefulList->priority; serviceVlanTagif= pV6StatefulList->serviceVlanTagif; serviceVlanID = pV6StatefulList->serviceVlanID; servicePriority = pV6StatefulList->servicePriority; internalCFPri = pV6StatefulList->internalCFPri; dscp = pV6StatefulList->dscp; streamID = pV6StatefulList->streamID; } break; #endif default: TRACE("Unknown shortcut status."); return; } if(naptIdx>=0 && rg_db.naptOut[naptIdx].cannotAddToHw) { TRACE("Can not add this flow to hw for DPI."); return; } // translate egress dmacL2Idx to indirectMAc table and save to out_dmac_idx if(RTK_RG_ASIC_INDIRECTMACTABLE_ADD(dmacL2Idx, &indMacIdx)!=RT_ERR_RG_OK) { TRACE("FAIL to add indirect mac table."); return; } //decide egress ctag format if( dmac2cvlanID!=FAIL && !(rg_db.systemGlobal.dmac2cvidDisabledPortmask & (1<<rg_db.lut[dmacL2Idx].rtk_lut.entry.l2UcEntry.port))) { egressVlanID = dmac2cvlanID; egressVlanTagif = dmac2cvlanTagif; } else { egressVlanID = vlanID; egressVlanTagif = vlanTagif; } 
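	/* Note (inferred from the assignments above): the per-DMAC CVLAN (dmac2cvlanID/dmac2cvlanTagif)
	 * overrides the flow's own VLAN only when dmac2cvlanID is valid and the egress MAC port is not
	 * masked out by rg_db.systemGlobal.dmac2cvidDisabledPortmask; otherwise the shortcut's
	 * vlanID/vlanTagif is used as the egress C-tag. */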
//decide bridge egress interface if(new_intf_idx==FAIL) { rtk_rg_port_idx_t egressPort; rtk_rg_mac_port_idx_t egressMacPort=rg_db.lut[dmacL2Idx].rtk_lut.entry.l2UcEntry.port; rtk_rg_mac_ext_port_idx_t egressMacExtPort=rg_db.lut[dmacL2Idx].rtk_lut.entry.l2UcEntry.ext_port; _rtk_rg_macPortToPort_translator(&egressPort, egressMacPort, egressMacExtPort); new_intf_idx=_rtk_rg_decideNetIfIdx(egressPort, egressMacPort, (egressVlanTagif)?1:0, egressVlanID); } //------------------------------------fill pattern and action-------------------------------------- if(isBridge) //bridge { if(rg_db.systemGlobal.fbGlobalState[FB_GLOBAL_L2_FLOW_LOOKUP_BY_MAC]==ENABLED) { rtk_rg_mac_port_idx_t spaIdx; rtk_rg_mac_ext_port_idx_t extSpaIdx; _rtk_rg_portToMacPort_translator(ingressPort, &spaIdx, &extSpaIdx); flowPathEntry.path1.in_path = FB_PATH_12; //path 1 flowPathEntry.path1.in_multiple_act = FALSE; flowPathEntry.path1.in_smac_lut_idx = smacL2Idx; flowPathEntry.path1.in_dmac_lut_idx = dmacL2Idx; //spa //flowPathEntry.path1.in_spa_check will be set automatically. flowPathEntry.path1.in_spa = spaIdx; flowPathEntry.path1.in_ext_spa = extSpaIdx; //svlan flowPathEntry.path1.in_stagif = (pPktHdr->tagif&SVLAN_TAGIF)?TRUE:FALSE; flowPathEntry.path1.in_svlan_id = igrSVID; //cvlan flowPathEntry.path1.in_ctagif = (pPktHdr->tagif&CVLAN_TAGIF)?TRUE:FALSE; flowPathEntry.path1.in_cvlan_id = igrCVID; //pppoe //flowPathEntry.path1.in_pppoe_sid_check will be set automatically. flowPathEntry.path1.in_pppoeif = (pPktHdr->tagif&PPPOE_TAGIF)?TRUE:FALSE; flowPathEntry.path1.in_pppoe_sid = (pPktHdr->tagif&PPPOE_TAGIF)?pPktHdr->sessionId:0; //other flowPathEntry.path1.in_intf_idx = pPktHdr->srcNetifIdx; flowPathEntry.path1.in_protocol = FB_INPROTOCOL_ALL_ACCEPT; //all //flowPathEntry.path1.in_tos_check will be set automatically. flowPathEntry.path1.in_tos = pPktHdr->tos; #ifdef CONFIG_GPON_FEATURE if(rg_db.systemGlobal.initParam.wanPortGponMode) { if(flowPathEntry.path1.in_spa==RTK_RG_MAC_PORT_PON) //from pon port { //flowPathEntry.path1.in_out_stream_idx_check_act will be set automatically. 
flowPathEntry.path1.in_out_stream_idx = pPktHdr->pRxDesc->rx_pon_stream_id; } if(rg_db.lut[dmacL2Idx].rtk_lut.entry.l2UcEntry.port==RTK_RG_MAC_PORT_PON) //to pon port { flowPathEntry.path1.in_out_stream_idx_check_act = TRUE; flowPathEntry.path1.in_out_stream_idx = streamID; } } #endif //---------------egress action--------------- flowPathEntry.path1.out_uc_lut_lookup = TRUE; flowPathEntry.path1.out_dmac_idx = indMacIdx; flowPathEntry.path1.out_intf_idx = new_intf_idx; flowPathEntry.path1.out_portmask = 0; flowPathEntry.path1.out_ext_portmask_idx = 0; flowPathEntry.path1.out_smac_trans = 0; flowPathEntry.path1.out_dmac_trans = 0; //cvlan flowPathEntry.path1.out_egress_cvid_act = TRUE; flowPathEntry.path1.out_cvlan_id = egressVlanID; flowPathEntry.path1.out_ctag_format_act = TRUE; flowPathEntry.path1.out_cvid_format_act = (egressVlanTagif)?TRUE:FALSE; flowPathEntry.path1.out_cpri_format_act = (egressVlanTagif)?TRUE:FALSE; flowPathEntry.path1.out_cpri = priority; //svlan flowPathEntry.path1.out_egress_svid_act = FALSE; flowPathEntry.path1.out_svlan_id =serviceVlanID; flowPathEntry.path1.out_stag_format_act = TRUE; flowPathEntry.path1.out_svid_format_act = (serviceVlanTagif)?TRUE:FALSE; flowPathEntry.path1.out_spri_format_act = (serviceVlanTagif)?TRUE:FALSE; flowPathEntry.path1.out_spri = servicePriority; //other flowPathEntry.path1.out_multiple_act = FALSE; flowPathEntry.path1.out_user_pri_act = TRUE; flowPathEntry.path1.out_user_priority = internalCFPri; if(dscp>=0) { flowPathEntry.path1.out_dscp_act = TRUE; flowPathEntry.path1.out_dscp = dscp; } if(shareMeterIdx>=0) { flowPathEntry.path1.out_share_meter_act = TRUE; flowPathEntry.path1.out_share_meter_idx = shareMeterIdx; } flowPathEntry.path1.out_extra_tag_index = 0; //1 FIXME flowPathEntry.path1.out_egress_port_to_vid_act = FALSE; //1 FIXME flowPathEntry.path1.out_drop = 0; flowPathEntry.path1.lock = 0; } else //FB_GLOBAL_L2_FLOW_LOOKUP_BY_MAC is disabled { flowPathEntry.path1.in_path = FB_PATH_34; //path 3 flowPathEntry.path3.in_multiple_act = FALSE; flowPathEntry.path3.in_ipv4_or_ipv6 = isIpv6; if(isIpv6==0) { flowPathEntry.path3.in_src_ipv4_addr = sip; flowPathEntry.path3.in_dst_ipv4_addr = dip; } else { flowPathEntry.path3.in_src_ipv6_addr_hash = _rtk_rg_flowHashIPv6SrcAddr_get(v6Sip.ipv6_addr); flowPathEntry.path3.in_dst_ipv6_addr_hash = _rtk_rg_flowHashIPv6DstAddr_get(v6Dip.ipv6_addr); } flowPathEntry.path3.in_l4_src_port = sport; flowPathEntry.path3.in_l4_dst_port = dport; flowPathEntry.path3.in_l4proto = isTcp; //tcp: 1, udp: 0 flowPathEntry.path3.in_intf_idx = pPktHdr->srcNetifIdx; flowPathEntry.path3.in_stagif = (pPktHdr->tagif&SVLAN_TAGIF)?TRUE:FALSE; flowPathEntry.path3.in_ctagif = (pPktHdr->tagif&CVLAN_TAGIF)?TRUE:FALSE; flowPathEntry.path3.in_pppoeif = (pPktHdr->tagif&PPPOE_TAGIF)?TRUE:FALSE; //flowPathEntry.path3.in_pppoe_sid_check will be set automatically. //flowPathEntry.path3.in_tos_check will be set automatically. 
flowPathEntry.path3.in_tos = pPktHdr->tos; //-------------------------egress action------------------------- flowPathEntry.path3.out_uc_lut_lookup = TRUE; flowPathEntry.path3.out_dmac_idx = indMacIdx; flowPathEntry.path3.out_intf_idx = new_intf_idx; flowPathEntry.path3.out_portmask = 0; flowPathEntry.path3.out_ext_portmask_idx = 0; flowPathEntry.path3.out_smac_trans = 0; flowPathEntry.path3.out_dmac_trans = 0; //cvlan flowPathEntry.path3.out_egress_cvid_act = TRUE; flowPathEntry.path3.out_cvlan_id = egressVlanID; flowPathEntry.path3.out_ctag_format_act = TRUE; flowPathEntry.path3.out_cvid_format_act = (egressVlanTagif)?TRUE:FALSE; flowPathEntry.path3.out_cpri_format_act = (egressVlanTagif)?TRUE:FALSE; flowPathEntry.path3.out_cpri = priority; //svlan flowPathEntry.path3.out_egress_svid_act = FALSE; flowPathEntry.path3.out_svlan_id = serviceVlanID; flowPathEntry.path3.out_stag_format_act = TRUE; flowPathEntry.path3.out_svid_format_act = (serviceVlanTagif)?TRUE:FALSE; flowPathEntry.path3.out_spri_format_act = (serviceVlanTagif)?TRUE:FALSE; flowPathEntry.path3.out_spri = servicePriority; //other flowPathEntry.path3.out_multiple_act = FALSE; flowPathEntry.path3.out_user_pri_act = TRUE; flowPathEntry.path3.out_user_priority = internalCFPri; if(dscp>=0) { flowPathEntry.path3.out_dscp_act = TRUE; flowPathEntry.path3.out_dscp = dscp; } #ifdef CONFIG_GPON_FEATURE if(rg_db.systemGlobal.initParam.wanPortGponMode && rg_db.lut[dmacL2Idx].rtk_lut.entry.l2UcEntry.port==RTK_RG_MAC_PORT_PON) { flowPathEntry.path3.out_stream_idx_act = TRUE; flowPathEntry.path3.out_stream_idx = streamID; } #endif if(shareMeterIdx>=0) { flowPathEntry.path3.out_share_meter_act = TRUE; flowPathEntry.path3.out_share_meter_idx = shareMeterIdx; } flowPathEntry.path3.out_extra_tag_index = 0; //1 FIXME flowPathEntry.path3.out_egress_port_to_vid_act = FALSE; //1 FIXME flowPathEntry.path3.out_drop = 0; flowPathEntry.path3.lock = 0; } } else //routing, napt { flowPathEntry.path1.in_path = FB_PATH_5; flowPathEntry.path5.in_ipv4_or_ipv6 = isIpv6; if(isIpv6==0) { flowPathEntry.path5.in_src_ipv4_addr = sip; flowPathEntry.path5.in_dst_ipv4_addr = dip; } else { flowPathEntry.path5.in_src_ipv6_addr_hash = _rtk_rg_flowHashIPv6SrcAddr_get(v6Sip.ipv6_addr); flowPathEntry.path5.in_dst_ipv6_addr_hash = _rtk_rg_flowHashIPv6DstAddr_get(v6Dip.ipv6_addr); } flowPathEntry.path5.in_l4_src_port = sport; flowPathEntry.path5.in_l4_dst_port = dport; flowPathEntry.path5.in_l4proto = isTcp; flowPathEntry.path5.in_intf_idx = pPktHdr->srcNetifIdx; flowPathEntry.path5.in_stagif = (pPktHdr->tagif&SVLAN_TAGIF)?TRUE:FALSE; flowPathEntry.path5.in_ctagif = (pPktHdr->tagif&CVLAN_TAGIF)?TRUE:FALSE; flowPathEntry.path5.in_pppoeif = (pPktHdr->tagif&PPPOE_TAGIF)?TRUE:FALSE; //flowPathEntry.path5.in_tos_check will be set automatically. 
flowPathEntry.path5.in_tos = pPktHdr->tos; //-------------------------egress action------------------------- if(isNapt) { flowPathEntry.path5.out_l4_act = 1; // Routing: 0, NAPT: 1 flowPathEntry.path5.out_l4_direction = (naptDirection==NAPT_DIRECTION_OUTBOUND)?1:0; // outbound: 1, inbound: 0 (routing don't care) flowPathEntry.path5.out_l4_port = rg_db.naptOut[naptIdx].extPort; // routing don't care } flowPathEntry.path5.out_intf_idx = new_intf_idx; flowPathEntry.path5.out_dmac_idx = indMacIdx; //cvlan flowPathEntry.path5.out_egress_cvid_act = TRUE; flowPathEntry.path5.out_cvlan_id = egressVlanID; flowPathEntry.path5.out_ctag_format_act = TRUE; flowPathEntry.path5.out_cvid_format_act = (egressVlanTagif)?TRUE:FALSE; flowPathEntry.path5.out_cpri_format_act = (egressVlanTagif)?TRUE:FALSE; flowPathEntry.path5.out_cpri = priority; //svlan flowPathEntry.path5.out_egress_svid_act = FALSE; flowPathEntry.path5.out_svlan_id = serviceVlanID; flowPathEntry.path5.out_stag_format_act = TRUE; flowPathEntry.path5.out_svid_format_act = (serviceVlanTagif)?TRUE:FALSE; flowPathEntry.path5.out_spri_format_act = (serviceVlanTagif)?TRUE:FALSE; flowPathEntry.path5.out_spri = servicePriority; //other flowPathEntry.path5.out_user_pri_act = TRUE; flowPathEntry.path5.out_user_priority = internalCFPri; if(dscp>=0) { flowPathEntry.path5.out_dscp_act = TRUE; flowPathEntry.path5.out_dscp = dscp; } #ifdef CONFIG_GPON_FEATURE if(rg_db.systemGlobal.initParam.wanPortGponMode && rg_db.lut[dmacL2Idx].rtk_lut.entry.l2UcEntry.port==RTK_RG_MAC_PORT_PON) { flowPathEntry.path5.out_stream_idx_act = TRUE; flowPathEntry.path5.out_stream_idx = streamID; } #endif if(shareMeterIdx>=0) { flowPathEntry.path5.out_share_meter_act = TRUE; flowPathEntry.path5.out_share_meter_idx = shareMeterIdx; } flowPathEntry.path5.out_extra_tag_index = 0; //1 FIXME flowPathEntry.path5.out_egress_port_to_vid_act = FALSE; //1 FIXME flowPathEntry.path5.out_drop = 0; flowPathEntry.path5.lock = 0; } ret = _rtk_rg_flow_add(&flow_idx, &flowPathEntry, igrSVID, igrCVID, lutDaIdx, naptIdx, arpOrNeighborIdx, 0); if(ret==RT_ERR_RG_OK || ret==RT_ERR_RG_FLOW_FULL) { //store flowIdx(RT_ERR_RG_OK) or flowHashIdx(RT_ERR_RG_FLOW_FULL) if(naptIdx>=0) { if(naptDirection==NAPT_DIRECTION_OUTBOUND) { rg_db.naptOut[naptIdx].outFlowExist=(ret==RT_ERR_RG_OK)?1:0; rg_db.naptOut[naptIdx].outFlowIdx=flow_idx; } else { rg_db.naptOut[naptIdx].inFlowExist=(ret==RT_ERR_RG_OK)?1:0; rg_db.naptOut[naptIdx].inFlowIdx=flow_idx; } } if(ret==RT_ERR_RG_FLOW_FULL) { TRACE("HW flow entry full, add this flow to list of candicates."); //1 FIXME: add this flow to list of candicates. 
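			/* Note (inferred from the FIXME here and in _rtk_rg_flow_del): the "list of candidates"
			 * mechanism is not implemented yet; when the HW flow table is full only the hash bucket
			 * index is recorded and *FlowExist stays 0, so this flow simply remains un-offloaded. */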
} } else { assert_ok(ret); } return; } __SRAM_FWDENG_SLOWPATH void _rtk_rg_flowUpdate_byInboundShortcut(rtk_rg_pktHdr_t *pPktHdr) { rtk_rg_err_code_t ret; rtk_rg_table_flowPath_t flowPathEntry; uint32 flow_idx; uint16 igrSVID, igrCVID, lutDaIdx/*used by path3 & path4*/; int32 naptIdx, arpOrNeighborIdx; int8 shareMeterIdx; uint16 egressVlanID; uint8 egressVlanTagif; //0:untag 1:tagged uint8 isNapt, isIpv6, isTcp, dmac2cvlanTagif, vlanTagif, serviceVlanTagif; uint16 sport, dport; uint32 vlanID, priority, serviceVlanID, servicePriority, internalCFPri, streamID; int32 smacL2Idx, dmacL2Idx, new_intf_idx, dmac2cvlanID, dscp; int32 indMacIdx=0; ipaddr_t sip, dip; rtk_rg_naptDirection_t naptDirection; #ifdef CONFIG_ROME_NAPT_SHORTCUT rtk_rg_napt_shortcut_t *pNaptSc=NULL; #endif pNaptSc=pPktHdr->pInboundShortcutEntry; if(pNaptSc->notFinishUpdated) { TRACE("Shortcut is notFinishUpdated."); return; } if(pNaptSc->isHairpinNat) { TRACE("Hw flow does not support hairpin nat."); return; } //init flow path memset(&flowPathEntry, 0, sizeof(rtk_rg_table_flowPath_t)); igrSVID = (pPktHdr->egressServiceVlanTagif)?pPktHdr->egressServiceVlanID:0; igrCVID = (pPktHdr->egressVlanTagif)?pPktHdr->egressVlanID:0; lutDaIdx = 0; //path 5 don't care naptIdx = FAIL; arpOrNeighborIdx = FAIL; shareMeterIdx = FAIL; { isIpv6 = FALSE; isNapt = pNaptSc->isNapt; if(isNapt) naptIdx = pPktHdr->naptOutboundIndx; arpOrNeighborIdx= pNaptSc->arpIdx; smacL2Idx = pNaptSc->smacL2Idx; dmacL2Idx = pNaptSc->new_lut_idx; sip = pNaptSc->sip; dip = pNaptSc->dip; sport = pNaptSc->sport; dport = pNaptSc->dport; isTcp = pNaptSc->isTcp; naptDirection = pNaptSc->direction; new_intf_idx = pNaptSc->new_intf_idx; dmac2cvlanTagif = pNaptSc->dmac2cvlanTagif; dmac2cvlanID = pNaptSc->dmac2cvlanID; vlanTagif = pNaptSc->vlanTagif; vlanID = pNaptSc->vlanID; priority = pNaptSc->priority; serviceVlanTagif= pNaptSc->serviceVlanTagif; serviceVlanID = pNaptSc->serviceVlanID; servicePriority = pNaptSc->servicePriority; internalCFPri = pNaptSc->internalCFPri; dscp = pNaptSc->dscp; streamID = pNaptSc->streamID; } // translate egress dmacL2Idx to indirectMAc table and save to out_dmac_idx if(RTK_RG_ASIC_INDIRECTMACTABLE_ADD(dmacL2Idx, &indMacIdx)!=RT_ERR_RG_OK) { TRACE("FAIL to add indirect mac table."); return; } flowPathEntry.path1.in_path = FB_PATH_5; flowPathEntry.path5.in_ipv4_or_ipv6 = isIpv6; flowPathEntry.path5.in_src_ipv4_addr = sip; flowPathEntry.path5.in_dst_ipv4_addr = dip; flowPathEntry.path5.in_l4_src_port = sport; flowPathEntry.path5.in_l4_dst_port = dport; flowPathEntry.path5.in_l4proto = isTcp; flowPathEntry.path5.in_intf_idx = pPktHdr->netifIdx; flowPathEntry.path5.in_stagif = (pPktHdr->egressServiceVlanTagif)?TRUE:FALSE; flowPathEntry.path5.in_ctagif = (pPktHdr->egressVlanTagif)?TRUE:FALSE; flowPathEntry.path5.in_pppoeif = (pPktHdr->egressTagif&PPPOE_TAGIF)?TRUE:FALSE; //1 FIXME: tos check must be disabled. //flowPathEntry.path5.in_tos_check will be set automatically. 
flowPathEntry.path5.in_tos = *pPktHdr->pTos; //-------------------------egress action------------------------- if(isNapt) { flowPathEntry.path5.out_l4_act = 1; // Routing: 0, NAPT: 1 flowPathEntry.path5.out_l4_direction = (naptDirection==NAPT_DIRECTION_OUTBOUND)?1:0; // outbound: 1, inbound: 0 (routing don't care) flowPathEntry.path5.out_l4_port = rg_db.naptOut[naptIdx].extPort; // routing don't care } flowPathEntry.path5.out_intf_idx = new_intf_idx; flowPathEntry.path5.out_dmac_idx = indMacIdx; //cvlan if( dmac2cvlanID!=FAIL && !(rg_db.systemGlobal.dmac2cvidDisabledPortmask & (1<<rg_db.lut[dmacL2Idx].rtk_lut.entry.l2UcEntry.port))) { egressVlanID = dmac2cvlanID; egressVlanTagif = dmac2cvlanTagif; } else { egressVlanID = vlanID; egressVlanTagif = vlanTagif; } flowPathEntry.path5.out_egress_cvid_act = TRUE; flowPathEntry.path5.out_cvlan_id = egressVlanID; flowPathEntry.path5.out_ctag_format_act = TRUE; flowPathEntry.path5.out_cvid_format_act = (egressVlanTagif)?TRUE:FALSE; flowPathEntry.path5.out_cpri_format_act = (egressVlanTagif)?TRUE:FALSE; flowPathEntry.path5.out_cpri = priority; //svlan flowPathEntry.path5.out_egress_svid_act = FALSE; flowPathEntry.path5.out_svlan_id = serviceVlanID; flowPathEntry.path5.out_stag_format_act = TRUE; flowPathEntry.path5.out_svid_format_act = (serviceVlanTagif)?TRUE:FALSE; flowPathEntry.path5.out_spri_format_act = (serviceVlanTagif)?TRUE:FALSE; flowPathEntry.path5.out_spri = servicePriority; //other flowPathEntry.path5.out_user_pri_act = TRUE; flowPathEntry.path5.out_user_priority = internalCFPri; if(dscp>=0) { flowPathEntry.path5.out_dscp_act = TRUE; flowPathEntry.path5.out_dscp = dscp; } #ifdef CONFIG_GPON_FEATURE if(rg_db.systemGlobal.initParam.wanPortGponMode && rg_db.lut[dmacL2Idx].rtk_lut.entry.l2UcEntry.port==RTK_RG_MAC_PORT_PON) { flowPathEntry.path5.out_stream_idx_act = TRUE; flowPathEntry.path5.out_stream_idx = streamID; } #endif if(shareMeterIdx>=0) { flowPathEntry.path5.out_share_meter_act = TRUE; flowPathEntry.path5.out_share_meter_idx = shareMeterIdx; } flowPathEntry.path5.out_extra_tag_index = 0; //1 FIXME flowPathEntry.path5.out_egress_port_to_vid_act = FALSE; //1 FIXME flowPathEntry.path5.out_drop = 0; flowPathEntry.path5.lock = 0; ret = _rtk_rg_flow_add(&flow_idx, &flowPathEntry, igrSVID, igrCVID, lutDaIdx, naptIdx, arpOrNeighborIdx, 0); if(ret==RT_ERR_RG_OK || ret==RT_ERR_RG_FLOW_FULL) { //store flowIdx(RT_ERR_RG_OK) or flowHashIdx(RT_ERR_RG_FLOW_FULL) if(naptIdx>=0) { if(naptDirection==NAPT_DIRECTION_OUTBOUND) { rg_db.naptOut[naptIdx].outFlowExist=(ret==RT_ERR_RG_OK)?1:0; rg_db.naptOut[naptIdx].outFlowIdx=flow_idx; } else { rg_db.naptOut[naptIdx].inFlowExist=(ret==RT_ERR_RG_OK)?1:0; rg_db.naptOut[naptIdx].inFlowIdx=flow_idx; } } if(ret==RT_ERR_RG_FLOW_FULL) { TRACE("HW flow entry full, add this flow to list of candicates."); //1 FIXME: add this flow to list of candicates. 
} } else { assert_ok(ret); } return ; } #if defined(CONFIG_RG_FLOW_AUTO_AGEOUT) extern rtk_rg_err_code_t rtk_rg_asic_flowTraffic_get(uint32 *validSet, uint32 *flowTrafficSet); extern rtk_rg_err_code_t rtk_rg_asic_camTraffic_get(uint64 *pCamTrfBits); void _rtk_rg_flowUpdateIdleTime(uint32 flowIdx) { rg_db.flow[flowIdx].idleSecs = 0; if(rg_db.flow[flowIdx].flowPath.path1.in_path==FB_PATH_5) //path 5 { if(rg_db.flow[flowIdx].arpOrNeighborIdx>=0) { if(rg_db.flow[flowIdx].flowPath.path5.in_ipv4_or_ipv6==0) rg_db.arp[rg_db.flow[flowIdx].arpOrNeighborIdx].idleSecs = 0; else rg_db.v6neighbor[rg_db.flow[flowIdx].arpOrNeighborIdx].idleSecs = 0; } if(rg_db.flow[flowIdx].naptIdx>=0) { rg_db.naptOut[rg_db.flow[flowIdx].naptIdx].idleSecs = 0; rg_db.naptIn[rg_db.naptOut[rg_db.flow[flowIdx].naptIdx].rtk_naptOut.hashIdx].idleSecs = 0; } } } int _rtk_rg_flowTimeoutCheck(void) { int i=0,j=0; uint32 flowIdx; int ret; int elapsedTime = (rg_db.systemGlobal.house_keep_sec*RTK_RG_MAX_HOUSE_KEEP_SELECT); //TICKTIME_PERIOD_SECOND; //secs uint32 notIdleSet[MAX_FLOW_TABLE_SIZE/32]; #if defined(CONFIG_RG_FLOW_4K_MODE) uint64 camTrfBits; uint64 camBitMask; #endif //rtlglue_printf("TICK at %ld . Check FLOW timeout.\n",jiffies); //Read traffic bits ASSERT_EQ(rtk_rg_asic_flowTraffic_get(rg_db.flowValidSet, notIdleSet), RT_ERR_RG_OK); #if defined(CONFIG_RG_FLOW_4K_MODE) ASSERT_EQ(rtk_rg_asic_camTraffic_get(&camTrfBits), RT_ERR_RG_OK); #endif if(rg_db.systemGlobal.flow_HouseKeepIndex >= MAX_FLOW_HW_TABLE_SIZE) { FIXME("the flow_HouseKeepIndex is %d",rg_db.systemGlobal.flow_HouseKeepIndex); rg_db.systemGlobal.flow_HouseKeepIndex=0; } DEBUG("flow timeout check range: %d to %d", rg_db.systemGlobal.flow_HouseKeepIndex, rg_db.systemGlobal.flow_HouseKeepIndex+FLOW_KEEP_NUM); //each time we check FLOW_KEEP_NUM entries //Check idle for(i=(rg_db.systemGlobal.flow_HouseKeepIndex>>5); i<((rg_db.systemGlobal.flow_HouseKeepIndex+FLOW_KEEP_NUM)>>5); i++) { if(i<(MAX_FLOW_TABLE_SIZE>>5)) { if(rg_db.flowValidSet[i]) { for(j=0;j<32;j++) { if(rg_db.flowValidSet[i] & (0x1<<j)) { flowIdx = (i<<5)+j; if(rg_db.flow[flowIdx].staticEntry) continue; if(!rg_db.flow[flowIdx].flowPath.path1.valid) continue; if(notIdleSet[i]&(0x1<<j)) { _rtk_rg_flowUpdateIdleTime(flowIdx); } else { rg_db.flow[flowIdx].idleSecs += (elapsedTime*(MAX_FLOW_HW_TABLE_SIZE/FLOW_KEEP_NUM)); } if(rg_db.flow[flowIdx].idleSecs >= rg_db.systemGlobal.flow_timeout) { TRACE("flow idx [%d] timeout!!", flowIdx); ret=_rtk_rg_flow_del(flowIdx, 1); if(ret==RT_ERR_RG_FLOW_NOT_FOUND) rg_db.flowValidSet[flowIdx>>5] &= ~(0x1<<(flowIdx&0x1f)); else assert_ok(ret); } } } } } #if defined(CONFIG_RG_FLOW_4K_MODE) else //i>=(MAX_FLOW_TABLE_SIZE>>5), for 4K mode TCAM { for(j=0;j<32;j++) { flowIdx = (i<<5)+j; camBitMask = (uint64)(0x1<<(flowIdx-MAX_FLOW_TABLE_SIZE)); if(rg_db.flow[flowIdx].staticEntry) continue; if(!rg_db.flow[flowIdx].flowPath.path1.valid) continue; if(camTrfBits & camBitMask) { DEBUG("check flow entry trafficbits[%d] bit %d", i, j); _rtk_rg_flowUpdateIdleTime(flowIdx); } else { rg_db.flow[flowIdx].idleSecs += (elapsedTime*(MAX_FLOW_HW_TABLE_SIZE/FLOW_KEEP_NUM)); } if(rg_db.flow[flowIdx].idleSecs >= rg_db.systemGlobal.flow_timeout) { TRACE("flow idx [%d] timeout!", flowIdx); rtlglue_printf("flow idx [%d] timeout!!\n", flowIdx); ret=_rtk_rg_flow_del(flowIdx, 1); if(ret!=RT_ERR_RG_FLOW_NOT_FOUND) assert_ok(ret); } } } #endif } rg_db.systemGlobal.flow_HouseKeepIndex += FLOW_KEEP_NUM; return (RT_ERR_RG_OK); } #endif #endif #if defined(CONFIG_APOLLO) #if defined(CONFIG_RTL9602C_SERIES) 
    || defined(CONFIG_RG_FLOW_BASED_PLATFORM)
__SRAM_FWDENG_SLOWPATH
uint32 _rtk_rg_naptTcpUdpInHashIndex(uint16 isTcp, uint32 dip, uint16 dport)
{
	uint32 eidx=0;
	uint32 _xor;
	/* HashIn(Protocol,DIP,DPORT)
	   SUM = DPORT[15:0] + DIP[15:0]
	   TMP = SUM[9:0] + SUM[16:10]
	   XOR = DIP[25:16] ^ {Protocol,3'b0,DIP[31:26]}
	   NAPTR hash ID[9:0] = TMP[9:0] ^ XOR[9:0] */
	eidx = dport + (dip&0xffff);
	eidx = (eidx&0x3ff) + ((eidx>>10)&0x7f);
	_xor = ((dip>>16)&0x3ff) ^ ((isTcp<<9)|((dip>>26)&0x3f));
	eidx = (eidx&0x3ff) ^ (_xor&0x3ff);
	return eidx;
}

__IRAM_FWDENG_SLOWPATH
uint32 _rtk_rg_naptTcpUdpOutHashIndex(int8 isTcp, ipaddr_t srcAddr, uint16 srcPort, ipaddr_t destAddr, uint16 destPort)
{
	uint32 eidx;
	uint32 tmp;
	/* HashOut(Protocol,SIP,SPORT,DIP,DPORT)
	   X[17:0] = SIP[15:0] + DIP[15:0] + SPORT[15:0] + DPORT[15:0];
	   Y[9:0] = X[9:0] + {2'b0, X[17:10]};
	   Z[9:0] = SIP[25:16] ^ {Protocol,3'b0, SIP[31:26]} ^ DIP[25:16] ^ {DIP[31:26], 4'b0};
	   HASH_IDX = Y[9:0] ^ Z[9:0]; */
	eidx = (srcAddr&0xffff) + (destAddr&0xffff) + srcPort + destPort;
	eidx = (eidx&0x3ff) + ((eidx>>10)&0xff);
	tmp = ((srcAddr>>16)&0x3ff) ^ (((isTcp<<9)|((srcAddr>>26)&0x3f)) ^ ((destAddr>>16)&0x3ff) ^ ((destAddr>>22)&0x3f0));
	eidx = (eidx&0x3ff) ^ (tmp&0x3ff);
	assert(eidx < MAX_NAPT_OUT_HASH_SIZE);
	return eidx;
}
#else
__SRAM_FWDENG_SLOWPATH
uint32 _rtk_rg_naptTcpUdpInHashIndex(uint16 isTcp, uint32 dip, uint16 dport)
{
	uint32 eidx=0;
	/* HashIn:
	   SUM = 1's-complement sum of (DIP[7:0] + DIP[15:8] + DIP[23:16] + DIP[31:24] + DPORT[7:0])
	   NAPTR hash ID[7:0] = SUM[7:0] ^ DPORT[15:8] ^ (TCP << 7) */
	eidx = ((dip&0xff) + ((dip>>8)&0xff) + ((dip>>16)&0xff) + ((dip>>24)&0xff) + (dport&0xff));
	while(eidx>0xff)
	{
		eidx = ((eidx&0xff) + ((eidx>>8)&0xff));
	}
	eidx = (eidx&0xff) ^ ((dport>>8)&0xff) ^ (isTcp << 7);
	return eidx;
}

__IRAM_FWDENG_SLOWPATH
uint32 _rtk_rg_naptTcpUdpOutHashIndex(int8 isTcp, ipaddr_t srcAddr, uint16 srcPort, ipaddr_t destAddr, uint16 destPort)
{
	uint32 eidx;
	eidx = (((destAddr&0x3)<<16) | srcPort) + ((destAddr>>2)&0x3ffff);
	eidx = (eidx&0x3ffff) + (eidx>>18);
	eidx += (((srcAddr&0x3f)<<12) | (destAddr>>20));
	eidx = (eidx&0x3ffff) + (eidx>>18);
	eidx += ((srcAddr>>6)&0x3ffff);
	eidx = (eidx&0x3ffff) + (eidx>>18);
	eidx += (((destPort&0x3ff)<<8) | (srcAddr>>24));
	eidx = (eidx&0x3ffff) + (eidx>>18);
	eidx = (eidx&0x1ff) + (eidx>>9);
	eidx = eidx + ((isTcp<<8) | ((destPort>>10)<<2));
	eidx = ((eidx&0x1ff) + (eidx>>9))&0x1ff;
	assert(eidx < MAX_NAPT_OUT_HASH_SIZE);
	return eidx;
}
#endif
#elif defined(CONFIG_XDSL_NEW_HWNAT_DRIVER)
/* Return the NAPT table In and Out index.
   (The returned value [10 bits] should be shifted right by 2 to get the four-way index.)
   out = rtl8651_naptTcpUdpTableIndex((uint8)protocol, intIp, intPort, remIp, remPort); */
/* return 4-way index */
uint32 _rtk_rg_naptTcpUdpOutHashIndex(int8 isTcp, ipaddr_t srcAddr, uint16 srcPort, ipaddr_t destAddr, uint16 destPort)
{
	return ((unsigned int)rtl8651_naptTcpUdpTableIndex(isTcp,srcAddr,srcPort,destAddr,destPort))>>2;
}
/* return inbound index ID (10 bits); the inbound table is only 1-way */
uint32 _rtk_rg_naptTcpUdpInHashIndex(uint16 isTcp, uint32 sip, uint16 sport, uint32 dip, uint16 dport)
{
	return rtl8651_naptTcpUdpTableIndex((uint8)isTcp, sip, sport, dip, dport);
}
#endif
/* The caller must make sure the inbound and outbound table indexes are correct;
   this function always force-writes to both the ASIC and SW tables.
   xDSL: the caller should fill some fields of the SW table first, e.g. rg_db.naptOut[outIdx].extPort. */
__SRAM_FWDENG_SLOWPATH
int32 _rtk_rg_naptConnection_add(int outIdx, rtk_l34_naptOutbound_entry_t *asic_napt, rtk_l34_naptInbound_entry_t
*asic_naptr) { // call this function when connection status change to connect. //Write to ASIC int retval = 0; rtk_rg_naptInfo_t naptInfo; int inIdx = asic_napt->hashIdx; int sw_cb; //0:invalid, execute callback 1:valid, sw_napt added before //20151209LUKE: check if we are set software NAPT at first time or not. sw_cb=rg_db.naptOut[outIdx].rtk_naptOut.valid; rg_lock(&rg_kernel.naptTableLock); //========================critical region start========================= #if !defined(CONFIG_RG_FLOW_BASED_PLATFORM) if((inIdx<MAX_NAPT_IN_HW_TABLE_SIZE) && (outIdx<MAX_NAPT_OUT_HW_TABLE_SIZE)) #endif { #if defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) //=================865x assign value start ====================== asic_naptr->extPort = rg_db.naptOut[outIdx].extPort; asic_napt->intIp = asic_naptr->intIp; asic_napt->intPort = asic_naptr->intPort ; asic_napt->extPort = rg_db.naptOut[outIdx].extPort; asic_napt->isTcp = asic_naptr->isTcp; //=================865x assign value end ====================== if(asic_naptr->valid!=ASIC_NAPT_IN_TYPE_PORT_RESTRICTED_CONE){ WARNING("XDSL only support NAPT_IN_TYPE_SYMMETRIC_NAPT\n"); } #endif //NAPT-R retval = RTK_L34_NAPTINBOUNDTABLE_SET(1,inIdx,asic_naptr); if(retval!=RT_ERR_OK) { rg_unlock(&rg_kernel.naptTableLock); RETURN_ERR(RT_ERR_RG_NAPTR_SET_FAIL); } //NAPT retval = RTK_L34_NAPTOUTBOUNDTABLE_SET(1,outIdx,asic_napt); if(retval!=RT_ERR_OK) { rg_unlock(&rg_kernel.naptTableLock); RETURN_ERR(RT_ERR_RG_NAPT_SET_FAIL); } } rg_db.naptIn[inIdx].idleSecs=0; rg_db.naptOut[outIdx].idleSecs=0; if(rg_db.naptIn[inIdx].rtk_naptIn.isTcp) rg_db.naptOut[outIdx].state=TCP_CONNECTED; else rg_db.naptOut[outIdx].state=UDP_CONNECTED; rg_db.naptValidSet[outIdx>>5] |= (0x1<<(outIdx&31)); if(asic_naptr->valid==ASIC_NAPT_IN_TYPE_PORT_RESTRICTED_CONE) { rg_db.naptIn[inIdx].coneType=NAPT_IN_TYPE_SYMMETRIC_NAPT; rg_db.naptIn[inIdx].symmetricNaptOutIdx=outIdx; } else if(asic_naptr->valid==ASIC_NAPT_IN_TYPE_RESTRICTED_CONE) rg_db.naptIn[inIdx].coneType=NAPT_IN_TYPE_RESTRICTED_CONE; else if(asic_naptr->valid==ASIC_NAPT_IN_TYPE_FULL_CONE) rg_db.naptIn[inIdx].coneType=NAPT_IN_TYPE_FULL_CONE; //20151209LUKE: we execute software NAPT add callback only if we are first setup it here. if(!sw_cb && rg_db.systemGlobal.initParam.softwareNaptInfoAddCallBack!=NULL) { //20151202LUKE: collect info from NAPT/NAPTR table. _rtk_rg_naptInfoCollectForCallback(outIdx, &naptInfo); rg_db.systemGlobal.initParam.softwareNaptInfoAddCallBack(&naptInfo); } if(rg_db.systemGlobal.initParam.naptAddByHwCallBack!=NULL) { //20151202LUKE: collect info from NAPT/NAPTR table. 
_rtk_rg_naptInfoCollectForCallback(outIdx, &naptInfo); rg_db.systemGlobal.initParam.naptAddByHwCallBack(&naptInfo); } TABLE("Add NAPT: [%s][Local=0x%x:%d][WANIF=%d:%d][Remote=0x%x:%d][inIdx:%d][outIdx:%d][Type:%s]\n" ,(rg_db.naptIn[inIdx].rtk_naptIn.isTcp==1)?"TCP":"UDP" ,rg_db.naptIn[inIdx].rtk_naptIn.intIp,rg_db.naptIn[inIdx].rtk_naptIn.intPort,rg_db.nexthop[rg_db.extip[rg_db.naptIn[inIdx].rtk_naptIn.extIpIdx].rtk_extip.nhIdx].rtk_nexthop.ifIdx,rg_db.naptOut[outIdx].extPort,rg_db.naptOut[outIdx].remoteIp,rg_db.naptOut[outIdx].remotePort,inIdx,outIdx ,(rg_db.naptIn[inIdx].coneType==NAPT_IN_TYPE_SYMMETRIC_NAPT)?"SYMMETRIC_NAPT":((rg_db.naptIn[inIdx].coneType==NAPT_IN_TYPE_FULL_CONE)?"FULL_CONE":"RESTRICTED_CONE") ); rg_unlock(&rg_kernel.naptTableLock); //========================critical region end========================= return (RT_ERR_RG_OK); } //NAPT Flow /* XDSL:FIXME need more check only add for hw_table naptIn/naptOut and sync to swTbl ,if hw_table full return failed input:naptFlow output:flow_idx -flow index is outbound table index */ rtk_rg_err_code_t rtk_rg_apollo_naptConnection_add(rtk_rg_naptEntry_t *naptFlow, int *flow_idx) { #if !defined(CONFIG_RG_FLOW_BASED_PLATFORM) //flow-based platform only support sw napt int i=0; #endif int retval=0; int outIdx=0,inIdx=0,eipIdx; rtk_l34_ext_intip_entry_t eip; rtk_l34_naptInbound_entry_t asic_naptr; rtk_l34_naptOutbound_entry_t asic_napt; //Check input parameters if(rg_db.systemGlobal.vlanInit==0) RETURN_ERR(RT_ERR_RG_NOT_INIT); if((naptFlow->is_tcp!=0) && (naptFlow->is_tcp!=1)) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); eipIdx=rg_db.systemGlobal.interfaceInfo[naptFlow->wan_intf_idx].storedInfo.wan_intf.extip_idx; if(eipIdx<0 || rg_db.extip[eipIdx].rtk_extip.valid==0) RETURN_ERR(RT_ERR_RG_EXTIP_GET_FAIL); #if defined(CONFIG_APOLLO) if(naptFlow->coneType==NAPT_IN_TYPE_RESTRICTED_CONE) RETURN_ERR(RT_ERR_RG_CHIP_NOT_SUPPORT); #elif defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) //xdsl only support symmetric napt mode if(naptFlow->coneType!=NAPT_IN_TYPE_SYMMETRIC_NAPT) {RETURN_ERR(RT_ERR_RG_CHIP_NOT_SUPPORT); WARNING("xdsl only support NAPT_IN_TYPE_SYMMETRIC_NAPT");} #endif memcpy(&eip,&rg_db.extip[eipIdx].rtk_extip,sizeof(rtk_l34_ext_intip_entry_t)); #if !defined(CONFIG_RG_FLOW_BASED_PLATFORM) //flow-based platform only support sw napt outIdx = _rtk_rg_naptTcpUdpOutHashIndex(naptFlow->is_tcp,naptFlow->local_ip,naptFlow->local_port,naptFlow->remote_ip,naptFlow->remote_port); //Check duplication for(i=outIdx<<2;i<(outIdx<<2)+4;i++) { if(rg_db.naptOut[i].rtk_naptOut.valid) { inIdx=rg_db.naptOut[i].rtk_naptOut.hashIdx; if((rg_db.naptIn[inIdx].rtk_naptIn.isTcp == naptFlow->is_tcp) && (rg_db.naptIn[inIdx].rtk_naptIn.intIp == naptFlow->local_ip) && (rg_db.naptIn[inIdx].rtk_naptIn.intPort== naptFlow->local_port) && (rg_db.naptOut[i].remoteIp == naptFlow->remote_ip) && (rg_db.naptOut[i].remotePort == naptFlow->remote_port)) { rg_db.naptIn[inIdx].rtk_naptIn.priId=naptFlow->inbound_priority; rg_db.naptIn[inIdx].rtk_naptIn.priValid=naptFlow->inbound_pri_valid; rg_db.naptOut[i].rtk_naptOut.priValue=naptFlow->outbound_priority; rg_db.naptOut[i].rtk_naptOut.priValid=naptFlow->outbound_pri_valid; retval = _rtk_rg_naptConnection_add(i,&rg_db.naptOut[i].rtk_naptOut,&rg_db.naptIn[inIdx].rtk_naptIn); RETURN_ERR(RT_ERR_RG_NAPT_FLOW_DUPLICATE); } } } #if defined(CONFIG_APOLLO) { int j; //Get free Inbound entry inIdx = _rtk_rg_naptTcpUdpInHashIndex(naptFlow->is_tcp,eip.extIpAddr,naptFlow->external_port); for(j=0;j<2;j++) { for(i=0;i<4;i++) { if( ((j==0)&& /* If cone rule is 
exist, re-use the inbound index. Search all cone index first. */ (((rg_db.naptIn[(inIdx<<2)+i].rtk_naptIn.valid==ASIC_NAPT_IN_TYPE_RESTRICTED_CONE)&&(naptFlow->coneType==NAPT_IN_TYPE_RESTRICTED_CONE))|| ((rg_db.naptIn[(inIdx<<2)+i].rtk_naptIn.valid==ASIC_NAPT_IN_TYPE_FULL_CONE)&&(naptFlow->coneType==NAPT_IN_TYPE_FULL_CONE)))&& (rg_db.naptIn[(inIdx<<2)+i].rtk_naptIn.extIpIdx==eipIdx)&& #if defined(CONFIG_RTL9602C_SERIES) || defined(CONFIG_RG_FLOW_BASED_PLATFORM) (((rg_db.naptIn[(inIdx<<2)+i].rtk_naptIn.extPortHSB<<8) | rg_db.naptIn[(inIdx<<2)+i].rtk_naptIn.extPortLSB)==naptFlow->external_port)&& #else (rg_db.naptIn[(inIdx<<2)+i].rtk_naptIn.extPortLSB==(naptFlow->external_port&0xff))&& #endif (rg_db.naptIn[(inIdx<<2)+i].rtk_naptIn.isTcp==naptFlow->is_tcp)) /* if all cone rule is not exist, find a empty rule. */ ||((j==1)&&(rg_db.naptIn[(inIdx<<2)+i].rtk_naptIn.valid==ASIC_NAPT_IN_TYPE_INVALID))) { #if 0 rg_db.naptIn[(inIdx<<2)+i].remoteIp = naptFlow->remote_ip; rg_db.naptIn[(inIdx<<2)+i].remotePort = naptFlow->remote_port; #endif memset(&asic_naptr,0,sizeof(rtk_l34_naptInbound_entry_t)); asic_naptr.intIp = naptFlow->local_ip; asic_naptr.intPort = naptFlow->local_port; asic_naptr.extIpIdx = eipIdx; #if defined(CONFIG_RTL9602C_SERIES) || defined(CONFIG_RG_FLOW_BASED_PLATFORM) asic_naptr.extPortHSB = naptFlow->external_port>>8; #endif asic_naptr.extPortLSB = naptFlow->external_port&0xff; asic_naptr.isTcp = naptFlow->is_tcp; if(naptFlow->coneType==NAPT_IN_TYPE_SYMMETRIC_NAPT){ asic_naptr.remHash = _rtk_rg_NAPTRemoteHash_get(naptFlow->remote_ip,naptFlow->remote_port); asic_naptr.valid = ASIC_NAPT_IN_TYPE_PORT_RESTRICTED_CONE; }else if(naptFlow->coneType==NAPT_IN_TYPE_RESTRICTED_CONE){ asic_naptr.remHash = _rtk_rg_NAPTRemoteHash_get(naptFlow->remote_ip,0); asic_naptr.valid = ASIC_NAPT_IN_TYPE_RESTRICTED_CONE; }else if(naptFlow->coneType==NAPT_IN_TYPE_FULL_CONE) asic_naptr.valid = ASIC_NAPT_IN_TYPE_FULL_CONE; asic_naptr.priValid = naptFlow->inbound_pri_valid; asic_naptr.priId = naptFlow->inbound_priority; inIdx=(inIdx<<2)+i; //Absolute index for outside use. break; } } if(i!=4) break; /* found */ if((j==1)&&(i==4)) { RETURN_ERR(RT_ERR_RG_NAPTR_OVERFLOW); //full } } } #elif defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) //Get free Inbound entry inIdx = _rtk_rg_naptTcpUdpInHashIndex(naptFlow->is_tcp,naptFlow->remote_ip,naptFlow->remote_port,eip.extIpAddr,naptFlow->external_port); /*xdsl naptIn only one-way hash ,Boyce 2015-01-14*/ if(rg_db.naptIn[inIdx].rtk_naptIn.valid==ASIC_NAPT_IN_TYPE_INVALID){ memset(&asic_naptr,0,sizeof(rtk_l34_naptInbound_entry_t)); asic_naptr.intIp = naptFlow->local_ip; asic_naptr.intPort = naptFlow->local_port; asic_naptr.remHash = _rtk_rg_NAPTRemoteHash_get(naptFlow->is_tcp,naptFlow->remote_ip,naptFlow->remote_port); asic_naptr.extIpIdx = naptFlow->wan_intf_idx; asic_naptr.extPortLSB = naptFlow->external_port; asic_naptr.isTcp = naptFlow->is_tcp; asic_naptr.valid = ASIC_NAPT_IN_TYPE_PORT_RESTRICTED_CONE; //we only support NAPT_IN_TYPE_SYMMETRIC_NAPT( same as ASIC_NAPT_IN_TYPE_PORT_RESTRICTED_CONE) asic_naptr.priValid = naptFlow->inbound_pri_valid; asic_naptr.priId = naptFlow->inbound_priority; //sync to naptOut tbl rg_db.naptOut[inIdx].rtk_naptOut.valid=1; }else{ //we only one-way chance... 
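		/* Note (inferred from the one-way comment above and the xDSL hash helpers): the xDSL
		 * inbound (NAPT-R) table is direct-mapped, so when the hashed slot is already valid
		 * there is no alternative way to try and the add must fail with NAPTR overflow. */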
RETURN_ERR(RT_ERR_RG_NAPTR_OVERFLOW); //full } #endif //get free NAPT entry for(i=0;i<4;i++) { if(rg_db.naptOut[(outIdx<<2)+i].rtk_naptOut.valid==0) { rg_db.naptOut[(outIdx<<2)+i].extPort = naptFlow->external_port; #if 1 rg_db.naptOut[(outIdx<<2)+i].remoteIp = naptFlow->remote_ip; rg_db.naptOut[(outIdx<<2)+i].remotePort = naptFlow->remote_port; #endif #if defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) //sync to naptIn tbl rg_db.naptIn[(outIdx<<2)+i].rtk_naptIn.valid=1; #endif memset(&asic_napt,0,sizeof(rtk_l34_naptOutbound_entry_t)); asic_napt.hashIdx = inIdx; asic_napt.priValid = naptFlow->outbound_pri_valid; asic_napt.priValue = naptFlow->outbound_priority; asic_napt.valid = 1; outIdx=(outIdx<<2)+i; break; } } if(i==4) { #if defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) //failed set naptOut table back rg_db.naptOut[inIdx].rtk_naptOut.valid=0; #endif RETURN_ERR(RT_ERR_RG_NAPT_OVERFLOW); //full } #else //1 CONFIG_RG_FLOW_BASED_PLATFORM { uint32 outHashIdx, inHashIdx; rtk_rg_table_naptIn_linkList_t *pNaptInLinkList; outIdx = _rtk_rg_naptTcpUdpOutHashIndexLookup(naptFlow->is_tcp, naptFlow->local_ip, naptFlow->local_port, naptFlow->remote_ip, naptFlow->remote_port); if(outIdx>=0) //found { inIdx = rg_db.naptOut[outIdx].rtk_naptOut.hashIdx; rg_db.naptOut[outIdx].rtk_naptOut.priValue=naptFlow->outbound_priority; rg_db.naptOut[outIdx].rtk_naptOut.priValid=naptFlow->outbound_pri_valid; rg_db.naptIn[inIdx].rtk_naptIn.priId=naptFlow->inbound_priority; rg_db.naptIn[inIdx].rtk_naptIn.priValid=naptFlow->inbound_pri_valid; assert_ok(_rtk_rg_naptConnection_add(outIdx,&rg_db.naptOut[outIdx].rtk_naptOut,&rg_db.naptIn[inIdx].rtk_naptIn)); RETURN_ERR(RT_ERR_RG_NAPT_FLOW_DUPLICATE); } outHashIdx = _rtk_rg_naptTcpUdpOutHashIndex(naptFlow->is_tcp,naptFlow->local_ip,naptFlow->local_port,naptFlow->remote_ip,naptFlow->remote_port); outIdx = _rtk_rg_swNaptOutFreeEntryGet(outHashIdx); if(outIdx==RG_RET_ENTRY_NOT_GET) RETURN_ERR(RT_ERR_RG_NAPTR_OVERFLOW); //full inIdx = FAIL; /* If cone rule is exist, re-use the inbound index. Search all cone index first. 
*/ inHashIdx = _rtk_rg_naptTcpUdpInHashIndex(naptFlow->is_tcp, eip.extIpAddr, naptFlow->external_port); pNaptInLinkList=rg_db.pNaptInHashListHead[inHashIdx]; while(pNaptInLinkList!=NULL) { if((((rg_db.naptIn[pNaptInLinkList->idx].rtk_naptIn.valid==ASIC_NAPT_IN_TYPE_RESTRICTED_CONE)&&(naptFlow->coneType==NAPT_IN_TYPE_RESTRICTED_CONE))|| ((rg_db.naptIn[pNaptInLinkList->idx].rtk_naptIn.valid==ASIC_NAPT_IN_TYPE_FULL_CONE)&&(naptFlow->coneType==NAPT_IN_TYPE_FULL_CONE))) &&(rg_db.naptIn[pNaptInLinkList->idx].rtk_naptIn.extIpIdx==eipIdx) &&(((rg_db.naptIn[pNaptInLinkList->idx].rtk_naptIn.extPortHSB<<8) | rg_db.naptIn[pNaptInLinkList->idx].rtk_naptIn.extPortLSB)==naptFlow->external_port) &&(rg_db.naptIn[pNaptInLinkList->idx].rtk_naptIn.isTcp==naptFlow->is_tcp)) { inIdx = pNaptInLinkList->idx; break; } pNaptInLinkList=pNaptInLinkList->pNext; } if(inIdx==FAIL) //not found { inIdx = _rtk_rg_swNaptInFreeEntryGet(inHashIdx); if(inIdx==RG_RET_ENTRY_NOT_GET) RETURN_ERR(RT_ERR_RG_NAPTR_OVERFLOW); //full } //fill napt entry memset(&asic_napt, 0, sizeof(rtk_l34_naptOutbound_entry_t)); asic_napt.hashIdx = inIdx; asic_napt.priValid = naptFlow->outbound_pri_valid; asic_napt.priValue = naptFlow->outbound_priority; asic_napt.valid = 1; //fill naptr entry memset(&asic_naptr, 0, sizeof(rtk_l34_naptInbound_entry_t)); asic_naptr.intIp = naptFlow->local_ip; asic_naptr.intPort = naptFlow->local_port; asic_naptr.extIpIdx = eipIdx; asic_naptr.extPortHSB = naptFlow->external_port>>8; asic_naptr.extPortLSB = naptFlow->external_port&0xff; asic_naptr.isTcp = naptFlow->is_tcp; if(naptFlow->coneType==NAPT_IN_TYPE_SYMMETRIC_NAPT){ asic_naptr.remHash = _rtk_rg_NAPTRemoteHash_get(naptFlow->remote_ip, naptFlow->remote_port); asic_naptr.valid = ASIC_NAPT_IN_TYPE_PORT_RESTRICTED_CONE; }else if(naptFlow->coneType==NAPT_IN_TYPE_RESTRICTED_CONE){ asic_naptr.remHash = _rtk_rg_NAPTRemoteHash_get(naptFlow->remote_ip, 0); asic_naptr.valid = ASIC_NAPT_IN_TYPE_RESTRICTED_CONE; }else if(naptFlow->coneType==NAPT_IN_TYPE_FULL_CONE){ asic_naptr.valid = ASIC_NAPT_IN_TYPE_FULL_CONE; } asic_naptr.priValid = naptFlow->inbound_pri_valid; asic_naptr.priId = naptFlow->inbound_priority; } #endif //add refCount of extPort { atomic_inc(&rg_db.naptForwardEngineEntryNumber[naptFlow->is_tcp]); _rtk_rg_naptExtPortInUsedCheck(TRUE, naptFlow->is_tcp, naptFlow->external_port, TRUE, FALSE); } #if defined(CONFIG_RG_FLOW_BASED_PLATFORM) rg_db.naptOut[outIdx].outFlowExist=0; rg_db.naptOut[outIdx].outFlowIdx=0; rg_db.naptOut[outIdx].inFlowExist=0; rg_db.naptOut[outIdx].inFlowIdx=0; #endif *flow_idx = outIdx; retval = _rtk_rg_naptConnection_add(outIdx,&asic_napt,&asic_naptr); return retval; } int _rtk_rg_naptExtPortFree(int fromPS,int isTcp,uint16 port) { int idx; uint32 bitValue; int i; int hasFree=0; i=port; idx=i>>5; // =port/32 bitValue=1<<(i&0x1f); if(isTcp) { if((rg_db.naptTcpExternPortUsed[idx]&bitValue)>0) { rg_db.naptTcpExternPortUsedRefCount[port]--; if(rg_db.naptTcpExternPortUsedRefCount[port] <= 0) { rg_db.naptTcpExternPortUsed[idx]&=(~bitValue); hasFree=1; } } } else { if((rg_db.naptUdpExternPortUsed[idx]&bitValue)>0) { rg_db.naptUdpExternPortUsedRefCount[port]--; if(rg_db.naptUdpExternPortUsedRefCount[port] <= 0) { rg_db.naptUdpExternPortUsed[idx]&=(~bitValue); hasFree=1; } } } if(fromPS) { //if(hasFree==1) atomic_dec(&rg_db.naptProtcolStackEntryNumber[isTcp]); DEBUG("[NAPT] Del protocol stack connection flow:%d\n",atomic_read(&rg_db.naptProtcolStackEntryNumber[0])+atomic_read(&rg_db.naptProtcolStackEntryNumber[1])); } else { //if(hasFree==1) 
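		/* NOTE: the forwarding-engine connection counter below (like the protocol-stack
		 * counter in the branch above) is decremented unconditionally; hasFree only reports
		 * whether the last reference to this external port was released.  Each 16-bit port
		 * maps to bit (port & 0x1f) of word (port >> 5) in the used-port bitmap, with a
		 * per-port reference count kept alongside. */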
atomic_dec(&rg_db.naptForwardEngineEntryNumber[isTcp]); DEBUG("[NAPT] Del forwarding engine connection flow:%d\n",atomic_read(&rg_db.naptForwardEngineEntryNumber[0])+atomic_read(&rg_db.naptForwardEngineEntryNumber[1])); } return hasFree; } __SRAM_FWDENG_SLOWPATH int _rtk_rg_naptExtPortUsedByOtherServiceCheck(int isTcp, uint16 wishPort, int skipPreDefinedPort) { int i, isUsed=0; if(skipPreDefinedPort) { //Pre-defined port (used by server in LAN) for(i=RTK_RG_ALG_SIP_TCP_SRV_IN_LAN; i<=RTK_RG_ALG_FTP_UDP_SRV_IN_LAN; i++) { switch(i) { case RTK_RG_ALG_SIP_TCP_SRV_IN_LAN: case RTK_RG_ALG_H323_TCP_SRV_IN_LAN: case RTK_RG_ALG_RTSP_TCP_SRV_IN_LAN: case RTK_RG_ALG_FTP_TCP_SRV_IN_LAN: if(isTcp==1 && wishPort==rg_db.algUserDefinedPort[i]) { isUsed = 1; } break; case RTK_RG_ALG_SIP_UDP_SRV_IN_LAN: case RTK_RG_ALG_H323_UDP_SRV_IN_LAN: case RTK_RG_ALG_RTSP_UDP_SRV_IN_LAN: case RTK_RG_ALG_FTP_UDP_SRV_IN_LAN: if(isTcp==0 && wishPort==rg_db.algUserDefinedPort[i]) { isUsed = 1; } break; default: break; } if(isUsed==1) { DEBUG("port %d is used by alg type %d, skip", wishPort, i); break; } } } //Dynamic port (used by server in LAN) if(_rtk_rg_algSrvInLanCheckEnable(isTcp, wishPort)==RG_RET_SUCCESS) { DEBUG("port %d is used by alg server in lan, skip", wishPort); isUsed = 1; } //Pre-allocated port (used by protocol stack) if(!(rg_db.systemGlobal.lowerBoundPortUsedByPS==0 && rg_db.systemGlobal.upperBoundPortUsedByPS==0) && rg_db.systemGlobal.lowerBoundPortUsedByPS<=wishPort && wishPort<=rg_db.systemGlobal.upperBoundPortUsedByPS) { DEBUG("port %d is used by protocol stack, skip", wishPort); isUsed = 1; } return isUsed; //0: free, 1: used } __SRAM_FWDENG_SLOWPATH int _rtk_rg_naptExtPortInUsedCheck(int force, int isTcp,uint16 wishPort, int addRefCount, int skipPreDefinedPort) { int wishIdx; uint32 wishBitValue; wishIdx=wishPort>>5; // =wishPort/32 wishBitValue=1<<(wishPort&0x1f); if(isTcp) { //DEBUG("[TCP]wishPort:%d, usedBit: %s", wishPort, (rg_db.naptTcpExternPortUsed[wishIdx]&wishBitValue)==0?"unused":"used"); if(((rg_db.naptTcpExternPortUsed[wishIdx]&wishBitValue)==0 && _rtk_rg_naptExtPortUsedByOtherServiceCheck(isTcp, wishPort, skipPreDefinedPort)==0) || force) { if(addRefCount) { rg_db.naptTcpExternPortUsedRefCount[wishPort]++; rg_db.naptTcpExternPortUsed[wishIdx]|=wishBitValue; DEBUG("[TCP]wishPort:%d is used.", wishPort); } return 0; //free } } else { //DEBUG("[UDP]wishPort:%d, usedBit: %s", wishPort, (rg_db.naptUdpExternPortUsed[wishIdx]&wishBitValue)==0?"unused":"used"); if(((rg_db.naptUdpExternPortUsed[wishIdx]&wishBitValue)==0 && _rtk_rg_naptExtPortUsedByOtherServiceCheck(isTcp, wishPort, skipPreDefinedPort)==0) || force) { if(addRefCount) { rg_db.naptUdpExternPortUsedRefCount[wishPort]++; rg_db.naptUdpExternPortUsed[wishIdx]|=wishBitValue; DEBUG("[UDP]wishPort:%d is used.", wishPort); } return 0; //free } } return 1; //used } #ifdef CONFIG_RG_NAPT_PORT_COLLISION_PREVENTION rtk_rg_err_code_t rtk_rg_apollo_naptExtPortFree(int isTcp,uint16 port) { #ifdef CONFIG_ROME_NAPT_LRU #else //Protocol Stack free _rtk_rg_naptExtPortFree(1,isTcp,port); #endif return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_naptExtPortGet(int isTcp,uint16 *pPort) { #ifdef CONFIG_ROME_NAPT_LRU return 0; #else int wishPort; int port_in_used; int j; wishPort=*pPort; port_in_used=0; port_in_used = _rtk_rg_naptExtPortInUsedCheck(0,isTcp,wishPort,TRUE,TRUE); if(port_in_used) goto INUSED; atomic_inc(&rg_db.naptProtcolStackEntryNumber[isTcp]); DEBUG("@@@@@@@@@@@ Add protocol stack connection 
flow:%d\n",atomic_read(&rg_db.naptProtcolStackEntryNumber[0])+atomic_read(&rg_db.naptProtcolStackEntryNumber[1])); //Check UPNP for(j=0;j<MAX_UPNP_SW_TABLE_SIZE;j++) { if((rg_db.upnp[j].valid==1) && (rg_db.upnp[j].is_tcp&&isTcp) && (rg_db.upnp[j].gateway_port==wishPort)) { port_in_used=1; break; } } if(port_in_used) goto INBOUNDUSED; //Check Virtual Server for(j=0;j<MAX_VIRTUAL_SERVER_SW_TABLE_SIZE;j++) { if((rg_db.virtualServer[j].valid==1) && (rg_db.virtualServer[j].is_tcp&&isTcp) && (wishPort>=rg_db.virtualServer[j].gateway_port_start) && (wishPort<=(rg_db.virtualServer[j].gateway_port_start+rg_db.virtualServer[j].mappingPortRangeCnt))) { port_in_used=1; break; } } if(port_in_used) goto INBOUNDUSED; if((atomic_read(&rg_db.naptProtcolStackEntryNumber[0])+atomic_read(&rg_db.naptProtcolStackEntryNumber[1])) > (65536-MAX_NAPT_OUT_SW_TABLE_SIZE)) goto CONNFULL; return 0; CONNFULL: return -1; INBOUNDUSED: _rtk_rg_naptExtPortFree(1,isTcp,wishPort); INUSED: return -2; #endif } #endif /* return a match outbound index*/ __IRAM_FWDENG_SLOWPATH rtk_rg_lookupIdxReturn_t _rtk_rg_naptTcpUdpOutHashIndexLookup(int8 isTcp, ipaddr_t srcAddr, uint16 srcPort, ipaddr_t destAddr, uint16 destPort) { #if !defined(CONFIG_RG_FLOW_BASED_PLATFORM) int i; #endif uint32 naptHashOutIdx; int32 naptOutIdx; rtk_rg_table_naptOut_linkList_t *pNaptOutList; naptHashOutIdx=_rtk_rg_naptTcpUdpOutHashIndex(isTcp,srcAddr,srcPort,destAddr,destPort); #if !defined(CONFIG_RG_FLOW_BASED_PLATFORM) //flow-based platform only support sw napt naptOutIdx=naptHashOutIdx<<2; for(i=naptOutIdx;i<naptOutIdx+4;i++) { if(rg_db.naptOut[i].rtk_naptOut.valid!=0) { int naptInIdx=rg_db.naptOut[i].rtk_naptOut.hashIdx; if((srcAddr==rg_db.naptIn[naptInIdx].rtk_naptIn.intIp)&& (srcPort==rg_db.naptIn[naptInIdx].rtk_naptIn.intPort)&& (destAddr==rg_db.naptOut[i].remoteIp)&& (destPort==rg_db.naptOut[i].remotePort)&& (isTcp==rg_db.naptIn[naptInIdx].rtk_naptIn.isTcp)) //for PORT_REST ONLY { int hit=0; if(rg_db.naptIn[naptInIdx].coneType==NAPT_IN_TYPE_SYMMETRIC_NAPT) { #if defined(CONFIG_APOLLO) if(_rtk_rg_NAPTRemoteHash_get(destAddr,destPort)==rg_db.naptIn[naptInIdx].rtk_naptIn.remHash) hit=1; #elif defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) if(_rtk_rg_NAPTRemoteHash_get(isTcp,destAddr,destPort)==rg_db.naptIn[naptInIdx].rtk_naptIn.remHash) hit=1; #endif } else { hit=1; } if(hit==1) { naptOutIdx=i; rg_db.naptOut[naptOutIdx].idleSecs = 0; rg_db.naptIn[rg_db.naptOut[naptOutIdx].rtk_naptOut.hashIdx].idleSecs = 0; DEBUG("naptOutIdx=%d",naptOutIdx); return naptOutIdx; } } } } //FIXME("NAPT 4-ways not found!"); #endif //Found NAPT out flow in software link list (idx >= 2048) pNaptOutList=rg_db.pNaptOutHashListHead[naptHashOutIdx]; while(pNaptOutList!=NULL) { if(rg_db.naptOut[pNaptOutList->idx].rtk_naptOut.valid!=0) { int naptInIdx=rg_db.naptOut[pNaptOutList->idx].rtk_naptOut.hashIdx; //TRACE("hashidx=%d sip=(%x,%x) sport=(%d,%d), tcp=(%d,%d)",naptHashOutIdx,srcAddr,rg_db.naptIn[naptInIdx].rtk_naptIn.intIp,srcPort,rg_db.naptIn[naptInIdx].rtk_naptIn.intPort,isTcp,rg_db.naptIn[naptInIdx].rtk_naptIn.isTcp); if((srcAddr==rg_db.naptIn[naptInIdx].rtk_naptIn.intIp)&& (srcPort==rg_db.naptIn[naptInIdx].rtk_naptIn.intPort)&& (destAddr==rg_db.naptOut[pNaptOutList->idx].remoteIp)&& (destPort==rg_db.naptOut[pNaptOutList->idx].remotePort)&& (isTcp==rg_db.naptIn[naptInIdx].rtk_naptIn.isTcp)) { int hit=0; if(rg_db.naptIn[naptInIdx].coneType==NAPT_IN_TYPE_SYMMETRIC_NAPT) { #if defined(CONFIG_APOLLO) 
if(_rtk_rg_NAPTRemoteHash_get(destAddr,destPort)==rg_db.naptIn[naptInIdx].rtk_naptIn.remHash) hit=1; #elif defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) if(_rtk_rg_NAPTRemoteHash_get(isTcp,destAddr,destPort)==rg_db.naptIn[naptInIdx].rtk_naptIn.remHash) hit=1; #endif } else { hit=1; } if(hit==1) { naptOutIdx=pNaptOutList->idx; rg_db.naptOut[naptOutIdx].idleSecs = 0; rg_db.naptIn[rg_db.naptOut[naptOutIdx].rtk_naptOut.hashIdx].idleSecs = 0; //rtlglue_printf("naptOutIdx=%d\n",naptOutIdx); DEBUG("naptOutIdx=%d\n",naptOutIdx); return naptOutIdx; } } } pNaptOutList=pNaptOutList->pNext; } //FIXME("NAPT OUT flow not found in FwdEngine!%p",rg_db.pNaptOutFreeListHead); return RG_RET_LOOKUPIDX_NOT_FOUND; } #if defined(CONFIG_APOLLO_ROMEDRIVER) || defined(CONFIG_XDSL_ROMEDRIVER) /* outbound search by pPktHdr and isTcp ,return a match index(software/hw) by pPktHdr */ rtk_rg_lookupIdxReturn_t _rtk_rg_naptTcpUdpOutHashIndexLookupByPktHdr(int8 isTcp, rtk_rg_pktHdr_t *pPktHdr) { uint32 naptHashOutIdx; int32 naptOutIdx; rtk_rg_table_naptOut_linkList_t *pNaptOutList; ipaddr_t srcAddr=pPktHdr->ipv4Sip; uint16 srcPort=pPktHdr->sport; ipaddr_t destAddr=pPktHdr->ipv4Dip; uint16 destPort=pPktHdr->dport; #if !defined(CONFIG_RG_FLOW_BASED_PLATFORM) //flow-based platform only support sw napt //20160729LUKE: prevent virtualserver create hw_napt, but L2TP always search sw_napt and be dropped. if(1)//(!pPktHdr->addNaptSwOnly) { return _rtk_rg_naptTcpUdpOutHashIndexLookup(isTcp,srcAddr,srcPort,destAddr,destPort); } else #endif { naptHashOutIdx=_rtk_rg_naptTcpUdpOutHashIndex(isTcp,srcAddr,srcPort,destAddr,destPort); //Found NAPT out flow in software link list (idx >= 2048) pNaptOutList=rg_db.pNaptOutHashListHead[naptHashOutIdx]; while(pNaptOutList!=NULL) { if(rg_db.naptOut[pNaptOutList->idx].rtk_naptOut.valid!=0) { int naptInIdx=rg_db.naptOut[pNaptOutList->idx].rtk_naptOut.hashIdx; if((srcAddr==rg_db.naptIn[naptInIdx].rtk_naptIn.intIp)&& (srcPort==rg_db.naptIn[naptInIdx].rtk_naptIn.intPort)&& (destAddr==rg_db.naptOut[pNaptOutList->idx].remoteIp)&& (destPort==rg_db.naptOut[pNaptOutList->idx].remotePort)&& (isTcp==rg_db.naptIn[naptInIdx].rtk_naptIn.isTcp)) { int hit=0; if(rg_db.naptIn[naptInIdx].coneType==NAPT_IN_TYPE_SYMMETRIC_NAPT) { #if defined(CONFIG_APOLLO) if(_rtk_rg_NAPTRemoteHash_get(destAddr,destPort)==rg_db.naptIn[naptInIdx].rtk_naptIn.remHash) hit=1; #elif defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) if(_rtk_rg_NAPTRemoteHash_get(isTcp,destAddr,destPort)==rg_db.naptIn[naptInIdx].rtk_naptIn.remHash) hit=1; #endif } else { hit=1; } if(hit==1) { naptOutIdx=pNaptOutList->idx; rg_db.naptOut[naptOutIdx].idleSecs = 0; rg_db.naptIn[rg_db.naptOut[naptOutIdx].rtk_naptOut.hashIdx].idleSecs = 0; //rtlglue_printf("naptOutIdx=%d\n",naptOutIdx); DEBUG("naptOutIdx=%d\n",naptOutIdx); return naptOutIdx; } } } pNaptOutList=pNaptOutList->pNext; } //FIXME("NAPT OUT flow not found in FwdEngine!%p",rg_db.pNaptOutFreeListHead); return RG_RET_LOOKUPIDX_NOT_FOUND; } } #endif /* use inbound index to find outbound index */ rtk_rg_lookupIdxReturn_t _rtk_rg_naptTcpUdpOutIdxLookupByInIdx(int8 isTcp, ipaddr_t remoteAddr, uint16 remotePort, ipaddr_t extAddr, uint16 extPort,int inIdx) { int naptInIdx, naptHashOutIdx; #if !defined(CONFIG_RG_FLOW_BASED_PLATFORM) //flow-based platform only support sw napt int naptOutIdx; int j; #endif if(rg_db.naptIn[inIdx].rtk_naptIn.valid==ASIC_NAPT_IN_TYPE_PORT_RESTRICTED_CONE) { #if defined (CONFIG_APOLLO) #if defined(CONFIG_RTL9602C_SERIES) || defined(CONFIG_RG_FLOW_BASED_PLATFORM) DEBUG("externalPort=%x remotePort=%x 
remHash=%x testHash=%x\n",rg_db.naptIn[inIdx].rtk_naptIn.extPortHSB<<8|rg_db.naptIn[inIdx].rtk_naptIn.extPortLSB,extPort,rg_db.naptIn[inIdx].rtk_naptIn.remHash,_rtk_rg_NAPTRemoteHash_get(remoteAddr,remotePort)); if((((rg_db.naptIn[inIdx].rtk_naptIn.extPortHSB<<8)|rg_db.naptIn[inIdx].rtk_naptIn.extPortLSB)==extPort)&& (rg_db.naptIn[inIdx].rtk_naptIn.isTcp==isTcp)&& (rg_db.naptIn[inIdx].rtk_naptIn.remHash==_rtk_rg_NAPTRemoteHash_get(remoteAddr,remotePort))) #else DEBUG("LSB=%x remotePort=%x remHash=%x testHash=%x\n",rg_db.naptIn[inIdx].rtk_naptIn.extPortLSB,extPort,rg_db.naptIn[inIdx].rtk_naptIn.remHash,_rtk_rg_NAPTRemoteHash_get(remoteAddr,remotePort)); if((rg_db.naptIn[inIdx].rtk_naptIn.extPortLSB==(extPort&0xff))&& (rg_db.naptIn[inIdx].rtk_naptIn.isTcp==isTcp)&& (rg_db.naptIn[inIdx].rtk_naptIn.remHash==_rtk_rg_NAPTRemoteHash_get(remoteAddr,remotePort))) #endif #elif defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) //DEBUG("LSB=%x remotePort=%x remHash=%x testHash=%x\n",rg_db.naptIn[i].rtk_naptIn.extPortLSB,extPort,rg_db.naptIn[i].rtk_naptIn.remHash,_rtk_rg_NAPTRemoteHash_get(isTcp,remoteAddr,remotePort)); if((rg_db.naptIn[inIdx].rtk_naptIn.extPortLSB==(extPort&0xff))&& (rg_db.naptIn[inIdx].rtk_naptIn.isTcp==isTcp)&& (rg_db.naptIn[inIdx].rtk_naptIn.remHash==_rtk_rg_NAPTRemoteHash_get(isTcp,remoteAddr,remotePort))) #endif { naptInIdx=inIdx; naptHashOutIdx=_rtk_rg_naptTcpUdpOutHashIndex(isTcp,rg_db.naptIn[inIdx].rtk_naptIn.intIp,rg_db.naptIn[inIdx].rtk_naptIn.intPort,remoteAddr,remotePort); #if !defined(CONFIG_RG_FLOW_BASED_PLATFORM) //flow-based platform only support sw napt naptOutIdx=naptHashOutIdx<<2; for(j=naptOutIdx;j<naptOutIdx+4;j++) { if(rg_db.naptOut[j].rtk_naptOut.valid!=0) { if(rg_db.naptOut[j].rtk_naptOut.hashIdx==naptInIdx) { return j; //naptOutIdx } } } #endif //not found in 4way, found from link list. { rtk_rg_table_naptOut_linkList_t *pNaptOutList; pNaptOutList=rg_db.pNaptOutHashListHead[naptHashOutIdx]; while(pNaptOutList!=NULL) { if(rg_db.naptOut[pNaptOutList->idx].rtk_naptOut.hashIdx==naptInIdx) { return pNaptOutList->idx; //naptOutIdx } pNaptOutList=pNaptOutList->pNext; } } FIXME("NAPTR found[%d], but NAPT 4-ways and link list not found! rip=0x%x rport=%d eip=0x%x eport=%d",naptInIdx,remoteAddr,remotePort,extAddr,extPort); return RG_RET_LOOKUPIDX_NOT_FOUND; } } #if defined(CONFIG_APOLLO) //xdsl only support ASIC_NAPT_IN_TYPE_PORT_RESTRICTED_CONE else if(rg_db.naptIn[inIdx].rtk_naptIn.valid==ASIC_NAPT_IN_TYPE_FULL_CONE) { #if defined(CONFIG_RTL9602C_SERIES) || defined(CONFIG_RG_FLOW_BASED_PLATFORM) if((((rg_db.naptIn[inIdx].rtk_naptIn.extPortHSB<<8) | rg_db.naptIn[inIdx].rtk_naptIn.extPortLSB)==extPort)&& (rg_db.naptIn[inIdx].rtk_naptIn.isTcp==isTcp)) #else if((rg_db.naptIn[inIdx].rtk_naptIn.extPortLSB==(extPort&0xff))&& (rg_db.naptIn[inIdx].rtk_naptIn.isTcp==isTcp)) #endif { naptInIdx=inIdx; TRACE("full cone naptInIdx=%d",inIdx); naptHashOutIdx=_rtk_rg_naptTcpUdpOutHashIndex(isTcp,rg_db.naptIn[inIdx].rtk_naptIn.intIp,rg_db.naptIn[inIdx].rtk_naptIn.intPort,remoteAddr,remotePort); #if !defined(CONFIG_RG_FLOW_BASED_PLATFORM) //flow-based platform only support sw napt naptOutIdx=naptHashOutIdx<<2; for(j=naptOutIdx;j<naptOutIdx+4;j++) { if(rg_db.naptOut[j].rtk_naptOut.valid!=0) { //if(rg_db.naptOut[j].rtk_naptOut.hashIdx==naptInIdx) if((rg_db.naptOut[j].rtk_naptOut.hashIdx==naptInIdx)&&(rg_db.naptOut[j].remoteIp==remoteAddr)&&(rg_db.naptOut[j].remotePort==remotePort)) { TRACE("full cone HW naptOutIdx=%d",j); return j; //naptOutIdx } } } #endif //not found in 4way, found from link list. 
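			/* NOTE: the hardware NAPT/NAPT-R tables are 4-way set-associative (absolute index
			 * = (hash << 2) + way, way 0..3); software-only entries beyond the hardware range
			 * are chained in per-bucket linked lists (pNaptOutHashListHead/pNaptInHashListHead).
			 * The fragment below is only an illustrative sketch of that bucket walk and is kept
			 * inside #if 0, i.e. it is not part of the lookup. */
#if 0
			{
				int way, hitIdx = -1;
				for(way=0; way<4; way++)
				{
					int absIdx = (naptHashOutIdx<<2) + way;		/* bucket*4 + way */
					if(rg_db.naptOut[absIdx].rtk_naptOut.valid!=0 &&
						rg_db.naptOut[absIdx].rtk_naptOut.hashIdx==naptInIdx)
					{
						hitIdx = absIdx;	/* hit in the 4-way hardware bucket */
						break;
					}
				}
				/* hitIdx < 0: fall back to the per-bucket software link list, as done below. */
			}
#endif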
{ rtk_rg_table_naptOut_linkList_t *pNaptOutList; pNaptOutList=rg_db.pNaptOutHashListHead[naptHashOutIdx]; while(pNaptOutList!=NULL) { //if(rg_db.naptOut[pNaptOutList->idx].rtk_naptOut.hashIdx==naptInIdx) //DEBUG("hashIdx=%d %d rg_db.naptOut[j].remoteIp=%x %x rg_db.naptOut[j].remotePort=%d %d",rg_db.naptOut[pNaptOutList->idx].rtk_naptOut.hashIdx,naptInIdx,rg_db.naptOut[j].remoteIp,remoteAddr,rg_db.naptOut[j].remotePort,remotePort); if((rg_db.naptOut[pNaptOutList->idx].rtk_naptOut.hashIdx==naptInIdx)&&(rg_db.naptOut[pNaptOutList->idx].remoteIp==remoteAddr)&&(rg_db.naptOut[pNaptOutList->idx].remotePort==remotePort)) { TRACE("full cone SW naptOutIdx=%d",pNaptOutList->idx); return pNaptOutList->idx; //naptOutIdx(can't hw acc) } pNaptOutList=pNaptOutList->pNext; } } return RG_RET_LOOKUPIDX_ONLY_INBOUND_FOUND; //inbound is full cone but outbound is not created yet. } } else { FIXME("non-exist path"); } #endif //end defined(CONFIG_APOLLO) return RG_RET_LOOKUPIDX_NOT_FOUND; } /* use inbound info(isTcp,remtroAddr,remotePort,extAddr,extPort) find outbound index */ rtk_rg_lookupIdxReturn_t _rtk_rg_naptTcpUdpInHashIndexLookup(int8 isTcp, ipaddr_t remoteAddr, uint16 remotePort, ipaddr_t extAddr, uint16 extPort) { int naptHashInIdx, naptOutIdx; rtk_rg_table_naptIn_linkList_t *pNaptInLinkList; #if !defined(CONFIG_RG_FLOW_BASED_PLATFORM) //flow-based platform only support sw napt int naptInIdx; #if defined(CONFIG_APOLLO) { int i; naptHashInIdx=_rtk_rg_naptTcpUdpInHashIndex(isTcp,extAddr,extPort); naptInIdx=naptHashInIdx<<2; for(i=naptInIdx;i<naptInIdx+4;i++) { naptOutIdx=_rtk_rg_naptTcpUdpOutIdxLookupByInIdx(isTcp,remoteAddr,remotePort,extAddr,extPort,i); if(naptOutIdx!=RG_RET_LOOKUPIDX_NOT_FOUND) //>=0:means valid outIdx, -2:means fullcone without outbound return naptOutIdx; } } #elif defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) naptHashInIdx=_rtk_rg_naptTcpUdpInHashIndex(isTcp,remoteAddr,remotePort,extAddr,extPort); naptInIdx=naptHashInIdx; //865x only 1-way naptHashInIdx=naptHashInIdx>>2;//change to hash_inidx // for(i=naptInIdx;i<naptInIdx+4;i++) // { naptOutIdx=_rtk_rg_naptTcpUdpOutIdxLookupByInIdx(isTcp,remoteAddr,remotePort,extAddr,extPort,naptInIdx); if(naptOutIdx!=RG_RET_LOOKUPIDX_NOT_FOUND) return naptOutIdx; // } #endif #else // CONFIG_RG_FLOW_BASED_PLATFORM naptHashInIdx=_rtk_rg_naptTcpUdpInHashIndex(isTcp,extAddr,extPort); #endif //DEBUG("NAPTR 4-ways[%d~%d] not found! rip=0x%x rport=%d eip=0x%x eport=%d",naptInIdx,naptInIdx+3,remoteAddr,remotePort,extAddr,extPort); //found in link list pNaptInLinkList=rg_db.pNaptInHashListHead[naptHashInIdx]; while(pNaptInLinkList!=NULL) { naptOutIdx=_rtk_rg_naptTcpUdpOutIdxLookupByInIdx(isTcp,remoteAddr,remotePort,extAddr,extPort,pNaptInLinkList->idx); if(naptOutIdx!=RG_RET_LOOKUPIDX_NOT_FOUND) //>=0:means valid outIdx, -2:means fullcone without outbound return naptOutIdx; pNaptInLinkList=pNaptInLinkList->pNext; } DEBUG("NAPTR 4-ways and link list not found! 
naptHashInIdx=%d rip=0x%x rport=%d eip=0x%x eport=%d",naptHashInIdx,remoteAddr,remotePort,extAddr,extPort); return RG_RET_LOOKUPIDX_NOT_FOUND; } int32 _rtk_rg_delNaptShortCutEntrybyOutboundIdx(int outIdx) { int ret=FAIL; #ifdef CONFIG_ROME_NAPT_SHORTCUT int i,j,inIdx; rtk_rg_napt_shortcut_t *pNaptSc; //Delete shortcut entry inIdx=rg_db.naptOut[outIdx].rtk_naptOut.hashIdx; #if 1 //Out i=_rtk_rg_shortcutHashIndex(rg_db.naptIn[inIdx].rtk_naptIn.intIp, rg_db.naptOut[outIdx].remoteIp, rg_db.naptIn[inIdx].rtk_naptIn.intPort, rg_db.naptOut[outIdx].remotePort); for(j=i;j<i+MAX_NAPT_SHORTCUT_WAYS;j++) { pNaptSc=&rg_db.naptShortCut[j]; if(pNaptSc->direction==NAPT_DIRECTION_OUTBOUND) { if(rg_db.naptIn[inIdx].rtk_naptIn.intIp!=pNaptSc->sip) continue; if(rg_db.naptIn[inIdx].rtk_naptIn.intPort!=pNaptSc->sport) continue; if(rg_db.naptOut[outIdx].remotePort!=pNaptSc->dport) continue; if(rg_db.naptOut[outIdx].remoteIp!=pNaptSc->dip) continue; if(pNaptSc->isTcp!=rg_db.naptIn[inIdx].rtk_naptIn.isTcp) continue; TABLE("Clear NAPT shortcut [%d] for NAPT out[%d]\n",j,outIdx); //pNaptSc->sip=0; _rtk_rg_v4ShortCut_delete(j); //memset(pNaptSc,0,sizeof(rtk_rg_napt_shortcut_t)); ret=SUCCESS; break; } } //In i=_rtk_rg_shortcutHashIndex(rg_db.naptOut[outIdx].remoteIp, rg_db.extip[rg_db.naptIn[inIdx].rtk_naptIn.extIpIdx].rtk_extip.extIpAddr, rg_db.naptOut[outIdx].remotePort, rg_db.naptOut[outIdx].extPort); for(j=i;j<i+MAX_NAPT_SHORTCUT_WAYS;j++) { pNaptSc=&rg_db.naptShortCut[j]; if(pNaptSc->direction==NAPT_DIRECTION_INBOUND) { if(rg_db.naptOut[outIdx].remoteIp!=pNaptSc->sip) continue; if(rg_db.naptOut[outIdx].extPort!=pNaptSc->dport) continue; if(rg_db.naptOut[outIdx].remotePort!=pNaptSc->sport) continue; if(rg_db.extip[rg_db.naptIn[inIdx].rtk_naptIn.extIpIdx].rtk_extip.extIpAddr!=pNaptSc->dip) continue; if(pNaptSc->isTcp!=rg_db.naptIn[inIdx].rtk_naptIn.isTcp) continue; TABLE("Clear NAPT shortcut [%d] for NAPT in[%d]\n",j,inIdx); //pNaptSc->sip=0; _rtk_rg_v4ShortCut_delete(j); //memset(pNaptSc,0,sizeof(rtk_rg_napt_shortcut_t)); ret=SUCCESS; break; } } #else for(i=0;i<MAX_NAPT_SHORTCUT_SIZE;i++) { rtk_rg_napt_shortcut_t *pNaptSc; pNaptSc=&rg_db.naptShortCut[i]; if(pNaptSc->isNapt==0) continue; if((rg_db.naptIn[inIdx].rtk_naptIn.isTcp)^(pNaptSc->isTcp)) continue; //Out #if 0 if((pNaptSc->direction==NAPT_DIRECTION_OUTBOUND) && (rg_db.naptIn[inIdx].rtk_naptIn.intIp==pNaptSc->sip) && (rg_db.naptIn[inIdx].rtk_naptIn.intPort==pNaptSc->sport) && (rg_db.naptIn[inIdx].remoteIp==pNaptSc->dip) && (rg_db.naptIn[inIdx].remotePort==pNaptSc->dport)) #else if((pNaptSc->direction==NAPT_DIRECTION_OUTBOUND) && (rg_db.naptIn[inIdx].rtk_naptIn.intIp==pNaptSc->sip) && (rg_db.naptIn[inIdx].rtk_naptIn.intPort==pNaptSc->sport) && (rg_db.naptOut[outIdx].remoteIp==pNaptSc->dip) && (rg_db.naptOut[outIdx].remotePort==pNaptSc->dport)) #endif { memset(pNaptSc,0,sizeof(rtk_rg_napt_shortcut_t)); TABLE("Clear NAPT shortcut [%d] for NAPT out[%d]\n",i,outIdx); ret=SUCCESS; } //IN #if 0 if((pNaptSc->direction==NAPT_DIRECTION_INBOUND) && (rg_db.naptIn[inIdx].remoteIp==pNaptSc->sip) && (rg_db.naptIn[inIdx].remotePort==pNaptSc->sport) && (rg_db.extip[rg_db.naptIn[inIdx].rtk_naptIn.extIpIdx].rtk_extip.extIpAddr==pNaptSc->dip) && (rg_db.naptOut[flow_idx].extPort==pNaptSc->dport)) #else if((pNaptSc->direction==NAPT_DIRECTION_INBOUND) && (rg_db.naptOut[outIdx].remoteIp==pNaptSc->sip) && (rg_db.naptOut[outIdx].remotePort==pNaptSc->sport) && (rg_db.extip[rg_db.naptIn[inIdx].rtk_naptIn.extIpIdx].rtk_extip.extIpAddr==pNaptSc->dip) && 
(rg_db.naptOut[outIdx].extPort==pNaptSc->dport)) #endif { memset(pNaptSc,0,sizeof(rtk_rg_napt_shortcut_t)); TABLE("Clear NAPT shortcut [%d] for NAPT in[%d]\n",i,inIdx); ret=SUCCESS; } } #endif #endif return ret; } /* using flow_idx(outbound_index) delete napt sw and hw table (0-4095) XDSL:FIXME need more check */ rtk_rg_err_code_t rtk_rg_apollo_naptConnection_del(int flow_idx) { int retval=0; rtk_l34_naptInbound_entry_t asic_naptr; rtk_l34_naptOutbound_entry_t asic_napt; int inIdx,wanIntfIdx; rtk_rg_naptInfo_t naptInfo; int hasFree; uint16 extPort; //Check input parameters if(rg_db.systemGlobal.vlanInit==0) RETURN_ERR(RT_ERR_RG_NOT_INIT); if( (flow_idx<0) || (flow_idx>=MAX_NAPT_OUT_SW_TABLE_SIZE)) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); //Find ASIC NAPT entry //Check whether entry is inused rg_lock(&rg_kernel.naptTableLock); //========================critical region start========================= if(rg_db.naptOut[flow_idx].state==0) { rg_unlock(&rg_kernel.naptTableLock); return (RT_ERR_RG_NAPT_SW_ENTRY_NOT_FOUND); } inIdx = rg_db.naptOut[flow_idx].rtk_naptOut.hashIdx; wanIntfIdx = rg_db.nexthop[rg_db.extip[rg_db.naptIn[inIdx].rtk_naptIn.extIpIdx].rtk_extip.nhIdx].rtk_nexthop.ifIdx; // Free ext port hasFree=_rtk_rg_naptExtPortFree(0,rg_db.naptIn[inIdx].rtk_naptIn.isTcp,rg_db.naptOut[flow_idx].extPort); //Fill NAPT info if(rg_db.systemGlobal.initParam.naptDelByHwCallBack != NULL || rg_db.systemGlobal.initParam.softwareNaptInfoDeleteCallBack != NULL){ //20151202LUKE: collect info from NAPT/NAPTR table. _rtk_rg_naptInfoCollectForCallback(flow_idx, &naptInfo); } #if defined(CONFIG_RG_FLOW_BASED_PLATFORM) //Delete HW flow entry ASSERT_EQ(_rtk_rg_flow_del_by_naptOutIdx(flow_idx, TRUE), RT_ERR_RG_OK); #endif //Delete shortcut entry _rtk_rg_delNaptShortCutEntrybyOutboundIdx(flow_idx); //outbound link-list (pure sw enties) if(flow_idx>=MAX_NAPT_OUT_HW_TABLE_SIZE) { int hashIdx=rg_db.naptOut[flow_idx].hashOutIdx; //DEBUG("del out flow_idx=%d",flow_idx); //remove from link list rtk_rg_table_naptOut_linkList_t *pNaptOutList,*pPreNaptOutList; pPreNaptOutList=rg_db.pNaptOutHashListHead[hashIdx]; pNaptOutList=pPreNaptOutList; while(pNaptOutList!=NULL) { if(pNaptOutList->idx==flow_idx) { //remove from hashIdx list if(pPreNaptOutList==pNaptOutList) rg_db.pNaptOutHashListHead[hashIdx]=pNaptOutList->pNext; else pPreNaptOutList->pNext=pNaptOutList->pNext; //add to free list pNaptOutList->pNext=rg_db.pNaptOutFreeListHead; rg_db.pNaptOutFreeListHead=pNaptOutList; break; } pPreNaptOutList=pNaptOutList; pNaptOutList=pNaptOutList->pNext; } } extPort=rg_db.naptOut[flow_idx].extPort; TABLE("Del NAPT_OUT: [%s][Local=0x%x:%d][WANIF=%d:%d][Remote=0x%x:%d][IDX:%d]\n" ,(rg_db.naptIn[inIdx].rtk_naptIn.isTcp==1)?"TCP":"UDP" ,rg_db.naptIn[inIdx].rtk_naptIn.intIp ,rg_db.naptIn[inIdx].rtk_naptIn.intPort ,wanIntfIdx ,extPort ,rg_db.naptOut[flow_idx].remoteIp ,rg_db.naptOut[flow_idx].remotePort,flow_idx); //Sync to ASIC NAPT table memset(&asic_napt,0,sizeof(rtk_l34_naptOutbound_entry_t)); retval = RTK_L34_NAPTOUTBOUNDTABLE_SET(1,flow_idx,&asic_napt); if(retval!=RT_ERR_OK) { rg_unlock(&rg_kernel.naptTableLock); RETURN_ERR(RT_ERR_RG_NAPT_SET_FAIL); } //Sync to SW NAPT table rg_db.naptOut[flow_idx].state=INVALID; rg_db.naptOut[flow_idx].remoteIp=0; rg_db.naptOut[flow_idx].remotePort=0; rg_db.naptOut[flow_idx].extPort=0; rg_db.naptOut[flow_idx].hashOutIdx=0; rg_db.naptOut[flow_idx].idleSecs=0; rg_db.naptValidSet[flow_idx>>5] &= ~(0x1<<(flow_idx&31)); #if defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) if(flow_idx<MAX_NAPT_OUT_HW_TABLE_SIZE){ 
		//idx < MAX_NAPT_OUT_HW_TABLE_SIZE means this entry also lives in the HW NAPT table, so sync the naptIn side as well
		rg_db.naptIn[flow_idx].hashIdx=0;
		rg_db.naptIn[flow_idx].idleSecs=0;
		rg_db.naptIn[flow_idx].coneType=NAPT_IN_TYPE_SYMMETRIC_NAPT;
		rg_db.naptIn[flow_idx].symmetricNaptOutIdx=0;
		//free hw entry
		rg_db.naptIn[flow_idx].rtk_naptIn.valid=0;
	}
#endif

	//inbound link-list (pure sw entries)
	if(inIdx>=MAX_NAPT_IN_HW_TABLE_SIZE)
	{
		int hashIdx=rg_db.naptIn[inIdx].hashIdx;
		//DEBUG("del in flow_idx=%d",inIdx);
		//remove from link list
		rtk_rg_table_naptIn_linkList_t *pNaptInList,*pPreNaptInList;
		pPreNaptInList=rg_db.pNaptInHashListHead[hashIdx];
		pNaptInList=pPreNaptInList;
		while(pNaptInList!=NULL)
		{
			if(pNaptInList->idx==inIdx)
			{
				//remove from hashIdx list
				if(pPreNaptInList==pNaptInList)
					rg_db.pNaptInHashListHead[hashIdx]=pNaptInList->pNext;
				else
					pPreNaptInList->pNext=pNaptInList->pNext;
				//add to free list
				pNaptInList->pNext=rg_db.pNaptInFreeListHead;
				rg_db.pNaptInFreeListHead=pNaptInList;
				break;
			}
			pPreNaptInList=pNaptInList;
			pNaptInList=pNaptInList->pNext;
		}
	}

	//Sync. to ASIC NAPT-R table
	if((rg_db.naptIn[inIdx].rtk_naptIn.valid==ASIC_NAPT_IN_TYPE_PORT_RESTRICTED_CONE)||
		((rg_db.naptIn[inIdx].rtk_naptIn.valid!=ASIC_NAPT_IN_TYPE_PORT_RESTRICTED_CONE)&&(hasFree==1)))
	{
		TABLE("Del NAPT_IN: [%s][Local=0x%x:%d][WANIF=%d:%d][IDX:%d][TYPE:%s]\n"
			,(rg_db.naptIn[inIdx].rtk_naptIn.isTcp==1)?"TCP":"UDP"
			,rg_db.naptIn[inIdx].rtk_naptIn.intIp
			,rg_db.naptIn[inIdx].rtk_naptIn.intPort
			,wanIntfIdx
			,extPort
			,inIdx
			,(rg_db.naptIn[inIdx].coneType==NAPT_IN_TYPE_SYMMETRIC_NAPT)?"SYMMETRIC_NAPT":((rg_db.naptIn[inIdx].coneType==NAPT_IN_TYPE_FULL_CONE)?"FULL_CONE":"RESTRICTED_CONE")
			);
		memset(&asic_naptr,0,sizeof(rtk_l34_naptInbound_entry_t));
		retval = RTK_L34_NAPTINBOUNDTABLE_SET(1,inIdx,&asic_naptr);
		if(retval!=RT_ERR_OK)
		{
			rg_unlock(&rg_kernel.naptTableLock);
			RETURN_ERR(RT_ERR_RG_NAPTR_SET_FAIL);
		}
		//Sync.
to SW NAPT-R table #if 0 rg_db.naptIn[inIdx].remoteIp=0; rg_db.naptIn[inIdx].remotePort=0; #endif rg_db.naptIn[inIdx].hashIdx=0; rg_db.naptIn[inIdx].idleSecs=0; rg_db.naptIn[inIdx].coneType=NAPT_IN_TYPE_SYMMETRIC_NAPT; rg_db.naptIn[inIdx].symmetricNaptOutIdx=0; #if defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) if(inIdx<MAX_NAPT_OUT_HW_TABLE_SIZE){ //idx < MAX_NAPT_OUT_HW_TABLE_SIZE only a napt hw table,sync to naptout rg_db.naptOut[inIdx].state=INVALID; rg_db.naptOut[inIdx].remoteIp=0; rg_db.naptOut[inIdx].remotePort=0; rg_db.naptOut[inIdx].extPort=0; rg_db.naptOut[inIdx].hashOutIdx=0; rg_db.naptOut[inIdx].idleSecs=0; rg_db.naptValidSet[inIdx>>5] &= ~(0x1<<(inIdx&31)); //free hw entry rg_db.naptOut[inIdx].rtk_naptOut.valid=0; } #endif //call callback function if(rg_db.systemGlobal.initParam.naptDelByHwCallBack != NULL) { rg_db.systemGlobal.initParam.naptDelByHwCallBack(&naptInfo); } if(rg_db.systemGlobal.initParam.softwareNaptInfoDeleteCallBack != NULL) { rg_db.systemGlobal.initParam.softwareNaptInfoDeleteCallBack(&naptInfo); } } rg_unlock(&rg_kernel.naptTableLock); //========================critical region end========================= return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_naptConnection_find(rtk_rg_naptInfo_t *naptEntry,int *valid_idx) { int i; int inIdx=0,wanIntfIdx; rtk_rg_naptEntry_t *naptFlow; naptFlow=&naptEntry->naptTuples; //Check input parameters if(rg_db.systemGlobal.vlanInit==0) RETURN_ERR(RT_ERR_RG_NOT_INIT); wanIntfIdx = rg_db.nexthop[rg_db.extip[rg_db.naptIn[inIdx].rtk_naptIn.extIpIdx].rtk_extip.nhIdx].rtk_nexthop.ifIdx; if(*valid_idx==-1) { rtk_rg_lookupIdxReturn_t outIdx = _rtk_rg_naptTcpUdpOutHashIndexLookup(naptEntry->naptTuples.is_tcp,naptEntry->naptTuples.local_ip,naptEntry->naptTuples.local_port,naptEntry->naptTuples.remote_ip,naptEntry->naptTuples.remote_port); if(outIdx!=RG_RET_LOOKUPIDX_NOT_FOUND) { inIdx=rg_db.naptOut[outIdx].rtk_naptOut.hashIdx; naptEntry->state = rg_db.naptOut[outIdx].state; naptEntry->idleSecs = rg_db.naptOut[outIdx].idleSecs; naptEntry->naptTuples.wan_intf_idx = wanIntfIdx; naptEntry->naptTuples.inbound_pri_valid = rg_db.naptIn[inIdx].rtk_naptIn.priValid; naptEntry->naptTuples.inbound_priority = rg_db.naptIn[inIdx].rtk_naptIn.priId; naptEntry->naptTuples.external_port = rg_db.naptOut[outIdx].extPort; naptEntry->naptTuples.outbound_pri_valid = rg_db.naptOut[outIdx].rtk_naptOut.priValid; naptEntry->naptTuples.outbound_priority = rg_db.naptOut[outIdx].rtk_naptOut.priValue; *valid_idx = (int)outIdx; return (RT_ERR_RG_OK); } return (RT_ERR_RG_NAPT_SW_ENTRY_NOT_FOUND); } if(MAX_NAPT_OUT_SW_TABLE_SIZE!=0) { for(i=*valid_idx;i<MAX_NAPT_OUT_SW_TABLE_SIZE;i++) { if(rg_db.naptOut[i].rtk_naptOut.valid==0) continue; inIdx=rg_db.naptOut[i].rtk_naptOut.hashIdx; naptFlow->external_port=rg_db.naptOut[i].extPort; naptFlow->is_tcp=rg_db.naptIn[inIdx].rtk_naptIn.isTcp; naptFlow->local_ip=rg_db.naptIn[inIdx].rtk_naptIn.intIp; #if 0 naptFlow->remote_ip=rg_db.naptIn[inIdx].remoteIp; naptFlow->remote_port=rg_db.naptIn[inIdx].remotePort; #else naptFlow->remote_ip=rg_db.naptOut[i].remoteIp; naptFlow->remote_port=rg_db.naptOut[i].remotePort; #endif naptFlow->wan_intf_idx=wanIntfIdx; naptFlow->local_port=rg_db.naptIn[inIdx].rtk_naptIn.intPort; naptFlow->inbound_pri_valid=rg_db.naptIn[inIdx].rtk_naptIn.priValid; naptFlow->inbound_priority=rg_db.naptIn[inIdx].rtk_naptIn.priId; naptFlow->outbound_pri_valid=rg_db.naptOut[i].rtk_naptOut.priValid; naptFlow->outbound_priority=rg_db.naptOut[i].rtk_naptOut.priValue; naptEntry->idleSecs=rg_db.naptOut[i].idleSecs; 
naptEntry->state=rg_db.naptOut[i].state; naptEntry->pContext=&rg_db.naptOut[i].pContext; *valid_idx=i; return (RT_ERR_RG_OK); } } return (RT_ERR_RG_NAPT_SW_ENTRY_NOT_FOUND); } rtk_rg_entryGetReturn_t _rtk_rg_findAndReclamL2mcEntry(rtk_mac_t *mac,int fid) { int l2Idx,search_index,count=0,first_invalid=-1; rtk_rg_lut_linkList_t *pLutBCAMList; l2Idx=_rtk_rg_hash_mac_fid_efid(mac->octet,fid,0); //FIXME:EFID is 0 now l2Idx<<=2; do { search_index = l2Idx+count; //DEBUG("search_idx is %d\n",search_index); if(rg_db.lut[search_index].valid==0) { if(first_invalid==-1) first_invalid=search_index; //break; //empty, just add count++; //search from next entry continue; } if(rg_db.lut[search_index].rtk_lut.entryType==RTK_LUT_L2MC && (memcmp(rg_db.lut[search_index].rtk_lut.entry.l2McEntry.mac.octet,mac->octet,ETHER_ADDR_LEN)==0) && rg_db.lut[search_index].rtk_lut.entry.l2McEntry.fid==fid) { // DEBUG("match!! froced replace LUT entry[%d] for MC entry!!",search_index); break; } count++; //search from next entry }while(count < 4); if(count==4){ //lookup BCAM first! #if defined(CONFIG_RTL9600_SERIES) list_for_each_entry(pLutBCAMList,&rg_db.lutBCAMLinkListHead,lut_list) #else //support lut traffic bit list_for_each_entry(pLutBCAMList,&rg_db.lutBCAMTableHead[l2Idx>>2],lut_list) #endif { if(rg_db.lut[pLutBCAMList->idx].valid && rg_db.lut[pLutBCAMList->idx].rtk_lut.entryType==RTK_LUT_L2MC && (memcmp(rg_db.lut[pLutBCAMList->idx].rtk_lut.entry.l2McEntry.mac.octet,mac->octet,ETHER_ADDR_LEN)==0) && rg_db.lut[pLutBCAMList->idx].rtk_lut.entry.l2McEntry.fid==fid){ return pLutBCAMList->idx; } } if(first_invalid>=0) return first_invalid; //not in 4-way and bCAM, use the first invalid to add one! else{ count=_rtk_rg_layer2GarbageCollection(l2Idx); //check if there is asynchronus between software and hardware table if(count==4){ search_index=_rtk_rg_layer2LeastRecentlyUsedReplace(l2Idx); //replace the least recently used entry for new entry if(search_index==RG_RET_ENTRY_NOT_GET) { FIXME("must add software LUT entry for LUT entry full."); return RG_RET_ENTRY_NOT_GET; } }else search_index=l2Idx+count; } } return search_index; } rtk_rg_entryGetReturn_t _rtk_rg_findAndReclamL2mcEntryIVL(rtk_mac_t *mac,int vid) { #if defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) return -1; #else int l2Idx,search_index,count=0,first_invalid=-1; rtk_rg_lut_linkList_t *pLutBCAMList; l2Idx=_rtk_rg_hash_mac_vid_efid(mac->octet,vid,0); //FIXME:EFID is 0 now l2Idx<<=2; do { search_index = l2Idx+count; //DEBUG("search_idx is %d\n",search_index); if(rg_db.lut[search_index].valid==0) { if(first_invalid==-1) first_invalid=search_index; //break; //empty, just add count++; //search from next entry continue; } if(rg_db.lut[search_index].rtk_lut.entryType==RTK_LUT_L2MC && (memcmp(rg_db.lut[search_index].rtk_lut.entry.l2McEntry.mac.octet,mac->octet,ETHER_ADDR_LEN)==0) && (rg_db.lut[search_index].rtk_lut.entry.l2McEntry.flags&RTK_L2_MCAST_FLAG_IVL)&& rg_db.lut[search_index].rtk_lut.entry.l2McEntry.vid==vid) { // DEBUG("match!! froced replace LUT entry[%d] for MC entry!!",search_index); break; } count++; //search from next entry }while(count < 4); if(count==4){ //lookup BCAM first! 
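	/* NOTE: when all four ways of the hashed bucket are occupied, the lookup first walks the
	 * software BCAM list (one global list on RTL9600, otherwise one list per 4-way bucket),
	 * then falls back to the first invalid way, garbage collection, and finally
	 * least-recently-used replacement. */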
#if defined(CONFIG_RTL9600_SERIES) list_for_each_entry(pLutBCAMList,&rg_db.lutBCAMLinkListHead,lut_list) #else //support lut traffic bit list_for_each_entry(pLutBCAMList,&rg_db.lutBCAMTableHead[l2Idx>>2],lut_list) #endif { if(rg_db.lut[pLutBCAMList->idx].valid && rg_db.lut[pLutBCAMList->idx].rtk_lut.entryType==RTK_LUT_L2MC && (memcmp(rg_db.lut[pLutBCAMList->idx].rtk_lut.entry.l2McEntry.mac.octet,mac->octet,ETHER_ADDR_LEN)==0) && (rg_db.lut[pLutBCAMList->idx].rtk_lut.entry.l2McEntry.flags&RTK_L2_MCAST_FLAG_IVL)&& rg_db.lut[pLutBCAMList->idx].rtk_lut.entry.l2McEntry.vid==vid){ return pLutBCAMList->idx; } } if(first_invalid>=0) return first_invalid; //not in 4-way and bCAM, use the first invalid to add one! else{ count=_rtk_rg_layer2GarbageCollection(l2Idx); //check if there is asynchronus between software and hardware table if(count==4){ search_index=_rtk_rg_layer2LeastRecentlyUsedReplace(l2Idx); //replace the least recently used entry for new entry if(search_index==RG_RET_ENTRY_NOT_GET) { FIXME("must add software LUT entry for LUT entry full."); return RG_RET_ENTRY_NOT_GET; } }else search_index=l2Idx+count; } } return search_index; #endif } int32 _rtk_rg_apollo_l2MultiCastFlow_add(rtk_rg_l2MulticastFlow_t *l2McFlow,int *flow_idx) { #if defined(CONFIG_APOLLO) rtk_rg_entryGetReturn_t search_index; rtk_l2_mcastAddr_t lut; memset(&lut,0,sizeof(rtk_l2_mcastAddr_t)); if(l2McFlow->isIVL)lut.flags|=RTK_L2_MCAST_FLAG_IVL; lut.vid = l2McFlow->vlanID; lut.fid = LAN_FID; _rtk_rg_portmask_translator(l2McFlow->port_mask,&lut.portmask,&lut.ext_portmask); memcpy(lut.mac.octet,l2McFlow->mac.octet,6); #if defined(CONFIG_DUALBAND_CONCURRENT) && defined(CONFIG_RTL9600_SERIES) if((lut.portmask.bits[0]==(1<<RTK_RG_MAC_PORT_CPU))&&(lut.ext_portmask.bits[0]==4)) //Only to EXT1(slave wifi), modify priority. 
{ lut.flags|=RTK_L2_MCAST_FLAG_FWD_PRI; lut.priority=CONFIG_DEFAULT_TO_SLAVE_GMAC_PRI; } #endif if(l2McFlow->isIVL) search_index=_rtk_rg_findAndReclamL2mcEntryIVL(&lut.mac,lut.vid); else search_index=_rtk_rg_findAndReclamL2mcEntry(&lut.mac,lut.fid); TABLE("### L2 MULTICAST(%s) add at %d: %02x:%02x:%02x:%02x:%02x:%02x PortMask:0x%x ExtPortMask:0x%x ###\n",l2McFlow->isIVL?"IVL":"SVL",search_index, lut.mac.octet[0],lut.mac.octet[1],lut.mac.octet[2],lut.mac.octet[3],lut.mac.octet[4],lut.mac.octet[5], lut.portmask.bits[0],lut.ext_portmask.bits[0]); ASSERT_EQ(RTK_L2_MCASTADDR_ADD(&lut),RT_ERR_OK); ASSERT_EQ(search_index,lut.index); *flow_idx=lut.index; return (RT_ERR_RG_OK); #elif defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) return (RT_ERR_RG_OK); #endif } /* Martin ZHU add */ rtk_rg_err_code_t rtk_rg_apollo_l2MultiCastFlow_add(rtk_rg_l2MulticastFlow_t *l2McFlow,int *flow_idx) { rtk_l2_ipmcMode_t l2mode; int ret=0; if ( (RT_ERR_RG_OK != rtk_rg_l2_ipmcMode_get(&l2mode)) || (l2mode != LOOKUP_ON_MAC_AND_VID_FID) ) { /* ipv6 l2 mac entry not care ipmc mode */ if(!(l2McFlow->mac.octet[0] == 0x33 && l2McFlow->mac.octet[1] == 0x33)) return RT_ERR_RG_FAILED; } ret = _rtk_rg_apollo_l2MultiCastFlow_add(l2McFlow, flow_idx); return ret; } //RTL9602BVB_SERIES care dip/ivlsvl/vid_fid/ipfilterIdx/ipfilterEn if(!(includeMode||excludeMode)) {ipfilterIdx=0; ipfilterEn=0} //RTL9600_SERIES care sip/dip/ipfilterEn ipfilterEn=1 means include/exclude mode entry rtk_rg_entryGetReturn_t _rtk_rg_findAndReclamIpmcEntry(ipaddr_t sip,ipaddr_t dip,int32 ivlsvl,int32 vid_fid,int32 ipfilterIdx,int32 ipfilterEn) { #if defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) //FIXME return -1; #else //APOLLO int l2Idx,search_index,count=0,first_invalid=-1; rtk_rg_lut_linkList_t *pLutBCAMList; #if defined(CONFIG_RTL9602C_SERIES) l2Idx=_hash_dip_vidfid_sipidx_sipfilter(ivlsvl,dip,vid_fid,ipfilterIdx,ipfilterEn); #elif defined(CONFIG_RTL9600_SERIES) || defined(CONFIG_RTL9607C_SERIES) l2Idx=_rtk_rg_hash_sip_gip(sip,dip,0,vid_fid); //FIXME:EFID is 0 now #endif l2Idx<<=2; do { search_index = l2Idx+count; //DEBUG("search_idx is %d\n",search_index); if(rg_db.lut[search_index].valid==0) { if(first_invalid==-1) first_invalid=search_index; //break; //empty, just add count++; //search from next entry continue; } #if defined(CONFIG_RTL9602C_SERIES) if( (((rg_db.lut[search_index].rtk_lut.entry.ipmcEntry.flags & RTK_L2_IPMCAST_FLAG_IVL)>>7) ==ivlsvl) && (rg_db.lut[search_index].rtk_lut.entryType==RTK_LUT_L3MC) && (rg_db.lut[search_index].rtk_lut.entry.ipmcEntry.dip==dip) && ((rg_db.lut[search_index].rtk_lut.entry.ipmcEntry.flags & RTK_L2_IPMCAST_FLAG_SIP_FILTER)>>6 == ipfilterEn) && (rg_db.lut[search_index].rtk_lut.entry.ipmcEntry.sip_index==ipfilterIdx) ) { if(ivlsvl) { DEBUG("IVL match!! froced replace LUT entry[%d] for MC entry!!",search_index); if(rg_db.lut[search_index].rtk_lut.entry.ipmcEntry.vid== vid_fid) return search_index; } else { DEBUG("SVL match!! froced replace LUT entry[%d] for MC entry!!",search_index); if(rg_db.lut[search_index].rtk_lut.entry.ipmcEntry.fid== vid_fid) return search_index; } } #elif defined(CONFIG_RTL9600_SERIES) || defined(CONFIG_RTL9607C_SERIES) if((rg_db.lut[search_index].rtk_lut.entryType==RTK_LUT_L3MC) && (rg_db.lut[search_index].rtk_lut.entry.ipmcEntry.dip==dip)&& (rg_db.lut[search_index].rtk_lut.entry.ipmcEntry.sip==sip)) { if(rg_db.lut[search_index].rtk_lut.entry.ipmcEntry.flags&RTK_L2_IPMCAST_FLAG_DIP_ONLY && ipfilterEn==0) DEBUG(" lutP4 match multicast routing entry!! 
froced replace LUT entry[%d] for MC entry!!",search_index); else if( (!(rg_db.lut[search_index].rtk_lut.entry.ipmcEntry.flags&RTK_L2_IPMCAST_FLAG_DIP_ONLY)) && ipfilterEn==1) DEBUG(" lutP3 match multicast include/exculde entry!! froced replace LUT entry[%d] for MC entry!!",search_index); else WARNING("config Error flag=%d ipfilterEn =%d entry[%d]",rg_db.lut[search_index].rtk_lut.entry.ipmcEntry.flags&RTK_L2_IPMCAST_FLAG_DIP_ONLY,ipfilterEn,search_index); return search_index; } #endif count++; //search from next entry }while(count < 4); if(count==4){ //lookup BCAM first! #if defined(CONFIG_RTL9600_SERIES) list_for_each_entry(pLutBCAMList,&rg_db.lutBCAMLinkListHead,lut_list) #else //support lut traffic bit list_for_each_entry(pLutBCAMList,&rg_db.lutBCAMTableHead[l2Idx>>2],lut_list) #endif { //lookup BCAM first! #if defined(CONFIG_RTL9602C_SERIES) if(rg_db.lut[pLutBCAMList->idx].valid && (rg_db.lut[pLutBCAMList->idx].rtk_lut.entryType==RTK_LUT_L3MC) && (rg_db.lut[pLutBCAMList->idx].rtk_lut.entry.ipmcEntry.dip==dip)&& ((rg_db.lut[pLutBCAMList->idx].rtk_lut.entry.ipmcEntry.flags & RTK_L2_IPMCAST_FLAG_SIP_FILTER)>>6 == ipfilterEn) && (rg_db.lut[pLutBCAMList->idx].rtk_lut.entry.ipmcEntry.sip_index==ipfilterIdx) && (((rg_db.lut[pLutBCAMList->idx].rtk_lut.entry.ipmcEntry.flags & RTK_L2_IPMCAST_FLAG_IVL)>>7) ==ivlsvl)) { if(ivlsvl) { DEBUG("IVL match!! froced replace LUT BCAM entry[%d] for MC entry!!",pLutBCAMList->idx); if(rg_db.lut[pLutBCAMList->idx].rtk_lut.entry.ipmcEntry.vid== vid_fid) return pLutBCAMList->idx; } else { DEBUG("SVL match!! froced replace LUT BCAM entry[%d] for MC entry!!",pLutBCAMList->idx); if(rg_db.lut[pLutBCAMList->idx].rtk_lut.entry.ipmcEntry.fid== vid_fid) return pLutBCAMList->idx; } } #elif defined(CONFIG_RTL9600_SERIES) || defined(CONFIG_RTL9607C_SERIES) if(rg_db.lut[pLutBCAMList->idx].valid && (rg_db.lut[pLutBCAMList->idx].rtk_lut.entryType==RTK_LUT_L3MC) && (rg_db.lut[pLutBCAMList->idx].rtk_lut.entry.ipmcEntry.dip==dip)&& (rg_db.lut[pLutBCAMList->idx].rtk_lut.entry.ipmcEntry.sip==sip) && ipfilterEn && (!(rg_db.lut[pLutBCAMList->idx].rtk_lut.entry.ipmcEntry.flags&RTK_L2_IPMCAST_FLAG_DIP_ONLY)) ) { DEBUG("path3 include/exclude mode support add to CAM"); return pLutBCAMList->idx; } #endif } if(first_invalid>=0) return first_invalid; //not in 4-way and bCAM, use the first invalid to add one! else{ count=_rtk_rg_layer2GarbageCollection(l2Idx); //check if there is asynchronus between software and hardware table if(count==4){ #if defined(CONFIG_RTL9600_SERIES) //20150916: If wan_sa is set, this IP MC entry should not add to lut cam. 
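				/* NOTE: path-3 entries (include/exclude, ipfilterEn=1) may be placed by LRU
				 * replacement, while DIP-only path-4 entries with wan_sa use the hashed
				 * replacement so they are never pushed out to the CAM. */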
if(ipfilterEn) //DIPONLY=0 path3 search_index=_rtk_rg_layer2LeastRecentlyUsedReplace(l2Idx); //replace the least recently used entry for new entry else //DIPONLY=1 path4 and wansa=1 search_index=_rtk_rg_layer2HashedReplace(l2Idx); //replace the least recently used entry for new entry #else search_index=_rtk_rg_layer2LeastRecentlyUsedReplace(l2Idx); //replace the least recently used entry for new entry #endif if(search_index==RG_RET_ENTRY_NOT_GET) { FIXME("must add software LUT entry for LUT entry full."); return RG_RET_ENTRY_NOT_GET; } }else search_index=l2Idx+count; } } return search_index; #endif } /* add transTbl and fill transferIdx for ipv4/ipv6 */ //1 FIXME: should not reconstruction transtbl every time when add a new multicast flow void _rtk_rg_apollo_ipmcMultiCast_transTbl_add(int32 isIpv6,rtk_l2_ipMcastAddr_t *lut) { #if 1 int tmpNetifId,tmpPortId; //update IPMC Transtbl by interface for(tmpNetifId=0;tmpNetifId<MAX_NETIF_HW_TABLE_SIZE;tmpNetifId++) // 1-to-1 mapping to hw netif { int intfIdx; rtk_l34_ipmcTrans_entry_t ipmcEntry; memset(&ipmcEntry,0,sizeof(ipmcEntry)); #if defined(CONFIG_RTL9602C_SERIES) ipmcEntry.untagMbr.bits[0]=0xf; //default all untag #endif intfIdx=tmpNetifId; if(rg_db.systemGlobal.interfaceInfo[tmpNetifId].valid) { int portmask; int intfVid; ipmcEntry.sipTransEnable=0; ipmcEntry.extipIdx=0; if(rg_db.systemGlobal.interfaceInfo[tmpNetifId].storedInfo.is_wan==0) { portmask=rg_db.systemGlobal.interfaceInfo[tmpNetifId].storedInfo.lan_intf.port_mask.portmask; intfVid=rg_db.systemGlobal.interfaceInfo[tmpNetifId].storedInfo.lan_intf.intf_vlan_id; ipmcEntry.pppoeAct=L34_PPPOE_ACT_REMOVE; //1:remove pppoe tag when packets forward to LAN ipmcEntry.pppoeIdx=0; } else { int napt_enable=0; portmask=1<<(rg_db.systemGlobal.interfaceInfo[tmpNetifId].storedInfo.wan_intf.wan_intf_conf.wan_port_idx); intfVid=rg_db.systemGlobal.interfaceInfo[tmpNetifId].storedInfo.wan_intf.wan_intf_conf.egress_vlan_id; if(rg_db.systemGlobal.interfaceInfo[tmpNetifId].storedInfo.wan_intf.wan_intf_conf.wan_type==RTK_RG_STATIC) { if(rg_db.systemGlobal.interfaceInfo[tmpNetifId].storedInfo.wan_intf.static_info.napt_enable==ENABLED) napt_enable=1; } else if(rg_db.systemGlobal.interfaceInfo[tmpNetifId].storedInfo.wan_intf.wan_intf_conf.wan_type==RTK_RG_DHCP) { if(rg_db.systemGlobal.interfaceInfo[tmpNetifId].storedInfo.wan_intf.dhcp_client_info.hw_info.napt_enable==ENABLED) napt_enable=1; } else if(rg_db.systemGlobal.interfaceInfo[tmpNetifId].storedInfo.wan_intf.wan_intf_conf.wan_type==RTK_RG_PPPoE) { if(rg_db.systemGlobal.interfaceInfo[tmpNetifId].storedInfo.wan_intf.pppoe_info.after_dial.hw_info.napt_enable==ENABLED) napt_enable=1; } if(napt_enable==1 && (rg_db.systemGlobal.interfaceInfo[tmpNetifId].storedInfo.wan_intf.extip_idx != -1)) { ipmcEntry.sipTransEnable=1; ipmcEntry.extipIdx= rg_db.systemGlobal.interfaceInfo[tmpNetifId].storedInfo.wan_intf.extip_idx; } #if defined(CONFIG_RTL9602C_SERIES) //always set remove mode ipmcEntry.pppoeAct=L34_PPPOE_ACT_REMOVE; ipmcEntry.pppoeIdx=0; #elif defined(CONFIG_RTL9600_SERIES) // don't remove pppoe tag when packets forward to WAN ipmcEntry.pppoeAct=L34_PPPOE_ACT_NO; ipmcEntry.pppoeIdx=0; #endif } #if defined(CONFIG_RTL9602C_SERIES) ipmcEntry.untagMbr.bits[0] =rg_db.vlan[intfVid].UntagPortmask.bits[0]; #endif ipmcEntry.netifIdx=intfIdx; //DEBUG("set IPMC_ENTRY[%d]={netifIdx=%d,pppoeAct=%d,pppoeIdx=%d,sipTrans=%d}",intfIdx,ipmcEntry.netifIdx,ipmcEntry.pppoeAct,ipmcEntry.pppoeIdx,ipmcEntry.sipTransEnable); 
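			/* NOTE: every valid RG interface is written 1:1 into the L34 IPMC translation
			 * table below; the second half of this routine then packs a per-port default
			 * interface into lut->l3_trans_index, one 4-bit netif index per MAC port.
			 * The fragment below is an illustrative sketch of that packing only (kept in
			 * #if 0, with a hypothetical netif index), not driver code. */
#if 0
			{
				uint32 trans = 0;
				int port, netif = 3;				/* hypothetical default interface index */
				for(port=0; port<RTK_RG_MAX_MAC_PORT; port++)
				{
					trans &= ~(0xf << (port<<2));	/* clear this port's 4-bit nibble */
					trans |= (netif << (port<<2));	/* store the default interface index */
				}
				/* port N's default interface reads back as (trans >> (N*4)) & 0xf */
			}
#endif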
assert_ok(rtk_l34_ipmcTransTable_set(intfIdx,&ipmcEntry)); } } //1 assigned this multicast routing entry per-port default interface for(tmpPortId=0;tmpPortId<RTK_RG_MAX_MAC_PORT;tmpPortId++) { int pvid; int lanPortmask,wanPortmask; int intf,lanIntf=-1,wanIntf=-1; // first LAN mbr > WAN mbr for(tmpNetifId=0;tmpNetifId<MAX_NETIF_HW_TABLE_SIZE;tmpNetifId++) // 1-to-1 mapping to hw netif { if(rg_db.systemGlobal.interfaceInfo[tmpNetifId].valid) { if(rg_db.systemGlobal.interfaceInfo[tmpNetifId].storedInfo.is_wan==0) { //server in wan ( this port is lan port ) lanPortmask=rg_db.systemGlobal.interfaceInfo[tmpNetifId].storedInfo.lan_intf.port_mask.portmask; if((1<<tmpPortId)&lanPortmask && (lanIntf==-1)) //find the first lan (portmask include this port) { lanIntf=tmpNetifId; break; } } else { //server in lan ( this port is wan port ) wanPortmask=1<<(rg_db.systemGlobal.interfaceInfo[tmpNetifId].storedInfo.wan_intf.wan_intf_conf.wan_port_idx); rtk_vlan_portPvid_get(tmpPortId,&pvid); if((1<<tmpPortId)&wanPortmask && (pvid==rg_db.systemGlobal.interfaceInfo[tmpNetifId].storedInfo.wan_intf.wan_intf_conf.egress_vlan_id)) { wanIntf=tmpNetifId; } } } } if(lanIntf !=-1) intf = lanIntf; //LAN Port else if (wanIntf==-1 && lanIntf==-1) intf=0 ; //non-expect default routing to lan0 else intf = wanIntf; //WAN Port #if defined(CONFIG_RTL9602C_SERIES) //1 FIXME:we always used l3_mcr_index= DEFAULT_L3MCR_IDX if(isIpv6) { lut->l3_mcr_index=DEFAULT_L3MCR_IDX; rtk_l34_ip6mcRoutingTransIdx_set(lut->l3_mcr_index,tmpPortId,intf); } else #endif { lut->l3_trans_index&=(~(0xf<<(tmpPortId<<2))); lut->l3_trans_index|=(intf<<(tmpPortId<<2)); } } #else int i; //SET IPMC Table for(i=0;i<MAX_NETIF_HW_TABLE_SIZE;i++) { int j; int intfIdx; rtk_l34_ipmcTrans_entry_t ipmcEntry; memset(&ipmcEntry,0,sizeof(ipmcEntry)); #if defined(CONFIG_RTL9602C_SERIES) ipmcEntry.untagMbr.bits[0]=0xf; //default all untag #endif intfIdx=i; if(rg_db.systemGlobal.interfaceInfo[i].valid) { int portmask; int intfVid; ipmcEntry.sipTransEnable=0; ipmcEntry.extipIdx=0; if(rg_db.systemGlobal.interfaceInfo[i].storedInfo.is_wan==0) { portmask=rg_db.systemGlobal.interfaceInfo[i].storedInfo.lan_intf.port_mask.portmask; intfVid=rg_db.systemGlobal.interfaceInfo[i].storedInfo.lan_intf.intf_vlan_id; ipmcEntry.pppoeAct=L34_PPPOE_ACT_REMOVE; //1:remove pppoe tag when packets forward to LAN ipmcEntry.pppoeIdx=0; } else { int napt_enable=0; portmask=1<<(rg_db.systemGlobal.interfaceInfo[i].storedInfo.wan_intf.wan_intf_conf.wan_port_idx); intfVid=rg_db.systemGlobal.interfaceInfo[i].storedInfo.wan_intf.wan_intf_conf.egress_vlan_id; if(rg_db.systemGlobal.interfaceInfo[i].storedInfo.wan_intf.wan_intf_conf.wan_type==RTK_RG_STATIC) { if(rg_db.systemGlobal.interfaceInfo[i].storedInfo.wan_intf.static_info.napt_enable==ENABLED) napt_enable=1; } else if(rg_db.systemGlobal.interfaceInfo[i].storedInfo.wan_intf.wan_intf_conf.wan_type==RTK_RG_DHCP) { if(rg_db.systemGlobal.interfaceInfo[i].storedInfo.wan_intf.dhcp_client_info.hw_info.napt_enable==ENABLED) napt_enable=1; } else if(rg_db.systemGlobal.interfaceInfo[i].storedInfo.wan_intf.wan_intf_conf.wan_type==RTK_RG_PPPoE) { if(rg_db.systemGlobal.interfaceInfo[i].storedInfo.wan_intf.pppoe_info.after_dial.hw_info.napt_enable==ENABLED) napt_enable=1; } if(napt_enable==1 && (rg_db.systemGlobal.interfaceInfo[i].storedInfo.wan_intf.extip_idx != -1)) { ipmcEntry.sipTransEnable=1; ipmcEntry.extipIdx= rg_db.systemGlobal.interfaceInfo[i].storedInfo.wan_intf.extip_idx; } #if defined(CONFIG_RTL9602C_SERIES) //always set remove mode 
ipmcEntry.pppoeAct=L34_PPPOE_ACT_REMOVE; ipmcEntry.pppoeIdx=0; #elif defined(CONFIG_RTL9600_SERIES) // don't remove pppoe tag when packets forward to WAN ipmcEntry.pppoeAct=L34_PPPOE_ACT_NO; ipmcEntry.pppoeIdx=0; #endif } for(j=0;j<RTK_RG_MAX_MAC_PORT;j++) { int pvid; if(j==RTK_RG_MAC_PORT_CPU) { rtk_vlan_extPortPvid_get(1,&pvid); //get wlan0 pvid for CPU Port //printk("ext0 pvid=%d\n",pvid); } else { rtk_vlan_portPvid_get(j,&pvid); } if(pvid==intfVid) { #if defined(CONFIG_RTL9602C_SERIES) if(!(rg_db.vlan[pvid].UntagPortmask.bits[0]& (1<<j))) //tag { ipmcEntry.untagMbr.bits[0]&= (~(1<<j)); //tag packet clear bit } //1 FIXME:we always used l3_mcr_index= DEFAULT_L3MCR_IDX if(isIpv6) { lut->l3_mcr_index=DEFAULT_L3MCR_IDX; rtk_l34_ip6mcRoutingTransIdx_set(lut->l3_mcr_index,j,intfIdx); } else #endif { if((((lut->l3_trans_index>>(j<<2))&0xf)==0xf)&&((1<<j)&portmask)) { lut->l3_trans_index&=(~(0xf<<(j<<2))); lut->l3_trans_index|=(intfIdx<<(j<<2)); } } } } ipmcEntry.netifIdx=intfIdx; } //DEBUG("set IPMC_ENTRY[%d]={netifIdx=%d,pppoeAct=%d,pppoeIdx=%d,sipTrans=%d}",intfIdx,ipmcEntry.netifIdx,ipmcEntry.pppoeAct,ipmcEntry.pppoeIdx,ipmcEntry.sipTransEnable); assert_ok(rtk_l34_ipmcTransTable_set(intfIdx,&ipmcEntry)); } #endif } /* only support (SVL Mode)/(server in wan) */ int32 _rtk_rg_apollo_ipv4MultiCastFlow_add(rtk_rg_ipv4MulticastFlow_t *ipv4McFlow,int *flow_idx) { #if defined (CONFIG_APOLLO) int search_index; rtk_l2_ipMcastAddr_t lut; memset(&lut,0,sizeof(rtk_l2_ipMcastAddr_t)); TRACE(" MUlticast ADD MODE=%d",ipv4McFlow->srcFilterMode); //if((ipv4McFlow->srcFilterMode==RTK_RG_IPV4MC_INCLUDE)||(ipv4McFlow->srcFilterMode==RTK_RG_IPV4MC_EXCLUDE)) if (rg_db.systemGlobal.initParam.igmpSnoopingEnable==2) { rtk_l2_ipmcMode_t Mode; rtk_portmask_t pmsk; int32 grpIdx=-1; int32 first_invaild=-1,i; int32 AddRfcount=1; /* avoid the same include/exculde entry count refCount twice */ #if defined(CONFIG_RTL9602C_SERIES) int32 sipFilterIdx=-1; #endif // This function support *** SVL *** mode only! Do strict mode checking here!! rtk_l2_ipmcMode_get(&Mode); #if defined(CONFIG_RTL9602C_SERIES) if(Mode!=LOOKUP_ON_DIP_AND_VID_FID) { WARNING("HASH FUNCTION CONFIG ERROR"); return RT_ERR_FAILED; } #elif defined(CONFIG_RTL9600_SERIES) || defined(CONFIG_RTL9607C_SERIES) if(Mode!=LOOKUP_ON_DIP_AND_SIP) { WARNING("HASH FUNCTION CONFIG ERROR"); return RT_ERR_FAILED; } #endif lut.dip=ipv4McFlow->groupIp; lut.sip = ipv4McFlow->includeOrExcludeIp; lut.flags=RTK_L2_IPMCAST_FLAG_STATIC; #if defined(CONFIG_RTL9602C_SERIES) /* wansa=1(enable_route/SVL static/sipfilter_en)*/ if(ipv4McFlow->routingMode == RTK_RG_IPV4MC_EN_ROUTING) lut.flags |= RTK_L2_IPMCAST_FLAG_L3MC_ROUTE_EN; lut.fid = LAN_FID; #elif defined(CONFIG_RTL9600_SERIES) //follow lut path3 , support include/exclude mode but not support same_multicast_IP routing at same time if(ipv4McFlow->routingMode == RTK_RG_IPV4MC_EN_ROUTING) { WARNING("not support include/exclude with routing mode"); return RT_ERR_RG_CHIP_NOT_SUPPORT; } #endif //Is entry exist? 
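		/* NOTE: the loop below is a find-or-first-free scan over the software group table:
		 * grpIdx ends at the matching entry if this group IP is already tracked, otherwise
		 * at the first invalid slot so the group can be added there. */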
for(i=0;i<MAX_IPMCGRP_SW_TABLE_SIZE;i++) { if(first_invaild==-1 && rg_db.ipmcgrp[i].valid==FALSE) first_invaild=i; if((rg_db.ipmcgrp[i].groupIp==lut.dip)&&(rg_db.ipmcgrp[i].valid==TRUE)) { grpIdx= i; break; } if(grpIdx == -1) grpIdx=first_invaild; } #if defined(CONFIG_RTL9602C_SERIES) { first_invaild=-1; for(i=0;i<MAX_IPMCFILTER_HW_TABLE_SIZE;i++) { if(first_invaild==-1 && rg_db.ipmcfilter[i].valid==FALSE) first_invaild=i; if((rg_db.ipmcfilter[i].filterIp==ipv4McFlow->includeOrExcludeIp)&&(rg_db.ipmcfilter[i].valid==TRUE)) { sipFilterIdx= i; break; } if(sipFilterIdx == -1) sipFilterIdx=first_invaild; } TRACE("SwGipTable will add to [%d][%s] HwSipFilter will add to [%d][%s]",grpIdx,rg_db.ipmcgrp[grpIdx].valid?"valid":"invalid", sipFilterIdx,rg_db.ipmcfilter[sipFilterIdx].valid?"valid":"invalid"); if(rg_db.ipmcgrp[grpIdx].valid && rg_db.ipmcfilter[sipFilterIdx].valid) AddRfcount=0; if(rg_db.ipmcgrp[grpIdx].ipmRefCount==0 && rg_db.ipmcgrp[grpIdx].valid==0 ) { search_index=_rtk_rg_findAndReclamIpmcEntry(0,lut.dip,0,LAN_FID,0,0); if(search_index==FAIL) RETURN_ERR(RT_ERR_RG_ENTRY_FULL); if (ipv4McFlow->srcFilterMode==RTK_RG_IPV4MC_INCLUDE) { /* add a first filter IPM entry all_mbr=0 to block other SIP (include mode) wansa=1(enable_route/SVL static)*/ lut.sip_index=0; lut.portmask.bits[0]=0; lut.ext_portmask.bits[0]=0; ASSERT_EQ(RTK_L2_IPMCASTADDR_ADD(&lut),RT_ERR_OK); TABLE("add a first filter IPM entry all_mbr=0 to block other SIP (include mode) SVL/static"); } else if(ipv4McFlow->srcFilterMode==RTK_RG_IPV4MC_EXCLUDE) { /* add a first filter IPM entry mbr=excludeModePor tmask to allow other SIP (exclude mode) wansa=1(enable_route/SVL static)*/ _rtk_rg_apollo_ipmcMultiCast_transTbl_add(0,&lut); _rtk_rg_portmask_translator(ipv4McFlow->ipm_portmask,&lut.portmask,&lut.ext_portmask); lut.sip_index=0; ASSERT_EQ(RTK_L2_IPMCASTADDR_ADD(&lut),RT_ERR_OK); TABLE("add a first filter IPM entry mbr=excludeModePor tmask to allow other SIP (exclude mode) SVL/static"); } ASSERT_EQ(search_index,lut.index); } } #endif if(ipv4McFlow->srcFilterMode==RTK_RG_IPV4MC_EXCLUDE) pmsk.bits[0] = ipv4McFlow->ipm_portmask.portmask; else //include mode pmsk.bits[0] = 0; #if defined(CONFIG_RTL9602C_SERIES) { if(AddRfcount) { ASSERT_EQ(RTK_L2_IPMCGROUP_ADD(ipv4McFlow->groupIp,&pmsk,&grpIdx),RT_ERR_OK); ASSERT_EQ(RTK_L2_IPMCSIPFILTER_ADD(ipv4McFlow->includeOrExcludeIp,&sipFilterIdx),RT_ERR_OK); } //move ipm entry from default_rule branch to filter rule search_index=_rtk_rg_findAndReclamIpmcEntry(0,lut.dip,0,LAN_FID,sipFilterIdx,1); if(search_index==FAIL) RETURN_ERR(RT_ERR_RG_ENTRY_NOT_EXIST); if (ipv4McFlow->srcFilterMode==RTK_RG_IPV4MC_EXCLUDE) { lut.portmask.bits[0]=0; lut.ext_portmask.bits[0]=0; } else// if(ipv4McFlow->srcFilterMode==RTK_RG_IPV4MC_INCLUDE) { _rtk_rg_portmask_translator(ipv4McFlow->ipm_portmask,&lut.portmask,&lut.ext_portmask); _rtk_rg_apollo_ipmcMultiCast_transTbl_add(0,&lut); } lut.flags |= (RTK_L2_IPMCAST_FLAG_SIP_FILTER); lut.sip_index = sipFilterIdx; ASSERT_EQ(RTK_L2_IPMCASTADDR_ADD(&lut),RT_ERR_OK); ASSERT_EQ(search_index,lut.index); TABLE("ADD multicast lut[%d] to path3 Gip:%d.%d.%d.%d %s Sip=%d.%d.%d.%d PMask=%x ExtPMask=%x", lut.index,(ipv4McFlow->groupIp>>24)&0xff,(ipv4McFlow->groupIp>>16)&0xff,(ipv4McFlow->groupIp>>8)&0xff,(ipv4McFlow->groupIp>>0)&0xff, ipv4McFlow->srcFilterMode==RTK_RG_IPV4MC_EXCLUDE?"exclude":"include",(lut.sip>>24)&0xff,(lut.sip>>16)&0xff,(lut.sip>>8)&0xff,(lut.sip>>0)&0xff, lut.portmask.bits[0],lut.ext_portmask.bits[0]); } #elif defined(CONFIG_RTL9600_SERIES) || 
defined(CONFIG_RTL9607C_SERIES) search_index=_rtk_rg_findAndReclamIpmcEntry(lut.sip,lut.dip,0,LAN_FID,0,1); if(search_index==FAIL) RETURN_ERR(RT_ERR_RG_ENTRY_FULL); if(rg_db.ipmcgrp[grpIdx].valid && rg_db.lut[search_index].valid) AddRfcount=0; if(AddRfcount) ASSERT_EQ(RTK_L2_IPMCGROUP_ADD(ipv4McFlow->groupIp,&pmsk,&grpIdx),RT_ERR_OK); if(ipv4McFlow->srcFilterMode==RTK_RG_IPV4MC_EXCLUDE) { lut.portmask.bits[0]=0; lut.ext_portmask.bits[0]=0; } else// if (ipv4McFlow->srcFilterMode==RTK_RG_IPV4MC_INCLUDE) { _rtk_rg_portmask_translator(ipv4McFlow->ipm_portmask,&lut.portmask,&lut.ext_portmask); #ifdef CONFIG_DUALBAND_CONCURRENT if((lut.portmask.bits[0]==(1<<RTK_RG_MAC_PORT_CPU))&&(lut.ext_portmask.bits[0]==4)) //Only to EXT1(slave wifi), modify priority. { lut.flags|=RTK_L2_MCAST_FLAG_FWD_PRI; lut.priority=CONFIG_DEFAULT_TO_SLAVE_GMAC_PRI; } #endif } ASSERT_EQ(RTK_L2_IPMCASTADDR_ADD(&lut),RT_ERR_OK); ASSERT_EQ(search_index,lut.index); TABLE("ADD multicast lut[%d] to path3 Gip:%d.%d.%d.%d %s Sip=%d.%d.%d.%d PMask=%x ExtPMask=%x", lut.index,(ipv4McFlow->groupIp>>24)&0xff,(ipv4McFlow->groupIp>>16)&0xff,(ipv4McFlow->groupIp>>8)&0xff,(ipv4McFlow->groupIp>>0)&0xff, ipv4McFlow->srcFilterMode==RTK_RG_IPV4MC_EXCLUDE?"exclude":"include",(lut.sip>>24)&0xff,(lut.sip>>16)&0xff,(lut.sip>>8)&0xff,(lut.sip>>0)&0xff, lut.portmask.bits[0],lut.ext_portmask.bits[0]); #endif } //else if(ipv4McFlow->srcFilterMode==RTK_RG_IPV4MC_DONT_CARE_SRC) else if (rg_db.systemGlobal.initParam.igmpSnoopingEnable==1) { lut.dip=ipv4McFlow->groupIp; lut.sip=0; //lut.flags=RTK_L2_IPMCAST_FLAG_STATIC|RTK_L2_IPMCAST_FLAG_DIP_ONLY; //PATCH20131126:for SSDP packets, we should not do L3 routing!! #if defined(CONFIG_RTL9602C_SERIES) if(lut.dip==0xeffffffa) //239.255.255.250 lut.flags=RTK_L2_IPMCAST_FLAG_STATIC; else lut.flags=RTK_L2_IPMCAST_FLAG_STATIC|RTK_L2_IPMCAST_FLAG_L3MC_ROUTE_EN; //lut.l3_trans_index=0; //default point to interface[0] (first LAN interface) #else if(lut.dip==0xeffffffa) //239.255.255.250 lut.flags=RTK_L2_IPMCAST_FLAG_STATIC|RTK_L2_IPMCAST_FLAG_DIP_ONLY; else lut.flags=RTK_L2_IPMCAST_FLAG_STATIC|RTK_L2_IPMCAST_FLAG_DIP_ONLY|RTK_L2_IPMCAST_FLAG_L3MC_ROUTE_EN; //lut.l3_trans_index=0; //default point to interface[0] (first LAN interface) #endif _rtk_rg_apollo_ipmcMultiCast_transTbl_add(0,&lut); //DEBUG("set lut.l3_trans_index=0x%x\n",lut.l3_trans_index); _rtk_rg_portmask_translator(ipv4McFlow->ipm_portmask,&lut.portmask,&lut.ext_portmask); #if defined(CONFIG_DUALBAND_CONCURRENT) && defined(CONFIG_RTL9600_SERIES) if((lut.portmask.bits[0]==(1<<RTK_RG_MAC_PORT_CPU))&&(lut.ext_portmask.bits[0]==4)) //Only to EXT1(slave wifi), modify priority. { lut.flags|=RTK_L2_MCAST_FLAG_FWD_PRI; lut.priority=CONFIG_DEFAULT_TO_SLAVE_GMAC_PRI; } #endif search_index=_rtk_rg_findAndReclamIpmcEntry(lut.sip,lut.dip,0,LAN_FID,0,0); if(search_index==FAIL) RETURN_ERR(RT_ERR_RG_ENTRY_FULL); #if defined(CONFIG_RTL9602C_SERIES) { int32 first_invaild=-1,i; int32 grpIdx=-1; rtk_portmask_t pmsk; //Is entry exist? 
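/* Worked example for the SSDP special case a few lines above (a note only, no new
 * behavior): 239.255.255.250 is 0xEF.0xFF.0xFF.0xFA, i.e. the literal 0xeffffffa compared
 * against lut.dip, so the SSDP group is installed without RTK_L2_IPMCAST_FLAG_L3MC_ROUTE_EN
 * and stays bridge-only, while every other group in this branch also gets the L3
 * multicast-routing flag.
 */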
for(i=0;i<MAX_IPMCGRP_SW_TABLE_SIZE;i++) { if(first_invaild==-1 && rg_db.ipmcgrp[i].valid==FALSE) first_invaild=i; if((rg_db.ipmcgrp[i].groupIp==lut.dip)&&(rg_db.ipmcgrp[i].valid==TRUE)) { grpIdx= i; break; } if(grpIdx == -1) grpIdx=first_invaild; } TABLE("### IPv4 MULTICAST add at %d: GIP:0x%x (%s%s),l3_trans_idx=0x%x PortMask:0x%x ExtPortMask:0x%x ###",search_index, lut.dip, (lut.flags&RTK_L2_IPMCAST_FLAG_SIP_FILTER)?"SIP_FILTER,":"", (lut.flags&RTK_L2_IPMCAST_FLAG_L3MC_ROUTE_EN)?"ROUTE_EN,":"", lut.l3_trans_index, lut.portmask.bits[0],lut.ext_portmask.bits[0]); lut.fid=LAN_FID; ASSERT_EQ(RTK_L2_IPMCASTADDR_ADD(&lut),RT_ERR_OK); ASSERT_EQ(search_index,lut.index); //add group table to patch sipFilterTable filter mode issue pmsk.bits[0]=lut.portmask.bits[0]; if (!rg_db.ipmcgrp[grpIdx].valid) /* avoid the same Gip entry count refCount twice */ ASSERT_EQ(RTK_L2_IPMCGROUP_ADD(ipv4McFlow->groupIp,&pmsk,&grpIdx),RT_ERR_OK); } #else TABLE("### IPv4 MULTICAST add at %d: GIP:0x%x (%s%s%s),l3_trans_idx=0x%x PortMask:0x%x ExtPortMask:0x%x ###",search_index, lut.dip, (lut.flags&RTK_L2_IPMCAST_FLAG_DIP_ONLY)?"DIP_ONLY,":"", (lut.flags&RTK_L2_IPMCAST_FLAG_L3MC_ROUTE_EN)?"ROUTE_EN,":"", (lut.flags&RTK_L2_IPMCAST_FLAG_FORCE_EXT_ROUTE)?"FORCE_EXT_ROUTE,":"", lut.l3_trans_index, lut.portmask.bits[0],lut.ext_portmask.bits[0]); ASSERT_EQ(RTK_L2_IPMCASTADDR_ADD(&lut),RT_ERR_OK); ASSERT_EQ(search_index,lut.index); #endif } *flow_idx=lut.index; #endif //end if CONFIG_APOLLO return (RT_ERR_RG_OK); } void _rtk_rg_apollo_multicastFlow_ipv6L2_add(rtk_rg_multicastFlow_t *mcFlow, int *flow_idx) { #if defined(CONFIG_APOLLO) rtk_rg_l2MulticastFlow_t l2Mc; int idx; #if defined(CONFIG_RTL9602C_SERIES) { rtk_l2_ipmcMode_t Mode; rtk_l2_ipv6mcMode_get(&Mode); if(Mode!=LOOKUP_ON_MAC_AND_VID_FID) WARNING("HASH FUNCTION CONFIG ERROR!!"); } #endif memset(&l2Mc,0,sizeof(rtk_rg_l2MulticastFlow_t)); l2Mc.mac.octet[0] = 0x33; l2Mc.mac.octet[1] = 0x33; l2Mc.mac.octet[2] = (mcFlow->multicast_ipv6_addr[3]&0xFF000000)>>24; l2Mc.mac.octet[3] = (mcFlow->multicast_ipv6_addr[3]&0x00FF0000)>>16; l2Mc.mac.octet[4] = (mcFlow->multicast_ipv6_addr[3]&0x0000FF00)>>8; l2Mc.mac.octet[5] = (mcFlow->multicast_ipv6_addr[3]&0x000000FF); l2Mc.port_mask=mcFlow->port_mask; l2Mc.isIVL=mcFlow->isIVL; l2Mc.vlanID=mcFlow->vlanID; assert_ok(_rtk_rg_apollo_l2MultiCastFlow_add(&l2Mc,&idx)); *flow_idx=idx; memcpy(rg_db.lut[idx].ipv6_addr,mcFlow->multicast_ipv6_addr,16); #endif } void _rtk_rg_apollo_multicastFlow_ipv4L2_add(rtk_rg_multicastFlow_t *mcFlow, int *flow_idx) { #if defined(CONFIG_APOLLO) rtk_rg_l2MulticastFlow_t l2Mc; int idx; #if defined(CONFIG_RTL9602C_SERIES) { rtk_l2_ipmcMode_t Mode; rtk_l2_ipmcMode_get(&Mode); if(Mode!=LOOKUP_ON_MAC_AND_VID_FID) WARNING("HASH FUNCTION CONFIG ERROR!!"); } #endif memset(&l2Mc,0,sizeof(rtk_rg_l2MulticastFlow_t)); l2Mc.mac.octet[0] = 0x01; l2Mc.mac.octet[1] = 0x00; l2Mc.mac.octet[2] = 0x5e; l2Mc.mac.octet[3] = (mcFlow->multicast_ipv4_addr&0x007F0000)>>16; l2Mc.mac.octet[4] = (mcFlow->multicast_ipv4_addr&0x0000FF00)>>8; l2Mc.mac.octet[5] = (mcFlow->multicast_ipv4_addr&0x000000FF); l2Mc.port_mask=mcFlow->port_mask; l2Mc.isIVL=mcFlow->isIVL; l2Mc.vlanID=mcFlow->vlanID; assert_ok(_rtk_rg_apollo_l2MultiCastFlow_add(&l2Mc,&idx)); *flow_idx=idx; #endif } /* include mode pmask=0 indicate other_sip drop , pmask!=0 include the sip exclude mode pmask!=0 indicate other_sip mbr , pmask=0 exclude the sip */ rtk_rg_err_code_t rtk_rg_apollo_multicastFlow_add(rtk_rg_multicastFlow_t *mcFlow, int *flow_idx) { //if 
igmp_max_simultaneous_group_size enabled, check the limited size. if((rg_db.systemGlobal.igmp_max_simultaneous_group_size!=RTK_RG_DEFAULT_IGMP_SYS_MAX_SIMULTANEOUS_GROUP_SIZE_UNLIMIT) && (rg_db.systemGlobal.igmp_simultaneous_group_size >= rg_db.systemGlobal.igmp_max_simultaneous_group_size)) { DEBUG("igmp max simultaneous group size is limted to %d! Current igmp goup siez is %d, Can not add more multicast flow",rg_db.systemGlobal.igmp_max_simultaneous_group_size,rg_db.systemGlobal.igmp_simultaneous_group_size); return (RT_ERR_RG_ENTRY_FULL); } if(rg_db.systemGlobal.initParam.ivlMulticastSupport ==0 && mcFlow->isIVL==1) { WARNING("RT_ERR_RG_INVALID_PARAM rg_init please enable multicast ivl support "); return RT_ERR_RG_INVALID_PARAM; } if(mcFlow->isIPv6 && mcFlow->srcFilterMode!=RTK_RG_IPV4MC_DONT_CARE_SRC) { WARNING("IPv6 not SUPPORT include/exclude mode"); return RT_ERR_RG_CHIP_NOT_SUPPORT; } #if defined(CONFIG_RTL9600_SERIES) if(mcFlow->routingMode==RTK_RG_IPV4MC_EN_ROUTING && mcFlow->srcFilterMode !=RTK_RG_IPV4MC_DONT_CARE_SRC) { WARNING("not support include/exclude with routing mode"); return RT_ERR_RG_CHIP_NOT_SUPPORT; } #endif #if defined(CONFIG_RTL9602C_SERIES) if(mcFlow->isIPv6) { #if RTK_RG_MULTICAST_MODE_MACFID /* PATH2 */ _rtk_rg_apollo_multicastFlow_ipv6L2_add(mcFlow,flow_idx); #else if(mcFlow->isIVL) { /* PATH2 */ _rtk_rg_apollo_multicastFlow_ipv6L2_add(mcFlow,flow_idx); } else { /* PATH4 */ rtk_rg_ipv6MulticastFlow_t v6IpMc; int idx; memset(&v6IpMc,0,sizeof(v6IpMc)); memcpy(&(v6IpMc.groupIp6.ipv6_addr[0]),&(mcFlow->multicast_ipv6_addr[0]),16); v6IpMc.portMaskIPMC6.portmask = mcFlow->port_mask.portmask; ASSERT_EQ(_rtk_rg_apollo_ipv6MultiCastFlow_add(&v6IpMc,&idx),RT_ERR_RG_OK); *flow_idx=idx; } #endif rg_db.systemGlobal.igmp_simultaneous_group_size++; //add counter return (RT_ERR_RG_OK); } else { #if RTK_RG_MULTICAST_MODE_MACFID _rtk_rg_apollo_multicastFlow_ipv4L2_add(mcFlow,flow_idx); #else if(mcFlow->isIVL) { _rtk_rg_apollo_multicastFlow_ipv4L2_add(mcFlow,flow_idx); } else { rtk_rg_ipv4MulticastFlow_t ipv4Mc; int idx=-1; memset(&ipv4Mc,0,sizeof(rtk_rg_ipv4MulticastFlow_t)); ipv4Mc.srcFilterMode=mcFlow->srcFilterMode; ipv4Mc.groupIp=mcFlow->multicast_ipv4_addr; ipv4Mc.routingMode = mcFlow->routingMode; ipv4Mc.ipm_portmask.portmask = mcFlow->port_mask.portmask; if(ipv4Mc.srcFilterMode != RTK_RG_IPV4MC_DONT_CARE_SRC ) { ipv4Mc.includeOrExcludeIp = mcFlow->includeOrExcludeIp; } ASSERT_EQ(_rtk_rg_apollo_ipv4MultiCastFlow_add(&ipv4Mc,&idx),RT_ERR_RG_OK); *flow_idx=idx; } #endif rg_db.systemGlobal.igmp_simultaneous_group_size++; //add counter return (RT_ERR_RG_OK); } #elif defined(CONFIG_RTL9600_SERIES) if(mcFlow->isIPv6) { _rtk_rg_apollo_multicastFlow_ipv6L2_add(mcFlow,flow_idx); rg_db.systemGlobal.igmp_simultaneous_group_size++; //add counter return (RT_ERR_RG_OK); } else { #if RTK_RG_MULTICAST_MODE_MACFID _rtk_rg_apollo_multicastFlow_ipv4L2_add(mcFlow,flow_idx); #else //if(mcFlow->isIVL) if (rg_db.systemGlobal.initParam.ivlMulticastSupport) { _rtk_rg_apollo_multicastFlow_ipv4L2_add(mcFlow,flow_idx); } else { rtk_rg_ipv4MulticastFlow_t ipv4Mc; int idx=-1; memset(&ipv4Mc,0,sizeof(rtk_rg_ipv4MulticastFlow_t)); ipv4Mc.srcFilterMode=mcFlow->srcFilterMode; ipv4Mc.groupIp=mcFlow->multicast_ipv4_addr; ipv4Mc.routingMode = mcFlow->routingMode; ipv4Mc.ipm_portmask.portmask = mcFlow->port_mask.portmask; if(ipv4Mc.srcFilterMode != RTK_RG_IPV4MC_DONT_CARE_SRC ) { ipv4Mc.includeOrExcludeIp = mcFlow->includeOrExcludeIp; } 
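/* Worked example of the L2 mapping used by _rtk_rg_apollo_multicastFlow_ipv4L2_add() and
 * _rtk_rg_apollo_multicastFlow_ipv6L2_add() defined earlier (the IVL / MAC-based paths of
 * this function); the group addresses are illustrative only:
 *   IPv4: the low 23 bits of the group IP follow the fixed 01:00:5e prefix, so
 *         239.1.1.1 (0xef010101) maps to MAC 01:00:5e:01:01:01.
 *   IPv6: the low 32 bits of the group address follow 33:33, so
 *         ff02::1:3 maps to MAC 33:33:00:01:00:03.
 */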
ASSERT_EQ(_rtk_rg_apollo_ipv4MultiCastFlow_add(&ipv4Mc,&idx),RT_ERR_RG_OK); *flow_idx=idx; } #endif rg_db.systemGlobal.igmp_simultaneous_group_size++; //add counter return (RT_ERR_RG_OK); } #elif defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) //FIXME: return (RT_ERR_RG_OK); #elif defined(CONFIG_RTL9607C_SERIES) //FIXME: return (RT_ERR_RG_OK); #endif } rtk_rg_err_code_t rtk_rg_apollo_multicastFlow_del(int flow_idx) { #if defined(CONFIG_APOLLO) int ret; rtk_l2_addr_table_t lut; int found=flow_idx; if(flow_idx>=MAX_LUT_HW_TABLE_SIZE) RETURN_ERR(RT_ERR_RG_INDEX_OUT_OF_RANGE); ret=rtk_l2_nextValidEntry_get(&found,&lut); if(ret) return ret; if(found==flow_idx) { //rtlglue_printf("flow====%d dip=%x\n",flow_idx,lut.entry.ipmcEntry.dip); if(lut.entryType==RTK_LUT_L2MC) { ret=RTK_L2_MCASTADDR_DEL(&lut.entry.l2McEntry); TABLE("### L2 MULTICAST del: %02x:%02x:%02x:%02x:%02x:%02x PortMask:0x%x ExtPortMask:0x%x ret=%d ###", lut.entry.l2McEntry.mac.octet[0],lut.entry.l2McEntry.mac.octet[1],lut.entry.l2McEntry.mac.octet[2],lut.entry.l2McEntry.mac.octet[3],lut.entry.l2McEntry.mac.octet[4],lut.entry.l2McEntry.mac.octet[5], lut.entry.l2McEntry.portmask.bits[0],lut.entry.l2McEntry.ext_portmask.bits[0],ret); } else if(lut.entryType==RTK_LUT_L3MC) { int32 delGrpIdx=0; #if defined(CONFIG_RTL9600_SERIES) || defined(CONFIG_RTL9607C_SERIES) if(!(lut.entry.ipmcEntry.flags & RTK_L2_IPMCAST_FLAG_DIP_ONLY)) //path3 delete grp table ASSERT_EQ(RTK_L2_IPMCGROUP_DEL(lut.entry.ipmcEntry.dip,&delGrpIdx),RT_ERR_OK); ret = RTK_L2_IPMCASTADDR_DEL(&lut.entry.ipmcEntry); #elif defined(CONFIG_RTL9602C_SERIES) int32 delFilterIdx=0; for(delGrpIdx=0;delGrpIdx<MAX_IPMCGRP_SW_TABLE_SIZE;delGrpIdx++) { if(((rg_db.ipmcgrp[delGrpIdx].groupIp==lut.entry.ipmcEntry.dip)&&(rg_db.ipmcgrp[delGrpIdx].valid==TRUE))) break; } if(lut.entry.ipmcEntry.flags & RTK_L2_IPMCAST_FLAG_SIP_FILTER) { RTK_L2_IPMCSIPFILTER_DEL(rg_db.ipmcfilter[lut.entry.ipmcEntry.sip_index].filterIp,&delFilterIdx); if(rg_db.ipmcfilter[lut.entry.ipmcEntry.sip_index].valid != 0) // filterTable refcount !=0 delete flowIdx and transfer to default_rule branch { int32 default_flowIdx,ruleIdx; //find default rule default_flowIdx =_rtk_rg_findAndReclamIpmcEntry(0,lut.entry.ipmcEntry.dip,0,LAN_FID,0,0); if(default_flowIdx==FAIL || (!(rg_db.lut[default_flowIdx].valid))) RETURN_ERR(RT_ERR_RG_ENTRY_NOT_EXIST); ruleIdx=_rtk_rg_findAndReclamIpmcEntry(rg_db.ipmcfilter[lut.entry.ipmcEntry.sip_index].filterIp,lut.entry.ipmcEntry.dip,0,LAN_FID,lut.entry.ipmcEntry.sip_index,1); //move incldue/exclude flow to default_rule branch lut.entry.ipmcEntry.portmask.bits[0]=rg_db.lut[default_flowIdx].rtk_lut.entry.ipmcEntry.portmask.bits[0]; lut.entry.ipmcEntry.ext_portmask.bits[0]=rg_db.lut[default_flowIdx].rtk_lut.entry.ipmcEntry.ext_portmask.bits[0]; lut.entry.ipmcEntry.flags = rg_db.lut[default_flowIdx].rtk_lut.entry.ipmcEntry.flags | RTK_L2_IPMCAST_FLAG_SIP_FILTER; lut.entry.ipmcEntry.l3_trans_index = rg_db.lut[default_flowIdx].rtk_lut.entry.ipmcEntry.l3_trans_index; ASSERT_EQ(RTK_L2_IPMCASTADDR_ADD(&lut.entry.ipmcEntry),RT_ERR_OK); ASSERT_EQ(ruleIdx,lut.entry.ipmcEntry.index); } } ASSERT_EQ(RTK_L2_IPMCGROUP_DEL(lut.entry.ipmcEntry.dip,&delGrpIdx),RT_ERR_OK); #endif TABLE("### IP MULTICAST del: DIP:%d.%d.%d.%d SIP:%d.%d.%d.%d PortMask:0x%x ExtPortMask:0x%x ret=%d ###", (lut.entry.ipmcEntry.dip>>24)&0xff,(lut.entry.ipmcEntry.dip>>16)&0xff,(lut.entry.ipmcEntry.dip>>8)&0xff,(lut.entry.ipmcEntry.dip)&0xff, 
(lut.entry.ipmcEntry.sip>>24)&0xff,(lut.entry.ipmcEntry.sip>>16)&0xff,(lut.entry.ipmcEntry.sip>>8)&0xff,(lut.entry.ipmcEntry.sip)&0xff, lut.entry.ipmcEntry.portmask.bits[0],lut.entry.ipmcEntry.ext_portmask.bits[0],ret); } #if defined(CONFIG_RTL9602C_SERIES) else if(lut.entryType==RTK_LUT_L3V6MC) { ret=RTK_L2_IPMCASTADDR_DEL(&lut.entry.ipmcEntry); TABLE("Del ipv6 Mc Entry LSB_ip = %02x%02x%02x%02x",lut.entry.ipmcEntry.dip6.ipv6_addr[12],lut.entry.ipmcEntry.dip6.ipv6_addr[13],lut.entry.ipmcEntry.dip6.ipv6_addr[14],lut.entry.ipmcEntry.dip6.ipv6_addr[15]); } #endif if(ret==RT_ERR_OK){ rg_db.systemGlobal.igmp_simultaneous_group_size--; //decrease counter return (RT_ERR_RG_OK); }else{ return ret; } } else return (RT_ERR_RG_ENTRY_NOT_EXIST); #elif defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) return (RT_ERR_RG_OK); #elif defined(CONFIG_RTL9607C_SERIES) //FIXME: return (RT_ERR_RG_OK); #endif } rtk_rg_err_code_t rtk_rg_apollo_multicastFlow_find(rtk_rg_multicastFlow_t *mcFlow, int *valid_idx) { int ret=0; rtk_l2_addr_table_t data; int input=*valid_idx; int output=input; int i=0; //rtlglue_printf("valid_idx=%d input=%d output=%d\n",*valid_idx,input,output); if(*valid_idx>=MAX_LUT_HW_TABLE_SIZE) RETURN_ERR(RT_ERR_RG_INDEX_OUT_OF_RANGE); if(mcFlow==NULL) RETURN_ERR(RT_ERR_RG_NULL_POINTER); while(1) { ret=rtk_l2_nextValidEntry_get(&output,&data); if(ret!=RT_ERR_OK) return (RT_ERR_RG_NO_MORE_ENTRY_FOUND); memset(mcFlow,0,sizeof(rtk_rg_multicastFlow_t)); if(data.entryType==RTK_LUT_L3MC) { mcFlow->multicast_ipv4_addr=data.entry.ipmcEntry.dip; mcFlow->port_mask.portmask=(data.entry.ipmcEntry.portmask.bits[0]&RTK_RG_ALL_MAC_PORTMASK_WITHOUT_CPU)|(data.entry.ipmcEntry.ext_portmask.bits[0]<<RTK_RG_PORT_CPU); //mcFlow->src_intf_idx; //FIXME *valid_idx=data.entry.ipmcEntry.index; //rtlglue_printf("index=%d \n",data.entry.l2UcEntry.index); return (RT_ERR_RG_OK); }else if(data.entryType==RTK_LUT_L2MC){ *valid_idx=data.entry.l2McEntry.index; mcFlow->multicast_ipv4_addr=data.entry.ipmcEntry.dip; mcFlow->port_mask.portmask=(data.entry.ipmcEntry.portmask.bits[0]&RTK_RG_ALL_MAC_PORTMASK_WITHOUT_CPU)|(data.entry.ipmcEntry.ext_portmask.bits[0]<<RTK_RG_PORT_CPU); //rtlglue_printf("Get entry:multicast_ipv4_addr=0x%x portmask=0x%x *valid_idx=%d entryType=%d\n",mcFlow->multicast_ipv4_addr,mcFlow->port_mask.portmask,*valid_idx,data.entryType); //FIXME //Need isIPv6, src_intf_idx mcFlow->multicast_ipv4_addr = (0xe0<<24) | (data.entry.l2McEntry.mac.octet[3]<<16)| (data.entry.l2McEntry.mac.octet[4]<<8)| (data.entry.l2McEntry.mac.octet[5]); if((data.entry.l2McEntry.mac.octet[0]==0x33)&&(data.entry.l2McEntry.mac.octet[1]==0x33)){//ipv6 multicast memcpy(mcFlow->multicast_ipv6_addr,rg_db.lut[output].ipv6_addr,16); mcFlow->isIPv6 = 1; } return (RT_ERR_RG_OK); } #if defined(CONFIG_RTL9602C_SERIES) else if(data.entryType==RTK_LUT_L3V6MC) { memcpy(mcFlow->multicast_ipv6_addr,rg_db.lut[output].ipv6_addr,16); mcFlow->port_mask.portmask = (data.entry.ipmcEntry.portmask.bits[0]&RTK_RG_ALL_MAC_PORTMASK_WITHOUT_CPU)|(data.entry.ipmcEntry.ext_portmask.bits[0]<<RTK_RG_PORT_CPU); mcFlow->isIPv6 = 1; *valid_idx=data.entry.ipmcEntry.index; } #endif output++; i++; if(i>MAX_LUT_HW_TABLE_SIZE){return (RT_ERR_RG_NO_MORE_ENTRY_FOUND);/*find at most 2048 times, avoid infinite loop*/} } } rtk_rg_err_code_t rtk_rg_apollo_dsliteMcTable_set(rtk_l34_dsliteMc_entry_t *pDsliteMcEntry) { #if 0 int i; if(pDsliteMcEntry==NULL)RETURN_ERR(RT_ERR_RG_NULL_POINTER); if(pDsliteMcEntry->index>=MAX_DSLITEMC_SW_TABLE_SIZE)RETURN_ERR(RT_ERR_RG_INDEX_OUT_OF_RANGE); 
memcpy(&rg_db.dsliteMc[pDsliteMcEntry->index].rtk_dsliteMc,pDsliteMcEntry,sizeof(*pDsliteMcEntry)); for(i=0;i<IPV6_ADDR_LEN;i++){ rg_db.dsliteMc[pDsliteMcEntry->index].ipMPrefix64_AND_mask.ipv6_addr[i]=pDsliteMcEntry->ipMPrefix64.ipv6_addr[i]&pDsliteMcEntry->ipMPrefix64Mask.ipv6_addr[i]; rg_db.dsliteMc[pDsliteMcEntry->index].ipUPrefix64_AND_mask.ipv6_addr[i]=pDsliteMcEntry->ipUPrefix64.ipv6_addr[i]&pDsliteMcEntry->ipUPrefix64Mask.ipv6_addr[i]; } return (RT_ERR_RG_OK); #endif return RT_ERR_RG_NOT_SUPPORT_TESTCHIP; } rtk_rg_err_code_t rtk_rg_apollo_dsliteMcTable_get(rtk_l34_dsliteMc_entry_t *pDsliteMcEntry) { #if 0 if(pDsliteMcEntry==NULL)RETURN_ERR(RT_ERR_RG_NULL_POINTER); if(pDsliteMcEntry->index>=MAX_DSLITEMC_SW_TABLE_SIZE)RETURN_ERR(RT_ERR_RG_INDEX_OUT_OF_RANGE); memcpy(pDsliteMcEntry,&rg_db.dsliteMc[pDsliteMcEntry->index].rtk_dsliteMc,sizeof(rtk_l34_dsliteMc_entry_t)); return (RT_ERR_RG_OK); #endif return RT_ERR_RG_NOT_SUPPORT_TESTCHIP; } rtk_rg_err_code_t rtk_rg_apollo_dsliteControl_set(rtk_l34_dsliteCtrlType_t ctrlType, uint32 act) { #if 0 if(ctrlType>=L34_DSLITE_CTRL_END)RETURN_ERR(RT_ERR_RG_INDEX_OUT_OF_RANGE); rg_db.systemGlobal.dsliteControlSet[ctrlType]=act; return (RT_ERR_RG_OK); #endif return RT_ERR_RG_NOT_SUPPORT_TESTCHIP; } rtk_rg_err_code_t rtk_rg_apollo_dsliteControl_get(rtk_l34_dsliteCtrlType_t ctrlType, uint32 *pAct) { #if 0 if(ctrlType>=L34_DSLITE_CTRL_END)RETURN_ERR(RT_ERR_RG_INDEX_OUT_OF_RANGE); if(pAct==NULL)RETURN_ERR(RT_ERR_RG_NULL_POINTER); //FIXME: for now, we just support dslite multicast unmatch action if(ctrlType!=L34_DSLITE_CTRL_DS_UNMATCH_ACT)RETURN_ERR(RT_ERR_RG_CHIP_NOT_SUPPORT); *pAct=rg_db.systemGlobal.dsliteControlSet[ctrlType]; return (RT_ERR_RG_OK); #endif return RT_ERR_RG_NOT_SUPPORT_TESTCHIP; } //MAC rtk_rg_err_code_t rtk_rg_apollo_macEntry_add(rtk_rg_macEntry_t *macEntry, int *entry_idx) { rtk_l2_ucastAddr_t lut; memset(&lut,0,sizeof(rtk_l2_ucastAddr_t)); memcpy(lut.mac.octet,macEntry->mac.octet,ETHER_ADDR_LEN); lut.fid=macEntry->fid; lut.vid=macEntry->vlan_id; //set lut traffic bit to 1(age=7), so idle time will not be add in first round. lut.age=7; lut.auth=macEntry->auth; //Check input parameters if(rg_db.systemGlobal.vlanInit==0) return (RT_ERR_RG_NOT_INIT); if(macEntry->port_idx>=RTK_RG_PORT_CPU) { lut.port=RTK_RG_PORT_CPU; lut.ext_port=macEntry->port_idx-RTK_RG_PORT_CPU; #if defined(CONFIG_DUALBAND_CONCURRENT) && defined(CONFIG_RTL9600_SERIES) #if 1 //20130722: if the packet is from ext0, this packet will hit ACL rule.(modify dpmask to 8) // the GMAC hw will reference CPU_RRING_ROUTING. 
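/* Note on the extension-port mapping a few lines above (informational only): any
 * macEntry->port_idx at or beyond RTK_RG_PORT_CPU is installed on the CPU MAC port with
 * lut.ext_port = port_idx - RTK_RG_PORT_CPU, e.g. port_idx == RTK_RG_PORT_CPU + 1 yields
 * ext_port 1. On dual-band concurrent RTL9600 builds, the RTK_RG_EXT_PORT1 case right
 * below additionally forces the priority toward the slave GMAC.
 */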
if(macEntry->port_idx==RTK_RG_EXT_PORT1) { //lut.flags|=(RTK_L2_UCAST_FLAG_FWD_PRI|RTK_L2_UCAST_FLAG_STATIC); // must set static becasue unknow DA can't forward to CPU2(trap to CPU1 again) lut.flags|=(RTK_L2_UCAST_FLAG_FWD_PRI); //2013071 lut.priority=CONFIG_DEFAULT_TO_SLAVE_GMAC_PRI; } #endif #endif } else { lut.port=macEntry->port_idx; } if(macEntry->static_entry) lut.flags|=RTK_L2_UCAST_FLAG_STATIC; if(macEntry->isIVL) lut.flags|=RTK_L2_UCAST_FLAG_IVL; if(macEntry->arp_used) lut.flags|=RTK_L2_UCAST_FLAG_ARP_USED; #if defined(CONFIG_RTL9600_SERIES) || defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) #else // support ctag_if if(macEntry->ctag_if) lut.flags|=RTK_L2_UCAST_FLAG_CTAG_IF; else lut.flags&=(~RTK_L2_UCAST_FLAG_CTAG_IF); #endif //layer2 interface remarking #if defined(CONFIG_RTL9600_SERIES) if(lut.port==RTK_RG_PORT_PON && rg_db.systemGlobal.IntfRmkEnabled==1) { int i; //search intf by vlan for(i=0;i<MAX_NETIF_SW_TABLE_SIZE;i++){ if( rg_db.systemGlobal.interfaceInfo[i].valid==1 && rg_db.systemGlobal.interfaceInfo[i].storedInfo.is_wan ==1 && lut.vid==rg_db.systemGlobal.interfaceInfo[i].storedInfo.wan_intf.wan_intf_conf.egress_vlan_id){ lut.flags|=RTK_L2_UCAST_FLAG_FWD_PRI; lut.priority = rg_db.systemGlobal.intfMappingToPbit[i]; break; } } } #endif ASSERT_EQ(RTK_L2_ADDR_ADD(&lut),RT_ERR_OK); //memcpy(rg_db.mac[lut.index].macAddr.octet,macEntry->mac.octet,6); TABLE("add MAC[%02x:%02x:%02x:%02x:%02x:%02x] SPA=%d, index=%d %s",macEntry->mac.octet[0],macEntry->mac.octet[1],macEntry->mac.octet[2] ,macEntry->mac.octet[3],macEntry->mac.octet[4],macEntry->mac.octet[5],macEntry->port_idx, lut.index,macEntry->static_entry?"STATIC":"DYN"); rg_db.lut[lut.index].fix_l34_vlan=macEntry->fix_l34_vlan; #if defined(CONFIG_RTL9600_SERIES) #else //support lut traffic bit rg_db.lut[lut.index].idleSecs = 0; #endif //20160113LUKE: if we had registered default URL for redirect, set redirect_http_req of lut in LAN. if(((rg_db.systemGlobal.forcePortal_url_list[0].valid)||(rg_db.redirectHttpAll.enable))&&(macEntry->port_idx!=RTK_RG_PORT_CPU)&&(rg_db.systemGlobal.lanPortMask.portmask&(0x1<<macEntry->port_idx))) rg_db.lut[lut.index].redirect_http_req=1; *entry_idx=lut.index; return (RT_ERR_RG_OK); // * RT_ERR_RG_ENTRY_FULL - the MAC entry is full. } rtk_rg_err_code_t rtk_rg_apollo_macEntry_del(int entry_idx) { rtk_l2_addr_table_t l2Entry; int found=entry_idx, ret; //Check input parameters if(rg_db.systemGlobal.vlanInit==0) RETURN_ERR(RT_ERR_RG_NOT_INIT); if(entry_idx>=MAX_LUT_HW_TABLE_SIZE) RETURN_ERR(RT_ERR_RG_INDEX_OUT_OF_RANGE); //2 FIXME: compare software LUT link-list!! 
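/* Deletion below is strictly index based: rtk_l2_nextValidEntry_get() is seeded with
 * entry_idx and must return exactly that index, otherwise the slot is empty (or the next
 * valid entry lies beyond it) and RT_ERR_RG_ENTRY_NOT_EXIST is returned. A minimal caller
 * sketch, with the entry contents assumed for illustration:
 *
 *   rtk_rg_macEntry_t entry = {0};   // fill mac/vlan_id/port_idx as needed
 *   int idx;
 *   if (rtk_rg_apollo_macEntry_add(&entry, &idx) == RT_ERR_RG_OK)
 *       rtk_rg_apollo_macEntry_del(idx);   // removes exactly the entry just added
 */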
ASSERT_EQ(rtk_l2_nextValidEntry_get(&found,&l2Entry),RT_ERR_OK); if(found==entry_idx) { #if 0 rtlglue_printf("found at %d, mac=%02x-%02x-%02x-%02x-%02x-%02x ivl=%d\n",entry_idx,l2Entry.entry.l2UcEntry.mac.octet[0] ,l2Entry.entry.l2UcEntry.mac.octet[1] ,l2Entry.entry.l2UcEntry.mac.octet[2] ,l2Entry.entry.l2UcEntry.mac.octet[3] ,l2Entry.entry.l2UcEntry.mac.octet[4] ,l2Entry.entry.l2UcEntry.mac.octet[5] ,l2Entry.entry.l2UcEntry.flags&RTK_L2_UCAST_FLAG_IVL ); #endif if(l2Entry.entryType==RTK_LUT_L2UC) { ret = RTK_L2_ADDR_DEL(&l2Entry.entry.l2UcEntry); if(ret!=RT_ERR_OK) WARNING("del l2 %d fail!!", entry_idx); ASSERT_EQ(ret,RT_ERR_OK); //memset(rg_db.mac[entry_idx].macAddr.octet,0,6); TABLE("MAC[%02x:%02x:%02x:%02x:%02x:%02x] del" ,l2Entry.entry.l2UcEntry.mac.octet[0] ,l2Entry.entry.l2UcEntry.mac.octet[1] ,l2Entry.entry.l2UcEntry.mac.octet[2] ,l2Entry.entry.l2UcEntry.mac.octet[3] ,l2Entry.entry.l2UcEntry.mac.octet[4] ,l2Entry.entry.l2UcEntry.mac.octet[5]); return (RT_ERR_RG_OK); } else { WARNING("l2 %d is not unicast entry, del fail!!", entry_idx); return (RT_ERR_RG_FAILED); } } else return (RT_ERR_RG_ENTRY_NOT_EXIST); //* RT_ERR_RG_NOT_INIT - system is not initiated. } /* if (*valid_idx)==-1 search by mac (macEntry->mac) else search nextValidEntry start from *valid_idx and (1)set to macEntry (2)set *valid_idx=l2UcastData.index */ rtk_rg_err_code_t rtk_rg_apollo_macEntry_find(rtk_rg_macEntry_t *macEntry,int *valid_idx) { int i,ret=0; rtk_l2_ucastAddr_t l2UcastData={0}; int input=*valid_idx; int output=input; rtk_rg_lut_linkList_t *pSoftLut; //Check input parameters if(rg_db.systemGlobal.vlanInit==0) RETURN_ERR(RT_ERR_RG_NOT_INIT); if((*valid_idx>=MAX_LUT_SW_TABLE_SIZE) || (*valid_idx<-1)) RETURN_ERR(RT_ERR_RG_INDEX_OUT_OF_RANGE); if(macEntry==NULL) RETURN_ERR(RT_ERR_RG_NULL_POINTER); //Search by MAC if(*valid_idx==-1) output=input=0; else if(*valid_idx>=MAX_LUT_HW_TABLE_SIZE){ CHECK_NEXT: if(rg_db.lut[output].valid){ memcpy(&l2UcastData,&rg_db.lut[output].rtk_lut.entry.l2UcEntry,sizeof(rtk_l2_ucastAddr_t)); *valid_idx=output; goto LUT_TO_MACENTRY; } if(++output>=MAX_LUT_SW_TABLE_SIZE)return (RT_ERR_RG_NO_MORE_ENTRY_FOUND); goto CHECK_NEXT; } while(1) { ret=rtk_l2_nextValidAddr_get(&output, &l2UcastData); if(ret!=RT_ERR_OK) return (RT_ERR_RG_NO_MORE_ENTRY_FOUND); if(input>output)goto SW_MAC_FIND; //Search by MAC if(*valid_idx==-1) { if(memcmp(macEntry->mac.octet,l2UcastData.mac.octet,ETHER_ADDR_LEN)) { output++; input=output; continue; } } *valid_idx=l2UcastData.index; LUT_TO_MACENTRY: memset(macEntry,0,sizeof(rtk_rg_macEntry_t)); macEntry->fid=l2UcastData.fid; memcpy(macEntry->mac.octet,l2UcastData.mac.octet,6); if(l2UcastData.port==RTK_RG_PORT_CPU) { macEntry->port_idx=RTK_RG_PORT_CPU+l2UcastData.ext_port; } else { macEntry->port_idx=l2UcastData.port; } if(l2UcastData.flags&RTK_L2_UCAST_FLAG_STATIC) macEntry->static_entry=1; macEntry->vlan_id=l2UcastData.vid; macEntry->isIVL = (l2UcastData.flags & RTK_L2_UCAST_FLAG_IVL)?1:0; macEntry->arp_used = (l2UcastData.flags & RTK_L2_UCAST_FLAG_ARP_USED)?1:0; macEntry->auth=l2UcastData.auth; #if defined(CONFIG_RTL9600_SERIES) || defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) #else // support ctag_if macEntry->ctag_if = (l2UcastData.flags & RTK_L2_UCAST_FLAG_CTAG_IF)?1:0; #endif #if defined(CONFIG_RTL9600_SERIES) #else //support lut traffic bit macEntry->idleSecs=rg_db.lut[*valid_idx].idleSecs; #endif //rtlglue_printf("index=%d \n",data.entry.l2UcEntry.index); return (RT_ERR_RG_OK); } SW_MAC_FIND: //Check if we had been add to software LUT in all link-list-head 
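/* The loop below walks every software-LUT hash head and memcmp()s the MAC; it is the
 * fallback for the search-by-MAC convention documented above the function (pass
 * *valid_idx == -1 and pre-fill macEntry->mac). Illustrative call; target_mac is a
 * hypothetical 6-byte array supplied by the caller:
 *
 *   rtk_rg_macEntry_t e = {0};
 *   int idx = -1;                                     // -1 selects search-by-MAC
 *   memcpy(e.mac.octet, target_mac, ETHER_ADDR_LEN);
 *   if (rtk_rg_apollo_macEntry_find(&e, &idx) == RT_ERR_RG_OK)
 *       ;                                             // idx now holds the LUT index
 */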
for(i=0;i<MAX_LUT_SW_TABLE_HEAD;i++){ if(!list_empty(&rg_db.softwareLutTableHead[i])){ list_for_each_entry(pSoftLut,&rg_db.softwareLutTableHead[i],lut_list){ if(!memcmp(rg_db.lut[pSoftLut->idx].rtk_lut.entry.l2UcEntry.mac.octet,macEntry->mac.octet,ETHER_ADDR_LEN)){ memcpy(&l2UcastData,&rg_db.lut[pSoftLut->idx].rtk_lut.entry.l2UcEntry,sizeof(rtk_l2_ucastAddr_t)); *valid_idx=pSoftLut->idx; goto LUT_TO_MACENTRY; } } } } return (RT_ERR_RG_NO_MORE_ENTRY_FOUND); // * RT_ERR_RG_NOT_INIT - system is not initiated. } __IRAM_FWDENG_L34 int _rtk_rg_l3lookup(ipaddr_t ip) { int i; rtk_l34_routing_entry_t *pL3; unsigned char longestMask=0,matchIdx=V4_DEFAULT_ROUTE_IDX; for(i=0;i<MAX_L3_SW_TABLE_SIZE;i++) { if(i== V4_DEFAULT_ROUTE_IDX) continue; pL3=&rg_db.l3[i].rtk_l3; if(pL3->valid) { //DEBUG("ip=%x mask=%d iplookup=%x\n",rg_db.l3[i].rtk_l3.ipAddr,rg_db.l3[i].rtk_l3.ipMask,ip); if(((pL3->ipMask+1)>longestMask) && (pL3->ipAddr>>(31-pL3->ipMask))==(ip>>(31-pL3->ipMask))) { //DEBUG("return %d",i); longestMask=pL3->ipMask+1; matchIdx=i; //return i; //interface route } } } return matchIdx; //return hit index (default route) } int _rtk_rg_v6L3lookup(unsigned char *ipv6) { int i,prefix,longestMask=0,matchIdx=V6_DEFAULT_ROUTE_IDX; unsigned short idx,bitMask; rtk_ipv6Routing_entry_t *pL3; //DEBUG("_rtk_rg_v6L3lookup"); for(i=0;i<MAX_IPV6_ROUTING_SW_TABLE_SIZE;i++) { if(i == V6_HW_DEFAULT_ROUTE_IDX) continue; pL3=&rg_db.v6route[i].rtk_v6route; if(pL3->valid==1) { prefix=pL3->ipv6PrefixLen; if(prefix==128&&!memcmp(pL3->ipv6Addr.ipv6_addr,ipv6,IPV6_ADDR_LEN)) return i; //host route idx=(prefix>>3)&0xff; if((prefix&0x7)==0) bitMask=0; else bitMask=(0xff<<(8-(prefix&0x7)))&0xff; //DEBUG("prefix = %d, idx = %d, bitMask =%02x",prefix,idx,bitMask); //DEBUG("ip=%x mask=%d iplookup=%x\n",rg_db.l3[i].rtk_l3.ipAddr,rg_db.l3[i].rtk_l3.ipMask,ip); if(prefix>longestMask && memcmp(pL3->ipv6Addr.ipv6_addr,ipv6,idx)==0 && ((pL3->ipv6Addr.ipv6_addr[idx]&bitMask)==(ipv6[idx]&bitMask))) { //DEBUG("Match %d!!",i); longestMask=prefix; matchIdx=i; //return i; //interface route } } } if(rg_db.v6route[matchIdx].rtk_v6route.valid==0) //if default route is invalid, return -1 return -1; else return matchIdx; //return hit index (default route) } int _rtk_rg_check_duplicate_arp(int l2Idx) { int i; for(i = 0; i < MAX_ARP_SW_TABLE_SIZE; i++) { if(rg_db.arp[i].rtk_arp.valid==0) continue; if(rg_db.arp[i].rtk_arp.nhIdx == l2Idx) return 1; } return 0; } rtk_rg_err_code_t rtk_rg_apollo_arpEntry_add(rtk_rg_arpEntry_t *arpEntry, int *arp_entry_idx) { int l3Idx; int l2Idx; rtk_l2_addr_table_t asic_l2_entry; rtk_rg_arpInfo_t arpInfo; #if defined(CONFIG_APOLLO_ROMEDRIVER) || defined(CONFIG_XDSL_ROMEDRIVER) rtk_rg_arp_linkList_t *pSwArpList; #endif #if defined(CONFIG_RTL9600_SERIES) || defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) int arpIdx=-1; rtk_l34_arp_entry_t asic_arp_entry; #elif defined(CONFIG_RTL9602C_SERIES) uint16 arpIdx; rtk_rg_arp_linkList_t *pHwArpList; #endif //Check input parameters if(rg_db.systemGlobal.vlanInit==0) RETURN_ERR(RT_ERR_RG_NOT_INIT); memset(&arpInfo,0,sizeof(arpInfo)); //Get Gateway IP l3Idx=_rtk_rg_l3lookup(arpEntry->ipv4Addr); if(l3Idx==V4_DEFAULT_ROUTE_IDX) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if(rg_db.l3[l3Idx].rtk_l3.process==L34_PROCESS_ARP) { l2Idx = arpEntry->macEntryIdx; //Since 6266's ARP, neighbor, nexthop only have 11 bits for l2Idx, they can never pointer to bCAM adress which after 2048 #if defined(CONFIG_RTL9600_SERIES) || defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) 
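/* Informational note for the range check and the hardware ARP indexing used further below
 * on the RTL9600/xDSL path: the 11-bit l2Idx fields mentioned in the comment above can
 * address at most 2^11 = 2048 L2 entries, which is why indices in the bCAM range
 * (>= MAX_LUT_HW_TABLE_SIZE - MAX_LUT_BCAM_TABLE_SIZE) are rejected. The hardware ARP slot
 * is later computed as (arpStart << 2) + (ipv4Addr & ((1 << (31 - ipMask)) - 1)); purely
 * for illustration, when 31 - ipMask == 2 that is a 4-entry block starting at arpStart*4,
 * indexed by the low two bits of the IPv4 address.
 */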
if(l2Idx>=MAX_LUT_HW_TABLE_SIZE-MAX_LUT_BCAM_TABLE_SIZE)RETURN_ERR(RT_ERR_RG_CHIP_NOT_SUPPORT); #endif if(rtk_l2_nextValidEntry_get(&l2Idx,&asic_l2_entry)) return (RT_ERR_RG_L2_ENTRY_NOT_FOUND); DEBUG("l2Idx=%d arp->l2Idx=%d",l2Idx,arpEntry->macEntryIdx); if(l2Idx!=arpEntry->macEntryIdx) return (RT_ERR_RG_L2_ENTRY_NOT_FOUND); if(rg_db.systemGlobal.antiIpSpoofStatus==RTK_RG_ENABLED) { if(_rtk_rg_check_duplicate_arp(arpEntry->macEntryIdx)) { TRACE("Anti IP spoofing: IP entry already exist, discard learning !"); return RT_ERR_RG_OK; } } //Check ARP exist or not #if defined(CONFIG_RG_FLOW_BASED_PLATFORM) goto check_sw_arp; #elif defined(CONFIG_RTL9600_SERIES) || defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) arpIdx=(rg_db.l3[l3Idx].rtk_l3.arpStart<<2)+(arpEntry->ipv4Addr & ((1<<(31-rg_db.l3[l3Idx].rtk_l3.ipMask))-1)); if(arpIdx>=MAX_ARP_HW_TABLE_SIZE) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if(rg_db.arp[arpIdx].rtk_arp.valid) { if(rg_db.arp[arpIdx].staticEntry) { if(rg_db.arp[arpIdx].rtk_arp.nhIdx!=arpEntry->macEntryIdx) { DEBUG("different L2Idx but ARP entry is valid and STATIC...failed to add ARP"); RETURN_ERR(RT_ERR_RG_ARP_ENTRY_STATIC); } else { DEBUG("same L2Idx with static ARP..do nothing"); return (RT_ERR_RG_OK); } } } asic_arp_entry.index=arpIdx; asic_arp_entry.nhIdx=arpEntry->macEntryIdx; asic_arp_entry.valid=1; ASSERT_EQ(RTK_L34_ARPTABLE_SET(arpIdx,&asic_arp_entry),RT_ERR_OK); rg_db.arp[arpIdx].staticEntry=arpEntry->staticEntry; rg_db.arp[arpIdx].idleSecs=0; rg_db.arp[arpIdx].sendReqCount=0; rg_db.arp[arpIdx].routingIdx=l3Idx; rg_db.arp[arpIdx].ipv4Addr=arpEntry->ipv4Addr; #if !defined(CONFIG_RG_FLOW_BASED_PLATFORM) rg_db.arpValidSet[arpIdx>>5] |= (0x1<<(arpIdx&31)); #endif DEBUG("### add arp[%d], ip=0x%x l2=%d l3idx=%d %s###\n",arpIdx,arpEntry->ipv4Addr,l2Idx,l3Idx,arpEntry->staticEntry?"STATIC":"DYN"); *arp_entry_idx=arpIdx; arpInfo.valid = rg_db.arp[arpIdx].rtk_arp.valid; #elif defined(CONFIG_RTL9602C_SERIES) _rtk_rg_softwareArpTableLookUp(l3Idx,arpEntry->ipv4Addr,&pSwArpList,0); if(pSwArpList!=NULL) //sw arp is found { if(rg_db.arp[pSwArpList->idx].staticEntry==0) { //Delete old link-list first DEBUG("delete the old dynamic software ARP before add new one"); _rtk_rg_softwareArpTableDel(pSwArpList); } else { if(rg_db.arp[pSwArpList->idx].rtk_arp.nhIdx!=arpEntry->macEntryIdx) { DEBUG("different L2Idx but software ARP entry is valid and STATIC...failed to add ARP"); RETURN_ERR(RT_ERR_RG_ARP_ENTRY_STATIC); } else { DEBUG("same L2Idx with static software ARP..do nothing"); return (RT_ERR_RG_OK); } } goto add_sw_arp; } else // sw arp is not found { _rtk_rg_hardwareArpTableLookUp(l3Idx,arpEntry->ipv4Addr,&pHwArpList,0); if(pHwArpList!=NULL) //hw arp is found { DEBUG("HW ARP entry is added again..."); if(rg_db.arp[pHwArpList->idx].staticEntry==0) { //Delete old link-list first DEBUG("delete the old dynamic hardware ARP before add new one"); ASSERT_EQ(_rtk_rg_hardwareArpTableDel(pHwArpList), RT_ERR_RG_OK); } else { if(rg_db.arp[pHwArpList->idx].rtk_arp.nhIdx!=arpEntry->macEntryIdx) { DEBUG("different L2Idx but ARP entry is valid and STATIC...failed to add ARP"); RETURN_ERR(RT_ERR_RG_ARP_ENTRY_STATIC); } else { DEBUG("same L2Idx with static ARP..do nothing"); return (RT_ERR_RG_OK); } } //arpIdx = pHwArpList->idx; } if(_rtk_rg_hardwareArpTableAdd(l3Idx, arpEntry->ipv4Addr, l2Idx, arpEntry->staticEntry, &arpIdx) != RT_ERR_RG_OK) goto add_sw_arp; else { arpInfo.valid = rg_db.arp[arpIdx].rtk_arp.valid; *arp_entry_idx = arpIdx; } } #endif } #if defined(CONFIG_APOLLO_ROMEDRIVER) || 
defined(CONFIG_XDSL_ROMEDRIVER) else if(rg_db.l3[l3Idx].rtk_l3.process==L34_PROCESS_CPU && rg_db.l3[l3Idx].rtk_l3.ipAddr>0) //software ARP table { l2Idx = arpEntry->macEntryIdx; if(rtk_l2_nextValidEntry_get(&l2Idx,&asic_l2_entry)) return (RT_ERR_RG_L2_ENTRY_NOT_FOUND); DEBUG("l2Idx=%d arp->l2Idx=%d",l2Idx,arpEntry->macEntryIdx); if(l2Idx!=arpEntry->macEntryIdx) return (RT_ERR_RG_L2_ENTRY_NOT_FOUND); #if defined(CONFIG_RG_FLOW_BASED_PLATFORM) check_sw_arp: #endif //Check software ARP exist or not _rtk_rg_softwareArpTableLookUp(l3Idx,arpEntry->ipv4Addr,&pSwArpList,0); if(pSwArpList!=NULL) { if(rg_db.arp[pSwArpList->idx].staticEntry==0) { //Delete old link-list first DEBUG("delete the old dynamic software ARP before add new one"); _rtk_rg_softwareArpTableDel(pSwArpList); } else { if(rg_db.arp[pSwArpList->idx].rtk_arp.nhIdx!=arpEntry->macEntryIdx) { DEBUG("different L2Idx but software ARP entry is valid and STATIC...failed to add ARP"); RETURN_ERR(RT_ERR_RG_ARP_ENTRY_STATIC); } else { DEBUG("same L2Idx with static software ARP..do nothing"); return (RT_ERR_RG_OK); } } } #if defined(CONFIG_RTL9602C_SERIES) add_sw_arp: #endif DEBUG("add software ARP %x! l2Idx is %d",arpEntry->ipv4Addr,l2Idx); assert_ok(_rtk_rg_softwareArpTableAdd(l3Idx,arpEntry->ipv4Addr,l2Idx,arpEntry->staticEntry)); } #endif else RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if(rtk_l2_nextValidEntry_get(&l2Idx,&asic_l2_entry)) return (RT_ERR_RG_L2_ENTRY_NOT_FOUND); DEBUG("l2Idx=%d arp->l2Idx=%d",l2Idx,arpEntry->macEntryIdx); if(l2Idx!=arpEntry->macEntryIdx) return (RT_ERR_RG_L2_ENTRY_NOT_FOUND); if(asic_l2_entry.entryType==RTK_LUT_L2UC) { if((asic_l2_entry.entry.l2UcEntry.flags&RTK_L2_UCAST_FLAG_ARP_USED)==0) { asic_l2_entry.entry.l2UcEntry.flags|=RTK_L2_UCAST_FLAG_ARP_USED; ASSERT_EQ(RTK_L2_ADDR_ADD(&asic_l2_entry.entry.l2UcEntry),RT_ERR_OK); //memcpy(rg_db.mac[asic_l2_entry.entry.l2UcEntry.index].macAddr.octet,asic_l2_entry.entry.l2UcEntry.mac.octet,6); } } if(rg_db.systemGlobal.initParam.arpAddByHwCallBack != NULL) { memcpy(&arpInfo.arpEntry,arpEntry,sizeof(rtk_rg_arpEntry_t)); rg_db.systemGlobal.initParam.arpAddByHwCallBack(&arpInfo); } return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_arpEntry_del(int arp_entry_idx) { int retval=0; #if defined(CONFIG_RTL9600_SERIES) || defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) rtk_l34_arp_entry_t asic_arp_entry; #endif rtk_rg_arpInfo_t arpInfo; #if defined(CONFIG_APOLLO_ROMEDRIVER) || defined(CONFIG_XDSL_ROMEDRIVER) int i; rtk_rg_arp_linkList_t *pArpList,*pNextArpList; #endif //Check input parameters if(rg_db.systemGlobal.vlanInit==0) RETURN_ERR(RT_ERR_RG_NOT_INIT); if(arp_entry_idx<0 ||arp_entry_idx>=MAX_ARP_SW_TABLE_SIZE)RETURN_ERR(RT_ERR_RG_INVALID_PARAM); //Check whether entry is inused if(rg_db.arp[arp_entry_idx].rtk_arp.valid==0)return (RT_ERR_RG_ARP_NOT_FOUND); //init memset(&arpInfo,0,sizeof(rtk_rg_arpInfo_t)); //Sync to ARP info. 
arpInfo.arpEntry.ipv4Addr = rg_db.arp[arp_entry_idx].ipv4Addr; arpInfo.arpEntry.macEntryIdx = rg_db.arp[arp_entry_idx].rtk_arp.nhIdx; arpInfo.arpEntry.staticEntry = rg_db.arp[arp_entry_idx].staticEntry; #if defined(CONFIG_APOLLO_FPGA_PHY_TEST) && defined(CONFIG_RTL9602C_SERIES) if(arp_entry_idx<MAX_ARP_HW_TABLE_SIZE_FPGA) #else if(arp_entry_idx<MAX_ARP_HW_TABLE_SIZE) #endif { #if defined(CONFIG_RTL9600_SERIES) || defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) //Clear ASIC and software ARP table entry memset(&asic_arp_entry,0,sizeof(rtk_l34_arp_entry_t)); retval = RTK_L34_ARPTABLE_SET(arp_entry_idx,&asic_arp_entry); ASSERT_EQ(retval,RT_ERR_OK); rg_db.arp[arp_entry_idx].ipv4Addr=0; rg_db.arp[arp_entry_idx].staticEntry=0; rg_db.arp[arp_entry_idx].idleSecs=0; rg_db.arp[arp_entry_idx].sendReqCount=0; #if !defined(CONFIG_RG_FLOW_BASED_PLATFORM) rg_db.arpValidSet[arp_entry_idx>>5] &= ~(0x1<<(arp_entry_idx&31)); #endif #elif defined(CONFIG_RTL9602C_SERIES) //Delete it retval=1; for(i=0;i<MAX_ARP_HW_TABLE_HEAD;i++) { list_for_each_entry_safe(pArpList,pNextArpList,&rg_db.hardwareArpTableHead[i],arp_list) { if(pArpList->idx==arp_entry_idx) { ASSERT_EQ(_rtk_rg_hardwareArpTableDel(pArpList), RT_ERR_RG_OK); retval=0; break; } } } if(retval) return (RT_ERR_RG_ARP_NOT_FOUND); //Clear ARP table entry memset(&rg_db.arp[arp_entry_idx],0,sizeof(rtk_rg_table_arp_t)); #endif } #if defined(CONFIG_APOLLO_ROMEDRIVER) || defined(CONFIG_XDSL_ROMEDRIVER) else //deleting software ARP entry { //Delete it retval=1; for(i=0;i<MAX_ARP_SW_TABLE_HEAD;i++) { list_for_each_entry_safe(pArpList,pNextArpList,&rg_db.softwareArpTableHead[i],arp_list) { if(pArpList->idx==arp_entry_idx) { _rtk_rg_softwareArpTableDel(pArpList); retval=0; break; } } } if(retval) return (RT_ERR_RG_ARP_NOT_FOUND); //Clear ARP table entry memset(&rg_db.arp[arp_entry_idx],0,sizeof(rtk_rg_table_arp_t)); } #else else RETURN_ERR(RT_ERR_RG_INVALID_PARAM); #endif if(rg_db.systemGlobal.initParam.arpDelByHwCallBack != NULL) { rg_db.systemGlobal.initParam.arpDelByHwCallBack(&arpInfo); } TABLE("del arp %d", arp_entry_idx); return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_arpEntry_find(rtk_rg_arpInfo_t *arpInfo,int *arp_valid_idx) { rtk_rg_arpEntry_t *arpEntry; int arpIdx=0; #if defined(CONFIG_APOLLO_ROMEDRIVER) || defined(CONFIG_XDSL_ROMEDRIVER) int threshold=MAX_ARP_SW_TABLE_SIZE; //contains software ARP range #else int threshold=MAX_ARP_HW_TABLE_SIZE; #endif arpEntry=&arpInfo->arpEntry; //Check input parameters if(rg_db.systemGlobal.vlanInit==0) RETURN_ERR(RT_ERR_RG_NOT_INIT); //Check NULL ARP entry if(arpInfo==NULL) RETURN_ERR(RT_ERR_RG_NULL_POINTER); if((*arp_valid_idx>=threshold) && (*arp_valid_idx!=-1)) RETURN_ERR(RT_ERR_RG_INDEX_OUT_OF_RANGE); arpIdx = *arp_valid_idx; //Search by IP if(*arp_valid_idx==-1) arpIdx=0; //Check whether entry is inused nextArpEntry: if(arpIdx>=threshold) return (RT_ERR_RG_NO_MORE_ENTRY_FOUND); if(rg_db.arp[arpIdx].rtk_arp.valid==0) { arpIdx++; goto nextArpEntry; } //Search by IP if(*arp_valid_idx==-1) { if(arpInfo->arpEntry.ipv4Addr != rg_db.arp[arpIdx].ipv4Addr) { arpIdx++; goto nextArpEntry; } } //Arp entry arpEntry->ipv4Addr=rg_db.arp[arpIdx].ipv4Addr; arpEntry->macEntryIdx=rg_db.arp[arpIdx].rtk_arp.nhIdx; arpEntry->staticEntry=rg_db.arp[arpIdx].staticEntry; arpInfo->idleSecs=rg_db.arp[arpIdx].idleSecs; arpInfo->valid=rg_db.arp[arpIdx].rtk_arp.valid; //20160121LUKE: return LANNetInfo here! 
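/* Usage sketch for this find API (IP value illustrative only): the search-by-IP convention
 * mirrors the code above, i.e. pass *arp_valid_idx == -1 and pre-fill
 * arpInfo.arpEntry.ipv4Addr; any index >= 0 instead resumes iteration from that slot.
 *
 *   rtk_rg_arpInfo_t info = {0};
 *   int idx = -1;
 *   info.arpEntry.ipv4Addr = 0xc0a80164;   // 192.168.1.100, example only
 *   if (rtk_rg_apollo_arpEntry_find(&info, &idx) == RT_ERR_RG_OK)
 *       ;  // idx is the ARP slot; info carries the nexthop L2 index and LANNet info below
 */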
memcpy(arpInfo->lanNetInfo.dev_name,rg_db.lut[rg_db.arp[arpIdx].rtk_arp.nhIdx].dev_name,MAX_LANNET_DEV_NAME_LENGTH); arpInfo->lanNetInfo.dev_type=rg_db.arp[arpIdx].lanNetInfo.dev_type; arpInfo->lanNetInfo.brand=rg_db.arp[arpIdx].lanNetInfo.brand; strncpy(arpInfo->brandStr,rg_lanNet_brand[arpInfo->lanNetInfo.brand][0],MAX_LANNET_BRAND_NAME_LENGTH); arpInfo->lanNetInfo.model=rg_db.arp[arpIdx].lanNetInfo.model; strncpy(arpInfo->modelStr,rg_lanNet_model[arpInfo->lanNetInfo.model][0],MAX_LANNET_MODEL_NAME_LENGTH); arpInfo->lanNetInfo.os=rg_db.arp[arpIdx].lanNetInfo.os; strncpy(arpInfo->osStr,rg_lanNet_os[arpInfo->lanNetInfo.os][0],MAX_LANNET_OS_NAME_LENGTH); arpInfo->lanNetInfo.conn_type=rg_db.lut[rg_db.arp[arpIdx].rtk_arp.nhIdx].conn_type; *arp_valid_idx=arpIdx; return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_neighborEntry_add(rtk_rg_neighborEntry_t *neighborEntry,int *neighbor_idx) { int i,neighborIdx,hashValue,l3Idx,l2Idx; rtk_l2_addr_table_t asic_l2_entry; rtk_ipv6Neighbor_entry_t asic_neighbor_entry; rtk_rg_neighborInfo_t neighborInfo; #ifdef CONFIG_ROME_NEIGHBOR_LRU int oldestIdx=0,oldestIdleSecs=0; #endif //Check input parameters if(rg_db.systemGlobal.vlanInit==0) RETURN_ERR(RT_ERR_RG_NOT_INIT); //Get Gateway IP l3Idx=neighborEntry->matchRouteIdx; if(rg_db.v6route[l3Idx].rtk_v6route.type!=L34_IPV6_ROUTE_TYPE_LOCAL) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if(l3Idx>=MAX_IPV6_ROUTING_SW_TABLE_SIZE) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); l2Idx=neighborEntry->l2Idx; //Since 6266's ARP, neighbor, nexthop only have 11 bits for l2Idx, they can never pointer to bCAM adress which after 2048 #if defined(CONFIG_RTL9600_SERIES) || defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) if(l2Idx>=MAX_LUT_HW_TABLE_SIZE-MAX_LUT_BCAM_TABLE_SIZE)RETURN_ERR(RT_ERR_RG_CHIP_NOT_SUPPORT); #endif if(rtk_l2_nextValidEntry_get(&l2Idx,&asic_l2_entry)!=RT_ERR_OK) RETURN_ERR(RT_ERR_RG_L2_ENTRY_NOT_FOUND); if(l2Idx!=neighborEntry->l2Idx) RETURN_ERR(RT_ERR_RG_L2_ENTRY_NOT_FOUND); if(asic_l2_entry.entryType==RTK_LUT_L2UC) { if((asic_l2_entry.entry.l2UcEntry.flags&RTK_L2_UCAST_FLAG_ARP_USED)==0) { asic_l2_entry.entry.l2UcEntry.flags|=RTK_L2_UCAST_FLAG_ARP_USED; ASSERT_EQ(RTK_L2_ADDR_ADD(&asic_l2_entry.entry.l2UcEntry),RT_ERR_OK); //memcpy(rg_db.mac[asic_l2_entry.entry.l2UcEntry.index].macAddr.octet,asic_l2_entry.entry.l2UcEntry.mac.octet,6); } } hashValue=_rtk_rg_IPv6NeighborHash(neighborEntry->interfaceId,l3Idx); DEBUG("Neighbor hashValue=%d l3Idx=%d interfaceId(%02x%02x:%02x%02x:%02x%02x:%02x%02x) ",hashValue,l3Idx, neighborEntry->interfaceId[0],neighborEntry->interfaceId[1],neighborEntry->interfaceId[2],neighborEntry->interfaceId[3], neighborEntry->interfaceId[4],neighborEntry->interfaceId[5],neighborEntry->interfaceId[6],neighborEntry->interfaceId[7]); neighborIdx=(hashValue<<3); for(i=0;i<8;i++) //8-way hash { //find the same entry first. if((rg_db.v6neighbor[neighborIdx+i].rtk_v6neighbor.valid==1)&& (rg_db.v6neighbor[neighborIdx+i].rtk_v6neighbor.ipv6RouteIdx==neighborEntry->matchRouteIdx)&& (memcmp(&rg_db.v6neighbor[neighborIdx+i].rtk_v6neighbor.ipv6Ifid,neighborEntry->interfaceId,8)==0)) break; } if(i==8) { for(i=0;i<8;i++) //8-way hash { // find empty entry if the same entry isn't found. 
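/* Worked example for the 8-way hashing in this function (numbers illustrative): the bucket
 * base is neighborIdx = hashValue << 3, so hashValue 5 covers table slots 40..47. The pass
 * above looked for an existing entry with the same route index and interface ID; this pass
 * falls back to the first invalid way inside the same bucket.
 */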
if(rg_db.v6neighbor[neighborIdx+i].rtk_v6neighbor.valid==0) break; } } #ifdef CONFIG_ROME_NEIGHBOR_LRU if(i==8) { for(i=0;i<8;i++) { if(rg_db.v6neighbor[neighborIdx+i].idleSecs>oldestIdleSecs) { oldestIdleSecs=rg_db.v6neighbor[neighborIdx+i].idleSecs; oldestIdx=i; } } i=oldestIdx; TRACE("Neighbor entry is Full, replace the oldest entry with the same hashIdx. idx=[%d]",neighborIdx+i); } #else if(i==8) RETURN_ERR(RT_ERR_RG_NEIGHBOR_FULL); #endif bzero(&asic_neighbor_entry,sizeof(rtk_ipv6Neighbor_entry_t)); asic_neighbor_entry.valid=1; asic_neighbor_entry.l2Idx=l2Idx; asic_neighbor_entry.ipv6RouteIdx=l3Idx; asic_neighbor_entry.ipv6Ifid=neighborEntry->interfaceId[0]; asic_neighbor_entry.ipv6Ifid<<=8; asic_neighbor_entry.ipv6Ifid|=neighborEntry->interfaceId[1]; asic_neighbor_entry.ipv6Ifid<<=8; asic_neighbor_entry.ipv6Ifid|=neighborEntry->interfaceId[2]; asic_neighbor_entry.ipv6Ifid<<=8; asic_neighbor_entry.ipv6Ifid|=neighborEntry->interfaceId[3]; asic_neighbor_entry.ipv6Ifid<<=8; asic_neighbor_entry.ipv6Ifid|=neighborEntry->interfaceId[4]; asic_neighbor_entry.ipv6Ifid<<=8; asic_neighbor_entry.ipv6Ifid|=neighborEntry->interfaceId[5]; asic_neighbor_entry.ipv6Ifid<<=8; asic_neighbor_entry.ipv6Ifid|=neighborEntry->interfaceId[6]; asic_neighbor_entry.ipv6Ifid<<=8; asic_neighbor_entry.ipv6Ifid|=neighborEntry->interfaceId[7]; ASSERT_EQ(RTK_L34_IPV6NEIGHBORTABLE_SET(neighborIdx+i,&asic_neighbor_entry),RT_ERR_OK); rg_db.v6neighbor[neighborIdx+i].staticEntry=neighborEntry->staticEntry; rg_db.v6neighbor[neighborIdx+i].idleSecs=0; #if !defined(CONFIG_RG_FLOW_BASED_PLATFORM) rg_db.neighborValidSet[(neighborIdx+i)>>5] |= (0x1<<((neighborIdx+i)&31)); #endif TABLE("### add neighbor[%d], v6IfId=0x%016llx lutIdx=%d v6RouteIdx=%d static=%d ###\n",neighborIdx+i,asic_neighbor_entry.ipv6Ifid,l2Idx,l3Idx,neighborEntry->staticEntry); *neighbor_idx=neighborIdx+i; #if defined(CONFIG_RTL9600_SERIES) #else //support lut traffic bit if(rg_db.lut[l2Idx].valid) rg_db.lut[l2Idx].arp_refCount++; #endif // TODO:Call the initParam's neighborAddByHwCallBack if(rg_db.systemGlobal.initParam.neighborAddByHwCallBack!= NULL) { bzero(&neighborInfo,sizeof(rtk_rg_neighborInfo_t)); memcpy(&neighborInfo.neighborEntry,neighborEntry,sizeof(rtk_rg_neighborEntry_t)); rg_db.systemGlobal.initParam.neighborAddByHwCallBack(&neighborInfo); } return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_neighborEntry_del(int neighbor_idx) { int i; uint32 neighborL2Idx; #if defined(CONFIG_RTL9600_SERIES) #else //support lut traffic bit int retval = 0, l2Idx, nxthopRefFlag; rtk_l2_addr_table_t asic_l2_entry; #endif rtk_ipv6Neighbor_entry_t asic_neighbor_entry; rtk_rg_neighborInfo_t neighborInfo; #ifdef CONFIG_RG_IPV6_STATEFUL_ROUTING_SUPPORT rtk_rg_ipv6_layer4_linkList_t *pV6L4List,*nextEntry; #endif #ifdef CONFIG_RG_IPV6_SOFTWARE_SHORTCUT_SUPPORT rtk_ipv6_addr_t zeroV6Ip; bzero(zeroV6Ip.ipv6_addr, IPV6_ADDR_LEN); #endif //Check input parameters if(rg_db.systemGlobal.vlanInit==0) RETURN_ERR(RT_ERR_RG_NOT_INIT); //Check whether entry is inused if(rg_db.v6neighbor[neighbor_idx].rtk_v6neighbor.valid==0) return (RT_ERR_RG_NEIGHBOR_NOT_FOUND); neighborL2Idx = rg_db.v6neighbor[neighbor_idx].rtk_v6neighbor.l2Idx; //Sync to Neighbor info. 
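/* The interfaceId copy a few lines below reverses the packing done in
 * rtk_rg_apollo_neighborEntry_add(): interfaceId[i] = (ipv6Ifid >> (56 - 8*i)) & 0xff, so
 * the 64-bit interface ID is kept big-endian with byte 0 taken from bits 63..56. Example
 * (value illustrative): an EUI-64 of 0x021122fffe334455 unpacks back to
 * interfaceId[] = {0x02,0x11,0x22,0xff,0xfe,0x33,0x44,0x55}.
 */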
bzero(&neighborInfo,sizeof(rtk_rg_neighborInfo_t)); neighborInfo.neighborEntry.l2Idx=rg_db.v6neighbor[neighbor_idx].rtk_v6neighbor.l2Idx; neighborInfo.neighborEntry.matchRouteIdx=rg_db.v6neighbor[neighbor_idx].rtk_v6neighbor.ipv6RouteIdx; neighborInfo.neighborEntry.valid=rg_db.v6neighbor[neighbor_idx].rtk_v6neighbor.valid; neighborInfo.neighborEntry.staticEntry=rg_db.v6neighbor[neighbor_idx].staticEntry; for(i=0;i<8;i++) neighborInfo.neighborEntry.interfaceId[i]=(rg_db.v6neighbor[neighbor_idx].rtk_v6neighbor.ipv6Ifid>>(56-(8*i)))&0xff; neighborInfo.idleSecs=rg_db.v6neighbor[neighbor_idx].idleSecs; //Delete from ASIC TABLE("### del neighbor[%d], v6IfId=0x%016llx lutIdx=%d v6RouteIdx=%d ###\n",neighbor_idx,rg_db.v6neighbor[neighbor_idx].rtk_v6neighbor.ipv6Ifid,neighborL2Idx,rg_db.v6neighbor[neighbor_idx].rtk_v6neighbor.ipv6RouteIdx); bzero(&asic_neighbor_entry,sizeof(rtk_ipv6Neighbor_entry_t)); ASSERT_EQ(RTK_L34_IPV6NEIGHBORTABLE_SET(neighbor_idx,&asic_neighbor_entry),RT_ERR_OK); rg_db.v6neighbor[neighbor_idx].staticEntry=0; rg_db.v6neighbor[neighbor_idx].idleSecs=0; #if !defined(CONFIG_RG_FLOW_BASED_PLATFORM) rg_db.neighborValidSet[neighbor_idx>>5] &= ~(0x1<<(neighbor_idx&31)); #endif #if defined(CONFIG_RTL9600_SERIES) #else //support lut traffic bit //Sync to LUT if(rg_db.lut[neighborL2Idx].valid) { if(rg_db.lut[neighborL2Idx].arp_refCount>0) rg_db.lut[neighborL2Idx].arp_refCount--; if(rg_db.lut[neighborL2Idx].arp_refCount==0) { nxthopRefFlag = 0; for(i=0; i<MAX_NEXTHOP_SW_TABLE_SIZE; i++) { if(neighborL2Idx==rg_db.nexthop[i].rtk_nexthop.nhIdx) { nxthopRefFlag = 1; break; } } if(nxthopRefFlag==0) { //Sync to LUT l2Idx = neighborL2Idx; memset(&asic_l2_entry,0,sizeof(rtk_l2_addr_table_t)); retval = rtk_l2_nextValidEntry_get(&l2Idx, &asic_l2_entry); if((retval==RT_ERR_OK) && (neighborL2Idx == asic_l2_entry.entry.l2UcEntry.index) && (asic_l2_entry.entry.l2UcEntry.flags & RTK_L2_UCAST_FLAG_ARP_USED)!=0 && (asic_l2_entry.entry.l2UcEntry.flags & RTK_L2_UCAST_FLAG_STATIC)==0) { asic_l2_entry.entry.l2UcEntry.flags &= (~RTK_L2_UCAST_FLAG_ARP_USED); retval = RTK_L2_ADDR_ADD(&asic_l2_entry.entry.l2UcEntry); ASSERT_EQ(retval,RT_ERR_OK); } } } } #endif //Delete v6 shortcut and v6 stateful list #ifdef CONFIG_RG_IPV6_SOFTWARE_SHORTCUT_SUPPORT for(i=0; i<MAX_NAPT_V6_SHORTCUT_SIZE; i++) { if(memcmp(rg_db.naptv6ShortCut[i].sip.ipv6_addr, zeroV6Ip.ipv6_addr, IPV6_ADDR_LEN)==0) continue; if(rg_db.naptv6ShortCut[i].neighborIdx>=0 && rg_db.naptv6ShortCut[i].neighborIdx==neighbor_idx) { TABLE("del v6 shortcut[%d].", i); _rtk_rg_v6ShortCut_delete(i); } } #endif #ifdef CONFIG_RG_IPV6_STATEFUL_ROUTING_SUPPORT for(i=0; i<MAX_IPV6_STATEFUL_HASH_HEAD_SIZE; i++) { list_for_each_entry_safe(pV6L4List,nextEntry,&rg_db.ipv6Layer4HashListHead[i],layer4_list) { if(pV6L4List->neighborIdx>=0 && pV6L4List->neighborIdx==neighbor_idx) { TABLE("del v6 stateful shortcut."); //------------------ Critical Section start -----------------------// rg_lock(&rg_kernel.ipv6StatefulLock); _rtk_rg_fwdEngine_ipv6ConnList_del(pV6L4List); //------------------ Critical Section End -----------------------// rg_unlock(&rg_kernel.ipv6StatefulLock); } } } #endif // TODO:Call the initParam's neighborDelByHwCallBack if(rg_db.systemGlobal.initParam.neighborDelByHwCallBack != NULL) { rg_db.systemGlobal.initParam.neighborDelByHwCallBack(&neighborInfo); } return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_neighborEntry_find(rtk_rg_neighborInfo_t *neighborInfo,int *neighbor_valid_idx) { rtk_rg_neighborEntry_t *neighborEntry; int neighborIdx=0,count=0; 
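/* Search convention used below (it mirrors rtk_rg_apollo_arpEntry_find): a
 * *neighbor_valid_idx of -1 means "look up by interfaceId + matchRouteIdx", in which case
 * the bucket base is derived from those two fields and at most the 8 ways of that bucket
 * are probed; a non-negative index simply resumes iteration from that slot.
 */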
unsigned char ipv6Ifid[8]; //Check input parameters if(rg_db.systemGlobal.vlanInit==0) RETURN_ERR(RT_ERR_RG_NOT_INIT); //Check NULL Neighbor entry if(neighborInfo==NULL || neighbor_valid_idx==NULL) RETURN_ERR(RT_ERR_RG_NULL_POINTER); if((*neighbor_valid_idx>=MAX_IPV6_NEIGHBOR_SW_TABLE_SIZE) || (*neighbor_valid_idx<-1)) RETURN_ERR(RT_ERR_RG_INDEX_OUT_OF_RANGE); neighborEntry = &neighborInfo->neighborEntry; neighborIdx = *neighbor_valid_idx; //Search by MAC if(neighborIdx==-1) { neighborIdx=_rtk_rg_IPv6NeighborHash(neighborInfo->neighborEntry.interfaceId,neighborInfo->neighborEntry.matchRouteIdx)<<3; memcpy(ipv6Ifid,neighborInfo->neighborEntry.interfaceId,8); } //Check whether entry is valid nextNeighborEntry: if(neighborIdx>=MAX_IPV6_NEIGHBOR_SW_TABLE_SIZE) return (RT_ERR_RG_NO_MORE_ENTRY_FOUND); if(count==8)return (RT_ERR_RG_NEIGHBOR_NOT_FOUND); //If this entry is invalid, goto next one if(rg_db.v6neighbor[neighborIdx].rtk_v6neighbor.valid==0) { neighborIdx++; if(*neighbor_valid_idx==-1) count++; goto nextNeighborEntry; } //Search by MAC if(*neighbor_valid_idx==-1) { if(memcmp(ipv6Ifid,(unsigned char *)&rg_db.v6neighbor[neighborIdx].rtk_v6neighbor.ipv6Ifid,8)|| neighborInfo->neighborEntry.matchRouteIdx!=rg_db.v6neighbor[neighborIdx].rtk_v6neighbor.ipv6RouteIdx) { neighborIdx++; count++; goto nextNeighborEntry; } } //Neighbor Entry neighborEntry->l2Idx=rg_db.v6neighbor[neighborIdx].rtk_v6neighbor.l2Idx; neighborEntry->matchRouteIdx=rg_db.v6neighbor[neighborIdx].rtk_v6neighbor.ipv6RouteIdx; neighborEntry->valid=rg_db.v6neighbor[neighborIdx].rtk_v6neighbor.valid; neighborEntry->staticEntry=rg_db.v6neighbor[neighborIdx].staticEntry; for(count=0;count<8;count++) neighborInfo->neighborEntry.interfaceId[count]=(rg_db.v6neighbor[neighborIdx].rtk_v6neighbor.ipv6Ifid>>(56-(8*count)))&0xff; neighborInfo->idleSecs=rg_db.v6neighbor[neighborIdx].idleSecs; *neighbor_valid_idx=neighborIdx; return (RT_ERR_RG_OK); } #ifdef __KERNEL__ rtk_rg_err_code_t rtk_rg_apollo_portMirror_set(rtk_rg_portMirrorInfo_t portMirrorInfo) { rtk_portmask_t mirroredRxPortmask; rtk_portmask_t mirroredTxPortmask; memset(&mirroredRxPortmask,0,sizeof(rtk_portmask_t)); memset(&mirroredTxPortmask,0,sizeof(rtk_portmask_t)); if(portMirrorInfo.direct==RTK_RG_MIRROR_RX_ONLY){ mirroredRxPortmask.bits[0] = portMirrorInfo.enabledPortMask.portmask; mirroredTxPortmask.bits[0] = 0x0; }else if(portMirrorInfo.direct==RTK_RG_MIRROR_TX_ONLY){ mirroredRxPortmask.bits[0] = 0x0; mirroredTxPortmask.bits[0] = portMirrorInfo.enabledPortMask.portmask; }else{//RTK_RG_MIRROR_TX_RX_BOTH mirroredRxPortmask.bits[0] = portMirrorInfo.enabledPortMask.portmask; mirroredTxPortmask.bits[0] = portMirrorInfo.enabledPortMask.portmask; } ASSERT_EQ(rtk_mirror_portBased_set(portMirrorInfo.monitorPort, &mirroredRxPortmask, &mirroredTxPortmask),SUCCESS); return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_portMirror_get(rtk_rg_portMirrorInfo_t *portMirrorInfo) { rtk_portmask_t mirroredRxPortmask; rtk_portmask_t mirroredTxPortmask; memset(&mirroredRxPortmask,0,sizeof(rtk_portmask_t)); memset(&mirroredTxPortmask,0,sizeof(rtk_portmask_t)); ASSERT_EQ(rtk_mirror_portBased_get(&(portMirrorInfo->monitorPort), &mirroredRxPortmask, &mirroredTxPortmask),SUCCESS); if(mirroredRxPortmask.bits[0]==0x0 && mirroredTxPortmask.bits[0]!=0x0){ portMirrorInfo->enabledPortMask.portmask= mirroredTxPortmask.bits[0] ; portMirrorInfo->direct = RTK_RG_MIRROR_TX_ONLY; }else if(mirroredRxPortmask.bits[0]!=0x0 && mirroredTxPortmask.bits[0]==0x0){ portMirrorInfo->enabledPortMask.portmask = 
mirroredRxPortmask.bits[0] ; portMirrorInfo->direct = RTK_RG_MIRROR_RX_ONLY; }else if(mirroredRxPortmask.bits[0]!=0x0 && mirroredTxPortmask.bits[0]!=0x0){ portMirrorInfo->enabledPortMask.portmask= (mirroredRxPortmask.bits[0] | mirroredTxPortmask.bits[0]) ; portMirrorInfo->direct = RTK_RG_MIRROR_TX_RX_BOTH; } return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_portMirror_clear(void) { rtk_portmask_t mirroredRxPortmask; rtk_portmask_t mirroredTxPortmask; memset(&mirroredRxPortmask,0,sizeof(rtk_portmask_t)); memset(&mirroredTxPortmask,0,sizeof(rtk_portmask_t)); mirroredRxPortmask.bits[0] = 0x0; mirroredTxPortmask.bits[0] = 0x0; ASSERT_EQ(rtk_mirror_portBased_set(0x0, &mirroredRxPortmask, &mirroredTxPortmask),SUCCESS); return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_portEgrBandwidthCtrlRate_set(rtk_rg_mac_port_idx_t port, uint32 rate) { if(rate < 8 || rate > 1048568){ if(rate==0) rate = 1048568; //rate==0 means unlimit in rtk API else RETURN_ERR(RT_ERR_RG_INVALID_PARAM); } #if defined(RTK_RG_INGRESS_QOS_TEST_PATCH) && defined(CONFIG_RTL9600_SERIES) if((port==RTK_RG_MAC_PORT_PON) && (rg_db.systemGlobal.qos_type==RTK_RG_INGRESS_QOS_ALL_HIGH_QUEUE)) { int i; if(rate<=(rg_db.systemGlobal.qos_ingress_total_rate/4)) { //Don't need patch for(i=0;i<RTK_RG_MAC_PORT_PON;i++) { if(rg_db.systemGlobal.qos_acl_patch[i][0]) { DEBUG("DEL ACL index:%d\n",rg_db.systemGlobal.qos_acl_patch[i][0]-1); ASSERT_EQ(rtk_rg_apollo_aclFilterAndQos_del(rg_db.systemGlobal.qos_acl_patch[i][0]-1),RT_ERR_RG_OK); rg_db.systemGlobal.qos_acl_patch[i][0]=0; rg_db.systemGlobal.qos_acl_total_patch--; } } } else { //Re-config QoS patch for(i=0;i<RTK_RG_MAC_PORT_PON;i++) _rtk_rg_qos_acl_patch(i,rg_db.systemGlobal.qos_ingress_rate[i]); } } #endif ASSERT_EQ(rtk_rate_portEgrBandwidthCtrlRate_set(port,rate),SUCCESS); return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_portIgrBandwidthCtrlRate_set(rtk_rg_mac_port_idx_t port, uint32 rate) { if(rate < 8 || rate > 1048568){ if(rate==0) rate = 1048568; //rate==0 means unlimit. else RETURN_ERR(RT_ERR_RG_INVALID_PARAM); } #if defined(RTK_RG_INGRESS_QOS_TEST_PATCH) && defined(CONFIG_RTL9600_SERIES) ASSERT_EQ(_rtk_rg_qos_acl_patch(port,rate),SUCCESS);; #else ASSERT_EQ(rtk_rate_portIgrBandwidthCtrlRate_set(port,rate),SUCCESS); #endif return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_portEgrBandwidthCtrlRate_get(rtk_rg_mac_port_idx_t port, uint32 *rate) { if(rate==NULL) RETURN_ERR(RT_ERR_RG_NULL_POINTER); ASSERT_EQ(rtk_rate_portEgrBandwidthCtrlRate_get(port,rate),SUCCESS); return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_portIgrBandwidthCtrlRate_get(rtk_rg_mac_port_idx_t port, uint32 *rate) { if(rate==NULL) RETURN_ERR(RT_ERR_RG_NULL_POINTER); ASSERT_EQ(rtk_rate_portIgrBandwidthCtrlRate_get(port,rate),SUCCESS); return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_phyPortForceAbility_set(rtk_rg_mac_port_idx_t port, rtk_rg_phyPortAbilityInfo_t ability) { uint32 reg; rtk_port_phy_ability_t auto_ability; bzero(&auto_ability,sizeof(rtk_port_phy_ability_t)); //parameter check if(port>=RTK_RG_MAC_PORT_MAX) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if(ability.speed>=RTK_RG_PORT_SPEED_END) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if(ability.duplex==RTK_RG_PORT_HALF_DUPLEX && ability.flowCtrl==RTK_RG_ENABLED) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); #if defined(CONFIG_RTL9602C_SERIES) if(port==RTK_RG_MAC_PORT_CPU) RETURN_ERR(RT_ERR_RG_INVALID_PARAM);//CPU port without phy stage, should set by mac stage. 
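/* Usage sketch for this setter (values illustrative; fields as used below): to advertise
 * only 100M full duplex with flow control on a LAN port, a caller could do the following.
 * Leaving fc/asym_fc at 0 takes the backward-compatibility path below, which enables both
 * FC and AsyFC. The port index 0 is only an assumed example of a LAN MAC port.
 *
 *   rtk_rg_phyPortAbilityInfo_t ab = {0};
 *   ab.valid    = RTK_RG_ENABLED;
 *   ab.speed    = RTK_RG_PORT_SPEED_100M;
 *   ab.duplex   = RTK_RG_PORT_FULL_DUPLEX;
 *   ab.flowCtrl = RTK_RG_ENABLED;
 *   rtk_rg_apollo_phyPortForceAbility_set(0, ab);
 */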
#else if(port==RTK_RG_MAC_PORT_CPU || port==RTK_RG_MAC_PORT_RGMII) RETURN_ERR(RT_ERR_RG_INVALID_PARAM);//CPU & RGMII port without phy stage, should set by mac stage. #endif if(ability.force_disable_phy==RTK_RG_ENABLED){ assert_ok(rtk_port_phyReg_get(port,0,0,&reg)); //get original phy reg data reg|=0x800; //force the phy disabled (bit 11) assert_ok(rtk_port_phyReg_set(port,0,0,reg)); }else{ assert_ok(rtk_port_phyReg_get(port,0,0,&reg)); //get original phy reg data reg&=~(0x800); //re-enable the phy (clear bit 11) assert_ok(rtk_port_phyReg_set(port,0,0,reg)); } if(ability.valid==RTK_RG_ENABLED){ if(ability.speed==RTK_RG_PORT_SPEED_10M && ability.duplex==RTK_RG_PORT_HALF_DUPLEX){ auto_ability.Half_10 = ENABLED; }else if(ability.speed==RTK_RG_PORT_SPEED_10M && ability.duplex==RTK_RG_PORT_FULL_DUPLEX){ auto_ability.Full_10 = ENABLED; }else if(ability.speed==RTK_RG_PORT_SPEED_100M && ability.duplex==RTK_RG_PORT_HALF_DUPLEX){ auto_ability.Half_100 = ENABLED; }else if(ability.speed==RTK_RG_PORT_SPEED_100M && ability.duplex==RTK_RG_PORT_FULL_DUPLEX){ auto_ability.Full_100 = ENABLED; }else if(ability.speed==RTK_RG_PORT_SPEED_1000M && ability.duplex==RTK_RG_PORT_FULL_DUPLEX){ auto_ability.Full_1000 = ENABLED; }else { RETURN_ERR(RT_ERR_RG_INVALID_PARAM); } if(ability.flowCtrl==RTK_RG_ENABLED){ auto_ability.FC=ability.fc; auto_ability.AsyFC=ability.asym_fc; if(ability.fc==RTK_RG_DISABLED && ability.asym_fc==RTK_RG_DISABLED){//backward compatibility: the fc and asym_fc parameters did not exist before, so enable both. auto_ability.FC=ENABLED; auto_ability.AsyFC=ENABLED; } }else{ auto_ability.FC=DISABLED; auto_ability.AsyFC=DISABLED; } assert_ok(rtk_port_phyAutoNegoAbility_set(port,&auto_ability)); }else{ //set auto_ability to default (enable all speed & AsyFC) auto_ability.AsyFC = ENABLED; auto_ability.FC = ENABLED; auto_ability.Half_10 = ENABLED; auto_ability.Full_10= ENABLED; auto_ability.Half_100 = ENABLED; auto_ability.Full_100 = ENABLED; auto_ability.Half_1000 = DISABLED; auto_ability.Full_1000 = ENABLED; assert_ok(rtk_port_phyAutoNegoAbility_set(port,&auto_ability)); } return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_phyPortForceAbility_get(rtk_rg_mac_port_idx_t port, rtk_rg_phyPortAbilityInfo_t *ability) { uint32 reg; rtk_port_phy_ability_t auto_ability; //parameter check if(port>=RTK_RG_MAC_PORT_MAX) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); #if defined(CONFIG_RTL9602C_SERIES) if(port==RTK_RG_MAC_PORT_CPU) RETURN_ERR(RT_ERR_RG_INVALID_PARAM);//CPU port without phy stage, should set by mac stage. #else if(port==RTK_RG_MAC_PORT_CPU || port==RTK_RG_MAC_PORT_RGMII) RETURN_ERR(RT_ERR_RG_INVALID_PARAM);//CPU & RGMII port without phy stage, should set by mac stage.
#endif

	bzero(&auto_ability,sizeof(rtk_port_phy_ability_t));

	assert_ok(rtk_port_phyReg_get(port,0,0,&reg));
	if(reg&0x800){
		ability->force_disable_phy = RTK_RG_ENABLED;
	}else{
		ability->force_disable_phy = RTK_RG_DISABLED;
	}

	assert_ok(rtk_port_phyAutoNegoAbility_get(port,&auto_ability));
	if(auto_ability.Half_10==ENABLED && auto_ability.Full_10==ENABLED && auto_ability.Half_100==ENABLED && auto_ability.Full_100==ENABLED &&
		auto_ability.Full_1000==ENABLED && auto_ability.FC==ENABLED && auto_ability.AsyFC==ENABLED){	//default setting
		ability->valid=DISABLED;
		/*
		ability->speed=RTK_RG_PORT_SPEED_1000M;
		ability->duplex=RTK_RG_PORT_FULL_DUPLEX;
		ability->flowCtrl=ENABLED;
		*/
		ability->speed=-1;
		ability->duplex=-1;
		ability->flowCtrl=-1;
	}else{
		ability->valid=ENABLED;
		if(auto_ability.Half_10==ENABLED){ability->speed=RTK_RG_PORT_SPEED_10M; ability->duplex=RTK_RG_PORT_HALF_DUPLEX;}
		if(auto_ability.Full_10==ENABLED){ability->speed=RTK_RG_PORT_SPEED_10M; ability->duplex=RTK_RG_PORT_FULL_DUPLEX; }
		if(auto_ability.Half_100==ENABLED){ability->speed=RTK_RG_PORT_SPEED_100M; ability->duplex=RTK_RG_PORT_HALF_DUPLEX; }
		if(auto_ability.Full_100==ENABLED){ability->speed=RTK_RG_PORT_SPEED_100M; ability->duplex=RTK_RG_PORT_FULL_DUPLEX; }
		if(auto_ability.Full_1000==ENABLED){ability->speed=RTK_RG_PORT_SPEED_1000M; ability->duplex=RTK_RG_PORT_FULL_DUPLEX; }
		if(auto_ability.AsyFC==ENABLED || auto_ability.FC==ENABLED){ability->flowCtrl=ENABLED; ability->fc=auto_ability.FC; ability->asym_fc=auto_ability.AsyFC;}
	}

	return (RT_ERR_RG_OK);
}

rtk_rg_err_code_t rtk_rg_apollo_cpuPortForceTrafficCtrl_set(rtk_rg_enable_t tx_fc_state, rtk_rg_enable_t rx_fc_state)
{
	rtk_enable_t state;
	rtk_port_macAbility_t macAbility;
	int ret;

	//check CPU port is in mac-force mode
	ret=rtk_port_macForceAbilityState_get(RTK_RG_MAC_PORT_CPU,&state);
	assert_ok(ret);
	if(state!=ENABLED){
		rtlglue_printf("CPU Port is not in force-mac mode!\n");
		RETURN_ERR(RT_ERR_RG_FAILED);
	}

	ret=RTK_PORT_MACFORCEABILITY_GET(RTK_RG_MAC_PORT_CPU,&macAbility);
	assert_ok(ret);
	macAbility.txFc = tx_fc_state;
	macAbility.rxFc = rx_fc_state;
	ret=RTK_PORT_MACFORCEABILITY_SET(RTK_RG_MAC_PORT_CPU,macAbility);
	assert_ok(ret);

	return (RT_ERR_RG_OK);
}

rtk_rg_err_code_t rtk_rg_apollo_cpuPortForceTrafficCtrl_get(rtk_rg_enable_t *pTx_fc_state, rtk_rg_enable_t *pRx_fc_state)
{
	rtk_enable_t state;
	rtk_port_macAbility_t macAbility;
	int ret;

	//check CPU port is in mac-force mode
	ret=rtk_port_macForceAbilityState_get(RTK_RG_MAC_PORT_CPU,&state);
	assert_ok(ret);
	if(state!=ENABLED){
		rtlglue_printf("CPU Port is not in force-mac mode!\n");
		RETURN_ERR(RT_ERR_RG_FAILED);
	}

	ret=RTK_PORT_MACFORCEABILITY_GET(RTK_RG_MAC_PORT_CPU,&macAbility);
	assert_ok(ret);
	*pTx_fc_state = macAbility.txFc;
	*pRx_fc_state = macAbility.rxFc;

	return (RT_ERR_RG_OK);
}

int _rtk_rg_is_stormControlEntry_init(void)
{
	if(rg_db.systemGlobal.stormControlInfoEntry==NULL)
		RETURN_ERR(RT_ERR_RG_NULL_POINTER);
	else
		return (RT_ERR_RG_OK);
}

int _rtk_rg_stormControlEntry_init(void)
{
	bzero(rg_db.systemGlobal.stormControlInfoEntry, sizeof(rtk_rg_stormControlInfo_t)*MAX_STORMCONTROL_ENTRY_SIZE);
	return (RT_ERR_RG_OK);
}

int32 _rtk_rg_stormControl_search_empty(int* stormInfo_idx){
	int i;
	if(_rtk_rg_is_stormControlEntry_init())
		_rtk_rg_stormControlEntry_init();
	for(i=0;i<MAX_STORMCONTROL_ENTRY_SIZE;i++){
		if(rg_db.systemGlobal.stormControlInfoEntry[i].valid==RTK_RG_DISABLED){
			*stormInfo_idx=i;
			break;
		}
	}
	if(i==MAX_STORMCONTROL_ENTRY_SIZE){
		*stormInfo_idx = -1;
		RETURN_ERR(RT_ERR_RG_STORMCONTROL_ENTRY_FULL);
	}
	return (RT_ERR_RG_OK);
}
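/*
 * Usage sketch (illustrative only): limiting broadcast storms on one LAN port.
 * The call flow is inferred from the add/find/del handlers below; the meter
 * rate value, its unit, and the chosen port are assumptions for the example
 * and depend on the target chip and shareMeter configuration.
 */
#if 0	/* documentation sketch, not compiled */
static void example_broadcast_storm_limit(void)
{
	rtk_rg_stormControlInfo_t info;
	int idx = -1;

	bzero(&info, sizeof(info));
	info.valid     = RTK_RG_ENABLED;
	info.port      = RTK_RG_PORT0;                 /* LAN port to police (assumed) */
	info.stormType = RTK_RG_STORM_TYPE_BROADCAST;  /* at most 4 distinct storm types can be active */
	info.meterIdx  = 0;                            /* shared meter used for this storm type */

	/* Program the shared meter first; rate unit and IFG handling follow the RTK rate API. */
	rtk_rg_apollo_shareMeter_set(info.meterIdx, 1000, RTK_RG_DISABLED);

	/* Install the storm-control entry; idx returns the software table slot. */
	rtk_rg_apollo_stormControl_add(&info, &idx);
}
#endif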
int32 _rtk_rg_stormControl_duplex_check(rtk_rg_stormControlInfo_t *stormInfo){	//reject a duplicate entry (same port and storm type)
	int i;
	if(_rtk_rg_is_stormControlEntry_init())
		_rtk_rg_stormControlEntry_init();
	for(i=0;i<MAX_STORMCONTROL_ENTRY_SIZE;i++){
		if(rg_db.systemGlobal.stormControlInfoEntry[i].valid==RTK_RG_ENABLED){
			if((rg_db.systemGlobal.stormControlInfoEntry[i].stormType==stormInfo->stormType) && (rg_db.systemGlobal.stormControlInfoEntry[i].port==stormInfo->port)){
				RETURN_ERR(RT_ERR_RG_STORMCONTROL_ENTRY_HAS_BEEN_SET);
			}
		}
	}
	return (RT_ERR_RG_OK);
}

rtk_rg_err_code_t rtk_rg_apollo_stormControl_add(rtk_rg_stormControlInfo_t *stormInfo,int *stormInfo_idx)
{
	int ret;
	rtk_rate_storm_group_ctrl_t stormTypeEnable;

	//Check that RG has been initialized
	if(rg_db.systemGlobal.vlanInit==0)
		RETURN_ERR(RT_ERR_RG_NOT_INIT);

	//check input parameters
	if(stormInfo == NULL || stormInfo_idx==NULL)
		RETURN_ERR(RT_ERR_RG_NULL_POINTER);
	if(stormInfo->valid==RTK_RG_DISABLED)
		RETURN_ERR(RT_ERR_RG_INVALID_PARAM);
	if(stormInfo->port >= RTK_RG_EXT_PORT0)
		RETURN_ERR(RT_ERR_RG_INVALID_PARAM);
	if(stormInfo->meterIdx >= MAX_STORMCONTROL_ENTRY_SIZE)
		RETURN_ERR(RT_ERR_RG_INVALID_PARAM);
	if(stormInfo->stormType >= RTK_RG_STORM_TYPE_END)
		RETURN_ERR(RT_ERR_RG_INVALID_PARAM);

	ASSERT_EQ(_rtk_rg_stormControl_duplex_check(stormInfo),RT_ERR_RG_OK);
	ASSERT_EQ(_rtk_rg_stormControl_search_empty(stormInfo_idx),RT_ERR_RG_OK);

	bzero(&stormTypeEnable,sizeof(rtk_rate_storm_group_ctrl_t));
	switch(stormInfo->stormType){
		case RTK_RG_STORM_TYPE_UNKNOWN_UNICAST:
			assert_ok(rtk_rate_stormControlEnable_get(&stormTypeEnable));
			stormTypeEnable.unknown_unicast_enable=RTK_RG_ENABLED;
			break;
		case RTK_RG_STORM_TYPE_UNKNOWN_MULTICAST:
			assert_ok(rtk_rate_stormControlEnable_get(&stormTypeEnable));
			stormTypeEnable.unknown_multicast_enable=RTK_RG_ENABLED;
			break;
		case RTK_RG_STORM_TYPE_MULTICAST:
			assert_ok(rtk_rate_stormControlEnable_get(&stormTypeEnable));
			stormTypeEnable.multicast_enable=RTK_RG_ENABLED;
			break;
		case RTK_RG_STORM_TYPE_BROADCAST:
			assert_ok(rtk_rate_stormControlEnable_get(&stormTypeEnable));
			stormTypeEnable.broadcast_enable=RTK_RG_ENABLED;
			break;
		case RTK_RG_STORM_TYPE_DHCP:
			assert_ok(rtk_rate_stormControlEnable_get(&stormTypeEnable));
			stormTypeEnable.dhcp_enable=RTK_RG_ENABLED;
			break;
		case RTK_RG_STORM_TYPE_ARP:
			assert_ok(rtk_rate_stormControlEnable_get(&stormTypeEnable));
			stormTypeEnable.arp_enable=RTK_RG_ENABLED;
			break;
		case RTK_RG_STORM_TYPE_IGMP_MLD:
			assert_ok(rtk_rate_stormControlEnable_get(&stormTypeEnable));
			stormTypeEnable.igmp_mld_enable=RTK_RG_ENABLED;
			break;
		default:
			break;
	}

	ret=rtk_rate_stormControlEnable_set(&stormTypeEnable);
	if(ret!=RT_ERR_OK){
		//rtlglue_printf("4 different types are supported at most!");
		RETURN_ERR(RT_ERR_RG_STORMCONTROL_TYPE_FULL);
	}
	assert_ok(rtk_rate_stormControlPortEnable_set(stormInfo->port,stormInfo->stormType,ENABLED));
	assert_ok(rtk_rate_stormControlMeterIdx_set(stormInfo->port,stormInfo->stormType,stormInfo->meterIdx));

	//maintain in FwdEngine
	memcpy(&rg_db.systemGlobal.stormControlInfoEntry[*stormInfo_idx],stormInfo,sizeof(rtk_rg_stormControlInfo_t));

	return (RT_ERR_RG_OK);
}

rtk_rg_err_code_t rtk_rg_apollo_stormControl_del(int stormInfo_idx)
{
	int i;
	rtk_rate_storm_group_ctrl_t stormTypeEnable;

	if(stormInfo_idx >= MAX_STORMCONTROL_ENTRY_SIZE || stormInfo_idx < 0)
		RETURN_ERR(RT_ERR_RG_INVALID_PARAM);

	if(rg_db.systemGlobal.stormControlInfoEntry[stormInfo_idx].valid==RTK_RG_DISABLED){
		bzero(&rg_db.systemGlobal.stormControlInfoEntry[stormInfo_idx],sizeof(rtk_rg_stormControlInfo_t));
		return (RT_ERR_RG_OK);
	}else{
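		/*
		 * The entry is still active in hardware: first detach this port from the
		 * storm type and reset its meter index, then scan the remaining software
		 * entries; only when no other entry still uses the same storm type is the
		 * type-level enable cleared further below.
		 */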
assert_ok(rtk_rate_stormControlPortEnable_set(rg_db.systemGlobal.stormControlInfoEntry[stormInfo_idx].port,rg_db.systemGlobal.stormControlInfoEntry[stormInfo_idx].stormType,DISABLED)); assert_ok(rtk_rate_stormControlMeterIdx_set(rg_db.systemGlobal.stormControlInfoEntry[stormInfo_idx].port,rg_db.systemGlobal.stormControlInfoEntry[stormInfo_idx].stormType,0)); //check if other rule use same stormType for(i=0;i<MAX_STORMCONTROL_ENTRY_SIZE;i++){ if(i==stormInfo_idx){ continue; }else{ if(rg_db.systemGlobal.stormControlInfoEntry[i].stormType==rg_db.systemGlobal.stormControlInfoEntry[stormInfo_idx].stormType) break; } } } if(i==MAX_STORMCONTROL_ENTRY_SIZE){ //no other rule use same stormType, clear it. bzero(&stormTypeEnable,sizeof(rtk_rate_storm_group_ctrl_t)); switch(rg_db.systemGlobal.stormControlInfoEntry[stormInfo_idx].stormType){ case RTK_RG_STORM_TYPE_UNKNOWN_UNICAST: assert_ok(rtk_rate_stormControlEnable_get(&stormTypeEnable)); stormTypeEnable.unknown_unicast_enable=RTK_RG_DISABLED; break; case RTK_RG_STORM_TYPE_UNKNOWN_MULTICAST: assert_ok(rtk_rate_stormControlEnable_get(&stormTypeEnable)); stormTypeEnable.unknown_multicast_enable=RTK_RG_DISABLED; break; case RTK_RG_STORM_TYPE_MULTICAST: assert_ok(rtk_rate_stormControlEnable_get(&stormTypeEnable)); stormTypeEnable.multicast_enable=RTK_RG_DISABLED; break; case RTK_RG_STORM_TYPE_BROADCAST: assert_ok(rtk_rate_stormControlEnable_get(&stormTypeEnable)); stormTypeEnable.broadcast_enable=RTK_RG_DISABLED; break; case RTK_RG_STORM_TYPE_DHCP: assert_ok(rtk_rate_stormControlEnable_get(&stormTypeEnable)); stormTypeEnable.dhcp_enable=RTK_RG_DISABLED; break; case RTK_RG_STORM_TYPE_ARP: assert_ok(rtk_rate_stormControlEnable_get(&stormTypeEnable)); stormTypeEnable.arp_enable=RTK_RG_DISABLED; break; case RTK_RG_STORM_TYPE_IGMP_MLD: assert_ok(rtk_rate_stormControlEnable_get(&stormTypeEnable)); stormTypeEnable.igmp_mld_enable=RTK_RG_DISABLED; break; default:break; } assert_ok(rtk_rate_stormControlEnable_set(&stormTypeEnable)); } //clear stormInfo in fwdEngine bzero(&rg_db.systemGlobal.stormControlInfoEntry[stormInfo_idx],sizeof(rtk_rg_stormControlInfo_t)); return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_stormControl_find(rtk_rg_stormControlInfo_t *stormInfo,int *stormInfo_idx) { int i; //check input parameter if(stormInfo == NULL|| stormInfo_idx==NULL) RETURN_ERR(RT_ERR_RG_NULL_POINTER); if(*stormInfo_idx >= MAX_STORMCONTROL_ENTRY_SIZE || *stormInfo_idx < 0) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); for(i=(*stormInfo_idx);i<MAX_STORMCONTROL_ENTRY_SIZE;i++){ if(rg_db.systemGlobal.stormControlInfoEntry[i].valid==RTK_RG_ENABLED){ stormInfo->valid = RTK_RG_ENABLED; stormInfo->port = rg_db.systemGlobal.stormControlInfoEntry[i].port; stormInfo->stormType = rg_db.systemGlobal.stormControlInfoEntry[i].stormType; stormInfo->meterIdx = rg_db.systemGlobal.stormControlInfoEntry[i].meterIdx; *stormInfo_idx = i; break; } } if(i==MAX_STORMCONTROL_ENTRY_SIZE){ *stormInfo_idx = -1; return (RT_ERR_RG_STORMCONTROL_ENTRY_NOT_FOUND); } return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_shareMeter_set(uint32 index, uint32 rate, rtk_rg_enable_t ifgInclude){ int ret; ret = rtk_rate_shareMeter_set(index, rate, ifgInclude); if(ret==RT_ERR_OK){ return (RT_ERR_RG_OK);} else if(ret == RT_ERR_FAILED){ RETURN_ERR(RT_ERR_RG_SHAREMETER_SET_FAILED);} else if(ret == RT_ERR_FILTER_METER_ID){ RETURN_ERR(RT_ERR_RG_SHAREMETER_INVALID_METER_INDEX);} else if(ret == RT_ERR_RATE){ RETURN_ERR(RT_ERR_RG_SHAREMETER_INVALID_RATE);} else if(ret == RT_ERR_INPUT){ 
RETURN_ERR(RT_ERR_RG_SHAREMETER_INVALID_INPUT);} else{ RETURN_ERR(RT_ERR_RG_SHAREMETER_SET_FAILED);} } rtk_rg_err_code_t rtk_rg_apollo_shareMeter_get(uint32 index, uint32 *pRate , rtk_rg_enable_t *pIfgInclude){ int ret; ret =rtk_rate_shareMeter_get(index, pRate , (void*)pIfgInclude); if(ret==RT_ERR_OK){ return (RT_ERR_RG_OK);} else if(ret == RT_ERR_FILTER_METER_ID){ RETURN_ERR(RT_ERR_RG_SHAREMETER_INVALID_METER_INDEX);} else {RETURN_ERR(RT_ERR_RG_SHAREMETER_GET_FAILED);} return (RT_ERR_RG_OK); } #endif unsigned int _rtk_rg_hash_mac_fid_efid(unsigned char *mac,unsigned int fid,unsigned int efid) { #if defined(CONFIG_RTL9600_SERIES) unsigned char hashidx[9]={0}; hashidx[0]=(mac[5]^(mac[4]>>1)^(mac[3]>>2)^(mac[2]>>3)^(mac[1]>>4)^(mac[0]>>5)^0)&1; hashidx[1]=((mac[5]>>1)^(mac[4]>>2)^(mac[3]>>3)^(mac[2]>>4)^(mac[1]>>5)^(mac[0]>>6)^0)&1; hashidx[2]=((mac[5]>>2)^(mac[4]>>3)^(mac[3]>>4)^(mac[2]>>5)^(mac[1]>>6)^(mac[0]>>7)^0)&1; hashidx[3]=((mac[5]>>3)^(mac[4]>>4)^(mac[3]>>5)^(mac[2]>>6)^(mac[1]>>7)^fid^0)&1; hashidx[4]=((mac[5]>>4)^(mac[4]>>5)^(mac[3]>>6)^(mac[2]>>7)^mac[0]^(fid>>1)^0)&1; hashidx[5]=((mac[5]>>5)^(mac[4]>>6)^(mac[3]>>7)^mac[1]^(mac[0]>>1)^(fid>>2)^0)&1; hashidx[6]=((mac[5]>>6)^(mac[4]>>7)^mac[2]^(mac[1]>>1)^(mac[0]>>2)^(fid>>3)^(efid))&1; hashidx[7]=((mac[5]>>7)^mac[3]^(mac[2]>>1)^(mac[1]>>2)^(mac[0]>>3)^0^(efid>>1))&1; hashidx[8]=(mac[4]^(mac[3]>>1)^(mac[2]>>2)^(mac[1]>>3)^(mac[0]>>4)^0^(efid>>2))&1; return ((hashidx[8]<<8)|(hashidx[7]<<7)|(hashidx[6]<<6)|(hashidx[5]<<5)|(hashidx[4]<<4)|(hashidx[3]<<3)|(hashidx[2]<<2)|(hashidx[1]<<1)|(hashidx[0])); #elif defined(CONFIG_RTL9602C_SERIES) unsigned char hashidx[8]={0}; hashidx[7]=((mac[5]>>7)^(mac[4]>>7)^(mac[3]>>7)^(mac[2]>>7)^(mac[1]>>7)^(mac[0]>>7))&1; hashidx[6]=((mac[5]>>6)^(mac[4]>>6)^(mac[3]>>6)^(mac[2]>>6)^(mac[1]>>6)^(mac[0]>>6))&1; hashidx[5]=((mac[5]>>5)^(mac[4]>>5)^(mac[3]>>5)^(mac[2]>>5)^(mac[1]>>5)^(mac[0]>>5))&1; hashidx[4]=((mac[5]>>4)^(mac[4]>>4)^(mac[3]>>4)^(mac[2]>>4)^(mac[1]>>4)^(mac[0]>>4))&1; hashidx[3]=((mac[5]>>3)^(mac[4]>>3)^(mac[3]>>3)^(mac[2]>>3)^(mac[1]>>3)^(mac[0]>>3))&1; hashidx[2]=((mac[5]>>2)^(mac[4]>>2)^(mac[3]>>2)^(mac[2]>>2)^(mac[1]>>2)^(mac[0]>>2))&1; hashidx[1]=((mac[5]>>1)^(mac[4]>>1)^(mac[3]>>1)^(mac[2]>>1)^(mac[1]>>1)^(mac[0]>>1))&1; hashidx[0]=(mac[5]^mac[4]^mac[3]^mac[2]^mac[1]^mac[0]^fid)&1; return ((hashidx[7]<<7)|(hashidx[6]<<6)|(hashidx[5]<<5)|(hashidx[4]<<4)|(hashidx[3]<<3)|(hashidx[2]<<2)|(hashidx[1]<<1)|(hashidx[0])); #elif defined(CONFIG_RTL9607C_SERIES) unsigned char hashidx[9]={0}; hashidx[8]=((mac[4]>>0)&1)^((mac[3]>>1)&1)^((mac[2]>>2)&1)^((mac[1]>>3)&1)^((mac[0]>>4)&1); hashidx[7]=((mac[5]>>7)&1)^((mac[3]>>0)&1)^((mac[2]>>1)&1)^((mac[1]>>2)&1)^((mac[0]>>3)&1); hashidx[6]=((mac[5]>>6)&1)^((mac[4]>>7)&1)^((mac[2]>>0)&1)^((mac[1]>>1)&1)^((mac[0]>>2)&1); hashidx[5]=((mac[5]>>5)&1)^((mac[4]>>6)&1)^((mac[3]>>7)&1)^((mac[1]>>0)&1)^((mac[0]>>1)&1); hashidx[4]=((mac[5]>>4)&1)^((mac[4]>>5)&1)^((mac[3]>>6)&1)^((mac[2]>>7)&1)^((mac[0]>>0)&1)^((fid>>1)&1); hashidx[3]=((mac[5]>>3)&1)^((mac[4]>>4)&1)^((mac[3]>>5)&1)^((mac[2]>>6)&1)^((mac[1]>>7)&1)^((fid>>0)&1); hashidx[2]=((mac[5]>>2)&1)^((mac[4]>>3)&1)^((mac[3]>>4)&1)^((mac[2]>>5)&1)^((mac[1]>>6)&1)^((mac[0]>>7)&1); hashidx[1]=((mac[5]>>1)&1)^((mac[4]>>2)&1)^((mac[3]>>3)&1)^((mac[2]>>4)&1)^((mac[1]>>5)&1)^((mac[0]>>6)&1); hashidx[0]=((mac[5]>>0)&1)^((mac[4]>>1)&1)^((mac[3]>>2)&1)^((mac[2]>>3)&1)^((mac[1]>>4)&1)^((mac[0]>>5)&1); return 
((hashidx[8]<<8)|(hashidx[7]<<7)|(hashidx[6]<<6)|(hashidx[5]<<5)|(hashidx[4]<<4)|(hashidx[3]<<3)|(hashidx[2]<<2)|(hashidx[1]<<1)|(hashidx[0]<<0)); #elif defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) return rtl8651_filterDbIndex((ether_addr_t *)mac, fid); #else #error #endif } unsigned int _rtk_rg_hash_mac_vid_efid(unsigned char *mac,unsigned int vid,unsigned int efid) { #if defined(CONFIG_RTL9600_SERIES) unsigned char hashidx[9]={0}; hashidx[0]=((mac[5])^(mac[4]>>1)^(mac[3]>>2)^(mac[2]>>3)^(mac[1]>>4)^(mac[0]>>5)^(vid>>6))&1; hashidx[1]=((mac[5]>>1)^(mac[4]>>2)^(mac[3]>>3)^(mac[2]>>4)^(mac[1]>>5)^(mac[0]>>6)^(vid>>7))&1; hashidx[2]=((mac[5]>>2)^(mac[4]>>3)^(mac[3]>>4)^(mac[2]>>5)^(mac[1]>>6)^(mac[0]>>7)^(vid>>8))&1; hashidx[3]=((mac[5]>>3)^(mac[4]>>4)^(mac[3]>>5)^(mac[2]>>6)^(mac[1]>>7)^vid^(vid>>9))&1; hashidx[4]=((mac[5]>>4)^(mac[4]>>5)^(mac[3]>>6)^(mac[2]>>7)^mac[0]^(vid>>1)^(vid>>10))&1; hashidx[5]=((mac[5]>>5)^(mac[4]>>6)^(mac[3]>>7)^mac[1]^(mac[0]>>1)^(vid>>2)^(vid>>11))&1; hashidx[6]=((mac[5]>>6)^(mac[4]>>7)^mac[2]^(mac[1]>>1)^(mac[0]>>2)^(vid>>3)^efid)&1; hashidx[7]=((mac[5]>>7)^mac[3]^(mac[2]>>1)^(mac[1]>>2)^(mac[0]>>3)^(vid>>4)^(efid>>1))&1; hashidx[8]=(mac[4]^(mac[3]>>1)^(mac[2]>>2)^(mac[1]>>3)^(mac[0]>>4)^(vid>>5)^(efid>>2))&1; return ((hashidx[8]<<8)|(hashidx[7]<<7)|(hashidx[6]<<6)|(hashidx[5]<<5)|(hashidx[4]<<4)|(hashidx[3]<<3)|(hashidx[2]<<2)|(hashidx[1]<<1)|(hashidx[0])); #elif defined(CONFIG_RTL9602C_SERIES) unsigned char hashidx[8]={0}; hashidx[7]=((mac[5]>>7)^(mac[4]>>7)^(mac[3]>>7)^(mac[2]>>7)^(mac[1]>>7)^(mac[0]>>7)^(vid>>7))&1; hashidx[6]=((mac[5]>>6)^(mac[4]>>6)^(mac[3]>>6)^(mac[2]>>6)^(mac[1]>>6)^(mac[0]>>6)^(vid>>6))&1; hashidx[5]=((mac[5]>>5)^(mac[4]>>5)^(mac[3]>>5)^(mac[2]>>5)^(mac[1]>>5)^(mac[0]>>5)^(vid>>5))&1; hashidx[4]=((mac[5]>>4)^(mac[4]>>4)^(mac[3]>>4)^(mac[2]>>4)^(mac[1]>>4)^(mac[0]>>4)^(vid>>4))&1; hashidx[3]=((mac[5]>>3)^(mac[4]>>3)^(mac[3]>>3)^(mac[2]>>3)^(mac[1]>>3)^(mac[0]>>3)^(vid>>3)^(vid>>11))&1; hashidx[2]=((mac[5]>>2)^(mac[4]>>2)^(mac[3]>>2)^(mac[2]>>2)^(mac[1]>>2)^(mac[0]>>2)^(vid>>2)^(vid>>10))&1; hashidx[1]=((mac[5]>>1)^(mac[4]>>1)^(mac[3]>>1)^(mac[2]>>1)^(mac[1]>>1)^(mac[0]>>1)^(vid>>1)^(vid>>9))&1; hashidx[0]=(mac[5]^mac[4]^mac[3]^mac[2]^mac[1]^mac[0]^vid^(vid>>8))&1; return ((hashidx[7]<<7)|(hashidx[6]<<6)|(hashidx[5]<<5)|(hashidx[4]<<4)|(hashidx[3]<<3)|(hashidx[2]<<2)|(hashidx[1]<<1)|(hashidx[0])); #elif defined(CONFIG_RTL9607C_SERIES) unsigned char hashidx[9]={0}; hashidx[8]=((mac[4]>>0)&1)^((mac[3]>>1)&1)^((mac[2]>>2)&1)^((mac[1]>>3)&1)^((mac[0]>>4)&1)^((vid>>5)&1); hashidx[7]=((mac[5]>>7)&1)^((mac[3]>>0)&1)^((mac[2]>>1)&1)^((mac[1]>>2)&1)^((mac[0]>>3)&1)^((vid>>4)&1); hashidx[6]=((mac[5]>>6)&1)^((mac[4]>>7)&1)^((mac[2]>>0)&1)^((mac[1]>>1)&1)^((mac[0]>>2)&1)^((vid>>3)&1); hashidx[5]=((mac[5]>>5)&1)^((mac[4]>>6)&1)^((mac[3]>>7)&1)^((mac[1]>>0)&1)^((mac[0]>>1)&1)^((vid>>2)&1)^((vid>>11)&1); hashidx[4]=((mac[5]>>4)&1)^((mac[4]>>5)&1)^((mac[3]>>6)&1)^((mac[2]>>7)&1)^((mac[0]>>0)&1)^((vid>>1)&1)^((vid>>10)&1); hashidx[3]=((mac[5]>>3)&1)^((mac[4]>>4)&1)^((mac[3]>>5)&1)^((mac[2]>>6)&1)^((mac[1]>>7)&1)^((vid>>0)&1)^((vid>>9)&1); hashidx[2]=((mac[5]>>2)&1)^((mac[4]>>3)&1)^((mac[3]>>4)&1)^((mac[2]>>5)&1)^((mac[1]>>6)&1)^((mac[0]>>7)&1)^((vid>>8)&1); hashidx[1]=((mac[5]>>1)&1)^((mac[4]>>2)&1)^((mac[3]>>3)&1)^((mac[2]>>4)&1)^((mac[1]>>5)&1)^((mac[0]>>6)&1)^((vid>>7)&1); hashidx[0]=((mac[5]>>0)&1)^((mac[4]>>1)&1)^((mac[3]>>2)&1)^((mac[2]>>3)&1)^((mac[1]>>4)&1)^((mac[0]>>5)&1)^((vid>>6)&1); return 
((hashidx[8]<<8)|(hashidx[7]<<7)|(hashidx[6]<<6)|(hashidx[5]<<5)|(hashidx[4]<<4)|(hashidx[3]<<3)|(hashidx[2]<<2)|(hashidx[1]<<1)|(hashidx[0]<<0)); #elif defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) WARNING("not support IVL mode hash faild \n"); return RG_RET_ENTRY_NOT_GET; #else #error #endif } unsigned int _rtk_rg_hash_sip_gip(unsigned int sip,unsigned int gip,int is_ivl,int vid_fid) { #if defined(CONFIG_RTL9600_SERIES) || defined(CONFIG_RTL9602C_SERIES) || defined(CONFIG_RTL9607C_SERIES) u8 hashidx[9]={0}; hashidx[8]=((gip>>8)^(gip>>17)^(gip>>26)^(sip>>7)^(sip>>16)^(sip>>25))&1; hashidx[7]=((gip>>7)^(gip>>16)^(gip>>25)^(sip>>6)^(sip>>15)^(sip>>24))&1; hashidx[6]=((gip>>6)^(gip>>15)^(gip>>24)^(sip>>5)^(sip>>14)^(sip>>23))&1; hashidx[5]=((gip>>5)^(gip>>14)^(gip>>23)^(sip>>4)^(sip>>13)^(sip>>22)^(sip>>31))&1; hashidx[4]=((gip>>4)^(gip>>13)^(gip>>22)^(sip>>3)^(sip>>12)^(sip>>21)^(sip>>30))&1; hashidx[3]=((gip>>3)^(gip>>12)^(gip>>21)^(sip>>2)^(sip>>11)^(sip>>20)^(sip>>29))&1; hashidx[2]=((gip>>2)^(gip>>11)^(gip>>20)^(sip>>1)^(sip>>10)^(sip>>19)^(sip>>28))&1; hashidx[1]=((gip>>1)^(gip>>10)^(gip>>19)^(sip>>0)^(sip>>9)^(sip>>18)^(sip>>27))&1; hashidx[0]=((gip>>0)^(gip>>9)^(gip>>18)^(gip>>27)^(sip>>8)^(sip>>17)^(sip>>26))&1; return ((hashidx[8]<<8)|(hashidx[7]<<7)|(hashidx[6]<<6)|(hashidx[5]<<5)|(hashidx[4]<<4)|(hashidx[3]<<3)|(hashidx[2]<<2)|(hashidx[1]<<1)|(hashidx[0]<<0)); #elif defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) WARNING("not support IVL mode hash faild \n"); return RG_RET_ENTRY_NOT_GET; #else #error #endif } //Source Address Learning Limit and Action Functions rtk_rg_err_code_t rtk_rg_apollo_accessWanLimit_set(rtk_rg_accessWanLimitData_t access_wan_info) { int i; rtk_rg_saLearningLimitInfo_t limitInfo; rtk_rg_accessWanLimitData_t resetInfo; //Check param if(access_wan_info.type>=RG_ACCESSWAN_TYPE_END || access_wan_info.type==RG_ACCESSWAN_TYPE_PORT) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if(access_wan_info.type==RG_ACCESSWAN_TYPE_PORTMASK){ if(access_wan_info.data.port_mask.portmask>=(0x1<<RTK_RG_PORT_MAX)) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); //20160315LUKE: if portmask contain CPU port or WAN port, return fail. 
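	/*
	 * Illustrative example of the checks below: assuming LAN ports 0..3 and the WAN
	 * on the PON side, a portmask of 0x0F (ports 0..3) is accepted, while any mask
	 * containing the CPU port, a WAN port, or a port outside the current LAN
	 * portmask is rejected as invalid. The port numbering here is only assumed.
	 */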
if(access_wan_info.data.port_mask.portmask&(0x1<<RTK_RG_PORT_CPU)) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if(access_wan_info.data.port_mask.portmask&rg_db.systemGlobal.wanPortMask.portmask) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if(access_wan_info.data.port_mask.portmask&(~rg_db.systemGlobal.lanPortMask.portmask)) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); } if(access_wan_info.type==RG_ACCESSWAN_TYPE_CATEGORY && access_wan_info.data.category>=WanAccessCategoryNum) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if(access_wan_info.learningLimitNumber<DEF_SOFTWARE_LEARNING_LIMIT || access_wan_info.learningLimitNumber>=MAX_LUT_HW_TABLE_SIZE) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if(access_wan_info.action>=SA_LEARN_EXCEED_ACTION_END) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); #ifdef CONFIG_MASTER_WLAN0_ENABLE //if type is not portmask and wlan0_dev_mask != 0, return fail //or ext0 in portmask and wlan0_dev_mask != 0, return fail if(access_wan_info.wlan0_dev_mask) { if(access_wan_info.type!=RG_ACCESSWAN_TYPE_PORTMASK) RETURN_ERR(RT_ERR_RG_ACCESSWAN_WLAN_NOT_ZERO); if(access_wan_info.data.port_mask.portmask&(0x1<<RTK_RG_EXT_PORT0)) RETURN_ERR(RT_ERR_RG_ACCESSWAN_WLAN_CONFLICT); #ifdef CONFIG_DUALBAND_CONCURRENT if(rg_db.systemGlobal.enableSlaveSSIDBind && access_wan_info.data.port_mask.portmask&(0x1<<RTK_RG_EXT_PORT1) && access_wan_info.wlan0_dev_mask>=(0x1<<WLAN_DEVICE_NUM)) RETURN_ERR(RT_ERR_RG_ACCESSWAN_WLAN_CONFLICT); #endif } #endif //Set number and action in rg_db switch(access_wan_info.type) { case RG_ACCESSWAN_TYPE_PORTMASK: //change rg_db.systemGlobal.accessWanLimitPortMask=access_wan_info.learningLimitNumber; rg_db.systemGlobal.accessWanLimitPortMaskAction=access_wan_info.action; //stop ARP request if actived atomic_set(&rg_kernel.lutReachLimit_portmask.activity,0); //Reset count and port_mask rg_db.systemGlobal.accessWanLimitPortMask_member.portmask=0x0; atomic_set(&rg_db.systemGlobal.accessWanLimitPortMaskCount,0); for(i=RTK_RG_PORT0;i<RTK_RG_PORT_MAX;i++) { if(access_wan_info.data.port_mask.portmask&(0x1<<i)) { //enable learning rg_db.systemGlobal.accessWanLimitPortMask_member.portmask |= (0x1<<i); //20160827LUKE: we should not treat all mac learned as l34permit count for lutlearning version2! if(CONFIG_RG_ACCESSWAN_VERSION==1){ //add per port count to system count atomic_add(atomic_read(&rg_db.systemGlobal.sourceAddrLearningCount[i]),&rg_db.systemGlobal.accessWanLimitPortMaskCount); } } } #ifdef CONFIG_MASTER_WLAN0_ENABLE if(access_wan_info.wlan0_dev_mask) { for(i=0;i<MAX_WLAN_DEVICE_NUM;i++) { if(access_wan_info.wlan0_dev_mask&(0x1<<i)) { //enable learning rg_db.systemGlobal.accessWanLimitPortMask_wlan0member |= (0x1<<i); //20160827LUKE: we should not treat all mac learned as l34permit count for lutlearning version2! 
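					/*
					 * Accounting note: with CONFIG_RG_ACCESSWAN_VERSION==1 every MAC already
					 * learned on this WLAN device is folded into the aggregate portmask count;
					 * version 2 leaves the count untouched here because only L34-permitted
					 * entries are counted (see the count reset at the end of this function).
					 */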
if(CONFIG_RG_ACCESSWAN_VERSION==1){ //add per device count to system count atomic_add(atomic_read(&rg_db.systemGlobal.wlan0SourceAddrLearningCount[i]),&rg_db.systemGlobal.accessWanLimitPortMaskCount); } } } } #endif break; case RG_ACCESSWAN_TYPE_CATEGORY: rg_db.systemGlobal.accessWanLimitCategory[access_wan_info.data.category]=access_wan_info.learningLimitNumber; rg_db.systemGlobal.accessWanLimitCategoryAction[access_wan_info.data.category]=access_wan_info.action; //stop ARP request if actived atomic_set(&rg_kernel.lutReachLimit_category[access_wan_info.data.category].activity,0); break; default: break; } if(access_wan_info.learningLimitNumber>=0 || access_wan_info.type==RG_ACCESSWAN_TYPE_UNLIMIT) { rg_db.systemGlobal.activeLimitFunction=access_wan_info.type; if(access_wan_info.type==RG_ACCESSWAN_TYPE_PORTMASK || access_wan_info.type==RG_ACCESSWAN_TYPE_UNLIMIT) { MACLN("set accessWAN type %s! reset category...",access_wan_info.type==RG_ACCESSWAN_TYPE_UNLIMIT?"UNLIMIT":"PORTMASK"); //Reset Category Access Limit resetInfo.type=RG_ACCESSWAN_TYPE_CATEGORY; resetInfo.action=SA_LEARN_EXCEED_ACTION_PERMIT; resetInfo.learningLimitNumber=DEF_SOFTWARE_LEARNING_LIMIT; #ifdef CONFIG_MASTER_WLAN0_ENABLE resetInfo.wlan0_dev_mask=0x0; #endif for(i=0;i<WanAccessCategoryNum;i++) { resetInfo.data.category=i; rtk_rg_apollo_accessWanLimit_set(resetInfo); } } if(access_wan_info.type==RG_ACCESSWAN_TYPE_CATEGORY || access_wan_info.type==RG_ACCESSWAN_TYPE_UNLIMIT) { MACLN("set accessWAN type %s! reset portmask...",access_wan_info.type==RG_ACCESSWAN_TYPE_UNLIMIT?"UNLIMIT":"CATEGORY"); //Reset portmask Access Limit resetInfo.type=RG_ACCESSWAN_TYPE_PORTMASK; resetInfo.action=SA_LEARN_EXCEED_ACTION_PERMIT; resetInfo.learningLimitNumber=DEF_SOFTWARE_LEARNING_LIMIT; resetInfo.data.port_mask.portmask=0x0; #ifdef CONFIG_MASTER_WLAN0_ENABLE resetInfo.wlan0_dev_mask=0x0; #endif atomic_set(&rg_db.systemGlobal.accessWanLimitPortMaskCount,0); rtk_rg_apollo_accessWanLimit_set(resetInfo); } //Reset Port-base MAC learning MACLN("reset port limit.."); limitInfo.action=SA_LEARN_EXCEED_ACTION_PERMIT; limitInfo.learningLimitNumber=DEF_SOFTWARE_LEARNING_LIMIT; #ifdef CONFIG_MASTER_WLAN0_ENABLE for(i=0;i<MAX_WLAN_DEVICE_NUM;i++) if(rg_db.systemGlobal.wlan0BindDecision[i].exist) rtk_rg_apollo_wlanSoftwareSourceAddrLearningLimit_set(limitInfo,0,i); #endif for(i=RTK_RG_PORT0;i<RTK_RG_PORT_MAX;i++) rtk_rg_apollo_softwareSourceAddrLearningLimit_set(limitInfo,i); MACLN("clear all MAC and related table.."); //Clear all dynamic MAC(hw and sw) and related L34 table(NAPT, ARP, neighbor...) _rtk_rg_shortCut_clear(); for(i=0;i<MAX_NAPT_OUT_SW_TABLE_SIZE;i++) { rtk_rg_apollo_naptConnection_del(i); } _rtk_rg_softwareLut_allDelete(); for(i=0;i<MAX_LUT_HW_TABLE_SIZE;i++) { if(rg_db.lut[i].valid && rg_db.lut[i].rtk_lut.entryType==RTK_LUT_L2UC && ((rg_db.lut[i].rtk_lut.entry.l2UcEntry.flags&RTK_L2_UCAST_FLAG_STATIC)==0)) { rtk_rg_apollo_macEntry_del(i); } } //20160827LUKE: prevent l34permit lut cause portmask count error. 
if(access_wan_info.type==RG_ACCESSWAN_TYPE_PORTMASK && CONFIG_RG_ACCESSWAN_VERSION==2) atomic_set(&rg_db.systemGlobal.accessWanLimitPortMaskCount,0); } return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_accessWanLimit_get(rtk_rg_accessWanLimitData_t *access_wan_info) { //Check param if(access_wan_info->type>=RG_ACCESSWAN_TYPE_END) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if(access_wan_info->type==RG_ACCESSWAN_TYPE_PORTMASK && access_wan_info->data.port_mask.portmask>=(0x1<<RTK_RG_PORT_MAX)) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if(access_wan_info->type==RG_ACCESSWAN_TYPE_CATEGORY&& access_wan_info->data.category>=WanAccessCategoryNum) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); //Get number and action in rg_db switch(access_wan_info->type) { case RG_ACCESSWAN_TYPE_PORTMASK: access_wan_info->learningLimitNumber=rg_db.systemGlobal.accessWanLimitPortMask; access_wan_info->data.port_mask.portmask=rg_db.systemGlobal.accessWanLimitPortMask_member.portmask; #ifdef CONFIG_MASTER_WLAN0_ENABLE access_wan_info->wlan0_dev_mask=rg_db.systemGlobal.accessWanLimitPortMask_wlan0member; #endif access_wan_info->action=rg_db.systemGlobal.accessWanLimitPortMaskAction; access_wan_info->learningCount=atomic_read(&rg_db.systemGlobal.accessWanLimitPortMaskCount); break; case RG_ACCESSWAN_TYPE_CATEGORY: access_wan_info->learningLimitNumber=rg_db.systemGlobal.accessWanLimitCategory[(unsigned int)access_wan_info->data.category]; access_wan_info->action=rg_db.systemGlobal.accessWanLimitCategoryAction[(unsigned int)access_wan_info->data.category]; access_wan_info->learningCount=atomic_read(&rg_db.systemGlobal.accessWanLimitCategoryCount[(unsigned int)access_wan_info->data.category]); break; default: break; } return (RT_ERR_RG_OK); } rtk_rg_err_code_t _rtK_rg_checkCategoryPortmask(rtk_l2_ucastAddr_t *lut) { rtk_rg_port_idx_t port; if(lut->port>=RTK_RG_PORT_CPU) port=RTK_RG_PORT_CPU+lut->ext_port; else port=lut->port; if(((0x1<<port)&CONFIG_RG_ACCESSWAN_CATEGORY_PORTMASK)==0) //unmatch return (RT_ERR_RG_ACCESSWAN_NOT_LAN); return RT_ERR_RG_OK; } rtk_rg_err_code_t rtk_rg_apollo_accessWanLimitCategory_set(rtk_rg_accessWanLimitCategory_t macCategory_info) { int i,l2Found=0; int ret; //Check param if(rg_db.systemGlobal.activeLimitFunction!=RG_ACCESSWAN_TYPE_CATEGORY) RETURN_ERR(RT_ERR_RG_ACCESSWAN_DISABLE); if((macCategory_info.mac.octet[0]&0x1)>0) //multicast don't have category RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if(macCategory_info.category>=WanAccessCategoryNum) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); //setup the category of the "all" match MAC, if no such MAC, return RT_ERR_RG_L2_ENTRY_ACCESS_FAILED for(i=0;i<MAX_LUT_SW_TABLE_SIZE;i++) { if(rg_db.lut[i].valid && rg_db.lut[i].rtk_lut.entryType==RTK_LUT_L2UC && memcmp(rg_db.lut[i].rtk_lut.entry.l2UcEntry.mac.octet,macCategory_info.mac.octet,ETHER_ADDR_LEN)==0) { //Check if the MAC is under LAN portmask ret=_rtK_rg_checkCategoryPortmask(&rg_db.lut[i].rtk_lut.entry.l2UcEntry); if(ret!=RT_ERR_RG_OK) RETURN_ERR(ret); //Check if new category won't exceed limit if(rg_db.systemGlobal.accessWanLimitCategory[macCategory_info.category]>=0 && rg_db.systemGlobal.accessWanLimitCategory[macCategory_info.category]<=atomic_read(&rg_db.systemGlobal.accessWanLimitCategoryCount[(unsigned int)macCategory_info.category])) //no way to learn { MACLN("Category %d access WAN limit is reached(%d)...action is %s!!",macCategory_info.category,rg_db.systemGlobal.accessWanLimitCategory[(unsigned int)macCategory_info.category], rg_db.systemGlobal.accessWanLimitCategoryAction[(unsigned 
int)macCategory_info.category]==SA_LEARN_EXCEED_ACTION_PERMIT?"Permit and Forward": rg_db.systemGlobal.accessWanLimitCategoryAction[(unsigned int)macCategory_info.category]==SA_LEARN_EXCEED_ACTION_PERMIT_L2?"Permit L2 only":"Drop"); if(i<MAX_LUT_HW_TABLE_SIZE) { //move MAC from hardward to software ret=_rtk_rg_softwareLut_addFromHw(i,macCategory_info.category); DEBUG("### add l2[sw]=%02x:%02x:%02x:%02x:%02x:%02x from hw[%d] ###\n",rg_db.lut[i].rtk_lut.entry.l2UcEntry.mac.octet[0],rg_db.lut[i].rtk_lut.entry.l2UcEntry.mac.octet[1],rg_db.lut[i].rtk_lut.entry.l2UcEntry.mac.octet[2], rg_db.lut[i].rtk_lut.entry.l2UcEntry.mac.octet[3],rg_db.lut[i].rtk_lut.entry.l2UcEntry.mac.octet[4],rg_db.lut[i].rtk_lut.entry.l2UcEntry.mac.octet[5],i); TRACE("MAC[sw](%02x:%02x:%02x:%02x:%02x:%02x) add from hw[%d]\n",rg_db.lut[i].rtk_lut.entry.l2UcEntry.mac.octet[0],rg_db.lut[i].rtk_lut.entry.l2UcEntry.mac.octet[1],rg_db.lut[i].rtk_lut.entry.l2UcEntry.mac.octet[2], rg_db.lut[i].rtk_lut.entry.l2UcEntry.mac.octet[3],rg_db.lut[i].rtk_lut.entry.l2UcEntry.mac.octet[4],rg_db.lut[i].rtk_lut.entry.l2UcEntry.mac.octet[5],i); assert_ok(ret); //delete hw entry ret=rtk_rg_apollo_macEntry_del(i); assert_ok(ret); } else { //change software LUT's category rg_db.lut[i].category=macCategory_info.category; } //trigger ARP and neighbor request timer _rtk_rg_lutReachLimit_init(RG_ACCESSWAN_TYPE_CATEGORY, _rtk_rg_lutReachLimit_category, (unsigned long)macCategory_info.category); } else { //software LUT won't change count.. if(i<MAX_LUT_HW_TABLE_SIZE) atomic_dec(&rg_db.systemGlobal.accessWanLimitCategoryCount[(unsigned int)rg_db.lut[i].category]); rg_db.lut[i].category=macCategory_info.category; //software LUT won't change count.. if(i<MAX_LUT_HW_TABLE_SIZE) atomic_inc(&rg_db.systemGlobal.accessWanLimitCategoryCount[(unsigned int)macCategory_info.category]); } l2Found++; } } if(l2Found>0) return (RT_ERR_RG_OK); else RETURN_ERR(RT_ERR_RG_L2_ENTRY_ACCESS_FAILED); } rtk_rg_err_code_t rtk_rg_apollo_accessWanLimitCategory_get(rtk_rg_accessWanLimitCategory_t *macCategory_info) { int i,ret; //Check param if(rg_db.systemGlobal.activeLimitFunction!=RG_ACCESSWAN_TYPE_CATEGORY) RETURN_ERR(RT_ERR_RG_ACCESSWAN_DISABLE); if((macCategory_info->mac.octet[0]&0x1)>0) //multicast don't have category RETURN_ERR(RT_ERR_RG_INVALID_PARAM); //return the category of the "first" match MAC, if no such MAC, return RT_ERR_RG_L2_ENTRY_ACCESS_FAILED for(i=0;i<MAX_LUT_SW_TABLE_SIZE;i++) { if(rg_db.lut[i].valid && rg_db.lut[i].rtk_lut.entryType==RTK_LUT_L2UC && memcmp(rg_db.lut[i].rtk_lut.entry.l2UcEntry.mac.octet,macCategory_info->mac.octet,ETHER_ADDR_LEN)==0) { //Check if the MAC is under LAN portmask ret=_rtK_rg_checkCategoryPortmask(&rg_db.lut[i].rtk_lut.entry.l2UcEntry); if(ret!=RT_ERR_RG_OK) RETURN_ERR(ret); //Hit! 
macCategory_info->category=rg_db.lut[i].category; return (RT_ERR_RG_OK); } } RETURN_ERR(RT_ERR_RG_L2_ENTRY_ACCESS_FAILED); } rtk_rg_err_code_t rtk_rg_apollo_softwareSourceAddrLearningLimit_set(rtk_rg_saLearningLimitInfo_t sa_learnLimit_info, rtk_rg_port_idx_t port_idx) { int i; rtk_rg_accessWanLimitData_t resetInfo; //Check param if(port_idx < RTK_RG_PORT0 || port_idx >= RTK_RG_PORT_MAX) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if(sa_learnLimit_info.learningLimitNumber<DEF_SOFTWARE_LEARNING_LIMIT || sa_learnLimit_info.learningLimitNumber>MAX_LUT_HW_TABLE_SIZE) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if(sa_learnLimit_info.action>=SA_LEARN_EXCEED_ACTION_END) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); #ifdef CONFIG_MASTER_WLAN0_ENABLE if(port_idx==RTK_RG_EXT_PORT0) { for(i=0;i<MAX_WLAN_DEVICE_NUM;i++) { if(rg_db.systemGlobal.wlan0BindDecision[i].exist && rg_db.systemGlobal.wlan0SourceAddrLearningLimitNumber[i]!=DEF_SOFTWARE_LEARNING_LIMIT) RETURN_ERR(RT_ERR_RG_ACCESSWAN_WLAN_CONFLICT); } } #ifdef CONFIG_DUALBAND_CONCURRENT else if(rg_db.systemGlobal.enableSlaveSSIDBind && port_idx==RTK_RG_EXT_PORT1){ for(i=WLAN_DEVICE_NUM;i<MAX_WLAN_DEVICE_NUM;i++) { if(rg_db.systemGlobal.wlan0BindDecision[i].exist && rg_db.systemGlobal.wlan0SourceAddrLearningLimitNumber[i]!=DEF_SOFTWARE_LEARNING_LIMIT) RETURN_ERR(RT_ERR_RG_ACCESSWAN_WLAN_CONFLICT); } } #endif #endif //Set number and action in rg_db rg_db.systemGlobal.sourceAddrLearningLimitNumber[port_idx]=sa_learnLimit_info.learningLimitNumber; rg_db.systemGlobal.sourceAddrLearningAction[port_idx]=sa_learnLimit_info.action; //stop ARP request if actived atomic_set(&rg_kernel.lutReachLimit_port[port_idx].activity,0); if(sa_learnLimit_info.learningLimitNumber>=0) { rg_db.systemGlobal.activeLimitFunction=RG_ACCESSWAN_TYPE_PORT; //Reset portmask Access Limit resetInfo.type=RG_ACCESSWAN_TYPE_PORTMASK; resetInfo.action=SA_LEARN_EXCEED_ACTION_PERMIT; resetInfo.learningLimitNumber=DEF_SOFTWARE_LEARNING_LIMIT; resetInfo.data.port_mask.portmask=0x0; #ifdef CONFIG_MASTER_WLAN0_ENABLE resetInfo.wlan0_dev_mask=0x0; #endif atomic_set(&rg_db.systemGlobal.accessWanLimitPortMaskCount,0); rtk_rg_apollo_accessWanLimit_set(resetInfo); //Reset Category Access Limit resetInfo.type=RG_ACCESSWAN_TYPE_CATEGORY; resetInfo.action=SA_LEARN_EXCEED_ACTION_PERMIT; resetInfo.learningLimitNumber=DEF_SOFTWARE_LEARNING_LIMIT; #ifdef CONFIG_MASTER_WLAN0_ENABLE resetInfo.wlan0_dev_mask=0x0; #endif for(i=0;i<WanAccessCategoryNum;i++) { resetInfo.data.category=i; rtk_rg_apollo_accessWanLimit_set(resetInfo); } } return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_softwareSourceAddrLearningLimit_get(rtk_rg_saLearningLimitInfo_t *sa_learnLimit_info, rtk_rg_port_idx_t port_idx) { //Check param if(port_idx < RTK_RG_PORT0 || port_idx > RTK_RG_PORT_MAX) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); //Get number and action in rg_db sa_learnLimit_info->learningLimitNumber=rg_db.systemGlobal.sourceAddrLearningLimitNumber[port_idx]; sa_learnLimit_info->action=rg_db.systemGlobal.sourceAddrLearningAction[port_idx]; return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_wlanSoftwareSourceAddrLearningLimit_set(rtk_rg_saLearningLimitInfo_t sa_learnLimit_info, int wlan_idx, int dev_idx) { #ifdef CONFIG_MASTER_WLAN0_ENABLE int i; rtk_rg_accessWanLimitData_t resetInfo; #endif //Check param //FIXME: we just support master wifi now. 
if(wlan_idx!=0)RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if(dev_idx<0 || dev_idx>=MAX_WLAN_DEVICE_NUM)RETURN_ERR(RT_ERR_RG_ENTRY_NOT_EXIST); if(sa_learnLimit_info.learningLimitNumber<DEF_SOFTWARE_LEARNING_LIMIT || sa_learnLimit_info.learningLimitNumber>MAX_LUT_HW_TABLE_SIZE) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if(sa_learnLimit_info.action>=SA_LEARN_EXCEED_ACTION_END) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); //20140721LUKE:we support WLAN0 only right now if(wlan_idx==0) { if(rg_db.systemGlobal.sourceAddrLearningLimitNumber[RTK_RG_EXT_PORT0]!=DEF_SOFTWARE_LEARNING_LIMIT) RETURN_ERR(RT_ERR_RG_ACCESSWAN_WLAN_CONFLICT); #ifdef CONFIG_MASTER_WLAN0_ENABLE #ifdef CONFIG_DUALBAND_CONCURRENT if(rg_db.systemGlobal.enableSlaveSSIDBind && dev_idx>=WLAN_DEVICE_NUM && rg_db.systemGlobal.sourceAddrLearningLimitNumber[RTK_RG_EXT_PORT1]!=DEF_SOFTWARE_LEARNING_LIMIT) RETURN_ERR(RT_ERR_RG_ACCESSWAN_WLAN_CONFLICT); #endif if(!rg_db.systemGlobal.wlan0BindDecision[dev_idx].exist)RETURN_ERR(RT_ERR_RG_ENTRY_NOT_EXIST); //Set number and action in rg_db rg_db.systemGlobal.wlan0SourceAddrLearningLimitNumber[dev_idx]=sa_learnLimit_info.learningLimitNumber; rg_db.systemGlobal.wlan0SourceAddrLearningAction[dev_idx]=sa_learnLimit_info.action; //stop ARP request if actived atomic_set(&rg_kernel.lutReachLimit_wlan0dev[dev_idx].activity,0); if(sa_learnLimit_info.learningLimitNumber>=0) { rg_db.systemGlobal.activeLimitFunction=RG_ACCESSWAN_TYPE_PORT; //Reset portmask Access Limit resetInfo.type=RG_ACCESSWAN_TYPE_PORTMASK; resetInfo.action=SA_LEARN_EXCEED_ACTION_PERMIT; resetInfo.learningLimitNumber=DEF_SOFTWARE_LEARNING_LIMIT; resetInfo.data.port_mask.portmask=0x0; resetInfo.wlan0_dev_mask=0x0; atomic_set(&rg_db.systemGlobal.accessWanLimitPortMaskCount,0); rtk_rg_apollo_accessWanLimit_set(resetInfo); //Reset Category Access Limit resetInfo.type=RG_ACCESSWAN_TYPE_CATEGORY; resetInfo.action=SA_LEARN_EXCEED_ACTION_PERMIT; resetInfo.learningLimitNumber=DEF_SOFTWARE_LEARNING_LIMIT; resetInfo.wlan0_dev_mask=0x0; for(i=0;i<WanAccessCategoryNum;i++) { resetInfo.data.category=i; rtk_rg_apollo_accessWanLimit_set(resetInfo); } } #endif } return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_wlanSoftwareSourceAddrLearningLimit_get(rtk_rg_saLearningLimitInfo_t *sa_learnLimit_info, int wlan_idx, int dev_idx) { //Check param //FIXME: we just support master wifi now. if(wlan_idx!=0)RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if(sa_learnLimit_info==NULL)RETURN_ERR(RT_ERR_RG_NULL_POINTER); if(dev_idx<0 || dev_idx>=MAX_WLAN_DEVICE_NUM)RETURN_ERR(RT_ERR_RG_ENTRY_NOT_EXIST); //20140721LUKE:we support WLAN0 only right now if(wlan_idx==0) { #ifdef CONFIG_MASTER_WLAN0_ENABLE if(!rg_db.systemGlobal.wlan0BindDecision[dev_idx].exist)RETURN_ERR(RT_ERR_RG_ENTRY_NOT_EXIST); //Get number and action in rg_db sa_learnLimit_info->learningLimitNumber=rg_db.systemGlobal.wlan0SourceAddrLearningLimitNumber[dev_idx]; sa_learnLimit_info->action=rg_db.systemGlobal.wlan0SourceAddrLearningAction[dev_idx]; #endif } return (RT_ERR_RG_OK); } #ifdef __KERNEL__ //model code skip those APIs. 
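/*
 * Usage sketch (illustrative only): mixed strict-priority / WFQ scheduling on a
 * LAN MAC port. Per the handler below, a weight of 0 puts a queue into strict
 * priority mode and a non-zero value is used as its WFQ/WRR weight. The queue
 * indexes, weights, and port chosen here are assumptions for the example.
 */
#if 0	/* documentation sketch, not compiled */
static void example_queue_scheduling(void)
{
	rtk_rg_qos_queue_weights_t qw;
	int q;

	bzero(&qw, sizeof(qw));
	for(q = 0; q < RTK_RG_MAX_NUM_OF_QUEUE; q++)
		qw.weights[q] = 0;      /* strict priority by default */
	qw.weights[0] = 1;          /* queue 0: WFQ weight 1 */
	qw.weights[1] = 4;          /* queue 1: WFQ weight 4, four times the share of queue 0 */

	rtk_rg_apollo_qosStrictPriorityOrWeightFairQueue_set(RTK_RG_MAC_PORT0, qw);
}
#endif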
rtk_rg_err_code_t rtk_rg_apollo_qosStrictPriorityOrWeightFairQueue_set(rtk_rg_mac_port_idx_t port_idx,rtk_rg_qos_queue_weights_t q_weight) { int i,ret; rtk_qos_queue_weights_t rtk_q_weight; rtk_ponmac_queue_t rtk_pon_q; rtk_ponmac_queueCfg_t rtk_pon_q_cfg; bzero(&rtk_q_weight,sizeof(rtk_qos_queue_weights_t)); bzero(&rtk_pon_q,sizeof(rtk_ponmac_queue_t)); bzero(&rtk_pon_q_cfg,sizeof(rtk_ponmac_queueCfg_t)); if((port_idx < 0) || (port_idx >= RTK_RG_MAC_PORT_MAX)) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); #if defined(CONFIG_GPON_FEATURE) if((port_idx==RTK_RG_MAC_PORT_PON) && (rg_db.systemGlobal.initParam.wanPortGponMode))RETURN_ERR(RT_ERR_RG_GPON_NOT_SUPPORT); #endif for(i=0;i<RTK_RG_MAX_NUM_OF_QUEUE;i++) rtk_q_weight.weights[i]=q_weight.weights[i]; #ifdef __KERNEL__ //Exclude IFG ret=rtk_rate_egrBandwidthCtrlIncludeIfg_set(DISABLED); DEBUG("ret=%x\n",ret); if(ret!=RT_ERR_OK) RETURN_ERR(RT_ERR_RG_FAILED); #endif if(port_idx==RTK_RG_MAC_PORT_PON) { int qid=0; for(qid=0;qid<RTK_RG_MAX_NUM_OF_QUEUE;qid++) { //Before set, just get(unknown problem) rtk_pon_q.queueId=qid; rtk_pon_q.schedulerId=0; rtk_ponmac_queue_get(&rtk_pon_q, &rtk_pon_q_cfg); rtk_pon_q_cfg.cir=0; rtk_pon_q_cfg.pir=0x1ffff; if(rtk_q_weight.weights[qid]) rtk_pon_q_cfg.type=WFQ_WRR_PRIORITY; else rtk_pon_q_cfg.type=STRICT_PRIORITY; rtk_pon_q_cfg.weight=rtk_q_weight.weights[qid]; ret=rtk_ponmac_queue_add(&rtk_pon_q, &rtk_pon_q_cfg); DEBUG("ret=%x\n",ret); if(ret!=RT_ERR_OK) RETURN_ERR(RT_ERR_RG_FAILED); } } else { ret=rtk_qos_schedulingQueue_set(port_idx,&rtk_q_weight); DEBUG("ret=%x\n",ret); if(ret!=RT_ERR_OK) RETURN_ERR(RT_ERR_RG_FAILED); } return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_qosStrictPriorityOrWeightFairQueue_get(rtk_rg_mac_port_idx_t port_idx,rtk_rg_qos_queue_weights_t *pQ_weight) { int i,ret; rtk_qos_queue_weights_t rtk_q_weight; rtk_ponmac_queue_t rtk_pon_q; rtk_ponmac_queueCfg_t rtk_pon_q_cfg; bzero(&rtk_q_weight,sizeof(rtk_qos_queue_weights_t)); bzero(&rtk_pon_q,sizeof(rtk_ponmac_queue_t)); bzero(&rtk_pon_q_cfg,sizeof(rtk_ponmac_queueCfg_t)); if((port_idx < 0) || (port_idx >= RTK_RG_MAC_PORT_MAX)) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); #if defined(CONFIG_GPON_FEATURE) if((port_idx==RTK_RG_MAC_PORT_PON) && (rg_db.systemGlobal.initParam.wanPortGponMode))RETURN_ERR(RT_ERR_RG_GPON_NOT_SUPPORT); #endif if(pQ_weight==NULL) RETURN_ERR(RT_ERR_RG_NULL_POINTER); if(port_idx==RTK_RG_MAC_PORT_PON) { int qid=0; for(qid=0;qid<RTK_RG_MAX_NUM_OF_QUEUE;qid++) { //Before set, just get(unknown problem) rtk_pon_q.queueId=qid; rtk_pon_q.schedulerId=0; ret=rtk_ponmac_queue_get(&rtk_pon_q, &rtk_pon_q_cfg); if(ret!=RT_ERR_OK) RETURN_ERR(RT_ERR_RG_FAILED); if(rtk_pon_q_cfg.type==STRICT_PRIORITY) rtk_q_weight.weights[qid] = 0; else rtk_q_weight.weights[qid] = rtk_pon_q_cfg.weight; } } else { ret=rtk_qos_schedulingQueue_get(port_idx, &rtk_q_weight); if(ret!=RT_ERR_OK) RETURN_ERR(RT_ERR_RG_FAILED); } //memcpy(q_weight->weights,rtk_q_weight.weights,sizeof(uint32)*RTK_RG_MAX_NUM_OF_QUEUE); for(i=0;i<RTK_RG_MAX_NUM_OF_QUEUE;i++) pQ_weight->weights[i]=rtk_q_weight.weights[i]; return (RT_ERR_RG_OK); } /* ============================ DoS Port Security ============================ */ int dosThresholdMapping[]={1,2,3,4,6,7,8,9,10,11,12,13,14,16,17,18,19,20,21,22,23,24,26,27,28,29,30,31,32,33,34,36,37,38,39,40,41,42,43,44, 45,46,47,48,50,51,52,53,54,55,56,57,58,59,61,62,63,64,65,66,67,68,69,70,71,72,74,75,76,77,78,90,80,81,82,84,85,86,87,88, 
89,90,91,92,94,95,96,97,98,99,100,101,102,104,105,106,107,108,109,110,111,112,114,115,116,117,118,119,120,121,122,124, 125,126,127,128,129,130,131,132,134,135,136,137,138,139,140,141,142,144,145,146,147,148, 149,150,151,152,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171, 173,174,175,176,177,178,179,180,181,183,184,185,186,187,188,189,190,191,193,194,195,196, 197,198,199,200,201,203,204,205,206,207,208,209,210,211,213,214,215,216,217,218,219,220, 221,223,224,225,226,227,228,229,230,231,233,234,235,236,237,238,239,240,241,242,243,244, 245,246,247,248,249,250,253,253,254,255}; rtk_rg_err_code_t rtk_rg_apollo_dosPortMaskEnable_set(rtk_rg_mac_portmask_t dos_port_mask) { int ret; int i; //Configure security ports for(i=0;i<RTK_RG_MAC_PORT_MAX;i++) { if(dos_port_mask.portmask & (0x1<<i)) ret = rtk_sec_portAttackPreventState_set(i,ENABLED); else ret = rtk_sec_portAttackPreventState_set(i,DISABLED); } assert_ok(ret); return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_dosPortMaskEnable_get(rtk_rg_mac_portmask_t *dos_port_mask) { int ret; int i; rtk_enable_t dos_enabled; if(dos_port_mask==NULL) RETURN_ERR(RT_ERR_RG_NULL_POINTER); //Get security port state dos_port_mask->portmask=0; for(i=0;i<RTK_RG_MAC_PORT_MAX;i++) { ret = rtk_sec_portAttackPreventState_get(i,&dos_enabled); assert_ok(ret); if(dos_enabled) dos_port_mask->portmask|=(0x1<<i); else dos_port_mask->portmask&=~(0x1<<i); } return (RT_ERR_RG_OK); } int _rtk_rg_dosType_set(rtk_rg_dos_type_t dos_type,int dos_enabled,rtk_rg_dos_action_t dos_action) { int ret; int enabled; rtk_rg_dos_action_t act; /* RG DoS Action: RTK_RG_DOS_ACTION_DROP = 0, RTK_RG_DOS_ACTION_TRAP, */ int dosActionMapping[]={ ACTION_DROP, ACTION_TRAP2CPU }; /* RG DoS type: RTK_RG_DOS_DAEQSA_DENY = 0, RTK_RG_DOS_LAND_DENY, RTK_RG_DOS_BLAT_DENY, RTK_RG_DOS_SYNFIN_DENY, RTK_RG_DOS_XMA_DENY, RTK_RG_DOS_NULLSCAN_DENY, RTK_RG_DOS_SYN_SPORTL1024_DENY, RTK_RG_DOS_TCPHDR_MIN_CHECK, RTK_RG_DOS_TCP_FRAG_OFF_MIN_CHECK, RTK_RG_DOS_ICMP_FRAG_PKTS_DENY, RTK_RG_DOS_POD_DENY, RTK_RG_DOS_UDPDOMB_DENY, RTK_RG_DOS_SYNWITHDATA_DENY, RTK_RG_DOS_SYNFLOOD_DENY, RTK_RG_DOS_FINFLOOD_DENY, RTK_RG_DOS_ICMPFLOOD_DENY, */ int dosTypeMapping[]={ DAEQSA_DENY, LAND_DENY, BLAT_DENY, SYNFIN_DENY, XMA_DENY, NULLSCAN_DENY, SYN_SPORTL1024_DENY, TCPHDR_MIN_CHECK, TCP_FRAG_OFF_MIN_CHECK, ICMP_FRAG_PKTS_DENY, POD_DENY, UDPDOMB_DENY, SYNWITHDATA_DENY, SYNFLOOD_DENY, FINFLOOD_DENY, ICMPFLOOD_DENY }; ret = rtk_rg_apollo_dosType_get(dos_type,&enabled,&act); if(dos_enabled!=-1) enabled=dos_enabled; if(dos_action!=-1) act=dos_action; //Configure DoS action if(enabled) ret = rtk_sec_attackPrevent_set(dosTypeMapping[dos_type],dosActionMapping[act]); else ret = rtk_sec_attackPrevent_set(dosTypeMapping[dos_type],ACTION_FORWARD); assert_ok(ret); return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_dosType_set(rtk_rg_dos_type_t dos_type,int dos_enabled,rtk_rg_dos_action_t dos_action) { int ret; //Check parameters if((dos_type<RTK_RG_DOS_DAEQSA_DENY) || (dos_type>RTK_RG_DOS_SYNWITHDATA_DENY)) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if((dos_enabled!=1) && (dos_enabled!=0) && (dos_enabled!=-1)) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if((dos_action!=RTK_RG_DOS_ACTION_DROP) && (dos_action!=RTK_RG_DOS_ACTION_TRAP) && (dos_action!=-1)) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); ret = _rtk_rg_dosType_set(dos_type,dos_enabled,dos_action); return (RT_ERR_RG_OK); } int _rtk_rg_dosType_get(rtk_rg_dos_type_t dos_type,int *dos_enabled,rtk_rg_dos_action_t *dos_action) { int ret; rtk_action_t act; /* RG DoS type: 
RTK_RG_DOS_DAEQSA_DENY = 0, RTK_RG_DOS_LAND_DENY, RTK_RG_DOS_BLAT_DENY, RTK_RG_DOS_SYNFIN_DENY, RTK_RG_DOS_XMA_DENY, RTK_RG_DOS_NULLSCAN_DENY, RTK_RG_DOS_SYN_SPORTL1024_DENY, RTK_RG_DOS_TCPHDR_MIN_CHECK, RTK_RG_DOS_TCP_FRAG_OFF_MIN_CHECK, RTK_RG_DOS_ICMP_FRAG_PKTS_DENY, RTK_RG_DOS_POD_DENY, RTK_RG_DOS_UDPDOMB_DENY, RTK_RG_DOS_SYNWITHDATA_DENY, RTK_RG_DOS_SYNFLOOD_DENY, RTK_RG_DOS_FINFLOOD_DENY, RTK_RG_DOS_ICMPFLOOD_DENY, */ int dosTypeMapping[]={ DAEQSA_DENY, LAND_DENY, BLAT_DENY, SYNFIN_DENY, XMA_DENY, NULLSCAN_DENY, SYN_SPORTL1024_DENY, TCPHDR_MIN_CHECK, TCP_FRAG_OFF_MIN_CHECK, ICMP_FRAG_PKTS_DENY, POD_DENY, UDPDOMB_DENY, SYNWITHDATA_DENY, SYNFLOOD_DENY, FINFLOOD_DENY, ICMPFLOOD_DENY }; //Get DoS action ret = rtk_sec_attackPrevent_get(dosTypeMapping[dos_type],&act); assert_ok(ret); if(act==ACTION_FORWARD) { *dos_enabled=0; *dos_action=0; } else { *dos_enabled=1; *dos_action=(act==ACTION_DROP)?RTK_RG_DOS_ACTION_DROP:RTK_RG_DOS_ACTION_TRAP; } return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_dosType_get(rtk_rg_dos_type_t dos_type,int *dos_enabled,rtk_rg_dos_action_t *dos_action) { int ret; if((dos_enabled==NULL) || (dos_action==NULL)) RETURN_ERR(RT_ERR_RG_NULL_POINTER); ret = _rtk_rg_dosType_get(dos_type,dos_enabled,dos_action); assert_ok(ret); return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_dosFloodType_set(rtk_rg_dos_type_t dos_type,int dos_enabled,rtk_rg_dos_action_t dos_action,int dos_threshold) { int ret; int mapIdx; //Check parameters if((dos_type<RTK_RG_DOS_SYNFLOOD_DENY) || (dos_type>RTK_RG_DOS_ICMPFLOOD_DENY)) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if((dos_enabled!=1) && (dos_enabled!=0) && (dos_enabled!=-1)) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if((dos_action!=RTK_RG_DOS_ACTION_DROP) && (dos_action!=RTK_RG_DOS_ACTION_TRAP) && (dos_action!=-1)) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if(((dos_threshold<1) || (dos_threshold>232)) && (dos_threshold!=-1)) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); ret = _rtk_rg_dosType_set(dos_type,dos_enabled,dos_action); assert_ok(ret); //Don't change threshold if(dos_threshold==-1) return (RT_ERR_RG_OK); //Set DoS flooding attack protection threshold mapIdx=dos_threshold-1; if(dos_type==RTK_RG_DOS_SYNFLOOD_DENY) ret = rtk_sec_attackFloodThresh_set(SEC_SYNCFLOOD,dosThresholdMapping[mapIdx]); else if(dos_type==RTK_RG_DOS_FINFLOOD_DENY) ret = rtk_sec_attackFloodThresh_set(SEC_FINFLOOD,dosThresholdMapping[mapIdx]); else if(dos_type==RTK_RG_DOS_ICMPFLOOD_DENY) ret = rtk_sec_attackFloodThresh_set(SEC_ICMPFLOOD,dosThresholdMapping[mapIdx]); assert_ok(ret); return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_dosFloodType_get(rtk_rg_dos_type_t dos_type,int *dos_enabled,rtk_rg_dos_action_t *dos_action,int *dos_threshold) { int ret; int i; int internal_threshold; if((dos_type<RTK_RG_DOS_SYNFLOOD_DENY) || (dos_type>RTK_RG_DOS_ICMPFLOOD_DENY)) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if((dos_enabled==NULL) || (dos_action==NULL) || (dos_threshold==NULL)) RETURN_ERR(RT_ERR_RG_NULL_POINTER); ret = _rtk_rg_dosType_get(dos_type,dos_enabled,dos_action); assert_ok(ret); //Get DoS threshold internal_threshold=0; if(dos_type==RTK_RG_DOS_SYNFLOOD_DENY) ret = rtk_sec_attackFloodThresh_get(SEC_SYNCFLOOD,&internal_threshold); else if(dos_type==RTK_RG_DOS_FINFLOOD_DENY) ret = rtk_sec_attackFloodThresh_get(SEC_FINFLOOD,&internal_threshold); else if(dos_type==RTK_RG_DOS_ICMPFLOOD_DENY) ret = rtk_sec_attackFloodThresh_get(SEC_ICMPFLOOD,&internal_threshold); else return (RT_ERR_RG_OK); assert_ok(ret); //Recovery the setting value for(i=0;i<232;i++) 
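	/*
	 * dosThresholdMapping[] translates the user-visible threshold range 1..232 into
	 * the sparse hardware threshold values; this loop inverts that mapping so the
	 * caller gets back the value originally passed to rtk_rg_apollo_dosFloodType_set.
	 */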
if(internal_threshold==dosThresholdMapping[i]) *dos_threshold=i+1; return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_portMibInfo_get(rtk_rg_mac_port_idx_t port, rtk_rg_port_mib_info_t *mibInfo) { int ret; rtk_stat_port_cntr_t cntr; //Check param if(port < RTK_RG_MAC_PORT0 || port >= RTK_RG_MAC_PORT_MAX) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if(mibInfo==NULL) RETURN_ERR(RT_ERR_RG_NULL_POINTER); //Get MIB data ret = rtk_stat_port_getAll( port,&cntr); assert_ok(ret); //Assign to RG data structure mibInfo->ifInOctets=cntr.ifInOctets; mibInfo->ifInUcastPkts=cntr.ifInUcastPkts; mibInfo->ifInMulticastPkts=cntr.ifInMulticastPkts; mibInfo->ifInBroadcastPkts=cntr.ifInBroadcastPkts; mibInfo->ifInDiscards=cntr.ifInDiscards; mibInfo->ifOutOctets=cntr.ifOutOctets; mibInfo->ifOutDiscards=cntr.ifOutDiscards; mibInfo->ifOutUcastPkts=cntr.ifOutUcastPkts; mibInfo->ifOutMulticastPkts=cntr.ifOutMulticastPkts; mibInfo->ifOutBrocastPkts=cntr.ifOutBrocastPkts; mibInfo->dot1dBasePortDelayExceededDiscards=cntr.dot1dBasePortDelayExceededDiscards; mibInfo->dot1dTpPortInDiscards=cntr.dot1dTpPortInDiscards; mibInfo->dot1dTpHcPortInDiscards=cntr.dot1dTpHcPortInDiscards; mibInfo->dot3InPauseFrames=cntr.dot3InPauseFrames; mibInfo->dot3OutPauseFrames=cntr.dot3OutPauseFrames; // mibInfo->dot3OutPauseOnFrames=cntr.dot3OutPauseOnFrames; mibInfo->dot3StatsAligmentErrors=cntr.dot3StatsAligmentErrors; mibInfo->dot3StatsFCSErrors=cntr.dot3StatsFCSErrors; mibInfo->dot3StatsSingleCollisionFrames=cntr.dot3StatsSingleCollisionFrames; mibInfo->dot3StatsMultipleCollisionFrames=cntr.dot3StatsMultipleCollisionFrames; mibInfo->dot3StatsDeferredTransmissions=cntr.dot3StatsDeferredTransmissions; mibInfo->dot3StatsLateCollisions=cntr.dot3StatsLateCollisions; mibInfo->dot3StatsExcessiveCollisions=cntr.dot3StatsExcessiveCollisions; mibInfo->dot3StatsFrameTooLongs=cntr.dot3StatsFrameTooLongs; mibInfo->dot3StatsSymbolErrors=cntr.dot3StatsSymbolErrors; mibInfo->dot3ControlInUnknownOpcodes=cntr.dot3ControlInUnknownOpcodes; mibInfo->etherStatsDropEvents=cntr.etherStatsDropEvents; mibInfo->etherStatsOctets=cntr.etherStatsOctets; mibInfo->etherStatsBcastPkts=cntr.etherStatsBcastPkts; mibInfo->etherStatsMcastPkts=cntr.etherStatsMcastPkts; mibInfo->etherStatsUndersizePkts=cntr.etherStatsUndersizePkts; mibInfo->etherStatsOversizePkts=cntr.etherStatsOversizePkts; mibInfo->etherStatsFragments=cntr.etherStatsFragments; mibInfo->etherStatsJabbers=cntr.etherStatsJabbers; mibInfo->etherStatsCollisions=cntr.etherStatsCollisions; mibInfo->etherStatsCRCAlignErrors=cntr.etherStatsCRCAlignErrors; mibInfo->etherStatsPkts64Octets=cntr.etherStatsPkts64Octets; mibInfo->etherStatsPkts65to127Octets=cntr.etherStatsPkts65to127Octets; mibInfo->etherStatsPkts128to255Octets=cntr.etherStatsPkts128to255Octets; mibInfo->etherStatsPkts256to511Octets=cntr.etherStatsPkts256to511Octets; mibInfo->etherStatsPkts512to1023Octets=cntr.etherStatsPkts512to1023Octets; mibInfo->etherStatsPkts1024to1518Octets=cntr.etherStatsPkts1024to1518Octets; mibInfo->etherStatsTxOctets=cntr.etherStatsTxOctets; mibInfo->etherStatsTxUndersizePkts=cntr.etherStatsTxUndersizePkts; mibInfo->etherStatsTxOversizePkts=cntr.etherStatsTxOversizePkts; mibInfo->etherStatsTxPkts64Octets=cntr.etherStatsTxPkts64Octets; mibInfo->etherStatsTxPkts65to127Octets=cntr.etherStatsTxPkts65to127Octets; mibInfo->etherStatsTxPkts128to255Octets=cntr.etherStatsTxPkts128to255Octets; mibInfo->etherStatsTxPkts256to511Octets=cntr.etherStatsTxPkts256to511Octets; 
mibInfo->etherStatsTxPkts512to1023Octets=cntr.etherStatsTxPkts512to1023Octets; mibInfo->etherStatsTxPkts1024to1518Octets=cntr.etherStatsTxPkts1024to1518Octets; mibInfo->etherStatsTxPkts1519toMaxOctets=cntr.etherStatsTxPkts1519toMaxOctets; mibInfo->etherStatsTxBcastPkts=cntr.etherStatsTxBcastPkts; mibInfo->etherStatsTxMcastPkts=cntr.etherStatsTxMcastPkts; mibInfo->etherStatsTxFragments=cntr.etherStatsTxFragments; mibInfo->etherStatsTxJabbers=cntr.etherStatsTxJabbers; mibInfo->etherStatsTxCRCAlignErrors=cntr.etherStatsTxCRCAlignErrors; mibInfo->etherStatsRxUndersizePkts=cntr.etherStatsRxUndersizePkts; mibInfo->etherStatsRxUndersizeDropPkts=cntr.etherStatsRxUndersizeDropPkts; mibInfo->etherStatsRxOversizePkts=cntr.etherStatsRxOversizePkts; mibInfo->etherStatsRxPkts64Octets=cntr.etherStatsRxPkts64Octets; mibInfo->etherStatsRxPkts65to127Octets=cntr.etherStatsRxPkts65to127Octets; mibInfo->etherStatsRxPkts128to255Octets=cntr.etherStatsRxPkts128to255Octets; mibInfo->etherStatsRxPkts256to511Octets=cntr.etherStatsRxPkts256to511Octets; mibInfo->etherStatsRxPkts512to1023Octets=cntr.etherStatsRxPkts512to1023Octets; mibInfo->etherStatsRxPkts1024to1518Octets=cntr.etherStatsRxPkts1024to1518Octets; mibInfo->etherStatsRxPkts1519toMaxOctets=cntr.etherStatsRxPkts1519toMaxOctets; mibInfo->inOampduPkts=cntr.inOampduPkts; mibInfo->outOampduPkts=cntr.outOampduPkts; return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_portMibInfo_clear(rtk_rg_mac_port_idx_t port) { int ret; //Check param if(port < RTK_RG_MAC_PORT0 || port >= RTK_RG_MAC_PORT_MAX) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); ret = rtk_stat_port_reset(port); assert_ok(ret); return (RT_ERR_RG_OK); } #endif rtk_rg_err_code_t rtk_rg_apollo_portIsolation_set(rtk_rg_port_isolation_t isolationSetting) { rtk_portmask_t mbpmsk, etpmsk; //Check parameter if(rg_db.systemGlobal.storedInfo.valid)RETURN_ERR(RT_ERR_RG_STP_BLOCKING_ENABLED); if(isolationSetting.port>=RTK_RG_PORT_MAX)RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if(isolationSetting.portmask.portmask>0xfff)RETURN_ERR(RT_ERR_RG_INVALID_PARAM); _rtk_rg_portmask_translator(isolationSetting.portmask, &mbpmsk, &etpmsk); if(isolationSetting.port==RTK_RG_PORT_CPU) { assert_ok(RTK_PORT_ISOLATIONENTRY_SET(RTK_PORT_ISO_CFG_0, isolationSetting.port, &mbpmsk, &etpmsk)); assert_ok(RTK_PORT_ISOLATIONENTRY_SET(RTK_PORT_ISO_CFG_1, isolationSetting.port, &mbpmsk, &etpmsk)); assert_ok(RTK_PORT_ISOLATIONENTRYEXT_SET(RTK_PORT_ISO_CFG_0, isolationSetting.port-RTK_RG_PORT_CPU, &mbpmsk, &etpmsk)); assert_ok(RTK_PORT_ISOLATIONENTRYEXT_SET(RTK_PORT_ISO_CFG_1, isolationSetting.port-RTK_RG_PORT_CPU, &mbpmsk, &etpmsk)); } else if(isolationSetting.port<RTK_RG_PORT_CPU) //phy { assert_ok(RTK_PORT_ISOLATIONENTRY_SET(RTK_PORT_ISO_CFG_0, isolationSetting.port, &mbpmsk, &etpmsk)); assert_ok(RTK_PORT_ISOLATIONENTRY_SET(RTK_PORT_ISO_CFG_1, isolationSetting.port, &mbpmsk, &etpmsk)); } else //ext { assert_ok(RTK_PORT_ISOLATIONENTRYEXT_SET(RTK_PORT_ISO_CFG_0, isolationSetting.port-RTK_RG_PORT_CPU, &mbpmsk, &etpmsk)); assert_ok(RTK_PORT_ISOLATIONENTRYEXT_SET(RTK_PORT_ISO_CFG_1, isolationSetting.port-RTK_RG_PORT_CPU, &mbpmsk, &etpmsk)); } //Keep in software rg_db.systemGlobal.portIsolation[isolationSetting.port].portmask=isolationSetting.portmask.portmask; return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_portIsolation_get(rtk_rg_port_isolation_t *isolationSetting) { //Check parameter if(isolationSetting==NULL)RETURN_ERR(RT_ERR_RG_NULL_POINTER); if(isolationSetting->port>=RTK_RG_PORT_MAX)RETURN_ERR(RT_ERR_RG_INVALID_PARAM); 
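	//The isolation mask is returned from the software shadow maintained by rtk_rg_apollo_portIsolation_set; it is not read back from the ASIC registers.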
isolationSetting->portmask.portmask=rg_db.systemGlobal.portIsolation[isolationSetting->port].portmask; return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_qosInternalPriMapToQueueId_set(int int_pri, int queue_id){ //set group[3]: internal<=>queue mapping. rg only use group[3] rtk_qos_pri2queue_t rtk_pri2qid; ASSERT_EQ(rtk_qos_priMap_get(3, &rtk_pri2qid),RT_ERR_OK); rtk_pri2qid.pri2queue[int_pri]=queue_id; ASSERT_EQ(rtk_qos_priMap_set(3, &rtk_pri2qid),RT_ERR_OK); return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_qosInternalPriMapToQueueId_get(int int_pri, int *pQueue_Id){ rtk_qos_pri2queue_t pri2qid; bzero(&pri2qid,sizeof(pri2qid)); ASSERT_EQ(rtk_qos_priMap_get(3, &pri2qid),RT_ERR_OK); *pQueue_Id=pri2qid.pri2queue[int_pri]; return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_qosInternalPriDecisionByWeight_set(rtk_rg_qos_priSelWeight_t weightOfPriSel){ int i,j; int int_pri_sel_weight[9]={0}; rtk_qos_priSelWeight_t weight; if(weightOfPriSel.weight_of_portBased==0) { WARNING("Weight of port-based priority can not be zero!!!"); RETURN_ERR(RT_ERR_RG_INVALID_PARAM); } memset(&weight,0,sizeof(weight)); //Check whether weight is identical int_pri_sel_weight[0]=weightOfPriSel.weight_of_portBased; int_pri_sel_weight[1]=weightOfPriSel.weight_of_dot1q; int_pri_sel_weight[2]=weightOfPriSel.weight_of_dscp; int_pri_sel_weight[3]=weightOfPriSel.weight_of_acl; int_pri_sel_weight[4]=weightOfPriSel.weight_of_lutFwd; int_pri_sel_weight[5]=weightOfPriSel.weight_of_saBaed; int_pri_sel_weight[6]=weightOfPriSel.weight_of_vlanBased; int_pri_sel_weight[7]=weightOfPriSel.weight_of_svlanBased; int_pri_sel_weight[8]=weightOfPriSel.weight_of_l4Based; for(i=0;i<9;i++) { int comp_weight = int_pri_sel_weight[i]; if(comp_weight==0) continue; for(j=0;j<9;j++) { if(i==j) continue; if(int_pri_sel_weight[j]==comp_weight) { rtlglue_printf("Identical weight is not allow!\n"); RETURN_ERR(RT_ERR_RG_INVALID_PARAM); } } } weight.weight_of_portBased=weightOfPriSel.weight_of_portBased; weight.weight_of_dot1q=weightOfPriSel.weight_of_dot1q; weight.weight_of_dscp=weightOfPriSel.weight_of_dscp; weight.weight_of_acl=weightOfPriSel.weight_of_acl; weight.weight_of_lutFwd=weightOfPriSel.weight_of_lutFwd; weight.weight_of_saBaed=weightOfPriSel.weight_of_saBaed; weight.weight_of_vlanBased=weightOfPriSel.weight_of_vlanBased; weight.weight_of_svlanBased=weightOfPriSel.weight_of_svlanBased; weight.weight_of_l4Based=weightOfPriSel.weight_of_l4Based; { //Patch for B-cut SVLAN setting #if 0 int svlan_weight = weight.weight_of_dot1q+1; if(weight.weight_of_svlanBased <= weight.weight_of_dot1q) { i=0; while(i<9) { if(i==7) i++; if(svlan_weight==int_pri_sel_weight[i]) { svlan_weight+=1; i=0; continue; } i++; } } weight.weight_of_svlanBased=svlan_weight; /* If we setup SVLAN tpid as CVLAN ether type(0x8100),the priority source weight of svlan must be higher than weight of cvlan. Otherwise, ASIC will decide priority of svlan-tagged packet as cvlan source priority and QoS would work incorrectly. 
*/ if(weight.weight_of_svlanBased <= weight.weight_of_dot1q) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); #endif } ASSERT_EQ(RTK_QOS_PRISELGROUP_SET(0, &weight),RT_ERR_OK); ASSERT_EQ(RTK_QOS_PRISELGROUP_SET(1, &weight),RT_ERR_OK); return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_qosInternalPriDecisionByWeight_get(rtk_rg_qos_priSelWeight_t *pWeightOfPriSel){ rtk_qos_priSelWeight_t weight; memset(&weight,0,sizeof(weight)); ASSERT_EQ(rtk_qos_priSelGroup_get(0, &weight),RT_ERR_OK); pWeightOfPriSel->weight_of_portBased=weight.weight_of_portBased; pWeightOfPriSel->weight_of_dot1q=weight.weight_of_dot1q; pWeightOfPriSel->weight_of_dscp=weight.weight_of_dscp; pWeightOfPriSel->weight_of_acl=weight.weight_of_acl; pWeightOfPriSel->weight_of_lutFwd=weight.weight_of_lutFwd; pWeightOfPriSel->weight_of_saBaed=weight.weight_of_saBaed; pWeightOfPriSel->weight_of_vlanBased=weight.weight_of_vlanBased; pWeightOfPriSel->weight_of_svlanBased=weight.weight_of_svlanBased; pWeightOfPriSel->weight_of_l4Based=weight.weight_of_l4Based; return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_qosDscpRemapToInternalPri_set(uint32 dscp,uint32 int_pri){ #ifdef CONFIG_DUALBAND_CONCURRENT /*internal-priority CONFIG_DEFAULT_TO_SLAVE_GMAC_PRI is reserved for send packet from master to slave in dual wifi architechture*/ if(int_pri==CONFIG_DEFAULT_TO_SLAVE_GMAC_PRI) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); #endif ASSERT_EQ(RTK_QOS_DSCPPRIREMAPDROUP_SET(0,dscp,int_pri,0),RT_ERR_OK); return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_qosDscpRemapToInternalPri_get(uint32 dscp,uint32 *pInt_pri){ uint32 dp; if((dscp < 0) || (dscp > 63)) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if(pInt_pri==NULL) RETURN_ERR(RT_ERR_RG_NULL_POINTER); //ASSERT_EQ(rtk_qos_dscpPriRemapGroup_get(0,dscp,pIntPri,0),RT_ERR_OK); ASSERT_EQ(rtk_qos_dscpPriRemapGroup_get(0,dscp,pInt_pri,&dp),RT_ERR_OK); return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_qosDot1pPriRemapToInternalPri_set(uint32 dot1p,uint32 int_pri){ #ifdef CONFIG_DUALBAND_CONCURRENT /*internal-priority CONFIG_DEFAULT_TO_SLAVE_GMAC_PRI is reserved for send packet from master to slave in dual wifi architechture*/ if(int_pri==CONFIG_DEFAULT_TO_SLAVE_GMAC_PRI) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); #endif //Patch for PPPoE+VLAN Bug: Using SVLAN setting //#ifdef CONFIG_RG_PPPOE_AND_VALN_ISSUE_PATCH #if defined(CONFIG_RTL9600_SERIES) if((rg_db.systemGlobal.internalSupportMask & RTK_RG_INTERNAL_SUPPORT_BIT0)) { ASSERT_EQ(_rtk_rg_acl_reserved_pppoeCvidIssue_spriRemap2InternalPri(RTK_RG_MAC_PORT_PON,dot1p,int_pri),RT_ERR_OK); } #endif //#endif ASSERT_EQ(RTK_QOS_1PPRIREMAPGROUP_SET(0,dot1p,int_pri,0),RT_ERR_OK); return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_qosDot1pPriRemapToInternalPri_get(uint32 dot1p,uint32 *pInt_pri) { uint32 dp; if((dot1p < 0) || (dot1p > 7)) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if(pInt_pri==NULL) RETURN_ERR(RT_ERR_RG_NULL_POINTER); //ASSERT_EQ(rtk_qos_dscpPriRemapGroup_get(0,dscp,pIntPri,0),RT_ERR_OK); ASSERT_EQ(rtk_qos_1pPriRemapGroup_get(0,dot1p,pInt_pri,&dp),RT_ERR_OK); return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_qosDscpRemarkEgressPortEnableAndSrcSelect_set(rtk_rg_mac_port_idx_t rmk_port,rtk_rg_enable_t rmk_enable, rtk_rg_qos_dscpRmkSrc_t rmk_src_select) { rtk_qos_dscpRmkSrc_t rtk_src_sel=0; if((rmk_port < 0) || (rmk_port >= RTK_RG_MAC_PORT_MAX)) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if((rmk_enable!=RTK_RG_ENABLED) && (rmk_enable!=RTK_RG_DISABLED)) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if((rmk_src_select<0) || (rmk_src_select>=RTK_RG_DSCP_RMK_SRC_END)) 
RETURN_ERR(RT_ERR_RG_INVALID_PARAM); //Configure DSCP remarking enable port if(rmk_enable==RTK_RG_ENABLED) ASSERT_EQ(RTK_QOS_DSCPREMARKENABLE_SET(rmk_port,ENABLED),RT_ERR_OK); else ASSERT_EQ(RTK_QOS_DSCPREMARKENABLE_SET(rmk_port,DISABLED),RT_ERR_OK); //Configure DSCP remarking source select switch(rmk_src_select) { case RTK_RG_DSCP_RMK_SRC_INT_PRI: rtk_src_sel=DSCP_RMK_SRC_INT_PRI; break; case RTK_RG_DSCP_RMK_SRC_DSCP: rtk_src_sel=DSCP_RMK_SRC_DSCP; break; default: break; } ASSERT_EQ(RTK_QOS_PORTDSCPREMARKSRCSEL_SET(rmk_port,rtk_src_sel),RT_ERR_OK); //sync to rg_db for fwdEngine using. if(rmk_enable==RTK_RG_DISABLED){ rg_db.systemGlobal.qosInternalDecision.qosDscpRemarkEgressPortEnableAndSrcSelect[rmk_port] = DISABLED_DSCP_REMARK; }else{ if(rmk_src_select==RTK_RG_DSCP_RMK_SRC_INT_PRI){ rg_db.systemGlobal.qosInternalDecision.qosDscpRemarkEgressPortEnableAndSrcSelect[rmk_port] = ENABLED_DSCP_REMARK_AND_SRC_FROM_INTERNALPRI; }else if(rmk_src_select==RTK_RG_DSCP_RMK_SRC_DSCP){ rg_db.systemGlobal.qosInternalDecision.qosDscpRemarkEgressPortEnableAndSrcSelect[rmk_port] = ENABLED_DSCP_REMARK_AND_SRC_FROM_DSCP; } } //clear shorcut because the shortcut egressDSCP may change. assert_ok(_rtk_rg_shortCut_clear()); return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_qosDscpRemarkEgressPortEnableAndSrcSelect_get(rtk_rg_mac_port_idx_t rmk_port,rtk_rg_enable_t *pRmk_enable, rtk_rg_qos_dscpRmkSrc_t *pRmk_src_select) { rtk_qos_dscpRmkSrc_t rtk_src_sel; rtk_enable_t is_enabled; if((rmk_port < 0) || (rmk_port >= RTK_RG_MAC_PORT_MAX)) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if((pRmk_enable==NULL)||(pRmk_src_select==NULL)) RETURN_ERR(RT_ERR_RG_NULL_POINTER); //Get DSCP remarking enabled port is_enabled=DISABLED; ASSERT_EQ(rtk_qos_dscpRemarkEnable_get(rmk_port,&is_enabled),RT_ERR_OK); *pRmk_enable=is_enabled; //Get DSCP remarking source ASSERT_EQ(rtk_qos_portDscpRemarkSrcSel_get(rmk_port,&rtk_src_sel),RT_ERR_OK); switch(rtk_src_sel) { case DSCP_RMK_SRC_INT_PRI: *pRmk_src_select=RTK_RG_DSCP_RMK_SRC_INT_PRI; break; case DSCP_RMK_SRC_DSCP: *pRmk_src_select=RTK_RG_DSCP_RMK_SRC_DSCP; break; default: RETURN_ERR(RT_ERR_RG_FAILED); } return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_qosDscpRemarkByInternalPri_set(int int_pri,int dscp) { #ifdef CONFIG_DUALBAND_CONCURRENT /*internal-priority CONFIG_DEFAULT_TO_SLAVE_GMAC_PRI is reserved for send packet from master to slave in dual wifi architechture*/ if(int_pri==CONFIG_DEFAULT_TO_SLAVE_GMAC_PRI) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); #endif if((dscp<0) || (dscp>63)) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if((int_pri<0) || (int_pri>7)) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); ASSERT_EQ(rtk_qos_dscpRemarkGroup_set(0,int_pri,0,dscp),RT_ERR_OK); //sync to rg_db for fwdEngine using. rg_db.systemGlobal.qosInternalDecision.qosDscpRemarkByInternalPri[int_pri]=(uint8)dscp; //clear shorcut because the shortcut egressDSCP may change. 
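    /* Typical configuration flow (illustrative sketch only; port and values are examples):
     *   rtk_rg_apollo_qosDscpRemarkEgressPortEnableAndSrcSelect_set(RTK_RG_MAC_PORT_PON,
     *           RTK_RG_ENABLED, RTK_RG_DSCP_RMK_SRC_INT_PRI);   //remark from internal priority
     *   rtk_rg_apollo_qosDscpRemarkByInternalPri_set(5, 46);    //internal priority 5 -> DSCP 46 (EF)
     */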
assert_ok(_rtk_rg_shortCut_clear()); return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_qosDscpRemarkByInternalPri_get(int int_pri,int *pDscp) { if((int_pri<0) || (int_pri>7)) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if(pDscp==NULL) RETURN_ERR(RT_ERR_RG_NULL_POINTER); ASSERT_EQ(rtk_qos_dscpRemarkGroup_get(0,int_pri,0,pDscp),RT_ERR_OK); return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_qosDscpRemarkByDscp_set(int dscp,int rmk_dscp) { if((dscp<0) || (dscp>63)) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if((rmk_dscp<0) || (rmk_dscp>63)) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); ASSERT_EQ(rtk_qos_dscp2DscpRemarkGroup_set(0,dscp,rmk_dscp),RT_ERR_OK); //sync to rg_db for fwdEngine using rg_db.systemGlobal.qosInternalDecision.qosDscpRemarkByDscp[dscp]=(uint8)rmk_dscp; //clear shorcut because the shortcut egressDSCP may change. assert_ok(_rtk_rg_shortCut_clear()); return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_qosDscpRemarkByDscp_get(int dscp,int *pRmk_dscp) { if((dscp<0) || (dscp>63)) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if(pRmk_dscp==NULL) RETURN_ERR(RT_ERR_RG_NULL_POINTER); ASSERT_EQ(rtk_qos_dscp2DscpRemarkGroup_get(0,dscp,pRmk_dscp),RT_ERR_OK); return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_qosDot1pPriRemarkByInternalPriEgressPortEnable_set(rtk_rg_mac_port_idx_t rmk_port, rtk_rg_enable_t rmk_enable) { if((rmk_port < 0) || (rmk_port >= RTK_RG_MAC_PORT_MAX)) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if((rmk_enable!=RTK_RG_ENABLED) && (rmk_enable!=RTK_RG_DISABLED)) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); //Configure 802.1p remarking enable port if(rmk_enable==RTK_RG_ENABLED) ASSERT_EQ(rtk_qos_1pRemarkEnable_set(rmk_port,ENABLED),RT_ERR_OK); else ASSERT_EQ(rtk_qos_1pRemarkEnable_set(rmk_port,DISABLED),RT_ERR_OK); //sync to rg_db for fwdEngine using rg_db.systemGlobal.qosInternalDecision.qosDot1pPriRemarkByInternalPriEgressPortEnable[rmk_port] = rmk_enable; //rearrange the ACL & CF for sync ACL_FWD_TYPE_DIR_INGRESS_OR_EGRESS_L34_UP_STREAMID patch in Pattern intPri ASSERT_EQ(_rtk_rg_aclSWEntry_and_asic_rearrange(),RT_ERR_RG_OK); //clear shorcut because the shortcut egressDSCP may change. 
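    /* Illustrative sketch (example values only): enable 802.1p remarking on the PON port
     * and choose the tag priority written for internal priority 5:
     *   rtk_rg_apollo_qosDot1pPriRemarkByInternalPriEgressPortEnable_set(RTK_RG_MAC_PORT_PON, RTK_RG_ENABLED);
     *   rtk_rg_apollo_qosDot1pPriRemarkByInternalPri_set(5, 5);
     */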
    assert_ok(_rtk_rg_shortCut_clear());
    return (RT_ERR_RG_OK);
}

rtk_rg_err_code_t rtk_rg_apollo_qosDot1pPriRemarkByInternalPriEgressPortEnable_get(rtk_rg_mac_port_idx_t rmk_port, rtk_rg_enable_t *pRmk_enable)
{
    rtk_enable_t rtk_enable;
    if((rmk_port < 0) || (rmk_port >= RTK_RG_MAC_PORT_MAX)) RETURN_ERR(RT_ERR_RG_INVALID_PARAM);
    if(pRmk_enable==NULL) RETURN_ERR(RT_ERR_RG_NULL_POINTER);

    //Get 802.1p remarking enabled port
    ASSERT_EQ(rtk_qos_1pRemarkEnable_get(rmk_port,&rtk_enable),RT_ERR_OK);
    if(rtk_enable==ENABLED)
        *pRmk_enable=RTK_RG_ENABLED;
    else
        *pRmk_enable=RTK_RG_DISABLED;

    return (RT_ERR_RG_OK);
}

rtk_rg_err_code_t rtk_rg_apollo_qosDot1pPriRemarkByInternalPri_set(int int_pri,int rmk_dot1p)
{
#ifdef CONFIG_DUALBAND_CONCURRENT
    /* Internal priority CONFIG_DEFAULT_TO_SLAVE_GMAC_PRI is reserved for sending packets from master to slave in the dual-WiFi architecture */
    if(int_pri==CONFIG_DEFAULT_TO_SLAVE_GMAC_PRI) RETURN_ERR(RT_ERR_RG_INVALID_PARAM);
#endif
    if((rmk_dot1p<0) || (rmk_dot1p>7)) RETURN_ERR(RT_ERR_RG_INVALID_PARAM);
    if((int_pri<0) || (int_pri>7)) RETURN_ERR(RT_ERR_RG_INVALID_PARAM);

    ASSERT_EQ(rtk_qos_1pRemarkGroup_set(0,int_pri,0,rmk_dot1p),RT_ERR_OK);

    //record to rg_db for fwdEngine using
    rg_db.systemGlobal.qosInternalDecision.qosDot1pPriRemarkByInternalPri[int_pri]=rmk_dot1p;

#if defined(CONFIG_XDSL_NEW_HWNAT_DRIVER)
#else
    //rearrange the ACL & CF for sync ACL_FWD_TYPE_DIR_INGRESS_OR_EGRESS_L34_UP_STREAMID patch in Pattern intPri
    ASSERT_EQ(_rtk_rg_aclSWEntry_and_asic_rearrange(),RT_ERR_RG_OK);
#endif

    //clear shortcut because the shortcut egressDSCP may change.
    assert_ok(_rtk_rg_shortCut_clear());
    return (RT_ERR_RG_OK);
}

rtk_rg_err_code_t rtk_rg_apollo_qosDot1pPriRemarkByInternalPri_get(int int_pri,int *pRmk_dot1p)
{
    if((int_pri<0) || (int_pri>7)) RETURN_ERR(RT_ERR_RG_INVALID_PARAM);
    if(pRmk_dot1p==NULL) RETURN_ERR(RT_ERR_RG_NULL_POINTER);
    ASSERT_EQ(rtk_qos_1pRemarkGroup_get(0,int_pri,0,pRmk_dot1p),RT_ERR_OK);
    return (RT_ERR_RG_OK);
}

rtk_rg_err_code_t rtk_rg_apollo_qosPortBasedPriority_set(rtk_rg_mac_port_idx_t port_idx,uint32 int_pri)
{
#ifdef CONFIG_DUALBAND_CONCURRENT
    /* Internal priority CONFIG_DEFAULT_TO_SLAVE_GMAC_PRI is reserved for sending packets from master to slave in the dual-WiFi architecture */
    if(int_pri==CONFIG_DEFAULT_TO_SLAVE_GMAC_PRI) RETURN_ERR(RT_ERR_RG_INVALID_PARAM);
#endif
    if(port_idx < RTK_RG_MAC_PORT0 || port_idx >= RTK_RG_MAC_PORT_MAX) RETURN_ERR(RT_ERR_RG_INVALID_PARAM);
    if((int_pri < 0) || (int_pri > 7)) RETURN_ERR(RT_ERR_RG_INVALID_PARAM);

#if defined(CONFIG_RTL9600_SERIES) || defined(CONFIG_RTL9601B_SERIES)
    if((port_idx<RTK_RG_MAC_PORT_PON) && (int_pri!=0))
    {
        int ret;
        WARNING("Setting port[0]~port[3] with a non-zero port-based priority makes the RG_ACL egress_ctag_pri pattern useless!!!");
        ret = _rtk_rg_AclEgressPriorityPattern_Check();
        if(ret==FAIL) RETURN_ERR(RT_ERR_RG_FAILED);
    }
#endif

    ASSERT_EQ(RTK_QOS_PORTPRI_SET(port_idx, int_pri),RT_ERR_OK);
    return (RT_ERR_RG_OK);
}

rtk_rg_err_code_t rtk_rg_apollo_qosPortBasedPriority_get(rtk_rg_mac_port_idx_t port_idx,uint32 *pInt_pri)
{
    if(port_idx < 0 || port_idx >= RTK_RG_MAC_PORT_MAX) RETURN_ERR(RT_ERR_RG_INVALID_PARAM);
    ASSERT_EQ(rtk_qos_portPri_get(port_idx, pInt_pri),RT_ERR_OK);
    return (RT_ERR_RG_OK);
}

rtk_rg_err_code_t rtk_rg_apollo_portBasedCVlanId_set(rtk_rg_port_idx_t port_idx,int pvid)
{
    if(port_idx < 0 || port_idx >= RTK_RG_PORT_MAX) RETURN_ERR(RT_ERR_RG_INVALID_PARAM);
    if(pvid<=0 || pvid>=4095) RETURN_ERR(RT_ERR_RG_INVALID_PARAM);
    if(rg_db.vlan[pvid].valid==0) RETURN_ERR(RT_ERR_RG_ENTRY_NOT_EXIST);

    if(port_idx<=RTK_RG_PORT_CPU)
    {
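        /* Note: when port_idx==RTK_RG_PORT_CPU this branch and the extension-port branch
         * below both match, so the MAC-port PVID register and extension-port entry 0
         * (port_idx - RTK_RG_PORT_CPU) are both programmed, as in rtk_rg_apollo_portIsolation_set(). */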
ASSERT_EQ(RTK_VLAN_PORTPVID_SET(port_idx,pvid),RT_ERR_OK); } if(port_idx>=RTK_RG_PORT_CPU) { ASSERT_EQ(RTK_VLAN_EXTPORTPVID_SET(port_idx-RTK_RG_PORT_CPU,pvid),RT_ERR_OK); } //20160308LUKE: Clear shorcut when PVID changes, otherwise untag packet would follow old decision. assert_ok(_rtk_rg_shortCut_clear()); return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_portBasedCVlanId_get(rtk_rg_port_idx_t port_idx,int *pPvid) { if(port_idx < 0 || port_idx >= RTK_RG_PORT_MAX) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if(pPvid==NULL) RETURN_ERR(RT_ERR_RG_NULL_POINTER); if(port_idx<RTK_RG_PORT_CPU) { ASSERT_EQ(rtk_vlan_portPvid_get(port_idx,pPvid),RT_ERR_OK); } else { ASSERT_EQ(rtk_vlan_extPortPvid_get(port_idx-RTK_RG_PORT_CPU,pPvid),RT_ERR_OK); } return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_wlanDevBasedCVlanId_set(int wlan_idx,int dev_idx,int dvid) { //Check parameter if(dev_idx<0 || dev_idx>=MAX_WLAN_DEVICE_NUM)RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if(dvid<0 || dvid>=4095)RETURN_ERR(RT_ERR_RG_ENTRY_NOT_EXIST); if(rg_db.vlan[dvid].valid==0)RETURN_ERR(RT_ERR_RG_ENTRY_NOT_EXIST); //Set to WLAN's device DVID //20140708LUKE:we support WLAN0 only right now if(wlan_idx==0) { #ifdef CONFIG_MASTER_WLAN0_ENABLE if(!rg_db.systemGlobal.wlan0BindDecision[dev_idx].exist)RETURN_ERR(RT_ERR_RG_ENTRY_NOT_EXIST); rg_db.systemGlobal.wlan0DeviceBasedVID[dev_idx]=dvid; #endif } //20160308LUKE: Clear shorcut when SSID-based VID changes, otherwise untag packet would follow old decision. assert_ok(_rtk_rg_shortCut_clear()); return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_wlanDevBasedCVlanId_get(int wlan_idx,int dev_idx,int *pDvid) { int dvid=0; //Check parameter if(pDvid==NULL)RETURN_ERR(RT_ERR_RG_NULL_POINTER); if(dev_idx<0 || dev_idx>=MAX_WLAN_DEVICE_NUM)RETURN_ERR(RT_ERR_RG_ENTRY_NOT_EXIST); //Get WLAN's device DVID //20140708LUKE:we support WLAN0 only right now if(wlan_idx==0) { #ifdef CONFIG_MASTER_WLAN0_ENABLE if(!rg_db.systemGlobal.wlan0BindDecision[dev_idx].exist)RETURN_ERR(RT_ERR_RG_ENTRY_NOT_EXIST); dvid=rg_db.systemGlobal.wlan0DeviceBasedVID[dev_idx]; #endif } *pDvid=dvid; return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_gatewayServicePortRegister_add(rtk_rg_gatewayServicePortEntry_t *serviceEntry, int *index){ int i; if(serviceEntry==NULL || index==NULL) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); if(serviceEntry->valid==DISABLED) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); //check if the entry exist. 
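    //The table is a flat array scanned linearly: a (port_num, type) pair that already exists
    //is rejected, otherwise the first free slot is taken and its index is returned to the
    //caller (the same index is later used by _del and _find).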
for(i=0;i<MAX_GATEWAYSERVICEPORT_TABLE_SIZE;i++){ if(rg_db.gatewayServicePortEntry[i].valid==ENABLED){ if(rg_db.gatewayServicePortEntry[i].port_num==serviceEntry->port_num && rg_db.gatewayServicePortEntry[i].type==serviceEntry->type){ RETURN_ERR(RT_ERR_RG_ENTRY_EXIST); } } } //add the entry in empty entry, and return index for(i=0;i<MAX_GATEWAYSERVICEPORT_TABLE_SIZE;i++){ if(rg_db.gatewayServicePortEntry[i].valid==DISABLED){ rg_db.gatewayServicePortEntry[i].valid=ENABLED; rg_db.gatewayServicePortEntry[i].port_num=serviceEntry->port_num; rg_db.gatewayServicePortEntry[i].type=serviceEntry->type; *index = i; return (RT_ERR_RG_OK); } } RETURN_ERR(RT_ERR_RG_ENTRY_FULL); } rtk_rg_err_code_t rtk_rg_apollo_gatewayServicePortRegister_del(int index){ //the deleted entry is not valid if(rg_db.gatewayServicePortEntry[index].valid==DISABLED){ RETURN_ERR(RT_ERR_RG_ENTRY_NOT_FOUND); } bzero(&rg_db.gatewayServicePortEntry[index],sizeof(rtk_rg_gatewayServicePortEntry_t)); return (RT_ERR_RG_OK); } rtk_rg_err_code_t rtk_rg_apollo_gatewayServicePortRegister_find(rtk_rg_gatewayServicePortEntry_t *serviceEntry, int *index){ //index==-1, find by serviceEntry and return index //index > 0 , find by index and return serviceEntry int i; if(*index==-1){ for(i=0;i<MAX_GATEWAYSERVICEPORT_TABLE_SIZE;i++){ if(rg_db.gatewayServicePortEntry[i].valid==ENABLED && rg_db.gatewayServicePortEntry[i].port_num==serviceEntry->port_num && rg_db.gatewayServicePortEntry[i].type == serviceEntry->type){ *index = i; return (RT_ERR_RG_OK); } } return (RT_ERR_RG_ENTRY_NOT_FOUND); }else{ if((*index < 0)|| (*index >MAX_GATEWAYSERVICEPORT_TABLE_SIZE)){ RETURN_ERR(RT_ERR_RG_INVALID_PARAM); } memcpy(serviceEntry,&rg_db.gatewayServicePortEntry[*index],sizeof(rtk_rg_gatewayServicePortEntry_t)); return (RT_ERR_RG_OK); } return (RT_ERR_RG_ENTRY_NOT_FOUND); } rtk_rg_err_code_t rtk_rg_apollo_staticRoute_add(rtk_rg_staticRoute_t *pStaticRoute, int *index){ int ret; int sridx,rtidx,nxtidx,l2idx=-1,ipidx=-1,first_invalid=-1; int nxtip_rtidx,nxtip_intfidx,nxtip_vlanId; int arpIdx=-1,arp_miss=0; int neighborIdx=-1,neighbor_miss=0; unsigned int input_ipmsk; rtk_rg_table_staticRoute_t *pSrEntry; rtk_l34_routing_entry_t rtEntry; rtk_ipv6Routing_entry_t v6rtEntry; rtk_l34_nexthop_entry_t nxpEntry; rtk_l34_ext_intip_entry_t extipEntry; rtk_rg_macEntry_t macEntry; rtk_rg_ipv4RoutingEntry_t cb_routeEntry; rtk_rg_arpInfo_t arpInfo; rtk_rg_neighborInfo_t neighborInfo; rtk_rg_ipv6RoutingEntry_t cb_routv6Entry; if(pStaticRoute==NULL || index==NULL) RETURN_ERR(RT_ERR_RG_NULL_POINTER); for(sridx=0;sridx<MAX_STATIC_ROUTE_SIZE;sridx++){ if(rg_db.staticRoute[sridx].valid){ //Check redundancy if(rg_db.staticRoute[sridx].info.ip_version && pStaticRoute->ip_version){ int r_byte=rg_db.staticRoute[sridx].info.ipv6.mask_length>>3; //remainder byte int r_bit=8-(rg_db.staticRoute[sridx].info.ipv6.mask_length-((rg_db.staticRoute[sridx].info.ipv6.mask_length>>3)<<3)); if(rg_db.staticRoute[sridx].info.ipv6.mask_length!=pStaticRoute->ipv6.mask_length)continue; if(memcmp(rg_db.staticRoute[sridx].info.ipv6.addr.ipv6_addr,pStaticRoute->ipv6.addr.ipv6_addr,r_byte))continue; if((rg_db.staticRoute[sridx].info.ipv6.addr.ipv6_addr[r_byte]>>r_bit)^(pStaticRoute->ipv6.addr.ipv6_addr[r_byte]>>r_bit))continue; }else if(!rg_db.staticRoute[sridx].info.ip_version && !pStaticRoute->ip_version){ if(rg_db.staticRoute[sridx].info.ipv4.mask!=pStaticRoute->ipv4.mask)continue; 
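            //Same mask: compare the network portions next; a match means this destination
            //prefix is already covered by an existing static route and is rejected below.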
if((rg_db.staticRoute[sridx].info.ipv4.addr&rg_db.staticRoute[sridx].info.ipv4.mask)^(pStaticRoute->ipv4.addr&pStaticRoute->ipv4.mask))continue; }else if(rg_db.staticRoute[sridx].info.ip_version!=pStaticRoute->ip_version)continue; WARNING("Add redundant static route which same as idx[%d]!!",sridx); RETURN_ERR(RT_ERR_RG_SR_REDUNDANT); }else if(first_invalid<0) first_invalid=sridx; //keep first invalid index for use } if(first_invalid<0)RETURN_ERR(RT_ERR_RG_ENTRY_FULL); //no static route entry sridx=first_invalid; pSrEntry=&rg_db.staticRoute[sridx]; //Check for empty nexthop entry for(nxtidx=0;nxtidx<MAX_NEXTHOP_SW_TABLE_SIZE && rg_db.systemGlobal.nxpRefCount[nxtidx];nxtidx++); if(nxtidx==MAX_NEXTHOP_SW_TABLE_SIZE)RETURN_ERR(RT_ERR_RG_ENTRY_FULL); //no nexthop entry bzero(&nxpEntry,sizeof(rtk_l34_nexthop_entry_t)); if(pStaticRoute->ip_version){//v6 for(rtidx=0; rtidx<MAX_IPV6_ROUTING_SW_TABLE_SIZE && rg_db.v6route[rtidx].rtk_v6route.valid; rtidx++) {if(rtidx == V6_HW_DEFAULT_ROUTE_IDX) continue;} if(rtidx==MAX_IPV6_ROUTING_SW_TABLE_SIZE )RETURN_ERR(RT_ERR_RG_ENTRY_FULL); //Check nexthop IPv6 belong to which interface nxtip_rtidx=_rtk_rg_v6L3lookup(pStaticRoute->ipv6.nexthop.ipv6_addr); if(nxtip_rtidx>=0){ if(rg_db.v6route[nxtip_rtidx].rtk_v6route.type==L34_IPV6_ROUTE_TYPE_LOCAL){ nxtip_intfidx=rg_db.v6route[nxtip_rtidx].rtk_v6route.nhOrIfidIdx; if(pStaticRoute->nexthop_mac_auto_learn){ //find Neighbor to get l2Idx memcpy(neighborInfo.neighborEntry.interfaceId,&pStaticRoute->ipv6.nexthop.ipv6_addr[8],8); neighborInfo.neighborEntry.matchRouteIdx=nxtip_rtidx; ret=rtk_rg_apollo_neighborEntry_find(&neighborInfo,&neighborIdx); if(ret==RT_ERR_RG_NO_MORE_ENTRY_FOUND || ret==RT_ERR_RG_NEIGHBOR_NOT_FOUND){ TRACE("can't find Neighbor...miss!!"); neighbor_miss=1; }else l2idx=neighborInfo.neighborEntry.l2Idx; } }else if(rg_db.v6route[nxtip_rtidx].rtk_v6route.type==L34_IPV6_ROUTE_TYPE_TRAP){ WARNING("Static route to CPU is not supported."); RETURN_ERR(RT_ERR_RG_SR_TO_CPU); //not support route to cpu }else if(rg_db.v6route[nxtip_rtidx].rtk_v6route.type==L34_IPV6_ROUTE_TYPE_DROP){ WARNING("Static route to DROP is not supported."); RETURN_ERR(RT_ERR_RG_SR_TO_DROP); //not support route to drop }else{ //L34_PROCESS_NH nxtidx=rg_db.v6route[nxtip_rtidx].rtk_v6route.nhOrIfidIdx; nxtip_intfidx=rg_db.nexthop[nxtidx].rtk_nexthop.ifIdx; } }else{ WARNING("v6 routing not found."); RETURN_ERR(RT_ERR_RG_SR_ROUTE_NOT_FOUND); //not support route to cpu } }else{//v4 for(rtidx=0; rtidx<MAX_L3_SW_TABLE_SIZE && rg_db.l3[rtidx].rtk_l3.valid; rtidx++){if(rtidx== V4_DEFAULT_ROUTE_IDX) continue;} if(rtidx==MAX_L3_SW_TABLE_SIZE )RETURN_ERR(RT_ERR_RG_ENTRY_FULL); //no v4routing entry //Check nexthop IP belong to which interface nxtip_rtidx=_rtk_rg_l3lookup(pStaticRoute->ipv4.nexthop); if(rg_db.l3[nxtip_rtidx].rtk_l3.process==L34_PROCESS_CPU){ if(rg_db.systemGlobal.interfaceInfo[rg_db.l3[nxtip_rtidx].rtk_l3.netifIdx].valid && rg_db.systemGlobal.interfaceInfo[rg_db.l3[nxtip_rtidx].rtk_l3.netifIdx].storedInfo.is_wan && (rg_db.systemGlobal.interfaceInfo[rg_db.l3[nxtip_rtidx].rtk_l3.netifIdx].storedInfo.wan_intf.wan_intf_conf.wan_type==RTK_RG_PPTP || rg_db.systemGlobal.interfaceInfo[rg_db.l3[nxtip_rtidx].rtk_l3.netifIdx].storedInfo.wan_intf.wan_intf_conf.wan_type==RTK_RG_L2TP || rg_db.systemGlobal.interfaceInfo[rg_db.l3[nxtip_rtidx].rtk_l3.netifIdx].storedInfo.wan_intf.wan_intf_conf.wan_type==RTK_RG_DSLITE || 
rg_db.systemGlobal.interfaceInfo[rg_db.l3[nxtip_rtidx].rtk_l3.netifIdx].storedInfo.wan_intf.wan_intf_conf.wan_type==RTK_RG_PPPoE_DSLITE)){ nxtip_intfidx=rg_db.nexthop[rg_db.l3[nxtip_rtidx].rtk_l3.nhStart].rtk_nexthop.ifIdx; }else if(rg_db.l3[nxtip_rtidx].rtk_l3.ipAddr > 0){ nxtip_intfidx=rg_db.l3[nxtip_rtidx].rtk_l3.netifIdx; if(pStaticRoute->nexthop_mac_auto_learn){ //find swARP to get l2Idx arpInfo.arpEntry.ipv4Addr=pStaticRoute->ipv4.nexthop; if(rtk_rg_apollo_arpEntry_find(&arpInfo,&arpIdx)==RT_ERR_RG_NO_MORE_ENTRY_FOUND){ TRACE("can't find swARP...miss!!"); arp_miss=1; }else l2idx=arpInfo.arpEntry.macEntryIdx; } }else{ WARNING("Static route to CPU is not supported."); RETURN_ERR(RT_ERR_RG_SR_TO_CPU); //not support route to cpu } }else if(rg_db.l3[nxtip_rtidx].rtk_l3.process==L34_PROCESS_DROP){ WARNING("Static route to DROP is not supported."); RETURN_ERR(RT_ERR_RG_SR_TO_DROP); //not support route to drop }else if(rg_db.l3[nxtip_rtidx].rtk_l3.process==L34_PROCESS_ARP){ nxtip_intfidx=rg_db.l3[nxtip_rtidx].rtk_l3.netifIdx; if(pStaticRoute->nexthop_mac_auto_learn){ //find ARP to get l2Idx arpInfo.arpEntry.ipv4Addr=pStaticRoute->ipv4.nexthop; if(rtk_rg_apollo_arpEntry_find(&arpInfo,&arpIdx)==RT_ERR_RG_NO_MORE_ENTRY_FOUND){ TRACE("can't find ARP...miss!!"); arp_miss=1; }else l2idx=arpInfo.arpEntry.macEntryIdx; } }else{ //L34_PROCESS_NH nxtidx=rg_db.l3[nxtip_rtidx].rtk_l3.nhStart; nxtip_intfidx=rg_db.nexthop[nxtidx].rtk_nexthop.ifIdx; } //create ip table for NAPT mode if(rg_db.systemGlobal.interfaceInfo[nxtip_intfidx].storedInfo.is_wan && rg_db.systemGlobal.interfaceInfo[nxtip_intfidx].p_wanStaticInfo->napt_enable){ for(ipidx=0; ipidx<MAX_EXTIP_SW_TABLE_SIZE && rg_db.extip[ipidx].rtk_extip.valid; ipidx++); if(ipidx==MAX_EXTIP_SW_TABLE_SIZE)RETURN_ERR(RT_ERR_RG_ENTRY_FULL); //no extIP entry bzero(&extipEntry, sizeof(rtk_l34_ext_intip_entry_t)); extipEntry.nhIdx=nxtidx; //from static route, not original WAN extipEntry.intIpAddr=0; extipEntry.extIpAddr=rg_db.systemGlobal.interfaceInfo[nxtip_intfidx].p_wanStaticInfo->ip_addr; extipEntry.prival=0; extipEntry.pri=0; extipEntry.type=L34_EXTIP_TYPE_NAPT; extipEntry.valid=1; ASSERT_EQ(RTK_L34_EXTINTIPTABLE_SET(ipidx, &extipEntry),RT_ERR_OK); } } if(l2idx>=0)goto COMPLETE_MAC; if(rg_db.systemGlobal.nxpRefCount[nxtidx])goto COMPLETE_NEXTHOP; if(pStaticRoute->nexthop_mac_auto_learn){ if(arp_miss){ //ipv4 ARP_miss rg_db.systemGlobal.staticRouteArpReq[sridx].finished=0; rg_db.systemGlobal.staticRouteArpReq[sridx].reqIp=pStaticRoute->ipv4.nexthop; rg_db.systemGlobal.staticRouteArpReq[sridx].gwMacReqCallBack=_rtk_rg_internal_STATICROUTEMACSetup; #ifdef __KERNEL__ if(timer_pending(&rg_kernel.staticRouteArpOrNBReqTimer[sridx])) del_timer(&rg_kernel.staticRouteArpOrNBReqTimer[sridx]); init_timer(&rg_kernel.staticRouteArpOrNBReqTimer[sridx]); rg_kernel.staticRouteArpOrNBReqTimer[sridx].data = (unsigned long)sridx; rg_kernel.staticRouteArpOrNBReqTimer[sridx].function = _rtk_rg_staticRouteArpOrNbReqTimerFunc; rg_kernel.staticRouteArpOrNBTimerCounter[sridx]=0; DEBUG("arp miss, request arp=%x\n",rg_db.systemGlobal.staticRouteArpReq[sridx].reqIp); mod_timer(&rg_kernel.staticRouteArpOrNBReqTimer[sridx], jiffies+(rg_db.systemGlobal.arp_requset_interval_sec*TICKTIME_PERIOD)); #endif l2idx=rg_db.systemGlobal.defaultTrapLUTIdx; goto COMPLETE_MAC; }else if(neighbor_miss){ //ipv6 Neighbor_miss rg_db.systemGlobal.staticRouteNBDiscovery[sridx].finished=0; memcpy(&rg_db.systemGlobal.staticRouteNBDiscovery[sridx].reqIp,&pStaticRoute->ipv6.nexthop,sizeof(rtk_ipv6_addr_t)); 
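            //Neighbor not resolved yet: register the resolve callback, arm the periodic
            //neighbor-solicitation timer below, and temporarily point the route at the
            //default trap LUT entry until the gateway MAC is learned.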
rg_db.systemGlobal.staticRouteNBDiscovery[sridx].ipv6GwMacReqCallBack=_rtk_rg_internal_STATICROUTEV6MACSetup; #ifdef __KERNEL__ if(timer_pending(&rg_kernel.staticRouteArpOrNBReqTimer[sridx])) del_timer(&rg_kernel.staticRouteArpOrNBReqTimer[sridx]); init_timer(&rg_kernel.staticRouteArpOrNBReqTimer[sridx]); rg_kernel.staticRouteArpOrNBReqTimer[sridx].data = (unsigned long)(sridx); rg_kernel.staticRouteArpOrNBReqTimer[sridx].function = _rtk_rg_staticRouteArpOrNbReqTimerFunc; rg_kernel.staticRouteArpOrNBTimerCounter[sridx]=0; DEBUG("neighbor miss, discovery neighbor =%08x:%08x:%08x:%08x\n",*(unsigned int *)(rg_db.systemGlobal.staticRouteNBDiscovery[sridx].reqIp.ipv6_addr), *(unsigned int *)(rg_db.systemGlobal.staticRouteNBDiscovery[sridx].reqIp.ipv6_addr+4), *(unsigned int *)(rg_db.systemGlobal.staticRouteNBDiscovery[sridx].reqIp.ipv6_addr+8), *(unsigned int *)(rg_db.systemGlobal.staticRouteNBDiscovery[sridx].reqIp.ipv6_addr+12)); mod_timer(&rg_kernel.staticRouteArpOrNBReqTimer[sridx], jiffies+(rg_db.systemGlobal.arp_requset_interval_sec*TICKTIME_PERIOD)); #endif l2idx=rg_db.systemGlobal.defaultTrapLUTIdx; goto COMPLETE_MAC; } } //create LUT for nexthop mac address if(rg_db.systemGlobal.interfaceInfo[nxtip_intfidx].storedInfo.is_wan) nxtip_vlanId=rg_db.systemGlobal.interfaceInfo[nxtip_intfidx].storedInfo.wan_intf.wan_intf_conf.egress_vlan_id; else nxtip_vlanId=rg_db.systemGlobal.interfaceInfo[nxtip_intfidx].storedInfo.lan_intf.intf_vlan_id; memcpy(macEntry.mac.octet,pStaticRoute->nexthop_mac.octet,ETHER_ADDR_LEN); macEntry.isIVL=rg_db.vlan[nxtip_vlanId].fidMode==VLAN_FID_IVL?1:0; macEntry.fid=rg_db.vlan[nxtip_vlanId].fid; macEntry.vlan_id=nxtip_vlanId; #if defined(CONFIG_RTL9600_SERIES) || defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) if(pStaticRoute->nexthop_port>=RTK_RG_PORT_CPU){ if(rg_db.vlan[nxtip_vlanId].UntagPortmask.bits[0]&(0x1<<RTK_RG_PORT_CPU)) macEntry.vlan_id=0; }else{ if(rg_db.vlan[nxtip_vlanId].UntagPortmask.bits[0]&(0x1<<pStaticRoute->nexthop_port)) macEntry.vlan_id=0; } #else // support ctag_if if(pStaticRoute->nexthop_port>=RTK_RG_PORT_CPU){ if(rg_db.vlan[nxtip_vlanId].UntagPortmask.bits[0]&(0x1<<RTK_RG_PORT_CPU)) macEntry.ctag_if=0; else macEntry.ctag_if=1; }else{ if(rg_db.vlan[nxtip_vlanId].UntagPortmask.bits[0]&(0x1<<pStaticRoute->nexthop_port)) macEntry.ctag_if=0; else macEntry.ctag_if=1; } #endif macEntry.port_idx=pStaticRoute->nexthop_port; macEntry.arp_used=1; macEntry.static_entry=1; macEntry.fix_l34_vlan=1; macEntry.auth=0; if(rtk_rg_apollo_macEntry_add(&macEntry,&l2idx)) RETURN_ERR(RT_ERR_RG_SR_MAC_FAILED); COMPLETE_MAC: nxpEntry.type=L34_NH_ETHER; nxpEntry.keepPppoe=1; nxpEntry.pppoeIdx=0; if(rg_db.systemGlobal.interfaceInfo[nxtip_intfidx].storedInfo.is_wan){ if(rg_db.systemGlobal.interfaceInfo[nxtip_intfidx].storedInfo.wan_intf.wan_intf_conf.wan_type==RTK_RG_PPPoE|| rg_db.systemGlobal.interfaceInfo[nxtip_intfidx].storedInfo.wan_intf.wan_intf_conf.wan_type==RTK_RG_PPPoE_DSLITE){ nxpEntry.type=L34_NH_PPPOE; #if defined(CONFIG_RTL9602C_SERIES) nxpEntry.keepPppoe=2; /* If original tagged, keep. 
Otherwise add tag with PPPIDX session id */ #else nxpEntry.keepPppoe=0; #endif nxpEntry.pppoeIdx=rg_db.systemGlobal.interfaceInfo[nxtip_intfidx].storedInfo.wan_intf.pppoe_idx; } } //Store all information in each table entry nxpEntry.ifIdx=nxtip_intfidx; nxpEntry.nhIdx=l2idx; rg_db.nexthop[nxtidx].valid=1; ret = RTK_L34_NEXTHOPTABLE_SET(nxtidx, &nxpEntry); if(ret!=RT_ERR_OK)goto RET_NEXTHOP_ERR; COMPLETE_NEXTHOP: if(pStaticRoute->ip_version){ //Set up v6Routing table bzero(&v6rtEntry, sizeof(rtk_ipv6Routing_entry_t)); v6rtEntry.valid=1; v6rtEntry.type=L34_IPV6_ROUTE_TYPE_GLOBAL; memcpy(&v6rtEntry.ipv6Addr,&pStaticRoute->ipv6.addr,sizeof(rtk_ipv6_addr_t)); v6rtEntry.ipv6PrefixLen=pStaticRoute->ipv6.mask_length; v6rtEntry.nhOrIfidIdx=nxtidx; v6rtEntry.rt2waninf=rg_db.v6route[nxtip_rtidx].rtk_v6route.rt2waninf; ret = RTK_L34_IPV6ROUTINGTABLE_SET(rtidx, &v6rtEntry); if(ret!=RT_ERR_OK)goto RET_ROUTING_ERR; rg_db.v6route[rtidx].internal=rg_db.v6route[nxtip_rtidx].internal; //routing or NAPT rg_db.systemGlobal.nxpRefCount[nxtidx]++; //for v6routing refer to it rg_db.systemGlobal.nxpRefCount[nxtidx]++; //for static-route entry refer to it // TODO:Call the initParam's v6RoutingAddByHwCallBack if(rg_db.systemGlobal.initParam.v6RoutingAddByHwCallBack!=NULL) { bzero(&cb_routv6Entry, sizeof(rtk_rg_ipv6RoutingEntry_t)); memcpy(&cb_routv6Entry.dest_ip,&pStaticRoute->ipv6.addr,sizeof(rtk_ipv6_addr_t)); cb_routv6Entry.prefix_len=pStaticRoute->ipv6.mask_length; cb_routv6Entry.NhOrIntfIdx=nxtidx; cb_routv6Entry.type=L34_IPV6_ROUTE_TYPE_GLOBAL; rg_db.systemGlobal.initParam.v6RoutingAddByHwCallBack(&cb_routv6Entry); } }else{ //Set up Routing table bzero(&rtEntry, sizeof(rtk_l34_routing_entry_t)); rtEntry.valid=1; rtEntry.process=L34_PROCESS_NH; rtEntry.ipAddr=pStaticRoute->ipv4.addr; rtEntry.internal=rg_db.l3[nxtip_rtidx].rtk_l3.internal; //routing or NAPT rtEntry.rt2waninf=rg_db.l3[nxtip_rtidx].rtk_l3.rt2waninf; input_ipmsk=pStaticRoute->ipv4.mask; // TODO:if load-balance is needed, here should be changed rtEntry.nhStart=nxtidx; rtEntry.nhNxt=nxtidx; rtEntry.nhNum=0; //exect Next hop number 1,2,4,8,16 rtEntry.nhAlgo=0; //PER-PACKET rtEntry.ipDomain=6; //Entry 0~7 RG_ONE_COUNT(input_ipmsk); rtEntry.ipMask=input_ipmsk-1; ret = RTK_L34_ROUTINGTABLE_SET(rtidx, &rtEntry); if(ret!=RT_ERR_OK)goto RET_ROUTING_ERR; rg_db.systemGlobal.nxpRefCount[nxtidx]++; //for routing refer to it rg_db.systemGlobal.nxpRefCount[nxtidx]++; //for static-route entry refer to it // TODO:Call the initParam's routngAddByHwCallBack if(rg_db.systemGlobal.initParam.routingAddByHwCallBack!=NULL) { bzero(&cb_routeEntry, sizeof(rtk_rg_ipv4RoutingEntry_t)); cb_routeEntry.dest_ip=pStaticRoute->ipv4.addr; cb_routeEntry.ip_mask=pStaticRoute->ipv4.mask; cb_routeEntry.nexthop=nxtidx; cb_routeEntry.wan_intf_idx=nxtip_intfidx; rg_db.systemGlobal.initParam.routingAddByHwCallBack(&cb_routeEntry); } } if( (((ipidx >= 0)&&(ipidx <MAX_EXTIP_SW_TABLE_SIZE)) && rg_db.extip[ipidx].valid==SOFTWARE_ONLY_ENTRY) || (rg_db.nexthop[nxtidx].valid==SOFTWARE_ONLY_ENTRY) || ((nxtip_intfidx>=MAX_NETIF_HW_TABLE_SIZE)&&(nxtip_intfidx<MAX_NETIF_SW_TABLE_SIZE)) || (rg_db.l3[rtidx].valid==SOFTWARE_ONLY_ENTRY)) { if(rg_db.staticRoute[sridx].valid !=SOFTWARE_ONLY_ENTRY) { rtk_rg_aclAndCf_reserved_dip_mask_trap_t dip_mask_trap; bzero(&dip_mask_trap,sizeof(dip_mask_trap)); dip_mask_trap.dip=rg_db.l3[rtidx].rtk_l3.ipAddr; dip_mask_trap.mask =~((1<<(31-(rg_db.l3[rtidx].rtk_l3.ipMask)))-1); _rtk_rg_aclAndCfReservedRuleAdd(RTK_RG_ACLANDCF_RESERVED_RULE0_DIP_MASK_TRAP +rtidx, 
&dip_mask_trap); rg_db.staticRoute[sridx].valid=SOFTWARE_ONLY_ENTRY; WARNING("ReservedRuleAdd software data path By staticRoute ExtipIdx=%d NexthopIdx=%d netifIdx=%d L3Idx=%d",ipidx,nxtidx,nxtip_intfidx,rtidx); } } memcpy(&rg_db.staticRoute[sridx].info,pStaticRoute,sizeof(rtk_rg_staticRoute_t)); rg_db.staticRoute[sridx].route_idx=rtidx; rg_db.staticRoute[sridx].nxtip_rtidx=nxtip_rtidx; rg_db.staticRoute[sridx].nxtip_intfidx=nxtip_intfidx; rg_db.staticRoute[sridx].valid=1; *index=sridx; //return index return (RT_ERR_RG_OK); RET_ROUTING_ERR: RET_NEXTHOP_ERR: //remove MAC entry if(arp_miss==0 && neighbor_miss==0) rtk_rg_apollo_macEntry_del(l2idx); #ifdef __KERNEL__ if(timer_pending(&rg_kernel.staticRouteArpOrNBReqTimer[sridx])) del_timer(&rg_kernel.staticRouteArpOrNBReqTimer[sridx]); #endif RETURN_ERR(RT_ERR_RG_FAILED); } rtk_rg_err_code_t rtk_rg_apollo_staticRoute_del(int index){ int nxtidx; rtk_l34_routing_entry_t rtEntry; rtk_ipv6Routing_entry_t v6rtEntry; rtk_rg_table_staticRoute_t *pStaticRouteTable; rtk_rg_ipv4RoutingEntry_t cb_routeEntry; rtk_rg_ipv6RoutingEntry_t cb_routv6Entry; if(index<0 || index>=MAX_STATIC_ROUTE_SIZE)RETURN_ERR(RT_ERR_RG_INVALID_PARAM); //the deleted entry is not valid pStaticRouteTable=&rg_db.staticRoute[index]; if(pStaticRouteTable->valid){ if(pStaticRouteTable->info.ip_version){//v6 rg_db.systemGlobal.staticRouteNBDiscovery[index].finished = 1; //Deleting the routing entry bzero(&v6rtEntry, sizeof(rtk_ipv6Routing_entry_t)); bzero(&cb_routv6Entry, sizeof(rtk_rg_ipv6RoutingEntry_t)); nxtidx=rg_db.v6route[pStaticRouteTable->route_idx].rtk_v6route.nhOrIfidIdx; memcpy(cb_routv6Entry.dest_ip.ipv6_addr,rg_db.v6route[pStaticRouteTable->route_idx].rtk_v6route.ipv6Addr.ipv6_addr,IPV6_ADDR_LEN); cb_routv6Entry.prefix_len=rg_db.v6route[pStaticRouteTable->route_idx].rtk_v6route.ipv6PrefixLen; cb_routv6Entry.NhOrIntfIdx=nxtidx; cb_routv6Entry.type=rg_db.v6route[pStaticRouteTable->route_idx].rtk_v6route.type; _rtk_rg_decreaseNexthopReference(nxtidx); //for delete routing ASSERT_EQ(RTK_L34_IPV6ROUTINGTABLE_SET(pStaticRouteTable->route_idx, &v6rtEntry),RT_ERR_OK); //2 Call the initParam's v6routingDelByHwCallBack if(rg_db.systemGlobal.initParam.v6RoutingDelByHwCallBack != NULL) { rg_db.systemGlobal.initParam.v6RoutingDelByHwCallBack(&cb_routv6Entry); } }else{//v4 rg_db.systemGlobal.staticRouteArpReq[index].finished = 1; bzero(&rtEntry, sizeof(rtk_l34_routing_entry_t)); bzero(&cb_routeEntry, sizeof(rtk_rg_ipv4RoutingEntry_t)); nxtidx=rg_db.l3[pStaticRouteTable->route_idx].rtk_l3.nhStart; cb_routeEntry.dest_ip=rg_db.l3[pStaticRouteTable->route_idx].rtk_l3.ipAddr; cb_routeEntry.ip_mask=rg_db.l3[pStaticRouteTable->route_idx].netmask; cb_routeEntry.nexthop=rg_db.l3[pStaticRouteTable->route_idx].rtk_l3.nhStart; cb_routeEntry.wan_intf_idx=rg_db.nexthop[nxtidx].rtk_nexthop.ifIdx; _rtk_rg_decreaseNexthopReference(nxtidx); //for delete routing ASSERT_EQ(RTK_L34_ROUTINGTABLE_SET(pStaticRouteTable->route_idx, &rtEntry),RT_ERR_OK); //2 Call the initParam's routingDelByHwCallBack if(rg_db.systemGlobal.initParam.routingDelByHwCallBack != NULL) { rg_db.systemGlobal.initParam.routingDelByHwCallBack(&cb_routeEntry); } } if(pStaticRouteTable->valid == SOFTWARE_ONLY_ENTRY) { //delete ipv4 reserved acl for software entry _rtk_rg_aclAndCfReservedRuleDel(RTK_RG_ACLANDCF_RESERVED_RULE0_DIP_MASK_TRAP+pStaticRouteTable->route_idx); } //Clear static route table entry bzero(pStaticRouteTable,sizeof(rtk_rg_table_staticRoute_t)); _rtk_rg_decreaseNexthopReference(nxtidx); //for delete static-route entry 
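        //Both references taken in rtk_rg_apollo_staticRoute_add() (one for the routing entry,
        //one for the static-route record itself) have now been released.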
return (RT_ERR_RG_OK); } RETURN_ERR(RT_ERR_RG_ENTRY_NOT_FOUND); } rtk_rg_err_code_t rtk_rg_apollo_staticRoute_find(rtk_rg_staticRoute_t *pStaticRoute, int *index){ //index==-1, find by staticRouteEntry and return index //index > 0 , find by index and return staticRouteEntry int i; if(*index==-1){ for(i=0;i<MAX_STATIC_ROUTE_SIZE;i++){ if(rg_db.staticRoute[i].valid!=INVALID_ENTRY){ if(pStaticRoute->ip_version){//v6 if(!memcmp(&pStaticRoute->ipv6.addr,&rg_db.staticRoute[i].info.ipv6.addr,sizeof(rtk_ipv6_addr_t)) && pStaticRoute->ipv6.mask_length==rg_db.staticRoute[i].info.ipv6.mask_length && !memcmp(&pStaticRoute->ipv6.nexthop,&rg_db.staticRoute[i].info.ipv6.nexthop,sizeof(rtk_ipv6_addr_t))){ *index = i; return (RT_ERR_RG_OK); } }else if(pStaticRoute->ipv4.addr==rg_db.staticRoute[i].info.ipv4.addr && pStaticRoute->ipv4.mask==rg_db.staticRoute[i].info.ipv4.mask && pStaticRoute->ipv4.nexthop==rg_db.staticRoute[i].info.ipv4.nexthop){ *index = i; return (RT_ERR_RG_OK); } } } return (RT_ERR_RG_ENTRY_NOT_FOUND); }else{ if((*index < 0)|| (*index > MAX_STATIC_ROUTE_SIZE)){ RETURN_ERR(RT_ERR_RG_INVALID_PARAM); }else if(rg_db.staticRoute[*index].valid){ memcpy(pStaticRoute,&rg_db.staticRoute[*index].info,sizeof(rtk_rg_staticRoute_t)); return (RT_ERR_RG_OK); } } return (RT_ERR_RG_ENTRY_NOT_FOUND); } rtk_rg_err_code_t rtk_rg_apollo_aclLogCounterControl_get(int index, int *type, int *mode) { int32 ret; rtk_stat_log_ctrl_t ctrl; ret = rtk_stat_logCtrl_get(index, &ctrl); *type = ctrl.type; *mode = ctrl.mode; return ret; } rtk_rg_err_code_t rtk_rg_apollo_aclLogCounterControl_set(int index, int type, int mode) { int32 ret; rtk_stat_log_ctrl_t ctrl; ctrl.type = type; ctrl.mode = mode; ret = rtk_stat_logCtrl_set(index, ctrl); return ret; } rtk_rg_err_code_t rtk_rg_apollo_aclLogCounter_get(int index, uint64 *count) { int32 ret; ret = rtk_stat_log_get(index, count); return ret; } rtk_rg_err_code_t rtk_rg_apollo_aclLogCounter_reset(int index) { int32 ret; ret = rtk_stat_log_reset(index); return ret; } #ifdef __KERNEL__ rtk_rg_err_code_t rtk_rg_apollo_portStatus_get(rtk_rg_mac_port_idx_t port, rtk_rg_portStatusInfo_t *portInfo) { rtk_port_linkStatus_t linkStatus; rtk_port_speed_t linkSpeed; rtk_port_duplex_t linkDuplex; if(portInfo==NULL) RETURN_ERR(RT_ERR_RG_NULL_POINTER); if(port>RTK_RG_MAC_PORT_MAX) RETURN_ERR(RT_ERR_RG_INVALID_PARAM); bzero(&linkStatus,sizeof(rtk_port_linkStatus_t)); bzero(&linkSpeed,sizeof(rtk_port_speed_t)); bzero(&linkDuplex,sizeof(rtk_port_duplex_t)); assert_ok(rtk_port_link_get(port, &linkStatus)); portInfo->linkStatus = linkStatus; assert_ok(rtk_port_speedDuplex_get(port, &linkSpeed, &linkDuplex)); portInfo->linkSpeed = linkSpeed; portInfo->linkDuplex = linkDuplex; return (RT_ERR_RG_OK); } #endif #if 0 //MIB Proc int rtt_rg_proc_mibInfo_get(struct file *filp, const char *buff,unsigned long len, void *data ) { int ret; rtk_rg_port_mib_info_t mib; char *tmpbuf=NULL; char *strptr=NULL; int val; tmpbuf=&rg_kernel.proc_parsing_buf[0]; if (buff && !copy_from_user(tmpbuf, buff, len)) { tmpbuf[len] = '\0'; strptr=tmpbuf; } sscanf(strptr,"%d",&val); ret = rtk_rg_portMibInfo_get(val,&mib); assert_ok(ret); rtlglue_printf("[Port:%d]\n",val); rtlglue_printf("ifInOctets: %lld\n",mib.ifInOctets); rtlglue_printf("ifInUcastPkts: %d\n",mib.ifInUcastPkts); rtlglue_printf("ifInMulticastPkts: %d\n",mib.ifInMulticastPkts); rtlglue_printf("ifInBroadcastPkts: %d\n",mib.ifInBroadcastPkts); rtlglue_printf("ifInDiscards: %d\n",mib.ifInDiscards); rtlglue_printf("ifOutOctets: %lld\n",mib.ifOutOctets); 
rtlglue_printf("ifOutDiscards: %d\n",mib.ifOutDiscards); rtlglue_printf("ifOutUcastPkts: %d\n",mib.ifOutUcastPkts); rtlglue_printf("ifOutMulticastPkts: %d\n",mib.ifOutMulticastPkts); rtlglue_printf("ifOutBrocastPkts: %d\n",mib.ifOutBrocastPkts); rtlglue_printf("dot1dBasePortDelayExceededDiscards: %d\n",mib.dot1dBasePortDelayExceededDiscards); rtlglue_printf("dot1dTpPortInDiscards: %d\n",mib.dot1dTpPortInDiscards); rtlglue_printf("dot1dTpHcPortInDiscards: %d\n",mib.dot1dTpHcPortInDiscards); rtlglue_printf("dot3InPauseFrames: %d\n",mib.dot3InPauseFrames); rtlglue_printf("dot3OutPauseFrames: %d\n",mib.dot3OutPauseFrames); // rtlglue_printf("dot3OutPauseOnFrames: %d\n",mib.dot3OutPauseOnFrames); rtlglue_printf("dot3StatsAligmentErrors: %d\n",mib.dot3StatsAligmentErrors); rtlglue_printf("dot3StatsFCSErrors: %d\n",mib.dot3StatsFCSErrors); rtlglue_printf("dot3StatsSingleCollisionFrames: %d\n",mib.dot3StatsSingleCollisionFrames); rtlglue_printf("dot3StatsMultipleCollisionFrames: %d\n",mib.dot3StatsMultipleCollisionFrames); rtlglue_printf("dot3StatsDeferredTransmissions: %d\n",mib.dot3StatsDeferredTransmissions); rtlglue_printf("dot3StatsLateCollisions: %d\n",mib.dot3StatsLateCollisions); rtlglue_printf("dot3StatsExcessiveCollisions: %d\n",mib.dot3StatsExcessiveCollisions); rtlglue_printf("dot3StatsFrameTooLongs: %d\n",mib.dot3StatsFrameTooLongs); rtlglue_printf("dot3StatsSymbolErrors: %d\n",mib.dot3StatsSymbolErrors); rtlglue_printf("dot3ControlInUnknownOpcodes: %d\n",mib.dot3ControlInUnknownOpcodes); rtlglue_printf("etherStatsDropEvents: %d\n",mib.etherStatsDropEvents); rtlglue_printf("etherStatsOctets: %lld\n",mib.etherStatsOctets); rtlglue_printf("etherStatsBcastPkts: %d\n",mib.etherStatsBcastPkts); rtlglue_printf("etherStatsMcastPkts: %d\n",mib.etherStatsMcastPkts); rtlglue_printf("etherStatsUndersizePkts: %d\n",mib.etherStatsUndersizePkts); rtlglue_printf("etherStatsOversizePkts: %d\n",mib.etherStatsOversizePkts); rtlglue_printf("etherStatsFragments: %d\n",mib.etherStatsFragments); rtlglue_printf("etherStatsJabbers: %d\n",mib.etherStatsJabbers); rtlglue_printf("etherStatsCollisions: %d\n",mib.etherStatsCollisions); rtlglue_printf("etherStatsCRCAlignErrors: %d\n",mib.etherStatsCRCAlignErrors); rtlglue_printf("etherStatsPkts64Octets: %d\n",mib.etherStatsPkts64Octets); rtlglue_printf("etherStatsPkts65to127Octets: %d\n",mib.etherStatsPkts65to127Octets); rtlglue_printf("etherStatsPkts128to255Octets: %d\n",mib.etherStatsPkts128to255Octets); rtlglue_printf("etherStatsPkts256to511Octets: %d\n",mib.etherStatsPkts256to511Octets); rtlglue_printf("etherStatsPkts512to1023Octets: %d\n",mib.etherStatsPkts512to1023Octets); rtlglue_printf("etherStatsPkts1024to1518Octets: %d\n",mib.etherStatsPkts1024to1518Octets); rtlglue_printf("etherStatsTxOctets: %lld\n",mib.etherStatsTxOctets); rtlglue_printf("etherStatsTxUndersizePkts: %d\n",mib.etherStatsTxUndersizePkts); rtlglue_printf("etherStatsTxOversizePkts: %d\n",mib.etherStatsTxOversizePkts); rtlglue_printf("etherStatsTxPkts64Octets: %d\n",mib.etherStatsTxPkts64Octets); rtlglue_printf("etherStatsTxPkts65to127Octets: %d\n",mib.etherStatsTxPkts65to127Octets); rtlglue_printf("etherStatsTxPkts128to255Octets: %d\n",mib.etherStatsTxPkts128to255Octets); rtlglue_printf("etherStatsTxPkts256to511Octets: %d\n",mib.etherStatsTxPkts256to511Octets); rtlglue_printf("etherStatsTxPkts512to1023Octets: %d\n",mib.etherStatsTxPkts512to1023Octets); rtlglue_printf("etherStatsTxPkts1024to1518Octets: %d\n",mib.etherStatsTxPkts1024to1518Octets); 
rtlglue_printf("etherStatsTxPkts1519toMaxOctets: %d\n",mib.etherStatsTxPkts1519toMaxOctets); rtlglue_printf("etherStatsTxBcastPkts: %d\n",mib.etherStatsTxBcastPkts); rtlglue_printf("etherStatsTxMcastPkts: %d\n",mib.etherStatsTxBcastPkts); rtlglue_printf("etherStatsTxFragments: %d\n",mib.etherStatsTxFragments); rtlglue_printf("etherStatsTxJabbers: %d\n",mib.etherStatsTxJabbers); rtlglue_printf("etherStatsTxCRCAlignErrors: %d\n",mib.etherStatsTxCRCAlignErrors); rtlglue_printf("etherStatsRxUndersizePkts: %d\n",mib.etherStatsRxUndersizePkts); rtlglue_printf("etherStatsRxUndersizeDropPkts: %d\n",mib.etherStatsRxUndersizeDropPkts); rtlglue_printf("etherStatsRxOversizePkts: %d\n",mib.etherStatsRxOversizePkts); rtlglue_printf("etherStatsRxPkts64Octets: %d\n",mib.etherStatsRxPkts64Octets); rtlglue_printf("etherStatsRxPkts65to127Octets: %d\n",mib.etherStatsRxPkts65to127Octets); rtlglue_printf("etherStatsRxPkts128to255Octets: %d\n",mib.etherStatsRxPkts128to255Octets); rtlglue_printf("etherStatsRxPkts256to511Octets: %d\n",mib.etherStatsRxPkts256to511Octets); rtlglue_printf("etherStatsRxPkts512to1023Octets: %d\n",mib.etherStatsRxPkts512to1023Octets); rtlglue_printf("etherStatsRxPkts1024to1518Octets: %d\n",mib.etherStatsRxPkts1024to1518Octets); rtlglue_printf("etherStatsRxPkts1519toMaxOctets: %d\n",mib.etherStatsRxPkts1519toMaxOctets); rtlglue_printf("inOampduPkts: %d\n",mib.inOampduPkts); rtlglue_printf("outOampduPkts: %d\n",mib.outOampduPkts); return len; } int rtt_rg_proc_mibInfo_clear(struct file *filp, const char *buff,unsigned long len, void *data ) { int ret; rtk_rg_port_mib_info_t mib; char *tmpbuf=NULL; char *strptr=NULL; int val; tmpbuf=&rg_kernel.proc_parsing_buf[0]; if (buff && !copy_from_user(tmpbuf, buff, len)) { tmpbuf[len] = '\0'; strptr=tmpbuf; } sscanf(strptr,"%d",&val); ret = rtk_rg_portMibInfo_clear(val); assert_ok(ret); return len; } #endif #if 0 //DoS Proc int32 rtk_rg_apollo_proc_dos_port_set( struct file *filp, const char *buff,unsigned long len, void *data ) { char *tmpbuf=NULL; char *strptr=NULL; int val; int i; int ret; rtk_rg_mac_portmask_t pmsk; tmpbuf=&rg_kernel.proc_parsing_buf[0]; if (buff && !copy_from_user(tmpbuf, buff, len)) { tmpbuf[len] = '\0'; strptr=tmpbuf; } sscanf(strptr,"0x%x",&val); pmsk.portmask = val; ret = rtk_rg_dosPortMaskEnable_set(pmsk); assert_ok(ret); for(i=0;i<RTK_RG_MAC_PORT_MAX;i++) { rtk_enable_t valid; ret = rtk_sec_portAttackPreventState_get(i,&valid); assert_ok(ret); rtlglue_printf("Port %d : %s.\n",i,(valid==1)?"ENABLED":"DISABLED"); } return len; } int32 rtk_rg_apollo_proc_dos_port_get(void) { int ret; int i; rtk_rg_mac_portmask_t pmsk; ret = rtk_rg_dosPortMaskEnable_get(&pmsk); assert_ok(ret); for(i=0;i<RTK_RG_MAC_PORT_MAX;i++) rtlglue_printf("Port %d security is %s\n",i,(pmsk.portmask&(0x1<<i))?"enabled":"disabled"); return (RT_ERR_RG_OK); } int32 rtk_rg_apollo_proc_dos_type_set( struct file *filp, const char *buff,unsigned long len, void *data ) { char *tmpbuf=NULL; char *strptr=NULL; int val[3]; int ret; tmpbuf=&rg_kernel.proc_parsing_buf[0]; if (buff && !copy_from_user(tmpbuf, buff, len)) { tmpbuf[len] = '\0'; strptr=tmpbuf; } sscanf(strptr,"%d %d %d",&val[0],&val[1],&val[2]); ret = rtk_rg_dosType_set(val[0],val[1],val[2]); assert_ok(ret); return len; } int32 rtk_rg_apollo_proc_dos_type_get(void) { int i; int ret; char* strType[] = {"DAEQSA_DENY","LAND_DENY","BLAT_DENY","SYNFIN_DENY","XMA_DENY","NULLSCAN_DENY", "SYN_SPORTL1024_DENY","TCPHDR_MIN_CHECK","TCP_FRAG_OFF_MIN_CHECK","ICMP_FRAG_PKTS_DENY", 
"POD_DENY","UDPDOMB_DENY","SYNWITHDATA_DENY","SYNFLOOD_DENY","FINFLOOD_DENY","ICMPFLOOD_DENY"}; for(i=0;i<=RTK_RG_DOS_SYNWITHDATA_DENY;i++) { int dos_enable=-1; rtk_rg_dos_action_t dos_action=-1; ret = rtk_rg_dosType_get(i,&dos_enable,&dos_action); assert_ok(ret); rtlglue_printf("[%s] [%s] [Action:%s] \n",strType[i],(dos_enable==1)?"Enabled":"Disabled", (dos_action==RTK_RG_DOS_ACTION_DROP)?"Drop":"Trap"); } return (RT_ERR_RG_OK); } int32 rtk_rg_apollo_proc_dos_flood_set( struct file *filp, const char *buff,unsigned long len, void *data ) { char *tmpbuf=NULL; char *strptr=NULL; int val[4]; int ret; tmpbuf=&rg_kernel.proc_parsing_buf[0]; if (buff && !copy_from_user(tmpbuf, buff, len)) { tmpbuf[len] = '\0'; strptr=tmpbuf; } sscanf(strptr,"%d %d %d %d",&val[0],&val[1],&val[2],&val[3]); ret = rtk_rg_dosFlood_set(val[0],val[1],val[2],val[3]); assert_ok(ret); return len; } int32 rtk_rg_apollo_proc_dos_flood_get(void) { int i; int ret; char* strType[] = {"DAEQSA_DENY","LAND_DENY","BLAT_DENY","SYNFIN_DENY","XMA_DENY","NULLSCAN_DENY", "SYN_SPORTL1024_DENY","TCPHDR_MIN_CHECK","TCP_FRAG_OFF_MIN_CHECK","ICMP_FRAG_PKTS_DENY", "POD_DENY","UDPDOMB_DENY","SYNWITHDATA_DENY","SYNFLOOD_DENY","FINFLOOD_DENY","ICMPFLOOD_DENY"}; for(i=RTK_RG_DOS_SYNFLOOD_DENY;i<=RTK_RG_DOS_ICMPFLOOD_DENY;i++) { int dos_enable=-1,dos_action=-1,dos_threshold=-1; ret = rtk_rg_dosFlood_get(i,&dos_enable,&dos_action,&dos_threshold); assert_ok(ret); rtlglue_printf("[%s] [%s] [Action:%s] [Threshold:%d]\n",strType[i],(dos_enable==1)?"Enabled":"Disabled", (dos_action==RTK_RG_DOS_ACTION_DROP)?"Drop":"Trap",dos_threshold); } return (RT_ERR_RG_OK); } #endif #ifdef CONFIG_RG_WLAN_HWNAT_ACCELERATION void _rtk_rg_wlanMbssidLearning(u8* smac,rtk_rg_pktHdr_t *pPktHdr) { int i; for(i=rg_db.wlanMbssidHeadIdx;i<rg_db.wlanMbssidHeadIdx+MAX_WLAN_MBSSID_SW_TABLE_SIZE;i++) { int idx=i%MAX_WLAN_MBSSID_SW_TABLE_SIZE; if(memcmp(rg_db.wlanMbssid[idx].mac.octet,smac,6)==0) //the MAC is finded in table. { if(rg_db.wlanMbssid[idx].vid!=pPktHdr->ctagVid) //update ctag info for dmac2cvid { rg_db.wlanMbssid[idx].vlan_tag_if=(pPktHdr->tagif&CVLAN_TAGIF)?1:0; //update ingress tag info rg_db.wlanMbssid[idx].vid=pPktHdr->ctagVid; MACLN("#### UPDATE MBSSID DMAC2CVID INFO, MAC=%02x:%02x:%02x:%02x:%02x:%02x WLAN_DEV_IDX=%d cTagIf=%d VID=%d ####", smac[0],smac[1],smac[2],smac[3],smac[4],smac[5],pPktHdr->wlan_dev_idx,rg_db.wlanMbssid[idx].vlan_tag_if,pPktHdr->ctagVid); } if(rg_db.wlanMbssid[idx].wlan_dev_idx!=pPktHdr->wlan_dev_idx) { //Clear All shortcut, otherwise QoS remarking or DSCP may be diff because of SSID-MOVING! 
_rtk_rg_shortCut_clear(); rg_db.wlanMbssid[idx].wlan_dev_idx=pPktHdr->wlan_dev_idx; //update ingress device rg_db.wlanMbssid[idx].vlan_tag_if=(pPktHdr->tagif&CVLAN_TAGIF)?1:0; //update ingress tag info rg_db.wlanMbssid[idx].vid=pPktHdr->ctagVid; MACLN("#### UPDATE MBSSID INTF INFO, MAC=%02x:%02x:%02x:%02x:%02x:%02x WLAN_DEV_IDX=%d cTagIf=%d VID=%d ####", smac[0],smac[1],smac[2],smac[3],smac[4],smac[5],pPktHdr->wlan_dev_idx,rg_db.wlanMbssid[idx].vlan_tag_if,pPktHdr->ctagVid); } rg_db.wlanMbssid[idx].learn_jiffies=jiffies; return; } } //not found in table rg_db.wlanMbssidHeadIdx=(rg_db.wlanMbssidHeadIdx+MAX_WLAN_MBSSID_SW_TABLE_SIZE-1)%MAX_WLAN_MBSSID_SW_TABLE_SIZE; rg_db.wlanMbssid[rg_db.wlanMbssidHeadIdx].wlan_dev_idx=pPktHdr->wlan_dev_idx; rg_db.wlanMbssid[rg_db.wlanMbssidHeadIdx].vlan_tag_if=(pPktHdr->tagif&CVLAN_TAGIF)?1:0; rg_db.wlanMbssid[rg_db.wlanMbssidHeadIdx].vid=pPktHdr->ctagVid; rg_db.wlanMbssid[rg_db.wlanMbssidHeadIdx].learn_jiffies=jiffies; memcpy(rg_db.wlanMbssid[rg_db.wlanMbssidHeadIdx].mac.octet,smac,6); MACLN("#### LEARNING MBSSID, MAC=%02x:%02x:%02x:%02x:%02x:%02x WLAN_DEV_IDX=%d cTagIf=%d VID=%d ####", smac[0],smac[1],smac[2],smac[3],smac[4],smac[5],pPktHdr->wlan_dev_idx,(pPktHdr->tagif&CVLAN_TAGIF)?1:0,pPktHdr->ctagVid); return; } #ifdef CONFIG_MASTER_WLAN0_ENABLE rtk_rg_lookupIdxReturn_t _rtk_rg_wlanMbssidLookup(u8 *dmac,rtk_rg_mbssidDev_t *wlan_dev_idx) { int i; for(i=rg_db.wlanMbssidHeadIdx;i<rg_db.wlanMbssidHeadIdx+MAX_WLAN_MBSSID_SW_TABLE_SIZE;i++) { int idx=i%MAX_WLAN_MBSSID_SW_TABLE_SIZE; if(memcmp(rg_db.wlanMbssid[idx].mac.octet,dmac,6)==0) //the MAC is finded in table. { int newIdx; rtk_rg_table_wlan_mbssid_t tmpBuf; *wlan_dev_idx=rg_db.wlanMbssid[idx].wlan_dev_idx; #if 0 //update mbsssid vlan if(rg_db.wlanMbssid[idx].vid!=rg_db.pktHdr->ctagVid) { rg_db.wlanMbssid[idx].vlan_tag_if=(rg_db.pktHdr->tagif&CVLAN_TAGIF)?1:0; //update ingress tag info rg_db.wlanMbssid[idx].vid=rg_db.pktHdr->ctagVid; TRACE("update mbssid tagif=%d vid=%d",rg_db.wlanMbssid[idx].vlan_tag_if,rg_db.wlanMbssid[idx].vid); } #endif if(idx==rg_db.wlanMbssidHeadIdx) return idx; //LRU: Swaping this entry to first lookup index. newIdx=(rg_db.wlanMbssidHeadIdx+MAX_WLAN_MBSSID_SW_TABLE_SIZE-1)%MAX_WLAN_MBSSID_SW_TABLE_SIZE; memcpy(&tmpBuf,&rg_db.wlanMbssid[newIdx],sizeof(tmpBuf)); memcpy(&rg_db.wlanMbssid[newIdx],&rg_db.wlanMbssid[idx],sizeof(tmpBuf)); memcpy(&rg_db.wlanMbssid[idx],&tmpBuf,sizeof(tmpBuf)); rg_db.wlanMbssidHeadIdx=newIdx; return newIdx; } } *wlan_dev_idx=RG_RET_MBSSID_NOT_FOUND; return RG_RET_LOOKUPIDX_NOT_FOUND; } #endif rtk_rg_mbssidDev_t _rtk_master_wlan_mbssid_tx(rtk_rg_pktHdr_t *pPktHdr,struct sk_buff *skb) { #ifdef CONFIG_MASTER_WLAN0_ENABLE int wlan_dev_idx; rtk_rg_mbssidDev_t wlan_src_dev_idx=RG_RET_MBSSID_NOT_FOUND; int flooding=0; int i; struct sk_buff *new_skb=skb; unsigned int capable_dev_mask=0x0; assert(skb != NULL); #if 0 //Forced removing CTAG when send to master WiFi!! 
if((*(u16*)&skb->data[12])==0x8100) { for(i=12;i>=4;i-=4) { *(u32*)&skb->data[i]=*(u32*)&skb->data[i-4]; } skb_pull_rcsum(skb, 4); } #endif //Vlan Egress filter if (pPktHdr->pRxDesc->rx_origformat) { capable_dev_mask=rg_db.vlan[pPktHdr->internalVlanID].wlan0DevMask; }else{ capable_dev_mask=0xffffffff; //disable vlan egress filter if orig is zero } if (skb->data[0]&1) { /*flooding=1; TRACE("Flood to all VLAN-matched Master WIFI intf!"); _rtk_rg_wlanMbssidLookup(skb->data+6,&wlan_src_dev_idx); //source MAC lookup */ #define NIPQUAD(addr) \ ((unsigned char *)&addr)[0], \ ((unsigned char *)&addr)[1], \ ((unsigned char *)&addr)[2], \ ((unsigned char *)&addr)[3] #define NIP4QUAD(addr) \ NIPQUAD((addr[0])) #define NIP6QUAD(addr) \ NIPQUAD((addr[0])), \ NIPQUAD((addr[1])), \ NIPQUAD((addr[2])), \ NIPQUAD((addr[3])) #define IP4D "%d.%d.%d.%d" #define IP4H "%X:%X:%X:%X" #define IP6D IP4D" "IP4D" "IP4D" "IP4D #define IP6H IP4H" "IP4H" "IP4H" "IP4H if (!rg_db.systemGlobal.initParam.igmpSnoopingEnable || !rg_db.systemGlobal.igmpWifiRefEnable) { flooding=1; TRACE("Flood to all VLAN-matched Master WIFI intf!"); } else { /*flooding=0; TRACE("Forward to all VLAN-matched Master/Slave WIFI intf!"); */ flooding=1; TRACE("Flood to at leave one client Master only / no Slave WIFI intf!"); if (pPktHdr->ingressLocation!=RG_IGR_IGMP_OR_MLD) { { struct rtl_multicastDataInfo multicastDataInfo; struct rtl_multicastFwdInfo multicastFwdInfo; int retVal; bzero(&multicastDataInfo, sizeof(struct rtl_multicastDataInfo)); #if 0 multicastDataInfo.vlanId=param->vlanId; multicastDataInfo.groupAddr[0]= param->groupAddr[0]; multicastDataInfo.sourceIp[0]= sourceEntry->sourceAddr[0]; if (IP_VERSION6==param->ipVersion) { multicastDataInfo.groupAddr[1]= param->groupAddr[1]; multicastDataInfo.groupAddr[2]= param->groupAddr[2]; multicastDataInfo.groupAddr[3]= param->groupAddr[3]; multicastDataInfo.sourceIp[1]= sourceEntry->sourceAddr[1]; multicastDataInfo.sourceIp[2]= sourceEntry->sourceAddr[2]; multicastDataInfo.sourceIp[3]= sourceEntry->sourceAddr[3]; DEBUG("test SrcIP(" IP6H ")", NIP6QUAD(sourceEntry->sourceAddr)); } else { DEBUG("test SrcIP(" IP4D ")", NIPQUAD(sourceEntry->sourceAddr)); } #else multicastDataInfo.vlanId = rg_db.pktHdr->internalVlanID; if (pPktHdr->tagif & IPV4_TAGIF) { multicastDataInfo.ipVersion = IP_VERSION4; multicastDataInfo.sourceIp[0] = pPktHdr->ipv4Sip; multicastDataInfo.groupAddr[0] = pPktHdr->ipv4Dip; DEBUG("MC Data SrcIP(" IP4D ")", NIPQUAD(multicastDataInfo.sourceIp)); DEBUG("MC Data GrpIP(" IP4D ")", NIPQUAD(multicastDataInfo.groupAddr)); } else if (pPktHdr->tagif & IPV6_TAGIF) { multicastDataInfo.ipVersion=IP_VERSION6; multicastDataInfo.groupAddr[0] = (pPktHdr->pIpv6Dip[0] <<12)+(pPktHdr->pIpv6Dip[1] <<8)+(pPktHdr->pIpv6Dip[2] <<4)+(pPktHdr->pIpv6Dip[3]); multicastDataInfo.groupAddr[1] = (pPktHdr->pIpv6Dip[4] <<12)+(pPktHdr->pIpv6Dip[5] <<8)+(pPktHdr->pIpv6Dip[6] <<4)+(pPktHdr->pIpv6Dip[7]); multicastDataInfo.groupAddr[2] = (pPktHdr->pIpv6Dip[8] <<12)+(pPktHdr->pIpv6Dip[9] <<8)+(pPktHdr->pIpv6Dip[10]<<4)+(pPktHdr->pIpv6Dip[11]); multicastDataInfo.groupAddr[3] = (pPktHdr->pIpv6Dip[12]<<12)+(pPktHdr->pIpv6Dip[13]<<8)+(pPktHdr->pIpv6Dip[14]<<4)+(pPktHdr->pIpv6Dip[15]); multicastDataInfo.sourceIp[0] = (pPktHdr->pIpv6Sip[0] <<12)+(pPktHdr->pIpv6Sip[1] <<8)+(pPktHdr->pIpv6Sip[2] <<4)+(pPktHdr->pIpv6Sip[3]); multicastDataInfo.sourceIp[1] = (pPktHdr->pIpv6Sip[4] <<12)+(pPktHdr->pIpv6Sip[5] <<8)+(pPktHdr->pIpv6Sip[6] <<4)+(pPktHdr->pIpv6Sip[7]); multicastDataInfo.sourceIp[2] = (pPktHdr->pIpv6Sip[8] 
<<12)+(pPktHdr->pIpv6Sip[9] <<8)+(pPktHdr->pIpv6Sip[10]<<4)+(pPktHdr->pIpv6Sip[11]);
					multicastDataInfo.sourceIp[3] = (pPktHdr->pIpv6Sip[12]<<12)+(pPktHdr->pIpv6Sip[13]<<8)+(pPktHdr->pIpv6Sip[14]<<4)+(pPktHdr->pIpv6Sip[15]);
					DEBUG("MC Data SrcIP(" IP6H ")", NIP6QUAD(multicastDataInfo.sourceIp));
					DEBUG("MC Data GrpIP(" IP6H ")", NIP6QUAD(multicastDataInfo.groupAddr));
				}
				else
				{
					IGMP("ignore non-IPv4 or non-IPv6 MC packet");
				}
#endif
				//multicastDataInfo.srcFilterMode = param->srcFilterMode;
				retVal = rtl_getMulticastDataFwdInfo(rg_db.systemGlobal.nicIgmpModuleIndex, &multicastDataInfo, &multicastFwdInfo);
				if (retVal!=SUCCESS)
				{
					DEBUG("FAIL: rtl_getMulticastDataFwdInfo\n");
				}
#if 0
				if (multicastFwdInfo.srcFilterMode==RTK_RG_IPV4MC_EXCLUDE)
				{
#if defined(CONFIG_RTL9600_SERIES)
					mapping_entry->fwdmembr=multicastFwdInfo.fwdPortMask & ((1<<RTK_RG_MAC_PORT_MAX) - 1);
#else
					mapping_entry->fwdmembr=multicastFwdInfo.fwdPortMask;
#endif
				}
				else
				{
					mapping_entry->fwdmembr=multicastFwdInfo.fwdPortMask;
				}
#endif
				DEBUG("Mode:%d l2PortMask :0x%X, fwdPortMask: 0x%X", multicastFwdInfo.srcFilterMode, multicastFwdInfo.l2PortMask, multicastFwdInfo.fwdPortMask);
#ifdef CONFIG_MASTER_WLAN0_ENABLE
				flooding=1;
				DEBUG("capable_dev_mask:0x%X mbssidPortMask: 0x%X", capable_dev_mask, multicastFwdInfo.wlan0DevMask);
				capable_dev_mask &= multicastFwdInfo.wlan0DevMask;
				DEBUG("capable_dev_mask:0x%X mbssidPortMask: 0x%X", capable_dev_mask, multicastFwdInfo.wlan0DevMask);
#else
				{
					unsigned int wifi0_full_mask=0x0;
					unsigned int wifi1_full_mask=0x0;
					flooding=1;
					wifi0_full_mask=((1<<RG_RET_MBSSID_MASTER_CLIENT_INTF)-1);
					wifi1_full_mask=((1<<RG_RET_MBSSID_SLAVE_CLIENT_INTF)-1) & (~((1<<WLAN_DEVICE_NUM)-1));
					DEBUG("wifi0_full_mask :0x%X, wifi1_full_mask: 0x%X", wifi0_full_mask, wifi1_full_mask);
					if ((multicastFwdInfo.srcFilterMode==RTK_RG_IPV4MC_INCLUDE) || (multicastFwdInfo.srcFilterMode==RTK_RG_IPV4MC_DONT_CARE_SRC))
					{
						if (!(multicastFwdInfo.fwdPortMask & 0x80))
						{
							DEBUG("in-mo, before wifi0 capable_dev_mask=%X", capable_dev_mask);
							capable_dev_mask &= (~wifi0_full_mask);
							DEBUG("in-mo, after wifi0 capable_dev_mask=%X", capable_dev_mask);
						}
						if (!(multicastFwdInfo.fwdPortMask & 0x100))
						{
							DEBUG("in-mo, before wifi1 capable_dev_mask=%X", capable_dev_mask);
							capable_dev_mask &= (~wifi1_full_mask);
							DEBUG("in-mo, after wifi1 capable_dev_mask=%X", capable_dev_mask);
						}
					}
					else if (multicastFwdInfo.srcFilterMode==RTK_RG_IPV4MC_EXCLUDE)
					{
						if (!(multicastFwdInfo.l2PortMask & 0x80)) //if (!(multicastFwdInfo.fwdPortMask & 0x80))
						{
							DEBUG("ex-mo, before wifi0 capable_dev_mask=%X", capable_dev_mask);
							capable_dev_mask &= (~wifi0_full_mask);
							DEBUG("ex-mo, after wifi0 capable_dev_mask=%X", capable_dev_mask);
						}
						if (!(multicastFwdInfo.l2PortMask & 0x100)) //if (!(multicastFwdInfo.fwdPortMask & 0x100))
						{
							DEBUG("ex-mo, before wifi1 capable_dev_mask=%X", capable_dev_mask);
							capable_dev_mask &= (~wifi1_full_mask);
							DEBUG("ex-mo, after wifi1 capable_dev_mask=%X", capable_dev_mask);
						}
					}
					else
					{
						DEBUG("Do not care igmp");
						DEBUG("capable_dev_mask=%X", capable_dev_mask);
					}
				}
#endif
			}
		}
		else
		{
			extern int32 rtl_getQueryPortMask(uint32 moduleIndex, rtk_rg_pktHdr_t * pPktHdr, uint32 *fwdPortMask, uint32 *fwdMbssidMask);
			uint32 fwdportmask=0x0, fwdmbssidmask=0x0;
			rtl_getQueryPortMask(rg_db.systemGlobal.nicIgmpModuleIndex, pPktHdr, &fwdportmask, &fwdmbssidmask);
			capable_dev_mask &= fwdmbssidmask;
			IGMP("IGMP Query capable_dev_mask[0x%x] fwdportmask[0x%x], fwdmbssidmask[0x%x]", capable_dev_mask, fwdportmask, fwdmbssidmask);
		}
	}

#define MACH "%02X:%02X:%02X:%02X:%02X:%02X"
#define NMAC(addr) \
	((unsigned char *)(addr))[0], \
	((unsigned char *)(addr))[1], \
	((unsigned char *)(addr))[2], \
	((unsigned char *)(addr))[3], \
	((unsigned char *)(addr))[4], \
	((unsigned char *)(addr))[5]

	DEBUG("DA("MACH") SA("MACH")", NMAC(skb->data), NMAC(skb->data+6));
	_rtk_rg_wlanMbssidLookup(skb->data+6,&wlan_src_dev_idx); //source MAC lookup
}
else
{
	//20150514LUKE: for packets after the DA-lookup process, we can get the wlan_dev_idx from the LUT without looking up the mbssid table
	if(pPktHdr->dmacL2Idx!=FAIL)
	{
		wlan_dev_idx=rg_db.lut[pPktHdr->dmacL2Idx].wlan_device_idx;
		TRACE("Get wlan_dev_idx=%d from LUT[%d]",wlan_dev_idx,pPktHdr->dmacL2Idx);
	}
	else
	{
		int idx;
		idx=_rtk_rg_wlanMbssidLookup(skb->data,&wlan_dev_idx);
		if(idx==RG_RET_LOOKUPIDX_NOT_FOUND)
		{
			TRACE("Lookup fail...flood to all VLAN-matched Master WIFI intf!"); //destination MAC lookup
			flooding=1;
			_rtk_rg_wlanMbssidLookup(skb->data+6,&wlan_src_dev_idx); //source MAC lookup
		}
		else
		{
			if(rg_db.systemGlobal.initParam.macBasedTagDecision==1) // wifi dmac2cvid by mbssid table
			{
				pPktHdr->dmac2VlanTagif=rg_db.wlanMbssid[idx].vlan_tag_if;
				if(pPktHdr->dmac2VlanTagif==0)
				{
					pPktHdr->dmac2VlanID=0;
				}
				else
				{
					pPktHdr->dmac2VlanID=rg_db.wlanMbssid[idx].vid;
				}
				TRACE("DMAC2CVID by mbssid table(tagif=%d, VID=%d)",pPktHdr->dmac2VlanTagif,pPktHdr->dmac2VlanID);
			}
		}
	}
}

#if 1
//DEBUG("pPktHdr->ingressLocation=%d wlan_dev_idx=%d, wlan_src_dev_idx=%d ppkthdr tagif=0x%x, l3modify=%d, l4Modify=%d, fwdDecision=%d, isHairpinNat=%d",pPktHdr->ingressLocation, wlan_dev_idx, wlan_src_dev_idx, pPktHdr->tagif, pPktHdr->l3Modify, pPktHdr->l4Modify, pPktHdr->fwdDecision, pPktHdr->isHairpinNat);
//DEBUG("ori_L3CheckSum=0x%x, L3CheckSum=0x%x, ori_L4CheckSum=0x%x, L4CheckSum=0x%x", pPktHdr->ipv4Checksum, *pPktHdr->pIpv4Checksum, pPktHdr->l4Checksum, *pPktHdr->pL4Checksum);
//because an IPv6 packet won't have its IP changed here, the L4 checksum does not need to be recalculated!
if(pPktHdr->tagif&IPV4_TAGIF && (pPktHdr->l3Modify||(pPktHdr->fwdDecision==RG_FWD_DECISION_ROUTING)) && (pPktHdr->ipv4Checksum==*pPktHdr->pIpv4Checksum || pPktHdr->isHairpinNat==1)) //20150514LUKE: update the checksum only when it has not been changed yet
{
	//*pPktHdr->pIpv4Checksum=0;
	//*pPktHdr->pIpv4Checksum=htons(inet_chksum(skb->data+pPktHdr->l3Offset,pPktHdr->l4Offset-pPktHdr->l3Offset));
	//inbound, we use DIP to replace SIP!!
	if(pPktHdr->isHairpinNat==1)
	{
		*pPktHdr->pIpv4Checksum=htons(_rtk_rg_fwdengine_L3checksumUpdate(*pPktHdr->pIpv4Checksum,pPktHdr->ipv4Dip,pPktHdr->ipv4TTL,pPktHdr->ipProtocol,ntohl(*pPktHdr->pIpv4Dip),*pPktHdr->pIpv4TTL));
	}
	else
	{
		*pPktHdr->pIpv4Checksum=htons(_rtk_rg_fwdengine_L3checksumUpdate(*pPktHdr->pIpv4Checksum,pPktHdr->ipv4Dip,pPktHdr->ipv4TTL,pPktHdr->ipProtocol,ntohl(*pPktHdr->pIpv4Dip),*pPktHdr->pIpv4TTL));
#ifdef CONFIG_RTL_CLIENT_MODE_SUPPORT
		*pPktHdr->pIpv4Checksum=htons(_rtk_rg_fwdengine_L3checksumUpdate(*pPktHdr->pIpv4Checksum,pPktHdr->ipv4Sip,0,pPktHdr->ipProtocol,ntohl(*pPktHdr->pIpv4Sip),0));
#endif
	}
	//20140625LUKE: when packets are fragmented, we should not recalculate the checksum here!!
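	/*
	 * Illustrative sketch, not part of this driver: the surrounding code refreshes the
	 * IPv4/L4 checksums through _rtk_rg_fwdengine_L3checksumUpdate() and
	 * _rtk_rg_fwdengine_L4checksumUpdate() rather than summing the whole packet again.
	 * The general technique behind such helpers is the incremental update of RFC 1624,
	 * HC' = ~(~HC + ~m + m'), shown here for one 16-bit word. csum16_update() is a
	 * hypothetical name used only in this example; a 32-bit field such as an IPv4
	 * address would be handled by applying it to the high and low halves in turn.
	 */
#if 0
#include <stdint.h>

	static uint16_t csum16_update(uint16_t old_csum, uint16_t old_word, uint16_t new_word)
	{
		/* RFC 1624, Eqn. 3: HC' = ~(~HC + ~m + m'), with end-around carry folding */
		uint32_t sum = (uint16_t)~old_csum;
		sum += (uint16_t)~old_word;
		sum += new_word;
		sum = (sum & 0xffff) + (sum >> 16); /* fold carries back into 16 bits */
		sum = (sum & 0xffff) + (sum >> 16);
		return (uint16_t)~sum;
	}
#endif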
//DEBUG("l4modify=%d, fwdDecision=%d ipv4frag=%d",pPktHdr->l4Modify,pPktHdr->fwdDecision,pPktHdr->ipv4FragPacket); if(pPktHdr->l4Modify && pPktHdr->fwdDecision!=RG_FWD_DECISION_ROUTING && pPktHdr->ipv4FragPacket==0 && (pPktHdr->l4Checksum==*pPktHdr->pL4Checksum || pPktHdr->isHairpinNat==1)) //20150514LUKE: update checksum only when checksum is not change { if(pPktHdr->isHairpinNat==1) { if(pPktHdr->tagif&TCP_TAGIF) *pPktHdr->pL4Checksum = htons(_rtk_rg_fwdengine_L4checksumUpdate(pPktHdr->tcpFlags.ack,*pPktHdr->pL4Checksum,pPktHdr->ipv4Dip,pPktHdr->dport,pPktHdr->tcpSeq,pPktHdr->tcpAck,ntohl(*pPktHdr->pIpv4Dip),ntohs(*pPktHdr->pDport),ntohl(*pPktHdr->pTcpSeq),ntohl(*pPktHdr->pTcpAck))); else if(pPktHdr->tagif&UDP_TAGIF) *pPktHdr->pL4Checksum = htons(_rtk_rg_fwdengine_L4checksumUpdate(0,*pPktHdr->pL4Checksum,pPktHdr->ipv4Dip,pPktHdr->dport,0,0,ntohl(*pPktHdr->pIpv4Dip),ntohs(*pPktHdr->pDport),0,0)); } else { //inbound, we use DIP to replace SIP, DPORT to replace SPORT!! if(pPktHdr->tagif&TCP_TAGIF) { *pPktHdr->pL4Checksum = htons(_rtk_rg_fwdengine_L4checksumUpdate(pPktHdr->tcpFlags.ack,*pPktHdr->pL4Checksum,pPktHdr->ipv4Dip,pPktHdr->dport,pPktHdr->tcpSeq,pPktHdr->tcpAck,ntohl(*pPktHdr->pIpv4Dip),ntohs(*pPktHdr->pDport),ntohl(*pPktHdr->pTcpSeq),ntohl(*pPktHdr->pTcpAck))); #ifdef CONFIG_RTL_CLIENT_MODE_SUPPORT *pPktHdr->pL4Checksum = htons(_rtk_rg_fwdengine_L4checksumUpdate(0,*pPktHdr->pL4Checksum,pPktHdr->ipv4Sip,pPktHdr->sport,0,0,ntohl(*pPktHdr->pIpv4Sip),ntohs(*pPktHdr->pSport),0,0)); #endif } else if(pPktHdr->tagif&UDP_TAGIF) { *pPktHdr->pL4Checksum = htons(_rtk_rg_fwdengine_L4checksumUpdate(0,*pPktHdr->pL4Checksum,pPktHdr->ipv4Dip,pPktHdr->dport,0,0,ntohl(*pPktHdr->pIpv4Dip),ntohs(*pPktHdr->pDport),0,0)); #ifdef CONFIG_RTL_CLIENT_MODE_SUPPORT *pPktHdr->pL4Checksum = htons(_rtk_rg_fwdengine_L4checksumUpdate(0,*pPktHdr->pL4Checksum,pPktHdr->ipv4Sip,pPktHdr->sport,0,0,ntohl(*pPktHdr->pIpv4Sip),ntohs(*pPktHdr->pSport),0,0)); #endif } } } /*if((pPktHdr->tagif&(TCP_TAGIF|UDP_TAGIF)) && pPktHdr->fwdDecision!=RG_FWD_DECISION_ROUTING && pPktHdr->ipv4FragPacket==0) //only recaculate the non-fragment packet, since fragment packet was recaculate in l34forward { TRACE("to WIFI1 software L4 checksum, ori_chksum is %x",*pPktHdr->pL4Checksum); *pPktHdr->pL4Checksum=0; *pPktHdr->pL4Checksum=htons(inet_chksum_pseudo(skb->data+pPktHdr->l4Offset,total_len-pPktHdr->l4Offset,ntohl(*pPktHdr->pIpv4Sip),ntohl(*pPktHdr->pIpv4Dip),pPktHdr->ipProtocol)); }*/ } #endif //TRACE("wlan_dev_idx=%d, flooding=%d\n",wlan_dev_idx,flooding); //TRACE("wlan_src_dev_idx=%d, pPktHdr->internalVlanID=%d, capable_dev_mask=0x%x\n",wlan_src_dev_idx,pPktHdr->internalVlanID, capable_dev_mask); for(i=0;i<MAX_WLAN_DEVICE_NUM;i++) // 1:root_intf, 4:vap_intfs, 8:wds_intfs { if(flooding==0) { i=wlan_dev_idx; } switch(i) { case RG_RET_MBSSID_MASTER_ROOT_INTF: if(wlan_root_netdev && (flooding==0 || wlan_src_dev_idx!=i)) //src device filtering when flooding { if(test_bit(__LINK_STATE_START, &wlan_root_netdev->state)) { if(pPktHdr->ingressLocation==RG_IGR_PROTOCOL_STACK || (capable_dev_mask&(0x1<<i))) { if(flooding==1) { new_skb=rtk_rg_skbCopyToPreAllocSkb(skb); if(new_skb==NULL) goto OUT_OF_MEM; } TRACE("%s to Master WIFI root intf",flooding?"Flood":"Send"); pPktHdr->egressWlanDevIdx=i; //20151210LUKE: keep idx for rate limit _rtk_rg_splitJumboSendToMasterWifi(pPktHdr,new_skb,wlan_root_netdev); if(flooding==0) return i; } } } break; case RG_RET_MBSSID_MASTER_VAP0_INTF: case RG_RET_MBSSID_MASTER_VAP1_INTF: case RG_RET_MBSSID_MASTER_VAP2_INTF: 
case RG_RET_MBSSID_MASTER_VAP3_INTF: if((wlan_vap_netdev[i-1]) && (flooding==0 || wlan_src_dev_idx!=i)) //src device filtering when flooding { if(test_bit(__LINK_STATE_START, &wlan_vap_netdev[i-1]->state)) { if(pPktHdr->ingressLocation==RG_IGR_PROTOCOL_STACK || (capable_dev_mask&(0x1<<i))) { if(flooding==1) { new_skb=rtk_rg_skbCopyToPreAllocSkb(skb); if(new_skb==NULL) goto OUT_OF_MEM; } TRACE("%s to Master WIFI vap[%d] intf",flooding?"Flood":"Send",i-1); pPktHdr->egressWlanDevIdx=i; //20151210LUKE: keep idx for rate limit _rtk_rg_splitJumboSendToMasterWifi(pPktHdr,new_skb,wlan_vap_netdev[i-1]); if(flooding==0) return i; } } } break; case RG_RET_MBSSID_MASTER_WDS0_INTF: case RG_RET_MBSSID_MASTER_WDS1_INTF: case RG_RET_MBSSID_MASTER_WDS2_INTF: case RG_RET_MBSSID_MASTER_WDS3_INTF: case RG_RET_MBSSID_MASTER_WDS4_INTF: case RG_RET_MBSSID_MASTER_WDS5_INTF: case RG_RET_MBSSID_MASTER_WDS6_INTF: case RG_RET_MBSSID_MASTER_WDS7_INTF: if((wlan_wds_netdev[i-5]) && (flooding==0 || wlan_src_dev_idx!=i)) //src device filtering when flooding { if(test_bit(__LINK_STATE_START, &wlan_wds_netdev[i-5]->state)) { if(pPktHdr->ingressLocation==RG_IGR_PROTOCOL_STACK || (capable_dev_mask&(0x1<<i))) { if(flooding==1) { new_skb=rtk_rg_skbCopyToPreAllocSkb(skb); if(new_skb==NULL) goto OUT_OF_MEM; } TRACE("%s to Master WIFI wds[%d] intf",flooding?"Flood":"Send",i-5); pPktHdr->egressWlanDevIdx=i; //20151210LUKE: keep idx for rate limit _rtk_rg_splitJumboSendToMasterWifi(pPktHdr,new_skb,wlan_wds_netdev[i-5]); if(flooding==0) return i; } } } break; #ifdef CONFIG_RTL_CLIENT_MODE_SUPPORT case RG_RET_MBSSID_MASTER_CLIENT_INTF: if(wlan_vxd_netdev && (flooding==0 || wlan_src_dev_idx!=i)) //src device filtering when flooding { if(test_bit(__LINK_STATE_START, &wlan_vxd_netdev->state)) { if(pPktHdr->ingressLocation==RG_IGR_PROTOCOL_STACK || (capable_dev_mask&(0x1<<i))) { if(flooding==1) { new_skb=rtk_rg_skbCopyToPreAllocSkb(skb); if(new_skb==NULL) goto OUT_OF_MEM; } TRACE("%s to Master WIFI vxd intf",flooding?"Flood":"Send"); pPktHdr->egressWlanDevIdx=i; //20151210LUKE: keep idx for rate limit _rtk_rg_splitJumboSendToMasterWifi(pPktHdr,new_skb,wlan_vxd_netdev); if(flooding==0) return i; } } } break; #endif #if defined(CONFIG_RG_WLAN_HWNAT_ACCELERATION) case RG_RET_MBSSID_SLAVE_ROOT_INTF: if (!wlan1_root_netdev) TRACE("no wlan1 interface"); if(wlan1_root_netdev && (flooding==0 || wlan_src_dev_idx!=i)) //src device filtering when flooding { if(test_bit(__LINK_STATE_START, &wlan1_root_netdev->state)) { if(pPktHdr->ingressLocation==RG_IGR_PROTOCOL_STACK || (capable_dev_mask&(0x1<<i))) { if(flooding==1) { new_skb=rtk_rg_skbCopyToPreAllocSkb(skb); if(new_skb==NULL) goto OUT_OF_MEM; } TRACE("%s to Slave WIFI root intf",flooding?"Flood":"Send"); pPktHdr->egressWlanDevIdx=i; //20151210LUKE: keep idx for rate limit _rtk_rg_splitJumboSendToMasterWifi(pPktHdr,new_skb,wlan1_root_netdev); if(flooding==0) return i; } } } break; case RG_RET_MBSSID_SLAVE_VAP0_INTF: case RG_RET_MBSSID_SLAVE_VAP1_INTF: case RG_RET_MBSSID_SLAVE_VAP2_INTF: case RG_RET_MBSSID_SLAVE_VAP3_INTF: if((wlan1_vap_netdev[i-WLAN_DEVICE_NUM-1]) && (flooding==0 || wlan_src_dev_idx!=i)) //src device filtering when flooding { if(test_bit(__LINK_STATE_START, &wlan1_vap_netdev[i-WLAN_DEVICE_NUM-1]->state)) { if(pPktHdr->ingressLocation==RG_IGR_PROTOCOL_STACK || (capable_dev_mask&(0x1<<i))) { if(flooding==1) { new_skb=rtk_rg_skbCopyToPreAllocSkb(skb); if(new_skb==NULL) goto OUT_OF_MEM; } TRACE("%s to Slave WIFI vap[%d] intf",flooding?"Flood":"Send",i-WLAN_DEVICE_NUM-1); 
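					/*
					 * Illustrative sketch, not part of this driver: every case of the surrounding
					 * switch repeats the same dispatch pattern. When flooding, the packet is first
					 * duplicated (rtk_rg_skbCopyToPreAllocSkb) and a copy is handed to each capable,
					 * link-up WLAN netdev; when unicast, the original skb is transmitted once and the
					 * device index is returned. The condensed form below uses hypothetical names
					 * (struct pkt, pkt_clone, pkt_send, dispatch) purely for illustration.
					 */
#if 0
#include <stdbool.h>
#include <stddef.h>

					struct pkt;                              /* stands in for struct sk_buff    */
					struct wifi_dev { bool link_up; };       /* stands in for a WLAN net_device */

					extern struct pkt *pkt_clone(struct pkt *p);              /* hypothetical copy helper */
					extern void pkt_send(struct wifi_dev *d, struct pkt *p);  /* hypothetical xmit helper */

					static int dispatch(struct pkt *p, struct wifi_dev **dev, int ndev,
					                    unsigned int capable_mask, int target, bool flooding)
					{
						int i;
						for (i = 0; i < ndev; i++) {
							if (!flooding)
								i = target; /* unicast: jump straight to the resolved device */
							if (dev[i] && dev[i]->link_up && (capable_mask & (1u << i))) {
								struct pkt *out = flooding ? pkt_clone(p) : p;
								if (out == NULL)
									return -1;      /* pre-allocated buffers exhausted */
								pkt_send(dev[i], out);
								if (!flooding)
									return i;       /* unicast: one transmission and done */
							}
							if (!flooding)
								return -1;          /* unicast target unusable */
						}
						return ndev;                /* flooded every capable device */
					}
#endif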
pPktHdr->egressWlanDevIdx=i; //20151210LUKE: keep idx for rate limit _rtk_rg_splitJumboSendToMasterWifi(pPktHdr,new_skb,wlan1_vap_netdev[i-WLAN_DEVICE_NUM-1]); if(flooding==0) return i; } } } break; case RG_RET_MBSSID_SLAVE_WDS0_INTF: case RG_RET_MBSSID_SLAVE_WDS1_INTF: case RG_RET_MBSSID_SLAVE_WDS2_INTF: case RG_RET_MBSSID_SLAVE_WDS3_INTF: case RG_RET_MBSSID_SLAVE_WDS4_INTF: case RG_RET_MBSSID_SLAVE_WDS5_INTF: case RG_RET_MBSSID_SLAVE_WDS6_INTF: case RG_RET_MBSSID_SLAVE_WDS7_INTF: if((wlan1_wds_netdev[i-WLAN_DEVICE_NUM-5]) && (flooding==0 || wlan_src_dev_idx!=i)) //src device filtering when flooding { if(test_bit(__LINK_STATE_START, &wlan1_wds_netdev[i-WLAN_DEVICE_NUM-5]->state)) { if(pPktHdr->ingressLocation==RG_IGR_PROTOCOL_STACK || (capable_dev_mask&(0x1<<i))) { if(flooding==1) { new_skb=rtk_rg_skbCopyToPreAllocSkb(skb); if(new_skb==NULL) goto OUT_OF_MEM; } TRACE("%s to Slave WIFI wds[%d] intf",flooding?"Flood":"Send",i-WLAN_DEVICE_NUM-5); pPktHdr->egressWlanDevIdx=i; //20151210LUKE: keep idx for rate limit _rtk_rg_splitJumboSendToMasterWifi(pPktHdr,new_skb,wlan1_wds_netdev[i-WLAN_DEVICE_NUM-5]); if(flooding==0) return i; } } } break; #ifdef CONFIG_RTL_CLIENT_MODE_SUPPORT case RG_RET_MBSSID_SLAVE_CLIENT_INTF: if(wlan1_vxd_netdev && (flooding==0 || wlan_src_dev_idx!=i)) //src device filtering when flooding { if(test_bit(__LINK_STATE_START, &wlan1_vxd_netdev->state)) { if(pPktHdr->ingressLocation==RG_IGR_PROTOCOL_STACK || (capable_dev_mask&(0x1<<i))) { if(flooding==1) { new_skb=rtk_rg_skbCopyToPreAllocSkb(skb); if(new_skb==NULL) goto OUT_OF_MEM; } TRACE("%s to Slave WIFI vxd intf",flooding?"Flood":"Send"); pPktHdr->egressWlanDevIdx=i; //20151210LUKE: keep idx for rate limit _rtk_rg_splitJumboSendToMasterWifi(pPktHdr,new_skb,wlan1_vxd_netdev); if(flooding==0) return i; } } } break; #endif #endif default: break; } if(flooding==0) { TRACE("Unknown where to send..."); return RG_RET_ENTRY_NOT_GET; } } if(flooding==1) { _rtk_rg_dev_kfree_skb_any(skb); return RG_RET_MBSSID_FLOOD_ALL_INTF; } return RG_RET_MBSSID_NOT_FOUND; OUT_OF_MEM: FIXME("out of mem(%s:%d)\n",__FUNCTION__,__LINE__); return RG_RET_MBSSID_NOT_FOUND; #else return RG_RET_MBSSID_NOT_FOUND; #endif } #endif #if defined(CONFIG_RG_ARP_AUTO_AGEOUT) && !defined(CONFIG_RG_FLOW_BASED_PLATFORM) #if defined(CONFIG_APOLLO) int _rtk_rg_arpTraffic_get(uint32 *trafficSet) { int32 i; #if defined(__KERNEL__) #if defined(CONFIG_RTL9600_SERIES) uint32 pValue; int newTable,oldTable; uint32 CLR_ARP_TRF[2] = {CLR_ARP_TRF0,CLR_ARP_TRF1}; /* parameter check */ RT_PARAM_CHK((NULL == trafficSet), RT_ERR_RG_NULL_POINTER); /*get current work table*/ ioal_mem32_read((uint32)(REG_NAT_CTRL),&pValue); oldTable = (pValue>>FIELD_ARP_TRF_SEL_OFFSET)&0x1; newTable = !oldTable; /*swap*/ ioal_mem32_read((uint32)(REG_NAT_CTRL),&pValue); if(newTable) pValue |= (0x1<<FIELD_ARP_TRF_SEL_OFFSET); else pValue &= ~(0x1<<FIELD_ARP_TRF_SEL_OFFSET); ioal_mem32_write((uint32)(REG_NAT_CTRL),pValue); while(1) { //Check swap is done ioal_mem32_read((uint32)(REG_NAT_CTRL),&pValue); if(!((pValue>>FIELD_ARP_TRF_CHG_OFFSET)&0x1)) break; } /*get*/ for(i=0;i<(MAX_ARP_HW_TABLE_SIZE>>5);i++) { if(rg_db.arpValidSet[i]) { if(oldTable) ioal_mem32_read((uint32)(REG_ARP_TRF1+(i<<2)),&trafficSet[i]); else ioal_mem32_read((uint32)(REG_ARP_TRF0+(i<<2)),&trafficSet[i]); //DEBUG("valid[%d]:%x traffic[%d]:%x\n",i,rg_db.arpValidSet[i],i,trafficSet[i]); } } /* Clear table */ ioal_mem32_read((uint32)(REG_NAT_CTRL),&pValue); pValue |= CLR_ARP_TRF[oldTable]<<FIELD_ARP_TRF_CLR_OFFSET; 
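	/*
	 * Illustrative sketch, not part of this driver: on RTL9600 the traffic bits are kept
	 * in two hardware tables. This function flips the table-select field (FIELD_ARP_TRF_SEL_OFFSET
	 * in REG_NAT_CTRL), polls the change flag until the swap takes effect, reads the table the
	 * hardware is no longer filling, and finally clears it (CLR_ARP_TRF0/1) for the next interval.
	 * The sketch below restates that select/poll/read/clear sequence with hypothetical register
	 * names (CTRL_REG, SEL_BIT, CHG_BIT, CLR_OLD_BIT, TRF_TABLE, reg_read, reg_write).
	 */
#if 0
#include <stdint.h>

	extern uint32_t reg_read(uint32_t addr);               /* hypothetical MMIO helpers */
	extern void     reg_write(uint32_t addr, uint32_t val);

#define CTRL_REG        0x0000
#define SEL_BIT         (1u << 0)     /* which table the hardware is currently filling */
#define CHG_BIT         (1u << 1)     /* set while the swap is still pending           */
#define CLR_OLD_BIT     (1u << 2)     /* clear the table we just read                  */
#define TRF_TABLE(t, i) (0x0100 + (t) * 0x80 + (i) * 4)

	static void traffic_bits_read(uint32_t *bits, int nwords)
	{
		int i;
		uint32_t ctrl = reg_read(CTRL_REG);
		int old = (ctrl & SEL_BIT) ? 1 : 0;

		/* 1. point the hardware at the other table */
		reg_write(CTRL_REG, old ? (ctrl & ~SEL_BIT) : (ctrl | SEL_BIT));

		/* 2. wait until the swap has taken effect */
		while (reg_read(CTRL_REG) & CHG_BIT)
			;

		/* 3. read the table the hardware is no longer updating */
		for (i = 0; i < nwords; i++)
			bits[i] = reg_read(TRF_TABLE(old, i));

		/* 4. clear it so the next interval starts from zero */
		reg_write(CTRL_REG, reg_read(CTRL_REG) | CLR_OLD_BIT);
	}
#endif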
ioal_mem32_write((uint32)REG_NAT_CTRL,pValue); #elif defined(CONFIG_RTL9602C_SERIES) /* parameter check */ RT_PARAM_CHK((NULL == trafficSet), RT_ERR_RG_NULL_POINTER); /*get*/ for(i=0;i<(MAX_ARP_HW_TABLE_SIZE>>5);i++) { if(rg_db.arpValidSet[i]) { ioal_mem32_read((uint32)(REG_ARP_TRF_BASE+(i<<2)),&trafficSet[i]); //DEBUG("valid[%d]:%x traffic[%d]:%x\n",i,rg_db.arpValidSet[i],i,trafficSet[i]); } } #endif #else //#if defined(__KERNEL__) for(i=0;i<(MAX_ARP_HW_TABLE_SIZE>>5);i++) { trafficSet[i]=0; } #endif return (RT_ERR_RG_OK); } int _rtk_rg_neighborTraffic_get(uint32 *trafficSet) { int32 i; /* parameter check */ RT_PARAM_CHK((NULL == trafficSet), RT_ERR_RG_NULL_POINTER); /*get*/ for(i=0;i<(MAX_IPV6_NEIGHBOR_HW_TABLE_SIZE>>5);i++) { if(rg_db.neighborValidSet[i]) { ioal_mem32_read((uint32)(REG_NEIGHBOR_TRF+(i<<2)),&trafficSet[i]); } } return (RT_ERR_RG_OK); } #endif #endif #if defined(CONFIG_RG_NAPT_AUTO_AGEOUT) #if defined(CONFIG_APOLLO) && !defined(CONFIG_RG_FLOW_BASED_PLATFORM) int _rtk_rg_naptTraffic_get(uint32 *validSet, uint32 *trafficSet) { int32 i; #if defined(__KERNEL__) #if defined(CONFIG_RTL9600_SERIES) uint32 pValue; int newTable,oldTable; uint32 CLR_L4_TRF[2] = {CLR_L4_TRF0,CLR_L4_TRF1}; /* parameter check */ RT_PARAM_CHK((NULL == validSet), RT_ERR_RG_NULL_POINTER); RT_PARAM_CHK((NULL == trafficSet), RT_ERR_RG_NULL_POINTER); /*get current work table*/ ioal_mem32_read((uint32)(REG_NAT_CTRL),&pValue); oldTable = (pValue>>FIELD_L4_TRF_SEL_OFFSET)&0x1; newTable = !oldTable; /*swap*/ ioal_mem32_read((uint32)(REG_NAT_CTRL),&pValue); if(newTable) pValue |= (0x1<<FIELD_L4_TRF_SEL_OFFSET); else pValue &= ~(0x1<<FIELD_L4_TRF_SEL_OFFSET); ioal_mem32_write((uint32)(REG_NAT_CTRL),pValue); while(1) { //Check swap is done ioal_mem32_read((uint32)(REG_NAT_CTRL),&pValue); if(!((pValue>>FIELD_L4_TRF_CHG_OFFSET)&0x1)) break; } /*get*/ for(i=0;i<(MAX_NAPT_OUT_HW_TABLE_SIZE>>5);i++) { if(validSet[i]) { if(oldTable) ioal_mem32_read((uint32)(REG_L4_TRF1+(i<<2)),&trafficSet[i]); else ioal_mem32_read((uint32)(REG_L4_TRF0+(i<<2)),&trafficSet[i]); //DEBUG("valid[%d]:%x traffic[%d]:%x\n",i,rg_db.naptValidSet[i],i,trafficSet[i]); } } /* Clear table */ ioal_mem32_read((uint32)(REG_NAT_CTRL),&pValue); pValue |= CLR_L4_TRF[oldTable]<<FIELD_L4_TRF_CLR_OFFSET; ioal_mem32_write((uint32)REG_NAT_CTRL,pValue); #elif defined(CONFIG_RTL9602C_SERIES) /* parameter check */ RT_PARAM_CHK((NULL == validSet), RT_ERR_RG_NULL_POINTER); RT_PARAM_CHK((NULL == trafficSet), RT_ERR_RG_NULL_POINTER); /*get*/ for(i=0;i<(MAX_NAPT_OUT_HW_TABLE_SIZE>>5);i++) { if(validSet[i]) { ioal_mem32_read((uint32)(REG_L4_TRF1+(i<<2)),&trafficSet[i]); //DEBUG("valid[%d]:%x traffic[%d]:%x\n",i,rg_db.naptValidSet[i],i,trafficSet[i]); } } #endif #else //#if defined(__KERNEL__) for(i=0;i<(MAX_NAPT_OUT_HW_TABLE_SIZE>>5);i++) { trafficSet[i]=0; } #endif return (RT_ERR_RG_OK); } #endif //end defined(CONFIG_APOLLO) void _rtk_rg_naptTimeoutCheckByOutIdx(int naptIdx) { int isTimeout = 0; #if defined(CONFIG_XDSL_ROMEDRIVER) if(naptIdx > MAX_NAPT_OUT_SW_TABLE_SIZE) { WARNING("naptIdx %d> MAX_NAPT_OUT_SW_TABLE_SIZE",naptIdx); return; } #endif if(rg_db.naptOut[naptIdx].state==INVALID) { //NO action } else { switch(rg_db.naptOut[naptIdx].state) { case INVALID: break; case TCP_CONNECTED: if(rg_db.naptOut[naptIdx].idleSecs>=rg_db.systemGlobal.tcp_long_timeout) isTimeout = 1; break; case UDP_CONNECTED: if(rg_db.naptOut[naptIdx].idleSecs>=rg_db.systemGlobal.udp_long_timeout) isTimeout = 1; break; case SYN_RECV: case SYN_ACK_RECV: case FIRST_FIN: case RST_RECV: case 
FIN_SEND_AND_RECV: case LAST_ACK: if(rg_db.naptOut[naptIdx].idleSecs>=rg_db.systemGlobal.tcp_short_timeout) isTimeout = 1; break; case UDP_FIRST: case UDP_SECOND: if(rg_db.naptOut[naptIdx].idleSecs>=rg_db.systemGlobal.udp_short_timeout) isTimeout = 1; break; } } //Timeout check if(isTimeout) { DEBUG("tcp time out naptIdx=%d idleSecs=%d udp_long_timeout=%d udp_short_timeout=%d tcp_short_timeout=%d udp_short_timeout=%d" ,naptIdx,rg_db.naptOut[naptIdx].idleSecs,rg_db.systemGlobal.udp_long_timeout,rg_db.systemGlobal.udp_short_timeout,rg_db.systemGlobal.tcp_short_timeout,rg_db.systemGlobal.udp_short_timeout); assert_ok(rtk_rg_apollo_naptConnection_del(naptIdx)); } } int _rtk_rg_naptTimeoutCheck(void) { #if defined(CONFIG_APOLLO) #if !defined(CONFIG_RG_FLOW_BASED_PLATFORM) int i=0,j=0; int inIdx,outIdx; int ret; int elapsedTime = (rg_db.systemGlobal.house_keep_sec*RTK_RG_MAX_HOUSE_KEEP_SELECT); //TICKTIME_PERIOD_SECOND; //secs uint32 notIdleSet[MAX_NAPT_OUT_HW_TABLE_SIZE/32]; rtk_rg_table_naptOut_linkList_t *pNaptOutList,*pNextNaptOutList; #if defined(CONFIG_RTL9600_SERIES) int nhIdx; #endif //rtlglue_printf("TICK at %ld . Check NAPT timeout.\n",jiffies); //Read traffic bits ret = _rtk_rg_naptTraffic_get(rg_db.naptValidSet,notIdleSet); if(ret!=RT_ERR_RG_OK) return ret; #if defined(CONFIG_RTL9600_SERIES) for(i=0;i<MAX_PPPOE_SW_TABLE_SIZE;i++) rg_db.pppoe[i].idleSecs+=elapsedTime; #endif rg_db.longestIdleSecs = 0; //Check idle for(i=0;i<(MAX_NAPT_OUT_HW_TABLE_SIZE>>5);i++) { if(rg_db.naptValidSet[i]) { for(j=0;j<32;j++) { if(rg_db.naptValidSet[i] & (0x1<<j)) { outIdx = (i<<5)+j; inIdx = rg_db.naptOut[outIdx].rtk_naptOut.hashIdx; if(notIdleSet[i]&(0x1<<j)) { rg_db.naptOut[outIdx].idleSecs = 0; rg_db.naptIn[inIdx].idleSecs = 0; #if defined(CONFIG_RTL9600_SERIES) nhIdx=rg_db.extip[rg_db.naptIn[inIdx].rtk_naptIn.extIpIdx].rtk_extip.nhIdx; if(rg_db.nexthop[nhIdx].rtk_nexthop.type==L34_NH_PPPOE) rg_db.pppoe[rg_db.nexthop[nhIdx].rtk_nexthop.pppoeIdx].idleSecs=0; #endif } else { #if defined(NAPT_TABLE_SIZE_DEBUG) assert(rg_db.naptOut[outIdx].idleSecs < (1 << MAX_NAPT_OUT_IDLESEC_WIDTH) - elapsedTime); assert(rg_db.naptIn[inIdx].idleSecs < (1 << MAX_NAPT_IN_IDLESEC_WIDTH) - elapsedTime); #endif rg_db.naptOut[outIdx].idleSecs += elapsedTime; rg_db.naptIn[inIdx].idleSecs += elapsedTime; //By the way, find the longest expired entry if(rg_db.naptOut[outIdx].idleSecs>rg_db.longestIdleSecs) { rg_db.longestIdleNaptIdx = outIdx; rg_db.longestIdleSecs = rg_db.naptOut[outIdx].idleSecs; } } _rtk_rg_naptTimeoutCheckByOutIdx(outIdx); } } } } //Check software Link List for(i=0;i<MAX_NAPT_OUT_HASH_SIZE;i++) { pNaptOutList=rg_db.pNaptOutHashListHead[i]; while(pNaptOutList!=NULL) { pNextNaptOutList=pNaptOutList->pNext; //20160707LUKE: avoid pNaptOutList been released to freePool cause pointer misconnection. 
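			/*
			 * Illustrative sketch, not part of this driver: the loop above caches pNextNaptOutList
			 * before _rtk_rg_naptTimeoutCheckByOutIdx() may delete the current node (see the
			 * 20160707 note). This is the usual "save the successor before freeing" idiom; the
			 * standalone version below uses a simple singly linked list with hypothetical names
			 * (struct node, age_out).
			 */
#if 0
#include <stdlib.h>

			struct node { struct node *next; int idle; };

			/* Delete expired nodes while walking the list: the successor is saved first. */
			static void age_out(struct node **head, int limit)
			{
				struct node **link = head;
				while (*link) {
					struct node *cur  = *link;
					struct node *next = cur->next;  /* save before cur may be freed */
					if (cur->idle >= limit) {
						*link = next;               /* unlink ...            */
						free(cur);                  /* ... then release      */
					} else {
						link = &cur->next;
					}
					/* 'next' stays valid even if 'cur' was just freed */
				}
			}
#endif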
outIdx = pNaptOutList->idx; inIdx = rg_db.naptOut[outIdx].rtk_naptOut.hashIdx; #if defined(NAPT_TABLE_SIZE_DEBUG) assert(rg_db.naptOut[outIdx].idleSecs < (1 << MAX_NAPT_OUT_IDLESEC_WIDTH) - elapsedTime); assert(rg_db.naptIn[inIdx].idleSecs < (1 << MAX_NAPT_IN_IDLESEC_WIDTH) - elapsedTime); #endif rg_db.naptOut[outIdx].idleSecs += elapsedTime; rg_db.naptIn[inIdx].idleSecs += elapsedTime; //By the way, find the longest expired entry if(rg_db.naptOut[outIdx].idleSecs>rg_db.longestIdleSecs) { rg_db.longestIdleNaptIdx = outIdx; rg_db.longestIdleSecs = rg_db.naptOut[outIdx].idleSecs; } _rtk_rg_naptTimeoutCheckByOutIdx(outIdx); pNaptOutList=pNextNaptOutList; } } return (RT_ERR_RG_OK); #else //defined(CONFIG_RG_FLOW_BASED_PLATFORM), only check sw napt int i=0; int inIdx,outIdx; int elapsedTime = (rg_db.systemGlobal.house_keep_sec*RTK_RG_MAX_HOUSE_KEEP_SELECT); //TICKTIME_PERIOD_SECOND; //secs rtk_rg_table_naptOut_linkList_t *pNaptOutList,*pNextNaptOutList; rg_db.longestIdleSecs = 0; //Check software Link List for(i=0;i<MAX_NAPT_OUT_HASH_SIZE;i++) { pNaptOutList=rg_db.pNaptOutHashListHead[i]; while(pNaptOutList!=NULL) { pNextNaptOutList=pNaptOutList->pNext; //20160707LUKE: avoid pNaptOutList been released to freePool cause pointer misconnection. outIdx = pNaptOutList->idx; inIdx = rg_db.naptOut[outIdx].rtk_naptOut.hashIdx; #if defined(NAPT_TABLE_SIZE_DEBUG) assert(rg_db.naptOut[outIdx].idleSecs < (1 << MAX_NAPT_OUT_IDLESEC_WIDTH) - elapsedTime); assert(rg_db.naptIn[inIdx].idleSecs < (1 << MAX_NAPT_IN_IDLESEC_WIDTH) - elapsedTime); #endif rg_db.naptOut[outIdx].idleSecs += elapsedTime; rg_db.naptIn[inIdx].idleSecs += elapsedTime; //By the way, find the longest expired entry if(rg_db.naptOut[outIdx].idleSecs>rg_db.longestIdleSecs) { rg_db.longestIdleNaptIdx = outIdx; rg_db.longestIdleSecs = rg_db.naptOut[outIdx].idleSecs; } _rtk_rg_naptTimeoutCheckByOutIdx(outIdx); pNaptOutList=pNextNaptOutList; } } return (RT_ERR_RG_OK); #endif #elif defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) //end if defined(CONFIG_APOLLO) int i,j,outIdx,nhIdx,inIdx,idleSec; rtl865x_tblAsicDrv_naptTcpUdpParam_t asic_naptOut; rtk_rg_table_naptOut_linkList_t *pNaptOutList; int elapsedTime = (rg_db.systemGlobal.house_keep_sec*RTK_RG_MAX_HOUSE_KEEP_SELECT); //TICKTIME_PERIOD_SECOND; //secs for(i=0;i<8;i++) rg_db.pppoe[i].idleSecs+=elapsedTime; rg_db.longestIdleSecs = 0; for(i=0;i<(MAX_NAPT_OUT_HW_TABLE_SIZE>>5);i++) { if(rg_db.naptValidSet[i]) { for(j=0;j<32;j++) { if(rg_db.naptValidSet[i] & (0x1<<j)) { outIdx = (i<<5)+j; inIdx = rg_db.naptOut[outIdx].rtk_naptOut.hashIdx; rtl8651_getAsicNaptTcpUdpTable(outIdx,&asic_naptOut); if(asic_naptOut.isValid){ //hw only aging long timeout //update idleSecs if(asic_naptOut.isTcp){ idleSec=rg_db.systemGlobal.tcp_long_timeout-asic_naptOut.ageSec; if(idleSec<0) idleSec=0; #if defined(NAPT_TABLE_SIZE_DEBUG) assert(idleSec < (1 << MAX_NAPT_OUT_IDLESEC_WIDTH)); assert(idleSec < (1 << MAX_NAPT_IN_IDLESEC_WIDTH)); #endif rg_db.naptOut[outIdx].idleSecs = idleSec; rg_db.naptIn[inIdx].idleSecs = idleSec; rg_db.pppoe[rg_db.naptIn[inIdx].rtk_naptIn.extIpIdx].idleSecs=idleSec; }else{ idleSec=rg_db.systemGlobal.udp_long_timeout-asic_naptOut.ageSec; if(idleSec<0) idleSec=0; #if defined(NAPT_TABLE_SIZE_DEBUG) assert(idleSec < (1 << MAX_NAPT_OUT_IDLESEC_WIDTH)); assert(idleSec < (1 << MAX_NAPT_IN_IDLESEC_WIDTH)); #endif rg_db.naptOut[outIdx].idleSecs = idleSec; rg_db.naptIn[inIdx].idleSecs = idleSec; nhIdx=rg_db.extip[rg_db.naptIn[inIdx].rtk_naptIn.extIpIdx].rtk_extip.nhIdx; 
if(rg_db.nexthop[nhIdx].rtk_nexthop.type==L34_NH_PPPOE) rg_db.pppoe[rg_db.nexthop[nhIdx].rtk_nexthop.pppoeIdx].idleSecs=idleSec; } // DEBUG("napt tick hw idx=%d rg_db.systemGlobal.tcp_long_timeout=%d asic_naptOut.ageSec=%d idleSecs=%d " // ,outIdx,rg_db.systemGlobal.tcp_long_timeout,asic_naptOut.ageSec,rg_db.naptOut[outIdx].idleSecs); }else{ //sw aging short timeout #if defined(NAPT_TABLE_SIZE_DEBUG) assert(rg_db.naptOut[outIdx].idleSecs < (1 << MAX_NAPT_OUT_IDLESEC_WIDTH) - elapsedTime); assert(rg_db.naptIn[inIdx].idleSecs < (1 << MAX_NAPT_IN_IDLESEC_WIDTH) - elapsedTime); #endif rg_db.naptOut[outIdx].idleSecs += elapsedTime; rg_db.naptIn[inIdx].idleSecs += elapsedTime; DEBUG("napt tick sw idx=%d elapsedTime=%d idleSecs=%d",outIdx,elapsedTime,rg_db.naptOut[outIdx].idleSecs); } //By the way, find the longest expired entry if(rg_db.naptOut[outIdx].idleSecs>rg_db.longestIdleSecs) { rg_db.longestIdleNaptIdx = outIdx; rg_db.longestIdleSecs = rg_db.naptOut[outIdx].idleSecs; } //delete timeout connection _rtk_rg_naptTimeoutCheckByOutIdx(outIdx); } } } } //Check software Link List for(i=0;i<MAX_NAPT_OUT_HASH_SIZE;i++) { pNaptOutList=rg_db.pNaptOutHashListHead[i]; while(pNaptOutList!=NULL) { outIdx = pNaptOutList->idx; inIdx = rg_db.naptOut[outIdx].rtk_naptOut.hashIdx; #if defined(NAPT_TABLE_SIZE_DEBUG) assert(rg_db.naptOut[outIdx].idleSecs < (1 << MAX_NAPT_OUT_IDLESEC_WIDTH) - elapsedTime); assert(rg_db.naptIn[inIdx].idleSecs < (1 << MAX_NAPT_IN_IDLESEC_WIDTH) - elapsedTime); #endif rg_db.naptOut[outIdx].idleSecs += elapsedTime; rg_db.naptIn[inIdx].idleSecs += elapsedTime; //By the way, find the longest expired entry if(rg_db.naptOut[outIdx].idleSecs>rg_db.longestIdleSecs) { rg_db.longestIdleNaptIdx = outIdx; rg_db.longestIdleSecs = rg_db.naptOut[outIdx].idleSecs; } _rtk_rg_naptTimeoutCheckByOutIdx(outIdx); pNaptOutList=pNaptOutList->pNext; } } return (RT_ERR_RG_OK); #endif //end elif defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) } #endif #ifdef CONFIG_RG_IPV6_STATEFUL_ROUTING_SUPPORT void _rtk_rg_v6StatefulTimeoutCheckByList(rtk_rg_ipv6_layer4_linkList_t *pIPv6ConnList) { int isTimeout = 0; if(pIPv6ConnList->state==INVALID) { //NO action but fragment if(pIPv6ConnList->isFrag) { if(time_after_eq(jiffies,pIPv6ConnList->beginIdleTime+(FRAGMENT_LIST_TIMEOUT*TICKTIME_PERIOD))) { DEBUG("FragmentList timeout!free it..."); isTimeout = 1; } } } else { switch(pIPv6ConnList->state) { case INVALID: break; case TCP_CONNECTED: if(pIPv6ConnList->idleSecs>=rg_db.systemGlobal.tcp_long_timeout) isTimeout = 1; break; case UDP_CONNECTED: if(pIPv6ConnList->idleSecs>=rg_db.systemGlobal.udp_long_timeout) isTimeout = 1; break; case SYN_RECV: case SYN_ACK_RECV: case FIRST_FIN: case RST_RECV: case FIN_SEND_AND_RECV: case LAST_ACK: if(pIPv6ConnList->idleSecs>=rg_db.systemGlobal.tcp_short_timeout) isTimeout = 1; break; case UDP_FIRST: case UDP_SECOND: if(pIPv6ConnList->idleSecs>=rg_db.systemGlobal.udp_short_timeout) isTimeout = 1; break; } } //Timeout check if(isTimeout) { //DEBUG("ipv6Conn[%p](state=%d) is timeout(idleSecs=%d) ",pIPv6ConnList,pIPv6ConnList->state,pIPv6ConnList->idleSecs) //------------------ Critical Section start -----------------------// rg_lock(&rg_kernel.ipv6StatefulLock); _rtk_rg_fwdEngine_ipv6ConnList_del(pIPv6ConnList); //------------------ Critical Section End -----------------------// rg_unlock(&rg_kernel.ipv6StatefulLock); } } int _rtk_rg_v6StatefulTimeoutCheck(void) { int i=0,indexCheck=0,j=0; rtk_rg_ipv6_layer4_linkList_t *pTmpConnList, *pNextConnList; rtk_rg_ipv6_layer4_linkList_t 
*pNextNextConnList;//for avoid specail case int elapsedTime = (rg_db.systemGlobal.house_keep_sec*RTK_RG_MAX_HOUSE_KEEP_SELECT); //TICKTIME_PERIOD_SECOND; //secs if(atomic_read(&rg_db.systemGlobal.v6StatefulConnectionNum) > v6StatefulHOUSE_THRESHOLD) { //Each time we check v6StatefulHOUSE_KEEP_NUM entries if(rg_db.systemGlobal.v6StatefulHouseKeepIndex > MAX_IPV6_STATEFUL_TABLE_SIZE-v6StatefulHOUSE_KEEP_NUM) { FIXME("the v6StatefulHouseKeepIndex is %d",rg_db.systemGlobal.v6StatefulHouseKeepIndex); rg_db.systemGlobal.v6StatefulHouseKeepIndex=0; } WARNING("the v6StatefulHouseKeepIndex is %d",rg_db.systemGlobal.v6StatefulHouseKeepIndex); indexCheck=rg_db.systemGlobal.v6StatefulHouseKeepIndex; for(i=0;i<v6StatefulHOUSE_KEEP_NUM;i++) { if(rg_db.ipv6Layer4FreeList[indexCheck].valid) { rg_db.ipv6Layer4FreeList[indexCheck].idleSecs += elapsedTime; DEBUG("ConnList[%p] TimeoutCheck",&rg_db.ipv6Layer4FreeList[indexCheck]); } indexCheck++; } rg_db.systemGlobal.v6StatefulHouseKeepIndex=indexCheck; //WARNING("after check, the v6StatefulHouseKeepIndex is %d",rg_db.systemGlobal.v6StatefulHouseKeepIndex); } else { //Lookup all valid connections here for(i=0;i<MAX_IPV6_STATEFUL_HASH_HEAD_SIZE;i++) { if(!list_empty(&rg_db.ipv6Layer4HashListHead[i])) { //redo_listHead: //DEBUG("Check for rg_db.ipv6Layer4HashListHead[%d]:%p",i,&rg_db.ipv6Layer4HashListHead[i]); j=0; list_for_each_entry_safe(pTmpConnList,pNextConnList,&rg_db.ipv6Layer4HashListHead[i],layer4_list) { if(j>MAX_IPV6_STATEFUL_TABLE_SIZE){//avoid infinite loop WARNING("ConnList timeout check may have infinite loop @ rg_db.ipv6Layer4HashListHead[%d]!!!",i); break; } DEBUG("=======================ConnList Timeout check pTmpConnList[%p]==============================",pTmpConnList); DEBUG("rg_db.ipv6Layer4HashListHead[%d]:%p rg_db.ipv6Layer4HashListHead->prve=%p rg_db.ipv6Layer4HashListHead->next=%p",i,&rg_db.ipv6Layer4HashListHead[i],rg_db.ipv6Layer4HashListHead[i].prev,rg_db.ipv6Layer4HashListHead[i],rg_db.ipv6Layer4HashListHead[i].next); DEBUG("pTmpConnList[%p]: pTmpConnList->pPair_list=%p pTmpConnList->prev=%p pTmpConnList->next=%p ",pTmpConnList,pTmpConnList->pPair_list,pTmpConnList->layer4_list.prev,pTmpConnList->layer4_list.next); DEBUG("pNextConnList[%p]: pNextConnList->pPair_list=%p pNextConnList->prev=%p pNextConnList->next=%p",pNextConnList,pNextConnList->pPair_list,pNextConnList->layer4_list.prev,pNextConnList->layer4_list.next); if((pTmpConnList->pPair_list)==pNextConnList){ //pTmpConnList->pPair_list is pTmpConnList->layer4_list.next, the two connList may delete at the same time. So we should move the next checked connList to NextNext! 
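					/*
					 * Illustrative sketch, not part of this driver: deleting one IPv6 stateful connection
					 * also removes its paired reverse-direction entry (pPair_list). When that pair happens
					 * to be the node list_for_each_entry_safe() already saved as "next", the iterator must
					 * be advanced one further hop (pNextNextConnList) or it would walk freed memory.
					 * The helper below restates only that decision, with hypothetical names
					 * (struct conn, next_safe).
					 */
#if 0
					struct conn {
						struct conn *next;   /* bucket chaining                           */
						struct conn *pair;   /* reverse-direction entry, deleted together */
					};

					/* Pick the node to resume from after 'cur' (and possibly cur->pair) is freed. */
					static struct conn *next_safe(struct conn *cur, struct conn *saved_next)
					{
						if (cur->pair == saved_next)
							return saved_next->next; /* pair is the saved next: skip one further */
						return saved_next;
					}
#endif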
pNextNextConnList = (rtk_rg_ipv6_layer4_linkList_t *)pNextConnList->layer4_list.next; DEBUG("pTmpConnList->pPair_list is pNextConnList, pNextNextConnList[%p]",pNextNextConnList); pTmpConnList->idleSecs += elapsedTime; DEBUG("ConnList[%p] TimeoutCheck",pTmpConnList); _rtk_rg_v6StatefulTimeoutCheckByList(pTmpConnList); //change the original pTmpConnList, pNextConnList to pNextNextConnList, then after this loop the both of them will be change by macro list_for_each_entry_safe() pTmpConnList = pNextNextConnList; pNextConnList = pNextNextConnList; DEBUG(" After jump to pNextNextConnList:"); DEBUG(" rg_db.ipv6Layer4HashListHead[%d]:%p rg_db.ipv6Layer4HashListHead->prve=%p rg_db.ipv6Layer4HashListHead->next=%p",i,&rg_db.ipv6Layer4HashListHead[i],rg_db.ipv6Layer4HashListHead[i].prev,rg_db.ipv6Layer4HashListHead[i],rg_db.ipv6Layer4HashListHead[i].next); DEBUG(" pTmpConnList[%p]: pTmpConnList->pPair_list=%p pTmpConnList->prev=%p pTmpConnList->next=%p ",pTmpConnList,pTmpConnList->pPair_list,pTmpConnList->layer4_list.prev,pTmpConnList->layer4_list.next); DEBUG(" pNextConnList[%p]: pNextConnList->pPair_list=%p pNextConnList->prev=%p pNextConnList->next=%p",pNextConnList,pNextConnList->pPair_list,pNextConnList->layer4_list.prev,pNextConnList->layer4_list.next); if(pTmpConnList == (rtk_rg_ipv6_layer4_linkList_t *)(&rg_db.ipv6Layer4HashListHead[i])){ DEBUG("Check ipv6Layer4HashListHead[%d] finished.",i); break; } }else{ //normal case pTmpConnList->idleSecs += elapsedTime; DEBUG("ConnList[%p] TimeoutCheck",pTmpConnList); _rtk_rg_v6StatefulTimeoutCheckByList(pTmpConnList); } j++; } } } } return (RT_ERR_RG_OK); } int _rtk_rg_v6FragQueueTimeoutCheck(void) { int i; for(i=0;i<MAX_IPV6_FRAGMENT_QUEUE_SIZE;i++) { //------------------ Critical Section start -----------------------// rg_lock(&rg_kernel.ipv6FragQueueLock); if(rg_db.ipv6FragmentQueue[i].occupied && time_after_eq(jiffies,rg_db.ipv6FragmentQueue[i].queue_time+(FRAGMENT_QUEUE_TIMEOUT*TICKTIME_PERIOD))) { DEBUG("v6FragQueue[%d] timeout!! free it...",i); _rtk_rg_dev_kfree_skb_any(rg_db.ipv6FragmentQueue[i].queue_skb); rg_db.systemGlobal.ipv6FragmentQueueNum--; rg_db.ipv6FragmentQueue[i].occupied = 0; rg_db.ipv6FragmentQueue[i].queue_time = 0; } //------------------ Critical Section End -----------------------// rg_unlock(&rg_kernel.ipv6FragQueueLock); } return (RT_ERR_RG_OK); } #endif int _rtk_rg_v4FragQueueTimeoutCheck(void) { int i; if(rg_db.systemGlobal.ipv4FragmentQueueNum) { for(i=0;i<MAX_IPV4_FRAGMENT_QUEUE_SIZE;i++) { //------------------ Critical Section start -----------------------// rg_lock(&rg_kernel.ipv4FragQueueLock); if(rg_db.ipv4FragmentQueue[i].occupied && time_after_eq(jiffies,rg_db.ipv4FragmentQueue[i].queue_time+(FRAGMENT_QUEUE_TIMEOUT*TICKTIME_PERIOD))) { DEBUG("v4FragQueue[%d] timeout!! 
free it...",i); _rtk_rg_dev_kfree_skb_any(rg_db.ipv4FragmentQueue[i].queue_skb); rg_db.systemGlobal.ipv4FragmentQueueNum--; rg_db.ipv4FragmentQueue[i].occupied = 0; rg_db.ipv4FragmentQueue[i].queue_time = 0; } //------------------ Critical Section End -----------------------// rg_unlock(&rg_kernel.ipv4FragQueueLock); } } return (RT_ERR_RG_OK); } int _rtk_rg_upnpTimeoutCheck(void) { int i; for(i=0;i<MAX_UPNP_SW_TABLE_SIZE;i++) { if(rg_db.upnp[i].timeout==0) continue; if(rg_db.upnp[i].valid) { rg_db.upnp[i].idle+=rg_db.systemGlobal.house_keep_sec; rg_db.upnp[i].conn_create_idle+=rg_db.systemGlobal.house_keep_sec; if(rg_db.upnp[i].idle >= rg_db.upnp[i].timeout) ASSERT_EQ(rtk_rg_apollo_upnpConnection_del(i),RT_ERR_RG_OK); } } return (RT_ERR_RG_OK); } int _rtk_rg_pppoeIdleTimeRefresh(void) { #if defined(CONFIG_APOLLO) && !defined(CONFIG_RG_FLOW_BASED_PLATFORM) int i; uint32 pValue; ioal_mem32_read((uint32)(REG_PPPOE_TRF),&pValue); for(i=0;i<MAX_PPPOE_HW_TABLE_SIZE;i++){ if(((pValue&0xff)&(1<<i))!=0x0){ //there is traffic in some PPPoE Interfcae rg_db.pppoe[i].idleSecs = 0; //clear idle time }else{ rg_db.pppoe[i].idleSecs+=rg_db.systemGlobal.house_keep_sec; //add idle time counter } } #elif defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) // xdsl do nothing this function #endif return (RT_ERR_RG_OK); } #if defined(CONFIG_RG_ARP_AUTO_AGEOUT) #if !defined(CONFIG_RG_FLOW_BASED_PLATFORM) int _rtk_rg_arpTimeoutCheck(void) { #if defined(CONFIG_APOLLO) int i=0,j=0; int idx,ret; int elapsedTime = (rg_db.systemGlobal.house_keep_sec*RTK_RG_MAX_HOUSE_KEEP_SELECT); //TICKTIME_PERIOD_SECOND; //secs uint32 notIdleSet[(MAX_ARP_HW_TABLE_SIZE/32)]={0}; rtk_rg_table_l3_t *pL3; rtk_rg_arp_request_t arpReq; //20141013LUKE: if arp_traffic_off !=0, we should check ARP traffic table in hw. if(!rg_db.systemGlobal.arp_traffic_off) { //TRACE("TICK at %ld . Check ARP timeout.\n",jiffies); //Read traffic bits ret = _rtk_rg_arpTraffic_get(¬IdleSet[0]); if(ret!=RT_ERR_RG_OK) return ret; } //Check idle for(i=0;i<(MAX_ARP_HW_TABLE_SIZE>>5);i++) { if(rg_db.arpValidSet[i]) { //TRACE("Not Idle Set[%d]:%x\n",i,notIdleSet[i]); #if defined(CONFIG_APOLLO_FPGA_PHY_TEST) for(j=0;j<MAX_ARP_HW_TABLE_SIZE_FPGA;j++) #else for(j=0;j<32;j++) #endif { if(rg_db.arpValidSet[i] & (0x1<<j)) { idx = (i<<5)+j; if(rg_db.arp[idx].staticEntry)continue; //for static ARP we should not refresh or delete it. 
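					/*
					 * Illustrative sketch, not part of this driver: the code below implements the ARP
					 * aging policy. A set traffic bit (notIdleSet) clears idleSecs and sendReqCount;
					 * otherwise idle time accumulates, and once it reaches arp_timeout the entry is
					 * re-probed with an ARP request up to arp_max_request_count times before it is
					 * finally deleted. The compact restatement below uses hypothetical names
					 * (struct arp_age, arp_tick, ARP_KEEP/ARP_PROBE/ARP_DELETE).
					 */
#if 0
#include <stdbool.h>

					struct arp_age { unsigned int idle; unsigned int probes; };

					enum arp_action { ARP_KEEP, ARP_PROBE, ARP_DELETE };

					/* One housekeeping tick for a dynamic ARP entry. */
					static enum arp_action arp_tick(struct arp_age *e, bool saw_traffic, unsigned int elapsed,
					                                unsigned int timeout, unsigned int max_probes)
					{
						if (saw_traffic) {            /* hardware traffic bit was set */
							e->idle = 0;
							e->probes = 0;
							return ARP_KEEP;
						}
						e->idle += elapsed;
						if (e->idle < timeout)
							return ARP_KEEP;
						if (e->probes >= max_probes)  /* probed enough times, give up */
							return ARP_DELETE;
						e->probes++;                  /* send another ARP request     */
						return ARP_PROBE;
					}
#endif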
if(notIdleSet[i]&(0x1<<j)) { //20141009LUKE: update idleSecs and sendReqCount rg_db.arp[idx].idleSecs = 0; rg_db.arp[idx].sendReqCount = 0; } else rg_db.arp[idx].idleSecs += elapsedTime; if(rg_db.arp[idx].idleSecs>=rg_db.systemGlobal.arp_timeout) { if(rg_db.arp[idx].sendReqCount>=rg_db.systemGlobal.arp_max_request_count) { TRACE("arp idx [%d] timeout!!\n",idx); ret=rtk_rg_apollo_arpEntry_del(idx); if(ret==RT_ERR_RG_ARP_NOT_FOUND) rg_db.arpValidSet[idx>>5] &= ~(0x1<<(idx&31)); else assert_ok(ret); } else { TRACE("arp idx [%d] stale..",idx); pL3=&rg_db.l3[rg_db.arp[idx].routingIdx]; if(pL3->rtk_l3.process==L34_PROCESS_ARP) { TRACE("ask %x from %x for live!\n",pL3->gateway_ip,rg_db.arp[idx].ipv4Addr); arpReq.finished=0; arpReq.gwMacReqCallBack=NULL; arpReq.reqIp=rg_db.arp[idx].ipv4Addr; _rtk_rg_arpGeneration(pL3->rtk_l3.netifIdx, pL3->gateway_ip,&arpReq); rg_db.arp[idx].sendReqCount++; } else { TRACE("none-ARP routing, do nothing\n"); } } } } } } } return (RT_ERR_RG_OK); #elif defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) //end #if defined(CONFIG_APOLLO) int i=0,j=0; int idx,ret; int elapsedTime = (rg_db.systemGlobal.house_keep_sec*RTK_RG_MAX_HOUSE_KEEP_SELECT); //TICKTIME_PERIOD_SECOND; //secs rtk_rg_table_l3_t *pL3; rtk_rg_arp_request_t arpReq; rtl865x_tblAsicDrv_arpParam_t asicArp; //check elapsedTime if(elapsedTime>31){ WARNING("XDSL WARNING elapsedTime LARGER THAN MAX ASIC AGING LIMITATION"); } //Check idle for(i=0;i<(MAX_ARP_HW_TABLE_SIZE>>5);i++) { if(rg_db.arpValidSet[i]) { //TRACE("Not Idle Set[%d]:%x\n",i,notIdleSet[i]); for(j=0;j<32;j++) { if(rg_db.arpValidSet[i] & (0x1<<j)) { idx = (i<<5)+j; if(rg_db.arp[idx].staticEntry)continue; //for static ARP we should not refresh or delete it. rtl8651_getAsicArp(idx, &asicArp) ; //DEBUG("[%d]ASIC TIME=%d SOFTWARE TIME=%d\n",idx,31-asicArp.aging,elapsedTime); /* Update idelSecs Boyce 2015-08-24*/ // (SOFTWARE TIME)-(ASIC TIME) >0 -> non-idle // (SOFTWARE TIME)-(ASIC TIME) <=0 -> idel if( (uint32)(elapsedTime) <= (31-asicArp.aging) ) {//idel rg_db.arp[idx].idleSecs += elapsedTime; } else {//non-idle rg_db.arp[idx].idleSecs = 0; rg_db.arp[idx].sendReqCount = 0; } //DEBUG("idleSecs =%d sendReqCount=%d\n",rg_db.arp[idx].idleSecs ,rg_db.arp[idx].sendReqCount); asicArp.aging=31; rtl8651_setAsicArp(idx, &asicArp) ; if(rg_db.arp[idx].idleSecs>=rg_db.systemGlobal.arp_timeout) { if(rg_db.arp[idx].sendReqCount>=rg_db.systemGlobal.arp_max_request_count) { TRACE("arp idx [%d] timeout!!\n",idx); ret=rtk_rg_apollo_arpEntry_del(idx); if(ret==RT_ERR_RG_ARP_NOT_FOUND) rg_db.arpValidSet[idx>>5] &= ~(0x1<<(idx&31)); else assert_ok(ret); } else { TRACE("arp idx [%d] stale..",idx); pL3=&rg_db.l3[(uint8)(rg_db.arp[idx].routingIdx)]; if(pL3->rtk_l3.process==L34_PROCESS_ARP) { TRACE("ask %x from %x for live!\n",pL3->gateway_ip,rg_db.arp[idx].ipv4Addr); arpReq.finished=0; arpReq.gwMacReqCallBack=NULL; arpReq.reqIp=rg_db.arp[idx].ipv4Addr; _rtk_rg_arpGeneration(pL3->rtk_l3.netifIdx, pL3->gateway_ip,&arpReq); rg_db.arp[idx].sendReqCount++; } else { TRACE("none-ARP routing, do nothing\n"); } } } } } } } return (RT_ERR_RG_OK); #endif //end elif defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) } #endif int _rtk_rg_swArpTimeoutCheck(void) { int i; int elapsedTime = (rg_db.systemGlobal.house_keep_sec*RTK_RG_MAX_HOUSE_KEEP_SELECT); //TICKTIME_PERIOD_SECOND; //secs rtk_rg_arp_linkList_t *pArpList,*pNextArpList; rtk_rg_table_l3_t *pL3; rtk_rg_arp_request_t arpReq; for(i=0;i<MAX_ARP_SW_TABLE_HEAD;i++) { if(!list_empty(&rg_db.softwareArpTableHead[i])) //not empty, check each entry { 
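			/*
			 * Illustrative sketch, not part of this driver: the hardware-table scans above
			 * (arpValidSet / naptValidSet / neighborValidSet) walk a validity bitmap one 32-bit
			 * word at a time, skip empty words, and rebuild the entry index as (i<<5)+j.
			 * The generic form of that iteration is shown below with hypothetical names
			 * (for_each_valid, visit).
			 */
#if 0
#include <stdint.h>

			/* Visit every set bit in a validity bitmap; entry index = word*32 + bit. */
			static void for_each_valid(const uint32_t *valid, int nwords,
			                           void (*visit)(int idx, void *ctx), void *ctx)
			{
				int w, b;
				for (w = 0; w < nwords; w++) {
					if (!valid[w])
						continue;                 /* whole word empty: skip 32 entries */
					for (b = 0; b < 32; b++)
						if (valid[w] & (1u << b))
							visit((w << 5) + b, ctx);
				}
			}
#endif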
list_for_each_entry_safe(pArpList,pNextArpList,&rg_db.softwareArpTableHead[i],arp_list) { #if defined(CONFIG_RTL9602C_SERIES) { uint16 arpIdx; int ret, j; //Move sw arp entry of zero idle time to hw arp table if( !list_empty(&rg_db.hardwareArpFreeListHead) && (rg_db.arp[pArpList->idx].idleSecs==0) && (rg_db.l3[rg_db.arp[pArpList->idx].routingIdx].rtk_l3.process==L34_PROCESS_ARP)) { DEBUG("Move sw arp to hw arp table!!\n"); ret = _rtk_rg_hardwareArpTableAdd(rg_db.arp[pArpList->idx].routingIdx, rg_db.arp[pArpList->idx].ipv4Addr, rg_db.arp[pArpList->idx].rtk_arp.nhIdx, rg_db.arp[pArpList->idx].staticEntry, &arpIdx); if(ret == RT_ERR_RG_OK) { for(j=0; j<MAX_NAPT_SHORTCUT_SIZE; j++) { if(rg_db.naptShortCut[j].sip==0) continue; if(rg_db.naptShortCut[j].sip==rg_db.arp[pArpList->idx].ipv4Addr) { #if defined(SHORTCUT_BITFILED_DEBUG) assert(arpIdx < (1 << (BFW_ARPIDX-1))); #endif rg_db.naptShortCut[j].arpIdx = arpIdx; DEBUG("Update arpIdx[%d] of shortcut[%d].\n", rg_db.naptShortCut[j].arpIdx, j); } } _rtk_rg_softwareArpTableDel(pArpList); if(rg_db.arp[arpIdx].staticEntry==0) rg_db.arp[arpIdx].idleSecs += elapsedTime; continue; } } } #endif if(rg_db.arp[pArpList->idx].staticEntry) continue; //for static ARP we should not refresh or delete it. rg_db.arp[pArpList->idx].idleSecs+=elapsedTime; if(rg_db.arp[pArpList->idx].idleSecs>=rg_db.systemGlobal.arp_timeout) { if(rg_db.arp[pArpList->idx].sendReqCount>=rg_db.systemGlobal.arp_max_request_count) { //Clear ARP and LUT table entry TRACE("arp idx [%d] timeout!!\n",pArpList->idx); //20140529: l2 entry is deleted when pppoe gateway nexthop arp timeout. //assert_ok(rtk_rg_macEntry_del(rg_db.arp[pArpList->idx].rtk_arp.nhIdx)); _rtk_rg_softwareArpTableDel(pArpList); } else { TRACE("arp idx [%d] stale..",pArpList->idx); pL3=&rg_db.l3[(uint8)(rg_db.arp[pArpList->idx].routingIdx)]; //20141013LUKE: since software ARP will has routing type != ARP, so we just send out ARP request without check type. TRACE("ask %x from %x for live!\n",pL3->gateway_ip,rg_db.arp[pArpList->idx].ipv4Addr); arpReq.finished=0; arpReq.gwMacReqCallBack=NULL; arpReq.reqIp=rg_db.arp[pArpList->idx].ipv4Addr; _rtk_rg_arpGeneration(pL3->rtk_l3.netifIdx, pL3->gateway_ip,&arpReq); rg_db.arp[pArpList->idx].sendReqCount++; } } } } } return (RT_ERR_RG_OK); } int _rtk_rg_neighborTimeoutCheck(void) { #if defined(CONFIG_APOLLO) #if !defined(CONFIG_RG_FLOW_BASED_PLATFORM) int i=0,j=0; int idx,ret; int elapsedTime = (rg_db.systemGlobal.house_keep_sec*RTK_RG_MAX_HOUSE_KEEP_SELECT); //TICKTIME_PERIOD_SECOND; //secs uint32 notIdleSet[(MAX_IPV6_NEIGHBOR_HW_TABLE_SIZE/32)]={0}; //rtlglue_printf("TICK at %ld . 
Check ARP timeout.\n",jiffies); //Read traffic bits ret = _rtk_rg_neighborTraffic_get(¬IdleSet[0]); if(ret!=RT_ERR_RG_OK) return ret; //Check idle for(i=0;i<(MAX_IPV6_NEIGHBOR_HW_TABLE_SIZE>>5);i++) { if(rg_db.neighborValidSet[i]) { //rtlglue_printf("Not Idle Set[%d]:%x\n",i,notIdleSet[i]); for(j=0;j<32;j++) { if(rg_db.neighborValidSet[i] & (0x1<<j)) { idx = (i<<5)+j; if(rg_db.v6neighbor[idx].staticEntry) continue; if(notIdleSet[i]&(0x1<<j)) rg_db.v6neighbor[idx].idleSecs = 0; else rg_db.v6neighbor[idx].idleSecs += elapsedTime; if(rg_db.v6neighbor[idx].idleSecs>=rg_db.systemGlobal.neighbor_timeout && rg_db.v6neighbor[idx].staticEntry==0) { //rtlglue_printf("arp idx [%d] timeout!!\n",idx); ret=rtk_rg_apollo_neighborEntry_del(idx); if(ret==RT_ERR_RG_ARP_NOT_FOUND) rg_db.neighborValidSet[idx>>5] &= ~(0x1<<(idx&31)); else assert_ok(ret); } } } } } #else //defined(CONFIG_RG_FLOW_BASED_PLATFORM), only check sw neighbor int idx=0; int elapsedTime = (rg_db.systemGlobal.house_keep_sec*RTK_RG_MAX_HOUSE_KEEP_SELECT); //TICKTIME_PERIOD_SECOND; //secs for(idx=0; idx<MAX_IPV6_NEIGHBOR_SW_TABLE_SIZE; idx++) { if(rg_db.v6neighbor[idx].staticEntry) continue; if(rg_db.v6neighbor[idx].rtk_v6neighbor.valid==0) continue; rg_db.v6neighbor[idx].idleSecs += elapsedTime; if(rg_db.v6neighbor[idx].idleSecs>=rg_db.systemGlobal.neighbor_timeout) { TRACE("neighbor idx [%d] timeout!!", idx); assert_ok(rtk_rg_apollo_neighborEntry_del(idx)); } } #endif #elif defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) && defined(CONFIG_RTL_8685S_HWNAT) int i=0,j=0; int idx,ret; int elapsedTime = (rg_db.systemGlobal.house_keep_sec*RTK_RG_MAX_HOUSE_KEEP_SELECT); //TICKTIME_PERIOD_SECOND; //secs rtl8198C_tblAsicDrv_v6NeighParam_t asicArpv6; //Check idle for(i=0;i<(MAX_IPV6_NEIGHBOR_HW_TABLE_SIZE>>5);i++) { if(rg_db.neighborValidSet[i]) { //rtlglue_printf("Not Idle Set[%d]:%x\n",i,notIdleSet[i]); for(j=0;j<32;j++) { if(rg_db.neighborValidSet[i] & (0x1<<j)) { idx = (i<<5)+j; if(rg_db.v6neighbor[idx].staticEntry) continue; rtl8198C_getAsicArp6(idx, &asicArpv6) ; //DEBUG("[%d]ASIC TIME=%d SOFTWARE TIME=%d\n",idx,31-asicArp.aging,elapsedTime); /* Update idelSecs Boyce 2015-08-24*/ // (SOFTWARE TIME)-(ASIC TIME) >0 -> non-idle // (SOFTWARE TIME)-(ASIC TIME) <=0 -> idel if( (uint32)(elapsedTime) <= (31-asicArpv6.age) ) {//idel rg_db.v6neighbor[idx].idleSecs += elapsedTime; } else {//non-idle rg_db.v6neighbor[idx].idleSecs = 0; } DEBUG("v6neighbor[%d] idleSecs =%d \n",idx,rg_db.v6neighbor[idx].idleSecs ); asicArpv6.age=31; rtl8198C_setAsicArp6_idx(idx, &asicArpv6) ; if(rg_db.v6neighbor[idx].idleSecs>=rg_db.systemGlobal.neighbor_timeout && rg_db.v6neighbor[idx].staticEntry==0) { //rtlglue_printf("arp idx [%d] timeout!!\n",idx); ret=rtk_rg_apollo_neighborEntry_del(idx); if(ret==RT_ERR_RG_ARP_NOT_FOUND) rg_db.neighborValidSet[idx>>5] &= ~(0x1<<(idx&31)); else assert_ok(ret); } } } } } #endif return (RT_ERR_RG_OK); } #endif #ifdef CONFIG_RG_LAYER2_SOFTWARE_LEARN int _rtk_rg_layer2SoftwareAgeOut(void) { int i,ret,indexCheck; #if defined(CONFIG_RTL9600_SERIES) #else int elapsedTime = (rg_db.systemGlobal.house_keep_sec*RTK_RG_MAX_HOUSE_KEEP_SELECT); //TICKTIME_PERIOD_SECOND; //secs #endif if(rg_db.systemGlobal.layer2HouseKeepIndex > MAX_LUT_HW_TABLE_SIZE-Layer2HOUSE_KEEP_NUM) { FIXME("the layer2Housekeepindex is %d",rg_db.systemGlobal.layer2HouseKeepIndex); rg_db.systemGlobal.layer2HouseKeepIndex=0; } //each time we check Layer2HOUSE_KEEP_NUM entries indexCheck=rg_db.systemGlobal.layer2HouseKeepIndex; //NOTICE: since there is bCAM enabled, it will become 2048+64 
entries to check at most //therefore we check 264 entries for each time to cover the bCAM's range. for(i=0;i<Layer2HOUSE_KEEP_NUM;i++) { if(rg_db.lut[indexCheck].valid && rg_db.lut[indexCheck].rtk_lut.entryType==RTK_LUT_L2UC && (rg_db.lut[indexCheck].rtk_lut.entry.l2UcEntry.flags&RTK_L2_UCAST_FLAG_STATIC)==0) //check dynamic entries only { //Check the hardware table's age field, if it is zero, invalid this software entry ret=rtk_l2_addr_get(&rg_db.lut[indexCheck].rtk_lut.entry.l2UcEntry); if(ret!=RT_ERR_OK) { DEBUG("Get l2 entry %d fail, ret=%d",indexCheck, ret); if(ret!=RT_ERR_L2_ENTRY_NOTFOUND) return ret; } if(ret==RT_ERR_L2_ENTRY_NOTFOUND || rg_db.lut[indexCheck].rtk_lut.entry.l2UcEntry.age==0) { DEBUG("invalid the age zero entry %d !",indexCheck); if(rg_db.lut[indexCheck].rtk_lut.entry.l2UcEntry.age==0) DEBUG("Age of l2 entry %d is zero!",indexCheck); //------------------ Critical Section start -----------------------// //rg_lock(&rg_kernel.saLearningLimitLock); if(rg_db.lut[indexCheck].rtk_lut.entry.l2UcEntry.port>=RTK_RG_MAC_PORT_CPU) { if(rg_db.systemGlobal.accessWanLimitPortMask_member.portmask&(0x1<<(rg_db.lut[indexCheck].rtk_lut.entry.l2UcEntry.ext_port+RTK_RG_PORT_CPU))&&rg_db.lut[indexCheck].permit_for_l34_forward) atomic_dec(&rg_db.systemGlobal.accessWanLimitPortMaskCount); atomic_dec(&rg_db.systemGlobal.sourceAddrLearningCount[rg_db.lut[indexCheck].rtk_lut.entry.l2UcEntry.ext_port+RTK_RG_PORT_CPU]); //decrease wlan's device count if(rg_db.lut[indexCheck].rtk_lut.entry.l2UcEntry.ext_port==(RTK_RG_EXT_PORT0-RTK_RG_PORT_CPU) #ifdef CONFIG_DUALBAND_CONCURRENT ||(rg_db.systemGlobal.enableSlaveSSIDBind && rg_db.lut[indexCheck].rtk_lut.entry.l2UcEntry.ext_port==(RTK_RG_EXT_PORT1-RTK_RG_PORT_CPU)) #endif ) { #ifdef CONFIG_MASTER_WLAN0_ENABLE if(rg_db.systemGlobal.accessWanLimitPortMask_wlan0member&(0x1<<(rg_db.lut[indexCheck].wlan_device_idx))&&rg_db.lut[indexCheck].permit_for_l34_forward) atomic_dec(&rg_db.systemGlobal.accessWanLimitPortMaskCount); atomic_dec(&rg_db.systemGlobal.wlan0SourceAddrLearningCount[(int)rg_db.lut[indexCheck].wlan_device_idx]); #endif } } else { if(rg_db.systemGlobal.accessWanLimitPortMask_member.portmask&(0x1<<(rg_db.lut[indexCheck].rtk_lut.entry.l2UcEntry.port))&&rg_db.lut[indexCheck].permit_for_l34_forward) atomic_dec(&rg_db.systemGlobal.accessWanLimitPortMaskCount); atomic_dec(&rg_db.systemGlobal.sourceAddrLearningCount[rg_db.lut[indexCheck].rtk_lut.entry.l2UcEntry.port]); } if(_rtK_rg_checkCategoryPortmask(&rg_db.lut[indexCheck].rtk_lut.entry.l2UcEntry)==SUCCESS) atomic_dec(&rg_db.systemGlobal.accessWanLimitCategoryCount[(unsigned int)rg_db.lut[indexCheck].category]); //------------------ Critical Section End -----------------------// //rg_unlock(&rg_kernel.saLearningLimitLock);ac rg_db.lut[indexCheck].valid=0; } #if defined(CONFIG_RTL9600_SERIES) #else //support lut traffic bit else { if(rg_db.lut[indexCheck].rtk_lut.entry.l2UcEntry.age != 1) rg_db.lut[indexCheck].idleSecs = 0; else rg_db.lut[indexCheck].idleSecs += (elapsedTime*((MAX_LUT_HW_TABLE_SIZE)/Layer2HOUSE_KEEP_NUM)); //DEBUG("lut[%d], idle %d secs !!\n", indexCheck, rg_db.lut[indexCheck].idleSecs); if(rg_db.lut[indexCheck].idleSecs>=rg_db.systemGlobal.l2_timeout) { if((rg_db.lut[indexCheck].rtk_lut.entry.l2UcEntry.flags&RTK_L2_UCAST_FLAG_ARP_USED)==0) { TRACE("lut idx [%d] timeout, idle %d secs !!\n", indexCheck, rg_db.lut[indexCheck].idleSecs); ret=rtk_rg_apollo_macEntry_del(indexCheck); if(ret==RT_ERR_RG_ENTRY_NOT_EXIST) { #if defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) 
if(indexCheck<MAX_LUT_HW_TABLE_SIZE-MAX_LUT_BCAM_TABLE_SIZE) rg_db.lut[indexCheck].valid=0; #else if((indexCheck>=MAX_LUT_HW_TABLE_SIZE-MAX_LUT_BCAM_TABLE_SIZE) && (indexCheck<MAX_LUT_HW_TABLE_SIZE)) _rtk_rg_lutCamListDel(indexCheck); else if(indexCheck<MAX_LUT_HW_TABLE_SIZE-MAX_LUT_BCAM_TABLE_SIZE) rg_db.lut[indexCheck].valid=0; #endif } else assert_ok(ret); } } else //reset age to 1 if this l2 entry is not timeout { if(rg_db.lut[indexCheck].rtk_lut.entry.l2UcEntry.age != 1) ASSERT_EQ(_rtk_rg_l2_trafficBit_reset(&rg_db.lut[indexCheck].rtk_lut.entry.l2UcEntry), RT_ERR_OK); } } #endif } indexCheck++; } #if 0 if(indexCheck==MAX_LUT_HW_TABLE_SIZE) { #ifdef __KERNEL__ #ifdef CONFIG_RG_LAYER2_SOFTWARE_LEARN if(rg_kernel.l2_hw_aging==1) { FIXME("enable auto learning in a bit time.(2ms)"); // 20130725: patch for N2X traffic full loading will cause LUT timeout and the packets will out of sequence. for(i=0;i<RTK_RG_MAC_PORT_CPU;i++) { ASSERT_EQ(RTK_L2_PORTLIMITLEARNINGCNT_SET(i,2112),RT_ERR_OK); } mdelay(2); for(i=0;i<RTK_RG_MAC_PORT_CPU;i++) { ASSERT_EQ(RTK_L2_PORTLIMITLEARNINGCNT_SET(i,0),RT_ERR_OK); } } #endif #endif } #endif rg_db.systemGlobal.layer2HouseKeepIndex=indexCheck; return (RT_ERR_RG_OK); } #endif #if defined(CONFIG_ROME_NAPT_SHORTCUT) || defined(CONFIG_RG_IPV6_SOFTWARE_SHORTCUT_SUPPORT) int _rtk_rg_shortcutTimeoutCheck(void) { int i,j,indexCheck; int elapsedTime = (rg_db.systemGlobal.house_keep_sec*RTK_RG_MAX_HOUSE_KEEP_SELECT); //TICKTIME_PERIOD_SECOND; //secs #if defined(CONFIG_ROME_NAPT_SHORTCUT) { if(rg_db.systemGlobal.v4Shoutcut_HouseKeepIndex >= MAX_NAPT_SHORTCUT_SIZE) { FIXME("the v4Shoutcut_HouseKeepIndex is %d",rg_db.systemGlobal.v4Shoutcut_HouseKeepIndex); rg_db.systemGlobal.v4Shoutcut_HouseKeepIndex=0; } //DEBUG("v4 shortCut timeout check range: %d to %d", rg_db.systemGlobal.v4Shoutcut_HouseKeepIndex, rg_db.systemGlobal.v4Shoutcut_HouseKeepIndex+V4_SHORTCUT_KEEP_NUM); //each time we check V4_SHORTCUT_KEEP_NUM entries for(i=(rg_db.systemGlobal.v4Shoutcut_HouseKeepIndex>>5); i<((rg_db.systemGlobal.v4Shoutcut_HouseKeepIndex+V4_SHORTCUT_KEEP_NUM)>>5); i++) { if(rg_db.v4ShortCutValidSet[i]) { for(j=0;j<32;j++) { indexCheck = (i<<5) + j; if(rg_db.naptShortCut[indexCheck].sip!=0) //valid { rg_db.naptShortCut[indexCheck].idleSecs += (elapsedTime*(MAX_NAPT_SHORTCUT_SIZE/V4_SHORTCUT_KEEP_NUM)); if(rg_db.naptShortCut[indexCheck].idleSecs >= rg_db.systemGlobal.v4ShortCut_timeout) _rtk_rg_v4ShortCut_delete(indexCheck); } } } } rg_db.systemGlobal.v4Shoutcut_HouseKeepIndex += V4_SHORTCUT_KEEP_NUM; } #endif #if defined(CONFIG_RG_IPV6_SOFTWARE_SHORTCUT_SUPPORT) { rtk_ipv6_addr_t zeroIP={{0}}; if(rg_db.systemGlobal.v6Shoutcut_HouseKeepIndex >= MAX_NAPT_V6_SHORTCUT_SIZE) { FIXME("the v6Shoutcut_HouseKeepIndex is %d",rg_db.systemGlobal.v6Shoutcut_HouseKeepIndex); rg_db.systemGlobal.v6Shoutcut_HouseKeepIndex=0; } //DEBUG("v6 shortCut timeout check range: %d to %d", rg_db.systemGlobal.v6Shoutcut_HouseKeepIndex, rg_db.systemGlobal.v6Shoutcut_HouseKeepIndex+V6_SHORTCUT_KEEP_NUM); //each time we check V6_SHORTCUT_KEEP_NUM entries for(i=(rg_db.systemGlobal.v6Shoutcut_HouseKeepIndex>>5); i<((rg_db.systemGlobal.v6Shoutcut_HouseKeepIndex+V6_SHORTCUT_KEEP_NUM)>>5); i++) { if(rg_db.v6ShortCutValidSet[i]) { for(j=0;j<32;j++) { indexCheck = (i<<5) + j; if(memcmp(rg_db.naptv6ShortCut[indexCheck].sip.ipv6_addr, zeroIP.ipv6_addr, IPV6_ADDR_LEN)) //valid { rg_db.naptv6ShortCut[indexCheck].idleSecs += (elapsedTime*(MAX_NAPT_V6_SHORTCUT_SIZE/V6_SHORTCUT_KEEP_NUM)); if(rg_db.naptv6ShortCut[indexCheck].idleSecs 
>= rg_db.systemGlobal.v6ShortCut_timeout) _rtk_rg_v6ShortCut_delete(indexCheck); } } } } rg_db.systemGlobal.v6Shoutcut_HouseKeepIndex += V6_SHORTCUT_KEEP_NUM; } #endif return (RT_ERR_RG_OK); } #endif int _rtk_rg_algDynamicPortTimeoutCheck(void) { rtk_rg_alg_dynamicPort_t *pList,*pNext; int elapsedTime = (rg_db.systemGlobal.house_keep_sec*RTK_RG_MAX_HOUSE_KEEP_SELECT); //TICKTIME_PERIOD_SECOND; //secs //------------------ Critical Section start -----------------------// rg_lock(&rg_kernel.algDynamicLock); if(!list_empty(&rg_db.algDynamicCheckListHead)) { list_for_each_entry_safe(pList,pNext,&rg_db.algDynamicCheckListHead,alg_list) { if(pList->timeout>0) { pList->timeout-=elapsedTime; if(pList->timeout<=0) { DEBUG("ALG Dynamic Port[%d] Timeout!!",pList->portNum); if(pList->serverInLan) _rtk_rg_alg_setSrvInLanPortWithIntIP(pList->portNum,pList->isTCP,0,pList->intIP); else _rtk_rg_alg_setPort(pList->portNum,pList->isTCP,0); list_del_init(&pList->alg_list); list_add(&pList->alg_list,&rg_db.algDynamicFreeListHead); } } } } //------------------ Critical Section End -----------------------// rg_unlock(&rg_kernel.algDynamicLock); return (RT_ERR_RG_OK); } #ifdef __KERNEL__ #ifdef CONFIG_SMP void rtk_rg_fwdEngineHouseKeepingTimerFuncTasklet(unsigned long task_priv) #else void rtk_rg_fwdEngineHouseKeepingTimerFunc(unsigned long task_priv) #endif { //DEBUG("[Timer rtk_rg_fwdEngineHouseKeepingTimerFunc]\n"); rg_kernel.tracefilterShow =0; //disable tracefilter show //20150519LUKE: if RG is not initialized, do nothing and goto sleep if(_rtk_rg_get_initState()==RTK_RG_DURING_INIT) goto EXIT; //struct rtl8190_priv *priv = (struct rtl8190_priv *)task_priv; //rtlglue_printf("%s\n",__func__); //DEBUG("Into HouseKeeping..."); switch(rg_kernel.timer_selector) { #if defined(CONFIG_ROME_NAPT_SHORTCUT) || defined(CONFIG_RG_IPV6_SOFTWARE_SHORTCUT_SUPPORT) case RTK_RG_SHORTCUT_HOUSE_KEEP_SELECT: // DEBUG("SHORTCUT Tick!\n"); _rtk_rg_shortcutTimeoutCheck(); break; #endif #if defined(CONFIG_RG_FLOW_AUTO_AGEOUT) //FLOW Timeout case RTK_RG_FLOW_HOUSE_KEEP_SELECT: // DEBUG("FLOW Tick!\n"); _rtk_rg_flowTimeoutCheck(); break; #endif #if defined(CONFIG_RG_NAPT_AUTO_AGEOUT) //NAPT Timeout //if(rg_kernel.timer_selector==RTK_RG_NAPT_HOUSE_KEEP_SELECT) case RTK_RG_NAPT_HOUSE_KEEP_SELECT: // DEBUG("NAPT Tick!\n"); _rtk_rg_naptTimeoutCheck(); break; #endif #ifdef CONFIG_RG_LAYER2_SOFTWARE_LEARN case RTK_RG_LAYER2_HOUSE_KEEP_SELECT: // DEBUG("LUT Tick!\n"); _rtk_rg_layer2SoftwareAgeOut(); break; #endif #if defined(CONFIG_RG_ARP_AUTO_AGEOUT) //ARP Timeout case RTK_RG_ARP_HOUSE_KEEP_SELECT: // DEBUG("ARP Tick!\n"); #if !defined(CONFIG_RG_FLOW_BASED_PLATFORM) _rtk_rg_arpTimeoutCheck(); #endif _rtk_rg_swArpTimeoutCheck(); break; case RTK_RG_NEIGHBOR_HOUSE_KEEP_SELECT: _rtk_rg_neighborTimeoutCheck(); break; #endif #ifdef CONFIG_RG_IPV6_STATEFUL_ROUTING_SUPPORT //IPv6 Stateful connection Timeout case RTK_RG_IPV6_STATEFUL_HOUSE_KEEP_SELECT: _rtk_rg_v6StatefulTimeoutCheck(); break; //IPv6 fragment queue Timeout case RTK_RG_IPV6_FRAGMENT_HOUSE_KEEP_SELECT: _rtk_rg_v6FragQueueTimeoutCheck(); break; #endif //IPv4 fragment queue Timeout case RTK_RG_IPV4_FRAGMENT_HOUSE_KEEP_SELECT: _rtk_rg_v4FragQueueTimeoutCheck(); break; case RTK_RG_ALG_DYNAMIC_PORT_HOUSE_KEEP_SELECT: _rtk_rg_algDynamicPortTimeoutCheck(); break; default: break; } //UPNP timeout check _rtk_rg_upnpTimeoutCheck(); //PPPoE idle Timer refresh #if defined(CONFIG_RTL9600_SERIES) || defined(CONFIG_RG_FLOW_BASED_PLATFORM) || defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) #else //support pppoe 
traffic bit _rtk_rg_pppoeIdleTimeRefresh(); #endif if(RTK_RG_MAX_HOUSE_KEEP_SELECT>0) rg_kernel.timer_selector = ((rg_kernel.timer_selector+1)%RTK_RG_MAX_HOUSE_KEEP_SELECT); EXIT: mod_timer(&rg_kernel.fwdEngineHouseKeepingTimer, jiffies+(rg_db.systemGlobal.house_keep_sec*TICKTIME_PERIOD)); } #ifdef CONFIG_SMP void rtk_rg_fwdEngineHouseKeepingTimerFunc(unsigned long task_priv) { unsigned long lock_flags; int end_index,next_end_index; rg_tasklet_queue_lock_irqsave(&rg_kernel.rg_tasklet_queue_lock,lock_flags); end_index=atomic_read(&rg_kernel.rg_tasklet_data.end_index); next_end_index=((end_index+1)&(MAX_RG_TASKLET_QUEUE_SIZE-1)); rg_kernel.rg_tasklet_data.tasklet_type[end_index]=RG_TASKLET_TYPE_FROM_TIMER; atomic_set(&rg_kernel.rg_tasklet_data.end_index,next_end_index); tasklet_hi_schedule(&rg_kernel.rg_tasklets); rg_tasklet_queue_unlock_irqrestore(&rg_kernel.rg_tasklet_queue_lock,lock_flags); } #endif #if 0 int32 rtk_rg_apollo_fwdEngine_pppoe(void) { int lanIntfIdx; int wanIntfIdx; rtk_rg_lanIntfConf_t lan_info; rtk_rg_wanIntfConf_t wan_info; //rtk_rg_ipStaticInfo_t staticInfo; rtk_rg_pppoeClientInfoBeforeDial_t pppoeClientInfoB; rtk_rg_pppoeClientInfoAfterDial_t pppoeClientInfoA; assert_ok(rtk_rg_initParam_set(NULL)); memset(&wan_info, 0, sizeof(wan_info)); memset(&pppoeClientInfoB, 0, sizeof(pppoeClientInfoB)); memset(&pppoeClientInfoA, 0, sizeof(pppoeClientInfoA)); memset(&lan_info,0,sizeof(lan_info)); lan_info.gmac.octet[0]=0x00; lan_info.gmac.octet[1]=0xe0; lan_info.gmac.octet[2]=0x4c; lan_info.gmac.octet[3]=0x86; lan_info.gmac.octet[4]=0x70; lan_info.gmac.octet[5]=0x01; lan_info.intf_vlan_id=9; lan_info.ip_addr=htonl(0xc0a80101); lan_info.ip_network_mask=htonl(0xffffff00); lan_info.mtu=1500; lan_info.port_mask.portmask=((1<<RTK_RG_PORT0)|(1<<RTK_RG_PORT1)|(1<<RTK_RG_PORT2)|(1<<RTK_RG_PORT3)); lan_info.untag_mask.portmask=((1<<RTK_RG_MAC_PORT0)|(1<<RTK_RG_MAC_PORT1)|(1<<RTK_RG_MAC_PORT2)|(1<<RTK_RG_MAC_PORT_CPU)); assert_ok(rtk_rg_lanInterface_add(&lan_info,&lanIntfIdx)); memset(&wan_info,0,sizeof(wan_info)); wan_info.egress_vlan_id=8; wan_info.vlan_based_pri=0; wan_info.egress_vlan_tag_on=0; wan_info.gmac.octet[0]=0x00; wan_info.gmac.octet[1]=0xe0; wan_info.gmac.octet[2]=0x4c; wan_info.gmac.octet[3]=0x86; wan_info.gmac.octet[4]=0x70; wan_info.gmac.octet[5]=0x02; //_rtk_rg_strtomac((void *)&wan_info.gmac.octet[0], romeWAN1_HOST_mac); wan_info.port_binding_mask.portmask=0; wan_info.wan_port_idx=RTK_RG_PORT_PON; // wan_info.wan_port_idx=RTK_RG_MAC_PORT3; wan_info.wan_type=RTK_RG_PPPoE; assert_ok(rtk_rg_wanInterface_add(&wan_info,&wanIntfIdx)); assert_ok(rtk_rg_pppoeClientInfoBeforeDial_set(wanIntfIdx, &pppoeClientInfoB)); pppoeClientInfoA.hw_info.napt_enable = 1; pppoeClientInfoA.hw_info.ip_addr = htonl(0xc0a805c9); pppoeClientInfoA.hw_info.ip_network_mask = htonl(0xffffff00); pppoeClientInfoA.hw_info.ipv4_default_gateway_on = 1; pppoeClientInfoA.hw_info.gateway_ipv4_addr = htonl(0xc0a805c8); pppoeClientInfoA.hw_info.mtu = 1500; pppoeClientInfoA.sessionId = 5; pppoeClientInfoA.hw_info.gw_mac_auto_learn_for_ipv4 = 0; pppoeClientInfoA.hw_info.gateway_mac_addr_for_ipv4.octet[0]=0x00; pppoeClientInfoA.hw_info.gateway_mac_addr_for_ipv4.octet[1]=0x00; pppoeClientInfoA.hw_info.gateway_mac_addr_for_ipv4.octet[2]=0x00; pppoeClientInfoA.hw_info.gateway_mac_addr_for_ipv4.octet[3]=0x00; pppoeClientInfoA.hw_info.gateway_mac_addr_for_ipv4.octet[4]=0x00; pppoeClientInfoA.hw_info.gateway_mac_addr_for_ipv4.octet[5]=0x02; //_rtk_rg_strtomac((void *)&pppoeClientInfoA.hw_info.gateway_mac_addr.octet[0], 
romeWAN2_HOST_mac); assert_ok(rtk_rg_pppoeClientInfoAfterDial_set(wanIntfIdx, &pppoeClientInfoA)); return 0; } #endif #if 0 //#define PPPOE_WAN_TEST #define STATIC_WAN_TEST //#define LUKE_MAC_IP //#define LUKE_PPB_TEST //#define LUKE_ALG_TEST //#define PORTBINDING_TEST //#define LUKE_IPV6_TEST //#define LUKE_WAN_PON_PORT //#define LUKE_FIND_TEST int32 rtk_rg_apollo_fwdEngine_start(void) { #if defined(LUKE_PPB_TEST) int i,retval; //rtk_l34_pppoe_entry_t pppEntry; #endif #ifdef LUKE_ALG_TEST rtk_rg_alg_type_t algGet; #endif int lanIntfIdx; int wanIntfIdx; rtk_rg_lanIntfConf_t lan_info; rtk_rg_wanIntfConf_t wan_info; rtk_rg_ipStaticInfo_t staticInfo; #ifdef PPPOE_WAN_TEST rtk_rg_pppoeClientInfoBeforeDial_t pppoeBefore; rtk_rg_pppoeClientInfoAfterDial_t pppoeAfter; #endif #ifdef LUKE_IPV6_TEST rtk_rg_neighborEntry_t neighborEntry; rtk_rg_neighborInfo_t neighborInfo; rtk_rg_macEntry_t macEntry; int l2Idx,ret,neighbor_valid_idx; unsigned int tmpInteger; #endif #ifdef LUKE_FIND_TEST rtk_rg_intfInfo_t findIntfInfo; int intf_valid_idx; #endif assert_ok(rtk_rg_initParam_set(NULL)); #ifdef LUKE_PPB_TEST //DSCP remarking enable for(i=0;i<7;i++) { //retval = ioal_mem32_write(0x231cc+(i*4),0x1); //assert(retval==RT_ERR_OK); //DEBUG("DSCP on %d",i); retval = rtk_qos_dscpRemarkEnable_set(i,DISABLED); assert(retval==RT_ERR_OK); retval = rtk_qos_1pRemarkEnable_set(i,DISABLED); assert(retval==RT_ERR_OK); //pppEntry.sessionID=0x5+i; //retval = RTK_L34_PPPOETABLE_SET(i,&pppEntry); //DEBUG("PPPOE TABLE %d is %d",i,pppEntry.sessionID); } #endif memset(&lan_info,0,sizeof(lan_info)); lan_info.gmac.octet[0]=0x00; lan_info.gmac.octet[1]=0xa1; #ifdef LUKE_MAC_IP lan_info.gmac.octet[1]=0x21; #endif lan_info.gmac.octet[2]=0x4c; lan_info.gmac.octet[3]=0x86; lan_info.gmac.octet[4]=0x70; lan_info.gmac.octet[5]=0x01; lan_info.intf_vlan_id=9; lan_info.ip_addr=htonl(0xc0a80101); //192.168.1.1 lan_info.ip_network_mask=htonl(0xffffff00); lan_info.mtu=1500; lan_info.port_mask.portmask=((1<<RTK_RG_PORT0)|(1<<RTK_RG_PORT1)|(1<<RTK_RG_PORT2)|(1<<RTK_RG_PORT3)); lan_info.untag_mask.portmask=((1<<RTK_RG_MAC_PORT0)|(1<<RTK_RG_MAC_PORT1)|(1<<RTK_RG_MAC_PORT2)|(1<<RTK_RG_MAC_PORT_CPU)); #ifdef LUKE_PPB_TEST //lan_info.pppoe_passThrough=1; #endif #if defined(PORTBINDING_TEST) || defined(LUKE_PPB_TEST) || defined(LUKE_MAC_IP) lan_info.intf_vlan_id=100; lan_info.ip_network_mask=htonl(0xffffff00); lan_info.port_mask.portmask=((1<<RTK_RG_PORT0)|(1<<RTK_RG_PORT1)|(1<<RTK_RG_PORT2)); #endif #ifdef LUKE_IPV6_TEST lan_info.ip_version=IPVER_V4V6; tmpInteger=htonl(0xfe800000); memcpy(lan_info.ipv6_addr.ipv6_addr,&tmpInteger,4); tmpInteger=htonl(0x00000101); memcpy(lan_info.ipv6_addr.ipv6_addr+4,&tmpInteger,4); tmpInteger=htonl(0x12345678); memcpy(lan_info.ipv6_addr.ipv6_addr+8,&tmpInteger,4); tmpInteger=htonl(0x9abcdeff); memcpy(lan_info.ipv6_addr.ipv6_addr+12,&tmpInteger,4); lan_info.ipv6_network_mask_length=64; #endif assert_ok(rtk_rg_lanInterface_add(&lan_info,&lanIntfIdx)); memset(&wan_info,0,sizeof(wan_info)); wan_info.egress_vlan_id=8; wan_info.vlan_based_pri=0; wan_info.egress_vlan_tag_on=0; wan_info.gmac.octet[0]=0x00; wan_info.gmac.octet[1]=0xa1; #ifdef LUKE_MAC_IP wan_info.gmac.octet[1]=0x21; wan_info.egress_vlan_id=200; #endif wan_info.gmac.octet[2]=0x4c; wan_info.gmac.octet[3]=0x86; wan_info.gmac.octet[4]=0x70; wan_info.gmac.octet[5]=0x02; wan_info.port_binding_mask.portmask=0; wan_info.wan_port_idx=RTK_RG_MAC_PORT3; #ifdef LUKE_WAN_PON_PORT wan_info.wan_port_idx=RTK_RG_MAC_PORT_PON; #endif #ifdef STATIC_WAN_TEST 
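//STATIC_WAN_TEST path: the WAN is added as RTK_RG_STATIC and later configured through staticInfo (WAN 192.168.150.116/24 with default gateway 192.168.150.117)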
wan_info.wan_type=RTK_RG_STATIC; #endif #ifdef PPPOE_WAN_TEST wan_info.wan_type=RTK_RG_PPPoE; #endif assert_ok(rtk_rg_wanInterface_add(&wan_info,&wanIntfIdx)); memset(&staticInfo,0,sizeof(staticInfo)); staticInfo.ipv4_default_gateway_on=1; staticInfo.gateway_ipv4_addr=htonl(0xc0a89675); //wan ip:192.168.150.117 staticInfo.ip_addr=htonl(0xc0a89674); //wan ip:192.168.150.116 staticInfo.ip_network_mask=htonl(0xffffff00); staticInfo.mtu=1500; staticInfo.napt_enable=1; staticInfo.gw_mac_auto_learn_for_ipv4=1; #ifdef LUKE_MAC_IP staticInfo.gateway_ipv4_addr=htonl(0xc0a89727); //wan ip:192.168.151.39 staticInfo.ip_addr=htonl(0xc0a89720); //wan ip:192.168.151.32 staticInfo.ip_network_mask=htonl(0xfffffff8); #endif #ifdef LUKE_IPV6_TEST staticInfo.ip_version=IPVER_V4V6; staticInfo.ipv6_default_gateway_on=1; tmpInteger=htonl(0xfe800000); memcpy(staticInfo.ipv6_addr.ipv6_addr,&tmpInteger,4); tmpInteger=htonl(0x01010101); memcpy(staticInfo.ipv6_addr.ipv6_addr+4,&tmpInteger,4); tmpInteger=htonl(0x44444444); memcpy(staticInfo.ipv6_addr.ipv6_addr+8,&tmpInteger,4); tmpInteger=htonl(0x55555555); memcpy(staticInfo.ipv6_addr.ipv6_addr+12,&tmpInteger,4); staticInfo.ipv6_mask_length=64; /*tmpInteger=htonl(0x00556611); memcpy(staticInfo.gateway_mac_addr_for_ipv6.octet,&tmpInteger,4); staticInfo.gateway_mac_addr_for_ipv6.octet[4]=0x22; staticInfo.gateway_mac_addr_for_ipv6.octet[5]=0x33;*/ tmpInteger=htonl(0xfe800000); memcpy(staticInfo.gateway_ipv6_addr.ipv6_addr,&tmpInteger,4); tmpInteger=htonl(0x01010101); memcpy(staticInfo.gateway_ipv6_addr.ipv6_addr+4,&tmpInteger,4); tmpInteger=htonl(0x22222222); memcpy(staticInfo.gateway_ipv6_addr.ipv6_addr+8,&tmpInteger,4); tmpInteger=htonl(0x33333333); memcpy(staticInfo.gateway_ipv6_addr.ipv6_addr+12,&tmpInteger,4); staticInfo.gw_mac_auto_learn_for_ipv6=0; tmpInteger=htonl(0xc860008c); memcpy(staticInfo.gateway_mac_addr_for_ipv6.octet,&tmpInteger,4); staticInfo.gateway_mac_addr_for_ipv6.octet[4]=0x10; staticInfo.gateway_mac_addr_for_ipv6.octet[5]=0x0d; #endif #ifdef STATIC_WAN_TEST assert_ok(rtk_rg_staticInfo_set(wanIntfIdx,&staticInfo)); #endif #ifdef LUKE_IPV6_TEST //mac add bzero(&macEntry,sizeof(rtk_rg_macEntry_t)); tmpInteger=htonl(0x00113333); memcpy(macEntry.mac.octet,&tmpInteger,4); macEntry.mac.octet[4]=0x33; macEntry.mac.octet[5]=0x33; macEntry.fid=LAN_FID; macEntry.static_entry=1; macEntry.arp_used=1; assert_ok(rtk_rg_macEntry_add(&macEntry,&l2Idx)); //find for nothing bzero(&neighborInfo,sizeof(rtk_rg_neighborInfo_t)); neighbor_valid_idx=-1; tmpInteger=htonl(0x12345678); memcpy(neighborInfo.neighborEntry.interfaceId,&tmpInteger,4); tmpInteger=htonl(0x9abcdeff); memcpy(neighborInfo.neighborEntry.interfaceId+4,&tmpInteger,4); neighborInfo.neighborEntry.matchRouteIdx=0; ret=rtk_rg_neighborEntry_find(&neighborInfo,&neighbor_valid_idx); DEBUG("ret of first find: %x, idx=%d",ret,neighbor_valid_idx); //neighbor table add bzero(&neighborEntry,sizeof(rtk_rg_neighborEntry_t)); tmpInteger=htonl(0x12345678); memcpy(neighborEntry.interfaceId,&tmpInteger,4); tmpInteger=htonl(0x9abcdeff); memcpy(neighborEntry.interfaceId+4,&tmpInteger,4); neighborEntry.l2Idx=l2Idx; neighborEntry.matchRouteIdx=0; neighborEntry.staticEntry=1; neighborEntry.valid=1; assert_ok(rtk_rg_neighborEntry_add(&neighborEntry,&l2Idx)); //find for something bzero(&neighborInfo,sizeof(rtk_rg_neighborInfo_t)); neighbor_valid_idx=-1; memcpy(neighborInfo.neighborEntry.interfaceId,neighborEntry.interfaceId,8); neighborInfo.neighborEntry.matchRouteIdx=neighborEntry.matchRouteIdx; 
ret=rtk_rg_neighborEntry_find(&neighborInfo,&neighbor_valid_idx); DEBUG("ret of second find: %x, idx=%d",ret,neighbor_valid_idx); //delete neighbor ret=rtk_rg_neighborEntry_del(neighbor_valid_idx); DEBUG("ret of del: %x",ret); //find for nothing bzero(&neighborInfo,sizeof(rtk_rg_neighborInfo_t)); neighbor_valid_idx=-1; tmpInteger=htonl(0x12345678); memcpy(neighborInfo.neighborEntry.interfaceId,&tmpInteger,4); tmpInteger=htonl(0x9abcdeff); memcpy(neighborInfo.neighborEntry.interfaceId+4,&tmpInteger,4); neighborInfo.neighborEntry.matchRouteIdx=0; ret=rtk_rg_neighborEntry_find(&neighborInfo,&neighbor_valid_idx); DEBUG("ret of last find: %x, idx=%d",ret,neighbor_valid_idx); #endif #ifdef LUKE_FIND_TEST bzero(&findIntfInfo,sizeof(rtk_rg_intfInfo_t)); intf_valid_idx=1; ret=rtk_rg_intfInfo_find(&findIntfInfo,&intf_valid_idx); DEBUG("ret=%d",ret); if(memcmp(&findIntfInfo,&rg_db.systemGlobal.interfaceInfo[1].storedInfo,sizeof(rtk_rg_intfInfo_t))==0) { DEBUG("find interface %d is success!!",intf_valid_idx); } else { DEBUG("find interface %d failed...",intf_valid_idx); } bzero(&findIntfInfo,sizeof(rtk_rg_intfInfo_t)); intf_valid_idx=-1; findIntfInfo.lan_intf.ip_addr=htonl(0xc0a89720); ret=rtk_rg_intfInfo_find(&findIntfInfo,&intf_valid_idx); DEBUG("ret=%d",ret); if(memcmp(&findIntfInfo,&rg_db.systemGlobal.interfaceInfo[intf_valid_idx].storedInfo,sizeof(rtk_rg_intfInfo_t))==0) { DEBUG("find interface %d by ip 0xc0a89674 is success!!",intf_valid_idx); } else { DEBUG("find interface %d by ip 0xc0a89674 failed...",intf_valid_idx); } #ifdef LUKE_IPV6_TEST bzero(&findIntfInfo,sizeof(rtk_rg_intfInfo_t)); intf_valid_idx=-1; tmpInteger=htonl(0xfe800000); memcpy(findIntfInfo.lan_intf.ipv6_addr.ipv6_addr,&tmpInteger,4); tmpInteger=htonl(0x01010101); memcpy(findIntfInfo.lan_intf.ipv6_addr.ipv6_addr+4,&tmpInteger,4); tmpInteger=htonl(0x44444444); memcpy(findIntfInfo.lan_intf.ipv6_addr.ipv6_addr+8,&tmpInteger,4); tmpInteger=htonl(0x55555555); memcpy(findIntfInfo.lan_intf.ipv6_addr.ipv6_addr+12,&tmpInteger,4); ret=rtk_rg_intfInfo_find(&findIntfInfo,&intf_valid_idx); DEBUG("ret=%d",ret); if(memcmp(&findIntfInfo,&rg_db.systemGlobal.interfaceInfo[intf_valid_idx].storedInfo,sizeof(rtk_rg_intfInfo_t))==0) { DEBUG("find interface %d by ipv6 0xfe800000010101014444444455555555 is success!!",intf_valid_idx); } else { DEBUG("find interface %d by ipv6 0xfe800000010101014444444455555555 failed...",intf_valid_idx); } #endif #endif #ifdef PPPOE_WAN_TEST memset(&pppoeBefore,0,sizeof(pppoeBefore)); assert_ok(rtk_rg_pppoeClientInfoBeforeDial_set(wanIntfIdx,&pppoeBefore)); memset(&pppoeAfter,0,sizeof(pppoeAfter)); memcpy(&pppoeAfter.hw_info,&staticInfo,sizeof(staticInfo)); pppoeAfter.sessionId=50; pppoeAfter.hw_info.ipv6_mask_length=128; //pppoe WAN do not need interface route assert_ok(rtk_rg_pppoeClientInfoAfterDial_set(wanIntfIdx,&pppoeAfter)); #endif //================================================================== #ifdef PORTBINDING_TEST wanIntfIdx=0; memset(&wan_info,0,sizeof(wan_info)); wan_info.egress_vlan_id=300; wan_info.vlan_based_pri=0; wan_info.egress_vlan_tag_on=1; wan_info.gmac.octet[0]=0x00; wan_info.gmac.octet[1]=0xa1; #ifdef LUKE_MAC_IP wan_info.gmac.octet[1]=0x21; #endif wan_info.gmac.octet[2]=0x4c; wan_info.gmac.octet[3]=0x86; wan_info.gmac.octet[4]=0x70; wan_info.gmac.octet[5]=0x03; wan_info.port_binding_mask.portmask = 1<<RTK_RG_MAC_PORT1; //port 1 will bind this wan wan_info.wan_port_idx=RTK_RG_MAC_PORT_PON; #ifdef STATIC_WAN_TEST wan_info.wan_type=RTK_RG_STATIC; #endif #ifdef PPPOE_WAN_TEST 
wan_info.wan_type=RTK_RG_PPPoE; #endif assert_ok(rtk_rg_wanInterface_add(&wan_info,&wanIntfIdx)); memset(&staticInfo,0,sizeof(staticInfo)); staticInfo.ipv4_default_gateway_on=0; staticInfo.gateway_ipv4_addr=htonl(0xc0a89675); //wan ip:192.168.150.117 staticInfo.ip_addr=htonl(0xc0a896f0); //wan ip:192.168.150.240 staticInfo.ip_network_mask=htonl(0xfffffff8); staticInfo.mtu=1500; staticInfo.napt_enable=1; staticInfo.gw_mac_auto_learn=0; staticInfo.gateway_mac_addr.octet[0]=0x68; staticInfo.gateway_mac_addr.octet[1]=0x05; staticInfo.gateway_mac_addr.octet[2]=0xCA; staticInfo.gateway_mac_addr.octet[3]=0x0F; staticInfo.gateway_mac_addr.octet[4]=0x8B; staticInfo.gateway_mac_addr.octet[5]=0x73; assert_ok(rtk_rg_staticInfo_set(wanIntfIdx,&staticInfo)); #endif //test for alg #ifdef LUKE_ALG_TEST assert_ok(rtk_rg_algApps_get(&algGet)); DEBUG("the algGet is %x",algGet); assert_ok(rtk_rg_algApps_set(RTK_RG_ALG_FTP_BIT | RTK_RG_ALG_RTSP_BIT)); assert_ok(rtk_rg_algApps_get(&algGet)); DEBUG("after set, the algGet is %x",algGet); #endif //test for inbound connection #ifdef YSLEU_INBOUND_NAPT_TEST int lanIntfIdx; int wanIntfIdx; rtk_rg_lanIntfConf_t lan_info; rtk_rg_wanIntfConf_t wan_info; rtk_rg_ipStaticInfo_t staticInfo; assert_ok(rtk_rg_initParam_set(NULL)); rtk_rg_rome_driver_init(); memset(&lan_info,0,sizeof(lan_info)); lan_info.gmac.octet[0]=0x00; lan_info.gmac.octet[1]=0xE0; lan_info.gmac.octet[2]=0x4C; lan_info.gmac.octet[3]=0x86; lan_info.gmac.octet[4]=0x70; lan_info.gmac.octet[5]=0x01; lan_info.intf_vlan_id=9; lan_info.ip_addr=htonl(0xc0a80101); //192.168.1.1 lan_info.ip_network_mask=htonl(0xffffff00); lan_info.mtu=1500; lan_info.port_mask.portmask=((1<<RTK_RG_PORT0)|(1<<RTK_RG_PORT1)|(1<<RTK_RG_PORT2)); lan_info.untag_mask.portmask=((1<<RTK_RG_MAC_PORT0)|(1<<RTK_RG_MAC_PORT1)|(1<<RTK_RG_MAC_PORT2)|(1<<RTK_RG_MAC_PORT_CPU)); assert_ok(rtk_rg_lanInterface_add(&lan_info,&lanIntfIdx)); memset(&wan_info,0,sizeof(wan_info)); wan_info.egress_vlan_id=8; wan_info.vlan_based_pri=0; wan_info.egress_vlan_tag_on=0; #if 1 wan_info.gmac.octet[0]=0x00; wan_info.gmac.octet[1]=0xE0; wan_info.gmac.octet[2]=0x4C; wan_info.gmac.octet[3]=0x86; wan_info.gmac.octet[4]=0x70; wan_info.gmac.octet[5]=0x04; #else wan_info.gmac.octet[0]=0x00; wan_info.gmac.octet[1]=0x00; wan_info.gmac.octet[2]=0x00; wan_info.gmac.octet[3]=0x01; wan_info.gmac.octet[4]=0x00; wan_info.gmac.octet[5]=0x02; #endif wan_info.port_binding_mask.portmask=0; wan_info.wan_port_idx=RTK_RG_MAC_PORT3; #ifdef STATIC_WAN_TEST wan_info.wan_type=RTK_RG_STATIC; #endif assert_ok(rtk_rg_wanInterface_add(&wan_info,&wanIntfIdx)); memset(&staticInfo,0,sizeof(staticInfo)); staticInfo.ipv4_default_gateway_on=1; staticInfo.gateway_ipv4_addr=htonl(0xc0a80216); staticInfo.ip_addr=htonl(0xc0a80201); staticInfo.ip_network_mask=htonl(0xffffff00); staticInfo.mtu=1500; staticInfo.napt_enable=1; staticInfo.gw_mac_auto_learn_for_ipv4=1; #ifdef STATIC_WAN_TEST assert_ok(rtk_rg_staticInfo_set(wanIntfIdx,&staticInfo)); #endif #ifdef CONFIG_RG_NAPT_DMZ_SUPPORT { rtk_rg_dmzInfo_t dmzInfo; memset(&dmzInfo,0,sizeof(rtk_rg_dmzInfo_t)); dmzInfo.enabled = 1; dmzInfo.private_ip = 0xc0a8010b; rtk_rg_dmzHost_set(1,&dmzInfo); } #endif #ifdef CONFIG_RG_NAPT_UPNP_SUPPORT { int upnpIdx; rtk_rg_upnpConnection_t upnp; memset(&upnp,0,sizeof(rtk_rg_upnpConnection_t)); upnpIdx=0; upnp.is_tcp = 1; upnp.valid = 1; upnp.wan_intf_idx = 1; upnp.gateway_port = 1001; upnp.local_ip = 0xc0a8010b; upnp.local_port = 10001; upnp.limit_remote_ip = 1; upnp.limit_remote_port = 0; upnp.remote_ip = 0xc0a80216; 
upnp.remote_port = 0; upnp.type = UPNP_TYPE_ONESHOT; upnp.timeout = 0; rtk_rg_upnpConnection_add(&upnp,&upnpIdx); memset(&upnp,0,sizeof(rtk_rg_upnpConnection_t)); upnpIdx=1; upnp.is_tcp = 0; upnp.valid = 1; upnp.wan_intf_idx = 1; upnp.gateway_port = 1002; upnp.local_ip = 0xc0a8010b; upnp.local_port = 10002; upnp.limit_remote_ip = 1; upnp.limit_remote_port = 0; upnp.remote_ip = 0xc0a80216; upnp.remote_port = 0; upnp.type = UPNP_TYPE_PERSIST; upnp.timeout = 0; rtk_rg_upnpConnection_add(&upnp,&upnpIdx); } #endif #ifdef CONFIG_RG_NAPT_VIRTUAL_SERVER_SUPPORT { int vsIdx; rtk_rg_virtualServer_t vs; vsIdx=1; memset(&vs,0,sizeof(rtk_rg_virtualServer_t)); vs.gateway_port_start = 2001; vs.is_tcp = 1; vs.local_ip = 0xc0a8010b; vs.local_port_start = 20001; vs.mappingPortRangeCnt = 100; vs.valid = 1; vs.wan_intf_idx = 1; rtk_rg_virtualServer_add(&vs,&vsIdx); memset(&vs,0,sizeof(rtk_rg_virtualServer_t)); vsIdx=2; vs.gateway_port_start = 2002; vs.is_tcp = 0; vs.local_ip = 0xc0a8010b; vs.local_port_start = 20002; vs.mappingPortRangeCnt = 100; vs.valid = 1; vs.wan_intf_idx = 1; rtk_rg_virtualServer_add(&vs,&vsIdx); } #endif #endif return SUCCESS; } int start_wan_config_type=0; int32 rtk_rg_apollo_start_wan_config( struct file *filp, const char *buff,unsigned long len, void *data ) { char *tmpbuf=NULL; char *strptr=NULL; tmpbuf=&rg_kernel.proc_parsing_buf[0]; if (buff && !copy_from_user(tmpbuf, buff, len)) { tmpbuf[len] = '\0'; strptr=tmpbuf; } if(strncmp(strptr, "ipoe",4) == 0 || strncmp(strptr, "0",1) == 0){ rtk_rg_fwdEngine_start(); start_wan_config_type=0; }else if(strncmp(strptr, "pppoe",5) == 0||strncmp(strptr, "1",1) == 0){ rtk_rg_fwdEngine_pppoe(); start_wan_config_type=1; }else { printk("(0:ipoe,1:pppoe)\n"); } return len; } int32 rtk_rg_apollo_start_wan_config_type(char *page, char **start, off_t off, int count, int *eof, void *data) { int len=0; len = sprintf(page, "%d(0:ipoe,1:pppoe)\n",start_wan_config_type); if (len <= off+count) *eof = 1; *start = page + off; len -= off; if (len>count) len = count; if (len<0) len = 0; return len; } #endif #if defined(RTK_RG_INGRESS_QOS_TEST_PATCH) && defined(CONFIG_RTL9600_SERIES) int32 rtk_rg_qos_type_sel( struct file *filp, const char *buff,unsigned long len, void *data ) { char *tmpbuf; char *strptr=NULL; int i; tmpbuf=&rg_kernel.proc_parsing_buf[0]; if (buff && !copy_from_user(tmpbuf, buff, len)) { tmpbuf[len] = '\0'; strptr=tmpbuf; if(strncmp(strptr, "0",1) == 0) rg_db.systemGlobal.qos_type=RTK_RG_INGRESS_QOS_ORIGINAL; else if(strncmp(strptr, "1",1) == 0) rg_db.systemGlobal.qos_type=RTK_RG_INGRESS_QOS_ALL_HIGH_QUEUE; } _rtk_rg_qos_acl_flush(); //Re-config ingress rate for(i=0;i<RTK_RG_MAC_PORT_PON;i++) { if(rg_db.systemGlobal.qos_ingress_rate[i]) { _rtk_rg_qos_acl_patch(i,rg_db.systemGlobal.qos_ingress_rate[i]); } } return len; } #endif void _rtk_rg_hwnatACLManipulate(rtk_enable_t natSwitch) { #if 0 rtk_rg_aclFilterAndQos_t acl_filter; //the index should always be in the aclLowerBoundary-1 uint16 ruleIdx = (rg_db.systemGlobal.aclAndCfReservedRule.aclLowerBoundary-1); bzero(&acl_filter,sizeof(acl_filter)); acl_filter.filter_fields |= INGRESS_PORT_BIT; acl_filter.ingress_port_mask.portmask = RTK_RG_ALL_MAC_PORTMASK_WITHOUT_CPU; acl_filter.action_type = ACL_ACTION_TYPE_TRAP; #endif #if defined(CONFIG_APOLLO) if(natSwitch==ENABLED) { //if(rg_kernel.layer2LookupMissFlood2CPU==RTK_RG_DISABLED) _rtk_rg_aclAndCfReservedRuleAdd(RTK_RG_ACLANDCF_RESERVED_BROADCAST_TRAP,NULL); //rg_kernel.layer2LookupMissFlood2CPU is determind in reserved ACL when adding HW ACL 
entry _rtk_rg_aclAndCfReservedRuleDel(RTK_RG_ACLANDCF_RESERVED_ALL_TRAP); _rtk_rg_aclAndCfReservedRuleDel(RTK_RG_ACLANDCF_RESERVED_UNICAST_TRAP);//only enabled when echo 4 > /proc/rg/hwnat } else { //rtk_l2_portLimitLearningCnt_set(RTK_RG_MAC_PORT_CPU,0); //: patch for l2 pure software (auto learn from CPU port) _rtk_rg_aclAndCfReservedRuleAdd(RTK_RG_ACLANDCF_RESERVED_ALL_TRAP,NULL); _rtk_rg_aclAndCfReservedRuleDel(RTK_RG_ACLANDCF_RESERVED_BROADCAST_TRAP); //rg_kernel.layer2LookupMissFlood2CPU is determind in reserved ACL when adding HW ACL entry _rtk_rg_aclAndCfReservedRuleDel(RTK_RG_ACLANDCF_RESERVED_UNICAST_TRAP);//only enabled when echo 4 > /proc/rg/hwnat } #elif defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) rtk_hwEnable(natSwitch); #endif } int _rtk_rg_ipv6MC_tranlate_ingressVid_control( struct file *filp, const char *buff,unsigned long len, void *data ) { char *tmpbuf; char *strptr=NULL; tmpbuf=&rg_kernel.proc_parsing_buf[0]; if (buff && !copy_from_user(tmpbuf, buff, len)) { tmpbuf[len] = '\0'; strptr=tmpbuf; if(strncmp(strptr, "1",1) == 0) { rg_db.systemGlobal.ipv6MC_translate_ingressVID_enable=RTK_RG_ENABLED; assert_ok(_rtk_rg_aclAndCfReservedRuleAdd(RTK_RG_ACLANDCF_RESERVED_MULTICAST_VID_TRANSLATE_FOR_IPV6_PASSTHROUGHT, NULL)); } else if(strncmp(strptr, "0",1) == 0) { rg_db.systemGlobal.ipv6MC_translate_ingressVID_enable=RTK_RG_DISABLED; assert_ok(_rtk_rg_aclAndCfReservedRuleDel(RTK_RG_ACLANDCF_RESERVED_MULTICAST_VID_TRANSLATE_FOR_IPV6_PASSTHROUGHT)); } } return len; } int _rtk_rg_ipv6MC_tranlate_ingressVid_state(struct seq_file *s, void *v) { int len=0; if(rg_db.systemGlobal.ipv6MC_translate_ingressVID_enable==RTK_RG_ENABLED){ PROC_PRINTF("tranlate IPv6 Multicast ingressVid Enabled!\n"); }else if(rg_db.systemGlobal.ipv6MC_translate_ingressVID_enable==RTK_RG_DISABLED){ PROC_PRINTF("tranlate IPv6 Multicast ingressVid DISABLED!\n"); } return len; } int _rtk_rg_mld_trap_to_PS_change( struct file *filp, const char *buff,unsigned long len, void *data ) { char *tmpbuf; char *strptr=NULL; tmpbuf=&rg_kernel.proc_parsing_buf[0]; if (buff && !copy_from_user(tmpbuf, buff, len)) { tmpbuf[len] = '\0'; strptr=tmpbuf; if(strncmp(strptr, "1",1) == 0) { rg_db.systemGlobal.mld_Trap_to_PS_enable=RTK_RG_ENABLED; } else if(strncmp(strptr, "0",1) == 0) { rg_db.systemGlobal.mld_Trap_to_PS_enable=RTK_RG_DISABLED; } } return len; } int _rtk_rg_mld_trap_to_PS_state(struct seq_file *s, void *v) { int len=0; if(rg_db.systemGlobal.mld_Trap_to_PS_enable==RTK_RG_ENABLED){ PROC_PRINTF("mld_Trap_to_PS Enabled!\n"); }else if(rg_db.systemGlobal.mld_Trap_to_PS_enable==RTK_RG_DISABLED){ PROC_PRINTF("mld_Trap_to_PS DISABLED!\n"); } return len; } int _rtk_rg_igmp_trap_to_PS_change( struct file *filp, const char *buff,unsigned long len, void *data ) { char *tmpbuf; char *strptr=NULL; tmpbuf=&rg_kernel.proc_parsing_buf[0]; if (buff && !copy_from_user(tmpbuf, buff, len)) { tmpbuf[len] = '\0'; strptr=tmpbuf; if(strncmp(strptr, "1",1) == 0) { rg_db.systemGlobal.igmp_Trap_to_PS_enable=RTK_RG_ENABLED; } else if(strncmp(strptr, "0",1) == 0) { rg_db.systemGlobal.igmp_Trap_to_PS_enable=RTK_RG_DISABLED; } } return len; } int _rtk_rg_igmp_trap_to_PS_state(struct seq_file *s, void *v) { int len=0; if(rg_db.systemGlobal.igmp_Trap_to_PS_enable==RTK_RG_ENABLED){ PROC_PRINTF("igmp_Trap_to_PS Enabled!\n"); }else if(rg_db.systemGlobal.igmp_Trap_to_PS_enable==RTK_RG_DISABLED){ PROC_PRINTF("igmp_Trap_to_PS DISABLED!\n"); } return len; } int _rtk_rg_pppoe_bc_passthrought_to_bindingWan_get(struct seq_file *s, void *v) { int len=0; 
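    /* proc read handler: report whether PPPoE broadcast passthrough to the binding WAN is currently enabled */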
if(rg_db.systemGlobal.pppoe_bc_passthrought_to_bindingWan_enable==RTK_RG_ENABLED){ PROC_PRINTF("pppoe_bc_passthrought_to_bindingWan_enable Enabled!\n"); }else if(rg_db.systemGlobal.pppoe_bc_passthrought_to_bindingWan_enable==RTK_RG_DISABLED){ PROC_PRINTF("pppoe_bc_passthrought_to_bindingWan_enable DISABLED!\n"); } return len; } int _rtk_rg_pppoe_bc_passthrought_to_bindingWan_set( struct file *filp, const char *buff,unsigned long len, void *data ) { char *tmpbuf; char *strptr=NULL; tmpbuf=&rg_kernel.proc_parsing_buf[0]; if (buff && !copy_from_user(tmpbuf, buff, len)) { tmpbuf[len] = '\0'; strptr=tmpbuf; if(strncmp(strptr, "1",1) == 0) { rg_db.systemGlobal.pppoe_bc_passthrought_to_bindingWan_enable=RTK_RG_ENABLED; } else if(strncmp(strptr, "0",1) == 0) { rg_db.systemGlobal.pppoe_bc_passthrought_to_bindingWan_enable=RTK_RG_DISABLED; } } _rtk_rg_pppoe_bc_passthrought_to_bindingWan_get(NULL,NULL); return len; } int _rtk_rg_pppoe_mc_routing_trap_state_get(struct seq_file *s, void *v) { int len=0; if(rg_db.systemGlobal.pppoe_mc_routing_trap==RTK_RG_ENABLED){ PROC_PRINTF("pppoe_mc_routing_trap Enabled!\n"); }else if(rg_db.systemGlobal.pppoe_mc_routing_trap==RTK_RG_DISABLED){ PROC_PRINTF("pppoe_mc_routing_trap DISABLED!\n"); } _rtk_rg_aclAndCfReservedRuleAdd(RTK_RG_ACLANDCF_RESERVED_TAIL_END,NULL); //reflash reserved acl rules to see if it is need to add or not. return len; } int _rtk_rg_pppoe_mc_routing_trap_state_set( struct file *filp, const char *buff,unsigned long len, void *data ) { char *tmpbuf; char *strptr=NULL; tmpbuf=&rg_kernel.proc_parsing_buf[0]; if (buff && !copy_from_user(tmpbuf, buff, len)) { tmpbuf[len] = '\0'; strptr=tmpbuf; if(strncmp(strptr, "1",1) == 0) { rg_db.systemGlobal.pppoe_mc_routing_trap=RTK_RG_ENABLED; } else if(strncmp(strptr, "0",1) == 0) { rg_db.systemGlobal.pppoe_mc_routing_trap=RTK_RG_DISABLED; } } _rtk_rg_pppoe_mc_routing_trap_state_get(NULL,NULL); return len; } int _rtk_rg_unknowDA_trap_to_PS_change( struct file *filp, const char *buff,unsigned long len, void *data ) { char *tmpbuf; char *strptr=NULL; tmpbuf=&rg_kernel.proc_parsing_buf[0]; if (buff && !copy_from_user(tmpbuf, buff, len)) { tmpbuf[len] = '\0'; strptr=tmpbuf; if(strncmp(strptr, "1",1) == 0) { rg_db.systemGlobal.unknownDA_Trap_to_PS_enable=RTK_RG_ENABLED; } else if(strncmp(strptr, "0",1) == 0) { rg_db.systemGlobal.unknownDA_Trap_to_PS_enable=RTK_RG_DISABLED; } } return len; } int _rtk_rg_unknowDA_trap_to_PS_state(struct seq_file *s, void *v) { int len=0; if(rg_db.systemGlobal.unknownDA_Trap_to_PS_enable==RTK_RG_ENABLED){ PROC_PRINTF("unknowDA_trap_to_PS Enabled!\n"); }else if(rg_db.systemGlobal.unknownDA_Trap_to_PS_enable==RTK_RG_DISABLED){ PROC_PRINTF("unknowDA_trap_to_PS DISABLED!\n"); } return len; } int rtk_rg_hwnat_enable( struct file *filp, const char *buff,unsigned long len, void *data ) { int i; char *tmpbuf; char *strptr=NULL; rtk_l2_limitLearnCntAction_t act; rtk_rg_hwnatState_t original_state=rg_db.systemGlobal.hwnat_enable; tmpbuf=&rg_kernel.proc_parsing_buf[0]; if (buff && !copy_from_user(tmpbuf, buff, len)) { tmpbuf[len] = '\0'; strptr=tmpbuf; if(strncmp(strptr, "1",1) == 0)//enable: remove the ACL rule { _rtk_rg_hwnatACLManipulate(ENABLED); rg_db.systemGlobal.hwnat_enable=RG_HWNAT_ENABLE; } else if(strncmp(strptr, "0",1) == 0)//disable:add a ACL rule to trap all packet to CPU { _rtk_rg_hwnatACLManipulate(DISABLED); rg_db.systemGlobal.hwnat_enable=RG_HWNAT_DISABLE; } else if(strncmp(strptr, "2",1) == 0)//rtn PS { _rtk_rg_hwnatACLManipulate(DISABLED); 
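            //mode 2 ("rtn PS"): return all traffic to the protocol stack; hwnat and the software fwdEngine are both disabled (reported as "2:hwnat DISABLED, fwdEngine DISABLED" by rtk_rg_hwnat_is_enabled)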
            rg_db.systemGlobal.hwnat_enable=RG_HWNAT_PROTOCOL_STACK;
        }
        else if(strncmp(strptr, "3",1) == 0)    //force unicast forwarding by HW; packets that still reach the fwdEngine will be dropped (broadcast not included)
        {
            _rtk_rg_hwnatACLManipulate(ENABLED);
            rg_db.systemGlobal.hwnat_enable=RG_HWNAT_UC_FORCE_HW_FWD;
        }
        else if(strncmp(strptr, "4",1) == 0)    //force unicast trap to PS; BC/MC will be ignored
        {
            _rtk_rg_hwnatACLManipulate(ENABLED);    //Enable HWNAT, and reserve one more rule for MC temporary permit while the ACL is rearranged, by type RTK_RG_ACLANDCF_RESERVED_BROADCAST_TRAP
            _rtk_rg_aclAndCfReservedRuleAdd(RTK_RG_ACLANDCF_RESERVED_UNICAST_TRAP,NULL);    //but trap all unicast only
            rg_db.systemGlobal.hwnat_enable=RG_HWNAT_UC_FORCE_PROTOCOL_STACK;
        }
    }

    //set the MAC learning limit action and ebtables rules based on hwnat's value
    if(rg_db.systemGlobal.hwnat_enable==RG_HWNAT_PROTOCOL_STACK)
    {
        act=LIMIT_LEARN_CNT_ACTION_FORWARD;
        //set FORWARD policy to ACCEPT if we are using diagshell with the default callback
        if(rg_db.systemGlobal.initParam.initByHwCallBack==&_rtk_rg_initParameterSetByHwCallBack)
        {
            rtk_rg_callback_pipe_cmd("ebtables -P FORWARD ACCEPT");
            rtk_rg_callback_pipe_cmd("iptables -P FORWARD ACCEPT");
            rtk_rg_callback_pipe_cmd("ip6tables -P FORWARD ACCEPT");
        }
        else
        {
            //delete disBCMC
            rtk_rg_callback_pipe_cmd("ebtables -D FORWARD -j disBCMC");
            rtk_rg_callback_pipe_cmd("ebtables -X disBCMC");
        }
#ifdef CONFIG_RG_SET_NIC_RX_TX_FUNC
        //cxy 2014-11-17: hwnat is disabled, so set the eth device rx and tx functions directly to re8670_rx_skb and re8670_start_xmit
        rtk_rg_callback_pipe_cmd("echo re8670_rx_skb > /proc/rtl8686gmac/port2rxfunc");
        rtk_rg_callback_pipe_cmd("echo re8670_start_xmit > /proc/rtl8686gmac/port_txfunc");
        //cxy 2014-11-17: clear the fastbridge table, because a unicast packet whose destination device is wifi (eth0, learned when romedriver was enabled)
        //will loop back to the CPU (eth0's tx portmask is 0, so HW lookup).
        rtk_rg_callback_pipe_cmd("echo 2 > /proc/fastbridge");
        //cxy 2014-11-17: br0's fdb has a wifi-MAC-to-eth0 fdb entry, so clearing the fastbridge table is not enough;
        //eth0 is deleted from br0 to clear the eth0-related fdb entries
        rtk_rg_callback_pipe_cmd("brctl delif br0 eth0");
#endif
    }
    else
    {
        act=LIMIT_LEARN_CNT_ACTION_TO_CPU;
        if(original_state==RG_HWNAT_PROTOCOL_STACK)
        {
            //set FORWARD policy to DROP if we are using diagshell with the default callback
            if(rg_db.systemGlobal.initParam.initByHwCallBack==&_rtk_rg_initParameterSetByHwCallBack)
            {
                rtk_rg_callback_pipe_cmd("ebtables -P FORWARD DROP");
                rtk_rg_callback_pipe_cmd("iptables -P FORWARD DROP");
                rtk_rg_callback_pipe_cmd("ip6tables -P FORWARD DROP");
            }
            else
            {
                //add back disBCMC
                rtk_rg_callback_pipe_cmd("ebtables -N disBCMC");
                rtk_rg_callback_pipe_cmd("ebtables -A disBCMC -d Broadcast -j DROP");
                rtk_rg_callback_pipe_cmd("ebtables -A disBCMC -d Multicast -j DROP");
                rtk_rg_callback_pipe_cmd("ebtables -I FORWARD 1 -j disBCMC");
            }
        }
#ifdef CONFIG_RG_SET_NIC_RX_TX_FUNC
        rtk_rg_callback_pipe_cmd("echo drv_nic_rx_list > /proc/rtl8686gmac/port2rxfunc");
        rtk_rg_callback_pipe_cmd("echo re8670_start_xmit_check > /proc/rtl8686gmac/port_txfunc");
        rtk_rg_callback_pipe_cmd("echo 2 > /proc/fastbridge");
        rtk_rg_callback_pipe_cmd("brctl addif br0 eth0");
#endif
    }
#ifdef CONFIG_RG_WLAN_HWNAT_ACCELERATION
    // set CPU port's SA learning limit action
    ASSERT_EQ(RTK_L2_PORTLIMITLEARNINGCNTACTION_SET(RTK_RG_MAC_PORT_CPU,act),RT_ERR_OK);
#endif
    for(i=0;i<RTK_RG_MAC_PORT_CPU;i++)
        ASSERT_EQ(RTK_L2_PORTLIMITLEARNINGCNTACTION_SET(i,act),RT_ERR_OK);
    return len;
}

#define REMINDER_SEP "---------------------------------------------------------\nREMINDER:\n"
#define REMINDER_HWNAT_1 "* Setting HWNAT to 2 bypasses romeDriver and its MAC learning functionality, so the protocol stack or users themselves should use the RG API to add/delete MAC entries. Otherwise the packets may not be forwarded.\n"
#define REMINDER_HWNAT_2 "* By default iptables may set the INPUT policy to DROP; if you don't want this, set it to ACCEPT by typing \"iptables -P INPUT ACCEPT\".\n"

int rtk_rg_hwnat_is_enabled(struct seq_file *s, void *v)
{
    int len=0;
    // char status[64];
    if(rg_db.systemGlobal.hwnat_enable==RG_HWNAT_ENABLE){
        PROC_PRINTF("1:hwnat ENABLED, fwdEngine ENABLED\n");
    }else if(rg_db.systemGlobal.hwnat_enable==RG_HWNAT_DISABLE){
        PROC_PRINTF("0:hwnat DISABLED, fwdEngine ENABLED\n");
    }else if(rg_db.systemGlobal.hwnat_enable==RG_HWNAT_PROTOCOL_STACK){
        PROC_PRINTF("2:hwnat DISABLED, fwdEngine DISABLED\n");
    }else if(rg_db.systemGlobal.hwnat_enable==RG_HWNAT_UC_FORCE_HW_FWD){
        PROC_PRINTF("3:hwnat ENABLED, unicast will be dropped in fwdEngine\n");
    }else if(rg_db.systemGlobal.hwnat_enable==RG_HWNAT_UC_FORCE_PROTOCOL_STACK){
        PROC_PRINTF("4:hwnat ENABLED, unicast will be trapped to PS\n");
    }
    //add description about "echo 2"
    PROC_PRINTF("%s%s%s",REMINDER_SEP,REMINDER_HWNAT_1,REMINDER_HWNAT_2);
    return len;
}

int rtk_rg_stag_enable( struct file *filp, const char *buff,unsigned long len, void *data )
{
    char *tmpbuf;
    char *strptr=NULL;
    tmpbuf=&rg_kernel.proc_parsing_buf[0];
    if (buff && !copy_from_user(tmpbuf, buff, len))
    {
        tmpbuf[len] = '\0';
        strptr=tmpbuf;
        if (strncmp(strptr, "1",1) == 0)    //enable
        {
            if (rg_kernel.stag_enable==RTK_RG_DISABLED)
            {
                rg_kernel.stag_enable=RTK_RG_ENABLED;
                assert_ok(RTK_SVLAN_SERVICEPORT_SET(RTK_RG_MAC_PORT_PON, ENABLED));
                _rtk_rg_default_svlan_manipulate();
            }
            rtlglue_printf("to enable stag_enable\n");
        }
        else if (strncmp(strptr, "0",1) == 0)    //disable
        {
            if (rg_kernel.stag_enable==RTK_RG_ENABLED)
            {
                rg_kernel.stag_enable=RTK_RG_DISABLED;
                assert_ok(RTK_SVLAN_SERVICEPORT_SET(RTK_RG_MAC_PORT_PON, DISABLED));
                _rtk_rg_default_svlan_manipulate();
            }
            rtlglue_printf("to disable stag_enable\n");
        }
        else
        {
            rtlglue_printf("Unknown Setting\n0, Disabled\n1, Enabled\n");
        }
    }
    return len;
}

int rtk_rg_stag_is_enabled(struct seq_file *s, void *v)
{
    int len=0;
    if (rg_kernel.stag_enable==RTK_RG_ENABLED){
        PROC_PRINTF("1, Enabled\n");
    } else if (rg_kernel.stag_enable==RTK_RG_DISABLED){
        PROC_PRINTF("0, Disabled\n");
    }else {
        PROC_PRINTF("Unknown value\n");
    }
    return len;
}

int rtk_rg_debug_level_show(struct seq_file *s, void *v)
{
    int len=0;
    PROC_PRINTF("RomeDriver Debug level=0x%x [0x1:DEBUG=%s][0x2:FIXME=%s][0x4:CALLBACK=%s][0x8:TRACE=%s][0x10:ACL=%s][0x20:WARN=%s][0x40:TRACE_DUMP=%s][0x80:WMUX=%s][0x100:MACLN=%s][0x200:TABLE=%s][0x400:ALG=%s][0x800:IGMP=%s][0x1000:ACL_RSV=%s][0x2000:RG_API=%s]\n",rg_kernel.debug_level,
        (rg_kernel.debug_level&RTK_RG_DEBUG_LEVEL_DEBUG)?"on":"off",
        (rg_kernel.debug_level&RTK_RG_DEBUG_LEVEL_FIXME)?"on":"off",
        (rg_kernel.debug_level&RTK_RG_DEBUG_LEVEL_CALLBACK)?"on":"off",
        (rg_kernel.debug_level&RTK_RG_DEBUG_LEVEL_TRACE)?"on":"off",
        (rg_kernel.debug_level&RTK_RG_DEBUG_LEVEL_ACL)?"on":"off",
        (rg_kernel.debug_level&RTK_RG_DEBUG_LEVEL_WARN)?"on":"off",
        (rg_kernel.debug_level&RTK_RG_DEBUG_LEVEL_TRACE_DUMP)?"on":"off",
        (rg_kernel.debug_level&RTK_RG_DEBUG_LEVEL_WMUX)?"on":"off",
        (rg_kernel.debug_level&RTK_RG_DEBUG_LEVEL_MACLEARN)?"on":"off",
        (rg_kernel.debug_level&RTK_RG_DEBUG_LEVEL_TABLE)?"on":"off",
        (rg_kernel.debug_level&RTK_RG_DEBUG_LEVEL_ALG)?"on":"off",
        (rg_kernel.debug_level&RTK_RG_DEBUG_LEVEL_IGMP)?"on":"off",
        (rg_kernel.debug_level&RTK_RG_DEBUG_LEVEL_ACL_RRESERVED)?"on":"off",
        (rg_kernel.debug_level&RTK_RG_DEBUG_LEVEL_RG_API)?"on":"off"
        );
    return len;
}

int rtk_rg_filter_level_show(struct seq_file *s, void *v)
{
    int len=0;
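    //proc read handler: dump the per-category filter-level bitmask; the bit layout matches rtk_rg_debug_level_show() above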
PROC_PRINTF("RomeDriver Filter level=0x%x [0x1:DEBUG=%s][0x2:FIXME=%s][0x4:CALLBACK=%s][0x8:TRACE=%s][0x10:ACL=%s][0x20:WARN=%s][0x40:TRACE_DUMP=%s][0x80:WMUX=%s][0x100:MACLN=%s][0x200:TABLE=%s][0x400:ALG=%s][0x800:IGMP=%s][0x1000:ACL_RSV=%s][0x2000:RG_API=%s]\n",rg_kernel.filter_level, (rg_kernel.filter_level&RTK_RG_DEBUG_LEVEL_DEBUG)?"on":"off", (rg_kernel.filter_level&RTK_RG_DEBUG_LEVEL_FIXME)?"on":"off", (rg_kernel.filter_level&RTK_RG_DEBUG_LEVEL_CALLBACK)?"on":"off", (rg_kernel.filter_level&RTK_RG_DEBUG_LEVEL_TRACE)?"on":"off", (rg_kernel.filter_level&RTK_RG_DEBUG_LEVEL_ACL)?"on":"off", (rg_kernel.filter_level&RTK_RG_DEBUG_LEVEL_WARN)?"on":"off", (rg_kernel.filter_level&RTK_RG_DEBUG_LEVEL_TRACE_DUMP)?"on":"off", (rg_kernel.filter_level&RTK_RG_DEBUG_LEVEL_WMUX)?"on":"off", (rg_kernel.filter_level&RTK_RG_DEBUG_LEVEL_MACLEARN)?"on":"off", (rg_kernel.filter_level&RTK_RG_DEBUG_LEVEL_TABLE)?"on":"off", (rg_kernel.filter_level&RTK_RG_DEBUG_LEVEL_ALG)?"on":"off", (rg_kernel.filter_level&RTK_RG_DEBUG_LEVEL_IGMP)?"on":"off", (rg_kernel.filter_level&RTK_RG_DEBUG_LEVEL_ACL_RRESERVED)?"on":"off", (rg_kernel.filter_level&RTK_RG_DEBUG_LEVEL_RG_API)?"on":"off" ); return len; } #ifdef CONFIG_RG_CALLBACK int rtk_rg_callback_show(struct seq_file *s, void *v) { int len=0; PROC_PRINTF("[rg callback functions]\t\t\t\t [Address] \n"); PROC_PRINTF("_rtk_rg_initParameterSetByHwCallBack\t\t 0x%p\n",&_rtk_rg_initParameterSetByHwCallBack); PROC_PRINTF("_rtk_rg_arpAddByHwCallBack\t\t\t 0x%p\n",&_rtk_rg_arpAddByHwCallBack); PROC_PRINTF("_rtk_rg_arpDelByHwCallBack\t\t\t 0x%p\n",&_rtk_rg_arpDelByHwCallBack); PROC_PRINTF("_rtk_rg_macAddByHwCallBack\t\t\t 0x%p\n",&_rtk_rg_macAddByHwCallBack); PROC_PRINTF("_rtk_rg_macDelByHwCallBack\t\t\t 0x%p\n",&_rtk_rg_macDelByHwCallBack); //5 PROC_PRINTF("_rtk_rg_routingAddByHwCallBack\t\t\t 0x%p\n",&_rtk_rg_routingAddByHwCallBack); PROC_PRINTF("_rtk_rg_routingDelByHwCallBack\t\t\t 0x%p\n",&_rtk_rg_routingDelByHwCallBack); PROC_PRINTF("_rtk_rg_naptAddByHwCallBack\t\t\t 0x%p\n",&_rtk_rg_naptAddByHwCallBack); PROC_PRINTF("_rtk_rg_naptDelByHwCallBack\t\t\t 0x%p\n",&_rtk_rg_naptDelByHwCallBack); PROC_PRINTF("_rtk_rg_bindingAddByHwCallBack\t\t\t 0x%p\n",&_rtk_rg_bindingAddByHwCallBack); //10 PROC_PRINTF("_rtk_rg_bindingDelByHwCallBack\t\t\t 0x%p\n",&_rtk_rg_bindingDelByHwCallBack); PROC_PRINTF("_rtk_rg_interfaceAddByHwCallBack\t\t 0x%p\n",&_rtk_rg_interfaceAddByHwCallBack); PROC_PRINTF("_rtk_rg_interfaceDelByHwCallBack\t\t 0x%p\n",&_rtk_rg_interfaceDelByHwCallBack); PROC_PRINTF("_rtk_rg_neighborAddByHwCallBack\t\t\t 0x%p\n",&_rtk_rg_neighborAddByHwCallBack); PROC_PRINTF("_rtk_rg_neighborDelByHwCallBack\t\t\t 0x%p\n",&_rtk_rg_neighborDelByHwCallBack); //15 PROC_PRINTF("_rtk_rg_v6RoutingAddByHwCallBack\t\t 0x%p\n",&_rtk_rg_v6RoutingAddByHwCallBack); PROC_PRINTF("_rtk_rg_v6RoutingDelByHwCallBack\t\t 0x%p\n",&_rtk_rg_v6RoutingDelByHwCallBack); #ifdef CONFIG_RG_NAPT_UPNP_SUPPORT PROC_PRINTF("_rtk_rg_fwdEngine_upnpCheck\t\t\t 0x%p\n",&_rtk_rg_fwdEngine_upnpCheck); #else PROC_PRINTF("_rtk_rg_fwdEngine_upnpCheck\t\t\t NULL\n"); #endif #ifdef CONFIG_RG_NAPT_VIRTUAL_SERVER_SUPPORT PROC_PRINTF("_rtk_rg_fwdEngine_virtualServerCheck\t\t 0x%p\n",&_rtk_rg_fwdEngine_virtualServerCheck); #ifdef CONFIG_RG_IPV6_NAPT_SUPPORT rg_db.systemGlobal.initParam.ipv6NaptInboundConnLookupSecondCallBack=_rtk_rg_fwdEngine_ipv6VirtualServerCheck; #endif //end of CONFIG_RG_IPV6_NAPT_SUPPORT #else PROC_PRINTF("_rtk_rg_fwdEngine_virtualServerCheck\t\t NULL\n"); #endif #ifdef CONFIG_RG_NAPT_DMZ_SUPPORT 
PROC_PRINTF("_rtk_rg_fwdEngine_dmzCheck\t\t\t 0x%p\n",&_rtk_rg_fwdEngine_dmzCheck); #ifdef CONFIG_RG_IPV6_NAPT_SUPPORT rg_db.systemGlobal.initParam.ipv6NaptInboundConnLookupThirdCallBack=_rtk_rg_fwdEngine_ipv6DmzCheck; #endif //end of CONFIG_RG_IPV6_NAPT_SUPPORT #else PROC_PRINTF("_rtk_rg_fwdEngine_dmzCheck\t\t\t NULL\n"); #endif //end of CONFIG_RG_NAPT_DMZ_SUPPORT //20 PROC_PRINTF("_rtk_rg_dhcpRequestByHwCallBack\t\t\t 0x%p\n",&_rtk_rg_dhcpRequestByHwCallBack); PROC_PRINTF("_rtk_rg_pppoeBeforeDialByHwCallBack\t\t 0x%p\n",&_rtk_rg_pppoeBeforeDialByHwCallBack); PROC_PRINTF("_rtk_rg_pptpBeforeDialByHwCallBack\t\t 0x%p\n",&_rtk_rg_pptpBeforeDialByHwCallBack); PROC_PRINTF("_rtk_rg_l2tpBeforeDialByHwCallBack\t\t 0x%p\n",&_rtk_rg_l2tpBeforeDialByHwCallBack); PROC_PRINTF("_rtk_rg_pppoeDsliteBeforeDialByHwCallBack\t 0x%p\n",&_rtk_rg_pppoeDsliteBeforeDialByHwCallBack); //25 PROC_PRINTF("ipv6NaptInboundConnLookupFirstCallBack\t\t NULL\n"); #if defined(CONFIG_RG_NAPT_VIRTUAL_SERVER_SUPPORT) && defined(CONFIG_RG_IPV6_NAPT_SUPPORT) PROC_PRINTF("_rtk_rg_fwdEngine_ipv6VirtualServerCheck\t 0x%p\n",&_rtk_rg_fwdEngine_ipv6VirtualServerCheck); #else PROC_PRINTF("_rtk_rg_fwdEngine_ipv6VirtualServerCheck\t NULL\n"); #endif //end of CONFIG_RG_IPV6_NAPT_SUPPORT #if defined(CONFIG_RG_NAPT_DMZ_SUPPORT) && defined(CONFIG_RG_IPV6_NAPT_SUPPORT) PROC_PRINTF("_rtk_rg_fwdEngine_ipv6DmzCheck\t\t\t 0x%p\n",&_rtk_rg_fwdEngine_ipv6DmzCheck); #else PROC_PRINTF("_rtk_rg_fwdEngine_ipv6DmzCheck\t\t\t NULL\n"); #endif //end of CONFIG_RG_IPV6_NAPT_SUPPORT PROC_PRINTF("_rtk_rg_softwareNaptInfoAddCallBack\t\t 0x%p\n",&_rtk_rg_softwareNaptInfoAddCallBack); PROC_PRINTF("_rtk_rg_softwareNaptInfoDeleteCallBack\t\t 0x%p\n",&_rtk_rg_softwareNaptInfoDeleteCallBack); //30 PROC_PRINTF("_rtk_rg_naptPreRouteDPICallBack\t\t\t 0x%p\n",&_rtk_rg_naptPreRouteDPICallBack); PROC_PRINTF("_rtk_rg_naptForwardDPICallBack\t\t\t 0x%p\n",&_rtk_rg_naptForwardDPICallBack); PROC_PRINTF("_rtk_rg_pppoeLCPStateCallBack\t\t\t 0x%p\n",&_rtk_rg_pppoeLCPStateCallBack); return len; } #endif int acl_counter_mode_get(struct seq_file *s, void *v) { int len=0; PROC_PRINTF("%d\n", rg_db.systemGlobal.aclPolicingMode); // PROC_PRINTF("acl POLICACT mode: %d\n", rg_db.systemGlobal.aclPolicingMode); // PROC_PRINTF("currently support:\n ACL_IGR_LOG_MIB_ACT(1)\n ACL_IGR_LOG_POLICING_ACT(0)\n"); return len; } int acl_counter_mode_set(struct file *filp, const char *buff,unsigned long len, void *data) { int mode = _rtk_rg_pasring_proc_string_to_integer(buff,len); int ret; struct seq_file *s = NULL; if(mode==ACL_IGR_LOG_POLICING_ACT) { ret = _rtk_rg_check_acl_committed_rule(ACL_IGR_LOG_MIB_ACT); if(ret) PROC_PRINTF("acl log mib rule exist, need to remove log mib rule before switching to log policing mode.\n"); else rg_db.systemGlobal.aclPolicingMode = mode; } else if(mode==ACL_IGR_LOG_MIB_ACT) { ret = _rtk_rg_check_acl_committed_rule(ACL_IGR_LOG_POLICING_ACT); if(ret) PROC_PRINTF("acl log policing rule exist, need to remove log policing rule before switching to log mib mode.\n"); else rg_db.systemGlobal.aclPolicingMode = mode; } else { acl_counter_mode_get(NULL, NULL); PROC_PRINTF("currently support:\n ACL_IGR_LOG_POLICING_ACT(0)\n ACL_IGR_LOG_MIB_ACT(1)\n"); } return len; } int acl_drop_ip_range_by_sw_get(struct seq_file *s, void *v) { int len=0; //printk("aaa\n"); if(rg_db.systemGlobal.aclDropIpRangeBySwEnable){ PROC_PRINTF("acl trap IP range with Drop action rule Enabled!\n"); }else{ PROC_PRINTF("acl trap IP range with Drop action rule Disabled!\n"); } //printk("bbb\n"); return 
len; } int acl_drop_ip_range_by_sw_set( struct file *filp, const char *buff,unsigned long len, void *data ) { int enabled; enabled = _rtk_rg_pasring_proc_string_to_integer(buff,len); if(enabled==0){ rg_db.systemGlobal.aclDropIpRangeBySwEnable=0; acl_drop_ip_range_by_sw_get(NULL,NULL); }else if(enabled==1){ rg_db.systemGlobal.aclDropIpRangeBySwEnable=1; acl_drop_ip_range_by_sw_get(NULL,NULL); }else{ rtlglue_printf("invalid parameter\n"); } return len; } int acl_permit_ip_range_by_sw_get(struct seq_file *s, void *v) { int len=0; if(rg_db.systemGlobal.aclPermitIpRangeBySwEnable){ PROC_PRINTF("acl trap IP range with Permit action rule Enabled!\n"); }else{ PROC_PRINTF("acl trap IP range with Permit action rule Disabled!\n"); } return len; } int acl_permit_ip_range_by_sw_set( struct file *filp, const char *buff,unsigned long len, void *data ) { int enabled; enabled = _rtk_rg_pasring_proc_string_to_integer(buff,len); if(enabled==0){ rg_db.systemGlobal.aclPermitIpRangeBySwEnable=0; acl_permit_ip_range_by_sw_get(NULL,NULL); }else if(enabled==1){ rg_db.systemGlobal.aclPermitIpRangeBySwEnable=1; acl_permit_ip_range_by_sw_get(NULL,NULL); }else{ rtlglue_printf("invalid parameter\n"); } return len; } int rtk_rg_unknownDARateLimit_set(struct file *filp, const char *buff,unsigned long len, void *data) { int meterIdx; meterIdx = _rtk_rg_pasring_proc_string_to_integer(buff,len); if(meterIdx==-1){ rg_db.systemGlobal.unKnownDARateLimitShareMeterIdx= -1; }else if((meterIdx >=0) && (meterIdx < MAX_SHAREMETER_TABLE_SIZE)){ rg_db.systemGlobal.unKnownDARateLimitShareMeterIdx = meterIdx; }else{ rtlglue_printf("invalid parameter\n"); } return len; } int rtk_rg_unknownDARateLimit_get(struct seq_file *s, void *v) { int len=0; if(rg_db.systemGlobal.unKnownDARateLimitShareMeterIdx==-1){ PROC_PRINTF("unKnownDA rate limit disabled.\n"); }else{ PROC_PRINTF("unKnownDA rate limit by shareMeter[%d].\n",rg_db.systemGlobal.unKnownDARateLimitShareMeterIdx); } return len; } int rtk_rg_unknownDARateLimitPortMask_set(struct file *file, const char *buffer, unsigned long count, void *data) { unsigned char tmpBuf[16] = {0}; int len = (count > 15) ? 
15 : count; if (buffer && !copy_from_user(tmpBuf, buffer, len)) { rg_db.systemGlobal.unKnownDARateLimitPortMask=simple_strtoul(tmpBuf, NULL, 16); #ifdef __KERNEL__ if(timer_pending(&rg_kernel.swRateLimitTimer)) del_timer(&rg_kernel.swRateLimitTimer); if((rg_db.systemGlobal.BCRateLimitPortMask)||(rg_db.systemGlobal.IPv6MCRateLimitPortMask)||(rg_db.systemGlobal.IPv4MCRateLimitPortMask)||(rg_db.systemGlobal.unKnownDARateLimitPortMask) ||rg_db.systemGlobal.naptSwRateLimitTriggered #ifdef CONFIG_MASTER_WLAN0_ENABLE ||(rg_db.systemGlobal.wifiIngressRateLimitDevMask)||(rg_db.systemGlobal.wifiEgressRateLimitDevMask) #endif ){ init_timer(&rg_kernel.swRateLimitTimer); rg_kernel.swRateLimitTimer.function = rtk_rg_swRateLimitTimerFunc; mod_timer(&rg_kernel.swRateLimitTimer, jiffies+(RTK_RG_SWRATELIMIT_SECOND*TICKTIME_PERIOD/16/*unit:(1/16)sec*/)); } #endif return count; } return -EFAULT; } int rtk_rg_unknownDARateLimitPortMask_get(struct seq_file *s, void *v) { int len=0; PROC_PRINTF("unknownDA rate limit portmask = 0x%x.\n",rg_db.systemGlobal.unKnownDARateLimitPortMask); return len; } int rtk_rg_BCRateLimit_set(struct file *filp, const char *buff,unsigned long len, void *data) { int meterIdx; meterIdx = _rtk_rg_pasring_proc_string_to_integer(buff,len); if(meterIdx==-1){ rg_db.systemGlobal.BCRateLimitShareMeterIdx= -1; }else if((meterIdx >=0) && (meterIdx < MAX_SHAREMETER_TABLE_SIZE)){ rg_db.systemGlobal.BCRateLimitShareMeterIdx = meterIdx; }else{ rtlglue_printf("invalid parameter\n"); } return len; } int rtk_rg_BCRateLimit_get(struct seq_file *s, void *v) { int len=0; if(rg_db.systemGlobal.BCRateLimitShareMeterIdx==-1){ PROC_PRINTF("BC rate limit disabled.\n"); }else{ PROC_PRINTF("BC rate limit by shareMeter[%d].\n",rg_db.systemGlobal.BCRateLimitShareMeterIdx); } return len; } int rtk_rg_BCRateLimitPortMask_set(struct file *file, const char *buffer, unsigned long count, void *data) { unsigned char tmpBuf[16] = {0}; int len = (count > 15) ? 
15 : count; if (buffer && !copy_from_user(tmpBuf, buffer, len)) { rg_db.systemGlobal.BCRateLimitPortMask=simple_strtoul(tmpBuf, NULL, 16); #ifdef __KERNEL__ if(timer_pending(&rg_kernel.swRateLimitTimer)) del_timer(&rg_kernel.swRateLimitTimer); if((rg_db.systemGlobal.BCRateLimitPortMask)||(rg_db.systemGlobal.IPv6MCRateLimitPortMask)||(rg_db.systemGlobal.IPv4MCRateLimitPortMask)||(rg_db.systemGlobal.unKnownDARateLimitPortMask) ||rg_db.systemGlobal.naptSwRateLimitTriggered #ifdef CONFIG_MASTER_WLAN0_ENABLE ||(rg_db.systemGlobal.wifiIngressRateLimitDevMask)||(rg_db.systemGlobal.wifiEgressRateLimitDevMask) #endif ){ init_timer(&rg_kernel.swRateLimitTimer); rg_kernel.swRateLimitTimer.function = rtk_rg_swRateLimitTimerFunc; mod_timer(&rg_kernel.swRateLimitTimer, jiffies+(RTK_RG_SWRATELIMIT_SECOND*TICKTIME_PERIOD/16/*unit:(1/16)sec*/)); } #endif return count; } return -EFAULT; } int rtk_rg_BCRateLimitPortMask_get(struct seq_file *s, void *v) { int len=0; PROC_PRINTF("BC rate limit portmask = 0x%x.\n",rg_db.systemGlobal.BCRateLimitPortMask); return len; } int rtk_rg_IPv6MCRateLimit_set(struct file *filp, const char *buff,unsigned long len, void *data) { int meterIdx; meterIdx = _rtk_rg_pasring_proc_string_to_integer(buff,len); if(meterIdx==-1){ rg_db.systemGlobal.IPv6MCRateLimitShareMeterIdx= -1; }else if((meterIdx >=0) && (meterIdx < MAX_SHAREMETER_TABLE_SIZE)){ rg_db.systemGlobal.IPv6MCRateLimitShareMeterIdx = meterIdx; }else{ rtlglue_printf("invalid parameter\n"); } return len; } int rtk_rg_IPv6MCRateLimit_get(struct seq_file *s, void *v) { int len=0; if(rg_db.systemGlobal.IPv6MCRateLimitShareMeterIdx==-1){ PROC_PRINTF("IPv6 MC rate limit disabled.\n"); }else{ PROC_PRINTF("IPv6 MC rate limit by shareMeter[%d].\n",rg_db.systemGlobal.IPv6MCRateLimitShareMeterIdx); } return len; } int rtk_rg_IPv4MCRateLimit_set(struct file *filp, const char *buff,unsigned long len, void *data) { int meterIdx; meterIdx = _rtk_rg_pasring_proc_string_to_integer(buff,len); if(meterIdx==-1){ rg_db.systemGlobal.IPv4MCRateLimitShareMeterIdx= -1; }else if((meterIdx >=0) && (meterIdx < MAX_SHAREMETER_TABLE_SIZE)){ rg_db.systemGlobal.IPv4MCRateLimitShareMeterIdx = meterIdx; }else{ rtlglue_printf("invalid parameter\n"); } return len; } int rtk_rg_IPv4MCRateLimit_get(struct seq_file *s, void *v) { int len=0; if(rg_db.systemGlobal.IPv4MCRateLimitShareMeterIdx==-1){ PROC_PRINTF("IPv4 MC rate limit disabled.\n"); }else{ PROC_PRINTF("IPv4 MC rate limit by shareMeter[%d].\n",rg_db.systemGlobal.IPv4MCRateLimitShareMeterIdx); } return len; } int rtk_rg_IPv6MCRateLimitPortMask_set(struct file *file, const char *buffer, unsigned long count, void *data) { unsigned char tmpBuf[16] = {0}; int len = (count > 15) ? 
15 : count; if (buffer && !copy_from_user(tmpBuf, buffer, len)) { rg_db.systemGlobal.IPv6MCRateLimitPortMask=simple_strtoul(tmpBuf, NULL, 16); #ifdef __KERNEL__ if(timer_pending(&rg_kernel.swRateLimitTimer)) del_timer(&rg_kernel.swRateLimitTimer); if((rg_db.systemGlobal.BCRateLimitPortMask)||(rg_db.systemGlobal.IPv6MCRateLimitPortMask)||(rg_db.systemGlobal.IPv4MCRateLimitPortMask)||(rg_db.systemGlobal.unKnownDARateLimitPortMask) ||rg_db.systemGlobal.naptSwRateLimitTriggered #ifdef CONFIG_MASTER_WLAN0_ENABLE ||(rg_db.systemGlobal.wifiIngressRateLimitDevMask)||(rg_db.systemGlobal.wifiEgressRateLimitDevMask) #endif ){ init_timer(&rg_kernel.swRateLimitTimer); rg_kernel.swRateLimitTimer.function = rtk_rg_swRateLimitTimerFunc; mod_timer(&rg_kernel.swRateLimitTimer, jiffies+(RTK_RG_SWRATELIMIT_SECOND*TICKTIME_PERIOD/16/*unit:(1/16)sec*/)); } #endif return count; } return -EFAULT; } int rtk_rg_IPv6MCRateLimitPortMask_get(struct seq_file *s, void *v) { int len=0; PROC_PRINTF("Pv6 MC rate limit portmask = 0x%x.\n",rg_db.systemGlobal.IPv6MCRateLimitPortMask); return len; } int rtk_rg_IPv4MCRateLimitPortMask_set(struct file *file, const char *buffer, unsigned long count, void *data) { unsigned char tmpBuf[16] = {0}; int len = (count > 15) ? 15 : count; if (buffer && !copy_from_user(tmpBuf, buffer, len)) { rg_db.systemGlobal.IPv4MCRateLimitPortMask=simple_strtoul(tmpBuf, NULL, 16); #ifdef __KERNEL__ if(timer_pending(&rg_kernel.swRateLimitTimer)) del_timer(&rg_kernel.swRateLimitTimer); if((rg_db.systemGlobal.BCRateLimitPortMask)||(rg_db.systemGlobal.IPv6MCRateLimitPortMask)||(rg_db.systemGlobal.IPv4MCRateLimitPortMask)||(rg_db.systemGlobal.unKnownDARateLimitPortMask) ||rg_db.systemGlobal.naptSwRateLimitTriggered #ifdef CONFIG_MASTER_WLAN0_ENABLE ||(rg_db.systemGlobal.wifiIngressRateLimitDevMask)||(rg_db.systemGlobal.wifiEgressRateLimitDevMask) #endif ){ init_timer(&rg_kernel.swRateLimitTimer); rg_kernel.swRateLimitTimer.function = rtk_rg_swRateLimitTimerFunc; mod_timer(&rg_kernel.swRateLimitTimer, jiffies+(RTK_RG_SWRATELIMIT_SECOND*TICKTIME_PERIOD/16/*unit:(1/16)sec*/)); } #endif return count; } return -EFAULT; } int rtk_rg_IPv4MCRateLimitPortMask_get(struct seq_file *s, void *v) { int len=0; PROC_PRINTF("Pv4 MC rate limit portmask = 0x%x.\n",rg_db.systemGlobal.IPv4MCRateLimitPortMask); return len; } #ifdef CONFIG_MASTER_WLAN0_ENABLE int rtk_rg_WifiIngressRateLimit_get(struct seq_file *s, void *v) { int i,usage=1; for(i=0;i<MAX_WLAN_DEVICE_NUM;i++){ if(rg_db.systemGlobal.wifiIngressRateLimitMeter[i]){ PROC_PRINTF("Wlan[%d] Ingress rate limit by rate %d kbps.\n",i,rg_db.systemGlobal.wifiIngressRateLimitMeter[i]); usage--; } } if(usage>0){ PROC_PRINTF("Usage: echo Wlan_idx Rate > /proc/rg/wifi_ingress_rate_limit\n"); PROC_PRINTF(" Wlan_idx: wlan device index.\n"); PROC_PRINTF(" Rate: unit is 1 kbps and the range is from 8k to %dk.\n",MAX_WIFI_LIMIT_RATE); PROC_PRINTF(" The granularity of rate is 8 kbps. Set zero for reset.\n"); PROC_PRINTF(" echo -1 -1 > /proc/rg/wifi_ingress_rate_limit will reset all.\n"); } return 0; } int rtk_rg_WifiIngressRateLimit_set(struct file *file, const char *buff, unsigned long len, void *data) { char tmpbuf[64]={'\0'}; char *strptr; int wlan_idx; int rate; char *rateptr=NULL,*wlanptr=NULL; //The rate unit is 1 kbps and the range is from 8k to 1048568k. 
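    //Illustrative usage (hypothetical device index): echo 2 2048 > /proc/rg/wifi_ingress_rate_limit limits wlan device 2 ingress to 2048 kbps; echo 2 0 clears it; echo -1 -1 resets all devices.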
//The granularity of rate is 8 kbps //echo wlan_idx rate > WifiIngressRateLimit_set if (buff && !copy_from_user(tmpbuf, buff, len)) { //tmpbuf[len] = '\0'; strptr=tmpbuf; wlanptr = strsep(&strptr," "); if (wlanptr==NULL) goto errout; wlan_idx = simple_strtol(wlanptr, NULL, 0); if(wlan_idx > MAX_WLAN_DEVICE_NUM || wlan_idx < -1) goto errout; rateptr = strsep(&strptr," "); if (rateptr==NULL) goto errout; rate = simple_strtol(rateptr, NULL, 0); if((rate > 0 && (rate < 8 || rate > MAX_WIFI_LIMIT_RATE))||rate < -1) //the range is from 8k to HW_SHARE_METER_MAX goto errout; //reset all if(wlan_idx == -1 && rate == -1){ rg_db.systemGlobal.wifiIngressRateLimitDevMask=0; rg_db.systemGlobal.wifiIngressRateLimitDevOverMask=0; memset(rg_db.systemGlobal.wifiIngressRateLimitMeter,0,sizeof(int)*MAX_WLAN_DEVICE_NUM); memset(rg_db.systemGlobal.wifiIngressByteCount,0,sizeof(int)*MAX_WLAN_DEVICE_NUM); rtlglue_printf("All Wlan Ingress rate limit disabled.\n"); }else if(wlan_idx < 0 || rate < 0){ goto errout; }else{ rg_db.systemGlobal.wifiIngressByteCount[wlan_idx]=0; rg_db.systemGlobal.wifiIngressRateLimitMeter[wlan_idx]=rate; rg_db.systemGlobal.wifiIngressRateLimitDevOverMask&=(~(0x1<<wlan_idx)); if(rate){ rg_db.systemGlobal.wifiIngressRateLimitDevMask|=0x1<<wlan_idx; rtlglue_printf("Wlan[%d] Ingress rate limit by rate %d kbps.\n",wlan_idx,rate); }else{ rg_db.systemGlobal.wifiIngressRateLimitDevMask&=(~(0x1<<wlan_idx)); rtlglue_printf("Wlan[%d] Ingress rate limit disabled.\n",wlan_idx); } } #ifdef __KERNEL__ if(timer_pending(&rg_kernel.swRateLimitTimer)) del_timer(&rg_kernel.swRateLimitTimer); if((rg_db.systemGlobal.BCRateLimitPortMask)||(rg_db.systemGlobal.IPv6MCRateLimitPortMask)||(rg_db.systemGlobal.IPv4MCRateLimitPortMask)||(rg_db.systemGlobal.unKnownDARateLimitPortMask)||(rg_db.systemGlobal.wifiIngressRateLimitDevMask)||(rg_db.systemGlobal.wifiEgressRateLimitDevMask)||rg_db.systemGlobal.naptSwRateLimitTriggered){ init_timer(&rg_kernel.swRateLimitTimer); rg_kernel.swRateLimitTimer.function = rtk_rg_swRateLimitTimerFunc; mod_timer(&rg_kernel.swRateLimitTimer, jiffies+(RTK_RG_SWRATELIMIT_SECOND*(TICKTIME_PERIOD>>4)/*unit:(1/16)sec*/)); } #endif } else { //struct seq_file *s=NULL;//used by PROC_PRINTF // char *buf=NULL; //used by PROC_PRINTF // int len=0;//used by PROC_PRINTF errout: rtlglue_printf("Usage: echo Wlan_idx Rate > /proc/rg/wifi_ingress_rate_limit\n"); rtlglue_printf(" Wlan_idx: wlan device index.\n"); rtlglue_printf(" Rate: unit is 1 kbps and the range is from 8k to %dk.\n",MAX_WIFI_LIMIT_RATE); rtlglue_printf(" The granularity of rate is 8 kbps. Set zero for reset.\n"); rtlglue_printf(" echo -1 -1 > /proc/rg/wifi_ingress_rate_limit will reset all.\n"); } return len; } int rtk_rg_WifiEgressRateLimit_get(struct seq_file *s, void *v) { int i,usage=1; for(i=0;i<MAX_WLAN_DEVICE_NUM;i++){ if(rg_db.systemGlobal.wifiEgressRateLimitMeter[i]){ PROC_PRINTF("Wlan[%d] Egress rate limit by rate %d kbps.\n",i,rg_db.systemGlobal.wifiEgressRateLimitMeter[i]); usage--; } } if(usage>0){ PROC_PRINTF("Usage: echo Wlan_idx Rate > /proc/rg/wifi_egress_rate_limit\n"); PROC_PRINTF(" Wlan_idx: wlan device index.\n"); PROC_PRINTF(" Rate: unit is 1 kbps and the range is from 8k to %dk.\n",MAX_WIFI_LIMIT_RATE); PROC_PRINTF(" The granularity of rate is 8 kbps. 
Set zero for reset.\n"); PROC_PRINTF(" echo -1 -1 > /proc/rg/wifi_egress_rate_limit will reset all.\n"); } return 0; } int rtk_rg_WifiEgressRateLimit_set(struct file *file, const char *buff, unsigned long len, void *data) { char tmpbuf[64]={'\0'}; char *strptr; int wlan_idx; int rate; char *rateptr=NULL,*wlanptr=NULL; //The rate unit is 1 kbps and the range is from 8k to 1048568k. //The granularity of rate is 8 kbps //echo wlan_idx rate > WifiEgressRateLimit_set if (buff && !copy_from_user(tmpbuf, buff, len)) { //tmpbuf[len] = '\0'; strptr=tmpbuf; wlanptr = strsep(&strptr," "); if (wlanptr==NULL) goto errout; wlan_idx = simple_strtol(wlanptr, NULL, 0); if(wlan_idx > MAX_WLAN_DEVICE_NUM || wlan_idx < -1) goto errout; rateptr = strsep(&strptr," "); if (rateptr==NULL) goto errout; rate = simple_strtol(rateptr, NULL, 0); if((rate > 0 && (rate < 8 || rate > MAX_WIFI_LIMIT_RATE))||rate < -1) //the range is from 8k to HW_SHARE_METER_MAX goto errout; //reset all if(wlan_idx == -1 && rate == -1){ rg_db.systemGlobal.wifiEgressRateLimitDevMask=0; rg_db.systemGlobal.wifiEgressRateLimitDevOverMask=0; memset(rg_db.systemGlobal.wifiEgressRateLimitMeter,0,sizeof(int)*MAX_WLAN_DEVICE_NUM); memset(rg_db.systemGlobal.wifiEgressByteCount,0,sizeof(int)*MAX_WLAN_DEVICE_NUM); rtlglue_printf("All Wlan Egress rate limit disabled.\n"); }else if(wlan_idx < 0 || rate < 0){ goto errout; }else{ rg_db.systemGlobal.wifiEgressByteCount[wlan_idx]=0; rg_db.systemGlobal.wifiEgressRateLimitMeter[wlan_idx]=rate; rg_db.systemGlobal.wifiEgressRateLimitDevOverMask&=(~(0x1<<wlan_idx)); if(rate){ rg_db.systemGlobal.wifiEgressRateLimitDevMask|=0x1<<wlan_idx; rtlglue_printf("Wlan[%d] Egress rate limit by rate %d kbps.\n",wlan_idx,rate); }else{ rg_db.systemGlobal.wifiEgressRateLimitDevMask&=(~(0x1<<wlan_idx)); rtlglue_printf("Wlan[%d] Egress rate limit disabled.\n",wlan_idx); } } #ifdef __KERNEL__ if(timer_pending(&rg_kernel.swRateLimitTimer)) del_timer(&rg_kernel.swRateLimitTimer); if((rg_db.systemGlobal.BCRateLimitPortMask)||(rg_db.systemGlobal.IPv6MCRateLimitPortMask)||(rg_db.systemGlobal.IPv4MCRateLimitPortMask)||(rg_db.systemGlobal.unKnownDARateLimitPortMask)||(rg_db.systemGlobal.wifiIngressRateLimitDevMask)||(rg_db.systemGlobal.wifiEgressRateLimitDevMask)||rg_db.systemGlobal.naptSwRateLimitTriggered){ init_timer(&rg_kernel.swRateLimitTimer); rg_kernel.swRateLimitTimer.function = rtk_rg_swRateLimitTimerFunc; mod_timer(&rg_kernel.swRateLimitTimer, jiffies+(RTK_RG_SWRATELIMIT_SECOND*(TICKTIME_PERIOD>>4)/*unit:(1/16)sec*/)); } #endif } else { //struct seq_file *s=NULL;//used by PROC_PRINTF // char *buf=NULL; //used by PROC_PRINTF // int len=0;//used by PROC_PRINTF errout: rtlglue_printf("Usage: echo Wlan_idx Rate > /proc/rg/wifi_egress_rate_limit\n"); rtlglue_printf(" Wlan_idx: wlan device index.\n"); rtlglue_printf(" Rate: unit is 1 kbps and the range is from 8k to %dk.\n",MAX_WIFI_LIMIT_RATE); rtlglue_printf(" The granularity of rate is 8 kbps. 
Set zero for reset.\n"); rtlglue_printf(" echo -1 -1 > /proc/rg/wifi_egress_rate_limit will reset all.\n"); } return len; } #endif #define ALG_NAME(x) #x int rtk_rg_algUserDefinedPortNum_get(struct seq_file *s, void *v) { int i; for(i=0;i<MAX_ALG_FUNCTIONS;i++){ switch(i){ //Server in WAN case RTK_RG_ALG_SIP_TCP: if(rg_db.algUserDefinedPort[i]!=RTK_RG_ALG_SIP_TCP_PORT)PROC_PRINTF("%s assign to %d.\n",ALG_NAME(RTK_RG_ALG_SIP_TCP),rg_db.algUserDefinedPort[i]); break; case RTK_RG_ALG_SIP_UDP: if(rg_db.algUserDefinedPort[i]!=RTK_RG_ALG_SIP_UDP_PORT)PROC_PRINTF("%s assign to %d.\n",ALG_NAME(RTK_RG_ALG_SIP_UDP),rg_db.algUserDefinedPort[i]); break; case RTK_RG_ALG_H323_TCP: if(rg_db.algUserDefinedPort[i]!=RTK_RG_ALG_H323_TCP_PORT)PROC_PRINTF("%s assign to %d.\n",ALG_NAME(RTK_RG_ALG_H323_TCP),rg_db.algUserDefinedPort[i]); break; case RTK_RG_ALG_H323_UDP: if(rg_db.algUserDefinedPort[i]!=RTK_RG_ALG_H323_UDP_PORT)PROC_PRINTF("%s assign to %d.\n",ALG_NAME(RTK_RG_ALG_H323_UDP),rg_db.algUserDefinedPort[i]); break; case RTK_RG_ALG_RTSP_TCP: if(rg_db.algUserDefinedPort[i]!=RTK_RG_ALG_RTSP_TCP_PORT)PROC_PRINTF("%s assign to %d.\n",ALG_NAME(RTK_RG_ALG_RTSP_TCP),rg_db.algUserDefinedPort[i]); break; case RTK_RG_ALG_RTSP_UDP: if(rg_db.algUserDefinedPort[i]!=RTK_RG_ALG_RTSP_UDP_PORT)PROC_PRINTF("%s assign to %d.\n",ALG_NAME(RTK_RG_ALG_RTSP_UDP),rg_db.algUserDefinedPort[i]); break; case RTK_RG_ALG_FTP_TCP: if(rg_db.algUserDefinedPort[i]!=RTK_RG_ALG_FTP_TCP_PORT)PROC_PRINTF("%s assign to %d.\n",ALG_NAME(RTK_RG_ALG_FTP_TCP),rg_db.algUserDefinedPort[i]); break; case RTK_RG_ALG_FTP_UDP: if(rg_db.algUserDefinedPort[i]!=RTK_RG_ALG_FTP_UDP_PORT)PROC_PRINTF("%s assign to %d.\n",ALG_NAME(RTK_RG_ALG_FTP_UDP),rg_db.algUserDefinedPort[i]); break; //Server in LAN case RTK_RG_ALG_SIP_TCP_SRV_IN_LAN: if(rg_db.algUserDefinedPort[i]!=RTK_RG_ALG_SIP_TCP_PORT)PROC_PRINTF("%s assign to %d.\n",ALG_NAME(RTK_RG_ALG_SIP_TCP_SRV_IN_LAN),rg_db.algUserDefinedPort[i]); break; case RTK_RG_ALG_SIP_UDP_SRV_IN_LAN: if(rg_db.algUserDefinedPort[i]!=RTK_RG_ALG_SIP_UDP_PORT)PROC_PRINTF("%s assign to %d.\n",ALG_NAME(RTK_RG_ALG_SIP_UDP_SRV_IN_LAN),rg_db.algUserDefinedPort[i]); break; case RTK_RG_ALG_H323_TCP_SRV_IN_LAN: if(rg_db.algUserDefinedPort[i]!=RTK_RG_ALG_H323_TCP_PORT)PROC_PRINTF("%s assign to %d.\n",ALG_NAME(RTK_RG_ALG_H323_TCP_SRV_IN_LAN),rg_db.algUserDefinedPort[i]); break; case RTK_RG_ALG_H323_UDP_SRV_IN_LAN: if(rg_db.algUserDefinedPort[i]!=RTK_RG_ALG_H323_UDP_PORT)PROC_PRINTF("%s assign to %d.\n",ALG_NAME(RTK_RG_ALG_H323_UDP_SRV_IN_LAN),rg_db.algUserDefinedPort[i]); break; case RTK_RG_ALG_RTSP_TCP_SRV_IN_LAN: if(rg_db.algUserDefinedPort[i]!=RTK_RG_ALG_RTSP_TCP_PORT)PROC_PRINTF("%s assign to %d.\n",ALG_NAME(RTK_RG_ALG_RTSP_TCP_SRV_IN_LAN),rg_db.algUserDefinedPort[i]); break; case RTK_RG_ALG_RTSP_UDP_SRV_IN_LAN: if(rg_db.algUserDefinedPort[i]!=RTK_RG_ALG_RTSP_UDP_PORT)PROC_PRINTF("%s assign to %d.\n",ALG_NAME(RTK_RG_ALG_RTSP_UDP_SRV_IN_LAN),rg_db.algUserDefinedPort[i]); break; case RTK_RG_ALG_FTP_TCP_SRV_IN_LAN: if(rg_db.algUserDefinedPort[i]!=RTK_RG_ALG_FTP_TCP_PORT)PROC_PRINTF("%s assign to %d.\n",ALG_NAME(RTK_RG_ALG_FTP_TCP_SRV_IN_LAN),rg_db.algUserDefinedPort[i]); break; case RTK_RG_ALG_FTP_UDP_SRV_IN_LAN: if(rg_db.algUserDefinedPort[i]!=RTK_RG_ALG_FTP_UDP_PORT)PROC_PRINTF("%s assign to %d.\n",ALG_NAME(RTK_RG_ALG_FTP_UDP_SRV_IN_LAN),rg_db.algUserDefinedPort[i]); break; //Pass through case RTK_RG_ALG_PPTP_TCP_PASSTHROUGH: if(rg_db.algUserDefinedPort[i]!=RTK_RG_ALG_PPTP_PASSTHROUGH_TCP_PORT)PROC_PRINTF("%s assign to 
%d.\n",ALG_NAME(RTK_RG_ALG_PPTP_TCP_PASSTHROUGH),rg_db.algUserDefinedPort[i]); break; case RTK_RG_ALG_PPTP_UDP_PASSTHROUGH: if(rg_db.algUserDefinedPort[i]!=RTK_RG_ALG_PPTP_PASSTHROUGH_UDP_PORT)PROC_PRINTF("%s assign to %d.\n",ALG_NAME(RTK_RG_ALG_PPTP_UDP_PASSTHROUGH),rg_db.algUserDefinedPort[i]); break; case RTK_RG_ALG_L2TP_TCP_PASSTHROUGH: if(rg_db.algUserDefinedPort[i]!=RTK_RG_ALG_L2TP_PASSTHROUGH_TCP_PORT)PROC_PRINTF("%s assign to %d.\n",ALG_NAME(RTK_RG_ALG_L2TP_TCP_PASSTHROUGH),rg_db.algUserDefinedPort[i]); break; case RTK_RG_ALG_L2TP_UDP_PASSTHROUGH: if(rg_db.algUserDefinedPort[i]!=RTK_RG_ALG_L2TP_PASSTHROUGH_UDP_PORT)PROC_PRINTF("%s assign to %d.\n",ALG_NAME(RTK_RG_ALG_L2TP_UDP_PASSTHROUGH),rg_db.algUserDefinedPort[i]); break; case RTK_RG_ALG_IPSEC_TCP_PASSTHROUGH: if(rg_db.algUserDefinedPort[i]!=RTK_RG_ALG_IPSEC_PASSTHROUGH_TCP_PORT)PROC_PRINTF("%s assign to %d.\n",ALG_NAME(RTK_RG_ALG_IPSEC_TCP_PASSTHROUGH),rg_db.algUserDefinedPort[i]); break; case RTK_RG_ALG_IPSEC_UDP_PASSTHROUGH: if(rg_db.algUserDefinedPort[i]!=RTK_RG_ALG_IPSEC_PASSTHROUGH_UDP_PORT)PROC_PRINTF("%s assign to %d.\n",ALG_NAME(RTK_RG_ALG_IPSEC_UDP_PASSTHROUGH),rg_db.algUserDefinedPort[i]); break; #ifdef CONFIG_RG_ROMEDRIVER_ALG_BATTLENET_SUPPORT case RTK_RG_ALG_BATTLENET_TCP: if(rg_db.algUserDefinedPort[i]!=RTK_RG_ALG_BATTLENET_TCP_PORT)PROC_PRINTF("%s assign to %d.\n",ALG_NAME(RTK_RG_ALG_BATTLENET_TCP),rg_db.algUserDefinedPort[i]); break; #endif default: break; } } return 0; } int rtk_rg_algUserDefinedPortNum_set(struct file *file, const char *buff, unsigned long len, void *data) { char tmpbuf[64]={'\0'}; char *strptr; int alg_type,alg_idx; int portNum; char *algptr=NULL,*portptr=NULL; //echo algType PortNum > alg_user_defined_port_num if (buff && !copy_from_user(tmpbuf, buff, len)) { strptr=tmpbuf; algptr = strsep(&strptr," "); if (algptr==NULL) goto errout; alg_type = simple_strtol(algptr, NULL, 0); if(alg_type > 0xffffffff || alg_type <= 0) goto errout; portptr = strsep(&strptr," "); if (portptr==NULL) goto errout; portNum = simple_strtol(portptr, NULL, 0); if(portNum > 65535 || portNum < 0) goto errout; for(alg_idx=0;alg_idx<MAX_ALG_FUNCTIONS;alg_type>>=1,alg_idx++) if(alg_type&0x1)break; if(alg_type>0x1 || alg_idx==MAX_ALG_FUNCTIONS)goto errout;//only one bit mask at one time switch(alg_idx) { case RTK_RG_ALG_SIP_TCP_SRV_IN_LAN: case RTK_RG_ALG_H323_TCP_SRV_IN_LAN: case RTK_RG_ALG_RTSP_TCP_SRV_IN_LAN: case RTK_RG_ALG_FTP_TCP_SRV_IN_LAN: if(_rtk_rg_naptExtPortInUsedCheck(FALSE, 1, portNum, FALSE, TRUE)==1) { WARNING("port %d is used by other service"); goto errout; } break; case RTK_RG_ALG_SIP_UDP_SRV_IN_LAN: case RTK_RG_ALG_H323_UDP_SRV_IN_LAN: case RTK_RG_ALG_RTSP_UDP_SRV_IN_LAN: case RTK_RG_ALG_FTP_UDP_SRV_IN_LAN: if(_rtk_rg_naptExtPortInUsedCheck(FALSE, 0, portNum, FALSE, TRUE)==1) { WARNING("port %d is used by other service"); goto errout; } break; default: break; } rtlglue_printf("The algtype is 0x%x, index is %d, portnum is %d\n",(unsigned int)simple_strtol(algptr, NULL, 0),alg_idx,portNum); rg_db.algUserDefinedPort[alg_idx]=portNum; //reset Alg port-function mapping switch(alg_idx){ case RTK_RG_ALG_SIP_TCP: case RTK_RG_ALG_H323_TCP: case RTK_RG_ALG_RTSP_TCP: case RTK_RG_ALG_FTP_TCP: case RTK_RG_ALG_SIP_TCP_SRV_IN_LAN: case RTK_RG_ALG_H323_TCP_SRV_IN_LAN: case RTK_RG_ALG_RTSP_TCP_SRV_IN_LAN: case RTK_RG_ALG_FTP_TCP_SRV_IN_LAN: case RTK_RG_ALG_PPTP_TCP_PASSTHROUGH: case RTK_RG_ALG_L2TP_TCP_PASSTHROUGH: case RTK_RG_ALG_IPSEC_TCP_PASSTHROUGH: #ifdef CONFIG_RG_ROMEDRIVER_ALG_BATTLENET_SUPPORT case 
RTK_RG_ALG_BATTLENET_TCP: #endif if(rg_db.algTcpFunctionMapping[alg_idx].registerFunction)rg_db.algTcpFunctionMapping[alg_idx].portNum=portNum; break; case RTK_RG_ALG_SIP_UDP: case RTK_RG_ALG_H323_UDP: case RTK_RG_ALG_RTSP_UDP: case RTK_RG_ALG_FTP_UDP: case RTK_RG_ALG_SIP_UDP_SRV_IN_LAN: case RTK_RG_ALG_H323_UDP_SRV_IN_LAN: case RTK_RG_ALG_RTSP_UDP_SRV_IN_LAN: case RTK_RG_ALG_FTP_UDP_SRV_IN_LAN: case RTK_RG_ALG_PPTP_UDP_PASSTHROUGH: case RTK_RG_ALG_L2TP_UDP_PASSTHROUGH: case RTK_RG_ALG_IPSEC_UDP_PASSTHROUGH: if(rg_db.algUdpFunctionMapping[alg_idx].registerFunction)rg_db.algUdpFunctionMapping[alg_idx].portNum=portNum; break; default: break; } //20151123LUKE: if we had set alg before, clear it and re-enable them! if(rg_db.algFunctionMask){ rtk_rg_alg_type_t tmpAlgApps=rg_db.algFunctionMask; assert_ok(rtk_rg_apollo_algApps_set(0x0)); assert_ok(rtk_rg_apollo_algApps_set(tmpAlgApps)); } }else{ errout: rtlglue_printf("Usage: echo algType PortNum > /proc/rg/alg_user_defined_port_num\n"); rtlglue_printf(" algType: ALG type bitmask, only one bit can be set once.\n"); rtlglue_printf(" PortNum: user-defined port number for the ALG type.\n"); } return len; } int rtk_rg_algUserDefinedTimeOut_get(struct seq_file *s, void *v) { int i; for(i=0;i<MAX_ALG_FUNCTIONS;i++){ switch(i){ case RTK_RG_ALG_SIP_TCP: if(rg_db.algUserDefinedTimeout[i]!=ALG_SIP_UPNP_TIMEOUT)PROC_PRINTF("%s assign to %d secs.\n",ALG_NAME(RTK_RG_ALG_SIP_TCP),rg_db.algUserDefinedTimeout[i]); break; case RTK_RG_ALG_H323_TCP: if(rg_db.algUserDefinedTimeout[i]!=ALG_H323_UPNP_TIMEOUT)PROC_PRINTF("%s assign to %d secs.\n",ALG_NAME(RTK_RG_ALG_H323_TCP),rg_db.algUserDefinedTimeout[i]); break; case RTK_RG_ALG_RTSP_TCP: if(rg_db.algUserDefinedTimeout[i]!=ALG_RTSP_UPNP_TIMEOUT)PROC_PRINTF("%s assign to %d secs.\n",ALG_NAME(RTK_RG_ALG_RTSP_TCP),rg_db.algUserDefinedTimeout[i]); break; case RTK_RG_ALG_FTP_TCP: if(rg_db.algUserDefinedTimeout[i]!=ALG_FTP_ACTIVE_UPNP_TIMEOUT)PROC_PRINTF("%s assign to %d secs.\n",ALG_NAME(RTK_RG_ALG_FTP_TCP),rg_db.algUserDefinedTimeout[i]); break; case RTK_RG_ALG_FTP_TCP_SRV_IN_LAN: if(rg_db.algUserDefinedTimeout[i]!=ALG_FTP_PASV_UPNP_TIMEOUT)PROC_PRINTF("%s assign to %d secs.\n",ALG_NAME(RTK_RG_ALG_FTP_TCP_SRV_IN_LAN),rg_db.algUserDefinedTimeout[i]); break; #ifdef CONFIG_RG_ROMEDRIVER_ALG_BATTLENET_SUPPORT case RTK_RG_ALG_BATTLENET_TCP: if(rg_db.algUserDefinedTimeout[i]!=ALG_BATTLENET_UPNP_TIMEOUT)PROC_PRINTF("%s assign to %d secs.\n",ALG_NAME(RTK_RG_ALG_BATTLENET_TCP),rg_db.algUserDefinedTimeout[i]); break; #endif default: break; } } return 0; } int rtk_rg_algUserDefinedTimeOut_set(struct file *file, const char *buff, unsigned long len, void *data) { char tmpbuf[64]={'\0'}; char *strptr; int alg_type,alg_idx; int timeOut; char *algptr=NULL,*timeptr=NULL; //echo algType TimeOut > alg_user_defined_time_out if (buff && !copy_from_user(tmpbuf, buff, len)) { strptr=tmpbuf; algptr = strsep(&strptr," "); if (algptr==NULL) goto errout; alg_type = simple_strtol(algptr, NULL, 0); if(alg_type > 0xffffffff || alg_type <= 0) goto errout; timeptr = strsep(&strptr," "); if (timeptr==NULL) goto errout; timeOut = simple_strtol(timeptr, NULL, 0); if(timeOut > 65535 || timeOut < 0) goto errout; for(alg_idx=0;alg_idx<MAX_ALG_FUNCTIONS;alg_type>>=1,alg_idx++) if(alg_type&0x1)break; if(alg_type>0x1 || alg_idx==MAX_ALG_FUNCTIONS)goto errout;//only one bit mask at one time //reset Alg port-function mapping switch(alg_idx){ case RTK_RG_ALG_SIP_TCP: case RTK_RG_ALG_H323_TCP: case RTK_RG_ALG_RTSP_TCP: case RTK_RG_ALG_FTP_TCP: case 
RTK_RG_ALG_FTP_TCP_SRV_IN_LAN: #ifdef CONFIG_RG_ROMEDRIVER_ALG_BATTLENET_SUPPORT case RTK_RG_ALG_BATTLENET_TCP: #endif rtlglue_printf("The algtype is 0x%x, index is %d, timeout is %d\n",(unsigned int)simple_strtol(algptr, NULL, 0),alg_idx,timeOut); rg_db.algUserDefinedTimeout[alg_idx]=timeOut; break; default: rtlglue_printf("Not Supported."); break; } }else{ errout: rtlglue_printf("Usage: echo algType TimeOut > /proc/rg/alg_user_defined_time_out\n"); rtlglue_printf(" algType: ALG type bitmask, only one bit can be set once.\n"); rtlglue_printf(" TimeOut: user-defined UPNP time out seconds for the ALG type.\n"); } return len; } int rtk_rg_layer2_PonPortPriority_reflash(void){ int i,valid_idx; rtk_rg_macEntry_t macEntry; for(i=0;i<MAX_LUT_HW_TABLE_SIZE;i++){ valid_idx= i; bzero(&macEntry,sizeof(rtk_rg_macEntry_t)); assert_ok(rtk_rg_apollo_macEntry_find(&macEntry,&valid_idx)); //find the assigned entry if(valid_idx==i){ if(macEntry.port_idx==RTK_RG_PORT_PON){ assert_ok(rtk_rg_apollo_macEntry_del(i)); assert_ok(rtk_rg_apollo_macEntry_add(&macEntry,&valid_idx)); } } } return (RT_ERR_RG_OK); } #if 0 int rtk_rg_layer2_inetrface_remark_set(struct file *filp, const char *buff,unsigned long len, void *data){ int i,enabled; enabled = _rtk_rg_pasring_proc_string_to_integer(buff,len); if(enabled==1){ rg_db.systemGlobal.IntfRmkEnabled = 1; assert_ok(rtk_rg_apollo_qosDot1pPriRemarkByInternalPriEgressPortEnable_set(RTK_RG_PORT_PON,RTK_RG_ENABLED)); //internal-pri and dot1p one-to-one mapping assert_ok( rtk_rg_apollo_qosDot1pPriRemarkByInternalPri_set(0,0)); assert_ok( rtk_rg_apollo_qosDot1pPriRemarkByInternalPri_set(1,1)); assert_ok( rtk_rg_apollo_qosDot1pPriRemarkByInternalPri_set(2,2)); assert_ok( rtk_rg_apollo_qosDot1pPriRemarkByInternalPri_set(3,3)); assert_ok( rtk_rg_apollo_qosDot1pPriRemarkByInternalPri_set(4,4)); assert_ok( rtk_rg_apollo_qosDot1pPriRemarkByInternalPri_set(5,5)); assert_ok( rtk_rg_apollo_qosDot1pPriRemarkByInternalPri_set(6,6)); assert_ok( rtk_rg_apollo_qosDot1pPriRemarkByInternalPri_set(7,7)); rtlglue_printf("Layer2 wan interface p-bit remarking enabled.\n"); }else{ rg_db.systemGlobal.IntfRmkEnabled = 0; assert_ok(rtk_rg_apollo_qosDot1pPriRemarkByInternalPriEgressPortEnable_set(RTK_RG_PORT_PON,RTK_RG_DISABLED)); rtlglue_printf("Layer2 wan interface p-bit remarking disabled.\n"); //all internal-pri mapping to dot1p[0] assert_ok( rtk_rg_apollo_qosDot1pPriRemarkByInternalPri_set(0,0)); assert_ok( rtk_rg_apollo_qosDot1pPriRemarkByInternalPri_set(1,0)); assert_ok( rtk_rg_apollo_qosDot1pPriRemarkByInternalPri_set(2,0)); assert_ok( rtk_rg_apollo_qosDot1pPriRemarkByInternalPri_set(3,0)); assert_ok( rtk_rg_apollo_qosDot1pPriRemarkByInternalPri_set(4,0)); assert_ok( rtk_rg_apollo_qosDot1pPriRemarkByInternalPri_set(5,0)); assert_ok( rtk_rg_apollo_qosDot1pPriRemarkByInternalPri_set(6,0)); assert_ok( rtk_rg_apollo_qosDot1pPriRemarkByInternalPri_set(7,0)); for(i=0;i<MAX_NETIF_SW_TABLE_SIZE;i++){ rg_db.systemGlobal.intfMappingToPbit[i] = 0; } assert_ok(rtk_rg_layer2_PonPortPriority_reflash()); } return len; } int rtk_rg_layer2_inetrface_remark_get(void){ if(rg_db.systemGlobal.IntfRmkEnabled==1){ rtlglue_printf("Layer2 wan interface p-bit remarking enabled.\n"); }else{ rtlglue_printf("Layer2 wan interface p-bit remarking disabled.\n"); } return 0; } #endif int rtk_rg_layer2_interfcae_remark_mapping_set(struct file *filp, const char *buff,unsigned long len, void *data){ char tmpbuf[64]; char *strptr; int intfIdx, pbit; char *tokptr; if (buff && !copy_from_user(tmpbuf, buff, len)) { tmpbuf[len] = 
'\0'; //parsing intf idx strptr=tmpbuf; tokptr = strsep(&strptr," "); if (tokptr==NULL) { goto errout; } intfIdx = simple_strtol(tokptr, NULL, 0); //parsing p-bit tokptr = strsep(&strptr," "); if (tokptr==NULL) { goto errout; } pbit = simple_strtol(tokptr, NULL, 0); //assign p-bit to rg_db rg_db.systemGlobal.intfMappingToPbit[intfIdx] = pbit; assert_ok(rtk_rg_layer2_PonPortPriority_reflash()); } else { errout: rtlglue_printf("Usage: echo [interfaceIdx] [pbit] > /proc/rg/layer2_interfcae_remark_mapping \n"); } return len; } int rtk_rg_layer2_interfcae_remark_mapping_get(void){ int i; for(i=0;i<MAX_NETIF_SW_TABLE_SIZE;i++){ //Check it is vlaid bridgeWan if(rg_db.systemGlobal.interfaceInfo[i].valid==1 && rg_db.systemGlobal.interfaceInfo[i].storedInfo.is_wan==1 && rg_db.systemGlobal.interfaceInfo[i].storedInfo.wan_intf.wan_intf_conf.wan_type==RTK_RG_BRIDGE ){ rtlglue_printf("Layer2 Wan Intercae Remarking:"); rtlglue_printf("Interfcae[%d] remarking p-bit to %d.\n",i,rg_db.systemGlobal.intfMappingToPbit[i]); } } return 0; } int rtk_rg_tcp_hw_learning_at_syn_set(struct file *filp, const char *buff,unsigned long len, void *data){ int on; on = _rtk_rg_pasring_proc_string_to_integer(buff,len); if(on==0) rg_db.systemGlobal.tcp_hw_learning_at_syn=0; else rg_db.systemGlobal.tcp_hw_learning_at_syn=1; return len; } int rtk_rg_tcp_hw_learning_at_syn_get(struct seq_file *s, void *v){ int len=0; PROC_PRINTF("%d\n",rg_db.systemGlobal.tcp_hw_learning_at_syn); return len; } int rtk_rg_tcp_in_shortcut_learning_at_syn_set(struct file *filp, const char *buff,unsigned long len, void *data){ int on; on = _rtk_rg_pasring_proc_string_to_integer(buff,len); if(on==0) rg_db.systemGlobal.tcp_in_shortcut_learning_at_syn=0; else rg_db.systemGlobal.tcp_in_shortcut_learning_at_syn=1; return len; } int rtk_rg_tcp_in_shortcut_learning_at_syn_get(struct seq_file *s, void *v){ int len=0; PROC_PRINTF("%d\n",rg_db.systemGlobal.tcp_in_shortcut_learning_at_syn); return len; } #ifdef CONFIG_ROME_NAPT_SHORTCUT int rtk_rg_ipv4_shortcutOff_function_set(struct file *filp, const char *buff,unsigned long len, void *data){ int on; on = _rtk_rg_pasring_proc_string_to_integer(buff,len); if(on==0) rg_db.systemGlobal.ipv4_shortcut_off=0; else rg_db.systemGlobal.ipv4_shortcut_off=1; return len; } int rtk_rg_ipv4_shortcutOff_function_get(struct seq_file *s, void *v){ int len=0; PROC_PRINTF("%d\n",rg_db.systemGlobal.ipv4_shortcut_off); return len; } int _rtk_rg_ipv4_shortcut_timeout_get(struct seq_file *s, void *v) { int len=0; PROC_PRINTF("%d (default: %d secs)\n",rg_db.systemGlobal.v4ShortCut_timeout,RTK_RG_DEFAULT_V4_SHORTCUT_TIMEOUT); return len; } int _rtk_rg_ipv4_shortcut_timeout_set( struct file *filp, const char *buff,unsigned long len, void *data ) { rg_db.systemGlobal.v4ShortCut_timeout=_rtk_rg_pasring_proc_string_to_integer(buff,len); _rtk_rg_ipv4_shortcut_timeout_get(NULL,NULL); return len; } #endif #ifdef CONFIG_RG_IPV6_SOFTWARE_SHORTCUT_SUPPORT int rtk_rg_ipv6_shortcutOff_function_set(struct file *filp, const char *buff,unsigned long len, void *data){ int on; on = _rtk_rg_pasring_proc_string_to_integer(buff,len); if(on==0) rg_db.systemGlobal.ipv6_shortcut_off=0; else rg_db.systemGlobal.ipv6_shortcut_off=1; return len; } int rtk_rg_ipv6_shortcutOff_function_get(struct seq_file *s, void *v){ int len=0; PROC_PRINTF("%d\n",rg_db.systemGlobal.ipv6_shortcut_off); return len; } int _rtk_rg_ipv6_shortcut_timeout_get(struct seq_file *s, void *v) { int len=0; PROC_PRINTF("%d (default: %d 
secs)\n",rg_db.systemGlobal.v6ShortCut_timeout,RTK_RG_DEFAULT_V6_SHORTCUT_TIMEOUT); return len; } int _rtk_rg_ipv6_shortcut_timeout_set( struct file *filp, const char *buff,unsigned long len, void *data ) { rg_db.systemGlobal.v6ShortCut_timeout=_rtk_rg_pasring_proc_string_to_integer(buff,len); _rtk_rg_ipv6_shortcut_timeout_get(NULL,NULL); return len; } #endif #if defined(CONFIG_RG_FLOW_AUTO_AGEOUT) int _rtk_rg_flow_timeout_get(struct seq_file *s, void *v) { int len=0; PROC_PRINTF("%d (default: %d secs)\n",rg_db.systemGlobal.flow_timeout,RTK_RG_DEFAULT_FLOW_TIMEOUT); return len; } int _rtk_rg_flow_timeout_set( struct file *filp, const char *buff,unsigned long len, void *data ) { rg_db.systemGlobal.flow_timeout=_rtk_rg_pasring_proc_string_to_integer(buff,len); _rtk_rg_flow_timeout_get(NULL,NULL); return len; } #endif int _rtk_rg_get_arp_timeout(struct seq_file *s, void *v) { int len=0; PROC_PRINTF("%d (default: %d secs)\n",rg_db.systemGlobal.arp_timeout,RTK_RG_DEFAULT_ARP_TIMEOUT); return len; } int _rtk_rg_set_arp_timeout( struct file *filp, const char *buff,unsigned long len, void *data ) { rg_db.systemGlobal.arp_timeout=_rtk_rg_pasring_proc_string_to_integer(buff,len); _rtk_rg_get_arp_timeout(NULL,NULL); return len; } int _rtk_rg_get_neighbor_timeout(struct seq_file *s, void *v) { int len=0; PROC_PRINTF("%d (default: %d secs)\n",rg_db.systemGlobal.neighbor_timeout,RTK_RG_DEFAULT_NEIGHBOR_TIMEOUT); return len; } int _rtk_rg_set_neighbor_timeout( struct file *filp, const char *buff,unsigned long len, void *data ) { rg_db.systemGlobal.neighbor_timeout=_rtk_rg_pasring_proc_string_to_integer(buff,len); _rtk_rg_get_neighbor_timeout(NULL,NULL); return len; } int _rtk_rg_get_tcp_long_timeout(struct seq_file *s, void *v) { int len=0; PROC_PRINTF("%d (default: %d secs)\n",rg_db.systemGlobal.tcp_long_timeout,RTK_RG_DEFAULT_TCP_LONG_TIMEOUT); return len; } int _rtk_rg_set_tcp_long_timeout( struct file *filp, const char *buff,unsigned long len, void *data ) { rg_db.systemGlobal.tcp_long_timeout=_rtk_rg_pasring_proc_string_to_integer(buff,len); _rtk_rg_get_tcp_long_timeout(NULL,NULL); #if defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) //sync to hw _rtk_rg_refresh_tcp_longTimeout(rg_db.systemGlobal.tcp_long_timeout); #endif return len; } int _rtk_rg_get_tcp_short_timeout(struct seq_file *s, void *v) { int len=0; PROC_PRINTF("%d (default: %d secs)\n",rg_db.systemGlobal.tcp_short_timeout,RTK_RG_DEFAULT_TCP_SHORT_TIMEOUT); return len; } int _rtk_rg_set_tcp_short_timeout( struct file *filp, const char *buff,unsigned long len, void *data ) { rg_db.systemGlobal.tcp_short_timeout=_rtk_rg_pasring_proc_string_to_integer(buff,len); _rtk_rg_get_tcp_short_timeout(NULL,NULL); return len; } int _rtk_rg_get_udp_long_timeout(struct seq_file *s, void *v) { int len=0; PROC_PRINTF("%d (default: %d secs)\n",rg_db.systemGlobal.udp_long_timeout,RTK_RG_DEFAULT_UDP_LONG_TIMEOUT); return len; } int _rtk_rg_set_udp_long_timeout( struct file *filp, const char *buff,unsigned long len, void *data ) { rg_db.systemGlobal.udp_long_timeout=_rtk_rg_pasring_proc_string_to_integer(buff,len); _rtk_rg_get_udp_long_timeout(NULL,NULL); #if defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) //sync to hw _rtk_rg_refresh_udp_longTimeout(rg_db.systemGlobal.udp_long_timeout); #endif return len; } int _rtk_rg_get_udp_short_timeout(struct seq_file *s, void *v) { int len=0; PROC_PRINTF("%d (default: %d secs)\n",rg_db.systemGlobal.udp_short_timeout,RTK_RG_DEFAULT_UDP_SHORT_TIMEOUT); return len; } int _rtk_rg_set_udp_short_timeout( struct file *filp, const char 
*buff,unsigned long len, void *data ) { rg_db.systemGlobal.udp_short_timeout=_rtk_rg_pasring_proc_string_to_integer(buff,len); _rtk_rg_get_udp_short_timeout(NULL,NULL); return len; } int _rtk_rg_get_house_keep_sec(struct seq_file *s, void *v) { int len=0; PROC_PRINTF("%d (default: %d secs)\n",rg_db.systemGlobal.house_keep_sec,RTK_RG_DEFAULT_HOUSE_KEEP_SECOND); return len; } int _rtk_rg_set_house_keep_sec( struct file *filp, const char *buff,unsigned long len, void *data ) { rg_db.systemGlobal.house_keep_sec=_rtk_rg_pasring_proc_string_to_integer(buff,len); _rtk_rg_get_house_keep_sec(NULL,NULL); if(timer_pending(&rg_kernel.fwdEngineHouseKeepingTimer)) del_timer(&rg_kernel.fwdEngineHouseKeepingTimer); init_timer(&rg_kernel.fwdEngineHouseKeepingTimer); rg_kernel.fwdEngineHouseKeepingTimer.function = rtk_rg_fwdEngineHouseKeepingTimerFunc; if(rg_db.systemGlobal.house_keep_sec==0) rg_db.systemGlobal.house_keep_sec=RTK_RG_DEFAULT_HOUSE_KEEP_SECOND; mod_timer(&rg_kernel.fwdEngineHouseKeepingTimer, jiffies+(rg_db.systemGlobal.house_keep_sec*TICKTIME_PERIOD)); return len; } int _rtk_rg_arp_request_interval_sec_get(struct seq_file *s, void *v) { int len=0; PROC_PRINTF("%d (default: %d secs, minimal: 1 sec)\n",rg_db.systemGlobal.arp_requset_interval_sec,RTK_RG_DEFAULT_ARP_REQUEST_INTERVAL_SECOND); return len; } int _rtk_rg_arp_request_interval_sec_set( struct file *filp, const char *buff,unsigned long len, void *data ) { int wan_intf_idx; rg_db.systemGlobal.arp_requset_interval_sec=_rtk_rg_pasring_proc_string_to_integer(buff,len); if(rg_db.systemGlobal.arp_requset_interval_sec<1)rg_db.systemGlobal.arp_requset_interval_sec=1; _rtk_rg_arp_request_interval_sec_get(NULL,NULL); #ifdef __KERNEL__ for(wan_intf_idx=0; wan_intf_idx<MAX_NETIF_SW_TABLE_SIZE<<1; wan_intf_idx++) { if(rg_db.systemGlobal.intfArpRequest[wan_intf_idx].finished==0) { DEBUG("arp[%d](pptp or l2tp) miss, request arp=%x\n",wan_intf_idx,rg_db.systemGlobal.intfArpRequest[wan_intf_idx].reqIp); if(timer_pending(&rg_kernel.arpRequestTimer[wan_intf_idx])) mod_timer(&rg_kernel.arpRequestTimer[wan_intf_idx], jiffies+(rg_db.systemGlobal.arp_requset_interval_sec*TICKTIME_PERIOD)); } } #endif return len; } #if defined(CONFIG_RTL9602C_SERIES) int _rtk_rg_test_l34_set( struct file *filp, const char *buff,unsigned long len, void *data ) { int test_item; int i, k; uint64 count; rtk_rg_macEntry_t macEntry; rtk_rg_arpEntry_t arpEntry; int l2Idx, arpIdx; test_item = _rtk_rg_pasring_proc_string_to_integer(buff,len); switch(test_item) { case 1: #if defined(CONFIG_APOLLO_FPGA_PHY_TEST) && defined(CONFIG_RTL9602C_SERIES) for(i=0, count=3; i<MAX_ARP_HW_TABLE_SIZE_FPGA-3; i++, count++) #else for(i=0, count=3; i<MAX_ARP_HW_TABLE_SIZE-3; i++, count++) #endif { l2Idx=-1; memset(&macEntry,0,sizeof(rtk_rg_macEntry_t)); for(k=0; k<ETHER_ADDR_LEN; k++) { macEntry.mac.octet[ETHER_ADDR_LEN-k-1] = ((count+256)>>(k*8))&0xff; } macEntry.isIVL=0; macEntry.fid=1; macEntry.vlan_id=9; macEntry.port_idx=0; macEntry.arp_used=1; printk("MAC[%02x:%02x:%02x:%02x:%02x:%02x]\n",macEntry.mac.octet[0],macEntry.mac.octet[1],macEntry.mac.octet[2],macEntry.mac.octet[3],macEntry.mac.octet[4],macEntry.mac.octet[5]); assert_ok(rtk_rg_apollo_macEntry_add(&macEntry, &l2Idx)); arpEntry.ipv4Addr=0xc0a80100 + count; arpEntry.macEntryIdx=l2Idx; arpEntry.staticEntry=0; assert_ok(rtk_rg_apollo_arpEntry_add(&arpEntry,&arpIdx)); } break; case 2: #if defined(CONFIG_APOLLO_FPGA_PHY_TEST) && defined(CONFIG_RTL9602C_SERIES) for(i=0, count=257*2; i<MAX_LUT_BCAM_TABLE_SIZE_FPGA; i++, count+=257) #else 
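/* non-FPGA build of test case 2: walk the full L2 BCAM table size and add test MAC entries on port 2 */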
for(i=0, count=257*2; i<MAX_LUT_BCAM_TABLE_SIZE; i++, count+=257) #endif { l2Idx=-1; memset(&macEntry,0,sizeof(rtk_rg_macEntry_t)); for(k=0; k<ETHER_ADDR_LEN; k++) { macEntry.mac.octet[ETHER_ADDR_LEN-k-1] = (count>>(k*8))&0xff; } macEntry.isIVL=0; macEntry.fid=1; macEntry.vlan_id=10; macEntry.port_idx=2; //macEntry.arp_used=1; printk("MAC[%02x:%02x:%02x:%02x:%02x:%02x]\n",macEntry.mac.octet[0],macEntry.mac.octet[1],macEntry.mac.octet[2],macEntry.mac.octet[3],macEntry.mac.octet[4],macEntry.mac.octet[5]); assert_ok(rtk_rg_apollo_macEntry_add(&macEntry, &l2Idx)); } for(i=0, count=0; i<6; i++, count+=257) { l2Idx=-1; memset(&macEntry,0,sizeof(rtk_rg_macEntry_t)); for(k=0; k<ETHER_ADDR_LEN; k++) { macEntry.mac.octet[ETHER_ADDR_LEN-k-1] = (count>>(k*8))&0xff; } macEntry.mac.octet[1] += 0x1; macEntry.isIVL=0; macEntry.fid=1; macEntry.vlan_id=9; macEntry.port_idx=2; //macEntry.arp_used=1; printk("MAC[%02x:%02x:%02x:%02x:%02x:%02x]\n",macEntry.mac.octet[0],macEntry.mac.octet[1],macEntry.mac.octet[2],macEntry.mac.octet[3],macEntry.mac.octet[4],macEntry.mac.octet[5]); assert_ok(rtk_rg_apollo_macEntry_add(&macEntry, &l2Idx)); } break; case 3: rg_db.lut[992].valid=1; rg_db.lut[994].valid=1; rg_db.lut[995].valid=1; break; default: break; } return len; } int _rtk_rg_test_l34_get(struct seq_file *s, void *v) { int len=0; PROC_PRINTF("_rtk_rg_test_get\n"); return len; } #endif int _rtk_rg_auto_test_fail_arp_interval_sec_get(struct seq_file *s, void *v) { int len=0; PROC_PRINTF("%d (default: %d ) [unit: 0.1 sec]\n",rg_db.systemGlobal.auto_test_fail_arp_interval_sec, RTK_RG_DEFAULT_AUTO_TEST_FAIL_ARP_INTERVAL_SECOND); return len; } int _rtk_rg_auto_test_fail_arp_interval_sec_set( struct file *filp, const char *buff,unsigned long len, void *data ) { rg_db.systemGlobal.auto_test_fail_arp_interval_sec=_rtk_rg_pasring_proc_string_to_integer(buff,len); _rtk_rg_auto_test_fail_arp_interval_sec_get(NULL,NULL); return len; } void _rtk_rg_arpForFailTimerFunc(unsigned long reqIp) { #ifdef __KERNEL__ ipaddr_t gwIpAddr; rtk_rg_arp_request_t arpReq; gwIpAddr = 0xc0a80101; arpReq.finished = 1; arpReq.gwMacReqCallBack = NULL; arpReq.reqIp = (ipaddr_t)reqIp; _rtk_rg_arpGeneration(0, gwIpAddr, &arpReq); if(--rg_kernel.arpForAutoTestTimerCounter) mod_timer(&rg_kernel.arpForAutoTestTimer, jiffies+(TICKTIME_PERIOD*rg_db.systemGlobal.auto_test_fail_arp_interval_sec/10)); #endif } int _rtk_rg_SpoofIpExist(void) { int arp_valid_idx; rtk_rg_arpInfo_t arpInfo; int ret = 0; int i, spoofIp[] = {0xC0A80107, 0xC0A80108, 0xC0A80109}; for(i = 0; i < 3; i++) { bzero(&arpInfo, sizeof(rtk_rg_arpInfo_t)); //arpInfo.arpEntry.ipv4Addr=spoofIp[i]; #if defined(CONFIG_RTL9600_SERIES) || defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) arp_valid_idx = 7 + i; #else arp_valid_idx = i; #endif if(rtk_rg_apollo_arpEntry_find(&arpInfo, &arp_valid_idx)==RT_ERR_RG_OK) { //rtlglue_printf("arpInfo.arpEntry.macEntryIdx: %d, arpInfo.arpEntry.ipv4Addr: %x\n", arpInfo.arpEntry.macEntryIdx, arpInfo.arpEntry.ipv4Addr); #if defined(CONFIG_RTL9600_SERIES) || defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) if( (arpInfo.arpEntry.macEntryIdx == 652) && (arpInfo.arpEntry.ipv4Addr == spoofIp[i]) ) #else if( (arpInfo.arpEntry.macEntryIdx == 12) && (arpInfo.arpEntry.ipv4Addr == spoofIp[i]) ) #endif ret++; } } return (ret > 1)? 
1: 0; } int _rtk_rg_SpoofMacExist(void) { int arp_valid_idx=-1; rtk_rg_arpInfo_t arpInfo; int ret = 0; arpInfo.arpEntry.ipv4Addr=0xC0A80107; if(rtk_rg_apollo_arpEntry_find(&arpInfo, &arp_valid_idx)==RT_ERR_RG_OK) { //rtlglue_printf("arpInfo.arpEntry.macEntryIdx: %d\n", arpInfo.arpEntry.macEntryIdx); #if defined(CONFIG_RTL9600_SERIES) || defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) if((arpInfo.arpEntry.macEntryIdx != 640) && (arpInfo.arpEntry.macEntryIdx != 1664)) #else if((arpInfo.arpEntry.macEntryIdx != 16) && (arpInfo.arpEntry.macEntryIdx != 20)) #endif ret = 0; else ret = 1; } return ret; } int _rtk_rg_SpoofNeighborExist(void) { int rtidx; rtk_rg_neighborInfo_t neighborInfo; unsigned char *ipv6Addr = "2001::1"; unsigned char ipv6Sip[16]; int neighbor_valid_idx; int ret = 0; in6_pton(ipv6Sip, -1, ipv6Addr, -1, NULL); memcpy(neighborInfo.neighborEntry.interfaceId,ipv6Sip+8,8); neighborInfo.neighborEntry.matchRouteIdx=0; neighbor_valid_idx=8; if((rtidx=rtk_rg_apollo_neighborEntry_find(&neighborInfo,&neighbor_valid_idx))==RT_ERR_RG_OK){ //rtlglue_printf("neighborInfo.neighborEntry.l2Idx: %d\n", neighborInfo.neighborEntry.l2Idx); #if defined(CONFIG_RTL9600_SERIES) || defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) if( (neighborInfo.neighborEntry.l2Idx != 640) && (neighborInfo.neighborEntry.l2Idx != 1664) ) #else if( (neighborInfo.neighborEntry.l2Idx != 16) && (neighborInfo.neighborEntry.l2Idx != 20) ) #endif ret = 0; else ret = 1; } else { rtlglue_printf("\nret: %d, fail neighborInfo.neighborEntry.l2Idx: %d\n", rtidx, neighborInfo.neighborEntry.l2Idx); ret = 0; } return ret; } int _rtk_rg_auto_test_result_check_set( struct file *filp, const char *buff,unsigned long len, void *data ) { int test_item; int test_result = FAIL; test_item = _rtk_rg_pasring_proc_string_to_integer(buff,len); switch(test_item) { case 1: if(strcmp(rg_db.lut[rg_db.arp[2].rtk_arp.nhIdx].dev_name, "R0")==0 && rg_db.arp[2].lanNetInfo.dev_type==RG_LANNET_TYPE_COMPUTER && rg_db.arp[2].lanNetInfo.brand==RG_BRAND_OTHER && rg_db.arp[2].lanNetInfo.model==RG_MODEL_OTHER && rg_db.arp[2].lanNetInfo.os==RG_OS_WINDOWS_NT && rg_db.lut[rg_db.arp[2].rtk_arp.nhIdx].conn_type==RG_CONN_MAC_PORT) test_result = SUCCESS; break; case 2: if(_rtk_rg_SpoofMacExist()) test_result = SUCCESS; break; case 3: if(!_rtk_rg_SpoofMacExist()) test_result = SUCCESS; break; case 4: if(_rtk_rg_SpoofNeighborExist()) test_result = SUCCESS; break; case 5: if(!_rtk_rg_SpoofNeighborExist()) test_result = SUCCESS; break; case 6: if(_rtk_rg_SpoofIpExist()) test_result = SUCCESS; break; case 7: if(!_rtk_rg_SpoofIpExist()) test_result = SUCCESS; break; default: break; } if(test_result==FAIL) { #ifdef __KERNEL__ unsigned long reqIp=0xc0a85566; rg_kernel.arpForAutoTestTimerCounter = 3; if(timer_pending(&rg_kernel.arpForAutoTestTimer)) del_timer(&rg_kernel.arpForAutoTestTimer); init_timer(&rg_kernel.arpForAutoTestTimer); rg_kernel.arpForAutoTestTimer.data = reqIp; rg_kernel.arpForAutoTestTimer.function = _rtk_rg_arpForFailTimerFunc; mod_timer(&rg_kernel.arpForAutoTestTimer, jiffies+(TICKTIME_PERIOD*rg_db.systemGlobal.auto_test_fail_arp_interval_sec/10)); #endif rtlglue_printf("\033[1;32mTest case no.%d is failed.\033[0m\n", test_item); } else { rtlglue_printf("\033[1;32mTest case no.%d is successful.\033[0m\n", test_item); } return len; } int _rtk_rg_auto_test_result_check_get(struct seq_file *s, void *v) { int len=0; PROC_PRINTF("_rtk_rg_auto_test_result_check_get\n"); return len; } int _rtk_rg_get_igmp_sys_timer_sec(struct seq_file *s, void *v) { int len=0; PROC_PRINTF("%d (default: 
%d secs)\n",rg_db.systemGlobal.igmp_sys_timer_sec,RTK_RG_DEFAULT_IGMP_SYS_TIMER_INTERVAL); return len; } int _rtk_rg_set_igmp_sys_timer_sec( struct file *filp, const char *buff,unsigned long len, void *data ) { rg_db.systemGlobal.igmp_sys_timer_sec=_rtk_rg_pasring_proc_string_to_integer(buff,len); _rtk_rg_get_igmp_sys_timer_sec(NULL,NULL); _rtk_rg_igmpSnoopingOnOff(rg_db.systemGlobal.initParam.igmpSnoopingEnable,1,rg_db.systemGlobal.initParam.ivlMulticastSupport); return len; } int _rtk_rg_get_igmp_max_simultaneous_group_size(struct seq_file *s, void *v) { int len=0; if(rg_db.systemGlobal.igmp_max_simultaneous_group_size==RTK_RG_DEFAULT_IGMP_SYS_MAX_SIMULTANEOUS_GROUP_SIZE_UNLIMIT) { PROC_PRINTF("igmp_max_simultaneous_group_size is unlimited \n"); } else { PROC_PRINTF("igmp_max_simultaneous_group_size is %d \n",rg_db.systemGlobal.igmp_max_simultaneous_group_size); } return len; } int _rtk_rg_set_igmp_max_simultaneous_group_size( struct file *filp, const char *buff,unsigned long len, void *data ) { rg_db.systemGlobal.igmp_max_simultaneous_group_size=_rtk_rg_pasring_proc_string_to_integer(buff,len); _rtk_rg_get_igmp_max_simultaneous_group_size(NULL,NULL); return len; } int _rtk_rg_get_mcast_query_sec(struct seq_file *s, void *v) { int len=0; if (rg_db.systemGlobal.mcast_query_sec==0) { PROC_PRINTF("disable (0:disable; default: %d secs)\n",RTK_RG_DEFAULT_MCAST_QUERY_INTERVAL); }else{ PROC_PRINTF("%d (0:disable; default: %d secs)\n",rg_db.systemGlobal.mcast_query_sec,RTK_RG_DEFAULT_MCAST_QUERY_INTERVAL); } return len; } int _rtk_rg_set_mcast_query_sec( struct file *filp, const char *buff,unsigned long len, void *data ) { rg_db.systemGlobal.mcast_query_sec =_rtk_rg_pasring_proc_string_to_integer(buff,len); //if(rg_db.systemGlobal.mcast_query_sec<1)rg_db.systemGlobal.mcast_query_sec=1; _rtk_rg_get_mcast_query_sec(NULL,NULL); _rtk_rg_igmpSnoopingOnOff(rg_db.systemGlobal.initParam.igmpSnoopingEnable,1,rg_db.systemGlobal.initParam.ivlMulticastSupport); return len; } int _rtk_rg_get_mcast_force_report_sec(struct seq_file *s, void *v) { int len=0; if(rg_db.systemGlobal.forceReportResponseTime==0){ PROC_PRINTF("client force report time disabled\n"); }else{ PROC_PRINTF("client must report in %d sec\n",rg_db.systemGlobal.forceReportResponseTime); } return len; } int _rtk_rg_set_mcast_force_report_sec( struct file *filp, const char *buff,unsigned long len, void *data ) { rg_db.systemGlobal.forceReportResponseTime=_rtk_rg_pasring_proc_string_to_integer(buff,len); _rtk_rg_get_mcast_force_report_sec(NULL,NULL); return len; } int _rtk_rg_get_mcast_protocol(struct seq_file *s, void *v) { int len=0; PROC_PRINTF("%s\n",rg_db.systemGlobal.multicastProtocol==RG_MC_BOTH_IGMP_MLD?"Both IGMP and MLD.": rg_db.systemGlobal.multicastProtocol==RG_MC_IGMP_ONLY?"IGMP Only.":"MLD Only."); return len; } int _rtk_rg_set_mcast_protocol( struct file *filp, const char *buff,unsigned long len, void *data ) { int tmp; tmp=_rtk_rg_pasring_proc_string_to_integer(buff,len); if(tmp>=RG_MC_PROTOCOL_END) { rtlglue_printf("echo 0 for Both IGMP and MLD, echo 1 for IGMP only, echo 2 for MLD only.\n"); return len; } rg_db.systemGlobal.multicastProtocol=tmp; _rtk_rg_get_mcast_protocol(NULL,NULL); _rtk_rg_igmpSnoopingOnOff(rg_db.systemGlobal.initParam.igmpSnoopingEnable,0,rg_db.systemGlobal.initParam.ivlMulticastSupport); return len; } int rtk_rg_qosInternalAndRemark_show(struct seq_file *s, void *v) { int len=0; int i; PROC_PRINTF("============== Qos=>InternalPri=======================\n"); PROC_PRINTF("\n"); 
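	/* The per-source weights printed below cover the priority sources (port-based, dot1p, DSCP, ACL,
	 * LUT-forward, SA-based, VLAN/SVLAN-based, L4-based); when more than one source proposes an
	 * internal priority for the same packet, the higher-weighted source presumably wins. The remap
	 * and remark tables that follow are what each source consults. */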
PROC_PRINTF("WEIGHT_OF_PORTBASED:%d\n",rg_db.systemGlobal.qosInternalDecision.internalPriSelectWeight[WEIGHT_OF_PORTBASED]); PROC_PRINTF("WEIGHT_OF_DOT1Q:%d\n",rg_db.systemGlobal.qosInternalDecision.internalPriSelectWeight[WEIGHT_OF_DOT1Q]); PROC_PRINTF("WEIGHT_OF_DSCP:%d\n",rg_db.systemGlobal.qosInternalDecision.internalPriSelectWeight[WEIGHT_OF_DSCP]); PROC_PRINTF("WEIGHT_OF_ACL:%d\n",rg_db.systemGlobal.qosInternalDecision.internalPriSelectWeight[WEIGHT_OF_ACL]); PROC_PRINTF("WEIGHT_OF_LUTFWD:%d\n",rg_db.systemGlobal.qosInternalDecision.internalPriSelectWeight[WEIGHT_OF_LUTFWD]); PROC_PRINTF("WEIGHT_OF_SABASED:%d\n",rg_db.systemGlobal.qosInternalDecision.internalPriSelectWeight[WEIGHT_OF_SABASED]); PROC_PRINTF("WEIGHT_OF_VLANBASED:%d\n",rg_db.systemGlobal.qosInternalDecision.internalPriSelectWeight[WEIGHT_OF_VLANBASED]); PROC_PRINTF("WEIGHT_OF_SVLANBASED:%d\n",rg_db.systemGlobal.qosInternalDecision.internalPriSelectWeight[WEIGHT_OF_SVLANBASED]); PROC_PRINTF("WEIGHT_OF_L4BASED:%d\n",rg_db.systemGlobal.qosInternalDecision.internalPriSelectWeight[WEIGHT_OF_L4BASED]); PROC_PRINTF("\nqosDot1pPriRemapToInternalPriTbl:\n"); for(i=0;i<8;i++){ PROC_PRINTF("Dot1p[%d]=>remap to InternalPri:%d\n",i,rg_db.systemGlobal.qosInternalDecision.qosDot1pPriRemapToInternalPriTbl[i]); } PROC_PRINTF("\nqosDscpRemapToInternalPri:\n"); for(i=0;i<64;i++){ PROC_PRINTF("Dscp[%d]=>remap to InternalPri:%d\n",i,rg_db.systemGlobal.qosInternalDecision.qosDscpRemapToInternalPri[i]); } PROC_PRINTF("\nqosPortBasedPriority:\n"); for(i=0;i<RTK_RG_MAC_PORT_MAX;i++){ PROC_PRINTF("Port[%d]=>remap to InternalPri:%d\n",i,rg_db.systemGlobal.qosInternalDecision.qosPortBasedPriority[i]); } PROC_PRINTF("============== InternalPri=>dot1p Remaking=======================\n"); PROC_PRINTF("\nqosDot1pPriRemarkByInternalPriEgressPortEnable:\n"); for(i=0;i<RTK_RG_MAC_PORT_MAX;i++){ PROC_PRINTF("Port[%d] dot1p Remark Enable:%d\n",i,rg_db.systemGlobal.qosInternalDecision.qosDot1pPriRemarkByInternalPriEgressPortEnable[i]); } PROC_PRINTF("\nqosDot1pPriRemarkByInternalPri:\n"); for(i=0;i<RTK_RG_MAC_PORT_MAX;i++){ PROC_PRINTF("InternalPri[%d]=>remark to dot1p:%d\n",i,rg_db.systemGlobal.qosInternalDecision.qosDot1pPriRemarkByInternalPri[i]); } PROC_PRINTF("============== InternalPri or DSCP=>DSCP Remaking=======================\n"); PROC_PRINTF("\nqosDscpRemarkEgressPortEnableAndSrcSelect:\n"); for(i=0;i<RTK_RG_MAC_PORT_MAX;i++){ PROC_PRINTF("Port[%d] DSCP Remark Enable:%d (0:disable 1:Remark by Internal Pri 2:Remark by DSCP)\n",i,rg_db.systemGlobal.qosInternalDecision.qosDscpRemarkEgressPortEnableAndSrcSelect[i]); } PROC_PRINTF("\nqosDscpRemarkByInternalPri:\n"); for(i=0;i<RTK_RG_MAC_PORT_MAX;i++){ PROC_PRINTF("InternalPri[%d]=>remark to dscp:%d\n",i,rg_db.systemGlobal.qosInternalDecision.qosDscpRemarkByInternalPri[i]); } PROC_PRINTF("\nqosDscpRemarkByDscp:\n"); for(i=0;i<64;i++){ PROC_PRINTF("DSCP[%d]=>remark to DSCP:%d\n",i,rg_db.systemGlobal.qosInternalDecision.qosDscpRemarkByDscp[i]); } #if 0 TRACE("QoS dscp Remarking by port[%d]:%s",egressPort,rg_db.systemGlobal.qosInternalDecision.qosDscpRemarkEgressPortEnableAndSrcSelect[egressPort]?"ENABLED":"DISABLED"); if(rg_db.systemGlobal.qosInternalDecision.qosDscpRemarkEgressPortEnableAndSrcSelect[egressPort]){ if(rg_db.systemGlobal.qosInternalDecision.qosDscpRemarkEgressPortEnableAndSrcSelect[egressPort]==ENABLED_DSCP_REMARK_AND_SRC_FROM_INTERNALPRI){ if(pPktHdr->pTos!=NULL){//packet may not have IP header if(pPktHdr->tagif&IPV6_TAGIF) { //dscp is the MSB 6 bits of traffic class tos = 
rg_db.systemGlobal.qosInternalDecision.qosDscpRemarkByInternalPri[pPktHdr->internalPriority]>>0x2; //dscp MSB 4 bits tos |= (*pPktHdr->pTos)&0xf0; //keep version 4 bits *pPktHdr->pTos=tos; tos = (rg_db.systemGlobal.qosInternalDecision.qosDscpRemarkByInternalPri[pPktHdr->internalPriority]&0x3)<<0x6; //dscp LSB 2 bits tos |= (*(pPktHdr->pTos+1))&0x3f; //keep original traffic label LSB 2 bits and flow label MSB 4 bits *(pPktHdr->pTos+1)=tos; } else { tos = rg_db.systemGlobal.qosInternalDecision.qosDscpRemarkByInternalPri[pPktHdr->internalPriority]<<0x2; tos |= (*pPktHdr->pTos)&0x3; //keep 2 bits from LSB *pPktHdr->pTos=tos; //remarking tos of packet } } }else if(rg_db.systemGlobal.qosInternalDecision.qosDscpRemarkEgressPortEnableAndSrcSelect[egressPort]==ENABLED_DSCP_REMARK_AND_SRC_FROM_DSCP){ if(pPktHdr->pTos!=NULL){//packet may not have IP header if(pPktHdr->tagif&IPV6_TAGIF) { //dscp is the MSB 6 bits of traffic class tos = rg_db.systemGlobal.qosInternalDecision.qosDscpRemarkByInternalPri[pPktHdr->internalPriority]>>0x2; //dscp MSB 4 bits tos |= (*pPktHdr->pTos)&0xf0; //keep version 4 bits *pPktHdr->pTos=tos; tos = (rg_db.systemGlobal.qosInternalDecision.qosDscpRemarkByInternalPri[pPktHdr->internalPriority]&0x3)<<0x6; //dscp LSB 2 bits tos |= (*(pPktHdr->pTos+1))&0x3f; //keep original traffic label LSB 2 bits and flow label MSB 4 bits *(pPktHdr->pTos+1)=tos; } else { tos = rg_db.systemGlobal.qosInternalDecision.qosDscpRemarkByInternalPri[pPktHdr->internalPriority]<<0x2; tos |= (*pPktHdr->pTos)&0x3; //keep 2 bits from LSB *pPktHdr->pTos=tos; //remarking tos of packet } } } } #endif return len; } int rtk_rg_debug_level_change(struct file *file, const char *buffer, unsigned long count, void *data) { unsigned char tmpBuf[16] = {0}; int len = (count > 15) ? 15 : count; if (buffer && !copy_from_user(tmpBuf, buffer, len)) { rg_kernel.debug_level=simple_strtoul(tmpBuf, NULL, 16); rtk_rg_debug_level_show(NULL,NULL); #if defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) { extern int DumpSwNicTxRx_debug; if(rg_kernel.debug_level&RTK_RG_DEBUG_LEVEL_DEBUG) DumpSwNicTxRx_debug=1; else DumpSwNicTxRx_debug=0; } #endif return count; } return -EFAULT; } int rtk_rg_filter_level_change(struct file *file, const char *buffer, unsigned long count, void *data) { unsigned char tmpBuf[16] = {0}; int len = (count > 15) ? 15 : count; if (buffer && !copy_from_user(tmpBuf, buffer, len)) { rg_kernel.filter_level=simple_strtoul(tmpBuf, NULL, 16); rtk_rg_filter_level_show(NULL,NULL); return count; } return -EFAULT; } #if defined(CONFIG_RTL9600_SERIES) #else //support lut traffic bit int _rtk_rg_get_l2_timeout(struct seq_file *s, void *v) { int len=0; PROC_PRINTF("%d (default: %d secs)\n",rg_db.systemGlobal.l2_timeout,RTK_RG_DEFAULT_L2_TIMEOUT); return len; } int _rtk_rg_set_l2_timeout( struct file *filp, const char *buff,unsigned long len, void *data ) { rg_db.systemGlobal.l2_timeout=_rtk_rg_pasring_proc_string_to_integer(buff,len); _rtk_rg_get_l2_timeout(NULL,NULL); return len; } #endif int rtk_rg_l2HwAgingShow(void) { rtlglue_printf("RomeDriver L2 HW Aging:0x%x\n",rg_kernel.l2_hw_aging); return 0; } int rtk_rg_l2HwAgingChange(struct file *file, const char *buffer, unsigned long count, void *data) { unsigned char tmpBuf[16] = {0}; int len = (count > 15) ? 
15 : count; if (buffer && !copy_from_user(tmpBuf, buffer, len)) { rg_kernel.l2_hw_aging=simple_strtoul(tmpBuf, NULL, 0); rtk_rg_l2HwAgingShow(); return count; } return -EFAULT; } int rtk_rg_traceFilterShow(struct seq_file *s, void *v) { int i; int len=0; PROC_PRINTF("(Items: SPA,DA,SA,ETH,SIP,DIP,IP,L4PROTO,SPORT,DPORT,REASON,CVLAN,SVLAN,PPPOESSID,SIP6,DIP6,TIMES)\n"); PROC_PRINTF("RULE MSK=%x\n",rg_kernel.traceFilterRuleMask); for(i=0;i<TRACFILTER_MAX;i++){ PROC_PRINTF("RomeDriver Trace Filter[%d]:0x%x \n",i,rg_kernel.trace_filter_bitmask[i]); } PROC_PRINTF("\nExample1(Dump TRACE_LOG when it is IPv4 unicast packet from CPU):\n"); PROC_PRINTF(" echo \"SPA 6 DA 00:00:00:00:00:00 01:00:00:00:00:00 ETH 0800\" > /proc/rg/trace_filter\n"); PROC_PRINTF(" echo 8 > /proc/rg/filter_level\n"); PROC_PRINTF(" echo 8 > /proc/rg/debug_level\n"); PROC_PRINTF("Example2(Dump DEBUG_LOG when it is Broadcast ARP packet):\n"); PROC_PRINTF(" echo \"DA FF:FF:FF:FF:FF:FF FF:FF:FF:FF:FF:FF ETH 0806\" > /proc/rg/trace_filter\n"); PROC_PRINTF(" echo 0x80000001 > /proc/rg/filter_level\n"); PROC_PRINTF(" echo 0x80000001 > /proc/rg/debug_level\n\n"); PROC_PRINTF("Example3(rule option) select specific RULE from 0-3 (default used RULE 0) :\n"); PROC_PRINTF(" echo \"RULE 3 SPA 2 SIP 192.168.1.1 DIP 8.8.8.8 \" > /proc/rg/trace_filter\n"); PROC_PRINTF(" echo 0x80000001 > /proc/rg/filter_level\n"); PROC_PRINTF(" echo 0x80000001 > /proc/rg/debug_level\n\n"); for(i=0;i<TRACFILTER_MAX;i++){ PROC_PRINTF("[RULE %d]:\n",i); if(rg_kernel.trace_filter_bitmask[i]&RTK_RG_DEBUG_TRACE_FILTER_SPA) PROC_PRINTF("SPA:%d\n",rg_kernel.trace_filter[i].spa); if(rg_kernel.trace_filter_bitmask[i]&RTK_RG_DEBUG_TRACE_FILTER_DA) PROC_PRINTF("DA:%02x:%02x:%02x:%02x:%02x:%02x MASK:%02x:%02x:%02x:%02x:%02x:%02x\n" ,rg_kernel.trace_filter[i].dmac.octet[0] ,rg_kernel.trace_filter[i].dmac.octet[1] ,rg_kernel.trace_filter[i].dmac.octet[2] ,rg_kernel.trace_filter[i].dmac.octet[3] ,rg_kernel.trace_filter[i].dmac.octet[4] ,rg_kernel.trace_filter[i].dmac.octet[5] ,rg_kernel.trace_filter[i].dmac_mask.octet[0] ,rg_kernel.trace_filter[i].dmac_mask.octet[1] ,rg_kernel.trace_filter[i].dmac_mask.octet[2] ,rg_kernel.trace_filter[i].dmac_mask.octet[3] ,rg_kernel.trace_filter[i].dmac_mask.octet[4] ,rg_kernel.trace_filter[i].dmac_mask.octet[5]); if(rg_kernel.trace_filter_bitmask[i]&RTK_RG_DEBUG_TRACE_FILTER_SA) PROC_PRINTF("SA:%02x:%02x:%02x:%02x:%02x:%02x MASK:%02x:%02x:%02x:%02x:%02x:%02x\n" ,rg_kernel.trace_filter[i].smac.octet[0] ,rg_kernel.trace_filter[i].smac.octet[1] ,rg_kernel.trace_filter[i].smac.octet[2] ,rg_kernel.trace_filter[i].smac.octet[3] ,rg_kernel.trace_filter[i].smac.octet[4] ,rg_kernel.trace_filter[i].smac.octet[5] ,rg_kernel.trace_filter[i].smac_mask.octet[0] ,rg_kernel.trace_filter[i].smac_mask.octet[1] ,rg_kernel.trace_filter[i].smac_mask.octet[2] ,rg_kernel.trace_filter[i].smac_mask.octet[3] ,rg_kernel.trace_filter[i].smac_mask.octet[4] ,rg_kernel.trace_filter[i].smac_mask.octet[5]); if(rg_kernel.trace_filter_bitmask[i]&RTK_RG_DEBUG_TRACE_FILTER_ETH) PROC_PRINTF("ETH:0x%04x\n",rg_kernel.trace_filter[i].ethertype); if(rg_kernel.trace_filter_bitmask[i]&RTK_RG_DEBUG_TRACE_FILTER_SIP) PROC_PRINTF("SIP:%d.%d.%d.%d\n" ,(rg_kernel.trace_filter[i].sip>>24)&0xff ,(rg_kernel.trace_filter[i].sip>>16)&0xff ,(rg_kernel.trace_filter[i].sip>>8)&0xff ,(rg_kernel.trace_filter[i].sip)&0xff); if(rg_kernel.trace_filter_bitmask[i]&RTK_RG_DEBUG_TRACE_FILTER_DIP) PROC_PRINTF("DIP:%d.%d.%d.%d\n" ,(rg_kernel.trace_filter[i].dip>>24)&0xff 
,(rg_kernel.trace_filter[i].dip>>16)&0xff ,(rg_kernel.trace_filter[i].dip>>8)&0xff ,(rg_kernel.trace_filter[i].dip)&0xff); if(rg_kernel.trace_filter_bitmask[i]&RTK_RG_DEBUG_TRACE_FILTER_IP) PROC_PRINTF("IP:%d.%d.%d.%d\n" ,(rg_kernel.trace_filter[i].ip>>24)&0xff ,(rg_kernel.trace_filter[i].ip>>16)&0xff ,(rg_kernel.trace_filter[i].ip>>8)&0xff ,(rg_kernel.trace_filter[i].ip)&0xff); if(rg_kernel.trace_filter_bitmask[i]&RTK_RG_DEBUG_TRACE_FILTER_L4PROTO) PROC_PRINTF("L4PROTO:0x%04x\n",rg_kernel.trace_filter[i].l4proto); if(rg_kernel.trace_filter_bitmask[i]&RTK_RG_DEBUG_TRACE_FILTER_SPORT) PROC_PRINTF("SPORT:%d\n",rg_kernel.trace_filter[i].sport); if(rg_kernel.trace_filter_bitmask[i]&RTK_RG_DEBUG_TRACE_FILTER_DPORT) PROC_PRINTF("DPORT:%d\n",rg_kernel.trace_filter[i].dport); if(rg_kernel.trace_filter_bitmask[i]&RTK_RG_DEBUG_TRACE_FILTER_REASON) PROC_PRINTF("REASON:%d\n",rg_kernel.trace_filter[i].reason); if(rg_kernel.trace_filter_bitmask[i]&RTK_RG_DEBUG_TRACE_FILTER_CVLAN) PROC_PRINTF("CVLAN:%d\n",rg_kernel.trace_filter[i].cvlanid); if(rg_kernel.trace_filter_bitmask[i]&RTK_RG_DEBUG_TRACE_FILTER_SVLAN) PROC_PRINTF("SVLAN:%d\n",rg_kernel.trace_filter[i].svlanid); if(rg_kernel.trace_filter_bitmask[i]&RTK_RG_DEBUG_TRACE_FILTER_PPPOESESSIONID) PROC_PRINTF("PPPOE_SESSION:%d\n",rg_kernel.trace_filter[i].sessionid); if(rg_kernel.trace_filter_bitmask[i]&RTK_RG_DEBUG_TRACE_FILTER_V6SIP) { PROC_PRINTF("SIP6:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x\n", rg_kernel.trace_filter[i].sipv6[0],rg_kernel.trace_filter[i].sipv6[1],rg_kernel.trace_filter[i].sipv6[2],rg_kernel.trace_filter[i].sipv6[3], rg_kernel.trace_filter[i].sipv6[4],rg_kernel.trace_filter[i].sipv6[5],rg_kernel.trace_filter[i].sipv6[6],rg_kernel.trace_filter[i].sipv6[7], rg_kernel.trace_filter[i].sipv6[8],rg_kernel.trace_filter[i].sipv6[9],rg_kernel.trace_filter[i].sipv6[10],rg_kernel.trace_filter[i].sipv6[11], rg_kernel.trace_filter[i].sipv6[12],rg_kernel.trace_filter[i].sipv6[13],rg_kernel.trace_filter[i].sipv6[14],rg_kernel.trace_filter[i].sipv6[15]); } if(rg_kernel.trace_filter_bitmask[i]&RTK_RG_DEBUG_TRACE_FILTER_V6DIP) { PROC_PRINTF("SIP6:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x\n", rg_kernel.trace_filter[i].dipv6[0],rg_kernel.trace_filter[i].dipv6[1],rg_kernel.trace_filter[i].dipv6[2],rg_kernel.trace_filter[i].dipv6[3], rg_kernel.trace_filter[i].dipv6[4],rg_kernel.trace_filter[i].dipv6[5],rg_kernel.trace_filter[i].dipv6[6],rg_kernel.trace_filter[i].dipv6[7], rg_kernel.trace_filter[i].dipv6[8],rg_kernel.trace_filter[i].dipv6[9],rg_kernel.trace_filter[i].dipv6[10],rg_kernel.trace_filter[i].dipv6[11], rg_kernel.trace_filter[i].dipv6[12],rg_kernel.trace_filter[i].dipv6[13],rg_kernel.trace_filter[i].dipv6[14],rg_kernel.trace_filter[i].dipv6[15]); } if(rg_kernel.trace_filter_bitmask[i]&RTK_RG_DEBUG_TRACE_FILTER_SHOWNUMBEROFTIMES) PROC_PRINTF("ShowNumberOfTimes:%d counter=%d \n",rg_kernel.trace_filter[i].showNumberOfTimes,rg_kernel.trace_filter[i].showNumberOfTimesCounter); } return len; } int rtk_rg_traceFilterChange(struct file *file, const char *buffer, unsigned long count, void *data) { unsigned char tmpBuf[256] = {0}; int len = (count > 256) ? 
256 : count; if (buffer && !copy_from_user(tmpBuf, buffer, len)) { //rg_kernel.debug_level=simple_strtoul(tmpBuf, NULL, 16); char *strptr,*split_str; int i=-1; tmpBuf[count] = '\0'; strptr=tmpBuf; while(1) { split_str=strsep(&strptr," "); next_token: //printk("%d:%s\n",i++,split_str); if(i==-1){ //MUST be the first parameter if(strcasecmp(split_str,"RULE")==0) { split_str=strsep(&strptr," "); i=simple_strtol(split_str, NULL, 0); if(i>=TRACFILTER_MAX || i<0) i=0; }else{ i=0; } rg_kernel.trace_filter_bitmask[i]=0; memset(&rg_kernel.trace_filter[i],0,sizeof(rtk_rgDebugTraceFilter_t)); } if(strcasecmp(split_str,"SPA")==0) { split_str=strsep(&strptr," "); rg_kernel.trace_filter_bitmask[i]|=RTK_RG_DEBUG_TRACE_FILTER_SPA; rg_kernel.trace_filter[i].spa=simple_strtol(split_str, NULL, 0); } else if(strcasecmp(split_str,"DA")==0) { split_str=strsep(&strptr," "); rg_kernel.trace_filter_bitmask[i]|=RTK_RG_DEBUG_TRACE_FILTER_DA; _rtk_rg_str2mac(split_str,&rg_kernel.trace_filter[i].dmac); if(strptr==NULL) { memset(rg_kernel.trace_filter[i].dmac_mask.octet,0xff,6); break; } split_str=strsep(&strptr," "); if(strlen(split_str)<10) goto next_token; _rtk_rg_str2mac(split_str,&rg_kernel.trace_filter[i].dmac_mask); } else if(strcasecmp(split_str,"SA")==0) { split_str=strsep(&strptr," "); rg_kernel.trace_filter_bitmask[i]|=RTK_RG_DEBUG_TRACE_FILTER_SA; _rtk_rg_str2mac(split_str,&rg_kernel.trace_filter[i].smac); if(strptr==NULL) { memset(rg_kernel.trace_filter[i].smac_mask.octet,0xff,6); break; } split_str=strsep(&strptr," "); if(strlen(split_str)<10) goto next_token; _rtk_rg_str2mac(split_str,&rg_kernel.trace_filter[i].smac_mask); } else if(strcasecmp(split_str,"ETH")==0) { split_str=strsep(&strptr," "); rg_kernel.trace_filter_bitmask[i]|=RTK_RG_DEBUG_TRACE_FILTER_ETH; rg_kernel.trace_filter[i].ethertype=simple_strtol(split_str, NULL, 16); } else if(strcasecmp(split_str,"SIP")==0) { char *ip_token,*split_ip_token,j; split_str=strsep(&strptr," "); rg_kernel.trace_filter_bitmask[i]|=RTK_RG_DEBUG_TRACE_FILTER_SIP; ip_token=split_str; rg_kernel.trace_filter[i].sip=0; for(j=0;j<4;j++) { split_ip_token=strsep(&ip_token,"."); rg_kernel.trace_filter[i].sip|=(simple_strtol(split_ip_token, NULL, 0)<<((3-j)<<3)); if(ip_token==NULL) break; } } else if(strcasecmp(split_str,"DIP")==0) { char *ip_token,*split_ip_token,j; split_str=strsep(&strptr," "); rg_kernel.trace_filter_bitmask[i]|=RTK_RG_DEBUG_TRACE_FILTER_DIP; ip_token=split_str; rg_kernel.trace_filter[i].dip=0; for(j=0;j<4;j++) { split_ip_token=strsep(&ip_token,"."); rg_kernel.trace_filter[i].dip|=(simple_strtol(split_ip_token, NULL, 0)<<((3-j)<<3)); if(ip_token==NULL) break; } } else if(strcasecmp(split_str,"IP")==0) { char *ip_token,*split_ip_token,j; split_str=strsep(&strptr," "); rg_kernel.trace_filter_bitmask[i]|=RTK_RG_DEBUG_TRACE_FILTER_IP; ip_token=split_str; rg_kernel.trace_filter[i].ip=0; for(j=0;j<4;j++) { split_ip_token=strsep(&ip_token,"."); rg_kernel.trace_filter[i].ip|=(simple_strtol(split_ip_token, NULL, 0)<<((3-j)<<3)); if(ip_token==NULL) break; } } else if(strcasecmp(split_str,"L4PROTO")==0) { split_str=strsep(&strptr," "); rg_kernel.trace_filter_bitmask[i]|=RTK_RG_DEBUG_TRACE_FILTER_L4PROTO; rg_kernel.trace_filter[i].l4proto=simple_strtol(split_str, NULL, 16); } else if(strcasecmp(split_str,"SPORT")==0) { split_str=strsep(&strptr," "); rg_kernel.trace_filter_bitmask[i]|=RTK_RG_DEBUG_TRACE_FILTER_SPORT; rg_kernel.trace_filter[i].sport=simple_strtol(split_str, NULL, 0); } else if(strcasecmp(split_str,"DPORT")==0) { split_str=strsep(&strptr," "); 
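/* next token is the destination L4 port; base 0 lets simple_strtol() accept decimal or 0x-prefixed hex */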
rg_kernel.trace_filter_bitmask[i]|=RTK_RG_DEBUG_TRACE_FILTER_DPORT; rg_kernel.trace_filter[i].dport=simple_strtol(split_str, NULL, 0); } else if(strcasecmp(split_str,"REASON")==0) { split_str=strsep(&strptr," "); rg_kernel.trace_filter_bitmask[i]|=RTK_RG_DEBUG_TRACE_FILTER_REASON; rg_kernel.trace_filter[i].reason=simple_strtol(split_str, NULL, 0); } else if(strcasecmp(split_str,"CVLAN")==0) { split_str=strsep(&strptr," "); rg_kernel.trace_filter_bitmask[i]|=RTK_RG_DEBUG_TRACE_FILTER_CVLAN; rg_kernel.trace_filter[i].cvlanid=simple_strtol(split_str, NULL, 0); } else if(strcasecmp(split_str,"SVLAN")==0) { split_str=strsep(&strptr," "); rg_kernel.trace_filter_bitmask[i]|=RTK_RG_DEBUG_TRACE_FILTER_SVLAN; rg_kernel.trace_filter[i].svlanid=simple_strtol(split_str, NULL, 0); } else if(strcasecmp(split_str,"PPPOESSID")==0) { split_str=strsep(&strptr," "); rg_kernel.trace_filter_bitmask[i]|=RTK_RG_DEBUG_TRACE_FILTER_PPPOESESSIONID; rg_kernel.trace_filter[i].sessionid=simple_strtol(split_str, NULL, 0); } else if(strcasecmp(split_str,"TIMES")==0) { int32 showNumberOfTimes; split_str=strsep(&strptr," "); rg_kernel.trace_filter_bitmask[i]|=RTK_RG_DEBUG_TRACE_FILTER_SHOWNUMBEROFTIMES; showNumberOfTimes=simple_strtol(split_str, NULL, 0); if(showNumberOfTimes<=0) showNumberOfTimes=1; rg_kernel.trace_filter[i].showNumberOfTimes=showNumberOfTimes; rg_kernel.trace_filter[i].showNumberOfTimesCounter=0; } else if(strcasecmp(split_str,"SIP6")==0) { split_str=strsep(&strptr," "); rg_kernel.trace_filter_bitmask[i]|=RTK_RG_DEBUG_TRACE_FILTER_V6SIP; in6_pton(split_str,-1,&(rg_kernel.trace_filter[i].sipv6[0]),-1,NULL); } else if(strcasecmp(split_str,"DIP6")==0) { split_str=strsep(&strptr," "); rg_kernel.trace_filter_bitmask[i]|=RTK_RG_DEBUG_TRACE_FILTER_V6DIP; in6_pton(split_str,-1,&(rg_kernel.trace_filter[i].dipv6[0]),-1,NULL); } if (strptr==NULL) break; } for(i=0 ;i<TRACFILTER_MAX ;i++) { if(rg_kernel.trace_filter_bitmask[i] && rg_kernel.trace_filter_bitmask[i]!=RTK_RG_DEBUG_TRACE_FILTER_SHOWNUMBEROFTIMES) { //if any filter rule ,enable this rule (ingore only FILTER_SHOWNUMBEROFTIMES rule) rg_kernel.traceFilterRuleMask|=(1<<i); } else { //disable rule[i] rg_kernel.traceFilterRuleMask&=(~(1<<i)); } } if(rg_kernel.traceFilterRuleMask==0)//no any rule enable rule 0 rg_kernel.traceFilterRuleMask=0x1; rtk_rg_traceFilterShow(NULL,NULL); return count; } return -EFAULT; } int _rtk_rg_strangePacketDrop_change( struct file *filp, const char *buff,unsigned long len, void *data ) { char *tmpbuf; char *strptr=NULL; tmpbuf=&rg_kernel.proc_parsing_buf[0]; if (buff && !copy_from_user(tmpbuf, buff, len)) { tmpbuf[len] = '\0'; strptr=tmpbuf; } if(strncmp(strptr, "1",1) == 0) rg_db.systemGlobal.strangeSA_drop=RG_HWNAT_ENABLE; else if(strncmp(strptr, "0",1) == 0) rg_db.systemGlobal.strangeSA_drop=RG_HWNAT_DISABLE; return len; } int _rtk_rg_strangePacketDrop_state(struct seq_file *s, void *v) { int len=0; if(rg_db.systemGlobal.strangeSA_drop==RG_HWNAT_ENABLE) { PROC_PRINTF("strangeSA_drop=1, DROP!\n"); } else if(rg_db.systemGlobal.strangeSA_drop==RG_HWNAT_DISABLE) { PROC_PRINTF("strangeSA_drop=0, PERMIT!\n"); } return len; } #if defined(CONFIG_APOLLO) int _rtk_rg_portBindingByProtocal_change( struct file *filp, const char *buff,unsigned long len, void *data ) { char *tmpbuf; char *strptr=NULL; tmpbuf=&rg_kernel.proc_parsing_buf[0]; if (buff && !copy_from_user(tmpbuf, buff, len)) { tmpbuf[len] = '\0'; strptr=tmpbuf; } if(strncmp(strptr, "0",1) == 0){ rg_db.systemGlobal.port_binding_by_protocal=0; rtlglue_printf("IPv4+IPv6 both 
bridge\n"); } else if(strncmp(strptr, "1",1) == 0){ rg_db.systemGlobal.port_binding_by_protocal=1; rtlglue_printf("IPv4 Routing, IPv6 Bridge\n"); } else if(strncmp(strptr, "2",1) == 0){ rg_db.systemGlobal.port_binding_by_protocal=2; rtlglue_printf("IPv6 Routing, IPv4 Bridge\n"); }else{ rtlglue_printf("echo X > /proc/rg/portBindingByProtocal 0:IPv4+IPv6 both bridge 1:IPv4 Routing, IPv6 Bridge 2:IPv6 Routing, IPv4 Bridge \n"); } return len; } int _rtk_rg_portBindingByProtocal_state(struct seq_file *s, void *v) { int len=0; if(rg_db.systemGlobal.port_binding_by_protocal==0) { PROC_PRINTF("Port binding valid while IPv4+IPv6\n"); } else if(rg_db.systemGlobal.port_binding_by_protocal==1) { PROC_PRINTF("Port binding valid while IPv4\n"); } else if(rg_db.systemGlobal.port_binding_by_protocal==2) { PROC_PRINTF("Port binding valid while IPv6\n"); } return len; } int _rtk_rg_portBindingByProtocal_filter_vid_for_downstream_set( struct file *filp, const char *buff,unsigned long len, void *data ) { rg_db.systemGlobal.port_binding_by_protocal_filter_vid=_rtk_rg_pasring_proc_string_to_integer(buff,len); if(rg_db.systemGlobal.port_binding_by_protocal == 0){ rtlglue_printf("IPv4+IPv6 both bridge, this vid have no meaning.\n"); } else if(rg_db.systemGlobal.port_binding_by_protocal == 1){ rtlglue_printf("IPv4 Routing, IPv6 Bridge. Filter downstream IPCP with vid-%d (vid-0 means untag)\n",rg_db.systemGlobal.port_binding_by_protocal_filter_vid); } else if(rg_db.systemGlobal.port_binding_by_protocal ==2){ rtlglue_printf("IPv6 Routing, IPv4 Bridge. Filter downstream IP6CP with vid-%d (vid-0 means untag)\n",rg_db.systemGlobal.port_binding_by_protocal_filter_vid); }else{ rtlglue_printf("echo X > /proc/rg/portBindingByProtocal 0:IPv4+IPv6 both bridge 1:IPv4 Routing, IPv6 Bridge 2:IPv6 Routing, IPv4 Bridge \n"); } return len; } int _rtk_rg_portBindingByProtocal_filter_vid_for_downstream_get(struct seq_file *s, void *v) { int len=0; if(rg_db.systemGlobal.port_binding_by_protocal == 0){ rtlglue_printf("IPv4+IPv6 both bridge, this vid have no meaning.\n"); } else if(rg_db.systemGlobal.port_binding_by_protocal == 1){ rtlglue_printf("IPv4 Routing, IPv6 Bridge. Filter downstream IPCP with vid-%d (vid-0 means untag)\n",rg_db.systemGlobal.port_binding_by_protocal_filter_vid); } else if(rg_db.systemGlobal.port_binding_by_protocal ==2){ rtlglue_printf("IPv6 Routing, IPv4 Bridge. 
Filter downstream IP6CP with vid-%d (vid-0 means untag)\n",rg_db.systemGlobal.port_binding_by_protocal_filter_vid); }else{ rtlglue_printf("echo X > /proc/rg/portBindingByProtocal 0:IPv4+IPv6 both bridge 1:IPv4 Routing, IPv6 Bridge 2:IPv6 Routing, IPv4 Bridge \n"); } return len; } #endif // defined(CONFIG_APOLLO) int32 _rtk_rg_proc_l4ways_dump(struct seq_file *s, void *v) { int i; int j=0; if(rg_db.systemGlobal.enableL4MaxWays!=1) { rtlglue_printf("disabled!\n"); return 0; } rtlglue_printf(">>Inbound NAPT max ways:\n"); for(i=0;i<MAX_NAPT_IN_HASH_SIZE;i++) { if(rg_db.systemGlobal.l4InboundMaxWays[i]!=0) { if((j%10)==9) rtlglue_printf("%03d[%d]\n",i,rg_db.systemGlobal.l4InboundMaxWays[i]); else rtlglue_printf("%03d[%d] ",i,rg_db.systemGlobal.l4InboundMaxWays[i]); j++; } } rtlglue_printf("\n\n>>Outbound NAPT max ways:\n"); j=0; for(i=0;i<MAX_NAPT_OUT_HASH_SIZE;i++) { if(rg_db.systemGlobal.l4OutboundMaxWays[i]!=0) { if((j%10)==9) rtlglue_printf("%03d[%d]\n",i,rg_db.systemGlobal.l4OutboundMaxWays[i]); else rtlglue_printf("%03d[%d] ",i,rg_db.systemGlobal.l4OutboundMaxWays[i]); j++; } } rtlglue_printf("\n\n"); return 0; } int _rtk_rg_proc_l4ways_set(struct file *filp, const char *buff,unsigned long len, void *data ) { rg_db.systemGlobal.enableL4MaxWays=_rtk_rg_pasring_proc_string_to_integer(buff,len); if(rg_db.systemGlobal.enableL4MaxWays==1) { memset(rg_db.systemGlobal.l4InboundMaxWays,0,sizeof(rg_db.systemGlobal.l4InboundMaxWays)); memset(rg_db.systemGlobal.l4OutboundMaxWays,0,sizeof(rg_db.systemGlobal.l4OutboundMaxWays)); rtlglue_printf("Enable and reset L4 hash max ways statistic!\n"); } return len; } int32 _rtk_rg_proc_l4waysList_get(struct seq_file *s, void *v) { int len=0; PROC_PRINTF("%d\n",rg_db.systemGlobal.enableL4WaysList); return len; } int _rtk_rg_proc_l4waysList_set(struct file *filp, const char *buff,unsigned long len, void *data ) { rg_db.systemGlobal.enableL4WaysList=_rtk_rg_pasring_proc_string_to_integer(buff,len); _rtk_rg_proc_l4waysList_get(NULL,NULL); return len; } int32 _rtk_rg_proc_l4ChoiceHwIn_get(struct seq_file *s, void *v) { int len=0; PROC_PRINTF("%d\n",rg_db.systemGlobal.enableL4ChoiceHwIn); return len; } int _rtk_rg_proc_l4ChoiceHwIn_set(struct file *filp, const char *buff,unsigned long len, void *data ) { #if defined(CONFIG_RG_FLOW_BASED_PLATFORM) if(rg_db.systemGlobal.fbGlobalState[FB_GLOBAL_PATH5_SKIP_SVID]==DISABLED || rg_db.systemGlobal.fbGlobalState[FB_GLOBAL_PATH5_SKIP_CVID]==DISABLED) { rtlglue_printf("l4ChoiceHwIn does not support when flow uses svid/cvid hash\n"); return len; } #endif rg_db.systemGlobal.enableL4ChoiceHwIn=_rtk_rg_pasring_proc_string_to_integer(buff,len); _rtk_rg_proc_l4ChoiceHwIn_get(NULL,NULL); return len; } int32 _rtk_rg_proc_tcpDoNotDelWhenRstFin_get(struct seq_file *s, void *v) { int len=0; PROC_PRINTF("%d\n",rg_db.systemGlobal.tcpDoNotDelWhenRstFin); return len; } int _rtk_rg_proc_tcpDoNotDelWhenRstFin_set(struct file *filp, const char *buff,unsigned long len, void *data ) { rg_db.systemGlobal.tcpDoNotDelWhenRstFin=_rtk_rg_pasring_proc_string_to_integer(buff,len); _rtk_rg_proc_tcpDoNotDelWhenRstFin_get(NULL,NULL); return len; } int32 _rtk_rg_proc_tcpSwapFinDelRst_get(struct seq_file *s, void *v) { int len=0; PROC_PRINTF("%d\n",rg_db.systemGlobal.tcpSwapFinDelRst); return len; } int _rtk_rg_proc_tcpSwapFinDelRst_set(struct file *filp, const char *buff,unsigned long len, void *data ) { rg_db.systemGlobal.tcpSwapFinDelRst=_rtk_rg_pasring_proc_string_to_integer(buff,len); _rtk_rg_proc_tcpSwapFinDelRst_get(NULL,NULL); return len; } int32 
_rtk_rg_proc_tcpDisableStatefulTracking_get(struct seq_file *s, void *v) { int len=0; PROC_PRINTF("%s\n",rg_db.systemGlobal.tcpDisableStatefulTracking==1?"Disable tracking.":"Enable tracking."); return len; } int _rtk_rg_proc_tcpDisableStatefulTracking_set(struct file *filp, const char *buff,unsigned long len, void *data ) { rg_db.systemGlobal.tcpDisableStatefulTracking=_rtk_rg_pasring_proc_string_to_integer(buff,len); rtlglue_printf("%s\n",rg_db.systemGlobal.tcpDisableStatefulTracking==1?"Disable tracking.":"Enable tracking."); return len; } int32 _rtk_rg_proc_fwdStatistic_get(struct seq_file *s, void *v) { int i,j; int len=0; PROC_PRINTF("%d\n",rg_db.systemGlobal.fwdStatistic); if(rg_db.systemGlobal.fwdStatistic==0) return 0; PROC_PRINTF("Ingress PORT\t"); for(i=0;i<RTK_RG_EXT_PORT2;i++) PROC_PRINTF("%8d",i); PROC_PRINTF("\n========================================================================================"); PROC_PRINTF("\nBC\t\t"); for(i=0;i<RTK_RG_EXT_PORT2;i++) PROC_PRINTF("%8d",rg_db.systemGlobal.statistic.perPortCnt_broadcast[i]); PROC_PRINTF("\nMC\t\t"); for(i=0;i<RTK_RG_EXT_PORT2;i++) PROC_PRINTF("%8d",rg_db.systemGlobal.statistic.perPortCnt_multicast[i]); PROC_PRINTF("\nUC\t\t"); for(i=0;i<RTK_RG_EXT_PORT2;i++) PROC_PRINTF("%8d",rg_db.systemGlobal.statistic.perPortCnt_unicast[i]); PROC_PRINTF("\n----------------------------------------------------------------------------------------"); PROC_PRINTF("\nARP Request\t"); for(i=0;i<RTK_RG_EXT_PORT2;i++) PROC_PRINTF("%8d",rg_db.systemGlobal.statistic.perPortCnt_ARP_request[i]); PROC_PRINTF("\nARP Reply\t"); for(i=0;i<RTK_RG_EXT_PORT2;i++) PROC_PRINTF("%8d",rg_db.systemGlobal.statistic.perPortCnt_ARP_reply[i]); PROC_PRINTF("\nNB Sol\t\t"); for(i=0;i<RTK_RG_EXT_PORT2;i++) PROC_PRINTF("%8d",rg_db.systemGlobal.statistic.perPortCnt_NB_solicitation[i]); PROC_PRINTF("\nNB Adv\t\t"); for(i=0;i<RTK_RG_EXT_PORT2;i++) PROC_PRINTF("%8d",rg_db.systemGlobal.statistic.perPortCnt_NB_advertisement[i]); PROC_PRINTF("\nUDP\t\t"); for(i=0;i<RTK_RG_EXT_PORT2;i++) PROC_PRINTF("%8d",rg_db.systemGlobal.statistic.perPortCnt_UDP[i]); PROC_PRINTF("\nTCP\t\t"); for(i=0;i<RTK_RG_EXT_PORT2;i++) PROC_PRINTF("%8d",rg_db.systemGlobal.statistic.perPortCnt_TCP[i]); PROC_PRINTF("\n----------------------------------------------------------------------------------------"); PROC_PRINTF("\nSYN\t\t"); for(i=0;i<RTK_RG_EXT_PORT2;i++) PROC_PRINTF("%8d",rg_db.systemGlobal.statistic.perPortCnt_SYN[i]); PROC_PRINTF("\nSYN_ACK\t\t"); for(i=0;i<RTK_RG_EXT_PORT2;i++) PROC_PRINTF("%8d",rg_db.systemGlobal.statistic.perPortCnt_SYN_ACK[i]); PROC_PRINTF("\nFIN\t\t"); for(i=0;i<RTK_RG_EXT_PORT2;i++) PROC_PRINTF("%8d",rg_db.systemGlobal.statistic.perPortCnt_FIN[i]); PROC_PRINTF("\nFIN_ACK\t\t"); for(i=0;i<RTK_RG_EXT_PORT2;i++) PROC_PRINTF("%8d",rg_db.systemGlobal.statistic.perPortCnt_FIN_ACK[i]); PROC_PRINTF("\nFIN_PSH_ACK\t"); for(i=0;i<RTK_RG_EXT_PORT2;i++) PROC_PRINTF("%8d",rg_db.systemGlobal.statistic.perPortCnt_FIN_PSH_ACK[i]); PROC_PRINTF("\nRST\t\t"); for(i=0;i<RTK_RG_EXT_PORT2;i++) PROC_PRINTF("%8d",rg_db.systemGlobal.statistic.perPortCnt_RST[i]); PROC_PRINTF("\nRST_ACK\t\t"); for(i=0;i<RTK_RG_EXT_PORT2;i++) PROC_PRINTF("%8d",rg_db.systemGlobal.statistic.perPortCnt_RST_ACK[i]); PROC_PRINTF("\nACK\t\t"); for(i=0;i<RTK_RG_EXT_PORT2;i++) PROC_PRINTF("%8d",rg_db.systemGlobal.statistic.perPortCnt_ACK[i]); PROC_PRINTF("\n----------------------------------------------------------------------------------------"); PROC_PRINTF("\nSlowpath\t"); for(i=0;i<RTK_RG_EXT_PORT2;i++) 
PROC_PRINTF("%8d",rg_db.systemGlobal.statistic.perPortCnt_slowPath[i]); PROC_PRINTF("\nShortcut\t"); for(i=0;i<RTK_RG_EXT_PORT2;i++) PROC_PRINTF("%8d",rg_db.systemGlobal.statistic.perPortCnt_shortcut[i]); PROC_PRINTF("\nShortcut_v6\t"); for(i=0;i<RTK_RG_EXT_PORT2;i++) PROC_PRINTF("%8d",rg_db.systemGlobal.statistic.perPortCnt_shortcutv6[i]); PROC_PRINTF("\n----------------------------------------------------------------------------------------"); PROC_PRINTF("\nL2Fwd\t\t"); for(i=0;i<RTK_RG_EXT_PORT2;i++) PROC_PRINTF("%8d",rg_db.systemGlobal.statistic.perPortCnt_L2FWD[i]); PROC_PRINTF("\nIPv4 L3Fwd\t"); for(i=0;i<RTK_RG_EXT_PORT2;i++) PROC_PRINTF("%8d",rg_db.systemGlobal.statistic.perPortCnt_IPv4_L3FWD[i]); PROC_PRINTF("\nIPv6 L3Fwd\t"); for(i=0;i<RTK_RG_EXT_PORT2;i++) PROC_PRINTF("%8d",rg_db.systemGlobal.statistic.perPortCnt_IPv6_L3FWD[i]); PROC_PRINTF("\nL4Fwd\t\t"); for(i=0;i<RTK_RG_EXT_PORT2;i++) PROC_PRINTF("%8d",rg_db.systemGlobal.statistic.perPortCnt_L4FWD[i]); PROC_PRINTF("\nDrop\t\t"); for(i=0;i<RTK_RG_EXT_PORT2;i++) PROC_PRINTF("%8d",rg_db.systemGlobal.statistic.perPortCnt_Drop[i]); PROC_PRINTF("\nTo PS\t\t"); for(i=0;i<RTK_RG_EXT_PORT2;i++) PROC_PRINTF("%8d",rg_db.systemGlobal.statistic.perPortCnt_ToPS[i]); PROC_PRINTF("\n----------------------------------------------------------------------------------------"); PROC_PRINTF("\nNaptOut LRU\t"); for(i=0;i<RTK_RG_EXT_PORT2;i++) PROC_PRINTF("%8d",rg_db.systemGlobal.statistic.perPortCnt_naptOutLRU[i]); PROC_PRINTF("\nNaptIn LRU\t"); for(i=0;i<RTK_RG_EXT_PORT2;i++) PROC_PRINTF("%8d",rg_db.systemGlobal.statistic.perPortCnt_naptInLRU[i]); PROC_PRINTF("\nShortcut LRU\t"); for(i=0;i<RTK_RG_EXT_PORT2;i++) PROC_PRINTF("%8d",rg_db.systemGlobal.statistic.perPortCnt_v4ShortcutLRU[i]); PROC_PRINTF("\nShortcut_v6 LRU\t"); for(i=0;i<RTK_RG_EXT_PORT2;i++) PROC_PRINTF("%8d",rg_db.systemGlobal.statistic.perPortCnt_v6ShortcutLRU[i]); PROC_PRINTF("\n----------------------------------------------------------------------------------------"); PROC_PRINTF("\nDynAlloc\t"); for(i=0;i<RTK_RG_EXT_PORT2;i++) PROC_PRINTF("%8d",rg_db.systemGlobal.statistic.perPortCnt_skb_alloc[i]); PROC_PRINTF("\nUcPreAlloc\t"); for(i=0;i<RTK_RG_EXT_PORT2;i++) PROC_PRINTF("%8d",rg_db.systemGlobal.statistic.perPortCnt_skb_pre_alloc_for_uc[i]); PROC_PRINTF("\nBcMcPreAlc\t"); for(i=0;i<RTK_RG_EXT_PORT2;i++) PROC_PRINTF("%8d",rg_db.systemGlobal.statistic.perPortCnt_skb_pre_alloc_for_mc_bc[i]); PROC_PRINTF("\nNIC TX\t\t"); for(i=0;i<RTK_RG_EXT_PORT2;i++) PROC_PRINTF("%8d",rg_db.systemGlobal.statistic.perPortCnt_NIC_TX[i]); PROC_PRINTF("\nWIFI TX\t\t"); for(i=0;i<RTK_RG_EXT_PORT2;i++) PROC_PRINTF("%8d",rg_db.systemGlobal.statistic.perPortCnt_WIFI_TX[i]); PROC_PRINTF("\nFree Skb\t"); for(i=0;i<RTK_RG_EXT_PORT2;i++) PROC_PRINTF("%8d",rg_db.systemGlobal.statistic.perPortCnt_skb_free[i]); PROC_PRINTF("\nTotalGet\t"); for(i=0;i<RTK_RG_EXT_PORT2;i++) PROC_PRINTF("%8d",rg_db.systemGlobal.statistic.perPortCnt_broadcast[i] +rg_db.systemGlobal.statistic.perPortCnt_multicast[i] +rg_db.systemGlobal.statistic.perPortCnt_unicast[i] +rg_db.systemGlobal.statistic.perPortCnt_skb_alloc[i] +rg_db.systemGlobal.statistic.perPortCnt_skb_pre_alloc_for_uc[i] +rg_db.systemGlobal.statistic.perPortCnt_skb_pre_alloc_for_mc_bc[i]); PROC_PRINTF("\nTotalPut\t"); for(i=0;i<RTK_RG_EXT_PORT2;i++) PROC_PRINTF("%8d",rg_db.systemGlobal.statistic.perPortCnt_Drop[i] +rg_db.systemGlobal.statistic.perPortCnt_ToPS[i] +rg_db.systemGlobal.statistic.perPortCnt_NIC_TX[i] +rg_db.systemGlobal.statistic.perPortCnt_WIFI_TX[i] 
+rg_db.systemGlobal.statistic.perPortCnt_skb_free[i]); PROC_PRINTF("\n----------------------------------------------------------------------------------------"); for(j=0;j<256;j++) { int show=0; for(i=0;i<RTK_RG_EXT_PORT2;i++) if(rg_db.systemGlobal.statistic.perPortCnt_Reason[j][i]!=0) { show=1; break; } if(show==1) { PROC_PRINTF("\nRSN:%d\t\t",j); for(i=0;i<RTK_RG_EXT_PORT2;i++) PROC_PRINTF("%8d",rg_db.systemGlobal.statistic.perPortCnt_Reason[j][i]); } } PROC_PRINTF("\n"); return len; } int _rtk_rg_proc_fwdStatistic_set(struct file *filp, const char *buff,unsigned long len, void *data ) { rg_db.systemGlobal.fwdStatistic=_rtk_rg_pasring_proc_string_to_integer(buff,len); memset(&rg_db.systemGlobal.statistic,0,sizeof(rg_db.systemGlobal.statistic)); rtlglue_printf("%d\n",rg_db.systemGlobal.fwdStatistic); return len; } int32 _rtk_rg_proc_tcpShortTimeoutHouseKeep_get(struct seq_file *s, void *v) { int len=0; PROC_PRINTF("%d jiffies\n",rg_db.systemGlobal.tcpShortTimeoutHousekeepJiffies); if(rg_db.systemGlobal.tcpShortTimeoutHousekeepJiffies!=0) { int i=rg_db.tcpShortTimeoutFreedIdx; u32 now=jiffies&0xffffffff; PROC_PRINTF(" Idx naptIdx jiffies (now=%u)\n",now); PROC_PRINTF("======== ======= ===============\n"); while(i!=rg_db.tcpShortTimeoutRecycleIdx) { PROC_PRINTF("%8d%8d%16u\n",i,rg_db.tcpShortTimeoutRing[i].naptOutIdx, rg_db.tcpShortTimeoutRing[i].timeoutJiffies); i++; if(i>=MAX_NAPT_OUT_SW_TABLE_SIZE) i=0; } } return len; } void _rtk_rg_tcpShortTimeoutHouseKeepingTimerFunc(unsigned long task_priv) { while(rg_db.tcpShortTimeoutFreedIdx!=rg_db.tcpShortTimeoutRecycleIdx) { u32 now=jiffies&0xffffffff; u32 entryTimeout=rg_db.tcpShortTimeoutRing[rg_db.tcpShortTimeoutFreedIdx].timeoutJiffies; if((now>=entryTimeout)|| ((now<=0x10000)&&(entryTimeout>=0xffff0000))) { int outIdx=rg_db.tcpShortTimeoutRing[rg_db.tcpShortTimeoutFreedIdx].naptOutIdx; if(rg_db.naptOut[outIdx].state>=FIRST_FIN) //we can't delete this flow when this flow re-connect. 
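//Only entries still in a teardown state (>=FIRST_FIN) are deleted here; if the tuple was
//re-used and the connection went back to established, the flow is kept and only its ring
//slot is recycled below.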
{ DEBUG("_rtk_rg_tcpShortTimeoutHouseKeepingTimerFunc index=%d",outIdx); assert_ok(rtk_rg_apollo_naptConnection_del(outIdx)); } rg_db.tcpShortTimeoutFreedIdx++; if(rg_db.tcpShortTimeoutFreedIdx>=MAX_NAPT_OUT_SW_TABLE_SIZE) rg_db.tcpShortTimeoutFreedIdx=0; } else break; } mod_timer(&rg_kernel.fwdEngineTcpShortTimeoutHouseKeepingTimer, jiffies+(rg_db.systemGlobal.tcpShortTimeoutHousekeepJiffies)); } void _rtk_rg_tcpShortTimeoutHouseKeep_set(uint32 jiffies_interval) { rg_db.systemGlobal.tcpShortTimeoutHousekeepJiffies=jiffies_interval; if(timer_pending(&rg_kernel.fwdEngineTcpShortTimeoutHouseKeepingTimer)) del_timer(&rg_kernel.fwdEngineTcpShortTimeoutHouseKeepingTimer); if(rg_db.systemGlobal.tcpShortTimeoutHousekeepJiffies!=0) { rg_kernel.fwdEngineTcpShortTimeoutHouseKeepingTimer.function = _rtk_rg_tcpShortTimeoutHouseKeepingTimerFunc; init_timer(&rg_kernel.fwdEngineTcpShortTimeoutHouseKeepingTimer); mod_timer(&rg_kernel.fwdEngineTcpShortTimeoutHouseKeepingTimer, jiffies+(rg_db.systemGlobal.tcpShortTimeoutHousekeepJiffies)); } } int _rtk_rg_proc_tcpShortTimeoutHouseKeep_set(struct file *filp, const char *buff,unsigned long len, void *data ) { uint32 jiffies_interval; jiffies_interval=_rtk_rg_pasring_proc_string_to_integer(buff,len); rtlglue_printf("%d jiffies(%d/%d sec)\n",jiffies_interval,jiffies_interval,CONFIG_HZ); _rtk_rg_tcpShortTimeoutHouseKeep_set(jiffies_interval); return len; } int32 _rtk_rg_proc_congestionCtrl_get(struct seq_file *s, void *v) { int len=0; PROC_PRINTF("Interval: %d usec(0: disable)\n",rg_db.systemGlobal.congestionCtrlIntervalMicroSecs); if(rg_db.systemGlobal.congestionCtrlIntervalMicroSecs!=0) { int i,j; PROC_PRINTF("EnablePortMask: 0x%x\n",rg_db.systemGlobal.congestionCtrlPortMask); for(i=0;i<MAX_CONGESTION_CTRL_PORTS;i++) { PROC_PRINTF("SendBytesPerSec[%d]: %d\n",i,rg_db.systemGlobal.congestionCtrlSendBytesPerSec[i]); PROC_PRINTF("SendBytesInterval[%d]: %d\n",i,rg_db.systemGlobal.congestionCtrlSendBytesInterval[i]); } PROC_PRINTF("SendTimesPerPort: %d\n",rg_db.systemGlobal.congestionCtrlSendTimesPerPort); PROC_PRINTF("SendRemainderInNextGap: %d\n",rg_db.systemGlobal.congestionCtrlSendRemainderInNextGap); PROC_PRINTF("InboundAckToHighQueue: %d\n",rg_db.systemGlobal.congestionCtrlInboundAckToHighQueue); for(j=1;j>=0;j--) { PROC_PRINTF("%s:\n",(j==1)?"HighQueue":"LowQueue"); for(i=0;i<MAX_CONGESTION_CTRL_PORTS;i++) { if(rg_db.congestionCtrlMaxQueueCounter[j][i]!=0) { PROC_PRINTF("[%d]SendIdx: %d\n",i,rg_db.congestionCtrlSendIdx[j][i]); PROC_PRINTF("[%d]QueueIdx: %d\n",i,rg_db.congestionCtrlQueueIdx[j][i]); PROC_PRINTF("[%d]SendRemainder: %d\n",i,rg_db.congestionCtrlSendedRemainder[i]); PROC_PRINTF("[%d]QueueCounter: %d(MAX:%d)\n",i,rg_db.congestionCtrlQueueCounter[j][i],rg_db.congestionCtrlMaxQueueCounter[j][i]); PROC_PRINTF("[%d]FullDrop: %d\n",i,rg_db.congestionCtrlFullDrop[j][i]); } } } } return len; } int _rtk_rg_proc_congestionCtrlHwTimerFunc_set(struct file *filp, const char *buff,unsigned long len, void *data ) { rtk_rg_congestionCtrlRing_t *ccr; int i,j; rg_db.systemGlobal.congestionCtrlIntervalMicroSecs=_rtk_rg_pasring_proc_string_to_integer(buff,len); rtlglue_printf("%d usec\n",rg_db.systemGlobal.congestionCtrlIntervalMicroSecs); for(j=1;j>=0;j--) { for(i=0;i<MAX_CONGESTION_CTRL_PORTS;i++) { while(1) { if(rg_db.congestionCtrlQueueCounter[j][i]==0) break; ccr=&rg_db.congestionCtrlRing[j][i][rg_db.congestionCtrlSendIdx[j][i]]; if(rg_db.systemGlobal.fwdStatistic) { rg_db.systemGlobal.statistic.perPortCnt_NIC_TX[rg_db.pktHdr->ingressPort]++; } 
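//Drain whatever is still queued for this port/queue: each pending skb is sent with the
//TX descriptor and mask stored when it was queued, before the interval is re-armed.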
re8686_send_with_txInfo_and_mask(ccr->pSkb,&ccr->ptxInfo,0,&ccr->ptxInfoMsk); rg_db.congestionCtrlQueueCounter[j][i]--; rg_db.congestionCtrlSendIdx[j][i]++; if(rg_db.congestionCtrlSendIdx[j][i]==MAX_CONGESTION_CTRL_RING_SIZE) rg_db.congestionCtrlSendIdx[j][i]=0; } } } if(rg_db.systemGlobal.congestionCtrlIntervalMicroSecs!=0) { for(i=0;i<MAX_CONGESTION_CTRL_PORTS;i++) { if(rg_db.systemGlobal.congestionCtrlSendBytesPerSec[i]==0) { if(i==RTK_RG_PORT_PON) rg_db.systemGlobal.congestionCtrlSendBytesPerSec[i]=125000000; //1000Mbps else rg_db.systemGlobal.congestionCtrlSendBytesPerSec[i]=12500000; //100Mbps } rg_db.systemGlobal.congestionCtrlSendBytesInterval[i]=rg_db.systemGlobal.congestionCtrlSendBytesPerSec[i]/100000*rg_db.systemGlobal.congestionCtrlIntervalMicroSecs/10; } if(rg_db.systemGlobal.congestionCtrlSendTimesPerPort==0) rg_db.systemGlobal.congestionCtrlSendTimesPerPort=1; //rg_db.congestionCtrlDropSynThreshold for(j=1;j>=0;j--) { for(i=0;i<MAX_CONGESTION_CTRL_PORTS;i++) { rg_db.congestionCtrlQueueIdx[j][i]=0; rg_db.congestionCtrlSendIdx[j][i]=0; rg_db.congestionCtrlQueueCounter[j][i]=0; rg_db.congestionCtrlMaxQueueCounter[j][i]=0; rg_db.congestionCtrlFullDrop[j][i]=0; rg_db.congestionCtrlSendedRemainder[i]=0; } } memset(rg_db.congestionCtrlRing,0,sizeof(rg_db.congestionCtrlRing)); memset(&rg_db.systemGlobal.congestionCtrlTasklets, 0, sizeof(struct tasklet_struct)); rg_db.systemGlobal.congestionCtrlTasklets.func=(void (*)(unsigned long))_rtk_rg_congestionCtrlTimerFunc; request_irq(BSP_TC2_IRQ, (irq_handler_t)rtk_rg_timer_interrupt, IRQF_DISABLED, "rtk_rg_timer", NULL); //enable Timer2 REG32(TC2INT)=0x10000; REG32(TC2CTL)=200; REG32(TC2DATA)=rg_db.systemGlobal.congestionCtrlIntervalMicroSecs; REG32(TC2CTL)|=0x11000000; //enable Timer(interrupt mode) REG32(TC2INT)|=0x100000; //enable interrupt } else { //disable timer REG32(TC2INT)=0x10000; REG32(TC2CTL)=200; REG32(TC2DATA)=1; REG32(TC2CTL)|=0x10000000; //enable Timer(counter mode) } return len; } int _rtk_rg_proc_congestionCtrlSendTimesPerPort_set(struct file *filp, const char *buff,unsigned long len, void *data ) { rg_db.systemGlobal.congestionCtrlSendTimesPerPort=_rtk_rg_pasring_proc_string_to_integer(buff,len); rtlglue_printf("%d\n",rg_db.systemGlobal.congestionCtrlSendTimesPerPort); return len; } int _rtk_rg_proc_congestionCtrlSendBytePerSec_set(struct file *filp, const char *buff,unsigned long len, void *data ) { int i; int sendBytePerSec=_rtk_rg_pasring_proc_string_to_integer(buff,len); for(i=0;i<MAX_CONGESTION_CTRL_PORTS;i++) { if(i!=RTK_RG_PORT_PON) { rg_db.systemGlobal.congestionCtrlSendBytesPerSec[i]=sendBytePerSec; rg_db.systemGlobal.congestionCtrlSendBytesInterval[i]=rg_db.systemGlobal.congestionCtrlSendBytesPerSec[i]/100000*rg_db.systemGlobal.congestionCtrlIntervalMicroSecs/10; } } rtlglue_printf("%d(default: 12500000)\n",rg_db.systemGlobal.congestionCtrlSendBytesPerSec[0]); return len; } int _rtk_rg_proc_congestionCtrlSendBytePerSecForWan_set(struct file *filp, const char *buff,unsigned long len, void *data ) { int sendBytePerSec=_rtk_rg_pasring_proc_string_to_integer(buff,len); rg_db.systemGlobal.congestionCtrlSendBytesPerSec[RTK_RG_PORT_PON]=sendBytePerSec; rg_db.systemGlobal.congestionCtrlSendBytesInterval[RTK_RG_PORT_PON]=rg_db.systemGlobal.congestionCtrlSendBytesPerSec[RTK_RG_PORT_PON]/100000*rg_db.systemGlobal.congestionCtrlIntervalMicroSecs/10; rtlglue_printf("%d(default: 125000000)\n",rg_db.systemGlobal.congestionCtrlSendBytesPerSec[RTK_RG_PORT_PON]); return len; } int _rtk_rg_proc_congestionCtrlPortMask_set(struct 
file *file, const char *buffer, unsigned long count, void *data) { unsigned char tmpBuf[16] = {0}; int len = (count > 15) ? 15 : count; if (buffer && !copy_from_user(tmpBuf, buffer, len)) { rg_db.systemGlobal.congestionCtrlPortMask=simple_strtoul(tmpBuf, NULL, 16); rtlglue_printf("0x%x\n",rg_db.systemGlobal.congestionCtrlPortMask); return count; } return -EFAULT; } int _rtk_rg_proc_congestionCtrlInboundAckToHighQueue_set(struct file *filp, const char *buff,unsigned long len, void *data ) { rg_db.systemGlobal.congestionCtrlInboundAckToHighQueue=_rtk_rg_pasring_proc_string_to_integer(buff,len); rtlglue_printf("%d\n",rg_db.systemGlobal.congestionCtrlInboundAckToHighQueue); return len; } int _rtk_rg_proc_congestionCtrlSendRemainderInNextGap_set(struct file *filp, const char *buff,unsigned long len, void *data ) { int i; rg_db.systemGlobal.congestionCtrlSendRemainderInNextGap=_rtk_rg_pasring_proc_string_to_integer(buff,len); for(i=0;i<MAX_CONGESTION_CTRL_PORTS;i++) { rg_db.congestionCtrlSendedRemainder[i]=0; } rtlglue_printf("%d\n",rg_db.systemGlobal.congestionCtrlSendRemainderInNextGap); return len; } int32 _wanIntf_disable_add_ipv6_linkLocal_state_get(struct seq_file *s, void *v) { int i; int len=0; for(i=0;i<MAX_NETIF_SW_TABLE_SIZE;i++){ if(rg_db.systemGlobal.wanIntf_disable_linkLocal_rsvACL[i]){ PROC_PRINTF("Netif[%d]:force disabled reserved ACL RTK_RG_ACLANDCF_RESERVED_IPV6_INTF%d_LINK_LOCAL_TRAP add to H/W \n",i,i); }else{ PROC_PRINTF("Netif[%d]:do nothing\n",i); } } return len; } void _wanIntf_disable_add_ipv6_linkLocal_usage(void){ rtlglue_printf("usage:\n"); rtlglue_printf("echo [netIfIdx] [Action] > /proc/rg/wanIntf_disable_ipv6_linkLocal_rsvACL\n"); rtlglue_printf("[netIfIdx]: 0~7\n"); rtlglue_printf("[Action]: 0:nothing 1:force disable rsvACL RTK_RG_ACLANDCF_RESERVED_IPV6_INTFn_LINK_LOCAL_TRAP to H/W\n"); } int _wanIntf_disable_add_ipv6_linkLocal_state_set(struct file *filp, const char *buffer,unsigned long count, void *data ) { unsigned char tmpBuf[256] = {0}; int len = (count > 256) ? 
256 : count; int netIfIdx; int action; //0:do nothing 1:force diabled reserved ACL(RTK_RG_ACLANDCF_RESERVED_IPV6_INTFn_LINK_LOCAL_TRAP) for this interface _wanIntf_disable_add_ipv6_linkLocal_usage(); if (buffer && !copy_from_user(tmpBuf, buffer, len)) { //rg_kernel.debug_level=simple_strtoul(tmpBuf, NULL, 16); char *strptr,*split_str; tmpBuf[count] = '\0'; strptr=tmpBuf; split_str=strsep(&strptr," "); netIfIdx = simple_strtol(split_str, NULL, 0); //get intfidx if(netIfIdx<0 || netIfIdx>=MAX_NETIF_SW_TABLE_SIZE){ rtlglue_printf("invalid netIfIdx!\n"); return len; } split_str=strsep(&strptr," "); //get action action = simple_strtol(split_str, NULL, 0); //get action if(action<0 || action>1){ rtlglue_printf("invalid Action!\n"); return len; } rg_db.systemGlobal.wanIntf_disable_linkLocal_rsvACL[netIfIdx] = action; } _rtk_rg_aclAndCfReservedRuleAdd(RTK_RG_ACLANDCF_RESERVED_TAIL_END, NULL); //re-arrange reserved ACL _wanIntf_disable_add_ipv6_linkLocal_state_get(NULL,NULL); return len; } int32 _bridgeWan_drop_by_protocal_get(struct seq_file *s, void *v) { int i; int len=0; for(i=0;i<MAX_NETIF_SW_TABLE_SIZE;i++){ if(rg_db.systemGlobal.bridge_netIfIdx_drop_by_portocal[i]){ PROC_PRINTF("Netif[%d]:%s\n",i,rg_db.systemGlobal.bridge_netIfIdx_drop_by_portocal[i]==1?"Drop IPv6(IPv4 Pass)":"Drop IPv4(IPv46 Pass)"); }else{ PROC_PRINTF("Netif[%d]:Both IPv4/IPv6 Pass\n",i); } } PROC_PRINTF("This proc only valid when netIf is bridgeWan!!!\n"); return len; } void _bridgeWan_drop_by_protocal_usage(void){ rtlglue_printf("usage:\n"); rtlglue_printf("echo [netIfIdx] [Action] > /proc/rg/bridgeWan_drop_by_protocal\n"); rtlglue_printf("[netIfIdx]: 0~7\n"); rtlglue_printf("[Action]: 0:both pass, 1:IPv6 Drop, 2:IPv4 Drop\n"); rtlglue_printf("\nThis proc only valid when netIf is bridgeWan!!!\n"); } int _bridgeWan_drop_by_protocal_set(struct file *filp, const char *buffer,unsigned long count, void *data ) { unsigned char tmpBuf[256] = {0}; int len = (count > 256) ? 256 : count; int netIfIdx; int action; //0:both pass, 1:drop IPv6, 2:drop IPv4 _bridgeWan_drop_by_protocal_usage(); if (buffer && !copy_from_user(tmpBuf, buffer, len)) { //rg_kernel.debug_level=simple_strtoul(tmpBuf, NULL, 16); char *strptr,*split_str; tmpBuf[count] = '\0'; strptr=tmpBuf; split_str=strsep(&strptr," "); netIfIdx = simple_strtol(split_str, NULL, 0); //get intfidx if(netIfIdx<0 || netIfIdx>=MAX_NETIF_SW_TABLE_SIZE){ return -EFAULT; } split_str=strsep(&strptr," "); //get action action = simple_strtol(split_str, NULL, 0); //get action if(action<0 || action>2){ return -EFAULT; } rg_db.systemGlobal.bridge_netIfIdx_drop_by_portocal[netIfIdx] = action; } _bridgeWan_drop_by_protocal_get(NULL,NULL); return len; } int32 _rtk_rg_proc_port_range_used_by_protocolStack_get(struct seq_file *s, void *v) { int len=0; PROC_PRINTF("%d %d\n", rg_db.systemGlobal.lowerBoundPortUsedByPS, rg_db.systemGlobal.upperBoundPortUsedByPS); return len; } int _rtk_rg_proc_port_range_used_by_protocolStack_set(struct file *filp, const char *buffer,unsigned long count, void *data ) { unsigned char tmpBuf[256] = {0}; int len = (count > 256) ? 
256 : count; uint16 lowerBound, upperBound; if (buffer && !copy_from_user(tmpBuf, buffer, len)) { char *strptr, *split_str; tmpBuf[count] = '\0'; strptr=tmpBuf; split_str=strsep(&strptr," "); lowerBound = simple_strtol(split_str, NULL, 0); if(lowerBound>65535 || lowerBound<1024) goto errout; split_str=strsep(&strptr," "); upperBound = simple_strtol(split_str, NULL, 0); if(upperBound>65535 || upperBound<1024) goto errout; if(lowerBound>upperBound) goto errout; rg_db.systemGlobal.lowerBoundPortUsedByPS = lowerBound; rg_db.systemGlobal.upperBoundPortUsedByPS = upperBound; _rtk_rg_proc_port_range_used_by_protocolStack_get(NULL,NULL); } else { errout: rtlglue_printf("usage:\n"); rtlglue_printf("echo [lower bound] [upper bound] > /proc/rg/port_range_used_by_ps\n"); rtlglue_printf("[lower bound]: 1024~65535, lower bound of port range used by protocol stack\n"); rtlglue_printf("[upper bound]: 1024~65535, upper bound of port range used by protocol stack\n"); rtlglue_printf("\nDo not filter ext port used by protocol stack:\n"); rtlglue_printf("echo 0 0 > /proc/rg/port_range_used_by_ps\n"); } return len; } int32 _rtk_rg_proc_log_rx_pcap_get(struct seq_file *s, void *v) { int len=0; PROC_PRINTF("%d, %s log rx pcap.\n",rg_db.systemGlobal.log_rx_pcap,(rg_db.systemGlobal.log_rx_pcap==1)?"enable":"disable"); if((rg_db.systemGlobal.log_rx_pcap==1 && rg_db.systemGlobal.log_rx_pcap_fp==NULL) || (rg_db.systemGlobal.log_rx_pcap!=1 && rg_db.systemGlobal.log_rx_pcap_fp!=NULL)) PROC_PRINTF("Warning!! file descriptor of log rx pcap is error!\n"); return len; } int _rtk_rg_proc_log_rx_pcap_set(struct file *filp, const char *buff,unsigned long len, void *data ) { rg_db.systemGlobal.log_rx_pcap=_rtk_rg_pasring_proc_string_to_integer(buff,len); if(rg_db.systemGlobal.log_rx_pcap==1) { if(rg_db.systemGlobal.log_rx_pcap_fp==NULL) { struct sysinfo sysInfo; si_meminfo(&sysInfo); sysInfo.freeram *= sysInfo.mem_unit; rtlglue_printf("\033[1;33;40mFree ram size: %ld\033[0m\n", sysInfo.freeram); if(sysInfo.freeram >= (unsigned long)(RESERVED_FREE_RAM_SIZE+24/*pcap file header*/+16/*pcap packet header*/+60/*smallest packet size*/)) { rg_db.systemGlobal.log_rx_pcap_fp = filp_open("/var/log_rx_pcap.pcap", O_CREAT|O_WRONLY|O_TRUNC|O_SYNC|O_NONBLOCK, 0644); if(rg_db.systemGlobal.log_rx_pcap_fp!=NULL && !IS_ERR(rg_db.systemGlobal.log_rx_pcap_fp)) { mm_segment_t old_fs; struct file *fp = rg_db.systemGlobal.log_rx_pcap_fp; uint32 wCount; int ret; u8 pcap_file_hdr[24]={0xa1, 0xb2, 0xc3, 0xd4, /* magic number */ 0x00, 0x02, /* major version number */ 0x00, 0x04, /* minor version number */ 0x00, 0x00, 0x00, 0x00, /* GMT to local correction */ 0x00, 0x00, 0x00, 0x00, /* accuracy of timestamps */ 0x00, 0x00, 0xff, 0xff, /* max length of captured packets, in octets */ 0x00, 0x00, 0x00, 0x01}; /* data link type */ rtlglue_printf("Open and clear pcap file successfully.\n"); old_fs = get_fs(); set_fs(KERNEL_DS); wCount=0; while(wCount<sizeof(pcap_file_hdr)) { ret = fp->f_op->write(fp, pcap_file_hdr+wCount, sizeof(pcap_file_hdr)-wCount, &fp->f_pos); if(ret>0) wCount += ret; } set_fs(old_fs); } else { rg_db.systemGlobal.log_rx_pcap = 0; rg_db.systemGlobal.log_rx_pcap_fp = NULL; rtlglue_printf("\033[1;33;41mFail to open pcap file.\033[0m\n"); } } else { rg_db.systemGlobal.log_rx_pcap = 0; rg_db.systemGlobal.log_rx_pcap_fp = NULL; rtlglue_printf("\033[1;33;41mFail to open pcap file since ram size is not enough.\033[0m\n"); } } else { rtlglue_printf("\033[1;33;40mPcap file is already opened, append to it.\033[0m\n"); } } else { 
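//log_rx_pcap disabled: close /var/log_rx_pcap.pcap below if it is still open.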
if(rg_db.systemGlobal.log_rx_pcap_fp!=NULL) { filp_close(rg_db.systemGlobal.log_rx_pcap_fp, NULL); rg_db.systemGlobal.log_rx_pcap_fp = NULL; rtlglue_printf("Close pcap file.\n"); } } _rtk_rg_proc_log_rx_pcap_get(NULL,NULL); return len; } int32 _rtk_rg_proc_log_to_PS_pcap_get(struct seq_file *s, void *v) { int len=0; PROC_PRINTF("%d, %s log to PS pcap.\n",rg_db.systemGlobal.log_to_PS_pcap,(rg_db.systemGlobal.log_to_PS_pcap==1)?"enable":"disable"); if((rg_db.systemGlobal.log_to_PS_pcap==1 && rg_db.systemGlobal.log_to_PS_pcap_fp==NULL) || (rg_db.systemGlobal.log_to_PS_pcap!=1 && rg_db.systemGlobal.log_to_PS_pcap_fp!=NULL)) PROC_PRINTF("Warning!! file descriptor of log to PS pcap is error!\n"); return len; } int _rtk_rg_proc_log_to_PS_pcap_set(struct file *filp, const char *buff,unsigned long len, void *data ) { rg_db.systemGlobal.log_to_PS_pcap=_rtk_rg_pasring_proc_string_to_integer(buff,len); if(rg_db.systemGlobal.log_to_PS_pcap==1) { if(rg_db.systemGlobal.log_to_PS_pcap_fp==NULL) { struct sysinfo sysInfo; si_meminfo(&sysInfo); sysInfo.freeram *= sysInfo.mem_unit; rtlglue_printf("\033[1;33;40mFree ram size: %ld\033[0m\n", sysInfo.freeram); if(sysInfo.freeram >= (unsigned long)(RESERVED_FREE_RAM_SIZE+24/*pcap file header*/+16/*pcap packet header*/+60/*smallest packet size*/)) { rg_db.systemGlobal.log_to_PS_pcap_fp = filp_open("/var/log_to_PS_pcap.pcap", O_CREAT|O_WRONLY|O_TRUNC|O_SYNC|O_NONBLOCK, 0644); if(rg_db.systemGlobal.log_to_PS_pcap_fp!=NULL && !IS_ERR(rg_db.systemGlobal.log_to_PS_pcap_fp)) { mm_segment_t old_fs; struct file *fp = rg_db.systemGlobal.log_to_PS_pcap_fp; uint32 wCount; int ret; u8 pcap_file_hdr[24]={0xa1, 0xb2, 0xc3, 0xd4, /* magic number */ 0x00, 0x02, /* major version number */ 0x00, 0x04, /* minor version number */ 0x00, 0x00, 0x00, 0x00, /* GMT to local correction */ 0x00, 0x00, 0x00, 0x00, /* accuracy of timestamps */ 0x00, 0x00, 0xff, 0xff, /* max length of captured packets, in octets */ 0x00, 0x00, 0x00, 0x01}; /* data link type */ rtlglue_printf("Open and clear pcap file successfully.\n"); old_fs = get_fs(); set_fs(KERNEL_DS); wCount=0; while(wCount<sizeof(pcap_file_hdr)) { ret = fp->f_op->write(fp, pcap_file_hdr+wCount, sizeof(pcap_file_hdr)-wCount, &fp->f_pos); if(ret>0) wCount += ret; } set_fs(old_fs); } else { rg_db.systemGlobal.log_to_PS_pcap = 0; rg_db.systemGlobal.log_to_PS_pcap_fp = NULL; rtlglue_printf("\033[1;33;41mFail to open pcap file.\033[0m\n"); } } else { rg_db.systemGlobal.log_to_PS_pcap = 0; rg_db.systemGlobal.log_to_PS_pcap_fp = NULL; rtlglue_printf("\033[1;33;41mFail to open pcap file since ram size is not enough.\033[0m\n"); } } else { rtlglue_printf("\033[1;33;40mPcap file is already opened, append to it.\033[0m\n"); } } else { if(rg_db.systemGlobal.log_to_PS_pcap_fp!=NULL) { filp_close(rg_db.systemGlobal.log_to_PS_pcap_fp, NULL); rg_db.systemGlobal.log_to_PS_pcap_fp = NULL; rtlglue_printf("Close pcap file.\n"); } } _rtk_rg_proc_log_to_PS_pcap_get(NULL,NULL); return len; } int32 _rtk_rg_proc_turnOffARPTrafficInfo_get(struct seq_file *s, void *v) { int len=0; PROC_PRINTF("%d\n",rg_db.systemGlobal.arp_traffic_off); return len; } int _rtk_rg_proc_turnOffARPTrafficInfo_set(struct file *filp, const char *buff,unsigned long len, void *data ) { rg_db.systemGlobal.arp_traffic_off=_rtk_rg_pasring_proc_string_to_integer(buff,len); return len; } int32 _rtk_rg_proc_ARPMaxRequestCount_get(struct seq_file *s, void *v) { int len=0; PROC_PRINTF("%d\n",rg_db.systemGlobal.arp_max_request_count); return len; } int _rtk_rg_proc_ARPMaxRequestCount_set(struct file 
*filp, const char *buff,unsigned long len, void *data ) { rg_db.systemGlobal.arp_max_request_count=_rtk_rg_pasring_proc_string_to_integer(buff,len); return len; } int32 _rtk_rg_proc_IpAntiSpoof_get(struct seq_file *s, void *v) { int len=0; PROC_PRINTF("%d\n",rg_db.systemGlobal.antiIpSpoofStatus); return len; } int32 _rtk_rg_proc_IpAntiSpoof_set(struct file *filp, const char *buff,unsigned long len, void *data ) { int ret=_rtk_rg_pasring_proc_string_to_integer(buff,len); if(ret) rg_db.systemGlobal.antiIpSpoofStatus = RTK_RG_ENABLED; else rg_db.systemGlobal.antiIpSpoofStatus = RTK_RG_DISABLED; return len; } int32 _rtk_rg_proc_MacAntiSpoof_get(struct seq_file *s, void *v) { int len=0; PROC_PRINTF("%d\n",rg_db.systemGlobal.antiMacSpoofStatus); return len; } int32 _rtk_rg_proc_MacAntiSpoof_set(struct file *filp, const char *buff,unsigned long len, void *data ) { int ret=_rtk_rg_pasring_proc_string_to_integer(buff,len); if(ret) rg_db.systemGlobal.antiMacSpoofStatus = RTK_RG_ENABLED; else rg_db.systemGlobal.antiMacSpoofStatus = RTK_RG_DISABLED; return len; } int32 _rtk_rg_proc_RemoveL34TagOption_get(struct seq_file *s, void *v) { int len=0; if(rg_db.systemGlobal.fix_l34_to_untag_enable==RG_HWNAT_ENABLE) PROC_PRINTF("Enabled, remove L34 tag if need.\n"); else PROC_PRINTF("Disabled.\n"); return len; } int _rtk_rg_proc_RemoveL34TagOption_set(struct file *filp, const char *buff,unsigned long len, void *data ) { int ret=_rtk_rg_pasring_proc_string_to_integer(buff,len); if(ret==1) rg_db.systemGlobal.fix_l34_to_untag_enable=RG_HWNAT_ENABLE; else if(ret==0) rg_db.systemGlobal.fix_l34_to_untag_enable=RG_HWNAT_DISABLE; return len; } int32 _rtk_rg_proc_UrlFilterMode_get(struct seq_file *s, void *v) { int len=0; if(rg_db.systemGlobal.urlFilterMode==RG_FILTER_BLACK) PROC_PRINTF("Black-list Mode.\n"); else PROC_PRINTF("White-list Mode.\n"); return len; } int _rtk_rg_proc_UrlFilterMode_set(struct file *filp, const char *buff,unsigned long len, void *data ) { int ret=_rtk_rg_pasring_proc_string_to_integer(buff,len); if(ret==1) rg_db.systemGlobal.urlFilterMode=RG_FILTER_WHITE; else if(ret==0) rg_db.systemGlobal.urlFilterMode=RG_FILTER_BLACK; _rtk_rg_proc_UrlFilterMode_get(NULL,NULL); return len; } int32 _rtk_rg_proc_trapSynState_get(struct seq_file *s, void *v) { int len=0; if(rg_db.systemGlobal.aclAndCfReservedRule.reservedMask[RTK_RG_ACLANDCF_RESERVED_SYN_PACKET_TRAP]==ENABLED) PROC_PRINTF("Trap SYN and disabled SVLAN is Enabled.\n"); else PROC_PRINTF("Trap SYN and disabled SVLAN is Disabled.\n"); return len; } int _rtk_rg_proc_trapSynState_set(struct file *filp, const char *buff,unsigned long len, void *data ) { int ret=_rtk_rg_pasring_proc_string_to_integer(buff,len); if(ret==1) _rtk_rg_aclAndCfReservedRuleAdd(RTK_RG_ACLANDCF_RESERVED_SYN_PACKET_TRAP, NULL); else if(ret==0) _rtk_rg_aclAndCfReservedRuleDel(RTK_RG_ACLANDCF_RESERVED_SYN_PACKET_TRAP); _rtk_rg_proc_trapSynState_get(NULL,NULL); return len; } int32 _rtk_rg_proc_assignAckPriority_get(struct seq_file *s, void *v) { int len=0; if(rg_db.systemGlobal.aclAndCfReservedRule.reservedMask[RTK_RG_ACLANDCF_RESERVED_ACK_PACKET_ASSIGN_PRIORITY]==ENABLED) { PROC_PRINTF("Assigned TCP ACK priority to %d.\n",rg_db.systemGlobal.aclAndCfReservedRule.ack_packet_assign_priority.priority); } else { PROC_PRINTF("Assigned TCP ACK priority Disabled.\n"); } return len; } int _rtk_rg_proc_assignAckPriority_set(struct file *filp, const char *buff,unsigned long len, void *data ) { int pri; rtk_rg_aclAndCf_reserved_ack_packet_assign_priority_t ack_packet_assign_priority;; 
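//-1 removes the reserved ACK-priority ACL rule, 0~7 (re)installs it with that internal
//priority, e.g. echo 5 > /proc/rg/assign_ack_priority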
pri=_rtk_rg_pasring_proc_string_to_integer(buff,len); if((pri<-1) || (pri>7)){ rtlglue_printf("Invalid parameter. \n\n"); rtlglue_printf("Usage \n"); rtlglue_printf("echo [priority] > /proc/rg/assign_ack_priority, [priority:-1(disabled patch)] [priority:0~7(assigned value)] \n"); }else if(pri==-1){ _rtk_rg_aclAndCfReservedRuleDel(RTK_RG_ACLANDCF_RESERVED_ACK_PACKET_ASSIGN_PRIORITY); }else{ #ifdef CONFIG_DUALBAND_CONCURRENT /*internal-priority CONFIG_DEFAULT_TO_SLAVE_GMAC_PRI is reserved for send packet from master to slave in dual wifi architechture*/ if(pri==CONFIG_DEFAULT_TO_SLAVE_GMAC_PRI){ rtlglue_printf("Failed!!! Priority %d is reserved for DUAL BAND WIFI!\n\n",pri); return len; } #endif bzero(&ack_packet_assign_priority,sizeof(rtk_rg_aclAndCf_reserved_ack_packet_assign_priority_t)); ack_packet_assign_priority.priority=pri; _rtk_rg_aclAndCfReservedRuleAdd(RTK_RG_ACLANDCF_RESERVED_ACK_PACKET_ASSIGN_PRIORITY, &ack_packet_assign_priority); } _rtk_rg_proc_assignAckPriority_get(NULL,NULL); return len; } int32 _rtk_rg_proc_redirectFirstHttpMAC_show(struct seq_file *s, void *v) { int len=0; PROC_PRINTF("Usage: echo z [y] xx:xx:xx:xx:xx:xx > /proc/rg/redirect_first_http_req_by_mac\n"); PROC_PRINTF(" z means add or del, use \'a\' or \'d\' to indicate, respectively.\n"); PROC_PRINTF(" y means macType, range from 0 to %d. if z equals to \'d\', y should not be filled.\n",MAX_FORCE_PORTAL_URL_NUM-1); PROC_PRINTF(" xx:xx:xx:xx:xx:xx means valid unicast mac address\n"); PROC_PRINTF("Note: Check setting by \"cat /proc/dump/sw_lut\".\n"); return len; } int _rtk_rg_proc_redirectFirstHttpMAC_modify(struct file *filp, const char *buff,unsigned long len, void *data ) { int i,type=0,type_value=0,type_idx=0; char tmpbuf[64]={'\0'}; char *strptr; unsigned int mac_addr,count=0; char *modptr=NULL,*typeptr,*macaddr=NULL; rtk_rg_macEntry_t macEntry; //echo a 1 00:44:55:66:11:22 > redirect_first_http_req_by_mac //echo d 00:44:55:66:11:22 > redirect_first_http_req_by_mac if (buff && len<64 && !copy_from_user(tmpbuf, buff, len)) { //tmpbuf[len] = '\0'; strptr=tmpbuf; modptr = strsep(&strptr," "); if (modptr==NULL || (*modptr!='a' && *modptr!='d'))goto errout; if(*modptr=='a') { typeptr = strsep(&strptr," "); if (typeptr==NULL)goto errout; type = simple_strtol(typeptr, NULL, 10); if(type > 7 || type < 0)goto errout; type_idx=type+1; //type index 0 stands for default URL, 1 stands for type 0, 2 stands for type 1, ... type_value=type+2; //type value 0 stands for disable, 1 stands for default URL, 2 stands for type0, 3 stands for type 1,... 
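//A MAC can only be bound to a macType whose portal URL was already configured through
//redirect_first_http_req_set_url; otherwise the request is rejected here.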
if(!rg_db.systemGlobal.forcePortal_url_list[type_idx].valid) { rtlglue_printf("Type[%d] is invalid, please add URL first!!\n",type); return len; } } NEXT_MAC: macaddr = strsep(&strptr,":"); if(macaddr==NULL)goto errout; mac_addr = simple_strtol(macaddr, NULL, 16); if(mac_addr>0xff)goto errout; macEntry.mac.octet[count++]=mac_addr; if(count<ETHER_ADDR_LEN)goto NEXT_MAC; //found exist one, if not, return fail for(i=0;i<MAX_LUT_SW_TABLE_SIZE;i++){ if(rg_db.lut[i].valid && rg_db.lut[i].rtk_lut.entryType==RTK_LUT_L2UC && !memcmp(rg_db.lut[i].rtk_lut.entry.l2UcEntry.mac.octet,macEntry.mac.octet,ETHER_ADDR_LEN)){ rg_db.lut[i].redirect_http_req=type_value; break; } } if(i==MAX_LUT_SW_TABLE_SIZE)goto errout; rtlglue_printf("success modify!\n"); } else { errout: _rtk_rg_proc_redirectFirstHttpMAC_show(NULL,NULL); } return len; } int32 _rtk_rg_proc_redirectFirstHttpURL_show(struct seq_file *s, void *v) { int len=0; int i; for(i=0;i<=MAX_FORCE_PORTAL_URL_NUM;i++){ if(rg_db.systemGlobal.forcePortal_url_list[i].valid){ if(i==0)PROC_PRINTF("Default URL: http://%s\n",rg_db.systemGlobal.forcePortal_url_list[i].url_string); else PROC_PRINTF("Type[%d]: http://%s\n",i-1,rg_db.systemGlobal.forcePortal_url_list[i].url_string); } } return len; } int _rtk_rg_proc_redirectFirstHttpURL_modify(struct file *filp, const char *buff,unsigned long len, void *data ) { int i,type=0,type_value=0,type_idx=0,portidx; char tmpbuf[MAX_URL_FILTER_STR_LENGTH]={'\0'}; char *strptr,*att_url; char *modptr=NULL,*typeptr,*urlptr=NULL,*newlineptr=NULL; //echo a 1 aa.b.rc/t > redirect_first_http_req_set_url //echo d 1 > redirect_first_http_req_set_url if (buff && len<MAX_URL_FILTER_STR_LENGTH && !copy_from_user(tmpbuf, buff, len)) { //tmpbuf[len] = '\0'; strptr=tmpbuf; modptr = strsep(&strptr," "); if (modptr==NULL || (*modptr!='a' && *modptr!='d'))goto errout; typeptr = strsep(&strptr," "); if (typeptr==NULL)goto errout; type = simple_strtol(typeptr, NULL, 10); if(type >= MAX_FORCE_PORTAL_URL_NUM || type < -1)goto errout; type_idx=type+1; //type index 0 stands for default URL, 1 stands for type 0, 2 stands for type 1, ... type_value=type+2; //type value 0 stands for disable, 1 stands for default URL, 2 stands for type0, 3 stands for type 1,... if(*modptr=='d'){ rg_db.systemGlobal.forcePortal_url_list[type_idx].valid=0; if(type>=0 || !rg_db.redirectHttpAll.enable){ //clear all same type mac for(i=0;i<MAX_LUT_SW_TABLE_SIZE;i++) if(rg_db.lut[i].valid && rg_db.lut[i].redirect_http_req==type_value) rg_db.lut[i].redirect_http_req=0; } if(type>=0) rtlglue_printf("delete Type[%d] success!\n",type); else rtlglue_printf("delete Default Type success!\n"); return len; } urlptr = strsep(&strptr," "); if(urlptr==NULL)goto errout; if(strlen(urlptr)>=MAX_URL_FILTER_STR_LENGTH)goto errout; //20160115LUKE: removing strange new-line character when receive string from console. 
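//Strip the trailing console newline, then store the URL; if the string ends with "&url="
//the attach_orig_url flag is set, presumably so the original request URL gets appended on redirect.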
if ((newlineptr=strchr(urlptr, '\n')) != NULL) *newlineptr = '\0'; rg_db.systemGlobal.forcePortal_url_list[type_idx].valid=1; bzero(rg_db.systemGlobal.forcePortal_url_list[type_idx].url_string,MAX_URL_FILTER_STR_LENGTH); strncpy(rg_db.systemGlobal.forcePortal_url_list[type_idx].url_string,urlptr,MAX_URL_FILTER_STR_LENGTH); att_url=strstr(urlptr,"&url="); rg_db.systemGlobal.forcePortal_url_list[type_idx].attach_orig_url=0; if(att_url && strlen(att_url)==strlen("&url="))rg_db.systemGlobal.forcePortal_url_list[type_idx].attach_orig_url=1; if(type>=0) rtlglue_printf("success add Type[%d] %s!\n",type,urlptr); else{ //reset all non-type mac as default type for(i=0;i<MAX_LUT_SW_TABLE_SIZE;i++){ if(rg_db.lut[i].valid && rg_db.lut[i].rtk_lut.entryType==RTK_LUT_L2UC && rg_db.lut[i].redirect_http_req==0){ if(rg_db.lut[i].rtk_lut.entry.l2UcEntry.port==RTK_RG_PORT_CPU)portidx=RTK_RG_PORT_CPU+rg_db.lut[i].rtk_lut.entry.l2UcEntry.ext_port; else portidx=rg_db.lut[i].rtk_lut.entry.l2UcEntry.port; if(portidx!=RTK_RG_PORT_CPU && rg_db.systemGlobal.lanPortMask.portmask&(0x1<<portidx)) rg_db.lut[i].redirect_http_req=type_value; } } rtlglue_printf("success add Default portal %s!\n",rg_db.systemGlobal.forcePortal_url_list[type_idx].url_string); } } else { errout: rtlglue_printf("Usage: echo z y [url] > /proc/rg/redirect_first_http_req_set_url\n"); rtlglue_printf(" z means add or del, use \'a\' or \'d\' to indicate, respectively.\n"); rtlglue_printf(" y means macType, range from 0 to %d.\n",MAX_FORCE_PORTAL_URL_NUM-1); rtlglue_printf(" if y set to -1, means all MAC will redirect to this default URL once.\n"); rtlglue_printf(" url stands for URL without head \'http://\'. It can be omitted while deleting.\n"); } return len; } int32 _rtk_rg_proc_trapLan_get(struct seq_file *s, void *v) { int len=0; if(rg_kernel.arp_number_for_LAN==0 && rg_kernel.arp_number_for_WAN==MAX_ARP_HW_TABLE_SIZE) PROC_PRINTF("Trap LAN is Enabled.\n"); else PROC_PRINTF("Trap LAN is Disabled.\n"); return len; } int _rtk_rg_proc_trapLan_set(struct file *filp, const char *buff,unsigned long len, void *data ) { int ret=_rtk_rg_pasring_proc_string_to_integer(buff,len); if(ret==1) { rg_kernel.arp_number_for_LAN=0; rg_kernel.arp_number_for_WAN=MAX_ARP_HW_TABLE_SIZE; } else if(ret==0) { rg_kernel.arp_number_for_LAN=MAX_ARP_FOR_LAN_INTF; rg_kernel.arp_number_for_WAN=MAX_ARP_FOR_WAN_INTF; } _rtk_rg_proc_trapLan_get(NULL,NULL); return len; } int32 _rtk_rg_proc_trapLan_show(struct seq_file *s, void *v) { int len=0; int i; for(i=0;i<MAX_NETIF_SW_TABLE_SIZE;i++) { if(rg_db.nexthop_lan_table[i].valid) { PROC_PRINTF("%d. 
\n",i); PROC_PRINTF("IP: %d.%d.%d.%d\n",(rg_db.nexthop_lan_table[i].ipAddr&0xff000000)>>24, (rg_db.nexthop_lan_table[i].ipAddr&0xff0000)>>16, (rg_db.nexthop_lan_table[i].ipAddr&0xff00)>>8, rg_db.nexthop_lan_table[i].ipAddr&0xff); PROC_PRINTF("MAC: %02x:%02x:%02x:%02x:%02x:%02x\n", rg_db.nexthop_lan_table[i].macAddr.octet[0], rg_db.nexthop_lan_table[i].macAddr.octet[1], rg_db.nexthop_lan_table[i].macAddr.octet[2], rg_db.nexthop_lan_table[i].macAddr.octet[3], rg_db.nexthop_lan_table[i].macAddr.octet[4], rg_db.nexthop_lan_table[i].macAddr.octet[5]); PROC_PRINTF("Port: %d\n",rg_db.nexthop_lan_table[i].port); } } return len; } int _rtk_rg_proc_trapLanAdd_set(struct file *filp, const char *buff,unsigned long len, void *data ) { int i,ret; char tmpbuf[64]={'\0'}; char *strptr; unsigned int mac_addr,count=0; char *tokptr=NULL,*ipaddr=NULL,*macaddr=NULL; rtk_l34_netif_entry_t intfEntry; rtk_l34_routing_entry_t rtEntry; rtk_l34_nexthop_entry_t nxpEntry; rtk_rg_macEntry_t macEntry; rtk_rg_nexthop_lan_host_t *pNexthopHost; //echo 1 192.168.1.2 00:44:55:66:11:22 > trapLan_add if (buff && len<64 && !copy_from_user(tmpbuf, buff, len)) { //tmpbuf[len] = '\0'; for(i=0;i<MAX_NETIF_SW_TABLE_SIZE;i++) { if(!rg_db.nexthop_lan_table[i].valid)break; } if(i==MAX_NETIF_SW_TABLE_SIZE)goto errout; pNexthopHost=&rg_db.nexthop_lan_table[i]; pNexthopHost->valid=1; strptr=tmpbuf; tokptr = strsep(&strptr," "); if (tokptr==NULL)goto errout; pNexthopHost->port = simple_strtol(tokptr, NULL, 0); if(pNexthopHost->port > RTK_RG_MAC_PORT_MAX)goto errout; ipaddr = strsep(&strptr," "); if (ipaddr==NULL)goto errout; NEXT_MAC: macaddr = strsep(&strptr,":"); if(macaddr==NULL)goto errout; mac_addr = simple_strtol(macaddr, NULL, 16); if(mac_addr>0xff)goto errout; pNexthopHost->macAddr.octet[count++]=mac_addr; if(count<ETHER_ADDR_LEN)goto NEXT_MAC; pNexthopHost->ipAddr = in_aton(ipaddr); //create mac, nexthop, route //check route, nexthop valid for(i=0;i<MAX_L3_SW_TABLE_SIZE ;i++) //because idx 7 is reserved for default route { if(i== V4_DEFAULT_ROUTE_IDX) continue; if(rg_db.l3[i].rtk_l3.valid == 0) break; } if(i==MAX_L3_SW_TABLE_SIZE){rtlglue_printf("%d failed\n",__LINE__);goto errout;} pNexthopHost->rtIdx=i; //Check interface table available or not for(i=MAX_NETIF_SW_TABLE_SIZE-1;i>=0;i--) { if(rg_db.systemGlobal.interfaceInfo[i].valid == 0) break; } if(i<0){rtlglue_printf("%d failed\n",__LINE__);goto errout;} pNexthopHost->intfIdx=i; //set intf & nexthop bzero(&intfEntry,sizeof(rtk_l34_netif_entry_t)); for(i=0;i<rg_db.systemGlobal.lanIntfTotalNum;i++) { if(rg_db.systemGlobal.lanIntfGroup[i].p_intfInfo->p_lanIntfConf->port_mask.portmask&(0x1<<pNexthopHost->port)) { memcpy(intfEntry.gateway_mac.octet,rg_db.systemGlobal.lanIntfGroup[i].p_intfInfo->p_lanIntfConf->gmac.octet,ETHER_ADDR_LEN); intfEntry.vlan_id=rg_db.systemGlobal.lanIntfGroup[i].p_intfInfo->p_lanIntfConf->intf_vlan_id; intfEntry.mtu=rg_db.systemGlobal.lanIntfGroup[i].p_intfInfo->p_lanIntfConf->mtu; rtlglue_printf("vlan is %d, mtu is %d",intfEntry.vlan_id,intfEntry.mtu); break; } } if(i==rg_db.systemGlobal.lanIntfTotalNum){rtlglue_printf("%d failed\n",__LINE__);goto errout;} //set mac pNexthopHost->macIdx=FAIL; bzero(&macEntry,sizeof(rtk_rg_macEntry_t)); memcpy(macEntry.mac.octet,pNexthopHost->macAddr.octet,ETHER_ADDR_LEN); ret=rtk_rg_apollo_macEntry_find(&macEntry, &pNexthopHost->macIdx); if(ret==RT_ERR_RG_OK)rtk_rg_apollo_macEntry_del(pNexthopHost->macIdx); macEntry.port_idx=pNexthopHost->port; macEntry.fid=rg_db.vlan[intfEntry.vlan_id].fid; 
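//LUT keying follows the VLAN's FID mode: IVL entries keep the interface VID, while for SVL
//untagged member ports the entry is learned with VID 0 (RTL9600/xDSL builds) or with ctag_if
//cleared (other builds).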
macEntry.vlan_id=intfEntry.vlan_id; if(rg_db.vlan[macEntry.vlan_id].fidMode==VLAN_FID_IVL) macEntry.isIVL=1; else{ macEntry.isIVL=0; #if defined(CONFIG_RTL9600_SERIES) || defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) if(rg_db.systemGlobal.lanIntfGroup[i].p_intfInfo->p_lanIntfConf->untag_mask.portmask&(0x1<<pNexthopHost->port)) macEntry.vlan_id=0; #else // support ctag_if macEntry.ctag_if=(rg_db.systemGlobal.lanIntfGroup[i].p_intfInfo->p_lanIntfConf->untag_mask.portmask&(0x1<<pNexthopHost->port))?0:1; #endif } macEntry.static_entry=1; //won't age out macEntry.arp_used=1; //pointed by nexthop entry ret=rtk_rg_apollo_macEntry_add(&macEntry,&pNexthopHost->macIdx); if(ret!=RT_ERR_RG_OK){rtlglue_printf("%d failed\n",__LINE__);goto ERR_MAC;} intfEntry.valid=1; #if defined(CONFIG_RTL9600_SERIES) //20141110LUKE: add for inhibiting multicast routing downstream trigger overMTU trap. if(rg_kernel.apolloChipId==APOLLOMP_CHIP_ID)intfEntry.mtu+=2; #endif ret=RTK_L34_NETIFTABLE_SET(pNexthopHost->intfIdx, &intfEntry); if(ret!=RT_ERR_OK){rtlglue_printf("%d failed\n",__LINE__);goto ERR_NETIF;} //reset software MTU should keep original MTU, only hardware MTU need to change!! rg_db.netif[pNexthopHost->intfIdx].rtk_netif.mtu=rg_db.systemGlobal.lanIntfGroup[i].p_intfInfo->p_lanIntfConf->mtu; //setup lan interface info memcpy(&rg_db.systemGlobal.interfaceInfo[pNexthopHost->intfIdx].storedInfo,&rg_db.systemGlobal.lanIntfGroup[i].p_intfInfo->storedInfo,sizeof(rtk_rg_intfInfo_t)); rg_db.systemGlobal.interfaceInfo[pNexthopHost->intfIdx].storedInfo.lan_intf.ip_addr=pNexthopHost->ipAddr; rg_db.systemGlobal.interfaceInfo[pNexthopHost->intfIdx].storedInfo.lan_intf.ip_network_mask=0xffffffff; //memcpy(&rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.lan_intf.ipv6_addr,&lan_info->ipv6_addr,sizeof(rtk_ipv6_addr_t)); //rg_db.systemGlobal.interfaceInfo[intfIdx].storedInfo.lan_intf.ipv6_network_mask_length=lan_info->ipv6_network_mask_length; rg_db.systemGlobal.interfaceInfo[pNexthopHost->intfIdx].storedInfo.lan_intf.port_mask.portmask=0x1<<pNexthopHost->port; rg_db.systemGlobal.interfaceInfo[pNexthopHost->intfIdx].storedInfo.lan_intf.untag_mask.portmask=0x1<<pNexthopHost->port; rg_db.systemGlobal.interfaceInfo[pNexthopHost->intfIdx].p_lanIntfConf=&rg_db.systemGlobal.interfaceInfo[pNexthopHost->intfIdx].storedInfo.lan_intf; //short-cut of lan interface structure rg_db.systemGlobal.interfaceInfo[pNexthopHost->intfIdx].lan_or_wan_index=i; rg_db.systemGlobal.interfaceInfo[pNexthopHost->intfIdx].valid=1; bzero(&nxpEntry, sizeof(rtk_l34_nexthop_entry_t)); nxpEntry.ifIdx=pNexthopHost->intfIdx; nxpEntry.nhIdx=pNexthopHost->macIdx; rg_db.nexthop[pNexthopHost->intfIdx].valid=1; ret=RTK_L34_NEXTHOPTABLE_SET(pNexthopHost->intfIdx, &nxpEntry); if(ret!=RT_ERR_OK){rtlglue_printf("%d failed\n",__LINE__);goto ERR_NXT;} //set route bzero(&rtEntry, sizeof(rtk_l34_routing_entry_t)); rtEntry.netifIdx=pNexthopHost->intfIdx; rtEntry.valid=1; rtEntry.process=L34_PROCESS_NH; rtEntry.internal=1; rtEntry.ipAddr=pNexthopHost->ipAddr; rtEntry.rt2waninf=0; rtEntry.ipMask=31; //host route //TODO:if load-balance is needed, here should be changed rtEntry.nhStart=pNexthopHost->intfIdx; /*exact index*/ rtEntry.nhNxt=pNexthopHost->intfIdx; rtEntry.nhNum=0; //exect Next hop number 1,2,4,8,16 rtEntry.nhAlgo=0; //PER-PACKET rtEntry.ipDomain=6; //Entry 0~7 ret=RTK_L34_ROUTINGTABLE_SET(pNexthopHost->rtIdx, &rtEntry); if(ret!=RT_ERR_OK){rtlglue_printf("%d failed\n",__LINE__);goto ERR_RT;} rtlglue_printf("success add!\n"); } else { goto errout; ERR_RT: bzero(&rtEntry, 
sizeof(rtk_l34_routing_entry_t)); RTK_L34_ROUTINGTABLE_SET(pNexthopHost->rtIdx, &rtEntry); ERR_NXT: bzero(&nxpEntry, sizeof(rtk_l34_nexthop_entry_t)); rg_db.nexthop[pNexthopHost->intfIdx].valid=0; RTK_L34_NEXTHOPTABLE_SET(pNexthopHost->intfIdx, &nxpEntry); ERR_NETIF: bzero(&intfEntry,sizeof(rtk_l34_netif_entry_t)); RTK_L34_NETIFTABLE_SET(pNexthopHost->intfIdx, &intfEntry); bzero(&rg_db.systemGlobal.interfaceInfo[pNexthopHost->intfIdx],sizeof(rtk_rg_interface_info_global_t)); ERR_MAC: rtk_rg_apollo_macEntry_del(pNexthopHost->macIdx); bzero(pNexthopHost,sizeof(rtk_rg_nexthop_lan_host_t)); errout: _rtk_rg_proc_trapLan_show(NULL,NULL); } return len; } int _rtk_rg_proc_trapLanDel_set(struct file *filp, const char *buff,unsigned long len, void *data ) { int i; char tmpbuf[64]={'\0'}; char *strptr; unsigned int mac_addr,count=0; char *tokptr=NULL,*ipaddr=NULL,*macaddr=NULL; rtk_l34_netif_entry_t intfEntry; rtk_l34_routing_entry_t rtEntry; rtk_l34_nexthop_entry_t nxpEntry; rtk_rg_nexthop_lan_host_t nexthopHost; //echo 1 192.168.1.2 00:44:55:66:11:22 > trapLan_del if (buff && len<64 && !copy_from_user(tmpbuf, buff, len)) { strptr=tmpbuf; tokptr = strsep(&strptr," "); if (tokptr==NULL)goto errout; nexthopHost.port = simple_strtol(tokptr, NULL, 0); if(nexthopHost.port > RTK_RG_MAC_PORT_MAX)goto errout; ipaddr = strsep(&strptr," "); if (ipaddr==NULL)goto errout; NEXT_MAC: macaddr = strsep(&strptr,":"); if(macaddr==NULL)goto errout; mac_addr = simple_strtol(macaddr, NULL, 16); if(mac_addr>0xff)goto errout; nexthopHost.macAddr.octet[count++]=mac_addr; if(count<ETHER_ADDR_LEN)goto NEXT_MAC; nexthopHost.ipAddr = in_aton(ipaddr); for(i=0;i<MAX_NETIF_SW_TABLE_SIZE;i++) { if(rg_db.nexthop_lan_table[i].valid && rg_db.nexthop_lan_table[i].ipAddr==nexthopHost.ipAddr && !memcmp(rg_db.nexthop_lan_table[i].macAddr.octet,nexthopHost.macAddr.octet,ETHER_ADDR_LEN) && rg_db.nexthop_lan_table[i].port==nexthopHost.port) { bzero(&rtEntry, sizeof(rtk_l34_routing_entry_t)); RTK_L34_ROUTINGTABLE_SET(rg_db.nexthop_lan_table[i].rtIdx, &rtEntry); bzero(&nxpEntry, sizeof(rtk_l34_nexthop_entry_t)); rg_db.nexthop[rg_db.nexthop_lan_table[i].intfIdx].valid=0; RTK_L34_NEXTHOPTABLE_SET(rg_db.nexthop_lan_table[i].intfIdx, &nxpEntry); bzero(&intfEntry,sizeof(rtk_l34_netif_entry_t)); RTK_L34_NETIFTABLE_SET(rg_db.nexthop_lan_table[i].intfIdx, &intfEntry); bzero(&rg_db.systemGlobal.interfaceInfo[rg_db.nexthop_lan_table[i].intfIdx],sizeof(rtk_rg_interface_info_global_t)); rtk_rg_apollo_macEntry_del(rg_db.nexthop_lan_table[i].macIdx); bzero(&rg_db.nexthop_lan_table[i],sizeof(rtk_rg_nexthop_lan_host_t)); rtlglue_printf("success del!\n"); break; } } } else { errout: _rtk_rg_proc_trapLan_show(NULL,NULL); } return len; } unsigned char _rtk_rg_hex_to_byte(unsigned char hex) { unsigned char ret=0; if((hex>='A')&&(hex<='F')) ret=hex-'A'+10; else if((hex>='a')&&(hex<='f')) ret=hex-'a'+10; else if((hex>='0')&&(hex<='9')) ret=hex-'0'; return ret; } int _rtk_rg_proc_sendFromCpu(struct file *filp, const char *buff,unsigned long len, void *data ) { int i; struct sk_buff *new_skb; char dev_name[16]={0}; int offset=0; // fetch net_dev name for(i=0;i<len;i++) { if(offset==0) { if(buff[i]!=' ') { if(i==15) { TRACE("CPU_DirectTX_ERROR: net_dev is not found."); return len; } dev_name[i]=buff[i]; } else { offset=i+1; break; } } } // alloc skb rg_db.pktHdr->ingressPort=RTK_RG_PORT_CPU; new_skb=_rtk_rg_getAlloc(SKB_BUF_SIZE); if(new_skb==NULL) return len; for(i=offset;i<len;i+=2) { 
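//Every two hex characters after the device name are packed into one byte of the new skb payload.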
new_skb->data[(i-offset)>>1]=(_rtk_rg_hex_to_byte(buff[i])<<4)+_rtk_rg_hex_to_byte(buff[i+1]); } new_skb->len=(len-offset)>>1; new_skb->tail=new_skb->data+new_skb->len; // search net_device new_skb->dev = first_net_device(&init_net); while (new_skb->dev) { DEBUG("CPU_DirectTX: net_dev[%s]", new_skb->dev->name); if(strcmp(new_skb->dev->name,dev_name)==0) { //20160406LUKE: in case we receive pacekt without checksum, force recalculate here! rtk_rg_fwdEngineReturn_t _rtk_rg_packetParser(struct sk_buff *skb, rtk_rg_pktHdr_t *pPktHdr); rtk_rg_fwdEngineReturn_t parserRet; if(!memcmp(dev_name,"wlan0",5)) rg_db.pktHdr->pRxDesc=(void *)&rg_db.systemGlobal.rxInfoFromWLAN; #ifdef CONFIG_DUALBAND_CONCURRENT else if(!memcmp(dev_name,"wlan1",5)) { rg_db.pktHdr->pRxDesc=(void *)&rg_db.systemGlobal.rxInfoFromSlaveWLAN; } #endif else rg_db.pktHdr->pRxDesc=(void *)&rg_kernel.rxInfoFromPS; parserRet=_rtk_rg_packetParser(new_skb,rg_db.pktHdr); if(parserRet!=RG_FWDENGINE_RET_CONTINUE){ TRACE("parsing fail...%d",parserRet); goto Error_out; }else{ uint16 *pL3Checksum, *pL4Checksum, totalLen; //calculating L3 checksum if(rg_db.pktHdr->tagif&IPV4_TAGIF && (rg_db.pktHdr->tagif&IPV6_TAGIF)==0){ //DEBUG("L3 header len=%d\n", pPktHdr->ipv4HeaderLen); pL3Checksum=(u16*)&new_skb->data[rg_db.pktHdr->l3Offset+10]; *pL3Checksum=0; *pL3Checksum=htons(inet_chksum(new_skb->data+rg_db.pktHdr->l3Offset,rg_db.pktHdr->ipv4HeaderLen)); DEBUG("L3 checksum Update. checksum(0x%x), l3offset:%d\n", *pL3Checksum, rg_db.pktHdr->l3Offset); } //totalLen = pPktHdr->l3Offset + pPktHdr->l3Len; totalLen = new_skb->len; DEBUG("l3Offset(%d), l3Len(%d), skb->len(%d)\n", rg_db.pktHdr->l3Offset, rg_db.pktHdr->l3Len, new_skb->len); if(rg_db.pktHdr->tagif&(IPV4_TAGIF) && (rg_db.pktHdr->tagif&IPV6_TAGIF)==0 && (rg_db.pktHdr->tagif&DSLITEMC_INNER_TAGIF)==0){ if(rg_db.pktHdr->tagif&TCP_TAGIF){ pL4Checksum=(u16*)&new_skb->data[rg_db.pktHdr->l4Offset+16]; *pL4Checksum=0; *pL4Checksum=htons(inet_chksum_pseudo(new_skb->data+rg_db.pktHdr->l4Offset,totalLen-rg_db.pktHdr->l4Offset,ntohl(*(rg_db.pktHdr->pIpv4Sip)),ntohl(*(rg_db.pktHdr->pIpv4Dip)),rg_db.pktHdr->ipProtocol)); DEBUG("L4 TCP checksum Update. checksum(0x%x), l4offset:%d\n", *pL4Checksum, rg_db.pktHdr->l4Offset); }else if(rg_db.pktHdr->tagif&UDP_TAGIF){ pL4Checksum=(u16*)&new_skb->data[rg_db.pktHdr->l4Offset+6]; *pL4Checksum=0; *pL4Checksum=htons(inet_chksum_pseudo(new_skb->data+rg_db.pktHdr->l4Offset,totalLen-rg_db.pktHdr->l4Offset,ntohl(*(rg_db.pktHdr->pIpv4Sip)),ntohl(*(rg_db.pktHdr->pIpv4Dip)),rg_db.pktHdr->ipProtocol)); DEBUG("L4 UDP checksum Update. checksum(0x%x), l4offset:%d\n", *pL4Checksum, rg_db.pktHdr->l4Offset); }else if(rg_db.pktHdr->tagif&ICMP_TAGIF){ pL4Checksum=(u16*)&new_skb->data[rg_db.pktHdr->l4Offset+2]; *pL4Checksum=0; *pL4Checksum=htons(inet_chksum(new_skb->data+rg_db.pktHdr->l4Offset,totalLen-rg_db.pktHdr->l4Offset)); DEBUG("L4 ICMP checksum Update. checksum(0x%x), l4offset:%d\n", *pL4Checksum, rg_db.pktHdr->l4Offset); } } } //when RX, the RG parser will count this buf. 
(so don't need to add by alloc) rg_db.systemGlobal.statistic.perPortCnt_skb_pre_alloc_for_uc[rg_db.pktHdr->ingressPort]--; if(memcmp(dev_name,"wlan0",5)==0) //send from WLAN0 { int ret; ret=fwdEngine_rx_skb(NULL,new_skb,NULL); if(ret==RE8670_RX_CONTINUE) { //to PS: free by PS new_skb->protocol = eth_type_trans (new_skb, new_skb->dev); netif_rx(new_skb); } else if(ret==RE8670_RX_STOP) { //drop: free at here _rtk_rg_dev_kfree_skb_any(new_skb); //rg_db.systemGlobal.statistic.perPortCnt_skb_free[rg_db.pktHdr->ingressPort]--; } else { //forward: don't do anything } } #ifdef CONFIG_DUALBAND_CONCURRENT else if(memcmp(dev_name,"wlan1",5)==0) //send from WLAN1 { int ret; ret=fwdEngine_rx_skb(NULL,new_skb,(struct rx_info *)&rg_db.systemGlobal.rxInfoFromSlaveWLAN); if(ret==RE8670_RX_CONTINUE) { //to PS: free by PS new_skb->protocol = eth_type_trans (new_skb, new_skb->dev); netif_rx(new_skb); } else if(ret==RE8670_RX_STOP) { //drop: free at here _rtk_rg_dev_kfree_skb_any(new_skb); //rg_db.systemGlobal.statistic.perPortCnt_skb_free[rg_db.pktHdr->ingressPort]--; } else { //forward: don't do anything } } #endif else //send from CPU { rtk_rg_fwdEngine_xmit(new_skb,new_skb->dev); } return len; } new_skb->dev = next_net_device(new_skb->dev); } TRACE("CPU_DirectTX_ERROR: net_dev(%s) is not found.",dev_name); Error_out: _rtk_rg_dev_kfree_skb_any(new_skb); //rg_db.systemGlobal.statistic.perPortCnt_skb_free[rg_db.pktHdr->ingressPort]--; return len; } int32 _rtk_rg_proc_wifiTxRedirect_get(struct seq_file *s, void *v) { int len=0; PROC_PRINTF("%d\n",rg_db.systemGlobal.wifiTxRedirect); return len; } int _rtk_rg_proc_wifiTxRedirect_set(struct file *filp, const char *buff,unsigned long len, void *data) { rg_db.systemGlobal.wifiTxRedirect=_rtk_rg_pasring_proc_string_to_integer(buff,len); return len; } int32 _rtk_rg_proc_ponPortUnmatchCfDrop_get(struct seq_file *s, void *v) { int len=0; PROC_PRINTF("%d\n",rg_db.systemGlobal.ponPortUnmatchCfDrop); return len; } int _rtk_rg_proc_ponPortUnmatchCfDrop_set(struct file *filp, const char *buff,unsigned long len, void *data) { rg_db.systemGlobal.ponPortUnmatchCfDrop=_rtk_rg_pasring_proc_string_to_integer(buff,len); if(rg_db.systemGlobal.ponPortUnmatchCfDrop) rtk_classify_unmatchAction_set(CLASSIFY_UNMATCH_PERMIT_WITHOUT_PON); else rtk_classify_unmatchAction_set(CLASSIFY_UNMATCH_PERMIT); return len; } int32 _rtk_rg_proc_psRxMirrorToPort0_get(struct seq_file *s, void *v) { int len=0; PROC_PRINTF("%d\n",rg_db.systemGlobal.psRxMirrorToPort0); return len; } int _rtk_rg_proc_psRxMirrorToPort0_set(struct file *filp, const char *buff,unsigned long len, void *data) { rg_db.systemGlobal.psRxMirrorToPort0=_rtk_rg_pasring_proc_string_to_integer(buff,len); return len; } int32 _rtk_rg_proc_procToPipe_get(struct seq_file *s, void *v) { int len=0; PROC_PRINTF("%d\n",rg_db.systemGlobal.proc_to_pipe); return len; } int _rtk_rg_proc_procToPipe_set(struct file *filp, const char *buff,unsigned long len, void *data) { rg_db.systemGlobal.proc_to_pipe=_rtk_rg_pasring_proc_string_to_integer(buff,len); return len; } #if !defined(CONFIG_OPENWRT_RG) extern int DumpProtocolStackRx_debug; int32 _rtk_rg_proc_dumpPSRxPkt_get(struct seq_file *s, void *v) { int len=0; PROC_PRINTF("Dump Protocol Stack Rx:"); PROC_PRINTF("%s\n",DumpProtocolStackRx_debug?"Enabled":"Disabled"); return len; } int _rtk_rg_proc_dumpPSRxPkt_set(struct file *filp, const char *buff,unsigned long len, void *data) { DumpProtocolStackRx_debug = _rtk_rg_pasring_proc_string_to_integer(buff,len); return len; } extern int 
DumpProtocolStackTx_debug; int32 _rtk_rg_proc_dumpPSTxPkt_get(struct seq_file *s, void *v) { int len=0; PROC_PRINTF("Dump Protocol Stack Tx:"); PROC_PRINTF("%s\n",DumpProtocolStackTx_debug?"Enabled":"Disabled"); return len; } int _rtk_rg_proc_dumpPSTxPkt_set(struct file *filp, const char *buff,unsigned long len, void *data) { DumpProtocolStackTx_debug = _rtk_rg_pasring_proc_string_to_integer(buff,len); return len; } #endif #if defined(CONFIG_RG_IGMP_SNOOPING) || defined(CONFIG_RG_MLD_SNOOPING) int32 _rtk_rg_igmp_report_ingress_filter_portmask_get(struct seq_file *s, void *v) { int len=0; PROC_PRINTF("igmp report ingress filter portmask: 0x%x\n",rg_db.systemGlobal.igmpReportIngressPortmask); return len; } int _rtk_rg_igmp_report_ingress_filter_portmask_set(struct file *filp, const char *buff,unsigned long count, void *data) { unsigned char tmpBuf[16] = {0}; int len = (count > 15) ? 15 : count; if (buff && !copy_from_user(tmpBuf, buff, len)) { rg_db.systemGlobal.igmpReportIngressPortmask=simple_strtoul(tmpBuf, NULL, 16); rtlglue_printf("igmp report ingress filter portmask: 0x%x\n",rg_db.systemGlobal.igmpReportIngressPortmask); return count; } return -EFAULT; } int32 _rtk_rg_igmp_report_filter_portmask_get(struct seq_file *s, void *v) { int len=0; PROC_PRINTF("igmp report egress filter portmask: 0x%x\n",rg_db.systemGlobal.igmpReportPortmask); return len; } int _rtk_rg_igmp_report_filter_portmask_set(struct file *filp, const char *buff,unsigned long count, void *data) { unsigned char tmpBuf[16] = {0}; int len = (count > 15) ? 15 : count; if (buff && !copy_from_user(tmpBuf, buff, len)) { rg_db.systemGlobal.igmpReportPortmask=simple_strtoul(tmpBuf, NULL, 16); rtlglue_printf("igmp report egress filter portmask: 0x%x\n",rg_db.systemGlobal.igmpReportPortmask); return count; } return -EFAULT; } #endif #if defined(CONFIG_RG_IGMP_SNOOPING) || defined(CONFIG_RG_MLD_SNOOPING) int32 _rtk_rg_igmp_mld_query_filter_portmask_get(struct seq_file *s, void *v) { int len=0; PROC_PRINTF("igmp/mld query filter portmask: 0x%x\n",rg_db.systemGlobal.igmpMldQueryPortmask); return len; } int _rtk_rg_igmp_mld_query_filter_portmask_set(struct file *filp, const char *buff,unsigned long count, void *data) { unsigned char tmpBuf[16] = {0}; int len = (count > 15) ? 
15 : count;
	if (buff && !copy_from_user(tmpBuf, buff, len))
	{
		rg_db.systemGlobal.igmpMldQueryPortmask=simple_strtoul(tmpBuf, NULL, 16);
		rtlglue_printf("igmp/mld query filter portmask: 0x%x\n",rg_db.systemGlobal.igmpMldQueryPortmask);
		return count;
	}
	return -EFAULT;
}
#endif

int32 _rtk_rg_aclDumpParameterEnable_get(struct seq_file *s, void *v)
{
	int len=0;
	PROC_PRINTF("acl_rg_add_parameter_dump: %s\n",rg_db.systemGlobal.acl_rg_add_parameter_dump?"Enabled":"Disabled");
	return len;
}
int _rtk_rg_aclDumpParameterEnable_set(struct file *filp, const char *buff,unsigned long count, void *data)
{
	rg_db.systemGlobal.acl_rg_add_parameter_dump=_rtk_rg_pasring_proc_string_to_integer(buff,count);
	_rtk_rg_aclDumpParameterEnable_get(NULL,NULL);
	return count;
}

int32 _rtk_rg_gponDsBCModuleEnable_get(struct seq_file *s, void *v)
{
	int len=0;
	PROC_PRINTF("gponDsBCModuleEnable: %s\n",rg_db.systemGlobal.gponDsBCModuleEnable?"Enabled":"Disabled");
	return len;
}
int _rtk_rg_gponDsBCModuleEnable_set(struct file *filp, const char *buff,unsigned long count, void *data)
{
	rg_db.systemGlobal.gponDsBCModuleEnable=_rtk_rg_pasring_proc_string_to_integer(buff,count);
	_rtk_rg_gponDsBCModuleEnable_get(NULL,NULL);
	return count;
}

int32 _rtk_rg_procotolStackKeepCVLANOrig_get(struct seq_file *s, void *v)
{
	rtlglue_printf("%s\n",rg_db.systemGlobal.keepPsOrigCvlan?"Keep Original CVLAN Format (could be modified by ACL/CF).":"Follow Normal CVLAN Decision.");
	return 0;
}
int _rtk_rg_procotolStackKeepCVLANOrig_set(struct file *filp, const char *buff,unsigned long count, void *data)
{
	rg_db.systemGlobal.keepPsOrigCvlan=_rtk_rg_pasring_proc_string_to_integer(buff,count);
	_rtk_rg_procotolStackKeepCVLANOrig_get(NULL,NULL);
	return count;
}

int32 _rtk_rg_pppoeProxyAllowBindingOnly_get(struct seq_file *s, void *v)
{
	rtlglue_printf("%s\n",rg_db.systemGlobal.pppoeProxyAllowBindingOnly?"Only packets bound to a PPPoE WAN can go to the PPPoE proxy!":"Normal.");
	return 0;
}
int _rtk_rg_pppoeProxyAllowBindingOnly_set(struct file *filp, const char *buff,unsigned long count, void *data)
{
	rg_db.systemGlobal.pppoeProxyAllowBindingOnly=_rtk_rg_pasring_proc_string_to_integer(buff,count);
	_rtk_rg_pppoeProxyAllowBindingOnly_get(NULL,NULL);
	return count;
}

#if defined(CONFIG_APOLLO_GPON_FPGATEST)
int _rtk_rg_virtualMAC_with_PON_get(void)
{
	return rg_db.systemGlobal.virtualMAC_with_PON_switch_mask.portmask;
}

int _rtk_rg_virtualMAC_with_PON_display(struct seq_file *s, void *v)
{
	int ret=_rtk_rg_virtualMAC_with_PON_get();
	if(ret)
		rtlglue_printf("virtualMAC with PON: Turn ON at portmask %x.\n",ret);
	else
		rtlglue_printf("virtualMAC with PON: Turn Off.\n");
	return 0;
}

void _rtk_rg_virtualMAC_with_PON_manipulate(int portMask)
{
	int i;
	if(portMask){
		for(i=0;i<RTK_RG_MAC_PORT_MAX;i++){
			if(portMask==0x1<<i){
				//Reset CPU tag register to default value for correct SPA in it!
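				//Note: the current value of the CPU-tag register (0xBB023064) is saved
				//below before it is overwritten, so the original setting can be restored
				//when virtual-MAC mode is turned off again (see the restore path further
				//down in this function).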
if(!rg_db.systemGlobal.virtualMAC_with_PON_switch_mask.portmask){ rg_db.systemGlobal.virtualMAC_with_PON_cputag_reg=MEM32_READ(0xBB023064); MEM32_WRITE(0xBB023064, 0x100); //turn on cputag insertion } rg_db.systemGlobal.virtualMAC_with_PON_switch_mask.portmask=portMask; } } }else if(rg_db.systemGlobal.virtualMAC_with_PON_switch_mask.portmask){ //Turn off CPU tag register //ASSERT_EQ(ioal_socMem32_write(0xBB023064, 0x0),RT_ERR_OK); MEM32_WRITE(0xBB023064, rg_db.systemGlobal.virtualMAC_with_PON_cputag_reg); //restore original value rg_db.systemGlobal.virtualMAC_with_PON_switch_mask.portmask=0x0; if(!list_empty(&rg_db.vmacSkbListHead)) { rtk_rg_vmac_skb_linlList_t *pVmacEntry,*pNextEntry; list_for_each_entry_safe(pVmacEntry,pNextEntry,&rg_db.vmacSkbListHead,vmac_list) { //Delete from head list list_del_init(&pVmacEntry->vmac_list); dev_kfree_skb_any(pVmacEntry->skb); pVmacEntry->skb=NULL; //Add back to free list list_add(&pVmacEntry->vmac_list,&rg_db.vmacSkbListFreeListHead); } } } } int _rtk_rg_virtualMAC_with_PON_set(struct file *file, const char __user *buff, unsigned long count, void *data) { int portMask=_rtk_rg_pasring_proc_string_to_integer(buff,count); _rtk_rg_virtualMAC_with_PON_manipulate(portMask); _rtk_rg_virtualMAC_with_PON_display(); return count; } #endif //end if CONFIG_APOLLO_GPON_FPGATEST int _rtk_rg_control_hw_ttl_minus_open(struct seq_file *s, void *v) { rtk_enable_t state; ASSERT_EQ(rtk_l34_globalState_get(L34_GLOBAL_TTLMINUS_STATE, &state),RT_ERR_OK); PROC_PRINTF("HW TTL minus state:%s\n",state==ENABLED?"Enable":"Disable"); return 0; } int _rtk_rg_control_hw_ttl_minus_write(struct file *file, const char __user *buff, unsigned long count, void *data) { rtk_enable_t state; int parsing=_rtk_rg_pasring_proc_string_to_integer(buff,count); if(parsing==0) state=DISABLED; else state=ENABLED; #if !defined(CONFIG_RG_FLOW_BASED_PLATFORM) ASSERT_EQ(RTK_L34_GLOBALSTATE_SET(L34_GLOBAL_TTLMINUS_STATE,state),RT_ERR_OK); #else ASSERT_EQ(RTK_RG_ASIC_GLOBALSTATE_SET(FB_GLOBAL_TTL_1,state),RT_ERR_OK); #endif _rtk_rg_control_hw_ttl_minus_open(NULL, NULL); return count; } #if defined(CONFIG_RTL9602C_SERIES) int _rtk_rg_proc_cf_patter0_size_get(struct seq_file *s, void *v) { int len=0; if(rg_kernel.force_cf_pattern0_size_enable==1) { rtlglue_printf("force cf_pattern0_size=%d, cf_pattern1_size=%d\n",rg_kernel.cf_pattern0_size, (TOTAL_CF_ENTRY_SIZE-rg_kernel.cf_pattern0_size)); } else { rtlglue_printf("cf_pattern0_size is set as defined DEFAULT_CF_PATTERN0_ENTRY_SIZE(%d)",DEFAULT_CF_PATTERN0_ENTRY_SIZE); } return len; } int _rtk_rg_proc_force_cf_patter0_size_enable( struct file *filp, const char *buff,unsigned long len, void *data ) { int force_cf_pattern0_size_en; force_cf_pattern0_size_en = _rtk_rg_pasring_proc_string_to_integer(buff,len); if(force_cf_pattern0_size_en < 0 || force_cf_pattern0_size_en > 1) { rtlglue_printf("invalid value!"); } else { rg_kernel.force_cf_pattern0_size_enable = force_cf_pattern0_size_en; if(force_cf_pattern0_size_en==0) { //set to default value if disabled this proc rg_db.systemGlobal.cf_pattern0_size = DEFAULT_CF_PATTERN0_ENTRY_SIZE; } else { _rtk_rg_proc_cf_patter0_size_get(NULL,NULL); rtlglue_printf("Please do: echo [size] > /proc/rg/cf_patter0_size, and then do rg init."); } } return len; } int _rtk_rg_proc_cf_patter0_size_set( struct file *filp, const char *buff,unsigned long len, void *data ) { int cf_pattern0_size; cf_pattern0_size = _rtk_rg_pasring_proc_string_to_integer(buff,len); if(rg_kernel.force_cf_pattern0_size_enable==0) { 
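//Force mode has not been enabled yet; remind the user to turn on /proc/rg/force_cf_pattern0_size_state before setting a size.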
rtlglue_printf("/proc/rg/force_cf_pattern0_size_state should be enabled first!"); } else { if(cf_pattern0_size < 0 || cf_pattern0_size > TOTAL_CF_ENTRY_SIZE) { rtlglue_printf("invalid rule size!"); } else { //rg_db.systemGlobal.cf_pattern0_size = cf_pattern0_size; rg_kernel.cf_pattern0_size=cf_pattern0_size; } } _rtk_rg_proc_cf_patter0_size_get(NULL,NULL); return len; } #endif int32 _rtk_rg_proc_PreRouteCallback_get(struct seq_file *s, void *v) { int len=0; switch(rg_db.systemGlobal.demo_dpiPreRouteCallback_retValue){ case RG_FWDENGINE_PREROUTECB_CONTINUE: PROC_PRINTF("demo_PREROUTE_callback_ret: CONTINUE.\n"); break; case RG_FWDENGINE_PREROUTECB_DROP: PROC_PRINTF("demo_PREROUTE_callback_ret: DROP.\n"); break; case RG_FWDENGINE_PREROUTECB_TRAP: PROC_PRINTF("demo_PREROUTE_callback_ret: TRAP.\n"); break; default: break; } return len; } int _rtk_rg_proc_PreRouteCallback_set(struct file *filp, const char *buff,unsigned long len, void *data) { int value=_rtk_rg_pasring_proc_string_to_integer(buff,len); if(value>=0 && value<RG_FWDENGINE_PREROUTECB_END)rg_db.systemGlobal.demo_dpiPreRouteCallback_retValue=value; _rtk_rg_proc_PreRouteCallback_get(NULL,NULL); return len; } int32 _rtk_rg_proc_FwdCallback_get(struct seq_file *s, void *v) { int len=0; switch(rg_db.systemGlobal.demo_dpiFwdCallback_retValue){ case RG_FWDENGINE_FORWARDCB_FINISH_DPI: PROC_PRINTF("demo_FORWARD_callback_ret: FINISH.\n"); break; case RG_FWDENGINE_FORWARDCB_CONTINUE_DPI: PROC_PRINTF("demo_FORWARD_callback_ret: CONTINUE.\n"); break; case RG_FWDENGINE_FORWARDCB_DROP: PROC_PRINTF("demo_FORWARD_callback_ret: DROP.\n"); break; default: break; } return len; } int _rtk_rg_proc_FwdCallback_set(struct file *filp, const char *buff,unsigned long len, void *data) { int value=_rtk_rg_pasring_proc_string_to_integer(buff,len); if(value>=0 && value<RG_FWDENGINE_FORWARDCB_END)rg_db.systemGlobal.demo_dpiFwdCallback_retValue=value; _rtk_rg_proc_FwdCallback_get(NULL,NULL); return len; } #if defined(CONFIG_ROME_NAPT_SHORTCUT) || defined(CONFIG_RG_IPV6_SOFTWARE_SHORTCUT_SUPPORT) int _rtk_rg_flushShortcut_set( struct file *filp, const char *buff,unsigned long len, void *data ) { int enabled; enabled = _rtk_rg_pasring_proc_string_to_integer(buff,len); if(enabled == 1) _rtk_rg_shortCut_clear(); return len; } int _rtk_rg_flushShortcut_get(struct seq_file *s, void *v) { int len=0; PROC_PRINTF("Usage: echo 1 > /proc/rg/flush\n"); return len; } #endif int32 _rtk_rg_proc_gatherLanNetInfo_get(struct seq_file *s, void *v) { int len=0; PROC_PRINTF("%s\n",rg_db.systemGlobal.gatherLanNetInfo==1?"Turn on.":"Turn off."); return len; } int _rtk_rg_proc_gatherLanNetInfo_set(struct file *filp, const char *buff,unsigned long len, void *data) { rg_db.systemGlobal.gatherLanNetInfo=_rtk_rg_pasring_proc_string_to_integer(buff,len); return len; } int32 rtk_rg_proc_disbaled_pon_dmac2cvid_get(struct seq_file *s, void *v) { int len=0; PROC_PRINTF("%s: portmask=0x%x\n",rg_db.systemGlobal.dmac2cvidDisabledPortmask?"Turn on.":"Turn off.",rg_db.systemGlobal.dmac2cvidDisabledPortmask); return len; } int rtk_rg_proc_disbaled_pon_dmac2cvid_set(struct file *filp, const char *buff,unsigned long len, void *data) { int enable; int port; enable=_rtk_rg_pasring_proc_string_to_integer(buff,len); if(rg_db.systemGlobal.initParam.macBasedTagDecision) { //just allow wan port force disabled dmac2cvid rg_db.systemGlobal.dmac2cvidDisabledPortmask = rg_db.systemGlobal.wanPortMask.portmask; if(enable) { rtlglue_printf("Disable dmac2cvid of wan portmask=0x%x\n", 
rg_db.systemGlobal.dmac2cvidDisabledPortmask);
			for(port=0;port<RTK_RG_MAC_PORT_MAX;port++)
			{
				if(rg_db.systemGlobal.dmac2cvidDisabledPortmask & (1<<port))
				{
					ASSERT_EQ(RTK_SVLAN_DMACVIDSELSTATE_SET(port, DISABLED), RT_ERR_OK);
#if defined(CONFIG_RTL9600_SERIES) || defined(CONFIG_XDSL_NEW_HWNAT_DRIVER)
#else //support vid unmatched action
#if defined(CONFIG_RTL9602C_SERIES)
					if((rg_db.systemGlobal.internalSupportMask & RTK_RG_INTERNAL_SUPPORT_BIT1))
#endif
						ASSERT_EQ(rtk_l2_vidUnmatchAction_set(port, ACTION_FORWARD), RT_ERR_OK);
#endif
				}
			}
		}
		else
		{
			rtlglue_printf("Enable dmac2cvid of wan portmask=0x%x\n", rg_db.systemGlobal.dmac2cvidDisabledPortmask);
			for(port=0;port<RTK_RG_MAC_PORT_MAX;port++)
			{
				if(rg_db.systemGlobal.dmac2cvidDisabledPortmask & (1<<port))
				{
					ASSERT_EQ(RTK_SVLAN_DMACVIDSELSTATE_SET(port, ENABLED), RT_ERR_OK);
#if defined(CONFIG_RTL9600_SERIES) || defined(CONFIG_XDSL_NEW_HWNAT_DRIVER)
#else //support vid unmatched action
#if defined(CONFIG_RTL9602C_SERIES)
					if((rg_db.systemGlobal.internalSupportMask & RTK_RG_INTERNAL_SUPPORT_BIT1))
#endif
						ASSERT_EQ(rtk_l2_vidUnmatchAction_set(port, ACTION_TRAP2CPU), RT_ERR_OK);
#endif
				}
			}
			rg_db.systemGlobal.dmac2cvidDisabledPortmask = 0;
		}
	}
	else
	{
		rtlglue_printf("[WARNING] macBasedTagDecision is disabled, so fail!!\n");
	}
	return len;
}

int32 rtk_rg_proc_pppoe_gpon_small_bandwidth_control_get(struct seq_file *s, void *v)
{
	int len=0;
#if defined(CONFIG_RTL9600_SERIES)
	PROC_PRINTF("This proc is used for the Apollo series under GPON small bandwidth only.\nEnabling this proc will separate LAN-to-WAN forwarding into two stages: 1. use the RGMII port as WAN (CF-port), 2. redirect the packet from RGMII to PON by ACL.\n");
	PROC_PRINTF("%s\n",rg_db.systemGlobal.pppoeGponSmallbandwithControl?"Turn on.":"Turn off.");
#else
	PROC_PRINTF("This proc is only supported by the Apollo platform\n");
#endif
	return len;
}

int rtk_rg_proc_pppoe_gpon_small_bandwidth_control_set(struct file *filp, const char *buff,unsigned long len, void *data)
{
#if defined(CONFIG_RTL9600_SERIES)
	rtk_port_macAbility_t macAbility;
	rg_db.systemGlobal.pppoeGponSmallbandwithControl = _rtk_rg_pasring_proc_string_to_integer(buff,len);
	if(rg_db.systemGlobal.pppoeGponSmallbandwithControl)
	{
		_rtk_rg_aclAndCfReservedRuleAdd(RTK_RG_ACLANDCF_RESERVED_GPON_SMALL_BANDWIDTH_CONTROL,NULL);
		ASSERT_EQ(RTK_CLASSIFY_CFSEL_SET(RTK_RG_MAC_PORT_RGMII, CLASSIFY_CF_SEL_ENABLE),RT_ERR_OK);
		REG32(0xbb00014c) = 0x06; //RGMII mode GPIO change.
		REG32(0xbb021408) = 0x21; //enable RGMII loopback mode.
		//port set mac-force port 5 link-state link-up
		//port set mac-force port 5 ability 1000f flow-control disable
		bzero(&macAbility,sizeof(macAbility));
		ASSERT_EQ(RTK_PORT_MACFORCEABILITY_GET(RTK_RG_MAC_PORT_RGMII,&macAbility),RT_ERR_OK);
		macAbility.linkStatus=PORT_LINKUP;
		macAbility.duplex = PORT_FULL_DUPLEX;
		macAbility.speed = PORT_SPEED_1000M;
		macAbility.txFc = DISABLED;
		macAbility.rxFc = DISABLED;
		ASSERT_EQ(RTK_PORT_MACFORCEABILITY_SET(RTK_RG_MAC_PORT_RGMII,macAbility),RT_ERR_OK);
		//port set mac-force port 5 state enable
		ASSERT_EQ(RTK_PORT_MACFORCEABILITYSTATE_SET(RTK_RG_MAC_PORT_RGMII, ENABLED),RT_ERR_OK);
	}
	else
	{
		_rtk_rg_aclAndCfReservedRuleDel(RTK_RG_ACLANDCF_RESERVED_GPON_SMALL_BANDWIDTH_CONTROL);
		ASSERT_EQ(RTK_CLASSIFY_CFSEL_SET(RTK_RG_MAC_PORT_RGMII, CLASSIFY_CF_SEL_DISABLE),RT_ERR_OK);
		REG32(0xbb00014c) = 0x0; //RGMII mode GPIO change.
		REG32(0xbb021408) = 0x20; //disable RGMII loopback mode.
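		//The steps above undo the enable path: the reserved ACL rule is removed, CF
		//lookup on the RGMII port is disabled and the GPIO/loopback registers are
		//restored; the forced RGMII link is brought back down below.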
//port set mac-force port 5 link-state link-down bzero(&macAbility,sizeof(macAbility)); ASSERT_EQ(RTK_PORT_MACFORCEABILITY_GET(RTK_RG_MAC_PORT_RGMII,&macAbility),RT_ERR_OK); macAbility.linkStatus=PORT_LINKDOWN; ASSERT_EQ(RTK_PORT_MACFORCEABILITY_SET(RTK_RG_MAC_PORT_RGMII,macAbility),RT_ERR_OK); //port set mac-force port 5 state disable ASSERT_EQ( RTK_PORT_MACFORCEABILITYSTATE_SET(RTK_RG_MAC_PORT_RGMII, DISABLED),RT_ERR_OK); } #else rtk_rg_proc_pppoe_gpon_small_bandwidth_control_get(NULL,NULL); #endif return len; } #ifdef CONFIG_DUALBAND_CONCURRENT int _rtk_rg_enableSlaveWifiBind_get(struct seq_file *s, void *v) { if(!rg_db.systemGlobal.enableSlaveSSIDBind) PROC_PRINTF( "Disable Slave Wifi Binding.\n"); else PROC_PRINTF( "Enabled.\n"); return 0; } int _rtk_rg_enableSlaveWifiBind_set( struct file *filp, const char *buff,unsigned long len, void *data ) { rg_db.systemGlobal.enableSlaveSSIDBind = _rtk_rg_pasring_proc_string_to_integer(buff, len); #ifdef CONFIG_MASTER_WLAN0_ENABLE _rtk_rg_check_wlan_device_exist_or_not(); #endif #if defined(CONFIG_RTL9600_SERIES) if(rg_db.systemGlobal.enableSlaveSSIDBind) _rtk_rg_aclAndCfReservedRuleAdd(RTK_RG_ACLANDCF_RESERVED_EXT1_SLAVE_WIFI_TRAP, NULL); else _rtk_rg_aclAndCfReservedRuleDel(RTK_RG_ACLANDCF_RESERVED_EXT1_SLAVE_WIFI_TRAP); #endif _rtk_rg_enableSlaveWifiBind_get(NULL,NULL); return len; } #endif #ifdef CONFIG_MASTER_WLAN0_ENABLE int _rtk_rg_checkWifiDev_get(struct seq_file *s, void *v) { if(!rg_db.systemGlobal.checkWifiDev) PROC_PRINTF( "Disable Wifi device existence check.\n"); else PROC_PRINTF( "Enabled.\n"); return 0; } int _rtk_rg_checkWifiDev_set( struct file *filp, const char *buff,unsigned long len, void *data ) { rg_db.systemGlobal.checkWifiDev = _rtk_rg_pasring_proc_string_to_integer(buff, len); _rtk_rg_check_wlan_device_exist_or_not(); _rtk_rg_checkWifiDev_get(NULL,NULL); return len; } #endif int32 _rtk_rg_proc_layer2LookupMissFlood2CPU_get(struct seq_file *s, void *v) { int len=0; PROC_PRINTF("%s\n",rg_kernel.layer2LookupMissFlood2CPU==RTK_RG_ENABLED?"Turn on.":"Turn off."); PROC_PRINTF("===============================CAUTION===============================\n"); PROC_PRINTF("This functionality should configure BEFORE romeDriver initialization.\n"); PROC_PRINTF("Otherwise may cause unpredictable problem.\n"); return len; } int _rtk_rg_proc_layer2LookupMissFlood2CPU_set(struct file *filp, const char *buff,unsigned long len, void *data) { if(_rtk_rg_pasring_proc_string_to_integer(buff,len)) rg_kernel.layer2LookupMissFlood2CPU=RTK_RG_ENABLED; else rg_kernel.layer2LookupMissFlood2CPU=RTK_RG_DISABLED; _rtk_rg_proc_layer2LookupMissFlood2CPU_get(NULL,NULL); return len; } #ifdef CONFIG_RG_BRIDGE_PPP_STATUS extern unsigned int brg_ppp_state; int32 rtk_rg_proc_brg_pppstate_get(struct seq_file *s, void *v) { int len=0; //PROC_PRINTF("%d\n", brg_ppp_state==0?0:1); seq_printf(s, "%d\n", brg_ppp_state==0?0:1); return len; } #endif //================================================================== //Temporality add for IPv6 testing #if 0 /* * Convert an ASCII string to a * binary representation of mac address */ static int32 strToMac(uint8 *pMac, int8 *pStr) { int8 *ptr; uint32 k; assert (pMac != NULL); assert (pStr != NULL); bzero(pMac, sizeof(rtk_mac_t)); ptr = pStr; for ( k = 0 ; *ptr ; ptr ++ ) { if (*ptr == ' ') { } else if ( (*ptr == ':') || (*ptr == '-') ) { k ++; } else if ( ('0' <= *ptr) && (*ptr <= '9') ) { pMac[k] = (pMac[k]<<4) + (*ptr-'0'); } else if ( ('a' <= *ptr) && (*ptr <= 'f') ) { pMac[k] = (pMac[k]<<4) + (*ptr-'a'+10); } else if( 
('A' <= *ptr) && (*ptr <= 'F') ) { pMac[k] = (pMac[k]<<4) + (*ptr-'A'+10); } else { break; } } if (k != 5) { return -1; } return 0; } uint8* _rtk_rg_strtomac(rtk_mac_t *mac, int8 *str) { strToMac((uint8*)mac, str); return (uint8*)mac; } int32 _rtk_rg_testV6NeighborIdxValid(uint64 dip, uint8 rt_idx) { uint8 ifid[8]; uint16 hashidx=0; uint8 cout; uint32 neighbor_idx; int8 res=-1; rtk_ipv6Neighbor_entry_t entry; ifid[0]=(dip>>56)&0xff; ifid[1]=(dip>>48)&0xff; ifid[2]=(dip>>40)&0xff; ifid[3]=(dip>>32)&0xff; ifid[4]=(dip&0xff000000)>>24; ifid[5]=(dip&0xff0000)>>16; ifid[6]=(dip&0xff00)>>8; ifid[7]=(dip&0xff); //get hash index hashidx = _rtk_rg_IPv6NeighborHash(ifid, rt_idx); for(cout=0;cout<8;cout++) { //check for available neighbor_idx = (hashidx<<3)+cout; assert_ok(rtk_l34_ipv6NeighborTable_get(neighbor_idx, &entry)); if(entry.valid==0) { res=1; break; } else { bzero((void *)&entry, sizeof(entry)); } } if(res==-1) { printk("the neighbor_idx reached end at %d\n",neighbor_idx); return res; } else return neighbor_idx; } int rtk_rg_ipv6_show_routing(void) { #ifdef CONFIG_RG_DEBUG dump_ipv6_route_table(); #endif return 0; } int rtk_rg_ipv6_add_defaultRoute(struct file *file, const char *buffer, unsigned long count, void *data) { char tmpbuf[96]; char *strptr; unsigned int i,ret,intfIdx,l2Idx,priority; char *tmp_ptr; //unsigned long tmp_num; rtk_ipv6Routing_entry_t routeEntry; rtk_l34_netif_entry_t intfEntry; rtk_rg_macEntry_t macEntry; rtk_l34_nexthop_entry_t nxpEntry; rtk_l34_pppoe_entry_t pppoeEntry; bzero(&routeEntry,sizeof(rtk_ipv6Routing_entry_t)); bzero(&intfEntry,sizeof(rtk_l34_netif_entry_t)); bzero(&macEntry,sizeof(rtk_rg_macEntry_t)); bzero(&nxpEntry,sizeof(rtk_l34_nexthop_entry_t)); bzero(&pppoeEntry,sizeof(rtk_l34_pppoe_entry_t)); if (buffer && !copy_from_user(tmpbuf, buffer, count)) { tmpbuf[count] = '\0'; //printk("the string you enter is \"%s\"\n",tmpbuf); strptr=tmpbuf; //Get IPv6 Addr /*for(i=0;i<4;i++) { tmp_ptr = strsep(&strptr," "); if (tmp_ptr==NULL) { goto errout; } tmp_num=simple_strtol(tmp_ptr, NULL, 0); routeEntry.ipv6Addr.ipv6_addr[i<<2] =(tmp_num>>24)&0xff; routeEntry.ipv6Addr.ipv6_addr[(i<<2)+1]=(tmp_num>>16)&0xff; routeEntry.ipv6Addr.ipv6_addr[(i<<2)+2]=(tmp_num>>8)&0xff; routeEntry.ipv6Addr.ipv6_addr[(i<<2)+3]=tmp_num&0xff; }*/ for(i=0;i<16;i++) routeEntry.ipv6Addr.ipv6_addr[i]=0; //Get interface gwMac tmp_ptr = strsep(&strptr," "); if (tmp_ptr==NULL) { goto errout; } _rtk_rg_strtomac((void *)&intfEntry.gateway_mac,tmp_ptr); //Get PPPoE ID tmp_ptr = strsep(&strptr," "); if (tmp_ptr==NULL) { goto errout; } pppoeEntry.sessionID=simple_strtol(tmp_ptr, NULL, 0); //Get Lut remote WAN port tmp_ptr = strsep(&strptr," "); if (tmp_ptr==NULL) { goto errout; } macEntry.port_idx=simple_strtol(tmp_ptr, NULL, 0); //Get Lut remote gwMac tmp_ptr = strsep(&strptr," "); if (tmp_ptr==NULL) { goto errout; } _rtk_rg_strtomac((void *)&macEntry.mac,tmp_ptr); //Get WAN's VLAN id tmp_ptr = strsep(&strptr," "); if (tmp_ptr==NULL) { goto errout; } intfEntry.vlan_id=simple_strtol(tmp_ptr, NULL, 0); if(intfEntry.vlan_id==0) intfEntry.vlan_id=1; //Get WAN's VLAN priority tmp_ptr = strsep(&strptr," "); if (tmp_ptr==NULL) { goto errout; } priority=simple_strtol(tmp_ptr, NULL, 0); if(priority>0 && intfEntry.vlan_id>1) { rg_db.vlan[intfEntry.vlan_id].priorityEn=1; rg_db.vlan[intfEntry.vlan_id].priority=priority; } //###################################################### //set Lut macEntry.fid=LAN_FID; macEntry.static_entry=1; macEntry.arp_used=1; ret=rtk_rg_macEntry_add(&macEntry,&l2Idx); DEBUG("### 
add l2[%d]=%02x:%02x:%02x:%02x:%02x:%02x ###\n",l2Idx,macEntry.mac.octet[0],macEntry.mac.octet[1],macEntry.mac.octet[2], macEntry.mac.octet[3],macEntry.mac.octet[4],macEntry.mac.octet[5]); assert_ok(ret); //set interface for(i=0;i<MAX_NETIF_SW_TABLE_SIZE;i++) { if(rg_db.systemGlobal.interfaceInfo[i].valid == 0) break; } if(i==MAX_NETIF_SW_TABLE_SIZE) { printk("no available interface entry in table...\n"); goto errout; } intfIdx=i; //keep intfEntry.valid=1; intfEntry.mac_mask=0x7; //no mask //intfEntry.vlan_id=DEFAULT_LAN_VLAN; intfEntry.enable_rounting=1; intfEntry.mtu=1500; ret = RTK_L34_NETIFTABLE_SET(intfIdx, &intfEntry); assert_ok(ret); rg_db.systemGlobal.interfaceInfo[intfIdx].valid=1; //set pppoe if(pppoeEntry.sessionID != 0) { ret = RTK_L34_PPPOETABLE_SET(intfIdx, &pppoeEntry); assert_ok(ret); } //set nexthop nxpEntry.ifIdx=intfIdx; nxpEntry.type=L34_NH_ETHER; if(pppoeEntry.sessionID != 0) { nxpEntry.type=L34_NH_PPPOE; nxpEntry.pppoeIdx=intfIdx; //using interface idx as PPPoE table idx } nxpEntry.nhIdx=l2Idx; //L2 index nxpEntry.keepPppoe=0; ret = RTK_L34_NEXTHOPTABLE_SET(intfIdx, &nxpEntry); assert_ok(ret); //set ipv6 route routeEntry.nhOrIfidIdx=intfIdx; routeEntry.type=L34_IPV6_ROUTE_TYPE_GLOBAL; routeEntry.valid=1; routeEntry.rt2waninf=1; ret = RTK_L34_IPV6ROUTINGTABLE_SET(3, &routeEntry); //Default route assert_ok(ret); return count; } else { errout: printk("interfaceGMAC PPPoEID WANport RemoteMAC\n"); } return count; } int rtk_rg_ipv6_add_interfaceRoute(struct file *file, const char *buffer, unsigned long count, void *data) { char tmpbuf[96]; char *strptr; unsigned int i,ret,intfIdx,routeIdx; char *tmp_ptr; unsigned long tmp_num; rtk_ipv6Routing_entry_t routeEntry,tmpEty; rtk_l34_netif_entry_t intfEntry; rtk_rg_macEntry_t macEntry; rtk_l34_nexthop_entry_t nxpEntry; rtk_l34_pppoe_entry_t pppoeEntry; bzero(&routeEntry,sizeof(rtk_ipv6Routing_entry_t)); bzero(&intfEntry,sizeof(rtk_l34_netif_entry_t)); bzero(&macEntry,sizeof(rtk_rg_macEntry_t)); bzero(&nxpEntry,sizeof(rtk_l34_nexthop_entry_t)); bzero(&pppoeEntry,sizeof(rtk_l34_pppoe_entry_t)); if (buffer && !copy_from_user(tmpbuf, buffer, count)) { tmpbuf[count] = '\0'; //printk("the string you enter is \"%s\"\n",tmpbuf); strptr=tmpbuf; //Get IPv6 Addr for(i=0;i<4;i++) { tmp_ptr = strsep(&strptr," "); if (tmp_ptr==NULL) { goto errout; } tmp_num=simple_strtol(tmp_ptr, NULL, 0); routeEntry.ipv6Addr.ipv6_addr[i<<2] =(tmp_num>>24)&0xff; routeEntry.ipv6Addr.ipv6_addr[(i<<2)+1]=(tmp_num>>16)&0xff; routeEntry.ipv6Addr.ipv6_addr[(i<<2)+2]=(tmp_num>>8)&0xff; routeEntry.ipv6Addr.ipv6_addr[(i<<2)+3]=tmp_num&0xff; } //Get Prefix len tmp_ptr = strsep(&strptr," "); if (tmp_ptr==NULL) { goto errout; } routeEntry.ipv6PrefixLen=simple_strtol(tmp_ptr, NULL, 0); //Get WAN or not tmp_ptr = strsep(&strptr," "); if (tmp_ptr==NULL) { goto errout; } routeEntry.rt2waninf=simple_strtol(tmp_ptr, NULL, 0); //Get VLAN id if(routeEntry.rt2waninf==1) { tmp_ptr = strsep(&strptr," "); if (tmp_ptr==NULL) { goto errout; } intfEntry.vlan_id=simple_strtol(tmp_ptr, NULL, 0); } else { //LAN will use 4000 intfEntry.vlan_id=DEFAULT_LAN_VLAN; } //Get interface gwMac tmp_ptr = strsep(&strptr," "); if (tmp_ptr==NULL) { goto errout; } _rtk_rg_strtomac((void *)&intfEntry.gateway_mac,tmp_ptr); //###################################################### //set interface for(i=0;i<MAX_NETIF_SW_TABLE_SIZE;i++) { if(rg_db.systemGlobal.interfaceInfo[i].valid == 0) break; } if(i==MAX_NETIF_SW_TABLE_SIZE) { printk("no available interface entry in table...\n"); goto errout; } intfIdx=i; 
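//Reuse the first invalid interfaceInfo[] slot found above as the L34 netif table index for this route.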
//keep intfEntry.valid=1; intfEntry.mac_mask=0x7; //no mask //intfEntry.vlan_id=DEFAULT_LAN_VLAN; intfEntry.enable_rounting=1; intfEntry.mtu=1500; ret = RTK_L34_NETIFTABLE_SET(intfIdx, &intfEntry); assert_ok(ret); rg_db.systemGlobal.interfaceInfo[intfIdx].valid=1; //set ipv6 route for(i=0;i<3;i++) //last one is for default route { ret = rtk_l34_ipv6RoutingTable_get(i,&tmpEty); assert_ok(ret); if(tmpEty.valid==0) break; } if(i==3) { printk("ipv6 routing table is full...\n"); goto errout; } routeIdx=i; //keep routeEntry.nhOrIfidIdx=intfIdx; routeEntry.type=L34_IPV6_ROUTE_TYPE_LOCAL; routeEntry.valid=1; ret = RTK_L34_IPV6ROUTINGTABLE_SET(routeIdx, &routeEntry); //Default route assert_ok(ret); return count; } else { errout: printk("ip_addr1(0x..) ip_addr2(0x..) ip_addr3(0x..) ip_addr4(0x..) prefix wan_or_not interfaceGMAC \n"); } return count; } int rtk_rg_ipv6_show_neighbor(void) { #ifdef CONFIG_RG_DEBUG dump_ipv6_neighbor_table(); #endif return 0; } int rtk_rg_ipv6_add_neighbor(struct file *file, const char *buffer, unsigned long count, void *data) { char tmpbuf[96]; char *strptr; unsigned int ret,l2Idx; char *tmp_ptr; unsigned long tmp_num; rtk_ipv6Routing_entry_t routeEntry; rtk_ipv6Neighbor_entry_t neighborEntry; rtk_l34_netif_entry_t intfEntry; rtk_rg_macEntry_t macEntry; rtk_l34_nexthop_entry_t nxpEntry; rtk_l34_pppoe_entry_t pppoeEntry; bzero(&routeEntry,sizeof(rtk_ipv6Routing_entry_t)); bzero(&intfEntry,sizeof(rtk_l34_netif_entry_t)); bzero(&macEntry,sizeof(rtk_rg_macEntry_t)); bzero(&nxpEntry,sizeof(rtk_l34_nexthop_entry_t)); bzero(&pppoeEntry,sizeof(rtk_l34_pppoe_entry_t)); if (buffer && !copy_from_user(tmpbuf, buffer, count)) { tmpbuf[count] = '\0'; //printk("the string you enter is \"%s\"\n",tmpbuf); strptr=tmpbuf; //Get IPv6 IFID (64bits) tmp_ptr = strsep(&strptr," "); if (tmp_ptr==NULL) { goto errout; } tmp_num=simple_strtol(tmp_ptr, NULL, 0); neighborEntry.ipv6Ifid=tmp_num; //high part neighborEntry.ipv6Ifid=neighborEntry.ipv6Ifid<<32; tmp_ptr = strsep(&strptr," "); if (tmp_ptr==NULL) { goto errout; } tmp_num=simple_strtol(tmp_ptr, NULL, 0); neighborEntry.ipv6Ifid+=tmp_num; //low part //Get RTIdx tmp_ptr = strsep(&strptr," "); if (tmp_ptr==NULL) { goto errout; } neighborEntry.ipv6RouteIdx=simple_strtol(tmp_ptr, NULL, 0); //Get Lut remote WAN port tmp_ptr = strsep(&strptr," "); if (tmp_ptr==NULL) { goto errout; } macEntry.port_idx=simple_strtol(tmp_ptr, NULL, 0); //Get Lut remote gwMac tmp_ptr = strsep(&strptr," "); if (tmp_ptr==NULL) { goto errout; } _rtk_rg_strtomac((void *)&macEntry.mac,tmp_ptr); //###################################################### //set Lut macEntry.fid=LAN_FID; macEntry.static_entry=1; macEntry.arp_used=1; ret=rtk_rg_macEntry_add(&macEntry,&l2Idx); DEBUG("### add l2[%d]=%02x:%02x:%02x:%02x:%02x:%02x ###\n",l2Idx,macEntry.mac.octet[0],macEntry.mac.octet[1],macEntry.mac.octet[2], macEntry.mac.octet[3],macEntry.mac.octet[4],macEntry.mac.octet[5]); assert_ok(ret); //set neighbor table neighborEntry.l2Idx=l2Idx; ret = _rtk_rg_testV6NeighborIdxValid(neighborEntry.ipv6Ifid, neighborEntry.ipv6RouteIdx); if(ret==-1) { printk("the neighbor is overflow..failed!\n"); goto errout; } else { ret = RTK_L34_IPV6NEIGHBORTABLE_SET(ret, &neighborEntry); assert_ok(ret); } } else { errout: printk("ipaddr_1(0x..) ipaddr_2(0x..) 
Match_RoutingIdx port_idx L2MACAddr\n"); } return count; } #endif //================================================================== // for proc rtk_rg_proc_t liteRomeDriveProc[]= { //===========proc for test start=========== #if defined(CONFIG_RTL9602C_SERIES) { .name="test_l34" , .get=_rtk_rg_test_l34_get, .set=_rtk_rg_test_l34_set, }, #endif { .name="auto_test_result_check" , .get=_rtk_rg_auto_test_result_check_get, .set=_rtk_rg_auto_test_result_check_set, }, { .name="auto_test_fail_arp_interval_sec" , .get=_rtk_rg_auto_test_fail_arp_interval_sec_get, .set=_rtk_rg_auto_test_fail_arp_interval_sec_set, }, //===========proc for test end=========== #if defined(CONFIG_XDSL_NEW_HWNAT_DRIVER) { .name="rtk_rg_xdsl_init" , .get= NULL , .set= rtk_rg_xdsl_init , }, { .name="rtk_rg_xdsl_lanInterface_add" , .get= NULL , .set= rtk_rg_xdsl_lanInterface_add , }, { .name="rtk_rg_xdsl_wanInterface_add" , .get= NULL , .set= rtk_rg_xdsl_wanInterface_add , }, { .name="rtk_rg_xdsl_staticInfo_set" , .get= NULL , .set= rtk_rg_xdsl_staticInfo_set , }, { .name="rtk_rg_xdsl_pppoeClientInfoBeforeDial_set" , .get= NULL , .set= rtk_rg_xdsl_pppoeClientInfoBeforeDial_set , }, { .name="rtk_rg_xdsl_pppoeClientInfoAfterDial_set" , .get= NULL , .set= rtk_rg_xdsl_pppoeClientInfoAfterDial_set , }, { .name="rtk_rg_xdsl_dhcpRequest_set" , .get= NULL , .set= rtk_rg_xdsl_dhcpRequest_set , }, { .name="rtk_rg_xdsl_dhcpClientInfo_set" , .get = NULL , .set = rtk_rg_xdsl_dhcpClientInfo_set , }, { .name="rtk_rg_acl" , .get = NULL , .set = rtk_rg_xdsl_acl_cmd , }, { .name="cvlan" , .get = NULL , .set = rtk_rg_xdsl_cvlan , }, { .name="qos" , .get = NULL , .set = rtk_rg_xdsl_qos , }, #endif { .name="arp_request_interval_sec" , .get = _rtk_rg_arp_request_interval_sec_get , .set = _rtk_rg_arp_request_interval_sec_set , }, { .name="ipv6MC_tranlate_ingressVid" , .get = _rtk_rg_ipv6MC_tranlate_ingressVid_state , .set = _rtk_rg_ipv6MC_tranlate_ingressVid_control , }, { .name="unknownDA_trap_to_PS" , .get = _rtk_rg_unknowDA_trap_to_PS_state , .set = _rtk_rg_unknowDA_trap_to_PS_change , }, { .name="igmp_trap_to_PS" , .get = _rtk_rg_igmp_trap_to_PS_state , .set = _rtk_rg_igmp_trap_to_PS_change , }, { .name="mld_trap_to_PS" , .get = _rtk_rg_mld_trap_to_PS_state , .set = _rtk_rg_mld_trap_to_PS_change , }, { .name="hwnat" , .get = rtk_rg_hwnat_is_enabled , .set = rtk_rg_hwnat_enable , }, { .name="debug_level" , .get = rtk_rg_debug_level_show , .set = rtk_rg_debug_level_change , }, { .name="filter_level" , .get = rtk_rg_filter_level_show , .set = rtk_rg_filter_level_change , }, { .name="trace_filter" , .get = rtk_rg_traceFilterShow , .set = rtk_rg_traceFilterChange , }, #if defined(RTK_RG_INGRESS_QOS_TEST_PATCH) && defined(CONFIG_RTL9600_SERIES) { .name="qos_type" , .get = NULL , .set = rtk_rg_qos_type_sel , }, #endif #ifdef CONFIG_RG_CALLBACK { .name="callback" , .get = rtk_rg_callback_show , .set = NULL , }, #endif { .name="turn_on_acl_counter", .get = acl_counter_mode_get, .set = acl_counter_mode_set, }, { .name="acl_drop_ip_range_rule_handle_by_sw" , .get = acl_drop_ip_range_by_sw_get , .set = acl_drop_ip_range_by_sw_set , }, { .name="acl_permit_ip_range_rule_handle_by_sw" , .get = acl_permit_ip_range_by_sw_get , .set = acl_permit_ip_range_by_sw_set , }, { .name="qosInternalAndRemark" , .get = rtk_rg_qosInternalAndRemark_show , .set = NULL , }, { .name="acl_reserved_arrange" , .get = rtk_rg_reserved_acl_arrange_show , .set = NULL , }, { .name="acl_skip_hw_rearrange" , .get = _rtk_rg_acl_skip_hw_rearrange_get, .set = 
_rtk_rg_acl_skip_hw_rearrange_set, }, { .name="arp_timeout" , .get = _rtk_rg_get_arp_timeout , .set = _rtk_rg_set_arp_timeout , }, { .name="neighbor_timeout" , .get = _rtk_rg_get_neighbor_timeout , .set = _rtk_rg_set_neighbor_timeout , }, { .name="tcp_long_timeout" , .get = _rtk_rg_get_tcp_long_timeout , .set = _rtk_rg_set_tcp_long_timeout , }, { .name="tcp_short_timeout" , .get = _rtk_rg_get_tcp_short_timeout , .set = _rtk_rg_set_tcp_short_timeout , }, { .name="udp_long_timeout" , .get = _rtk_rg_get_udp_long_timeout , .set = _rtk_rg_set_udp_long_timeout , }, { .name="udp_short_timeout" , .get = _rtk_rg_get_udp_short_timeout , .set = _rtk_rg_set_udp_short_timeout , }, #if defined(CONFIG_RTL9600_SERIES) #else //support lut traffic bit { .name="l2_timeout" , .get = _rtk_rg_get_l2_timeout , .set = _rtk_rg_set_l2_timeout , }, #endif { .name="house_keep_sec" , .get = _rtk_rg_get_house_keep_sec , .set = _rtk_rg_set_house_keep_sec , }, { .name="igmp_sys_timer_sec" , .get = _rtk_rg_get_igmp_sys_timer_sec , .set = _rtk_rg_set_igmp_sys_timer_sec , }, { .name="igmp_max_simultaneous_group_size" , .get = _rtk_rg_get_igmp_max_simultaneous_group_size , .set = _rtk_rg_set_igmp_max_simultaneous_group_size , }, { .name="mcast_query_sec" , .get = _rtk_rg_get_mcast_query_sec , .set = _rtk_rg_set_mcast_query_sec , }, { .name="mcast_protocol" , .get = _rtk_rg_get_mcast_protocol , .set = _rtk_rg_set_mcast_protocol , }, { .name="mcast_force_report_sec" , .get = _rtk_rg_get_mcast_force_report_sec , .set = _rtk_rg_set_mcast_force_report_sec , }, { .name="unknownDA_rate_limit" , .get = rtk_rg_unknownDARateLimit_get , .set = rtk_rg_unknownDARateLimit_set , }, { .name="unknownDA_rate_limit_portMask" , .get = rtk_rg_unknownDARateLimitPortMask_get , .set = rtk_rg_unknownDARateLimitPortMask_set , }, { .name="BC_rate_limit" , .get = rtk_rg_BCRateLimit_get , .set = rtk_rg_BCRateLimit_set , }, { .name="BC_rate_limit_portMask" , .get = rtk_rg_BCRateLimitPortMask_get , .set = rtk_rg_BCRateLimitPortMask_set , }, { .name="IPv6_MC_rate_limit" , .get = rtk_rg_IPv6MCRateLimit_get , .set = rtk_rg_IPv6MCRateLimit_set , }, { .name="IPv6_MC_rate_limit_portMask" , .get = rtk_rg_IPv6MCRateLimitPortMask_get , .set = rtk_rg_IPv6MCRateLimitPortMask_set , }, { .name="IPv4_MC_rate_limit" , .get = rtk_rg_IPv4MCRateLimit_get , .set = rtk_rg_IPv4MCRateLimit_set , }, { .name="IPv4_MC_rate_limit_portMask" , .get = rtk_rg_IPv4MCRateLimitPortMask_get , .set = rtk_rg_IPv4MCRateLimitPortMask_set , }, #ifdef CONFIG_MASTER_WLAN0_ENABLE { .name="wifi_ingress_rate_limit" , .get = rtk_rg_WifiIngressRateLimit_get , .set = rtk_rg_WifiIngressRateLimit_set , }, { .name="wifi_egress_rate_limit" , .get = rtk_rg_WifiEgressRateLimit_get , .set = rtk_rg_WifiEgressRateLimit_set , }, { .name="wifi_device_enumerate" , .get = NULL , .set = rtk_rg_wifiDeviceEnumerate , }, { .name="wifi_check_device" , .get = _rtk_rg_checkWifiDev_get , .set = _rtk_rg_checkWifiDev_set , }, #endif { .name="alg_user_defined_port_num" , .get = rtk_rg_algUserDefinedPortNum_get , .set = rtk_rg_algUserDefinedPortNum_set , }, { .name="alg_user_defined_time_out" , .get = rtk_rg_algUserDefinedTimeOut_get , .set = rtk_rg_algUserDefinedTimeOut_set , }, { .name="tcp_hw_learning_at_syn" , .get = rtk_rg_tcp_hw_learning_at_syn_get , .set = rtk_rg_tcp_hw_learning_at_syn_set , }, { .name="tcp_in_shortcut_learning_at_syn" , .get = rtk_rg_tcp_in_shortcut_learning_at_syn_get , .set = rtk_rg_tcp_in_shortcut_learning_at_syn_set , }, #ifdef CONFIG_ROME_NAPT_SHORTCUT { .name="turn_off_ipv4_shortcut" , .get = 
rtk_rg_ipv4_shortcutOff_function_get , .set = rtk_rg_ipv4_shortcutOff_function_set , }, { .name="ipv4_shortcut_timeout" , .get = _rtk_rg_ipv4_shortcut_timeout_get , .set = _rtk_rg_ipv4_shortcut_timeout_set , }, #endif #ifdef CONFIG_RG_IPV6_SOFTWARE_SHORTCUT_SUPPORT { .name="turn_off_ipv6_shortcut" , .get = rtk_rg_ipv6_shortcutOff_function_get , .set = rtk_rg_ipv6_shortcutOff_function_set , }, { .name="ipv6_shortcut_timeout" , .get = _rtk_rg_ipv6_shortcut_timeout_get , .set = _rtk_rg_ipv6_shortcut_timeout_set , }, #endif #if defined(CONFIG_RG_FLOW_AUTO_AGEOUT) { .name="flow_timeout" , .get = _rtk_rg_flow_timeout_get , .set = _rtk_rg_flow_timeout_set , }, #endif { .name="strange_packet_drop" , .get = _rtk_rg_strangePacketDrop_state , .set = _rtk_rg_strangePacketDrop_change , }, #if defined(CONFIG_APOLLO) { .name="portBindingByProtocal" , .get = _rtk_rg_portBindingByProtocal_state , .set = _rtk_rg_portBindingByProtocal_change , }, { .name="portBindingByProtocal_filter_downstream_vid" , .get = _rtk_rg_portBindingByProtocal_filter_vid_for_downstream_get , .set = _rtk_rg_portBindingByProtocal_filter_vid_for_downstream_set , }, #endif { .name="l4ways" , .get = _rtk_rg_proc_l4ways_dump , .set = _rtk_rg_proc_l4ways_set , }, { .name="l4ways_list" , .get = _rtk_rg_proc_l4waysList_get , .set = _rtk_rg_proc_l4waysList_set , }, { .name="l4_choice_hw_in" , .get = _rtk_rg_proc_l4ChoiceHwIn_get , .set = _rtk_rg_proc_l4ChoiceHwIn_set , }, { .name="tcp_do_not_del_when_rst_fin" , .get = _rtk_rg_proc_tcpDoNotDelWhenRstFin_get , .set = _rtk_rg_proc_tcpDoNotDelWhenRstFin_set , }, { .name="tcp_swap_fin_del_rst" , .get = _rtk_rg_proc_tcpSwapFinDelRst_get , .set = _rtk_rg_proc_tcpSwapFinDelRst_set , }, { .name="tcp_disable_stateful_tracking" , .get = _rtk_rg_proc_tcpDisableStatefulTracking_get , .set = _rtk_rg_proc_tcpDisableStatefulTracking_set , }, { .name="fwd_statistic" , .get = _rtk_rg_proc_fwdStatistic_get , .set = _rtk_rg_proc_fwdStatistic_set , }, { .name="tcp_short_timeout_housekeep_jiffies" , .get = _rtk_rg_proc_tcpShortTimeoutHouseKeep_get , .set = _rtk_rg_proc_tcpShortTimeoutHouseKeep_set , }, { .name="congestion_ctrl_interval_usec" , .get = _rtk_rg_proc_congestionCtrl_get , .set = _rtk_rg_proc_congestionCtrlHwTimerFunc_set , }, { .name="congestion_ctrl_send_times_per_port" , .get = _rtk_rg_proc_congestionCtrl_get , .set = _rtk_rg_proc_congestionCtrlSendTimesPerPort_set , }, { .name="congestion_ctrl_send_byte_per_sec" , .get = _rtk_rg_proc_congestionCtrl_get , .set = _rtk_rg_proc_congestionCtrlSendBytePerSec_set , }, { .name="congestion_ctrl_send_byte_per_sec_for_wan" , .get = _rtk_rg_proc_congestionCtrl_get , .set = _rtk_rg_proc_congestionCtrlSendBytePerSecForWan_set , }, { .name="congestion_ctrl_port_mask" , .get = _rtk_rg_proc_congestionCtrl_get , .set = _rtk_rg_proc_congestionCtrlPortMask_set , }, { .name="congestion_ctrl_inbound_ack_to_high_queue" , .get = _rtk_rg_proc_congestionCtrl_get , .set = _rtk_rg_proc_congestionCtrlInboundAckToHighQueue_set , }, { .name="congestion_ctrl_send_remainder_in_next_gap" , .get = _rtk_rg_proc_congestionCtrl_get , .set = _rtk_rg_proc_congestionCtrlSendRemainderInNextGap_set , }, { .name="wanIntf_disable_ipv6_linkLocal_rsvACL" , .get = _wanIntf_disable_add_ipv6_linkLocal_state_get , .set = _wanIntf_disable_add_ipv6_linkLocal_state_set , }, { .name="bridgeWan_drop_by_protocal" , .get = _bridgeWan_drop_by_protocal_get , .set = _bridgeWan_drop_by_protocal_set , }, { .name="turn_off_arp_hw_traffic_info" , .get = _rtk_rg_proc_turnOffARPTrafficInfo_get , .set = 
_rtk_rg_proc_turnOffARPTrafficInfo_set , }, { .name="arp_max_request_count" , .get = _rtk_rg_proc_ARPMaxRequestCount_get , .set = _rtk_rg_proc_ARPMaxRequestCount_set , }, { .name="mac_anti_spoofing_enable", .get=_rtk_rg_proc_MacAntiSpoof_get, .set=_rtk_rg_proc_MacAntiSpoof_set, }, { .name="ip_anti_spoofing_enable", .get=_rtk_rg_proc_IpAntiSpoof_get, .set=_rtk_rg_proc_IpAntiSpoof_set, }, { .name="remove_l34_tag_for_same_mac" , .get = _rtk_rg_proc_RemoveL34TagOption_get , .set = _rtk_rg_proc_RemoveL34TagOption_set , }, { .name="urlFilter_mode" , .get = _rtk_rg_proc_UrlFilterMode_get , .set = _rtk_rg_proc_UrlFilterMode_set , }, { .name="trap_syn_and_disable_svlan" , .get = _rtk_rg_proc_trapSynState_get , .set = _rtk_rg_proc_trapSynState_set , }, { .name="assign_ack_priority_and_disable_svlan" , .get = _rtk_rg_proc_assignAckPriority_get , .set = _rtk_rg_proc_assignAckPriority_set , }, { .name="redirect_first_http_req_by_mac" , .get = _rtk_rg_proc_redirectFirstHttpMAC_show , .set = _rtk_rg_proc_redirectFirstHttpMAC_modify , }, { .name="redirect_first_http_req_set_url" , .get = _rtk_rg_proc_redirectFirstHttpURL_show , .set = _rtk_rg_proc_redirectFirstHttpURL_modify , }, { .name="trap_lan_enable" , .get = _rtk_rg_proc_trapLan_get , .set = _rtk_rg_proc_trapLan_set , }, { .name="trap_lan_add_host" , .get = _rtk_rg_proc_trapLan_show , .set = _rtk_rg_proc_trapLanAdd_set , }, { .name="trap_lan_del_host" , .get = _rtk_rg_proc_trapLan_show , .set = _rtk_rg_proc_trapLanDel_set , }, { .name="stag_enable" , .get = rtk_rg_stag_is_enabled , .set = rtk_rg_stag_enable , }, { .name="send_from_cpu" , .get = NULL , .set = _rtk_rg_proc_sendFromCpu , }, { .name="wifi_tx_redirect_to_port0" , .get = _rtk_rg_proc_wifiTxRedirect_get , .set = _rtk_rg_proc_wifiTxRedirect_set , }, { .name="pon_port_unmatch_cf_drop" , .get = _rtk_rg_proc_ponPortUnmatchCfDrop_get , .set = _rtk_rg_proc_ponPortUnmatchCfDrop_set , }, { .name="ps_rx_mirror_to_port0" , .get = _rtk_rg_proc_psRxMirrorToPort0_get , .set = _rtk_rg_proc_psRxMirrorToPort0_set , }, { .name="proc_to_pipe" , .get = _rtk_rg_proc_procToPipe_get , .set = _rtk_rg_proc_procToPipe_set , }, #if !defined(CONFIG_OPENWRT_RG) { .name="dump_ps_rx_pkt" , .get = _rtk_rg_proc_dumpPSRxPkt_get , .set = _rtk_rg_proc_dumpPSRxPkt_set , }, { .name="dump_ps_tx_pkt" , .get = _rtk_rg_proc_dumpPSTxPkt_get , .set = _rtk_rg_proc_dumpPSTxPkt_set , }, #endif #if defined(CONFIG_RG_IGMP_SNOOPING) || defined(CONFIG_RG_MLD_SNOOPING) { .name="igmp_report_ingress_filter_portmask" , .get = _rtk_rg_igmp_report_ingress_filter_portmask_get , .set = _rtk_rg_igmp_report_ingress_filter_portmask_set , }, { .name="igmp_report_filter_portmask" , .get = _rtk_rg_igmp_report_filter_portmask_get , .set = _rtk_rg_igmp_report_filter_portmask_set , }, { .name="igmp_mld_query_filter_portmask" , .get = _rtk_rg_igmp_mld_query_filter_portmask_get , .set = _rtk_rg_igmp_mld_query_filter_portmask_set , }, #endif { .name="gponDsBCModuleEnable" , .get = _rtk_rg_gponDsBCModuleEnable_get , .set = _rtk_rg_gponDsBCModuleEnable_set , }, { .name="acl_rg_add_parameter_dump" , .get = _rtk_rg_aclDumpParameterEnable_get , .set = _rtk_rg_aclDumpParameterEnable_set , }, { .name="pppoe_bc_passthrought_to_bindingWan" , .get = _rtk_rg_pppoe_bc_passthrought_to_bindingWan_get , .set = _rtk_rg_pppoe_bc_passthrought_to_bindingWan_set , }, { .name="pppoe_mc_routing_trap" , .get = _rtk_rg_pppoe_mc_routing_trap_state_get , .set = _rtk_rg_pppoe_mc_routing_trap_state_set , }, { .name="keep_protocol_stack_packets_orig_cvlan" , .get = 
_rtk_rg_procotolStackKeepCVLANOrig_get , .set = _rtk_rg_procotolStackKeepCVLANOrig_set , }, { .name="pppoe_proxy_only_for_binding_packet" , .get = _rtk_rg_pppoeProxyAllowBindingOnly_get , .set = _rtk_rg_pppoeProxyAllowBindingOnly_set , }, #if defined(CONFIG_APOLLO_GPON_FPGATEST) { .name="virtualMAC_with_PON" , .get = _rtk_rg_virtualMAC_with_PON_display , .set = _rtk_rg_virtualMAC_with_PON_set , }, #endif { .name="control_hw_TTL_minus" , .get = _rtk_rg_control_hw_ttl_minus_open , .set = _rtk_rg_control_hw_ttl_minus_write , }, #if defined(CONFIG_RTL9602C_SERIES) { .name="force_cf_pattern0_size_state" , .get = _rtk_rg_proc_cf_patter0_size_get, .set = _rtk_rg_proc_force_cf_patter0_size_enable, }, { .name="cf_pattern0_size" , .get = _rtk_rg_proc_cf_patter0_size_get, .set = _rtk_rg_proc_cf_patter0_size_set, }, #endif { .name="dpi_callback_preroute" , .get = _rtk_rg_proc_PreRouteCallback_get , .set = _rtk_rg_proc_PreRouteCallback_set , }, { .name="dpi_callback_forward" , .get = _rtk_rg_proc_FwdCallback_get , .set = _rtk_rg_proc_FwdCallback_set , }, #if defined(CONFIG_ROME_NAPT_SHORTCUT) || defined(CONFIG_RG_IPV6_SOFTWARE_SHORTCUT_SUPPORT) { .name="flush_shortcut" , .get = _rtk_rg_flushShortcut_get, .set = _rtk_rg_flushShortcut_set, }, #endif { .name="gather_lanNetInfo" , .get = _rtk_rg_proc_gatherLanNetInfo_get, .set = _rtk_rg_proc_gatherLanNetInfo_set, }, #ifdef CONFIG_APOLLOPRO_FPGA { .name="fpga_testing" , .get = NULL, .set = single_test, }, #endif #ifdef CONFIG_RG_BRIDGE_PPP_STATUS { .name="brg_pppstate" , .get = rtk_rg_proc_brg_pppstate_get, .set = NULL, }, #endif { .name="wan_dmac2cvid_force_disabled" , .get = rtk_rg_proc_disbaled_pon_dmac2cvid_get, .set = rtk_rg_proc_disbaled_pon_dmac2cvid_set, }, { .name="pppoe_gpon_small_bandwidth_control" , .get = rtk_rg_proc_pppoe_gpon_small_bandwidth_control_get, .set = rtk_rg_proc_pppoe_gpon_small_bandwidth_control_set, }, #ifdef CONFIG_DUALBAND_CONCURRENT { .name="slaveWifiBind" , .get = _rtk_rg_enableSlaveWifiBind_get , .set = _rtk_rg_enableSlaveWifiBind_set , }, #endif { .name="port_range_used_by_ps" , .get = _rtk_rg_proc_port_range_used_by_protocolStack_get , .set = _rtk_rg_proc_port_range_used_by_protocolStack_set , }, { .name="log_rx_pcap" , .get = _rtk_rg_proc_log_rx_pcap_get , .set = _rtk_rg_proc_log_rx_pcap_set , }, { .name="log_to_PS_pcap" , .get = _rtk_rg_proc_log_to_PS_pcap_get , .set = _rtk_rg_proc_log_to_PS_pcap_set , }, { .name="layer2LookupMissFlood2CPU" , .get = _rtk_rg_proc_layer2LookupMissFlood2CPU_get , .set = _rtk_rg_proc_layer2LookupMissFlood2CPU_set , }, }; static int NULL_liteRome_get(struct seq_file *s, void *v){ return 0;} static int NULL_liteRome_single_open(struct inode *inode, struct file *file){return(single_open(file, NULL_liteRome_get, NULL));} static int common_liteRome_single_open(struct inode *inode, struct file *file) { int i; for( i=0; i< (sizeof(liteRomeDriveProc)/sizeof(rtk_rg_proc_t)) ;i++) { //printk("common_single_open inode_id=%u i_ino=%u\n",liteRomeDriveProc[i].inode_id,(unsigned int)inode->i_ino); if(liteRomeDriveProc[i].inode_id==(unsigned int)inode->i_ino) { return(single_open(file, liteRomeDriveProc[i].get, NULL)); } } return -1; } static ssize_t common_liteRome_single_write(struct file * file, const char __user * userbuf, size_t count, loff_t * off) { int i; for( i=0; i< (sizeof(liteRomeDriveProc)/sizeof(rtk_rg_proc_t)) ;i++) { //printk("common_single_write inode_id=%u i_ino=%u\n",liteRomeDriveProc[i].inode_id,(unsigned int)file->f_dentry->d_inode->i_ino); if(liteRomeDriveProc[i].inode_id==(unsigned 
int)file->f_dentry->d_inode->i_ino) { return liteRomeDriveProc[i].set(file,userbuf,count,off); } } return -1; } void rg_system_proc_init(void){ struct proc_dir_entry *p;//,*start; int32 i; if(rg_kernel.proc_rg==NULL) rg_kernel.proc_rg = proc_mkdir("rg", NULL); /* if ( create_proc_read_entry ("start", 0644, rg_kernel.proc_rg, (read_proc_t *)rtk_rg_fwdEngine_start, (void *)NULL) == NULL ) { printk("create proc rg/start failed!\n"); } */ for( i=0; i< (sizeof(liteRomeDriveProc)/sizeof(rtk_rg_proc_t)) ;i++) { if(liteRomeDriveProc[i].get==NULL) liteRomeDriveProc[i].proc_fops.open=NULL_liteRome_single_open; else liteRomeDriveProc[i].proc_fops.open=common_liteRome_single_open; if(liteRomeDriveProc[i].set==NULL) liteRomeDriveProc[i].proc_fops.write=NULL; else liteRomeDriveProc[i].proc_fops.write=common_liteRome_single_write; liteRomeDriveProc[i].proc_fops.read=seq_read; liteRomeDriveProc[i].proc_fops.llseek=seq_lseek; liteRomeDriveProc[i].proc_fops.release=single_release; p = proc_create_data(liteRomeDriveProc[i].name, 0644, rg_kernel.proc_rg , &(liteRomeDriveProc[i].proc_fops),NULL); if(!p){ printk("create proc rg/%s failed!\n",liteRomeDriveProc[i].name); } liteRomeDriveProc[i].inode_id = p->low_ino; } } #endif int _rtk_rg_globalVariableReset() { int i; //#if defined(CONFIG_RG_IGMP_SNOOPING) // extern struct timer_list mCastQuerytimer; //#endif rtk_rg_sipDipClassification_t sipDipClass[MAX_SIP_CLASS][MAX_DIP_CLASS]= /* DIP: NPI, NI, LP, RP, NPE, NE */ {{SIP_DIP_CLASS_ROUTING, SIP_DIP_CLASS_ROUTING, SIP_DIP_CLASS_ROUTING, SIP_DIP_CLASS_NAPT , SIP_DIP_CLASS_CPU ,SIP_DIP_CLASS_CPU}, ///NPI {SIP_DIP_CLASS_ROUTING, SIP_DIP_CLASS_ROUTING, SIP_DIP_CLASS_ROUTING, SIP_DIP_CLASS_NAT , SIP_DIP_CLASS_CPU ,SIP_DIP_CLASS_CPU}, ///NI {SIP_DIP_CLASS_ROUTING, SIP_DIP_CLASS_ROUTING, SIP_DIP_CLASS_ROUTING, SIP_DIP_CLASS_ROUTING, SIP_DIP_CLASS_ROUTING,SIP_DIP_CLASS_ROUTING}, ///LP {SIP_DIP_CLASS_CPU , SIP_DIP_CLASS_CPU , SIP_DIP_CLASS_ROUTING, SIP_DIP_CLASS_ROUTING, SIP_DIP_CLASS_NAPTR ,SIP_DIP_CLASS_NATR}}; ///RP #ifdef __KERNEL__ #if defined(CONFIG_RG_NAPT_AUTO_AGEOUT) || defined(CONFIG_RG_LAYER2_SOFTWARE_LEARN) || defined(CONFIG_RG_ARP_AUTO_AGEOUT) || defined(CONFIG_RG_FLOW_AUTO_AGEOUT) //20140507LUKE:delete and re-init house keep after all data structure is ready! del_timer(&rg_kernel.fwdEngineHouseKeepingTimer); #endif for(i=0;i<(MAX_NETIF_SW_TABLE_SIZE<<1);i++) { del_timer(&rg_kernel.arpRequestTimer[i]); del_timer(&rg_kernel.neighborDiscoveryTimer[i]); } //#if defined(CONFIG_RG_IGMP_SNOOPING) // del_timer(&mCastQuerytimer); //#endif #endif if(rg_db.systemGlobal.log_rx_pcap_fp!=NULL) { filp_close(rg_db.systemGlobal.log_rx_pcap_fp, NULL); rg_db.systemGlobal.log_rx_pcap_fp = NULL; } if(rg_db.systemGlobal.log_to_PS_pcap_fp!=NULL) { filp_close(rg_db.systemGlobal.log_to_PS_pcap_fp, NULL); rg_db.systemGlobal.log_to_PS_pcap_fp = NULL; } // stop congestion control timer before clearing database REG32(TC2INT)=0x10000; REG32(TC2CTL)=200; REG32(TC2DATA)=1; REG32(TC2CTL)|=0x10000000; //Clean all rg_db variables!! 
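	//Everything that must be released before the wipe (pcap file handles, pending
	//timers, the congestion-control hardware timer) was handled above; the bzero()
	//below clears the whole rtk_rg_globalDatabase_t, and the defaults that must
	//survive a re-init are rebuilt in the remainder of this function.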
bzero(&rg_db,sizeof(rtk_rg_globalDatabase_t)); //initialize log rx pcap rg_db.systemGlobal.log_rx_pcap = 0; rg_db.systemGlobal.log_rx_pcap_fp = NULL; //initialize log to PS pcap rg_db.systemGlobal.log_to_PS_pcap = 0; rg_db.systemGlobal.log_to_PS_pcap_fp = NULL; //Reset what we need here memcpy(rg_db.systemGlobal.sipDipClass,sipDipClass,sizeof(sipDipClass)); rg_db.systemGlobal.rxInfoFromWLAN.rx_fs=1; rg_db.systemGlobal.rxInfoFromWLAN.rx_ls=1; rg_db.systemGlobal.rxInfoFromWLAN.rx_origformat=1; //from wifi should consider as original #if defined(CONFIG_RTL9607C_SERIES) rg_db.systemGlobal.rxInfoFromWLAN.rx_src_port_num=RTK_RG_MAC_PORT_MASTERCPU_CORE0; rg_db.systemGlobal.rxInfoFromWLAN.rx_extspa=RTK_RG_MAC_EXT_PORT0; #else rg_db.systemGlobal.rxInfoFromWLAN.rx_src_port_num=RTK_RG_PORT_CPU; rg_db.systemGlobal.rxInfoFromWLAN.rx_dst_port_mask=0x8; //from EXT1 #endif #ifdef CONFIG_DUALBAND_CONCURRENT rg_db.systemGlobal.rxInfoFromSlaveWLAN.rx_fs=1; rg_db.systemGlobal.rxInfoFromSlaveWLAN.rx_ls=1; rg_db.systemGlobal.rxInfoFromSlaveWLAN.rx_origformat=1; //from wifi should consider as original #if defined(CONFIG_RTL9607C_SERIES) rg_db.systemGlobal.rxInfoFromSlaveWLAN.rx_src_port_num=RTK_RG_MAC_PORT_SLAVECPU; rg_db.systemGlobal.rxInfoFromSlaveWLAN.rx_extspa=RTK_RG_MAC_EXT_PORT0; #else rg_db.systemGlobal.rxInfoFromSlaveWLAN.rx_src_port_num=RTK_RG_PORT_CPU; rg_db.systemGlobal.rxInfoFromSlaveWLAN.rx_dst_port_mask=0x10; //from EXT2 #endif #endif // CONFIG_DUALBAND_CONCURRENT //init longest idle time of napt index rg_db.longestIdleNaptIdx = FAIL; #ifdef CONFIG_APOLLO_MODEL //init hwnat to DISABLE rg_db.systemGlobal.hwnat_enable=RG_HWNAT_DISABLE; #else //init hwnat to ENABLE rg_db.systemGlobal.hwnat_enable=RG_HWNAT_ENABLE; #endif rg_db.systemGlobal.unknownDA_Trap_to_PS_enable=RTK_RG_DISABLED; //init vlan-binding free link list INIT_LIST_HEAD(&rg_db.vlanBindingFreeListHead); //init vlan-binding head table for(i=0;i<RTK_RG_PORT_MAX;i++) INIT_LIST_HEAD(&rg_db.vlanBindingListHead[i]); //init vlan-binding free table for(i=0;i<MAX_BIND_SW_TABLE_SIZE;i++) { INIT_LIST_HEAD(&rg_db.vlanBindingFreeList[i].vbd_list); rg_db.vlanBindingFreeList[i].wanIdx=-1; rg_db.vlanBindingFreeList[i].vlanId=0; //add free list to free list head list_add_tail(&rg_db.vlanBindingFreeList[i].vbd_list,&rg_db.vlanBindingFreeListHead); } //init software arp free link list INIT_LIST_HEAD(&rg_db.softwareArpFreeListHead); //init software arp head table for(i=0;i<MAX_ARP_SW_TABLE_HEAD;i++) INIT_LIST_HEAD(&rg_db.softwareArpTableHead[i]); for(i=0;i<(MAX_ARP_SW_TABLE_SIZE-MAX_ARP_HW_TABLE_SIZE);i++) { INIT_LIST_HEAD(&rg_db.softwareArpFreeList[i].arp_list); rg_db.softwareArpFreeList[i].idx=MAX_ARP_HW_TABLE_SIZE; rg_db.softwareArpFreeList[i].idx+=i; //software index after hw index //add free list to free list head list_add_tail(&rg_db.softwareArpFreeList[i].arp_list,&rg_db.softwareArpFreeListHead); } #if defined(CONFIG_RTL9602C_SERIES) //init hardware arp free link list INIT_LIST_HEAD(&rg_db.hardwareArpFreeListHead); //init hardware arp head table for(i=0;i<MAX_ARP_HW_TABLE_HEAD;i++) INIT_LIST_HEAD(&rg_db.hardwareArpTableHead[i]); #if defined(CONFIG_APOLLO_FPGA_PHY_TEST) for(i=0;i<MAX_ARP_HW_TABLE_SIZE_FPGA;i++) #else for(i=0;i<MAX_ARP_HW_TABLE_SIZE;i++) #endif { INIT_LIST_HEAD(&rg_db.hardwareArpFreeList[i].arp_list); rg_db.hardwareArpFreeList[i].idx=i; //hw index //add free list to free list head list_add_tail(&rg_db.hardwareArpFreeList[i].arp_list,&rg_db.hardwareArpFreeListHead); } #endif //init software lut free link list 
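	//Software LUT entries are kept on a free list (softwareLutFreeListHead) and are
	//indexed starting at MAX_LUT_HW_TABLE_SIZE, so they never collide with hardware
	//L2 table indexes.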
INIT_LIST_HEAD(&rg_db.softwareLutFreeListHead); //init software lut head table for(i=0;i<MAX_LUT_SW_TABLE_HEAD;i++) INIT_LIST_HEAD(&rg_db.softwareLutTableHead[i]); for(i=0;i<MAX_LUT_SW_LIST_SIZE;i++) { INIT_LIST_HEAD(&rg_db.softwareLutFreeList[i].lut_list); rg_db.softwareLutFreeList[i].idx=MAX_LUT_HW_TABLE_SIZE+i; //software index after hw index //add free list to free list head list_add_tail(&rg_db.softwareLutFreeList[i].lut_list,&rg_db.softwareLutFreeListHead); } #if defined(CONFIG_RG_FLOW_BASED_PLATFORM) && defined(CONFIG_RG_FLOW_4K_MODE) //init flow Tcam free link list INIT_LIST_HEAD(&rg_db.flowTcamFreeListHead); //init flow Tcam head table for(i=0;i<(MAX_FLOW_TABLE_SIZE>>MAX_FLOW_WAYS_SHIFT);i++) INIT_LIST_HEAD(&rg_db.flowTcamListHead[i]); for(i=0;i<MAX_FLOW_TCAM_TABLE_SIZE;i++) { INIT_LIST_HEAD(&rg_db.flowTcamList[i].flowTcam_list); rg_db.flowTcamList[i].idx=MAX_FLOW_TABLE_SIZE; rg_db.flowTcamList[i].idx+=i; //add free list to free list head list_add_tail(&rg_db.flowTcamList[i].flowTcam_list,&rg_db.flowTcamFreeListHead); } #endif #if defined(CONFIG_RTL9600_SERIES) //init software lut bcam link list head INIT_LIST_HEAD(&rg_db.lutBCAMLinkListHead); INIT_LIST_HEAD(&rg_db.lutBCAMChosenLinkListHead); for(i=0;i<MAX_LUT_BCAM_TABLE_SIZE;i++) { INIT_LIST_HEAD(&rg_db.lutBCAMLinkList[i].lut_list); rg_db.lutBCAMLinkList[i].idx=i+MAX_LUT_HW_TABLE_SIZE-MAX_LUT_BCAM_TABLE_SIZE; //add to list list_add_tail(&rg_db.lutBCAMLinkList[i].lut_list,&rg_db.lutBCAMLinkListHead); } #else //support lut traffic bit //init software lut cam free link list INIT_LIST_HEAD(&rg_db.lutBCAMFreeListHead); //init software lut cam head table for(i=0;i<((MAX_LUT_HW_TABLE_SIZE-MAX_LUT_BCAM_TABLE_SIZE)>>2);i++) INIT_LIST_HEAD(&rg_db.lutBCAMTableHead[i]); #if defined(CONFIG_APOLLO_FPGA_PHY_TEST) for(i=0;i<MAX_LUT_BCAM_TABLE_SIZE_FPGA;i++) #else for(i=0;i<MAX_LUT_BCAM_TABLE_SIZE;i++) #endif { INIT_LIST_HEAD(&rg_db.lutBCAMLinkList[i].lut_list); rg_db.lutBCAMLinkList[i].idx=i+MAX_LUT_HW_TABLE_SIZE-MAX_LUT_BCAM_TABLE_SIZE; //add free list to free list head list_add_tail(&rg_db.lutBCAMLinkList[i].lut_list,&rg_db.lutBCAMFreeListHead); } INIT_LIST_HEAD(&rg_db.hostPoliceCountListHead); for(i=0;i<HOST_POLICING_TABLE_SIZE;i++){ INIT_LIST_HEAD(&rg_db.hostPoliceList[i].host_list); rg_db.hostPoliceList[i].idx=i; } #endif INIT_LIST_HEAD(&rg_db.redirectHttpURLListHead); INIT_LIST_HEAD(&rg_db.redirectHttpURLFreeListHead); INIT_LIST_HEAD(&rg_db.redirectHttpWhiteListListHead); INIT_LIST_HEAD(&rg_db.redirectHttpWhiteListFreeListHead); for(i=0;i<MAX_REDIRECT_URL_NUM;i++){ INIT_LIST_HEAD(&rg_db.redirectHttpURLFreeList[i].url_list); //add free list to free list head list_add_tail(&rg_db.redirectHttpURLFreeList[i].url_list,&rg_db.redirectHttpURLFreeListHead); } for(i=0;i<MAX_REDIRECT_WHITE_LIST_NUM;i++){ INIT_LIST_HEAD(&rg_db.redirectHttpWhiteListFreeList[i].white_list); //add free list to free list head list_add_tail(&rg_db.redirectHttpWhiteListFreeList[i].white_list,&rg_db.redirectHttpWhiteListFreeListHead); } //20151123LUKE: init all ALG wellknwon port and timeout to default value. 
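	//These defaults can be overridden at runtime through the alg_user_defined_port_num
	//and alg_user_defined_time_out proc entries registered in liteRomeDriveProc[] above.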
    //20151123LUKE: init all ALG well-known ports and timeouts to default values.
    for(i=0;i<MAX_ALG_FUNCTIONS;i++)
    {
        switch(i)
        {
        //Server in WAN
        case RTK_RG_ALG_SIP_TCP:
            rg_db.algUserDefinedPort[i]=RTK_RG_ALG_SIP_TCP_PORT;	rg_db.algUserDefinedTimeout[i]=ALG_SIP_UPNP_TIMEOUT;	break;
        case RTK_RG_ALG_SIP_UDP:
            rg_db.algUserDefinedPort[i]=RTK_RG_ALG_SIP_UDP_PORT;	rg_db.algUserDefinedTimeout[i]=FAIL;	break;
        case RTK_RG_ALG_H323_TCP:
            rg_db.algUserDefinedPort[i]=RTK_RG_ALG_H323_TCP_PORT;	rg_db.algUserDefinedTimeout[i]=ALG_H323_UPNP_TIMEOUT;	break;
        case RTK_RG_ALG_H323_UDP:
            rg_db.algUserDefinedPort[i]=RTK_RG_ALG_H323_UDP_PORT;	rg_db.algUserDefinedTimeout[i]=FAIL;	break;
        case RTK_RG_ALG_RTSP_TCP:
            rg_db.algUserDefinedPort[i]=RTK_RG_ALG_RTSP_TCP_PORT;	rg_db.algUserDefinedTimeout[i]=ALG_RTSP_UPNP_TIMEOUT;	break;
        case RTK_RG_ALG_RTSP_UDP:
            rg_db.algUserDefinedPort[i]=RTK_RG_ALG_RTSP_UDP_PORT;	rg_db.algUserDefinedTimeout[i]=FAIL;	break;
        case RTK_RG_ALG_FTP_TCP:
            rg_db.algUserDefinedPort[i]=RTK_RG_ALG_FTP_TCP_PORT;	rg_db.algUserDefinedTimeout[i]=ALG_FTP_PASV_UPNP_TIMEOUT;	break;
        case RTK_RG_ALG_FTP_UDP:
            rg_db.algUserDefinedPort[i]=RTK_RG_ALG_FTP_UDP_PORT;	rg_db.algUserDefinedTimeout[i]=FAIL;	break;
        //Server in LAN
        case RTK_RG_ALG_SIP_TCP_SRV_IN_LAN:
            rg_db.algUserDefinedPort[i]=RTK_RG_ALG_SIP_TCP_PORT;	rg_db.algUserDefinedTimeout[i]=FAIL;	break;
        case RTK_RG_ALG_SIP_UDP_SRV_IN_LAN:
            rg_db.algUserDefinedPort[i]=RTK_RG_ALG_SIP_UDP_PORT;	rg_db.algUserDefinedTimeout[i]=FAIL;	break;
        case RTK_RG_ALG_H323_TCP_SRV_IN_LAN:
            rg_db.algUserDefinedPort[i]=RTK_RG_ALG_H323_TCP_PORT;	rg_db.algUserDefinedTimeout[i]=FAIL;	break;
        case RTK_RG_ALG_H323_UDP_SRV_IN_LAN:
            rg_db.algUserDefinedPort[i]=RTK_RG_ALG_H323_UDP_PORT;	rg_db.algUserDefinedTimeout[i]=FAIL;	break;
        case RTK_RG_ALG_RTSP_TCP_SRV_IN_LAN:
            rg_db.algUserDefinedPort[i]=RTK_RG_ALG_RTSP_TCP_PORT;	rg_db.algUserDefinedTimeout[i]=FAIL;	break;
        case RTK_RG_ALG_RTSP_UDP_SRV_IN_LAN:
            rg_db.algUserDefinedPort[i]=RTK_RG_ALG_RTSP_UDP_PORT;	rg_db.algUserDefinedTimeout[i]=FAIL;	break;
        case RTK_RG_ALG_FTP_TCP_SRV_IN_LAN:
            rg_db.algUserDefinedPort[i]=RTK_RG_ALG_FTP_TCP_PORT;	rg_db.algUserDefinedTimeout[i]=ALG_FTP_ACTIVE_UPNP_TIMEOUT;	break;
        case RTK_RG_ALG_FTP_UDP_SRV_IN_LAN:
            rg_db.algUserDefinedPort[i]=RTK_RG_ALG_FTP_UDP_PORT;	rg_db.algUserDefinedTimeout[i]=FAIL;	break;
        //Pass through
        case RTK_RG_ALG_PPTP_TCP_PASSTHROUGH:
            rg_db.algUserDefinedPort[i]=RTK_RG_ALG_PPTP_PASSTHROUGH_TCP_PORT;	rg_db.algUserDefinedTimeout[i]=FAIL;	break;
        case RTK_RG_ALG_PPTP_UDP_PASSTHROUGH:
            rg_db.algUserDefinedPort[i]=RTK_RG_ALG_PPTP_PASSTHROUGH_UDP_PORT;	rg_db.algUserDefinedTimeout[i]=FAIL;	break;
        case RTK_RG_ALG_L2TP_TCP_PASSTHROUGH:
            rg_db.algUserDefinedPort[i]=RTK_RG_ALG_L2TP_PASSTHROUGH_TCP_PORT;	rg_db.algUserDefinedTimeout[i]=FAIL;	break;
        case RTK_RG_ALG_L2TP_UDP_PASSTHROUGH:
            rg_db.algUserDefinedPort[i]=RTK_RG_ALG_L2TP_PASSTHROUGH_UDP_PORT;	rg_db.algUserDefinedTimeout[i]=FAIL;	break;
        case RTK_RG_ALG_IPSEC_TCP_PASSTHROUGH:
            rg_db.algUserDefinedPort[i]=RTK_RG_ALG_IPSEC_PASSTHROUGH_TCP_PORT;	rg_db.algUserDefinedTimeout[i]=FAIL;	break;
        case RTK_RG_ALG_IPSEC_UDP_PASSTHROUGH:
            rg_db.algUserDefinedPort[i]=RTK_RG_ALG_IPSEC_PASSTHROUGH_UDP_PORT;	rg_db.algUserDefinedTimeout[i]=FAIL;	break;
#ifdef CONFIG_RG_ROMEDRIVER_ALG_BATTLENET_SUPPORT
        case RTK_RG_ALG_BATTLENET_TCP:
            rg_db.algUserDefinedPort[i]=RTK_RG_ALG_BATTLENET_TCP_PORT;	rg_db.algUserDefinedTimeout[i]=ALG_BATTLENET_UPNP_TIMEOUT;	break;
#endif
        default:
            break;
        }
    }

#if defined(CONFIG_APOLLO_ROMEDRIVER) || defined(CONFIG_XDSL_ROMEDRIVER)
    _rtk_rg_fwdEngineGlobalVariableReset();
#endif
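    //The two arrays filled above form the ALG lookup table: algUserDefinedPort[]
    //holds the well-known port each ALG watches and algUserDefinedTimeout[] its
    //ALG-specific timeout (FAIL presumably meaning "no special timeout").
    //Disabled sketch of matching a destination port against the table; illustration
    //only -- the real matching is done by the forwarding engine, and the example
    //port value is arbitrary.
#if 0
    {
        int alg;
        unsigned short dport=5060; //example destination port (SIP)
        for(alg=0;alg<MAX_ALG_FUNCTIONS;alg++)
        {
            if(rg_db.algUserDefinedPort[alg]!=0 && rg_db.algUserDefinedPort[alg]==dport)
                printk("port %u matches ALG index %d (timeout %d)\n",dport,alg,rg_db.algUserDefinedTimeout[alg]);
        }
    }
#endif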
    //reset rg_db's default pktHdr to point at pktHeader_1
    rg_db.pktHdr=&rg_db.systemGlobal.pktHeader_1;

    //reset non-binding portmask
    rg_db.systemGlobal.non_binding_pmsk.portmask=0xfff;

#ifdef __KERNEL__
    //20140507LUKE: delete and re-init the housekeeping timer only after all data structures are ready!
    if(timer_pending(&rg_kernel.fwdEngineHouseKeepingTimer))
        del_timer(&rg_kernel.fwdEngineHouseKeepingTimer);
    init_timer(&rg_kernel.fwdEngineHouseKeepingTimer);
    rg_kernel.fwdEngineHouseKeepingTimer.function = rtk_rg_fwdEngineHouseKeepingTimerFunc;
    if(rg_db.systemGlobal.house_keep_sec==0)
        rg_db.systemGlobal.house_keep_sec=RTK_RG_DEFAULT_HOUSE_KEEP_SECOND;
    mod_timer(&rg_kernel.fwdEngineHouseKeepingTimer, jiffies+(rg_db.systemGlobal.house_keep_sec*TICKTIME_PERIOD));
#endif

#if 0//def CONFIG_GPON_FEATURE
    //initial untagged bridge GPON WAN's index
    rg_db.systemGlobal.untagBridgeGponWanIdx=-1;
#endif

    return (RT_ERR_RG_OK);
}

//Parse a colon-separated MAC address string (e.g. "00:e0:4c:86:70:01") into pMacEntry.
void _rtk_rg_str2mac(unsigned char *mac_string,rtk_mac_t *pMacEntry)
{
    int i,j=0,k=0;
    memset(pMacEntry,0,sizeof(rtk_mac_t));
    for(i=0;i<strlen(mac_string);i++)
    {
        if(mac_string[i]==':')
        {
            j=0;
            continue;
        }
        else if((mac_string[i]>='A')&&(mac_string[i]<='F'))
            pMacEntry->octet[k]+=(mac_string[i]-'A'+10);
        else if((mac_string[i]>='a')&&(mac_string[i]<='f'))
            pMacEntry->octet[k]+=(mac_string[i]-'a'+10);
        else if((mac_string[i]>='0')&&(mac_string[i]<='9'))
            pMacEntry->octet[k]+=(mac_string[i]-'0');
        else
            DEBUG("str2mac MAC string parsing error!");

        if(j==0)
            pMacEntry->octet[k]<<=4; //first nibble of this octet: shift to the high half
        if(j==1)
            k++; //second nibble done: advance to the next octet
        j++;
    }
}

#if 1
extern void rtk_rg_isakmp_system_proc_init(void);
#endif

#ifdef __KERNEL__
static intrBcasterNotifier_t linkChangeNotifier = {
    .notifyType = MSG_TYPE_LINK_CHANGE,
    .notifierCb = _rtk_rg_switchLinkChangeHandler,
};
#endif

#ifdef CONFIG_APOLLO_MODEL
int rtk_rg_api_module_init(void)
#else
int rtk_rg_apollo_api_module_init(void)
#endif
{
#ifdef __KERNEL__
#if 0 //SRAM mapping
    //map sram
    uint32 __sram=(u32)(&rg_db)&0xffff8000;
    uint32 __sram2=(u32)__sram+0x2000;
    printk("8K SRAM mapping to 0x%x\n",__sram);
    REG32(0xb8004020) = (((unsigned int )__sram)&(0x1ffffffe))|1;
    REG32(0xb8004024) = 0x06; //8k
    REG32(0xb8004028) = 0x32000; //offset=192K+8K
    //unmap dram
    REG32(0xb8001320) = (((unsigned int )__sram)&(0x1ffffffe))|1;
    REG32(0xb8001324) = 0x06; //8k
    printk("16K SRAM mapping to 0x%x\n",__sram2);
    REG32(0xb8004030) = (((unsigned int )__sram2)&(0x1ffffffe))|1;
    REG32(0xb8004034) = 0x07; //16k
    REG32(0xb8004038) = 0x34000; //offset=192K+8K+8K
    //unmap dram
    REG32(0xb8001330) = (((unsigned int )__sram2)&(0x1ffffffe))|1;
    REG32(0xb8001334) = 0x07; //16k
#endif

    //Clear all kernel-related variables
    bzero(&rg_kernel,sizeof(rtk_rg_globalKernel_t));

    rg_kernel.rxInfoFromPS.rx_fs=1;
    rg_kernel.rxInfoFromPS.rx_ls=1;
#if defined(CONFIG_RTL9607C_SERIES)
    rg_kernel.rxInfoFromPS.rx_src_port_num=RTK_RG_MAC_PORT_MASTERCPU_CORE0;
    rg_kernel.rxInfoFromPS.rx_extspa=RTK_RG_MAC_EXT_PORT_MAX;
#else
    rg_kernel.rxInfoFromPS.rx_src_port_num=RTK_RG_PORT_CPU;
    rg_kernel.rxInfoFromPS.rx_dst_port_mask=0x20;
#endif
    rg_kernel.rxInfoFromPS.rx_igrLocation=0x0; //from protocol stack

    rg_kernel.rxInfoFromARPND.rx_fs=1;
    rg_kernel.rxInfoFromARPND.rx_ls=1;
#if defined(CONFIG_RTL9607C_SERIES)
    rg_kernel.rxInfoFromARPND.rx_src_port_num=RTK_RG_MAC_PORT_MASTERCPU_CORE0;
    rg_kernel.rxInfoFromARPND.rx_extspa=RTK_RG_MAC_EXT_PORT_MAX;
#else
    rg_kernel.rxInfoFromARPND.rx_src_port_num=RTK_RG_PORT_CPU;
    rg_kernel.rxInfoFromARPND.rx_dst_port_mask=0x20;
#endif
    rg_kernel.rxInfoFromARPND.rx_igrLocation=0x1; //from ARP or ND

    rg_kernel.rxInfoFromIGMPMLD.rx_fs=1;
    rg_kernel.rxInfoFromIGMPMLD.rx_ls=1;
#if defined(CONFIG_RTL9607C_SERIES)
    rg_kernel.rxInfoFromIGMPMLD.rx_src_port_num=RTK_RG_MAC_PORT_MASTERCPU_CORE0;
    rg_kernel.rxInfoFromIGMPMLD.rx_extspa=RTK_RG_MAC_EXT_PORT_MAX;
#else
    rg_kernel.rxInfoFromIGMPMLD.rx_src_port_num=RTK_RG_PORT_CPU;
    rg_kernel.rxInfoFromIGMPMLD.rx_dst_port_mask=0x20;
#endif
    rg_kernel.rxInfoFromIGMPMLD.rx_igrLocation=0x2; //from IGMP or MLD
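    //Three CPU-tag templates are prepared above; which one applies to a re-injected
    //packet is recorded in rx_igrLocation (0x0: protocol stack, 0x1: ARP/ND,
    //0x2: IGMP/MLD). Disabled sanity dump of the templates, illustration only:
#if 0
    printk("rxInfoFromPS       igrLocation=0x%x\n",rg_kernel.rxInfoFromPS.rx_igrLocation);
    printk("rxInfoFromARPND    igrLocation=0x%x\n",rg_kernel.rxInfoFromARPND.rx_igrLocation);
    printk("rxInfoFromIGMPMLD  igrLocation=0x%x\n",rg_kernel.rxInfoFromIGMPMLD.rx_igrLocation);
#endif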
    rg_kernel.arp_number_for_LAN=MAX_ARP_FOR_LAN_INTF;
    rg_kernel.arp_number_for_WAN=MAX_ARP_FOR_WAN_INTF;
#endif

    //replace rtk_rg_initParam_set init, for when CONFIG_APOLLO_TESTING is disabled.
    virtualmacEnable = DISABLE;

#if 0
    assert_ok(rtk_init());
    assert_ok(rtk_l34_init());
    assert_ok(rtk_l2_init());
    assert_ok(rtk_l2_addr_delAll(ENABLED));
#endif

#ifdef CONFIG_APOLLO_MODEL
#else
    rg_system_proc_init();
#endif

#ifdef CONFIG_RG_DEBUG
    //init proc system
    rg_proc_init();
#endif

#if 1
    rtk_rg_isakmp_system_proc_init();
#endif

#ifdef __KERNEL__
    //init semaphores used in liteRomeDriver
    /*sema_init(&rg_kernel.wanStaticCalled, 1);
    sema_init(&rg_kernel.wanDHCPCalled, 1);
    sema_init(&rg_kernel.wanPPPOEAfterCalled, 1);
    sema_init(&rg_kernel.interfaceLock, 1);*/
    //init_MUTEX(&rg_kernel.wanStaticCalled);
    //init_MUTEX(&rg_kernel.wanDsliteCalled);
    //init_MUTEX(&rg_kernel.wanDHCPCalled);
    //init_MUTEX(&rg_kernel.wanPPPOEAfterCalled);
    //init_MUTEX(&rg_kernel.wanPPTPAfterCalled);
    //init_MUTEX(&rg_kernel.wanL2TPAfterCalled);
    //init_MUTEX(&rg_kernel.wanPPPOEDSLITEAfterCalled);
    //init_MUTEX(&rg_kernel.interfaceLock);
    spin_lock_init(&rg_kernel.initLock);//init_MUTEX(&rg_kernel.initLock);
    //init IPv4 fragment locks
    spin_lock_init(&rg_kernel.ipv4FragLock);//init_MUTEX(&rg_kernel.ipv4FragLock);
    spin_lock_init(&rg_kernel.ipv4FragFreeLock);//init_MUTEX(&rg_kernel.ipv4FragFreeLock);
    spin_lock_init(&rg_kernel.ipv4FragQueueLock);//init_MUTEX(&rg_kernel.ipv4FragQueueLock);
    spin_lock_init(&rg_kernel.naptTableLock);//init_MUTEX(&rg_kernel.naptTableLock);
#ifdef CONFIG_RG_IPV6_STATEFUL_ROUTING_SUPPORT
    spin_lock_init(&rg_kernel.ipv6StatefulLock);//init_MUTEX(&rg_kernel.ipv6StatefulLock);
    spin_lock_init(&rg_kernel.ipv6FragQueueLock);//init_MUTEX(&rg_kernel.ipv6FragQueueLock);
#endif
    spin_lock_init(&rg_kernel.algDynamicLock);//init_MUTEX(&rg_kernel.algDynamicLock);
    //init SA learning lock
    //sema_init(&rg_kernel.saLearningLimitLock, 1);
    //init link-down indicator lock
    //sema_init(&rg_kernel.linkChangeHandlerLock, 1);
#endif

#ifdef CONFIG_SMP
    spin_lock_init(&rg_kernel.rg_tasklet_queue_lock);
    memset(&rg_kernel.rg_tasklet_data,0,sizeof(rg_kernel.rg_tasklet_data));
    rg_kernel.rg_tasklets.data=(unsigned long)&rg_kernel.rg_tasklet_data;
    rg_kernel.rg_tasklets.func=(void (*)(unsigned long))_rtk_rg_tasklet_queue_func;
    atomic_set(&rg_kernel.rg_tasklet_queue_entrance,0);
#endif

    //Clear all software-related variables
    _rtk_rg_globalVariableReset();

    rg_kernel.debug_level|=RTK_RG_DEBUG_LEVEL_WARN;
    rg_kernel.filter_level=0;
    rg_kernel.traceFilterRuleMask=0x1; //enable rule 0
    rg_kernel.stag_enable =
#if defined(CONFIG_GPON_FEATURE) || defined(CONFIG_EPON_FEATURE)
        RTK_RG_ENABLED;
#else
        RTK_RG_DISABLED;
#endif
    rg_kernel.layer2LookupMissFlood2CPU = RTK_RG_DISABLED;
    rg_kernel.cp3_execute_count_state=0;
    rg_kernel.cp3_execute_count=0;

    //turn on the link-change ISR mask to receive intr bcaster events
    ASSERT_EQ(rtk_intr_imr_set(INTR_TYPE_LINK_CHANGE,ENABLED),RT_ERR_OK);
    //register for link-down events
    ASSERT_EQ(intr_bcaster_notifier_cb_register(&linkChangeNotifier),RT_ERR_OK);

    //Create a single-threaded work queue for callbacks
    rg_kernel.rg_callbackWQ = create_singlethread_workqueue("RG_CB_WQ");
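    //rg_callbackWQ is the single-threaded context in which user callbacks run.
    //Minimal sketch (disabled) of deferring work onto it with the standard
    //workqueue API; illustration only -- the real work items and their handler
    //live elsewhere in the driver, and cbWork/_rg_cb_work_func are hypothetical names.
#if 0
    {
        static struct work_struct cbWork;
        INIT_WORK(&cbWork,_rg_cb_work_func);         //hypothetical handler: void _rg_cb_work_func(struct work_struct *)
        queue_work(rg_kernel.rg_callbackWQ,&cbWork); //executed by the RG_CB_WQ kernel thread
    }
#endif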
    //mark every slot in the callback-union pool as unused
    for(rg_kernel.rg_cbUnionCurrentIdx=CONFIG_RG_CALLBACK_WQ_TOTAL_SIZE;rg_kernel.rg_cbUnionCurrentIdx>0;rg_kernel.rg_cbUnionCurrentIdx--)
        atomic_set(&rg_kernel.rg_cbUnionArray[rg_kernel.rg_cbUnionCurrentIdx-1].not_used,1);

    return SUCCESS;
}

#ifdef __KERNEL__
EXPORT_SYMBOL(_rtk_rg_NAPTRemoteHash_get);
EXPORT_SYMBOL(_rtk_rg_arpAndMacEntryAdd);
EXPORT_SYMBOL(_rtk_rg_arpGeneration);
EXPORT_SYMBOL(_rtk_rg_naptConnection_add);
EXPORT_SYMBOL(rg_db);
EXPORT_SYMBOL(_rtk_rg_l3lookup);
EXPORT_SYMBOL(_rtk_rg_naptTcpUdpOutHashIndex);
EXPORT_SYMBOL(_rtk_rg_naptTcpUdpOutHashIndexLookup);
EXPORT_SYMBOL(_rtk_rg_naptTcpUdpInHashIndex);
#endif
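//rg_db is exported above so other kernel modules (e.g. a WLAN driver) can reach
//the RG database directly. Minimal sketch (disabled) of such a consumer; the
//consumer function name is hypothetical and the include is assumed to provide
//the rtk_rg_globalDatabase_t type -- illustration only, not part of this driver.
#if 0
#include <rtk_rg_liteRomeDriver.h>

extern rtk_rg_globalDatabase_t rg_db; //resolved through EXPORT_SYMBOL(rg_db)

static int example_check_hwnat(void)
{
    //read a field initialized during RG init
    return (rg_db.systemGlobal.hwnat_enable==RG_HWNAT_ENABLE);
}
#endif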