/* (15 #include directives: the header names are missing from the source) */
#if defined(CONFIG_ARCH_CORTINA_G3LITE) && defined(CONFIG_RTK_DEV_AP)
/* (two more #include directives, names also missing) */
#endif

char printtmp[256];

/* Format a message into printtmp, convert a trailing '\n' into a space, and
 * print it with the [HWNAT] prefix plus caller/line information. */
#define G3INITPRINT(comment, arg...) \
do {\
	int mt_trace_i;\
	snprintf(printtmp, sizeof(printtmp), comment, ##arg);\
	for(mt_trace_i = 1; mt_trace_i < (int)sizeof(printtmp); mt_trace_i++) \
	{ \
		if(printtmp[mt_trace_i] == 0) \
		{ \
			if(printtmp[mt_trace_i-1] == '\n') printtmp[mt_trace_i-1] = ' '; \
			else break; \
		} \
	} \
	printk("[HWNAT] %s @%s(%d)\n", printtmp, __FUNCTION__, __LINE__);\
} while(0)

#if defined(CONFIG_RG_G3_L2FE_POL_OFFLOAD)
#define LAN_PORT_SHAPER_IPG_ID 3

uint32_t rtk_rg_g3_l3te_shaper_init(int portIdx)
{
	// enable the L3TM shaper (1Gbps by default), shaping L3QM -> p6 traffic
	ca_port_id_t port_id;
	aal_l3_te_shaper_tbc_cfg_msk_t shaper_msk;
	aal_l3_te_shaper_tbc_cfg_t shaper;
	rtk_port_speed_t linkSpeed;
	rtk_port_duplex_t linkDuplex;

	if((portIdx < G3_LOOPBACK_P_START) || (portIdx > G3_LOOPBACK_P_END))
		return CA_E_OK;

	shaper_msk.u32 = 0;
	memset(&shaper, 0, sizeof(shaper));
	shaper_msk.s.bs = 1;
	shaper_msk.s.rate_k = 1;
	shaper_msk.s.rate_m = 1;
	shaper_msk.s.mode = 1;
	shaper_msk.s.state = 1;

	// ca_port_shaper_set
	shaper.bs = 3; //(1048568/1000 + 511) >> 9;
	shaper.rate_k = 256;
	shaper.rate_m = 1000;
	shaper.mode = CA_AAL_SHAPER_MODE_BPS;
	shaper.state = CA_AAL_SHAPER_ADMIN_STATE_SHAPER;

	aal_l3_te_shaper_ipq_set(G3_DEF_DEVID, LAN_PORT_SHAPER_IPG_ID, 20-4 /* 4 bytes for fake vlan overhead */);

	if((rtk_port_speedDuplex_get(portIdx, &linkSpeed, &linkDuplex)) == RT_ERR_OK)
	{
		// scale the token bucket to the negotiated link speed
		switch(linkSpeed)
		{
			case PORT_SPEED_10M:
				shaper.bs = 1;
				shaper.rate_k = 16;
				shaper.rate_m = 1024;
				break;
			case PORT_SPEED_100M:
				shaper.bs = 2;
				shaper.rate_k = 64;
				shaper.rate_m = 100;
				break;
			case PORT_SPEED_1000M:
			default:
				shaper.bs = 3;
				shaper.rate_k = 256;
				shaper.rate_m = 1000;
				break;
		}
	}
	else
	{
		// keep the 1Gbps default
	}

	port_id = portIdx + CA_AAL_L3_TE_NI_PORT_OFFSET;
	ASSERT_EQ(aal_l3_te_shaper_port_tbc_set(G3_DEF_DEVID, port_id, shaper_msk, &shaper), CA_E_OK);
	ASSERT_EQ(aal_l3_te_shaper_port_ipq_set(G3_DEF_DEVID, port_id, LAN_PORT_SHAPER_IPG_ID), CA_E_OK);

	return CA_E_OK;
}

uint32_t rtk_rg_g3_l2fe_hostpolicing_init(void)
{
	ca_status_t ret = CA_E_OK;
	int i = 0;

	{	// enable l2fe tpid parsing for 0x88a8
		//Bit[3]: TPID == 8100;
		//Bit[2]: TPID == 9100;
		//Bit[1]: TPID == 88A8;
		//Bit[0]: TPID == Value of Register FE_PR_FE_TPID_CMP_C
		ASSERT_EQ(ca_l2_vlan_outer_tpid_add(G3_DEF_DEVID, G3_LOOPBACK_P_NEWSPA, 1), CA_E_OK);
	}
	{	// enable port 6 loopback
		aal_ni_xge_cmn_cfg_t xge_cmn_cfg;
		aal_ni_xge_cmn_cfg_mask_t xge_cmn_cfg_mask;

		xge_cmn_cfg_mask.u32 = 0;
		memset(&xge_cmn_cfg, 0, sizeof(xge_cmn_cfg));
		xge_cmn_cfg_mask.s.core_tx2rx_lb = 1;
		xge_cmn_cfg.core_tx2rx_lb = 1;
		aal_ni_xge_cmn_config_set(G3_DEF_DEVID, 0, xge_cmn_cfg_mask, &xge_cmn_cfg);
		aal_fdb_port_limit_set(G3_DEF_DEVID, G3_LOOPBACK_P_NEWSPA, 0);
	}
	{	// redefine the qm-to-tm port mapping
		L2TM_L2TM_BM_DQ_TO_TM_PORT_MAP_t map;

		map.wrd = rtk_ne_reg_read(L2TM_L2TM_BM_DQ_TO_TM_PORT_MAP);
		map.bf.tm_dpid_sel_0 = AAL_NI_PORT_XGE1;
		map.bf.tm_dpid_sel_1 = AAL_NI_PORT_XGE1;
		map.bf.tm_dpid_sel_2 = AAL_NI_PORT_XGE1;
		map.bf.tm_dpid_sel_3 = AAL_NI_PORT_XGE1;
		rtk_ne_reg_write(map.wrd, L2TM_L2TM_BM_DQ_TO_TM_PORT_MAP);
	}
	{	// enable the L3TM shaper (1Gbps), shaping L3QM -> p6 traffic
		for(i = G3_LOOPBACK_P_START; i <= G3_LOOPBACK_P_END; i++)
		{
			rtk_rg_g3_l3te_shaper_init(i);
		}
	}
	{	// use the VE (vlan engine) to redirect downstream packets to {ldpid, cos} according to the fake vid
		int queue_id = 0, fake_vid = 0;
		aal_l2_vlan_fib_map_t vlanfibmap;
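		/*
		 * How the downstream redirect works (per the block comment above):
		 * each {LAN port, queue} pair gets its own fake vid from
		 * G3_LOOPBACK_DOWNSTREAM_VID(port, queue); the vlan engine then pops
		 * that tag and forwards the frame to {dpid = port, cos = queue}.
		 * The exact vid encoding is owned by the macro; this block only
		 * relies on each pair's vid being unique.
		 */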
		aal_l2_vlan_fib_map_mask_t vlanfibmapmask;
		aal_l2_vlan_fib_mask_t vlanfibmask;
		aal_l2_vlan_fib_t vlanfib;
		aal_ilpb_cfg_msk_t ilpbcfg_msk;
		aal_ilpb_cfg_t ilpbcfg;

		// reset all vlan configuration before creating new entries
		ASSERT_EQ(ca_l2_vlan_delete_all(G3_DEF_DEVID), CA_E_OK);
		for(i = G3_LOOPBACK_P_START; i <= G3_LOOPBACK_P_END; i++)
		{
			for(queue_id = 0; queue_id <= 7; queue_id++)
			{
				fake_vid = G3_LOOPBACK_DOWNSTREAM_VID(i, queue_id);
				// create the fake vlan; it only works on lan ports
				ASSERT_EQ(ca_l2_vlan_create(G3_DEF_DEVID, fake_vid), CA_E_OK);

				vlanfibmapmask.u32 = 0;
				vlanfibmapmask.s.valid = 1;
				vlanfibmap.valid = 0;
				aal_l2_vlan_fib_map_set(G3_DEF_DEVID, AAL_L2_VLAN_TYPE_WAN_SVLAN, fake_vid, vlanfibmapmask, &vlanfibmap);
				aal_l2_vlan_fib_map_set(G3_DEF_DEVID, AAL_L2_VLAN_TYPE_WAN_CVLAN, fake_vid, vlanfibmapmask, &vlanfibmap);

				// enable the vlan engine to pop the fake vlan and redirect to the specific dest port
				vlanfibmask.u32 = 0;
				vlanfibmask.s.dpid_field_vld = 1;
				vlanfibmask.s.dpid = 1;
				vlanfibmask.s.cos_field_vld = 1;
				vlanfibmask.s.cos = 1;
				vlanfibmask.s.top_vlan_cmd_field_vld = 1;
				vlanfibmask.s.top_vlan_cmd = 1;
				vlanfibmask.s.permit = 1;
				vlanfib.dpid_field_vld = 1;
				vlanfib.dpid = i;
				vlanfib.cos_field_vld = 1;
				vlanfib.cos = queue_id;
				vlanfib.top_vlan_cmd_field_vld = 1;
				vlanfib.top_vlan_cmd = 2;	//0:nop; 1:push; 2:pop; 3:swap
				vlanfib.permit = 1;
				aal_l2_vlan_fib_map_get(G3_DEF_DEVID, AAL_L2_VLAN_TYPE_LAN_VLAN, fake_vid, &vlanfibmap);
				//G3INITPRINT("create fake vid %d, fibid = %d (valid:%d)", fake_vid, vlanfibmap.fib_id, vlanfibmap.valid);
				aal_l2_vlan_action_cfg_set(G3_DEF_DEVID, AAL_L2_VLAN_TYPE_LAN_VLAN, vlanfibmap.fib_id, vlanfibmask, &vlanfib);
			}

			// For upstream loopback, make ports 0~3 vlan-unaware to prevent
			// incorrect vlan operations on priority-tagged (ptag) packets.
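			// With every TPID match bit cleared below, no outer ethertype is
			// recognized as a vlan tag on these ports, so tagged and
			// priority-tagged frames pass through as if untagged.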
			{
				memset(&ilpbcfg_msk, 0, sizeof(ilpbcfg_msk));
				memset(&ilpbcfg, 0, sizeof(ilpbcfg));
				ilpbcfg_msk.s.s_tpid_match = 1;
				ilpbcfg_msk.s.c_tpid_match = 1;
				ilpbcfg_msk.s.other_tpid_match = 1;
				ilpbcfg.s_tpid_match = 0x0;
				ilpbcfg.c_tpid_match = 0x0;
				ilpbcfg.other_tpid_match = 0x0;
				aal_port_ilpb_cfg_set(G3_DEF_DEVID, i, ilpbcfg_msk, &ilpbcfg);
			}
		}

		memset(&ilpbcfg_msk, 0, sizeof(ilpbcfg_msk));
		memset(&ilpbcfg, 0, sizeof(ilpbcfg));
		ilpbcfg_msk.s.cos_mode_sel = 1;
		ilpbcfg_msk.s.cos_control_bm = 1;
		ilpbcfg.cos_mode_sel = 2;	// remarking from cls/vlan
		ilpbcfg.cos_control_bm = 2;	// allow ve only
		aal_port_ilpb_cfg_set(G3_DEF_DEVID, G3_LOOPBACK_P_NEWSPA, ilpbcfg_msk, &ilpbcfg);
	}
	{	//DMAC host policing: use the egress packet length for rate limiting
		//with fake vlan --> L2FE --> without fake vlan
		aal_l2_te_policer_cfg_t cfg;
		aal_l2_te_policer_cfg_msk_t msk;
		int flow_id;

		memset((void *)&cfg, 0, sizeof(cfg));
		memset((void *)&msk, 0, sizeof(msk));
		msk.s.pkt_len_sel = 1;
		cfg.pkt_len_sel = 1;
		for(flow_id = 0 + G3_FLOW_POLICER_IDXSHIFT_HOSTPOLMTR_TX; flow_id < (G3_FLOW_POLICER_HOSTPOLICING_SIZE + G3_FLOW_POLICER_IDXSHIFT_HOSTPOLMTR_TX); flow_id++)
			ASSERT_EQ(aal_l2_te_policer_flow_cfg_set(G3_DEF_DEVID, flow_id, msk, &cfg), CA_E_OK);
	}
	{	//disable the port 6 L2/L3 port policers
		aal_l2_te_policer_cfg_msk_t l2_policer_msk;
		aal_l2_te_policer_cfg_t l2_policer_cfg;
		aal_l3_te_policer_cfg_msk_t l3_policer_msk;
		aal_l3_te_policer_cfg_t l3_policer_cfg;

		memset((void *)&l2_policer_msk, 0, sizeof(l2_policer_msk));
		memset((void *)&l2_policer_cfg, 0, sizeof(l2_policer_cfg));
		l2_policer_msk.s.type = 1;
		l2_policer_cfg.type = CA_AAL_POLICER_TYPE_NONE;
		ASSERT_EQ(aal_l2_te_policer_port_cfg_set(G3_DEF_DEVID, G3_LOOPBACK_P_NEWSPA, l2_policer_msk, &l2_policer_cfg), CA_E_OK);

		memset((void *)&l3_policer_msk, 0, sizeof(l3_policer_msk));
		memset((void *)&l3_policer_cfg, 0, sizeof(l3_policer_cfg));
		l3_policer_msk.s.type = 1;
		l3_policer_cfg.type = CA_AAL_L3_POLICER_TYPE_NONE;
		ASSERT_EQ(aal_l3_te_policer_port_cfg_set(G3_DEF_DEVID, G3_LOOPBACK_P_NEWSPA, l3_policer_msk, &l3_policer_cfg), CA_E_OK);
	}

	return ret;
}
#endif

#if defined(CONFIG_RG_G3_L3FE_MC_DEEPQ)
uint32_t rtk_rg_g3_l3fe_mcbuffering_init(void)
{
	{	// enable port 6 loopback
		aal_ni_xge_cmn_cfg_t xge_cmn_cfg;
		aal_ni_xge_cmn_cfg_mask_t xge_cmn_cfg_mask;

		xge_cmn_cfg_mask.u32 = 0;
		memset(&xge_cmn_cfg, 0, sizeof(xge_cmn_cfg));
		xge_cmn_cfg_mask.s.core_tx2rx_lb = 1;
		xge_cmn_cfg.core_tx2rx_lb = 1;
		aal_ni_xge_cmn_config_set(0, 0, xge_cmn_cfg_mask, &xge_cmn_cfg);
	}
	{	// enable the L3TM shaper (1Gbps), shaping L3QM -> p6 traffic
		ca_port_id_t port_id = CA_NI_PORT6;
		aal_l3_te_shaper_tbc_cfg_msk_t shaper_msk;
		aal_l3_te_shaper_tbc_cfg_t shaper;

		shaper_msk.u32 = 0;
		memset(&shaper, 0, sizeof(shaper));
		shaper_msk.s.bs = 1;
		shaper_msk.s.rate_k = 1;
		shaper_msk.s.rate_m = 1;
		shaper_msk.s.mode = 1;
		shaper_msk.s.state = 1;

		// ca_port_shaper_set
		shaper.bs = 3; //(1048568/1000 + 511) >> 9;
		shaper.rate_k = 256;
		shaper.rate_m = 1024;
		shaper.mode = CA_AAL_SHAPER_MODE_BPS;
		shaper.state = CA_AAL_SHAPER_ADMIN_STATE_SHAPER;
		ASSERT_EQ(aal_l3_te_shaper_port_tbc_set(G3_DEF_DEVID, port_id, shaper_msk, &shaper), CA_E_OK);
	}
	{	// in order to buffer mc traffic in the L3QM, enable deepq and tx to port 6 (the loopback port).
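		// (The deep queues give multicast bursts far more buffering than the
		// regular egress queues; looping the traffic back through port 6 is
		// what lets the L3QM absorb those bursts before final delivery.)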
		// the above changes can be found in aal_l3_cls_tbl_init()
		// enable the port 6 DQ select here
		QM_QM_DEST_PORT0_EQ_CFG_t dp_eq_cfg;

		// get the port 7 selection
		dp_eq_cfg.wrd = rtk_ne_reg_read(QM_QM_DEST_PORT0_EQ_CFG + (QM_QM_DEST_PORT0_EQ_CFG_STRIDE * (CA_NI_PORT7 + CA_NI_TOTAL_CPU_PORT)));
		// apply it to port 6
		rtk_ne_reg_write(dp_eq_cfg.wrd, QM_QM_DEST_PORT0_EQ_CFG + (QM_QM_DEST_PORT0_EQ_CFG_STRIDE * (CA_NI_PORT6 + CA_NI_TOTAL_CPU_PORT)));
	}

	return CA_E_OK;
}
#endif

uint32_t rtk_rg_g3_cpuport_init(void)
{
	ca_status_t ret = CA_E_OK;
	int i;

	i = 0;
#if defined(CONFIG_CA_NE_L2FP) || defined(CONFIG_NE_L2FP)
	// if defined, the L2FP module allocates a netdev for each of those cpuports. ref: ca_l2fp_init
	// enable CPU ports 0x12~0x17 for wifi acceleration
	for(i = AAL_LPORT_CPU_2; i <= AAL_LPORT_CPU_7; i++)
	{
		if(!(ret = rtk_ni_virtual_cpuport_open(i)))
			G3INITPRINT("Enable CPU port 0x%x", i);
		else
			G3INITPRINT("ERROR - Enable CPU port 0x%x FAIL, ret = 0x%x !!!!!", i, ret);
	}
#else
	G3INITPRINT("CA wifi acceleration (L2FP) is off!!!!!\n");
#endif
#if defined(CONFIG_ARCH_CORTINA_G3HGU)
	// init the DMA_LSO shaper for the wifi veriwave Rx test.
	{
		unsigned int regValue = 0;

		// shp_en=1, global shaper enable
		regValue = rtk_dma_lso_reg_read(DMA_SEC_SS_CTRL);
		regValue |= 0x40000000;
		rtk_dma_lso_reg_write(regValue, DMA_SEC_SS_CTRL);
		// enb=2'b10, port shaper enable
		rtk_dma_lso_reg_write(0x00000080, DMA_SEC_SS_SHP_PORT_TBC_MEM_DATA1);
		// burst size=16, 1Gbps shaper rate (after taking IPG into consideration)
		rtk_dma_lso_reg_write(0x400F6400, DMA_SEC_SS_SHP_PORT_TBC_MEM_DATA0);
		// indirect register writes to the DMA_LSO VP 2~7 shapers
		rtk_dma_lso_reg_write(0xc0000002, DMA_SEC_SS_SHP_PORT_TBC_MEM_ACCESS);
		rtk_dma_lso_reg_write(0xc0000003, DMA_SEC_SS_SHP_PORT_TBC_MEM_ACCESS);
		rtk_dma_lso_reg_write(0xc0000004, DMA_SEC_SS_SHP_PORT_TBC_MEM_ACCESS);
		rtk_dma_lso_reg_write(0xc0000005, DMA_SEC_SS_SHP_PORT_TBC_MEM_ACCESS);
		rtk_dma_lso_reg_write(0xc0000006, DMA_SEC_SS_SHP_PORT_TBC_MEM_ACCESS);
		rtk_dma_lso_reg_write(0xc0000007, DMA_SEC_SS_SHP_PORT_TBC_MEM_ACCESS);
	}
#endif

	return ret;
}

uint32_t rtk_rg_g3_cpuport_exit(void)
{
	int i = 0;

	G3INITPRINT("unregister cpu port...");
	for(i = AAL_LPORT_CPU_2; i <= AAL_LPORT_CPU_7; i++)
	{
		rtk_ni_virtual_cpuport_close(i);
		G3INITPRINT("close cpu port...0x%x", i);
	}
	return CA_E_OK;
}

#define HL_ACTGRP_UUC_GENERIC (ACTION_GROUP_12 | ACTION_GROUP_13 | ACTION_GROUP_14 | ACTION_GROUP_15 | ACTION_GROUP_16 | ACTION_GROUP_18 | ACTION_GROUP_19)

typedef struct {
	HASH_ACTION_GROUP_12;
	HASH_ACTION_GROUP_13;
	HASH_ACTION_GROUP_14;
	HASH_ACTION_GROUP_15;
	HASH_ACTION_GROUP_16;
	HASH_ACTION_GROUP_18;
	HASH_ACTION_GROUP_19;
} __attribute__((packed)) hl_act_uuc_generic_t;

static hl_act_uuc_generic_t HL_ACT_UUC = {
	.mdata_w_vld_2 = 0x8,
	.mdata_w_2 = (RXINFO_REF_TRAP_RSN_UNKNOWN_DA /* << shift amount lost in the source */),
};

/* Assumed declaration: the key mask is referenced at file scope below and
 * initialized in rtk_rg_g3_hashlite_init(). */
static aal_hash_mask_t HL_KEYMSK_UUC;

/* The original signature and leading statements of this function were lost;
 * they are reconstructed here from the call sites below, which pass
 * (port, enable, l3fe_pol_idx). The profile_uuc setup and the per-port
 * hashlite_profile mapping are assumptions. */
int32 rtk_rg_g3_l3fe_unknownDAStormCtrl(unsigned int port, rtk_enable_t enable, unsigned int l3fe_pol_idx)
{
	ca_int32_t ret = AAL_E_OK;
	aal_hashlite_profile_t profile_uuc;	/* assumed type */
	unsigned int hashlite_profile = HASHLITE_PROFILE_0 + port;	/* assumed per-port mapping */

	memset(&profile_uuc, 0, sizeof(profile_uuc));	/* assumed init; the original setup was lost */

	if(hashlite_profile > HASHLITE_PROFILE_15)
	{
		G3INITPRINT("forbidden to enable uuc storm control for port %d", port);
		return AAL_E_OUTRANGE;
	}

	// 2K+32(G3) or 256+32(G3Lite) entries, 192 bits per action fib
	//G3INITPRINT("turn %s port %d unknown DA storm control, policer idx is %d", enable?"ON":"OFF", port, l3fe_pol_idx);
	if(enable)
	{
		HL_ACT_UUC.stage3_ctrl = 1;	//Do NOT apply the type-0 result of the Hash Engine; keep the hashlite mdata_w_2
		HL_ACT_UUC.pol_vld = 1;
		HL_ACT_UUC.pol_en = 2;
		HL_ACT_UUC.pol_base = l3fe_pol_idx;	// the reserved entry for uuc storm control.
		HL_ACT_UUC.pol_sel = 2;
		if((ret = aal_hashlite_profile_set(G3_DEF_DEVID, hashlite_profile, &profile_uuc)) != AAL_E_OK)
		{
			G3INITPRINT("ERROR! Fail to initialize HashLite profile %d. (ret %d)\n", hashlite_profile, ret);
		}
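		/* Under the assumptions above, the profile's action template now
		 * binds unknown-DA hits to the reserved L3FE policer, so flooded
		 * unknown-unicast traffic is rate-limited rather than forwarded at
		 * line rate. */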
	}
	else
	{
		HL_ACT_UUC.stage3_ctrl = 0;	//Apply the type-0 result of the Hash Engine
		HL_ACT_UUC.pol_vld = 0;
		HL_ACT_UUC.pol_en = 0;
		HL_ACT_UUC.pol_base = 0;
		HL_ACT_UUC.pol_sel = 0;
		if((ret = aal_hashlite_profile_set(G3_DEF_DEVID, hashlite_profile, &profile_uuc)) != AAL_E_OK)
		{
			G3INITPRINT("ERROR! Fail to initialize HashLite profile %d. (ret %d)\n", hashlite_profile, ret);
		}
	}

	return ret;
}

int32 rtk_rg_g3_l3fe_unknownDA_wan_default_enable(unsigned int wan_port_idx)
{
	ca_int32_t ret = AAL_E_OK;
	cls_action_t action;

	/* set the CLS WAN default fib for hashlite (unknownDA lookup) */
	memset(&action, 0, sizeof(cls_action_t));
	/* set msk_ctrl */
	action.t2_ctrl_vld = 1;
	action.t2_ctrl = 0;	// any one to trap packets
	action.t3_ctrl_vld = 1;
	action.t3_ctrl = T3_CTRL_BYPASS;
	action.t4_ctrl_vld = 1;
	action.t4_ctrl = wan_port_idx;
	action.t5_ctrl_vld = 1;
	action.t5_ctrl = T5_CTRL_BYPASS;
	action.dpid_vld = 1;
	action.permit = 1;
	action.mcgid = AAL_LPORT_CPU_0;
	action.dpid_pri = 1;
	ret = aal_l3_cls_default_set(G3_DEF_DEVID, 0, CL_RSLT_TYPE_0, &action);
	if (ret != AAL_E_OK)
	{
		G3INITPRINT("L3 CLS WAN DEF FIB initialization fail, ret = %d\n", ret);
	}
	return ret;
}

int32 rtk_rg_g3_l3fe_knownDA_add(unsigned char *dmac, unsigned int *entryidx)
{
	int ret = AAL_E_OK;
	aal_hash_key_t key;
	aal_hashlite_hash_action_entry_t action;
	hl_act_uuc_generic_t *pAction = (hl_act_uuc_generic_t *)&action;
	unsigned int hashIdx = 0;

	memset(&key, 0, sizeof(key));
	memset(pAction, 0, sizeof(action));
	*entryidx = 0;

	// the hashlite key stores the MAC byte-reversed
	key.mac_da_0 = dmac[5];
	key.mac_da_1 = dmac[4];
	key.mac_da_2 = dmac[3];
	key.mac_da_3 = dmac[2];
	key.mac_da_4 = dmac[1];
	key.mac_da_5 = dmac[0];
	ret = aal_hashlite_hash_add(G3_DEF_DEVID, &key, &HL_KEYMSK_UUC, &action, HL_ACTGRP_UUC_GENERIC, HASHLITE_AGING_STATIC, &hashIdx);
	if(ret == AAL_E_EXIST || ret == AAL_E_OK)
	{
		// overflow-table hits are indexed after the main action table
		if(hashIdx & HASHLITE_OVERFLOW_FLAG)
			*entryidx = (hashIdx & HASHLITE_INDEX_MASK) + HASHLITE_HASH_ACTION_ENTRY_MAX;
		else
			*entryidx = (hashIdx & HASHLITE_INDEX_MASK);
		ret = AAL_E_OK;
		//printk("add known da %pM to hashlite entry %d\n", dmac, *entryidx);
	}
	else if(ret == AAL_E_TBLFULL)
	{
		printk(KERN_DEBUG "table full, fail to add known da hashlite entry, ret = %d\n", ret);
	}
	else
	{
		G3INITPRINT("ERROR! fail to add known da hashlite entry, ret = %d\n", ret);
	}
	return ret;
}
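/*
 * Usage sketch (hypothetical caller, not part of this file): add a MAC so
 * traffic towards it bypasses unknown-DA storm policing, keeping the returned
 * entry index for a later delete:
 *
 *	unsigned int idx;
 *	unsigned char gw_mac[ETHER_ADDR_LEN] = {0x00, 0xe0, 0x4c, 0x00, 0x00, 0x01};
 *
 *	if(rtk_rg_g3_l3fe_knownDA_add(gw_mac, &idx) == AAL_E_OK)
 *		...	// idx addresses the hashlite action entry
 *	rtk_rg_g3_l3fe_knownDA_del(idx);
 */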
int32 rtk_rg_g3_l3fe_knownDA_del(unsigned int entryidx)
{
	int ret = AAL_E_OK;

	ret = aal_hashlite_hash_delete(G3_DEF_DEVID, entryidx);
	if(ret)
	{
		G3INITPRINT("fail to del known da with hashlite entry idx %d, ret = %d\n", entryidx, ret);
	}
	return ret;
}

int32 rtk_rg_g3_l3fe_knownDA_flush(void)
{
	unsigned int hashIdx = 0;

	for(hashIdx = 0; hashIdx < (HASHLITE_HASH_TBL_ENTRY_MAX + HASHLITE_OVERFLOW_TBL_ENTRY_MAX); hashIdx++)
	{
		aal_hashlite_hash_delete(G3_DEF_DEVID, hashIdx);
	}
	return AAL_E_OK;
}

uint32_t rtk_rg_g3_hashlite_init(void)
{
	unsigned char bcMac[ETHER_ADDR_LEN];
	unsigned int wanportidx = AAL_LPORT_ETH_NI7;

	// unmask all fields in the hash mask, refer: hash_mask_unmask()
	memset(&HL_KEYMSK_UUC, -1, sizeof(aal_hash_mask_t));
	HL_KEYMSK_UUC.ip_sa = 0;
	HL_KEYMSK_UUC.ip_da = 0;
	HL_KEYMSK_UUC.ip_ttl = 0;
	// turn on the interested key
	HL_KEYMSK_UUC.mac_da = 0;

	// disable
	rtk_rg_g3_l3fe_unknownDAStormCtrl(0, DISABLED, 0);
	rtk_rg_g3_l3fe_unknownDAStormCtrl(1, DISABLED, 0);
	rtk_rg_g3_l3fe_unknownDAStormCtrl(2, DISABLED, 0);
	rtk_rg_g3_l3fe_unknownDAStormCtrl(3, DISABLED, 0);
	rtk_rg_g3_l3fe_unknownDAStormCtrl(4, DISABLED, 0);
#if defined(CONFIG_ARCH_CORTINA_G3HGU)
#if defined(CONFIG_RG_G3_WAN_PORT_INDEX)
	rtk_rg_g3_l3fe_unknownDAStormCtrl(CONFIG_RG_G3_WAN_PORT_INDEX, DISABLED, 0);
	wanportidx = CONFIG_RG_G3_WAN_PORT_INDEX;
#else
	rtk_rg_g3_l3fe_unknownDAStormCtrl(7, DISABLED, 0);
	wanportidx = AAL_LPORT_ETH_NI7;
#endif
#endif

	rtk_rg_g3_l3fe_knownDA_flush();
	memset(bcMac, 0xff, ETHER_ADDR_LEN);
	{
		// always treat the broadcast MAC as a known DA
		unsigned int idx;
		rtk_rg_g3_l3fe_knownDA_add(bcMac, &idx);
	}

	// enable
	//rtk_rg_g3_l3fe_unknownDAStormCtrl(1, G3_FLOW_POLICER_IDXSHIFT_STORMCTL + 3);
	// for test
	aal_hashlite_aging_timer_set(0);

	/* set the CLS WAN default fib for hashlite (unknownDA lookup) */
	ASSERT_EQ(rtk_rg_g3_l3fe_unknownDA_wan_default_enable(wanportidx), AAL_E_OK);

	return CA_E_OK;
}

uint32_t rtk_rg_g3_flow_init(rtk_rg_flow_key_mask_t flowKeyMask)
{
	ca_flow_key_type_config_t flow_key;
	ca_flow_key_type_t keyType;

	ca_flow_key_profile_mapping_set(G3_DEF_DEVID, RG_CA_FLOW_UC5TUPLE_DS, HASH_PROFILE_0);
	ca_flow_key_profile_mapping_set(G3_DEF_DEVID, RG_CA_FLOW_UC5TUPLE_US, HASH_PROFILE_1);
	ca_flow_key_profile_mapping_set(G3_DEF_DEVID, RG_CA_FLOW_MC, HASH_PROFILE_2);
	ca_flow_key_profile_mapping_set(G3_DEF_DEVID, RG_CA_FLOW_UC2TUPLE_BRIDGE, HASH_PROFILE_3);
	ca_flow_delete_all(G3_DEF_DEVID);

	/* NOTE: the body of this per-key-type loop was lost in the source; it
	 * presumably applied flowKeyMask for each key type, and its trailing
	 * comment pointed at aal_hash_profile_set(). The loop bound below is an
	 * assumption (one key type per mapped hash profile). */
	for(keyType = CA_FLOW_TYPE_0; keyType <= CA_FLOW_TYPE_3; keyType++)
	{
		/* ... lost ... */
	}

#if 0	// one profile one tuple, no TTL checking anymore
	/******************************************************
	 * TTL pattern mask
	 ******************************************************/
	memset(&flow_key, 0, sizeof(flow_key));
	flow_key.key_type = RG_CA_FLOW_TYPE_TTL;	//TTL
	flow_key.prio = RG_CA_FLOW_TYPE_TTL;
	flow_key.key_mask.l3_keys = TRUE;
	flow_key.key_mask.l3_mask.ip_ttl = 0xFE;
	ASSERT_EQ(ca_flow_key_type_add(G3_DEF_DEVID, &flow_key), CA_E_OK);

	/******************************************************
	 * reserved Flow entry
	 ******************************************************/
	{
		ca_status_t ca_ret = CA_E_OK;
		ca_flow_t flow_config;

		memset(&flow_config, 0, sizeof(flow_config));
		flow_config.key_type = RG_CA_FLOW_TYPE_TTL;
		flow_config.key.l3_key.ip_ttl = 0;
		flow_config.actions.forward = CA_CLASSIFIER_FORWARD_PORT;
		flow_config.actions.dest.port = AAL_LPORT_CPU_0;
		ca_ret = ca_flow_add(0, &flow_config);
		if(ca_ret == CA_E_OK){
			printk(">>>>> Add flow[%d]\n", flow_config.index);
		}else{
			printk("\033[1;33;41m[WARNING] Add flow type[%d] fail, ca_ret=0x%x \033[0m\n", flow_config.key_type, ca_ret);
		}
	}
#endif

	return CA_E_OK;
}
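/*
 * Usage sketch (hypothetical caller): the RG init path would call this once,
 * after rtk_rg_g3_hashlite_init(), with the flow-key field mask the hash
 * profiles should be built from:
 *
 *	rtk_rg_flow_key_mask_t mask = {0};	// field selection is up to the caller
 *	rtk_rg_g3_flow_init(mask);
 */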
/*====================
 rtk_rg_g3_policer_init(): init the L2 and L3 policers here.
 dal_ca8279_rate_init(): init the L2 policer only
====================*/
int32 rtk_rg_g3_policer_init(void)
{
	ca_uint16_t flow_id;
	ca_policer_t policer;

	/* init the L2 and L3 policers here */
	policer.mode = CA_POLICER_MODE_DISABLE;
	policer.pps = 0;
	policer.cir = 32767499;
	policer.cbs = 0xfff;
	policer.pir = 32767499;
	policer.pbs = 0xfff;
	for(flow_id = 0; flow_id <= CA_AAL_MAX_FLOW_ID; flow_id++)
	{
		ASSERT_EQ(ca_l2_flow_policer_set(G3_DEF_DEVID, flow_id, &policer), CA_E_OK);
	}
	for(flow_id = 0; flow_id <= CA_AAL_L3_MAX_FLOW_ID; flow_id++)
	{
		ASSERT_EQ(ca_l3_flow_policer_set(G3_DEF_DEVID, flow_id, &policer), CA_E_OK);
	}
	return CA_E_OK;
}

uint32_t rtk_rg_g3_init(void)
{
	int i;
	ca_uint8_t keep = 1;
	ca_uint32_t l3fe_tbl_count = 4;
	aal_ni_hv_glb_internal_port_id_cfg_t portid_cfg;
	aal_ni_hv_glb_internal_port_id_cfg_mask_t portid_mask = {0};
	aal_l3fe_lpb_tbl_cfg_t lpb_cfg;
	aal_l3fe_lpb_tbl_cfg_mask_t lpb_mask = {0};
	aal_ilpb_cfg_msk_t ilpb_cfg_msk = {0};
	aal_ilpb_cfg_t ilpb_cfg;
#if defined(CONFIG_RG_G3_L2FE_POL_OFFLOAD)
	aal_elpb_cfg_msk_t elpb_cfg_msk = {0};
	aal_elpb_cfg_t elpb_cfg;
#endif
	//ca_gen_intf_attrib_t gen_intf_attrib;
#if 0 //defined(CONFIG_ARCH_CORTINA_G3LITE) && defined(CONFIG_RTK_DEV_AP)
	ca_uint32_t port, queue;
	ca_port_id_t port_id;
	ca_queue_wred_profile_t profile;
	ca_status_t ret;
#endif

	//keep lspid unchanged
	ASSERT_EQ(aal_l3fe_keep_lspid_unchange_set(0, &keep), CA_E_OK);
	G3INITPRINT("%s: #####Set keep_lspid_unchange to %u#####\n", __func__, keep);

	//let packets from port 7 enter the l2fe
	ASSERT_EQ(aal_ni_hv_glb_internal_port_id_cfg_get(0, &portid_cfg), CA_E_OK);
	portid_mask.bf.wan_rxsel = 1;
	portid_cfg.wan_rxsel = 2;
	ASSERT_EQ(aal_ni_hv_glb_internal_port_id_cfg_set(0, portid_mask, &portid_cfg), CA_E_OK);
	G3INITPRINT("%s: #####Set wan_rxsel to %u#####\n", __func__, portid_cfg.wan_rxsel);

	//let packets from port 7 act like the other LAN ports
	ilpb_cfg_msk.u32[0] = 0;
	ilpb_cfg_msk.u32[1] = 0;
	ilpb_cfg_msk.s.wan_ind = 1;
	ilpb_cfg.wan_ind = 1;
#if defined(CONFIG_RG_G3_SERIES) && defined(CONFIG_RG_G3_WAN_PORT_INDEX)
	ASSERT_EQ(aal_port_ilpb_cfg_set(0, CONFIG_RG_G3_WAN_PORT_INDEX, ilpb_cfg_msk, &ilpb_cfg), CA_E_OK);
#else
	ASSERT_EQ(aal_port_ilpb_cfg_set(0, AAL_LPORT_ETH_NI7, ilpb_cfg_msk, &ilpb_cfg), CA_E_OK);	//FC driver
#endif
	G3INITPRINT("%s: #####Set wan_ind to %u#####\n", __func__, ilpb_cfg.wan_ind);

	//Disable the MAC DA check in the L3FE
	lpb_mask.s.mac_da_match_en = 1;
	for(i=0;i