/******************************************************************************
**
** FILE NAME    : ifxmips_ppa_hal_vr9_d5.c
** PROJECT      : UEIP
** MODULES      : MII0/1 Acceleration Package (VR9 PPA D5)
**
** DATE         : 19 OCT 2009
** AUTHOR       : Xu Liang
** DESCRIPTION  : MII0/1 Driver with Acceleration Firmware (D5)
** COPYRIGHT    : Copyright (c) 2009
**                Lantiq Deutschland GmbH
**                Am Campeon 3; 85579 Neubiberg, Germany
**
**   For licensing information, see the file 'LICENSE' in the root folder of
**   this software module.
**
** HISTORY
** $Date        $Author         $Comment
** 19 OCT 2009  Xu Liang        Initiate Version
*******************************************************************************/

/*!
  \defgroup AMAZON_S_PPA_PPE_D5_HAL Amazon-S (AR9) PPA PPE D5 HAL layer driver module
  \brief Amazon-S (AR9) PPA PPE D5 HAL layer driver module
 */

/*!
  \defgroup AMAZON_S_PPA_PPE_D5_HAL_COMPILE_PARAMS Compile Parameters
  \ingroup AMAZON_S_PPA_PPE_D5_HAL
  \brief compile options to turn on/off some features
 */

/*!
  \defgroup AMAZON_S_PPA_PPE_D5_HAL_GLOBAL_FUNCTIONS Exported Functions
  \ingroup AMAZON_S_PPA_PPE_D5_HAL
  \brief exported functions for other driver use
 */

/*!
  \file amazon_s_ppa_ppe_d5_hal.c
  \ingroup AMAZON_S_PPA_PPE_D5_HAL
  \brief Amazon-S (AR9) PPA PPE D5 HAL layer driver file
 */

/*
 * ####################################
 *              Head File
 * ####################################
 */

/*
 *  Common Head File
 */
#include
#include
#include
#include
#include
#include
#include
//#include
#include

/*
 *  Chip Specific Head File
 */
#include
#include
#include "ifxmips_ppa_hal_vr9_e5.h"
#include "ifxmips_ppa_proc_vr9_e5.h"

/*
 * ####################################
 *              Definition
 * ####################################
 */

/*
 * ####################################
 *              Version No.
 * ####################################
 */
#define VER_FAMILY      0xC0        //  bit 0: res
                                    //      1: Danube
                                    //      2: Twinpass
                                    //      3: Amazon-SE
                                    //      4: res
                                    //      5: AR9
                                    //      6: GR9
                                    //      7: VR9
#define VER_DRTYPE      0x04        //  bit 0: Normal Data Path driver
                                    //      1: Indirect-Fast Path driver
                                    //      2: HAL driver
                                    //      3: Hook driver
                                    //      4: Stack/System Adaption Layer driver
                                    //      5: PPA API driver
#define VER_INTERFACE   0x00        //  bit 0: MII 0
                                    //      1: MII 1
                                    //      2: ATM WAN
                                    //      3: PTM WAN
#define VER_ACCMODE     0x01        //  bit 0: Routing
                                    //      1: Bridging
#define VER_MAJOR       0
#define VER_MID         0
#define VER_MINOR       2

/*
 *  Compilation Switch
 */
#define ENABLE_IPv6_DEMO_SUPPORT                0
#define ENABLE_NEW_HASH_ALG                     1

/*!
  \addtogroup AMAZON_S_PPA_PPE_D5_HAL_COMPILE_PARAMS
 */
/*@{*/
/*!
  \brief Turn on/off debugging message and disable inline optimization.
 */
#define ENABLE_DEBUG                            0
/*!
  \brief Turn on/off ASSERT feature, print message while condition is not fulfilled.
 */
#define ENABLE_ASSERT                           1
/*@}*/

#if defined(ENABLE_DEBUG) && ENABLE_DEBUG
  #define ENABLE_DEBUG_PRINT                    1
  #define DISABLE_INLINE                        1
#endif

#if defined(DISABLE_INLINE) && DISABLE_INLINE
  #define INLINE
#else
  #define INLINE                                inline
#endif

#if defined(ENABLE_DEBUG_PRINT) && ENABLE_DEBUG_PRINT
  #undef  dbg
  #define dbg(format, arg...)                   do { printk(KERN_WARNING __FILE__ ":%d:%s: " format "\n", __LINE__, __FUNCTION__, ##arg); } while ( 0 )
#elif !defined(dbg)
  #define dbg(format, arg...)
#endif

#if defined(ENABLE_ASSERT) && ENABLE_ASSERT
  #define ASSERT(cond, format, arg...)          do { if ( !(cond) ) printk(KERN_ERR __FILE__ ":%d:%s: " format "\n", __LINE__, __FUNCTION__, ##arg); } while ( 0 )
#else
  #define ASSERT(cond, format, arg...)
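/*
 *  Illustrative sketch (kept disabled, not part of the driver logic): how the
 *  VER_* bit masks above combine into the version ID printed by
 *  print_driver_ver(). For example VER_FAMILY = 0xC0 sets bits 6 and 7, i.e.
 *  "GR9 | VR9" per the family table in print_driver_ver(), and the full ID
 *  prints as "192.4.0.1.0.0.2" (family.type.interface.mode.major.mid.minor).
 *  ver_id_demo_sketch() is a hypothetical helper used only for illustration.
 */
#if 0
static void ver_id_demo_sketch(void)
{
    unsigned int family = VER_FAMILY;   //  0xC0 -> bit 6 (GR9) and bit 7 (VR9)
    unsigned int bit;
    int i;

    printk("Version ID: %d.%d.%d.%d.%d.%d.%d\n",
           VER_FAMILY, VER_DRTYPE, VER_INTERFACE, VER_ACCMODE,
           VER_MAJOR, VER_MID, VER_MINOR);

    //  walk the family bit mask the same way print_driver_ver() does
    for ( bit = family, i = 0; bit != 0; bit >>= 1, i++ )
        if ( (bit & 0x01) )
            printk("  family bit %d set\n", i);
}
#endif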
#endif /* * #################################### * Declaration * #################################### */ static int print_fw_ver(char *, int); static int print_driver_ver(char *, int, char *, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int); /* * Proc File */ static INLINE void proc_file_create(void); static INLINE void proc_file_delete(void); static int proc_read_ver(char *, char **, off_t, int, int *, void *); extern uint32_t ppe_drv_get_firmware_id(uint32_t pp32_index,struct fw_ver_id *fw_ver_id ); #ifdef CONFIG_IFX_PPA_QOS extern u32 cgu_get_pp32_clock(void); static int proc_read_qos(char *, char **, off_t, int, int *, void *); static int32_t get_basic_time_tick(void); static uint32_t qos_queue_portid = 1; //QOS note: At present A5 Ethernet WAN mode and D5's portid is 1, and E5's port id is 7 #define PPE_MAX_ETH1_QOS_QUEUE 8 #ifdef CONFIG_IFX_PPA_QOS_WFQ #define IFX_PPA_DRV_QOS_WFQ_WLEVEL_2_W ( 200 ) static uint32_t wfq_multiple = IFX_PPA_DRV_QOS_WFQ_WLEVEL_2_W; static uint32_t wfq_strict_pri_weight = 0x7FFFFF; int32_t ifx_ppa_drv_get_qos_wfq( uint32_t portid, uint32_t queueid, uint32_t *weight_level, uint32_t flag); #endif #ifdef CONFIG_IFX_PPA_QOS_RATE_SHAPING uint32_t default_qos_rateshaping_burst = 6000; #endif static uint32_t set_qos_port_id(void) { uint32_t res = IFX_SUCCESS; uint32_t wan_itf = *CFG_WAN_PORTMAP; uint32_t mixed_mode = *CFG_MIXED_PORTMAP; if( mixed_mode ) //not support QOS in mixed mode { qos_queue_portid = -1; //invalid res = IFX_FAILURE; } if( wan_itf & IFX_PPA_DEST_LIST_ETH0 ) qos_queue_portid = 0; else if( wan_itf & IFX_PPA_DEST_LIST_ETH1 ) qos_queue_portid = 1; else if ( wan_itf & IFX_PPA_DEST_LIST_ATM ) qos_queue_portid = 7; else { qos_queue_portid = -1; //invalid res = IFX_FAILURE; } return res; } #endif static void *proc_read_bridge_seq_start(struct seq_file *seq, loff_t *ppos); static void *proc_read_bridge_seq_next(struct seq_file *seq, void *v, loff_t *ppos); static void proc_read_bridge_seq_stop(struct seq_file *seq, void *v); static int proc_read_bridge_seq_show(struct seq_file *seq, void *v); static int ifx_ppa_drv_proc_read_bridge_seq_open(struct inode *inode, struct file *file); /* * #################################### * Global Variable * #################################### */ const static int dest_list_map[] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x00}; static uint8_t g_ipv6_ip_counter[MAX_IPV6_IP_ENTRIES] = {0}; static PPA_LOCK g_ipv6_ip_lock; static uint16_t g_lan_routing_entry_occupation[(MAX_LAN_ROUTING_ENTRIES + BITSIZEOF_UINT16 - 1) / BITSIZEOF_UINT16] = {0}; static uint32_t g_lan_collision_routing_entries = 0; static PPA_LOCK g_lan_routing_lock; static uint16_t g_wan_routing_entry_occupation[(MAX_WAN_ROUTING_ENTRIES + BITSIZEOF_UINT16 - 1) / BITSIZEOF_UINT16] = {0}; static uint32_t g_wan_collision_routing_entries = 0; static PPA_LOCK g_wan_routing_lock; static uint32_t g_wan_mc_entry_occupation[(MAX_WAN_MC_ENTRIES + BITSIZEOF_UINT32 - 1) / BITSIZEOF_UINT32] = {0}; static uint32_t g_wan_mc_entries = 0; static PPA_LOCK g_wan_mc_lock; static uint32_t g_pppoe_entry_counter[MAX_PPPOE_ENTRIES] = {0}; static PPA_LOCK g_pppoe_lock; static uint32_t g_mtu_entry_counter[MAX_MTU_ENTRIES] = {0}; static PPA_LOCK g_mtu_lock; static uint32_t g_mac_entry_counter[MAX_MAC_ENTRIES] = {0}; static PPA_LOCK g_mac_lock; static uint32_t g_outer_vlan_entry_counter[MAX_OUTER_VLAN_ENTRIES] = {0}; static PPA_LOCK g_outer_vlan_lock; static uint32_t g_classification_entry_counter[MAX_CLASSIFICATION_ENTRIES] = 
{0};
static PPA_LOCK g_classification_lock;

//static PPA_LOCK g_itf_cfg_lock;

static PPA_LOCK g_mac_tbl_lock;
static struct mac_tbl_item *g_mac_tbl_hash[BRIDGING_SESSION_LIST_HASH_TABLE_SIZE] = {0};
static PPA_MEM_CACHE *g_mac_tbl_item_cache = NULL;

/*
 *  PROC
 */
static int g_ppa_proc_dir_flag = 0;
static struct proc_dir_entry *g_ppa_proc_dir = NULL;

static int g_ppa_ppe_hal_proc_dir_flag = 0;
static struct proc_dir_entry *g_ppa_ppe_hal_proc_dir = NULL;

static struct proc_dir_entry *g_ppa_ppe_a4_hal_proc_dir = NULL;

static struct seq_operations g_proc_read_bridge_seq_ops = {
    .start  = proc_read_bridge_seq_start,
    .next   = proc_read_bridge_seq_next,
    .stop   = proc_read_bridge_seq_stop,
    .show   = proc_read_bridge_seq_show,
};
static uint32_t g_proc_read_bridge_pos = 0;
static struct file_operations g_proc_file_bridge_seq_fops = {
    .owner      = THIS_MODULE,
    .open       = ifx_ppa_drv_proc_read_bridge_seq_open,
    .read       = seq_read,
    .llseek     = seq_lseek,
    .release    = seq_release,
};

static int g_ipv6_acc_en = 0;

static int g_wan_itf = ~0;
static int g_mix_map = ~0;

int32_t (*ifx_ppa_drv_hal_generic_hook)(PPA_GENERIC_HOOK_CMD cmd, void *buffer, uint32_t flag) = NULL;
EXPORT_SYMBOL(ifx_ppa_drv_hal_generic_hook);

/*
 * ####################################
 *          Extern Functions
 * ####################################
 */
extern int mac_entry_setting(unsigned char *mac, uint32_t fid, uint32_t portid, uint32_t agetime, uint32_t st_entry, uint32_t action);

/*
 * ####################################
 *           Extern Variable
 * ####################################
 */

/*
 * ####################################
 *            Local Function
 * ####################################
 */

static int print_fw_ver(char *buf, int buf_len)
{
    static char * fw_ver_family_str[] = {
        "reserved - 0",
        "Danube",
        "Twinpass",
        "Amazon-SE",
        "reserved - 4",
        "AR9",
        "GR9",
        "VR9",
        NULL
    };
    static char * fw_ver_type_str[] = {
        "reserved - 0",
        "Standard",
        "Acceleration",
        "VDSL2 Bonding",
        NULL
    };
    static char * fw_ver_interface_str[] = {
        "MII0",
        "MII0 + MII1",
        "MII0 + ATM",
        "MII0 + PTM",
        "MII0/1 + ATM",
        "MII0/1 + PTM",
        NULL
    };
    static char * fw_ver_mode_str[] = {
        "reserved - 0",
        "reserved - 1",
        "reserved - 2",
        "Routing",
        "reserved - 4",
        "Bridging",
        "Bridging + IPv4 Routing",
        "Bridging + IPv4/6 Routing",
        NULL
    };

    int len = 0, i = 0;
    struct fw_ver_id id[2];

    //  Note: for E5 there are two PPE FW versions, but they share the same register,
    //  so the version has to be read from the datapath driver.
    ppe_drv_get_firmware_id(0, &id[0]);
    ppe_drv_get_firmware_id(1, &id[1]);

    for ( i = 0; i < 2; i++ )
    {
        if ( id[i].family > NUM_ENTITY(fw_ver_family_str) )
            len += snprintf(buf + len, buf_len - len, "  Family    : reserved - %d\n", (int)id[i].family);
        else
            len += snprintf(buf + len, buf_len - len, "  Family    : %s\n", fw_ver_family_str[id[i].family]);

        if ( id[i].fwtype > NUM_ENTITY(fw_ver_type_str) )
            len += snprintf(buf + len, buf_len - len, "  FW Type   : reserved - %d\n", (int)id[i].fwtype);
        else
            len += snprintf(buf + len, buf_len - len, "  FW Type   : %s\n", fw_ver_type_str[id[i].fwtype]);

        if ( id[i].interface > NUM_ENTITY(fw_ver_interface_str) )
            len += snprintf(buf + len, buf_len - len, "  Interface : reserved - %d\n", (int)id[i].interface);
        else
            len += snprintf(buf + len, buf_len - len, "  Interface : %s\n", fw_ver_interface_str[id[i].interface]);

        if ( id[i].fwmode > NUM_ENTITY(fw_ver_mode_str) )
            len += snprintf(buf + len, buf_len - len, "  Mode      : reserved - %d\n", (int)id[i].fwmode);
        else
            len += snprintf(buf + len, buf_len - len, "  Mode      : %s\n", fw_ver_mode_str[id[i].fwmode]);

        len += snprintf(buf + len, buf_len - len, "  Release   : %d.%d\n", (int)id[i].major, (int)id[i].minor);
    }

    return
len; } static int print_driver_ver(char *buf, int buf_len, char *title, unsigned int family, unsigned int type, unsigned int itf, unsigned int mode, unsigned int major, unsigned int mid, unsigned int minor) { static char * dr_ver_family_str[] = { NULL, "Danube", "Twinpass", "Amazon-SE", NULL, "AR9", "GR9", "VR9", NULL }; static char * dr_ver_type_str[] = { "Normal Data Path", "Indirect-Fast Path", "HAL", "Hook", "OS Adatpion Layer", "PPA API", NULL }; static char * dr_ver_interface_str[] = { "MII0", "MII1", "ATM", "PTM", NULL }; static char * dr_ver_accmode_str[] = { "Routing", "Bridging", NULL }; int len = 0; unsigned int bit; int i, j; len += snprintf(buf + len, buf_len - len, "%s:\n", title); len += snprintf(buf + len, buf_len - len, " Version ID: %d.%d.%d.%d.%d.%d.%d\n", family, type, itf, mode, major, mid, minor); len += snprintf(buf + len, buf_len - len, " Family : "); for ( bit = family, i = j = 0; bit != 0 && i < NUM_ENTITY(dr_ver_family_str); bit >>= 1, i++ ) if ( (bit & 0x01) && dr_ver_family_str[i] != NULL ) { if ( j ) len += snprintf(buf + len, buf_len - len, " | %s", dr_ver_family_str[i]); else len += snprintf(buf + len, buf_len - len, dr_ver_family_str[i]); j++; } if ( j ) len += snprintf(buf + len, buf_len - len, "\n"); else len += snprintf(buf + len, buf_len - len, "N/A\n"); len += snprintf(buf + len, buf_len - len, " DR Type : "); for ( bit = type, i = j = 0; bit != 0 && i < NUM_ENTITY(dr_ver_type_str); bit >>= 1, i++ ) if ( (bit & 0x01) && dr_ver_type_str[i] != NULL ) { if ( j ) len += snprintf(buf + len, buf_len - len, " | %s", dr_ver_type_str[i]); else len += snprintf(buf + len, buf_len - len, dr_ver_type_str[i]); j++; } if ( j ) len += snprintf(buf + len, buf_len - len, "\n"); else len += snprintf(buf + len, buf_len - len, "N/A\n"); len += snprintf(buf + len, buf_len - len, " Interface : "); for ( bit = itf, i = j = 0; bit != 0 && i < NUM_ENTITY(dr_ver_interface_str); bit >>= 1, i++ ) if ( (bit & 0x01) && dr_ver_interface_str[i] != NULL ) { if ( j ) len += snprintf(buf + len, buf_len - len, " | %s", dr_ver_interface_str[i]); else len += snprintf(buf + len, buf_len - len, dr_ver_interface_str[i]); j++; } if ( j ) len += snprintf(buf + len, buf_len - len, "\n"); else len += snprintf(buf + len, buf_len - len, "N/A\n"); len += snprintf(buf + len, buf_len - len, " Mode : "); for ( bit = mode, i = j = 0; bit != 0 && i < NUM_ENTITY(dr_ver_accmode_str); bit >>= 1, i++ ) if ( (bit & 0x01) && dr_ver_accmode_str[i] != NULL ) { if ( j ) len += snprintf(buf + len, buf_len - len, " | %s", dr_ver_accmode_str[i]); else len += snprintf(buf + len, buf_len - len, dr_ver_accmode_str[i]); j++; } if ( j ) len += snprintf(buf + len, buf_len - len, "\n"); else len += snprintf(buf + len, buf_len - len, "N/A\n"); len += snprintf(buf + len, buf_len - len, " Release : %d.%d.%d\n", major, mid, minor); return len; } #ifdef CONFIG_IFX_PPA_QOS int proc_read_qos(char *page, char **start, off_t off, int count, int *eof, void *data) { int len = 0; int len_max = off + count; char *pstr; char str[200]; int llen; int i; struct wtx_eg_q_shaping_cfg qos_cfg; struct eth_wan_mib_table qos_queue_mib; volatile struct tx_qos_cfg tx_qos_cfg = *TX_QOS_CFG; volatile struct wtx_qos_q_desc_cfg qos_q_desc_cfg[PPE_MAX_ETH1_QOS_QUEUE]; pstr = *start = page; __sync(); if( set_qos_port_id() != IFX_SUCCESS ) { llen = sprintf(pstr, "\n Note: PPA QOS is disabled for wan_itf=%x mixed_itf=%x\n", *CFG_WAN_PORTMAP, *CFG_MIXED_PORTMAP); pstr += llen; len += llen; *eof = 1; return len - off; } llen = sprintf(pstr, "\n qos : %s\n 
wfq : %s\n Rate shaping: %s\n\n", tx_qos_cfg.eth1_qss ?"enabled":"disabled", tx_qos_cfg.wfq_en?"enabled":"disabled", tx_qos_cfg.shape_en ?"enabled":"disabled"); pstr += llen; len += llen; llen = sprintf(pstr, " Ticks =%u, overhd =%u, qnum=%u @%p\n", (u32)tx_qos_cfg.time_tick, (u32)tx_qos_cfg.overhd_bytes, (u32)tx_qos_cfg.eth1_eg_qnum, TX_QOS_CFG ); pstr += llen; len += llen; llen = sprintf(pstr, " PPE clk=%u MHz, basic tick=%u\n",(u32)cgu_get_pp32_clock()/1000000, (u32)get_basic_time_tick()); pstr += llen; len += llen; #ifdef CONFIG_IFX_PPA_QOS_WFQ llen = sprintf(pstr, "\n wfq_multiple : %08u @0x%p", wfq_multiple, &wfq_multiple ); pstr += llen; len += llen; llen = sprintf(pstr, "\n strict_weight: %08u @0x%p\n", wfq_strict_pri_weight, &wfq_strict_pri_weight ); pstr += llen; len += llen; #endif if ( tx_qos_cfg.eth1_eg_qnum ) { uint32_t bit_rate_kbps=0; uint32_t weight_level=0; llen = sprintf(pstr, "\n Cfg : T R S --> Bit-rate(kbps) Weight --> Level Address d/w tick_cnt b/S\n"); pstr += llen; len += llen; for ( i = 0; i < PPE_MAX_ETH1_QOS_QUEUE /*tx_qos_cfg.eth1_eg_qnum*/; i++ ) { qos_cfg = *WTX_EG_Q_SHAPING_CFG(i); #ifdef CONFIG_IFX_PPA_QOS_RATE_SHAPING ifx_ppa_drv_get_qos_rate( qos_queue_portid, i, &bit_rate_kbps, NULL,0); #endif #ifdef CONFIG_IFX_PPA_QOS_WFQ ifx_ppa_drv_get_qos_wfq( qos_queue_portid, i, &weight_level, 0); #endif llen = sprintf(str, "\n %2u: %03u %05u %05u %07u %08u %03u @0x%p %08u %03u %05u\n", i, qos_cfg.t, qos_cfg.r, qos_cfg.s, bit_rate_kbps, qos_cfg.w, weight_level, WTX_EG_Q_SHAPING_CFG(i), qos_cfg.d, qos_cfg.tick_cnt, qos_cfg.b); if ( len <= off && len + llen > off ) { ppa_memcpy(pstr, str + off - len, len + llen - off); pstr += len + llen - off; } else if ( len > off ) { ppa_memcpy(pstr, str, llen); pstr += llen; } len += llen; if ( len >= len_max ) goto PROC_READ_MAC_OVERRUN_END; } //QOS Note: For ethernat wan mode only. For E5 ptm mode, it is not necessary since there is no port based rate shaping if( qos_queue_portid & 3) { qos_cfg = *WTX_EG_Q_PORT_SHAPING_CFG(0); #ifdef CONFIG_IFX_PPA_QOS_RATE_SHAPING ifx_ppa_drv_get_qos_rate( qos_queue_portid, i, &bit_rate_kbps, NULL,0); #endif llen = sprintf(str, "\n port: %03u %05u %05u %07u @0x%p %08u %03u %05u\n", qos_cfg.t, qos_cfg.r, qos_cfg.s, bit_rate_kbps, WTX_EG_Q_PORT_SHAPING_CFG(0), qos_cfg.d, qos_cfg.tick_cnt, qos_cfg.b); if ( len <= off && len + llen > off ) { ppa_memcpy(pstr, str + off - len, len + llen - off); pstr += len + llen - off; } else if ( len > off ) { ppa_memcpy(pstr, str, llen); pstr += llen; } len += llen; if ( len >= len_max ) goto PROC_READ_MAC_OVERRUN_END; } //QOS Note: For ethernat wan mode only. 
For E5 ptm mode, it is not necessary since there is no port based rate shaping --End llen = sprintf(pstr, "\n MIB : rx_pkt/rx_bytes tx_pkt/tx_bytes cpu_small_drop/cpu_drop fast_small_drop/fast_drop_cnt\n"); pstr += llen; len += llen; for ( i = 0; i < PPE_MAX_ETH1_QOS_QUEUE /*tx_qos_cfg.eth1_eg_qnum*/; i++ ) { qos_queue_mib = *ETH_WAN_TX_MIB_TABLE(i); llen = sprintf(str, " %2u: %010u/%010u %010u/%010u %010u/%010u %010u/%010u @0x%p\n", i, qos_queue_mib.wrx_total_pdu, qos_queue_mib.wrx_total_bytes, qos_queue_mib.wtx_total_pdu, qos_queue_mib.wtx_total_bytes, qos_queue_mib.wtx_cpu_drop_small_pdu, qos_queue_mib.wtx_cpu_drop_pdu, qos_queue_mib.wtx_fast_drop_small_pdu, qos_queue_mib.wtx_fast_drop_pdu, ETH_WAN_TX_MIB_TABLE(i)); if ( len <= off && len + llen > off ) { ppa_memcpy(pstr, str + off - len, len + llen - off); pstr += len + llen - off; } else if ( len > off ) { ppa_memcpy(pstr, str, llen); pstr += llen; } len += llen; if ( len >= len_max ) goto PROC_READ_MAC_OVERRUN_END; } //QOS queue descriptor llen = sprintf(pstr, "\n Desc: threshold num base_addr rd_ptr wr_ptr\n"); pstr += llen; len += llen; for(i=0; inext ) if ( res->namelen == 3 && res->name[0] == 'p' && res->name[1] == 'p' && res->name[2] == 'a' ) // "ppa" { g_ppa_proc_dir = res; break; } if ( !res ) { g_ppa_proc_dir = proc_mkdir("ppa", NULL); g_ppa_proc_dir_flag = 1; } for ( res = g_ppa_proc_dir->subdir; res; res = res->next ) if ( res->namelen == 3 && res->name[0] == 'h' && res->name[1] == 'a' && res->name[2] == 'l' ) // "hal" { g_ppa_ppe_hal_proc_dir = res; break; } if ( !res ) { g_ppa_ppe_hal_proc_dir = proc_mkdir("hal", g_ppa_proc_dir); g_ppa_ppe_hal_proc_dir_flag = 1; } g_ppa_ppe_a4_hal_proc_dir = proc_mkdir("e5", g_ppa_ppe_hal_proc_dir); res = create_proc_read_entry("ver", 0, g_ppa_ppe_a4_hal_proc_dir, proc_read_ver, NULL); res = create_proc_read_entry("mib", 0, g_ppa_ppe_a4_hal_proc_dir, ifx_ppa_drv_proc_read_hal_mib, NULL); if ( res ) res->write_proc = ifx_ppa_drv_proc_write_hal_mib; res = create_proc_read_entry("route", 0, g_ppa_ppe_a4_hal_proc_dir, ifx_ppa_drv_proc_read_route, NULL); if ( res ) res->write_proc = ifx_ppa_drv_proc_write_hal_route; res = create_proc_entry("bridge", 0, g_ppa_ppe_a4_hal_proc_dir); if ( res ) res->proc_fops = &g_proc_file_bridge_seq_fops; res = create_proc_read_entry("mc", 0, g_ppa_ppe_a4_hal_proc_dir, ifx_ppa_drv_proc_read_mc, NULL); res = create_proc_read_entry("genconf", 0, g_ppa_ppe_a4_hal_proc_dir, ifx_ppa_drv_proc_read_genconf, NULL); if ( res ) res->write_proc = ifx_ppa_drv_proc_write_genconf; res = create_proc_read_entry("pppoe", 0, g_ppa_ppe_a4_hal_proc_dir, ifx_ppa_drv_proc_read_pppoe, NULL); if ( res ) res->write_proc = ifx_ppa_drv_proc_write_pppoe; res = create_proc_read_entry("mtu", 0, g_ppa_ppe_a4_hal_proc_dir, ifx_ppa_drv_proc_read_mtu, NULL); if ( res ) res->write_proc = ifx_ppa_drv_proc_write_mtu; res = create_proc_read_entry("hit", 0, g_ppa_ppe_a4_hal_proc_dir, ifx_ppa_drv_proc_read_hit, NULL); if ( res ) res->write_proc = ifx_ppa_drv_proc_write_hit; res = create_proc_read_entry("mac", 0, g_ppa_ppe_a4_hal_proc_dir, ifx_ppa_drv_proc_read_mac, NULL); if ( res ) res->write_proc = ifx_ppa_drv_proc_write_mac; res = create_proc_read_entry("out_vlan", 0, g_ppa_ppe_a4_hal_proc_dir, ifx_ppa_drv_proc_read_out_vlan, NULL); if ( res ) res->write_proc = ifx_ppa_drv_proc_write_out_vlan; if ( g_ipv6_acc_en ) { res = create_proc_entry("ipv6_ip", 0, g_ppa_ppe_a4_hal_proc_dir); if ( res ) res->read_proc = ifx_ppa_drv_proc_read_ipv6_ip; } #ifdef CONFIG_IFX_PPA_QOS create_proc_read_entry("qos", 0, 
g_ppa_ppe_a4_hal_proc_dir, proc_read_qos, NULL); #endif } static INLINE void proc_file_delete(void) { #ifdef CONFIG_IFX_PPA_QOS remove_proc_entry("qos", g_ppa_ppe_a4_hal_proc_dir); #endif if ( g_ipv6_acc_en ) { remove_proc_entry("ipv6_ip", g_ppa_ppe_a4_hal_proc_dir); } remove_proc_entry("out_vlan", g_ppa_ppe_a4_hal_proc_dir); remove_proc_entry("mac", g_ppa_ppe_a4_hal_proc_dir); remove_proc_entry("hit", g_ppa_ppe_a4_hal_proc_dir); remove_proc_entry("mtu", g_ppa_ppe_a4_hal_proc_dir); remove_proc_entry("pppoe", g_ppa_ppe_a4_hal_proc_dir); remove_proc_entry("genconf", g_ppa_ppe_a4_hal_proc_dir); remove_proc_entry("mc", g_ppa_ppe_a4_hal_proc_dir); remove_proc_entry("bridge", g_ppa_ppe_a4_hal_proc_dir); remove_proc_entry("route", g_ppa_ppe_a4_hal_proc_dir); remove_proc_entry("mib", g_ppa_ppe_a4_hal_proc_dir); remove_proc_entry("ver", g_ppa_ppe_a4_hal_proc_dir); remove_proc_entry("e5", g_ppa_ppe_hal_proc_dir); if ( g_ppa_ppe_hal_proc_dir_flag ) remove_proc_entry("hal", g_ppa_proc_dir); if ( g_ppa_proc_dir_flag ) remove_proc_entry("ppa", NULL); } /* * #################################### * Global Function * #################################### */ void get_ppe_hal_id(uint32_t *p_family, uint32_t *p_type, uint32_t *p_if, uint32_t *p_mode, uint32_t *p_major, uint32_t *p_mid, uint32_t *p_minor) { if ( p_family ) *p_family = VER_FAMILY; if ( p_type ) *p_type = VER_DRTYPE; if ( p_if ) *p_if = VER_INTERFACE; if ( p_mode ) *p_mode = VER_ACCMODE; if ( p_major ) *p_major = VER_MAJOR; if ( p_mid ) *p_mid = VER_MID; if ( p_minor ) *p_minor = VER_MINOR; } /*! \fn uint32_t get_firmware_id(uint32_t *p_family, uint32_t *p_type, uint32_t *p_if, uint32_t *p_mode, uint32_t *p_major, uint32_t *p_minor) \ingroup AMAZON_S_PPA_PPE_D5_HAL_GLOBAL_FUNCTIONS \brief read firmware ID \param p_family get family code \param p_type get firmware type \param p_if get interface supported by firmware \param p_mode get firmware working mode \param p_major get major number \param p_minor get minor number \return no return value */ uint32_t get_firmware_id(uint32_t pp32_index, uint32_t *p_family, uint32_t *p_type, uint32_t *p_if, uint32_t *p_mode, uint32_t *p_major, uint32_t *p_minor) { struct fw_ver_id id; //id = *FW_VER_ID; //note, for E5 PTM wan mode, if ( ppe_drv_get_firmware_id(pp32_index, &id) != IFX_SUCCESS ) return IFX_FAILURE; if ( p_family ) *p_family = id.family; if ( p_type ) *p_type = id.fwtype; if ( p_if ) *p_if = id.interface; if ( p_mode ) *p_mode = id.fwmode; if ( p_major ) *p_major = id.major; if ( p_minor ) *p_minor = id.minor; return IFX_SUCCESS; } uint32_t get_number_of_phys_port(void) { return 8; } void get_phys_port_info(uint32_t port, uint32_t *p_flags, PPA_IFNAME ifname[PPA_IF_NAME_SIZE]) { char *str_ifname[] = { "eth0", "eth1", "", "", "", "", "", "ptm0" }; /* uint32_t flags[] = { GEN_MODE_CFG1->sys_cfg == 2 ? IFX_PPA_PHYS_PORT_FLAGS_MODE_ETH_MIX_VALID : IFX_PPA_PHYS_PORT_FLAGS_MODE_ETH_LAN_VALID, 0, // real flags for port 1 could be found below IFX_PPA_PHYS_PORT_FLAGS_MODE_CPU_VALID, IFX_PPA_PHYS_PORT_FLAGS_MODE_EXT_LAN_VALID | IFX_PPA_PHYS_PORT_FLAGS_EXT_CPU0, IFX_PPA_PHYS_PORT_FLAGS_MODE_EXT_LAN_VALID | IFX_PPA_PHYS_PORT_FLAGS_EXT_CPU0, IFX_PPA_PHYS_PORT_FLAGS_MODE_EXT_LAN_VALID | IFX_PPA_PHYS_PORT_FLAGS_EXT_CPU0 | IFX_PPA_PHYS_PORT_FLAGS_EXT_CPU1, IFX_PPA_PHYS_PORT_FLAGS_MODE_EXT_LAN_VALID | IFX_PPA_PHYS_PORT_FLAGS_EXT_CPU0, GEN_MODE_CFG1->sys_cfg == 0 ? 
IFX_PPA_PHYS_PORT_FLAGS_MODE_ETH_WAN_VALID : 0 }; */ if ( port >= sizeof(str_ifname) / sizeof(*str_ifname) ) { if ( p_flags ) *p_flags = 0; if ( ifname ) *ifname = 0; return; } if(p_flags) { *p_flags = 0; switch(port) { case 0: /*eth0*/ case 1: /*eth1*/ if(g_wan_itf & (1 << port)){//ethx wan if(g_mix_map & (1 << port)){//mix mode *p_flags = IFX_PPA_PHYS_PORT_FLAGS_MODE_ETH_MIX_VALID; }else{ *p_flags = IFX_PPA_PHYS_PORT_FLAGS_MODE_ETH_WAN_VALID; } }else{ *p_flags = IFX_PPA_PHYS_PORT_FLAGS_MODE_ETH_LAN_VALID; } break; case 2: /*CPU port*/ *p_flags = IFX_PPA_PHYS_PORT_FLAGS_MODE_CPU_VALID; break; case 3: /*EXT port*/ case 4: case 5: case 6: if(g_wan_itf & ( 1 << port)){ *p_flags = IFX_PPA_PHYS_PORT_FLAGS_MODE_EXT_WAN_VALID | IFX_PPA_PHYS_PORT_FLAGS_EXT_CPU0; }else{ *p_flags = IFX_PPA_PHYS_PORT_FLAGS_MODE_EXT_LAN_VALID | IFX_PPA_PHYS_PORT_FLAGS_EXT_CPU0; } if(port == 5){ *p_flags |= IFX_PPA_PHYS_PORT_FLAGS_EXT_CPU1; } break; case 7: if(g_wan_itf & (1 << port)){ *p_flags = IFX_PPA_PHYS_PORT_FLAGS_MODE_ETH_WAN_VALID; } break; default: *p_flags = 0; break; } if ( (GEN_MODE_CFG2->itf_outer_vlan_vld & (1 << port)) && (*p_flags & IFX_PPA_PHYS_PORT_FLAGS_TYPE_MASK) != IFX_PPA_PHYS_PORT_FLAGS_TYPE_ATM ) *p_flags |= IFX_PPA_PHYS_PORT_FLAGS_OUTER_VLAN; } if ( ifname ) strcpy(ifname, str_ifname[port]); } /*! \fn void get_max_route_entries(uint32_t *p_entry, uint32_t *p_mc_entry) \ingroup AMAZON_S_PPA_PPE_D5_HAL_GLOBAL_FUNCTIONS \brief get maximum number of routing entries \param p_entry get maximum number of uni-cast routing entries. In Amazon-S (AR9) D5, either LAN side or WAN side has 32 hash entries, as well as 64 collision routing entries. In each hash entry, there are 16 routing entries. \param p_mc_entry get maximum number of multicast routing entries. In Amazon-S (AR9) D5, there are 64 entries. 
\return no return value */ void get_max_route_entries(uint32_t *p_entry, uint32_t *p_mc_entry) { if ( p_entry ) *p_entry = MAX_ROUTING_ENTRIES; if ( p_mc_entry ) *p_mc_entry = MAX_WAN_MC_ENTRIES; } void get_max_bridging_entries(uint32_t *p_entry) { if ( p_entry ) *p_entry = MAX_BRIDGING_ENTRIES; // tricky to help bridging over ATM } static void *proc_read_bridge_seq_start(struct seq_file *seq, loff_t *ppos) { struct mac_tbl_item *p_item = NULL; int idx; uint32_t l; l = (g_proc_read_bridge_pos = (uint32_t)*ppos) + 1; ppa_lock_get(&g_mac_tbl_lock); for ( idx = 0; l && idx < NUM_ENTITY(g_mac_tbl_hash); idx++ ) { for ( p_item = g_mac_tbl_hash[idx]; p_item; p_item = p_item->next ) if ( !--l ) break; } if ( l == 0 && p_item ) { *ppos = ++g_proc_read_bridge_pos; return p_item; } else return NULL; } static void *proc_read_bridge_seq_next(struct seq_file *seq, void *v, loff_t *ppos) { struct mac_tbl_item *p_item = v; int idx; ASSERT((uint32_t)v >= KSEG0 && (uint32_t)v < KSEG1, "v (%08x) out of range", (uint32_t)v); if ( !p_item ) return NULL; if ( p_item->next ) { *ppos = ++g_proc_read_bridge_pos; return p_item->next; } else { for ( idx = BRIDGING_SESSION_LIST_HASH_VALUE(p_item->mac0) + 1; idx < NUM_ENTITY(g_mac_tbl_hash); idx++ ) if ( g_mac_tbl_hash[idx] ) { *ppos = ++g_proc_read_bridge_pos; return g_mac_tbl_hash[idx]; } return NULL; } } static void proc_read_bridge_seq_stop(struct seq_file *seq, void *v) { ppa_lock_release(&g_mac_tbl_lock); } static int proc_read_bridge_seq_show(struct seq_file *seq, void *v) { struct mac_tbl_item *p_item = v; ASSERT((uint32_t)v >= KSEG0 && (uint32_t)v < KSEG1, "v (%08x) out of range", (uint32_t)v); if ( g_proc_read_bridge_pos == 1 ) seq_printf(seq, "Bridging Table\n"); seq_printf(seq, " no. %u\n", g_proc_read_bridge_pos); seq_printf(seq, " next = %08X\n", (uint32_t)p_item->next); seq_printf(seq, " ref = %d\n", p_item->ref); seq_printf(seq, " mac0 = %08X\n", p_item->mac0); seq_printf(seq, " mac1 = %08X\n", p_item->mac1); seq_printf(seq, " age = %u\n", p_item->age); seq_printf(seq, " timestamp = %u\n", p_item->timestamp); return 0; } int ifx_ppa_drv_proc_read_bridge_seq_open(struct inode *inode, struct file *file) { return seq_open(file, &g_proc_read_bridge_seq_ops); } /*! \fn void set_wan_vlan_id(uint32_t vlan_id) \ingroup AMAZON_S_PPA_PPE_D5_HAL_GLOBAL_FUNCTIONS \brief set VLAN ID range for WAN side \param vlan_id least significant 16 bits is lower bound of WAN side VLAN ID (12-bit), most significant 16 bits is upper bound of WAN side VLAN ID (12-bit) \return no return value */ void set_wan_vlan_id(uint32_t vlan_id) { ETH_PORTS_CFG->wan_vlanid_lo = vlan_id & ((1 << 12) - 1); ETH_PORTS_CFG->wan_vlanid_hi = (vlan_id >> 16) & ((1 << 12) - 1); } /*! \fn uint32_t get_wan_vlan_id(void) \ingroup AMAZON_S_PPA_PPE_D5_HAL_GLOBAL_FUNCTIONS \brief get VLAN ID range for WAN side \return least significant 16 bits is lower bound of WAN side VLAN ID, most significant 16 bits is upper bound of WAN side VLAN ID */ uint32_t get_wan_vlan_id(void) { return (ETH_PORTS_CFG->wan_vlanid_hi << 16) | ETH_PORTS_CFG->wan_vlanid_lo; } /*! \fn void set_if_type(uint32_t if_type, uint32_t if_no) \ingroup AMAZON_S_PPA_PPE_D5_HAL_GLOBAL_FUNCTIONS \brief set interface type (LAN, WAN, or Mix mode) \param if_type 1: LAN, 2: WAN, 3: Mix \param if_no interface no. 
- 0: eth0, 1: eth1 \return no return value */ // if_type: // bit 0: LAN // bit 1: WAN void set_if_type(uint32_t if_type, uint32_t if_no) { uint8_t if_type_template[4] = {2, 0, 1, 2}; // 0: LAN, 1: WAN, 2: MIX (new spec) if ( if_no == 0 ) ETH_PORTS_CFG->eth0_type = if_type_template[if_type]; else if ( if_no == 1 ) ETH_PORTS_CFG->eth1_type = if_type_template[if_type]; } /*! \fn uint32_t get_if_type(uint32_t if_no) \ingroup AMAZON_S_PPA_PPE_D5_HAL_GLOBAL_FUNCTIONS \brief get interface type (LAN, WAN, or Mix mode) \param if_no interface no. - 0: eth0 \return 0: reserved, 1: LAN, 2: WAN, 3: Mix */ uint32_t get_if_type(uint32_t if_no) { uint32_t if_type_template[4] = {IFX_PPA_IF_TYPE_LAN, IFX_PPA_IF_TYPE_WAN, IFX_PPA_IF_TYPE_MIX, IFX_PPA_IF_NOT_FOUND}; if ( if_no == 0 ) return if_type_template[ETH_PORTS_CFG->eth0_type]; else if ( if_no == 1 ) return if_type_template[ETH_PORTS_CFG->eth1_type]; else return IFX_PPA_IF_NOT_FOUND; } /*! \fn void set_route_cfg(uint32_t f_is_lan, uint32_t entry_num, uint32_t mc_entry_num, uint32_t f_ip_verify, uint32_t f_tcpudp_verify, uint32_t f_iptcpudp_err_drop, uint32_t f_drop_on_no_hit, uint32_t f_mc_drop_on_no_hit, uint32_t flags) \ingroup AMAZON_S_PPA_PPE_D5_HAL_GLOBAL_FUNCTIONS \brief setup routing table configuration \param f_is_lan setup LAN side routing table configuration \param entry_num maximum number of LAN/WAN side uni-cast routing entries (min 512 max 512 + 64) \param mc_entry_num maximum number of WAN side multicast routing entries (max 64) \param f_ip_verify turn on/off IP checksum verification \param f_tcpudp_verify turn on/off TCP/UDP checksum verification \param f_iptcpudp_err_drop drop/not drop if IP/TCP/UDP checksum is wrong \param f_drop_on_no_hit drop/not drop if uni-cast packet does not match any entry \param f_mc_drop_on_no_hit drop/not drop if multicast packet does not match any entry \param flags bit 0: entry_num is valid, bit 1: mc_entry_num is valid, bit 2: f_ip_verify is valid, bit 3: f_tcpudp_verify is valid, bit 4: f_tcpudp_err_drop is valid, bit 5: f_drop_on_no_hit is valid, bit 6: f_mc_drop_on_no_hit is valid \return no return value */ // flags // bit 0: entry_num is valid // bit 1: mc_entry_num is valid // bit 2: f_ip_verify is valid // bit 3: f_tcpudp_verify is valid // bit 4: f_tcpudp_err_drop is valid // bit 5: f_drop_on_no_hit is valid // bit 6: f_mc_drop_on_no_hit is valid void set_route_cfg(uint32_t f_is_lan, uint32_t entry_num, // routing entries, include both hash entries and collision entries, min 512 max 512 + 64 uint32_t mc_entry_num, // max 64, reserved in LAN route table config uint32_t f_ip_verify, uint32_t f_tcpudp_verify, uint32_t f_iptcpudp_err_drop, uint32_t f_drop_on_no_hit, uint32_t f_mc_drop_on_no_hit, // reserved in LAN route table config uint32_t flags) { // LAN route config is only a little different struct rout_tbl_cfg cfg; uint16_t *p_occupation; if ( entry_num <= MAX_ROUTING_ENTRIES_PER_HASH_BLOCK * MAX_HASH_BLOCK ) entry_num = MAX_ROUTING_ENTRIES_PER_HASH_BLOCK * MAX_HASH_BLOCK + 1; else if ( entry_num > MAX_WAN_ROUTING_ENTRIES ) entry_num = MAX_WAN_ROUTING_ENTRIES; if ( mc_entry_num < 1 ) mc_entry_num = 1; else if ( mc_entry_num > MAX_WAN_MC_ENTRIES ) mc_entry_num = MAX_WAN_MC_ENTRIES; cfg = f_is_lan ? 
*LAN_ROUT_TBL_CFG : *WAN_ROUT_TBL_CFG; if ( (flags & IFX_PPA_SET_ROUTE_CFG_ENTRY_NUM) ) { cfg.rout_num = entry_num - MAX_ROUTING_ENTRIES_PER_HASH_BLOCK * MAX_HASH_BLOCK; if ( f_is_lan ) { g_lan_collision_routing_entries = cfg.rout_num; p_occupation = g_lan_routing_entry_occupation; } else { g_wan_collision_routing_entries = cfg.rout_num; p_occupation = g_wan_routing_entry_occupation; } ppa_memset(p_occupation, 0, (entry_num + BITSIZEOF_UINT16 - 1) / BITSIZEOF_UINT16 * sizeof(uint16_t)); if ( entry_num % BITSIZEOF_UINT16 ) { p_occupation += entry_num / BITSIZEOF_UINT16; *p_occupation = ~0 ^ ((1 << (entry_num % BITSIZEOF_UINT16)) - 1); } } if ( !f_is_lan && (flags & IFX_PPA_SET_ROUTE_CFG_MC_ENTRY_NUM) ) { g_wan_mc_entries = mc_entry_num; cfg.wan_rout_mc_num = mc_entry_num; if ( mc_entry_num % BITSIZEOF_UINT32 ) g_wan_mc_entry_occupation[mc_entry_num / BITSIZEOF_UINT32] = ~0 ^ ((1 << (mc_entry_num % BITSIZEOF_UINT32)) - 1); } if ( (flags & IFX_PPA_SET_ROUTE_CFG_IP_VERIFY) ) cfg.ip_ver_en = f_ip_verify ? 1 : 0; if ( (flags & IFX_PPA_SET_ROUTE_CFG_TCPUDP_VERIFY) ) cfg.tcpdup_ver_en = f_tcpudp_verify ? 1 : 0; if ( (flags & IFX_PPA_SET_ROUTE_CFG_TCPUDP_ERR_DROP) ) cfg.iptcpudperr_drop = f_iptcpudp_err_drop ? 1 : 0; if ( (flags & IFX_PPA_SET_ROUTE_CFG_DROP_ON_NOT_HIT) ) cfg.rout_drop = f_drop_on_no_hit ? 1 : 0; if ( !f_is_lan && (flags & IFX_PPA_SET_ROUTE_CFG_MC_DROP_ON_NOT_HIT) ) cfg.wan_rout_mc_drop = f_mc_drop_on_no_hit ? 1 : 0; if ( f_is_lan ) *LAN_ROUT_TBL_CFG = cfg; else *WAN_ROUT_TBL_CFG = cfg; } void set_bridging_cfg(uint32_t entry_num, uint32_t br_to_src_port_mask, uint32_t br_to_src_port_en, uint32_t f_dest_vlan_en, uint32_t f_src_vlan_en, uint32_t f_mac_change_drop, uint32_t flags) { } /*! \fn void set_fast_mode(uint32_t mode, uint32_t flags) \ingroup AMAZON_S_PPA_PPE_D5_HAL_GLOBAL_FUNCTIONS \brief set fastpath mode for interfaces \param mode bit 0 - app2 (1 direct, 0 indirect), bit 1 - eth1 (1 direct, 0 indirect) \param flags bit 0 - app2 field in mode is valid, bit 1 - eth1 field in mode is valid \return no return value */ void set_fast_mode(uint32_t mode, uint32_t flags) { if ( (flags & IFX_PPA_SET_FAST_MODE_APP2) ) GEN_MODE_CFG->app2_indirect = (mode & IFX_PPA_SET_FAST_MODE_APP2_DIRECT) ? 0 : 1; if ( (flags & IFX_PPA_SET_FAST_MODE_ETH1) ) GEN_MODE_CFG->us_indirect = (mode & IFX_PPA_SET_FAST_MODE_ETH1_DIRECT) ? 0 : 1; // TODO // reconfig_dma_channel(GEN_MODE_CFG->cpu1_fast_mode && GEN_MODE_CFG->wan_fast_mode); } /*! \fn void set_if_wfq(uint32_t if_wfq, uint32_t if_no) \ingroup AMAZON_S_PPA_PPE_D5_HAL_GLOBAL_FUNCTIONS \brief obsolete in D5 \return no return value */ void set_if_wfq(uint32_t if_wfq, uint32_t if_no) { } /*! \fn void set_dplus_wfq(uint32_t wfq) \ingroup AMAZON_S_PPA_PPE_D5_HAL_GLOBAL_FUNCTIONS \brief obsolete in D5 \return no return value */ void set_dplus_wfq(uint32_t wfq) { } /*! \fn void set_fastpath_wfq(uint32_t wfq) \ingroup AMAZON_S_PPA_PPE_D5_HAL_GLOBAL_FUNCTIONS \brief obsolete in D5 \return no return value */ void set_fastpath_wfq(uint32_t wfq) { } /*! \fn void get_acc_mode(uint32_t f_is_lan, uint32_t *p_acc_mode) \ingroup AMAZON_S_PPA_PPE_D5_HAL_GLOBAL_FUNCTIONS \brief get acceleration mode for interfaces (LAN/WAN) \param f_is_lan 0: WAN interface, 1: LAN interface \param p_acc_mode a u32 data pointer to get acceleration mode (IFX_PPA_ACC_MODE_ROUTING / IFX_PPA_ACC_MODE_NONE) \return no return value */ void get_acc_mode(uint32_t f_is_lan, uint32_t *p_acc_mode) { if ( p_acc_mode ) *p_acc_mode = (f_is_lan ? GEN_MODE_CFG->lan_acc_en : GEN_MODE_CFG->wan_acc_en) ? 
IFX_PPA_ACC_MODE_ROUTING: IFX_PPA_ACC_MODE_NONE; } /*! \fn void set_acc_mode(uint32_t f_is_lan, uint32_t acc_mode) \ingroup AMAZON_S_PPA_PPE_D5_HAL_GLOBAL_FUNCTIONS \brief set acceleration mode for interfaces (LAN/WAN) \param f_is_lan 0: WAN interface, 1: LAN interface \param p_acc_mode acceleration mode (IFX_PPA_ACC_MODE_ROUTING / IFX_PPA_ACC_MODE_NONE) \return no return value */ // acc_mode: // 0: no acceleration // 2: routing acceleration void set_acc_mode(uint32_t f_is_lan, uint32_t acc_mode) { if ( f_is_lan ) GEN_MODE_CFG->lan_acc_en = (acc_mode & IFX_PPA_ACC_MODE_ROUTING) ? 1 : 0; else GEN_MODE_CFG->wan_acc_en = (acc_mode & IFX_PPA_ACC_MODE_ROUTING) ? 1 : 0; } /*! \fn void set_default_dest_list(uint32_t uc_dest_list, uint32_t mc_dest_list, uint32_t if_no) \ingroup AMAZON_S_PPA_PPE_D5_HAL_GLOBAL_FUNCTIONS \brief obsolete in D5 \return no return value */ void set_default_dest_list(uint32_t uc_dest_list, uint32_t mc_dest_list, uint32_t if_no) { } void set_bridge_if_vlan_config(uint32_t if_no, uint32_t f_eg_vlan_insert, uint32_t f_eg_vlan_remove, uint32_t f_ig_vlan_aware, uint32_t f_ig_src_ip_based, uint32_t f_ig_eth_type_based, uint32_t f_ig_vlanid_based, uint32_t f_ig_port_based, uint32_t f_eg_out_vlan_insert, uint32_t f_eg_out_vlan_remove, uint32_t f_ig_out_vlan_aware) { } void get_bridge_if_vlan_config(uint32_t if_no, uint32_t *f_eg_vlan_insert, uint32_t *f_eg_vlan_remove, uint32_t *f_ig_vlan_aware, uint32_t *f_ig_src_ip_based, uint32_t *f_ig_eth_type_based, uint32_t *f_ig_vlanid_based, uint32_t *f_ig_port_based, uint32_t *f_eg_out_vlan_insert, uint32_t *f_eg_out_vlan_remove, uint32_t *f_ig_out_vlan_aware) { } int32_t add_vlan_map(uint32_t ig_criteria_type, uint32_t ig_criteria, uint32_t new_vci, uint32_t dest_qos, uint32_t outer_vlan_ix, uint32_t in_out_etag_ctrl, uint32_t vlan_port_map) { return IFX_EPERM; } void del_vlan_map(uint32_t ig_criteria_type, uint32_t ig_criteria) { } int32_t get_vlan_map(uint32_t ig_criteria_type, uint32_t entry, uint32_t *ig_criteria, uint32_t *new_vci, uint32_t *dest_qos, uint32_t *outer_vlan_ix, uint32_t *in_out_etag_ctrl, uint32_t *vlan_port_map) { return IFX_ENOTAVAIL; } void del_all_vlan_map(void) { } int32_t is_ipv6_enabled(void) { return GEN_MODE_CFG1->ipv6_acc_en ? 
1 : 0; } int32_t add_ipv6_ip_entry(uint32_t ipv6_ip[4], uint32_t *p_entry) { int32_t ret = 0; int32_t entry = -1, empty_entry = -1; int x, i; if ( GEN_MODE_CFG1->ipv6_acc_en == 0 ) return -1; ASSERT(p_entry != NULL, "p_entry == NULL"); ppa_lock_get(&g_ipv6_ip_lock); for ( i = 0; entry < 0 && i < MAX_IPV6_IP_ENTRIES_PER_BLOCK; i++ ) for ( x = 0; entry < 0 && x < MAX_IPV6_IP_ENTRIES_BLOCK; x++ ) { if ( g_ipv6_ip_counter[x * MAX_IPV6_IP_ENTRIES_PER_BLOCK + i] ) { if ( ppa_memcmp((void *)IPv6_IP_IDX_TBL(x, i), ipv6_ip, 16) == 0 ) entry = x * MAX_IPV6_IP_ENTRIES_PER_BLOCK + i; } else if ( empty_entry < 0 ) empty_entry = x * MAX_IPV6_IP_ENTRIES_PER_BLOCK + i; } if ( entry >= 0 ) { g_ipv6_ip_counter[entry]++; *p_entry = entry; } else if ( empty_entry >= 0 ) { ppa_memcpy((void *)IPv6_IP_IDX_TBL(empty_entry / MAX_IPV6_IP_ENTRIES_PER_BLOCK, empty_entry % MAX_IPV6_IP_ENTRIES_PER_BLOCK), ipv6_ip, 16); g_ipv6_ip_counter[empty_entry]++; *p_entry = empty_entry; i = empty_entry % MAX_IPV6_IP_ENTRIES_PER_BLOCK; if ( i >= GEN_MODE_CFG1->ipv6_rout_num ) GEN_MODE_CFG1->ipv6_rout_num = i + 1; } else ret = -1; ppa_lock_release(&g_ipv6_ip_lock); return ret; } void del_ipv6_ip_entry(uint32_t entry) { int x = entry / MAX_IPV6_IP_ENTRIES_PER_BLOCK; int i = entry % MAX_IPV6_IP_ENTRIES_PER_BLOCK; if ( GEN_MODE_CFG1->ipv6_acc_en != 0 && x < MAX_IPV6_IP_ENTRIES_BLOCK ) { ppa_lock_get(&g_ipv6_ip_lock); if ( g_ipv6_ip_counter[entry] && !--g_ipv6_ip_counter[entry] ) ppa_memset((void *)IPv6_IP_IDX_TBL(x, i), 0, 16); for ( i = GEN_MODE_CFG1->ipv6_rout_num - 1; i >= 0; i-- ) for ( x = 0; x < MAX_IPV6_IP_ENTRIES_BLOCK; x++ ) { if ( IPv6_IP_IDX_TBL(x, i)[0] == 0 && IPv6_IP_IDX_TBL(x, i)[1] == 0 && IPv6_IP_IDX_TBL(x, i)[2] == 0 && IPv6_IP_IDX_TBL(x, i)[3] == 0 ) continue; GEN_MODE_CFG1->ipv6_rout_num = i > 1 ? 
i + 1 : 2; goto DEL_IPV6_IP_ENTRY_QUIT; } DEL_IPV6_IP_ENTRY_QUIT: ppa_lock_release(&g_ipv6_ip_lock); } } int32_t get_ipv6_ip_entry(uint32_t entry, uint32_t ipv6_ip[4]) { int x = entry / MAX_IPV6_IP_ENTRIES_PER_BLOCK; int i = entry % MAX_IPV6_IP_ENTRIES_PER_BLOCK; if ( GEN_MODE_CFG1->ipv6_acc_en == 0 || x >= MAX_IPV6_IP_ENTRIES_BLOCK ) return -1; ppa_lock_get(&g_ipv6_ip_lock); if ( !g_ipv6_ip_counter[entry] ) { ppa_lock_release(&g_ipv6_ip_lock); return -1; } ASSERT(ipv6_ip != NULL, "ipv6_ip == NULL"); ppa_memcpy(ipv6_ip, (void *)IPv6_IP_IDX_TBL(x, i), 16); ppa_lock_release(&g_ipv6_ip_lock); return 0; } int32_t add_ipv6_routing_entry(uint32_t f_is_lan, uint32_t src_ip[4], uint32_t src_port, uint32_t dst_ip[4], uint32_t dst_port, uint32_t f_is_tcp, // 1: TCP, 0: UDP uint32_t route_type, uint32_t new_ip, uint32_t new_port, uint8_t new_mac[PPA_ETH_ALEN], uint32_t new_src_mac_ix, uint32_t mtu_ix, uint32_t f_new_dscp_enable, uint32_t new_dscp, uint32_t f_vlan_ins_enable, uint32_t new_vci, uint32_t f_vlan_rm_enable, uint32_t pppoe_mode, uint32_t pppoe_ix, uint32_t f_out_vlan_ins_enable, uint32_t out_vlan_ix, uint32_t f_out_vlan_rm_enable, uint32_t dslwan_qid, uint32_t dest_list, uint32_t *p_entry) { uint32_t src_ip_entry, dst_ip_entry, entry; if ( add_ipv6_ip_entry(src_ip, &src_ip_entry) != 0 ) goto ADD_SRC_IP_FAIL; if ( add_ipv6_ip_entry(dst_ip, &dst_ip_entry) != 0 ) goto ADD_DST_IP_FAIL; if ( add_routing_entry(f_is_lan, src_ip_entry | *PSEUDO_IPv4_BASE_ADDR, src_port, dst_ip_entry | *PSEUDO_IPv4_BASE_ADDR, dst_port, f_is_tcp, route_type, new_ip, new_port, new_mac, new_src_mac_ix, mtu_ix, f_new_dscp_enable, new_dscp, f_vlan_ins_enable, new_vci, f_vlan_rm_enable, pppoe_mode, pppoe_ix, f_out_vlan_ins_enable, out_vlan_ix, f_out_vlan_rm_enable, dslwan_qid, dest_list, &entry) != 0 ) goto ADD_ROUTING_ENTRY_FAIL; ASSERT(p_entry != NULL, "p_entry == NULL"); *p_entry = entry; return 0; ADD_ROUTING_ENTRY_FAIL: del_ipv6_ip_entry(dst_ip_entry); ADD_DST_IP_FAIL: del_ipv6_ip_entry(src_ip_entry); ADD_SRC_IP_FAIL: return -1; } /*! 
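  Usage sketch (illustrative only - the addresses, ports and table indices below
  are made-up example values, and the index parameters are assumed to have been
  set up beforehand via the corresponding PPPoE/MTU/MAC/outer-VLAN table helpers
  of this HAL):
  \code
  uint32_t entry;
  uint8_t  gw_mac[PPA_ETH_ALEN] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55};

  //  accelerate a LAN->WAN TCP session 192.168.1.100:40000 -> 203.0.113.1:80,
  //  NAPT'ed to WAN IP 198.51.100.2:50000, forwarded to eth1 without VLAN/PPPoE editing
  if ( add_routing_entry(1,                         //  f_is_lan
                         0xC0A80164, 40000,         //  src_ip, src_port
                         0xCB007101, 80,            //  dst_ip, dst_port
                         1,                         //  f_is_tcp
                         3,                         //  route_type: NAPT
                         0xC6336402, 50000,         //  new_ip, new_port
                         gw_mac,                    //  new_mac (next hop)
                         0, 0,                      //  new_src_mac_ix, mtu_ix
                         0, 0,                      //  f_new_dscp_enable, new_dscp
                         0, 0, 0,                   //  inner VLAN: no insert/remove
                         0, 0,                      //  pppoe_mode, pppoe_ix
                         0, 0, 0,                   //  outer VLAN: no insert/remove
                         0,                         //  dslwan_qid
                         IFX_PPA_DEST_LIST_ETH1,    //  dest_list
                         &entry) == IFX_SUCCESS )
  {
      //  keep "entry" for later update_routing_entry()/del_routing_entry() calls
  }
  \endcode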
\fn int32_t add_routing_entry(uint32_t f_is_lan, uint32_t src_ip, uint32_t src_port, uint32_t dst_ip, uint32_t dst_port, uint32_t f_is_tcp, uint32_t route_type, uint32_t new_ip, uint32_t new_port, uint8_t new_mac[PPA_ETH_ALEN], uint32_t new_src_mac_ix, uint32_t mtu_ix, uint32_t f_new_dscp_enable, uint32_t new_dscp, uint32_t f_vlan_ins_enable, uint32_t new_vci, uint32_t f_vlan_rm_enable, uint32_t pppoe_mode, uint32_t pppoe_ix, uint32_t f_out_vlan_ins_enable, uint32_t out_vlan_ix, uint32_t f_out_vlan_rm_enable, uint32_t dslwan_qid, uint32_t dest_list, uint32_t *p_entry) \ingroup AMAZON_S_PPA_PPE_D5_HAL_GLOBAL_FUNCTIONS \brief add one routing entry \param src_ip source IP address \param src_port source PORT number \param dst_ip destination IP address \param dst_port destination PORT number \param f_is_tcp 0: UDP, 1: TCP \param route_type 0: no action, 1: IPv4, 2: NAT, 3:NAPT \param new_ip new IP address (valid in NAT and NAPT) \param new_port new PORT number (valid in NAPT) \param new_mac new destination MAC address \param new_src_mac_ix index of new source MAC address \param mtu_ix index of Max Transmission Unit \param f_new_dscp_enable replace DSCP value \param new_dscp new DSCP value \param f_vlan_ins_enable insert inner VLAN tag \param new_vci new inner VLAN tag \param f_vlan_rm_enable remove inner VLAN tag (if there is VLAN tag in incoming packet) \param pppoe_mode PPPoE termination, LAN side add PPPoE header, WAN side remove PPPoE header \param pppoe_ix index of PPPoE header, valid only in LAN side \param f_out_vlan_ins_enable insert outer VLAN tag \param out_vlan_ix index of new outer VLAN tag \param f_out_vlan_rm_enable remove outer VLAN tag \param dslwan_qid destination QID (Switch Queue) \param dest_list destination ports, bit 0: eth0, bit 1: eth1 \param p_entry a data pointer to get entry number \return 0: OK, otherwise: fail */ int32_t add_routing_entry(uint32_t f_is_lan, uint32_t src_ip, uint32_t src_port, uint32_t dst_ip, uint32_t dst_port, uint32_t f_is_tcp, // 1: TCP, 0: UDP uint32_t route_type, uint32_t new_ip, uint32_t new_port, uint8_t new_mac[PPA_ETH_ALEN], uint32_t new_src_mac_ix, uint32_t mtu_ix, uint32_t f_new_dscp_enable, uint32_t new_dscp, uint32_t f_vlan_ins_enable, uint32_t new_vci, uint32_t f_vlan_rm_enable, uint32_t pppoe_mode, uint32_t pppoe_ix, uint32_t f_out_vlan_ins_enable, uint32_t out_vlan_ix, uint32_t f_out_vlan_rm_enable, uint32_t dslwan_qid, uint32_t dest_list, uint32_t *p_entry) { uint32_t sys_flag; PPA_LOCK *p_lock; uint32_t entry; struct rout_forward_action_tbl action = {0}; struct rout_forward_compare_tbl compare; volatile struct rout_forward_action_tbl *paction; volatile struct rout_forward_compare_tbl *pcompare; volatile u32 *phit; u32 hitbit; uint32_t hash; uint16_t occupation; uint32_t f_collision; uint16_t *p_occupation; uint32_t entries; uint32_t i; uint16_t bit; #if 0 printk("add_routing_entry: \n"); printk(" f_is_lan = %d\n", f_is_lan); printk(" src_ip = %d.%d.%d.%d\n", src_ip >> 24, (src_ip >> 16) & 0xFF, (src_ip >> 8) & 0xFF, src_ip & 0xFF); printk(" src_port = %d\n", src_port); printk(" dst_ip = %d.%d.%d.%d\n", dst_ip >> 24, (dst_ip >> 16) & 0xFF, (dst_ip >> 8) & 0xFF, dst_ip & 0xFF); printk(" dst_port = %d\n", dst_port); printk(" f_is_tcp = %d\n", f_is_tcp); printk(" route_type = %d\n", route_type); printk(" new_ip = %d.%d.%d.%d\n", new_ip >> 24, (new_ip >> 16) & 0xFF, (new_ip >> 8) & 0xFF, new_ip & 0xFF); printk(" new_port = %d\n", new_port); printk(" new_mac = %02x:%02x:%02x:%02x:%02x:%02x\n", (uint32_t)new_mac[0], (uint32_t)new_mac[1], 
(uint32_t)new_mac[2], (uint32_t)new_mac[3], (uint32_t)new_mac[4], (uint32_t)new_mac[5]); printk(" new_src_mac_ix = %d (%02x:%02x:%02x:%02x:%02x:%02x)\n", new_src_mac_ix, ROUT_MAC_CFG_TBL(new_src_mac_ix)[0] >> 24, (ROUT_MAC_CFG_TBL(new_src_mac_ix)[0] >> 16) & 0xFF, (ROUT_MAC_CFG_TBL(new_src_mac_ix)[0] >> 8) & 0xFF, ROUT_MAC_CFG_TBL(new_src_mac_ix)[0] & 0xFF, (ROUT_MAC_CFG_TBL(new_src_mac_ix)[1] >> 24) & 0xFF, (ROUT_MAC_CFG_TBL(new_src_mac_ix)[1] >> 16) & 0xFF); printk(" mtu_ix = %d (%d)\n", mtu_ix, *MTU_CFG_TBL(mtu_ix)); printk(" f_new_dscp_enable = %d\n", f_new_dscp_enable); printk(" new_dscp = %d\n", new_dscp); printk(" f_vlan_ins_enable = %d\n", f_vlan_ins_enable); printk(" new_vci = %04x\n", new_vci); printk(" f_vlan_rm_enable = %d\n", f_vlan_rm_enable); printk(" pppoe_mode = %d\n", pppoe_mode); if ( f_is_lan ) printk(" pppoe_ix = %d (%d)\n", pppoe_ix, *PPPOE_CFG_TBL(pppoe_ix)); else printk(" pppoe_ix = %d\n", pppoe_ix); printk(" f_out_vlan_ins_enable = %d\n", f_out_vlan_ins_enable); printk(" out_vlan_ix = %04x\n", out_vlan_ix); printk(" f_out_vlan_rm_enable = %d\n", f_out_vlan_rm_enable); printk(" dest_qid (dslwan_qid) = %d\n", dslwan_qid); printk(" dest_list = %02X\n", dest_list); printk(" p_entry = %08X\n", (uint32_t)p_entry); #endif //// dest_list remap //if ( !(dest_list & IFX_PPA_DEST_LIST_NO_REMAP) ) //{ // uint32_t org_dest_list = dest_list; // // dest_list = 0; // for ( i = 0; org_dest_list && i < sizeof(dest_list_map) / sizeof(*dest_list_map); i++, org_dest_list >>= 1 ) // if ( (org_dest_list & 0x01) ) // dest_list |= dest_list_map[i]; //} //else // dest_list &= ~IFX_PPA_DEST_LIST_NO_REMAP; #if defined(ENABLE_NEW_HASH_ALG) && ENABLE_NEW_HASH_ALG hash = ((unsigned int)src_port << 16) | (dst_port & 0xFFFF); hash = src_ip ^ dst_ip ^ hash; hash = (((hash >> 30) & 0x03) ^ (hash >> 27) ^ (hash >> 24) ^ (hash >> 21) ^ (hash >> 18) ^ (hash >> 15) ^ (hash >> 12) ^ (hash >> 9) ^ (hash >> 6) ^ (hash >> 3) ^ hash) & 0x07; #endif if ( f_is_lan ) { p_lock = &g_lan_routing_lock; p_occupation = g_lan_routing_entry_occupation; entries = g_lan_collision_routing_entries; #if !defined(ENABLE_NEW_HASH_ALG) || !ENABLE_NEW_HASH_ALG hash = ((src_ip >> 2) & 0x07) ^ (((src_ip & 0x03) << 1) | ((src_port >> 15) & 0x01)) ^ ((src_port >> 12) & 0x07) ^ ((src_port >> 9) & 0x07) ^ ((src_port >> 6) & 0x07) ^ ((src_port >> 3) & 0x07) ^ (src_port & 0x07); #endif } else { p_lock = &g_wan_routing_lock; p_occupation = g_wan_routing_entry_occupation; entries = g_wan_collision_routing_entries; if ( GEN_MODE_CFG->wan_hash_alg ) { #if !defined(ENABLE_NEW_HASH_ALG) || !ENABLE_NEW_HASH_ALG hash = ((dst_ip >> 2) & 0x07) ^ (((dst_ip & 0x03) << 1) | ((dst_port >> 15) & 0x01)) ^ ((dst_port >> 12) & 0x07) ^ ((dst_port >> 9) & 0x07) ^ ((dst_port >> 6) & 0x07) ^ ((dst_port >> 3) & 0x07) ^ (dst_port & 0x07); #endif } else hash = dst_port & 0x07; } ppa_lock_get(p_lock); occupation = p_occupation[hash]; if ( occupation == (uint16_t)~0 ) { // collision for ( i = 0; i < (entries + BITSIZEOF_UINT16 - 1) / BITSIZEOF_UINT16; i++ ) if ( p_occupation[MAX_HASH_BLOCK + i] != (uint16_t)~0 ) goto ADD_ROUTING_ENTRY_GOON; // no empty entry ppa_lock_release(p_lock); return IFX_EAGAIN; ADD_ROUTING_ENTRY_GOON: entry = MAX_ROUTING_ENTRIES_PER_HASH_BLOCK * MAX_HASH_BLOCK + i * BITSIZEOF_UINT16; bit = 1; while ( (p_occupation[MAX_HASH_BLOCK + i] & bit) ) { bit <<= 1; entry++; } p_occupation[MAX_HASH_BLOCK + i] |= bit; f_collision = 1; } else { entry = hash * MAX_ROUTING_ENTRIES_PER_HASH_BLOCK; bit = 1; while ( (occupation & bit) ) { bit <<= 1; 
            entry++;
        }
        p_occupation[hash] |= bit;
        f_collision = 0;
    }

    action.new_port         = new_port;
    action.new_dest_mac54   = (((uint32_t)new_mac[0] & 0xFF) << 8) | ((uint32_t)new_mac[1] & 0xFF);
    action.new_dest_mac30   = (((uint32_t)new_mac[2] & 0xFF) << 24) | (((uint32_t)new_mac[3] & 0xFF) << 16) | (((uint32_t)new_mac[4] & 0xFF) << 8) | ((uint32_t)new_mac[5] & 0xFF);
    action.new_ip           = new_ip;
    action.rout_type        = route_type;
    action.new_dscp         = new_dscp;
    action.mtu_ix           = mtu_ix < MAX_MTU_ENTRIES ? mtu_ix : 0;
    action.in_vlan_ins      = f_vlan_ins_enable ? 1 : 0;
    action.in_vlan_rm       = f_vlan_rm_enable ? 1 : 0;
    action.new_dscp_en      = f_new_dscp_enable ? 1 : 0;
    action.protocol         = f_is_tcp ? 1 : 0;
    action.dest_list        = IFX_PPA_DEST_LIST_CPU0;   //  disable the action first by passing packets to CPU0
    action.pppoe_mode       = pppoe_mode ? 1 : 0;
    if ( f_is_lan && pppoe_mode )
        action.pppoe_ix     = pppoe_ix < MAX_PPPOE_ENTRIES ? pppoe_ix : 0;
    action.new_src_mac_ix   = new_src_mac_ix < MAX_MAC_ENTRIES ? new_src_mac_ix : 0;
    action.new_in_vci       = f_vlan_ins_enable ? new_vci : 0;
    action.out_vlan_ix      = f_out_vlan_ins_enable ? out_vlan_ix : 0;
    action.out_vlan_ins     = f_out_vlan_ins_enable ? 1 : 0;
    action.out_vlan_rm      = f_out_vlan_rm_enable ? 1 : 0;
    action.dest_qid         = dslwan_qid;

    compare.src_ip          = src_ip;
    compare.dest_ip         = dst_ip;
    compare.src_port        = src_port;
    compare.dest_port       = dst_port;

    if ( f_is_lan )
    {
        if ( f_collision )
        {
            pcompare = ROUT_LAN_COLL_CMP_TBL(entry - MAX_ROUTING_ENTRIES_PER_HASH_BLOCK * MAX_HASH_BLOCK);
            paction  = ROUT_LAN_COLL_ACT_TBL(entry - MAX_ROUTING_ENTRIES_PER_HASH_BLOCK * MAX_HASH_BLOCK);
            phit     = ROUT_LAN_COLL_HIT_STAT_TBL((entry - MAX_ROUTING_ENTRIES_PER_HASH_BLOCK * MAX_HASH_BLOCK) >> 5);
            hitbit   = ~(1 << (BITSIZEOF_UINT32 - 1 - ((entry - MAX_ROUTING_ENTRIES_PER_HASH_BLOCK * MAX_HASH_BLOCK) & 0x1F)));
        }
        else
        {
            pcompare = ROUT_LAN_HASH_CMP_TBL(entry);
            paction  = ROUT_LAN_HASH_ACT_TBL(entry);
            phit     = ROUT_LAN_HASH_HIT_STAT_TBL(entry >> 5);
            hitbit   = ~(1 << (BITSIZEOF_UINT32 - 1 - (entry & 0x1F)));
        }
    }
    else
    {
        if ( f_collision )
        {
            pcompare = ROUT_WAN_COLL_CMP_TBL(entry - MAX_ROUTING_ENTRIES_PER_HASH_BLOCK * MAX_HASH_BLOCK);
            paction  = ROUT_WAN_COLL_ACT_TBL(entry - MAX_ROUTING_ENTRIES_PER_HASH_BLOCK * MAX_HASH_BLOCK);
            phit     = ROUT_WAN_COLL_HIT_STAT_TBL((entry - MAX_ROUTING_ENTRIES_PER_HASH_BLOCK * MAX_HASH_BLOCK) >> 5);
            hitbit   = ~(1 << (BITSIZEOF_UINT32 - 1 - ((entry - MAX_ROUTING_ENTRIES_PER_HASH_BLOCK * MAX_HASH_BLOCK) & 0x1F)));
        }
        else
        {
            pcompare = ROUT_WAN_HASH_CMP_TBL(entry);
            paction  = ROUT_WAN_HASH_ACT_TBL(entry);
            phit     = ROUT_WAN_HASH_HIT_STAT_TBL(entry >> 5);
            hitbit   = ~(1 << (BITSIZEOF_UINT32 - 1 - (entry & 0x1F)));
        }
    }

    dbg("%s, hash = %d, entry = %d, pcompare = 0x%08x, paction = 0x%08x\n", f_is_lan ? "LAN" : "WAN", hash, entry, (unsigned int)pcompare, (unsigned int)paction);

    *pcompare = compare;
    *paction = action;

    //  before enabling this entry, clear its hit status
    sys_flag = ppa_disable_int();
    *phit &= hitbit;
    paction->dest_list = dest_list;
    paction->entry_vld = 1;     //  enable this entry finally
    ppa_enable_int(sys_flag);

    ppa_lock_release(p_lock);

    if ( f_is_lan )
        entry |= 0x80000000;    //  bit 31: 0 - WAN, 1 - LAN

    ASSERT(p_entry != NULL, "p_entry == NULL");
    *p_entry = entry;

    return IFX_SUCCESS;
}
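/*
 *  Illustrative sketch (hypothetical helpers, kept disabled): how add_routing_entry()
 *  above derives the hash bucket and how the returned entry number is encoded.
 *  With ENABLE_NEW_HASH_ALG the 3-bit hash folds src/dst IP and ports together and
 *  is used for the LAN table (and for the WAN table when GEN_MODE_CFG->wan_hash_alg
 *  is set); a session lands in one hash block of MAX_ROUTING_ENTRIES_PER_HASH_BLOCK
 *  slots, and if that block is full a collision entry above
 *  MAX_ROUTING_ENTRIES_PER_HASH_BLOCK * MAX_HASH_BLOCK is used instead.
 *  Bit 31 of the returned value marks a LAN entry.
 */
#if 0
static uint32_t routing_hash_sketch(uint32_t src_ip, uint32_t src_port,
                                    uint32_t dst_ip, uint32_t dst_port)
{
    uint32_t hash;

    //  same folding as in add_routing_entry() with ENABLE_NEW_HASH_ALG
    hash = ((unsigned int)src_port << 16) | (dst_port & 0xFFFF);
    hash = src_ip ^ dst_ip ^ hash;
    hash = (((hash >> 30) & 0x03) ^ (hash >> 27) ^ (hash >> 24) ^ (hash >> 21)
            ^ (hash >> 18) ^ (hash >> 15) ^ (hash >> 12) ^ (hash >> 9)
            ^ (hash >> 6) ^ (hash >> 3) ^ hash) & 0x07;

    return hash;    //  index of the hash block this session maps to
}

static void routing_entry_decode_sketch(uint32_t entry)
{
    int f_is_lan = (entry & 0x80000000) ? 1 : 0;    //  bit 31: 0 - WAN, 1 - LAN
    uint32_t idx = entry & 0x7FFFFFFF;

    if ( idx >= MAX_ROUTING_ENTRIES_PER_HASH_BLOCK * MAX_HASH_BLOCK )
        printk("%s collision entry %u\n", f_is_lan ? "LAN" : "WAN",
               idx - MAX_ROUTING_ENTRIES_PER_HASH_BLOCK * MAX_HASH_BLOCK);
    else
        printk("%s hash block %u, slot %u\n", f_is_lan ? "LAN" : "WAN",
               idx / MAX_ROUTING_ENTRIES_PER_HASH_BLOCK,
               idx % MAX_ROUTING_ENTRIES_PER_HASH_BLOCK);
}
#endif

/*!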
\fn void del_routing_entry(uint32_t entry) \ingroup AMAZON_S_PPA_PPE_D5_HAL_GLOBAL_FUNCTIONS \brief delete one routing entry \param entry entry number got from function call "add_routing_entry" \return no return value */ void del_routing_entry(uint32_t entry) { PPA_LOCK *p_lock; volatile struct rout_forward_action_tbl *paction; volatile struct rout_forward_compare_tbl *pcompare; uint16_t *p_occupation; uint32_t src_ip_entry = ~0, dst_ip_entry = ~0; if ( (entry & 0x80000000) ) { p_lock = &g_lan_routing_lock; entry &= 0x7FFFFFFF; p_occupation = g_lan_routing_entry_occupation; if ( entry >= MAX_ROUTING_ENTRIES_PER_HASH_BLOCK * MAX_HASH_BLOCK ) { pcompare = ROUT_LAN_COLL_CMP_TBL(entry - MAX_ROUTING_ENTRIES_PER_HASH_BLOCK * MAX_HASH_BLOCK); paction = ROUT_LAN_COLL_ACT_TBL(entry - MAX_ROUTING_ENTRIES_PER_HASH_BLOCK * MAX_HASH_BLOCK); } else { pcompare = ROUT_LAN_HASH_CMP_TBL(entry); paction = ROUT_LAN_HASH_ACT_TBL(entry); } } else { p_lock = &g_wan_routing_lock; p_occupation = g_wan_routing_entry_occupation; if ( entry >= MAX_ROUTING_ENTRIES_PER_HASH_BLOCK * MAX_HASH_BLOCK ) { pcompare = ROUT_WAN_COLL_CMP_TBL(entry - MAX_ROUTING_ENTRIES_PER_HASH_BLOCK * MAX_HASH_BLOCK); paction = ROUT_WAN_COLL_ACT_TBL(entry - MAX_ROUTING_ENTRIES_PER_HASH_BLOCK * MAX_HASH_BLOCK); } else { pcompare = ROUT_WAN_HASH_CMP_TBL(entry); paction = ROUT_WAN_HASH_ACT_TBL(entry); } } ppa_lock_get(p_lock); if ( g_ipv6_acc_en != 0 ) { src_ip_entry = pcompare->src_ip & 0xFF; dst_ip_entry = pcompare->dest_ip & 0xFF; } paction->entry_vld = 0; paction->dest_list = IFX_PPA_DEST_LIST_CPU0; ppa_memset((void *)pcompare, 0, sizeof(*pcompare)); ppa_memset((void *)paction, 0, sizeof(*paction)); p_occupation[entry >> 4] &= ~(1 << (entry & 0x0F)); ppa_lock_release(p_lock); if ( g_ipv6_acc_en != 0 ) { del_ipv6_ip_entry(src_ip_entry); del_ipv6_ip_entry(dst_ip_entry); } } /*! 
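  Usage sketch (illustrative only - "entry" is assumed to have been returned by
  add_routing_entry(), and only the fields selected by the flags mask are touched;
  all other arguments are ignored and passed as 0 here):
  \code
  uint8_t new_gw_mac[PPA_ETH_ALEN] = {0x00, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE};

  //  re-point an existing session to a new next-hop MAC and destination port list,
  //  leaving all other action fields unchanged
  update_routing_entry(entry,
                       0, 0, 0,                 //  route_type, new_ip, new_port (ignored)
                       new_gw_mac,              //  new_mac
                       0, 0, 0, 0,              //  new_src_mac_ix, mtu_ix, f_new_dscp_enable, new_dscp (ignored)
                       0, 0, 0,                 //  inner VLAN fields (ignored)
                       0, 0,                    //  pppoe_mode, pppoe_ix (ignored)
                       0, 0, 0,                 //  outer VLAN fields (ignored)
                       0,                       //  dslwan_qid (ignored)
                       IFX_PPA_DEST_LIST_ETH0,  //  dest_list
                       IFX_PPA_UPDATE_ROUTING_ENTRY_NEW_MAC | IFX_PPA_UPDATE_ROUTING_ENTRY_DEST_LIST);
  \endcode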
\fn int32_t update_routing_entry(uint32_t entry, uint32_t route_type, uint32_t new_ip, uint32_t new_port, uint8_t new_mac[PPA_ETH_ALEN], uint32_t new_src_mac_ix, uint32_t mtu_ix, uint32_t f_new_dscp_enable, uint32_t new_dscp, uint32_t f_vlan_ins_enable, uint32_t new_vci, uint32_t f_vlan_rm_enable, uint32_t pppoe_mode, uint32_t pppoe_ix, uint32_t f_out_vlan_ins_enable, uint32_t out_vlan_ix, uint32_t f_out_vlan_rm_enable, uint32_t dslwan_qid, uint32_t dest_list, uint32_t flags) \ingroup AMAZON_S_PPA_PPE_D5_HAL_GLOBAL_FUNCTIONS \brief update one routing entry \param entry entry number got from function call "add_routing_entry" \param route_type 0: no action, 1: IPv4, 2: NAT, 3:NAPT \param new_ip new IP address (valid in NAT and NAPT) \param new_port new PORT number (valid in NAPT) \param new_mac new destination MAC address \param new_src_mac_ix index of new source MAC address \param mtu_ix index of Max Transmission Unit \param f_new_dscp_enable replace DSCP value \param new_dscp new DSCP value \param f_vlan_ins_enable insert inner VLAN tag \param new_vci new inner VLAN tag \param f_vlan_rm_enable remove inner VLAN tag (if there is VLAN tag in incoming packet) \param pppoe_mode PPPoE termination, LAN side add PPPoE header, WAN side remove PPPoE header \param pppoe_ix index of PPPoE header, valid only in LAN side \param f_out_vlan_ins_enable insert outer VLAN tag \param out_vlan_ix index of new outer VLAN tag \param f_out_vlan_rm_enable remove outer VLAN tag \param dslwan_qid destination QID (Switch Queue) \param dest_list destination ports, bit 0: eth0, bit 1: eth1 \param flags mask of the fields to be updated \return 0: OK, otherwise: fail */ int32_t update_routing_entry(uint32_t entry, uint32_t route_type, uint32_t new_ip, uint32_t new_port, uint8_t new_mac[PPA_ETH_ALEN], uint32_t new_src_mac_ix, uint32_t mtu_ix, uint32_t f_new_dscp_enable, uint32_t new_dscp, uint32_t f_vlan_ins_enable, uint32_t new_vci, uint32_t f_vlan_rm_enable, uint32_t pppoe_mode, uint32_t pppoe_ix, uint32_t f_out_vlan_ins_enable, uint32_t out_vlan_ix, uint32_t f_out_vlan_rm_enable, uint32_t dslwan_qid, uint32_t dest_list, uint32_t flags) { PPA_LOCK *p_lock; uint16_t *p_occupation; volatile struct rout_forward_action_tbl *paction; struct rout_forward_action_tbl action; if ( (entry & 0x80000000) ) { // LAN p_lock = &g_lan_routing_lock; entry &= 0x7FFFFFFF; p_occupation = g_lan_routing_entry_occupation; if ( entry >= MAX_ROUTING_ENTRIES_PER_HASH_BLOCK * MAX_HASH_BLOCK ) paction = ROUT_LAN_COLL_ACT_TBL(entry - MAX_ROUTING_ENTRIES_PER_HASH_BLOCK * MAX_HASH_BLOCK); else paction = ROUT_LAN_HASH_ACT_TBL(entry); } else { // WAN p_lock = &g_wan_routing_lock; p_occupation = g_wan_routing_entry_occupation; if ( entry >= MAX_ROUTING_ENTRIES_PER_HASH_BLOCK * MAX_HASH_BLOCK ) paction = ROUT_WAN_COLL_ACT_TBL(entry - MAX_ROUTING_ENTRIES_PER_HASH_BLOCK * MAX_HASH_BLOCK); else paction = ROUT_WAN_HASH_ACT_TBL(entry); } ppa_lock_get(p_lock); if ( !(p_occupation[entry >> 4] & (1 << (entry & 0x0F))) ) { ppa_lock_release(p_lock); return IFX_EINVAL; } action = *paction; // disable this entry paction->entry_vld = 0; paction->dest_list = IFX_PPA_DEST_LIST_CPU0; // if dest_chid is not update, keep it if ( !(flags & IFX_PPA_UPDATE_ROUTING_ENTRY_DEST_LIST) ) dest_list = action.dest_list; //else //{ // if ( !(dest_list & IFX_PPA_DEST_LIST_NO_REMAP) ) // { // uint32_t org_dest_list = dest_list; // int i; // // dest_list = 0; // for ( i = 0; org_dest_list && i < sizeof(dest_list_map) / sizeof(*dest_list_map); i++, org_dest_list >>= 1 ) // if ( 
(org_dest_list & 0x01) ) // dest_list |= dest_list_map[i]; // } // else // dest_list &= ~IFX_PPA_DEST_LIST_NO_REMAP; //} action.dest_list = IFX_PPA_DEST_LIST_CPU0; if ( (flags & IFX_PPA_UPDATE_ROUTING_ENTRY_NEW_PORT) ) action.new_port = new_port; if ( (flags & IFX_PPA_UPDATE_ROUTING_ENTRY_NEW_MAC) ) { action.new_dest_mac54 = (((uint32_t)new_mac[0] & 0xFF) << 8) | ((uint32_t)new_mac[1] & 0xFF); action.new_dest_mac30 = (((uint32_t)new_mac[2] & 0xFF) << 24) | (((uint32_t)new_mac[3] & 0xFF) << 16) | (((uint32_t)new_mac[4] & 0xFF) << 8) | ((uint32_t)new_mac[5] & 0xFF); } if ( (flags & IFX_PPA_UPDATE_ROUTING_ENTRY_NEW_IP) ) action.new_ip = new_ip; if ( (flags & IFX_PPA_UPDATE_ROUTING_ENTRY_ROUTE_TYPE) ) action.rout_type = route_type; if ( (flags & IFX_PPA_UPDATE_ROUTING_ENTRY_NEW_DSCP) ) action.new_dscp = new_dscp; if ( (flags & IFX_PPA_UPDATE_ROUTING_ENTRY_MTU_IX) ) action.mtu_ix = mtu_ix < MAX_MTU_ENTRIES ? mtu_ix : 0; if ( (flags & IFX_PPA_UPDATE_ROUTING_ENTRY_VLAN_INS_EN) ) action.in_vlan_ins = f_vlan_ins_enable ? 1 : 0; if ( (flags & IFX_PPA_UPDATE_ROUTING_ENTRY_VLAN_RM_EN) ) action.in_vlan_rm = f_vlan_rm_enable ? 1 : 0; if ( (flags & IFX_PPA_UPDATE_ROUTING_ENTRY_NEW_DSCP_EN) ) action.new_dscp_en = f_new_dscp_enable ? 1 : 0; if ( (flags & IFX_PPA_UPDATE_ROUTING_ENTRY_PPPOE_MODE) ) action.pppoe_mode = pppoe_mode ? 1 : 0; if ( (flags & IFX_PPA_UPDATE_ROUTING_ENTRY_OUT_VLAN_INS_EN) ) action.out_vlan_ins = f_out_vlan_ins_enable ? 1 : 0; if ( (flags & IFX_PPA_UPDATE_ROUTING_ENTRY_OUT_VLAN_RM_EN) ) action.out_vlan_rm = f_out_vlan_rm_enable ? 1 : 0; if ( (flags & IFX_PPA_UPDATE_ROUTING_ENTRY_PPPOE_IX) ) action.pppoe_ix = action.pppoe_mode && pppoe_ix < MAX_PPPOE_ENTRIES ? pppoe_ix : 0; if ( (flags & IFX_PPA_UPDATE_ROUTING_ENTRY_NEW_SRC_MAC_IX) ) action.new_src_mac_ix = new_src_mac_ix < MAX_MAC_ENTRIES ? new_src_mac_ix : 0; if ( (flags & IFX_PPA_UPDATE_ROUTING_ENTRY_NEW_VCI) ) action.new_in_vci = action.in_vlan_ins ? new_vci : 0; if ( (flags & IFX_PPA_UPDATE_ROUTING_ENTRY_OUT_VLAN_IX) ) action.out_vlan_ix = action.out_vlan_ins ? out_vlan_ix : 0; if ( (flags & IFX_PPA_UPDATE_ROUTING_ENTRY_DEST_QID) ) action.dest_qid = dslwan_qid; action.bytes = paction->bytes; *paction = action; paction->dest_list = dest_list; paction->entry_vld = 1; ppa_lock_release(p_lock); return IFX_SUCCESS; } /*! 
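*/

/*
 * Illustrative sketch (not part of the original driver): a selective update
 * of an accelerated NAPT session. Only the fields whose bits are set in the
 * flags mask are rewritten; all other action fields keep their current
 * values. The flag names are the ones tested above; the wrapper function
 * and its arguments are hypothetical.
 */
#if 0
static int32_t example_retarget_napt_session(uint32_t entry_handle, uint32_t new_port, uint8_t new_mac[PPA_ETH_ALEN])
{
    return update_routing_entry(entry_handle,
                                0,                  /* route_type   - not updated */
                                0,                  /* new_ip       - not updated */
                                new_port,           /* updated */
                                new_mac,            /* updated */
                                0, 0,               /* src MAC ix, MTU ix - not updated */
                                0, 0,               /* DSCP         - not updated */
                                0, 0, 0,            /* inner VLAN   - not updated */
                                0, 0,               /* PPPoE        - not updated */
                                0, 0, 0,            /* outer VLAN   - not updated */
                                0,                  /* dslwan_qid   - not updated */
                                0,                  /* dest_list    - not updated */
                                IFX_PPA_UPDATE_ROUTING_ENTRY_NEW_PORT
                                    | IFX_PPA_UPDATE_ROUTING_ENTRY_NEW_MAC);
}
#endif

/*!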
\fn int32_t add_wan_mc_entry(uint32_t dest_ip_compare, uint32_t f_vlan_ins_enable, uint32_t new_vci, uint32_t f_vlan_rm_enable, uint32_t f_src_mac_enable, uint32_t src_mac_ix, uint32_t pppoe_mode, uint32_t f_out_vlan_ins_enable, uint32_t out_vlan_ix, uint32_t f_out_vlan_rm_enable, uint32_t f_new_dscp_en, uint32_t new_dscp, uint32_t dest_qid, uint32_t dest_list, uint32_t route_type, // sgh add to support multicast bridging/routing feature uint32_t *p_entry) \ingroup AMAZON_S_PPA_PPE_D5_HAL_GLOBAL_FUNCTIONS \brief add one multicast routing entry \param dest_ip_compare destination IP address \param f_vlan_ins_enable insert inner VLAN tag \param new_vci new inner VLAN tag \param f_vlan_rm_enable remove inner VLAN tag (if there is VLAN tag in incoming packet) \param src_mac_ix index of new source MAC address \param pppoe_mode PPPoE termination, LAN side add PPPoE header, WAN side remove PPPoE header \param f_out_vlan_ins_enable insert outer VLAN tag \param out_vlan_ix index of new outer VLAN tag \param f_out_vlan_rm_enable remove outer VLAN tag \param f_new_dscp_en replace DSCP value \param new_dscp new DSCP value \param dest_list destination ports, bit 0: eth0, bit 1: eth1 \param dest_qid destination Queue ID (Switch Queue) \param p_entry a data pointer to get entry number \return 0: OK, otherwise: fail */ int32_t add_wan_mc_entry(uint32_t dest_ip_compare, uint32_t f_vlan_ins_enable, uint32_t new_vci, uint32_t f_vlan_rm_enable, uint32_t f_src_mac_enable, uint32_t src_mac_ix, uint32_t pppoe_mode, uint32_t f_out_vlan_ins_enable, uint32_t out_vlan_ix, uint32_t f_out_vlan_rm_enable, uint32_t f_new_dscp_en, uint32_t new_dscp, uint32_t dest_qid, uint32_t dest_list, uint32_t route_type, // sgh add to support multicast bridging/routing feature uint32_t *p_entry) { uint32_t sys_flag; uint32_t entry; struct wan_rout_multicast_cmp_tbl compare = {0}; struct wan_rout_multicast_act_tbl action = {0}; uint32_t entries; uint32_t i; uint32_t bit; //// dest_list remap //if ( !(dest_list & IFX_PPA_DEST_LIST_NO_REMAP) ) //{ // uint32_t org_dest_list = dest_list; // // dest_list = 0; // for ( i = 0; org_dest_list && i < sizeof(dest_list_map) / sizeof(*dest_list_map); i++, org_dest_list >>= 1 ) // if ( (org_dest_list & 0x01) ) // dest_list |= dest_list_map[i]; //} //else // dest_list &= IFX_PPA_DEST_LIST_NO_REMAP; /* * find empty entry */ entries = (g_wan_mc_entries + BITSIZEOF_UINT32 - 1) / BITSIZEOF_UINT32; ppa_lock_get(&g_wan_mc_lock); for ( i = 0; i < entries; i++ ) if ( g_wan_mc_entry_occupation[i] != ~0 ) goto ADD_WAN_MC_ENTRY_GOON; // no empty entry ppa_lock_release(&g_wan_mc_lock); return IFX_EAGAIN; ADD_WAN_MC_ENTRY_GOON: entry = i * BITSIZEOF_UINT32; bit = 1; while ( (g_wan_mc_entry_occupation[i] & bit) ) { bit <<= 1; entry++; } g_wan_mc_entry_occupation[i] |= bit; compare.wan_dest_ip = dest_ip_compare; action.rout_type = route_type; action.new_dscp = f_new_dscp_en ? new_dscp : 0; action.in_vlan_ins = f_vlan_ins_enable ? 1 : 0; action.in_vlan_rm = f_vlan_rm_enable ? 1 : 0; action.new_dscp_en = f_new_dscp_en ? 1 : 0; action.new_src_mac_en = f_src_mac_enable ? 1 : 0; action.dest_list = IFX_PPA_DEST_LIST_CPU0; action.pppoe_mode = pppoe_mode ? 1 : 0; action.new_src_mac_ix = f_src_mac_enable ? src_mac_ix : 0; action.new_in_vci = f_vlan_ins_enable ? new_vci : 0; action.out_vlan_ix = f_out_vlan_ins_enable ? out_vlan_ix : 0; action.out_vlan_ins = f_out_vlan_ins_enable ? 1 : 0; action.out_vlan_rm = f_out_vlan_rm_enable ? 
1 : 0; action.dest_qid = dest_qid; *ROUT_WAN_MC_CMP_TBL(entry) = compare; *ROUT_WAN_MC_ACT_TBL(entry) = action; sys_flag = ppa_disable_int(); *ROUT_WAN_MC_HIT_STAT_TBL(entry >> 5) &= ~(1 << (BITSIZEOF_UINT32 - 1 - (entry & 0x1F))); ROUT_WAN_MC_ACT_TBL(entry)->dest_list = dest_list; ROUT_WAN_MC_ACT_TBL(entry)->entry_vld = 1; ppa_enable_int(sys_flag); ppa_lock_release(&g_wan_mc_lock); ASSERT(p_entry != NULL, "p_entry == NULL"); *p_entry = entry; return IFX_SUCCESS; } /*! \fn void del_wan_mc_entry(uint32_t entry) \ingroup AMAZON_S_PPA_PPE_D5_HAL_GLOBAL_FUNCTIONS \brief delete one multicast routing entry \param entry entry number got from function call "add_wan_mc_entry" \return no return value */ void del_wan_mc_entry(uint32_t entry) { if ( entry < g_wan_mc_entries ) { volatile uint32_t *p; ppa_lock_get(&g_wan_mc_lock); // disable entry ROUT_WAN_MC_ACT_TBL(entry)->entry_vld = 0; ROUT_WAN_MC_ACT_TBL(entry)->dest_list = IFX_PPA_DEST_LIST_CPU0; ROUT_WAN_MC_CMP_TBL(entry)->wan_dest_ip = 0; p = (volatile uint32_t *)ROUT_WAN_MC_ACT_TBL(entry); p[1] = 0; p[0] = 0; g_wan_mc_entry_occupation[entry >> 5] &= ~(1 << (entry & 0x1F)); ppa_lock_release(&g_wan_mc_lock); } } /*! \fn int32_t update_wan_mc_entry(uint32_t entry, uint32_t f_vlan_ins_enable, uint32_t new_vci, uint32_t f_vlan_rm_enable, uint32_t f_src_mac_enable, uint32_t src_mac_ix, uint32_t pppoe_mode, uint32_t f_out_vlan_ins_enable, uint32_t out_vlan_ix, uint32_t f_out_vlan_rm_enable, uint32_t f_new_dscp_en, uint32_t new_dscp, uint32_t dest_list, uint32_t dest_qid, uint32_t flags) \ingroup AMAZON_S_PPA_PPE_D5_HAL_GLOBAL_FUNCTIONS \brief update one multicast routing entry \param entry entry number got from function call "add_wan_mc_entry" \param f_vlan_ins_enable insert inner VLAN tag \param new_vci new inner VLAN tag \param f_vlan_rm_enable remove inner VLAN tag (if there is VLAN tag in incoming packet) \param src_mac_ix index of new source MAC address \param pppoe_mode PPPoE termination, LAN side add PPPoE header, WAN side remove PPPoE header \param f_out_vlan_ins_enable insert outer VLAN tag \param out_vlan_ix index of new outer VLAN tag \param f_out_vlan_rm_enable remove outer VLAN tag \param f_new_dscp_en replace DSCP value \param new_dscp new DSCP value \param dest_qid destination Queue ID (Switch Queue) \param dest_list destination ports, bit 0: eth0, bit 1: eth1 \param flags mask of the fields to be updated \return 0: OK, otherwise: fail */ int32_t update_wan_mc_entry(uint32_t entry, uint32_t f_vlan_ins_enable, uint32_t new_vci, uint32_t f_vlan_rm_enable, uint32_t f_src_mac_enable, uint32_t src_mac_ix, uint32_t pppoe_mode, uint32_t f_out_vlan_ins_enable, uint32_t out_vlan_ix, uint32_t f_out_vlan_rm_enable, uint32_t f_new_dscp_en, uint32_t new_dscp, uint32_t dest_qid, uint32_t dest_list, uint32_t flags) { struct wan_rout_multicast_act_tbl action; if ( entry >= g_wan_mc_entries ) return IFX_EINVAL; ppa_lock_get(&g_wan_mc_lock); if ( !(g_wan_mc_entry_occupation[entry >> 5] & (1 << (entry & 0x1F))) ) { ppa_lock_release(&g_wan_mc_lock); return IFX_EINVAL; } action = *ROUT_WAN_MC_ACT_TBL(entry); action.entry_vld = 0; // if not update, keep it if ( !(flags & IFX_PPA_UPDATE_WAN_MC_ENTRY_DEST_LIST) ) dest_list = action.dest_list; //else //{ // if ( !(dest_list & IFX_PPA_DEST_LIST_NO_REMAP) ) // { // uint32_t org_dest_list = dest_list; // int i; // // dest_list = 0; // for ( i = 0; org_dest_list && i < sizeof(dest_list_map) / sizeof(*dest_list_map); i++, org_dest_list >>= 1 ) // if ( (org_dest_list & 0x01) ) // dest_list |= dest_list_map[i]; // } 
// else // dest_list &= ~IFX_PPA_DEST_LIST_NO_REMAP; //} action.dest_list = IFX_PPA_DEST_LIST_CPU0; if ( (flags & IFX_PPA_UPDATE_WAN_MC_ENTRY_VLAN_INS_EN) ) action.in_vlan_ins = f_vlan_ins_enable ? 1 : 0; if ( (flags & IFX_PPA_UPDATE_WAN_MC_ENTRY_NEW_VCI) ) action.new_in_vci = action.in_vlan_ins ? new_vci : 0; if ( (flags & IFX_PPA_UPDATE_WAN_MC_ENTRY_VLAN_RM_EN) ) action.in_vlan_rm = f_vlan_rm_enable ? 1 : 0; if ( (flags & IFX_PPA_UPDATE_WAN_MC_ENTRY_SRC_MAC_EN) ) action.new_src_mac_en = f_src_mac_enable ? 1 : 0; if ( (flags & IFX_PPA_UPDATE_WAN_MC_ENTRY_SRC_MAC_IX) ) action.new_src_mac_ix = action.new_src_mac_en ? src_mac_ix : 0; if ( (flags & IFX_PPA_UPDATE_WAN_MC_ENTRY_PPPOE_MODE) ) action.pppoe_mode = pppoe_mode ? 1 : 0; if ( (flags & IFX_PPA_UPDATE_WAN_MC_ENTRY_OUT_VLAN_INS_EN) ) action.out_vlan_ins = f_out_vlan_ins_enable ? 1 : 0; if ( (flags & IFX_PPA_UPDATE_WAN_MC_ENTRY_OUT_VLAN_IX) ) action.out_vlan_ix = action.out_vlan_ins ? out_vlan_ix : 0; if ( (flags & IFX_PPA_UPDATE_WAN_MC_ENTRY_OUT_VLAN_RM_EN) ) action.out_vlan_rm = f_out_vlan_rm_enable ? 1 : 0; if ( (flags & IFX_PPA_UPDATE_WAN_MC_ENTRY_NEW_DSCP_EN) ) action.new_dscp_en = f_new_dscp_en ? 1 : 0; if ( (flags & IFX_PPA_UPDATE_WAN_MC_ENTRY_NEW_DSCP) ) action.new_dscp = action.new_dscp_en ? new_dscp : 0; if ( (flags & IFX_PPA_UPDATE_WAN_MC_ENTRY_DEST_QID) ) action.dest_qid = dest_qid; ROUT_WAN_MC_ACT_TBL(entry)->entry_vld = 0; *ROUT_WAN_MC_ACT_TBL(entry) = action; ROUT_WAN_MC_ACT_TBL(entry)->dest_list = dest_list; ROUT_WAN_MC_ACT_TBL(entry)->entry_vld = 1; ppa_lock_release(&g_wan_mc_lock); return IFX_SUCCESS; } /*! \fn int32_t get_dest_ip_from_wan_mc_entry(uint32_t entry, uint32_t *p_ip) \ingroup AMAZON_S_PPA_PPE_D5_HAL_GLOBAL_FUNCTIONS \brief update one multicast routing entry \param entry entry number got from function call "add_wan_mc_entry" \param p_ip a data pointer to get multicast IP \return 0: OK, otherwise: fail */ int32_t get_dest_ip_from_wan_mc_entry(uint32_t entry, uint32_t *p_ip) { if ( entry >= g_wan_mc_entries ) return IFX_EINVAL; ASSERT(p_ip != NULL, "p_ip == NULL"); *p_ip = ROUT_WAN_MC_CMP_TBL(entry)->wan_dest_ip; return IFX_SUCCESS; } int32_t add_bridging_entry(uint32_t port, uint8_t mac[PPA_ETH_ALEN], uint32_t f_src_mac_drop, uint32_t dslwan_qid, uint32_t dest_list, uint32_t *p_entry) { struct mac_tbl_item *p_item; struct mac_tbl_item *p_search_item; int idx; uint32_t ret = 0; const uint32_t fid = 1; const uint32_t portid = 6; //cpu port const uint32_t st_entry = 1; //static entry const uint32_t agetime = 3; //uint32_t sys_flag; *p_entry = ~0; if(port <= get_number_of_phys_port()){//switch will deal with it return IFX_EINVAL; } if ( (mac[0] & 0x01) ) return IFX_EINVAL; p_item = ppa_mem_cache_alloc(g_mac_tbl_item_cache); if ( !p_item ) return IFX_ENOMEM; p_item->next = NULL; p_item->ref = 1; ppa_memcpy(p_item->mac,mac,sizeof(p_item->mac)); p_item->mac0 = ((uint32_t)mac[2] << 24) | ((uint32_t)mac[3] << 16) | ((uint32_t)mac[4] << 8) | (uint32_t)mac[5]; p_item->mac1 = ((uint32_t)mac[0] << 8) | (uint32_t)mac[1]; p_item->age = 0; p_item->timestamp = ppa_get_time_in_10msec(); idx = BRIDGING_SESSION_LIST_HASH_VALUE(p_item->mac0); ppa_lock_get(&g_mac_tbl_lock); for ( p_search_item = g_mac_tbl_hash[idx]; p_search_item; p_search_item = p_search_item->next ) if ( p_search_item->mac0 == p_item->mac0 && p_search_item->mac1 == p_item->mac1 ) { p_search_item->ref++; ppa_lock_release(&g_mac_tbl_lock); *p_entry = (uint32_t)p_search_item; ppa_mem_cache_free(p_item, g_mac_tbl_item_cache); return IFX_SUCCESS; } #if 0 //sys_flag = 
ppa_disable_int(); while ( (*AMAZON_S_SW_ADR_TB_ST2 & (1 << 31)) ); *AMAZON_S_SW_ADR_TB_CTL0 = p_item->mac0; *AMAZON_S_SW_ADR_TB_CTL1 = p_item->mac1 | ((dest_list & 0xFF) << 20); *AMAZON_S_SW_ADR_TB_CTL2 = (0 << 20) | (7 << 16); // create new entry while ( ((reg_value = *AMAZON_S_SW_ADR_TB_ST2) & (1 << 31)) ); //ppa_enable_int(sys_flag); #endif ret = mac_entry_setting(p_item->mac, fid, portid, agetime, st_entry, MAC_TABLE_ENTRY_ADD); if ( ret != IFX_SUCCESS) { ppa_lock_release(&g_mac_tbl_lock); ppa_mem_cache_free(p_item, g_mac_tbl_item_cache); return IFX_FAILURE; } else { p_item->next = g_mac_tbl_hash[idx]; g_mac_tbl_hash[idx] = p_item; ppa_lock_release(&g_mac_tbl_lock); p_item->timestamp = ppa_get_time_in_10msec(); *p_entry = (uint32_t)p_item; return IFX_SUCCESS; } } void del_bridging_entry(uint32_t entry) { struct mac_tbl_item *p_item; struct mac_tbl_item *p_search_item, *p_prev; const uint32_t fid = 1; int idx; // uint32_t reg_value; //uint32_t sys_flag; if ( entry < KSEG0 || entry >= KSEG1 ) return; p_item = (struct mac_tbl_item *)entry; idx = BRIDGING_SESSION_LIST_HASH_VALUE(p_item->mac0); p_prev = NULL; ppa_lock_get(&g_mac_tbl_lock); for ( p_search_item = g_mac_tbl_hash[idx]; p_search_item; p_search_item = p_search_item->next ) { if ( p_search_item == p_item ) { if ( --(p_item->ref) <= 0 ) { if ( p_prev == NULL ) g_mac_tbl_hash[idx] = p_item->next; else p_prev->next = p_item->next; ppa_lock_release(&g_mac_tbl_lock); #if 0 //sys_flag = ppa_disable_int(); while ( (*AMAZON_S_SW_ADR_TB_ST2 & (1 << 31)) ); *AMAZON_S_SW_ADR_TB_CTL0 = p_item->mac0; *AMAZON_S_SW_ADR_TB_CTL1 = p_item->mac1; *AMAZON_S_SW_ADR_TB_CTL2 = (1 << 20) | (0x0F << 16); // remove entry while ( ((reg_value = *AMAZON_S_SW_ADR_TB_ST2) & (1 << 31)) ); //ppa_enable_int(sys_flag); #endif mac_entry_setting(p_item->mac, fid, 0, 0, 0, MAC_TABLE_ENTRY_REMOVE); ppa_mem_cache_free(p_item, g_mac_tbl_item_cache); return; } break; } p_prev = p_search_item; } ppa_lock_release(&g_mac_tbl_lock); } /*! \fn int32_t add_pppoe_entry(uint32_t session_id, uint32_t *p_entry) \ingroup AMAZON_S_PPA_PPE_D5_HAL_GLOBAL_FUNCTIONS \brief add PPPoE session ID \param session_id PPPoE session ID \param p_entry a data pointer to get entry number \return 0: OK, otherwise: fail */ int32_t add_pppoe_entry(uint32_t session_id, uint32_t *p_entry) { uint32_t entry, empty_entry = MAX_PPPOE_ENTRIES; session_id &= 0x0003FFFF; ppa_lock_get(&g_pppoe_lock); for ( entry = 0; entry < MAX_PPPOE_ENTRIES; entry++ ) if ( !g_pppoe_entry_counter[entry] ) empty_entry = entry; else if ( *PPPOE_CFG_TBL(entry) == session_id ) goto ADD_PPPOE_ENTRY_GOON; // no empty entry if ( empty_entry >= MAX_PPPOE_ENTRIES ) { ppa_lock_release(&g_pppoe_lock); return IFX_EAGAIN; } entry = empty_entry; *PPPOE_CFG_TBL(entry) = session_id; ADD_PPPOE_ENTRY_GOON: g_pppoe_entry_counter[entry]++; ppa_lock_release(&g_pppoe_lock); ASSERT(p_entry != NULL, "p_entry == NULL"); *p_entry = entry; return IFX_SUCCESS; } /*! \fn void del_pppoe_entry(uint32_t entry) \ingroup AMAZON_S_PPA_PPE_D5_HAL_GLOBAL_FUNCTIONS \brief delete PPPoE session ID \param entry entry number got from function call "add_pppoe_entry" \return no return value */ void del_pppoe_entry(uint32_t entry) { if ( entry < MAX_PPPOE_ENTRIES ) { ppa_lock_get(&g_pppoe_lock); if ( g_pppoe_entry_counter[entry] && !--g_pppoe_entry_counter[entry] ) *PPPOE_CFG_TBL(entry) = 0; ppa_lock_release(&g_pppoe_lock); } } /*! 
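*/

/*
 * Illustrative sketch (not part of the original driver): the PPPoE table is
 * reference counted, so adding the same session id twice returns the same
 * index and the slot is only cleared when the last user calls
 * del_pppoe_entry(). The session id 0x1234 is hypothetical.
 */
#if 0
static void example_pppoe_entry_usage(void)
{
    uint32_t ix_a, ix_b, sid;

    if ( add_pppoe_entry(0x1234, &ix_a) != IFX_SUCCESS )
        return;

    if ( add_pppoe_entry(0x1234, &ix_b) == IFX_SUCCESS )    /* same index, ref count = 2 */
    {
        if ( get_pppoe_entry(ix_b, &sid) == IFX_SUCCESS )
            printk(KERN_INFO "PPPoE entry %u holds session 0x%04x\n", ix_b, sid);
        del_pppoe_entry(ix_b);                              /* ref count back to 1 */
    }

    del_pppoe_entry(ix_a);                                  /* slot cleared here */
}
#endif

/*!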
  \fn int32_t get_pppoe_entry(uint32_t entry, uint32_t *p_session_id)
  \ingroup AMAZON_S_PPA_PPE_D5_HAL_GLOBAL_FUNCTIONS
  \brief get PPPoE session ID
  \param entry entry number got from function call "add_pppoe_entry"
  \param p_session_id a data pointer to get PPPoE session ID
  \return 0: OK, otherwise: fail
 */
int32_t get_pppoe_entry(uint32_t entry, uint32_t *p_session_id)
{
    if ( entry >= MAX_PPPOE_ENTRIES )
        return IFX_EINVAL;

    ppa_lock_get(&g_pppoe_lock);
    if ( !g_pppoe_entry_counter[entry] )
    {
        ppa_lock_release(&g_pppoe_lock);
        return IFX_EINVAL;
    }
    ASSERT(p_session_id != NULL, "p_session_id == NULL");
    *p_session_id = *PPPOE_CFG_TBL(entry);
    ppa_lock_release(&g_pppoe_lock);
    return IFX_SUCCESS;
}

/*!
  \fn int32_t add_mtu_entry(uint32_t mtu_size, uint32_t *p_entry)
  \ingroup AMAZON_S_PPA_PPE_D5_HAL_GLOBAL_FUNCTIONS
  \brief add MTU (Max Transmission Unit)
  \param mtu_size Max Transmission Unit size
  \param p_entry a data pointer to get entry number
  \return 0: OK, otherwise: fail
 */
int32_t add_mtu_entry(uint32_t mtu_size, uint32_t *p_entry)
{
    uint32_t entry, empty_entry = MAX_MTU_ENTRIES;

    mtu_size = (mtu_size & 0x0003FFFF) + 4;

    ppa_lock_get(&g_mtu_lock);
    //  find existing entry and empty entry
    for ( entry = 0; entry < MAX_MTU_ENTRIES; entry++ )
        if ( !g_mtu_entry_counter[entry] )
            empty_entry = entry;
        else if ( *MTU_CFG_TBL(entry) == mtu_size )
            goto ADD_MTU_ENTRY_GOON;
    //  no empty entry
    if ( empty_entry >= MAX_MTU_ENTRIES )
    {
        ppa_lock_release(&g_mtu_lock);
        return IFX_EAGAIN;
    }
    entry = empty_entry;
    *MTU_CFG_TBL(entry) = mtu_size;
ADD_MTU_ENTRY_GOON:
    g_mtu_entry_counter[entry]++;
    ppa_lock_release(&g_mtu_lock);

    ASSERT(p_entry != NULL, "p_entry == NULL");
    *p_entry = entry;
    return IFX_SUCCESS;
}

/*!
  \fn void del_mtu_entry(uint32_t entry)
  \ingroup AMAZON_S_PPA_PPE_D5_HAL_GLOBAL_FUNCTIONS
  \brief delete MTU (Max Transmission Unit)
  \param entry entry number got from function call "add_mtu_entry"
  \return no return value
 */
void del_mtu_entry(uint32_t entry)
{
    if ( entry < MAX_MTU_ENTRIES )
    {
        ppa_lock_get(&g_mtu_lock);
        if ( g_mtu_entry_counter[entry] && !--g_mtu_entry_counter[entry] )
            *MTU_CFG_TBL(entry) = 0;
        ppa_lock_release(&g_mtu_lock);
    }
}

/*!
  \fn int32_t get_mtu_entry(uint32_t entry, uint32_t *p_mtu_size)
  \ingroup AMAZON_S_PPA_PPE_D5_HAL_GLOBAL_FUNCTIONS
  \brief get MTU (Max Transmission Unit)
  \param entry entry number got from function call "add_mtu_entry"
  \param p_mtu_size a data pointer to get Max Transmission Unit size
  \return 0: OK, otherwise: fail
 */
int32_t get_mtu_entry(uint32_t entry, uint32_t *p_mtu_size)
{
    if ( entry >= MAX_MTU_ENTRIES )
        return IFX_EINVAL;

    ppa_lock_get(&g_mtu_lock);
    if ( !g_mtu_entry_counter[entry] )
    {
        ppa_lock_release(&g_mtu_lock);
        return IFX_EINVAL;
    }
    ASSERT(p_mtu_size != NULL, "p_mtu_size == NULL");
    *p_mtu_size = *MTU_CFG_TBL(entry) - 4;
    ppa_lock_release(&g_mtu_lock);
    return IFX_SUCCESS;
}

/*!
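*/

/*
 * Illustrative sketch (not part of the original driver): add_mtu_entry()
 * stores mtu_size + 4 in the table and get_mtu_entry() subtracts the 4
 * again, so callers always see the value they configured. The 1500 below
 * is just an example value.
 */
#if 0
static void example_mtu_entry_usage(void)
{
    uint32_t mtu_ix, mtu;

    if ( add_mtu_entry(1500, &mtu_ix) != IFX_SUCCESS )
        return;

    if ( get_mtu_entry(mtu_ix, &mtu) == IFX_SUCCESS )
        printk(KERN_INFO "MTU entry %u = %u\n", mtu_ix, mtu);   /* prints 1500 */

    del_mtu_entry(mtu_ix);
}
#endif

/*!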
\fn void uint32_t get_routing_entry_bytes(uint32_t entry) \ingroup AMAZON_S_PPA_PPE_D5_HAL_GLOBAL_FUNCTIONS \brief get one routing entry's byte counter \param entry entry number got from function call "add_routing_entry" \return byte counter value */ uint32_t get_routing_entry_bytes(uint32_t entry) { PPA_LOCK *p_lock; volatile struct rout_forward_action_tbl *paction; uint16_t *p_occupation; uint32_t byte_cnt = 0; if ( (entry & 0x80000000) ) { p_lock = &g_lan_routing_lock; entry &= 0x7FFFFFFF; p_occupation = g_lan_routing_entry_occupation; if ( entry >= MAX_ROUTING_ENTRIES_PER_HASH_BLOCK * MAX_HASH_BLOCK ) { paction = ROUT_LAN_COLL_ACT_TBL(entry - MAX_ROUTING_ENTRIES_PER_HASH_BLOCK * MAX_HASH_BLOCK); } else { paction = ROUT_LAN_HASH_ACT_TBL(entry); } } else { p_lock = &g_wan_routing_lock; p_occupation = g_wan_routing_entry_occupation; if ( entry >= MAX_ROUTING_ENTRIES_PER_HASH_BLOCK * MAX_HASH_BLOCK ) { paction = ROUT_WAN_COLL_ACT_TBL(entry - MAX_ROUTING_ENTRIES_PER_HASH_BLOCK * MAX_HASH_BLOCK); } else { paction = ROUT_WAN_HASH_ACT_TBL(entry); } } ppa_lock_get(p_lock); byte_cnt = paction->bytes; ppa_lock_release(p_lock); return byte_cnt; } /*! \fn int32_t add_mac_entry(uint8_t mac[PPA_ETH_ALEN], uint32_t *p_entry) \ingroup AMAZON_S_PPA_PPE_D5_HAL_GLOBAL_FUNCTIONS \brief add local MAC address \param mac MAC address of one local interface \param p_entry a data pointer to get entry number \return 0: OK, otherwise: fail */ int32_t add_mac_entry(uint8_t mac[PPA_ETH_ALEN], uint32_t *p_entry) { int32_t entry, empty_entry = -1; uint32_t mac52 = (((uint32_t)mac[0] & 0xFF) << 24) | (((uint32_t)mac[1] & 0xFF) << 16) | (((uint32_t)mac[2] & 0xFF) << 8) | ((uint32_t)mac[3] & 0xFF); uint32_t mac10 = (((uint32_t)mac[4] & 0xFF) << 24) | (((uint32_t)mac[5] & 0xFF) << 16); ppa_lock_get(&g_mac_lock); // find existing entry and empty entry for ( entry = MAX_MAC_ENTRIES - 1; entry >= 0; entry-- ) if ( !g_mac_entry_counter[entry] ) empty_entry = entry; else if ( ROUT_MAC_CFG_TBL(entry)[0] == mac52 && ROUT_MAC_CFG_TBL(entry)[1] == mac10 ) goto ADD_MAC_ENTRY_GOON; // no empty entry if ( empty_entry < 0 ) { ppa_lock_release(&g_mac_lock); return IFX_EAGAIN; } entry = empty_entry; ROUT_MAC_CFG_TBL(entry)[0] = mac52; ROUT_MAC_CFG_TBL(entry)[1] = mac10; ADD_MAC_ENTRY_GOON: g_mac_entry_counter[entry]++; ppa_lock_release(&g_mac_lock); ASSERT(p_entry != NULL, "p_entry == NULL"); *p_entry = entry; return IFX_SUCCESS; } /*! \fn void del_mac_entry(uint32_t entry) \ingroup AMAZON_S_PPA_PPE_D5_HAL_GLOBAL_FUNCTIONS \brief delete local MAC address \param entry entry number got from function call "add_mac_entry" \return no return value */ void del_mac_entry(uint32_t entry) { if ( entry < MAX_MAC_ENTRIES ) { ppa_lock_get(&g_mac_lock); if ( g_mac_entry_counter[entry] && !--g_mac_entry_counter[entry] ) { ROUT_MAC_CFG_TBL(entry)[0] = 0; ROUT_MAC_CFG_TBL(entry)[1] = 0; } ppa_lock_release(&g_mac_lock); } } /*! 
\fn int32_t get_mac_entry(uint32_t entry, uint8_t mac[PPA_ETH_ALEN]) \ingroup AMAZON_S_PPA_PPE_D5_HAL_GLOBAL_FUNCTIONS \brief get local MAC address \param entry entry number got from function call "add_mac_entry" \param mac a data pointer to get local MAC address \return 0: OK, otherwise: fail */ int32_t get_mac_entry(uint32_t entry, uint8_t mac[PPA_ETH_ALEN]) { if ( entry >= MAX_MAC_ENTRIES ) return IFX_EINVAL; ppa_lock_get(&g_mac_lock); if ( !g_mac_entry_counter[entry] ) { ppa_lock_release(&g_mac_lock); return IFX_EINVAL; } ASSERT(mac != NULL, "mac == NULL"); ppa_memcpy(mac, (void *)ROUT_MAC_CFG_TBL(entry), PPA_ETH_ALEN); ppa_lock_release(&g_mac_lock); return IFX_SUCCESS; } /*! \fn int32_t add_outer_vlan_entry(uint32_t new_tag, uint32_t *p_entry) \ingroup AMAZON_S_PPA_PPE_D5_HAL_GLOBAL_FUNCTIONS \brief add outer VLAN tag \param new_tag ounter VLAN tag \param p_entry a data pointer to get entry number \return 0: OK, otherwise: fail */ int32_t add_outer_vlan_entry(uint32_t new_tag, uint32_t *p_entry) { uint32_t entry, empty_entry = MAX_OUTER_VLAN_ENTRIES; ppa_lock_get(&g_outer_vlan_lock); for ( entry = 0; entry < MAX_OUTER_VLAN_ENTRIES; entry++ ) if ( !g_outer_vlan_entry_counter[entry] ) empty_entry = entry; else if ( *OUTER_VLAN_TBL(entry) == new_tag ) goto ADD_OUTER_VLAN_ENTRY_GOON; // no empty entry if ( empty_entry >= MAX_OUTER_VLAN_ENTRIES ) { ppa_lock_release(&g_outer_vlan_lock); return IFX_EAGAIN; } entry = empty_entry; *OUTER_VLAN_TBL(entry) = new_tag; ADD_OUTER_VLAN_ENTRY_GOON: g_outer_vlan_entry_counter[entry]++; ppa_lock_release(&g_outer_vlan_lock); ASSERT(p_entry != NULL, "p_entry == NULL"); *p_entry = entry; return IFX_SUCCESS; } /*! \fn void del_outer_vlan_entry(uint32_t entry) \ingroup AMAZON_S_PPA_PPE_D5_HAL_GLOBAL_FUNCTIONS \brief delete outer VLAN tag \param entry entry number got from function call "add_outer_vlan_entry" \return no return value */ void del_outer_vlan_entry(uint32_t entry) { if ( entry < MAX_OUTER_VLAN_ENTRIES ) { ppa_lock_get(&g_outer_vlan_lock); if ( g_outer_vlan_entry_counter[entry] && !--g_outer_vlan_entry_counter[entry] ) *OUTER_VLAN_TBL(entry) = 0; ppa_lock_release(&g_outer_vlan_lock); } } /*! \fn int32_t get_outer_vlan_entry(uint32_t entry, uint32_t *p_outer_vlan_tag) \ingroup AMAZON_S_PPA_PPE_D5_HAL_GLOBAL_FUNCTIONS \brief get outer VLAN tag \param entry entry number got from function call "add_outer_vlan_entry" \param p_outer_vlan_tag a data pointer to get outer VLAN tag \return 0: OK, otherwise: fail */ int32_t get_outer_vlan_entry(uint32_t entry, uint32_t *p_outer_vlan_tag) { if ( entry >= MAX_OUTER_VLAN_ENTRIES ) return IFX_EINVAL; ppa_lock_get(&g_outer_vlan_lock); if ( !g_outer_vlan_entry_counter[entry] ) { ppa_lock_release(&g_outer_vlan_lock); return IFX_EINVAL; } ASSERT(p_outer_vlan_tag != NULL, "p_outer_vlan_tag == NULL"); *p_outer_vlan_tag = *OUTER_VLAN_TBL(entry); ppa_lock_release(&g_outer_vlan_lock); return IFX_SUCCESS; } /*! \brief This is to enable/disable multiple field feature function \param[in] enable, 1--enable/0--disable multiple field \param[in] flag reserved for future \return uint8_t, The return value can be any one of the following: \n IFX_SUCCESS on sucess \n IFX_FAILURE on error */ uint8_t ifx_ppe_drv_multifield_control(uint8_t enable, uint32_t flag) { GEN_MODE_CFG1->brg_class_en = enable ? 1 : 0; return IFX_SUCCESS; } /*! 
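*/

/*
 * Illustrative sketch (not part of the original driver): enabling the
 * multiple field (bridge classification) engine and reading the state
 * back. Only functions defined in this file are used; the wrapper is
 * hypothetical.
 */
#if 0
static void example_multifield_control(void)
{
    uint8_t enabled = 0;

    ifx_ppe_drv_multifield_control(1, 0);            /* enable classification */
    ifx_ppe_drv_get_multifield_status(&enabled, 0);

    printk(KERN_INFO "multifield classification %s, max %d entries\n",
           enabled ? "enabled" : "disabled",
           ifx_ppe_drv_get_multifield_max_entry(0));
}
#endif

/*!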
\brief This is get multiple field function status: enabled/disabled \param[out] enable, 1--enable/0--disable multiple field \param[in] flag reserved for future \return uint8_t, The return value can be any one of the following: \n IFX_SUCCESS on sucess \n IFX_FAILURE on error */ uint8_t ifx_ppe_drv_get_multifield_status(uint8_t *enable, uint32_t flag) { ASSERT(enable != NULL, "enable == NULL"); *enable = GEN_MODE_CFG1->brg_class_en; return IFX_SUCCESS; } /*! \brief This is to get the maximum multiple field entry number \param[in] flag reserved for future \return int32_t, return the maximum multiple field entry number */ int32_t ifx_ppe_drv_get_multifield_max_entry(uint32_t flag) { return MAX_CLASSIFICATION_ENTRIES; } static INLINE int mfe_param2word(PPA_MULTIFIELD_FLOW_INFO *p_multifield_info, uint32_t compare[], uint32_t mask[], struct classification_act_tbl *act) { int ret = 0; PPA_MULTIFIELD_VLAN_INFO_MANUAL *p_vlan = &(p_multifield_info->cfg0.vlan_info.vlan_info_manual); unsigned int out_vci; int i; compare[0] |= ((unsigned int)p_multifield_info->cfg0.ether_type & 0xFFFF) << 16; mask[0] |= ((unsigned int)p_multifield_info->cfg0.ether_type_mask & 0xFFFF) << 16; compare[0] |= ((unsigned int)p_multifield_info->cfg0.dscp & 0xFF) << 8; mask[0] |= ((unsigned int)p_multifield_info->cfg0.dscp_mask & 0xFF) << 8; compare[0] |= (unsigned int)p_multifield_info->cfg0.pkt_length & 0xFF; mask[0] |= (unsigned int)p_multifield_info->cfg0.pkt_length_mask & 0xFF; compare[1] = p_multifield_info->cfg0.s_ip; mask[1] = p_multifield_info->cfg0.s_ip_mask; compare[2] |= ((unsigned int)p_vlan->out_vlan_pri & 0x07) << 29; mask[2] |= ((unsigned int)p_vlan->out_vlan_pri_mask & 0x07) << 29; compare[2] |= p_vlan->out_vlan_cfi ? (1 << 28) : 0; mask[2] |= p_vlan->out_vlan_cfi_mask ? (1 << 28) : 0; compare[2] |= ((unsigned int)p_vlan->out_vlan_vid & 0x0FFF) << 16; mask[2] |= ((unsigned int)p_vlan->out_vlan_vid_mask & 0x0FFF) << 16; compare[2] |= ((unsigned int)p_multifield_info->cfg0.l3_off0 & 0xFF) << 8; mask[2] |= ((unsigned int)p_multifield_info->cfg0.l3_off0_mask & 0xFF) << 8; compare[2] |= (unsigned int)p_multifield_info->cfg0.l3_off1 & 0xFF; mask[2] |= (unsigned int)p_multifield_info->cfg0.l3_off1_mask & 0xFF; compare[3] |= ((unsigned int)p_vlan->in_vlan_pri & 0x07) << 29; mask[3] |= ((unsigned int)p_vlan->in_vlan_pri_mask & 0x07) << 29; compare[3] |= p_vlan->in_vlan_cfi ? (1 << 28) : 0; mask[3] |= p_vlan->in_vlan_cfi_mask ? (1 << 28) : 0; compare[3] |= ((unsigned int)p_vlan->in_vlan_vid & 0x0FFF) << 16; mask[3] |= ((unsigned int)p_vlan->in_vlan_vid_mask & 0x0FFF) << 16; compare[3] |= 1 << (p_vlan->tx_if_id + 8); // mask[3] |= 0xFF << 8; compare[3] |= (unsigned int)p_vlan->rx_if_id << 5; // mask[3] |= 0x07 << 5; compare[3] |= p_multifield_info->cfg0.ipv4 ? (1 << 4) : 0; mask[3] |= p_multifield_info->cfg0.ipv4_mask ? (1 << 4) : 0; compare[3] |= p_multifield_info->cfg0.ipv6 ? (1 << 3) : 0; mask[3] |= p_multifield_info->cfg0.ipv6_mask ? (1 << 3) : 0; compare[3] |= p_multifield_info->cfg0.pppoe_session ? (1 << 2) : 0; mask[3] |= p_multifield_info->cfg0.pppoe_session_mask ? (1 << 2) : 0; compare[3] |= (unsigned int)p_vlan->is_vlan & 0x03; mask[3] |= (unsigned int)p_vlan->is_vlan_mask & 0x03; for ( i = 0; i < 4; i++ ) compare[i] &= ~mask[i]; if ( p_vlan->action_in_vlan_insert ) { act->in_vlan_ins = 1; act->new_in_vci = ((unsigned int)p_vlan->new_in_vlan_pri << 13) | ((unsigned int)p_vlan->new_in_vlan_cfi << 12) | p_vlan->new_in_vlan_vid; } act->in_vlan_rm = p_vlan->action_in_vlan_remove ? 
                       1 : 0;

    if ( p_vlan->action_out_vlan_insert )
    {
        act->out_vlan_ins = 1;
        out_vci = ((unsigned int)p_vlan->new_out_vlan_tpid << 16)
                  | ((unsigned int)p_vlan->new_out_vlan_pri << 13)
                  | ((unsigned int)p_vlan->new_out_vlan_cfi << 12)
                  | p_vlan->new_out_vlan_vid;
        if ( add_outer_vlan_entry(out_vci, &out_vci) != IFX_SUCCESS )
            return -1;
        else
        {
            act->out_vlan_ix = out_vci;
            ret = out_vci + 1;
        }
    }
    act->out_vlan_rm = p_vlan->action_out_vlan_remove ? 1 : 0;

    act->dest_qid = p_multifield_info->cfg0.queue_id;

    return ret;
}

static INLINE int mfe_word2param(PPA_MULTIFIELD_FLOW_INFO *p_multifield_info, uint32_t compare[], uint32_t mask[], struct classification_act_tbl *act)
{
    PPA_MULTIFIELD_VLAN_INFO_MANUAL *p_vlan = &(p_multifield_info->cfg0.vlan_info.vlan_info_manual);
    unsigned int out_vci;
    unsigned int tmp_id;
    int i;

    ppa_memset( p_vlan, 0, sizeof(*p_vlan) );
    p_multifield_info->cfg0.vlan_info.bfauto = 0;

    p_multifield_info->cfg0.ether_type      = compare[0] >> 16;
    p_multifield_info->cfg0.ether_type_mask = mask[0] >> 16;
    p_multifield_info->cfg0.dscp            = ( compare[0] >> 8 ) & 0xFF;
    p_multifield_info->cfg0.dscp_mask       = ( mask[0] >> 8 ) & 0xFF;
    //  bitwise AND here - the original "&& 0xFF" collapsed the length to 0/1
    p_multifield_info->cfg0.pkt_length      = compare[0] & 0xFF;
    p_multifield_info->cfg0.pkt_length_mask = mask[0] & 0xFF;

    p_multifield_info->cfg0.s_ip      = compare[1];
    p_multifield_info->cfg0.s_ip_mask = mask[1];

    p_vlan->out_vlan_pri      = ( compare[2] >> 29 ) & 0x07;
    p_vlan->out_vlan_pri_mask = ( mask[2] >> 29 ) & 0x07;
    p_vlan->out_vlan_cfi      = ( compare[2] >> 28 ) & 0x01;
    p_vlan->out_vlan_cfi_mask = ( mask[2] >> 28 ) & 0x01;
    p_vlan->out_vlan_vid      = ( compare[2] >> 16 ) & 0x0FFF;
    p_vlan->out_vlan_vid_mask = ( mask[2] >> 16 ) & 0x0FFF;
    p_multifield_info->cfg0.l3_off0      = ( compare[2] >> 8 ) & 0xFF;
    p_multifield_info->cfg0.l3_off0_mask = ( mask[2] >> 8 ) & 0xFF;
    p_multifield_info->cfg0.l3_off1      = compare[2] & 0xFF;
    p_multifield_info->cfg0.l3_off1_mask = mask[2] & 0xFF;

    p_vlan->in_vlan_pri      = ( compare[3] >> 29 ) & 0x07;
    p_vlan->in_vlan_pri_mask = ( mask[3] >> 29 ) & 0x07;
    p_vlan->in_vlan_cfi      = ( compare[3] >> 28 ) & 0x01;
    p_vlan->in_vlan_cfi_mask = ( mask[3] >> 28 ) & 0x01;
    p_vlan->in_vlan_vid      = ( compare[3] >> 16 ) & 0x0FFF;
    p_vlan->in_vlan_vid_mask = ( mask[3] >> 16 ) & 0x0FFF;

    tmp_id = ( compare[3] >> 8 ) & 0xFF;
    for ( i = 0; i < 8; i++ )
    {
        if ( tmp_id & (1 << i) )
            p_vlan->tx_if_id = i;
    }
    p_vlan->rx_if_id = ( compare[3] >> 5 ) & 0x7;

    p_multifield_info->cfg0.ipv4               = ( compare[3] & (1 << 4) ) ? 1 : 0;
    p_multifield_info->cfg0.ipv4_mask          = ( mask[3] & (1 << 4) ) ? 1 : 0;
    p_multifield_info->cfg0.ipv6               = ( compare[3] & (1 << 3) ) ? 1 : 0;
    p_multifield_info->cfg0.ipv6_mask          = ( mask[3] & (1 << 3) ) ? 1 : 0;
    p_multifield_info->cfg0.pppoe_session      = ( compare[3] & (1 << 2) ) ? 1 : 0;
    p_multifield_info->cfg0.pppoe_session_mask = ( mask[3] & (1 << 2) ) ?
1:0; p_vlan->is_vlan = compare[3] & 0x03; p_vlan->is_vlan_mask = mask[3] & 0x03; if( act->in_vlan_ins ) { p_vlan->action_in_vlan_insert = 1; p_vlan->new_in_vlan_pri = act->new_in_vci >> 13; p_vlan->new_in_vlan_cfi = ( act->new_in_vci >> 12 ) & 1; p_vlan->new_in_vlan_vid = act->new_in_vci & 0xFFF; } p_vlan->action_in_vlan_remove = act->in_vlan_rm; if( act->out_vlan_ins ) { p_vlan->action_out_vlan_insert = 1; if( get_outer_vlan_entry(act->out_vlan_ix, &out_vci) != IFX_SUCCESS ) { return IFX_FAILURE; } p_vlan->new_out_vlan_tpid = ( out_vci >> 16 ) & 0xFFFF; p_vlan->new_out_vlan_pri = ( out_vci >> 13 ) & 0x3; p_vlan->new_out_vlan_cfi = ( out_vci >> 12 ) & 0x1; p_vlan->new_out_vlan_vid = out_vci & 0xFFF; } p_vlan->action_out_vlan_remove = act->out_vlan_rm; p_multifield_info->cfg0.queue_id = act->dest_qid; return IFX_SUCCESS; } /*! \brief This is to add one multiple field entry/flow \param[in] p_multifield_info the detail multiple field entry classification and its action \param[out] index return the index of the newly added entry in the compare table, it's valid only if return IFX_SUCCESS \param[in] flag reserved for future \return int32_t, The return value can be any one of the following: \n IFX_SUCCESS on sucess \n IFX_FAILURE on error \n \note, as for p_multifield_info, only sub member */ int32_t ifx_ppe_drv_add_multifield_entry(PPA_MULTIFIELD_FLOW_INFO *p_multifield_info, int32_t *index, uint32_t flag) { int ret; int out_vci; int32_t entry, empty_entry = -1; uint32_t compare[4] = {0}; uint32_t mask[4] = {0}; struct classification_act_tbl act = {0}; int i; ASSERT(p_multifield_info != NULL, "p_multifield_info == NULL"); act.fw_cpu = 1; out_vci = mfe_param2word(p_multifield_info, compare, mask, &act); if ( out_vci < 0 ) return IFX_EAGAIN; ppa_lock_get(&g_classification_lock); // find existing entry and empty entry for ( entry = MAX_CLASSIFICATION_ENTRIES - 1; entry >= 0; entry-- ) if ( !g_classification_entry_counter[entry] ) empty_entry = entry; else { for ( i = 0; i < 4; i++ ) if ( CLASSIFICATION_CMP_TBL(entry)[i] != compare[i] ) break; if ( i < 4 ) // not match continue; if ( *(unsigned int *)&act != *(volatile unsigned int *)CLASSIFICATION_ACT_TBL(entry) ) { ppa_lock_release(&g_classification_lock); ret = IFX_EINVAL; goto ADD_MFE_ENTRY_FAIL; } goto ADD_MFE_ENTRY_GOON; } // no empty entry if ( empty_entry < 0 ) { ppa_lock_release(&g_classification_lock); ret = IFX_EAGAIN; goto ADD_MFE_ENTRY_FAIL; } entry = empty_entry; for ( i = 0; i < 4; i++ ) { CLASSIFICATION_CMP_TBL(entry)[i] = compare[i]; CLASSIFICATION_MSK_TBL(entry)[i] = mask[i]; } *CLASSIFICATION_ACT_TBL(entry) = act; CLASSIFICATION_ACT_TBL(entry)->fw_cpu = 0; ADD_MFE_ENTRY_GOON: g_classification_entry_counter[entry]++; ppa_lock_release(&g_classification_lock); ASSERT(index != NULL, "index == NULL"); *index = entry; return IFX_SUCCESS; ADD_MFE_ENTRY_FAIL: if ( out_vci > 0 ) del_outer_vlan_entry(out_vci - 1); return ret; } /*! \brief This is to get one multiple field entry as specified via index \param[in] index, the index of compare table to get \param[in] multifield_info, the pointer to store the multiple field information \return int32_t, return the bytes in the compare table. 
It can be any one of the following now: \n IFX_SUCCESS on sucess \n IFX_FAILURE on error */ int32_t ifx_ppe_drv_get_multifield_entry(int32_t index, PPA_MULTIFIELD_FLOW_INFO *p_multifield_info, uint32_t flag) { int ret; uint32_t compare[4] = {0}; uint32_t mask[4] = {0}; struct classification_act_tbl act = {0}; int i; ASSERT(p_multifield_info != NULL, "p_multifield_info == NULL"); if( index < 0 || index >= MAX_CLASSIFICATION_ENTRIES ) return IFX_EAGAIN; ppa_lock_get(&g_classification_lock); for ( i = 0; i < 4; i++ ) { compare[i] = CLASSIFICATION_CMP_TBL(index)[i]; mask[i] = CLASSIFICATION_MSK_TBL(index)[i]; } act = *CLASSIFICATION_ACT_TBL(index); ret = mfe_word2param(p_multifield_info, compare, mask, &act); ppa_lock_release(&g_classification_lock); return ret; } /*! \brief This is to delete multiple field entry if compare/mask/key completely match \param[in] multifield_info, the pointer to the compare/action table \param[in] flag reserved for future \return int32_t, The return value can be any one of the following: \n IFX_SUCCESS on sucess \n IFX_FAILURE on error, like entry full already */ int32_t ifx_ppe_drv_del_multifield_entry(PPA_MULTIFIELD_FLOW_INFO *multifield_info, uint32_t flag) { return IFX_SUCCESS; } /*! \brief This is to delete multiple field entry as specified via index \param[out] index, the index of compare table to delete \param[in] flag reserved for future \return int32_t, return the bytes in the compare table. It can be any one of the following now: \n \return int32_t, The return value can be any one of the following: \n IFX_SUCCESS on sucess \n IFX_FAILURE on error \note if index is -1, it will delete all multiple field entries */ int32_t ifx_ppe_drv_del_multifield_entry_via_index(int32_t index, uint32_t flag) { if ( index >= 0 && index < MAX_CLASSIFICATION_ENTRIES ) { ppa_lock_get(&g_classification_lock); if ( g_classification_entry_counter[index] && !--g_classification_entry_counter[index] ) { unsigned int out_vci_entry = ~0; if ( CLASSIFICATION_ACT_TBL(index)->out_vlan_ins ) out_vci_entry = CLASSIFICATION_ACT_TBL(index)->out_vlan_ix; CLASSIFICATION_ACT_TBL(index)->fw_cpu = 1; CLASSIFICATION_MSK_TBL(index)[0] = 0; CLASSIFICATION_MSK_TBL(index)[1] = 0; CLASSIFICATION_MSK_TBL(index)[2] = 0; CLASSIFICATION_MSK_TBL(index)[3] = 0; CLASSIFICATION_CMP_TBL(index)[0] = 0; CLASSIFICATION_CMP_TBL(index)[1] = 0; CLASSIFICATION_CMP_TBL(index)[2] = 0; CLASSIFICATION_CMP_TBL(index)[3] = 0; if ( out_vci_entry != ~0 ) del_outer_vlan_entry(out_vci_entry); } ppa_lock_release(&g_classification_lock); } return IFX_SUCCESS; } void get_itf_mib(uint32_t itf, struct ppe_itf_mib *p) { if ( p != NULL && itf < 8 ) ppa_memcpy(p, (void *)ITF_MIB_TBL(itf), sizeof(*p)); } /*! 
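*/

/*
 * Illustrative sketch (not part of the original driver): adding one
 * multiple field flow and removing it again by index. The EtherType and
 * queue values are hypothetical, and the mask fields appear to mark
 * "don't care" bits (they are cleared from the compare words in
 * mfe_param2word() above).
 */
#if 0
static void example_multifield_flow(void)
{
    PPA_MULTIFIELD_FLOW_INFO info;
    int32_t index;

    ppa_memset(&info, 0, sizeof(info));
    info.cfg0.ether_type      = 0x0800;   /* match IPv4 EtherType */
    info.cfg0.ether_type_mask = 0x0000;   /* compare all EtherType bits */
    info.cfg0.queue_id        = 2;        /* steer matches to queue 2 */

    if ( ifx_ppe_drv_add_multifield_entry(&info, &index, 0) == IFX_SUCCESS )
        /* deleting by index also releases the outer VLAN entry, if any */
        ifx_ppe_drv_del_multifield_entry_via_index(index, 0);
}
#endif

/*!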
\fn uint32_t test_and_clear_hit_stat(uint32_t entry) \ingroup AMAZON_S_PPA_PPE_D5_HAL_GLOBAL_FUNCTIONS \brief check hit status and clean it \param entry entry number got from function call "add_routing_entry" \return 0: OK, otherwise: fail */ uint32_t test_and_clear_hit_stat(uint32_t entry) { uint32_t sys_flag; if ( (entry & 0x7FFFFFFF) < MAX_ROUTING_ENTRIES ) { uint32_t ret; volatile u32 *phit; u32 hitbit; if ( (entry & 0x80000000) ) { // LAN entry &= 0x7FFFFFFF; if ( entry >= MAX_ROUTING_ENTRIES_PER_HASH_BLOCK * MAX_HASH_BLOCK ) { phit = ROUT_LAN_COLL_HIT_STAT_TBL((entry - MAX_ROUTING_ENTRIES_PER_HASH_BLOCK * MAX_HASH_BLOCK) >> 5); hitbit = 1 << (BITSIZEOF_UINT32 - 1 - ((entry - MAX_ROUTING_ENTRIES_PER_HASH_BLOCK * MAX_HASH_BLOCK) & 0x1F)); } else { phit = ROUT_LAN_HASH_HIT_STAT_TBL(entry >> 5); hitbit = 1 << (BITSIZEOF_UINT32 - 1 - (entry & 0x1F)); } } else { // WAN if ( entry >= MAX_ROUTING_ENTRIES_PER_HASH_BLOCK * MAX_HASH_BLOCK ) { phit = ROUT_WAN_COLL_HIT_STAT_TBL((entry - MAX_ROUTING_ENTRIES_PER_HASH_BLOCK * MAX_HASH_BLOCK) >> 5); hitbit = 1 << (BITSIZEOF_UINT32 - 1 - ((entry - MAX_ROUTING_ENTRIES_PER_HASH_BLOCK * MAX_HASH_BLOCK) & 0x1F)); } else { phit = ROUT_WAN_HASH_HIT_STAT_TBL(entry >> 5); hitbit = 1 << (BITSIZEOF_UINT32 - 1 - (entry & 0x1F)); } } sys_flag = ppa_disable_int(); ret = *phit; *phit = ret & ~hitbit; ppa_enable_int(sys_flag); ret &= hitbit; return ret; } else return 0; } /*! \fn uint32_t test_and_clear_hit_stat_batch(uint32_t entry) \ingroup AMAZON_S_PPA_PPE_D5_HAL_GLOBAL_FUNCTIONS \brief check hit status of consecutive 32 entries and clean them \param entry entry number got from function call "add_routing_entry" \return 0: OK, otherwise: fail */ uint32_t test_and_clear_hit_stat_batch(uint32_t entry) { if ( (entry & 0x7FFFFFFF) < MAX_ROUTING_ENTRIES ) { uint32_t ret; volatile u32 *phit; u32 hitbit; if ( (entry & 0x80000000) ) { // LAN entry &= 0x7FFFFFFF; if ( entry >= MAX_ROUTING_ENTRIES_PER_HASH_BLOCK * MAX_HASH_BLOCK ) { phit = ROUT_LAN_COLL_HIT_STAT_TBL((entry - MAX_ROUTING_ENTRIES_PER_HASH_BLOCK * MAX_HASH_BLOCK) >> 5); hitbit = 1 << (BITSIZEOF_UINT32 - 1 - ((entry - MAX_ROUTING_ENTRIES_PER_HASH_BLOCK * MAX_HASH_BLOCK) & 0x1F)); } else { phit = ROUT_LAN_HASH_HIT_STAT_TBL(entry >> 5); hitbit = 1 << (BITSIZEOF_UINT32 - 1 - (entry & 0x1F)); } } else { // WAN if ( entry >= MAX_ROUTING_ENTRIES_PER_HASH_BLOCK * MAX_HASH_BLOCK ) { phit = ROUT_WAN_COLL_HIT_STAT_TBL((entry - MAX_ROUTING_ENTRIES_PER_HASH_BLOCK * MAX_HASH_BLOCK) >> 5); hitbit = 1 << (BITSIZEOF_UINT32 - 1 - ((entry - MAX_ROUTING_ENTRIES_PER_HASH_BLOCK * MAX_HASH_BLOCK) & 0x1F)); } else { phit = ROUT_WAN_HASH_HIT_STAT_TBL(entry >> 5); hitbit = 1 << (BITSIZEOF_UINT32 - 1 - (entry & 0x1F)); } } ret = *phit; *phit = 0; return ret; } else return 0; } uint32_t test_and_clear_mc_hit_stat(uint32_t entry) { uint32_t sys_flag; if ( entry < MAX_WAN_MC_ENTRIES ) { uint32_t ret; uint32_t bit; bit = 1 << (BITSIZEOF_UINT32 - 1 - (entry & 0x1F)); sys_flag = ppa_disable_int(); ret = *ROUT_WAN_MC_HIT_STAT_TBL(entry >> 5); *ROUT_WAN_MC_HIT_STAT_TBL(entry >> 5) = ret & ~bit; ppa_enable_int(sys_flag); ret &= bit; return ret; } else return 0; } uint32_t test_and_clear_mc_hit_stat_batch(uint32_t entry) { if ( entry < MAX_WAN_MC_ENTRIES ) { uint32_t block = entry >> 5; uint32_t ret = *ROUT_WAN_MC_HIT_STAT_TBL(block); *ROUT_WAN_MC_HIT_STAT_TBL(block) = 0; return ret; } else return 0; } uint32_t test_and_clear_bridging_hit_stat(uint32_t entry) { return 0; } uint32_t test_and_clear_bridging_hit_stat_batch(uint32_t entry) { return 
0; } int32_t get_max_vfilter_entries(uint32_t vfilter_type) { return 0; } #ifdef CONFIG_IFX_PPA_QOS #define IFX_PPA_DRV_QOS_RATESHAPE_bitrate_2_R( bitrate_kbps, T, basic_tick) ( (bitrate_kbps) * (T) * (basic_tick) /8/1000 ) #define IFX_PPA_DRV_QOS_RATESHAPE_R_2_bitrate( R, T, basic_tick) ( (R) * 8 * 1000 / (T) / (basic_tick) ) struct bitrate_2_t_kbps { uint32_t bitrate_kbps; uint32_t T; //Time ticks }; struct bitrate_2_t_kbps bitrate_2_t_kbps_table[]={ { 100000, 1}, { 10000, 10}, { 1000, 100}, { 0, 250} }; static int32_t get_basic_time_tick(void) { return TX_QOS_CFG->time_tick / (cgu_get_pp32_clock() /1000000); } /*! \brief This is to get the maximum queue number supported on specified port \param[in] portid the physical port id which support qos queue \param[in] flag reserved for future \return returns the queue number supported on this port. */ int32_t ifx_ppa_drv_get_qos_qnum( uint32_t portid, uint32_t flag) { if( set_qos_port_id() != IFX_SUCCESS ) return 0; if ( qos_queue_portid != portid ) return 0; return TX_QOS_CFG->eth1_eg_qnum; } /*! \brief This is to get the mib couter for specified port and queue \param[in] portid the physical port id \param[in] queueid the queueid for the mib counter to get \param[out] mib the buffer for mib counter \param[in] flag reserved for future \return returns the queue number supported on this port. */ int32_t ifx_ppa_drv_get_qos_mib(uint32_t portid, uint32_t queueid, PPA_QOS_MIB *mib, uint32_t flag) { if( set_qos_port_id() != IFX_SUCCESS ) return IFX_FAILURE; //QOS Note: Different condition with different mode/platfrom if ( !mib ) return IFX_FAILURE; if ( qos_queue_portid != portid ) return IFX_FAILURE; if ( queueid >= TX_QOS_CFG->eth1_eg_qnum ) return IFX_FAILURE; #if 1 { struct eth_wan_mib_table qos_queue_mib; qos_queue_mib = *ETH_WAN_TX_MIB_TABLE(queueid); mib->total_rx_pkt = qos_queue_mib.wrx_total_pdu; mib->total_rx_bytes = qos_queue_mib.wrx_total_bytes; mib->total_tx_pkt = qos_queue_mib.wtx_total_pdu; mib->total_tx_bytes = qos_queue_mib.wtx_total_bytes; mib->cpu_path_small_pkt_drop_cnt = qos_queue_mib.wtx_cpu_drop_small_pdu; mib->cpu_path_total_pkt_drop_cnt = qos_queue_mib.wtx_cpu_drop_pdu; mib->fast_path_small_pkt_drop_cnt = qos_queue_mib.wtx_fast_drop_small_pdu; mib->fast_path_total_pkt_drop_cnt = qos_queue_mib.wtx_fast_drop_pdu; } #else { struct wtx_mib_e1 qos_queue_mib; qos_queue_mib = *(struct wtx_mib_e1 *)WTX_MIB_TBL(queueid); mib->total_rx_pkt = 0; mib->total_rx_bytes = 0; mib->total_tx_pkt = qos_queue_mib.total_pdu; mib->total_tx_bytes = qos_queue_mib.total_bytes; mib->cpu_path_small_pkt_drop_cnt = qos_queue_mib.wtx_cpu_drop_small_pdu; mib->cpu_path_total_pkt_drop_cnt = qos_queue_mib.wtx_cpu_drop_pdu; mib->fast_path_small_pkt_drop_cnt = qos_queue_mib.wtx_fast_drop_small_pdu; mib->fast_path_total_pkt_drop_cnt = qos_queue_mib.wtx_fast_drop_pdu; } #endif return IFX_SUCCESS; } #ifdef CONFIG_IFX_PPA_QOS_RATE_SHAPING /*! 
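*/

/*
 * Worked example (illustrative figures, not measured on hardware) for the
 * rate shaping conversion macros defined above: assume a basic time tick
 * of 25 us and a requested rate of 20000 kbps. The table above selects
 * T = 10 for rates from 10000 kbps up to (but not including) 100000 kbps,
 * so
 *
 *     R = 20000 * 10 * 25 / 8 / 1000 = 625 bytes per T ticks,
 *
 * and IFX_PPA_DRV_QOS_RATESHAPE_R_2_bitrate(625, 10, 25) recovers
 * 20000 kbps. The burst register S is programmed directly in bytes
 * (default_qos_rateshaping_burst = 6000 above).
 */

/*!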
  \brief This is to enable/disable the Rate Shaping feature
  \param[in] portid the physical port id which supports qos queues
  \param[in] enable 1: enable, 0: disable
  \param[in] flag reserved for future
  \return The return value can be any one of the following: \n
          - IFX_SUCCESS on success \n
          - IFX_FAILURE on error \n
 */
int32_t ifx_ppa_drv_set_ctrl_qos_rate( uint32_t portid, uint32_t enable, uint32_t flag)
{
    struct tx_qos_cfg tx_qos_cfg = {0};

    if ( set_qos_port_id() != IFX_SUCCESS )
        return IFX_FAILURE;

    //QOS Note: different condition with different mode/platform
    if ( qos_queue_portid != portid )
        return IFX_FAILURE;

    tx_qos_cfg = *TX_QOS_CFG;
    //  if ( !tx_qos_cfg.eth1_qss ) return IFX_FAILURE;  //Note: for E5 PTM WAN mode, eth1_qss is disabled.

    if ( enable )
    {
        tx_qos_cfg.shape_en = 1;
        tx_qos_cfg.overhd_bytes = 20;   //add 20 bytes for preamble and inter-frame gap
    }
    else
    {
        tx_qos_cfg.shape_en = 0;
        if ( !tx_qos_cfg.shape_en && !tx_qos_cfg.wfq_en )
        {
            //tx_qos_cfg.eth1_qss = 1;
            //tx_qos_cfg.eth1_eg_qnum = 0;
        }
    }
    *TX_QOS_CFG = tx_qos_cfg;

    return IFX_SUCCESS;
}

/*!
  \brief This is to get the Rate Shaping feature status: enabled or disabled
  \param[in] portid the physical port id which supports qos queues
  \param[out] enable 1: enabled, 0: disabled
  \param[in] flag reserved for future
  \return The return value can be any one of the following: \n
          - IFX_SUCCESS on success \n
          - IFX_FAILURE on error \n
 */
int32_t ifx_ppa_drv_get_ctrl_qos_rate( uint32_t portid, uint32_t *enable, uint32_t flag)
{
    if ( set_qos_port_id() != IFX_SUCCESS )
        return IFX_FAILURE;

    //QOS Note: different condition with different mode/platform
    if ( qos_queue_portid != portid )
        return IFX_FAILURE;

    if ( enable )
    {
        if ( TX_QOS_CFG->shape_en )
            *enable = 1;
        else
            *enable = 0;
    }

    return IFX_SUCCESS;
}

/*!
  \brief This is to set Rate Shaping for one specified port and queue
  \param[in] portid the physical port id which supports qos queues
  \param[in] queueid the queue id that needs rate shaping. \n
                     If the queue id is bigger than the maximum queue id, it will be regarded as port based rate shaping.
\param[in] rate the maximum rate limit in kbps \param[in] burst the maximun burst in bytes \param[in] flag reserved for future \return The return value can be any one of the following: \n - IFX_SUCCESS on success \n - IFX_FAILURE on error \n */ int32_t ifx_ppa_drv_set_qos_rate( uint32_t portid, uint32_t queueid, uint32_t rate, uint32_t burst, uint32_t flag) { int i; struct wtx_eg_q_shaping_cfg qos_cfg = {0}; volatile struct wtx_eg_q_shaping_cfg *p_qos_cfg = NULL; if( set_qos_port_id() != IFX_SUCCESS ) return IFX_FAILURE; //QOS Note: Different condition with different mode/platfrom if ( qos_queue_portid!= portid ) return IFX_FAILURE; if ( queueid >= TX_QOS_CFG->eth1_eg_qnum ) //regard it as port based rate shaping { if( qos_queue_portid == 7)//PTM mode and there is no ported based rate shaping since DSL has its link rate return IFX_FAILURE; //QOS Note: otherwise it is E5 ethernet wan mode, so regard it as port based rate shaping, not queue based rate shaping qos_cfg = *WTX_EG_Q_PORT_SHAPING_CFG(0); p_qos_cfg = WTX_EG_Q_PORT_SHAPING_CFG(0); } else { qos_cfg = *WTX_EG_Q_SHAPING_CFG(queueid); p_qos_cfg = WTX_EG_Q_SHAPING_CFG(queueid); } if ( rate >= bitrate_2_t_kbps_table[0].bitrate_kbps ) { qos_cfg.t = bitrate_2_t_kbps_table[0].T; } else { for( i = 0; i < sizeof(bitrate_2_t_kbps_table) / sizeof(bitrate_2_t_kbps_table[0]) - 1; i++ ) { if ( rate < bitrate_2_t_kbps_table[i].bitrate_kbps && rate >= bitrate_2_t_kbps_table[i+1].bitrate_kbps ) { qos_cfg.t = bitrate_2_t_kbps_table[i+1].T; break; } } } if ( qos_cfg.t == 0 ) { return IFX_FAILURE; } if( burst == 0 ) { burst = default_qos_rateshaping_burst; } qos_cfg.r = IFX_PPA_DRV_QOS_RATESHAPE_bitrate_2_R( rate, qos_cfg.t, get_basic_time_tick() ); qos_cfg.s = burst; *p_qos_cfg = qos_cfg; return IFX_SUCCESS; } /*! \brief This is to get Rate Shaping settings for one specified port and queue \param[in] portid the phisical port id which support qos queue \param[in] queueid the queue id need to set rate shaping \n If queue id bigger than muximum queue id, it will be regarded as port based rate shaping. \param[out] rate the maximum rate limit in kbps \param[out] burst the maximun burst in bytes \param[in] flag reserved for future \return The return value can be any one of the following: \n - IFX_SUCCESS on success \n - IFX_FAILURE on error \n */ int32_t ifx_ppa_drv_get_qos_rate( uint32_t portid, uint32_t queueid, uint32_t *rate, uint32_t *burst, uint32_t flag) { struct wtx_eg_q_shaping_cfg qos_cfg = {0}; if( set_qos_port_id() != IFX_SUCCESS ) return IFX_FAILURE; //A5 Ethernet WAN mode only QOS Note: Different condition with different mode/platfrom if ( qos_queue_portid != portid ) return IFX_FAILURE; if ( queueid >= TX_QOS_CFG->eth1_eg_qnum ) { if( qos_queue_portid == 7 ) //PTM mode and there is no ported based rate shaping since DSL has its link rate return IFX_FAILURE; //otherwise it is E5 ethernet wan mode, so regard it as port based rate shaping, not queue based rate shaping qos_cfg = *WTX_EG_Q_PORT_SHAPING_CFG(0); } else qos_cfg = *WTX_EG_Q_SHAPING_CFG(queueid); //queue based rate shaping if ( qos_cfg.t != 0 ) //not set yet { if ( rate ) *rate = IFX_PPA_DRV_QOS_RATESHAPE_R_2_bitrate(qos_cfg.r, qos_cfg.t, get_basic_time_tick()); } else { if ( rate ) *rate = 0; } if ( burst ) *burst = qos_cfg.s; return IFX_SUCCESS; } /*! 
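*/

/*
 * Illustrative sketch (not part of the original driver): shaping one egress
 * queue of the QoS port. The queue id and rate are hypothetical; passing
 * burst = 0 selects default_qos_rateshaping_burst, and a queue id at or
 * above the configured queue number is treated as port based shaping
 * (which is rejected in PTM mode, port 7).
 */
#if 0
static void example_shape_queue(void)
{
    uint32_t rate_kbps = 0, burst_bytes = 0;

    if ( ifx_ppa_drv_set_qos_rate(qos_queue_portid, 3, 20000, 0, 0) == IFX_SUCCESS )
    {
        ifx_ppa_drv_get_qos_rate(qos_queue_portid, 3, &rate_kbps, &burst_bytes, 0);
        printk(KERN_INFO "queue 3 shaped to %u kbps, burst %u bytes\n",
               rate_kbps, burst_bytes);
    }
}
#endif

/*!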
\brief This is to reset Rate Shaping for one specified port and queue ( \param[in] portid the phisical port id which support qos queue \param[in] queueid the queue id need to set rate shaping \param[in] flag reserved for future \return The return value can be any one of the following: \n - IFX_SUCCESS on success \n - IFX_FAILURE on error \n */ int32_t ifx_ppa_drv_reset_qos_rate( uint32_t portid, uint32_t queueid, uint32_t flag ) { if( set_qos_port_id() != IFX_SUCCESS ) return IFX_FAILURE; if ( qos_queue_portid != portid ) return IFX_FAILURE; ifx_ppa_drv_set_qos_rate(portid, queueid, 1000000, default_qos_rateshaping_burst, flag); return IFX_SUCCESS; } int32_t ifx_ppa_drv_init_qos_rate(void) { int i; if( set_qos_port_id() != IFX_SUCCESS ) return IFX_FAILURE; //QOS Note: Different condition with different mode/platfrom for ( i = 0; i <= ifx_ppa_drv_get_qos_qnum(qos_queue_portid, 0); i++ ) //here we purposely use <= to set port based rate sahping also ifx_ppa_drv_reset_qos_rate(qos_queue_portid, i, 0); return IFX_SUCCESS; } #endif /*end of CONFIG_IFX_PPA_QOS_RATE_SHAPING*/ #ifdef CONFIG_IFX_PPA_QOS_WFQ /*! \brief This is to eanble/disable WFQ feature \param[in] portid the phisical port id which support qos queue \param[in] enabled 1:enable 0: disable \param[in] flag reserved for future \return The return value can be any one of the following: \n - IFX_SUCCESS on success \n - IFX_FAILURE on error \n */ int32_t ifx_ppa_drv_set_ctrl_qos_wfq( uint32_t portid, uint32_t enable, uint32_t flag) { struct tx_qos_cfg tx_qos_cfg = {0}; if( set_qos_port_id() != IFX_SUCCESS ) return IFX_FAILURE; //QOS Note: Different condition with different mode/platfrom if ( qos_queue_portid != portid ) return IFX_FAILURE; tx_qos_cfg = *TX_QOS_CFG; if ( enable ) { tx_qos_cfg.wfq_en= 1; tx_qos_cfg.overhd_bytes = 20; //add 20 bytes preamble and inter-frame gape //tx_qos_cfg.eth1_qss = 1; if( flag != 0 ) wfq_multiple= flag; else wfq_multiple= IFX_PPA_DRV_QOS_WFQ_WLEVEL_2_W; } else { tx_qos_cfg.wfq_en = 0; if ( !tx_qos_cfg.shape_en && !tx_qos_cfg.wfq_en ) { //tx_qos_cfg.eth1_qss = 1; //tx_qos_cfg.eth1_eg_qnum = 0; } } *TX_QOS_CFG = tx_qos_cfg; return IFX_SUCCESS; } /*! \brief This is to get WFQ feature status: enabled or disabled \param[in] portid the phisical port id which support qos queue \param[out] enabled 1:enable 0: disable \param[in] flag reserved for future \return The return value can be any one of the following: \n - IFX_SUCCESS on success \n - IFX_FAILURE on error \n */ int32_t ifx_ppa_drv_get_ctrl_qos_wfq( uint32_t portid, uint32_t *enable, uint32_t flag) { if( set_qos_port_id() != IFX_SUCCESS ) return IFX_FAILURE; //QOS Note: Different condition with different mode/platfrom if ( qos_queue_portid != portid ) return IFX_FAILURE; if ( enable ) { if ( TX_QOS_CFG->wfq_en ) *enable = 1; else *enable =0; } return IFX_SUCCESS; } /*! \brief This is to set WFQ weight level for one specified port and queue \param[in] portid the phisical port id which support qos queue \param[in] queueid the queue id need to set WFQ \param[in] weight_level the value should be 0 ~ 100. 
It will be mapped to internal PPE FW WFQ real weight \param[in] flag reserved for future \return The return value can be any one of the following: \n - IFX_SUCCESS on success \n - IFX_FAILURE on error \n */ int32_t ifx_ppa_drv_set_qos_wfq( uint32_t portid, uint32_t queueid, uint32_t weight_level, uint32_t flag) { struct wtx_eg_q_shaping_cfg qos_cfg = {0}; if( set_qos_port_id() != IFX_SUCCESS ) return IFX_FAILURE; //QOS Note: Different condition with different mode/platfrom if ( qos_queue_portid != portid ) return IFX_FAILURE; if ( queueid >= TX_QOS_CFG->eth1_eg_qnum ) return IFX_FAILURE; qos_cfg = *WTX_EG_Q_SHAPING_CFG(queueid); if ( weight_level == 100 ) { if( wfq_multiple != 1 ) qos_cfg.w = wfq_strict_pri_weight; else qos_cfg.w = weight_level * wfq_multiple; } else { qos_cfg.w = weight_level * wfq_multiple; } *WTX_EG_Q_SHAPING_CFG(queueid) = qos_cfg; return IFX_SUCCESS; } /*! \brief This is to get WFQ settings for one specified port and queue ( default value should be 0xFFFF) \param[in] portid the phisical port id which support qos queue \param[in] queueid the queue id need to set WFQ \param[out] weight_level the value should be 0 ~ 100. \param[in] flag reserved for future \return The return value can be any one of the following: \n - IFX_SUCCESS on success \n - IFX_FAILURE on error \n */ int32_t ifx_ppa_drv_get_qos_wfq( uint32_t portid, uint32_t queueid, uint32_t *weight_level, uint32_t flag) { struct wtx_eg_q_shaping_cfg qos_cfg = {0}; if( set_qos_port_id() != IFX_SUCCESS ) return IFX_FAILURE; //QOS Note: Different condition with different mode/platfrom if ( qos_queue_portid != portid ) return IFX_FAILURE; if ( queueid >= TX_QOS_CFG->eth1_eg_qnum ) return IFX_FAILURE; if ( !weight_level ) return IFX_FAILURE; qos_cfg = *WTX_EG_Q_SHAPING_CFG(queueid); if ( qos_cfg.w == wfq_strict_pri_weight ) { if( wfq_multiple != 1 ) *weight_level = 100; else *weight_level = qos_cfg.w / wfq_multiple; } else { *weight_level = qos_cfg.w / wfq_multiple; } return IFX_SUCCESS; } /*! \brief This is to reset WFQ for one specified port and queue ( default value should be 0xFFFF) \param[in] portid the phisical port id which support qos queue \param[in] queueid the queue id need to set WFQ \param[in] flag reserved for future \return The return value can be any one of the following: \n - IFX_SUCCESS on success \n - IFX_FAILURE on error \n */ int32_t ifx_ppa_drv_reset_qos_wfq( uint32_t portid, uint32_t queueid, uint32_t flag ) { return ifx_ppa_drv_set_qos_wfq(portid, queueid, 100, flag); } int32_t ifx_ppa_drv_init_qos_wfq(void) { int i; if( set_qos_port_id() != IFX_SUCCESS ) return IFX_FAILURE; //QOS Note: Different condition with different mode/platfrom for ( i = 0; i < ifx_ppa_drv_get_qos_qnum(qos_queue_portid, 0); i++ ) ifx_ppa_drv_reset_qos_wfq(qos_queue_portid, i, 0); return IFX_SUCCESS; } #endif /*end of CONFIG_IFX_PPA_QOS_WFQ*/ #endif /*end of CONFIG_IFX_PPA_QOS*/ /* * #################################### * Init/Cleanup API * #################################### */ static INLINE void hal_init(void) { int i; // init table with default value for ( i = 0; i < MAX_CLASSIFICATION_ENTRIES; i++ ) CLASSIFICATION_ACT_TBL(i)->fw_cpu = 1; *KEY_SEL_n(0) = 0x00000000; *KEY_SEL_n(1) = 0x00000000; *KEY_SEL_n(2) = 0x4E4F8081; *KEY_SEL_n(3) = 0x52430000; GEN_MODE_CFG1->classification_num = MAX_CLASSIFICATION_ENTRIES; g_ipv6_acc_en = GEN_MODE_CFG1->ipv6_acc_en ? 
1 : 0; g_wan_itf = *CFG_WAN_PORTMAP; g_mix_map = *CFG_MIXED_PORTMAP; ppa_mem_cache_create("mac_tbl_item", sizeof(struct mac_tbl_item), &g_mac_tbl_item_cache); proc_file_create(); ppa_lock_init(&g_ipv6_ip_lock); ppa_lock_init(&g_lan_routing_lock); ppa_lock_init(&g_wan_routing_lock); ppa_lock_init(&g_wan_mc_lock); ppa_lock_init(&g_pppoe_lock); ppa_lock_init(&g_mtu_lock); ppa_lock_init(&g_mac_lock); ppa_lock_init(&g_outer_vlan_lock); ppa_lock_init(&g_classification_lock); //ppa_lock_init(&g_itf_cfg_lock); ppa_lock_init(&g_mac_tbl_lock); ifx_ppa_drv_add_ipv6_routing_entry_fn = add_ipv6_routing_entry; ifx_ppa_drv_add_routing_entry_fn = add_routing_entry; ifx_ppa_drv_del_routing_entry_fn = del_routing_entry; #if defined(ENABLE_IPv6_DEMO_SUPPORT) && ENABLE_IPv6_DEMO_SUPPORT { uint32_t entry; uint32_t lan_ip[4] = {0x01020304, 0x05060708, 0x090A0B0C, 0x0D0E0F10}; uint32_t lan_port = 1234; uint8_t lan_mac[6]= {0x00, 0x11, 0x22, 0x33, 0x44, 0x55}; uint32_t wan_ip[4] = {0x100F0E0D, 0x0C0B0A09, 0x08070605, 0x04030201}; uint32_t wan_port = 4321; uint8_t wan_mac[6]= {0x00, 0x55, 0x44, 0x33, 0x22, 0x11}; add_ipv6_routing_entry(1, lan_ip, lan_port, wan_ip, wan_port, 0, // 1: TCP, 0: UDP 1, // normal routing 0, 0, wan_mac, 1, // new_src_mac_ix 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x02, &entry); add_ipv6_routing_entry(0, wan_ip, wan_port, lan_ip, lan_port, 0, // 1: TCP, 0: UDP 1, 0, 0, lan_mac, 0, // new_src_mac_ix 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x01, &entry); add_routing_entry(1, lan_ip[0], lan_port, wan_ip[3], wan_port, 0, // 1: TCP, 0: UDP 3, // NAPT 0x0403020B, 1111, wan_mac, 1, // new_src_mac_ix 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x02, &entry); add_routing_entry(0, wan_ip[3], wan_port, 0x0403020B, 1111, 0, // 1: TCP, 0: UDP 3, // NAPT lan_ip[0], lan_port, lan_mac, 0, // new_src_mac_ix 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x01, &entry); } #endif } static INLINE void hal_exit(void) { if ( g_mac_tbl_item_cache ) { ppa_mem_cache_destroy(g_mac_tbl_item_cache); g_mac_tbl_item_cache = NULL; } proc_file_delete(); ppa_lock_destroy(&g_ipv6_ip_lock); ppa_lock_destroy(&g_lan_routing_lock); ppa_lock_destroy(&g_wan_routing_lock); ppa_lock_destroy(&g_wan_mc_lock); ppa_lock_destroy(&g_pppoe_lock); ppa_lock_destroy(&g_mtu_lock); ppa_lock_destroy(&g_mac_lock); ppa_lock_destroy(&g_outer_vlan_lock); ppa_lock_destroy(&g_classification_lock); //ppa_lock_destroy(&g_itf_cfg_lock); ppa_lock_destroy(&g_mac_tbl_lock); } int ifx_ppa_ppe_hal_init(void) { hal_init(); return IFX_SUCCESS; } void ifx_ppa_ppe_hal_exit(void) { hal_exit(); } EXPORT_SYMBOL(get_ppe_hal_id); EXPORT_SYMBOL(get_firmware_id); EXPORT_SYMBOL(get_number_of_phys_port); EXPORT_SYMBOL(get_phys_port_info); EXPORT_SYMBOL(get_max_route_entries); EXPORT_SYMBOL(get_max_bridging_entries); EXPORT_SYMBOL(set_wan_vlan_id); EXPORT_SYMBOL(get_wan_vlan_id); EXPORT_SYMBOL(set_if_type); EXPORT_SYMBOL(get_if_type); EXPORT_SYMBOL(set_route_cfg); EXPORT_SYMBOL(set_bridging_cfg); EXPORT_SYMBOL(set_fast_mode); EXPORT_SYMBOL(set_if_wfq); EXPORT_SYMBOL(set_dplus_wfq); EXPORT_SYMBOL(set_fastpath_wfq); EXPORT_SYMBOL(get_acc_mode); EXPORT_SYMBOL(set_acc_mode); EXPORT_SYMBOL(set_default_dest_list); EXPORT_SYMBOL(set_bridge_if_vlan_config); EXPORT_SYMBOL(get_bridge_if_vlan_config); EXPORT_SYMBOL(add_vlan_map); EXPORT_SYMBOL(del_vlan_map); EXPORT_SYMBOL(get_vlan_map); EXPORT_SYMBOL(del_all_vlan_map); EXPORT_SYMBOL(is_ipv6_enabled); EXPORT_SYMBOL(add_ipv6_routing_entry); EXPORT_SYMBOL(add_routing_entry); EXPORT_SYMBOL(del_routing_entry); EXPORT_SYMBOL(update_routing_entry); 
EXPORT_SYMBOL(add_wan_mc_entry); EXPORT_SYMBOL(del_wan_mc_entry); EXPORT_SYMBOL(update_wan_mc_entry); EXPORT_SYMBOL(get_dest_ip_from_wan_mc_entry); EXPORT_SYMBOL(add_bridging_entry); EXPORT_SYMBOL(del_bridging_entry); EXPORT_SYMBOL(add_pppoe_entry); EXPORT_SYMBOL(del_pppoe_entry); EXPORT_SYMBOL(get_pppoe_entry); EXPORT_SYMBOL(add_mtu_entry); EXPORT_SYMBOL(del_mtu_entry); EXPORT_SYMBOL(get_mtu_entry); EXPORT_SYMBOL(get_routing_entry_bytes); EXPORT_SYMBOL(add_mac_entry); EXPORT_SYMBOL(del_mac_entry); EXPORT_SYMBOL(get_mac_entry); EXPORT_SYMBOL(add_outer_vlan_entry); EXPORT_SYMBOL(del_outer_vlan_entry); EXPORT_SYMBOL(get_outer_vlan_entry); EXPORT_SYMBOL(ifx_ppe_drv_multifield_control); EXPORT_SYMBOL(ifx_ppe_drv_get_multifield_status); EXPORT_SYMBOL(ifx_ppe_drv_get_multifield_max_entry); EXPORT_SYMBOL(ifx_ppe_drv_add_multifield_entry); EXPORT_SYMBOL(ifx_ppe_drv_get_multifield_entry); EXPORT_SYMBOL(ifx_ppe_drv_del_multifield_entry); EXPORT_SYMBOL(ifx_ppe_drv_del_multifield_entry_via_index); EXPORT_SYMBOL(get_itf_mib); EXPORT_SYMBOL(test_and_clear_hit_stat); EXPORT_SYMBOL(test_and_clear_hit_stat_batch); EXPORT_SYMBOL(test_and_clear_mc_hit_stat); EXPORT_SYMBOL(test_and_clear_mc_hit_stat_batch); EXPORT_SYMBOL(test_and_clear_bridging_hit_stat); EXPORT_SYMBOL(test_and_clear_bridging_hit_stat_batch); EXPORT_SYMBOL(get_max_vfilter_entries); EXPORT_SYMBOL(ifx_ppa_ppe_hal_init); EXPORT_SYMBOL(ifx_ppa_ppe_hal_exit); #ifdef CONFIG_IFX_PPA_QOS EXPORT_SYMBOL(ifx_ppa_drv_get_qos_qnum); EXPORT_SYMBOL(ifx_ppa_drv_get_qos_mib); #ifdef CONFIG_IFX_PPA_QOS_RATE_SHAPING EXPORT_SYMBOL(ifx_ppa_drv_set_ctrl_qos_rate); EXPORT_SYMBOL(ifx_ppa_drv_get_ctrl_qos_rate); EXPORT_SYMBOL(ifx_ppa_drv_set_qos_rate); EXPORT_SYMBOL(ifx_ppa_drv_get_qos_rate); EXPORT_SYMBOL(ifx_ppa_drv_reset_qos_rate); EXPORT_SYMBOL(ifx_ppa_drv_init_qos_rate); #endif //end of CONFIG_IFX_PPA_QOS_RATE_SHAPING #ifdef CONFIG_IFX_PPA_QOS_WFQ EXPORT_SYMBOL(ifx_ppa_drv_set_ctrl_qos_wfq); EXPORT_SYMBOL(ifx_ppa_drv_get_ctrl_qos_wfq); EXPORT_SYMBOL(ifx_ppa_drv_set_qos_wfq); EXPORT_SYMBOL(ifx_ppa_drv_get_qos_wfq); EXPORT_SYMBOL(ifx_ppa_drv_reset_qos_wfq); EXPORT_SYMBOL(ifx_ppa_drv_init_qos_wfq); #endif //CONFIG_IFX_PPA_QOS_WFQ #endif
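/*
 * Illustrative sketch (not part of the original driver): the expected life
 * cycle as seen from a higher PPA layer. ifx_ppa_ppe_hal_init() sets up the
 * classification defaults, locks, MAC table cache and proc entries, and
 * ifx_ppa_ppe_hal_exit() tears them down again. The wrapper module below is
 * hypothetical.
 */
#if 0
static int __init example_hal_user_init(void)
{
    if ( ifx_ppa_ppe_hal_init() != IFX_SUCCESS )
        return -1;
    return 0;
}

static void __exit example_hal_user_exit(void)
{
    ifx_ppa_ppe_hal_exit();
}
#endif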