/* * Copyright (c) 2011-2014 AVM GmbH * All rights reserved. * * vim:set expandtab shiftwidth=3 softtabstop=3: * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * Alternatively, this software may be distributed and/or modified under the * terms of the GNU General Public License as published by the Free Software * Foundation. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef CONFIG_FUSIV_VX185 #include #include #endif #include /* --------------------------------------------------------------------------- */ #undef AVM_PA_FUSIV_DEBUG #undef AVM_PA_FUSIV_TRACE /* * what about statistics ? */ #if defined(CONFIG_FUSIV_VX180) && defined(CONFIG_FUSIV_KERNEL_APSTATISTICS_PER_INTERFACE) #define AVM_PA_FUSIV_AP_HAS_STATISTIC #ifdef AVM_PA_HARDWARE_PA_HAS_SESSION_STATS #define AVM_PA_FUSIV_USE_SESSION_STATS_FUNCTION #else #define AVM_PA_FUSIV_POLL_STATISTIC #endif #endif #define AVM_AP_FUSIV_ALLOW_TX_CHANNEL_FOR_LOCAL /* --------------------------------------------------------------------------- */ #if defined(AVM_PA_FUSIV_DEBUG) #define AVM_PA_FUSIV_DBG(a...) printk(KERN_ERR "[avm_pa_fusiv] " a); #else #define AVM_PA_FUSIV_DBG(a...) #endif #if defined(AVM_PA_FUSIV_TRACE) #define AVM_PA_FUSIV_TRC(a...) printk(KERN_ERR "[avm_pa_fusiv] " a); #else #define AVM_PA_FUSIV_TRC(a...) #endif #ifdef CONFIG_FUSIV_VX185 extern descInfo_t descinfo[]; #endif extern newapIfStruct_t apArray[]; enum fusivflowtype { fusiv_flow_v4, fusiv_flow_v6, fusiv_bridge_flow }; /*------------------------------------------------------------------------------------------*\ * AVM PA fusiv \*------------------------------------------------------------------------------------------*/ struct avm_pa_fusiv_session { struct avm_pa_session *avm_session; unsigned char valid_session; unsigned char rxApId; unsigned char txApId; unsigned short flowhash; enum fusivflowtype flowtype; union { apFlowEntry_t *v4; apIpv6FlowEntry_t *v6; } flow; apNewBridgeEntry_t *bridgeFlow; #ifdef AVM_PA_FUSIV_AP_HAS_STATISTIC apStatistics_t prevStat; #endif }; static int avm_pa_fusiv_add_session(struct avm_pa_session *avm_session); static int avm_pa_fusiv_remove_session(struct avm_pa_session *avm_session); static int avm_pa_fusiv_try_to_accelerate(avm_pid_handle pid_handle, struct sk_buff *skb); static int avm_pa_fusiv_alloc_rx_channel(avm_pid_handle pid_handle); static int avm_pa_fusiv_alloc_tx_channel(avm_pid_handle pid_handle); static int avm_pa_fusiv_free_tx_channel(avm_pid_handle pid_handle); #ifdef AVM_PA_FUSIV_USE_SESSION_STATS_FUNCTION static int avm_pa_fusiv_session_stats(struct avm_pa_session *avm_session, struct avm_pa_session_stats *ingress); #endif #ifdef AVM_AP_FUSIV_ALLOW_TX_CHANNEL_FOR_LOCAL static int tx_channel_allocated[CONFIG_AVM_PA_MAX_PID]; static struct avm_pa_pid_hwinfo tx_channel_hwinfo; #endif static DEFINE_SPINLOCK(session_list_lock); static struct avm_hardware_pa avm_pa_fusiv = { .add_session = avm_pa_fusiv_add_session, .remove_session = avm_pa_fusiv_remove_session, .try_to_accelerate = avm_pa_fusiv_try_to_accelerate, .alloc_rx_channel = avm_pa_fusiv_alloc_rx_channel, .alloc_tx_channel = avm_pa_fusiv_alloc_tx_channel, .free_rx_channel = NULL /* avm_pa_fusiv_free_rx_channel */ , .free_tx_channel = avm_pa_fusiv_free_tx_channel, #ifdef AVM_PA_FUSIV_USE_SESSION_STATS_FUNCTION .session_stats = avm_pa_fusiv_session_stats, #endif }; static struct avm_pa_fusiv_session fusiv_session_array[CONFIG_AVM_PA_MAX_SESSION]; static int inline avm_pa_fusiv_session_valid(struct avm_pa_fusiv_session *sess) { return sess >= &fusiv_session_array[0] && sess < &fusiv_session_array[CONFIG_AVM_PA_MAX_SESSION]; } #ifdef AVM_PA_FUSIV_POLL_STATISTIC #define AVM_PA_FUSIV_STAT_POLLING_TIME 1 static struct timer_list statistics_timer; #endif /*------------------------------------------------------------------------------------------*\ \*------------------------------------------------------------------------------------------*/ static const char *mac2str(const void *cp, char *buf, size_t size) { const unsigned char *mac = (const unsigned char *) cp; snprintf(buf, size, "%02X:%02X:%02X:%02X:%02X:%02X", mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); return buf; } /*--------------------------------------------------------------------------*\ \*--------------------------------------------------------------------------*/ void handle_fp_bridge_pkt(apPreHeader_t * apBuf, struct net_device *dev) { struct sk_buff *skb; apNewBridgeEntry_t *ap_bridge_entry; AVM_PA_FUSIV_TRC("call handle_fp_bridge_pkt\n"); ap_bridge_entry = (apNewBridgeEntry_t *)(apBuf->matchedEntryAddr); if (ap_bridge_entry) { #ifdef CONFIG_FUSIV_VX185 if ((unsigned int)ap_bridge_entry & (1 << FT_ENTRY_IN_LMEM_CHKFRMHOST_BIT)) ap_bridge_entry = (apNewBridgeEntry_t *)LMEM_PIO2CBUS(ap_bridge_entry); #endif ap_bridge_entry = (apNewBridgeEntry_t *)PHYS_TO_K1(ap_bridge_entry); } if (ap_bridge_entry && ap_bridge_entry->userHandle) { struct avm_pa_fusiv_session *fusiv_session = (struct avm_pa_fusiv_session *) (ap_bridge_entry->userHandle); if (unlikely(!avm_pa_fusiv_session_valid(fusiv_session))) { if (net_ratelimit()) printk(KERN_ERR "handle_fp_bridge_pkt: avm session invalid %p\n", fusiv_session); putCluster(apBuf); return; } if (fusiv_session->avm_session) { if ((skb = (struct sk_buff *) translateApbuf2Mbuf(apBuf)) != 0) { avm_pa_tx_channel_accelerated_packet(fusiv_session->avm_session-> egress[0].pid_handle, fusiv_session-> avm_session->session_handle, skb); AVM_PA_FUSIV_TRC ("handle_fp_bridge_pkt: packet accelerated (pid %u session %u) \n", fusiv_session->avm_session->egress[0].pid_handle, fusiv_session->avm_session->session_handle); } return; } } putCluster(apBuf); } void handle_fp_route_pkt(apPreHeader_t * apBuf, struct net_device *dev) { struct sk_buff *skb; apFlowEntry_t *flow; AVM_PA_FUSIV_TRC("call handle_fp_route_pkt\n"); flow = (apFlowEntry_t *)(apBuf->matchedEntryAddr); if (flow) { #ifdef CONFIG_FUSIV_VX185 if ((unsigned int)flow & (1 << FT_ENTRY_IN_LMEM_CHKFRMHOST_BIT)) flow = (apFlowEntry_t *)LMEM_PIO2CBUS(flow); #endif flow = (apFlowEntry_t *)PHYS_TO_K1(flow); } if (flow && flow->userHandle) { struct avm_pa_fusiv_session *fusiv_session = (struct avm_pa_fusiv_session *) (flow->userHandle); if (unlikely(!avm_pa_fusiv_session_valid(fusiv_session))) { if (net_ratelimit()) printk(KERN_ERR "handle_fp_route_pkt: avm session invalid %p\n", fusiv_session); putCluster(apBuf); return; } if (fusiv_session->avm_session) { if ((skb = (struct sk_buff *) translateApbuf2Mbuf(apBuf)) != 0) { avm_pa_tx_channel_accelerated_packet(fusiv_session->avm_session-> egress[0].pid_handle, fusiv_session-> avm_session->session_handle, skb); AVM_PA_FUSIV_TRC ("handle_fp_route_pkt: packet accelerated (pid %u session %u) \n", fusiv_session->avm_session->egress[0].pid_handle, fusiv_session->avm_session->session_handle); } return; } } putCluster(apBuf); } #define MAX_SSID_LEN 6 struct txInfo { struct net_device *netdev; void (*fp) (apPreHeader_t *, struct net_device *); unsigned char bssid[MAX_SSID_LEN]; }; struct txInfo tx_info_bridge = { .fp = handle_fp_bridge_pkt, }; struct txInfo tx_info_route = { .fp = handle_fp_route_pkt, }; static int avm_pa_fusiv_add_bridge_session(struct avm_pa_session *avm_session) { apNewBridgeEntry_t bridgeEntry, *newBridgeEntry; struct avm_pa_fusiv_session new_session; struct avm_pa_pid_hwinfo *ingress_hw, *egress_hw; unsigned short hash; unsigned long slock_flags; int res = AVM_PA_TX_ERROR_SESSION; int rc; ingress_hw = avm_pa_pid_get_hwinfo(avm_session->ingress_pid_handle); egress_hw = avm_pa_pid_get_hwinfo(avm_session->egress[0].pid_handle); if ((ingress_hw == NULL) || (egress_hw == NULL)) return res; /* == AVM/UGA 20140707 AP2AP/Bridging accallertion for WLAN/PERI AP == * reenabled since we most probably fixed the buffer leak in apClassify */ #if 0 /* * == AVM/SKI 20140515 == AP2AP/Bridging accallertion for WLAN/PERI AP * disabled because of currently unknown AP buffer leak. */ if (egress_hw->apId == PERI_ID) return res; #endif memset(&bridgeEntry, 0, sizeof(bridgeEntry)); if (egress_hw->apId == PERI_ID) { bridgeEntry.egressList[0].pEgress = (void *) AP_EGRESS_HOST; bridgeEntry.egressList[1].pEgress = &tx_info_bridge; bridgeEntry.rxPort = (unsigned short) ((K1_TO_PHYS(PERI_BASE_ADDR)) >> 16); } else if (ingress_hw->apId == PERI_ID) { bridgeEntry.egressList[0].pEgress = (void *) (K1_TO_PHYS(apArray[egress_hw->apId].apTxFifo)); if (egress_hw->apId == MAC1_ID) bridgeEntry.rxPort = (unsigned short) ((K1_TO_PHYS(MAC1_BASE_ADDR)) >> 16); else if (egress_hw->apId == MAC2_ID) bridgeEntry.rxPort = (unsigned short) ((K1_TO_PHYS(MAC2_BASE_ADDR)) >> 16); } else { AVM_PA_FUSIV_DBG ("avm_pa_fusiv_add_bridge_session: unsupported bridging AP%u -> AP%u\n", ingress_hw->apId, egress_hw->apId); return res; } bridgeEntry.egressList[0].pFlowID = (void *) 0; bridgeEntry.used = 1; bridgeEntry.vlanAddr = 1; bridgeEntry.bridgeState = AP_BRIDGE_OPERATIONAL; bridgeEntry.operations = (0x1 << AP_BRIDGE_VALID_BIT); memcpy(bridgeEntry.macAddr, avm_session->bsession->ethh.h_dest, ETH_ALEN); bridgeEntry.userHandle = (unsigned int) &fusiv_session_array[avm_session->session_handle]; hash = apBridgeCalculateHash(ingress_hw->apId, &bridgeEntry); rc = apAddBridgeEntry(1, hash, &bridgeEntry, &newBridgeEntry); if (rc != 0) { AVM_PA_FUSIV_DBG("apAddBridgeEntry returned %d\n", rc); return res; } AVM_PA_FUSIV_DBG("apAddBridgeEntry successful hash %u\n", hash); new_session.avm_session = avm_session; new_session.valid_session = 1; new_session.rxApId = ingress_hw->apId; new_session.txApId = egress_hw->apId; new_session.flowhash = hash; new_session.bridgeFlow = newBridgeEntry; new_session.flowtype = fusiv_bridge_flow; #if defined(CONFIG_FUSIV_VX180) && defined(CONFIG_FUSIV_KERNEL_APSTATISTICS_PER_INTERFACE) memset(&new_session.prevStat, 0, sizeof(apStatistics_t)); #endif // take_session_lock spin_lock_irqsave(&session_list_lock, slock_flags); if (!fusiv_session_array[avm_session->session_handle].valid_session) { fusiv_session_array[avm_session->session_handle] = new_session; res = AVM_PA_TX_SESSION_ADDED; } else { UINT32 moved; apDeleteBridgeEntry(1, hash, newBridgeEntry, &moved); printk(KERN_CRIT "session add failed - double call for add by avm_pa?!"); dump_stack(); } // release_session_lock spin_unlock_irqrestore(&session_list_lock, slock_flags); return res; } /*--------------------------------------------------------------------------*\ \*--------------------------------------------------------------------------*/ /* * TODO: fuer Vx185 muss die DESC ADDR richtig gesetzt werden, z.B.: * COPY_DESC(DESC_SELECT_ROUTE_SNAT_PPPOE_PLAIN, flow.descAddr); */ static int avm_pa_fusiv_add_session_v4(struct avm_pa_session *avm_session) { unsigned long slock_flags; int i, rc; struct avm_pa_fusiv_session new_session; int res = AVM_PA_TX_ERROR_SESSION; apFlowEntry_t flow; apFlowEntry_t *newflow; unsigned int proto, priority, mtu; struct avm_pa_pid_hwinfo *ingress_hw, *egress_hw; unsigned short flowhash; char srcmac[32], dstmac[32], insrc[32]; AVM_PA_FUSIV_DBG("avm_pa_fusiv_add_session_v4: start\n"); ingress_hw = avm_pa_pid_get_hwinfo(avm_session->ingress_pid_handle); egress_hw = avm_pa_pid_get_hwinfo(avm_session->egress[0].pid_handle); #ifdef AVM_AP_FUSIV_ALLOW_TX_CHANNEL_FOR_LOCAL if ( egress_hw == 0 && tx_channel_allocated[avm_session->egress[0].pid_handle]) egress_hw = &tx_channel_hwinfo; #endif if (ingress_hw == 0 || egress_hw == 0) return res; proto = avm_session->ingress.pkttype & AVM_PA_PKTTYPE_PROTO_MASK; memset(&flow, 0, sizeof(flow)); #ifdef CONFIG_FUSIV_VX180 flow.entryType = AP_TCP_UDP_ENTRY; #endif for (i = 0; i < avm_session->ingress.nmatch; i++) { struct avm_pa_match_info *p = &avm_session->ingress.match[i]; hdrunion_t *hdr = (hdrunion_t *) & avm_session->ingress.hdrcopy[p->offset + avm_session->ingress. hdroff]; switch (p->type) { case AVM_PA_VLAN: break; case AVM_PA_ETH: #ifdef CONFIG_FUSIV_VX180 memcpy(flow.inSrcMacAddr, &hdr->ethh.h_source, ETH_ALEN); #else memcpy(flow.apCompare.fields.inMAC, &hdr->ethh.h_source, ETH_ALEN); #endif #ifdef AVM_AP_FUSIV_ALLOW_TX_CHANNEL_FOR_LOCAL if (egress_hw->apId == 0) { /* * for tx_channel, we always need an ethernet header * 2016-03-04, calle */ #ifdef CONFIG_FUSIV_VX180 memcpy(flow.srcMacAddr, &hdr->ethh.h_source, ETH_ALEN); memcpy(flow.dstMacAddr, &hdr->ethh.h_dest, ETH_ALEN); #else memcpy(flow.apModify.srcMAC, &hdr->ethh.h_source, ETH_ALEN); memcpy(flow.apModify.dstMAC, &hdr->ethh.h_dest, ETH_ALEN); #endif flow.operations |= (1 << AP_DO_ETH_HDR_BIT); } #endif break; case AVM_PA_PPP: break; case AVM_PA_PPPOE: { #ifdef CONFIG_FUSIV_VX180 flow.inSessionId = hdr->pppoeh.sid; #else flow.apCompare.inSessionID = hdr->pppoeh.sid; #endif flow.operations |= (1 << AP_CHECK_PPPOE_BIT); } break; case AVM_PA_IPV4: #ifdef CONFIG_FUSIV_VX180 flow.srcIPAddr = hdr->iph.saddr; flow.dstIPAddr = hdr->iph.daddr; flow.pktInfo.l3Proto.proto = proto; #else flow.apCompare.srcIP = hdr->iph.saddr; flow.apCompare.dstIP = hdr->iph.daddr; flow.apCompare.protocol = proto; #endif break; case AVM_PA_IPV6: break; case AVM_PA_PORTS: #ifdef CONFIG_FUSIV_VX180 flow.otherInfo.tcpUdpInfo.srcPort = ntohs(hdr->ports[0]); flow.otherInfo.tcpUdpInfo.dstPort = ntohs(hdr->ports[1]); #else flow.apCompare.fields.ports.srcPort = ntohs(hdr->ports[0]); flow.apCompare.fields.ports.dstPort = ntohs(hdr->ports[1]); #endif break; default: AVM_PA_FUSIV_DBG ("avm_pa_fusiv_add_session_v4: can not accelerate, unsupported ingress match type %d\n", p->type); return res; } } for (i = 0; i < avm_session->egress[0].match.nmatch; i++) { struct avm_pa_match_info *p = avm_session->egress[0].match.match + i; hdrunion_t *hdr = (hdrunion_t *) & avm_session->egress[0].match.hdrcopy[p->offset + avm_session-> egress[0].match. hdroff]; switch (p->type) { case AVM_PA_VLAN: if (flow.operations & (1 << AP_ADD_VLAN_HDR_BIT)) return AVM_PA_TX_ERROR_SESSION; #ifdef CONFIG_FUSIV_VX180 flow.vlanId = hdr->vlanh.vlan_tci; #else flow.apModify.vlanID = hdr->vlanh.vlan_tci; flow.apModify.etherType = 0x8100; #endif flow.operations |= (1 << AP_ADD_VLAN_HDR_BIT); break; case AVM_PA_ETH: #ifdef CONFIG_FUSIV_VX180 memcpy(flow.srcMacAddr, &hdr->ethh.h_source, ETH_ALEN); memcpy(flow.dstMacAddr, &hdr->ethh.h_dest, ETH_ALEN); #else memcpy(flow.apModify.srcMAC, &hdr->ethh.h_source, ETH_ALEN); memcpy(flow.apModify.dstMAC, &hdr->ethh.h_dest, ETH_ALEN); #endif flow.operations |= (1 << AP_DO_ETH_HDR_BIT); break; case AVM_PA_PPP: break; case AVM_PA_PPPOE: { struct pppoehdr *pppoe_hdr = (struct pppoehdr *) (avm_session->egress[0].match.hdrcopy + avm_session->egress[0].match.hdroff + avm_session->egress[0].pppoe_offset); #ifdef CONFIG_FUSIV_VX180 flow.outSessionId = pppoe_hdr->sid; #else flow.apModify.sessionID = pppoe_hdr->sid; flow.apModify.etherType = 0x8864; #endif flow.operations |= (1 << AP_ADD_PPPOE_HDR_BIT); break; } case AVM_PA_IPV4: break; case AVM_PA_IPV6: #ifdef CONFIG_FUSIV_VX180 memcpy(flow.srcIpv6Addr, hdr->ipv6h.saddr.s6_addr32, 16); memcpy(flow.dstIpv6Addr, hdr->ipv6h.daddr.s6_addr32, 16); flow.operations |= (1 << AP_IPV4_TO_IPV6_TUNNEL_BIT); #endif break; case AVM_PA_PORTS: break; default: AVM_PA_FUSIV_DBG ("avm_pa_fusiv_add_session_v4: can not accelerate, unsupported egress match type %d\n", p->type); break; } } /* * ATA -> LAN: add port VLAN for WAN Port */ if ((flow.operations & (1 << AP_ADD_VLAN_HDR_BIT)) == 0 && (egress_hw->apId == MAC1_ID) && (ingress_hw->apId == MAC1_ID) && (avm_session->egress[0].pid_handle != avm_session->ingress_pid_handle)) { #ifdef CONFIG_FUSIV_VX180 flow.vlanId = avm_cpmac_get_wan_port_vlan(); flow.operations |= (1 << AP_ADD_VLAN_HDR_BIT); #else flow.apModify.vlanID = avm_cpmac_get_wan_port_vlan(); flow.operations |= (1 << AP_ADD_VLAN_HDR_BIT); #endif } #if defined(CONFIG_FUSIV_VX180) && defined(CONFIG_FUSIV_KERNEL_APSTATISTICS_PER_INTERFACE) memset(&flow.apStatistics, 0, sizeof(apStatistics_t)); memset(&new_session.prevStat, 0, sizeof(apStatistics_t)); #endif if (avm_session->mod.v4_mod.flags & AVM_PA_V4_MOD_DADDR) { #ifdef CONFIG_FUSIV_VX180 flow.natDstIPAddr = avm_session->mod.v4_mod.daddr; flow.natDstPort = flow.otherInfo.tcpUdpInfo.dstPort; #else flow.apModify.natIP = avm_session->mod.v4_mod.daddr; flow.apModify.natPort = flow.apCompare.fields.ports.dstPort; #endif flow.operations |= (1 << AP_DO_DST_NAT_BIT); } if (avm_session->mod.v4_mod.flags & AVM_PA_V4_MOD_DPORT) { #ifdef CONFIG_FUSIV_VX180 flow.natDstPort = avm_session->mod.v4_mod.dport; #else flow.apModify.natPort = avm_session->mod.v4_mod.dport; #endif flow.operations |= (1 << AP_DO_DST_NAT_BIT); } if (avm_session->mod.v4_mod.flags & AVM_PA_V4_MOD_SADDR) { #ifdef CONFIG_FUSIV_VX180 flow.natIPAddr = avm_session->mod.v4_mod.saddr; flow.natPort = flow.otherInfo.tcpUdpInfo.srcPort; #else flow.apModify.natIP = avm_session->mod.v4_mod.saddr; flow.apModify.natPort = flow.apCompare.fields.ports.srcPort; #endif flow.operations |= (1 << AP_DO_SRC_NAT_BIT); } if (avm_session->mod.v4_mod.flags & AVM_PA_V4_MOD_SPORT) { #ifdef CONFIG_FUSIV_VX180 flow.natPort = avm_session->mod.v4_mod.sport; #else flow.apModify.natPort = avm_session->mod.v4_mod.sport; #endif flow.operations |= (1 << AP_DO_SRC_NAT_BIT); } priority = avm_session->egress[0].output.priority; priority = (priority & TC_H_MIN_MASK); if (priority > 7) priority = 7; #ifdef AVM_AP_FUSIV_ALLOW_TX_CHANNEL_FOR_LOCAL if (egress_hw->apId == 0) { flow.egressList[0].pEgress = (void *) AP_EGRESS_HOST; flow.egressList[0].pFlowID = (void *)((UINT32)((AP_ETH_Q_TO_TX - 1)) << 16); flow.egressList[1].pEgress = &tx_info_route; } else #endif { if (egress_hw->apId == PERI_ID || egress_hw->apId == ATM_ID) flow.egressList[0].pFlowID = (void *) (priority << 16); else flow.egressList[0].pFlowID = (void *) (priority); if (egress_hw->apId != PERI_ID) { mtu = avm_session->egress[0].mtu; flow.egressList[0].pFlowID = (void *) ((UINT32) flow.egressList[0].pFlowID | (UINT32) (mtu) << MTU_SIZE_OFFSET_INSIDE_FLOW_ID); } if (egress_hw->apId == PERI_ID) { flow.egressList[0].pEgress = (void *) 0xFFFFFFFF; flow.egressList[1].pEgress = &tx_info_route; } else { flow.egressList[0].pEgress = (void *) (K1_TO_PHYS(apArray[egress_hw->apId].apTxFifo)); } } flow.userHandle = (unsigned int) &fusiv_session_array[avm_session->session_handle]; flow.operations |= (0x1 << AP_ROUTE_VALID_BIT); flowhash = apCalculateHash(ingress_hw->apId, &flow); rc = apAddFlowEntry(ingress_hw->apId, flowhash, &flow, &newflow); if (rc != 0) { AVM_PA_FUSIV_DBG ("avm_pa_fusiv_add_session_v4: can not accelerate, apAddFlowEntry returned %d\n", rc); return res; } new_session.avm_session = avm_session; new_session.valid_session = 1; new_session.rxApId = ingress_hw->apId; new_session.txApId = egress_hw->apId; new_session.flowhash = flowhash; new_session.flow.v4 = newflow; new_session.flowtype = fusiv_flow_v4; new_session.bridgeFlow = 0; #ifdef CONFIG_FUSIV_VX180 mac2str(flow.srcMacAddr, srcmac, sizeof(srcmac)); mac2str(flow.dstMacAddr, dstmac, sizeof(dstmac)); mac2str(flow.inSrcMacAddr, insrc, sizeof(insrc)); #else mac2str(flow.apModify.srcMAC, srcmac, sizeof(srcmac)); mac2str(flow.apModify.dstMAC, dstmac, sizeof(dstmac)); mac2str(flow.apCompare.fields.inMAC, insrc, sizeof(insrc)); #endif AVM_PA_FUSIV_DBG ("apAddFlowEntry: AP#%d->AP#%d srcmac %s dstmac %s insrc %s\n", ingress_hw->apId, egress_hw->apId, srcmac, dstmac, insrc); // take_session_lock spin_lock_irqsave(&session_list_lock, slock_flags); if (!fusiv_session_array[avm_session->session_handle].valid_session) { fusiv_session_array[avm_session->session_handle] = new_session; res = AVM_PA_TX_SESSION_ADDED; } else { UINT32 moved; if (apDeleteFlowEntry(ingress_hw->apId, flowhash, newflow, &moved) == 0) { if (moved) { struct avm_pa_fusiv_session *fusiv_session = (struct avm_pa_fusiv_session *) (newflow->userHandle); fusiv_session->flow.v4 = newflow; } } printk(KERN_CRIT "session add failed - double call for add by avm_pa?!"); dump_stack(); } // release_session_lock spin_unlock_irqrestore(&session_list_lock, slock_flags); AVM_PA_FUSIV_DBG("avm_pa_fusiv_add_session_v4: done\n"); return res; } static int avm_pa_fusiv_add_session_v6(struct avm_pa_session *avm_session) { unsigned long slock_flags; int i, rc; struct avm_pa_fusiv_session new_session; int res = AVM_PA_TX_ERROR_SESSION; apIpv6FlowEntry_t flow; apIpv6FlowEntry_t *newflow; unsigned int proto, priority; struct avm_pa_pid_hwinfo *ingress_hw, *egress_hw; unsigned short flowhash; char srcmac[32], dstmac[32], insrc[32]; AVM_PA_FUSIV_DBG("avm_pa_fusiv_add_session_v6: start\n"); ingress_hw = avm_pa_pid_get_hwinfo(avm_session->ingress_pid_handle); egress_hw = avm_pa_pid_get_hwinfo(avm_session->egress[0].pid_handle); #ifdef AVM_AP_FUSIV_ALLOW_TX_CHANNEL_FOR_LOCAL if ( egress_hw == 0 && tx_channel_allocated[avm_session->egress[0].pid_handle]) egress_hw = &tx_channel_hwinfo; #endif if (ingress_hw == 0 || egress_hw == 0) return res; if (ingress_hw->apId == PERI_ID || egress_hw->apId == PERI_ID) return res; proto = avm_session->ingress.pkttype & AVM_PA_PKTTYPE_PROTO_MASK; memset(&flow, 0, sizeof(flow)); #ifdef CONFIG_FUSIV_VX180 if (proto == IPPROTO_UDP) { flow.entryType = AP_UDP_ENTRY; } else if (proto == IPPROTO_TCP) { flow.entryType = AP_TCP_ENTRY; } else { AVM_PA_FUSIV_DBG("avm_pa_fusiv_add_session_v6: unsupported protocol %u\n", proto); return res; } #endif for (i = 0; i < avm_session->ingress.nmatch; i++) { struct avm_pa_match_info *p = &avm_session->ingress.match[i]; hdrunion_t *hdr = (hdrunion_t *) & avm_session->ingress.hdrcopy[p->offset + avm_session->ingress. hdroff]; switch (p->type) { case AVM_PA_VLAN: break; case AVM_PA_ETH: #ifdef CONFIG_FUSIV_VX180 memcpy(&flow.inSrcMacAddr, &hdr->ethh.h_source, ETH_ALEN); #else memcpy(&flow.apCompare.inMAC, &hdr->ethh.h_source, ETH_ALEN); #endif #ifdef AVM_AP_FUSIV_ALLOW_TX_CHANNEL_FOR_LOCAL if (egress_hw->apId == 0) { /* * for tx_channel, we always need an ethernet header * 2016-03-04, calle */ #ifdef CONFIG_FUSIV_VX180 memcpy(flow.srcMacAddr, &hdr->ethh.h_source, ETH_ALEN); memcpy(flow.dstMacAddr, &hdr->ethh.h_dest, ETH_ALEN); #else memcpy(flow.apModify.srcMacAddr, &hdr->ethh.h_source, ETH_ALEN); memcpy(flow.apModify.dstMacAddr, &hdr->ethh.h_dest, ETH_ALEN); #endif flow.operations |= (1 << AP_DO_ETH_HDR_BIT); } #endif break; case AVM_PA_PPP: break; case AVM_PA_PPPOE: { #ifdef CONFIG_FUSIV_VX180 flow.inSessionId = hdr->pppoeh.sid; #else flow.apCompare.inSessionID = hdr->pppoeh.sid; #endif flow.operations |= (1 << AP_CHECK_PPPOE_BIT); } break; case AVM_PA_IPV4: break; case AVM_PA_IPV6: #ifdef CONFIG_FUSIV_VX180 memcpy(flow.srcIpv6Addr, hdr->ipv6h.saddr.s6_addr32, 16); memcpy(flow.dstIpv6Addr, hdr->ipv6h.daddr.s6_addr32, 16); #else memcpy(flow.apCompare.srcIpv6Addr, hdr->ipv6h.saddr.s6_addr32, 16); memcpy(flow.apCompare.dstIpv6Addr, hdr->ipv6h.daddr.s6_addr32, 16); #endif break; case AVM_PA_PORTS: #ifdef CONFIG_FUSIV_VX180 flow.otherInfo.tcpUdpInfo.srcPort = ntohs(hdr->ports[0]); flow.otherInfo.tcpUdpInfo.dstPort = ntohs(hdr->ports[1]); #else flow.apCompare.fields.ports.srcPort = ntohs(hdr->ports[0]); flow.apCompare.fields.ports.dstPort = ntohs(hdr->ports[1]); #endif break; default: AVM_PA_FUSIV_DBG ("avm_pa_fusiv_add_session_v6: can not accelerate, unsupported ingress match type %d\n", p->type); return res; } } for (i = 0; i < avm_session->egress[0].match.nmatch; i++) { struct avm_pa_match_info *p = avm_session->egress[0].match.match + i; hdrunion_t *hdr = (hdrunion_t *) & avm_session->egress[0].match.hdrcopy[p->offset + avm_session-> egress[0].match. hdroff]; switch (p->type) { case AVM_PA_VLAN: if (flow.operations & (1 << AP_ADD_VLAN_HDR_BIT)) return AVM_PA_TX_ERROR_SESSION; #ifdef CONFIG_FUSIV_VX180 flow.vlanId = hdr->vlanh.vlan_tci; #else flow.apModify.vlanID = hdr->vlanh.vlan_tci; #endif flow.operations |= (1 << AP_ADD_VLAN_HDR_BIT); break; case AVM_PA_ETH: #ifdef CONFIG_FUSIV_VX180 memcpy(flow.srcMacAddr, &hdr->ethh.h_source, ETH_ALEN); memcpy(flow.dstMacAddr, &hdr->ethh.h_dest, ETH_ALEN); #else memcpy(flow.apModify.srcMacAddr, &hdr->ethh.h_source, ETH_ALEN); memcpy(flow.apModify.dstMacAddr, &hdr->ethh.h_dest, ETH_ALEN); #endif flow.operations |= (1 << AP_DO_ETH_HDR_BIT); break; case AVM_PA_PPP: break; case AVM_PA_PPPOE: { struct pppoehdr *pppoe_hdr = (struct pppoehdr *) (avm_session->egress[0].match.hdrcopy + avm_session->egress[0].match.hdroff + avm_session->egress[0].pppoe_offset); #ifdef CONFIG_FUSIV_VX180 flow.outSessionId = pppoe_hdr->sid; #else flow.apModify.sessionID = pppoe_hdr->sid; #endif flow.operations |= (1 << AP_ADD_PPPOE_HDR_BIT); break; } case AVM_PA_IPV4: #ifdef CONFIG_FUSIV_VX180 flow.srcIPAddr = hdr->iph.saddr; flow.dstIPAddr = hdr->iph.daddr; flow.operations |= (0x1 << AP_IPV6_TO_IPV4_TUNNEL_BIT); #endif break; case AVM_PA_IPV6: break; case AVM_PA_PORTS: break; default: AVM_PA_FUSIV_DBG ("avm_pa_fusiv_add_session_v6: can not accelerate, unsupported egress match type %d\n", p->type); break; } } /* * ATA -> LAN: add port VLAN for WAN Port */ if ((flow.operations & (1 << AP_ADD_VLAN_HDR_BIT)) == 0 && (egress_hw->apId == MAC1_ID) && (ingress_hw->apId == MAC1_ID) && (avm_session->egress[0].pid_handle != avm_session->ingress_pid_handle)) { #ifdef CONFIG_FUSIV_VX180 flow.vlanId = avm_cpmac_get_wan_port_vlan(); flow.operations |= (1 << AP_ADD_VLAN_HDR_BIT); #else flow.apModify.vlanID = avm_cpmac_get_wan_port_vlan(); flow.operations |= (1 << AP_ADD_VLAN_HDR_BIT); #endif } #if defined(CONFIG_FUSIV_VX180) && defined(CONFIG_FUSIV_KERNEL_APSTATISTICS_PER_INTERFACE) memset(&flow.apStatistics, 0, sizeof(apStatistics_t)); memset(&new_session.prevStat, 0, sizeof(apStatistics_t)); #endif priority = avm_session->egress[0].output.priority; priority = (priority & TC_H_MIN_MASK); if (priority > 7) priority = 7; flow.userHandle = (unsigned int) &fusiv_session_array[avm_session->session_handle]; #ifdef AVM_AP_FUSIV_ALLOW_TX_CHANNEL_FOR_LOCAL if (egress_hw->apId == 0) { flow.egressList[0].pEgress = (void *) AP_EGRESS_HOST; flow.egressList[0].pFlowID = (void *)((UINT32)((AP_ETH_Q_TO_TX - 1)) << 16); flow.egressList[1].pEgress = &tx_info_route; } else #endif { flow.egressList[0].pEgress = (void *) (K1_TO_PHYS(apArray[egress_hw->apId].apTxFifo)); if (egress_hw->apId == PERI_ID || egress_hw->apId == ATM_ID) flow.egressList[0].pFlowID = (void *) (priority << 16); else flow.egressList[0].pFlowID = (void *) (priority); } flow.operations |= (0x1 << AP_ROUTE_VALID_BIT); flowhash = apIpv6CalculateHash(ingress_hw->apId, &flow); rc = apIpv6AddFlowEntry(ingress_hw->apId, flowhash, &flow, &newflow); if (rc != 0) { AVM_PA_FUSIV_DBG ("avm_pa_fusiv_add_session_v6: can not accelerate, apAddFlowEntry returned %d\n", rc); return res; } new_session.avm_session = avm_session; new_session.valid_session = 1; new_session.rxApId = ingress_hw->apId; new_session.txApId = egress_hw->apId; new_session.flowhash = flowhash; new_session.flow.v6 = newflow; new_session.flowtype = fusiv_flow_v6; new_session.bridgeFlow = 0; #ifdef CONFIG_FUSIV_VX180 mac2str(flow.srcMacAddr, srcmac, sizeof(srcmac)); mac2str(flow.dstMacAddr, dstmac, sizeof(dstmac)); mac2str(flow.inSrcMacAddr, insrc, sizeof(insrc)); #else mac2str(flow.apModify.srcMacAddr, srcmac, sizeof(srcmac)); mac2str(flow.apModify.dstMacAddr, dstmac, sizeof(dstmac)); mac2str(flow.apCompare.inMAC, insrc, sizeof(insrc)); #endif AVM_PA_FUSIV_DBG ("apAddFlowEntry: AP#%d->AP#%d srcmac %s dstmac %s insrc %s\n", ingress_hw->apId, egress_hw->apId, srcmac, dstmac, insrc); // take_session_lock spin_lock_irqsave(&session_list_lock, slock_flags); if (!fusiv_session_array[avm_session->session_handle].valid_session) { fusiv_session_array[avm_session->session_handle] = new_session; res = AVM_PA_TX_SESSION_ADDED; } else { UINT32 moved; if (apIpv6DeleteFlowEntry(ingress_hw->apId, flowhash, newflow, &moved) == 0) { if (moved) { struct avm_pa_fusiv_session *fusiv_session = (struct avm_pa_fusiv_session *) (newflow->userHandle); fusiv_session->flow.v6 = newflow; } } printk(KERN_CRIT "session add failed - double call for add by avm_pa?!"); dump_stack(); } // release_session_lock spin_unlock_irqrestore(&session_list_lock, slock_flags); AVM_PA_FUSIV_DBG("avm_pa_fusiv_add_session_v6: done\n"); return res; } static int avm_pa_fusiv_add_session(struct avm_pa_session *avm_session) { struct avm_pa_pkt_match *ingress; struct avm_pa_egress *egress; struct avm_pa_pid_hwinfo *ingress_hw, *egress_hw; unsigned int proto; BUG_ON(avm_session->session_handle >= CONFIG_AVM_PA_MAX_SESSION); if (avm_session->negress != 1) { AVM_PA_FUSIV_DBG ("avm_pa_fusiv_add_session: can not accelerate, egress = %d\n", avm_session->negress); return AVM_PA_TX_ERROR_SESSION; } ingress = &avm_session->ingress; egress = &avm_session->egress[0]; ingress_hw = avm_pa_pid_get_hwinfo(avm_session->ingress_pid_handle); egress_hw = avm_pa_pid_get_hwinfo(avm_session->egress[0].pid_handle); #ifdef AVM_AP_FUSIV_ALLOW_TX_CHANNEL_FOR_LOCAL if ( avm_session->bsession == 0 && egress_hw == 0 && tx_channel_allocated[avm_session->egress[0].pid_handle]) egress_hw = &tx_channel_hwinfo; #endif if (!ingress_hw || !egress_hw) { AVM_PA_FUSIV_DBG ("avm_pa_fusiv_add_session: can not accelerate, hw pointer not valid\n"); return AVM_PA_TX_ERROR_SESSION; } #ifdef AVM_AP_FUSIV_ALLOW_TX_CHANNEL_FOR_LOCAL if (ingress_hw->apId == 0) { AVM_PA_FUSIV_DBG ("avm_pa_fusiv_add_session: can not accelerate, ingress AP-ID is 0\n"); return AVM_PA_TX_ERROR_SESSION; } if (egress_hw->apId == 0 && avm_session->bsession) { AVM_PA_FUSIV_DBG ("avm_pa_fusiv_add_session: can not accelerate, ingress AP-ID is 0\n"); return AVM_PA_TX_ERROR_SESSION; } #else if ((egress_hw->apId == 0) || (ingress_hw->apId == 0)) { AVM_PA_FUSIV_DBG ("avm_pa_fusiv_add_session: can not accelerate, %s AP-ID is 0\n", egress_hw->apId ? "ingress" : "egress"); return AVM_PA_TX_ERROR_SESSION; } #endif if (egress_hw->apId == BMU_ID) { AVM_PA_FUSIV_DBG ("avm_pa_fusiv_add_session: can not accelerate, AP-ID %d not supported\n", egress_hw->apId); return AVM_PA_TX_ERROR_SESSION; } if (avm_session->bsession) return avm_pa_fusiv_add_bridge_session(avm_session); proto = avm_session->ingress.pkttype & AVM_PA_PKTTYPE_PROTO_MASK; if (proto != IPPROTO_TCP && proto != IPPROTO_UDP) { AVM_PA_FUSIV_DBG ("avm_pa_fusiv_add_session: can not accelerate, protocol %u not supported \n", proto); return AVM_PA_TX_ERROR_SESSION; } if ((ingress->pkttype & AVM_PA_PKTTYPE_LISP) || (egress->match.pkttype & AVM_PA_PKTTYPE_LISP)) { AVM_PA_FUSIV_DBG ("avm_pa_fusiv_add_session: can not accelerate LISP tunnel packets \n"); return AVM_PA_TX_ERROR_SESSION; } if ((ingress->pkttype & AVM_PA_PKTTYPE_IP_MASK) == AVM_PA_PKTTYPE_IPV4) { /* * IPV4 -> IPV4 */ if ((ingress->pkttype & AVM_PA_PKTTYPE_IPENCAP_MASK) == AVM_PA_PKTTYPE_NONE && (egress->match.pkttype & AVM_PA_PKTTYPE_IPENCAP_MASK) == AVM_PA_PKTTYPE_NONE) return avm_pa_fusiv_add_session_v4(avm_session); #ifdef CONFIG_FUSIV_VX180 /* * IPV4 -> IPV4 in IPV6 (DsLite) */ if ((ingress->pkttype & AVM_PA_PKTTYPE_IPENCAP_MASK) == AVM_PA_PKTTYPE_NONE && (egress->match.pkttype & AVM_PA_PKTTYPE_IPENCAP_MASK) == AVM_PA_PKTTYPE_IPV6ENCAP) return avm_pa_fusiv_add_session_v4(avm_session); /* currently buggy */ /* * IPV4 in IPV6 -> IPV4 (DsLite) */ if ((ingress->pkttype & AVM_PA_PKTTYPE_IPENCAP_MASK) == AVM_PA_PKTTYPE_IPV6ENCAP && (egress->match.pkttype & AVM_PA_PKTTYPE_IPENCAP_MASK) == AVM_PA_PKTTYPE_NONE) return avm_pa_fusiv_add_session_v4(avm_session); #endif } else if ((ingress->pkttype & AVM_PA_PKTTYPE_IP_MASK) == AVM_PA_PKTTYPE_IPV6) { /* * IPV6 -> IPV6 */ if ((ingress->pkttype & AVM_PA_PKTTYPE_IPENCAP_MASK) == AVM_PA_PKTTYPE_NONE && (egress->match.pkttype & AVM_PA_PKTTYPE_IPENCAP_MASK) == AVM_PA_PKTTYPE_NONE) return avm_pa_fusiv_add_session_v6(avm_session); #ifdef CONFIG_FUSIV_VX180 /* * IPV6 -> IPV6 in IPV4 (6to4) */ if ((ingress->pkttype & AVM_PA_PKTTYPE_IPENCAP_MASK) == AVM_PA_PKTTYPE_NONE && (egress->match.pkttype & AVM_PA_PKTTYPE_IPENCAP_MASK) == AVM_PA_PKTTYPE_IPV4ENCAP) return avm_pa_fusiv_add_session_v6(avm_session); /* * IPV6 in IPV4 -> IPV6 (6to4) */ if ((ingress->pkttype & AVM_PA_PKTTYPE_IPENCAP_MASK) == AVM_PA_PKTTYPE_IPV4ENCAP && (egress->match.pkttype & AVM_PA_PKTTYPE_IPENCAP_MASK) == AVM_PA_PKTTYPE_NONE) return avm_pa_fusiv_add_session_v6(avm_session); #endif } AVM_PA_FUSIV_DBG ("avm_pa_fusiv_add_session: can not accelerate, unsupported protocol\n"); return AVM_PA_TX_ERROR_SESSION; } /*------------------------------------------------------------------------------------------*\ \*------------------------------------------------------------------------------------------*/ static int avm_pa_fusiv_remove_session(struct avm_pa_session *avm_session) { int status = AVM_PA_TX_ERROR_SESSION; unsigned long slock_flags; struct avm_pa_fusiv_session session_to_remove; void *moved; int rc = 0; BUG_ON(avm_session->session_handle >= CONFIG_AVM_PA_MAX_SESSION); // take_session_lock spin_lock_irqsave(&session_list_lock, slock_flags); if ((avm_session == fusiv_session_array[avm_session->session_handle].avm_session) && fusiv_session_array[avm_session->session_handle].valid_session) { session_to_remove = fusiv_session_array[avm_session->session_handle]; } else { // no valid session found // release_session_lock and return spin_unlock_irqrestore(&session_list_lock, slock_flags); return status; } // release_session_lock spin_unlock_irqrestore(&session_list_lock, slock_flags); if (session_to_remove.flowtype == fusiv_flow_v4 && session_to_remove.flow.v4) { if ((rc = apDeleteFlowEntry(session_to_remove.rxApId, session_to_remove.flowhash, session_to_remove.flow.v4, (void *) &moved)) == 0) { apFlowEntry_t *flowp = session_to_remove.flow.v4; #if defined(CONFIG_FUSIV_KERNEL_APSTATISTICS_PER_INTERFACE) && defined(AVM_PA_FUSIV_DEBUG) AVM_PA_FUSIV_DBG ("ap2apFlowDelete Stats %lu rxPkt %lu rxByte %lu txPkt %lu txByte\n", flowp->apStatistics.rxPkt, flowp->apStatistics.rxByte, flowp->apStatistics.txPkt, flowp->apStatistics.txByte); #endif if (moved) { struct avm_pa_fusiv_session *fusiv_session = (struct avm_pa_fusiv_session *) (flowp->userHandle); fusiv_session->flow.v4 = flowp; AVM_PA_FUSIV_DBG ("avm_pa_fusiv_remove_session: v4 flow entry moved\n"); } AVM_PA_FUSIV_DBG ("avm_pa_fusiv_remove_session: apDeleteFlowEntry successful\n"); } else { AVM_PA_FUSIV_DBG ("avm_pa_fusiv_remove_session: apDeleteFlowEntry failed (rc=%u) APID %d\n", rc, session_to_remove.rxApId); } } else if (session_to_remove.flowtype == fusiv_flow_v6 && session_to_remove.flow.v6) { if ((rc = apIpv6DeleteFlowEntry(session_to_remove.rxApId, session_to_remove.flowhash, session_to_remove.flow.v6, (void *) &moved)) == 0) { apIpv6FlowEntry_t *flowp = session_to_remove.flow.v6; #if defined(CONFIG_FUSIV_KERNEL_APSTATISTICS_PER_INTERFACE) && defined(AVM_PA_FUSIV_DEBUG) AVM_PA_FUSIV_DBG ("ap2apFlowDelete Stats %lu rxPkt %lu rxByte %lu txPkt %lu txByte \n", flowp->apStatistics.rxPkt, flowp->apStatistics.rxByte, flowp->apStatistics.txPkt, flowp->apStatistics.txByte); #endif if (moved) { struct avm_pa_fusiv_session *fusiv_session = (struct avm_pa_fusiv_session *) (flowp->userHandle); fusiv_session->flow.v6 = flowp; AVM_PA_FUSIV_DBG ("avm_pa_fusiv_remove_session: v6 flow entry moved\n"); } AVM_PA_FUSIV_DBG ("avm_pa_fusiv_remove_session: apIpv6DeleteFlowEntry successful\n"); } else { AVM_PA_FUSIV_DBG ("avm_pa_fusiv_remove_session: apIpv6DeleteFlowEntry failed (rc=%d)\n", rc); } } else if (session_to_remove.flowtype == fusiv_bridge_flow && session_to_remove.bridgeFlow) { if ((rc = apDeleteBridgeEntry(1, session_to_remove.flowhash, session_to_remove.bridgeFlow, (void *) &moved)) == 0) { #if defined(CONFIG_FUSIV_KERNEL_APSTATISTICS_PER_INTERFACE) && defined(AVM_PA_FUSIV_DEBUG) apNewBridgeEntry_t *flowp = session_to_remove.bridgeFlow; AVM_PA_FUSIV_DBG ("apDeleteBridgeEntry Stats %lu rxPkt %lu rxByte %lu txPkt %lu txByte\n", flowp->apStatistics.rxPkt, flowp->apStatistics.rxByte, flowp->apStatistics.txPkt, flowp->apStatistics.txByte); #endif AVM_PA_FUSIV_DBG ("avm_pa_fusiv_remove_session: apDeleteBridgeEntry successful\n"); } else { AVM_PA_FUSIV_DBG ("avm_pa_fusiv_remove_session: apDeleteBridgeEntry failed (rc=%d)\n", rc); } } // take_session_lock spin_lock_irqsave(&session_list_lock, slock_flags); if ((avm_session == fusiv_session_array[avm_session->session_handle].avm_session) && fusiv_session_array[avm_session->session_handle].valid_session) { memset(&fusiv_session_array[avm_session->session_handle], 0, sizeof(struct avm_pa_fusiv_session)); status = AVM_PA_TX_OK; } else { printk(KERN_CRIT "session has been removed already - double call for remove by avm_pa?!"); dump_stack(); } // release_session_lock spin_unlock_irqrestore(&session_list_lock, slock_flags); return status; } /*------------------------------------------------------------------------------------------*\ \*------------------------------------------------------------------------------------------*/ #ifdef AVM_PA_FUSIV_AP_HAS_STATISTIC static int avm_pa_fusiv_get_stat(struct avm_pa_fusiv_session *session, unsigned long *pbytes_since_last_report, unsigned long *ppackets_since_last_report) { apFlowEntry_t *flowpv4; apIpv6FlowEntry_t *flowpv6; apNewBridgeEntry_t *bridgeflow; if (!session->valid_session) return -1; if (session->rxApId == PERI_ID) /* not used on rx */ return -1; if (session->flowtype == fusiv_flow_v4) { flowpv4 = session->flow.v4; if (flowpv4->apStatistics.txByte >= session->prevStat.txByte) { *pbytes_since_last_report = flowpv4->apStatistics.txByte - session->prevStat.txByte; } else { *pbytes_since_last_report = ((unsigned long) -1) - session->prevStat.txByte + flowpv4->apStatistics.txByte; } if (flowpv4->apStatistics.txPkt >= session->prevStat.txPkt) { *ppackets_since_last_report = flowpv4->apStatistics.txPkt - session->prevStat.txPkt; } else { *ppackets_since_last_report = ((unsigned long) -1) - session->prevStat.txPkt + flowpv4->apStatistics.txPkt; } session->prevStat.txByte = flowpv4->apStatistics.txByte; session->prevStat.rxByte = flowpv4->apStatistics.rxByte; session->prevStat.txPkt = flowpv4->apStatistics.txPkt; session->prevStat.rxPkt = flowpv4->apStatistics.rxPkt; return 0; } if (session->flowtype == fusiv_flow_v6) { flowpv6 = session->flow.v6; if (flowpv6->apStatistics.txByte >= session->prevStat.txByte) { *pbytes_since_last_report = flowpv6->apStatistics.txByte - session->prevStat.txByte; } else { *pbytes_since_last_report = ((unsigned long) -1) - session->prevStat.txByte + flowpv6->apStatistics.txByte; } if (flowpv6->apStatistics.txPkt >= session->prevStat.txPkt) { *ppackets_since_last_report = flowpv6->apStatistics.txPkt - session->prevStat.txPkt; } else { *ppackets_since_last_report = ((unsigned long) -1) - session->prevStat.txPkt + flowpv6->apStatistics.txPkt; } session->prevStat.txByte = flowpv6->apStatistics.txByte; session->prevStat.rxByte = flowpv6->apStatistics.rxByte; session->prevStat.txPkt = flowpv6->apStatistics.txPkt; session->prevStat.rxPkt = flowpv6->apStatistics.rxPkt; return 0; } if (session->flowtype == fusiv_bridge_flow) { bridgeflow = session->bridgeFlow; if (bridgeflow->apStatistics.txByte >= session->prevStat.txByte) { *pbytes_since_last_report = bridgeflow->apStatistics.txByte - session->prevStat.txByte; } else { *pbytes_since_last_report = ((unsigned long) -1) - session->prevStat.txByte + bridgeflow->apStatistics.txByte; } if (bridgeflow->apStatistics.txPkt >= session->prevStat.txPkt) { *ppackets_since_last_report = bridgeflow->apStatistics.txPkt - session->prevStat.txPkt; } else { *ppackets_since_last_report = ((unsigned long) -1) - session->prevStat.txPkt + bridgeflow->apStatistics.txPkt; } session->prevStat.txByte = bridgeflow->apStatistics.txByte; session->prevStat.rxByte = bridgeflow->apStatistics.rxByte; session->prevStat.txPkt = bridgeflow->apStatistics.txPkt; session->prevStat.rxPkt = bridgeflow->apStatistics.rxPkt; return 0; } return -1; } #endif #ifdef AVM_PA_FUSIV_POLL_STATISTIC static void avm_pa_check_stat(unsigned long dummy) { struct avm_pa_fusiv_session *session; unsigned long bytes, packets; unsigned long slock_flags; avm_session_handle handle; spin_lock_irqsave(&session_list_lock, slock_flags); for (handle = 0; handle < CONFIG_AVM_PA_MAX_SESSION; handle++) { session = &fusiv_session_array[handle]; if ( session->avm_session && session->avm_session->session_handle == handle && avm_pa_fusiv_get_stat(session, &bytes, &packets) == 0) { #if 0 printk(KERN_ERR "avm_pa_hardware_session_report(%u): rxId %u txId %u pkts %lu bytes %llu\n", session->avm_session->session_handle, (unsigned)session->rxApId, (unsigned)session->txApId, packets, bytes); #endif if (packets || bytes) avm_pa_hardware_session_report(handle, packets, bytes); } else { AVM_PA_FUSIV_DBG("avm_pa_check_stat: no session handle\n"); } } spin_unlock_irqrestore(&session_list_lock, slock_flags); mod_timer(&statistics_timer, jiffies + HZ * AVM_PA_FUSIV_STAT_POLLING_TIME - 1); } #endif #ifdef AVM_PA_FUSIV_USE_SESSION_STATS_FUNCTION static int avm_pa_fusiv_session_stats(struct avm_pa_session *avm_session, struct avm_pa_session_stats *ingress) { struct avm_pa_fusiv_session *session; unsigned long bytes, packets; unsigned long slock_flags; ingress->validflags = 0; spin_lock_irqsave(&session_list_lock, slock_flags); session = &fusiv_session_array[avm_session->session_handle]; if ( session->avm_session == avm_session && avm_pa_fusiv_get_stat(session, &bytes, &packets) == 0) { /* * Because VALIDs are interpreted as activity, * we only report VALIDs, if values are not zero, * * 2016-03-07 calle */ if (packets || bytes) { ingress->validflags = AVM_PA_SESSION_STATS_VALID_HIT | AVM_PA_SESSION_STATS_VALID_PKTS | AVM_PA_SESSION_STATS_VALID_BYTES; ingress->tx_pkts = packets; ingress->tx_bytes = bytes; } spin_unlock_irqrestore(&session_list_lock, slock_flags); return 0; } spin_unlock_irqrestore(&session_list_lock, slock_flags); return -1; } #endif /*------------------------------------------------------------------------------------------*\ \*------------------------------------------------------------------------------------------*/ static void pkt_from_ap(apPreHeader_t * ap_buf) { struct sk_buff *skb; avm_pid_handle pid_handle; pid_handle = ap_buf->specInfoElement; ap_buf->specInfoElement = 1; AVM_PA_FUSIV_TRC("%s called pid_handle %u\n", __func__, pid_handle); if ((skb = (struct sk_buff *) translateApbuf2Mbuf(ap_buf)) != 0) avm_pa_rx_channel_packet_not_accelerated(pid_handle, skb); } extern int apClassify(unsigned char apId, apPreHeader_t * pFrame, void *handle); extern void (*wlan_pkt_from_ap_ptr) (apPreHeader_t *); static int avm_pa_fusiv_try_to_accelerate(avm_pid_handle pid_handle, struct sk_buff *skb) { struct avm_pa_pid_hwinfo *hwinfo; apPreHeader_t *ap_buf; hwinfo = avm_pa_pid_get_hwinfo(pid_handle); if (!hwinfo) { AVM_PA_FUSIV_TRC(KERN_ERR "avm_pa_fusiv_try_to_accelerate: no hw info for pid %u\n", pid_handle); return AVM_PA_RX_BYPASS; } /* == AVM/UGA 20140707 no HW acceleration for ingress WLAN/PERI AP pkts == * Workaround for trouble-free telephony. * AVM PA rate throttling is not working for ingres WLAN pkts because they * are not seen/counted by it atm. * Thus, we fallback to SW acceleration which sees/counts pkts * appropriately so throttling works. */ #if 1 if (hwinfo->apId == PERI_ID) return AVM_PA_RX_BYPASS; #endif ap_buf = (apPreHeader_t *) (translateMbuf2Apbuf(skb, 0)); if (!ap_buf) { return AVM_PA_RX_BYPASS; } ap_buf->flags1 = 1 << AP_FLAG1_IS_ETH_BIT; ap_buf->specInfoElement = pid_handle; ap_buf->flags2 = 0; dev_kfree_skb_any(skb); AVM_PA_FUSIV_TRC("avm_pa_fusiv_try_to_accelerate: apClassify apId %u\n", hwinfo->apId); if (apClassify(hwinfo->apId, ap_buf, (void *) pkt_from_ap) != SIM_OK) { putCluster(ap_buf); } return AVM_PA_RX_STOLEN; } static int avm_pa_fusiv_alloc_rx_channel(avm_pid_handle pid_handle) { wlan_pkt_from_ap_ptr = pkt_from_ap; return 0; } static int avm_pa_fusiv_alloc_tx_channel(avm_pid_handle pid_handle) { #ifdef AVM_AP_FUSIV_ALLOW_TX_CHANNEL_FOR_LOCAL if (pid_handle == 0 || pid_handle >= CONFIG_AVM_PA_MAX_PID) return -1; tx_channel_allocated[pid_handle] = 1; #endif apBridgeTable(1, 0xCC, 0xff); return 0; } static int avm_pa_fusiv_free_tx_channel(avm_pid_handle pid_handle) { #ifdef AVM_AP_FUSIV_ALLOW_TX_CHANNEL_FOR_LOCAL if (pid_handle == 0 || pid_handle >= CONFIG_AVM_PA_MAX_PID) return -1; tx_channel_allocated[pid_handle] = 0; #endif return 0; } /*------------------------------------------------------------------------------------------*\ \*------------------------------------------------------------------------------------------*/ static int __init avm_pa_fusiv_init(void) { AVM_PA_FUSIV_DBG("[%s] start \n", __func__); memset(&fusiv_session_array[0], 0, sizeof(struct avm_pa_fusiv_session) * CONFIG_AVM_PA_MAX_SESSION); avm_pa_register_hardware_pa(&avm_pa_fusiv); #ifdef AVM_PA_FUSIV_POLL_STATISTIC setup_timer(&statistics_timer, avm_pa_check_stat, 0); mod_timer(&statistics_timer, jiffies + HZ * AVM_PA_FUSIV_STAT_POLLING_TIME - 1); #endif AVM_PA_FUSIV_DBG("[%s] init complete \n", __func__); return 0; } static void __exit avm_pa_fusiv_exit(void) { AVM_PA_FUSIV_DBG("[%s] start \n", __func__); #ifdef AVM_PA_FUSIV_POLL_STATISTIC del_timer(&statistics_timer); #endif avm_pa_register_hardware_pa(0); AVM_PA_FUSIV_DBG("[%s] exit complete \n", __func__); } module_init(avm_pa_fusiv_init); module_exit(avm_pa_fusiv_exit); MODULE_DESCRIPTION("Ikanos HW acceleration"); MODULE_LICENSE("GPL");