#include "fastpath_core.h" #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/kthread.h> #include <linux/etherdevice.h> #include <linux/inetdevice.h> #include <linux/if_pppox.h> #include <linux/notifier.h> #include <linux/netfilter.h> #include <linux/netfilter_bridge/ebt_ftos_t.h> #include <net/netfilter/nf_conntrack.h> #include <net/netfilter/nf_conntrack_helper.h> #include <net/netfilter/nf_conntrack_ecache.h> #include <net/netfilter/nf_conntrack_core.h> #include <net/netfilter/nf_conntrack_zones.h> #include <net/ip.h> #include <linux/if_smux.h> #include <linux/if_arp.h> #include <net/xfrm.h> #include <net/rtl/rtl_types.h> #include <net/rtl/rtl_glue.h> #include "../drivers/net/rtl819x/AsicDriver/rtl865x_asicL4.h" #include <net/rtl/rtl865x_nat.h> #ifdef TRAFFIC_MONITOR #include "../drivers/net/rtl819x/common/rtl865x_netif_local.h" #include <net/rtl/rtl867x_hwnat_api.h> #include "../drivers/net/rtl819x/AsicDriver/rtl865xc_asicregs.h" #include <net/rtl/rtl_types.h> extern struct timer_list fp_monitor_timer; //#define DEBUG_TRAFFIC_MONITOR 1 #ifdef DEBUG_TRAFFIC_MONITOR #define DEBUG_TRAFFIC_MONITOR_PRINTK printk #else #define DEBUG_TRAFFIC_MONITOR_PRINTK(format, args...) #endif #endif //TRAFFIC_MONITOR #if defined(SIP_LIMIT_CHECK) #define CONNECTION_LIMIT 4096 typedef struct _srcIP_reference{ ip_t intIp; unsigned int count; struct _srcIP_reference *next; } srcIP_reference; #endif #ifdef CONFIG_ETHWAN #else #define __SRAM #endif extern __u8 fastpath_forward_flag; extern u32 LANsub[8]; extern u32 LANmask[8]; extern u32 routeIndex; #if defined(CONFIG_PPTP) || defined(CONFIG_PPPOL2TP) || defined(CONFIG_NET_IPIP) extern u32 WANsub[8]; extern u32 wanIndex; #endif//end of CONFIG_PPTP || CONFIG_PPPOL2TP || CONFIG_NET_IPIP #if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)||defined(CONFIG_ATP_SUPPORT_ETHUP) extern int up_qos_enable; #endif #ifdef CONFIG_NET_IPIP extern const struct net_device_ops ipip_netdev_ops; #endif//end of CONFIG_NET_IPIP #if defined(CONFIG_RTL_HW_NAPT_4KENTRY) extern struct Path_List_Entry *RTL_HWNAT_FP_MAPPING[RTL8676_TCPUDPTBL_SIZE_HW]; #endif extern int DumpTrapCPUpkt_debug; extern int DumpTrapCPUpkt_debug_LIMIT; extern void neigh_hh_init(struct neighbour *n, struct dst_entry *dst); __SRAM int fp_iproute_input(void *pSt, struct iphdr *iph, __u32 *fp_dip) { struct sk_buff *skb = (struct sk_buff *)pSt; if(ip_route_input(skb, *fp_dip, iph->saddr, iph->tos, skb->dev)) { //printk("%s %d fail.\n", __func__, __LINE__); return 0; } #ifdef CONFIG_XFRM DEBUGP_PKT("get xfrm boudles.\n"); if (!xfrm4_policy_check(NULL, XFRM_POLICY_FWD, skb)) return 0; if (!xfrm4_route_forward(skb)) return 0; #endif return 1; } extern char ipsecdev[]; __u8 *fastpath_getdstifName(void *pSt) { struct dst_entry *dst = (struct dst_entry *)pSt; #ifdef CONFIG_XFRM if(dst->xfrm != NULL) return &ipsecdev[0]; #endif return dst->dev->name; } int fp_iproute_output(void *pSt, struct iphdr *iph) { #if defined(CONFIG_PPTP) || defined(CONFIG_NET_IPIP) struct rtable *rt = NULL; struct sk_buff *skb = (struct sk_buff *)pSt; struct flowi4 flp = { .__fl_common = { .flowic_oif = 0, .flowic_tos = RT_TOS(0), .flowic_proto = iph->protocol, }, .saddr = iph->saddr, .daddr = iph->daddr, }; rt = ip_route_output_key(&init_net, &flp); if (IS_ERR(rt)) return 0; skb_dst_set(skb, &rt->dst); return 1; #else return 0; #endif//end of CONFIG_PPTP || CONFIG_NET_IPIP } void * getSkbDst(void *pSt) { return skb_dst(((struct sk_buff *)pSt)); } int is_NoARP_Dev(void *dev) { struct net_device *netdev; 
	netdev = (struct net_device *)dev;
	if (netdev->flags & IFF_NOARP)
		return 1;
	return 0;
}

__SRAM void setSkbDst(void *pSt, void *dst)
{
	struct sk_buff *skb = (struct sk_buff *)pSt;

	skb_dst_set(skb, dst);
}

__SRAM void SetFPDst(void *pSt, void **dst)
{
	struct sk_buff *skb = (struct sk_buff *)pSt;

	*dst = skb_dst(skb);
}

__SRAM int isSkbDstAssigned(void *pSt)
{
	struct sk_buff *skb = (struct sk_buff *)pSt;

	return (skb_dst(skb)) ? 1 : 0;
}

__SRAM int isDestLo(void *pSt)
{
	struct sk_buff *skb = (struct sk_buff *)pSt;

	if (!strcmp(skb_dst(skb)->dev->name, "lo"))
		return 1;
	return 0;
}

__SRAM int getSkbMark(void *pskb)
{
	struct sk_buff *skb = (struct sk_buff *)pskb;

	if (skb)
		return skb->mark;
	return 0;
}

__SRAM int getSkbDscp(void *pskb)
{
	struct sk_buff *skb = (struct sk_buff *)pskb;

	if (skb)
		return skb->mdscp;
	return 0;
}

#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
u8 getSkbImqFlags(void *pskb)
{
	struct sk_buff *skb = (struct sk_buff *)pskb;

	if (skb)
		return skb->mimqflags;
	return 0;
}

__SRAM void setQoSIMQ(void *pSt, u8 imqflags)
{
	struct sk_buff *skb = (struct sk_buff *)pSt;

	skb->imq_flags = imqflags;
}
#endif

__SRAM void setQoSMark(void *pSt, unsigned int mark)
{
	struct sk_buff *skb = (struct sk_buff *)pSt;

	skb->skb_iif = 0;
	skb->mark = mark;
}

__SRAM void setQosDscp(void *pSt, unsigned int mdscp, unsigned int mark)
{
	int ftos_set;
	struct sk_buff *skb;
	struct iphdr *iph;

	if (mdscp & 0x1000000) { //dscp/tc mark
		ftos_set = mdscp >> 16;
		skb = (struct sk_buff *)pSt;
		iph = ip_hdr(skb);
		if (ftos_set & FTOS_SETFTOS) {
			if (iph->protocol != IPPROTO_UDP) {
				FASTPATH_ADJUST_CHKSUM_TOS(((iph->tos & (~0xFC)) | (mdscp & 0xFC)), iph->tos, iph->check);
			} else {
				FASTPATH_ADJUST_CHKSUM_TOS_UDP(((iph->tos & (~0xFC)) | (mdscp & 0xFC)), iph->tos, iph->check);
			}
			iph->tos = ((iph->tos & (~0xFC)) | (mdscp & 0xFC));
		} else if (ftos_set & FTOS_WMMFTOS) {
			if (iph->protocol != IPPROTO_UDP) {
				FASTPATH_ADJUST_CHKSUM_TOS((((mark >> PRIO_LOC_NFMARK) & PRIO_LOC_NFMASK) << DSCP_MASK_SHIFT), iph->tos, iph->check);
			} else {
				FASTPATH_ADJUST_CHKSUM_TOS_UDP((((mark >> PRIO_LOC_NFMARK) & PRIO_LOC_NFMASK) << DSCP_MASK_SHIFT), iph->tos, iph->check);
			}
			iph->tos = ((mark >> PRIO_LOC_NFMARK) & PRIO_LOC_NFMASK) << DSCP_MASK_SHIFT;
		}
	}
}

__SRAM void *getDevFromDestentry(void *dst)
{
	struct dst_entry *pdst = (struct dst_entry *)dst;

#ifdef CONFIG_XFRM
	while (pdst->child != NULL)
		pdst = pdst->child;
#endif
	return (void *)(pdst->dev);
}

__SRAM unsigned short getDevTypeFromDestentry(void *dst)
{
	struct dst_entry *pdst = (struct dst_entry *)dst;

#ifdef CONFIG_XFRM
	while (pdst->child != NULL)
		pdst = pdst->child;
#endif
	return (pdst->dev->type);
}

__SRAM void FastPathHoldDst(void *pSt)
{
	struct sk_buff *skb = (struct sk_buff *)pSt;

#ifdef CONFIG_XFRM
	struct dst_entry *pdst = skb_dst(skb);

	DEBUGP_PKT("FastPathHoldDst, hold bundles.\n");
	while (pdst != NULL) {
		pdst->lastuse = jiffies;
		dst_hold(pdst);
		pdst->__use++;
		pdst = pdst->child;
	}
	return;
#endif
	skb_dst(skb)->lastuse = jiffies;
	dst_hold(skb_dst(skb)); //cathy, fix dst cache full problem, dst should be held when it is referenced
	skb_dst(skb)->__use++;
}

__SRAM void initSkbHdr(void *pSt)
{
	struct sk_buff *skb = (struct sk_buff *)pSt;

#if !defined(CONFIG_RTL_HW_TX_CSUM)
	skb->ip_summed = 0x0;
#endif
	skb->dev = skb_dst(skb)->dev;
}

/*
 * Find conntrack by tuple.
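 * nf_conntrack_find_get() takes a reference on the matched entry.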
 * Note: the caller must release the conntrack with nf_ct_put() after use.
 */
struct nf_conn *fp_tuple_to_ct(struct nf_conntrack_tuple *tp)
{
	struct nf_conntrack_tuple_hash *h;
	struct nf_conn *ct = NULL;

	/* look for tuple match */
	h = nf_conntrack_find_get(&init_net, NF_CT_DEFAULT_ZONE, tp);
	if (!h) {
		//printk("lookup ct failed\n");
		ct = NULL;
	} else {
		ct = nf_ct_tuplehash_to_ctrack(h);
	}
	return ct;
}

/* Update conntrack timer */
inline void fp_updateConxTimer(struct Path_List_Entry *ptr)
{
	struct nf_conn *ct;
	static unsigned int nf_ct_timeout;
	unsigned long newtime;

	ct = fp_tuple_to_ct(&ptr->orig_tuple);
	if (ct == NULL)
		return;

	if (ct->tuplehash[0].tuple.dst.protonum == IPPROTO_TCP)
		nf_ct_timeout = ct->ct_net->ct.nf_ct_proto.tcp.timeouts[ct->proto.tcp.state];
	else
		nf_ct_timeout = ct->ct_net->ct.nf_ct_proto.udp.timeouts[UDP_CT_REPLIED];

	newtime = nf_ct_timeout + jiffies;
	if ((newtime - ct->timeout.expires >= HZ)) {
		mod_timer_pending(&ct->timeout, newtime);
	}
	nf_ct_put(ct);
}

/*
 * Description: pskb is a pointer to a struct sk_buff; pskb->dst points to the new rtable,
 * while dst points to the old one.
 */
int ipip_sanity_check(void *pskb, void *dst)
{
	struct sk_buff *skb = (struct sk_buff *)pskb;
	struct dst_entry *rt = (struct dst_entry *)dst;
	struct net_device *tdev;	/* Device to other host */
	struct iphdr *old_iph = (struct iphdr *)skb_transport_header(skb);
	struct iphdr *tiph = ip_hdr(skb);
	int mtu;

	tdev = skb_dst(skb)->dev;
	if (tdev == skb->dev) {
		goto tx_error;
	}

	if (tiph->frag_off)
		mtu = dst_mtu(skb_dst(skb)) - sizeof(struct iphdr);
	else
		mtu = rt ? dst_mtu(rt) : skb->dev->mtu;

	if (mtu < 68) {
		goto tx_error;
	}
	if (rt)
		rt->ops->update_pmtu(rt, NULL, skb, mtu);

	if ((old_iph->frag_off & htons(IP_DF)) && mtu < ntohs(old_iph->tot_len)) {
		goto tx_error;
	}

	dst_release(rt);
	return 1;

tx_error:
	printk("%s error.\n", __func__);
	dst_release(skb_dst(skb));
	skb_dst_set(skb, dst);
	return 0;
}

__SRAM int isNotFromPPPItf(void *pSt)
{
	struct sk_buff *skb = (struct sk_buff *)pSt;

	if (strncmp(skb_dst(skb)->dev->name, "ppp", 3))
		return 1;
	else
		return 0;
}

//#if defined(CONFIG_PPTP) || defined(CONFIG_PPPOL2TP) || defined(CONFIG_NET_IPIP)
ip_t getNetAddrbyName(const char *ifname, ip_t dst)
{
	struct net_device *dev;
	ip_t addr = 0;
	struct in_device *in_dev;

	dev = __dev_get_by_name(&init_net, ifname);
	if (NULL == dev) {
		printk("%s dev %s not found.\n", __func__, ifname);
		return 0;
	}

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(dev);
	if (!in_dev) {
		rcu_read_unlock();
		return 0;
	}
	for_primary_ifa(in_dev) {
		if (inet_ifa_match(dst, ifa)) {
			addr = ifa->ifa_local;
			break;
		}
		if (!addr)
			addr = ifa->ifa_local;
	} endfor_ifa(in_dev);
	rcu_read_unlock();

	return addr;
}
//#endif //end of CONFIG_PPTP || CONFIG_PPPOL2TP || CONFIG_NET_IPIP

/*
 * state: 0 - unreplied, 1 - established
 */
#ifdef CONFIG_RTL867X_KERNEL_MIPS16_NET
__NOMIPS16
#endif
__SRAM enum LR_RESULT fastpath_addRoutedNaptConnection(struct sk_buff *pskb, struct nf_conn *ct,
	struct nf_conntrack_tuple ori_tuple, struct nf_conntrack_tuple reply_tuple,
	enum NP_FLAGS flags, int state)
{
	struct nf_conntrack_tuple tpdir1, tpdir2;
	struct FP_NAPT_entry napt;
	int fwd_flag = 0;
#if defined(CONFIG_PPTP) || defined(CONFIG_PPPOL2TP) || defined(CONFIG_NET_IPIP)
	int reverse = 0;
#endif //end of CONFIG_PPTP || CONFIG_PPPOL2TP || CONFIG_NET_IPIP
	int i;

	//exclude 127.0.0.1
	if (((ori_tuple.src.u3.ip & 0xFFFFFFFF) == 0x7F000001) || ((ori_tuple.dst.u3.ip & 0xFFFFFFFF) == 0x7F000001) ||
	    ((reply_tuple.src.u3.ip & 0xFFFFFFFF) == 0x7F000001) || ((reply_tuple.dst.u3.ip & 0xFFFFFFFF) == 0x7F000001))
		return 0;

	// for GCC 4.x.0 warning
	tpdir1.src.u3.ip = 0;
	tpdir2.src.u3.ip = 0;
tpdir2.dst.u3.ip = 0; #ifdef CONFIG_PPPOL2TP tpdir2.src.u.all = 0; #endif napt.protocol = ori_tuple.dst.protonum; napt.flags = flags; // printk("%s %d routeIndex=%d\n", __func__, __LINE__, routeIndex); //cathy, for multi-subnet for(i=0; i< routeIndex; i++) { if ( (((ori_tuple.src.u3.ip & LANmask[i]) == LANsub[i]) && ((ori_tuple.dst.u3.ip & LANmask[i]) == LANsub[i])) || ((ori_tuple.dst.u3.ip & 0xF0000000) == 0xE0000000) || ((reply_tuple.dst.u3.ip & 0xF0000000) == 0xE0000000) ) { fwd_flag = 0; break; } if ( (ori_tuple.src.u3.ip & LANmask[i]) == LANsub[i] ) { tpdir1 = ori_tuple; tpdir2 = reply_tuple; fwd_flag = 1; break; } else if ( (reply_tuple.src.u3.ip & LANmask[i]) == LANsub[i] ){ tpdir1 = reply_tuple; tpdir2 = ori_tuple; fwd_flag = 1; break; } } #if defined(CONFIG_PPTP) || defined(CONFIG_PPPOL2TP) || defined(CONFIG_NET_IPIP) if (i >= routeIndex) {//not found in LANsub[] /* QL: we should distinguish wan local out packet and l2tp vpn data */ if ( #ifdef CONFIG_PPTP (ori_tuple.dst.protonum == IPPROTO_GRE) || #endif #ifdef CONFIG_NET_IPIP (ori_tuple.dst.protonum == IPPROTO_IPIP) || #endif #ifdef CONFIG_PPPOL2TP ((ori_tuple.dst.protonum == IPPROTO_UDP) && (ntohs(ori_tuple.dst.u.all)==1701 || ntohs(reply_tuple.dst.u.all)==1701)) || #endif FALSE) { for (i=0; i<wanIndex; i++) { if (ori_tuple.src.u3.ip == WANsub[i]) { tpdir1 = ori_tuple; tpdir2 = reply_tuple; fwd_flag = 1; break; } else if (reply_tuple.src.u3.ip == WANsub[i]) { tpdir1 = reply_tuple; tpdir2 = ori_tuple; reverse = 1; fwd_flag = 1; break; } } } #ifdef CONFIG_PPPOL2TP /* for l2tp, we should double check upstream dest port */ if (ori_tuple.dst.protonum == IPPROTO_UDP) { if (ntohs(tpdir2.src.u.all)!=1701)/*upstream dest port is not 1701,it is not l2tp data */ fwd_flag = 0; } #endif } #endif//end of CONFIG_PPTP || CONFIG_PPPOL2TP || CONFIG_NET_IPIP if (!fwd_flag) { // printk("it is not forward packet, don't create fastpath entry.\n"); return 0; } napt.intIp = tpdir1.src.u3.ip; napt.intPort = ntohs(tpdir1.src.u.all); napt.extIp = tpdir2.dst.u3.ip; napt.extPort = ntohs(tpdir2.dst.u.all); napt.remIp = tpdir2.src.u3.ip; napt.remPort = ntohs(tpdir2.src.u.all); napt.int_remIp = tpdir1.dst.u3.ip; napt.int_remPort = ntohs(tpdir1.dst.u.all); napt.ct = ct ; #ifdef CONFIG_PPTP if (ori_tuple.dst.protonum == IPPROTO_GRE) { struct pptp_gre_header { __u8 flags; __u8 ver; __u16 protocol; __u16 payload_len; __u16 call_id; __u32 seq; __u32 ack; } *grehdr; struct iphdr *iph = (struct iphdr*)skb_network_header(pskb); grehdr = (struct pptp_gre_header*)((__u32 *)iph + iph->ihl); //printk("----> tpdir1.dst.u.all=%d, grehdr->call_id=%d\n", ntohs(tpdir1.dst.u.all), grehdr->call_id); napt.intPort =napt.int_remPort = napt.extPort = napt.remPort = grehdr->call_id; printk("\n====> ori_tupe: src=%x dst=%x port:%d reply_tupe: src=%x dst=%x port:%d\n", ori_tuple.src.u3.ip, ori_tuple.dst.u3.ip,napt.intPort, reply_tuple.src.u3.ip, reply_tuple.dst.u3.ip, napt.remPort); } #endif #ifdef CONFIG_NET_IPIP if (ori_tuple.dst.protonum == IPPROTO_IPIP) { napt.intPort = napt.extPort = napt.remPort = 0; } #endif//end of CONFIG_NET_IPIP return (fastpath_addNaptConnection(pskb,&napt, state)); } #ifdef CONFIG_RTL867X_KERNEL_MIPS16_NET __NOMIPS16 #endif __SRAM enum LR_RESULT fastpath_updateRoutedNaptConnection(struct nf_conntrack_tuple ori_tuple, struct nf_conntrack_tuple reply_tuple, unsigned int mark, unsigned int mdscp) { struct nf_conntrack_tuple tpdir1, tpdir2; struct FP_NAPT_entry napt; #if defined(CONFIG_PPTP) || defined(CONFIG_PPPOL2TP) || defined(CONFIG_NET_IPIP) int 
reverse=0; #endif//end of CONFIG_PPTP || CONFIG_PPPOL2TP || CONFIG_NET_IPIP int i; //exclude 127.0.0.1 if (((ori_tuple.src.u3.ip&0xFFFFFFFF)==0x7F000001) || ((ori_tuple.dst.u3.ip&0xFFFFFFFF)==0x7F000001) || ((reply_tuple.src.u3.ip&0xFFFFFFFF)==0x7F000001) || ((reply_tuple.dst.u3.ip&0xFFFFFFFF)==0x7F000001)) return 0; napt.protocol = ori_tuple.dst.protonum; //init tpdir1 = ori_tuple; tpdir2 = reply_tuple; // for GCC 4.x.0 warning tpdir1.src.u3.ip = 0; tpdir2.src.u3.ip = 0; tpdir2.dst.u3.ip = 0; //cathy, for multi-subnet for(i=0; i< routeIndex; i++) { if ( (ori_tuple.src.u3.ip & LANmask[i]) == LANsub[i] ) { tpdir1 = ori_tuple; tpdir2 = reply_tuple; break; } else #if defined(CONFIG_PPTP) || defined(CONFIG_PPPOL2TP) || defined(CONFIG_NET_IPIP) if ( (reply_tuple.src.u3.ip & LANmask[i]) == LANsub[i] ) #endif//end of CONFIG_PPTP || CONFIG_PPPOL2TP || CONFIG_NET_IPIP { tpdir1 = reply_tuple; tpdir2 = ori_tuple; #if defined(CONFIG_PPTP) || defined(CONFIG_PPPOL2TP) || defined(CONFIG_NET_IPIP) break; #endif//end of CONFIG_PPTP || CONFIG_PPPOL2TP || CONFIG_NET_IPIP } } #if defined(CONFIG_PPTP) || defined(CONFIG_PPPOL2TP) || defined(CONFIG_NET_IPIP) if (i >= routeIndex) {//not found in LANsub[] for (i=0; i<wanIndex; i++) { if (ori_tuple.src.u3.ip == WANsub[i]) { tpdir1 = ori_tuple; tpdir2 = reply_tuple; break; } else if (reply_tuple.src.u3.ip == WANsub[i]){ tpdir1 = reply_tuple; tpdir2 = ori_tuple; reverse = 1; break; } } if (i >= wanIndex) return LR_NONEXIST; } #endif//end of CONFIG_PPTP || CONFIG_PPPOL2TP || CONFIG_NET_IPIP napt.intIp = tpdir1.src.u3.ip; napt.intPort = ntohs(tpdir1.src.u.all); napt.extIp = tpdir2.dst.u3.ip; napt.extPort = ntohs(tpdir2.dst.u.all); napt.remIp = tpdir2.src.u3.ip; napt.remPort = ntohs(tpdir2.src.u.all); napt.int_remIp = tpdir1.dst.u3.ip; napt.int_remPort = ntohs(tpdir1.dst.u.all); #ifdef CONFIG_PPTP if (ori_tuple.dst.protonum == IPPROTO_GRE) { napt.intPort = napt.extPort = napt.remPort = ntohs(tpdir1.dst.u.all); } #endif #ifdef CONFIG_NET_IPIP if (ori_tuple.dst.protonum == IPPROTO_IPIP) { napt.intPort = napt.extPort = napt.remPort = 0; } #endif//end of CONFIG_NET_IPIP return (fastpath_updateNaptConnection(&napt, mark, mdscp)); } #ifdef CONFIG_RTL867X_KERNEL_MIPS16_NET __NOMIPS16 #endif enum LR_RESULT fastpath_delRoutedNaptConnection (struct nf_conntrack_tuple ori_tuple, struct nf_conntrack_tuple reply_tuple) { struct nf_conntrack_tuple tpdir1, tpdir2; struct FP_NAPT_entry napt; #if defined(CONFIG_PPTP) || defined(CONFIG_PPPOL2TP) || defined(CONFIG_NET_IPIP) int reverse=0; #endif//end of CONFIG_PPTP || CONFIG_PPPOL2TP || CONFIG_NET_IPIP int i; //init tpdir1 = ori_tuple; tpdir2 = reply_tuple; //exclude 127.0.0.1 if (((ori_tuple.src.u3.ip&0xFFFFFFFF)==0x7F000001) || ((ori_tuple.dst.u3.ip&0xFFFFFFFF)==0x7F000001) || ((reply_tuple.src.u3.ip&0xFFFFFFFF)==0x7F000001) || ((reply_tuple.dst.u3.ip&0xFFFFFFFF)==0x7F000001)) return 0; napt.protocol = ori_tuple.dst.protonum; // for GCC 4.x.0 warning tpdir1.src.u3.ip = 0; tpdir2.src.u3.ip = 0; tpdir2.dst.u3.ip = 0; //cathy, for multi-subnet for(i=0; i< routeIndex; i++) { if ( (ori_tuple.src.u3.ip & LANmask[i]) == LANsub[i] ) { tpdir1 = ori_tuple; tpdir2 = reply_tuple; break; } else #if defined(CONFIG_PPTP) || defined(CONFIG_PPPOL2TP) || defined(CONFIG_NET_IPIP) if ( (reply_tuple.src.u3.ip & LANmask[i]) == LANsub[i] ) #endif//end of CONFIG_PPTP || CONFIG_PPPOL2TP || CONFIG_NET_IPIP { tpdir1 = reply_tuple; tpdir2 = ori_tuple; #if defined(CONFIG_PPTP) || defined(CONFIG_PPPOL2TP) || defined(CONFIG_NET_IPIP) break; #endif//end of CONFIG_PPTP 
|| CONFIG_PPPOL2TP || CONFIG_NET_IPIP } } #if defined(CONFIG_PPTP) || defined(CONFIG_PPPOL2TP) || defined(CONFIG_NET_IPIP) if (i >= routeIndex) {//not found in LANsub[] for (i=0; i<wanIndex; i++) { if (ori_tuple.src.u3.ip == WANsub[i]) { tpdir1 = ori_tuple; tpdir2 = reply_tuple; break; } else if (reply_tuple.src.u3.ip == WANsub[i]){ tpdir1 = reply_tuple; tpdir2 = ori_tuple; reverse = 1; break; } } if (i >= wanIndex) return LR_NONEXIST; } #endif//end of CONFIG_PPTP || CONFIG_PPPOL2TP || CONFIG_NET_IPIP napt.intIp = tpdir1.src.u3.ip; napt.intPort = ntohs(tpdir1.src.u.all); napt.extIp = tpdir2.dst.u3.ip; napt.extPort = ntohs(tpdir2.dst.u.all); napt.remIp = tpdir2.src.u3.ip; napt.remPort = ntohs(tpdir2.src.u.all); napt.int_remIp = tpdir1.dst.u3.ip; napt.int_remPort = ntohs(tpdir1.dst.u.all); #ifdef CONFIG_PPTP if (ori_tuple.dst.protonum == IPPROTO_GRE) { napt.intPort = napt.extPort = napt.remPort = ntohs(tpdir1.dst.u.all); } #endif #ifdef CONFIG_NET_IPIP if (ori_tuple.dst.protonum == IPPROTO_IPIP) { napt.intPort = napt.extPort = napt.remPort = 0; } #endif//end of CONFIG_NET_IPIP return (fastpath_delNaptConnection(&napt)); } extern void nic_tx2(struct sk_buff* skb,struct net_device *tdev); extern int imq_nf_queue_fast(struct sk_buff *skb); __IRAM_SYS_MIDDLE int ip_finish_output3(struct sk_buff *skb, u8 course, u8 imq_flags) { struct dst_entry *dst = skb_dst(skb); struct net_bridge_fdb_entry *fpdst; struct net_bridge *br; //suppose skb->dev is bridge struct ethhdr *eth; const unsigned char *dest; struct neighbour *neigh = NULL, *neigh2 = NULL; struct hh_cache *hh = NULL; #ifdef CONFIG_NET_IPIP extern int ipip_up_fastpath(struct sk_buff *skb); if (skb->dev->netdev_ops == &ipip_netdev_ops) { return ipip_up_fastpath(skb); } #endif //end of CONFIG_NET_IPIP #ifdef CONFIG_XFRM if(dst->xfrm != NULL){ DEBUGP_PKT("xfrm output in fastpath!\n"); return dst->output(skb->sk, skb); } #endif //20171031patch for skb->len > MTU if(skb->len > ip_skb_dst_mtu(skb)) { extern int ip_finish_output2(struct sk_buff *skb); return ip_fragment(skb, ip_finish_output2); } //patch end neigh = dst_neigh_lookup_skb(dst, skb); if (!neigh){ printk("dst neighbour is NULL.dst %s \n", skb->dev->name); goto DROP; } if (!(neigh->nud_state & NUD_VALID)) { printk("dst neighbour is invalid %s \n", skb->dev->name); goto DROP; } hh = (struct hh_cache *)(&neigh->hh); if (!hh){ if (dst->dev->header_ops && dst->dev->header_ops->cache!=NULL){ neigh_hh_init(neigh, dst); neigh2 = dst_neigh_lookup_skb(dst, skb); if (neigh2){ hh = (struct hh_cache *)(&neigh2->hh); if (!hh) goto DROP1; } else goto DROP1; } else { if (dev_hard_header(skb, dst->dev, ntohs(skb->protocol),neigh->ha, NULL, skb->len) <0 ) goto DROP1; } } if (hh) { unsigned seq; int hh_len; do { int hh_alen; seq = read_seqbegin(&hh->hh_lock); hh_len = hh->hh_len; hh_alen = HH_DATA_ALIGN(hh_len); memcpy(skb->data - hh_alen, hh->hh_data, hh_alen); } while (read_seqretry(&hh->hh_lock, seq)); skb_push(skb, hh_len); skb_set_mac_header(skb, 0); } //forwarding process if (course == 1) {//upstream DEBUGP_PKT("%s xmit dev %s (%x)\n", __func__, skb->dev->name, (unsigned int)skb->dev->netdev_ops->ndo_start_xmit); #ifdef CONFIG_ATP_SUPPORT_ETHUP if(qos_enable) {//nas0 skb->imq_flags &= ~IMQ_F_ENQUEUE; dev_queue_xmit(skb); } else { dev_queue_xmit(skb); } #else #if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)||defined(CONFIG_ATP_SUPPORT_ETHUP) if(up_qos_enable == 1) { if (imq_flags) { skb->skb_iif = skb->dev->ifindex; skb->imq_flags = imq_flags; imq_nf_queue_fast(skb); } else { 
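		/* Downstream: skb->dev is the bridge, so look up the destination MAC
		 * in the bridge FDB to pick the real egress port; fall back to the
		 * bridge device itself when no FDB entry exists. */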
dev_queue_xmit(skb); } } else #endif { dev_queue_xmit(skb); } #endif goto SUCCEED; } else { struct net_device *out_dev; eth = eth_hdr(skb); dest = eth->h_dest; br = netdev_priv(skb->dev); if( !br) { printk("%s %d br is NULL.\n", __FUNCTION__, __LINE__); kfree_skb(skb); goto SUCCEED; } rcu_read_lock(); if ((fpdst = __br_fdb_get(br, dest, 0)) != NULL) { out_dev = fpdst->dst->dev; } else{ out_dev = skb->dev; } rcu_read_unlock(); DEBUGP_PKT("%s xmit dev %s (%x)\n", __func__, out_dev->name, (unsigned int)out_dev->netdev_ops->ndo_start_xmit); if (netif_running(out_dev)) { skb->dev = out_dev; dev_queue_xmit(skb); } else kfree_skb(skb); goto SUCCEED; } SUCCEED: neigh_release(neigh); if (neigh2) neigh_release(neigh2); return 1; DROP1: neigh_release(neigh); if (neigh2) neigh_release(neigh2); DROP: printk( "ip_finish_output3: %s(%s) No header cache and no neighbour!\n", dst->dev->name, (course==1)?"UP":"DOWN"); kfree_skb(skb); return -EINVAL; } #ifdef CONFIG_RTL_INET_LED extern void tr068_internet_traffic(void); #endif #ifdef CONFIG_RTL867X_KERNEL_MIPS16_NET __NOMIPS16 #endif __SRAM int FastPath_Enter(struct sk_buff *skb) /* Ethertype = 0x0800 (IP Packet) */ { //ql, I think it is impossible that skb->dev is lo here. //if (!strcmp(skb->dev->name, "lo")) return 0; struct net_device *dev_backup = NULL; struct net_bridge_port *br_port = br_port_get_rcu(skb->dev); int ret; if(br_port) { /* Kevin, if dev is under some bridge device(ex.br0) we have to re-assign br0 as skb->dev when the skb enters fastpath because br0 is the source interface in the routing subsystem ip_route_input() in fp_iproute_input() will use it! */ dev_backup = skb->dev; skb->dev= (br_port_get_rcu(skb->dev))->br->dev; } ret = FastPath_Process((void *)skb, (struct iphdr*)skb_network_header(skb),br_port); if(br_port && !ret) { /* Kevin, if it does not go into fastpath, restore the skb->dev */ skb->dev = dev_backup; } #ifdef CONFIG_RTL_INET_LED if (ret == NET_RX_DROP) // go fastpath tr068_internet_traffic(); #endif return ret; } //a wrapper for br_fdb_update void fp_br_fdb_update( struct net_bridge_port *br_port, struct sk_buff *pskb){ br_fdb_update(br_port->br, br_port, eth_hdr(((struct sk_buff *)pskb))->h_source, 0, false); } #if defined(SIP_LIMIT_CHECK) srcIP_reference *srcIP_referenceHead = NULL; static int ref_srcIp_entry(srcIP_reference* new_srcIPentry) { srcIP_reference* srcIPentry; for(srcIPentry = srcIP_referenceHead; srcIPentry; srcIPentry=srcIPentry->next) { if(srcIPentry->intIp==new_srcIPentry->intIp) { srcIPentry->count++; kfree(new_srcIPentry); //showEntry(); return SUCCESS; } } //add new node if (srcIP_referenceHead==NULL) { new_srcIPentry->next = NULL; srcIP_referenceHead = new_srcIPentry; } else { srcIP_reference *lastEntry; lastEntry = srcIP_referenceHead; while(lastEntry->next) lastEntry = lastEntry->next; lastEntry->next = new_srcIPentry; new_srcIPentry->next = NULL; } //showEntry(); return SUCCESS; } static int check_SIP_ref(srcIP_reference* new_srcIPentry) { #if 1 return TRUE; #else srcIP_reference* srcIPentry; for(srcIPentry = srcIP_referenceHead; srcIPentry; srcIPentry=srcIPentry->next) { if(srcIPentry->intIp==new_srcIPentry->intIp) { if(srcIPentry->count>=CONNECTION_LIMIT) return FALSE; else return TRUE; } } return TRUE; #endif } #endif void deref_srcIp_entry(u_int32_t refIP,__u8 refCount) { #if defined(SIP_LIMIT_CHECK) srcIP_reference* srcIPentry; srcIP_reference* lastsrcIPentry; lastsrcIPentry= srcIP_referenceHead; for(srcIPentry = srcIP_referenceHead; srcIPentry; srcIPentry=srcIPentry->next) { 
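		/* Decrement this source IP's hardware-NAT connection count and
		 * unlink/free the node once the count drops to zero. */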
if(srcIPentry->intIp==refIP) { srcIPentry->count-=refCount; if(srcIPentry->count==0) { if(srcIPentry!=srcIP_referenceHead||srcIPentry->next!=NULL)//only head node do nothing { if(srcIPentry->next==NULL) lastsrcIPentry->next=NULL; else { if(srcIPentry==srcIP_referenceHead) srcIP_referenceHead=srcIPentry->next; else lastsrcIPentry->next=srcIPentry->next; } } else srcIP_referenceHead=NULL; kfree(srcIPentry); break; } else break; } lastsrcIPentry=srcIPentry; } //showEntry(); #endif return; } extern int rtl865x_qosPriorityMappingGet(u32 sw_Qidx,u32 remark_8021p,u32 remark_dscp); unsigned int add_fastpath_to_asic(struct sk_buff *skb, struct Path_List_Entry *entry_path){ #if defined(TRAFFIC_MONITOR) unsigned int ret = FAILED; struct nf_conn *ct=NULL; #ifdef CONFIG_RTL_ADV_FAST_PATH int swqid=0; extern int32 upstream_default_swQid; #endif /* CONFIG_RTL_ADV_FAST_PATH */ #if defined(CONFIG_RTL_HW_NAPT_4KENTRY) int dscp_remark=0,dscp_value=0; #endif /* ysleu@20140612:Does not support HWNAT on "lo" interface. */ if(strncmp(skb_dst(skb)->dev->name,"lo",2)==0) return FAILED; DEBUG_TRAFFIC_MONITOR_PRINTK("course:%d in_sip:%x:%d in_dip:%x:%d out_sip:%x:%d out_dip:%x:%d\n", entry_path->course, *entry_path->in_sIp, *entry_path->in_sPort, *entry_path->in_dIp, *entry_path->in_dPort, *entry_path->out_sIp, *entry_path->out_sPort, *entry_path->out_dIp, *entry_path->out_dPort); DEBUG_TRAFFIC_MONITOR_PRINTK("protocol:%d in_intf:%s out_intf:%s\n", *entry_path->protocol, skb->from_dev->name, entry_path->out_ifname); ct = fp_tuple_to_ct(&entry_path->orig_tuple); if (ct == NULL) return FAILED; #ifdef CONFIG_RTL_ADV_FAST_PATH swqid = ((ct->qosmark) & QOS_SWQID_MASK)>>QOS_SWQID_OFFSET; if(!swqid) { swqid = upstream_default_swQid; if(swqid==-1) swqid=0; } #endif /*CONFIG_RTL_ADV_FAST_PATH*/ if(!entry_path->add_into_asic_checked && skb_dst(skb)) { /*QL 20111223 : L2 header will be determinated in ip_finish_ouput, here it is uncertain, so we should check if l2 header is retrievable here?*/ //if (skb_mac_header_was_set(skb) && (SUCCESS == rtl865x_Lookup_L2_by_MAC(eth_hdr(skb)->h_source))) //{ int priority=0; int pri_flag=0; #if defined(SIP_LIMIT_CHECK) srcIP_reference* new_srcIPentry; new_srcIPentry = kmalloc(sizeof(srcIP_reference), GFP_ATOMIC); if(!new_srcIPentry){ DEBUG_TRAFFIC_MONITOR_PRINTK("\n!!!!!!%s(%d): No memory freed for kmalloc!!!",__FUNCTION__,__LINE__); goto ERROR; } memset(new_srcIPentry,0,sizeof(srcIP_reference)); if(entry_path->course == 1) new_srcIPentry->intIp=*entry_path->in_sIp; else if (entry_path->course == 2) new_srcIPentry->intIp=*entry_path->out_dIp; new_srcIPentry->count=1; if(!check_SIP_ref(new_srcIPentry)) DEBUG_TRAFFIC_MONITOR_PRINTK("Exceed connection limit in hwacc\n"); else #endif if(entry_path->course == 1) //upstream { struct neighbour *n; u32 upstream_nexthop_ip; n = dst_neigh_lookup_skb(skb_dst(skb), skb); if (unlikely(n==NULL)) { kfree(new_srcIPentry); goto ERROR; } read_lock_bh(&n->lock); upstream_nexthop_ip = *(u32*)n->primary_key; read_unlock_bh(&n->lock); neigh_release(n); { struct smux_dev_info *dev_info; struct net_device *master_dev=NULL; rtl865x_netif_local_t *slave_netif, *master_netif=NULL; //ysleu: Get master device while dest. interface is PPP. 
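					// A ppp<N> WAN device is a software interface, so resolve it to its
					// underlying smux/master device through the rtl865x netif table and
					// take the VLAN/802.1p settings from that master instead.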
if(strncmp(skb_dst(skb)->dev->name,"ppp",3)==0) { slave_netif = _rtl865x_getSWNetifByName(skb_dst(skb)->dev->name); if (slave_netif){ master_netif = slave_netif->master; if(master_netif) master_dev = dev_get_by_name(&init_net, master_netif->name); } } if(master_dev){ dev_info = SMUX_DEV_INFO(master_dev); dev_put(master_dev); } else dev_info = SMUX_DEV_INFO(skb_dst(skb)->dev); #ifndef CONFIG_RTL_ADV_FAST_PATH if(dev_info && dev_info->m_1p!=0) { if((ct->m_1p&0xffff)>>8>=1) { #ifdef CONFIG_RTL_HW_QOS_SUPPORT priority = rtl865x_qosPriorityMappingGet(0,ct->m_1p&0x7,-1); skb->mark=ct->m_1p; #endif } else { #ifdef CONFIG_RTL_HW_QOS_SUPPORT priority = rtl865x_qosPriorityMappingGet(0,dev_info->m_1p-1,-1); skb->vlan_tci = (dev_info->vid&VLAN_VID_MASK)|((dev_info->m_1p-1)<<13); #endif } pri_flag=1; if(priority==-1) { printk("Leave %s @ %d (get priority failed)\n",__func__,__LINE__); priority=0; pri_flag=0; //return FAILED; } } #else /* CONFIG_RTL_ADV_FAST_PATH*/ if(dev_info) { #ifdef CONFIG_RTL_HW_QOS_SUPPORT #if defined(CONFIG_RTL_HW_NAPT_4KENTRY) if(((ct->qosmark) & QOS_8021P_MASK)!=0) { priority = rtl865x_qosPriorityMappingGet(swqid, (((ct->qosmark)&QOS_8021P_MASK)>>QOS_8021P_OFFSET), -1); #else if(((ct->qosmark) & QOS_8021P_MASK)!=0 || (entry_path->mdscp&DSCP_MASK)!=0) { priority = rtl865x_qosPriorityMappingGet(swqid, (((ct->qosmark)&QOS_8021P_MASK)>>QOS_8021P_OFFSET), (entry_path->mdscp&DSCP_MASK)>>DSCP_SHIFT); #endif //CONFIG_RTL_HW_NAPT_4KENTRY } else priority = rtl865x_qosPriorityMappingGet(swqid,-1,-1); #endif /* CONFIG_RTL_HW_QOS_SUPPORT*/ pri_flag=1; if(priority==-1) { printk("Leave %s @ %d (get priority failed)\n",__func__,__LINE__); priority=0; pri_flag=0; } } #endif /* CONFIG_RTL_ADV_FAST_PATH */ } #ifdef CONFIG_RTL_FLOW_BASE_HWNAT #if defined(CONFIG_RTL_HW_NAPT_4KENTRY) if((entry_path->mdscp&DSCP_MASK)!=0) { dscp_remark = 1; dscp_value = (entry_path->mdscp&DSCP_MASK)>>DSCP_SHIFT; } ret = rtl8676_add_L34Unicast_hwacc_upstream(*entry_path->in_sIp, *entry_path->in_sPort, *entry_path->in_dIp, *entry_path->in_dPort, *entry_path->out_sIp, *entry_path->out_sPort, *entry_path->protocol, upstream_nexthop_ip, RTL_DRV_LAN_NETIF_NAME, entry_path->out_ifname,priority,pri_flag,dscp_remark,dscp_value); #else ret = rtl8676_add_L34Unicast_hwacc_upstream(*entry_path->in_sIp, *entry_path->in_sPort, *entry_path->in_dIp, *entry_path->in_dPort, *entry_path->out_sIp, *entry_path->out_sPort, *entry_path->protocol, upstream_nexthop_ip, RTL_DRV_LAN_NETIF_NAME, entry_path->out_ifname,priority,pri_flag); #endif if(ret==SUCCESS) { #if defined(CONFIG_RTL_HW_NAPT_4KENTRY) int asicIdx; asicIdx = rtl865x_getNaptConnectionPosition(*entry_path->in_sIp, *entry_path->in_sPort, *entry_path->out_sIp, *entry_path->out_sPort, *entry_path->in_dIp, *entry_path->in_dPort, *entry_path->protocol==IPPROTO_TCP?1:0,1); if(asicIdx >= 0) RTL_HWNAT_FP_MAPPING[asicIdx] = entry_path; #endif set_bit(IPS_8676HW_NAPT_BIT, &ct->status); } #endif } else if (entry_path->course == 2) //downstream { #ifdef CONFIG_RTL_FLOW_BASE_HWNAT #ifdef CONFIG_RTL_ADV_FAST_PATH int swqid_d = ((entry_path->mark) & QOS_SWQID_MASK)>>QOS_SWQID_OFFSET; if(swqid_d) { priority = rtl865x_qosPriorityMappingGet(swqid_d,-1,-1); pri_flag = 1; if(priority==-1) { printk("Leave %s @ %d (get priority failed)\n",__func__,__LINE__); priority=0; pri_flag=0; } } #endif #if defined(CONFIG_RTL_HW_NAPT_4KENTRY) if((entry_path->mdscp&DSCP_MASK)!=0) { dscp_remark = 1; dscp_value = (entry_path->mdscp&DSCP_MASK)>>DSCP_SHIFT; } ret = 
rtl8676_add_L34Unicast_hwacc_downstream(*entry_path->in_sIp, *entry_path->in_sPort, *entry_path->in_dIp, *entry_path->in_dPort, *entry_path->out_dIp, *entry_path->out_dPort, *entry_path->protocol, skb->from_dev->name, entry_path->out_ifname,priority,pri_flag,dscp_remark,dscp_value); #else ret = rtl8676_add_L34Unicast_hwacc_downstream(*entry_path->in_sIp, *entry_path->in_sPort, *entry_path->in_dIp, *entry_path->in_dPort, *entry_path->out_dIp, *entry_path->out_dPort, *entry_path->protocol, skb->from_dev->name, entry_path->out_ifname,priority,pri_flag); #endif if(ret==SUCCESS) { #if defined(CONFIG_RTL_HW_NAPT_4KENTRY) int asicIdx; asicIdx = rtl865x_getNaptConnectionPosition(*entry_path->out_dIp, *entry_path->out_dPort, *entry_path->in_dIp, *entry_path->in_dPort, *entry_path->in_sIp, *entry_path->in_sPort, *entry_path->protocol==IPPROTO_TCP?1:0,0); if(asicIdx >= 0) RTL_HWNAT_FP_MAPPING[asicIdx] = entry_path; #endif set_bit(IPS_8676HW_NAPT_BIT, &ct->status); } #endif } else{ printk("error course!! %s %d\n", __FUNCTION__, __LINE__); } #if defined(SIP_LIMIT_CHECK) if(ret==SUCCESS) { if(ref_srcIp_entry(new_srcIPentry)==SUCCESS) ct->stream_num++; }else kfree(new_srcIPentry); #endif //} entry_path->add_into_asic_checked = 1; } nf_ct_put(ct); return SUCCESS; ERROR: nf_ct_put(ct); return FAILED; #else return FAILED; #endif } #if defined(TRAFFIC_MONITOR) extern CTAILQ_HEAD(Path_list_inuse_head, Path_List_Entry) path_list_inuse; #endif void fp_monitor_timeout(unsigned long data) { #if defined(TRAFFIC_MONITOR) struct Path_List_Entry *ep; int need_update_ct_timeout = 0; CTAILQ_FOREACH(ep, &path_list_inuse, tqe_link) { if (ep->add_into_asic_checked == 1) { //query napt table if(ep->course == 1)//upstream { #ifdef CONFIG_RTL_FLOW_BASE_HWNAT if(rtl8676_query_L34Unicast_hwacc_upstream(*ep->in_sIp, *ep->in_sPort, *ep->in_dIp, *ep->in_dPort, *ep->out_sIp, *ep->out_sPort, *ep->protocol)>0) { need_update_ct_timeout = 1; } #endif } else //downstream { #ifdef CONFIG_RTL_FLOW_BASE_HWNAT if(rtl8676_query_L34Unicast_hwacc_downstream(*ep->in_sIp, *ep->in_sPort, *ep->in_dIp, *ep->in_dPort, *ep->out_dIp, *ep->out_dPort, *ep->protocol)>0) { need_update_ct_timeout = 1; } #endif } if(need_update_ct_timeout) { fp_updateConxTimer(ep); } } ep->pps = 0; } mod_timer(&fp_monitor_timer, jiffies + 5*HZ); #endif //TRAFFIC_MONITOR return; } #if defined(CONFIG_RTL_HW_NAPT_4KENTRY) void trf_monitor_timeout(unsigned long data) { struct Path_List_Entry *fp; int i; extern struct Path_List_Entry *RTL_HWNAT_FP_MAPPING[RTL8676_TCPUDPTBL_SIZE_HW]; for(i = 0; i < (RTL8676_TCPUDPTBL_SIZE_HW>>5); i++) { int trfBits; trfBits = READ_MEM32(L4_NAPT_TRF_TBL+(i<<2)); if(trfBits) { int k; for(k=0;k<32;k++) { if((trfBits & (1<<k))==0) continue; fp = RTL_HWNAT_FP_MAPPING[((i<<5)+k)]; if(fp == NULL) continue; DEBUG_TRAFFIC_MONITOR_PRINTK("course:%d in_sip:%pI4:%d in_dip:%pI4:%d out_sip:%pI4:%d out_dip:%pI4:%d\n", fp->course, fp->in_sIp, *fp->in_sPort, fp->in_dIp, *fp->in_dPort, fp->out_sIp, *fp->out_sPort, fp->out_dIp, *fp->out_dPort); fp_updateConxTimer(fp); } } } mod_timer(&fp_monitor_timer, jiffies + 5*HZ); return; } #endif
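
/*
 * Illustrative sketch only (kept under "#if 0", not compiled in): fp_monitor_timer
 * is defined elsewhere in this driver, so the helpers below merely show how an
 * init/cleanup path *could* bind the polling handlers above and arm the 5-second
 * poll, assuming the pre-4.15 timer API used throughout this file. The names
 * fp_monitor_start()/fp_monitor_stop() are hypothetical and do not exist in the
 * driver.
 */
#if 0
static void fp_monitor_start(void)
{
#if defined(CONFIG_RTL_HW_NAPT_4KENTRY)
	/* Poll the ASIC traffic bitmap and refresh matching conntrack timeouts. */
	setup_timer(&fp_monitor_timer, trf_monitor_timeout, 0UL);
#else
	/* Poll each in-use fastpath entry through the per-entry query API. */
	setup_timer(&fp_monitor_timer, fp_monitor_timeout, 0UL);
#endif
	mod_timer(&fp_monitor_timer, jiffies + 5 * HZ);
}

static void fp_monitor_stop(void)
{
	/* Make sure a running timeout handler has finished before teardown. */
	del_timer_sync(&fp_monitor_timer);
}
#endif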