/*
 * Copyright (c) 2019 AVM GmbH.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include
//#include "../ethernet/lantiq/datapath/datapath.h"

#define PMAC_MAX_NUM 16
#define VAP_OFFSET 8
#define MAX_SUBIF_PER_PORT 16

#include "hwpa.h"
#include "hwpa_grx.h"

/* ToE driver signatures */
int lro_available(void);
int lro_start_flow(int *lroflow, int timeout, int flags, struct cpumask cpumask);
int lro_stop_flow(int lroflow, int timeout, int flags);

#define LRO_TIMEOUT_US(us) ((us) * 0x126)

/* test-related declarations */
static struct net_device *ktd_netdev, *localpid_netdev;
static struct sk_buff_head skb_not_accelerated, skb_accelerated;

#define HWPA_PRIVATE_PID (CONFIG_AVM_PA_MAX_PID + 1)

#if !defined(MODULE)
static struct module fake_mod = { .name = "avm_hw_pa" }; /* required by datapath_api */
#endif

static struct ktd_suite *test_suite;

#define LOCAL_SESSIONS_NUM (MAX_SUBIF_PER_PORT)
HANDLE_POOL_DECLARE(local_sessions, LOCAL_SESSIONS_NUM);
#define local_session_alloc() handle_alloc(local_sessions, LOCAL_SESSIONS_NUM)
#define local_session_free(s) handle_free((s), local_sessions)

static const struct avm_pa_session *vap_to_session[LOCAL_SESSIONS_NUM];

/* Keep the relation between vlan and port/subifid opaque.
*/ typedef struct { int _entry; } vlan_handle; static struct vlan_entry { #define VLAN_ENTRY_REMOVE_TAG VLAN_N_VID // internal vid to remove tags #define VLAN_ENTRY_INVALID_VID (VLAN_N_VID + 1) #define VLAN_ENTRY_REFCNT_SHIFT 16 #define VLAN_ENTRY_REFCNT(compound) ((compound) >> VLAN_ENTRY_REFCNT_SHIFT) #define VLAN_ENTRY_VID(compound) \ ((compound) & (BIT(VLAN_ENTRY_REFCNT_SHIFT) - 1)) atomic_t refcnt_vid_compound; } vlan_entries[PMAC_MAX_NUM][MAX_SUBIF_PER_PORT]; enum { dtor_id_vhandle, dtor_id_local, dtor_id_lro, dtor_id_tun, #define SESSION_MAX_DTORS 4 }; static void vlan_release_dtor(void *arg); static void local_session_dtor(void *arg); static void lroflow_dtor(void *arg); static void tunnel_dtor(void *arg); struct session { GSW_ROUTE_Entry_t rt_entry; void (*dtors[SESSION_MAX_DTORS])(void *arg); void *dtor_args[SESSION_MAX_DTORS]; vlan_handle vhandle; u8 local_vap; int lroflow; }; static enum hwpa_backend_rv vlan_acquire(int portid, u16 vid, vlan_handle *vhandle_out); static enum hwpa_backend_rv vlan_release(vlan_handle vhandle); static enum hwpa_backend_rv vlan_embed_action(GSW_ROUTE_Session_action_t *a, vlan_handle vhandle); static enum hwpa_backend_rv vlan_remove_tags(uint32_t portid); static int32_t pae_cmd(uint32_t command, void *arg); static void pce_rule_disable(enum pce_rule_idx ruleidx); #define TUNNEL_ENTRY_MAX 16 static struct tunnel { struct kref kref; struct mutex lock; GSW_ROUTE_Tunnel_t gsw_info; enum pce_rule_idx tcp_rule, udp_rule; } tunnel_entries[TUNNEL_ENTRY_MAX]; /* The value of routing extension ID is nothing but an additional key for the * routing table lookup. We use some of its bits to hint at isolated routing * sessions of a specific tunnel. */ struct extid_bitmap { u8 tunnel : 4; u8 has_tunnel : 1; u8 proto : 3; }; static void pae_extid_set_proto(u8 *extid, u8 ipproto) { enum { extid_udp, extid_tcp, }; struct extid_bitmap *bits; BUILD_BUG_ON(sizeof(*bits) != sizeof(*extid)); bits = (void *)extid; switch(ipproto) { case IPPROTO_UDP: case IPPROTO_UDPLITE: bits->proto = extid_udp; break; case IPPROTO_TCP: bits->proto = extid_tcp; break; default: break; } } static void pae_extid_set_tunnel(u8 *extid, int tunnel_index) { struct extid_bitmap *bits; BUILD_BUG_ON(sizeof(*bits) != sizeof(*extid)); bits = (void *)extid; bits->has_tunnel = true; bits->tunnel = tunnel_index; } static bool session_needs_tunnel_isolation(const struct session *hws) { return (hws->rt_entry.routeEntry.action.eSessDirection == GSW_ROUTE_DIRECTION_DNSTREAM && hws->rt_entry.routeEntry.action.bTunnel_Enable); } static bool tunnel_eq(const GSW_ROUTE_Tunnel_t *a, const GSW_ROUTE_Tunnel_t *b) { if (a->eTunnelType != b->eTunnelType) return false; switch (a->eTunnelType) { case GSW_ROUTE_TUNL_DSLITE: return (!memcmp(a->t.tunDSlite.nDstIP6Addr.nIPv6, b->t.tunDSlite.nDstIP6Addr.nIPv6, sizeof(b->t.tunDSlite.nDstIP6Addr.nIPv6)) && !memcmp(a->t.tunDSlite.nSrcIP6Addr.nIPv6, b->t.tunDSlite.nSrcIP6Addr.nIPv6, sizeof(b->t.tunDSlite.nSrcIP6Addr.nIPv6))); case GSW_ROUTE_TUNL_6RD: return (!memcmp(&a->t.tun6RD.nDstIP4Addr.nIPv4, &b->t.tun6RD.nDstIP4Addr.nIPv4, sizeof(b->t.tun6RD.nDstIP4Addr.nIPv4)) && !memcmp(&a->t.tun6RD.nSrcIP4Addr.nIPv4, &b->t.tun6RD.nSrcIP4Addr.nIPv4, sizeof(b->t.tun6RD.nSrcIP4Addr.nIPv4))); case GSW_ROUTE_TUNL_NULL: return true; default: return false; } } static void tunnel_release(struct kref *kref) { GSW_ROUTE_Tunnel_Entry_t tun; struct tunnel *tunnel_entry; ptrdiff_t tunnel_index; tunnel_entry = container_of(kref, struct tunnel, kref); BUG_ON(tunnel_entry < &tunnel_entries[0] || 
tunnel_entry >= &tunnel_entries[ARRAY_SIZE(tunnel_entries)]); tunnel_index = tunnel_entry - &tunnel_entries[0]; tun.nTunIndex = tunnel_index; pce_rule_disable(tunnel_entry->tcp_rule); pce_idx_free(tunnel_entry->tcp_rule); pce_rule_disable(tunnel_entry->udp_rule); pce_idx_free(tunnel_entry->udp_rule); pae_cmd(GSW_ROUTE_TUNNEL_ENTRY_DELETE, &tun); memset(&tunnel_entry->gsw_info, 0, sizeof(tunnel_entry->gsw_info)); /* We came here via kref_put_mutex. This lock protected the entry from * reuse. */ mutex_unlock(&tunnel_entry->lock); } static GSW_PCE_rule_t tunnel_default_rule = { .pattern = { .bEnable = 1, .bParserFlagMSB_Enable = 1, .nParserFlagMSB_Mask = (u16) ~(BIT(FLAG_L2TPNEXP) | BIT(FLAG_2UDP) | BIT(FLAG_2IPV6EXT) | BIT(FLAG_1IPV6EXT) | BIT(FLAG_IPFRAG) | BIT(FLAG_IPV4OPT) | BIT(FLAG_TCP) | BIT(FLAG_ROUTEXP) | BIT(FLAG_1IPV6) | BIT(FLAG_2IPV4) | BIT(FLAG_2IPV6) >> 16), .bParserFlagLSB_Enable = 1, .nParserFlagLSB_Mask = (u16) ~(BIT(FLAG_1IPV4) | BIT(FLAG_GRE) | BIT(FLAG_CAPWAP)) }, .action = { .bRtInnerIPasKey_Action = 1, .bRtDstPortMaskCmp_Action = 1, .bRtSrcPortMaskCmp_Action = 1, .bRtDstIpMaskCmp_Action = 1, .bRtSrcIpMaskCmp_Action = 1, .bRoutExtId_Action = 1, .bRtAccelEna_Action = 1, .bRtCtrlEna_Action = 1 } }; static enum hwpa_backend_rv tunnel_pce_deploy(struct tunnel *tun) { GSW_PCE_rule_t rule; int tunnel_idx; int rule_idx_tcp, rule_idx_udp; uint8_t udp_extid = 0, tcp_extid = 0; tunnel_idx = tun - tunnel_entries; pae_extid_set_tunnel(&tcp_extid, tunnel_idx); pae_extid_set_tunnel(&udp_extid, tunnel_idx); pae_extid_set_proto(&tcp_extid, IPPROTO_TCP); pae_extid_set_proto(&udp_extid, IPPROTO_UDP); rule = tunnel_default_rule; switch (tun->gsw_info.eTunnelType) { case GSW_ROUTE_TUNL_DSLITE: rule.pattern.nParserFlagMSB |= (BIT(FLAG_2IPV4) | BIT(FLAG_1IPV6)) >> 16; rule.pattern.nParserFlagLSB = 0; rule.pattern.eSrcIP_Select = GSW_PCE_IP_V6; rule.pattern.eDstIP_Select = GSW_PCE_IP_V6; /* header is upstream so swap the adresses */ rule.pattern.nDstIP = tun->gsw_info.t.tunDSlite.nSrcIP6Addr; rule.pattern.nSrcIP = tun->gsw_info.t.tunDSlite.nDstIP6Addr; /* set bit -> don't care about that nibble */ rule.pattern.nDstIP_Mask = ~0xffffffff; rule.pattern.nSrcIP_Mask = ~0xffffffff; break; case GSW_ROUTE_TUNL_6RD: rule.pattern.nParserFlagMSB |= BIT(FLAG_2IPV6) >> 16; rule.pattern.nParserFlagLSB = BIT(FLAG_1IPV4); rule.pattern.eSrcIP_Select = GSW_PCE_IP_V4; rule.pattern.eDstIP_Select = GSW_PCE_IP_V4; /* header is upstream so swap the adresses */ rule.pattern.nDstIP = tun->gsw_info.t.tun6RD.nSrcIP4Addr; rule.pattern.nSrcIP = tun->gsw_info.t.tun6RD.nDstIP4Addr; /* set bit -> don't care about that nibble */ rule.pattern.nDstIP_Mask = ~0xff; rule.pattern.nSrcIP_Mask = ~0xff; break; default: break; } rule_idx_tcp = pce_idx_alloc(PCE_RANGE_DYNAMIC); if (rule_idx_tcp < 0) return HWPA_BACKEND_ERR_GSW_PCE_RULE; rule.pattern.nIndex = rule_idx_tcp; rule.pattern.nParserFlagMSB |= BIT(FLAG_TCP) >> 16; rule.action.nRoutExtId = tcp_extid; if (pae_cmd(GSW_PCE_RULE_WRITE, &rule) < GSW_statusOk) goto errout1; rule_idx_udp = pce_idx_alloc(PCE_RANGE_DYNAMIC); if (rule_idx_udp < 0) goto errout2; rule.pattern.nIndex = rule_idx_udp; rule.pattern.nParserFlagMSB &= ~(BIT(FLAG_TCP) >> 16); rule.pattern.nParserFlagMSB |= BIT(FLAG_1UDP) >> 16; rule.action.nRoutExtId = udp_extid; if (pae_cmd(GSW_PCE_RULE_WRITE, &rule) < GSW_statusOk) goto errout3; tun->tcp_rule = rule_idx_tcp; tun->udp_rule = rule_idx_udp; return HWPA_BACKEND_SUCCESS; errout3: pce_idx_free(rule_idx_udp); errout2: pce_rule_disable(rule_idx_tcp); errout1: 
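	/* errout1 only frees the TCP rule index: the TCP PCE rule was either
	 * never written (its write failed above) or has already been disabled
	 * via errout2.
	 */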
	pce_idx_free(rule_idx_tcp);
	return HWPA_BACKEND_ERR_GSW_PCE_RULE;
}

static enum hwpa_backend_rv tunnel_add(const GSW_ROUTE_Tunnel_t *info,
				       int *index_out)
{
	int i;
	GSW_ROUTE_Tunnel_Entry_t tun;
	struct tunnel *new_entry = NULL;

	/* See if an existing entry can be reused. */
	for (i = 0; i < ARRAY_SIZE(tunnel_entries); i++) {
		struct tunnel *e;

		e = &tunnel_entries[i];
		if (kref_get_unless_zero(&e->kref)) {
			if (tunnel_eq(info, &e->gsw_info)) {
				*index_out = i;
				return HWPA_BACKEND_SUCCESS;
			}
			kref_put_mutex(&e->kref, tunnel_release, &e->lock);
		}
	}

	/* If we reach this, add a new entry */
	for (i = 0; !new_entry && i < ARRAY_SIZE(tunnel_entries); i++) {
		struct tunnel *e;

		e = &tunnel_entries[i];
		/* If this lock is taken, the release path is busy removing
		 * this entry.
		 */
		if (mutex_trylock(&e->lock)) {
			/* The entry is free if no tunnel type is set. The
			 * release path sets the type to NULL after dropping
			 * the last reference.
			 */
			if (e->gsw_info.eTunnelType != GSW_ROUTE_TUNL_NULL) {
				mutex_unlock(&e->lock);
			} else {
				*index_out = i;
				new_entry = e;
			}
		}
	}

	/* No free slot found: nothing is locked, so do not unwind through
	 * alloc_err.
	 */
	if (!new_entry)
		return HWPA_BACKEND_ERR_GSW_TUN;

	tun.tunnelEntry = *info;
	tun.nTunIndex = *index_out;
	if (GSW_statusOk != pae_cmd(GSW_ROUTE_TUNNEL_ENTRY_ADD, &tun))
		goto alloc_err;

	new_entry->gsw_info = *info;
	if (tunnel_pce_deploy(new_entry) != HWPA_BACKEND_SUCCESS) {
		memset(&new_entry->gsw_info, 0, sizeof(new_entry->gsw_info));
		goto alloc_err;
	}

	/* We remain the only user until the lock is released or the
	 * refcount becomes >0. Up to here, all state belonging to the entry
	 * needs to be consistent.
	 */
	kref_get(&new_entry->kref);
	mutex_unlock(&new_entry->lock);
	return HWPA_BACKEND_SUCCESS;

alloc_err:
	mutex_unlock(&new_entry->lock);
	return HWPA_BACKEND_ERR_GSW_TUN;
}

static void pce_rule_disable(enum pce_rule_idx ruleidx)
{
	GSW_PCE_rule_t rule;

	memset(&rule, 0, sizeof(rule));
	rule.pattern.nIndex = ruleidx;
	rule.pattern.bEnable = 0;
	pae_cmd(GSW_PCE_RULE_WRITE, &rule);
}

static void tunnel_dtor(void *arg)
{
	struct tunnel *e;
	int tunnel_index = *((u8 *)arg);

	BUG_ON(tunnel_index >= ARRAY_SIZE(tunnel_entries));
	e = &tunnel_entries[tunnel_index];
	kref_put_mutex(&e->kref, tunnel_release, &e->lock);
}

static void lroflow_dtor(void *arg)
{
	int ruleidx;
	int *lroflow = arg;

	ruleidx = PCE_RULE_IDX_LRO_EXCEPTION + *lroflow;
	pce_rule_disable(ruleidx);
	pce_idx_free(ruleidx);
	lro_stop_flow(*lroflow, 0, 0);
}

static void local_session_dtor(void *arg)
{
	unsigned long handle;
	u8 *vap = arg;

	handle = *vap;
	if (handle < LOCAL_SESSIONS_NUM) {
		vap_to_session[handle] = NULL;
		local_session_free(handle);
	}
}

static void put_netdev(struct net_device *netdev)
{
	/* dummy netdevs do not have a refcount */
	if (netdev == ktd_netdev)
		return;

	dev_put(netdev);
}

static struct net_device *get_netdev(avm_pid_handle pid)
{
	struct net_device *dev;

	dev = hwpa_get_netdev(pid);
	if (dev)
		return dev;

	if (ktd_netdev && AVM_PA_DEVINFO(ktd_netdev)->pid_handle == pid) {
		return ktd_netdev;
	}

	return NULL;
}

static enum hwpa_backend_rv lro_setup(struct session *hws)
{
	GSW_PCE_rule_t rule;
	int ruleidx;
	u32 parser_flags, parser_flags_mask;
	struct cpumask unused = {0};
	typeof(hws->rt_entry.routeEntry.pattern) *pattern;

	if (!lro_available())
		return HWPA_BACKEND_ERR_LRO_FULL;

	pattern = &hws->rt_entry.routeEntry.pattern;

	if (lro_start_flow(&hws->lroflow, LRO_TIMEOUT_US(200), 0, unused))
		return HWPA_BACKEND_ERR_LRO_FULL;

	ruleidx = PCE_RULE_IDX_LRO_EXCEPTION + hws->lroflow;
	if (!pce_idx_request(ruleidx)) {
		lro_stop_flow(hws->lroflow, 0, 0);
		return HWPA_BACKEND_ERR_GSW_PCE_RULE;
	}

	memset(&rule, 0, sizeof(rule));
	rule.pattern.nIndex =
ruleidx; rule.pattern.bEnable = 1; parser_flags = BIT(FLAG_LROEXP) | BIT(FLAG_TCP); parser_flags_mask = ~(BIT(FLAG_LROEXP) | BIT(FLAG_TCP) | BIT(FLAG_2IPV6) | BIT(FLAG_2IPV4)); rule.pattern.bParserFlagMSB_Enable = 1; rule.pattern.nParserFlagMSB = (parser_flags >> 16); rule.pattern.nParserFlagMSB_Mask = (parser_flags_mask >> 16); rule.pattern.bParserFlagLSB_Enable = 1; rule.pattern.nParserFlagLSB = (parser_flags & 0xffff); rule.pattern.nParserFlagLSB_Mask = (parser_flags_mask & 0xffff); if (pattern->eIpType == GSW_RT_IP_V6) { rule.pattern.eDstIP_Select = GSW_PCE_IP_V6; rule.pattern.eSrcIP_Select = GSW_PCE_IP_V6; rule.pattern.nDstIP_Mask = 0x0000; rule.pattern.nSrcIP_Mask = 0x0000; } else { rule.pattern.eDstIP_Select = GSW_PCE_IP_V4; rule.pattern.eSrcIP_Select = GSW_PCE_IP_V4; rule.pattern.nDstIP_Mask = 0xFF00; rule.pattern.nSrcIP_Mask = 0xFF00; } rule.pattern.nDstIP = pattern->nDstIP; rule.pattern.nSrcIP = pattern->nSrcIP; //TCP source port and destination port rule.pattern.bAppDataMSB_Enable = 1; rule.pattern.nAppDataMSB = pattern->nSrcPort; rule.pattern.nAppMaskRangeMSB = 0xF0; rule.pattern.bAppDataLSB_Enable = 1; rule.pattern.nAppDataLSB = pattern->nDstPort; rule.pattern.nAppMaskRangeLSB = 0xF0; rule.pattern.bEnable = 1; rule.pattern.bPktLngEnable = 1; rule.pattern.nPktLng = 0; rule.pattern.nPktLngRange = 1500; rule.action.bRtCtrlEna_Action = 1; rule.action.bFlowID_Action = 1; rule.action.nFlowID = 0xC0 + hws->lroflow; if (pae_cmd(GSW_PCE_RULE_WRITE, &rule) < GSW_statusOk) { lro_stop_flow(hws->lroflow, 0, 0); return HWPA_BACKEND_ERR_GSW_PCE_RULE; } hws->rt_entry.routeEntry.action.nFlowId = 0x80 + hws->lroflow; hws->dtors[dtor_id_lro] = lroflow_dtor; hws->dtor_args[dtor_id_lro] = &hws->lroflow; return HWPA_BACKEND_SUCCESS; } static enum hwpa_backend_rv extract_action_tunnel(const struct avm_pa_session *s, GSW_ROUTE_Session_action_t *a) { GSW_ROUTE_Tunnel_t tun; u16 ig_encap, eg_encap, encap; const struct avm_pa_pkt_match *pkt_match; enum hwpa_backend_rv rv; int tun_index; ig_encap = s->ingress.pkttype & AVM_PA_PKTTYPE_IPENCAP_MASK; eg_encap = avm_pa_first_egress(s)->match.pkttype & AVM_PA_PKTTYPE_IPENCAP_MASK; /* TODO * If both encapsulations are equal in terms of type and data we * should be able to support this. For now, it does not seem worth the * trouble. */ if (ig_encap && eg_encap) { return HWPA_BACKEND_ERR_GSW_TUN; } /* Nothing to be done if there is no encapsulation */ if (!ig_encap && !eg_encap) { return HWPA_BACKEND_SUCCESS; } /* We made sure that one of them is zero */ if (ig_encap) { encap = ig_encap; pkt_match = &s->ingress; } else { encap = eg_encap; pkt_match = &avm_pa_first_egress(s)->match; } /* Copy tunnel info from the respective header */ if (encap == AVM_PA_PKTTYPE_IPV6ENCAP) { const struct ipv6hdr *hdr; GSW_IP_t *gsw_dst, *gsw_src; hdr = hwpa_get_hdr(pkt_match, AVM_PA_IPV6); if (!hdr) return HWPA_BACKEND_ERR_SESSION_MALFORMED; gsw_src = &tun.t.tunDSlite.nSrcIP6Addr; gsw_dst = &tun.t.tunDSlite.nDstIP6Addr; tun.eTunnelType = GSW_ROUTE_TUNL_DSLITE; /* The tunnel info belongs to the header added on egress, so * swap the address pair if necessary. */ memcpy(&gsw_src->nIPv6, encap == eg_encap ? &hdr->saddr : &hdr->daddr, sizeof(gsw_src->nIPv6)); memcpy(&gsw_dst->nIPv6, encap == eg_encap ? 
&hdr->daddr : &hdr->saddr, sizeof(gsw_dst->nIPv6)); } else if (encap == AVM_PA_PKTTYPE_IPV4ENCAP) { const struct iphdr *hdr; GSW_IP_t *gsw_dst, *gsw_src; hdr = hwpa_get_hdr(pkt_match, AVM_PA_IPV4); if (!hdr) return HWPA_BACKEND_ERR_SESSION_MALFORMED; gsw_src = &tun.t.tun6RD.nSrcIP4Addr; gsw_dst = &tun.t.tun6RD.nDstIP4Addr; tun.eTunnelType = GSW_ROUTE_TUNL_6RD; /* The tunnel info belongs to the header added on egress, so * swap the address pair if necessary. */ memcpy(&gsw_src->nIPv4, encap == eg_encap ? &hdr->saddr : &hdr->daddr, sizeof(gsw_src->nIPv4)); memcpy(&gsw_dst->nIPv4, encap == eg_encap ? &hdr->daddr : &hdr->saddr, sizeof(gsw_dst->nIPv4)); a->eIpType = GSW_RT_IP_V4; memcpy(&a->nNATIPaddr.nIPv4, encap == eg_encap ? &hdr->daddr : &hdr->saddr, sizeof(gsw_dst->nIPv4)); } else { /* The match patterns defined for GRX prevent this case */ BUG(); } rv = tunnel_add(&tun, &tun_index); if (rv != HWPA_BACKEND_SUCCESS) return rv; a->nTunnelIndex = tun_index; a->bTunnel_Enable = 1; a->eTunType = tun.eTunnelType; a->eSessDirection = ig_encap && !eg_encap ? GSW_ROUTE_DIRECTION_DNSTREAM : GSW_ROUTE_DIRECTION_UPSTREAM; return HWPA_BACKEND_SUCCESS; } static void extract_pattern(const struct avm_pa_session *s, GSW_ROUTE_Session_pattern_t *p) { const u16 *ports; p->bValid = 1; if ((s->ingress.pkttype & AVM_PA_PKTTYPE_IP_MASK) == AVM_PA_PKTTYPE_IPV6) { const struct ipv6hdr *hdr; hdr = hwpa_get_hdr(&s->ingress, AVM_PA_IPV6); p->eIpType = GSW_RT_IP_V6; memcpy(&p->nSrcIP.nIPv6, &hdr->saddr, sizeof(p->nSrcIP.nIPv6)); memcpy(&p->nDstIP.nIPv6, &hdr->daddr, sizeof(p->nDstIP.nIPv6)); } else { const struct iphdr *hdr; hdr = hwpa_get_hdr(&s->ingress, AVM_PA_IPV4); p->eIpType = GSW_RT_IP_V4; memcpy(&p->nSrcIP.nIPv4, &hdr->saddr, sizeof(p->nSrcIP.nIPv4)); memcpy(&p->nDstIP.nIPv4, &hdr->daddr, sizeof(p->nDstIP.nIPv4)); } // TODO isolation for vlan * ppp * tunnel * proto pae_extid_set_proto(&p->nRoutExtId, AVM_PA_PKTTYPE_IPPROTO(s->ingress.pkttype)); ports = hwpa_get_hdr(&s->ingress, AVM_PA_PORTS); p->nSrcPort = ports[0]; p->nDstPort = ports[1]; } static enum hwpa_backend_rv portid_from_action(const GSW_ROUTE_Session_action_t *a, int *portid_out) { int port; port = ffs(a->nDstPortMap); if (!port) return HWPA_BACKEND_ERR_INTERNAL; *portid_out = port - 1; return HWPA_BACKEND_SUCCESS; } static enum hwpa_backend_rv extract_action_egvlan(const struct avm_pa_pkt_match *ig_match, const struct avm_pa_pkt_match *eg_match, GSW_PCE_EgVLAN_Entry_t *ventry) { const struct vlan_hdr *igh; const struct vlan_hdr *egh; u16 tci; memset(ventry, 0, sizeof(*ventry)); igh = (void *)hwpa_get_hdr(ig_match, AVM_PA_VLAN); egh = (void *)hwpa_get_hdr(eg_match, AVM_PA_VLAN); if (egh) { if ((u8 *)egh - eg_match->hdrcopy >= AVM_PA_OFFSET_NOT_SET) tci = eg_match->vlan_tci; else tci = egh->h_vlan_TCI; } else { tci = 0; } /* fill those in extract_action() later */ ventry->nPortId = 0; ventry->nIndex = 0; ventry->bEgVLAN_Action = 1; ventry->bEgSVidRem_Action = 1; ventry->bEgSVidIns_Action = 0; ventry->bEgCVidRem_Action = !!igh; ventry->bEgCVidIns_Action = !!egh; ventry->nEgCVid = tci % VLAN_N_VID; return HWPA_BACKEND_SUCCESS; } static enum hwpa_backend_rv extract_action_pppoe(const struct avm_pa_pkt_match *ig_match, const struct avm_pa_pkt_match *eg_match, GSW_ROUTE_Session_action_t *a) { const struct pppoe_hdr *igh; const struct pppoe_hdr *egh; igh = (void *)hwpa_get_hdr(ig_match, AVM_PA_PPPOE); egh = (void *)hwpa_get_hdr(eg_match, AVM_PA_PPPOE); if (!igh && !egh) return HWPA_BACKEND_SUCCESS; if (igh && egh) { if (egh->sid != igh->sid) /* only 
encap/decap supported */ return HWPA_BACKEND_ERR_PPPOE_MODIFY; else { /* transparent mode, nothing to be done */ return HWPA_BACKEND_SUCCESS; } } if (igh) { /* decap */ a->eSessDirection = GSW_ROUTE_DIRECTION_DNSTREAM; a->nPPPoESessId = igh->sid; } else { /* encap */ a->eSessDirection = GSW_ROUTE_DIRECTION_UPSTREAM; a->nPPPoESessId = egh->sid; } a->bPPPoEmode = 1; return HWPA_BACKEND_SUCCESS; } static enum hwpa_backend_rv extract_action_nat(const struct avm_pa_pkt_match *ig_match, const struct avm_pa_pkt_match *eg_match, GSW_ROUTE_Session_action_t *a) { const void *addr_snat, *addr_dnat; size_t addr_size; bool port_snat, port_dnat; /* compare UDP/TCP ports and copy translated value */ { const u16 *ig_ports, *eg_ports; ig_ports = hwpa_get_hdr(ig_match, AVM_PA_PORTS); eg_ports = hwpa_get_hdr(eg_match, AVM_PA_PORTS); port_snat = ig_ports[0] != eg_ports[0]; if (port_snat) a->nTcpUdpPort = eg_ports[0]; port_dnat = ig_ports[1] != eg_ports[1]; if (port_dnat) a->nTcpUdpPort = eg_ports[1]; if (port_snat || port_dnat) a->eSessRoutingMode = max((int)a->eSessRoutingMode, GSW_ROUTE_MODE_NAPT); } /* check for NAT and copy address */ if ((eg_match->pkttype & AVM_PA_PKTTYPE_IP_MASK) == AVM_PA_PKTTYPE_IPV6) { const struct ipv6hdr *ip6_ig, *ip6_eg; a->eIpType = GSW_RT_IP_V6; ip6_ig = (struct ipv6hdr *)hwpa_get_hdr(ig_match, (AVM_PA_IPV6)); ip6_eg = (struct ipv6hdr *)hwpa_get_hdr(eg_match, (AVM_PA_IPV6)); addr_size = sizeof(ip6_ig->daddr); addr_dnat = (port_dnat || memcmp(&ip6_ig->daddr, &ip6_eg->daddr, addr_size)) ? &ip6_eg->daddr : NULL; addr_snat = (port_snat || memcmp(&ip6_ig->saddr, &ip6_eg->saddr, addr_size)) ? &ip6_eg->saddr : NULL; } else { const struct iphdr *ip4_ig, *ip4_eg; a->eIpType = GSW_RT_IP_V4; ip4_ig = (struct iphdr *)hwpa_get_hdr(ig_match, (AVM_PA_IPV4)); ip4_eg = (struct iphdr *)hwpa_get_hdr(eg_match, (AVM_PA_IPV4)); addr_size = sizeof(ip4_ig->daddr); addr_dnat = (port_dnat || memcmp(&ip4_ig->daddr, &ip4_eg->daddr, addr_size)) ? &ip4_eg->daddr : NULL; addr_snat = (port_snat || memcmp(&ip4_ig->saddr, &ip4_eg->saddr, addr_size)) ? 
&ip4_eg->saddr : NULL; } if (addr_dnat) memcpy(&a->nNATIPaddr, addr_dnat, addr_size); if (addr_snat) memcpy(&a->nNATIPaddr, addr_snat, addr_size); if (addr_snat || addr_dnat) a->eSessRoutingMode = max((int)a->eSessRoutingMode, GSW_ROUTE_MODE_NAT); /* cannot rewrite source AND destination */ if ((addr_snat || port_snat) && (addr_dnat || port_dnat)) return HWPA_BACKEND_ERR_NAT_CONFLICT; /* set direction (SNAT or DNAT) */ if (addr_snat || port_snat) a->eSessDirection = GSW_ROUTE_DIRECTION_UPSTREAM; else a->eSessDirection = GSW_ROUTE_DIRECTION_DNSTREAM; return HWPA_BACKEND_SUCCESS; } static bool ingress_pid_supported(avm_pid_handle pid) { struct net_device *netdev; dp_subif_t _subif; bool rv; netdev = get_netdev(pid); if (!netdev) return false; rv = !offdp_ep_platform_data(netdev, &_subif, sizeof(_subif)); put_netdev(netdev); return rv; } static enum hwpa_backend_rv extract_action_dst(const struct avm_pa_session *s, struct session *hws, GSW_ROUTE_Session_action_t *a) { struct avm_pa_egress *eg = avm_pa_first_egress(s); enum hwpa_backend_rv rv; dp_subif_t subif; struct net_device *netdev; uint8_t local_session; switch (eg->type) { case avm_pa_egresstype_output: netdev = get_netdev(eg->pid_handle); break; case avm_pa_egresstype_local: case avm_pa_egresstype_rtp: local_session = local_session_alloc(); if (local_session >= LOCAL_SESSIONS_NUM) return HWPA_BACKEND_ERR_INTERNAL; netdev = localpid_netdev; dev_hold(netdev); a->nDstSubIfId |= local_session << VAP_OFFSET; vap_to_session[local_session] = s; hws->dtors[dtor_id_local] = local_session_dtor; hws->local_vap = local_session; hws->dtor_args[dtor_id_local] = &hws->local_vap; break; default: netdev = NULL; } if (!netdev) return HWPA_BACKEND_ERR_PID_NODEV; if (offdp_ep_platform_data(netdev, &subif, sizeof(subif))) { rv = HWPA_BACKEND_ERR_PID_UNREACHABLE; } else { dp_subif_t tmp_subif; rv = HWPA_BACKEND_SUCCESS; memcpy(&tmp_subif, &subif, sizeof(tmp_subif)); if (eg->destmac && dc_dp_get_netif_subifid(netdev, NULL, NULL, eg->destmac->mac, &tmp_subif, 0) == 0) { subif.subif = tmp_subif.subif; rv = vlan_remove_tags(subif.port_id); } a->nDstPortMap |= BIT(subif.port_id); a->nDstSubIfId |= subif.subif; /* Switch_api causes wraparound to 0 for mtu == 0xffff. 
*/ a->nMTUvalue = min(eg->mtu, (u16)0xfffeu); } put_netdev(netdev); return rv; } static void extract_action_routing(const struct avm_pa_pkt_match *ig_match, const struct avm_pa_pkt_match *eg_match, GSW_ROUTE_Session_action_t *a) { const struct ethhdr *ig, *eg; ig = hwpa_get_hdr(ig_match, AVM_PA_ETH); eg = hwpa_get_hdr(eg_match, AVM_PA_ETH); if (!ig || !eg) return; a->bMAC_DstEnable = !!memcmp(ig->h_dest, eg->h_dest, sizeof(ig->h_dest)); a->bMAC_SrcEnable = !!memcmp(ig->h_source, eg->h_source, sizeof(ig->h_source)); memcpy(a->nDstMAC, eg->h_dest, sizeof(a->nDstMAC)); memcpy(a->nSrcMAC, eg->h_source, sizeof(a->nSrcMAC)); if (a->bMAC_DstEnable || a->bMAC_SrcEnable) a->eSessRoutingMode = max((int)a->eSessRoutingMode, GSW_ROUTE_MODE_ROUTING); } static enum hwpa_backend_rv extract_action(const struct avm_pa_session *s, struct session *hws, GSW_ROUTE_Session_action_t *a) { enum hwpa_backend_rv rv; const struct avm_pa_pkt_match *ig_match, *eg_match; GSW_ROUTE_Session_Direction_t pppoe_direction, nat_direction, tun_direction; GSW_PCE_EgVLAN_Entry_t egvlan; bool pppoe_dir_matters, nat_dir_matters, tun_dir_matters; bool direction_and, direction_or; struct avm_pa_egress *eg = avm_pa_first_egress(s); ig_match = &s->ingress; eg_match = &eg->match; /* default to forwarding/bridging */ a->eSessRoutingMode = GSW_ROUTE_MODE_NULL; extract_action_routing(ig_match, eg_match, a); rv = extract_action_egvlan(ig_match, eg_match, &egvlan); if (rv != HWPA_BACKEND_SUCCESS) return rv; rv = extract_action_pppoe(ig_match, eg_match, a); if (rv != HWPA_BACKEND_SUCCESS) return rv; pppoe_direction = a->eSessDirection; pppoe_dir_matters = !!a->bPPPoEmode; rv = extract_action_nat(ig_match, eg_match, a); if (rv != HWPA_BACKEND_SUCCESS) return rv; nat_direction = a->eSessDirection; nat_dir_matters = a->eSessRoutingMode != GSW_ROUTE_MODE_NULL; rv = extract_action_tunnel(s, a); if (rv != HWPA_BACKEND_SUCCESS) return rv; if ((tun_dir_matters = !!a->bTunnel_Enable)) { tun_direction = a->eSessDirection; hws->dtor_args[dtor_id_tun] = &a->nTunnelIndex; hws->dtors[dtor_id_tun] = tunnel_dtor; } /* Identify directional conflicts across actions: * All the deducted directions are either 1 or 0. Iff they are all of * the same value, and'ing and or'ing them yields the same result. * Iff the direction does not matter at all, direction_and and * direction_or stay at their neutral values. */ direction_and = 1; direction_and &= pppoe_dir_matters ? pppoe_direction : 1; direction_and &= nat_dir_matters ? nat_direction : 1; direction_and &= tun_dir_matters ? tun_direction : 1; direction_or = 0; direction_or |= pppoe_dir_matters ? pppoe_direction : 0; direction_or |= nat_dir_matters ? nat_direction : 0; direction_or |= tun_dir_matters ? 
tun_direction : 0; if(!direction_and && direction_or) return HWPA_BACKEND_ERR_ACT_CONFLICT; a->eSessDirection = direction_and; /* decrease TTL if we are routing */ a->bTTLDecrement = (a->eSessRoutingMode > GSW_ROUTE_MODE_NULL); /* set the traffic class */ a->bTCremarking = 1; if (eg->type == avm_pa_egresstype_rtp) { a->nTrafficClass = 2; /* prioritize telephony towards cpu */ } else if (eg->type == avm_pa_egresstype_output) { a->nTrafficClass = clamp(TC_H_MIN(eg->output.priority), 1u, 7u); } else { a->nTrafficClass = 0; /* default queue */ } rv = extract_action_dst(s, hws, a); if (rv != HWPA_BACKEND_SUCCESS) return rv; if (egvlan.bEgVLAN_Action && !hws->dtors[dtor_id_local]) { int portid; rv = portid_from_action(a, &portid); if (rv != HWPA_BACKEND_SUCCESS) return rv; if (egvlan.bEgCVidIns_Action) { rv = vlan_acquire(portid, egvlan.nEgCVid, &hws->vhandle); } else { /* Force lookup of the special removal slot */ rv = vlan_acquire(portid, VLAN_ENTRY_REMOVE_TAG, &hws->vhandle); } if (rv != HWPA_BACKEND_SUCCESS) return rv; hws->dtors[dtor_id_vhandle] = vlan_release_dtor; hws->dtor_args[dtor_id_vhandle] = &hws->vhandle; rv = vlan_embed_action(a, hws->vhandle); if (rv != HWPA_BACKEND_SUCCESS) return rv; } return HWPA_BACKEND_SUCCESS; } static int32_t pae_cmd(uint32_t command, void *arg) { int32_t ret; GSW_API_HANDLE gswr = 0; gswr = gsw_api_kopen("/dev/switch_api/1"); if (gswr == 0) { return -1; } ret = gsw_api_kioctl(gswr, command, arg); gsw_api_kclose(gswr); return ret; } static enum hwpa_backend_rv vlan_embed_action(GSW_ROUTE_Session_action_t *a, vlan_handle vhandle) { if (a->nDstSubIfId & ~0xff) return HWPA_BACKEND_ERR_VLAN_CONFLICT; if (a->nDstPortMap != BIT(vhandle._entry / PMAC_MAX_NUM)) return HWPA_BACKEND_ERR_INTERNAL; a->nDstSubIfId |= (vhandle._entry % PMAC_MAX_NUM) << VAP_OFFSET; return HWPA_BACKEND_SUCCESS; } static enum hwpa_backend_rv vlan_acquire(int portid, u16 vid, vlan_handle *vhandle_out) { int subifid; if (vid >= VLAN_ENTRY_INVALID_VID) return HWPA_BACKEND_ERR_INTERNAL; /* Find existing entry... */ for (subifid = 1; subifid < ARRAY_SIZE(vlan_entries[portid]); subifid++) { struct vlan_entry *ventry; int v; bool hit; ventry = &vlan_entries[portid][subifid]; do { v = atomic_read(&ventry->refcnt_vid_compound); hit = (VLAN_ENTRY_REFCNT(v) && VLAN_ENTRY_VID(v) == vid); } while (hit && atomic_cmpxchg(&ventry->refcnt_vid_compound, v, v + BIT(VLAN_ENTRY_REFCNT_SHIFT)) != v); if (hit) { vhandle_out->_entry = portid * MAX_SUBIF_PER_PORT + subifid; return HWPA_BACKEND_SUCCESS; } } /* ... or else use a new one. */ for (subifid = 1; subifid < ARRAY_SIZE(vlan_entries[portid]); subifid++) { struct vlan_entry *ventry; int refcnt_vid_compound; GSW_PCE_EgVLAN_Entry_t entry = { 0 }; ventry = &vlan_entries[portid][subifid]; /* Try to claim exclusive access to an empty entry. */ refcnt_vid_compound = BIT(VLAN_ENTRY_REFCNT_SHIFT) | VLAN_ENTRY_REMOVE_TAG; if (atomic_cmpxchg(&ventry->refcnt_vid_compound, 0, refcnt_vid_compound) != 0) continue; /* Write an HW-entry that strips ingress tags and adds ctag with * vid. */ entry.nPortId = portid; entry.nIndex = portid * MAX_SUBIF_PER_PORT + subifid; entry.bEgVLAN_Action = 1; entry.bEgSVidRem_Action = 1; entry.bEgCVidRem_Action = 1; entry.bEgCVidIns_Action = 1; entry.nEgCVid = vid; if (pae_cmd(GSW_PCE_EG_VLAN_ENTRY_WRITE, &entry) < GSW_statusOk) { atomic_set(&ventry->refcnt_vid_compound, 0); return HWPA_BACKEND_ERR_GSW_EGVLAN; } /* Present usable vid to others. 
*/ refcnt_vid_compound = (1 << VLAN_ENTRY_REFCNT_SHIFT) | vid; atomic_set(&ventry->refcnt_vid_compound, refcnt_vid_compound); vhandle_out->_entry = entry.nIndex; return HWPA_BACKEND_SUCCESS; } /* Lack of empty slots */ return HWPA_BACKEND_ERR_VLAN_FULL; } static void vlan_release_dtor(void *arg) { if (arg) vlan_release(*((vlan_handle *)arg)); } static enum hwpa_backend_rv vlan_release(vlan_handle vhandle) { struct vlan_entry *ventry; int v; /* OOB check */ if (vhandle._entry >= (sizeof(vlan_entries) / sizeof(vlan_entries[0][0]))) return HWPA_BACKEND_ERR_INTERNAL; ventry = &vlan_entries[0][0] + vhandle._entry; v = atomic_sub_if_positive(1 << VLAN_ENTRY_REFCNT_SHIFT, &ventry->refcnt_vid_compound); /* Double free */ if (VLAN_ENTRY_REFCNT(v + (1 << VLAN_ENTRY_REFCNT_SHIFT)) == 0) return HWPA_BACKEND_ERR_INTERNAL; if (!VLAN_ENTRY_REFCNT(v)) /* Clear vid to make the slot available, but keep the GSW * entry to give inflight packets a chance to pass. */ atomic_set(&ventry->refcnt_vid_compound, 0); return HWPA_BACKEND_SUCCESS; } static enum hwpa_backend_rv hwpa_backend_init_vlan(void) { int port; { GSW_cfg_t gsw_cfg = { 0 }; if (pae_cmd(GSW_CFG_GET, &gsw_cfg) < GSW_statusOk) { return HWPA_BACKEND_ERR_GSW_CFG; } gsw_cfg.bVLAN_Aware = true; if (pae_cmd(GSW_CFG_SET, &gsw_cfg) < GSW_statusOk) { return HWPA_BACKEND_ERR_GSW_CFG; } } for (port = 0; port < PMAC_MAX_NUM; port++) { int refcnt_vid_compound; int subif_id; GSW_PCE_EgVLAN_Cfg_t cfg = { 0 }; GSW_PCE_EgVLAN_Entry_t entry = { 0 }; cfg.nPortId = port; cfg.bEgVidEna = 1; cfg.eEgVLANmode = GSW_PCE_EG_VLAN_SUBIFID_BASED; cfg.nEgStartVLANIdx = port * MAX_SUBIF_PER_PORT; if (pae_cmd(GSW_PCE_EG_VLAN_CFG_SET, &cfg) < GSW_statusOk) return HWPA_BACKEND_ERR_GSW_CFG; /* Transparent mode for all subif groups except 1 which receives * a rule to remove all tags by default */ for (subif_id = 0; subif_id < MAX_SUBIF_PER_PORT; subif_id++) { if (subif_id == 1) continue; entry.nPortId = port; entry.nIndex = port * MAX_SUBIF_PER_PORT + subif_id; entry.bEgVLAN_Action = 1; if (pae_cmd(GSW_PCE_EG_VLAN_ENTRY_WRITE, &entry) < GSW_statusOk) return HWPA_BACKEND_ERR_GSW_EGVLAN; } /* Remove all tags on subif group 1 */ entry.nPortId = port; entry.nIndex = port * MAX_SUBIF_PER_PORT + 1; entry.bEgVLAN_Action = 1; entry.bEgSVidRem_Action = 1; entry.bEgCVidRem_Action = 1; if (pae_cmd(GSW_PCE_EG_VLAN_ENTRY_WRITE, &entry) < GSW_statusOk) return HWPA_BACKEND_ERR_GSW_EGVLAN; /* block all group 1 slots with an invalid vid */ refcnt_vid_compound = (1 << VLAN_ENTRY_REFCNT_SHIFT) | VLAN_ENTRY_REMOVE_TAG; atomic_set(&vlan_entries[port][1].refcnt_vid_compound, refcnt_vid_compound); } return HWPA_BACKEND_SUCCESS; } static enum hwpa_backend_rv vlan_remove_tags(uint32_t portid) { int refcnt_vid_compound; int subif_id; GSW_PCE_EgVLAN_Entry_t entry = { 0 }; struct vlan_entry *ventry = &vlan_entries[portid][0]; int v = atomic_read(&ventry->refcnt_vid_compound); /* Don't do anything if already initialized */ if (v == ((1 << VLAN_ENTRY_REFCNT_SHIFT) | VLAN_ENTRY_REMOVE_TAG)) return HWPA_BACKEND_SUCCESS; /* Remove all tags on all subif groups */ for (subif_id = 0; subif_id < MAX_SUBIF_PER_PORT; subif_id++) { entry.nPortId = portid; entry.nIndex = portid * MAX_SUBIF_PER_PORT + subif_id; entry.bEgVLAN_Action = 1; entry.bEgSVidRem_Action = 1; entry.bEgCVidRem_Action = 1; if (pae_cmd(GSW_PCE_EG_VLAN_ENTRY_WRITE, &entry) < GSW_statusOk) return HWPA_BACKEND_ERR_GSW_EGVLAN; /* block all slots with an invalid vid */ refcnt_vid_compound = (1 << VLAN_ENTRY_REFCNT_SHIFT) | VLAN_ENTRY_REMOVE_TAG; 
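/* Publish the slot as "one permanent reference, internal remove-tag vid":
 * the non-zero refcount keeps vlan_acquire() from recycling the slot for a
 * real vid, while VLAN_ENTRY_REMOVE_TAG still matches the special
 * tag-removal lookup done in extract_action().
 */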
atomic_set(&vlan_entries[portid][subif_id].refcnt_vid_compound, refcnt_vid_compound); } return HWPA_BACKEND_SUCCESS; } static enum hwpa_backend_rv hwpa_backend_init_counter(void) { GSW_RMON_mode_t rmon_mode; rmon_mode.eRmonType = GSW_RMON_ROUTE_TYPE; rmon_mode.eCountMode = GSW_RMON_COUNT_BYTES; if (pae_cmd(GSW_RMON_MODE_SET, &rmon_mode) < GSW_statusOk) { return HWPA_BACKEND_ERR_INTERNAL; } return HWPA_BACKEND_SUCCESS; } enum hwpa_backend_rv hwpa_backend_init_pce(void) { GSW_PCE_rule_t rule; u32 parser_flags, parser_flags_mask; if (!pce_idx_request(PCE_RULE_IDX_ROUTEEXT_TCP) || !pce_idx_request(PCE_RULE_IDX_ROUTEEXT_UDP)) return HWPA_BACKEND_ERR_GSW_PCE_RULE; /* Besides TCP and UDP for the following rules to enable, this mask * includes all the packet characteristics that should not be * accelerated by default. */ parser_flags_mask = (u32) ~( BIT(FLAG_CAPWAP) | BIT(FLAG_GRE) | BIT(FLAG_GREK) | BIT(FLAG_SNAP) | BIT(FLAG_ROUTEXP) | BIT(FLAG_TCP) | BIT(FLAG_1UDP) | BIT(FLAG_IGMP) | BIT(FLAG_IPV4OPT) | BIT(FLAG_IPFRAG) | BIT(FLAG_EAPOL) | BIT(FLAG_2IPV6EXT) | BIT(FLAG_2UDP) | BIT(FLAG_L2TPNEXP)); /* TCP */ memset(&rule, 0, sizeof(rule)); rule.pattern.nIndex = PCE_RULE_IDX_ROUTEEXT_TCP; rule.pattern.bEnable = 1; parser_flags = BIT(FLAG_TCP); rule.pattern.bParserFlagMSB_Enable = 1; rule.pattern.nParserFlagMSB = (parser_flags >> 16); rule.pattern.nParserFlagMSB_Mask = (parser_flags_mask >> 16); rule.pattern.bParserFlagLSB_Enable = 1; rule.pattern.nParserFlagLSB = (parser_flags & 0xffff); rule.pattern.nParserFlagLSB_Mask = (parser_flags_mask & 0xffff); rule.action.bRtInnerIPasKey_Action = 0; rule.action.bRtDstPortMaskCmp_Action = 1; rule.action.bRtSrcPortMaskCmp_Action = 1; rule.action.bRtDstIpMaskCmp_Action = 1; rule.action.bRtSrcIpMaskCmp_Action = 1; rule.action.bRoutExtId_Action = 1; pae_extid_set_proto(&rule.action.nRoutExtId, IPPROTO_TCP); rule.action.bRtAccelEna_Action = 1; rule.action.bRtCtrlEna_Action = 1; if (pae_cmd(GSW_PCE_RULE_WRITE, &rule) < GSW_statusOk) { return HWPA_BACKEND_ERR_GSW_PCE_RULE; } /* UDP */ memset(&rule, 0, sizeof(rule)); rule.pattern.nIndex = PCE_RULE_IDX_ROUTEEXT_UDP; rule.pattern.bEnable = 1; parser_flags = BIT(FLAG_1UDP); rule.pattern.bParserFlagMSB_Enable = 1; rule.pattern.nParserFlagMSB = (parser_flags >> 16); rule.pattern.nParserFlagMSB_Mask = (parser_flags_mask >> 16); rule.pattern.bParserFlagLSB_Enable = 1; rule.pattern.nParserFlagLSB = (parser_flags & 0xffff); rule.pattern.nParserFlagLSB_Mask = (parser_flags_mask & 0xffff); rule.action.bRtInnerIPasKey_Action = 0; rule.action.bRtDstPortMaskCmp_Action = 1; rule.action.bRtSrcPortMaskCmp_Action = 1; rule.action.bRtDstIpMaskCmp_Action = 1; rule.action.bRtSrcIpMaskCmp_Action = 1; rule.action.bRoutExtId_Action = 1; pae_extid_set_proto(&rule.action.nRoutExtId, IPPROTO_UDP); rule.action.bRtAccelEna_Action = 1; rule.action.bRtCtrlEna_Action = 1; if (pae_cmd(GSW_PCE_RULE_WRITE, &rule) < GSW_statusOk) { return HWPA_BACKEND_ERR_GSW_PCE_RULE; } return HWPA_BACKEND_SUCCESS; } enum hwpa_backend_rv hwpa_backend_add_session(const struct avm_pa_session *s, unsigned long *handle_out) { enum hwpa_backend_rv rv; struct session *hws; int i; if (!ingress_pid_supported(s->ingress_pid_handle)) return HWPA_BACKEND_ERR_PID_UNREACHABLE; hws = kzalloc(sizeof(*hws), GFP_ATOMIC); if (!hws) return HWPA_BACKEND_ERR_INTERNAL; hws->rt_entry.nHashVal = -1; hws->rt_entry.nRtIndex = -1; extract_pattern(s, &hws->rt_entry.routeEntry.pattern); rv = extract_action(s, hws, &hws->rt_entry.routeEntry.action); if (rv != HWPA_BACKEND_SUCCESS) goto errout; 
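/* extract_action() may already have registered destructors (VLAN slot,
 * local session handle, tunnel reference), so every failure from here on
 * must unwind through errout to release them again.
 */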
/* PAE routing matches the inner frame only. To prevent termination of * unrelated tunnels, isolate downstream tunnel sessions using * dedicated extension ids. */ if (session_needs_tunnel_isolation(hws)) pae_extid_set_tunnel( &hws->rt_entry.routeEntry.pattern.nRoutExtId, hws->rt_entry.routeEntry.action.nTunnelIndex); if (avm_pa_first_egress(s)->type == avm_pa_egresstype_local && (s->ingress.pkttype == (AVM_PA_PKTTYPE_IPV4 | IPPROTO_TCP))) { rv = lro_setup(hws); if (rv != HWPA_BACKEND_SUCCESS && rv != HWPA_BACKEND_ERR_LRO_FULL) goto errout; } switch (pae_cmd(GSW_ROUTE_ENTRY_ADD, &hws->rt_entry)) { case 1: /* 'success' according to gsw_pae.c */ case GSW_statusOk: *handle_out = (unsigned long)hws; return HWPA_BACKEND_SUCCESS; case GSW_ROUTE_ERROR_MTU_FULL: rv = HWPA_BACKEND_ERR_GSW_MTU_FULL; break; case GSW_ROUTE_ERROR_PPPOE_FULL: rv = HWPA_BACKEND_ERR_GSW_PPPOE_FULL; break; case GSW_ROUTE_ERROR_RTP_FULL: rv = HWPA_BACKEND_ERR_GSW_RTP_FULL; break; case GSW_ROUTE_ERROR_IP_FULL: rv = HWPA_BACKEND_ERR_GSW_IP_FULL; break; case GSW_ROUTE_ERROR_MAC_FULL: rv = HWPA_BACKEND_ERR_GSW_MAC_FULL; break; case GSW_ROUTE_ERROR_RT_SESS_FULL: rv = HWPA_BACKEND_ERR_GSW_RT_SESS_FULL; break; case GSW_ROUTE_ERROR_RT_COLL_FULL: rv = HWPA_BACKEND_ERR_GSW_RT_COLL_FULL; break; default: rv = HWPA_BACKEND_ERR_GSW_UNKNOWN; break; } errout: for (i = 0; i < ARRAY_SIZE(hws->dtors); i++) if (hws->dtors[i]) hws->dtors[i](hws->dtor_args[i]); kfree(hws); return rv; } enum hwpa_backend_rv hwpa_backend_rem_session(unsigned long handle) { struct session *hws; enum hwpa_backend_rv rv; int i; hws = (void *)handle; if (!hws) { pr_err_once("session->hw_session == NULL (JZ-67633)\n"); return HWPA_BACKEND_ERR_HANDLE_INVALID; } rv = HWPA_BACKEND_SUCCESS; if (pae_cmd(GSW_ROUTE_ENTRY_DELETE, &hws->rt_entry) != GSW_statusOk) rv = HWPA_BACKEND_ERR_GSW_REMOVE; for (i = 0; i < ARRAY_SIZE(hws->dtors); i++) if (hws->dtors[i]) hws->dtors[i](hws->dtor_args[i]); kfree(hws); return rv; } enum hwpa_backend_rv hwpa_backend_stats(unsigned long handle, struct avm_pa_session_stats *stats_out) { struct session *hws; GSW_ROUTE_Session_Hit_t rthit; hws = (void *)handle; rthit.nRtIndex = hws->rt_entry.nRtIndex; rthit.eHitOper = GSW_ROUTE_HIT_N_CNTR_CLEAR; rthit.bHitStatus = 0; rthit.nSessCntr = 0; if (pae_cmd(GSW_ROUTE_SESSION_HIT_OP, &rthit) != GSW_statusOk) return HWPA_BACKEND_ERR_GSW_UNKNOWN; if (rthit.bHitStatus) { stats_out->validflags = AVM_PA_SESSION_STATS_VALID_HIT | AVM_PA_SESSION_STATS_VALID_BYTES; stats_out->tx_bytes = rthit.nSessCntr; } else { stats_out->validflags = 0; } return HWPA_BACKEND_SUCCESS; } int alloc_tx_channel(avm_pid_handle pid_handle) { struct net_device *netdev; netdev = hwpa_get_netdev(pid_handle); if (!netdev) { /* Assume this is a IPv4/v6 local PID and acknowledge the tx * channel. */ return 0; } dev_put(netdev); return -1; } int alloc_rx_channel(avm_pid_handle pid_handle) { struct net_device *netdev; dp_subif_t subif; int rv; netdev = hwpa_get_netdev(pid_handle); if (!netdev) { return -1; } if (offdp_ep_platform_data(netdev, &subif, sizeof(subif))) offdp_vep_register(netdev); rv = offdp_is_vep(netdev) ? 
0 : -1; dev_put(netdev); return rv; } static int free_pid_any(avm_pid_handle pid_handle) { struct net_device *netdev; dp_subif_t subif; int rv = 0; netdev = hwpa_get_netdev(pid_handle); if (!netdev) return -1; if (offdp_ep_platform_data(netdev, &subif, sizeof(subif))) if (offdp_vep_unregister(netdev)) rv = -1; dev_put(netdev); return rv; } int free_rx_channel(avm_pid_handle pid_handle) { return free_pid_any(pid_handle); } int free_tx_channel(avm_pid_handle pid_handle) { return free_pid_any(pid_handle); } static int localpid_start_xmit(struct sk_buff *skb, struct net_device *dev) { u32 session_handle; const struct avm_pa_session *session; u32 pid_handle; struct dma_rx_desc_0 *desc_0; desc_0 = (struct dma_rx_desc_0 *)&skb->DW0; skb->dev = dev; session = vap_to_session[desc_0->field.dest_sub_if_id >> VAP_OFFSET]; if (!session) { goto drop; } session_handle = session->session_handle; pid_handle = avm_pa_first_egress(session)->pid_handle; /* AVM PA expects simple Ethernet framing, but skb may have unexpected * VLAN modifications due to some EgVLAN mapping collissions. * Get around this by fixing the mac header relative to * the network header with an offset of ETH_HLEN. The MAC header data * is not relevant for this datapath, so we do not need to move data * around. * TODO If QinQ support is added it needs to be considered here. */ if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) skb_pull(skb, VLAN_HLEN); avm_pa_tx_channel_accelerated_packet(pid_handle, session_handle, skb); return 0; drop: dev_kfree_skb_any(skb); return -1; } static enum rx_handler_result localpid_rx_handler(struct sk_buff **skb) { skb_push(*skb, (*skb)->dev->hard_header_len); skb_queue_tail(&skb_not_accelerated, *skb); return RX_HANDLER_CONSUMED; } static struct net_device_ops localpid_netdev_ops = { .ndo_start_xmit = localpid_start_xmit }; static void setup_localpid_datapath(void) { /* Alloc this once and never free it. The netdev refcnt mechanism does * not work unless we register the device. This is merely a mockup, so * it should better not be exposed too much. But without refcnt we * would get into a race. */ if (!localpid_netdev) localpid_netdev = alloc_etherdev(0); BUG_ON(!localpid_netdev); /* do some of the stuff from init_dummy_netdev */ localpid_netdev->reg_state = NETREG_DUMMY; set_bit(__LINK_STATE_PRESENT, &localpid_netdev->state); set_bit(__LINK_STATE_START, &localpid_netdev->state); strlcpy(localpid_netdev->name, "pae_localpid", sizeof(localpid_netdev->name)); localpid_netdev->netdev_ops = &localpid_netdev_ops; localpid_netdev->rx_handler = localpid_rx_handler; BUG_ON(offdp_vep_register(localpid_netdev)); /* Claim the zero handle to cut some corners. 
See the TODO in * extract_actino() */ local_session_alloc(); } static void teardown_localpid_datapath(void) { if (!localpid_netdev) return; offdp_vep_unregister(localpid_netdev); localpid_netdev->reg_state = NETREG_UNINITIALIZED; free_netdev(localpid_netdev); local_session_free(0); localpid_netdev = 0; } /* tests */ static void mock_session_eth_pppoe_ipv4_udp(struct avm_pa_session *session_out); static void mock_session_eth_pppoe_ipv4_tcp(struct avm_pa_session *session_out); static void mock_session_eth_ipv4_udp(struct avm_pa_session *session_out); static void mock_session_eth_ipv4_tcp(struct avm_pa_session *session_out); static void mock_session_eth_ipv6_udp(struct avm_pa_session *session_out); static void mock_session_eth_ipv6_tcp(struct avm_pa_session *session_out); static int vlan_acquire_test_thread(void *arg) { int i; vlan_handle vhandles[100]; struct completion *compl = arg; memset(&vhandles[0], 0, sizeof(vhandles)); /* Round robin over vhandles[] with alternating acquire/release. */ for (i = 0; i < 1000 * ARRAY_SIZE(vhandles); i++) { int idx = i % ARRAY_SIZE(vhandles); /* Discriminate between error codes and vhandles. */ if (vhandles[idx]._entry <= 0) { u16 vid; get_random_bytes(&vid, sizeof(vid)); vid &= 0x1f; /* narrow range for more collisions */ vhandles[idx]._entry = -1; /* TODO unreliable */ vlan_acquire(0, vid, &vhandles[idx]); } else { vlan_release(vhandles[idx]); vhandles[idx]._entry = 0; } } /* Clean up remaining allocations. */ for (i = 0; i < ARRAY_SIZE(vhandles); i++) { if (vhandles[i]._entry > 0) { vlan_release(vhandles[i]); } } /* Signal waiting parent. */ complete(compl); while (!kthread_should_stop()) schedule(); return 0; } static ktd_ret_t vlan_acquire_test(void *arg) { struct task_struct *threads[4]; u16 portid, vid, i; struct completion compl; vlan_handle vhandles[ARRAY_SIZE(vlan_entries[0]) - 2]; /* Test the vid boundary check */ KTD_EXPECT(vlan_acquire(0, -1, &vhandles[0]) == HWPA_BACKEND_ERR_INTERNAL); KTD_EXPECT(vlan_acquire(0, VLAN_ENTRY_INVALID_VID, &vhandles[0]) == HWPA_BACKEND_ERR_INTERNAL); /* Hit internal VId for tag removal */ KTD_EXPECT(vlan_acquire(0, VLAN_ENTRY_REMOVE_TAG, &vhandles[0]) == HWPA_BACKEND_SUCCESS); /* Fill table for port 0. */ for (vid = 0; vid < ARRAY_SIZE(vhandles); vid++) { KTD_EXPECT(vlan_acquire(0, vid, &vhandles[vid]) == HWPA_BACKEND_SUCCESS); } /* Attempt to get one more. */ KTD_EXPECT(vlan_acquire(0, vid, &vhandles[0]) == HWPA_BACKEND_ERR_VLAN_FULL); /* Get second reference vid 0 */ KTD_EXPECT(vlan_acquire(0, 0, &vhandles[0]) == HWPA_BACKEND_SUCCESS); /* Release the two references */ KTD_EXPECT(vlan_release(vhandles[0]) == HWPA_BACKEND_SUCCESS); KTD_EXPECT(vlan_release(vhandles[0]) == HWPA_BACKEND_SUCCESS); /* Intentional double-free */ KTD_EXPECT(vlan_release(vhandles[0]) == HWPA_BACKEND_ERR_INTERNAL); /* We freed up a slot, so claiming a new vid should work now. */ KTD_EXPECT(vlan_acquire(0, vid, &vhandles[0]) == HWPA_BACKEND_SUCCESS); /* Clean up. */ for (i = 0; i < ARRAY_SIZE(vhandles); i++) { vlan_release(vhandles[i]); } /* Do some concurrent operations to check soundness of the atomic * accesses. */ init_completion(&compl); for (i = 0; i < ARRAY_SIZE(threads); i++) { threads[i] = kthread_create(vlan_acquire_test_thread, &compl, "hwpatest.%u", i); KTD_EXPECT(!IS_ERR(threads[i])); kthread_bind(threads[i], i); wake_up_process(threads[i]); } /* Wait for threads to finish */ for (i = 0; i < ARRAY_SIZE(threads); i++) KTD_EXPECT(wait_for_completion_interruptible_timeout( &compl, 10 * CONFIG_HZ) > 0); /* Shut threads down. 
*/ for (i = 0; i < ARRAY_SIZE(threads); i++) kthread_stop(threads[i]); /* All entries should be free, let's see if that's true! */ for (portid = 0; portid < ARRAY_SIZE(vlan_entries); portid++) { for (vid = 0; vid < ARRAY_SIZE(vhandles); vid++) { KTD_EXPECT(vlan_acquire(portid, vid, &vhandles[vid]) == HWPA_BACKEND_SUCCESS); } for (i = 0; i < ARRAY_SIZE(vhandles); i++) { vlan_release(vhandles[i]); } } return KTD_PASSED; } #define RESET() \ memset(&act, 0, sizeof(act)); \ memset(ig_match.hdrcopy, 0, sizeof(ig_match.hdrcopy)); \ memset(eg_match.hdrcopy, 0, sizeof(eg_match.hdrcopy)); static ktd_ret_t extract_action_routing_test(void *arg) { struct ethhdr *eg_ethh; GSW_ROUTE_Session_action_t act; struct avm_pa_pkt_match ig_match = { .pkttype = AVM_PA_PKTTYPE_IPV4, .match = { { .type = AVM_PA_ETH, .offset = 0 } }, .nmatch = 1 }; struct avm_pa_pkt_match eg_match = { .pkttype = AVM_PA_PKTTYPE_IPV4, .match = { { .type = AVM_PA_ETH, .offset = 0 } }, .nmatch = 1 }; eg_ethh = (void *)&eg_match.hdrcopy[0]; /* src + dst */ RESET(); memcpy(&eg_ethh->h_dest, "newdst", ETH_ALEN); memcpy(&eg_ethh->h_source, "newsrc", ETH_ALEN); extract_action_routing(&ig_match, &eg_match, &act); KTD_EXPECT(act.bMAC_DstEnable); KTD_EXPECT(act.bMAC_SrcEnable); KTD_EXPECT(!memcmp(&act.nDstMAC, &eg_ethh->h_dest, ETH_ALEN)); KTD_EXPECT(!memcmp(&act.nSrcMAC, &eg_ethh->h_source, ETH_ALEN)); KTD_EXPECT(act.eSessRoutingMode == GSW_ROUTE_MODE_ROUTING); /* src */ RESET(); memcpy(&eg_ethh->h_source, "newsrc", ETH_ALEN); extract_action_routing(&ig_match, &eg_match, &act); KTD_EXPECT(!act.bMAC_DstEnable); KTD_EXPECT(act.bMAC_SrcEnable); KTD_EXPECT(!memcmp(&act.nSrcMAC, &eg_ethh->h_source, ETH_ALEN)); KTD_EXPECT(act.eSessRoutingMode == GSW_ROUTE_MODE_ROUTING); /* dst */ RESET(); memcpy(&eg_ethh->h_dest, "newdst", ETH_ALEN); extract_action_routing(&ig_match, &eg_match, &act); KTD_EXPECT(act.bMAC_DstEnable); KTD_EXPECT(!act.bMAC_SrcEnable); KTD_EXPECT(!memcmp(&act.nDstMAC, &eg_ethh->h_dest, ETH_ALEN)); KTD_EXPECT(act.eSessRoutingMode == GSW_ROUTE_MODE_ROUTING); /* nothing */ RESET(); extract_action_routing(&ig_match, &eg_match, &act); KTD_EXPECT(!act.bMAC_DstEnable); KTD_EXPECT(!act.bMAC_SrcEnable); KTD_EXPECT(act.eSessRoutingMode == GSW_ROUTE_MODE_NULL); return KTD_PASSED; } static ktd_ret_t extract_action_nat_test_v6(void) { struct ipv6hdr *ig_iph, *eg_iph; u16 *eg_ports; GSW_ROUTE_Session_action_t act; const u16 nataddr[] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f }; struct avm_pa_pkt_match ig_match = { .pkttype = AVM_PA_PKTTYPE_IPV6, .match = { { .type = AVM_PA_IPV6, .offset = 0 }, { .type = AVM_PA_PORTS, .offset = sizeof(struct ipv6hdr) } }, .nmatch = 2 }; struct avm_pa_pkt_match eg_match = { .pkttype = AVM_PA_PKTTYPE_IPV6, .match = { { .type = AVM_PA_IPV6, .offset = 0 }, { .type = AVM_PA_PORTS, .offset = sizeof(struct ipv6hdr) } }, .nmatch = 2 }; ig_iph = (void *)&ig_match.hdrcopy[0]; eg_iph = (void *)&eg_match.hdrcopy[0]; eg_ports = (void *)&eg_match.hdrcopy[sizeof(struct ipv6hdr)]; /* snat + dnat */ RESET(); memcpy(&eg_iph->daddr, nataddr, 16); memcpy(&eg_iph->saddr, nataddr, 16); KTD_EXPECT(extract_action_nat(&ig_match, &eg_match, &act) == HWPA_BACKEND_ERR_NAT_CONFLICT); /* snapt + dnat */ RESET(); memcpy(&eg_iph->daddr, nataddr, 16); eg_ports[0] = 0xabba; KTD_EXPECT(extract_action_nat(&ig_match, &eg_match, &act) == HWPA_BACKEND_ERR_NAT_CONFLICT); /* nothing */ RESET(); KTD_EXPECT(extract_action_nat(&ig_match, &eg_match, &act) == HWPA_BACKEND_SUCCESS); 
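/* act was zeroed by RESET(); with identical ingress and egress headers
 * extract_action_nat() must not select any NAT mode, so the routing mode
 * stays 0.
 */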
KTD_EXPECT(act.eSessRoutingMode == 0); /* dnat */ RESET(); memcpy(&eg_iph->daddr, nataddr, 16); KTD_EXPECT(extract_action_nat(&ig_match, &eg_match, &act) == HWPA_BACKEND_SUCCESS); KTD_EXPECT(act.eSessRoutingMode == GSW_ROUTE_MODE_NAT); KTD_EXPECT(act.eSessDirection == GSW_ROUTE_DIRECTION_DNSTREAM); KTD_EXPECT(!memcmp(&act.nNATIPaddr, &eg_iph->daddr, 16)); /* snat */ RESET(); memcpy(&eg_iph->saddr, nataddr, 16); KTD_EXPECT(extract_action_nat(&ig_match, &eg_match, &act) == HWPA_BACKEND_SUCCESS); KTD_EXPECT(act.eSessRoutingMode == GSW_ROUTE_MODE_NAT); KTD_EXPECT(act.eSessDirection == GSW_ROUTE_DIRECTION_UPSTREAM); KTD_EXPECT(!memcmp(&act.nNATIPaddr, &eg_iph->saddr, 16)); /* snat + snapt */ RESET(); memcpy(&eg_iph->saddr, nataddr, 16); eg_ports[0] = 0xabba; KTD_EXPECT(extract_action_nat(&ig_match, &eg_match, &act) == HWPA_BACKEND_SUCCESS); KTD_EXPECT(act.eSessRoutingMode == GSW_ROUTE_MODE_NAPT); KTD_EXPECT(act.eSessDirection == GSW_ROUTE_DIRECTION_UPSTREAM); KTD_EXPECT(!memcmp(&act.nNATIPaddr, &eg_iph->saddr, 16)); KTD_EXPECT(!memcmp(&act.nTcpUdpPort, &eg_ports[0], 2)); /* snapt */ RESET(); memcpy(&ig_iph->saddr, nataddr, 16); memcpy(&eg_iph->saddr, nataddr, 16); eg_ports[0] = 0xabba; KTD_EXPECT(extract_action_nat(&ig_match, &eg_match, &act) == HWPA_BACKEND_SUCCESS); KTD_EXPECT(act.eSessRoutingMode == GSW_ROUTE_MODE_NAPT); KTD_EXPECT(act.eSessDirection == GSW_ROUTE_DIRECTION_UPSTREAM); KTD_EXPECT(!memcmp(&act.nNATIPaddr, &ig_iph->saddr, 16)); KTD_EXPECT(!memcmp(&act.nNATIPaddr, &eg_iph->saddr, 16)); KTD_EXPECT(!memcmp(&act.nTcpUdpPort, &eg_ports[0], 2)); return KTD_PASSED; } static ktd_ret_t extract_action_nat_test_v4(void) { struct iphdr *ig_iph, *eg_iph; u16 *eg_ports; GSW_ROUTE_Session_action_t act; struct avm_pa_pkt_match ig_match = { .pkttype = AVM_PA_PKTTYPE_IPV4, .match = { { .type = AVM_PA_IPV4, .offset = 0 }, { .type = AVM_PA_PORTS, .offset = sizeof(struct iphdr) } }, .nmatch = 2 }; struct avm_pa_pkt_match eg_match = { .pkttype = AVM_PA_PKTTYPE_IPV4, .match = { { .type = AVM_PA_IPV4, .offset = 0 }, { .type = AVM_PA_PORTS, .offset = sizeof(struct iphdr) } }, .nmatch = 2 }; ig_iph = (void *)&ig_match.hdrcopy[0]; eg_iph = (void *)&eg_match.hdrcopy[0]; eg_ports = (void *)&eg_match.hdrcopy[sizeof(struct iphdr)]; /* snat + dnat */ RESET(); eg_iph->daddr = 0x3a2b1c0d; eg_iph->saddr = 0x3a2b1c0d; KTD_EXPECT(extract_action_nat(&ig_match, &eg_match, &act) == HWPA_BACKEND_ERR_NAT_CONFLICT); /* snapt + dnat */ RESET(); eg_iph->daddr = 0x3a2b1c0d; eg_ports[0] = 0xabba; KTD_EXPECT(extract_action_nat(&ig_match, &eg_match, &act) == HWPA_BACKEND_ERR_NAT_CONFLICT); /* snapt + dnapt */ RESET(); eg_ports[0] = 0xabba; eg_ports[1] = 0xb00b; KTD_EXPECT(extract_action_nat(&ig_match, &eg_match, &act) == HWPA_BACKEND_ERR_NAT_CONFLICT); /* nothing */ RESET(); KTD_EXPECT(extract_action_nat(&ig_match, &eg_match, &act) == HWPA_BACKEND_SUCCESS); KTD_EXPECT(act.eSessRoutingMode == 0); /* dnat */ RESET(); eg_iph->daddr = 0x3a2b1c0d; KTD_EXPECT(extract_action_nat(&ig_match, &eg_match, &act) == HWPA_BACKEND_SUCCESS); KTD_EXPECT(act.eSessRoutingMode == GSW_ROUTE_MODE_NAT); KTD_EXPECT(act.eSessDirection == GSW_ROUTE_DIRECTION_DNSTREAM); KTD_EXPECT(!memcmp(&act.nNATIPaddr, &eg_iph->daddr, 4)); /* snat */ RESET(); eg_iph->saddr = 0x3a2b1c0d; KTD_EXPECT(extract_action_nat(&ig_match, &eg_match, &act) == HWPA_BACKEND_SUCCESS); KTD_EXPECT(act.eSessRoutingMode == GSW_ROUTE_MODE_NAT); KTD_EXPECT(act.eSessDirection == GSW_ROUTE_DIRECTION_UPSTREAM); KTD_EXPECT(!memcmp(&act.nNATIPaddr, &eg_iph->saddr, 4)); /* snat + snapt */ 
RESET(); eg_iph->saddr = 0x3a2b1c0d; eg_ports[0] = 0xabba; KTD_EXPECT(extract_action_nat(&ig_match, &eg_match, &act) == HWPA_BACKEND_SUCCESS); KTD_EXPECT(act.eSessRoutingMode == GSW_ROUTE_MODE_NAPT); KTD_EXPECT(act.eSessDirection == GSW_ROUTE_DIRECTION_UPSTREAM); KTD_EXPECT(!memcmp(&act.nNATIPaddr, &eg_iph->saddr, 4)); KTD_EXPECT(!memcmp(&act.nTcpUdpPort, &eg_ports[0], 2)); /* snapt */ RESET(); ig_iph->saddr = 0x3a2b1c0d; eg_iph->saddr = 0x3a2b1c0d; eg_ports[0] = 0xabba; KTD_EXPECT(extract_action_nat(&ig_match, &eg_match, &act) == HWPA_BACKEND_SUCCESS); KTD_EXPECT(act.eSessRoutingMode == GSW_ROUTE_MODE_NAPT); KTD_EXPECT(act.eSessDirection == GSW_ROUTE_DIRECTION_UPSTREAM); KTD_EXPECT(!memcmp(&act.nNATIPaddr, &ig_iph->saddr, 4)); KTD_EXPECT(!memcmp(&act.nNATIPaddr, &eg_iph->saddr, 4)); KTD_EXPECT(!memcmp(&act.nTcpUdpPort, &eg_ports[0], 2)); return KTD_PASSED; } #undef RESET static ktd_ret_t extract_action_pppoe_test(void *arg) { GSW_ROUTE_Session_action_t action; struct avm_pa_session *s_pppoe, *s_nopppoe; struct avm_pa_pkt_match *ig_pppoe, *eg_pppoe; struct avm_pa_pkt_match *ig_nopppoe, *eg_nopppoe; struct pppoe_hdr *pppoeh_eg, *pppoeh_ig; s_pppoe = kzalloc(sizeof(*s_pppoe), GFP_KERNEL); s_nopppoe = kzalloc(sizeof(*s_nopppoe), GFP_KERNEL); mock_session_eth_pppoe_ipv4_tcp(s_pppoe); ig_pppoe = &s_pppoe->ingress; eg_pppoe = &avm_pa_first_egress(s_pppoe)->match; mock_session_eth_ipv4_tcp(s_nopppoe); ig_nopppoe = &s_nopppoe->ingress; eg_nopppoe = &avm_pa_first_egress(s_nopppoe)->match; pppoeh_eg = (void *)hwpa_get_hdr(eg_pppoe, AVM_PA_PPPOE); pppoeh_ig = (void *)hwpa_get_hdr(ig_pppoe, AVM_PA_PPPOE); memset(&action, 0, sizeof(action)); /* no pppoe no worries */ KTD_EXPECT(extract_action_pppoe(ig_nopppoe, eg_nopppoe, &action) == HWPA_BACKEND_SUCCESS); KTD_EXPECT(!action.bPPPoEmode); /* unchanged pppoe on both ends */ KTD_EXPECT(extract_action_pppoe(ig_pppoe, eg_pppoe, &action) == HWPA_BACKEND_SUCCESS); KTD_EXPECT(!action.bPPPoEmode); /* pppoe on both ends with modified sid */ pppoeh_eg->sid += 1; KTD_EXPECT(extract_action_pppoe(ig_pppoe, eg_pppoe, &action) == HWPA_BACKEND_ERR_PPPOE_MODIFY); KTD_EXPECT(!action.bPPPoEmode); /* pppoe added on egress */ get_random_bytes(&pppoeh_eg->sid, sizeof(pppoeh_eg->sid)); KTD_EXPECT(extract_action_pppoe(ig_nopppoe, eg_pppoe, &action) == HWPA_BACKEND_SUCCESS); KTD_EXPECT(action.bPPPoEmode); KTD_EXPECT(action.eSessDirection == GSW_ROUTE_DIRECTION_UPSTREAM); KTD_EXPECT(action.nPPPoESessId == pppoeh_eg->sid); memset(&action, 0, sizeof(action)); /* pppoe added on ingress */ get_random_bytes(&pppoeh_ig->sid, sizeof(pppoeh_ig->sid)); KTD_EXPECT(extract_action_pppoe(ig_pppoe, eg_nopppoe, &action) == HWPA_BACKEND_SUCCESS); KTD_EXPECT(action.bPPPoEmode); KTD_EXPECT(action.eSessDirection == GSW_ROUTE_DIRECTION_DNSTREAM); KTD_EXPECT(action.nPPPoESessId == pppoeh_ig->sid); memset(&action, 0, sizeof(action)); kfree(s_nopppoe); kfree(s_pppoe); return KTD_PASSED; } static ktd_ret_t extract_action_nat_test(void *arg) { ktd_ret_t rv; rv = extract_action_nat_test_v4(); if (rv != KTD_PASSED) return rv; rv = extract_action_nat_test_v6(); if (rv != KTD_PASSED) return rv; return KTD_PASSED; } static ktd_ret_t extract_pattern_test(void *arg) { struct iphdr *iph; struct ipv6hdr *ipv6h; u16 *ports; GSW_ROUTE_Session_pattern_t pattern; struct avm_pa_session s = { .ingress = { .pkttype = AVM_PA_PKTTYPE_IPV4, .match = { { .type = AVM_PA_IPV4, .offset = 0 }, { .type = AVM_PA_PORTS, .offset = sizeof(struct iphdr) } }, .nmatch = 2 } }; /* test ipv4 */ s.ingress.pkttype = AVM_PA_PKTTYPE_IPV4; 
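/* The match[] offsets describe where each header starts inside hdrcopy;
 * the iph/ports pointers below are aimed at exactly those offsets.
 */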
s.ingress.match[0].type = AVM_PA_IPV4; s.ingress.match[1].offset = sizeof(struct iphdr); iph = (void *)&s.ingress.hdrcopy[0]; ports = (void *)&s.ingress.hdrcopy[sizeof(*iph)]; iph->daddr = REPEAT_BYTE('d'); iph->saddr = REPEAT_BYTE('s'); ports[0] = 0xaaaa; ports[1] = 0xbbbb; memset(&pattern, 0xff, sizeof(pattern)); extract_pattern(&s, &pattern); KTD_EXPECT(pattern.bValid); KTD_EXPECT(pattern.eIpType == GSW_RT_IP_V4); KTD_EXPECT(pattern.nSrcIP.nIPv4 == iph->saddr); KTD_EXPECT(pattern.nDstIP.nIPv4 == iph->daddr); KTD_EXPECT(pattern.nSrcPort == ports[0]); KTD_EXPECT(pattern.nDstPort == ports[1]); /* test ipv6 */ s.ingress.pkttype = AVM_PA_PKTTYPE_IPV6; s.ingress.match[0].type = AVM_PA_IPV6; s.ingress.match[1].offset = sizeof(struct ipv6hdr); ipv6h = (void *)&s.ingress.hdrcopy[0]; ports = (void *)&s.ingress.hdrcopy[sizeof(*ipv6h)]; memset(&ipv6h->daddr, 'd', 16); memset(&ipv6h->saddr, 's', 16); ports[0] = 0xaaaa; ports[1] = 0xbbbb; memset(&pattern, 0xff, sizeof(pattern)); extract_pattern(&s, &pattern); KTD_EXPECT(pattern.bValid); KTD_EXPECT(pattern.eIpType == GSW_RT_IP_V6); KTD_EXPECT(!memcmp(&pattern.nSrcIP.nIPv6, &ipv6h->saddr, 16)); KTD_EXPECT(!memcmp(&pattern.nDstIP.nIPv6, &ipv6h->daddr, 16)); KTD_EXPECT(pattern.nSrcPort == ports[0]); KTD_EXPECT(pattern.nDstPort == ports[1]); return KTD_PASSED; } static int selftest_start_xmit(struct sk_buff *skb, struct net_device *dev) { skb_queue_tail(&skb_accelerated, skb); return 0; } static enum rx_handler_result selftest_rx_handler(struct sk_buff **skb) { skb_push(*skb, (*skb)->dev->hard_header_len); skb_queue_tail(&skb_not_accelerated, *skb); return RX_HANDLER_CONSUMED; } static struct net_device_ops selftest_netdev_ops = { .ndo_start_xmit = selftest_start_xmit }; static void setup_selftest_datapath(void) { skb_queue_head_init(&skb_not_accelerated); skb_queue_head_init(&skb_accelerated); /* Alloc this once and never free it. The netdev refcnt mechanism does * not work unless we register the device. This is merely a mockup, so * it should better not be exposed too much. But without refcnt we * would get into a race. */ if (!ktd_netdev) ktd_netdev = alloc_etherdev(0); BUG_ON(!ktd_netdev); /* do some of the stuff from init_dummy_netdev */ ktd_netdev->reg_state = NETREG_DUMMY; set_bit(__LINK_STATE_PRESENT, &ktd_netdev->state); set_bit(__LINK_STATE_START, &ktd_netdev->state); strlcpy(ktd_netdev->name, "pae_selftest", sizeof(ktd_netdev->name)); ktd_netdev->netdev_ops = &selftest_netdev_ops; ktd_netdev->rx_handler = selftest_rx_handler; BUG_ON(offdp_vep_register(ktd_netdev)); /* Fake pid for netdev to be found by get_netdev() during session * creation. Select an out-of-range value and make sure there is such * a thing. 
*/ BUILD_BUG_ON((avm_pid_handle)(HWPA_PRIVATE_PID) == 0); AVM_PA_DEVINFO(ktd_netdev)->pid_handle = HWPA_PRIVATE_PID; } static void teardown_selftest_datapath(void) { offdp_vep_unregister(ktd_netdev); } static enum hwpa_backend_rv pae_inject(struct sk_buff *skb) { skb->dev = ktd_netdev; offdp_vep_fast_rcv(skb); return HWPA_BACKEND_SUCCESS; } static struct sk_buff *build_packet_ipv6_tcp(void) { struct sk_buff *skb; const unsigned int datalen = 500; skb = hwpa_pkt_alloc(datalen); skb = hwpa_pkt_push_tcp(skb); skb = hwpa_pkt_push_ipv6(skb); skb = hwpa_pkt_push_eth(skb); return skb; } static struct sk_buff *build_packet_ipv6_udp(void) { struct sk_buff *skb; const unsigned int datalen = 500; skb = hwpa_pkt_alloc(datalen); skb = hwpa_pkt_push_udp(skb); skb = hwpa_pkt_push_ipv6(skb); skb = hwpa_pkt_push_eth(skb); return skb; } static struct sk_buff *build_packet_ipv4_tcp(void) { struct sk_buff *skb; const unsigned int datalen = 500; skb = hwpa_pkt_alloc(datalen); skb = hwpa_pkt_push_tcp(skb); skb = hwpa_pkt_push_ipv4(skb); skb = hwpa_pkt_push_eth(skb); return skb; } static struct sk_buff *build_packet_ipv4_udp(void) { struct sk_buff *skb; const unsigned int datalen = 500; skb = hwpa_pkt_alloc(datalen); skb = hwpa_pkt_push_udp(skb); skb = hwpa_pkt_push_ipv4(skb); skb = hwpa_pkt_push_eth(skb); return skb; } static struct sk_buff *build_packet_pppoe_ipv4_tcp(void) { struct sk_buff *skb; const unsigned int datalen = 500; skb = hwpa_pkt_alloc(datalen); skb = hwpa_pkt_push_tcp(skb); skb = hwpa_pkt_push_ipv4(skb); skb = hwpa_pkt_push_pppoe(skb); skb = hwpa_pkt_push_eth(skb); return skb; } static struct sk_buff *build_packet_vlan_ipv4_tcp(void) { struct sk_buff *skb; const unsigned int datalen = 500; skb = hwpa_pkt_alloc(datalen); skb = hwpa_pkt_push_tcp(skb); skb = hwpa_pkt_push_ipv4(skb); skb = hwpa_pkt_push_vlan(skb); skb = hwpa_pkt_push_eth(skb); return skb; } static void mock_session_put_type(struct avm_pa_pkt_match *match, unsigned char type, unsigned char len) { match->match[match->nmatch].type = type; match->match[match->nmatch + 1].offset = match->match[match->nmatch].offset + len; match->nmatch += 1; } static void mock_session_eth_vlan_ipv4_tcp(struct avm_pa_session *session_out) { memset(session_out, 0, sizeof(*session_out)); session_out->ingress_pid_handle = HWPA_PRIVATE_PID; session_out->ingress.pkttype = AVM_PA_PKTTYPE_IPV4 | IPPROTO_TCP; mock_session_put_type(&session_out->ingress, AVM_PA_ETH, sizeof(struct ethhdr)); mock_session_put_type(&session_out->ingress, AVM_PA_VLAN, sizeof(struct vlan_hdr)); mock_session_put_type(&session_out->ingress, AVM_PA_IPV4, sizeof(struct iphdr)); mock_session_put_type(&session_out->ingress, AVM_PA_PORTS, sizeof(struct udphdr)); avm_pa_first_egress(session_out)->match = session_out->ingress; avm_pa_first_egress(session_out)->mtu = 1500; avm_pa_first_egress(session_out)->pid_handle = HWPA_PRIVATE_PID; } static void mock_session_eth_ipv4_ipv6_udp(struct avm_pa_session *session_out) { memset(session_out, 0, sizeof(*session_out)); session_out->ingress_pid_handle = HWPA_PRIVATE_PID; session_out->ingress.pkttype = AVM_PA_PKTTYPE_IPV4ENCAP | AVM_PA_PKTTYPE_IPV6 | IPPROTO_UDP; mock_session_put_type(&session_out->ingress, AVM_PA_ETH, sizeof(struct ethhdr)); mock_session_put_type(&session_out->ingress, AVM_PA_IPV4, sizeof(struct iphdr)); mock_session_put_type(&session_out->ingress, AVM_PA_IPV6, sizeof(struct ipv6hdr)); mock_session_put_type(&session_out->ingress, AVM_PA_PORTS, sizeof(struct udphdr)); avm_pa_first_egress(session_out)->match = session_out->ingress; 
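/* The egress match starts as an exact copy of the ingress match; individual tests then patch the egress hdrcopy to model NAT, VLAN, PPPoE or tunnel deltas. */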
avm_pa_first_egress(session_out)->mtu = 1500; avm_pa_first_egress(session_out)->pid_handle = HWPA_PRIVATE_PID; } static void mock_session_eth_ipv6_ipv4_udp(struct avm_pa_session *session_out) { memset(session_out, 0, sizeof(*session_out)); session_out->ingress_pid_handle = HWPA_PRIVATE_PID; session_out->ingress.pkttype = AVM_PA_PKTTYPE_IPV6ENCAP | AVM_PA_PKTTYPE_IPV4 | IPPROTO_UDP; mock_session_put_type(&session_out->ingress, AVM_PA_ETH, sizeof(struct ethhdr)); mock_session_put_type(&session_out->ingress, AVM_PA_IPV6, sizeof(struct ipv6hdr)); mock_session_put_type(&session_out->ingress, AVM_PA_IPV4, sizeof(struct iphdr)); mock_session_put_type(&session_out->ingress, AVM_PA_PORTS, sizeof(struct udphdr)); avm_pa_first_egress(session_out)->match = session_out->ingress; avm_pa_first_egress(session_out)->mtu = 1500; avm_pa_first_egress(session_out)->pid_handle = HWPA_PRIVATE_PID; } static void mock_session_eth_ipv4_ipv6_tcp(struct avm_pa_session *session_out) { mock_session_eth_ipv4_ipv6_udp(session_out); session_out->ingress.pkttype = avm_pa_first_egress(session_out)->match.pkttype = AVM_PA_PKTTYPE_IPV4ENCAP | AVM_PA_PKTTYPE_IPV6 | IPPROTO_TCP; } static void mock_session_eth_ipv6_ipv4_tcp(struct avm_pa_session *session_out) { mock_session_eth_ipv6_ipv4_udp(session_out); session_out->ingress.pkttype = avm_pa_first_egress(session_out)->match.pkttype = AVM_PA_PKTTYPE_IPV6ENCAP | AVM_PA_PKTTYPE_IPV4 | IPPROTO_TCP; } static void mock_session_eth_pppoe_ipv4_udp(struct avm_pa_session *session_out) { memset(session_out, 0, sizeof(*session_out)); session_out->ingress_pid_handle = HWPA_PRIVATE_PID; session_out->ingress.pkttype = AVM_PA_PKTTYPE_IPV4 | IPPROTO_UDP; mock_session_put_type(&session_out->ingress, AVM_PA_ETH, sizeof(struct ethhdr)); mock_session_put_type(&session_out->ingress, AVM_PA_PPPOE, PPPOE_SES_HLEN - 2); mock_session_put_type(&session_out->ingress, AVM_PA_PPP, 2); mock_session_put_type(&session_out->ingress, AVM_PA_IPV4, sizeof(struct iphdr)); mock_session_put_type(&session_out->ingress, AVM_PA_PORTS, sizeof(struct udphdr)); avm_pa_first_egress(session_out)->match = session_out->ingress; avm_pa_first_egress(session_out)->mtu = 1492; avm_pa_first_egress(session_out)->pid_handle = HWPA_PRIVATE_PID; } static void mock_session_eth_pppoe_ipv4_tcp(struct avm_pa_session *session_out) { mock_session_eth_pppoe_ipv4_udp(session_out); session_out->ingress.pkttype = avm_pa_first_egress(session_out)->match.pkttype = AVM_PA_PKTTYPE_IPV4 | IPPROTO_TCP; } static void mock_session_eth_ipv4_udp(struct avm_pa_session *session_out) { memset(session_out, 0, sizeof(*session_out)); session_out->ingress_pid_handle = HWPA_PRIVATE_PID; session_out->ingress.pkttype = AVM_PA_PKTTYPE_IPV4 | IPPROTO_UDP; mock_session_put_type(&session_out->ingress, AVM_PA_ETH, sizeof(struct ethhdr)); mock_session_put_type(&session_out->ingress, AVM_PA_IPV4, sizeof(struct iphdr)); mock_session_put_type(&session_out->ingress, AVM_PA_PORTS, sizeof(struct udphdr)); avm_pa_first_egress(session_out)->match = session_out->ingress; avm_pa_first_egress(session_out)->mtu = 1500; avm_pa_first_egress(session_out)->pid_handle = HWPA_PRIVATE_PID; } static void mock_session_eth_ipv4_tcp(struct avm_pa_session *session_out) { mock_session_eth_ipv4_udp(session_out); session_out->ingress.pkttype = avm_pa_first_egress(session_out)->match.pkttype = AVM_PA_PKTTYPE_IPV4 | IPPROTO_TCP; } static void mock_session_eth_ipv6_udp(struct avm_pa_session *session_out) { memset(session_out, 0, sizeof(*session_out)); session_out->ingress_pid_handle = 
HWPA_PRIVATE_PID; session_out->ingress.pkttype = AVM_PA_PKTTYPE_IPV6 | IPPROTO_UDP; mock_session_put_type(&session_out->ingress, AVM_PA_ETH, sizeof(struct ethhdr)); mock_session_put_type(&session_out->ingress, AVM_PA_IPV6, sizeof(struct ipv6hdr)); mock_session_put_type(&session_out->ingress, AVM_PA_PORTS, sizeof(struct udphdr)); avm_pa_first_egress(session_out)->match = session_out->ingress; avm_pa_first_egress(session_out)->mtu = 1500; avm_pa_first_egress(session_out)->pid_handle = HWPA_PRIVATE_PID; } static void mock_session_eth_ipv6_tcp(struct avm_pa_session *session_out) { mock_session_eth_ipv6_udp(session_out); session_out->ingress.pkttype = avm_pa_first_egress(session_out)->match.pkttype = AVM_PA_PKTTYPE_IPV6 | IPPROTO_TCP; } struct pae_test_data { void (*mock_session)(struct avm_pa_session *session_out); struct sk_buff *(*build_packet)(void); }; const static struct pae_test_data pae_test_eth_ipv4_tcp = { .mock_session = mock_session_eth_ipv4_tcp, .build_packet = build_packet_ipv4_tcp }; const static struct pae_test_data pae_test_eth_ipv4_udp = { .mock_session = mock_session_eth_ipv4_udp, .build_packet = build_packet_ipv4_udp }; const static struct pae_test_data pae_test_eth_ipv6_tcp = { .mock_session = mock_session_eth_ipv6_tcp, .build_packet = build_packet_ipv6_tcp }; const static struct pae_test_data pae_test_eth_ipv6_udp = { .mock_session = mock_session_eth_ipv6_udp, .build_packet = build_packet_ipv6_udp }; static struct sk_buff *build_packet_ipv6_ipv4_tcp(void) { struct sk_buff *skb; const unsigned int datalen = 500; skb = hwpa_pkt_alloc(datalen); skb = hwpa_pkt_push_tcp(skb); skb = hwpa_pkt_push_ipv4(skb); skb = hwpa_pkt_push_ipv6(skb); skb = hwpa_pkt_push_eth(skb); return skb; } static struct sk_buff *build_packet_ipv4_ipv6_tcp(void) { struct sk_buff *skb; const unsigned int datalen = 500; skb = hwpa_pkt_alloc(datalen); skb = hwpa_pkt_push_tcp(skb); skb = hwpa_pkt_push_ipv6(skb); skb = hwpa_pkt_push_ipv4(skb); skb = hwpa_pkt_push_eth(skb); return skb; } static ktd_ret_t _pae_test_rtmode(void *arg) { struct avm_pa_session *s; struct sk_buff *skb, *skb_result; struct avm_pa_pkt_match *eg_match; struct ethhdr *ethh; struct tcphdr *tcpudph; unsigned long sess_handle; const struct pae_test_data *test_data; __sum16 *tcpudp_csum, *ip_csum, null_csum; u32 *ip_daddr, ip_addrlen; u8 *ip_ttl; test_data = arg; s = kzalloc(sizeof(*s), GFP_ATOMIC); test_data->mock_session(s); skb = test_data->build_packet(); eg_match = &avm_pa_first_egress(s)->match; ethh = (void *)hwpa_get_hdr(eg_match, AVM_PA_ETH); if (AVM_PA_PKTTYPE_IP_VERSION(s->ingress.pkttype) == 4) { struct iphdr *iph; iph = (void *)hwpa_get_hdr(eg_match, AVM_PA_IPV4); ip_addrlen = sizeof(iph->daddr); ip_csum = &iph->check; ip_daddr = &iph->daddr; ip_ttl = &iph->ttl; } else { struct ipv6hdr *iph; iph = (void *)hwpa_get_hdr(eg_match, AVM_PA_IPV6); ip_addrlen = sizeof(iph->daddr); ip_csum = &null_csum; /* no csum for v6 */ ip_daddr = (void *)&iph->daddr; ip_ttl = &iph->hop_limit; } tcpudph = (void *)hwpa_get_hdr(eg_match, AVM_PA_PORTS); tcpudp_csum = (__sum16 *)((u8 *)tcpudph + skb->csum_offset); /* Check for unexpected packets. This might fail due to other tests * failing earlier. */ KTD_EXPECT(skb_queue_len(&skb_not_accelerated) == 0); KTD_EXPECT(skb_queue_len(&skb_accelerated) == 0); /* Fill hdrcpy data. */ eg_match->hdrlen = min(sizeof(eg_match->hdrcopy), (size_t)skb->len); memcpy(&s->ingress.hdrcopy[0], skb->data, eg_match->hdrlen); memcpy(&eg_match->hdrcopy[0], skb->data, eg_match->hdrlen); /* Test the non-accelerated path. 
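 * Without an installed session the clone must come back unmodified via the slow-path rx handler (skb_not_accelerated).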
*/ KTD_EXPECT(pae_inject(skb_clone(skb, GFP_ATOMIC)) == HWPA_BACKEND_SUCCESS); msleep(20); /* wait for packet to pass through PAE */ KTD_EXPECT(skb_queue_len(&skb_not_accelerated) == 1); KTD_EXPECT(skb_queue_len(&skb_accelerated) == 0); skb_result = skb_dequeue(&skb_not_accelerated); KTD_EXPECT(skb->len == skb_result->len); KTD_EXPECT(!memcmp(skb->data, skb_result->data, skb->len)); kfree_skb(skb_result); /* Do simple forwarding without modifications. * Expect no modifications at all. */ KTD_EXPECT(hwpa_backend_add_session(s, &sess_handle) == HWPA_BACKEND_SUCCESS); KTD_EXPECT(pae_inject(skb_clone(skb, GFP_ATOMIC)) == HWPA_BACKEND_SUCCESS); msleep(20); /* wait for packet to pass through PAE */ KTD_EXPECT(skb_queue_len(&skb_not_accelerated) == 0); KTD_EXPECT(skb_queue_len(&skb_accelerated) == 1); skb_result = skb_dequeue(&skb_accelerated); KTD_EXPECT(skb->len == skb_result->len); KTD_EXPECT(!memcmp(skb->data, skb_result->data, skb->len)); KTD_EXPECT(hwpa_backend_rem_session(sess_handle) == HWPA_BACKEND_SUCCESS); kfree_skb(skb_result); /* Do routing but no NAT. * This should touch Ethernet and IP ttl+csum. */ memcpy(&eg_match->hdrcopy[0], skb->data, eg_match->hdrlen); { /* modify egress hdr */ eth_random_addr(&ethh->h_dest[0]); eth_random_addr(&ethh->h_source[0]); csum_replace2(ip_csum, htons(*ip_ttl << 8), htons((*ip_ttl - 1) << 8)); *ip_ttl -= 1; } KTD_EXPECT(hwpa_backend_add_session(s, &sess_handle) == HWPA_BACKEND_SUCCESS); KTD_EXPECT(pae_inject(skb_clone(skb, GFP_ATOMIC)) == HWPA_BACKEND_SUCCESS); msleep(20); /* wait for packet to pass through PAE */ KTD_EXPECT(skb_queue_len(&skb_not_accelerated) == 0); KTD_EXPECT(skb_queue_len(&skb_accelerated) == 1); skb_result = skb_dequeue(&skb_accelerated); KTD_EXPECT(skb->len == skb_result->len); KTD_EXPECT(!memcmp(&eg_match->hdrcopy[0], skb_result->data, eg_match->hdrlen)); KTD_EXPECT(hwpa_backend_rem_session(sess_handle) == HWPA_BACKEND_SUCCESS); kfree_skb(skb_result); /* revert egress modifications */ memcpy(&eg_match->hdrcopy[0], skb->data, eg_match->hdrlen); /* Do DNAT but leave the ports alone. * This should touch IP dst, TTL and csum. */ memcpy(&eg_match->hdrcopy[0], skb->data, eg_match->hdrlen); { /* modify egress hdr */ u32 new_daddr[4]; int i; csum_replace2(ip_csum, htons(*ip_ttl << 8), htons((*ip_ttl - 1) << 8)); *ip_ttl -= 1; get_random_bytes(&new_daddr, sizeof(new_daddr)); csum_replace4(ip_csum, ip_daddr[0], new_daddr[0]); for (i = 0; i < (ip_addrlen / 4); i++) { csum_replace4(tcpudp_csum, ip_daddr[i], new_daddr[i]); } memcpy(ip_daddr, &new_daddr[0], ip_addrlen); } KTD_EXPECT(hwpa_backend_add_session(s, &sess_handle) == HWPA_BACKEND_SUCCESS); KTD_EXPECT(pae_inject(skb_clone(skb, GFP_ATOMIC)) == HWPA_BACKEND_SUCCESS); msleep(20); /* wait for packet to pass through PAE */ KTD_EXPECT(skb_queue_len(&skb_not_accelerated) == 0); KTD_EXPECT(skb_queue_len(&skb_accelerated) == 1); skb_result = skb_dequeue(&skb_accelerated); KTD_EXPECT(skb->len == skb_result->len); KTD_EXPECT(!memcmp(&eg_match->hdrcopy[0], skb_result->data, eg_match->hdrlen)); KTD_EXPECT(hwpa_backend_rem_session(sess_handle) == HWPA_BACKEND_SUCCESS); kfree_skb(skb_result); /* revert egress modifications */ memcpy(&eg_match->hdrcopy[0], skb->data, eg_match->hdrlen); /* Do SNAPT but keep the IP addresses. * This should touch IP TTL+CSUM and TCP/UDP CSUM+SPORT.
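 * The expected header is patched incrementally with csum_replace2() so both the IP and the TCP/UDP checksum stay consistent with the rewritten TTL and source port.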
*/ memcpy(&eg_match->hdrcopy[0], skb->data, eg_match->hdrlen); { /* modify egress hdr */ u16 new_source; csum_replace2(ip_csum, htons(*ip_ttl << 8), htons((*ip_ttl - 1) << 8)); *ip_ttl -= 1; get_random_bytes(&new_source, sizeof(new_source)); csum_replace2(tcpudp_csum, tcpudph->source, new_source); tcpudph->source = new_source; } KTD_EXPECT(hwpa_backend_add_session(s, &sess_handle) == HWPA_BACKEND_SUCCESS); KTD_EXPECT(pae_inject(skb_clone(skb, GFP_ATOMIC)) == HWPA_BACKEND_SUCCESS); msleep(20); /* wait for packet to pass through PAE */ KTD_EXPECT(skb_queue_len(&skb_not_accelerated) == 0); KTD_EXPECT(skb_queue_len(&skb_accelerated) == 1); skb_result = skb_dequeue(&skb_accelerated); KTD_EXPECT(skb->len == skb_result->len); KTD_EXPECT(!memcmp(&eg_match->hdrcopy[0], skb_result->data, eg_match->hdrlen)); KTD_EXPECT(hwpa_backend_rem_session(sess_handle) == HWPA_BACKEND_SUCCESS); kfree_skb(skb_result); kfree_skb(skb); kfree(s); return KTD_PASSED; } static ktd_ret_t pae_test_rtmode(void *arg) { ktd_ret_t ret; setup_selftest_datapath(); ret = _pae_test_rtmode(arg); teardown_selftest_datapath(); return ret; } #define PAE_EXPECT_ACCEL(skb, skb_expect) \ ({ \ struct sk_buff *skb_result; \ KTD_EXPECT(pae_inject(skb_clone((skb), GFP_ATOMIC)) == \ HWPA_BACKEND_SUCCESS); \ msleep(20); /* wait for packet to pass through PAE */ \ KTD_EXPECT(skb_queue_len(&skb_not_accelerated) == 0); \ KTD_EXPECT(skb_queue_len(&skb_accelerated) == 1); \ skb_result = skb_dequeue(&skb_accelerated); \ KTD_EXPECT((skb_expect)->len == skb_result->len); \ KTD_EXPECT(!hwpa_pktcmp(skb_result, (skb_expect))); \ kfree_skb(skb_result); \ }); #define PAE_EXPECT_NOACCEL(skb) \ ({ \ struct sk_buff *skb_result; \ KTD_EXPECT(pae_inject(skb_clone((skb), GFP_ATOMIC)) == \ HWPA_BACKEND_SUCCESS); \ msleep(20); /* wait for packet to pass through PAE */ \ KTD_EXPECT(skb_queue_len(&skb_not_accelerated) == 1); \ KTD_EXPECT(skb_queue_len(&skb_accelerated) == 0); \ skb_result = skb_dequeue(&skb_not_accelerated); \ kfree_skb(skb_result); \ }); static ktd_ret_t pae_test_6rd(void *arg) { struct avm_pa_session *s, *s_encap, *s_decap; struct sk_buff *skb_encap, *skb_decap; struct avm_pa_pkt_match *eg_match; unsigned long sess_handle; s_encap = kzalloc(sizeof(*s_encap), GFP_ATOMIC); mock_session_eth_ipv4_ipv6_tcp(s_encap); skb_encap = build_packet_ipv4_ipv6_tcp(); eg_match = &avm_pa_first_egress(s_encap)->match; eg_match->hdrlen = min(sizeof(eg_match->hdrcopy), (size_t)skb_encap->len); memcpy(&s_encap->ingress.hdrcopy[0], skb_encap->data, eg_match->hdrlen); memcpy(&eg_match->hdrcopy[0], skb_encap->data, eg_match->hdrlen); s_decap = kzalloc(sizeof(*s_decap), GFP_ATOMIC); mock_session_eth_ipv6_tcp(s_decap); skb_decap = build_packet_ipv6_tcp(); /* copy l3/l4 fields */ memcpy(skb_inner_network_header(skb_decap), skb_inner_network_header(skb_encap), skb_encap->len - skb_inner_network_offset(skb_encap)); /* copy L2 addresses */ memcpy(skb_decap->data, skb_encap->data, ETH_ALEN * 2); eg_match = &avm_pa_first_egress(s_decap)->match; eg_match->hdrlen = min(sizeof(eg_match->hdrcopy), (size_t)skb_decap->len); memcpy(&s_decap->ingress.hdrcopy[0], skb_decap->data, eg_match->hdrlen); memcpy(&eg_match->hdrcopy[0], skb_decap->data, eg_match->hdrlen); /* Check for unexpected packets. This might fail due to other tests * failing earlier. */ KTD_EXPECT(skb_queue_len(&skb_not_accelerated) == 0); KTD_EXPECT(skb_queue_len(&skb_accelerated) == 0); /* Do simple forwarding without modifications. * Expect this to fail. 
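 * Both ingress and egress describe the already-encapsulated packet, so there is no encap/decap transition to program and the add is rejected with HWPA_BACKEND_ERR_GSW_TUN.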
*/ KTD_EXPECT(hwpa_backend_add_session(s_encap, &sess_handle) == HWPA_BACKEND_ERR_GSW_TUN); /* combine as upstream session */ s = kzalloc(sizeof(*s), GFP_ATOMIC); *s = *s_encap; s->ingress = s_decap->ingress; /* test session */ KTD_EXPECT(hwpa_backend_add_session(s, &sess_handle) == HWPA_BACKEND_SUCCESS); PAE_EXPECT_ACCEL(skb_decap, skb_encap); KTD_EXPECT(hwpa_backend_rem_session(sess_handle) == HWPA_BACKEND_SUCCESS); /* combine as downstream session */ *s = *s_decap; s->ingress = s_encap->ingress; /* test session */ KTD_EXPECT(hwpa_backend_add_session(s, &sess_handle) == HWPA_BACKEND_SUCCESS); PAE_EXPECT_ACCEL(skb_encap, skb_decap); KTD_EXPECT(hwpa_backend_rem_session(sess_handle) == HWPA_BACKEND_SUCCESS); /* Send decap'd packet matching a downstream session. */ KTD_EXPECT(hwpa_backend_add_session(s, &sess_handle) == HWPA_BACKEND_SUCCESS); PAE_EXPECT_NOACCEL(skb_decap); KTD_EXPECT(hwpa_backend_rem_session(sess_handle) == HWPA_BACKEND_SUCCESS); /* Send packet with modified outer saddr matching a downstream * session. */ { struct iphdr *hdr; hdr = (void *)skb_network_header(skb_encap); hdr->saddr++; } KTD_EXPECT(hwpa_backend_add_session(s, &sess_handle) == HWPA_BACKEND_SUCCESS); PAE_EXPECT_NOACCEL(skb_encap); KTD_EXPECT(hwpa_backend_rem_session(sess_handle) == HWPA_BACKEND_SUCCESS); kfree(s); kfree(s_encap); kfree(s_decap); kfree_skb(skb_encap); kfree_skb(skb_decap); return KTD_PASSED; } static ktd_ret_t pae_test_dslite(void *arg) { struct avm_pa_session *s, *s_encap, *s_decap; struct sk_buff *skb_encap, *skb_decap; struct avm_pa_pkt_match *eg_match; unsigned long sess_handle; s_encap = kzalloc(sizeof(*s_encap), GFP_ATOMIC); mock_session_eth_ipv6_ipv4_tcp(s_encap); skb_encap = build_packet_ipv6_ipv4_tcp(); eg_match = &avm_pa_first_egress(s_encap)->match; eg_match->hdrlen = min(sizeof(eg_match->hdrcopy), (size_t)skb_encap->len); memcpy(&s_encap->ingress.hdrcopy[0], skb_encap->data, eg_match->hdrlen); memcpy(&eg_match->hdrcopy[0], skb_encap->data, eg_match->hdrlen); s_decap = kzalloc(sizeof(*s_decap), GFP_ATOMIC); mock_session_eth_ipv4_tcp(s_decap); skb_decap = build_packet_ipv4_tcp(); /* copy l3/l4 fields */ memcpy(skb_inner_network_header(skb_decap), skb_inner_network_header(skb_encap), skb_encap->len - skb_inner_network_offset(skb_encap)); /* copy L2 addresses */ memcpy(skb_decap->data, skb_encap->data, ETH_ALEN * 2); eg_match = &avm_pa_first_egress(s_decap)->match; eg_match->hdrlen = min(sizeof(eg_match->hdrcopy), (size_t)skb_decap->len); memcpy(&s_decap->ingress.hdrcopy[0], skb_decap->data, eg_match->hdrlen); memcpy(&eg_match->hdrcopy[0], skb_decap->data, eg_match->hdrlen); /* Check for unexpected packets. This might fail due to other tests * failing earlier. */ KTD_EXPECT(skb_queue_len(&skb_not_accelerated) == 0); KTD_EXPECT(skb_queue_len(&skb_accelerated) == 0); /* Do simple forwarding without modifications. * Expect this to fail. 
*/ KTD_EXPECT(hwpa_backend_add_session(s_encap, &sess_handle) == HWPA_BACKEND_ERR_GSW_TUN); /* combine as upstream session */ s = kzalloc(sizeof(*s), GFP_ATOMIC); *s = *s_encap; s->ingress = s_decap->ingress; /* test session */ KTD_EXPECT(hwpa_backend_add_session(s, &sess_handle) == HWPA_BACKEND_SUCCESS); PAE_EXPECT_ACCEL(skb_decap, skb_encap); KTD_EXPECT(hwpa_backend_rem_session(sess_handle) == HWPA_BACKEND_SUCCESS); /* combine as downstream session */ *s = *s_decap; s->ingress = s_encap->ingress; /* test session */ KTD_EXPECT(hwpa_backend_add_session(s, &sess_handle) == HWPA_BACKEND_SUCCESS); PAE_EXPECT_ACCEL(skb_encap, skb_decap); KTD_EXPECT(hwpa_backend_rem_session(sess_handle) == HWPA_BACKEND_SUCCESS); /* Send decap'd packet matching a downstream session. */ KTD_EXPECT(hwpa_backend_add_session(s, &sess_handle) == HWPA_BACKEND_SUCCESS); PAE_EXPECT_NOACCEL(skb_decap); KTD_EXPECT(hwpa_backend_rem_session(sess_handle) == HWPA_BACKEND_SUCCESS); /* Send packet with modified outer saddr matching a downstream * session. */ { struct ipv6hdr *hdr; hdr = (void *)skb_network_header(skb_encap); hdr->saddr.in6_u.u6_addr8[15]++; } KTD_EXPECT(hwpa_backend_add_session(s, &sess_handle) == HWPA_BACKEND_SUCCESS); PAE_EXPECT_NOACCEL(skb_encap); KTD_EXPECT(hwpa_backend_rem_session(sess_handle) == HWPA_BACKEND_SUCCESS); kfree(s); kfree(s_encap); kfree(s_decap); kfree_skb(skb_encap); kfree_skb(skb_decap); return KTD_PASSED; } static ktd_ret_t pae_test_vlan(void *arg) { struct avm_pa_session *s, *s_tagged, *s_untagged; struct sk_buff *skb_tagged, *skb_untagged, *skb_tagged_modified; struct avm_pa_pkt_match *eg_match; unsigned long sess_handle; s_tagged = kzalloc(sizeof(*s_tagged), GFP_ATOMIC); mock_session_eth_vlan_ipv4_tcp(s_tagged); skb_tagged = build_packet_vlan_ipv4_tcp(); eg_match = &avm_pa_first_egress(s_tagged)->match; eg_match->hdrlen = min(sizeof(eg_match->hdrcopy), (size_t)skb_tagged->len); memcpy(&s_tagged->ingress.hdrcopy[0], skb_tagged->data, eg_match->hdrlen); memcpy(&eg_match->hdrcopy[0], skb_tagged->data, eg_match->hdrlen); s_untagged = kzalloc(sizeof(*s_untagged), GFP_ATOMIC); mock_session_eth_ipv4_tcp(s_untagged); skb_untagged = build_packet_ipv4_tcp(); /* copy l3/l4 fields */ memcpy(skb_network_header(skb_untagged), skb_network_header(skb_tagged), skb_tagged->len - skb_network_offset(skb_tagged)); /* copy L2 addresses */ memcpy(skb_untagged->data, skb_tagged->data, ETH_ALEN * 2); eg_match = &avm_pa_first_egress(s_untagged)->match; eg_match->hdrlen = min(sizeof(eg_match->hdrcopy), (size_t)skb_untagged->len); memcpy(&s_untagged->ingress.hdrcopy[0], skb_untagged->data, eg_match->hdrlen); memcpy(&eg_match->hdrcopy[0], skb_untagged->data, eg_match->hdrlen); /* Check for unexpected packets. This might fail due to other tests * failing earlier. */ KTD_EXPECT(skb_queue_len(&skb_not_accelerated) == 0); KTD_EXPECT(skb_queue_len(&skb_accelerated) == 0); /* Do simple forwarding without modifications. * Expect no modifications at all. 
*/ KTD_EXPECT(hwpa_backend_add_session(s_tagged, &sess_handle) == HWPA_BACKEND_SUCCESS); PAE_EXPECT_ACCEL(skb_tagged, skb_tagged); KTD_EXPECT(hwpa_backend_rem_session(sess_handle) == HWPA_BACKEND_SUCCESS); /* combine as tagging session */ s = kzalloc(sizeof(*s), GFP_ATOMIC); *s = *s_tagged; s->ingress = s_untagged->ingress; /* test session */ KTD_EXPECT(hwpa_backend_add_session(s, &sess_handle) == HWPA_BACKEND_SUCCESS); PAE_EXPECT_ACCEL(skb_untagged, skb_tagged); KTD_EXPECT(hwpa_backend_rem_session(sess_handle) == HWPA_BACKEND_SUCCESS); /* combine as untagging session */ *s = *s_untagged; s->ingress = s_tagged->ingress; /* test session */ KTD_EXPECT(hwpa_backend_add_session(s, &sess_handle) == HWPA_BACKEND_SUCCESS); PAE_EXPECT_ACCEL(skb_tagged, skb_untagged); KTD_EXPECT(hwpa_backend_rem_session(sess_handle) == HWPA_BACKEND_SUCCESS); /* combine as vid-modifying session */ *s = *s_tagged; skb_tagged_modified = skb_copy(skb_tagged, GFP_KERNEL); { struct vlan_ethhdr *vlanethh = (void *)skb_tagged_modified->data; vlanethh->h_vlan_TCI += 1; vlanethh->h_vlan_TCI %= VLAN_N_VID; memcpy(&s->ingress.hdrcopy[0], vlanethh, sizeof(*vlanethh)); } /* test session */ KTD_EXPECT(hwpa_backend_add_session(s, &sess_handle) == HWPA_BACKEND_SUCCESS); PAE_EXPECT_ACCEL(skb_tagged_modified, skb_tagged); KTD_EXPECT(hwpa_backend_rem_session(sess_handle) == HWPA_BACKEND_SUCCESS); kfree(s); kfree(s_tagged); kfree(s_untagged); kfree_skb(skb_tagged); kfree_skb(skb_untagged); kfree_skb(skb_tagged_modified); return KTD_PASSED; } static ktd_ret_t pae_test_pppoe(void *arg) { struct avm_pa_session *s, *s_encap, *s_decap; struct sk_buff *skb_encap, *skb_decap; struct avm_pa_pkt_match *eg_match; unsigned long sess_handle; s_encap = kzalloc(sizeof(*s_encap), GFP_ATOMIC); mock_session_eth_pppoe_ipv4_tcp(s_encap); skb_encap = build_packet_pppoe_ipv4_tcp(); eg_match = &avm_pa_first_egress(s_encap)->match; eg_match->hdrlen = min(sizeof(eg_match->hdrcopy), (size_t)skb_encap->len); memcpy(&s_encap->ingress.hdrcopy[0], skb_encap->data, eg_match->hdrlen); memcpy(&eg_match->hdrcopy[0], skb_encap->data, eg_match->hdrlen); s_decap = kzalloc(sizeof(*s_decap), GFP_ATOMIC); mock_session_eth_ipv4_tcp(s_decap); skb_decap = build_packet_ipv4_tcp(); /* copy l3/l4 fields */ memcpy(skb_network_header(skb_decap), skb_network_header(skb_encap), skb_encap->len - skb_network_offset(skb_encap)); /* copy L2 addresses */ memcpy(skb_decap->data, skb_encap->data, ETH_ALEN * 2); eg_match = &avm_pa_first_egress(s_decap)->match; eg_match->hdrlen = min(sizeof(eg_match->hdrcopy), (size_t)skb_decap->len); memcpy(&s_decap->ingress.hdrcopy[0], skb_decap->data, eg_match->hdrlen); memcpy(&eg_match->hdrcopy[0], skb_decap->data, eg_match->hdrlen); /* Check for unexpected packets. This might fail due to other tests * failing earlier. */ KTD_EXPECT(skb_queue_len(&skb_not_accelerated) == 0); KTD_EXPECT(skb_queue_len(&skb_accelerated) == 0); /* Do simple forwarding without modifications. * Expect no modifications at all. 
*/ KTD_EXPECT(hwpa_backend_add_session(s_encap, &sess_handle) == HWPA_BACKEND_SUCCESS); PAE_EXPECT_ACCEL(skb_encap, skb_encap); KTD_EXPECT(hwpa_backend_rem_session(sess_handle) == HWPA_BACKEND_SUCCESS); /* combine as upstream session */ s = kzalloc(sizeof(*s), GFP_ATOMIC); *s = *s_encap; s->ingress = s_decap->ingress; /* test session */ KTD_EXPECT(hwpa_backend_add_session(s, &sess_handle) == HWPA_BACKEND_SUCCESS); PAE_EXPECT_ACCEL(skb_decap, skb_encap); KTD_EXPECT(hwpa_backend_rem_session(sess_handle) == HWPA_BACKEND_SUCCESS); /* combine as downstream session */ *s = *s_decap; s->ingress = s_encap->ingress; /* test session */ KTD_EXPECT(hwpa_backend_add_session(s, &sess_handle) == HWPA_BACKEND_SUCCESS); PAE_EXPECT_ACCEL(skb_encap, skb_decap); KTD_EXPECT(hwpa_backend_rem_session(sess_handle) == HWPA_BACKEND_SUCCESS); kfree(s); kfree(s_encap); kfree(s_decap); kfree_skb(skb_encap); kfree_skb(skb_decap); return KTD_PASSED; } static ktd_ret_t pae_test_exhaust_rt_table_coll(void *arg) { enum { RT_TABLE_MAX_COLL = 16 }; struct avm_pa_session s; unsigned long *sess_handles; int i; u16 *sport; mock_session_eth_ipv4_udp(&s); sport = (void *)hwpa_get_hdr(&s.ingress, AVM_PA_PORTS); sess_handles = kmalloc(sizeof(*sess_handles) * (RT_TABLE_MAX_COLL + 1), GFP_KERNEL); /* add sessions to fill the table */ for (i = 0; i < RT_TABLE_MAX_COLL; i++) { KTD_EXPECT(hwpa_backend_add_session(&s, &sess_handles[i]) == HWPA_BACKEND_SUCCESS); } /* hit one too many collisions */ KTD_EXPECT(hwpa_backend_add_session(&s, &sess_handles[i]) == HWPA_BACKEND_ERR_GSW_RT_COLL_FULL); /* Avoid collision and see if it's okay. */ (*sport)++; KTD_EXPECT(hwpa_backend_add_session(&s, &sess_handles[i]) == HWPA_BACKEND_SUCCESS); /* remove all the entries */ for (i = 0; i < RT_TABLE_MAX_COLL + 1; i++) { KTD_EXPECT(hwpa_backend_rem_session(sess_handles[i]) == HWPA_BACKEND_SUCCESS); } kfree(sess_handles); return KTD_PASSED; } static ktd_ret_t pae_test_exhaust_rt_table(void *arg) { enum { RT_TABLE_SIZE = 4096 }; struct avm_pa_session s; unsigned long *sess_handles; int i; u16 *sport; mock_session_eth_ipv4_udp(&s); sport = (void *)hwpa_get_hdr(&s.ingress, AVM_PA_PORTS); sess_handles = kmalloc(sizeof(*sess_handles) * RT_TABLE_SIZE, GFP_KERNEL); /* add sessions to fill the table */ for (i = 0; i < RT_TABLE_SIZE; i++) { KTD_EXPECT(hwpa_backend_add_session(&s, &sess_handles[i]) == HWPA_BACKEND_SUCCESS); (*sport)++; /* let's hope there won't be too many collision */ } /* table should be full now */ KTD_EXPECT(hwpa_backend_add_session(&s, &sess_handles[0]) == HWPA_BACKEND_ERR_GSW_RT_SESS_FULL); /* invalidate an entry */ KTD_EXPECT(hwpa_backend_rem_session(sess_handles[0]) == HWPA_BACKEND_SUCCESS); /* Due to hash collision, the first entry is still bound as a hash * bucket head after removal. It becomes invalid but won't migrate to * the free-list. It may be used if we hit a bucket that lists that * invalid entry already. 
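 * Adding again with the current source port therefore still reports a full table, while going back to sport 0, whose bucket references the invalidated entry, succeeds.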
*/ KTD_EXPECT(hwpa_backend_add_session(&s, &sess_handles[0]) == HWPA_BACKEND_ERR_GSW_RT_SESS_FULL); *sport = 0; KTD_EXPECT(hwpa_backend_add_session(&s, &sess_handles[0]) == HWPA_BACKEND_SUCCESS); /* remove all the entries */ for (i = 0; i < RT_TABLE_SIZE; i++) { KTD_EXPECT(hwpa_backend_rem_session(sess_handles[i]) == HWPA_BACKEND_SUCCESS); } kfree(sess_handles); return KTD_PASSED; } static ktd_ret_t pae_test_exhaust_pppoe_table(void *arg) { enum { PPPOE_TABLE_SIZE = 16 }; struct avm_pa_session s; unsigned long *sess_handles; int i; struct pppoe_hdr *pppoeh; u16 *sport; mock_session_eth_ipv4_udp(&s); { struct avm_pa_session *s_pppoe; s_pppoe = kzalloc(sizeof(*s_pppoe), GFP_KERNEL); mock_session_eth_pppoe_ipv4_udp(s_pppoe); *avm_pa_first_egress(&s) = *avm_pa_first_egress(s_pppoe); kfree(s_pppoe); } /* use egress to change action */ pppoeh = (void *)hwpa_get_hdr(&avm_pa_first_egress(&s)->match, AVM_PA_PPPOE); /* use ingress to change pattern */ sport = (void *)hwpa_get_hdr(&s.ingress, AVM_PA_PORTS); sess_handles = kmalloc(sizeof(*sess_handles) * PPPOE_TABLE_SIZE, GFP_ATOMIC); /* add sessions to fill the table */ for (i = 0; i < PPPOE_TABLE_SIZE; i++) { KTD_EXPECT(hwpa_backend_add_session(&s, &sess_handles[i]) == HWPA_BACKEND_SUCCESS); /* force a new pppoe entry */ pppoeh->sid++; /* alter sport to not run into hash collision limits first */ (*sport)++; } /* pppoe table should be full at this point */ KTD_EXPECT(hwpa_backend_add_session(&s, &sess_handles[0]) == HWPA_BACKEND_ERR_GSW_PPPOE_FULL); /* free up an entry and try again */ KTD_EXPECT(hwpa_backend_rem_session(sess_handles[0]) == HWPA_BACKEND_SUCCESS); KTD_EXPECT(hwpa_backend_add_session(&s, &sess_handles[0]) == HWPA_BACKEND_SUCCESS); /* remove all the entries */ for (i = 0; i < PPPOE_TABLE_SIZE; i++) { KTD_EXPECT(hwpa_backend_rem_session(sess_handles[i]) == HWPA_BACKEND_SUCCESS); } kfree(sess_handles); return KTD_PASSED; } static ktd_ret_t pae_test_exhaust_mac_table(void *arg) { /* Index 0 is unusable, as it disables the rewrite action. 
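 * Only 511 of the 512 MAC table entries are therefore usable, hence MAC_TABLE_SIZE below.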
*/ enum { MAC_TABLE_SIZE = 512 - 1 }; struct avm_pa_session s; unsigned long *sess_handles; int i; struct ethhdr *ethh; u16 *sport; mock_session_eth_ipv4_udp(&s); /* use egress to change action */ ethh = (void *)hwpa_get_hdr(&avm_pa_first_egress(&s)->match, AVM_PA_ETH); /* use ingress to change pattern */ sport = (void *)hwpa_get_hdr(&s.ingress, AVM_PA_PORTS); sess_handles = kmalloc(sizeof(*sess_handles) * MAC_TABLE_SIZE, GFP_KERNEL); /* Change dstmac from ingress to enable the rewrite action */ ethh->h_dest[3] = 1; /* add sessions to fill the table */ for (i = 0; i < MAC_TABLE_SIZE; i++) { KTD_EXPECT(hwpa_backend_add_session(&s, &sess_handles[i]) == HWPA_BACKEND_SUCCESS); /* force a new mac entry */ ethh->h_dest[4] = (i + 1) >> BITS_PER_BYTE; ethh->h_dest[5] = (i + 1); /* alter sport to not run into hash collision limits first */ (*sport)++; } /* mac table should be full at this point */ KTD_EXPECT(hwpa_backend_add_session(&s, &sess_handles[0]) == HWPA_BACKEND_ERR_GSW_MAC_FULL); /* free up an entry and try again */ KTD_EXPECT(hwpa_backend_rem_session(sess_handles[0]) == HWPA_BACKEND_SUCCESS); KTD_EXPECT(hwpa_backend_add_session(&s, &sess_handles[0]) == HWPA_BACKEND_SUCCESS); /* remove all the entries */ for (i = 0; i < MAC_TABLE_SIZE; i++) { KTD_EXPECT(hwpa_backend_rem_session(sess_handles[i]) == HWPA_BACKEND_SUCCESS); } kfree(sess_handles); return KTD_PASSED; } static ktd_ret_t pae_test_exhaust_ip_table(void *arg) { enum { IP_TABLE_SIZE = 2048 }; struct avm_pa_session s; unsigned long *sess_handles; int i; struct iphdr *iph; mock_session_eth_ipv4_udp(&s); iph = (void *)hwpa_get_hdr(&s.ingress, AVM_PA_IPV4); sess_handles = kmalloc(sizeof(*sess_handles) * IP_TABLE_SIZE, GFP_KERNEL); /* add sessions to fill the table */ for (i = 0; i < IP_TABLE_SIZE; i++) { KTD_EXPECT(hwpa_backend_add_session(&s, &sess_handles[i]) == HWPA_BACKEND_SUCCESS); iph->daddr++; } /* table should be full now */ KTD_EXPECT(hwpa_backend_add_session(&s, &sess_handles[0]) == HWPA_BACKEND_ERR_GSW_IP_FULL); /* Free up an entry. Remove second entry instead of first because * daddr == 0 has refcnt > 1. 
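 * Removing a session whose address is shared would only drop a reference; entry 1 has a unique daddr, so removing it really frees an IP table slot for the follow-up add.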
*/ KTD_EXPECT(hwpa_backend_rem_session(sess_handles[1]) == HWPA_BACKEND_SUCCESS); KTD_EXPECT(hwpa_backend_add_session(&s, &sess_handles[1]) == HWPA_BACKEND_SUCCESS); /* remove all the entries */ for (i = 0; i < IP_TABLE_SIZE; i++) { KTD_EXPECT(hwpa_backend_rem_session(sess_handles[i]) == HWPA_BACKEND_SUCCESS); } kfree(sess_handles); return KTD_PASSED; } static ktd_ret_t pae_test_exhaust_mtu_table(void *arg) { enum { MTU_TABLE_SIZE = 8 }; struct avm_pa_session s; unsigned long sess_handles[MTU_TABLE_SIZE]; int i; mock_session_eth_ipv4_udp(&s); /* add sessions to fill the table */ for (i = 0; i < ARRAY_SIZE(sess_handles); i++) { KTD_EXPECT(hwpa_backend_add_session(&s, &sess_handles[i]) == HWPA_BACKEND_SUCCESS); avm_pa_first_egress((&s))->mtu--; } /* table should be full now */ KTD_EXPECT(hwpa_backend_add_session(&s, &sess_handles[0]) == HWPA_BACKEND_ERR_GSW_MTU_FULL); /* free up an entry and try again */ KTD_EXPECT(hwpa_backend_rem_session(sess_handles[0]) == HWPA_BACKEND_SUCCESS); KTD_EXPECT(hwpa_backend_add_session(&s, &sess_handles[0]) == HWPA_BACKEND_SUCCESS); /* remove all the entries */ for (i = 0; i < ARRAY_SIZE(sess_handles); i++) { KTD_EXPECT(hwpa_backend_rem_session(sess_handles[i]) == HWPA_BACKEND_SUCCESS); } return KTD_PASSED; } static ktd_ret_t test_run_with_datapath(void *arg) { ktd_ret_t (*test)(void *arg); ktd_ret_t ret; test = arg; setup_selftest_datapath(); ret = test(NULL); teardown_selftest_datapath(); return ret; } static ktd_ret_t hwpa_backend_hit_status_test(void *arg) { struct avm_pa_session *s; struct sk_buff *skb; struct avm_pa_pkt_match *eg_match; unsigned long sess_handle; struct avm_pa_session_stats stats; s = kzalloc(sizeof(*s), GFP_ATOMIC); mock_session_eth_ipv4_udp(s); skb = build_packet_ipv4_udp(); /* Fill hdrcpy data. */ eg_match = &avm_pa_first_egress(s)->match; eg_match->hdrlen = min(sizeof(eg_match->hdrcopy), (size_t)skb->len); memcpy(&s->ingress.hdrcopy[0], skb->data, eg_match->hdrlen); memcpy(&eg_match->hdrcopy[0], skb->data, eg_match->hdrlen); /* Check for unexpected packets. This might fail due to other tests * failing earlier. 
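 * Also note: the hit indication is read-and-clear; hwpa_backend_stats() reports AVM_PA_SESSION_STATS_VALID_HIT only if traffic matched the session since the previous stats read.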
*/ KTD_EXPECT(skb_queue_len(&skb_not_accelerated) == 0); KTD_EXPECT(skb_queue_len(&skb_accelerated) == 0); KTD_EXPECT(hwpa_backend_add_session(s, &sess_handle) == HWPA_BACKEND_SUCCESS); KTD_EXPECT(hwpa_backend_stats(sess_handle, &stats) == HWPA_BACKEND_SUCCESS); KTD_EXPECT(!(stats.validflags & AVM_PA_SESSION_STATS_VALID_HIT)); PAE_EXPECT_ACCEL(skb, skb); KTD_EXPECT(hwpa_backend_stats(sess_handle, &stats) == HWPA_BACKEND_SUCCESS); KTD_EXPECT(stats.validflags & AVM_PA_SESSION_STATS_VALID_HIT); KTD_EXPECT(hwpa_backend_stats(sess_handle, &stats) == HWPA_BACKEND_SUCCESS); KTD_EXPECT(!(stats.validflags & AVM_PA_SESSION_STATS_VALID_HIT)); PAE_EXPECT_ACCEL(skb, skb); KTD_EXPECT(hwpa_backend_stats(sess_handle, &stats) == HWPA_BACKEND_SUCCESS); KTD_EXPECT(stats.validflags & AVM_PA_SESSION_STATS_VALID_HIT); KTD_EXPECT(hwpa_backend_rem_session(sess_handle) == HWPA_BACKEND_SUCCESS); kfree_skb(skb); kfree(s); return KTD_PASSED; } enum hwpa_backend_rv __init hwpa_backend_init(void) { int i; enum hwpa_backend_rv rv; for (i = 0; i < ARRAY_SIZE(tunnel_entries); i++) mutex_init(&tunnel_entries[i].lock); rv = hwpa_backend_init_pce(); if (rv != HWPA_BACKEND_SUCCESS) return rv; rv = hwpa_backend_init_vlan(); if (rv != HWPA_BACKEND_SUCCESS) return rv; rv = hwpa_backend_init_counter(); if (rv != HWPA_BACKEND_SUCCESS) return rv; setup_localpid_datapath(); test_suite = ktd_suite_create(THIS_MODULE->name); ktd_register(test_suite, "extract_action_pppoe", extract_action_pppoe_test, NULL); ktd_register(test_suite, "extract_action_nat", extract_action_nat_test, NULL); ktd_register(test_suite, "extract_action_routing", extract_action_routing_test, NULL); ktd_register(test_suite, "extract_pattern", extract_pattern_test, NULL); ktd_register(test_suite, "pae_rtmode_ipv4_tcp", pae_test_rtmode, (void *)&pae_test_eth_ipv4_tcp); ktd_register(test_suite, "pae_rtmode_ipv4_udp", pae_test_rtmode, (void *)&pae_test_eth_ipv4_udp); ktd_register(test_suite, "pae_rtmode_ipv6_tcp", pae_test_rtmode, (void *)&pae_test_eth_ipv6_tcp); ktd_register(test_suite, "pae_rtmode_ipv6_udp", pae_test_rtmode, (void *)&pae_test_eth_ipv6_udp); ktd_register(test_suite, "pae_pppoemode", test_run_with_datapath, (void *)&pae_test_pppoe); ktd_register(test_suite, "pae_egvlan", test_run_with_datapath, (void *)&pae_test_vlan); ktd_register(test_suite, "pae_exhaust_mtu_table", test_run_with_datapath, (void *)&pae_test_exhaust_mtu_table); ktd_register(test_suite, "pae_exhaust_pppoe_table", test_run_with_datapath, (void *)&pae_test_exhaust_pppoe_table); ktd_register(test_suite, "pae_exhaust_mac_table", test_run_with_datapath, (void *)&pae_test_exhaust_mac_table); ktd_register(test_suite, "pae_exhaust_ip_table", test_run_with_datapath, (void *)&pae_test_exhaust_ip_table); ktd_register(test_suite, "pae_exhaust_rt_table", test_run_with_datapath, (void *)&pae_test_exhaust_rt_table); ktd_register(test_suite, "pae_exhaust_rt_table_coll", test_run_with_datapath, (void *)&pae_test_exhaust_rt_table_coll); ktd_register(test_suite, "pae_vlan_allocation", vlan_acquire_test, NULL); ktd_register(test_suite, "session_is_active", test_run_with_datapath, (void *)hwpa_backend_hit_status_test); ktd_register(test_suite, "pae_tunnel_dslite", test_run_with_datapath, (void *)&pae_test_dslite); ktd_register(test_suite, "pae_tunnel_6rd", test_run_with_datapath, (void *)&pae_test_6rd); return HWPA_BACKEND_SUCCESS; } void hwpa_backend_exit(void) { teardown_localpid_datapath(); ktd_suite_destroy(test_suite); }
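/* Extending the routing-mode coverage to another packet flavour only takes a
 * mock-session builder, a matching packet builder and one ktd_register()
 * call. A minimal sketch, reusing the helpers above (the struct name and the
 * test label are made up for illustration and are not part of the driver):
 *
 *	static const struct pae_test_data pae_test_eth_vlan_ipv4_tcp = {
 *		.mock_session = mock_session_eth_vlan_ipv4_tcp,
 *		.build_packet = build_packet_vlan_ipv4_tcp
 *	};
 *
 *	ktd_register(test_suite, "pae_rtmode_vlan_ipv4_tcp", pae_test_rtmode,
 *		     (void *)&pae_test_eth_vlan_ipv4_tcp);
 */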