/* * Copyright (c) 2019 AVM GmbH . * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see . */ #include "offdp.h" #include #include #include #include #include #include #include #include #include #include #include #include /* TODO * * - translate port isolation as it's done in "net: bridge: add support for * port isolation" */ /* our uses of subinterface IDs: * * the subinterface ID consists of multiple parts: * - bit 14, which will be set by the GSWIP for broadcast, multicast and unknown * unicast frames; not important for us * - reserved bits 12-13, we ignore them * - bits 8-11 contain the VAP-ID * - bits 0-7 contain the Station-ID * * use for fast transmits, i.e. frames from the GSWIP to a VEP: * - VAP-ID: ignored by us, because the offload_pa might have used it in its * routing sessions, e.g. to select a specific entry from the egress vlan * table or to identify a specific session * - Station-ID: the lower four bits contain the vep_handle, the upper four bits * are ignored * * use for fast receives, i.e. frames from a VEP to the GSWIP: * - VAP-ID: must be set to the vep_handle, as PCE rules and the roaming * detection only access this part. * - Station-ID: will be learned to the mac table and used for L2 forwardings, * which leads to fast transmits, hence we need to set the lower four bits to * the vep_handle, too. The upper four bits are always set to 0. */ /* Stuff taken from gsw_reg.h as there is no appropiate switch_api command. */ #define PCE_PCTRL_3 (0x120cul / 4) #define PCE_PCTRL_3_PORT(p) (PCE_PCTRL_3 + ((p)*0xa)) #define PCE_PCTRL_3_RXDMIR BIT(10) #define PCE_PCTRL_3_VIO_9 BIT(13) #define PCE_PCTRL_3_IGPTRM BIT(14) #define PCE_IGPTRM (0x1510ul / 4) #define PCE_IGPTRM_PORT(p) (PCE_IGPTRM + ((p)*0x10)) static void hw_bridge_release(struct kobject *kobj); static void hw_port_release(struct kobject *kobj); static ssize_t hw_bridge_laddr_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf); static ssize_t hw_bridge_filterid_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf); static ssize_t hw_port_laddr_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf); static ssize_t hw_port_portid_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf); static ssize_t hw_port_subifid_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf); static ssize_t hw_port_pce_rule_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf); static ssize_t hw_port_learned_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf); static void update_ports(bool enable); static enum offdp_rv configure_ports(bool enable); enum { filterids_num = 64 }; static HANDLE_POOL_DECLARE(filterids, filterids_num); enum { subifid_pool_sz = 0x10 }; static HANDLE_POOL_DECLARE(subifid_pool, subifid_pool_sz); static unsigned long default_fid; static u16 isolated_portmap; static struct ktd_suite *test_suite; static struct net_device vep_netdev; static dp_subif_t vep_subif; static struct kobject *hw_resources; static struct kset *bridges_kset; static struct kset *ports_kset; struct hw_bridge { struct kobject kobj; unsigned long fid; u8 laddr[ETH_ALEN]; }; struct hw_port { struct kobject kobj; int pce_idx; unsigned long fid; bool isolated; bool software_fwd; u16 igptrm; dp_subif_t subif; u8 laddr[ETH_ALEN]; }; static struct kobj_type hw_bridge_kobj_type = { .release = hw_bridge_release, .sysfs_ops = &kobj_sysfs_ops, }; static struct kobj_attribute hw_bridge_attrs[] = { __ATTR(local_addr, 0400, hw_bridge_laddr_show, NULL), __ATTR(filter_id, 0400, hw_bridge_filterid_show, NULL), }; static struct kobj_type hw_port_kobj_type = { .release = hw_port_release, .sysfs_ops = &kobj_sysfs_ops, }; static struct kobj_attribute hw_port_attrs[] = { __ATTR(learned, 0400, hw_port_learned_show, NULL), __ATTR(portid, 0400, hw_port_portid_show, NULL), __ATTR(subifid, 0400, hw_port_subifid_show, NULL), __ATTR(local_addr, 0400, hw_port_laddr_show, NULL), __ATTR(pce_rule, 0400, hw_port_pce_rule_show, NULL), }; static void _maskcpy(void *dst, const void *src, const void *mask, size_t len) { u8 *src_b, *dst_b, *mask_b; int i; src_b = (u8 *)src; dst_b = (u8 *)dst; mask_b = (u8 *)mask; for (i = 0; i < len; i++) { dst_b[i] &= ~mask_b[i]; dst_b[i] |= src_b[i] & mask_b[i]; } } #define gsw_for_each(i, gsw) \ for ((i) = 0, (gsw) = gsw_all[0]; (i) < ARRAY_SIZE(gsw_all); \ (i)++, (gsw) = gsw_all[((i) < ARRAY_SIZE(gsw_all) ? i : 0)]) #define gsw_set(gsw, cmd_get, cmd_set, val, mask) \ ({ \ int rv; \ typeof((mask)) tmp = (val); \ rv = gsw_cmd((gsw), (cmd_get), &tmp); \ if (rv >= GSW_statusOk) { \ _maskcpy(&(tmp), &(val), &(mask), sizeof(tmp)); \ rv = gsw_cmd((gsw), (cmd_set), &tmp); \ } \ rv; \ }) enum { GSWIP_L = 0, GSWIP_R = 1, }; static const int gsw_all[] = { GSWIP_L, GSWIP_R }; static int32_t gsw_cmd(int gsw_id, uint32_t command, void *arg) { int32_t ret; GSW_API_HANDLE gsw = 0; if (gsw_id == 0) gsw = gsw_api_kopen("/dev/switch_api/0"); if (gsw_id == 1) gsw = gsw_api_kopen("/dev/switch_api/1"); if (gsw == 0) { return -1; } ret = gsw_api_kioctl(gsw, command, arg); gsw_api_kclose(gsw); return ret; } static int fid_alloc(unsigned long *fid) { unsigned long _fid; _fid = handle_alloc(filterids, filterids_num); if (_fid >= filterids_num) return OFFDP_ERR_NOAVAIL; *fid = (int)_fid; return OFFDP_SUCCESS; } static void fid_free(unsigned long fid) { if (fid >= filterids_num) pr_err("try to free invalid fid\n"); else handle_free(fid, filterids); } static void gsw_flush_entries_learned(int gsw_id) { GSW_MAC_tableRead_t read_entry = { .bInitial = 1 }; for (;;) { GSW_MAC_tableRemove_t rem_entry = { 0 }; gsw_cmd(gsw_id, GSW_MAC_TABLE_ENTRY_READ, &read_entry); if (read_entry.bLast) break; if (read_entry.nPortId == 0 || read_entry.bStaticEntry) continue; memcpy(&rem_entry.nMAC[0], read_entry.nMAC, sizeof(rem_entry.nMAC)); rem_entry.nFId = read_entry.nFId; pr_debug("mac remove entry %pM from bridge %d\n", rem_entry.nMAC, rem_entry.nFId); gsw_cmd(gsw_id, GSW_MAC_TABLE_ENTRY_REMOVE, &rem_entry); } } static int teach_local_mac(u8 *lladdr, int fid) { int i, gsw; GSW_MAC_tableAdd_t mac_add = { 0 }; memcpy(&mac_add.nMAC[0], lladdr, sizeof(mac_add.nMAC)); mac_add.nFId = fid; mac_add.nPortId = 0; mac_add.nSubIfId = 0; /* Port locking only works for dynamic entries somehow. */ mac_add.bStaticEntry = 0; mac_add.nAgeTimer = 15; pr_debug("mac add entry %pM to bridge %d\n", lladdr, fid); gsw_for_each (i, gsw) { gsw_cmd(gsw, GSW_MAC_TABLE_ENTRY_ADD, &mac_add); } return 0; } enum offdp_rv offdp_backend_bridge_add(const struct net_device *br, struct kobject **kobj) { enum offdp_rv rv; struct hw_bridge *hwbr; update_ports(true); hwbr = kzalloc(sizeof(*hwbr), GFP_KERNEL); if (!hwbr) return OFFDP_ERR_MEM; rv = fid_alloc(&hwbr->fid); if (rv != OFFDP_SUCCESS) { kfree(hwbr); return rv; } pr_debug("alloc fid %lu\n", hwbr->fid); memcpy(&hwbr->laddr, br->dev_addr, sizeof(hwbr->laddr)); teach_local_mac(hwbr->laddr, hwbr->fid); kobject_init(&hwbr->kobj, &hw_bridge_kobj_type); hwbr->kobj.kset = bridges_kset; kobject_add(&hwbr->kobj, NULL, "fid%lu", hwbr->fid); { int i; for (i = 0; i < ARRAY_SIZE(hw_bridge_attrs); i++) sysfs_create_file(&hwbr->kobj, &hw_bridge_attrs[i].attr); } /* Move existing reference to caller */ *kobj = &hwbr->kobj; return OFFDP_SUCCESS; } static ssize_t hw_bridge_laddr_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { struct hw_bridge *hwbr = container_of(kobj, struct hw_bridge, kobj); return sprintf(buf, "%pM\n", hwbr->laddr); } static ssize_t hw_bridge_filterid_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { struct hw_bridge *hwbr = container_of(kobj, struct hw_bridge, kobj); return sprintf(buf, "%lu\n", hwbr->fid); } static void hw_bridge_release(struct kobject *kobj) { struct hw_bridge *hwbr = container_of(kobj, struct hw_bridge, kobj); fid_free(hwbr->fid); kfree(hwbr); update_ports(false); } static void update_ports(bool enable) { spin_lock(&bridges_kset->list_lock); /* Reconfigure the switch either before the first bridge is set up or * after the last bridge is gone . */ if (list_empty(&bridges_kset->list)) configure_ports(enable); spin_unlock(&bridges_kset->list_lock); } static u16 extract_vap_id(const struct hw_port *hwp) { /* Recognize VirtualEPs from DP_F_DIRECT and use the lower bits as * pattern, as these identify the EP. * Everything else gets a direct mapping. */ if (hwp->subif.alloc_flag == DP_F_DIRECT) return hwp->subif.subif & 0xf; return hwp->subif.subif >> 8; } static enum offdp_rv hw_port_write_pce(const struct hw_port *hwp) { GSW_PCE_rule_t rule; enum offdp_rv rv = OFFDP_SUCCESS; pr_debug("add pce rule %d for port %d subif %d\n", hwp->pce_idx, hwp->subif.port_id, hwp->subif.subif); memset(&rule, 0, sizeof(rule)); rule.pattern.nIndex = hwp->pce_idx; rule.pattern.bEnable = 1; rule.pattern.bPortIdEnable = 1; rule.pattern.nPortId = hwp->subif.port_id; rule.pattern.bSubIfIdEnable = 1; /* nSubIfId corresponds to the VAP-ID. */ rule.pattern.nSubIfId = extract_vap_id(hwp); /* no learning for vlan */ rule.pattern.bVid = 1; rule.pattern.bVid_Exclude = 1; rule.pattern.bVidRange_Select = 1; rule.pattern.nVid = 0; rule.pattern.nVidRange = 0xfff; /* This probably overwrites the cvid, but vlan is excluded anyway. */ rule.action.eVLAN_Action = GSW_PCE_ACTION_VLAN_ALTERNATIVE; rule.action.nFId = hwp->fid; /* This is required for VEPs as we write the vep_handle to the VAP-ID * for fast receives (see comment about uses of the subinterface ID at * the start of file). The VAP-ID is learned by the mac table and will * be used as an index to eg_vlan as an undesired side-effect. This * action causes eg_vlan to be ineffective for L2 forwarding while * allowing routing sessions to still apply vlan actions via eg_vlan. */ rule.action.bCVLAN_Ignore_Control = 1; /* Enable learning for this one */ rule.action.eLearningAction = GSW_PCE_ACTION_LEARNING_REGULAR; if (hwp->isolated) { rule.action.eSVLAN_Action = GSW_PCE_ACTION_VLAN_ALTERNATIVE; rule.action.ePortMapAction = GSW_PCE_ACTION_PORTMAP_REGULAR; rule.action.bPortBitMapMuxControl = 1; rule.action.nForwardPortMap[0] = ~READ_ONCE(isolated_portmap); } /* * Some of these rules are useless as not all ports are attached to * the LAN switch. There is no harm in doing so, so we add them * nonetheless for the sake of simplicity. */ if (gsw_cmd(GSWIP_L, GSW_PCE_RULE_WRITE, &rule) < GSW_statusOk) { /* please continue */ rv = OFFDP_ERR_SWAPI; } if (gsw_cmd(GSWIP_R, GSW_PCE_RULE_WRITE, &rule) < GSW_statusOk) { rv = OFFDP_ERR_SWAPI; } return rv; } static void hw_port_del_pce(struct hw_port *hwp) { GSW_PCE_rule_t rule; pr_debug("del pce rule %d\n", hwp->pce_idx); memset(&rule, 0, sizeof(rule)); rule.pattern.nIndex = hwp->pce_idx; rule.pattern.bEnable = 0; gsw_cmd(GSWIP_L, GSW_PCE_RULE_WRITE, &rule); gsw_cmd(GSWIP_R, GSW_PCE_RULE_WRITE, &rule); /* Flush the mac tables */ gsw_flush_entries_learned(GSWIP_L); gsw_flush_entries_learned(GSWIP_R); } static void isolation_portmap_update(void) { u16 portmap; struct kobject *k; portmap = 0; /* Gather isolated ports */ spin_lock(&ports_kset->list_lock); list_for_each_entry (k, &ports_kset->list, entry) { struct hw_port *hwp; hwp = container_of(k, struct hw_port, kobj); if (hwp->subif.port_id < 0 || hwp->subif.port_id >= 16) { pr_err("%s: port_id out of range (%d)\n", hwp->kobj.name, hwp->subif.port_id); continue; } portmap |= (hwp->isolated << hwp->subif.port_id); } spin_unlock(&ports_kset->list_lock); /* Update global map of isolated ports */ WRITE_ONCE(isolated_portmap, portmap); pr_debug("updated portmap: %hx\n", portmap); /* Update classification rule of all ports */ spin_lock(&ports_kset->list_lock); list_for_each_entry (k, &ports_kset->list, entry) { struct hw_port *hwp; hwp = container_of(k, struct hw_port, kobj); hw_port_write_pce(hwp); } spin_unlock(&ports_kset->list_lock); } enum offdp_rv offdp_backend_bridge_port_isolate(struct kobject *kobj, bool isolated) { struct hw_port *hwp; bool need_update; hwp = container_of(kobj, struct hw_port, kobj); need_update = hwp->isolated != isolated; hwp->isolated = isolated; pr_debug("%s: %s %sisolated\n", __func__, need_update ? "set to" : "keep as", isolated ? "" : "un"); if (need_update) isolation_portmap_update(); return OFFDP_SUCCESS; } enum offdp_rv offdp_backend_bridge_port_software_fwd(struct kobject *kobj, bool software_fwd) { struct hw_port *hwp; enum offdp_rv err = OFFDP_SUCCESS; hwp = container_of(kobj, struct hw_port, kobj); if (hwp->software_fwd == software_fwd) return OFFDP_SUCCESS; hwp->software_fwd = software_fwd; if (software_fwd) hw_port_del_pce(hwp); else err = hw_port_write_pce(hwp); return err; } static void read_igptrm_register(struct hw_port *hwp) { GSW_register_t reg = { 0 }; reg.nRegAddr = PCE_IGPTRM_PORT(hwp->subif.port_id); gsw_cmd(GSWIP_R, GSW_REGISTER_GET, ®); hwp->igptrm = reg.nData; } static enum offdp_rv hairpin_mode_update(struct hw_port *hwp, u16 mask, u16 value) { GSW_register_t reg = { 0 }; GSW_register_t reg_m = { 0 }; reg_m.nData = mask; reg.nRegAddr = PCE_IGPTRM_PORT(hwp->subif.port_id); reg.nData = value; if (hwp->subif.port_id < 7 && gsw_set(GSWIP_L, GSW_REGISTER_GET, GSW_REGISTER_SET, reg, reg_m) < GSW_statusOk) return OFFDP_ERR_SWAPI; if (gsw_set(GSWIP_R, GSW_REGISTER_GET, GSW_REGISTER_SET, reg, reg_m) < GSW_statusOk) return OFFDP_ERR_SWAPI; read_igptrm_register(hwp); return OFFDP_SUCCESS; } enum offdp_rv offdp_backend_bridge_port_hairpin(struct kobject *kobj, bool hairpin) { struct hw_port *hwp; u16 subif, value; bool need_update; hwp = container_of(kobj, struct hw_port, kobj); subif = extract_vap_id(hwp); value = (hairpin && !hwp->isolated) ? BIT(subif) : 0; need_update = (hwp->igptrm & BIT(subif)) != value; pr_debug("%s: port %d, subif %d %s %sable ingress port removal\n", __func__, hwp->subif.port_id, subif, need_update ? "set" : "keep", (hairpin && !hwp->isolated) ? "dis" : "en"); if (need_update) return hairpin_mode_update(hwp, BIT(subif), value); return OFFDP_SUCCESS; } enum offdp_rv offdp_backend_bridge_port_add(struct net_device *port, struct kobject *br_kobj, struct kobject **kobj) { unsigned long fid; struct hw_port *hwp; enum offdp_rv rv = OFFDP_SUCCESS; fid = container_of(br_kobj, struct hw_bridge, kobj)->fid; hwp = kzalloc(sizeof(*hwp), GFP_KERNEL); if (!hwp) return OFFDP_ERR_MEM; if (offdp_ep_platform_data(port, &hwp->subif, sizeof(hwp->subif))) { kfree(hwp); return OFFDP_ERR_DEV_RESOLVE; } read_igptrm_register(hwp); hwp->pce_idx = pce_idx_alloc(PCE_RANGE_LAN_SHARED); hwp->fid = fid; rv = hw_port_write_pce(hwp); memcpy(&hwp->laddr, port->dev_addr, sizeof(hwp->laddr)); teach_local_mac(port->dev_addr, fid); kobject_init(&hwp->kobj, &hw_port_kobj_type); hwp->kobj.kset = ports_kset; kobject_add(&hwp->kobj, NULL, "port_%d_%d", hwp->subif.port_id, hwp->subif.subif); { int i; for (i=0; i < ARRAY_SIZE(hw_port_attrs); i++) sysfs_create_file(&hwp->kobj, &hw_port_attrs[i].attr); } /* Move existing reference to caller */ *kobj = &hwp->kobj; return rv; } static ssize_t hw_port_pce_rule_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { struct hw_port *hwp = container_of(kobj, struct hw_port, kobj); return sprintf(buf, "%d\n", hwp->pce_idx); } static ssize_t hw_port_subifid_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { struct hw_port *hwp = container_of(kobj, struct hw_port, kobj); return sprintf(buf, "%d\n", hwp->subif.subif); } static ssize_t hw_port_portid_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { struct hw_port *hwp = container_of(kobj, struct hw_port, kobj); return sprintf(buf, "%d\n", hwp->subif.port_id); } static ssize_t hw_port_laddr_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { struct hw_port *hwp = container_of(kobj, struct hw_port, kobj); return sprintf(buf, "%pM\n", hwp->laddr); } static ssize_t hw_port_learned_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { struct hw_port *hwp = container_of(kobj, struct hw_port, kobj); GSW_MAC_tableRead_t read_entry = { .bInitial = 1 }; int gsw_id = GSWIP_L; char *wr_cur = buf; char *const wr_end = wr_cur + PAGE_SIZE; for (;;) { gsw_cmd(gsw_id, GSW_MAC_TABLE_ENTRY_READ, &read_entry); if (read_entry.bLast) { if (gsw_id == GSWIP_L) { memset(&read_entry, 0, sizeof(read_entry)); read_entry.bInitial = 1; gsw_id = GSWIP_R; continue; } else { break; } } if (read_entry.nPortId != hwp->subif.port_id || read_entry.nSubIfId != hwp->subif.subif || read_entry.bStaticEntry) continue; wr_cur += snprintf(wr_cur, wr_end - wr_cur, "%pM (GSWIP-%s)\n", read_entry.nMAC, gsw_id == GSWIP_L ? "L" : "R"); if (wr_cur >= wr_end) { wr_cur = wr_end; break; } } return wr_cur - buf; } static void hw_port_release(struct kobject *kobj) { struct hw_port *hwp = container_of(kobj, struct hw_port, kobj); hw_port_del_pce(hwp); pce_idx_free(hwp->pce_idx); kfree(hwp); } static enum offdp_rv write_pce_catchall(void) { int pce_idx; GSW_PCE_rule_t rule; int rv = OFFDP_SUCCESS; pce_idx_request(PCE_RULE_IDX_LAN_SHARED_CATCHALL); pce_idx = PCE_RULE_IDX_LAN_SHARED_CATCHALL; pr_debug("add pce catchall rule %d\n", pce_idx); memset(&rule, 0, sizeof(rule)); rule.pattern.nIndex = pce_idx; rule.pattern.bEnable = 1; /* Disable learning per default for whitelist behaviour */ rule.action.eLearningAction = GSW_PCE_ACTION_LEARNING_FORCE_NOT; if (gsw_cmd(GSWIP_L, GSW_PCE_RULE_WRITE, &rule) < GSW_statusOk) { /* please continue */ rv = OFFDP_ERR_SWAPI; } if (gsw_cmd(GSWIP_R, GSW_PCE_RULE_WRITE, &rule) < GSW_statusOk) { rv = OFFDP_ERR_SWAPI; } return rv; } static enum offdp_rv configure_globals(void) { int i, gsw; gsw_for_each (i, gsw) { GSW_cfg_t gsw_cfg = { 0 }; GSW_cfg_t gsw_cfg_m = { 0 }; /* Set the aging time to 1 second to have a somewhat smooth * migration of clients between GSWIP-L and GSWIP-R. */ gsw_cfg.eMAC_TableAgeTimer = GSW_AGETIMER_1_SEC; gsw_cfg_m.eMAC_TableAgeTimer = ~0lu; /* Enable VLAN to get FIDs (Filter Ids) */ gsw_cfg.bVLAN_Aware = 1; gsw_cfg_m.bVLAN_Aware = ~0u; if (gsw_set(gsw, GSW_CFG_GET, GSW_CFG_SET, gsw_cfg, gsw_cfg_m) < GSW_statusOk) return OFFDP_ERR_SWAPI; } return OFFDP_SUCCESS; } static enum offdp_rv configure_ports(bool enable) { int i, gsw, port; GSW_portCfg_t portcfg = { 0 }; GSW_portCfg_t portcfg_m = { 0 }; GSW_VLAN_portCfg_t vlan_port_cfg = { 0 }; GSW_VLAN_portCfg_t vlan_port_cfg_m = { 0 }; GSW_SVLAN_portCfg_t svlan_port_cfg = { 0 }; GSW_SVLAN_portCfg_t svlan_port_cfg_m = { 0 }; GSW_register_t reg = { 0 }; GSW_register_t reg_m = { 0 }; GSW_PCE_EgVLAN_Cfg_t egvlan_cfg = { 0 }; GSW_PCE_EgVLAN_Entry_t egvlan_entry = { 0 }; GSW_monitorPortCfg_t portmoncfg = { 0 }; /* Mask the members we want to change. */ portcfg_m.bLearning = ~0u; portcfg_m.bLearningMAC_PortLock = ~0u; portcfg_m.bAging = ~0u; portcfg_m.ePortMonitor = ~0u; vlan_port_cfg_m.bTVM = ~0u; svlan_port_cfg_m.eVLAN_MemberViolation = ~0u; reg_m.nData = PCE_PCTRL_3_RXDMIR | PCE_PCTRL_3_VIO_9 | PCE_PCTRL_3_IGPTRM; /* Admit dropped frames to mirror ports. * Port isolation is enforced through ad-hoc SVLAN memberships. But * the ultimate forwarding decision belongs to Linux. We use port * mirroring to redirect packets that would otherwise be dropped due * to membership violation towards the CPU. */ reg.nData |= PCE_PCTRL_3_RXDMIR; /* Always forward packets that add or change entries in the mac table * to the CPU. This allows Linux to learn about its * neighborhood. */ reg.nData |= PCE_PCTRL_3_VIO_9; /* Disable ingress port removal. * Our broadcasting domains are restricted to the CPU port, so * removing the ingress port as a possible destination is of * no advantage. By disabling the port removal, we allow * forwarding between different subifs of the same port. * Note that this should be configurable per subif group * ("vap") using the dedicated PCE_IGPTRM register. */ reg.nData |= PCE_PCTRL_3_IGPTRM; gsw_for_each (i, gsw) { for (port = 0; port < (gsw == GSWIP_L ? 8 : 16); port++) { /* Update current port */ portcfg.nPortId = port; portmoncfg.nPortId = port; vlan_port_cfg.nPortId = port; svlan_port_cfg.nPortId = port; reg.nRegAddr = PCE_PCTRL_3_PORT(port); egvlan_cfg.nPortId = port; if (!port) { /* Set CPU port as monitor port. */ portmoncfg.bMonitorPort = 1; gsw_cmd(gsw, GSW_MONITOR_PORT_CFG_SET, &portmoncfg); /* Disable learning on the CPU port */ portcfg.bLearning = 1; /* Protect local entries against spoofing. */ portcfg.bLearningMAC_PortLock = 1; /* Disable aging to keep the local entry around. */ portcfg.bAging = 0; } else { /* Filter SVLAN violations on egress. * An isolated port applies a SVLAN membership * bitmap during flow classification. Any * membership violation after that is * categorized as egress and needs to be * filtered. */ svlan_port_cfg.eVLAN_MemberViolation = GSW_VLAN_MEMBER_VIOLATION_EGRESS; /* Recover packets filtered due to port * isolation */ portcfg.ePortMonitor = GSW_PORT_MONITOR_VLAN_MEMBERSHIP; /* Enable learning (0 == don't disable) */ portcfg.bLearning = !enable; /* Enable aging */ portcfg.bAging = 1; /* Allow on-the-fly updating of entries */ portcfg.bLearningMAC_PortLock = 0; } /* Disable the port-wide "transparent vlan mode" to * allow VLAN modifications */ vlan_port_cfg.bTVM = 0; /* Get index for subif group 0 (default) */ gsw_cmd(gsw, GSW_PCE_EG_VLAN_CFG_GET, &egvlan_cfg); egvlan_entry.nIndex = egvlan_cfg.nEgStartVLANIdx; /* There is this weird quirk where unknown packets hit * index 16 if nEgStartVLANIdx == 0. There are hints * in PAE HAL that index 17 also has a special * meaning, possibly triggered by the processing * flags mpe1/2 etc. * We only consider this for port 0, as it's the only * port expected in the forwarding map for unknown * traffic. */ if (!port && !egvlan_entry.nIndex) { egvlan_entry.nIndex += 16; } /* Behave like transparent VLAN mode for group 0 */ egvlan_entry.bEgVLAN_Action = 1; gsw_cmd(gsw, GSW_PCE_EG_VLAN_ENTRY_WRITE, &egvlan_entry); /* Base the EgVLAN indexing on subif groups */ egvlan_cfg.eEgVLANmode = GSW_PCE_EG_VLAN_SUBIFID_BASED; gsw_cmd(gsw, GSW_PCE_EG_VLAN_CFG_SET, &egvlan_cfg); /* Apply settings */ gsw_set(gsw, GSW_PORT_CFG_GET, GSW_PORT_CFG_SET, portcfg, portcfg_m); gsw_set(gsw, GSW_VLAN_PORT_CFG_GET, GSW_VLAN_PORT_CFG_SET, vlan_port_cfg, vlan_port_cfg_m); if (!port) /* skip CPU port */ continue; gsw_set(gsw, GSW_SVLAN_PORT_CFG_GET, GSW_SVLAN_PORT_CFG_SET, svlan_port_cfg, svlan_port_cfg_m); if (gsw_set(gsw, GSW_REGISTER_GET, GSW_REGISTER_SET, reg, reg_m) < GSW_statusOk) { return OFFDP_ERR_SWAPI; } } } return OFFDP_SUCCESS; } static ktd_ret_t fid_alloc_test(void *data) { typeof(filterids) filterids_backup; int i; unsigned long fid; /* Save current state */ memcpy(&filterids_backup, &filterids, sizeof(filterids)); /* Reset bitfield */ memset(&filterids, 0, sizeof(filterids)); /* Allocate all filter ids. */ for (i = 0; i < filterids_num; i++) { KTD_EXPECT(fid_alloc(&fid) == OFFDP_SUCCESS); KTD_EXPECT(fid == i); } /* Expect it to fail. */ KTD_EXPECT(fid_alloc(&fid) == OFFDP_ERR_NOAVAIL); /* Free one up and try again. */ fid_free(0); KTD_EXPECT(fid_alloc(&fid) == OFFDP_SUCCESS); KTD_EXPECT(fid == 0); /* Free the whole range. */ for (i = 0; i < filterids_num; i++) fid_free(i); /* Check if the whole range is available. */ for (i = 0; i < filterids_num; i++) { KTD_EXPECT(fid_alloc(&fid) == OFFDP_SUCCESS); KTD_EXPECT(fid == i); } /* Restore previous state */ memcpy(&filterids, &filterids_backup, sizeof(filterids)); return KTD_PASSED; } enum offdp_rv offdp_backend_vep_fast_rcv_raw(unsigned long vep_handle, void *buf, unsigned long offset, unsigned long len) { dp_subif_t subif; u8 *cbm_buf, *cbm_data; int cbm_setup_desc(struct cbm_desc * desc, u32 data_ptr, u32 data_len, struct sk_buff * skb); struct pmac_tx_hdr *pmac_hdr; u8 *data; bool need_copy = false; struct cbm_desc desc = { 0 }; /* CBM API needs an SKB to pass the descriptor words */ struct sk_buff skb; struct dma_tx_desc_0 *desc_0 = (void *)&skb.DW1; struct dma_tx_desc_1 *desc_1 = (void *)&skb.DW0; data = buf; data += offset; /* Do not deal with padding. */ if (unlikely(len < ETH_ZLEN)) return OFFDP_ERR_INPUT; len += sizeof(*pmac_hdr); if (!check_ptr_validation((uint32_t)buf)) { /* TODO support through hwmemcpy cbm_buf = cbm_buffer_alloc(smp_processor_id(), 0, len); cbm_data = cbm_buf; need_copy = true; */ return OFFDP_ERR_INPUT; } else if (likely(offset >= sizeof(*pmac_hdr))) { cbm_buf = buf; cbm_data = data - sizeof(*pmac_hdr); } else { return OFFDP_ERR_INPUT; } pmac_hdr = (void *)cbm_data; subif = vep_subif; /* see comment about uses of the subinterface ID at the start of file */ subif.subif = vep_handle | (vep_handle << 8); desc_0->all = 0; desc_0->field.dest_sub_if_id = subif.subif; desc_1->all = 0; desc_1->field.ep = subif.port_id; desc_1->field.enc = 1; desc_1->field.dec = 1; desc_1->field.mpe2 = 0; desc_1->field.color = 1; memset(pmac_hdr, 0, sizeof(*pmac_hdr)); pmac_hdr->sppid = subif.port_id; pmac_hdr->port_map = 0xff; pmac_hdr->port_map2 = 0xff; pmac_hdr->class_en = 0; pmac_hdr->src_sub_inf_id2 = subif.subif & 0xf; pmac_hdr->src_sub_inf_id = (subif.subif >> 8) & 0xf; if (need_copy) memcpy(cbm_data + sizeof(*pmac_hdr), data, len - sizeof(*pmac_hdr)); cbm_setup_desc(&desc, (u32)cbm_data, len, &skb); cbm_track_send(cbm_data, subif.port_id, subif.port_id, smp_processor_id()); /* Shameful performance optimization to omit non-dirty areas. This * leverages knowledge on the QCA wifi driver, which in turn is * identified by the characteristic offset. */ if (likely(offset == 302)) dma_cache_wback((unsigned long)cbm_buf, offset); else dma_cache_wback((unsigned long)cbm_buf, offset + len); pr_debug_ratelimited( "%s: enqueue %p offset %lu buf %p desc %x %x %x %x\n", __func__, (void *)__pa(desc.desc2), offset, buf, desc.desc0, desc.desc1, desc.desc2, desc.desc3); cbm_cpu_enqueue_hw(smp_processor_id(), &desc, (void *)__pa(desc.desc2), 0); return OFFDP_SUCCESS; } enum offdp_rv offdp_backend_vep_fast_rcv(unsigned long vep_handle, struct sk_buff *skb) { dp_subif_t subif; struct dma_tx_desc_0 *desc_0 = (struct dma_tx_desc_0 *)&skb->DW0; struct dma_tx_desc_1 *desc_1 = (struct dma_tx_desc_1 *)&skb->DW1; subif = vep_subif; /* see comment about uses of the subinterface ID at the start of file */ subif.subif = vep_handle | (vep_handle << 8); skb->DW0 = skb->DW1 = skb->DW2 = skb->DW3 = 0; /* XXX superfluous? */ desc_0->field.dest_sub_if_id = subif.subif; desc_1->field.ep = subif.port_id; if (dp_xmit(skb->dev, &subif, skb, skb->len, DP_TX_NO_CLASS)) return OFFDP_ERR_FAST_RCV; return OFFDP_SUCCESS; } enum offdp_rv offdp_backend_vep_add(const struct net_device *dev, unsigned long *vep_handle) { dp_subif_t subif; if (dp_get_port_subitf_via_dev((struct net_device *)dev, &subif) == DP_SUCCESS) return OFFDP_ERR_VEP_NOT_VIRTUAL; *vep_handle = handle_alloc(subifid_pool, subifid_pool_sz); if (*vep_handle >= subifid_pool_sz) return OFFDP_ERR_VEP_LIMIT; return OFFDP_SUCCESS; } void offdp_backend_vep_remove(unsigned long vep_handle) { handle_free(vep_handle, subifid_pool); } static int32_t vep_rx_fn(struct net_device *rxif, struct net_device *txif, struct sk_buff *skb, int32_t len) { unsigned long vep_handle; const struct pmac_rx_hdr *pmac; const struct dma_rx_desc_1 *desc1; int (*next_fn)(unsigned long, struct sk_buff *skb); if (!rxif == !txif) { goto drop; } pmac = (void *)skb->data; skb_pull(skb, sizeof(struct pmac_rx_hdr)); skb_reset_mac_header(skb); skb->protocol = eth_hdr(skb)->h_proto; skb_set_network_header(skb, pmac->ip_offset); skb_reset_mac_len(skb); skb_set_transport_header(skb, skb_network_offset(skb) + pmac->tcp_h_offset * 4); desc1 = (void *)&skb->DW1; skb->ip_summed = (pmac->ver_done && !desc1->field.tcp_err) ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE; skb->csum_level = 0; if (txif) { struct dma_rx_desc_0 *desc_0 = (void *)&skb->DW0; vep_handle = desc_0->field.dest_sub_if_id & 0xf; next_fn = offdp_vep_fast_xmit; } else { vep_handle = pmac->src_sub_inf_id2; next_fn = offdp_vep_slow_rcv; } return next_fn(vep_handle, skb); drop: kfree_skb(skb); return -1; } static void register_vep(void) { int i; dp_cb_t cb; struct module *owner = THIS_MODULE; uint32_t lro_qid, classid; vep_subif.port_id = 0; vep_subif.subif = -1; init_dummy_netdev(&vep_netdev); strlcpy(vep_netdev.name, owner->name, sizeof(vep_netdev.name)); memset(&cb, 0, sizeof(cb)); cb.rx_fn = vep_rx_fn; /* Claim the DirectPath Rx Port otherwise used by PPA. */ vep_subif.port_id = dp_alloc_port(owner, NULL, 0, 0, NULL, DP_F_DIRECT); dp_register_dev(owner, vep_subif.port_id, &cb, 0); for (i = 0; i < 16; i++) { dp_subif_t subif_local = vep_subif; dp_register_subif(owner, &vep_netdev, vep_netdev.name, &subif_local, 0); } /* Prepare LRO mappings */ /* Index composition for the TMU mapping table: index = ((flow_id << 12) & 0x3000) | ((dec << 11) & 0x800) | ((enc << 10) & 0x400) | ((mpe2 << 9) & 0x200) | ((mpe1 << 8) & 0x100) | ((ep << 4) & 0xF0) | ((classid)&0x0F); */ /* Get queue mapped for EP 0 with LRO indication */ lro_qid = get_lookup_qid_via_index(0x2000); /* Add required mappings from that queue to LRO */ for (classid = 0; classid < 16; classid++) { set_lookup_qid_via_index( 0x2000 | ((vep_subif.port_id << 4) & 0xF0) | classid, lro_qid); set_lookup_qid_via_index( 0x3000 | ((vep_subif.port_id << 4) & 0xF0) | classid, lro_qid); } } enum offdp_rv offdp_backend_ep_platform_data(const struct net_device *dev, const unsigned long *vep_handle, void *data, size_t len) { dp_subif_t *subif = data; if (!data || len != sizeof(*subif)) { return OFFDP_ERR_INPUT; } if (vep_handle && dp_get_port_subitf_via_dev(&vep_netdev, subif) == DP_SUCCESS) { subif->subif = *vep_handle; /* dp_api doesn't return alloc_flag consistently across the * API. Restore it manually. */ subif->alloc_flag = DP_F_DIRECT; return OFFDP_SUCCESS; } if (dp_get_port_subitf_via_dev((struct net_device *)dev, subif) == DP_SUCCESS) return OFFDP_SUCCESS; return OFFDP_ERR_DEV_RESOLVE; } enum offdp_rv offdp_backend_init(void) { enum offdp_rv rv; hw_resources = kobject_create_and_add("backend_objects", offdp_kobj); bridges_kset = kset_create_and_add("bridges", NULL, hw_resources); ports_kset = kset_create_and_add("ports", NULL, hw_resources); /* FId 0 is the default, so we reserve it in our books. */ rv = fid_alloc(&default_fid); BUG_ON(rv != OFFDP_SUCCESS); /* Do the switch-global configuration. */ rv = configure_globals(); if (rv) return rv; /* Add a rule to disable l2 learning unless otherwise specified. */ rv = write_pce_catchall(); if (rv) return rv; test_suite = ktd_suite_create(THIS_MODULE->name); rv = ktd_register(test_suite, "l2fwd_grx_fid_allocator", fid_alloc_test, NULL); register_vep(); return rv; } void offdp_backend_exit(void) { ktd_suite_destroy(test_suite); }