// SPDX-License-Identifier: GPL-2.0 /* Copyright (C) Intel Corporation * Author: Shao Guohua */ #include #include #include #include #include #include #include "datapath.h" #include "datapath_instance.h" #include "datapath_ioctl.h" #include "datapath_swdev.h" static void dp_swdev_insert_bridge_id_entry(struct br_info *); static void dp_swdev_remove_bridge_id_entry(struct br_info *); static int dp_swdev_add_bport_to_list(struct br_info *br_item, struct bridge_id_entry_item *brid_item, struct net_device *dev); static int dp_swdev_del_bport_from_list(struct br_info *br_item, int bport); struct hlist_head g_bridge_id_entry_hash_table[BR_ID_ENTRY_HASH_TABLE_SIZE]; static struct kmem_cache *cache_br_info_list; static struct kmem_cache *cache_br_entry_list; static struct kmem_cache *cache_br_port_list; static inline u32 dp_swdev_cal_hash(const void *ptr) { return hash_long((unsigned long)ptr, BRIDGE_ID_ENTRY_HASH_LENGTH); } /* dp_kfree_br_entry - used as destructor for * switchdev_trans_item_enqueue() API */ void dp_kfree_br_entry(void const *ptr) { kmem_cache_free(cache_br_entry_list, (void *)ptr); } struct hlist_head *get_dp_g_bridge_id_entry_hash_table_info(int idx) { return &g_bridge_id_entry_hash_table[idx]; } int dp_get_fid_by_dev(struct net_device *dev, int *inst) { struct br_info *br_info; int fid = -1; if (unlikely(dp_late_init() <= 0)) { pr_err("%s failed: datapath not initialized yet\n", __func__); return DP_FAILURE; } if (!dev || !inst) { pr_err("%s failed: dev or inst is NULL\n", __func__); return DP_FAILURE; } rcu_read_lock(); br_info = dp_swdev_bridge_entry_lookup_rcu(dev); if (br_info) { *inst = br_info->inst; fid = br_info->fid; } rcu_read_unlock(); return fid; } EXPORT_SYMBOL(dp_get_fid_by_dev); int dp_swdev_chk_bport_in_br(struct net_device *dev, int bport, int inst) { struct bridge_member_port *bmp; struct net_device *br_dev; struct br_info *br_info; br_dev = netdev_master_upper_dev_get(dev); if (!br_dev) return DP_FAILURE; rcu_read_lock(); br_info = dp_swdev_bridge_entry_lookup_rcu(br_dev); if (!br_info) goto ERR; list_for_each_entry (bmp, &br_info->bp_list, list) { if (bmp->bportid == bport) { DP_DEBUG(DP_DBG_FLAG_SWDEV, "bport(%s) in bridge\n", dev->name ? dev->name : "NULL"); rcu_read_unlock(); return DP_SUCCESS; } } ERR: DP_DEBUG(DP_DBG_FLAG_SWDEV, "bport(%s) not in bridge\n", dev->name ? dev->name : "NULL"); rcu_read_unlock(); return DP_FAILURE; } static LIST_HEAD(persist_bridge_list); /** * dp_swdev_bridge_lookup_rcu - Fetches the persistent bridge shadow state. * @br_dev: The struct net_device of the bridge. */ static struct persist_bridge * dp_persist_bridge_lookup_rcu(struct net_device *br_dev) { struct persist_bridge *br; list_for_each_entry_rcu (br, &persist_bridge_list, list) { if (br->dev == br_dev) return br; } return NULL; } /** * dp_persist_bridge_add_rcu() - Registers the persistent bridge shadow state. * @br_dev: The struct net_device of the bridge. * * This is used internally when calling dp_persist_bridge_port_add_rcu() * for an unregistered bridge device. */ static struct persist_bridge * dp_persist_bridge_add_rcu(struct net_device *br_dev, const struct persist_bridge *br_init) { struct persist_bridge *br; br = kzalloc(sizeof(*br), GFP_ATOMIC); if (!br) return NULL; *br = *br_init; br->dev = br_dev; INIT_LIST_HEAD(&br->bp_list); list_add_rcu(&br->list, &persist_bridge_list); return br; } /** * dp_persist_bridge_del_rcu() - Removes the persistent bridge shadow state. * @br: The persistent bridge shadow state to remove. 
 *
 * This is used internally when calling dp_persist_bridge_port_del_rcu()
 * for the last registered bridge device.
 */
static void dp_persist_bridge_del_rcu(struct persist_bridge *br)
{
	list_del_rcu(&br->list);
	kfree(br);
}

/**
 * dp_persist_bridge_port_lookup_by_bridge_rcu() - Fetches the persistent bridge port shadow state.
 * @pb: The persistent bridge shadow state.
 * @dev: The struct net_device of the bridge port.
 */
static struct persist_bridge_port *
dp_persist_bridge_port_lookup_by_bridge_rcu(struct persist_bridge *pb,
					    struct net_device *dev)
{
	struct persist_bridge_port *pbp;

	list_for_each_entry_rcu (pbp, &pb->bp_list, list) {
		if (pbp->dev == dev)
			return pbp;
	}
	return NULL;
}

/**
 * dp_persist_bridge_port_lookup_rcu() - Fetches the persistent bridge port shadow state.
 * @br_dev: The struct net_device of the bridge.
 * @dev: The struct net_device of the bridge port.
 */
static struct persist_bridge_port *
dp_persist_bridge_port_lookup_rcu(struct net_device *br_dev,
				  struct net_device *dev)
{
	struct persist_bridge *pb;

	pb = dp_persist_bridge_lookup_rcu(br_dev);
	if (!pb)
		return NULL;
	return dp_persist_bridge_port_lookup_by_bridge_rcu(pb, dev);
}

/**
 * dp_persist_bridge_port_add_rcu() - Registers the persistent bridge port shadow state.
 * @br_dev: The struct net_device of the bridge.
 * @br_init: The default initial state of the bridge.
 * @dev: The struct net_device of the bridge port.
 * @port_init: The default initial state of the bridge port.
 *
 * When called on a bridge device with no shadow state, the state is
 * automatically initialized by calling dp_persist_bridge_add_rcu().
 */
struct persist_bridge_port *
dp_persist_bridge_port_add_rcu(struct net_device *br_dev,
			       const struct persist_bridge *br_init,
			       struct net_device *dev,
			       const struct persist_bridge_port *port_init)
{
	struct persist_bridge *pb;
	struct persist_bridge_port *pbp;
	bool br_found;
	int ret;

	pb = dp_persist_bridge_lookup_rcu(br_dev);
	br_found = pb;
	if (!br_found)
		pb = dp_persist_bridge_add_rcu(br_dev, br_init);
	if (!pb)
		return ERR_PTR(-ENOMEM);

	pbp = dp_persist_bridge_port_lookup_by_bridge_rcu(pb, dev);
	if (pbp) {
		ret = -EEXIST;
		goto fail;
	}

	pbp = kzalloc(sizeof(*pbp), GFP_ATOMIC);
	if (!pbp) {
		ret = -ENOMEM;
		goto fail;
	}
	*pbp = *port_init;
	pbp->dev = dev;
	list_add_rcu(&pbp->list, &pb->bp_list);
	return pbp;

fail:
	if (!br_found)
		dp_persist_bridge_del_rcu(pb);
	return ERR_PTR(ret);
}

/**
 * dp_persist_bridge_port_del_rcu() - Removes the persistent bridge port shadow state.
 * @br_dev: The struct net_device of the bridge.
 * @dev: The struct net_device of the bridge port.
 *
 * When called on the last bridge port of a bridge device, the shadow state
 * is automatically cleaned up by calling dp_persist_bridge_del_rcu().
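 *
 * A minimal caller sketch (hypothetical; assumes the caller serializes
 * add/del against other writers, e.g. by holding dp_lock):
 *
 *	if (dp_persist_bridge_port_del_rcu(br_dev, dev))
 *		pr_debug("no persist state for %s\n", dev->name);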
*/ int dp_persist_bridge_port_del_rcu(struct net_device *br_dev, struct net_device *dev) { struct persist_bridge *pb; struct persist_bridge_port *pbp; pb = dp_persist_bridge_lookup_rcu(br_dev); if (!pb) return -ENODEV; pbp = dp_persist_bridge_port_lookup_by_bridge_rcu(pb, dev); if (!pbp) return -ENODEV; list_del_rcu(&pbp->list); kfree(pbp); if (list_empty(&pb->bp_list)) dp_persist_bridge_del_rcu(pb); return 0; } struct br_info *dp_swdev_bridge_entry_lookup_rcu(struct net_device *dev) { struct br_info *br_item; struct hlist_head *head; u32 idx; idx = dp_swdev_cal_hash(dev); head = get_dp_g_bridge_id_entry_hash_table_info(idx); hlist_for_each_entry_rcu (br_item, head, br_hlist) { if (br_item->dev == dev) { DP_DEBUG(DP_DBG_FLAG_SWDEV, "Hash entry found(%s)\n", dev->name); return br_item; } } DP_DEBUG(DP_DBG_FLAG_SWDEV, "Hash entry not found(%s)\n", dev->name); return NULL; } static void dp_swdev_rcu_free(struct rcu_head *head) { struct br_info *br_item = container_of(head, struct br_info, rcu_head); kmem_cache_free(cache_br_info_list, br_item); } static void dp_swdev_remove_bridge_id_entry(struct br_info *br_item) { DP_DEBUG(DP_DBG_FLAG_SWDEV, "Hash del(%s)\n", br_item->dev->name); hlist_del_rcu(&br_item->br_hlist); call_rcu(&br_item->rcu_head, dp_swdev_rcu_free); } static void dp_swdev_insert_bridge_id_entry(struct br_info *br_item) { struct hlist_head *head; u32 idx; idx = dp_swdev_cal_hash(br_item->dev); head = get_dp_g_bridge_id_entry_hash_table_info(idx); hlist_add_head_rcu(&br_item->br_hlist, head); DP_DEBUG(DP_DBG_FLAG_SWDEV, "Hash added idx:%d bridge(%s)\n", idx, br_item->dev->name); } void dp_swdev_bridge_id_entry_free(void) { struct hlist_node *node; struct hlist_head *head; struct br_info *br_item; u32 idx; for (idx = 0; idx < BR_ID_ENTRY_HASH_TABLE_SIZE; idx++) { head = get_dp_g_bridge_id_entry_hash_table_info(idx); hlist_for_each_entry_safe (br_item, node, head, br_hlist) { dp_swdev_remove_bridge_id_entry(br_item); } } kmem_cache_destroy(cache_br_port_list); kmem_cache_destroy(cache_br_entry_list); kmem_cache_destroy(cache_br_info_list); } int dp_swdev_bridge_id_entry_init(void) { int i; for (i = 0; i < BR_ID_ENTRY_HASH_TABLE_SIZE; i++) INIT_HLIST_HEAD(&g_bridge_id_entry_hash_table[i]); cache_br_info_list = kmem_cache_create("dp_br_info_list", sizeof(struct br_info), 0, SLAB_HWCACHE_ALIGN, NULL); if (!cache_br_info_list) return DP_FAILURE; cache_br_entry_list = kmem_cache_create("dp_br_entry_list", sizeof(struct bridge_id_entry_item), 0, SLAB_HWCACHE_ALIGN, NULL); if (!cache_br_entry_list) goto ERR; cache_br_port_list = kmem_cache_create("dp_br_port_list", sizeof(struct bridge_member_port), 0, SLAB_HWCACHE_ALIGN, NULL); if (!cache_br_port_list) goto ERR; return DP_SUCCESS; ERR: kmem_cache_destroy(cache_br_entry_list); kmem_cache_destroy(cache_br_info_list); return DP_FAILURE; } static int dp_swdev_del_bport_from_list(struct br_info *br_item, int bport) { struct bridge_member_port *bmp; int found = 0; list_for_each_entry (bmp, &br_item->bp_list, list) { if (bmp->bportid == bport) { found = 1; break; } } if (found) { list_del(&bmp->list); kmem_cache_free(cache_br_port_list, bmp); return DP_SUCCESS; } return DP_FAILURE; } static int dp_swdev_add_bport_to_list(struct br_info *br_item, struct bridge_id_entry_item *brid_item, struct net_device *dev) { struct bridge_member_port *bmp; int found = 0; list_for_each_entry (bmp, &br_item->bp_list, list) { if (bmp->bportid == brid_item->bportid) { found = 1; break; } } if (!found) { struct persist_bridge_port *pbp; pbp = 
dp_persist_bridge_port_lookup_by_bridge_rcu(br_item->persist, dev); if (WARN_ON(!pbp)) return DP_FAILURE; bmp = kmem_cache_zalloc(cache_br_port_list, GFP_ATOMIC); if (!bmp) return DP_FAILURE; INIT_LIST_HEAD(&bmp->bport_vlan_list); bmp->dev_priv_flag = brid_item->priv_flag; bmp->dev_reg_flag = br_item->flag; bmp->bportid = brid_item->bportid; bmp->dp_port = br_item->dp_port; bmp->dev = dev; bmp->persist = pbp; DP_DEBUG(DP_DBG_FLAG_SWDEV, "bport:%d reg_flag:%d priv_flag:0x%x\n", bmp->bportid, bmp->dev_reg_flag, bmp->dev_priv_flag); list_add(&bmp->list, &br_item->bp_list); } return DP_SUCCESS; } static int dp_swdev_clr_gswip_cfg(struct bridge_id_entry_item *br_item, u8 *addr, struct net_device *dev) { struct pmac_port_info *p_info; struct inst_info *i_info; struct br_info *br_info; struct persist_bridge_port *pbp; int ret; if (br_item->flags == BRIDGE_NO_ACTION) { DP_DEBUG(DP_DBG_FLAG_SWDEV, "bport not added so no action required\n"); return DP_SUCCESS; } br_info = dp_swdev_bridge_entry_lookup_rcu(br_item->dev); if (!br_info) goto EXIT; if (dp_swdev_del_bport_from_list(br_info, br_item->bportid)) goto EXIT; p_info = get_dp_port_info(br_item->inst, br_item->dp_port); i_info = &dp_port_prop[br_item->inst].info; /* dev is the bridge port member dev */ i_info->dp_reg_bport_vlan(br_info, dev, br_item->bportid, DP_OPS_RESET); pbp = dp_persist_bridge_port_lookup_rcu(br_info->dev, dev); if (WARN_ON(!pbp)) goto EXIT; ret = i_info->swdev_bridge_port_cfg_reset(br_info, br_item->inst, br_item->bportid, !!(pbp->flags & DP_PORT_HAIRPIN)); if (ret == DEL_BRENTRY) { i_info->swdev_free_brcfg(br_item->inst, br_item->fid); i_info->dp_reg_br_vlan(br_info, DP_OPS_RESET); dp_swdev_remove_bridge_id_entry(br_info); DP_DEBUG(DP_DBG_FLAG_SWDEV, "last bport(%d) to remove from bridge(%s)\n", br_item->bportid, br_item->dev->name); } EXIT: DP_DEBUG(DP_DBG_FLAG_SWDEV, "remove bport(%d)\n", br_item->bportid); return DP_SUCCESS; } static int dp_swdev_cfg_vlan(struct bridge_id_entry_item *br_item, struct net_device *dev) { struct pmac_port_info *p_info; struct dp_bp_attr conf = { 0 }; struct inst_info *i_info; struct dp_dev *dp_dev; int vap; p_info = get_dp_port_info(br_item->inst, br_item->dp_port); if (!p_info->swdev_en) return DP_SUCCESS; if (br_item->flags & LOGIC_DEV_REGISTER) { dp_dev = dp_dev_lookup(dev); if (!dp_dev) { pr_err("%s dp_dev is NULL\n", __func__); /* Cannot return -1 from here as this fn is * called by swdev commit phase */ return DP_SUCCESS; } p_info = get_dp_port_info(br_item->inst, dp_dev->ep); vap = GET_VAP(dp_dev->ctp, p_info->vap_offset, p_info->vap_mask); DP_DEBUG(DP_DBG_FLAG_SWDEV, "vap=%d ep=%d bp=%d\n", vap, dp_dev->ep, dp_dev->bp); i_info = get_dp_prop_info(br_item->inst); if (br_item->flags & CPU_PORT_DISABLE) { /* For vUNI vlan dev need to remove CPU * from bp member list */ conf.inst = br_item->inst; conf.dev = dev; i_info->dp_set_bp_attr(&conf, br_item->bportid, 0); } i_info->dp_cfg_vlan(br_item->inst, vap, dp_dev->ep); } return DP_SUCCESS; } static int dp_swdev_filter_vlan(struct net_device *dev, const struct switchdev_obj *obj, struct switchdev_trans *trans, struct net_device *br_dev) { struct bridge_id_entry_item *br_item; struct br_info *br_info; int ret = -EOPNOTSUPP; dp_subif_t subif; if (switchdev_trans_ph_prepare(trans)) { if (dp_get_netif_subifid(dev, NULL, NULL, NULL, &subif, 0)) { pr_err("%s get subif fail, dev (%s) not registered\n", __func__, dev->name); return ret; } br_item = kmem_cache_zalloc(cache_br_entry_list, GFP_ATOMIC); if (!br_item) return ret; br_item->bportid = 
subif.bport; br_item->inst = subif.inst; rcu_read_lock(); br_info = dp_swdev_bridge_entry_lookup_rcu(br_dev); if (br_info) { br_item->dev = br_info->dev; br_item->fid = br_info->fid; } else { rcu_read_unlock(); kmem_cache_free(cache_br_entry_list, br_item); return ret; } switchdev_trans_item_enqueue(trans, br_item, dp_kfree_br_entry, &br_item->tritem); rcu_read_unlock(); return DP_SUCCESS; } br_item = switchdev_trans_item_dequeue(trans); DP_LIB_LOCK(&dp_lock); if (br_item) { dp_swdev_cfg_vlan(br_item, dev); kmem_cache_free(cache_br_entry_list, br_item); } DP_LIB_UNLOCK(&dp_lock); return DP_SUCCESS; } static int dp_swdev_cfg_gswip(struct bridge_id_entry_item *br_item, struct net_device *dev) { struct inst_info *i_info; struct br_info *br_info; int ret; i_info = get_dp_prop_info(br_item->inst); DP_DEBUG(DP_DBG_FLAG_SWDEV, "britem flags:%x\n", br_item->flags); if (br_item->flags & ADD_BRENTRY) { struct persist_bridge *pb; DP_DEBUG(DP_DBG_FLAG_SWDEV, "Add bridge entry %s\n", br_item->dev->name); pb = dp_persist_bridge_lookup_rcu(br_item->dev); if (WARN_ON(!pb)) return -ENODEV; br_info = kmem_cache_zalloc(cache_br_info_list, GFP_ATOMIC); if (!br_info) /* Cannot return failure in switchdev commit */ return DP_SUCCESS; br_info->fid = br_item->fid; br_info->inst = br_item->inst; br_info->dp_port = br_item->dp_port; br_info->persist = pb; /* Logic dev flag added to verify if SWDEV registered * the logical i.e. VLAN device.Helpful during * br/bport delete */ if (br_item->flags & LOGIC_DEV_REGISTER) br_info->flag = LOGIC_DEV_REGISTER; br_info->dev = br_item->dev; INIT_LIST_HEAD(&br_info->bp_list); INIT_LIST_HEAD(&br_info->br_vlan_list); ret = i_info->swdev_bridge_cfg_set(br_info); if (ret) { kmem_cache_free(cache_br_info_list, br_info); DP_DEBUG(DP_DBG_FLAG_SWDEV, "No configuration, please check!\n"); return DP_SUCCESS; } dp_swdev_insert_bridge_id_entry(br_info); /* br_info->dev is the bridge device */ i_info->dp_reg_br_vlan(br_info, 0); /* dev is the bridge device */ i_info->dp_reg_bport_vlan(br_info, dev, br_item->bportid, 0); dp_swdev_add_bport_to_list(br_info, br_item, dev); i_info->swdev_bridge_port_cfg_set(br_info, br_item->inst, br_item->bportid); br_item->flags &= ~ADD_BRENTRY; DP_DEBUG(DP_DBG_FLAG_SWDEV, "added bport(%d),bridge(%s)\n", br_item->bportid, br_item->dev->name); return DP_SUCCESS; } else { rcu_read_lock(); br_info = dp_swdev_bridge_entry_lookup_rcu(br_item->dev); if (!br_info) goto EXIT; br_info->flag = 0; br_info->dp_port = br_item->dp_port; if (br_item->flags & LOGIC_DEV_REGISTER) br_info->flag = LOGIC_DEV_REGISTER; /* dev is the bridge port device */ i_info->dp_reg_bport_vlan(br_info, dev, br_item->bportid, 0); dp_swdev_add_bport_to_list(br_info, br_item, dev); i_info->swdev_bridge_port_cfg_set(br_info, br_item->inst, br_item->bportid); DP_DEBUG(DP_DBG_FLAG_SWDEV, "added bport(%d)\n", br_item->bportid); EXIT: rcu_read_unlock(); return 0; } DP_DEBUG(DP_DBG_FLAG_SWDEV, "No configuration, please check!\n"); return DP_SUCCESS; } static int dp_del_swdev_info(struct bridge_id_entry_item *br_item, struct net_device *dev, u8 *addr) { struct pmac_port_info *p_info; struct net_device *base; dp_subif_t subif; if (br_item) { dp_swdev_clr_gswip_cfg(br_item, addr, dev); /* De-register logical dev i.e. 
VLAN DEV * if it is registered */ if (!(br_item->flags & LOGIC_DEV_REGISTER)) return DP_SUCCESS; base = get_base_dev(dev, -1); if (!base) base = dev; DP_DEBUG(DP_DBG_FLAG_SWDEV, "base dev name:%s\n", base->name); dp_swdev_cfg_vlan(br_item, dev); /* Handles de-registration of vlan dev created during add_if */ if (dp_get_netif_subifid(base, NULL, NULL, NULL, &subif, 0)) { pr_err("%s get subif fail for %s\n", __func__, base->name); /* Cannot return -EOPNOTSUPP in swdev commit stage */ return DP_SUCCESS; } p_info = get_dp_port_info(subif.inst, subif.port_id); DP_LIB_UNLOCK(&dp_lock); /* Release dp_lock here since dp_register_subif will be acquiring the dp_lock. Need to set DP_F_SUBIF_LOGICAL flag to avoid looping in dp_inst_del_dev */ if (dp_register_subif(p_info->owner, dev, dev->name, &subif, DP_F_DEREGISTER | DP_F_SUBIF_LOGICAL)) { pr_err("%s de-register subif-%s fail\n", __func__, dev->name); } DP_LIB_LOCK(&dp_lock); } return DP_SUCCESS; } static int dp_swdev_add_if(struct net_device *dev, const struct switchdev_attr *attr, struct switchdev_trans *trans, struct net_device *br_dev) { struct bridge_id_entry_item *br_item; struct pmac_port_info *p_info; struct dp_subif_info *sif; struct inst_info *i_info; struct br_info *br_info; struct net_device *base; u32 flag = 0, vap; dp_subif_t subif; int br_id; if (dp_get_netif_subifid(dev, NULL, NULL, NULL, &subif, 0)) { DP_DEBUG(DP_DBG_FLAG_SWDEV, "Get subifid failed for %s, check if vlan dev\n", dev->name); /* Check bridge port exists otherwise register * device with datapath i.e. only in case of new * VLAN interface */ if (!is_vlan_dev(dev)) return -EOPNOTSUPP; base = get_base_dev(dev, -1); if (!base) base = dev; DP_DEBUG(DP_DBG_FLAG_SWDEV, "base dev name:%s\n", base->name); if (dp_get_netif_subifid(base, NULL, NULL, NULL, &subif, 0)) { pr_err("%s get subif fail for %s\n", __func__, base->name); return -EOPNOTSUPP; } subif.subif = -1; p_info = get_dp_port_info(subif.inst, subif.port_id); if (dp_register_subif(p_info->owner, dev, dev->name, &subif, DP_F_SUBIF_LOGICAL)) { pr_err("%s register subif-%s fail\n", __func__, dev->name); return -EOPNOTSUPP; } flag = LOGIC_DEV_REGISTER; DP_DEBUG(DP_DBG_FLAG_SWDEV, "registered subif,bp=%d port=%d\n", subif.bport, subif.port_id); } p_info = get_dp_port_info(subif.inst, subif.port_id); i_info = &dp_port_prop[subif.inst].info; vap = GET_VAP(subif.subif, p_info->vap_offset, p_info->vap_mask); sif = get_dp_port_subif(p_info, vap); /* SWITCHDEV_TRANS_PREPARE phase */ if (switchdev_trans_ph_prepare(trans)) { br_item = kmem_cache_zalloc(cache_br_entry_list, GFP_ATOMIC); if (!br_item) /*need to check dequeue if no memory*/ return -ENOMEM; br_item->dp_port = subif.port_id; br_item->bportid = subif.bport; br_item->inst = subif.inst; /* dev->priv_flags should be set only for base dev */ if (!(flag & LOGIC_DEV_REGISTER)) br_item->priv_flag = dev->priv_flags; if (!sif->cpu_port_en) br_item->flags |= CPU_PORT_DISABLE; rcu_read_lock(); br_info = dp_swdev_bridge_entry_lookup_rcu(br_dev); if (br_info) { br_item->dev = br_info->dev; br_item->fid = br_info->fid; br_item->flags |= flag; } else { br_item->flags |= ADD_BRENTRY | flag; if (sif->swdev_en == 1) { br_id = i_info->swdev_alloc_bridge_id(br_item->inst); if (br_id) { /* Store bridge information to add in * the table. 
This info is used during * switchdev commit phase */ br_item->dev = br_dev; br_item->fid = br_id; } else { pr_err("Switch config failed\n"); kmem_cache_free(cache_br_entry_list, br_item); rcu_read_unlock(); return -EOPNOTSUPP; } } else { br_item->dev = br_dev; } } rcu_read_unlock(); switchdev_trans_item_enqueue(trans, br_item, dp_kfree_br_entry, &br_item->tritem); return DP_SUCCESS; } /* Configure switch in commit phase and it cannot return failure*/ br_item = switchdev_trans_item_dequeue(trans); if (br_item) { DP_LIB_LOCK(&dp_lock); if (sif->swdev_en) dp_swdev_cfg_gswip(br_item, dev); if (br_item->flags & LOGIC_DEV_REGISTER) dp_swdev_cfg_vlan(br_item, dev); DP_LIB_UNLOCK(&dp_lock); kmem_cache_free(cache_br_entry_list, br_item); } return DP_SUCCESS; } /* flags = DP_SWDEV_BRIDGE_ITEM - britem pointer is valid * flags = 0 - trans pointer is valid */ static int dp_swdev_del_if(struct net_device *dev, struct switchdev_trans *trans, struct net_device *br_dev, struct bridge_id_entry_item *britem, int flags) { struct bridge_id_entry_item *br_item; struct bridge_member_port *bmp; struct pmac_port_info *p_info; struct dp_subif_info *sif; struct br_info *br_info; dp_subif_t subif; u8 *addr; int vap; /* Get current BR_PORT ID from DP */ if (dp_get_netif_subifid(dev, NULL, NULL, NULL, &subif, 0)) { pr_err("%s get subif failed for %s\n", __func__, dev->name); /* cannot return DP_FAILURE since for VLAN case device * registration is done after this */ return -EOPNOTSUPP; } if (switchdev_trans_ph_prepare(trans)) { br_item = kmem_cache_zalloc(cache_br_entry_list, GFP_ATOMIC); if (!br_item) /* TODO need to check dequeue if no memory */ return -ENOMEM; rcu_read_lock(); br_info = dp_swdev_bridge_entry_lookup_rcu(br_dev); if (br_info) { br_item->flags = BRIDGE_NO_ACTION; br_item->dp_port = subif.port_id; br_item->bportid = subif.bport; br_item->dev = br_info->dev; br_item->fid = br_info->fid; br_item->inst = subif.inst; list_for_each_entry (bmp, &br_info->bp_list, list) { if (bmp->bportid == br_item->bportid) { br_item->flags = bmp->dev_reg_flag; break; } } } else { /* Check if dev is vlan_dev, * de-register vlan device with datapath */ if (is_vlan_dev(dev)) { br_item->flags = LOGIC_DEV_REGISTER | BRIDGE_NO_ACTION; br_item->dp_port = subif.port_id; br_item->inst = subif.inst; } else { DP_DEBUG(DP_DBG_FLAG_SWDEV, "No bridge entry or bp to delete\r\n"); br_item->flags = BRIDGE_NO_ACTION; } } rcu_read_unlock(); /*check return value & enqueue*/ switchdev_trans_item_enqueue(trans, br_item, dp_kfree_br_entry, &br_item->tritem); return DP_SUCCESS; } p_info = get_dp_port_info(subif.inst, subif.port_id); vap = GET_VAP(subif.subif, p_info->vap_offset, p_info->vap_mask); sif = get_dp_port_subif(p_info, vap); addr = (u8 *)dev->dev_addr; if (flags == DP_SWDEV_BRIDGE_ITEM) br_item = britem; else /* Configure switch in commit phase & * it cannot return failure */ br_item = switchdev_trans_item_dequeue(trans); if (sif->swdev_en) dp_del_swdev_info(br_item, dev, addr); if (flags != DP_SWDEV_BRIDGE_ITEM) kmem_cache_free(cache_br_entry_list, br_item); return DP_SUCCESS; } int dp_del_br_if(struct net_device *dev, struct net_device *br_dev, int inst, int bport) { struct bridge_id_entry_item br_item; struct bridge_member_port *bmp; struct br_info *br_info; rcu_read_lock(); br_info = dp_swdev_bridge_entry_lookup_rcu(br_dev); if (br_info) { br_item.dev = br_info->dev; br_item.fid = br_info->fid; br_item.bportid = bport; br_item.inst = inst; list_for_each_entry (bmp, &br_info->bp_list, list) { if (bmp->bportid == br_item.bportid) { 
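				/* Reuse the flags recorded when this bridge
				 * port was added (e.g. whether a logical VLAN
				 * device was registered for it), so the delete
				 * path below can undo what the add path did.
				 */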
br_item.flags = bmp->dev_reg_flag; break; } } } else { if (is_vlan_dev(dev)) { br_item.inst = inst; br_item.flags = LOGIC_DEV_REGISTER | BRIDGE_NO_ACTION; } else { DP_DEBUG(DP_DBG_FLAG_SWDEV, "No bridge entry or bport to delete\r\n"); br_item.flags = BRIDGE_NO_ACTION; } } rcu_read_unlock(); return dp_swdev_del_if(dev, NULL, br_dev, &br_item, DP_SWDEV_BRIDGE_ITEM); } static void dp_set_bp_flags(unsigned long flags, struct persist_bridge_port *pbp) { pbp->flags &= ~(DP_PORT_ISOLATED | DP_PORT_MULTICAST_TO_UNICAST); if (!(pbp->flags & DP_PORT_HAIRPIN_WLAN_WORKAROUND)) { pbp->flags &= ~DP_PORT_HAIRPIN; pbp->flags |= flags & BR_HAIRPIN_MODE ? DP_PORT_HAIRPIN : 0; } pbp->flags |= flags & BR_ISOLATED ? DP_PORT_ISOLATED : 0; pbp->flags |= flags & BR_MULTICAST_TO_UNICAST ? DP_PORT_MULTICAST_TO_UNICAST : 0; DP_DEBUG(DP_DBG_FLAG_SWDEV, "%s: dev=%s flags=0x%08x\n", __func__, pbp->dev->name, pbp->flags); } static int dp_swdev_port_attr_bridge_flags_set(struct net_device *dev, unsigned long flags, struct switchdev_trans *trans) { struct inst_info *i_info; dp_subif_t subif; struct net_device *br_dev; struct br_info *br_info; struct persist_bridge_port *pbp; int ret = DP_SUCCESS; br_dev = netdev_master_upper_dev_get(dev); if (!br_dev) return -1; if (switchdev_trans_ph_prepare(trans)) return DP_SUCCESS; if (dp_get_netif_subifid(dev, NULL, NULL, NULL, &subif, 0)) { DP_DEBUG(DP_DBG_FLAG_SWDEV, "dp_get_netif_subifid failed for %s\n", dev->name); return -EINVAL; } i_info = &dp_port_prop[subif.inst].info; if (!i_info->swdev_bridge_cfg_set || !i_info->swdev_bridge_port_flags_set) return -EOPNOTSUPP; DP_LIB_LOCK(&dp_lock); pbp = dp_persist_bridge_port_lookup_rcu(br_dev, dev); if (WARN_ON(!pbp)) goto fail; dp_set_bp_flags(flags, pbp); br_info = dp_swdev_bridge_entry_lookup_rcu(br_dev); /* Skip reconfiguring the bridge if it is currently down. */ if (br_info) ret = i_info->swdev_bridge_port_flags_set(br_info, subif.inst, subif.bport, flags); if (br_info && !ret) ret = i_info->swdev_bridge_cfg_set(br_info); fail: DP_LIB_UNLOCK(&dp_lock); return ret; } static int dp_swdev_port_attr_bridge_flags_get(struct net_device *dev, unsigned long *flags) { struct inst_info *i_info; dp_subif_t subif; struct net_device *br_dev; struct persist_bridge_port *pbp; br_dev = netdev_master_upper_dev_get(dev); if (!br_dev) return -1; if (dp_get_netif_subifid(dev, NULL, NULL, NULL, &subif, 0)) { pr_err("%s get subif failed for %s\n", __func__, dev->name); return -EINVAL; } rcu_read_lock(); pbp = dp_persist_bridge_port_lookup_rcu(br_dev, dev); if (!WARN_ON(!pbp)) { *flags |= (pbp->flags & DP_PORT_HAIRPIN) && !(pbp->flags & DP_PORT_HAIRPIN_WLAN_WORKAROUND) ? BR_HAIRPIN_MODE : 0; *flags |= (pbp->flags & DP_PORT_ISOLATED) ? BR_ISOLATED : 0; *flags |= (pbp->flags & DP_PORT_MULTICAST_TO_UNICAST) ? 
			BR_MULTICAST_TO_UNICAST : 0;
	}
	rcu_read_unlock();

	i_info = &dp_port_prop[subif.inst].info;
	if (i_info->swdev_bridge_port_flags_get)
		return i_info->swdev_bridge_port_flags_get(subif.inst,
							   subif.bport, flags);
	return -EOPNOTSUPP;
}

static int dp_swdev_port_attr_learning_limit_set(struct net_device *dev,
						 int learning_limit,
						 struct switchdev_trans *trans)
{
	struct inst_info *i_info;
	dp_subif_t subif;

	/* If we are in the preparation phase do not execute the command */
	if (switchdev_trans_ph_prepare(trans))
		return DP_SUCCESS;
	if (dp_get_netif_subifid(dev, NULL, NULL, NULL, &subif, 0)) {
		DP_DEBUG(DP_DBG_FLAG_SWDEV, "Get subif failed for %s\n",
			 dev->name);
		return -EINVAL;
	}
	i_info = &dp_port_prop[subif.inst].info;
	if (!i_info->swdev_port_learning_limit_set)
		return -EOPNOTSUPP;
	/* The maximum limit supported by GSWIP 3.1 is 254 entries */
	if (learning_limit > i_info->cap.max_num_learn_limit_port) {
		netdev_err(dev, "mac learning limit (%i) too high, max: %i\n",
			   learning_limit,
			   i_info->cap.max_num_learn_limit_port);
		return -EINVAL;
	}
	return i_info->swdev_port_learning_limit_set(subif.inst, subif.bport,
						     learning_limit);
}

static int
dp_swdev_port_attr_avm_flood_ratelimit_set(struct net_device *br_dev,
					   int flood_ratelimit,
					   struct switchdev_trans *trans)
{
	struct br_info *br_info;
	struct persist_bridge *pb;
	struct inst_info *i_info;
	int ret;

	/* If we are in the preparation phase do not execute the command */
	if (switchdev_trans_ph_prepare(trans))
		return DP_SUCCESS;
	pb = dp_persist_bridge_lookup_rcu(br_dev);
	if (!pb)
		return -ENODEV;
	pb->avm_flood_ratelimit = flood_ratelimit;
	/* Skip reconfiguring the bridge if it is currently down. */
	br_info = dp_swdev_bridge_entry_lookup_rcu(br_dev);
	if (!br_info)
		return DP_SUCCESS;
	i_info = get_dp_prop_info(br_info->inst);
	ret = i_info->swdev_bridge_cfg_set(br_info);
	if (ret)
		return ret;
	return DP_SUCCESS;
}

static int dp_swdev_port_attr_set(struct net_device *dev,
				  const struct switchdev_attr *attr,
				  struct switchdev_trans *trans)
{
	struct net_device *br_dev;
	int ret = -EOPNOTSUPP;

	/* Find the associated bridge. BRIDGE-flags pass the bridge device
	 * directly, while PORT-flags pass the slave device.
	 */
	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
	case SWITCHDEV_ATTR_ID_BRIDGE_AVM_FLOOD_RATELIMIT:
		br_dev = attr->orig_dev;
		break;
	default:
		br_dev = netdev_master_upper_dev_get(attr->orig_dev);
		break;
	}
	if (!br_dev)
		return ret;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		if (attr->u.stp_state == BR_STATE_FORWARDING) {
			/* STP state forwarding or ifconfig UP - add bridge */
			ret = dp_swdev_add_if(attr->orig_dev, attr, trans,
					      br_dev);
		} else if (attr->u.stp_state == BR_STATE_DISABLED) {
			/* STP state disabled or ifconfig DOWN - del bridge */
			DP_LIB_LOCK(&dp_lock);
			ret = dp_swdev_del_if(attr->orig_dev, trans, br_dev,
					      NULL, 0);
			DP_LIB_UNLOCK(&dp_lock);
		}
		break;
	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
		if (attr->u.brport_flags & ~(BR_LEARNING | BR_HAIRPIN_MODE |
					     BR_ISOLATED |
					     BR_MULTICAST_TO_UNICAST))
			ret = -EINVAL;
		else
			ret = 0;
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		ret = dp_swdev_port_attr_bridge_flags_set(dev,
							  attr->u.brport_flags,
							  trans);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		/* ret = dp_swdev_port_attr_bridge_ageing_time_set(
		 *		dp_swdev_port,
		 *		attr->u.ageing_time,
		 *		trans);
		 */
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		/* ret = dp_swdev_port_attr_bridge_br_vlan_set(dev,
		 *		attr->orig_dev, trans);
		 */
		break;
	case SWITCHDEV_ATTR_ID_PORT_LEARNING_LIMIT:
		ret = dp_swdev_port_attr_learning_limit_set(dev,
							    attr->u.learning_limit,
							    trans);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AVM_FLOOD_RATELIMIT:
		DP_LIB_LOCK(&dp_lock);
		ret = dp_swdev_port_attr_avm_flood_ratelimit_set(br_dev,
					attr->u.avm_flood_ratelimit, trans);
		DP_LIB_UNLOCK(&dp_lock);
		break;
	default:
		break;
	}
	return ret;
}

static int dp_swdev_port_attr_get(struct net_device *dev,
				  struct switchdev_attr *attr)
{
	struct net_device *br_dev;
	struct br_info *br_info;
	int ret = DP_SUCCESS;

	br_dev = netdev_master_upper_dev_get(attr->orig_dev);
	if (!br_dev)
		return -EOPNOTSUPP;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
		/* Default err return value "-EOPNOTSUPP" cannot be set as
		 * this blocks bridgeport offload_fwd_mark setting at
		 * linux bridge level ("nbp_switchdev_mark_set")
		 */
		rcu_read_lock();
		br_info = dp_swdev_bridge_entry_lookup_rcu(br_dev);
		if (!br_info) {
			rcu_read_unlock();
			return ret;
		}
		attr->u.ppid.id_len = sizeof(br_info->fid);
		memcpy(&attr->u.ppid.id, &br_info->fid, attr->u.ppid.id_len);
		DP_DEBUG(DP_DBG_FLAG_SWDEV,
			 "SWITCHDEV_ATTR_ID_PORT_PARENT_ID:%s fid=%d\n",
			 attr->orig_dev ?
attr->orig_dev->name : "NULL", br_info->fid); rcu_read_unlock(); break; case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: ret = dp_swdev_port_attr_bridge_flags_get(dev, &attr->u.brport_flags); break; default: break; } return ret; } static int dp_swdev_port_obj_add(struct net_device *dev, const struct switchdev_obj *obj, struct switchdev_trans *trans) { struct net_device *br_dev; int ret = -EOPNOTSUPP; DP_DEBUG(DP_DBG_FLAG_SWDEV, "%s id:%d flags:%d dev name:%s\r\n", __func__, obj->id, obj->flags, dev->name); br_dev = netdev_master_upper_dev_get(obj->orig_dev); if (!br_dev) return ret; switch (obj->id) { case SWITCHDEV_OBJ_ID_PORT_VLAN: ret = dp_swdev_filter_vlan(obj->orig_dev, obj, trans, br_dev); break; default: break; } return ret; } static int dp_swdev_port_obj_del(struct net_device *dev, const struct switchdev_obj *obj) { DP_DEBUG(DP_DBG_FLAG_SWDEV, "%s\n", __func__); return -EOPNOTSUPP; } /* API to free bridge id from DP notifier event */ int dp_notif_br_free(struct net_device *br_dev) { struct inst_info *i_info = get_dp_prop_info(0); struct br_info *br_info; br_info = dp_swdev_bridge_entry_lookup_rcu(br_dev); if (!br_info) goto EXIT; i_info->swdev_free_brcfg(0, br_info->fid); i_info->dp_reg_br_vlan(br_info, DP_OPS_RESET); dp_swdev_remove_bridge_id_entry(br_info); DP_DEBUG(DP_DBG_FLAG_SWDEV, "Freed device(%s)\n", br_dev->name); EXIT: return DP_SUCCESS; } /* API to alloc bridge id from DP notifier event */ int dp_notif_br_alloc(struct net_device *br_dev) { struct inst_info *dp_info = get_dp_prop_info(0); struct br_info *br_info; struct persist_bridge *pb; int br_id; pb = dp_persist_bridge_lookup_rcu(br_dev); if (WARN_ON(!pb)) return DP_FAILURE; br_id = dp_info->swdev_alloc_bridge_id(0); if (br_id == DP_FAILURE) { pr_err("Switch bridge alloc failed\n"); return DP_FAILURE; } br_info = kmem_cache_zalloc(cache_br_info_list, GFP_ATOMIC); if (!br_info) return DP_FAILURE; INIT_LIST_HEAD(&br_info->bp_list); INIT_LIST_HEAD(&br_info->br_vlan_list); br_info->dev = br_dev; br_info->fid = br_id; br_info->persist = pb; if (dp_info->swdev_bridge_cfg_set(br_info)) { kmem_cache_free(cache_br_info_list, br_info); pr_err("Switch configuration failed\n"); return DP_FAILURE; } dp_info->dp_reg_br_vlan(br_info, 0); dp_swdev_insert_bridge_id_entry(br_info); return br_id; } /* This function registers the created port in datapath to switchdev */ int dp_register_switchdev_ops(struct dp_dev *dp_dev, int reset) { int offset[] = { offsetof(struct switchdev_ops, switchdev_port_attr_get), offsetof(struct switchdev_ops, switchdev_port_attr_set), offsetof(struct switchdev_ops, switchdev_port_obj_add), offsetof(struct switchdev_ops, switchdev_port_obj_del) }; void *cb[] = { &dp_swdev_port_attr_get, &dp_swdev_port_attr_set, &dp_swdev_port_obj_add, &dp_swdev_port_obj_del }; struct inst_info *inst_info = get_dp_prop_info(dp_dev->inst); u32 vap, flag = DP_OPS_SWITCHDEV; struct pmac_port_info *port_info; struct dp_subif_info *sif; int i; port_info = get_dp_port_info(dp_dev->inst, dp_dev->ep); vap = GET_VAP(dp_dev->ctp, port_info->vap_offset, port_info->vap_mask); sif = get_dp_port_subif(port_info, vap); if (inst_info->swdev_flag && sif->swdev_en) { if (reset) flag |= DP_OPS_RESET; /* switchdev ops register */ for (i = 0; i < ARRAY_SIZE(offset); i++) { if (dp_set_net_dev_ops_priv(dp_dev->dev, cb[i], offset[i], flag)) { pr_err("%s failed to register ops %d\n", __func__, i); return DP_FAILURE; } } } return DP_SUCCESS; } int dp_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags) { struct inst_info *dp_info = 
get_dp_prop_info(0);
	int ret = 0;

	if (!dev || !nlh || !dp_info || !dp_info->dp_ndo_br_setlink)
		return -EINVAL;
	DP_LIB_LOCK(&dp_lock);
	ret = dp_info->dp_ndo_br_setlink(dev, nlh, flags);
	DP_LIB_UNLOCK(&dp_lock);
	return ret;
}

int dp_ndo_bridge_dellink(struct net_device *dev, struct nlmsghdr *nlh,
			  u16 flags)
{
	struct inst_info *dp_info = get_dp_prop_info(0);
	int ret = 0;

	if (!dev || !nlh || !dp_info || !dp_info->dp_ndo_br_dellink)
		return -EINVAL;
	DP_LIB_LOCK(&dp_lock);
	ret = dp_info->dp_ndo_br_dellink(dev, nlh, flags);
	DP_LIB_UNLOCK(&dp_lock);
	return ret;
}

void dp_switchdev_exit(void)
{
	dp_swdev_bridge_id_entry_free();
}

int dp_switchdev_init(void)
{
	return dp_swdev_bridge_id_entry_init();
}
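
/*
 * Usage sketch (illustrative only, not compiled into the driver): mapping a
 * bridge net_device to its GSWIP forwarding ID from another datapath module.
 * dp_get_fid_by_dev() expects the bridge device itself (not a bridge port)
 * and returns a negative value while the bridge has no datapath entry:
 *
 *	int inst, fid;
 *
 *	fid = dp_get_fid_by_dev(br_dev, &inst);
 *	if (fid < 0)
 *		return;
 *	pr_debug("bridge %s: inst=%d fid=%d\n", br_dev->name, inst, fid);
 */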