// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
 *
 * Copyright (c) 2020 - 2022 MaxLinear, Inc.
 * Copyright (c) 2020 Intel Corporation
 *
 *****************************************************************************/

#include <linux/version.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>

#include "pon_qos_tc_flower.h"
/* Datapath and GSW switch API headers (dp_subif_t, GSW_PCE_rule_t).
 * The original include names were lost; these paths are assumptions
 * based on the symbols used below and may differ per platform tree.
 */
#include <net/datapath_api.h>
#include <net/switch_api/lantiq_gsw_flow.h>
#include "pon_qos_tc_pce.h"
#include "pon_qos_tc_parser.h"
#include "pon_qos_tc_mirred.h"

struct pon_qos_mirr_filter {
	int proto;
	struct flow_dissector_key_vlan key;
	struct flow_dissector_key_vlan mask;
	bool drop;
	enum pce_type pce_type;
};

static int pon_qos_parse_flower(struct net_device *dev,
				struct tc_cls_flower_offload *f,
				struct pon_qos_mirr_filter *flt)
{
	memset(flt, 0, sizeof(*flt));

	if (f->dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_CVLAN) |
	      BIT(FLOW_DISSECTOR_KEY_ICMP) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS))) {
		pr_debug("%s: Unsupported key used: 0x%x\n", __func__,
			 f->dissector->used_keys);
		return -EINVAL;
	}
	pr_debug("%s: Supported key used: 0x%x\n", __func__,
		 f->dissector->used_keys);

	flt->proto = f->common.protocol;

	/* Classification/Matching arguments parsing */
	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->key);
		struct flow_dissector_key_vlan *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->mask);

		/* Print the vid as key/mask and the pcp key; the original
		 * argument order mixed up mask and priority.
		 */
		netdev_dbg(dev, "%s: match vid: %#x/%#x pcp: %#x\n",
			   __func__, key->vlan_id, mask->vlan_id,
			   key->vlan_priority);
		flt->key = *key;
		flt->mask = *mask;
	}

	return 0;
}

static int pon_qos_flow_get_bp_id(struct net_device *dev)
{
	dp_subif_t subif;
	int err;

	err = dp_get_netif_subifid(dev, NULL, NULL, NULL, &subif, 0);
	if (err != DP_SUCCESS)
		return -ENODEV;

	return subif.bport;
}

static int pon_qos_set_forward_port(struct net_device *dev,
				    struct net_device *mirr_dev,
				    u16 nForwardPortMap[16])
{
	int bp, pidx;

	bp = pon_qos_flow_get_bp_id(mirr_dev);
	if (bp < 0) {
		netdev_err(dev, "%s: cannot get BP id from DPM for %s\n",
			   __func__, netdev_name(mirr_dev));
		return bp;
	}
	if (bp >= 16 * 16) {
		netdev_err(dev, "%s: bridge port (%i) is too big\n",
			   __func__, bp);
		return -EINVAL;
	}

	/* Set the destination bridge port id in the 256 bit
	 * nForwardPortMap, which is split into 16 u16 blocks.
	 * E.g. bp = 37 selects word 37 / 16 = 2 and bit 37 - 32 = 5,
	 * i.e. nForwardPortMap[2] |= 0x0020.
	 */
	pidx = (bp / 16) % 16;
	nForwardPortMap[pidx] |= 1 << (bp - (pidx << 4));

	return 0;
}
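/* Walk the action list twice: first to detect a drop (gact shot) action,
 * which short-circuits into a pure drop rule with no forwarding, then to
 * collect every mirred redirect target into nForwardPortMap. TC_ACT_PIPE
 * gact entries are tolerated in between, since they are what chains
 * multiple mirred redirect actions together.
 */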
static int pon_qos_parse_act_mirred(struct net_device *dev,
				    struct tcf_exts *exts,
				    u16 nForwardPortMap[16],
				    bool *drop_act)
{
	const struct tc_action *a;
	struct net_device *mirr_dev;
	struct tcf_gact *gact;
#if (KERNEL_VERSION(4, 19, 0) > LINUX_VERSION_CODE)
	LIST_HEAD(actions);
	int ifindex;
#else
	int i;
#endif
	int ret = -EINVAL;

	*drop_act = false;

#if (KERNEL_VERSION(4, 14, 0) > LINUX_VERSION_CODE)
	if (tc_no_actions(exts))
#else
	if (!tcf_exts_has_actions(exts))
#endif
		return -EINVAL;

#if (KERNEL_VERSION(4, 19, 0) > LINUX_VERSION_CODE)
	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
#else
	tcf_exts_for_each_action(i, a, exts) {
#endif
		/* If one of the actions is a drop action, just configure a
		 * drop rule and no forwarding. Normal drop rules are
		 * configured in the extended VLAN configuration; this only
		 * gets called if the first action is a mirred action.
		 */
		if (is_tcf_gact_shot(a)) {
			*drop_act = true;
			return 0;
		}
	}

	/* We support one drop action (is_tcf_gact_shot()) or multiple mirred
	 * redirect actions. If multiple mirred redirect actions are piped
	 * together, the traffic will be duplicated and forwarded to all
	 * these devices.
	 */
#if (KERNEL_VERSION(4, 19, 0) > LINUX_VERSION_CODE)
	list_for_each_entry(a, &actions, list) {
		if (is_tcf_mirred_redirect(a)) {
			ifindex = tcf_mirred_ifindex(a);
			mirr_dev = dev_get_by_index(dev_net(dev), ifindex);
#else
	tcf_exts_for_each_action(i, a, exts) {
		if (is_tcf_mirred_egress_redirect(a)) {
			mirr_dev = tcf_mirred_dev(a);
#endif
			if (!mirr_dev)
				return -ENODEV;

			ret = pon_qos_set_forward_port(dev, mirr_dev,
						       nForwardPortMap);
#if (KERNEL_VERSION(4, 19, 0) > LINUX_VERSION_CODE)
			dev_put(mirr_dev);
#endif
			if (ret)
				return ret;
		} else if (a->ops && a->ops->type == TCA_ACT_GACT) {
			/* Only accept pipe actions here, which are used to
			 * pipe together multiple mirred redirect rules.
			 */
			gact = to_gact(a);
			if (gact->tcf_action != TC_ACT_PIPE)
				return -EINVAL;
		} else {
			netdev_err(dev, "%s: unsupported action: %i\n",
				   __func__, a->ops ? a->ops->type : -1);
			return -EINVAL;
		}
	}

	netdev_dbg(dev, "%s: ret: %d\n", __func__, ret);
	return ret;
}

static void pon_qos_set_flt_pce_type(struct net_device *dev,
				     struct pon_qos_mirr_filter *flt)
{
	if (eth_type_vlan(flt->proto)) {
		if (!flt->mask.vlan_id) {
			/* wildcard */
			netdev_dbg(dev, "%s: Mirroring VLAN wildcard\n",
				   __func__);
			flt->pce_type = PCE_MIRR_VLAN_WILD;
		} else if (flt->mask.vlan_id == 0xfff) {
			/* single tag */
			if (flt->drop) {
				pr_debug("%s: Mirroring VLAN drop\n",
					 __func__);
				flt->pce_type = PCE_MIRR_VLAN_DROP;
			} else {
				pr_debug("%s: Mirroring VLAN forward\n",
					 __func__);
				flt->pce_type = PCE_MIRR_VLAN_FWD;
			}
		}
	} else {
		/* untagged */
		if (flt->drop) {
			netdev_dbg(dev, "%s: Mirroring VLAN untag drop\n",
				   __func__);
			flt->pce_type = PCE_MIRR_UNTAG_DROP;
		} else {
			netdev_dbg(dev, "%s: Mirroring VLAN untag forward\n",
				   __func__);
			flt->pce_type = PCE_MIRR_UNTAG_FWD;
		}
	}
}

static int pon_qos_tc_to_pce(struct net_device *dev,
			     struct tc_cls_flower_offload *f,
			     u16 nForwardPortMap[16],
			     struct pon_qos_mirr_filter *flt,
			     GSW_PCE_rule_t *pce_rule)
{
	int ret = 0;

	pce_rule->pattern.bEnable = 1;
	flt->pce_type = PCE_COMMON;

	pon_qos_tc2pce_subif_parse(dev, pce_rule, FL_FLOW_KEY_IFINDEX(f));
	pon_qos_tc2pce_eth_proto_parse(dev, f, pce_rule);
	pon_qos_tc2pce_key_eth_addr_parse(dev, f, pce_rule);
	pon_qos_tc2pce_vlan_parse(dev, f, pce_rule);
	pon_qos_tc2pce_icmp_parse(dev, f, pce_rule);

	if (pon_qos_tc_parse_is_mcc(dev, f)) {
		/* use only on reinserted packets */
		pce_rule->pattern.bInsertionFlag_Enable = 1;
		pce_rule->pattern.nInsertionFlag = 1;
	}

	/* Parse hw_tc */
	pon_qos_tc2pce_set_traffic_class(dev, f->classid, pce_rule);

	pon_qos_set_flt_pce_type(dev, flt);

	if (pon_qos_tc_parse_is_mcc(dev, f))
		pce_rule->action.ePortMapAction =
			GSW_PCE_ACTION_PORTMAP_ALTERNATIVE;
	else
		pce_rule->action.ePortFilterType_Action =
			GSW_PCE_PORT_FILTER_ACTION_1;

	if (flt->drop) {
		netdev_dbg(dev, "%s: PCE rule prepared\n", __func__);
		return ret;
	}

	memcpy(pce_rule->action.nForwardPortMap, nForwardPortMap,
	       sizeof(pce_rule->action.nForwardPortMap));

	netdev_dbg(dev, "%s: PCE rule prepared\n", __func__);
	return ret;
}
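/* Offload a flower filter with mirred/drop actions into a hardware PCE
 * rule: parse the match keys, resolve the redirect targets into a bridge
 * port map, build the GSW_PCE_rule_t, install it, and remember the rule
 * under the flower cookie so it can be removed again on unoffload. The
 * installed rule is rolled back if storing the cookie fails.
 */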
int pon_qos_mirred_offload(struct net_device *dev,
			   struct tc_cls_flower_offload *f,
			   uint32_t tc_handle)
{
	struct pon_qos_mirr_filter flt = { 0 };
	GSW_PCE_rule_t *pce_rule = NULL;
	int pref = f->common.prio >> 16;
	u16 nForwardPortMap[16] = { 0, };
	int ret;

	ret = pon_qos_parse_flower(dev, f, &flt);
	if (ret != 0)
		return ret;

	ret = pon_qos_parse_act_mirred(dev, f->exts, nForwardPortMap,
				       &flt.drop);
	if (ret != 0)
		return ret;

	pce_rule = kzalloc(sizeof(*pce_rule), GFP_KERNEL);
	if (!pce_rule)
		return -ENOMEM;

	ret = pon_qos_tc_to_pce(dev, f, nForwardPortMap, &flt, pce_rule);
	if (ret != 0) {
		kfree(pce_rule);
		return ret;
	}

	ret = pon_qos_pce_rule_create(dev, tc_handle, pref, flt.pce_type,
				      pce_rule);
	kfree(pce_rule);
	if (ret != 0)
		return ret;

	ret = pon_qos_tc_flower_storage_add(dev, f->cookie, TC_TYPE_MIRRED,
					    NULL, NULL);
	if (ret < 0) {
		(void)pon_qos_pce_rule_delete(tc_handle, pref);
		return ret;
	}

	return 0;
}

int pon_qos_mirred_unoffload(struct net_device *dev,
			     struct tc_cls_flower_offload *f,
			     uint32_t tc_handle)
{
	int pref = f->common.prio >> 16;

	return pon_qos_pce_rule_delete(tc_handle, pref);
}
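/* Usage sketch (device names are hypothetical): the configurations
 * offloaded by this file are created from user space with tc flower
 * filters whose actions are mirred egress redirects or a drop, e.g.:
 *
 *   tc filter add dev eth0_0 ingress protocol 802.1q flower vlan_id 7 \
 *      action mirred egress redirect dev eth0_0_1
 *
 *   tc filter add dev eth0_0 ingress protocol 802.1q flower vlan_id 7 \
 *      action drop
 *
 * A VLAN match without vlan_id maps to PCE_MIRR_VLAN_WILD, an exact
 * vlan_id match to PCE_MIRR_VLAN_FWD/_DROP, and untagged traffic to
 * PCE_MIRR_UNTAG_FWD/_DROP (see pon_qos_set_flt_pce_type()).
 */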