/*
 * Copyright (c) 2019 AVM GmbH.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/module.h>
#include <linux/slab.h>
#include "../avm_qos.h"
#include <linux/netdevice.h>
#include <linux/if_arp.h>
/* The platform header names below are assumptions (datapath/CBM API, TMU
 * low-level API and GSW switch API).
 */
#include <net/datapath_api.h>
#define INCLUDE_UNUSED_LL_FUNCTIONS 1 // ugh
#include <net/drv_tmu_ll.h>
#include <net/switch_api/lantiq_gsw_api.h>

extern void (*ltq_vectoring_priority_hook)(uint32_t priority);

static int tmu_get_port(const struct net_device *netdev);
uint32_t class_queue_lookup(const struct net_device *netdev, uint32_t classid);

enum { SIT_QID = 0, SIT_SBID = 1 };
enum { V_PORT = 0, V_SBID = 1 };

#define SBIN_BASE(sbid) ((sbid) * SCHEDULER_MAX_LEAF)

struct avm_qos_priv {
	uint32_t tbid;
};

struct avm_qos_priv *avm_qos_alloc(void)
{
	struct avm_qos_priv *hw_priv =
		kmalloc(sizeof(struct avm_qos_priv), GFP_KERNEL);

	if (!hw_priv)
		return NULL;

	hw_priv->tbid = TOKEN_BUCKET_MAX;

	return hw_priv;
}
EXPORT_SYMBOL(avm_qos_alloc);

void avm_qos_free(struct avm_qos_priv *hw_priv)
{
	kfree(hw_priv);
}
EXPORT_SYMBOL(avm_qos_free);

static void set_lookup_qids(int pmac_port, uint32_t qsid, uint32_t index_offset)
{
	uint32_t k, index;

	for (k = 0; k < 64; k++) {
		/* flow id/dec/enc/mpe2/mpe1 */
		if (pmac_port == 0 && (k & 0x20)) /* EP=0 and F2=1 */
			continue;

		/* don't touch LRO entries */
#ifndef DATAPATH_ACA_CSUM_WORKAROUND
		if (pmac_port == 8 && ((k >> 1) & 0xf) == 0x7)
			continue;
#endif

		index = (k << 8) | (pmac_port << 4) | index_offset;
		set_lookup_qid_via_index(index, qsid);
	}
}

bool avm_qos_netdev_supported(struct net_device *netdev)
{
	dp_subif_t dp_subif = { 0 };

	/* Netdev needs to be registered with datapath_api. This ignores
	 * offload_dp-style VEPs on purpose as they should use Linux QoS.
	 */
	if (dp_get_netif_subifid(netdev, NULL, NULL, 0, &dp_subif, 0) != 0)
		return false;

	/* TMU filters can't handle ATM cells */
	if (netdev->type == ARPHRD_ATM)
		return false;

	return true;
}
EXPORT_SYMBOL(avm_qos_netdev_supported);

void avm_qos_set_default_queue(struct net_device *netdev, uint32_t classid)
{
	dp_subif_t dp_subif = { 0 };
	struct tmu_equeue_link equeue_link;
	struct tmu_sched_blk_in_link sbinlink;
	uint32_t old_qid, prio_sb, prio;
	int pmac_port;

	dp_get_netif_subifid(netdev, NULL, NULL, 0, &dp_subif, 0);
	pmac_port = dp_subif.port_id;

	old_qid = class_queue_lookup(netdev, classid);
	if (old_qid == NULL_QUEUE_ID)
		return;

	tmu_equeue_link_get(old_qid, &equeue_link);
	if (equeue_link.sbin == NULL_SCHEDULER_BLOCK_ID)
		return;

	prio_sb = equeue_link.sbin / SCHEDULER_MAX_LEAF;
	prio = classid % SCHEDULER_MAX_LEAF;

	tmu_sched_blk_in_link_get(SBIN_BASE(prio_sb) + prio, &sbinlink);
	if (!sbinlink.sie || sbinlink.sit != SIT_QID)
		return;

	pr_debug("%s(0x%x)", __func__, classid);

	/* Set all classid 0 mappings for this PMAC port to the queue
	 * corresponding to the supplied classid/prio.
	 *
	 * TODO Check if we have to restrict this to certain flags.
	 */
	set_lookup_qids(pmac_port, sbinlink.qsid, 0);
}
EXPORT_SYMBOL(avm_qos_set_default_queue);
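
/*
 * Map a traffic class of this netdev to its hardware queue: resolve the
 * hierarchy built by offqos_backend_setup() (egress port -> shaper SB with a
 * single SBID input -> priority SB) and point the QID lookup table entries of
 * this PMAC port and class (classid % SCHEDULER_MAX_LEAF) at the queue linked
 * to the matching priority SB leaf.
 */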

int avm_qos_add_hw_queue(struct net_device *netdev, uint32_t classid,
			 uint32_t priority, uint32_t weight)
{
	dp_subif_t dp_subif = { 0 };
	struct tmu_sched_blk_in_link sbinlink;
	uint32_t shaper_sb, prio_sb, prio;
	int pmac_port;
	int tmu_port;
	struct tmu_eport_link eport_link = { 0 };

	tmu_port = tmu_get_port(netdev);
	if (tmu_port < 0)
		return -1;

	tmu_egress_port_link_get(tmu_port, &eport_link);
	if (!eport_link.epe || eport_link.sbid == NULL_SCHEDULER_BLOCK_ID)
		return -1;

	shaper_sb = eport_link.sbid;

	tmu_sched_blk_in_link_get(SBIN_BASE(shaper_sb), &sbinlink);
	if (!sbinlink.sie || sbinlink.sit != SIT_SBID)
		return -1;

	prio_sb = sbinlink.qsid;
	prio = classid % SCHEDULER_MAX_LEAF;

	tmu_sched_blk_in_link_get(SBIN_BASE(prio_sb) + prio, &sbinlink);
	if (!sbinlink.sie || sbinlink.sit != SIT_QID)
		return -1;

	dp_get_netif_subifid(netdev, NULL, NULL, 0, &dp_subif, 0);
	pmac_port = dp_subif.port_id;

	set_lookup_qids(pmac_port, sbinlink.qsid, prio);

	return 0;
}
EXPORT_SYMBOL(avm_qos_add_hw_queue);

static void tmu_set_queue_len(uint32_t qid, uint32_t pkts)
{
	struct tmu_equeue_drop_params q_drop_params;
	uint32_t *thresh[4]; /* drop thresholds */
	enum {
		COL_UNASSIGNED = 0,
		COL_GREEN,
		COL_YELLOW,
		COL_RED,
	};

	tmu_equeue_drop_params_get(qid, &q_drop_params);

	thresh[0] = &q_drop_params.qtth0;
	thresh[1] = &q_drop_params.qtth1;
	thresh[2] = &q_drop_params.math0;
	thresh[3] = &q_drop_params.math1;

	q_drop_params.dmod = 0; /* configure hard tail drop */

	/* Thresholds are defined in units of 8 segments.
	 * Give the full length to unassigned and green, half of it to yellow
	 * and none to red packets.
	 */
	*thresh[q_drop_params.col[COL_UNASSIGNED]] = DIV_ROUND_UP(pkts, 8);
	*thresh[q_drop_params.col[COL_GREEN]] = DIV_ROUND_UP(pkts, 8);
	*thresh[q_drop_params.col[COL_YELLOW]] = DIV_ROUND_UP(pkts / 2, 8);
	*thresh[q_drop_params.col[COL_RED]] = 0;

	tmu_equeue_drop_params_set(qid, &q_drop_params);
}

uint32_t class_queue_lookup(const struct net_device *netdev, uint32_t classid)
{
	uint32_t lookup_idx;
	dp_subif_t dp_subif = { 0 };

	if (offdp_ep_platform_data(netdev, &dp_subif, sizeof(dp_subif)))
		return NULL_QUEUE_ID;

	lookup_idx = (dp_subif.port_id & 0xf) << 4;
	lookup_idx |= classid & 0xf;

	return get_lookup_qid_via_index(lookup_idx);
}

int avm_qos_set_queue_len(struct net_device *netdev, uint32_t classid,
			  uint32_t len) /* bytes */
{
	uint32_t qid;
	enum { bytes_per_packet = 340 }; /* average of "simple IMIX" */

	if (len == 0)
		return -EINVAL;

	qid = class_queue_lookup(netdev, classid);
	if (qid == NULL_QUEUE_ID)
		return -EINVAL;

	tmu_set_queue_len(qid, DIV_ROUND_UP(len, bytes_per_packet));

	return 0;
}
EXPORT_SYMBOL(avm_qos_set_queue_len);

int avm_qos_remove_hw_queue(struct net_device *netdev, uint32_t classid,
			    uint32_t priority, uint32_t weight)
{
	/* Nothing to do here. */
	return 0;
}
EXPORT_SYMBOL(avm_qos_remove_hw_queue);

int avm_qos_flush_hw_queues(struct net_device *netdev)
{
	/* Nothing to do here. */
	return 0;
}
EXPORT_SYMBOL(avm_qos_flush_hw_queues);
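
/*
 * Allocation helpers: find unused TMU resources (scheduler block, token
 * bucket, egress queue) by scanning for entries whose enable bits and links
 * are still unset. Queue ids below 16 are left alone.
 */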

static uint32_t tmu_sbid_first_free_leaf(uint32_t sbid)
{
	uint32_t leaf;

	for (leaf = 0; leaf < SCHEDULER_MAX_LEAF; leaf++) {
		struct tmu_sched_blk_in_link sbinlink;

		tmu_sched_blk_in_link_get(SBIN_BASE(sbid) + leaf, &sbinlink);
		if (!sbinlink.sie)
			return leaf;
	}

	return SCHEDULER_MAX_LEAF;
}

static uint32_t tmu_sbid_alloc(void)
{
	uint32_t sbid;

	for (sbid = 0; sbid < NULL_SCHEDULER_BLOCK_ID; sbid++) {
		struct tmu_sched_blk_out_link olink;

		/* Skip sbid if output is enabled */
		tmu_sched_blk_out_link_get(sbid, &olink);
		if (olink.soe)
			continue;

		/* Skip sbid if any input is enabled */
		if (tmu_sbid_first_free_leaf(sbid) > 0)
			continue;

		/* Seems to be unused */
		break;
	}

	return sbid;
}

static uint32_t tmu_tbid_alloc(void)
{
	uint32_t tbid;

	for (tbid = 0; tbid < TOKEN_BUCKET_MAX; tbid++) {
		uint32_t sbin;

		/* Return tbid if it is not linked to a sbin */
		tmu_token_bucket_shaper_link_get(tbid, &sbin);
		if (sbin == NULL_SCHEDULER_INPUT_ID)
			break;
	}

	return tbid;
}

static uint32_t tmu_qid_alloc(void)
{
	uint32_t qid;

	for (qid = 16; qid < NULL_QUEUE_ID; qid++)
		if (!tmu_is_equeue_enabled(qid))
			return qid;

	return NULL_QUEUE_ID;
}

static int tmu_qos_set_gswr(GSW_QoS_queuePort_t *cfg)
{
	GSW_API_HANDLE gsw;
	int ret;

	gsw = gsw_api_kopen("/dev/switch_api/1");
	ret = gsw_api_kioctl(gsw, GSW_QOS_QUEUE_PORT_SET, cfg);
	gsw_api_kclose(gsw);

	return ret;
}

static int wan_port_qos_init(void)
{
	int i, ret;
	GSW_QoS_queuePort_t qos_queueport_set_pae = { 0 };

	/* Redirect WAN port to CBM for QoS and statistics */
	for (i = 0; i <= 15; i++) {
		qos_queueport_set_pae.nTrafficClassId = i;
		qos_queueport_set_pae.nPortId = 15;
		qos_queueport_set_pae.nRedirectPortId = 15;
		qos_queueport_set_pae.nQueueId = 31;
		qos_queueport_set_pae.bRedirectionBypass = 1;
		ret = tmu_qos_set_gswr(&qos_queueport_set_pae);
		if (ret != GSW_statusOk)
			break;

		qos_queueport_set_pae.nTrafficClassId = i;
		qos_queueport_set_pae.nPortId = 15;
		qos_queueport_set_pae.nRedirectPortId = 0;
		qos_queueport_set_pae.nQueueId = 30;
		qos_queueport_set_pae.bRedirectionBypass = 0;
		ret = tmu_qos_set_gswr(&qos_queueport_set_pae);
		if (ret != GSW_statusOk)
			break;
	}

	return ret;
}
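
/*
 * PMAC backpressure setup for both switches: each Tx DMA channel is given the
 * egress queue mask and Rx port mask it is flow-controlled against. The mask
 * values are platform-specific constants.
 */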

static uint32_t gswip_backpressure_config(void)
{
	GSW_PMAC_BM_Cfg_t bmCfg = { 0 };
	GSW_API_HANDLE gswl;
	GSW_API_HANDLE gswr;
	uint32_t i;

	/* Do the GSWIP-L configuration */
	gswl = gsw_api_kopen("/dev/switch_api/0");
	if (gswl == 0) {
		pr_debug("[%s] Open SWAPI device FAILED !!\n", __func__);
		return -1;
	}

	/* GSWIP-L PMAC Backpressure config */
	for (i = 1; i <= 6; i++) {
		bmCfg.nTxDmaChanId = i;
		bmCfg.txQMask = 1 << (4 * i + 1);
		bmCfg.rxPortMask = 0x01;
		gsw_api_kioctl(gswl, GSW_PMAC_BM_CFG_SET, &bmCfg);
	}

	for (i = 9; i <= 14; i++) {
		bmCfg.nTxDmaChanId = i;
		bmCfg.txQMask = 1 << (4 * (i - 8) + 3);
		bmCfg.rxPortMask = 0x01;
		gsw_api_kioctl(gswl, GSW_PMAC_BM_CFG_SET, &bmCfg);
	}

	gsw_api_kclose(gswl);

	/* Do the GSWIP-R configuration for Back Pressure */
	gswr = gsw_api_kopen("/dev/switch_api/1");
	if (gswr == 0) {
		pr_debug("[%s] Open SWAPI device FAILED !!\n", __func__);
		return -1;
	}

	memset((void *)&bmCfg, 0x00, sizeof(bmCfg));
	bmCfg.nTxDmaChanId = 0;
	bmCfg.txQMask = 0x7fffeaab;
	bmCfg.rxPortMask = 0x007e;
	gsw_api_kioctl(gswr, GSW_PMAC_BM_CFG_SET, &bmCfg);

	memset((void *)&bmCfg, 0x00, sizeof(bmCfg));
	bmCfg.nTxDmaChanId = 6;
	bmCfg.txQMask = 0x6aaaaaa2;
	bmCfg.rxPortMask = 0x0;
	gsw_api_kioctl(gswr, GSW_PMAC_BM_CFG_SET, &bmCfg);

	memset((void *)&bmCfg, 0x00, sizeof(bmCfg));
	bmCfg.nTxDmaChanId = 5;
	bmCfg.txQMask = 0;
	bmCfg.rxPortMask = 0x0801;
	gsw_api_kioctl(gswr, GSW_PMAC_BM_CFG_SET, &bmCfg);

	memset((void *)&bmCfg, 0x00, sizeof(bmCfg));
	bmCfg.nTxDmaChanId = 7;
	bmCfg.txQMask = 0;
	bmCfg.rxPortMask = 0x0080;
	gsw_api_kioctl(gswr, GSW_PMAC_BM_CFG_SET, &bmCfg);

	memset((void *)&bmCfg, 0x00, sizeof(bmCfg));
	bmCfg.nTxDmaChanId = 8;
	bmCfg.txQMask = 0;
	bmCfg.rxPortMask = 0x100;
	gsw_api_kioctl(gswr, GSW_PMAC_BM_CFG_SET, &bmCfg);

	memset((void *)&bmCfg, 0x00, sizeof(bmCfg));
	bmCfg.nTxDmaChanId = 13;
	bmCfg.txQMask = 0;
	bmCfg.rxPortMask = 0x2000;
	gsw_api_kioctl(gswr, GSW_PMAC_BM_CFG_SET, &bmCfg);

	memset((void *)&bmCfg, 0x00, sizeof(bmCfg));
	bmCfg.nTxDmaChanId = 15;
	bmCfg.txQMask = 0x80000000;
	bmCfg.rxPortMask = 0x0;
	gsw_api_kioctl(gswr, GSW_PMAC_BM_CFG_SET, &bmCfg);

	gsw_api_kclose(gswr);

	return 0;
}

static void _gswip_qos_port_config(GSW_API_HANDLE gsw)
{
	GSW_QoS_portCfg_t port_cfg;
	GSW_QoS_PCP_ClassCfg_t pcp_cfg;
	uint32_t i;

	memset((void *)&pcp_cfg, 0x00, sizeof(pcp_cfg));
	for (i = 0; i < 8; i++) {
		/* Higher PCP value means higher priority, except for PCP 0 and
		 * 1, which are swapped. For traffic classes, lower value means
		 * higher priority. Hence we first reverse all values and then
		 * swap PCP 0 and 1.
		 */
		pcp_cfg.nTrafficClass[i] = 7 - i;
		if (i < 2)
			pcp_cfg.nTrafficClass[i] ^= 1;
	}
	gsw_api_kioctl(gsw, GSW_QOS_PCP_CLASS_SET, &pcp_cfg);

	for (i = 1; i <= 15; i++) {
		/* Configure the non-CPU ports to derive the traffic class on
		 * ingress from CVLAN PCP. Routing sessions or PCE rules
		 * overwrite this traffic class.
		 */
		memset((void *)&port_cfg, 0x00, sizeof(port_cfg));
		port_cfg.nPortId = i;
		port_cfg.eClassMode = GSW_QOS_CLASS_SELECT_PCP;
		/* use PCP 0, if no CVLAN header is present */
		port_cfg.nTrafficClass = 6;
		gsw_api_kioctl(gsw, GSW_QOS_PORT_CFG_SET, &port_cfg);
	}
}

static uint32_t gswip_qos_port_config(void)
{
	GSW_API_HANDLE gswl;
	GSW_API_HANDLE gswr;

	/* GSWIP-L config */
	gswl = gsw_api_kopen("/dev/switch_api/0");
	if (gswl == 0) {
		pr_debug("[%s] Open SWAPI device FAILED !!\n", __func__);
		return -1;
	}
	_gswip_qos_port_config(gswl);
	gsw_api_kclose(gswl);

	/* GSWIP-R config */
	gswr = gsw_api_kopen("/dev/switch_api/1");
	if (gswr == 0) {
		pr_debug("[%s] Open SWAPI device FAILED !!\n", __func__);
		return -1;
	}
	_gswip_qos_port_config(gswr);
	gsw_api_kclose(gswr);

	return 0;
}
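
/*
 * One-time TMU topology setup. For every enabled egress port the existing
 * priority scheduler block (prio_sb) is pushed one level down behind a newly
 * allocated shaper block (shaper_sb), and every free leaf of prio_sb gets its
 * own egress queue:
 *
 *	before:	egress port <- prio_sb <- default queue (leaf 0)
 *	after:	egress port <- shaper_sb <- prio_sb <- one queue per leaf
 *
 * Lookup-table entries whose class-0 mapping pointed at the default queue are
 * rewritten so that traffic class N hits the queue on leaf N.
 */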

int offqos_backend_setup(void)
{
	int epn;

	/* Disable TMU to change the topology */
	tmu_enable(false);

	/* Set dequeue to count packets */
	cbm_counter_mode_set(1, 0);

	for (epn = 0; epn < EPNNULL_EGRESS_PORT_ID; epn++) {
		struct tmu_eport_link eplink;
		struct tmu_sched_blk_in_link prio_ilink;
		struct tmu_sched_blk_in_link shaper_ilink = { 0 };
		struct tmu_sched_blk_out_link olink = { 0 };
		uint32_t prio_sb, shaper_sb;
		uint32_t leaf;
		uint32_t default_qid;

		if (!tmu_is_egress_port_enabled(epn))
			continue;

		tmu_egress_port_link_get(epn, &eplink);
		if (eplink.sbid >= NULL_SCHEDULER_BLOCK_ID)
			continue;

		if (!tmu_is_sched_blk_out_enabled(eplink.sbid))
			continue;

		prio_sb = eplink.sbid;

		shaper_sb = tmu_sbid_alloc();
		if (shaper_sb == NULL_SCHEDULER_BLOCK_ID)
			goto errout;

		tmu_sched_blk_in_link_get(SBIN_BASE(prio_sb), &prio_ilink);

		/* Skip if the input is disabled or the input type is a
		 * scheduling block.
		 */
		if (!prio_ilink.sie || prio_ilink.sit == SIT_SBID)
			continue;

		default_qid = prio_ilink.qsid;

		/* Link prio_sb to the first and only input of shaper_sb. */
		shaper_ilink.sie = true;
		shaper_ilink.sit = SIT_SBID;
		shaper_ilink.iwgt = 0;
		shaper_ilink.qsid = prio_sb;
		tmu_sched_blk_in_link_set(SBIN_BASE(shaper_sb), &shaper_ilink);

		/* Assign shaper_sb to the TMU egress port */
		tmu_egress_port_link_set(epn, shaper_sb);

		/* Copy the output settings of prio_sb to shaper_sb */
		tmu_sched_blk_out_link_get(prio_sb, &olink);
		tmu_sched_blk_out_link_set(shaper_sb, &olink);

		/* Increase the hierarchy level and link the output of prio_sb
		 * to shaper_sb
		 */
		olink.lvl++;
		olink.v = V_SBID;
		olink.omid = SBIN_BASE(shaper_sb);
		tmu_sched_blk_out_link_set(prio_sb, &olink);

		/* Link a queue to every input of prio_sb */
		while ((leaf = tmu_sbid_first_free_leaf(prio_sb)) <
		       SCHEDULER_MAX_LEAF) {
			uint32_t qid;
			uint32_t sbin;
			int pmac_port;

			sbin = SBIN_BASE(prio_sb) + leaf;

			qid = tmu_qid_alloc();
			if (qid == NULL_QUEUE_ID)
				goto errout;

			tmu_egress_queue_create(qid, sbin, epn);

			for (pmac_port = 0; pmac_port < 16; pmac_port++) {
				uint32_t index;

				index = pmac_port << 4;
				if (get_lookup_qid_via_index(index) !=
				    default_qid)
					continue;

				pr_debug("default_qid %u pmac %u\n",
					 default_qid, pmac_port);
				set_lookup_qids(pmac_port, qid, leaf);
			}
		}
	}

	if (wan_port_qos_init() != GSW_statusOk)
		goto errout;

	if (gswip_backpressure_config())
		goto errout;

	if (gswip_qos_port_config())
		goto errout;

	tmu_enable(true);
	return 0;

errout:
	tmu_enable(true);
	return -1;
}
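
/*
 * Per-netdev helpers: tmu_get_port() resolves a netdev to its CBM dequeue
 * port and assumes that this maps 1:1 to the TMU egress port (see the TODO
 * below); tmu_overhead_set() programs the per-port frame overhead in the CBM,
 * minus the PMAC header where the switch consumes it.
 */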

static void tmu_overhead_set(const struct net_device *netdev, int8_t overhead)
{
	dp_subif_t dp_subif = { 0 };
	int port;
	uint32_t dma_chan, flags;

	if (offdp_ep_platform_data(netdev, &dp_subif, sizeof(dp_subif)))
		return;

	port = tmu_get_port(netdev);
	if (port < 0)
		return;

	if (cbm_get_egress_port_info(port, &dma_chan, &flags))
		return;

	/* Account for the PMAC header consumed by the switch. If we target a
	 * DirectConnect port, this header is not present though.
	 */
	if (!(flags & (DP_F_FAST_WLAN | DP_F_FAST_DSL)))
		overhead -= 8;

	cbm_enqueue_port_overhead_set(dp_subif.port_id, overhead);
}

static int tmu_get_port(const struct net_device *netdev)
{
	dp_subif_t dp_subif = { 0 };
	cbm_dq_port_res_t dq_port_res;
	uint32_t tmu_port, cbm_port;

	if (offdp_ep_platform_data(netdev, &dp_subif, sizeof(dp_subif)))
		return -1;

	if (cbm_dequeue_port_resources_get(dp_subif.port_id, &dq_port_res, 0))
		return -1;

	if (dq_port_res.num_deq_ports > 0 && dq_port_res.deq_info) {
		struct cbm_dq_info *info;

		info = dq_port_res.deq_info;
		cbm_port = info[0].port_no;
		kfree(info);
	} else {
		return -1;
	}

	/* TODO assert mapping assumption */
	tmu_port = cbm_port;

	return tmu_port;
}

static int shaper_delete(uint32_t sbin)
{
	struct tmu_sched_blk_in_link ilink = { 0 };

	tmu_sched_blk_in_link_get(sbin, &ilink);
	tmu_token_bucket_shaper_delete(ilink.tbid, sbin);

	return 0;
}

int avm_qos_reset_prio_shaper(struct net_device *netdev, uint32_t classid)
{
	struct tmu_equeue_link equeue_link;
	uint32_t qid;

	qid = class_queue_lookup(netdev, classid);
	if (qid == NULL_QUEUE_ID)
		return -EINVAL;

	tmu_equeue_link_get(qid, &equeue_link);
	if (equeue_link.sbin == NULL_SCHEDULER_BLOCK_ID)
		return -EINVAL;

	return shaper_delete(equeue_link.sbin);
}
EXPORT_SYMBOL(avm_qos_reset_prio_shaper);

int avm_qos_reset_port_shaper(struct net_device *netdev)
{
	int tmu_port;
	struct tmu_eport_link eport_link = { 0 };

	tmu_port = tmu_get_port(netdev);
	if (tmu_port < 0)
		return -1;

	tmu_egress_port_link_get(tmu_port, &eport_link);
	if (!eport_link.epe || eport_link.sbid == NULL_SCHEDULER_BLOCK_ID)
		return -1;

	return shaper_delete(SBIN_BASE(eport_link.sbid));
}
EXPORT_SYMBOL(avm_qos_reset_port_shaper);
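
/*
 * Program the dual token bucket shaper of a scheduler input. The token bucket
 * id is cached in the caller's avm_qos_priv, so the bucket is allocated and
 * attached on first use and only reprogrammed afterwards. Missing peak values
 * fall back to the committed rate/burst.
 */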

static int tmu_tb_setup(uint32_t sbin, uint32_t pir, uint32_t cir, uint32_t pbs,
			uint32_t cbs, struct avm_qos_priv *priv)
{
	struct tmu_token_bucket_shaper_params tbf_params = { 0 };
	bool new = false;

	if (priv->tbid == TOKEN_BUCKET_MAX) {
		priv->tbid = tmu_tbid_alloc();
		if (priv->tbid == TOKEN_BUCKET_MAX)
			return -1;
		new = true;
	}

	/* The TB mode description is a bit vague so I did some experiments:
	 *
	 * mode 0: "strict color blind"
	 *   - fills both buckets at once
	 *   - needs conformance against both buckets to pass
	 *   - always depletes both
	 *   - only requires tbe0 to be enabled but can use both
	 *   - leaves previous colors alone
	 *
	 * mode 1: "RFC 2698 color aware"
	 *   - ignores uncolored PDUs
	 *   - green
	 *       complies to both
	 *       consumes both
	 *       -> not RFC 2698
	 *       no change in color according to tbst
	 *   - yellow
	 *       complies to tb1
	 *       consumes tb1
	 *       no change in color according to tbst
	 *
	 * mode 2: "RFC 4115 color aware"
	 *   - ignores uncolored PDUs
	 *   - green
	 *       consumes tb0
	 *       complies to tb0
	 *       does not check tb1 -> not RFC 4115 ?!
	 *       no change in color according to tbst
	 *   - yellow
	 *       consumes tb1
	 *       complies to tb1
	 *       no change in color according to tbst
	 *
	 * mode 3: "loose color blind"
	 *   - like mode 0 but allows a single PDU to drive the fill level
	 *     negative
	 */

	/* Choose mode 0 because it behaves just like sch_tbf. */
	tbf_params.mod = 0;

	pir = pir ?: cir;
	pbs = pbs ?: cbs;

	tbf_params.tbe0 = 1;
	tbf_params.pir = pir;
	tbf_params.pbs = pbs;

	tbf_params.tbe1 = 1;
	tbf_params.cir = cir;
	tbf_params.cbs = cbs;

	tmu_token_bucket_shaper_cfg_set(priv->tbid, &tbf_params);

	if (new)
		tmu_token_bucket_shaper_create(priv->tbid, sbin);

	return 0;
}

int avm_qos_set_port_shaper(struct net_device *netdev, uint32_t pir,
			    uint32_t cir, uint32_t pbs, uint32_t cbs,
			    int8_t overhead, bool init,
			    struct avm_qos_priv *priv)
{
	uint32_t shaper_sb;
	int tmu_port;
	struct tmu_eport_link eport_link = { 0 };

	tmu_port = tmu_get_port(netdev);
	if (tmu_port < 0)
		return -1;

	tmu_egress_port_link_get(tmu_port, &eport_link);
	if (!eport_link.epe || eport_link.sbid == NULL_SCHEDULER_BLOCK_ID)
		return -1;

	shaper_sb = eport_link.sbid;

	/* The shaper SB should use one input only */
	if (tmu_sbid_first_free_leaf(shaper_sb) != 1)
		return -1;

	tmu_overhead_set(netdev, overhead);
	tmu_tb_setup(SBIN_BASE(shaper_sb), pir, cir, pbs, cbs, priv);

	return 0;
}
EXPORT_SYMBOL(avm_qos_set_port_shaper);

int avm_qos_get_prio_stats(struct net_device *netdev, uint32_t classid,
			   struct avm_qos_stats *stats)
{
	uint32_t qid;

	qid = class_queue_lookup(netdev, classid);
	if (qid == NULL_QUEUE_ID)
		return -EINVAL;

	memset(stats, 0, sizeof(*stats));

	/* Dequeue counters are set to count packets in offqos_backend_setup();
	 * the enqueue counter provides the byte count.
	 */
	stats->valid_bytes = true;
	stats->bytes = get_enq_counter(qid);
	stats->valid_packets = true;
	stats->packets = get_deq_counter(qid);

	return 0;
}
EXPORT_SYMBOL(avm_qos_get_prio_stats);

int avm_qos_set_prio_shaper(struct net_device *netdev, uint32_t classid,
			    uint32_t pir, uint32_t cir, uint32_t pbs,
			    uint32_t cbs, bool init, struct avm_qos_priv *priv)
{
	struct tmu_equeue_link equeue_link;
	uint32_t qid;

	qid = class_queue_lookup(netdev, classid);
	if (qid == NULL_QUEUE_ID)
		return -EINVAL;

	tmu_equeue_link_get(qid, &equeue_link);
	if (equeue_link.sbin == NULL_SCHEDULER_BLOCK_ID)
		return -EINVAL;

	tmu_tb_setup(equeue_link.sbin, pir, cir, pbs, cbs, priv);

	return 0;
}
EXPORT_SYMBOL(avm_qos_set_prio_shaper);
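
/*
 * Rough usage sketch (illustrative only; "eth_wan", the classid values and
 * the rate/burst numbers are placeholders, not taken from this file). A
 * caller holding an avm_qos_priv could drive the exported API like this:
 *
 *	struct avm_qos_priv *priv = avm_qos_alloc();
 *	struct net_device *dev = dev_get_by_name(&init_net, "eth_wan");
 *
 *	if (priv && dev && avm_qos_netdev_supported(dev)) {
 *		avm_qos_add_hw_queue(dev, 1, 0, 0);
 *		avm_qos_set_queue_len(dev, 1, 100000);
 *		avm_qos_set_port_shaper(dev, 0, 50000, 0, 8000, 0, true, priv);
 *	}
 *	if (dev)
 *		dev_put(dev);
 *	avm_qos_free(priv);
 */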