// SPDX-License-Identifier: GPL-2.0 /* Copyright (C) Intel Corporation * Author: Shao Guohua */ #include #include #include #include "../datapath.h" #include "datapath_misc.h" #define FLUSH_RESTORE_LOOKUP BIT(0) #define FLUSH_RESTORE_QOS_PORT BIT(1) static int set_parent_arbi(int inst, int node_id, int arbi, int flag); static struct limit_map limit_maps[] = { { QOS_NO_BANDWIDTH_LIMIT, DP_NO_SHAPER_LIMIT }, { QOS_MAX_BANDWIDTH_LIMIT, DP_MAX_SHAPER_LIMIT } }; static struct arbi_map arbi_maps[] = { { PP_QOS_ARBITRATION_WSP, ARBITRATION_WSP }, { PP_QOS_ARBITRATION_WRR, ARBITRATION_WRR }, { PP_QOS_ARBITRATION_WFQ, ARBITRATION_WFQ }, { PP_QOS_ARBITRATION_WRR, ARBITRATION_NULL } }; static void dp_wred_def(struct pp_qos_queue_conf *conf) { if (!conf) return; conf->wred_enable = 0; conf->wred_min_guaranteed = DEF_QRED_MIN_ALLOW; conf->wred_max_allowed = DEF_QRED_MAX_ALLOW; } int qos_platform_set(int cmd_id, void *node, int flag) { struct dp_node_link *node_link = (struct dp_node_link *)node; int inst; struct hal_priv *priv; int res = DP_FAILURE; if (!node) return DP_FAILURE; inst = node_link->inst; priv = HAL(inst); if (!priv->qdev) { pr_err("qdev NULL with inst=%d\n", inst); return DP_FAILURE; } DP_LIB_LOCK(&priv->qos_lock); switch (cmd_id) { case NODE_LINK_ADD: res = dp_node_link_add_31((struct dp_node_link *)node, flag); break; case NODE_LINK_GET: res = dp_node_link_get_31((struct dp_node_link *)node, flag); break; case NODE_LINK_EN_GET: res = dp_node_link_en_get_31((struct dp_node_link_enable *)node, flag); break; case NODE_LINK_EN_SET: res = dp_node_link_en_set_31((struct dp_node_link_enable *)node, flag); break; case NODE_UNLINK: res = dp_node_unlink_31((struct dp_node_link *)node, flag); break; case LINK_ADD: res = dp_link_add_31((struct dp_qos_link *)node, flag); break; case LINK_GET: res = dp_link_get_31((struct dp_qos_link *)node, flag); break; case LINK_PRIO_SET: res = dp_qos_link_prio_set_31((struct dp_node_prio *)node, flag); break; case LINK_PRIO_GET: res = 
dp_qos_link_prio_get_31((struct dp_node_prio *)node, flag); break; case QUEUE_CFG_SET: res = dp_queue_conf_set_31((struct dp_queue_conf *)node, flag); break; case QUEUE_CFG_GET: res = dp_queue_conf_get_31((struct dp_queue_conf *)node, flag); break; case SHAPER_SET: res = dp_shaper_conf_set_31((struct dp_shaper_conf *)node, flag); break; case SHAPER_GET: res = dp_shaper_conf_get_31((struct dp_shaper_conf *)node, flag); break; case NODE_ALLOC: res = dp_node_alloc_31((struct dp_node_alloc *)node, flag); break; case NODE_FREE: res = dp_node_free_31((struct dp_node_alloc *)node, flag); break; case NODE_CHILDREN_FREE: res = dp_free_children_via_parent_31((struct dp_node_alloc *)node, flag); break; case DEQ_PORT_RES_GET: res = dp_deq_port_res_get_31((struct dp_dequeue_res *)node, flag); break; case COUNTER_MODE_SET: res = dp_counter_mode_set_31((struct dp_counter_conf *)node, flag); break; case COUNTER_MODE_GET: res = dp_counter_mode_set_31((struct dp_counter_conf *)node, flag); break; case QUEUE_MAP_GET: res = dp_queue_map_get_31((struct dp_queue_map_get *)node, flag); break; case QUEUE_MAP_SET: res = dp_queue_map_set_31((struct dp_queue_map_set *)node, flag); break; case NODE_CHILDREN_GET: res = dp_children_get_31((struct dp_node_child *)node, flag); break; case QOS_LEVEL_GET: res = dp_qos_level_get_31((struct dp_qos_level *)node, flag); break; case QOS_GLOBAL_CFG_GET: res = dp_qos_global_info_get_31((struct dp_qos_cfg_info *)node, flag); break; case QOS_PORT_CFG_SET: res = dp_qos_port_conf_set_31((struct dp_port_cfg_info *)node, flag); break; case QOS_BLOCK_FLUSH_PORT: res = dp_port_block_flush_31((struct dp_qos_blk_flush_port *)node, flag); break; case QOS_BLOCK_FLUSH_QUEUE: res = dp_queue_block_flush_31((struct dp_qos_blk_flush_queue *)node, flag); break; default: pr_err("no support yet cmd_id %d\n", cmd_id); break; } DP_LIB_UNLOCK(&priv->qos_lock); return res; } #define MBPS_2_KBPS 1000 /* convert pp shaper limit to dp shaper limit */ static int limit_pp2dp(u32 
pp_limit, u32 *dp_limit)
{
	int i;

	if (!dp_limit) {
		pr_err("dp_limit is NULL!\n");
		return DP_FAILURE;
	}
	if (pp_limit > QOS_MAX_BANDWIDTH_LIMIT) {
		pr_err("Wrong pp shaper limit: %u\n", pp_limit);
		return DP_FAILURE;
	}
	/* special values (no-limit / max-limit) translate via the map table */
	for (i = 0; i < ARRAY_SIZE(limit_maps); i++) {
		if (limit_maps[i].pp_limit == pp_limit) {
			*dp_limit = limit_maps[i].dp_limit;
			return DP_SUCCESS;
		}
	}
	/* everything else passes through unchanged */
	*dp_limit = pp_limit; /* kbps */
	/* NOTE(review): *dp_limit is unsigned, so "<= 0" is effectively
	 * "== 0" — presumably intended as a zero check; confirm.
	 */
	if (*dp_limit <= 0 || *dp_limit > DP_MAX_SHAPER_LIMIT) {
		pr_err("Wrong dp shaper limit: %u\n", *dp_limit);
		return DP_FAILURE;
	}
	return DP_SUCCESS;
}

/* convert dp shaper limit to pp shaper limit */
static int limit_dp2pp(u32 dp_limit, u32 *pp_limit)
{
	int i;

	if (!pp_limit) {
		pr_err("pp_limit is NULL!\n");
		return DP_FAILURE;
	}
	if (dp_limit > DP_MAX_SHAPER_LIMIT || dp_limit == 0) {
		pr_err("Wrong dp shaper limit: %u\n", dp_limit);
		return DP_FAILURE;
	}
	/* special values translate via the map table */
	for (i = 0; i < ARRAY_SIZE(limit_maps); i++) {
		if (limit_maps[i].dp_limit == dp_limit) {
			*pp_limit = limit_maps[i].pp_limit;
			return DP_SUCCESS;
		}
	}
	/* everything else passes through unchanged */
	*pp_limit = dp_limit; /* kbps */
	if (*pp_limit > QOS_MAX_BANDWIDTH_LIMIT) {
		pr_err("Wrong dp shaper limit: %u\n", *pp_limit);
		return DP_FAILURE;
	}
	return DP_SUCCESS;
}

/* convert PP arbitrate to DP arbitrate.
 * Returns the DP arbitration value, or DP_FAILURE (silently) when the
 * PP value is unknown.
 */
int arbi_pp2dp(int pp_arbi)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(arbi_maps); i++) {
		if (arbi_maps[i].pp_arbi == pp_arbi)
			return arbi_maps[i].dp_arbi;
	}
	return DP_FAILURE;
}

/* convert DP arbitrate to PP arbitrate.
 * Returns the PP arbitration value, or DP_FAILURE (with an error log)
 * when the DP value is unknown.
 */
int arbi_dp2pp(int dp_arbi)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(arbi_maps); i++) {
		if (arbi_maps[i].dp_arbi == dp_arbi)
			return arbi_maps[i].pp_arbi;
	}
	pr_err("Wrong dp_arbitrate: %d\n", dp_arbi);
	return DP_FAILURE;
}

/* get_qid_by_node API
 * checks for queue node id
 * upon Success
 * return physical id of queue (linear scan of qos_queue_stat)
 * else return DP_FAILURE
 */
static int get_qid_by_node(int inst, int node_id, int flag)
{
	int i;
	struct hal_priv *priv = HAL(inst);

	for (i = 0; i < MAX_QUEUE; i++) {
		if (node_id == priv->qos_queue_stat[i].node_id)
			return i;
	}
	return DP_FAILURE;
}

/*
 get_cqm_deq_port_by_node API
 * checks for qos deque port
 * upon Success
 * return physical cqm_deq_port id (linear scan of deq_port_stat)
 * else return DP_FAILURE
 */
static int get_cqm_deq_port_by_node(int inst, int node_id, int flag)
{
	int i;
	struct hal_priv *priv = HAL(inst);

	for (i = 0; i < MAX_CQM_DEQ; i++) {
		if (node_id == priv->deq_port_stat[i].node_id)
			return i;
	}
	return DP_FAILURE;
}

/* dp_qos_queue_set_spl_31 - apply @cfg to queue @node_id with blocking forced.
 * Used during queue movement: the queue is put in blocked state so no new
 * packets are enqueued while it is re-parented; the local per-queue status
 * (qos_queue_stat[qid].blocked) is kept in sync on success.
 */
static int dp_qos_queue_set_spl_31(int inst, struct pp_qos_dev *qdev,
				   u32 node_id, u32 qid,
				   struct pp_qos_queue_conf *cfg)
{
	struct hal_priv *priv = HAL(inst);

	/* To set queue to blocking state during Q movement */
	cfg->blocked = 1;
	if (dp_qos_queue_set(qdev, node_id, cfg)) {
		pr_err("qos_queue_set fail: q[%d/%d]\n", qid, node_id);
		return DP_FAILURE;
	}
	/* Update Local Q status */
	priv->qos_queue_stat[qid].blocked = cfg->blocked;
	return DP_SUCCESS;
}

#ifndef DP_FLUSH_VIA_AUTO
/* cqm_queue_flush_31 - low-level CQM flush of queue @qid via the drop port.
 * Preconditions (established by the caller): the queue is already unmapped
 * in the lookup table, blocked/resumed, and attached to the drop port.
 */
static int cqm_queue_flush_31(int cqm_inst, int cqm_drop_port, int qid)
{
	/* Before call this API, the queue is already unmapped in lookup table,
	 * For the queue itself, it is blocked and resume.
	 * Also attached to drop port
	 */
	/* need call low level CQM API */
	cqm_qos_queue_flush(cqm_inst, cqm_drop_port, qid);
	DP_DEBUG(DP_DBG_FLAG_QOS, "%s done\n", __func__);
	return DP_SUCCESS;
}
#endif

/* Note: When this API is returned, make sure the queue is in suspend/block
 * since the queue may need to move to other scheduler/port after flush.
* node_id is logical node it here */ static int queue_flush_31(int inst, int node_id, int flag) { struct hal_priv *priv = HAL(inst); struct pp_qos_queue_conf queue_cfg = { 0 }; struct pp_qos_queue_conf tmp_q_cfg = { 0 }; int qid = get_qid_by_node(inst, node_id, 0); int res = DP_SUCCESS; struct dp_lookup_entry *lookup = NULL; int cbm_inst = dp_port_prop[inst].cbm_inst; u32 blocked = 0; int parent = 0; if (qid < 0) { pr_err("no physical qid for q_node=%d\n", node_id); res = DP_FAILURE; goto EXIT; } if (flag & FLUSH_RESTORE_QOS_PORT) { if (dp_qos_queue_conf_get(priv->qdev, node_id, &queue_cfg)) { pr_err("qos_queue_conf_get fail: q[%d/%d]\n", qid, node_id); res = DP_FAILURE; goto EXIT; } blocked = queue_cfg.blocked; parent = queue_cfg.queue_child_prop.parent; } else { blocked = priv->qos_queue_stat[qid].blocked; parent = priv->qos_sch_stat[node_id].parent.node_id; } lookup = devm_kzalloc(&g_dp_dev->dev, sizeof(*lookup), GFP_ATOMIC); if (!lookup) { res = DP_FAILURE; goto EXIT; } /* map to drop queue and save the changed lookup entries for recover */ dp_map_to_drop_q(inst, qid, lookup); /* block/disable: ensure to drop all coming enqueue packet */ if (blocked == 0) { /* to block */ if (pp_qos_queue_block(priv->qdev, node_id)) { pr_err("pp_qos_queue_block fail: q[%d/%d]\n", qid, node_id); res = DP_FAILURE; goto EXIT; } } if (parent == priv->ppv4_drop_p) { /* already attached to drop queue and can directly flush */ DP_DEBUG(DP_DBG_FLAG_QOS, "Flush:Q[%d] already under drop port[/%d]\n", qid, priv->ppv4_drop_p); cqm_queue_flush_31(cbm_inst, priv->cqm_drop_p, qid); } else { DP_DEBUG(DP_DBG_FLAG_QOS, "Queue movement before flush"); /*move to drop port and set block and resume the queue */ dp_qos_queue_conf_set_default(&tmp_q_cfg); /*use new variable */ dp_wred_def(&tmp_q_cfg); tmp_q_cfg.queue_child_prop.parent = priv->ppv4_drop_p; if (dp_qos_queue_set_spl_31(inst, priv->qdev, node_id, qid, &tmp_q_cfg)) { pr_err("qos_queue_set fail for queue=%d to parent=%d\n", qid, 
tmp_q_cfg.queue_child_prop.parent); goto EXIT; } DP_DEBUG(DP_DBG_FLAG_QOS, "Flush:Move Q[%d] to drop port[/%d]\n", qid, tmp_q_cfg.queue_child_prop.parent); cqm_queue_flush_31(cbm_inst, priv->cqm_drop_p, qid); if (flag & FLUSH_RESTORE_QOS_PORT) { /* move back the queue to original parent * with original variable queue_cfg */ if (dp_qos_queue_set_spl_31(inst, priv->qdev, node_id, qid, &queue_cfg)) { pr_err("qos_queue_conf_get fail: q[%d/%d]\n", qid, node_id); res = DP_FAILURE; goto EXIT; } DP_DEBUG(DP_DBG_FLAG_QOS, "Flush:Move Q[%d] back to port[/%d]\n", qid, queue_cfg.queue_child_prop.parent); } } /* restore lookup entry mapping for this qid if needed */ if (flag & FLUSH_RESTORE_LOOKUP) { int i; if (lookup->num) { DP_DEBUG(DP_DBG_FLAG_QOS, "Try to restore qid[%d] lookup entry: %d\n", qid, lookup->num); for (i = 0; i < lookup->num; i++) set_lookup_qid_via_index(lookup->entry[i], qid); } } EXIT: devm_kfree(&g_dp_dev->dev, lookup); lookup = NULL; return res; } /* get_node_type_by_node_id API * get node_type node_id in sch global table * upon Success * return node_type of node_id */ static int get_node_type_by_node_id(int inst, int node_id, int flag) { struct hal_priv *priv = HAL(inst); return priv->qos_sch_stat[node_id].type; } /* get_free_child_idx API * check free flag for child in parent's table and return index * else return DP_FAILURE */ static int get_free_child_idx(int inst, int node_id, int flag) { int i; struct hal_priv *priv = HAL(inst); struct pp_sch_stat *qos_sch_stat; qos_sch_stat = &priv->qos_sch_stat[node_id]; for (i = 0; i < DP_MAX_CHILD_PER_NODE; i++) { if (qos_sch_stat->child[i].flag == PP_NODE_FREE) return i; } return DP_FAILURE; } /* get_parent_node API * check parent flag in node global table if active retrun parent id * else return DP_FAILURE */ static int get_parent_node(int inst, int node_id, int flag) { struct hal_priv *priv = HAL(inst); int type = get_node_type_by_node_id(inst, node_id, 0); struct pp_sch_stat *qos_sch_stat; qos_sch_stat = 
&priv->qos_sch_stat[node_id];
	/* ports have no parent entry in this table */
	if (qos_sch_stat->parent.flag && type != DP_NODE_PORT)
		return qos_sch_stat->parent.node_id;
	return DP_FAILURE;
}

/* get_child_idx_node_id API
 * check free flag in parent's global table and return index
 * else return DP_FAILURE
 */
static int get_child_idx_node_id(int inst, int node_id, int flag)
{
	struct hal_priv *priv = HAL(inst);
	int i, p_id;
	struct pp_sch_stat *qos_sch_stat;

	/* look up the parent, then scan its child slots for node_id */
	qos_sch_stat = &priv->qos_sch_stat[node_id];
	p_id = qos_sch_stat->parent.node_id;
	qos_sch_stat = &priv->qos_sch_stat[p_id];
	for (i = 0; i < DP_MAX_CHILD_PER_NODE; i++) {
		if (node_id == qos_sch_stat->child[i].node_id)
			return i;
	}
	return DP_FAILURE;
}

/* node_queue_dec API
 * for queue id = node_id, flag = DP_NODE_DEC
 * Set Queue flag from PP_NODE_ACTIVE to PP_NODE_ALLOC
 * (detaches the queue from its parent's child table)
 * else return DP_FAILURE
 */
static int node_queue_dec(int inst, int node_id, int flag)
{
	struct hal_priv *priv;
	int phy_id, pid, idx;
	struct pp_queue_stat *qos_queue_stat;
	struct pp_sch_stat *qos_sch_stat;

	priv = HAL(inst);
	if (!priv) {
		pr_err("priv cannot be NULL\n");
		return DP_FAILURE;
	}
	phy_id = get_qid_by_node(inst, node_id, flag);
	if (phy_id == DP_FAILURE) {
		pr_err("get_qid_by_node failed\n");
		return DP_FAILURE;
	}
	qos_queue_stat = &priv->qos_queue_stat[phy_id];
	qos_sch_stat = &priv->qos_sch_stat[node_id];
	if (!(qos_queue_stat->flag & PP_NODE_ACTIVE)) {
		pr_err("Wrong Q[%d] Stat(%d):Expect ACTIVE\n",
		       phy_id, qos_queue_stat->flag);
		return DP_FAILURE;
	}
	if (!qos_sch_stat->parent.flag) {
		pr_err("Wrong Q[%d]'s Parent Stat(%d):Expect ACTIVE\n",
		       node_id, qos_sch_stat->parent.flag);
		return DP_FAILURE;
	}
	pid = get_parent_node(inst, node_id, flag);
	if (pid == DP_FAILURE) {
		pr_err("get_parent_node failed for Q:%d\n", phy_id);
		return DP_FAILURE;
	}
	DP_DEBUG(DP_DBG_FLAG_QOS, "parent:%d of Q:%d\n", pid, phy_id);
	idx = get_child_idx_node_id(inst, node_id, 0);
	if (idx == DP_FAILURE) {
		pr_err("get_child_idx_node_id failed for Q:%d\n", phy_id);
		return DP_FAILURE;
	}
	DP_DEBUG(DP_DBG_FLAG_QOS, "%s: parent:%d of Q:[%d/%d] child idx:%d\n",
		 __func__, pid, phy_id, node_id, idx);
	/* clear this queue's slot in the parent's child table */
	if (pid == qos_sch_stat->parent.node_id) {
		priv->qos_sch_stat[pid].child[idx].flag = PP_NODE_FREE;
		priv->qos_sch_stat[pid].child[idx].node_id = 0;
		priv->qos_sch_stat[pid].child[idx].type = 0;
	}
	qos_sch_stat->parent.node_id = 0;
	qos_sch_stat->parent.type = 0;
	qos_sch_stat->parent.flag = 0;
	/* NOTE(review): "|=" adds ALLOC without clearing ACTIVE — presumably
	 * intentional flag accumulation; confirm against flag semantics.
	 */
	qos_queue_stat->flag |= PP_NODE_ALLOC;
	qos_sch_stat->p_flag |= PP_NODE_ALLOC;
	return DP_SUCCESS;
}

/* node_queue_inc API
 * for queue id = node_id, flag = DP_NODE_INC
 * Set Queue flag from PP_NODE_ALLOC to PP_NODE_ACTIVE
 * (attaches the queue to a free slot in its parent's child table)
 * else return DP_FAILURE
 */
static int node_queue_inc(int inst, int node_id, int flag)
{
	struct hal_priv *priv;
	int phy_id, pid, idx = 0;
	struct pp_queue_stat *qos_queue_stat;
	struct pp_sch_stat *qos_sch_stat;

	priv = HAL(inst);
	if (!priv) {
		pr_err("priv cannot be NULL\n");
		return DP_FAILURE;
	}
	phy_id = get_qid_by_node(inst, node_id, flag);
	if (phy_id == DP_FAILURE) {
		pr_err("get_qid_by_node failed\n");
		return DP_FAILURE;
	}
	qos_queue_stat = &priv->qos_queue_stat[phy_id];
	qos_sch_stat = &priv->qos_sch_stat[node_id];
	if (!(qos_queue_stat->flag & PP_NODE_ALLOC)) {
		pr_err("Wrong Q[%d] Stat(%d):Expect ALLOC\n",
		       phy_id, qos_queue_stat->flag);
		return DP_FAILURE;
	}
	pid = get_parent_node(inst, node_id, flag);
	if (pid == DP_FAILURE) {
		pr_err("get_parent_node failed for Q:%d\n", phy_id);
		return DP_FAILURE;
	}
	DP_DEBUG(DP_DBG_FLAG_QOS, "parent:%d of Q:%d\n", pid, phy_id);
	idx = get_free_child_idx(inst, pid, 0);
	if (idx == DP_FAILURE) {
		pr_err("get_free_child_idx failed for Q:%d\n", phy_id);
		return DP_FAILURE;
	}
	DP_DEBUG(DP_DBG_FLAG_QOS, "%s: parent:%d of Q:[%d/%d] child idx:%d\n",
		 __func__, pid, phy_id, node_id, idx);
	/* record the queue in the parent's child table */
	if (pid == qos_sch_stat->parent.node_id) {
		priv->qos_sch_stat[pid].child[idx].flag = PP_NODE_ACTIVE;
		priv->qos_sch_stat[pid].child[idx].node_id = node_id;
		priv->qos_sch_stat[pid].child[idx].type = DP_NODE_QUEUE;
	}
	qos_queue_stat->flag |= PP_NODE_ACTIVE;
	qos_sch_stat->p_flag |= PP_NODE_ACTIVE;
	DP_DEBUG(DP_DBG_FLAG_QOS,
		 "Q:[%d] type:%d idx:%d attach to parent:%d\n",
		 CHILD(pid, idx).node_id, CHILD(pid, idx).type, idx, pid);
	return DP_SUCCESS;
}

/* node_queue_rst API
 * for queue id = node_id, flag = DP_NODE_RST
 * Set Queue flag from PP_NODE_ALLOC to PP_NODE_FREE
 * Set allocated memory free (clears both stat tables; returns a reserved
 * queue to its per-port reserve pool)
 * else return DP_FAILURE
 */
static int node_queue_rst(int inst, int node_id, int flag)
{
	struct hal_priv *priv;
	/* NOTE(review): get_qid_by_node() dereferences HAL(inst) internally,
	 * yet runs before the priv NULL check below — confirm HAL(inst)
	 * cannot be NULL here.
	 */
	int phy_id = get_qid_by_node(inst, node_id, flag);
	int dp_port, resv_idx;
	struct pp_queue_stat *qos_queue_stat;
	struct pp_sch_stat *qos_sch_stat;

	priv = HAL(inst);
	if (!priv) {
		pr_err("priv cannot be NULL\n");
		return DP_FAILURE;
	}
	if (phy_id == DP_FAILURE) {
		pr_err("get_qid_by_node failed\n");
		return DP_FAILURE;
	}
	qos_queue_stat = &priv->qos_queue_stat[phy_id];
	qos_sch_stat = &priv->qos_sch_stat[node_id];
	dp_port = qos_queue_stat->dp_port;
	resv_idx = qos_queue_stat->resv_idx;
	DP_DEBUG(DP_DBG_FLAG_QOS, "%s: Q:[%d/%d] resv_idx:%d\n",
		 __func__, phy_id, node_id, resv_idx);
	if (!(qos_queue_stat->flag & PP_NODE_ALLOC)) {
		pr_err("Wrong Q[%d] Stat(%d):Expect ALLOC\n",
		       phy_id, qos_queue_stat->flag);
		return DP_FAILURE;
	}
	/* Check for Reserve resource */
	if (qos_queue_stat->flag & PP_NODE_RESERVE) {
		priv->resv[dp_port].resv_q[resv_idx].flag = PP_NODE_FREE;
		DP_DEBUG(DP_DBG_FLAG_QOS,
			 "%s:Q:[%d/%d] resv_idx:%d of EP:%d free\n",
			 __func__, phy_id, node_id, resv_idx, dp_port);
	}
	/* Remove resource from global table */
	memset(qos_queue_stat, 0, sizeof(*qos_queue_stat));
	memset(qos_sch_stat, 0, sizeof(*qos_sch_stat));
	/* re-establish the "no reserved slot" marker after the wipe */
	qos_queue_stat->resv_idx = INV_RESV_IDX;
	qos_sch_stat->resv_idx = INV_RESV_IDX;
	DP_DEBUG(DP_DBG_FLAG_QOS, "%s:%s Q:[%d/%d] resv_idx:%d dp_port=%d\n",
		 __func__, "After memfree", phy_id, node_id, resv_idx,
		 dp_port);
	return DP_SUCCESS;
}

/* node_sched_dec API
 * for scheduler id = node_id, flag = DP_NODE_DEC
 * Set Sched flag from PP_NODE_ACTIVE to PP_NODE_ALLOC
 * (C_FLAG: drop one child reference; P_FLAG: detach from parent)
 * else return DP_FAILURE
 */
static int node_sched_dec(int inst, int node_id, int flag)
{
	struct hal_priv *priv;
	int pid, idx;
	struct pp_sch_stat *qos_sch_stat;

	priv = HAL(inst);
	if (!priv) {
		pr_err("priv cannot be NULL\n");
		return DP_FAILURE;
	}
	qos_sch_stat = &priv->qos_sch_stat[node_id];
	if (flag & C_FLAG) {
		/* child-side bookkeeping: one child detached from this sched */
		DP_DEBUG(DP_DBG_FLAG_QOS, "%s: parentSCH:[%d]\n",
			 __func__, node_id);
		if (!qos_sch_stat->child_num ||
		    !(qos_sch_stat->c_flag & PP_NODE_ACTIVE)) {
			pr_err("Wrong Sch[%d] Stat(%d)/child_num(%d):%s\n",
			       node_id, qos_sch_stat->c_flag,
			       qos_sch_stat->child_num,
			       "Expect ACTIVE Or non-zero child_num");
			return DP_FAILURE;
		}
		qos_sch_stat->child_num--;
		/* last child gone: sched falls back to ALLOC state */
		if (!qos_sch_stat->child_num)
			qos_sch_stat->c_flag |= PP_NODE_ALLOC;
		return DP_SUCCESS;
	} else if (flag & P_FLAG) {
		/* parent-side bookkeeping: detach this sched from its parent */
		if (!(qos_sch_stat->p_flag & PP_NODE_ACTIVE)) {
			pr_err("Wrong Sch[%d] Stat(%d):Expect ACTIVE\n",
			       node_id, priv->qos_sch_stat[node_id].p_flag);
			return DP_FAILURE;
		}
		pid = get_parent_node(inst, node_id, flag);
		if (pid == DP_FAILURE) {
			pr_err("get_parent_node failed for sched:%d\n",
			       node_id);
			return DP_FAILURE;
		}
		idx = get_child_idx_node_id(inst, node_id, flag);
		if (idx == DP_FAILURE) {
			pr_err("get_child_idx_node_id failed for sched:%d\n",
			       node_id);
			return DP_FAILURE;
		}
		DP_DEBUG(DP_DBG_FLAG_QOS,
			 "%s: parent:%d of SCH:[%d] child idx:%d\n",
			 __func__, pid, node_id, idx);
		if (!qos_sch_stat->parent.flag) {
			pr_err("Wrong SCH[%d] Parent Stat(%d):Expect ACTIV\n",
			       node_id, qos_sch_stat->parent.flag);
			return DP_FAILURE;
		}
		/* clear this sched's slot in the parent's child table */
		if (pid == qos_sch_stat->parent.node_id) {
			priv->qos_sch_stat[pid].child[idx].flag = PP_NODE_FREE;
			priv->qos_sch_stat[pid].child[idx].node_id = 0;
			priv->qos_sch_stat[pid].child[idx].type = 0;
		}
		qos_sch_stat->parent.node_id = 0;
		qos_sch_stat->parent.type = 0;
		qos_sch_stat->parent.flag = 0;
		qos_sch_stat->p_flag |= PP_NODE_ALLOC;
		return DP_SUCCESS;
	}
	/* neither C_FLAG nor P_FLAG given */
	return DP_FAILURE;
}

/* node_sched_inc API
 * for scheduler id = node_id, flag = DP_NODE_INC
 * Set Sched flag from PP_NODE_ALLOC to PP_NODE_ACTIVE
 * (C_FLAG: add one child reference; P_FLAG: attach to parent)
 * else return DP_FAILURE
 */
static int node_sched_inc(int inst, int node_id, int flag)
{
	struct hal_priv *priv;
	int pid, idx;
	struct pp_sch_stat *qos_sch_stat;

	priv = HAL(inst);
	if (!priv) {
		pr_err("priv cannot be NULL\n");
		return DP_FAILURE;
	}
	qos_sch_stat = &priv->qos_sch_stat[node_id];
	if (flag & C_FLAG) {
		/* child-side bookkeeping: one child attached to this sched */
		DP_DEBUG(DP_DBG_FLAG_QOS, "%s: parent SCH:[%d]\n",
			 __func__, node_id);
		/* with children it must already be ACTIVE ... */
		if (qos_sch_stat->child_num &&
		    !(qos_sch_stat->c_flag & PP_NODE_ACTIVE)) {
			pr_err("Wrong Sch[%d] Stat(%d)/child_num(%d):%s\n",
			       node_id, qos_sch_stat->c_flag,
			       qos_sch_stat->child_num,
			       "Expect ACTIVE And Non-Zero child_num");
			return DP_FAILURE;
		}
		/* ... without children it must at least be ALLOC */
		if (!qos_sch_stat->child_num &&
		    !(qos_sch_stat->c_flag & PP_NODE_ALLOC)) {
			pr_err("Wrong Sch[%d] Stat(%d)/child_num(%d):%s\n",
			       node_id, qos_sch_stat->c_flag,
			       qos_sch_stat->child_num,
			       "Expect ALLOC And zero child_num");
			return DP_FAILURE;
		}
		qos_sch_stat->child_num++;
		qos_sch_stat->c_flag |= PP_NODE_ACTIVE;
		return DP_SUCCESS;
	} else if (flag & P_FLAG) {
		/* parent-side bookkeeping: attach this sched to its parent */
		if (!(qos_sch_stat->p_flag & PP_NODE_ALLOC)) {
			pr_err("Wrong Sch[%d] Stat(%d):Expect ALLOC\n",
			       node_id, qos_sch_stat->p_flag);
			return DP_FAILURE;
		}
		pid = get_parent_node(inst, node_id, flag);
		if (pid == DP_FAILURE) {
			pr_err("get_parent_node failed for sched:%d\n",
			       node_id);
			return DP_FAILURE;
		}
		idx = get_free_child_idx(inst, pid, 0);
		if (idx == DP_FAILURE) {
			pr_err("get_free_child_idx failed for sched:%d\n",
			       node_id);
			return DP_FAILURE;
		}
		DP_DEBUG(DP_DBG_FLAG_QOS,
			 "%s: parent:%d of SCH:[%d] child idx:%d\n",
			 __func__, pid, node_id, idx);
		/* record the sched in the parent's child table */
		if (pid == qos_sch_stat->parent.node_id) {
			CHILD(pid, idx).flag = PP_NODE_ACTIVE;
			CHILD(pid, idx).node_id = node_id;
			CHILD(pid, idx).type = DP_NODE_SCH;
		}
		qos_sch_stat->p_flag |= PP_NODE_ACTIVE;
		DP_DEBUG(DP_DBG_FLAG_QOS,
			 "SCH:[%d] type:%d idx:%d attach to parent:%d\n",
			 CHILD(pid, idx).node_id, CHILD(pid, idx).type,
			 idx, pid);
		return DP_SUCCESS;
	}
	/* neither C_FLAG nor P_FLAG given */
	return DP_FAILURE;
}

/* node_sched_rst API
 * for scheduler id = node_id, flag = DP_NODE_RST
 * sanity check for child_num and both c and p_flag in alloc state
 * then reset whole sched
 * Set Sched flag from PP_NODE_ALLOC to PP_NODE_FREE
 * else return
 DP_FAILURE
 */
static int node_sched_rst(int inst, int node_id, int flag)
{
	struct hal_priv *priv;
	int dp_port, resv_idx;
	struct pp_sch_stat *qos_sch_stat;

	priv = HAL(inst);
	if (!priv) {
		pr_err("priv cannot be NULL\n");
		return DP_FAILURE;
	}
	qos_sch_stat = &priv->qos_sch_stat[node_id];
	dp_port = qos_sch_stat->dp_port;
	resv_idx = qos_sch_stat->resv_idx;
	DP_DEBUG(DP_DBG_FLAG_QOS, "%s:dp_port=%d SCH:%d resv_idx:%d\n",
		 __func__, dp_port, node_id, resv_idx);
	/* sanity check for child_num and both c and p_flag in alloc state
	 * then reset whole sched
	 */
	if (qos_sch_stat->child_num ||
	    !(qos_sch_stat->c_flag & PP_NODE_ALLOC) ||
	    !(qos_sch_stat->p_flag & PP_NODE_ALLOC)) {
		pr_err("Wrong Sch[%d] c_flag(%d)/p_flag(%d)/child_num(%d):%s\n",
		       node_id, qos_sch_stat->c_flag, qos_sch_stat->p_flag,
		       qos_sch_stat->child_num,
		       "Expect c_flag OR p_flag ALLOC OR Non-zero child_num");
		return DP_FAILURE;
	}
	/* Free Reserved Resource */
	if (qos_sch_stat->p_flag & PP_NODE_RESERVE) {
		priv->resv[dp_port].resv_sched[resv_idx].flag = PP_NODE_FREE;
		DP_DEBUG(DP_DBG_FLAG_QOS,
			 "node_sch_rst:Sch:[%d] resv_idx:%d of EP:%d free\n",
			 node_id, resv_idx, dp_port);
	}
	/* Free Global Resource */
	memset(qos_sch_stat, 0, sizeof(*qos_sch_stat));
	/* re-establish the "no reserved slot" marker after the wipe */
	qos_sch_stat->resv_idx = INV_RESV_IDX;
	DP_DEBUG(DP_DBG_FLAG_QOS, "%s:%s SCH:[/%d] resv_idx:%d dp_port=%d\n",
		 __func__, "After mem free", node_id, resv_idx, dp_port);
	return DP_SUCCESS;
}

/* node_port_dec API
 * Check for child_num and active flag
 * for port logical node_id, flag = DP_NODE_DEC
 * Set Port flag from PP_NODE_ACTIVE to PP_NODE_ALLOC
 * else return DP_FAILURE
 */
static int node_port_dec(int inst, int node_id, int flag)
{
	struct hal_priv *priv;
	int phy_id;
	struct cqm_deq_stat *deq_port_stat;

	priv = HAL(inst);
	if (!priv) {
		pr_err("priv cannot be NULL\n");
		return DP_FAILURE;
	}
	phy_id = get_cqm_deq_port_by_node(inst, node_id, flag);
	if (phy_id == DP_FAILURE) {
		pr_err("get_cqm_deq_port_by_node failed\n");
		return DP_FAILURE;
	}
	deq_port_stat = &priv->deq_port_stat[phy_id];
	if
	    (!deq_port_stat->child_num ||
	     !(deq_port_stat->flag & PP_NODE_ACTIVE)) {
		pr_err("Wrong P[%d] Stat(%d)/child_num(%d):%s\n",
		       phy_id, deq_port_stat->flag, deq_port_stat->child_num,
		       "Expect ACTIVE Or non-zero child_num");
		return DP_FAILURE;
	}
	/* drop one child reference in both the sched table and the
	 * dequeue-port table
	 */
	priv->qos_sch_stat[node_id].child_num--;
	deq_port_stat->child_num--;
	/* last child gone: port falls back to ALLOC state */
	if (!deq_port_stat->child_num)
		deq_port_stat->flag = PP_NODE_ALLOC;
	return DP_SUCCESS;
}

/* node_port_inc API
 * for port logical node_id, flag = DP_NODE_INC
 * Set Port flag from PP_NODE_ALLOC to PP_NODE_ACTIVE
 * else return DP_FAILURE
 */
static int node_port_inc(int inst, int node_id, int flag)
{
	struct hal_priv *priv;
	int phy_id;
	struct cqm_deq_stat *deq_port_stat;

	priv = HAL(inst);
	if (!priv) {
		pr_err("priv cannot be NULL\n");
		return DP_FAILURE;
	}
	phy_id = get_cqm_deq_port_by_node(inst, node_id, flag);
	if (phy_id == DP_FAILURE) {
		pr_err("get_cqm_deq_port_by_node failed\n");
		return DP_FAILURE;
	}
	deq_port_stat = &priv->deq_port_stat[phy_id];
	/* with children the port must already be ACTIVE ... */
	if (deq_port_stat->child_num &&
	    !(deq_port_stat->flag & PP_NODE_ACTIVE)) {
		pr_err("Wrong P[%d] Stat(%d)/child_num(%d):%s\n",
		       phy_id, deq_port_stat->flag, deq_port_stat->child_num,
		       "Expect ACTIVE And Non-Zero child_num");
		return DP_FAILURE;
	}
	/* ... without children it must at least be ALLOC */
	if (!deq_port_stat->child_num &&
	    !(deq_port_stat->flag & PP_NODE_ALLOC)) {
		pr_err("Wrong P[%d] Stat(%d)/child_num(%d):%s\n",
		       phy_id, deq_port_stat->flag, deq_port_stat->child_num,
		       "Expect ALLOC And Zero child_num");
		return DP_FAILURE;
	}
	/* add one child reference in both tables */
	priv->qos_sch_stat[node_id].child_num++;
	deq_port_stat->child_num++;
	deq_port_stat->flag = PP_NODE_ACTIVE;
	return DP_SUCCESS;
}

/* node_port_rst API
 * Check for child_num and alloc flag
 * for port logical node_id, flag = DP_NODE_RST
 * Set Port flag from PP_NODE_ALLOC to PP_NODE_FREE
 * else return DP_FAILURE
 */
static int node_port_rst(int inst, int node_id, int flag)
{
	struct hal_priv *priv;
	int phy_id;
	struct cqm_deq_stat *deq_port_stat;

	priv = HAL(inst);
	if (!priv) {
		pr_err("priv cannot be NULL\n");
		return DP_FAILURE;
	}
	phy_id =
get_cqm_deq_port_by_node(inst, node_id, flag);
	if (phy_id == DP_FAILURE) {
		pr_err("get_cqm_deq_port_by_node failed\n");
		return DP_FAILURE;
	}
	deq_port_stat = &priv->deq_port_stat[phy_id];
	/* reset only allowed when no children remain and port is ALLOC */
	if (deq_port_stat->child_num ||
	    !(deq_port_stat->flag & PP_NODE_ALLOC)) {
		pr_err("Wrong P[%d] Stat(%d)/child_num(%d):%s\n",
		       phy_id, deq_port_stat->flag, deq_port_stat->child_num,
		       "Expect ALLOC Or non-zero child_num");
		return DP_FAILURE;
	}
	/* wipe both the dequeue-port entry and its sched-table shadow */
	memset(deq_port_stat, 0, sizeof(*deq_port_stat));
	memset(&priv->qos_sch_stat[node_id], 0,
	       sizeof(priv->qos_sch_stat[node_id]));
	return DP_SUCCESS;
}

/* node_stat_update API
 * node_id is logical node id
 * if flag = DP_NODE_DEC
 *     update flag PP_NODE_ACTIVE to PP_NODE_ALLOC if needed
 *     update child info
 * else if flag = DP_NODE_INC
 *     update flag PP_NODE_ALLOC to PP_NODE_ACTIVE
 * else if flag = DP_NODE_RST
 *     update flag PP_NODE_ALLOC to PP_NODE_FREE
 *     reset table info
 * else return DP_FAILURE
 * Dispatches to the queue/sched/port variant based on the node's type.
 */
static int node_stat_update(int inst, int node_id, int flag)
{
	int node_type = get_node_type_by_node_id(inst, node_id, 0);

	if (flag & DP_NODE_DEC) {
		if (node_type == DP_NODE_QUEUE)
			return node_queue_dec(inst, node_id, flag);
		else if (node_type == DP_NODE_SCH)
			return node_sched_dec(inst, node_id, flag);
		else if (node_type == DP_NODE_PORT)
			return node_port_dec(inst, node_id, flag);
		return DP_FAILURE;
	} else if (flag & DP_NODE_INC) {
		if (node_type == DP_NODE_QUEUE)
			return node_queue_inc(inst, node_id, flag);
		else if (node_type == DP_NODE_SCH)
			return node_sched_inc(inst, node_id, flag);
		else if (node_type == DP_NODE_PORT)
			return node_port_inc(inst, node_id, flag);
		return DP_FAILURE;
	} else if (flag & DP_NODE_RST) {
		if (node_type == DP_NODE_QUEUE)
			return node_queue_rst(inst, node_id, flag);
		else if (node_type == DP_NODE_SCH)
			return node_sched_rst(inst, node_id, flag);
		else if (node_type == DP_NODE_PORT)
			return node_port_rst(inst, node_id, flag);
		return DP_FAILURE;
	}
	return DP_FAILURE;
}

/* dp_node_alloc_resv_pool API
 * Checks for flag and input node
 * upon success
 allocate resource from reserve table
 * otherwise return failure
 */
static int dp_node_alloc_resv_pool(struct dp_node_alloc *node, int flag)
{
	int i, cnt, phy_id, node_id;
	struct hal_priv *priv;
	struct resv_q *resv_q;
	struct pp_sch_stat *qos_sch_stat;
	struct pp_queue_stat *qos_queue_stat;

	if (!node) {
		pr_err("node is NULL\n");
		return DP_FAILURE;
	}
	/* NOTE(review): the ternary is redundant — node was NULL-checked
	 * just above.
	 */
	priv = node ? HAL(node->inst) : NULL;
	if (!priv) {
		pr_err("priv is NULL\n");
		return DP_FAILURE;
	}
	resv_q = priv->resv[node->dp_port].resv_q;
	DP_DEBUG(DP_DBG_FLAG_QOS,
		 "inst=%d dp_port=%d num_resv_q=%d num_resv_sched=%d\n",
		 node->inst, node->dp_port,
		 priv->resv[node->dp_port].num_resv_q,
		 priv->resv[node->dp_port].num_resv_sched);
	if (node->type == DP_NODE_QUEUE) {
		cnt = priv->resv[node->dp_port].num_resv_q;
		if (!cnt)
			return DP_FAILURE;
		DP_DEBUG(DP_DBG_FLAG_QOS, "try to look for resv queue...\n");
		/* claim the first free reserved queue of this dp_port */
		for (i = 0; i < cnt; i++) {
			if (resv_q[i].flag != PP_NODE_FREE)
				continue;
			phy_id = resv_q[i].physical_id;
			node_id = resv_q[i].id;
			resv_q[i].flag = PP_NODE_ALLOC;
			/* mirror the allocation in both global stat tables */
			qos_sch_stat = &priv->qos_sch_stat[node_id];
			qos_queue_stat = &priv->qos_queue_stat[phy_id];
			qos_queue_stat->flag = PP_NODE_RESERVE | PP_NODE_ALLOC;
			qos_queue_stat->node_id = node_id;
			qos_queue_stat->resv_idx = i;
			qos_queue_stat->dp_port = node->dp_port;
			qos_sch_stat->dp_port = node->dp_port;
			qos_sch_stat->resv_idx = i;
			qos_sch_stat->type = DP_NODE_QUEUE;
			qos_sch_stat->p_flag = PP_NODE_RESERVE | PP_NODE_ALLOC;
			node->id.q_id = phy_id;
			DP_DEBUG(DP_DBG_FLAG_QOS,
				 "queue[%d/%d]:Resv_idx=%d\n",
				 phy_id, node_id, qos_queue_stat->resv_idx);
			return DP_SUCCESS;
		}
	} else if (node->type == DP_NODE_SCH) {
		struct resv_sch *resv_sched;

		cnt = priv->resv[node->dp_port].num_resv_sched;
		if (!cnt)
			return DP_FAILURE;
		resv_sched = priv->resv[node->dp_port].resv_sched;
		/* claim the first free reserved scheduler of this dp_port */
		for (i = 0; i < cnt; i++) {
			if (resv_sched[i].flag != PP_NODE_FREE)
				continue;
			DP_DEBUG(DP_DBG_FLAG_QOS,
				 "try to look for resv sche...\n");
			node_id = resv_sched[i].id;
			resv_sched[i].flag = PP_NODE_ALLOC;
			qos_sch_stat =
&priv->qos_sch_stat[node_id];
			qos_sch_stat->c_flag = PP_NODE_RESERVE | PP_NODE_ALLOC;
			qos_sch_stat->p_flag = PP_NODE_RESERVE | PP_NODE_ALLOC;
			qos_sch_stat->resv_idx = i;
			qos_sch_stat->child_num = 0;
			qos_sch_stat->dp_port = node->dp_port;
			qos_sch_stat->type = DP_NODE_SCH;
			node->id.sch_id = node_id;
			DP_DEBUG(DP_DBG_FLAG_QOS,
				 "Sched[/%d]: Resv_idx=%d\n",
				 resv_sched[i].id, qos_sch_stat->resv_idx);
			return DP_SUCCESS;
		}
	}
	return DP_FAILURE;
}

/* dp_node_alloc_global_pool API
 * Checks for flag and input node
 * upon success allocate resource from global table
 * otherwise return failure
 */
static int dp_node_alloc_global_pool(struct dp_node_alloc *node, int flag)
{
	int id, phy_id;
	struct pp_qos_queue_info *qos_queue_info = NULL;
	struct pp_qos_queue_conf *q_conf = NULL;
	struct hal_priv *priv;
	int res = DP_FAILURE;
	struct pp_sch_stat *qos_sch_stat;
	struct pp_queue_stat *qos_queue_stat;

	if (!node) {
		pr_err("node is NULL\n");
		goto EXIT;
	}
	priv = HAL(node->inst);
	if (!priv) {
		pr_err("priv is NULL\n");
		goto EXIT;
	}
	q_conf = devm_kzalloc(&g_dp_dev->dev, sizeof(*q_conf), GFP_ATOMIC);
	if (!q_conf)
		goto EXIT;
	qos_queue_info = devm_kzalloc(&g_dp_dev->dev, sizeof(*qos_queue_info),
				      GFP_ATOMIC);
	if (!qos_queue_info)
		goto EXIT;
	if (node->type == DP_NODE_QUEUE) {
		/* allocate logical id + physical id from the PP QoS driver */
		if (dp_qos_queue_allocate_id_phy(priv->qdev, &id, &phy_id)) {
			pr_err("qos_queue_allocate fail\n");
			goto EXIT;
		}
		qos_queue_stat = &priv->qos_queue_stat[phy_id];
		qos_sch_stat = &priv->qos_sch_stat[id];
		qos_queue_stat->flag = PP_NODE_ALLOC;
		qos_queue_stat->node_id = id;
		qos_queue_stat->resv_idx = INV_RESV_IDX;
		qos_queue_stat->dp_port = node->dp_port;
		/* NOTE(review): q_conf is zero-initialized and never filled,
		 * so this records blocked = 0 — confirm that is intended.
		 */
		qos_queue_stat->blocked = q_conf->blocked;
		/* new queues start parented to the drop port */
		qos_sch_stat->parent.node_id = priv->ppv4_drop_p;
		qos_sch_stat->dp_port = node->dp_port;
		qos_sch_stat->resv_idx = INV_RESV_IDX;
		qos_sch_stat->type = DP_NODE_QUEUE;
		qos_sch_stat->p_flag = PP_NODE_ALLOC;
		node->id.q_id = phy_id;
		res = DP_SUCCESS;
		goto EXIT;
	} else if (node->type == DP_NODE_SCH) {
		if (dp_qos_sched_allocate(priv->qdev, &id)) {
			pr_err("failed to qos_sched_allocate\n");
			/* NOTE(review): if allocate failed, id may be
			 * uninitialized/invalid here — confirm this remove
			 * call is safe (or should be dropped).
			 */
			dp_qos_sched_remove(priv->qdev, id);
			goto EXIT;
		}
		qos_sch_stat = &priv->qos_sch_stat[id];
		qos_sch_stat->c_flag = PP_NODE_ALLOC;
		qos_sch_stat->p_flag = PP_NODE_ALLOC;
		qos_sch_stat->resv_idx = INV_RESV_IDX;
		qos_sch_stat->dp_port = node->dp_port;
		qos_sch_stat->child_num = 0;
		qos_sch_stat->type = DP_NODE_SCH;
		node->id.sch_id = id;
		res = DP_SUCCESS;
		goto EXIT;
	} else {
		pr_err("Unknown node type %d\n", node->type);
	}
EXIT:
	/* scratch buffers only — always released */
	devm_kfree(&g_dp_dev->dev, q_conf);
	devm_kfree(&g_dp_dev->dev, qos_queue_info);
	return res;
}

/* dp_alloc_qos_port API
 * upon success returns qos_deq_port (allocating and configuring the PP QoS
 * dequeue port on first use; returns the cached node on repeat calls)
 * otherwise return failure
 */
static int dp_alloc_qos_port(struct dp_node_alloc *node, int flag)
{
	u32 qos_port;
	int cqm_deq_port;
	int inst;
	struct pp_qos_port_conf port_cfg;
	struct hal_priv *priv;
	struct cqm_port_info *deq;
	struct cqm_deq_stat *deq_port_stat;
	struct pp_sch_stat *qos_sch_stat;

	if (!node) {
		pr_err("node NULL\n");
		goto EXIT;
	}
	inst = node->inst;
	priv = HAL(node->inst);
	if (!priv) {
		pr_err("priv NULL\n");
		goto EXIT;
	}
	cqm_deq_port = node->id.cqm_deq_port;
	DP_DEBUG(DP_DBG_FLAG_QOS_DETAIL,
		 "inst=%d dp_port=%d cqm_deq_port=%d\n",
		 node->inst, node->dp_port, cqm_deq_port);
	if (cqm_deq_port == DP_NODE_AUTO_ID) {
		pr_err("cqm_deq_port wrong: %d\n", cqm_deq_port);
		goto EXIT;
	}
	deq_port_stat = &priv->deq_port_stat[cqm_deq_port];
	/* already allocated earlier: return the cached QoS node id */
	if (deq_port_stat->flag != PP_NODE_FREE) {
		DP_DEBUG(DP_DBG_FLAG_QOS, "cqm_deq_port[%d] already init:\n",
			 cqm_deq_port);
		return deq_port_stat->node_id;
	}
	if (dp_qos_port_allocate(priv->qdev, cqm_deq_port, &qos_port)) {
		pr_err("failed to qos_port_allocate:%d\n", cqm_deq_port);
		goto EXIT;
	}
	qos_sch_stat = &priv->qos_sch_stat[qos_port];
	deq = get_dp_deqport_info(inst, cqm_deq_port);
	/* Configure QOS dequeue port */
	dp_qos_port_conf_set_default(&port_cfg);
	port_cfg.ring_address = (unsigned long)deq->txpush_addr_qos;
	port_cfg.ring_size = deq->tx_ring_size;
	port_cfg.credit = deq->tx_pkt_credit;
	if (port_cfg.credit)
port_cfg.packet_credit_enable = 1;
	/* QoS ports always arbitrate WSP across their children */
	port_cfg.port_parent_prop.arbitration = PP_QOS_ARBITRATION_WSP;
	if (dp_qos_port_set(priv->qdev, qos_port, &port_cfg)) {
		pr_err("qos_port_set fail for port %d/%d\n",
		       cqm_deq_port, qos_port);
		dp_qos_port_remove(priv->qdev, qos_port);
		goto EXIT;
	}
	/* record the new port in both the deq-port and node tables */
	deq_port_stat->flag = PP_NODE_ALLOC;
	qos_sch_stat->type = DP_NODE_PORT;
	qos_sch_stat->child_num = 0;
	deq_port_stat->node_id = qos_port;
	deq_port_stat->disabled = port_cfg.disable;
	DP_DEBUG(DP_DBG_FLAG_QOS_DETAIL,
		 "%s ok: port=%d/%d for dp_port=%d\n",
		 __func__, cqm_deq_port, qos_port, node->dp_port);
	return qos_port;
EXIT:
	return DP_FAILURE;
}

/* dp_node_alloc_31 API
 * Checks for flag and input node type
 * upon success allocate node from global/reserve resource
 * otherwise return failure
 */
int dp_node_alloc_31(struct dp_node_alloc *node, int flag)
{
	struct hal_priv *priv;

	if (!node) {
		pr_err("node NULL\n");
		return DP_FAILURE;
	}
	priv = HAL(node->inst);
	if (!priv) {
		pr_err("priv is NULL cannot proceed!!\n");
		return DP_FAILURE;
	}
	DP_DEBUG(DP_DBG_FLAG_QOS, "allocate flag %d\n", flag);
	if (flag & DP_ALLOC_RESV_ONLY) {
		/* if can get from its reserved resource and return DP_SUCCESS.
		 * Otherwise return DP_FAIL;
		 */
		return dp_node_alloc_resv_pool(node, flag);
	}
	if (flag & DP_ALLOC_GLOBAL_ONLY) {
		/* if can get from global pool and return DP_SUCCESS.
		 * Otherwise return DP_FAILURE; */
		return dp_node_alloc_global_pool(node, flag);
	}
	if (flag & DP_ALLOC_GLOBAL_FIRST) {
		/* if can get from the global pool, return DP_SUCCESS;
		 * if can get from its reserved resource and return DP_SUCCESS;
		 * return DP_FAILURE;
		 */
		if (dp_node_alloc_global_pool(node, flag) == DP_SUCCESS)
			return DP_SUCCESS;
		return dp_node_alloc_resv_pool(node, flag);
	}
	/* default order: reserved pool, */
	/* if can get from its reserved resource and return DP_SUCCESS
	 * if can get from the global pool, return DP_SUCCESS
	 * return DP_FAILURE
	 */
	if (dp_node_alloc_resv_pool(node, flag) == DP_SUCCESS)
		return DP_SUCCESS;
	return dp_node_alloc_global_pool(node, flag);
}

/* dp_map_to_drop_q API
 * check index in lookup table for q_id and set drop_q
 * Every lookup entry currently pointing at q_id is retargeted to the
 * drop queue; matched indices are optionally reported via *lookup.
 */
void dp_map_to_drop_q(int inst, int q_id, struct dp_lookup_entry *lookup)
{
	u32 i, k = 0;
	struct hal_priv *priv = HAL(inst);

	for (i = 0; i < MAX_LOOKUP_TBL_SIZE; i++) {
		if (q_id == get_lookup_qid_via_index(i)) {
			if (lookup) {
				lookup->entry[k] = i;
				k++;
			}
			set_lookup_qid_via_index(i, priv->ppv4_drop_q);
		}
	}
	if (lookup)
		lookup->num = k;
}

/* dp_smart_free_from_child_31 API
 * flush and unlink queue from its parent
 * check parent's child list if empty free parent recursively
 * else return DP_FAILURE
 */
static int dp_smart_free_from_child_31(struct dp_node_alloc *node, int flag)
{
	int id, res, f_free;
	struct dp_node_link info = { 0 };
	struct dp_node_alloc temp = { 0 };
	struct hal_priv *priv;
	struct pp_queue_stat *qos_queue_stat;
	struct pp_sch_stat *qos_sch_stat;

	if (!node) {
		pr_err("node is NULL cannot proceed!!\n");
		return DP_FAILURE;
	}
	priv = HAL(node->inst);
	if (!priv) {
		pr_err("priv is NULL cannot proceed!!\n");
		return DP_FAILURE;
	}
	DP_DEBUG(DP_DBG_FLAG_QOS_DETAIL, "%s:type=%d q_id=%d\n",
		 __func__, node->type, node->id.q_id);
	if (node->type == DP_NODE_QUEUE) {
		if (!is_qid_valid(node->id.q_id))
			return DP_FAILURE;
		qos_queue_stat = &priv->qos_queue_stat[node->id.q_id];
		info.node_id.q_id = node->id.q_id;
		info.node_type =
node->type;
		id = qos_queue_stat->node_id;
		qos_sch_stat = &priv->qos_sch_stat[id];
		/* remember the parent before the queue is unlinked/freed */
		info.p_node_id.q_id = qos_sch_stat->parent.node_id;
		info.p_node_type = qos_sch_stat->parent.type;
		if (!info.p_node_id.q_id) {
			DP_DEBUG(DP_DBG_FLAG_QOS,
				 "current node doesnot have parent\n");
		} else if (dp_node_unlink_31(&info, 0)) {
			pr_err("dp_node_unlink_31 failed\n");
			return DP_FAILURE;
		}
		if (dp_node_free_31(node, 0)) {
			pr_err("failed to free Queue:[%d]\n", node->id.q_id);
			return DP_FAILURE;
		}
	} else if (node->type == DP_NODE_SCH) {
		if (!is_sch_valid(node->id.sch_id))
			return DP_FAILURE;
		info.node_id.sch_id = node->id.sch_id;
		info.node_type = node->type;
		id = node->id.sch_id;
		qos_sch_stat = &priv->qos_sch_stat[id];
		info.p_node_id.q_id = qos_sch_stat->parent.node_id;
		info.p_node_type = qos_sch_stat->parent.type;
		/* NOTE(review): unlike the queue branch, no explicit unlink
		 * is done for a scheduler before freeing it — presumably
		 * dp_node_free_31() handles it; confirm
		 */
		if (!info.p_node_id.sch_id) {
			DP_DEBUG(DP_DBG_FLAG_QOS,
				 "current node doesnot have parent\n");
			return DP_FAILURE;
		}
		if (dp_node_free_31(node, 0)) {
			pr_err("failed to free Sched:[%d]\n",
			       node->id.sch_id);
			return DP_FAILURE;
		}
	} else if (node->type == DP_NODE_PORT) {
		if (!is_deqport_valid(node->id.cqm_deq_port))
			return DP_FAILURE;
		if (dp_node_free_31(node, 0)) {
			pr_err("failed to free Port:[%d]\n",
			       node->id.cqm_deq_port);
			return DP_FAILURE;
		}
		/* a port has no parent to walk up to: done */
		return DP_SUCCESS;
	}
	/* walk up the tree, freeing each ancestor that became childless */
	while (1) {
		info.node_id = info.p_node_id;
		info.node_type = info.p_node_type;
		temp.id = info.p_node_id;
		temp.type = info.p_node_type;
		if (temp.type == DP_NODE_PORT) {
			temp.id.cqm_deq_port =
				get_cqm_deq_port_by_node(node->inst,
							 temp.id.cqm_deq_port,
							 flag);
			id = temp.id.cqm_deq_port;
			if (!is_deqport_valid(id))
				return DP_FAILURE;
		} else {
			id = temp.id.sch_id;
			if (!is_sch_valid(id))
				return DP_FAILURE;
		}
		DP_DEBUG(DP_DBG_FLAG_QOS_DETAIL, "%s:type=%d q_id=%d\n",
			 __func__, temp.type, id);
		/* stop climbing once an ancestor still has children */
		if ((temp.type == DP_NODE_SCH &&
		     priv->qos_sch_stat[id].child_num) ||
		    (temp.type == DP_NODE_PORT &&
		     priv->deq_port_stat[id].child_num)) {
			DP_DEBUG(DP_DBG_FLAG_QOS_DETAIL,
				 "parent node(%d) still have child\n", id);
			break;
		}
		/* f_free non-zero means this node has no parent of its own */
		f_free = (dp_node_link_get_31(&info, 0));
		res = dp_node_free_31(&temp, 0);
		if (res) {
			pr_err("failed to free node:%d res %d\n",
			       temp.id.q_id, res);
			return DP_FAILURE;
		}
		if (f_free) {
			DP_DEBUG(DP_DBG_FLAG_QOS_DETAIL,
				 "current node doesnot have parent\n");
			break;
		}
	}
	return DP_SUCCESS;
}

/* dp_free_children_via_parent_31 API
 * reset parent to free state
 * check parent's child list and free all resources recursively
 * else return DP_FAILURE
 */
int dp_free_children_via_parent_31(struct dp_node_alloc *node, int flag)
{
	int idx, id, pid, child_id;
	struct dp_node_alloc temp = { 0 };
	struct dp_node_link info = { 0 };
	struct hal_priv *priv;
	struct pp_sch_stat *qos_sch_stat;

	if (!node) {
		pr_err("node is NULL cannot proceed!!\n");
		return DP_FAILURE;
	}
	priv = HAL(node->inst);
	if (!priv) {
		pr_err("priv is NULL cannot proceed!!\n");
		return DP_FAILURE;
	}
	if (node->type == DP_NODE_PORT) {
		if (!is_deqport_valid(node->id.cqm_deq_port))
			return DP_FAILURE;
		id = priv->deq_port_stat[node->id.cqm_deq_port].node_id;
		qos_sch_stat = &priv->qos_sch_stat[id];
		DP_DEBUG(DP_DBG_FLAG_QOS,
			 "parent Port(%d) have child num:%d\n",
			 node->id.cqm_deq_port, qos_sch_stat->child_num);
		/* depth-first: free every active child subtree first */
		for (idx = 0; idx < DP_MAX_CHILD_PER_NODE; idx++) {
			if (qos_sch_stat->child[idx].flag & PP_NODE_ACTIVE) {
				temp.type = CHILD(id, idx).type;
				child_id = get_qid_by_node(node->inst,
							   CHILD(id, idx).node_id,
							   0);
				/* schedulers keep the node id, queues use
				 * the physical qid
				 */
				if (CHILD(id, idx).type == DP_NODE_SCH)
					temp.id.q_id = CHILD(id, idx).node_id;
				else
					temp.id.q_id = child_id;
				if (dp_free_children_via_parent_31(&temp, 0)) {
					pr_err("fail %s=%d child:%d type=%d\n",
					       "to free Port's",
					       node->id.cqm_deq_port,
					       CHILD(id, idx).node_id,
					       CHILD(id, idx).type);
					return DP_FAILURE;
				}
			}
		}
		DP_DEBUG(DP_DBG_FLAG_QOS,
			 "Port(%d)'s all children:%d freed!\n",
			 node->id.cqm_deq_port, qos_sch_stat->child_num);
		if (!qos_sch_stat->child_num) {
			if (dp_node_free_31(node, 0)) {
				pr_err("failed to free Port:[%d]\n",
				       node->id.cqm_deq_port);
				return DP_FAILURE;
			}
		}
	} else if (node->type == DP_NODE_SCH) {
		if (!is_sch_valid(node->id.sch_id))
			return DP_FAILURE;
		id = node->id.sch_id;
		qos_sch_stat = &priv->qos_sch_stat[id];
		DP_DEBUG(DP_DBG_FLAG_QOS,
			 "parent SCH(%d) have child num:%d\n",
			 node->id.sch_id, qos_sch_stat->child_num);
		for (idx = 0; idx < DP_MAX_CHILD_PER_NODE; idx++) {
			if (qos_sch_stat->child[idx].flag & PP_NODE_ACTIVE) {
				temp.type = CHILD(id, idx).type;
				child_id = get_qid_by_node(node->inst,
							   CHILD(id, idx).node_id,
							   0);
				if (CHILD(id, idx).type == DP_NODE_SCH)
					temp.id.q_id = CHILD(id, idx).node_id;
				else
					temp.id.q_id = child_id;
				if (dp_free_children_via_parent_31(&temp, 0)) {
					pr_err("fail %s=%d child:%d type=%d\n",
					       "to free Sched's",
					       node->id.sch_id,
					       CHILD(id, idx).node_id,
					       CHILD(id, idx).type);
					return DP_FAILURE;
				}
				DP_DEBUG(DP_DBG_FLAG_QOS,
					 "Free SCH(%d)'s child:%d done!\n",
					 node->id.sch_id,
					 CHILD(id, idx).node_id);
			}
		}
		DP_DEBUG(DP_DBG_FLAG_QOS,
			 "SCH(%d)'s all children:%d freed!\n",
			 node->id.sch_id, qos_sch_stat->child_num);
		if (!qos_sch_stat->child_num) {
			if (dp_node_free_31(node, 0)) {
				pr_err("failed to free Sched:[%d]\n",
				       node->id.sch_id);
				return DP_FAILURE;
			}
		}
	} else if (node->type == DP_NODE_QUEUE) {
		if (!is_qid_valid(node->id.q_id) ||
		    is_q_node_free(priv, node->id.q_id))
			return DP_FAILURE;
		/* get parent node from global table and fill info */
		info.node_id.q_id = node->id.q_id;
		info.node_type = node->type;
		pid = priv->qos_queue_stat[node->id.q_id].node_id;
		qos_sch_stat = &priv->qos_sch_stat[pid];
		info.p_node_id.q_id = qos_sch_stat->parent.node_id;
		info.p_node_type = qos_sch_stat->parent.type;
		if (priv->qos_queue_stat[node->id.q_id].flag !=
		    PP_NODE_FREE) {
			if (dp_node_unlink_31(&info, 0)) {
				pr_err("dp_node_unlink_31 failed\n");
				return DP_FAILURE;
			}
			if (dp_node_free_31(node, 0)) {
				pr_err("failed to free Queue:[%d]\n",
				       node->id.q_id);
				return DP_FAILURE;
			}
			DP_DEBUG(DP_DBG_FLAG_QOS,
				 "%s:Q[%d] Parent:%d type:%d\n",
				 "free_children_via_parent",
				 node->id.q_id, info.p_node_id.q_id,
				 info.p_node_type);
		}
	} else {
		pr_err("Incorrect Parameter:%d\n", node->type);
		return
DP_FAILURE; } return DP_SUCCESS; } struct link_free_var { struct pp_qos_queue_conf queue_cfg; struct pp_qos_queue_conf tmp_q; struct pp_qos_sched_conf sched_cfg; struct pp_qos_sched_conf tmp_sch; struct dp_node_link info; int sch_id, phy_id, node_id, parent_id, p_type; int dp_port, resv_flag; }; /* dp_node_free_31 API * if (this node is not unlinked yet) * unlink it * If (this node is a reserved node) * return this node to this device's reserved node table * mark this node as Free in this device's reserved node table * else * return this node to the system global table * mark this node free in system global table */ int dp_node_free_31(struct dp_node_alloc *node, int flag) { struct hal_priv *priv; struct link_free_var *t = NULL; int res = DP_FAILURE; struct pp_queue_stat *qos_queue_stat; struct pp_sch_stat *qos_sch_stat; struct cqm_deq_stat *deq_port_stat; if (!node) { pr_err("node is NULL cannot proceed!!\n"); return DP_FAILURE; } priv = HAL(node->inst); if (!priv) { pr_err("priv is NULL cannot proceed!!\n"); return DP_FAILURE; } t = devm_kzalloc(&g_dp_dev->dev, sizeof(*t), GFP_ATOMIC); if (!t) return DP_FAILURE; if (flag == DP_NODE_SMART_FREE) { /* dont pass flag */ res = dp_smart_free_from_child_31(node, 0); if (res == DP_FAILURE) { pr_err("dp_smart_free_from_child_31 failed\n"); goto EXIT; } } if (node->type == DP_NODE_QUEUE) { if (is_q_node_free(priv, node->id.q_id)) goto EXIT; qos_queue_stat = &priv->qos_queue_stat[node->id.q_id]; t->node_id = qos_queue_stat->node_id; t->dp_port = qos_queue_stat->dp_port; t->resv_flag = qos_queue_stat->flag; /* call node unlink api and set drop queue */ t->info.inst = node->inst; t->info.node_id = node->id; t->info.node_type = node->type; if (dp_node_unlink_31(&t->info, 0)) { pr_err("failed to dp_node_unlink_31 for Q:%d\n", node->id.q_id); goto EXIT; } t->parent_id = priv->qos_sch_stat[t->node_id].parent.node_id; t->p_type = get_node_type_by_node_id(node->inst, t->parent_id, flag); /* Remove Queue link only for global 
resource */ if (!(t->resv_flag & PP_NODE_RESERVE)) { if (dp_qos_queue_remove(priv->qdev, t->node_id)) { pr_err("failed to qos_queue_remove\n"); goto EXIT; } } if (!(qos_queue_stat->flag & PP_NODE_ACTIVE)) { res = DP_SUCCESS; goto EXIT; } if (node_stat_update(node->inst, t->node_id, DP_NODE_DEC)) { pr_err("node_stat_update failed\n"); goto EXIT; } /* call node_stat_update to update parent status */ if (node_stat_update(node->inst, t->parent_id, DP_NODE_DEC | C_FLAG)) { pr_err("stat update fail Q:[%d/%d]'s parent:%d\n", node->id.q_id, t->node_id, t->parent_id); goto EXIT; } DP_DEBUG(DP_DBG_FLAG_QOS, "Q[%d] removed parent:[%d] stat updated\n", node->id.q_id, t->parent_id); /* call node_Stat_update to free the node */ if (node_stat_update(node->inst, t->node_id, DP_NODE_RST)) { pr_err("node_stat_update failed\n"); goto EXIT; } DP_DEBUG(DP_DBG_FLAG_QOS, "Queue[%d/%d] removed and stat updated\n", node->id.q_id, t->node_id); /* Reserve Q temp attach to drop port */ if (t->resv_flag & PP_NODE_RESERVE) { dp_qos_queue_conf_set_default(&t->tmp_q); t->tmp_q.queue_child_prop.parent = priv->ppv4_drop_p; if (dp_qos_queue_set(priv->qdev, t->node_id, &t->tmp_q)) { pr_err("qos_queue_set %s=%d to parent=%d\n", "fail to reserve queue", t->node_id, t->tmp_q.queue_child_prop.parent); goto EXIT; } priv->qos_sch_stat[t->node_id].parent.node_id = t->tmp_q.queue_child_prop.parent; priv->qos_queue_stat[node->id.q_id].blocked = t->tmp_q.blocked; DP_DEBUG(DP_DBG_FLAG_QOS, "Q:%d/%d attached temp to Drop_port:%d\n", node->id.q_id, t->node_id, t->tmp_q.queue_child_prop.parent); } res = DP_SUCCESS; goto EXIT; } else if (node->type == DP_NODE_SCH) { bool c_active, p_active; t->sch_id = node->id.sch_id; qos_sch_stat = &priv->qos_sch_stat[t->sch_id]; t->dp_port = qos_sch_stat->dp_port; t->resv_flag = qos_sch_stat->p_flag; if (qos_sch_stat->child_num) { pr_err("Node Sch[%d] still have child num %d\n", t->sch_id, qos_sch_stat->child_num); goto EXIT; } if (is_sch_parent_free(priv, t->sch_id)) goto EXIT; 
c_active = (qos_sch_stat->c_flag & PP_NODE_ACTIVE) ? true : false; p_active = (qos_sch_stat->p_flag & PP_NODE_ACTIVE) ? true : false; t->parent_id = priv->qos_sch_stat[t->sch_id].parent.node_id; t->p_type = get_node_type_by_node_id(node->inst, t->parent_id, flag); /* Remove Sched link only if global resource */ if (!(t->resv_flag & PP_NODE_RESERVE)) { if (dp_qos_sched_remove(priv->qdev, t->sch_id)) { pr_err("failed to qos_sched_remove\n"); goto EXIT; } } if (!p_active && !c_active) { res = DP_SUCCESS; goto EXIT; } if (p_active & node_stat_update(node->inst, t->sch_id, DP_NODE_DEC | P_FLAG)) { pr_err("node_stat_update failed\n"); goto EXIT; } /* call node_stat_update to update parent status */ if (c_active & node_stat_update(node->inst, t->parent_id, DP_NODE_DEC | C_FLAG)) { pr_err("stat update fail Sch:[%d]'s parent:%d\n", node->id.sch_id, t->parent_id); goto EXIT; } DP_DEBUG(DP_DBG_FLAG_QOS, "SCH[%d] removed and parent:[%d] stat updated\n", node->id.sch_id, t->parent_id); /* call node_stat_update to free the node */ if (node_stat_update(node->inst, t->sch_id, DP_NODE_RST | P_FLAG)) { pr_err("Node Reset failed Sched[/%d]\n", node->id.sch_id); goto EXIT; } DP_DEBUG(DP_DBG_FLAG_QOS, "Sched[%d] removed and stat updated\n", node->id.sch_id); /* Reserve Sched temp attach to drop port */ if (t->resv_flag & PP_NODE_RESERVE) { dp_qos_sched_conf_set_default(&t->tmp_sch); t->tmp_sch.sched_child_prop.parent = priv->ppv4_drop_p; if (dp_qos_sched_set(priv->qdev, t->sch_id, &t->tmp_sch)) { pr_err("qos_sched_set %s=%d to parent=%d\n", "fail to reserve SCH", t->sch_id, t->tmp_sch.sched_child_prop.parent); goto EXIT; } DP_DEBUG(DP_DBG_FLAG_QOS, "SCH:%d attached temp to Drop_port:%d\n", t->sch_id, t->tmp_sch.sched_child_prop.parent); } res = DP_SUCCESS; goto EXIT; } else if (node->type == DP_NODE_PORT) { t->phy_id = node->id.cqm_deq_port; deq_port_stat = &priv->deq_port_stat[t->phy_id]; t->node_id = deq_port_stat->node_id; if (deq_port_stat->child_num) { pr_err("Node port[%d] still 
have child num %d\n", t->phy_id, deq_port_stat->child_num); goto EXIT; } if (deq_port_stat->flag & PP_NODE_ACTIVE) { res = node_stat_update(node->inst, t->node_id, DP_NODE_DEC); if (res == DP_FAILURE) { pr_err("Wrong Port %d flag:0x%x\n", t->phy_id, deq_port_stat->flag); goto EXIT; } goto EXIT; } /* No reset API call for Port should freed by child's call */ if (deq_port_stat->flag & PP_NODE_ALLOC) { res = DP_SUCCESS; goto EXIT; } pr_err("Unexpect port %d flag %d\n", t->phy_id, deq_port_stat->flag); goto EXIT; } pr_err("Unexpect node->type %d\n", node->type); EXIT: if (res == DP_FAILURE) pr_err("failed to free node:%d res %d\n", node->id.q_id, res); devm_kfree(&g_dp_dev->dev, t); return res; } /* dp_qos_parent_chk API * checks for parent type * upon Success * return parent node id * else return DP_FAILURE */ static int dp_qos_parent_chk(struct dp_node_link *info, int flag) { struct dp_node_alloc node; if (info->p_node_type == DP_NODE_SCH) { if (info->p_node_id.sch_id == DP_NODE_AUTO_ID) { node.inst = info->inst; node.type = info->p_node_type; node.dp_port = info->dp_port; if ((dp_node_alloc_31(&node, flag)) == DP_FAILURE) { pr_err("dp_node_alloc_31 queue alloc fail\n"); return DP_FAILURE; } info->p_node_id = node.id; } return info->p_node_id.sch_id; } else if (info->p_node_type == DP_NODE_PORT) { node.inst = info->inst; node.id = info->cqm_deq_port; node.type = info->p_node_type; node.dp_port = info->dp_port; return dp_alloc_qos_port(&node, flag); } return DP_FAILURE; } /* get_parent_arbi API * return parent's arbi of given node * else return DP_FAILURE */ static int get_parent_arbi(int inst, int node_id, int flag) { int pid, arbi; struct hal_priv *priv = HAL(inst); struct pp_qos_sched_conf sched_cfg = { 0 }; struct pp_qos_port_conf port_cfg = { 0 }; struct pp_sch_stat *qos_sch_stat = &priv->qos_sch_stat[node_id]; if (qos_sch_stat->parent.flag == PP_NODE_FREE) { pr_err("Parent is not set for node\n"); return DP_FAILURE; } pid = qos_sch_stat->parent.node_id; if 
(qos_sch_stat->parent.type == DP_NODE_SCH) { if (dp_qos_sched_conf_get(priv->qdev, pid, &sched_cfg)) { pr_err("fail to get sched config\n"); return DP_FAILURE; } arbi = arbi_pp2dp(sched_cfg.sched_parent_prop.arbitration); if (arbi == DP_FAILURE) pr_err("Wrong pp_arbitrate: %d for %s:%d\n", port_cfg.port_parent_prop.arbitration, (qos_sch_stat->type == DP_NODE_SCH) ? "sched" : "Q", node_id); } else if (qos_sch_stat->parent.type == DP_NODE_PORT) { if (dp_qos_port_conf_get(priv->qdev, pid, &port_cfg)) { pr_err("fail to get port config\n"); return DP_FAILURE; } arbi = arbi_pp2dp(port_cfg.port_parent_prop.arbitration); if (arbi == DP_FAILURE) pr_err("Wrong pp_arbitrate: %d for %s:%d\n", port_cfg.port_parent_prop.arbitration, (qos_sch_stat->type == DP_NODE_SCH) ? "sched" : "Q", node_id); } else { pr_err("incorrect parent type:0x%x for node:%d.\n", qos_sch_stat->parent.type, node_id); return DP_FAILURE; } return arbi; } /* dp_node_link_get_31 API * upon success check node link info and return DP_SUCCESS * else return DP_FAILURE */ int dp_node_link_get_31(struct dp_node_link *info, int flag) { struct pp_qos_queue_conf queue_cfg = { 0 }; struct pp_qos_sched_conf sched_cfg = { 0 }; struct hal_priv *priv; int node_id, arbi; struct pp_queue_stat *qos_queue_stat; struct pp_sch_stat *qos_sch_stat; if (!info) { pr_err("info cannot be NULL\n"); return DP_FAILURE; } priv = HAL(info->inst); if (!priv) { pr_err("priv cannot be NULL\n"); return DP_FAILURE; } if (info->node_type == DP_NODE_QUEUE) { qos_queue_stat = &priv->qos_queue_stat[info->node_id.q_id]; node_id = qos_queue_stat->node_id; if (dp_qos_queue_conf_get(priv->qdev, node_id, &queue_cfg)) { pr_err("failed to qos_queue_conf_get\n"); return DP_FAILURE; } if (!queue_cfg.queue_child_prop.parent || !(qos_queue_stat->flag & PP_NODE_ACTIVE)) return DP_FAILURE; info->p_node_id.q_id = queue_cfg.queue_child_prop.parent; info->p_node_type = get_node_type_by_node_id(info->inst, queue_cfg.queue_child_prop.parent, flag); arbi = 
get_parent_arbi(info->inst, node_id, flag); if (arbi == DP_FAILURE) return DP_FAILURE; info->arbi = arbi; if (info->arbi == ARBITRATION_WRR) info->prio_wfq = queue_cfg.queue_child_prop.wrr_weight; else if (info->arbi == ARBITRATION_WSP) info->prio_wfq = queue_cfg.queue_child_prop.priority; else pr_err("Invalid Arbi %s %d\n", __func__, info->arbi); info->leaf = 0; return DP_SUCCESS; } else if (info->node_type == DP_NODE_SCH) { qos_sch_stat = &priv->qos_sch_stat[info->node_id.sch_id]; if (dp_qos_sched_conf_get(priv->qdev, info->node_id.sch_id, &sched_cfg)) { pr_err("failed to qos_sched_conf_get\n"); return DP_FAILURE; } if (!sched_cfg.sched_child_prop.parent) { pr_err("sched child do not have parent\n"); return DP_FAILURE; } if (!(qos_sch_stat->p_flag & PP_NODE_ACTIVE)) { pr_err("sched id %d flag not active, flag %d\n", info->node_id.sch_id, qos_sch_stat->p_flag); return DP_FAILURE; } info->p_node_id.sch_id = sched_cfg.sched_child_prop.parent; info->p_node_type = get_node_type_by_node_id(info->inst, sched_cfg.sched_child_prop.parent, flag); info->arbi = arbi_pp2dp(sched_cfg.sched_parent_prop.arbitration); if (info->arbi == ARBITRATION_WRR) info->prio_wfq = sched_cfg.sched_child_prop.wrr_weight; else if (info->arbi == ARBITRATION_WSP) info->prio_wfq = sched_cfg.sched_child_prop.priority; else pr_err("Invalid Arbi %s %d\n", __func__, info->arbi); info->leaf = 0; return DP_SUCCESS; } return DP_FAILURE; } static int dp_map_qid_to_cqmdeq(struct dp_node_link *info, int flag) { struct dp_qos_link q_link = { 0 }; struct dp_node_child node = { 0 }; struct dp_node_link node_info = { 0 }; int idx; if (info->node_type == DP_NODE_QUEUE) { q_link.q_id = info->node_id.q_id; dp_link_get_31(&q_link, 0); DP_DEBUG(DP_DBG_FLAG_QOS, "Parent PORT[%d]\n", q_link.cqm_deq_port); cqm_qid2ep_map_set(q_link.q_id, q_link.cqm_deq_port); DP_DEBUG(DP_DBG_FLAG_QOS, "%s qid:%d, dq_port:%d\n", "cqm_qid2ep_map_set", q_link.q_id, q_link.cqm_deq_port); } else if (info->node_type == DP_NODE_SCH) { 
node.type = info->node_type; node.id.sch_id = info->node_id.sch_id; if (dp_children_get_31(&node, flag)) { DP_DEBUG(DP_DBG_FLAG_QOS, "dp_children_get fail (qid->cqmdq map)\n"); } DP_DEBUG(DP_DBG_FLAG_QOS, "Node[%d]has %d Children!!\n", node.id.q_id, node.num); for (idx = 0; idx < node.num; idx++) { if (node.child[idx].type == DP_NODE_QUEUE) { q_link.q_id = node.child[idx].id.q_id; dp_link_get_31(&q_link, 0); DP_DEBUG(DP_DBG_FLAG_QOS, "Parent PORT[%d]\n", q_link.cqm_deq_port); cqm_qid2ep_map_set(q_link.q_id, q_link.cqm_deq_port); DP_DEBUG(DP_DBG_FLAG_QOS, "%s qid:%d, dq_port:%d %s:%d->%s:%d\n", "cqm_qid2ep_map_set", q_link.q_id, q_link.cqm_deq_port, "child", idx, "Q", node.child[idx].id.q_id); break; } node_info.node_type = node.child[idx].type; node_info.node_id = node.child[idx].id; dp_map_qid_to_cqmdeq(&node_info, flag); } } return DP_SUCCESS; } /* if conflict return first available free priority * If no conflict return the original priority * Assuming Prio is from 0 .. 7, this logic only used for WSP */ static int is_prio_used(int inst, int parent_node, int prio_wfq) { struct hal_priv *priv = HAL(inst); int i = 0, child_node = 0; struct pp_sch_stat *qos_sch_stat = &priv->qos_sch_stat[parent_node]; int child_prio_wfq = 0; int conflict = 0; u8 arr[MAX_PP_CHILD_PER_NODE] = { 0 }; /* If priority is more than or equal to 8 DP cannot * remove conflict, upper layer need to take care */ if (prio_wfq >= MAX_PP_CHILD_PER_NODE) goto EXIT; /* Check whether there is any conflict */ for (i = 0; i < MAX_PP_CHILD_PER_NODE; i++) { child_node = qos_sch_stat->child[i].node_id; child_prio_wfq = priv->qos_sch_stat[child_node].prio_wfq; if (qos_sch_stat->child[i].flag != PP_NODE_FREE) { /* Mark all the used priorities */ arr[child_prio_wfq] = 1; /* If someone really using this prio, return conflict */ if (prio_wfq == child_prio_wfq) conflict = 1; } } /* if conflict Return first available free priority */ if (conflict) { /* Find the first free priority */ for (i = 0; i < 
MAX_PP_CHILD_PER_NODE; i++) { if (!arr[i]) break; } /* No free priority found return fail */ if (i >= MAX_PP_CHILD_PER_NODE) { pr_err("Cannot get a free priority, %s", "upper layer need to handle proper priority\n"); goto EXIT; } /* Return the priority DP calculated */ return i; } EXIT: /* If no conflict return the original priority */ return prio_wfq; } /* dp_link_set API * upon success links node to parent and returns DP_SUCCESS * else return DP_FAILURE */ static int dp_link_set(struct dp_node_link *info, int parent_node, int flag) { int node_id; int res = DP_FAILURE; int node_flag = DP_NODE_INC; struct hal_priv *priv = HAL(info->inst); struct pp_qos_queue_conf *queue_cfg; struct pp_qos_sched_conf *sched_cfg; queue_cfg = devm_kzalloc(&g_dp_dev->dev, sizeof(*queue_cfg), GFP_ATOMIC); sched_cfg = devm_kzalloc(&g_dp_dev->dev, sizeof(*sched_cfg), GFP_ATOMIC); if (!queue_cfg || !sched_cfg) goto ERROR_EXIT; if (info->arbi == ARBITRATION_WSP) { /* Use the free priority DP calculated if there is conflict */ info->prio_wfq = is_prio_used(info->inst, parent_node, info->prio_wfq); } if (info->node_type == DP_NODE_QUEUE) { dp_qos_queue_conf_set_default(queue_cfg); node_id = priv->qos_queue_stat[info->node_id.q_id].node_id; if ((priv->qos_queue_stat[info->node_id.q_id].flag & PP_NODE_ALLOC)) { dp_wred_def(queue_cfg); } else { if (dp_qos_queue_conf_get(priv->qdev, node_id, queue_cfg)) { pr_err("fail to qos_queue_conf_get\n"); return DP_FAILURE; } } queue_cfg->queue_child_prop.parent = parent_node; /* convert q_id to logical node id and pass it to * low level api */ node_id = priv->qos_queue_stat[info->node_id.q_id].node_id; if (info->arbi == ARBITRATION_WRR) queue_cfg->queue_child_prop.wrr_weight = info->prio_wfq; else if (info->arbi == ARBITRATION_WSP) queue_cfg->queue_child_prop.priority = info->prio_wfq; else pr_err("Invalid Arbi %s %d\n", __func__, info->arbi); DP_DEBUG(DP_DBG_FLAG_QOS, "Try to link Q[%d/%d] to parent[%d/%d] port[%d]\n", info->node_id.q_id, node_id, 
info->p_node_id.cqm_deq_port, queue_cfg->queue_child_prop.parent, info->cqm_deq_port.cqm_deq_port); if (dp_qos_queue_set_spl_31(info->inst, priv->qdev, node_id, info->node_id.q_id, queue_cfg)) { pr_err("failed to qos_queue_set\n"); dp_qos_queue_remove(priv->qdev, node_id); goto ERROR_EXIT; } goto EXIT; } else if (info->node_type == DP_NODE_SCH) { node_id = info->node_id.sch_id; if (dp_qos_sched_conf_get(priv->qdev, node_id, sched_cfg)) dp_qos_sched_conf_set_default(sched_cfg); sched_cfg->sched_child_prop.parent = parent_node; if (info->arbi == ARBITRATION_WRR) sched_cfg->sched_child_prop.wrr_weight = info->prio_wfq; else if (info->arbi == ARBITRATION_WSP) sched_cfg->sched_child_prop.priority = info->prio_wfq; else pr_err("Invalid Arbi %s %d\n", __func__, info->arbi); DP_DEBUG(DP_DBG_FLAG_QOS, "Try to link SCH[/%d] to parent[%d/%d] port[%d]\n", node_id, info->p_node_id.cqm_deq_port, sched_cfg->sched_child_prop.parent, info->cqm_deq_port.cqm_deq_port); if (dp_qos_sched_set(priv->qdev, node_id, sched_cfg)) { pr_err("failed to %s %d parent_node %d\n", "qos_sched_set node_id", node_id, parent_node); dp_qos_sched_remove(priv->qdev, node_id); goto ERROR_EXIT; } node_flag |= P_FLAG; goto EXIT; } goto ERROR_EXIT; EXIT: res = DP_SUCCESS; /* fill parent info in child's global table */ priv->qos_sch_stat[node_id].parent.node_id = parent_node; priv->qos_sch_stat[node_id].parent.type = info->p_node_type; priv->qos_sch_stat[node_id].parent.flag = PP_NODE_ACTIVE; priv->qos_sch_stat[node_id].prio_wfq = info->prio_wfq; /* increase child_num in parent's global table and status */ DP_DEBUG(DP_DBG_FLAG_QOS, "node_stat_update after %s start\n", __func__); node_stat_update(info->inst, node_id, node_flag); node_stat_update(info->inst, parent_node, DP_NODE_INC | C_FLAG); DP_DEBUG(DP_DBG_FLAG_QOS, "node_stat_update after %s end\n", __func__); dp_map_qid_to_cqmdeq(info, flag); /* Set parent arbitration */ if (set_parent_arbi(info->inst, node_id, info->arbi, flag)) { pr_err("fail to set 
arbi:%d in Parent of node:%d\n", info->arbi, node_id); return DP_FAILURE; } DP_DEBUG(DP_DBG_FLAG_QOS_DETAIL, "%s:node=%d arbi=%d prio=%d\n", __func__, info->node_id.q_id, info->arbi, info->prio_wfq); ERROR_EXIT: devm_kfree(&g_dp_dev->dev, queue_cfg); devm_kfree(&g_dp_dev->dev, sched_cfg); return res; } /* set_parent_arbi API * set arbitration of node_id's all children and return DP_SUCCESS * else return DP_FAILURE */ static int set_parent_arbi(int inst, int node_id, int arbi, int flag) { int pid; struct hal_priv *priv = HAL(inst); struct pp_qos_sched_conf sched_cfg = { 0 }; struct pp_qos_port_conf port_cfg = { 0 }; if (priv->qos_sch_stat[node_id].parent.flag == PP_NODE_FREE) { pr_err("Parent is not set for node\n"); return DP_FAILURE; } pid = priv->qos_sch_stat[node_id].parent.node_id; arbi = arbi_dp2pp(arbi); if (arbi == DP_FAILURE) { pr_err("Incorrect arbitration value provided:%d!\n", arbi); return DP_FAILURE; } if (priv->qos_sch_stat[node_id].parent.type == DP_NODE_SCH) { if (dp_qos_sched_conf_get(priv->qdev, pid, &sched_cfg)) { pr_err("fail to get sched config\n"); return DP_FAILURE; } sched_cfg.sched_parent_prop.arbitration = arbi; if (dp_qos_sched_set(priv->qdev, pid, &sched_cfg)) { pr_err("fail to set arbi sched:%d parent of node:%d\n", pid, node_id); return DP_FAILURE; } } else if (priv->qos_sch_stat[node_id].parent.type == DP_NODE_PORT) { /* QoS port arbitr is WSP mode always except for drop port */ if ((arbi == PP_QOS_ARBITRATION_WRR) && (pid != priv->ppv4_drop_p)) return DP_SUCCESS; if (dp_qos_port_conf_get(priv->qdev, pid, &port_cfg)) { pr_err("fail to get port config\n"); return DP_FAILURE; } port_cfg.port_parent_prop.arbitration = arbi; if (dp_qos_port_set(priv->qdev, pid, &port_cfg)) { pr_err("fail to set arbi port:%d parent of node:%d\n", pid, node_id); return DP_FAILURE; } } else { pr_err("incorrect parent type:0x%x for node:%d.\n", priv->qos_sch_stat[node_id].parent.type, node_id); return DP_FAILURE; } return DP_SUCCESS; } /* 
dp_qos_link_prio_set_31 API
 * set node_prio struct for link and return DP_SUCCESS
 * else return DP_FAILURE
 */
int dp_qos_link_prio_set_31(struct dp_node_prio *info, int flag)
{
	struct pp_qos_queue_conf queue_cfg = { 0 };
	struct pp_qos_sched_conf sched_cfg = { 0 };
	struct hal_priv *priv;
	int node_id;

	if (!info) {
		pr_err("info cannot be NULL\n");
		return DP_FAILURE;
	}
	priv = HAL(info->inst);
	if (!priv) {
		pr_err("priv cannot be NULL\n");
		return DP_FAILURE;
	}
	if (info->type == DP_NODE_QUEUE) {
		if (!is_qid_valid(info->id.q_id) ||
		    is_q_node_free(priv, info->id.q_id))
			return DP_FAILURE;
		/* translate physical qid to logical node id first */
		node_id = priv->qos_queue_stat[info->id.q_id].node_id;
		if (dp_qos_queue_conf_get(priv->qdev, node_id, &queue_cfg)) {
			pr_err("fail to get queue prio and parent\n");
			return DP_FAILURE;
		}
		/* WRR takes a weight, WSP takes a strict priority */
		if (info->arbi == ARBITRATION_WRR)
			queue_cfg.queue_child_prop.wrr_weight = info->prio_wfq;
		else if (info->arbi == ARBITRATION_WSP)
			queue_cfg.queue_child_prop.priority = info->prio_wfq;
		DP_DEBUG(DP_DBG_FLAG_QOS_DETAIL,
			 "Prio:%d paased to low level for queue[%d]\n",
			 info->prio_wfq, info->id.q_id);
		if (dp_qos_queue_set(priv->qdev, node_id, &queue_cfg)) {
			pr_err("failed to qos_queue_set\n");
			return DP_FAILURE;
		}
		/* mirror the applied priority in the shadow table */
		priv->qos_sch_stat[node_id].prio_wfq = info->prio_wfq;
		/* get parent conf and set arbi in parent */
		if (set_parent_arbi(info->inst, node_id, info->arbi, flag)) {
			pr_err("fail to set arbi:%d in Parent of Q:%d\n",
			       info->arbi, node_id);
			return DP_FAILURE;
		}
		DP_DEBUG(DP_DBG_FLAG_QOS_DETAIL,
			 "%s:Q=%d arbi=%d prio=%d\n",
			 __func__, info->id.q_id, info->arbi, info->prio_wfq);
		return DP_SUCCESS;
	} else if (info->type == DP_NODE_SCH) {
		if (!is_sch_valid(info->id.sch_id) ||
		    is_sch_parent_free(priv, info->id.sch_id))
			return DP_FAILURE;
		if (dp_qos_sched_conf_get(priv->qdev, info->id.sch_id,
					  &sched_cfg)) {
			pr_err("fail to get sched prio and parent\n");
			return DP_FAILURE;
		}
		if (info->arbi == ARBITRATION_WRR)
			sched_cfg.sched_child_prop.wrr_weight = info->prio_wfq;
		else if (info->arbi == ARBITRATION_WSP)
			sched_cfg.sched_child_prop.priority = info->prio_wfq;
		else
			pr_err("Invalid Arbi %s %d\n", __func__, info->arbi);
		DP_DEBUG(DP_DBG_FLAG_QOS_DETAIL,
			 "Prio:%d paased to low level for Sched[%d]\n",
			 info->prio_wfq, info->id.sch_id);
		if (dp_qos_sched_set(priv->qdev, info->id.sch_id,
				     &sched_cfg)) {
			pr_err("failed to qos_sched_set\n");
			return DP_FAILURE;
		}
		priv->qos_sch_stat[info->id.sch_id].prio_wfq = info->prio_wfq;
		/* get parent conf and set arbi in parent */
		if (set_parent_arbi(info->inst, info->id.sch_id,
				    info->arbi, 0)) {
			pr_err("fail to set arbi:%d Parent of Sched:%d\n",
			       info->arbi, info->id.sch_id);
			return DP_FAILURE;
		}
		DP_DEBUG(DP_DBG_FLAG_QOS_DETAIL,
			 "%s:Sched=%d arbi=%d prio=%d\n",
			 __func__, info->id.sch_id, info->arbi,
			 info->prio_wfq);
		return DP_SUCCESS;
	}
	pr_err("incorrect info type provided:0x%x\n", info->type);
	return DP_FAILURE;
}

/* dp_qos_link_prio_get_31 API
 * get node_prio struct for link and return DP_SUCCESS
 * else return DP_FAILURE
 */
int dp_qos_link_prio_get_31(struct dp_node_prio *info, int flag)
{
	struct hal_priv *priv;
	int node_id, arbi;

	if (!info) {
		pr_err("info cannot be NULL\n");
		return DP_FAILURE;
	}
	priv = HAL(info->inst);
	if (!priv) {
		pr_err("priv cannot be NULL\n");
		return DP_FAILURE;
	}
	if (info->type == DP_NODE_QUEUE) {
		if (!is_qid_valid(info->id.q_id) ||
		    is_q_node_free(priv, info->id.q_id))
			return DP_FAILURE;
		node_id = priv->qos_queue_stat[info->id.q_id].node_id;
		arbi = get_parent_arbi(info->inst, node_id, flag);
		if (arbi == DP_FAILURE)
			return DP_FAILURE;
		info->arbi = arbi;
		/* priority is read back from the shadow table, not hardware */
		info->prio_wfq = priv->qos_sch_stat[node_id].prio_wfq;
		return DP_SUCCESS;
	} else if (info->type == DP_NODE_SCH) {
		if (!is_sch_valid(info->id.sch_id) ||
		    is_sch_parent_free(priv, info->id.sch_id))
			return DP_FAILURE;
		arbi = get_parent_arbi(info->inst, info->id.sch_id, flag);
		if (arbi == DP_FAILURE)
			return DP_FAILURE;
		info->arbi = arbi;
		info->prio_wfq = priv->qos_sch_stat[info->id.sch_id].prio_wfq;
		return DP_SUCCESS;
	}
	pr_err("incorrect info type provided:0x%x\n",
	       info->type);
	return DP_FAILURE;
}

/* dp_deq_port_res_get_31 API
 * Remove link of attached nodes and return DP_SUCCESS
 * else return DP_FAILURE
 */
/* Scratch storage shared by the walkers below; static to keep it off the
 * stack. NOTE(review): not reentrant — presumably serialized by qos_lock
 * in qos_platform_set; confirm before adding new callers.
 */
union local_t {
	struct pp_qos_port_info p_info;
	struct pp_qos_queue_info q_info;
	struct pp_qos_queue_conf q_conf;
	struct pp_qos_sched_conf sched_conf;
};

static union local_t t;

/* Collect, for one CQM dequeue port (or all ports of a dp_port), every
 * attached queue plus its scheduler chain up to the QoS port.
 * Fills res->q_res only when the caller provided the array.
 */
int dp_deq_port_res_get_31(struct dp_dequeue_res *res, int flag)
{
	struct hal_priv *priv;
	u16 q_ids[MAX_Q_PER_PORT] = { 0 };
	u32 q_num;
	u32 q_size = MAX_Q_PER_PORT;
	int i, j, k;
	int port_num = 1;
	int p_id, idx;
	struct pmac_port_info *port_info;

	if (!res) {
		pr_err("res cannot be NULL\n");
		return DP_FAILURE;
	}
	priv = HAL(res->inst);
	if (!priv) {
		pr_err("priv cannot be NULL\n");
		return DP_FAILURE;
	}
	port_info = get_dp_port_info(res->inst, res->dp_port);
	if (!port_info->deq_port_num)
		return DP_FAILURE;
	/* DEQ_PORT_OFFSET_ALL: walk the whole dequeue-port range */
	if (res->cqm_deq_idx == DEQ_PORT_OFFSET_ALL) {
		port_num = port_info->deq_port_num;
		res->cqm_deq_port = port_info->deq_port_base;
		res->num_deq_ports = port_info->deq_port_num;
	} else {
		res->cqm_deq_port = port_info->deq_port_base +
				    res->cqm_deq_idx;
		res->num_deq_ports = 1;
	}
	if (!is_deqport_valid(res->cqm_deq_port))
		return DP_FAILURE;
	DP_DEBUG(DP_DBG_FLAG_QOS_DETAIL,
		 "%s:dp_port=%d cqm_deq_port=(%d ~%d) %d\n",
		 __func__, res->dp_port, res->cqm_deq_port,
		 res->cqm_deq_port + port_num - 1,
		 port_info->deq_port_num);
	res->num_q = 0;
	idx = 0;
	for (k = res->cqm_deq_port; k < (res->cqm_deq_port + port_num); k++) {
		if (priv->deq_port_stat[k].flag == PP_NODE_FREE)
			continue;
		q_num = 0;
		if (dp_qos_port_get_queues(priv->qdev,
					   priv->deq_port_stat[k].node_id,
					   q_ids, q_size, &q_num)) {
			pr_err("qos_port_get_queues: port[%d/%d]\n",
			       k, priv->deq_port_stat[k].node_id);
			return DP_FAILURE;
		}
		res->num_q += q_num;
		/* caller only wanted the count */
		if (!res->q_res)
			continue;
#if IS_ENABLED(CONFIG_INTEL_DATAPATH_DBG)
		DP_DEBUG(DP_DBG_FLAG_QOS_DETAIL,
			 "port[%d/%d] queue list\n",
			 k, priv->deq_port_stat[k].node_id);
		for (i = 0; i < q_num; i++)
			DP_DEBUG(DP_DBG_FLAG_QOS_DETAIL, " q[/%d]\n",
				 q_ids[i]);
#endif
		for (i = 0; (i < q_num) && (idx < res->q_res_size); i++) {
			memset(&t.q_info, 0, sizeof(t.q_info));
			if (dp_qos_queue_info_get(priv->qdev, q_ids[i],
						  &t.q_info)) {
				pr_err("qos_port_info_get fail:q[/%d]\n",
				       q_ids[i]);
				continue;
			}
			res->q_res[idx].q_id = t.q_info.physical_id;
			res->q_res[idx].q_node = q_ids[i];
			DP_DEBUG(DP_DBG_FLAG_QOS_DETAIL, "q[%d/%d]\n",
				 t.q_info.physical_id, q_ids[i]);
			res->q_res[idx].sch_lvl = 0;
			memset(&t.q_conf, 0, sizeof(t.q_conf));
			if (dp_qos_queue_conf_get(priv->qdev, q_ids[i],
						  &t.q_conf)) {
				pr_err("qos_port_conf_get fail:q[/%d]\n",
				       q_ids[i]);
				continue;
			}
			/* climb the parent chain: sched ... sched -> port */
			p_id = t.q_conf.queue_child_prop.parent;
			j = 0;
			do {
				if (priv->qos_sch_stat[p_id].type ==
				    DP_NODE_PORT) {
					/* port */
					res->q_res[idx].qos_deq_port = p_id;
					res->q_res[idx].cqm_deq_port = k;
					res->q_res[idx].cqm_deq_port_type =
					dp_deq_port_tbl[res->inst][k].cpu_type;
					break;
				} else if (priv->qos_sch_stat[p_id].type !=
					   DP_NODE_SCH) {
					pr_err("wrong p[/%d] type:%d\n", p_id,
					       priv->qos_sch_stat[p_id].type);
					break;
				}
				/* for sched as parent */
				res->q_res[idx].sch_id[j] = p_id;
				j++;
				res->q_res[idx].sch_lvl = j;
				/* get next parent */
				if (dp_qos_sched_conf_get(priv->qdev, p_id,
							  &t.sched_conf)) {
					pr_err("qos_sched_conf_get %s[/%d]\n",
					       "fail:sch", p_id);
					break;
				}
				p_id = t.sched_conf.sched_child_prop.parent;
			} while (1);
			idx++;
		}
	}
	return DP_SUCCESS;
}

/* dp_node_unlink_31 API
 * check child node keep queue in blocked state
 * flush queues and return DP_SUCCESS
 * Else return DP_FAILURE
 */
int dp_node_unlink_31(struct dp_node_link *info, int flag)
{
	struct hal_priv *priv;
	u16 queue_buf[MAX_Q_PER_PORT] = { 0 };
	int queue_size = MAX_Q_PER_PORT;
	int queue_num = 0;
	int i, node_id;
	struct pp_queue_stat *qos_queue_stat;
	struct pp_sch_stat *qos_sch_stat;

	if (!info) {
		pr_err("info cannot be NULL\n");
		return DP_FAILURE;
	}
	priv = HAL(info->inst);
	if (!priv) {
		pr_err("priv cannot be NULL\n");
		return DP_FAILURE;
	}
	/* only one of the two aliases is meaningful, per node_type below */
	qos_queue_stat = &priv->qos_queue_stat[info->node_id.q_id];
	qos_sch_stat = &priv->qos_sch_stat[info->node_id.sch_id];
	if
(info->node_type == DP_NODE_QUEUE) { node_id = qos_queue_stat->node_id; /* Need to check ACTIVE Flag */ if (!(qos_queue_stat->flag & PP_NODE_ACTIVE)) { DP_DEBUG(DP_DBG_FLAG_QOS, "Queue[%d]:No need unlink since not active\n", info->node_id.q_id); return DP_SUCCESS; } cqm_qid2ep_map_set(info->node_id.q_id, priv->ppv4_drop_p); DP_DEBUG(DP_DBG_FLAG_QOS, "%s qid:%d, dq_port:%d\n", "cqm_qid2ep_map_set to drop port", info->node_id.q_id, priv->ppv4_drop_p); queue_flush_31(info->inst, node_id, 0); } else if (info->node_type == DP_NODE_SCH) { if (!(qos_sch_stat->c_flag & PP_NODE_ACTIVE)) { DP_DEBUG(DP_DBG_FLAG_QOS, "Sched[%d]: No need unlink since not active\n", info->node_id.sch_id); return DP_SUCCESS; } if (dp_qos_sched_get_queues(priv->qdev, info->node_id.sch_id, queue_buf, queue_size, &queue_num)) return DP_FAILURE; for (i = 0; i < queue_num; i++) { if (!(priv->qos_queue_stat[queue_buf[i]].flag & PP_NODE_ACTIVE)) continue; queue_flush_31(info->inst, queue_buf[i], 0); } } return DP_SUCCESS; } struct link_add_var { struct pp_qos_queue_conf queue_cfg; struct pp_qos_sched_conf sched_cfg; struct dp_node_alloc node; u16 queue_buf[MAX_Q_PER_PORT]; int q_orig_block[MAX_Q_PER_PORT]; int q_orig_suspend[MAX_Q_PER_PORT]; int queue_size; int queue_num; int node_id; struct pp_node parent; int f_child_free; int f_parent_free; int f_sch_auto_id; int f_restore; }; /* dp_node_link_add_31 API * check for parent type and allocate parent node * then check for child type and allocate child node * then call dp_link_set api to link child to parent * upon success links node to given parent and return DP_SUCCESS * else return DP_FAILURE */ int dp_node_link_add_31(struct dp_node_link *info, int flag) { #define DP_SUSPEND(t) ((t)->queue_cfg.common_prop.suspended) int i; int res = DP_SUCCESS; struct hal_priv *priv; struct link_add_var *t = NULL; int qid = 0; if (!info) { pr_err("info cannot be NULL\n"); return DP_FAILURE; } priv = HAL(info->inst); if (!priv) { pr_err("priv cannot be NULL\n"); return 
DP_FAILURE; } if (!info->dp_port && info->dp_port != DP_PORT(info).dp_port) { pr_err("Fix wrong dp_port from %d to %d\n", info->dp_port, DP_PORT(info).dp_port); info->dp_port = DP_PORT(info).dp_port; } t = devm_kzalloc(&g_dp_dev->dev, sizeof(*t), GFP_ATOMIC); if (!t) return DP_FAILURE; for (i = 0; i < ARRAY_SIZE(t->q_orig_block); i++) { t->q_orig_block[i] = -1; t->q_orig_suspend[i] = -1; } t->queue_size = MAX_Q_PER_PORT; DP_DEBUG(DP_DBG_FLAG_QOS, "inst=%d dp_port=%d\n", info->inst, info->dp_port); /* Get Parent node_id after sanity check */ if (info->p_node_type == DP_NODE_SCH && info->p_node_id.sch_id == DP_NODE_AUTO_ID) t->f_sch_auto_id = 1; i = dp_qos_parent_chk(info, flag); if (i == DP_FAILURE) { pr_err("dp_qos_parent_chk fail\n"); goto EXIT_ERR; } t->parent.node_id = i; t->parent.type = info->p_node_type; t->parent.flag = 1; /* Check parent's children limit not exceeded */ if (priv->qos_sch_stat[t->parent.node_id].child_num >= DP_MAX_CHILD_PER_NODE) { pr_err("Child Num:%d is exceeding limit for Node:[%d]\n", priv->qos_sch_stat[t->parent.node_id].child_num, t->parent.node_id); goto EXIT_ERR; } DP_DEBUG(DP_DBG_FLAG_QOS, "dp_qos_parent_chk succeed: parent node %d\n", t->parent.node_id); /* workaround to pass parrent to queue allcoate api */ priv->ppv4_tmp_p = t->parent.node_id; if (t->f_sch_auto_id) t->f_parent_free = 1; /* Get Child node after sanity check */ if (info->node_type == DP_NODE_QUEUE) { if (info->node_id.q_id == DP_NODE_AUTO_ID) { t->node.inst = info->inst; t->node.dp_port = info->dp_port; t->node.type = info->node_type; if ((dp_node_alloc_31(&t->node, flag)) == DP_FAILURE) { pr_err("dp_node_alloc_31 queue alloc fail\n"); goto EXIT_ERR; } info->node_id = t->node.id; t->f_child_free = 1; } /* add check for free flag and error */ if (priv->qos_queue_stat[info->node_id.q_id].flag == PP_NODE_FREE) { pr_err("Queue ID:%d is in Free state:0x%x\n", info->node_id.q_id, priv->qos_queue_stat[info->node_id.q_id].flag); goto EXIT_ERR; } /* convert q_id to logical 
node id and pass it to * low level api */ t->node_id = priv->qos_queue_stat[info->node_id.q_id].node_id; if (dp_qos_queue_conf_get(priv->qdev, t->node_id, &t->queue_cfg) == 0) { t->queue_num = 1; t->queue_buf[0] = t->node_id; /* save original block/suspend status */ if (t->queue_cfg.blocked == 0) t->q_orig_block[0] = t->queue_cfg.blocked; DP_DEBUG(DP_DBG_FLAG_QOS, "qos_queue_flush queue[%d]\n", t->node_id); queue_flush_31(info->inst, t->node_id, FLUSH_RESTORE_LOOKUP | FLUSH_RESTORE_QOS_PORT); if (t->queue_cfg.queue_child_prop.parent != priv->ppv4_drop_p) { /* decrease stat */ /* Child flag update before link */ DP_DEBUG(DP_DBG_FLAG_QOS, "node_stat_update for queue[%d]\n", t->node_id); if (node_stat_update(info->inst, t->node_id, DP_NODE_DEC)) { pr_err("node_stat_update fail\n"); goto EXIT_ERR; } /* reduce child_num in parent's global table */ DP_DEBUG(DP_DBG_FLAG_QOS, "node_stat_update parent %d for q[%d]\n", PARENT(t->queue_cfg), t->node_id); if (node_stat_update(info->inst, PARENT(t->queue_cfg), DP_NODE_DEC | C_FLAG)) { pr_err("node_stat_update fail\n"); goto EXIT_ERR; } } } else { t->queue_num = 1; t->queue_buf[0] = t->node_id; t->q_orig_block[0] = 0; } /* link set */ /* if parent is same, but need to fill in other parameters for * parents hence commenting below code */ /* if (info->p_node_id.sch_id == parent.node_id || * info->p_node_id.cqm_deq_port == parent.node_id) * goto EXIT_ERR; */ if (dp_link_set(info, t->parent.node_id, flag)) { pr_err("dp_link_set fail to link to parent\n"); goto EXIT_ERR; } } else if (info->node_type == DP_NODE_SCH) { if (info->node_id.sch_id == DP_NODE_AUTO_ID) { t->node.inst = info->inst; t->node.dp_port = info->dp_port; t->node.type = info->node_type; if ((dp_node_alloc_31(&t->node, flag)) == DP_FAILURE) { pr_err("dp_node_alloc_31 sched alloc fail\n"); goto EXIT_ERR; } info->node_id = t->node.id; t->f_child_free = 1; } DP_DEBUG(DP_DBG_FLAG_QOS_DETAIL, "inst=%d dp_port=%d type=%d info->node_id %d\n", t->node.inst, t->node.dp_port, 
t->node.type, info->node_id.q_id); /* add check for free flag and error */ if (priv->qos_sch_stat[info->node_id.sch_id].p_flag == PP_NODE_FREE) { pr_err("Sched:%d is in Free state:0x%x\n", info->node_id.sch_id, priv->qos_sch_stat[info->node_id.sch_id].p_flag); goto EXIT_ERR; } if ((t->f_child_free == 0) && dp_qos_sched_conf_get(priv->qdev, info->node_id.sch_id, &t->sched_cfg) == 0) { DP_DEBUG(DP_DBG_FLAG_QOS_DETAIL, "info->node_id.sch_id %d\n", info->node_id.sch_id); if (dp_qos_sched_get_queues(priv->qdev, info->node_id.sch_id, t->queue_buf, t->queue_size, &t->queue_num)) { pr_err("Can not get queues:%d\n", info->node_id.sch_id); goto EXIT_ERR; } for (i = 0; i < t->queue_num; i++) { if (dp_qos_queue_conf_get(priv->qdev, t->queue_buf[i], &t->queue_cfg)) continue; if (t->queue_cfg.blocked == 0) t->q_orig_block[i] = t->queue_cfg.blocked; queue_flush_31(info->inst, t->queue_buf[i], FLUSH_RESTORE_LOOKUP | FLUSH_RESTORE_QOS_PORT); } /* update flag for sch node */ if (node_stat_update(info->inst, info->node_id.sch_id, DP_NODE_DEC | P_FLAG)) { pr_err("node_stat_update fail\n"); goto EXIT_ERR; } /* reduce child_num in parent's global table */ if (node_stat_update(info->inst, PARENT_S(t->sched_cfg), DP_NODE_DEC | C_FLAG)) { pr_err("node_stat_update fail\n"); goto EXIT_ERR; } } /* if parent is same, but need to fill in other parameters for * parents hence commenting below code */ /* if (info->p_node_id.sch_id == parent.node_id || * info->p_node_id.cqm_deq_port == parent.node_id) * goto EXIT_ERR; */ if (dp_link_set(info, t->parent.node_id, flag)) { pr_err("dp_link_set failed to link to parent\n"); goto EXIT_ERR; } } for (i = 0; i <= t->queue_num; i++) { if (t->q_orig_block[i] < 0 && /* non-valid block stat */ t->q_orig_suspend[i] < 0) /* non-valid suspend stat */ continue; if (dp_qos_queue_conf_get(priv->qdev, t->queue_buf[i], &t->queue_cfg)) continue; t->f_restore = 0; if (t->q_orig_block[i] >= 0) { t->f_restore = 1; t->queue_cfg.blocked = t->q_orig_block[i]; /* restore */ 
DP_DEBUG(DP_DBG_FLAG_QOS, "to unblock queue[%d/%d]:block=%d\n", get_qid_by_node(info->inst, t->queue_buf[i], 0), t->queue_buf[i], t->queue_cfg.blocked); } if (!t->f_restore) continue; if (dp_qos_queue_set(priv->qdev, t->queue_buf[i], &t->queue_cfg)) { pr_err("qos_queue_set fail for q[/%d]\n", t->queue_buf[i]); res = DP_FAILURE; } qid = get_qid_by_node(info->inst, t->queue_buf[i], 0); if (qid == DP_FAILURE) return DP_FAILURE; priv->qos_queue_stat[qid].blocked = t->queue_cfg.blocked; } devm_kfree(&g_dp_dev->dev, t); return res; EXIT_ERR: res = DP_FAILURE; if (t->f_child_free) { if (t->node.type == DP_NODE_QUEUE) { DP_DEBUG(DP_DBG_FLAG_QOS_DETAIL, "queue remove id %d\n", t->node_id); dp_qos_queue_remove(priv->qdev, t->node_id); } else if (t->node.type == DP_NODE_SCH) { DP_DEBUG(DP_DBG_FLAG_QOS_DETAIL, "sched remove id %d\n", t->node.id.sch_id); dp_qos_sched_remove(priv->qdev, t->node.id.sch_id); } else { pr_err("Unexpect node type %d\n", t->node.type); } } if (t->f_parent_free) { if (info->p_node_type == DP_NODE_PORT) { DP_DEBUG(DP_DBG_FLAG_QOS_DETAIL, "port remove id %d\n", t->parent.node_id); dp_qos_port_remove(priv->qdev, t->parent.node_id); } else if (info->p_node_type == DP_NODE_SCH) { DP_DEBUG(DP_DBG_FLAG_QOS_DETAIL, "sched remove id %d\n", t->parent.node_id); dp_qos_sched_remove(priv->qdev, t->parent.node_id); } else { pr_err("Unexpect node type %d\n", t->node.type); } } devm_kfree(&g_dp_dev->dev, t); return res; } /* dp_queue_conf_set_31 API * Set Current Queue config and return DP_SUCCESS * else return DP_FAILURE */ int dp_queue_conf_set_31(struct dp_queue_conf *cfg, int flag) { struct pp_qos_queue_conf *conf; struct hal_priv *priv; int node_id, res = DP_FAILURE; if (!cfg) { pr_err("cfg cannot be NULL\n"); return DP_FAILURE; } priv = HAL(cfg->inst); if (!priv) { pr_err("priv cannot be NULL\n"); return DP_FAILURE; } conf = devm_kzalloc(&g_dp_dev->dev, sizeof(*conf), GFP_ATOMIC); if (!conf) return DP_FAILURE; if (!is_qid_valid(cfg->q_id) || is_q_node_free(priv, 
			   cfg->q_id))
		goto EXIT;
	node_id = priv->qos_queue_stat[cfg->q_id].node_id;
	if (dp_qos_queue_conf_get(priv->qdev, node_id, conf)) {
		pr_err("qos_queue_conf_get fail:%d\n", cfg->q_id);
		goto EXIT;
	}
	/* enable/disable maps to the PP "blocked" flag */
	if (cfg->act & DP_NODE_DIS)
		conf->blocked = 1;
	else if (cfg->act & DP_NODE_EN)
		conf->blocked = 0;
	priv->qos_queue_stat[cfg->q_id].blocked = conf->blocked;
	if (cfg->drop == DP_QUEUE_DROP_WRED)
		conf->wred_enable = 1;
	else if (cfg->drop == DP_QUEUE_DROP_TAIL)
		conf->wred_enable = 0;
	/* index 0 = green, index 1 = yellow thresholds/slopes */
	conf->wred_max_avg_green = cfg->max_size[0];
	conf->wred_max_avg_yellow = cfg->max_size[1];
	conf->wred_min_avg_green = cfg->min_size[0];
	conf->wred_min_avg_yellow = cfg->min_size[1];
	conf->wred_slope_green = cfg->wred_slope[0];
	conf->wred_slope_yellow = cfg->wred_slope[1];
	conf->wred_min_guaranteed = cfg->wred_min_guaranteed;
	conf->wred_max_allowed = cfg->wred_max_allowed;
	if (dp_qos_queue_set(priv->qdev, node_id, conf)) {
		pr_err("failed to qos_queue_set:%d\n", cfg->q_id);
		goto EXIT;
	}
	res = DP_SUCCESS;
EXIT:
	devm_kfree(&g_dp_dev->dev, conf);
	return res;
}

/* dp_queue_conf_get_31 API
 * Get Current Queue config and return DP_SUCCESS
 * else return DP_FAILURE
 */
int dp_queue_conf_get_31(struct dp_queue_conf *cfg, int flag)
{
	int node_id, res = DP_FAILURE;
	struct pp_qos_queue_conf *conf;
	struct hal_priv *priv;

	if (!cfg) {
		pr_err("cfg cannot be NULL\n");
		return DP_FAILURE;
	}
	priv = HAL(cfg->inst);
	if (!priv) {
		pr_err("priv cannot be NULL\n");
		return DP_FAILURE;
	}
	conf = devm_kzalloc(&g_dp_dev->dev, sizeof(*conf), GFP_ATOMIC);
	if (!conf)
		return DP_FAILURE;
	if (!is_qid_valid(cfg->q_id) || is_q_node_free(priv, cfg->q_id))
		goto EXIT;
	node_id = priv->qos_queue_stat[cfg->q_id].node_id;
	if (dp_qos_queue_conf_get(priv->qdev, node_id, conf)) {
		pr_err("qos_queue_conf_get fail\n");
		goto EXIT;
	}
	if (conf->blocked)
		cfg->act = DP_NODE_DIS;
	else
		cfg->act = DP_NODE_EN;
	if (conf->wred_enable) {
		cfg->drop = DP_QUEUE_DROP_WRED;
		cfg->wred_slope[0] = conf->wred_slope_green;
		cfg->wred_slope[1] = conf->wred_slope_yellow;
		/* slot 2 (critical color) is not supported by PP: zeroed */
		cfg->wred_slope[2] = 0;
		cfg->wred_max_allowed = conf->wred_max_allowed;
		cfg->wred_min_guaranteed = conf->wred_min_guaranteed;
		cfg->min_size[0] = conf->wred_min_avg_green;
		cfg->min_size[1] = conf->wred_min_avg_yellow;
		cfg->min_size[2] = 0;
		cfg->max_size[0] = conf->wred_max_avg_green;
		cfg->max_size[1] = conf->wred_max_avg_yellow;
		cfg->max_size[2] = 0;
		//cfg->unit = conf->max_burst;
		res = DP_SUCCESS;
		goto EXIT;
	}
	cfg->drop = DP_QUEUE_DROP_TAIL;
	cfg->min_size[0] = conf->wred_min_avg_green;
	cfg->min_size[1] = conf->wred_min_avg_yellow;
	cfg->max_size[0] = conf->wred_max_avg_green;
	cfg->max_size[1] = conf->wred_max_avg_yellow;
	//cfg->unit = conf->max_burst;
	res = DP_SUCCESS;
EXIT:
	devm_kfree(&g_dp_dev->dev, conf);
	return res;
}

/* dp_node_link_en_set_31 API
 * Enable current link node and return DP_SUCCESS
 * else return DP_FAILURE
 */
int dp_node_link_en_set_31(struct dp_node_link_enable *en, int flag)
{
	struct hal_priv *priv;
	int node_id;

	if (!en) {
		pr_err("en info cannot be NULL\n");
		return DP_FAILURE;
	}
	priv = HAL(en->inst);
	if (!priv) {
		pr_err("priv cannot be NULL\n");
		return DP_FAILURE;
	}
	/* EN/DIS and SUSPEND/RESUME are mutually exclusive pairs */
	if (en->en & DP_NODE_EN && en->en & DP_NODE_DIS) {
		pr_err("enable & disable cannot be set together!\n");
		return DP_FAILURE;
	}
	if (en->en & DP_NODE_SUSPEND && en->en & DP_NODE_RESUME) {
		pr_err("suspend & resume cannot be set together!\n");
		return DP_FAILURE;
	}
	if (en->type == DP_NODE_QUEUE) {
		if (en->en >= DP_NODE_SET_CMD_MAX) {
			pr_err("Incorrect commands provided!\n");
			return DP_FAILURE;
		}
		if (!is_qid_valid(en->id.q_id) ||
		    is_q_node_free(priv, en->id.q_id))
			return DP_FAILURE;
		node_id = priv->qos_queue_stat[en->id.q_id].node_id;
		if (en->en & DP_NODE_EN) {
			if (pp_qos_queue_unblock(priv->qdev, node_id)) {
				pr_err("pp_qos_queue_unblock fail Queue[%d]\n",
				       en->id.q_id);
				return DP_FAILURE;
			}
			priv->qos_queue_stat[en->id.q_id].blocked = 0;
		}
		if (en->en & DP_NODE_DIS) {
			if (pp_qos_queue_block(priv->qdev, node_id)) {
				pr_err("pp_qos_queue_block fail Queue[%d]\n",
				       en->id.q_id);
				return DP_FAILURE;
			}
priv->qos_queue_stat[en->id.q_id].blocked = 1; } } else if (en->type == DP_NODE_SCH) { if (!(en->en & (DP_NODE_SUSPEND | DP_NODE_RESUME))) { pr_err("Incorrect commands provided!\n"); return DP_FAILURE; } if (!is_sch_valid(en->id.sch_id) || is_sch_parent_free(priv, en->id.sch_id)) return DP_FAILURE; } else if (en->type == DP_NODE_PORT) { if (en->en >= DP_NODE_SET_CMD_MAX) { pr_err("Incorrect commands provided!\n"); return DP_FAILURE; } if (!is_deqport_valid(en->id.cqm_deq_port) || is_port_node_free(priv, en->id.cqm_deq_port)) return DP_FAILURE; node_id = priv->deq_port_stat[en->id.cqm_deq_port].node_id; if (en->en & DP_NODE_EN) { if (pp_qos_port_unblock(priv->qdev, node_id)) { pr_err("pp_qos_port_unblock fail Port[%d]\n", en->id.cqm_deq_port); return DP_FAILURE; } } if (en->en & DP_NODE_DIS) { if (pp_qos_port_block(priv->qdev, node_id)) { pr_err("pp_qos_port_block fail Port[%d]\n", en->id.cqm_deq_port); return DP_FAILURE; } } if (en->en & DP_NODE_SUSPEND) { if (pp_qos_port_disable(priv->qdev, node_id)) { pr_err("pp_qos_port_disable fail Port[%d]\n", en->id.cqm_deq_port); return DP_FAILURE; } /* Not allowed to dequeue */ priv->deq_port_stat[en->id.cqm_deq_port].disabled = 1; } if (en->en & DP_NODE_RESUME) { if (pp_qos_port_enable(priv->qdev, node_id)) { pr_err("pp_qos_port_enable fail Port[%d]\n", en->id.cqm_deq_port); return DP_FAILURE; } /* Allowed to dequeue */ priv->deq_port_stat[en->id.cqm_deq_port].disabled = 0; } } return DP_SUCCESS; } /* dp_node_link_en_get_31 API * Get status of link node and return DP_SUCCESS * else return DP_FAILURE */ int dp_node_link_en_get_31(struct dp_node_link_enable *en, int flag) { int node_id; struct hal_priv *priv = HAL(en->inst); if (!priv || !priv->qdev) { pr_err("priv or priv->qdev NULL\n"); return DP_FAILURE; } if (!en) { pr_err("en info NULL\n"); return DP_FAILURE; } if (en->type == DP_NODE_QUEUE) { DP_DEBUG(DP_DBG_FLAG_QOS, "en->id.q_id=%d\n", en->id.q_id); node_id = priv->qos_queue_stat[en->id.q_id].node_id; if 
(priv->qos_queue_stat[en->id.q_id].blocked) en->en |= DP_NODE_DIS; else en->en |= DP_NODE_EN; } else if (en->type == DP_NODE_SCH) { DP_DEBUG(DP_DBG_FLAG_QOS, "en->id.sch_id=%d\n", en->id.sch_id); if (!(priv->qos_sch_stat[en->id.sch_id].c_flag & PP_NODE_ACTIVE)) { pr_err("Wrong Sched [/%d] FLAG Expect ACTIVE\n", en->id.sch_id); return DP_FAILURE; } en->en |= DP_NODE_EN; } else if (en->type == DP_NODE_PORT) { DP_DEBUG(DP_DBG_FLAG_QOS, "en->id.cqm_deq_port=%d\n", en->id.cqm_deq_port); node_id = priv->deq_port_stat[en->id.cqm_deq_port].node_id; if (priv->deq_port_stat[en->id.cqm_deq_port].disabled) en->en |= DP_NODE_DIS; else en->en |= DP_NODE_EN; } return DP_SUCCESS; } /* dp_link_get_31 API * get full link based on queue and return DP_SUCCESS * else return DP_FAILURE */ int dp_link_get_31(struct dp_qos_link *cfg, int flag) { struct hal_priv *priv; int i, node_id; if (!cfg) { pr_err("cfg cannot be NULL\n"); return DP_FAILURE; } priv = HAL(cfg->inst); if (!priv) { pr_err("priv cannot be NULL\n"); return DP_FAILURE; } node_id = priv->qos_queue_stat[cfg->q_id].node_id; if (!(priv->qos_queue_stat[cfg->q_id].flag & PP_NODE_ACTIVE)) { pr_err("Incorrect queue:%d state:expect ACTIV\n", cfg->q_id); return DP_FAILURE; } cfg->q_arbi = get_parent_arbi(cfg->inst, node_id, 0); cfg->q_leaf = 0; cfg->n_sch_lvl = 0; cfg->q_prio_wfq = priv->qos_sch_stat[node_id].prio_wfq; if (priv->qos_sch_stat[node_id].parent.type == DP_NODE_PORT) { cfg->cqm_deq_port = priv->qos_sch_stat[node_id].parent.node_id; cfg->cqm_deq_port = get_cqm_deq_port_by_node(cfg->inst, cfg->cqm_deq_port, 0); return DP_SUCCESS; } else if (priv->qos_sch_stat[node_id].parent.type == DP_NODE_SCH) { for (i = 0; i < DP_MAX_SCH_LVL - 1; i++) { cfg->sch[i].id = priv->qos_sch_stat[node_id].parent.node_id; node_id = cfg->sch[i].id; cfg->n_sch_lvl = i + 1; cfg->sch[i].leaf = 0; cfg->sch[i].arbi = get_parent_arbi(cfg->inst, cfg->sch[i].id, 0); cfg->sch[i + 1].id = priv->qos_sch_stat[node_id].parent.node_id; cfg->sch[i].prio_wfq = 
				priv->qos_sch_stat[node_id].prio_wfq;
			if (priv->qos_sch_stat[cfg->sch[i].id].parent.type ==
			    DP_NODE_PORT)
				break;
		}
		cfg->cqm_deq_port =
			priv->qos_sch_stat[cfg->sch[i].id].parent.node_id;
		cfg->cqm_deq_port = get_cqm_deq_port_by_node(cfg->inst,
							     cfg->cqm_deq_port,
							     0);
		return DP_SUCCESS;
	}
	return DP_FAILURE;
}

/* dp_link_add_31 API
 * configure end to end link and return DP_SUCCESS
 * else return DP_FAILURE
 */
int dp_link_add_31(struct dp_qos_link *cfg, int flag)
{
	struct dp_node_link info = { 0 };
	struct dp_node_alloc node = { 0 };
	int i, id;

	if (!cfg) {
		pr_err("%s: cfg cannot be NULL\n", __func__);
		return DP_FAILURE;
	}
	if (cfg->n_sch_lvl > DP_MAX_SCH_LVL || cfg->n_sch_lvl < 0) {
		pr_err("%s: n_sch_lvl out of range, expect 0 ~ %d\n",
		       __func__, DP_MAX_SCH_LVL);
		return DP_FAILURE;
	}
	info.inst = cfg->inst;
	info.dp_port = cfg->dp_port;
	info.cqm_deq_port.cqm_deq_port = cfg->cqm_deq_port;
	info.p_node_id.cqm_deq_port = cfg->cqm_deq_port;
	info.p_node_type = DP_NODE_PORT;
	/* link schedulers top-down (closest to the port first) */
	for (i = cfg->n_sch_lvl - 1; i >= 0; i--) {
		info.node_id.sch_id = cfg->sch[i].id;
		info.node_type = DP_NODE_SCH;
		info.leaf = cfg->sch[i].leaf;
		info.arbi = cfg->sch[i].arbi;
		info.prio_wfq = cfg->sch[i].prio_wfq;
		if (dp_node_link_add_31(&info, flag)) {
			pr_err("%s: fail to link node %d to %d\n", __func__,
			       info.node_id.sch_id, info.p_node_id.sch_id);
			goto ERR;
		}
		/* this level becomes the parent of the next one */
		info.p_node_id = info.node_id;
		info.p_node_type = info.node_type;
	}
	/* finally attach the queue to the lowest scheduler (or the port) */
	info.node_id.q_id = cfg->q_id;
	info.node_type = DP_NODE_QUEUE;
	info.leaf = cfg->q_leaf;
	info.arbi = cfg->q_arbi;
	info.prio_wfq = cfg->q_prio_wfq;
	if (dp_node_link_add_31(&info, flag)) {
		pr_err("%s: fail to link node %d to %d\n", __func__,
		       info.node_id.q_id, info.p_node_id.sch_id);
		goto ERR;
	}
	return DP_SUCCESS;
ERR:
	/* unwind: free auto-allocated nodes already linked (i tracks the
	 * level that failed; i < 0 means the queue step failed)
	 */
	while (i < cfg->n_sch_lvl) {
		id = i < 0 ? cfg->q_id : cfg->sch[i].id;
		if (id == DP_NODE_AUTO_ID) {
			node.inst = cfg->inst;
			node.dp_port = cfg->dp_port;
			node.type = i < 0 ? DP_NODE_QUEUE : DP_NODE_SCH;
			node.id.sch_id = id;
			dp_node_free_31(&node, flag);
		}
		node_stat_update(cfg->inst, id, DP_NODE_DEC);
		i++;
	}
	return DP_FAILURE;
}

/* dp_shaper_conf_set_31 API
 * DP_NO_SHAPER_LIMIT no limit for shaper
 * DP_MAX_SHAPER_LIMIT max limit for shaper
 * configure shaper limit for node and return DP_SUCCESS
 * else return DP_FAILURE
 */
int dp_shaper_conf_set_31(struct dp_shaper_conf *cfg, int flag)
{
	struct pp_qos_queue_conf queue_cfg = { 0 };
	struct pp_qos_sched_conf sched_cfg = { 0 };
	struct pp_qos_port_conf port_cfg = { 0 };
	struct hal_priv *priv;
	int node_id, res;
	u32 bw_limit;

	if (!cfg) {
		pr_err("cfg cannot be NULL\n");
		return DP_FAILURE;
	}
	priv = HAL(cfg->inst);
	if (!priv) {
		pr_err("priv cannot be NULL\n");
		return DP_FAILURE;
	}
	if (cfg->type == DP_NODE_QUEUE) {
		if (!is_qid_valid(cfg->id.q_id) ||
		    is_q_node_free(priv, cfg->id.q_id))
			return DP_FAILURE;
		node_id = priv->qos_queue_stat[cfg->id.q_id].node_id;
		if (dp_qos_queue_conf_get(priv->qdev, node_id, &queue_cfg)) {
			pr_err("qos_queue_conf_get fail:%d\n", cfg->id.q_id);
			return DP_FAILURE;
		}
		switch (cfg->cmd) {
		case DP_SHAPER_CMD_ADD:
		case DP_SHAPER_CMD_ENABLE:
			/* translate DP limit convention to PP units */
			res = limit_dp2pp(cfg->cir, &bw_limit);
			if (res == DP_FAILURE) {
				pr_err("Wrong dp shaper limit:%u\n",
				       cfg->cir);
				return DP_FAILURE;
			}
			queue_cfg.common_prop.bandwidth_limit = bw_limit;
			/* eir = excess over committed rate */
			queue_cfg.eir = cfg->pir > cfg->cir ?
					cfg->pir - cfg->cir : 0;
			break;
		case DP_SHAPER_CMD_REMOVE:
		case DP_SHAPER_CMD_DISABLE:
			queue_cfg.common_prop.bandwidth_limit = 0;
			break;
		default:
			pr_err("Incorrect command provided:%d\n", cfg->cmd);
			return DP_FAILURE;
		}
		if (dp_qos_queue_set(priv->qdev, node_id, &queue_cfg)) {
			pr_err("qos_queue_set fail:%d\n", cfg->id.q_id);
			return DP_FAILURE;
		}
		return DP_SUCCESS;
	} else if (cfg->type == DP_NODE_SCH) {
		if (!is_sch_valid(cfg->id.sch_id) ||
		    is_sch_parent_free(priv, cfg->id.sch_id))
			return DP_FAILURE;
		if (dp_qos_sched_conf_get(priv->qdev, cfg->id.sch_id,
					  &sched_cfg)) {
			pr_err("qos_sched_conf_get fail:%d\n",
			       cfg->id.sch_id);
			return DP_FAILURE;
		}
		switch (cfg->cmd) {
		case DP_SHAPER_CMD_ADD:
		case DP_SHAPER_CMD_ENABLE:
			res = limit_dp2pp(cfg->cir, &bw_limit);
			if (res == DP_FAILURE) {
				pr_err("Wrong dp shaper limit:%u\n",
				       cfg->cir);
				return DP_FAILURE;
			}
			sched_cfg.common_prop.bandwidth_limit = bw_limit;
			break;
		case DP_SHAPER_CMD_REMOVE:
		case DP_SHAPER_CMD_DISABLE:
			sched_cfg.common_prop.bandwidth_limit = 0;
			break;
		default:
			pr_err("Incorrect command provided:%d\n", cfg->cmd);
			return DP_FAILURE;
		}
		if (dp_qos_sched_set(priv->qdev, cfg->id.sch_id,
				     &sched_cfg)) {
			pr_err("qos_sched_set fail:%d\n", cfg->id.sch_id);
			return DP_FAILURE;
		}
		return DP_SUCCESS;
	} else if (cfg->type == DP_NODE_PORT) {
		if (!is_deqport_valid(cfg->id.cqm_deq_port) ||
		    is_port_node_free(priv, cfg->id.cqm_deq_port))
			return DP_FAILURE;
		node_id = priv->deq_port_stat[cfg->id.cqm_deq_port].node_id;
		if (dp_qos_port_conf_get(priv->qdev, node_id, &port_cfg)) {
			pr_err("qos_port_conf_get fail:%d\n",
			       cfg->id.cqm_deq_port);
			return DP_FAILURE;
		}
		switch (cfg->cmd) {
		case DP_SHAPER_CMD_ADD:
		case DP_SHAPER_CMD_ENABLE:
			res = limit_dp2pp(cfg->cir, &bw_limit);
			if (res == DP_FAILURE) {
				pr_err("Wrong dp shaper limit:%u\n",
				       cfg->cir);
				return DP_FAILURE;
			}
			port_cfg.common_prop.bandwidth_limit = bw_limit;
			break;
		case DP_SHAPER_CMD_REMOVE:
		case DP_SHAPER_CMD_DISABLE:
			port_cfg.common_prop.bandwidth_limit = 0;
			break;
		default:
			pr_err("Incorrect command provided:%d\n", cfg->cmd);
			return DP_FAILURE;
		}
		if (dp_qos_port_set(priv->qdev, node_id, &port_cfg)) {
			pr_err("qos_port_set fail:%d\n",
			       cfg->id.cqm_deq_port);
			return DP_FAILURE;
		}
		return DP_SUCCESS;
	}
	pr_err("Unkonwn type provided:0x%x\n", cfg->type);
	return DP_FAILURE;
}

/* dp_shaper_conf_get_31 API
 * DP_NO_SHAPER_LIMIT no limit for shaper
 * DP_MAX_SHAPER_LIMIT max limit for shaper
 * get shaper limit for node fill struct and return DP_SUCCESS
 * else return DP_FAILURE
 */
int dp_shaper_conf_get_31(struct dp_shaper_conf *cfg, int flag)
{
	struct pp_qos_queue_conf queue_cfg = { 0 };
	struct pp_qos_sched_conf sched_cfg = { 0 };
	struct pp_qos_port_conf port_cfg = { 0 };
	struct hal_priv *priv;
	int node_id, res;
	u32 bw_limit, pir = 0;

	if (!cfg) {
		pr_err("cfg cannot be NULL\n");
		return DP_FAILURE;
	}
	priv = HAL(cfg->inst);
	if (!priv) {
		pr_err("priv cannot be NULL\n");
		return DP_FAILURE;
	}
	if (cfg->type == DP_NODE_QUEUE) {
		if (!is_qid_valid(cfg->id.q_id) ||
		    is_q_node_free(priv, cfg->id.q_id))
			return DP_FAILURE;
		node_id = priv->qos_queue_stat[cfg->id.q_id].node_id;
		if (dp_qos_queue_conf_get(priv->qdev, node_id, &queue_cfg)) {
			pr_err("qos_queue_conf_get fail:%d\n", cfg->id.q_id);
			return DP_FAILURE;
		}
		res = limit_pp2dp(queue_cfg.common_prop.bandwidth_limit,
				  &bw_limit);
		if (res == DP_FAILURE) {
			pr_err("Wrong pp shaper limit:%u\n",
			       queue_cfg.common_prop.bandwidth_limit);
			return DP_FAILURE;
		}
		/* peak rate = committed + excess; queues only */
		pir = queue_cfg.common_prop.bandwidth_limit + queue_cfg.eir;
	} else if (cfg->type == DP_NODE_SCH) {
		if (!is_sch_valid(cfg->id.sch_id) ||
		    is_sch_parent_free(priv, cfg->id.sch_id))
			return DP_FAILURE;
		if (dp_qos_sched_conf_get(priv->qdev, cfg->id.sch_id,
					  &sched_cfg)) {
			pr_err("qos_sched_conf_get fail:%d\n",
			       cfg->id.sch_id);
			return DP_FAILURE;
		}
		res = limit_pp2dp(sched_cfg.common_prop.bandwidth_limit,
				  &bw_limit);
		if (res == DP_FAILURE) {
			pr_err("Wrong pp shaper limit:%u\n",
			       sched_cfg.common_prop.bandwidth_limit);
			return DP_FAILURE;
		}
	} else if (cfg->type == DP_NODE_PORT) {
		if (!is_deqport_valid(cfg->id.cqm_deq_port) ||
		    is_port_node_free(priv, cfg->id.cqm_deq_port))
			return DP_FAILURE;
		node_id = priv->deq_port_stat[cfg->id.cqm_deq_port].node_id;
		if (dp_qos_port_conf_get(priv->qdev, node_id, &port_cfg)) {
			pr_err("qos_port_conf_get fail:%d\n",
			       cfg->id.cqm_deq_port);
			return DP_FAILURE;
		}
		res = limit_pp2dp(port_cfg.common_prop.bandwidth_limit,
				  &bw_limit);
		if (res == DP_FAILURE) {
			pr_err("Wrong pp shaper limit:%u\n",
			       port_cfg.common_prop.bandwidth_limit);
			return DP_FAILURE;
		}
	} else {
		pr_err("Unkonwn type provided:0x%x\n", cfg->type);
		return DP_FAILURE;
	}
	cfg->cir = bw_limit;
	cfg->pir = pir;
	/* burst sizes are not exposed by the PP driver */
	cfg->cbs = 0;
	cfg->pbs = 0;
	return DP_SUCCESS;
}

/* Read back the CQM lookup-table entries mapped to a queue; copies up to
 * cfg->qmap_size entries when the caller supplied a buffer.
 */
int dp_queue_map_get_31(struct dp_queue_map_get *cfg, int flag)
{
	struct hal_priv *priv;
	cbm_queue_map_entry_t *qmap_entry = NULL;
	s32 num_entry;
	int i;
	int res = DP_SUCCESS;

	if (!cfg) {
		pr_err("cfg cannot be NULL\n");
		return DP_FAILURE;
	}
	priv = HAL(cfg->inst);
	if (!priv) {
		pr_err("priv cannot be NULL\n");
		return DP_FAILURE;
	}
	/* drop queue is queryable even though its node is "free" */
	if (!is_qid_valid(cfg->q_id) ||
	    (is_q_node_free(priv, cfg->q_id) &&
	     cfg->q_id != priv->ppv4_drop_q))
		return DP_FAILURE;
	if (cbm_queue_map_get(cfg->inst, cfg->q_id, &num_entry, &qmap_entry,
			      0)) {
		pr_err("cbm_queue_map_get fail:%d\n", cfg->q_id);
		return DP_FAILURE;
	}
	cfg->num_entry = num_entry;
	if (!qmap_entry) {
		DP_DEBUG(DP_DBG_FLAG_QOS,
			 "queue map entry returned null value\n");
		if (num_entry) {
			pr_err("num_entry is not null:%d\n", num_entry);
			res = DP_FAILURE;
		}
		goto EXIT;
	}
	if (!cfg->qmap_entry)
		goto EXIT;
	if (num_entry > cfg->qmap_size) {
		DP_DEBUG(DP_DBG_FLAG_QOS,
			 "num_entry is greater than qmap_size:%d\n",
			 num_entry);
		goto EXIT;
	}
	for (i = 0; i < num_entry; i++) {
		cfg->qmap_entry[i].qmap.flowid = qmap_entry[i].flowid;
		cfg->qmap_entry[i].qmap.dec = qmap_entry[i].dec;
		cfg->qmap_entry[i].qmap.enc = qmap_entry[i].enc;
		cfg->qmap_entry[i].qmap.mpe1 = qmap_entry[i].mpe1;
		cfg->qmap_entry[i].qmap.mpe2 = qmap_entry[i].mpe2;
		cfg->qmap_entry[i].qmap.dp_port = qmap_entry[i].ep;
		cfg->qmap_entry[i].qmap.class = qmap_entry[i].tc;
		cfg->qmap_entry[i].qmap.subif = qmap_entry[i].sub_if_id;
	}
EXIT:
	/* buffer is allocated by cbm_queue_map_get and must be freed here */
	cbm_queue_map_buf_free(qmap_entry);
	qmap_entry = NULL;
	return res;
}

/* Program a CQM lookup-table entry for a queue; each set mask bit turns
 * the corresponding match field into a don't-care.
 */
int dp_queue_map_set_31(struct dp_queue_map_set *cfg, int flag)
{
	struct hal_priv *priv;
	cbm_queue_map_entry_t qmap_cfg = { 0 };
	u32 cqm_flags = 0;

	if (!cfg) {
		pr_err("cfg cannot be NULL\n");
		return DP_FAILURE;
	}
	priv = HAL(cfg->inst);
	if (!priv) {
		pr_err("priv cannot be NULL\n");
		return DP_FAILURE;
	}
	if (!is_qid_valid(cfg->q_id))
		return DP_FAILURE;
	qmap_cfg.mpe1 = cfg->map.mpe1;
	qmap_cfg.mpe2 = cfg->map.mpe2;
	qmap_cfg.ep = cfg->map.dp_port;
	qmap_cfg.flowid = cfg->map.flowid;
	qmap_cfg.dec = cfg->map.dec;
	qmap_cfg.enc = cfg->map.enc;
	qmap_cfg.tc = cfg->map.class;
	qmap_cfg.sub_if_id = cfg->map.subif;
	if (cfg->mask.mpe1)
		cqm_flags |= CBM_QUEUE_MAP_F_MPE1_DONTCARE;
	if (cfg->mask.mpe2)
		cqm_flags |= CBM_QUEUE_MAP_F_MPE2_DONTCARE;
	if (cfg->mask.dp_port)
		cqm_flags |= CBM_QUEUE_MAP_F_EP_DONTCARE;
	if (cfg->mask.flowid) {
		cqm_flags |= CBM_QUEUE_MAP_F_FLOWID_L_DONTCARE;
		cqm_flags |= CBM_QUEUE_MAP_F_FLOWID_H_DONTCARE;
	}
	if (cfg->mask.dec)
		cqm_flags |= CBM_QUEUE_MAP_F_DE_DONTCARE;
	if (cfg->mask.enc)
		cqm_flags |= CBM_QUEUE_MAP_F_EN_DONTCARE;
	if (cfg->mask.class)
		cqm_flags |= CBM_QUEUE_MAP_F_TC_DONTCARE;
	/* NOTE(review): dp_port mask handled twice (also above) — redundant */
	if (cfg->mask.dp_port)
		cqm_flags |= CBM_QUEUE_MAP_F_EP_DONTCARE;
	if (cfg->mask.subif) {
		cqm_flags |= CBM_QUEUE_MAP_F_SUBIF_DONTCARE;
		qmap_cfg.sub_if_id_mask_bits = cfg->mask.subif_id_mask;
	}
	if (cbm_queue_map_set(cfg->inst, cfg->q_id, &qmap_cfg, cqm_flags)) {
		pr_err("cbm_queue_map_set fail for Q:%d\n", cfg->q_id);
		return DP_FAILURE;
	}
	return DP_SUCCESS;
}

/* Counter mode configuration: not implemented on this HAL generation */
int dp_counter_mode_set_31(struct dp_counter_conf *cfg, int flag)
{
	return DP_FAILURE;
}

int dp_counter_mode_get_31(struct dp_counter_conf *cfg, int flag)
{
	return DP_FAILURE;
}

/* Count scheduler levels from node pid up to the owning QoS port.
 * Returns the level count, or DP_FAILURE when priv is missing.
 */
int get_sch_level(int inst, int pid, int flag)
{
	struct hal_priv *priv;
	int level;

	priv = HAL(inst);
	if (!priv) {
		pr_err("priv cannot be NULL\n");
		return DP_FAILURE;
	}
	for (level = 0; level < DP_MAX_SCH_LVL; level++) {
		if (priv->qos_sch_stat[pid].parent.type == DP_NODE_PORT) {
			/* count the node itself, then stop at the port */
			level = level + 1;
			break;
		}
		pid = priv->qos_sch_stat[pid].parent.node_id;
	}
	return level;
}

/* dp_qos_level_get_31 API
 * get max scheduler level and return DP_SUCCESS
 * else return DP_FAILURE
 */
int dp_qos_level_get_31(struct dp_qos_level *dp, int flag)
{
	struct hal_priv *priv;
	u16 i, id, pid, lvl_x = 0;

	if (!dp) {
		pr_err("dp cannot be NULL\n");
		return DP_FAILURE;
	}
	dp->max_sch_lvl = 0;
	priv = HAL(dp->inst);
	if (!priv) {
		pr_err("priv cannot be NULL\n");
		return DP_FAILURE;
	}
	/* scan every in-use queue and track the deepest scheduler chain */
	for (i = 0; i < MAX_QUEUE; i++) {
		if (priv->qos_queue_stat[i].flag & PP_NODE_FREE)
			continue;
		id = priv->qos_queue_stat[i].node_id;
		if (priv->qos_sch_stat[id].parent.type == DP_NODE_PORT) {
			/* directly attached queue: depth 0 */
			continue;
		} else if (priv->qos_sch_stat[id].parent.type ==
			   DP_NODE_SCH) {
			pid = priv->qos_sch_stat[id].parent.node_id;
			lvl_x = get_sch_level(dp->inst, pid, 0);
		}
		if (lvl_x > dp->max_sch_lvl)
			dp->max_sch_lvl = lvl_x;
	}
	/* NOTE(review): condition is always true for unsigned/zero-init
	 * max_sch_lvl — the else branch looks unreachable; confirm.
	 */
	if (dp->max_sch_lvl >= 0)
		return DP_SUCCESS;
	else
		return DP_FAILURE;
}

/* Fill child[] with the active direct children of node_id; queues are
 * reported by physical q_id, schedulers by node id. Returns the count.
 */
static int get_children_list(int inst, struct dp_node *child, int node_id)
{
	int idx, num = 0, child_id;
	struct hal_priv *priv = HAL(inst);

	for (idx = 0; idx < DP_MAX_CHILD_PER_NODE; idx++) {
		child_id = get_qid_by_node(inst, CHILD(node_id, idx).node_id,
					   0);
		if (priv->qos_sch_stat[node_id].child[idx].flag &
		    PP_NODE_ACTIVE) {
			child[idx].type = CHILD(node_id, idx).type;
			if (child[idx].type == DP_NODE_SCH)
				child[idx].id.q_id =
					CHILD(node_id, idx).node_id;
			else
				child[idx].id.q_id = child_id;
			num++;
		}
	}
	return num;
}

/* dp_children_get_31 API
 * Get direct chldren and type of given node and return DP_SUCCESS
 * else return DP_FAILURE
 */
int dp_children_get_31(struct dp_node_child *cfg, int flag)
{
	int node_id, res = 0;
	struct hal_priv *priv;

	if (!cfg) {
		pr_err("cfg cannot be NULL\n");
		return DP_FAILURE;
	}
	priv = HAL(cfg->inst);
	cfg->num = 0;
	if (!priv) {
		pr_err("priv cannot be NULL\n");
		return DP_FAILURE;
	}
	if (cfg->type == DP_NODE_SCH) {
		if (!is_sch_valid(cfg->id.sch_id) ||
		    is_sch_child_free(priv, cfg->id.sch_id))
			return DP_FAILURE;
		node_id = cfg->id.sch_id;
	} else if (cfg->type == DP_NODE_PORT) {
		if (!is_deqport_valid(cfg->id.cqm_deq_port) ||
		    is_port_node_free(priv, cfg->id.cqm_deq_port))
			return DP_FAILURE;
		node_id = priv->deq_port_stat[cfg->id.cqm_deq_port].node_id;
	} else {
		pr_err("Unkonwn type provided:0x%x\n", cfg->type);
		return DP_FAILURE;
	}
	if (!priv->qos_sch_stat[node_id].child_num)
		return DP_SUCCESS;
	cfg->num = priv->qos_sch_stat[node_id].child_num;
	res = get_children_list(cfg->inst, cfg->child, node_id);
	/* cross-check the stat table against the walked children */
	if (cfg->num == res)
		return DP_SUCCESS;
	pr_err("child_num:[%d] not matched to res:[%d] for Node:%d\n",
	       cfg->num, res, cfg->id.sch_id);
	return DP_FAILURE;
}

/* Reserve a contiguous block of PP queues for endpoint ep and record the
 * logical/physical ids in the per-port resv table.
 */
static int dp_q_reserve_contiguous(int inst, int ep,
				   struct dp_dev_data *data)
{
	struct hal_priv *priv = HAL(inst);
	struct resv_info *r = &priv->resv[ep];
	size_t len;
	int i, res;
	u32 *ids, *phy_ids;

	/* single allocation: first half logical ids, second half physical */
	len = sizeof(*ids) * data->num_resv_q * 2;
	ids = devm_kzalloc(&g_dp_dev->dev, len, GFP_ATOMIC);
	if (!ids)
		return DP_FAILURE;
	phy_ids = ids + data->num_resv_q;
	res = pp_qos_contiguous_queue_allocate(priv->qdev, ids, phy_ids,
					       data->num_resv_q);
	if (res) {
		pr_err("%s: pp qos contiguous q alloc failed with %d\n",
		       __func__, res);
		devm_kfree(&g_dp_dev->dev, ids);
		return DP_FAILURE;
	}
	for (i = 0; i < data->num_resv_q; i++) {
		r->resv_q[i].id = ids[i];
		r->resv_q[i].physical_id = phy_ids[i];
		DP_DEBUG(DP_DBG_FLAG_QOS, "%s: reseve q[%d/%d] for ep %d\n",
			 __func__, ids[i], phy_ids[i], ep);
	}
	r->num_resv_q = data->num_resv_q;
	r->flag_ops |= DP_F_DEV_CONTINUOUS_Q;
	data->qos_resv_q_base = phy_ids[0];
	devm_kfree(&g_dp_dev->dev, ids);
	return DP_SUCCESS;
}

/* Release the queues previously reserved for endpoint ep */
static int dp_q_remove(int inst, int ep)
{
	struct hal_priv *priv = HAL(inst);
	struct resv_info *r = &priv->resv[ep];
	int i, res;

	if (!r->resv_q)
		return DP_SUCCESS;
	if (r->num_resv_q <= 0)
		goto out;
	if (r->flag_ops & DP_F_DEV_CONTINUOUS_Q) {
		res = pp_qos_contiguous_queue_remove(priv->qdev,
	/* second half of the pp_qos_contiguous_queue_remove() call opened
	 * above: release the whole contiguous block starting at the first
	 * reserved logical queue id
	 */
						     r->resv_q[0].id,
						     r->num_resv_q);
		if (res) {
			pr_err("%s: pp qos contiguous q remove failed with %d\n",
			       __func__, res);
			return DP_FAILURE;
		}
		/* the whole contiguous block is gone in one call */
		r->num_resv_q = 0;
		r->flag_ops &= ~DP_F_DEV_CONTINUOUS_Q;
		goto out;
	}
	/* non-contiguous reservation: remove queues one at a time, last
	 * first, so num_resv_q always counts the queues still allocated
	 * even if a removal fails midway
	 */
	while (r->num_resv_q > 0) {
		i = r->num_resv_q - 1;
		res = dp_qos_queue_remove(priv->qdev, r->resv_q[i].id);
		if (res) {
			pr_err("%s: pp qos q remove failed with %d for q id %u\n",
			       __func__, res, r->resv_q[i].id);
			return DP_FAILURE;
		}
		r->num_resv_q--;
	}
out:
	/* free the bookkeeping array; devm-managed allocation */
	devm_kfree(&g_dp_dev->dev, r->resv_q);
	r->resv_q = NULL;
	return DP_SUCCESS;
}

/* Reserve data->num_resv_q queues for endpoint ep, either as one
 * contiguous block (DP_F_DEV_CONTINUOUS_Q) or one queue at a time.
 * No-op unless the caller asked for reserved queues via DP_F_DEV_RESV_Q.
 * On any failure all partial reservations are rolled back via
 * dp_q_remove().  Returns DP_SUCCESS or DP_FAILURE.
 */
static int dp_q_reserve(int inst, int ep, struct dp_dev_data *data)
{
	struct hal_priv *priv = HAL(inst);
	struct resv_info *r = &priv->resv[ep];
	size_t len;
	int i, res;
	u32 id, phy_id;

	if (!(data->flag_ops & DP_F_DEV_RESV_Q) || data->num_resv_q <= 0)
		return DP_SUCCESS;
	len = sizeof(struct resv_q) * data->num_resv_q;
	r->resv_q = devm_kzalloc(&g_dp_dev->dev, len, GFP_ATOMIC);
	if (!r->resv_q)
		return DP_FAILURE;
	if (data->flag_ops & DP_F_DEV_CONTINUOUS_Q) {
		/* contiguous path fills r->resv_q itself */
		if (dp_q_reserve_contiguous(inst, ep, data)) {
			dp_q_remove(inst, ep);
			return DP_FAILURE;
		}
		return DP_SUCCESS;
	}
	for (i = 0; i < data->num_resv_q; i++) {
		res = dp_qos_queue_allocate_id_phy(priv->qdev, &id, &phy_id);
		if (res) {
			pr_err("%s: pp qos q alloc failed with %d\n",
			       __func__, res);
			/* roll back the queues reserved so far */
			dp_q_remove(inst, ep);
			return DP_FAILURE;
		}
		r->resv_q[i].id = id;
		r->resv_q[i].physical_id = phy_id;
		/* count incrementally so rollback knows how far we got */
		r->num_resv_q++;
		DP_DEBUG(DP_DBG_FLAG_QOS, "%s: reseve q[%d/%d] for ep %d\n",
			 __func__, id, phy_id, ep);
	}
	return DP_SUCCESS;
}

/* Release all schedulers previously reserved for endpoint ep and free
 * the bookkeeping array.  Removal runs last-first so num_resv_sched
 * always counts the schedulers still allocated.
 */
static int dp_sched_remove(int inst, int ep)
{
	struct hal_priv *priv = HAL(inst);
	struct resv_info *r = &priv->resv[ep];
	int i, res;

	if (!r->resv_sched)
		return DP_SUCCESS;
	while (r->num_resv_sched > 0) {
		i = r->num_resv_sched - 1;
		res = dp_qos_sched_remove(priv->qdev, r->resv_sched[i].id);
		if (res) {
			pr_err("%s: pp qos sched remove failed with %d for sched id %u\n",
			       __func__, res, r->resv_sched[i].id);
			return DP_FAILURE;
		}
		r->num_resv_sched--;
	}
	devm_kfree(&g_dp_dev->dev, r->resv_sched);
	r->resv_sched = NULL;
	return DP_SUCCESS;
}

/* Reserve data->num_resv_sched schedulers for endpoint ep.  No-op unless
 * DP_F_DEV_RESV_SCH is requested.  On failure all partial reservations
 * are rolled back via dp_sched_remove().
 */
static int dp_sched_reserve(int inst, int ep, struct dp_dev_data *data)
{
	struct hal_priv *priv = HAL(inst);
	struct resv_info *r = &priv->resv[ep];
	size_t len;
	int i, res;
	u32 id;

	if (!(data->flag_ops & DP_F_DEV_RESV_SCH) ||
	    data->num_resv_sched <= 0)
		return DP_SUCCESS;
	len = sizeof(struct resv_sch) * data->num_resv_sched;
	r->resv_sched = devm_kzalloc(&g_dp_dev->dev, len, GFP_ATOMIC);
	if (!r->resv_sched)
		return DP_FAILURE;
	for (i = 0; i < data->num_resv_sched; i++) {
		res = dp_qos_sched_allocate(priv->qdev, &id);
		if (res) {
			pr_err("%s: pp qos sched alloc failed with %d\n",
			       __func__, res);
			/* roll back the schedulers reserved so far */
			dp_sched_remove(inst, ep);
			return DP_FAILURE;
		}
		r->resv_sched[i].id = id;
		r->num_resv_sched++;
		DP_DEBUG(DP_DBG_FLAG_QOS, "%s: reseve sched[%u] for ep %d\n",
			 __func__, id, ep);
	}
	return DP_SUCCESS;
}

/* Reserve (or on DP_F_DEREGISTER release) queues and schedulers for
 * endpoint ep.  Any reservation failure triggers a full cleanup of both
 * resource kinds.  Returns DP_SUCCESS or DP_FAILURE.
 */
int dp_node_reserve(int inst, int ep, struct dp_dev_data *data, int flags)
{
	int rc0, rc1, rc2;

	if (flags == DP_F_DEREGISTER) {
		rc0 = DP_SUCCESS;
		goto remove;
	}
	rc0 = dp_q_reserve(inst, ep, data);
	if (!rc0)
		rc0 = dp_sched_reserve(inst, ep, data);
	if (!rc0)
		return DP_SUCCESS;
remove:
	/* release in reverse order of reservation */
	rc1 = dp_sched_remove(inst, ep);
	rc2 = dp_q_remove(inst, ep);
	return (rc0 || rc1 || rc2) ?
DP_FAILURE : DP_SUCCESS; } /* dp_qos_global_info_get_31 API * Get global qos config information return DP_SUCCESS * else return DP_FAILURE */ int dp_qos_global_info_get_31(struct dp_qos_cfg_info *info, int flag) { struct cqm_port_info *cqm_info; struct hal_priv *priv; u32 quanta = 0; if (!info) { pr_err("info cannot be NULL\n"); return DP_FAILURE; } priv = HAL(info->inst); if (!priv) { pr_err("priv cannot be NULL\n"); return DP_FAILURE; } if (pp_qos_get_quanta(priv->qdev, &quanta)) { pr_err("failed pp_qos_get_quanta\n"); return DP_FAILURE; } info->quanta = quanta; cqm_info = get_dp_deqport_info(info->inst, reinsert_deq_port); info->reinsert_deq_port = reinsert_deq_port; info->reinsert_qid = cqm_info->qid[0]; DP_DEBUG(DP_DBG_FLAG_QOS, "quanta=%d, reinsert_deq_port = %d, reinsert_qid = %d\n", quanta, info->reinsert_deq_port, info->reinsert_qid); return DP_SUCCESS; } /* dp_qos_port_conf_set_31 API * Get global qos config information return DP_SUCCESS * else return DP_FAILURE */ int dp_qos_port_conf_set_31(struct dp_port_cfg_info *info, int flag) { struct hal_priv *priv; struct pp_qos_port_conf port_cfg = { 0 }; int node_id; struct cqm_deq_stat *deq_port_stat; priv = HAL(info->inst); if (!priv) { pr_err("%s priv cannot be NULL\n", __func__); return DP_FAILURE; } if (!is_deqport_valid(info->pid) || is_port_node_free(priv, info->pid)) return DP_FAILURE; deq_port_stat = &priv->deq_port_stat[info->pid]; node_id = deq_port_stat->node_id; DP_DEBUG(DP_DBG_FLAG_QOS, "%s cqm_deq:%d, qos_port:%d\n", __func__, info->pid, node_id); if (dp_qos_port_conf_get(priv->qdev, node_id, &port_cfg)) { pr_err("failed qos_port_conf_get_32\n"); return DP_FAILURE; } if (flag & DP_PORT_CFG_GREEN_THRESHOLD) port_cfg.green_threshold = info->green_threshold; if (flag & DP_PORT_CFG_YELLOW_THRESHOLD) port_cfg.yellow_threshold = info->yellow_threshold; if (flag & DP_PORT_CFG_EWSP) port_cfg.enhanced_wsp = info->ewsp; if (dp_qos_port_set(priv->qdev, node_id, &port_cfg)) { pr_err("fail to set yellow:%d 
green:%d for node:%d\n", info->yellow_threshold, info->green_threshold, node_id); return DP_FAILURE; } return DP_SUCCESS; } int dp_queue_block_flush_31(struct dp_qos_blk_flush_queue *cfg, int flag) { struct hal_priv *priv; struct pp_queue_stat *qos_queue_stat; int node_id; priv = HAL(cfg->inst); if (!priv || !priv->qdev) { pr_err("%s failed: qdev NULL with inst=%d\n", __func__, cfg->inst); return DP_FAILURE; } if (!is_qid_valid(cfg->q_id) || is_q_node_free(priv, cfg->q_id)) return DP_FAILURE; qos_queue_stat = &priv->qos_queue_stat[cfg->q_id]; node_id = qos_queue_stat->node_id; /* Flush the Queue and Restore Q back to original Deq Port */ if (queue_flush_31(cfg->inst, node_id, flag | FLUSH_RESTORE_QOS_PORT)) { pr_err("%s queue_flush for Phy Queue[%d] failed\n", __func__, cfg->q_id); return DP_FAILURE; } /* Unblock the Q, Since Q is mapped to Drop port */ if (pp_qos_queue_unblock(priv->qdev, node_id)) { pr_err("%s pp_qos_queue_unblock fail for Phy Queue[%d]\n", __func__, cfg->q_id); return DP_FAILURE; } qos_queue_stat->blocked = 0; return DP_SUCCESS; } int dp_port_block_flush_31(struct dp_qos_blk_flush_port *cfg, int flag) { struct hal_priv *priv; int i = 0; struct dp_qos_blk_flush_queue q_cfg; struct pmac_port_info *port_info; u32 cqm_deq_port = 0; u16 q_ids[MAX_Q_PER_PORT] = { 0 }; u32 q_num; u32 q_size = MAX_Q_PER_PORT; if (!cfg || is_invalid_inst(cfg->inst) || is_invalid_port(cfg->dp_port)) return DP_FAILURE; priv = HAL(cfg->inst); if (!priv || !priv->qdev) { pr_err("%s failed: qdev NULL with inst=%d\n", __func__, cfg->inst); return DP_FAILURE; } port_info = get_dp_port_info(cfg->inst, cfg->dp_port); if (!port_info->deq_port_num) return DP_FAILURE; if (cfg->deq_port_idx == DEQ_PORT_OFFSET_ALL) cqm_deq_port = port_info->deq_port_base; else cqm_deq_port = port_info->deq_port_base + cfg->deq_port_idx; if (!is_deqport_valid(cqm_deq_port) || is_port_node_free(priv, cqm_deq_port)) return DP_FAILURE; if (dp_qos_port_get_queues(priv->qdev, 
priv->deq_port_stat[cqm_deq_port].node_id, q_ids, q_size, &q_num)) { pr_err("%s: port[%d/%d]\n", __func__, cqm_deq_port, priv->deq_port_stat[cqm_deq_port].node_id); return DP_FAILURE; } q_cfg.inst = cfg->inst; /* Even if fail, continue to block and flush other queues */ for (i = 0; i < q_size; i++) { q_cfg.q_id = get_qid_by_node(cfg->inst, q_ids[i], 0); dp_queue_block_flush_31(&q_cfg, flag); } return DP_SUCCESS; }