// SPDX-License-Identifier: GPL-2.0
/**
 * @brief HWPA accelerator frontend. Abstracts commonly used functionality
 *        of the different accelerators.
 */

#include "hwpa_ppe_internal.h"

/* Assumption: the two include targets were lost; these headers cover the
 * netdev and MAC helpers used below.
 */
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

/**
 * @fn struct hwpa_ppe_accelerator* hwpa_ppe_get_accelerator(struct hwpa_ppe_session*)
 * @brief checks the accel_type of hws and gets the corresponding accelerator
 *
 * @param hws [in] hw session to get the accelerator from
 * @return NULL if bad accel_type, accelerator otherwise
 */
struct hwpa_ppe_accelerator *hwpa_ppe_get_accelerator(struct hwpa_ppe_session *hws)
{
	struct hwpa_ppe_context *ppe_ctx = hwpa_ppe_get_context();
	struct hwpa_ppe_accelerator *accelerator = NULL;

	list_for_each_entry(accelerator, &ppe_ctx->accelerators, ctx_node) {
		if (accelerator->accel_type == hws->accel_type)
			return accelerator;
	}

	PR_DEVEL("%p bad acceleration type (%d)\n", hws, hws->accel_type);

	return NULL;
}

enum hwpa_backend_rv hwpa_ppe_get_and_hold_ppe_master(struct net_device *netdev,
						      struct net_device **master_out)
{
	struct net_device *bond_slave;
	struct net_device *master = NULL;

	if (!netif_is_bridge_port(netdev) && !netif_is_bond_slave(netdev)) {
		*master_out = NULL;
		return HWPA_BACKEND_SUCCESS;
	}

	master = hwpa_get_and_hold_dev_master(netdev);

	if (master && netif_is_bridge_port(netdev)) {
		*master_out = master;
		return HWPA_BACKEND_SUCCESS;
	}

	if (!master) {
		*master_out = NULL;
		return HWPA_BACKEND_ERR_BAD_HIERARCHY;
	}

	if (netif_is_bond_slave(netdev) && netif_is_bridge_port(master)) {
		bond_slave = master;
		master = hwpa_get_and_hold_dev_master(bond_slave);
		dev_put(bond_slave);

		if (master) {
			*master_out = master;
			return HWPA_BACKEND_SUCCESS;
		}

		*master_out = NULL;
		return HWPA_BACKEND_ERR_BAD_HIERARCHY;
	}

	dev_put(master);
	*master_out = NULL;
	return HWPA_BACKEND_ERR_BAD_HIERARCHY;
}

bool hwpa_ppe_mac_to_wan_if_op(ppe_drv_iface_t idx, const uint8_t *mac_addr,
			       fal_ip_direction_t dir, bool add)
{
	uint8_t mac[ETH_ALEN];
	ppe_drv_ret_t ppe_rv;
	struct hwpa_ppe_context *ppe_ctx = hwpa_ppe_get_context();

	/* Copy, as the ssdk/ppe driver does not take a const pointer */
	ether_addr_copy(mac, mac_addr);

	/* MAC addresses added multiple times are handled on a lower level */
	ppe_rv = add ? ppe_drv_avm_iface_mac_addr_add(idx, mac, dir) :
		       ppe_drv_avm_iface_mac_addr_del(idx, mac, dir);
	if (unlikely(ppe_rv != PPE_DRV_RET_SUCCESS)) {
		PR_DEVEL("Not able to %s AVM MAC on ppe_drv_iface_t %d, dir = %d, error code: %u\n",
			 add ? "add" : "delete", idx, dir, ppe_rv);
		return false;
	}

	PR_DEVEL("%s AVM MAC %pM on ppe_drv_iface_t %d, dir = %d\n",
		 add ? "Added" : "Deleted", mac, idx, dir);

	if (dir == FAL_IP_EGRESS)
		return true;

	if (add)
		atomic_inc(&ppe_ctx->counter[PPE_WAN_MAC_REFS]);
	else
		atomic_dec(&ppe_ctx->counter[PPE_WAN_MAC_REFS]);

	return true;
}

void hwpa_ppe_uninstall_egress_mac(void)
{
	struct hwpa_ppe_context *ppe_ctx = hwpa_ppe_get_context();
	struct ppe_wan_egress_info *wan_egress = &ppe_ctx->wan_egress;

	spin_lock_bh(&wan_egress->lock);

	BUG_ON(wan_egress->refcount == 0);

	wan_egress->refcount--;
	if (!wan_egress->refcount) {
		/*
		 * Here we do not just delete our internet MAC, we write back
		 * the MAC we fetched before. For this reason the last
		 * parameter here needs to be "true".
		 */
		if (!hwpa_ppe_mac_to_wan_if_op(wan_egress->ppe_ifnum,
					       wan_egress->ppe_if_mac,
					       FAL_IP_EGRESS, true))
			pr_warn("Failed to remove eg mac from ppe-if %d\n",
				wan_egress->ppe_ifnum);

		eth_zero_addr(wan_egress->ppe_if_mac);
		wan_egress->ppe_ifnum = -1;
		wan_egress->state = HWPA_PPE_WAN_EGRESS_STATE_INITIALIZED;
	}

	spin_unlock_bh(&wan_egress->lock);
}
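/*
 * Usage sketch (hypothetical caller, names assumed): the WAN egress MAC is
 * refcounted, so every successful install via hwpa_ppe_install_egress_mac()
 * below must be balanced by exactly one hwpa_ppe_uninstall_egress_mac().
 * The eg_mac_added out-parameter tells the caller whether it owns a
 * reference:
 *
 *	bool eg_mac_added;
 *
 *	if (hwpa_ppe_install_egress_mac(ifnum_out, out_dev, &eg_mac_added) !=
 *	    HWPA_BACKEND_SUCCESS)
 *		return HWPA_BACKEND_ERR_WAN_EG_MAC;
 *	...
 *	if (eg_mac_added)
 *		hwpa_ppe_uninstall_egress_mac();
 */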
enum hwpa_backend_rv hwpa_ppe_install_egress_mac(int32_t ifnum_out,
						 struct net_device *out,
						 bool *eg_mac_added)
{
	struct hwpa_ppe_context *ppe_ctx = hwpa_ppe_get_context();
	struct ppe_wan_egress_info *wan_egress = &ppe_ctx->wan_egress;
	enum hwpa_backend_rv retval = HWPA_BACKEND_SUCCESS;

	*eg_mac_added = false;

	spin_lock_bh(&wan_egress->lock);

	/* Here we assume all basic checks were performed during probe */
	switch (wan_egress->state) {
	case HWPA_PPE_WAN_EGRESS_STATE_INITIALIZED:
		if (unlikely(!hwpa_ppe_mac_to_wan_if_op(ifnum_out,
							wan_egress->fos_mac,
							FAL_IP_EGRESS, true))) {
			retval = HWPA_BACKEND_ERR_WAN_EG_MAC;
			PR_DEVEL("Egress MAC add failed!\n");
			goto out;
		}
		wan_egress->ppe_ifnum = ifnum_out;
		ether_addr_copy(wan_egress->ppe_if_mac, out->dev_addr);
		wan_egress->refcount++;
		*eg_mac_added = true;
		wan_egress->state = HWPA_PPE_WAN_EGRESS_STATE_ACTIVE;
		break;
	case HWPA_PPE_WAN_EGRESS_STATE_ACTIVE:
		/*
		 * Here we catch a potential race where probe allowed offload
		 * with our internet MAC for different WAN interfaces.
		 */
		if (wan_egress->ppe_ifnum == ifnum_out) {
			wan_egress->refcount++;
			*eg_mac_added = true;
		} else {
			retval = HWPA_BACKEND_ERR_WAN_EG_MAC;
			PR_DEVEL("Tried to install wan egress mac for more than one interface\n");
			goto out;
		}
		break;
	default:
		retval = HWPA_BACKEND_ERR_WAN_EG_MAC;
		PR_DEVEL("Bad egress state\n");
		goto out;
	}

out:
	spin_unlock_bh(&wan_egress->lock);

	return retval;
}

/**
 * @fn enum hwpa_backend_rv hwpa_ppe_add_session(struct hwpa_ppe_session*, uint32_t*)
 * @brief adds a session to hw using its accelerator
 *
 * @param hws  [in]  hw session to add
 * @param hash [out] hash of the added session
 * @return error code on error, success otherwise
 */
enum hwpa_backend_rv hwpa_ppe_add_session(struct hwpa_ppe_session *hws, uint32_t *hash)
{
	struct hwpa_ppe_accelerator *accelerator;

	accelerator = hwpa_ppe_get_accelerator(hws);
	if (!accelerator)
		return HWPA_BACKEND_ERR_NO_ACCELERATOR;

	PR_DEVEL("Adding hw session %p with avm_pa session %p and accelerator %s\n",
		 hws, hws->sess_pa, accelerator->label);

	return accelerator->add_session(hws, hash);
}

/**
 * @fn enum hwpa_backend_rv hwpa_ppe_rem_session(struct hwpa_ppe_session*)
 * @brief removes a session from hw using its accelerator
 *
 * @param hws [in] hw session to remove
 * @return error code on error, success otherwise
 */
enum hwpa_backend_rv hwpa_ppe_rem_session(struct hwpa_ppe_session *hws)
{
	struct hwpa_ppe_accelerator *accelerator;

	accelerator = hwpa_ppe_get_accelerator(hws);
	if (!accelerator)
		return HWPA_BACKEND_ERR_NO_ACCELERATOR;

	PR_DEVEL("Removing hws %p with accelerator %s\n", hws, accelerator->label);

	return accelerator->remove_session(hws);
}

/*
 * Check whether a ppe qdisc is enabled on a device.
 * Referenced from the vendor function ecm_front_end_common_intf_qdisc_check().
 */
bool hwpa_ppe_qdisc_enabled(struct net_device *dev)
{
	int i;
	bool ret = false;
	struct netdev_queue *txq;
	struct Qdisc *q;

	rcu_read_lock_bh();
	for (i = 0; i < dev->real_num_tx_queues; i++) {
		txq = netdev_get_tx_queue(dev, i);
		q = rcu_dereference_bh(txq->qdisc);
		if (!q || !q->enqueue)
			continue;

		if (q->flags & TCQ_F_NSS) {
			ret = true;
			break;
		}
	}
	rcu_read_unlock_bh();

	return ret;
}
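/*
 * Stats flow sketch (hypothetical backend callback, names assumed): backends
 * that report absolute TX totals feed them through
 * hwpa_ppe_calc_abs_tx_stats() below, which derives the delta and forwards
 * it to hwpa_ppe_calc_delta_tx_stats(); backends that already report deltas
 * call the latter directly:
 *
 *	static void my_accel_stats_cb(struct hwpa_ppe_session *hws,
 *				      uint64_t total_tx_bytes,
 *				      uint64_t total_tx_pkts)
 *	{
 *		hwpa_ppe_calc_abs_tx_stats(hws, total_tx_bytes, total_tx_pkts);
 *	}
 */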
/**
 * @fn void hwpa_ppe_calc_delta_tx_stats(struct hwpa_ppe_session *hws,
 *	uint32_t delta_tx_bytes, uint32_t delta_tx_packets)
 *
 * @brief prepares the stats for summation
 * @param[in,out] hws hwpa session for the stats
 * @param[in] delta_tx_bytes sent bytes
 * @param[in] delta_tx_packets sent packets
 *
 * @todo Stats CB sends the bytes only in 32 bit -> wrap-around is possible
 */
void hwpa_ppe_calc_delta_tx_stats(struct hwpa_ppe_session *hws,
				  uint32_t delta_tx_bytes,
				  uint32_t delta_tx_packets)
{
	struct hwpa_ppe_context *ppe_ctx = hwpa_ppe_get_context();

	if (!delta_tx_bytes && !delta_tx_packets)
		return;

	spin_lock_bh(&ppe_ctx->hws_stats_lock);
	hws->stats_updated = true;
	hws->stats.tx_bytes += delta_tx_bytes;
	hws->stats.tx_pkts += delta_tx_packets;
	hws->stats.total_tx_bytes += delta_tx_bytes;
	hws->stats.total_tx_pkts += delta_tx_packets;
	spin_unlock_bh(&ppe_ctx->hws_stats_lock);
}

/**
 * @fn void hwpa_ppe_calc_abs_tx_stats(struct hwpa_ppe_session *hws,
 *	uint64_t tx_bytes, uint64_t tx_packets)
 *
 * @brief calculates the delta bytes and packets from the total stats of a session
 * @param[in,out] hws hwpa session for the stats
 * @param[in] tx_bytes sent bytes (total)
 * @param[in] tx_packets sent packets (total)
 *
 * @todo Stats CB sends the bytes only in 32 bit -> wrap-around is possible
 */
void hwpa_ppe_calc_abs_tx_stats(struct hwpa_ppe_session *hws,
				uint64_t tx_bytes, uint64_t tx_packets)
{
	struct hwpa_ppe_context *ppe_ctx = hwpa_ppe_get_context();
	uint32_t delta_bytes = 0;
	uint32_t delta_packets = 0;

	spin_lock_bh(&ppe_ctx->hws_stats_lock);
	if ((hws->stats.total_tx_bytes < tx_bytes) &&
	    (hws->stats.total_tx_pkts < tx_packets)) {
		delta_bytes = tx_bytes - hws->stats.total_tx_bytes;
		delta_packets = tx_packets - hws->stats.total_tx_pkts;
	}
	spin_unlock_bh(&ppe_ctx->hws_stats_lock);

	hwpa_ppe_calc_delta_tx_stats(hws, delta_bytes, delta_packets);
}

/**
 * @fn enum hwpa_backend_rv hwpa_ppe_accelerator_init(struct hwpa_ppe_accelerator*)
 * @brief initializes an accelerator
 *
 * @param accelerator [in] accelerator to initialize
 * @return error code on error, success otherwise
 */
enum hwpa_backend_rv hwpa_ppe_accelerator_init(struct hwpa_ppe_accelerator *accelerator)
{
	enum hwpa_backend_rv retval = HWPA_BACKEND_SUCCESS;

	if (accelerator->init)
		retval = accelerator->init();

	if (retval != HWPA_BACKEND_SUCCESS)
		goto out;

	PR_DEVEL("Initialized accelerator %s\n", accelerator->label);

	if (!accelerator->add_session || !accelerator->remove_session) {
		retval = HWPA_BACKEND_ERR_INTERNAL;
		PR_DEVEL("Accelerator %s is missing mandatory functions. Aborting\n",
			 accelerator->label);
		goto out;
	}

	set_bit(HWPA_QCA_ACCEL_FLAG_INITIALIZED, &accelerator->flags);

out:
	return retval;
}

/**
 * @fn enum hwpa_backend_rv hwpa_ppe_accelerator_exit(struct hwpa_ppe_accelerator*)
 * @brief de-initializes an accelerator
 *
 * @param accelerator [in] accelerator to de-initialize
 * @return error code on error, success otherwise
 */
enum hwpa_backend_rv hwpa_ppe_accelerator_exit(struct hwpa_ppe_accelerator *accelerator)
{
	enum hwpa_backend_rv retval = HWPA_BACKEND_SUCCESS;

	if (test_bit(HWPA_QCA_ACCEL_FLAG_INITIALIZED, &accelerator->flags) &&
	    accelerator->exit)
		retval = accelerator->exit();

	return retval;
}
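/*
 * Backend sketch (hypothetical accelerator, names assumed): as enforced by
 * hwpa_ppe_accelerator_init() above, add_session and remove_session are
 * mandatory, while init and exit are optional. A minimal backend might be
 * defined as follows, with HWPA_PPE_ACCEL_TYPE_MY standing in for a real
 * accel_type value:
 *
 *	static struct hwpa_ppe_accelerator my_accelerator = {
 *		.label          = "my-accelerator",
 *		.accel_type     = HWPA_PPE_ACCEL_TYPE_MY,
 *		.add_session    = my_add_session,
 *		.remove_session = my_remove_session,
 *	};
 */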