// SPDX-License-Identifier: GPL-2.0
/**
 * @brief AVM Hardware PA (hwpa) for QCA PPE
 *
 */

#include "hwpa_ppe_internal.h"

#include <linux/of.h>
#include <linux/etherdevice.h>
#include <linux/of_net.h> /* for of_get_mac_address */

#if defined(CONFIG_ARCH_IPQ9574)
#elif defined(CONFIG_ARCH_IPQ5332)
#else
#error "Unsupported platform for ppe offloading"
#endif

/* Period for syncing stats with ppe */
#define HWPA_PPE_STATS_SYNC_PERIOD msecs_to_jiffies(500)

/* HWPA PPE Context encapsulating all driver data */
struct hwpa_ppe_context hwpa_ppe_ctx = {
	.counter_label[PPE_NO_FREE_HWS] = "no free hws",
	.counter_label[PPE_OFFLOAD_FAILED] = "offload failed",
	.counter_label[PPE_FLOW_FLUSHED] = "flow flushed by hw",
	.counter_label[PPE_WAN_MAC_REFS] = "wanif ig mac refcount",
	.counter_label[PPE_BAD_EG_MAC] = "invalid egress mac",
	.counter_count = PPE_COUNTER_MAX,
};

struct hwpa_ppe_context *hwpa_ppe_get_context(void)
{
	return &hwpa_ppe_ctx;
}

static inline struct hwpa_ppe_session *hwpa_ppe_fetch_first_free_element(void)
{
	struct hwpa_ppe_context *ppe_ctx = hwpa_ppe_get_context();
	struct hwpa_ppe_session *hws;
	int i;

	hash_for_each(ppe_ctx->free_hws_hlist, i, hws, node)
		return hws;

	return NULL;
}

/**
 * @fn void hwpa_ppe_init_hws(struct hwpa_ppe_session*)
 * @brief initializes hws to default.
 *
 * @param hws [in] hws to set to default
 */
static void hwpa_ppe_init_hws(struct hwpa_ppe_session *hws)
{
	memset(hws, 0, sizeof(struct hwpa_ppe_session));
	hws->accel_type = HWPA_PPE_ACCELERATOR_MAX;
	hws->flow_index = -1;
}

/**
 * @fn void hwpa_ppe_return_hws(struct hwpa_ppe_session*)
 * @brief returns hws from used list to free list.
 *
 * @param hws [in] a used session to free
 */
static void hwpa_ppe_return_hws(struct hwpa_ppe_session *hws)
{
	struct hwpa_ppe_context *ppe_ctx = hwpa_ppe_get_context();

	BUG_ON(hws->on_free_list);

	PR_DEVEL("Moving HWS to free list\n");

	if (hws->on_used_list) {
		spin_lock_bh(&ppe_ctx->hws_list_lock);
		hash_del_rcu(&hws->node);
		spin_unlock_bh(&ppe_ctx->hws_list_lock);
		synchronize_rcu();
	}

	hwpa_ppe_init_hws(hws);

	spin_lock_bh(&ppe_ctx->hws_list_lock);
	hws->on_free_list = true;
	hash_add(ppe_ctx->free_hws_hlist, &hws->node, 0);
	spin_unlock_bh(&ppe_ctx->hws_list_lock);
	synchronize_rcu();
}
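/*
 * Session pool overview: all hw sessions live in ppe_ctx->hws_storage and
 * move between free_hws_hlist and used_hws_hlist. Every list transition
 * happens under hws_list_lock; the stats sync worker walks used_hws_hlist
 * under RCU, which is why each transition is followed by synchronize_rcu().
 */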
/**
 * @fn enum hwpa_backend_rv hwpa_ppe_fetch_hws(struct hwpa_ppe_session**)
 * @brief fetches a free hws if available.
 *
 * @attention This function takes the hws from the free list but does not put
 *            it on the used list. This has to be done after the session has
 *            been offloaded in hw.
 *
 * @param[out] hws handle of the hwpa session or NULL if the free list is empty
 * @return Error Code or success
 */
static enum hwpa_backend_rv hwpa_ppe_fetch_hws(struct hwpa_ppe_session **hws)
{
	struct hwpa_ppe_context *ppe_ctx = hwpa_ppe_get_context();
	enum hwpa_backend_rv retval = HWPA_BACKEND_SUCCESS;
	struct hwpa_ppe_session *hws_fetched;

	PR_DEVEL("Getting free HWS from list\n");

	spin_lock_bh(&ppe_ctx->hws_list_lock);

	hws_fetched = hwpa_ppe_fetch_first_free_element();
	if (!hws_fetched) {
		*hws = NULL;
		PR_DEVEL("No free HWS on list\n");
		retval = HWPA_BACKEND_ERR_NO_FREE_HWS;
		atomic_inc(&ppe_ctx->counter[PPE_NO_FREE_HWS]);
		goto failure_1;
	}

	hash_del(&hws_fetched->node);
	hws_fetched->on_free_list = false;
	*hws = hws_fetched;

failure_1:
	spin_unlock_bh(&ppe_ctx->hws_list_lock);
	synchronize_rcu();
	return retval;
}

/**
 * @fn int try_to_accelerate(avm_pid_handle, struct sk_buff*)
 * @brief avm_pa callback function
 *
 * @param pid_handle [in] corresponding endpoint pid
 * @param skb [in] the packet
 * @return AVM_PA_RX_OK
 */
int try_to_accelerate(avm_pid_handle pid_handle, struct sk_buff *skb)
{
	return AVM_PA_RX_OK;
}

/**
 * @fn enum hwpa_backend_rv backend_activate_hw(avm_pid_handle)
 * @brief checks if interface is suitable for an hwpa_ppe offload
 *
 * @param pid_handle [in] PID to check
 * @return Error code if not suitable or HWPA_BACKEND_SUCCESS if suitable
 */
enum hwpa_backend_rv backend_activate_hw(avm_pid_handle pid_handle)
{
	struct net_device *dev;
	int32_t ifnum;
	enum hwpa_backend_rv retval = HWPA_BACKEND_ERR_INTERNAL;
	struct hwpa_ppe_context *ppe_ctx = hwpa_ppe_get_context();
	struct ppe_drv_iface *iface;
	enum hwpa_ppe_pid_type pid_type = HWPA_PPE_PID_TYPE_UNDEFINED;
	bool bridging_ok = false;
	uint32_t port_bmp = 0;

	dev = hwpa_get_netdev(pid_handle);
	if (unlikely(!dev)) {
		PR_DEVEL("Could not get netdevice for pid %d!\n", pid_handle);
		goto failure_1;
	}

	ifnum = ppe_drv_port_num_from_dev(dev);
	if (ifnum < 0) {
		PR_DEVEL("Interface %s not registered in NSS\n", dev->name);
		goto failure_2;
	}

	iface = ppe_drv_iface_get_by_dev(dev);
	if (!iface) {
		PR_DEVEL("Interface %s with ifnum %d wrongly registered in NSS\n", dev->name, ifnum);
		goto failure_2;
	}

	if (ppe_drv_iface_is_physical(iface)) {
		pid_type = HWPA_PPE_PID_TYPE_PP;
	} else {
		int32_t port = ppe_drv_port_num_from_dev(dev);
		struct avm_pa_pid_hwinfo *hwinfo;

		if (port == -1 || port < PPE_DRV_VIRTUAL_START || port > PPE_DRV_VIRTUAL_END) {
			PR_DEVEL("Bad port number (%d) for %s (ifnum=%d)\n", port, dev->name, ifnum);
			goto failure_2;
		}

		hwinfo = avm_pa_pid_get_hwinfo(pid_handle);
		if (hwinfo) {
			port_bmp = (uint32_t) hwinfo->hw;
			pr_err("ATHTAG Port with external port_bmp %d\n", port_bmp);
			pid_type = HWPA_PPE_PID_TYPE_VP_ATHTAG;
			bridging_ok = true;
		} else {
			pid_type = HWPA_PPE_PID_TYPE_VP_WIFI;
			ppe_drv_fse_feature_enable(); /* Activate fse feature only if we have at least one Wifi interface */
		}
	}

	ppe_ctx->pid_info[pid_handle].mht_port_bmp = port_bmp;
	ppe_ctx->pid_info[pid_handle].type = pid_type;

	avm_pa_pid_set_bridging(pid_handle, bridging_ok);

	PR_DEVEL("Activated HWPA for %s (pid=%d, ifnum=%d, type=%d)\n", dev->name, pid_handle, ifnum, pid_type);

	retval = HWPA_BACKEND_SUCCESS;

failure_2:
	dev_put(dev);
failure_1:
	return retval;
}

bool hwpa_ppe_valid_pid_handle(avm_pid_handle pid_handle)
{
	struct hwpa_ppe_context *ppe_ctx = hwpa_ppe_get_context();
	enum hwpa_ppe_pid_type pid_type;

	if (pid_handle < 1 || pid_handle > CONFIG_AVM_PA_MAX_PID)
		return false;

	pid_type = ppe_ctx->pid_info[pid_handle].type;

	return pid_type > HWPA_PPE_PID_TYPE_UNDEFINED && pid_type < HWPA_PPE_PID_TYPE_MAX;
}
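/*
 * WAN egress handling: the hardware only supports a single egress source MAC
 * for routed flows, tracked in ppe_ctx->wan_egress. Its state moves from
 * NOT_INITIALIZED (no MAC found in the device tree, routed WAN egress is
 * refused) via INITIALIZED (MAC known, the first offloaded session may claim
 * the egress interface) to ACTIVE (only flows leaving the registered
 * ppe_ifnum are accepted).
 */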
/**
 * @fn bool hwpa_ppe_valid_egress(const struct avm_pa_session*)
 * @brief check egress interface for routing flows if it can be configured correctly
 * @pre Requires a session with valid egress interface
 *
 * @param sess_pa [in] avm_pa session to offload
 *
 * @return true if valid egress, false if not
 */
static bool hwpa_ppe_valid_egress(const struct avm_pa_session *sess_pa)
{
	bool is_routed;
	const struct avm_pa_egress *eg;
	avm_pid_handle eg_pid_handle;
	struct net_device *netdev, *master;
	struct hwpa_ppe_context *ppe_ctx = hwpa_ppe_get_context();
	struct ppe_wan_egress_info *wan_egress = &ppe_ctx->wan_egress;
	bool retval = true;
	const struct avm_pa_pkt_match *eg_match;
	const struct ethhdr *ethh_eg;

	is_routed = !!(sess_pa->mod.modflags & AVM_PA_MOD_TTL);
	if (!is_routed)
		return retval;

	eg = avm_pa_first_egress(sess_pa);
	eg_match = &eg->match;
	ethh_eg = hwpa_get_hdr(eg_match, AVM_PA_ETH);
	eg_pid_handle = eg->pid_handle;

	/* No sanity checks needed, as we already have a checked device here */
	netdev = hwpa_get_netdev(eg_pid_handle);
	master = hwpa_get_and_hold_dev_master(netdev);

	/*
	 * If the routing egress has a master, the correct MAC may be taken from that master.
	 * This applies to WAN traffic in download direction as well, which goes over an interface
	 * that hangs in a bridge. If we have DL traffic to an interface not hanging in a bridge,
	 * we currently cannot accelerate it.
	 */
	if (master)
		goto finished;

	/* We only support one egress smac in HW. If we have a different one, cancel offload here */
	if (!ether_addr_equal(wan_egress->fos_mac, ethh_eg->h_source)) {
		retval = false;
		atomic_inc(&ppe_ctx->counter[PPE_BAD_EG_MAC]);
		goto finished;
	}

	spin_lock_bh(&wan_egress->lock);
	switch (wan_egress->state) {
	case HWPA_PPE_WAN_EGRESS_STATE_NOT_INITIALIZED:
		retval = false;
		break;
	case HWPA_PPE_WAN_EGRESS_STATE_INITIALIZED:
		/* Now we will try to register this wan egress interface during add_session */
		break;
	case HWPA_PPE_WAN_EGRESS_STATE_ACTIVE:
		/* Only allow egress traffic on registered wan egress interface */
		retval = ppe_drv_iface_idx_get_by_dev(netdev) == wan_egress->ppe_ifnum;
		break;
	default:
		retval = false;
		break;
	}
	spin_unlock_bh(&wan_egress->lock);

finished:
	PR_DEVEL("Routing session has %svalid egress wan (%s) config\n", retval ? "" : "in", netdev->name);

	dev_put(netdev);
	if (master)
		dev_put(master);

	return retval;
}
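/*
 * Supported netdev hierarchies: bridge ports and devices that are not bond
 * slaves are accepted directly. A bond slave is only accepted when its bond
 * master is itself a bridge port, which covers the MLO case of athX devices
 * sitting in a bond inside a bridge.
 */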
/**
 * @fn bool hwpa_ppe_valid_interface(avm_pid_handle)
 * @brief check pid handle if valid. Valid means that there is a high chance our ppe backend
 *        can work with this interface.
 *
 * @param pid_handle [in] interface to be checked
 *
 * @return true if valid interface, false if not
 */
bool hwpa_ppe_valid_interface(avm_pid_handle pid_handle)
{
	struct net_device *netdev, *master;
	bool valid_if = false;

	if (!hwpa_ppe_valid_pid_handle(pid_handle)) {
		PR_DEVEL("unsupported pid_handle %d\n", pid_handle);
		return false;
	}

	netdev = hwpa_get_netdev(pid_handle);
	if (unlikely(!netdev)) {
		PR_DEVEL("Could not get netdevice for pid %d\n", pid_handle);
		return false;
	}

	/* Here we just allow known hierarchies */
	if (netif_is_bridge_port(netdev) || !netif_is_bond_slave(netdev)) {
		valid_if = true;
		goto finished;
	}

	/* Here we have to sort things out for MLO where we have athX in a bond in a bridge */
	master = hwpa_get_and_hold_dev_master(netdev);
	if (master && netif_is_bridge_port(master)) {
		PR_DEVEL("%s uses bond %s with bridge master!\n", netdev->name, master->name);
		valid_if = true; /* This is for MLO */
	} else {
		PR_DEVEL("%s bad bond master (%s)!\n", netdev->name, master ? master->name : "none");
	}

	if (master)
		dev_put(master);

finished:
	PR_DEVEL("%s (pid=%d) is %s interface\n", netdev->name, pid_handle, valid_if ? "a valid" : "an invalid");

	dev_put(netdev);

	return valid_if;
}

/**
 * @fn enum hwpa_backend_rv hwpa_backend_probe_session(const struct avm_pa_session*, unsigned long*)
 * @brief probe session for ppe offload. Sort out easy-to-determine unsupported flows early
 *
 * @param sess_pa [in] avm_pa session to offload
 * @param handle_out [out] handle of the created hwpa_session
 *
 * @return success or error code
 */
enum hwpa_backend_rv hwpa_backend_probe_session(const struct avm_pa_session *sess_pa,
						unsigned long *handle_out)
{
	enum hwpa_backend_rv retval = HWPA_BACKEND_ERR_PROBE;
	const struct avm_pa_egress *eg;
	const struct avm_pa_pkt_match *ig_match, *eg_match;
	avm_pid_handle ig_pid_handle, eg_pid_handle;
	struct hwpa_ppe_context *ppe_ctx = hwpa_ppe_get_context();
	enum hwpa_ppe_pid_type eg_pid_type;

	ig_match = &sess_pa->ingress;
	eg = avm_pa_first_egress(sess_pa);
	eg_match = &eg->match;

	/* PPE cannot accelerate traffic without Ethernet headers */
	if (!hwpa_get_hdr(ig_match, AVM_PA_ETH) || !hwpa_get_hdr(eg_match, AVM_PA_ETH)) {
		PR_DEVEL("Session %p: Not accelerating traffic without Ethernet headers\n", sess_pa);
		goto failure_1;
	}

	/* PPE cannot accelerate local traffic */
	if (eg->type == avm_pa_egresstype_local) {
		PR_DEVEL("Session %p: Not accelerating local traffic\n", sess_pa);
		goto failure_1;
	}

	/* Broadcast Traffic is not supported either */
	if (eg_match->casttype == AVM_PA_IS_BROADCAST) {
		PR_DEVEL("Session %p: Not accelerating broadcast traffic\n", sess_pa);
		goto failure_1;
	}

	/* Multicast Traffic is not supported either */
	if (eg_match->casttype == AVM_PA_IS_MULTICAST) {
		PR_DEVEL("Session %p: Not accelerating multicast traffic\n", sess_pa);
		goto failure_1;
	}

	/* IPv6 extension headers are not supported. */
	if (AVM_PA_PKTTYPE_IP_VERSION(ig_match->pkttype) == 6) {
		struct ipv6hdr *in_ip6hdr = (struct ipv6hdr *) (HDRCOPY(ig_match) + ig_match->ip_offset);

		if (ipv6_ext_hdr(in_ip6hdr->nexthdr)) {
			PR_DEVEL("Session %p: Not accelerating IPv6 with extension headers\n", sess_pa);
			goto failure_1;
		}
	}

	if (AVM_PA_PKTTYPE_IP_VERSION(eg_match->pkttype) == 6) {
		struct ipv6hdr *eg_ip6hdr = (struct ipv6hdr *) (HDRCOPY(eg_match) + eg_match->ip_offset);

		if (ipv6_ext_hdr(eg_ip6hdr->nexthdr)) {
			PR_DEVEL("Session %p: Not accelerating IPv6 with extension headers\n", sess_pa);
			goto failure_1;
		}
	}

	ig_pid_handle = sess_pa->ingress_pid_handle;
	eg_pid_handle = eg->pid_handle;

	if (!hwpa_ppe_valid_interface(ig_pid_handle))
		goto failure_1;

	if (!hwpa_ppe_valid_interface(eg_pid_handle))
		goto failure_1;

	if (!hwpa_ppe_valid_egress(sess_pa))
		goto failure_1;

	/* no sanity checks needed as these are performed in hwpa_ppe_valid_interface */
	eg_pid_type = ppe_ctx->pid_info[eg_pid_handle].type;

	/* We only allow bsessions between vp athtag interfaces */
	if (sess_pa->bsession) {
		if (!((ppe_ctx->pid_info[ig_pid_handle].type == HWPA_PPE_PID_TYPE_VP_ATHTAG) &&
		      ppe_ctx->pid_info[eg_pid_handle].type == HWPA_PPE_PID_TYPE_VP_ATHTAG)) {
			PR_DEVEL("Not allowing bsessions between pid %d and %d\n", ig_pid_handle, eg_pid_handle);
			goto failure_1;
		}
	/* Non-bsession traffic towards an ATHTAG interface is not supported */
	} else if (eg_pid_type == HWPA_PPE_PID_TYPE_VP_ATHTAG) {
		PR_DEVEL("Not accelerating traffic to ATHTAG interface %d\n", eg_pid_handle);
		goto failure_1;
	}

	PR_DEVEL("avm_pa session %p probed successfully\n", sess_pa);

	retval = HWPA_BACKEND_SUCCESS;

failure_1:
	*handle_out = hw_handle_invalid;
	return retval;
}

/**
 * @fn unsigned long hwpa_ppe_hws_to_handle(struct hwpa_ppe_session*)
 * @brief hw session to handle for higher layers
 *
 * @param hws [in] hw session
 *
 * @return hw_handle_invalid in case of error, handle otherwise
 */
static unsigned long hwpa_ppe_hws_to_handle(struct hwpa_ppe_session *hws)
{
	/* TODO: refine */
	if (!hws)
		return hw_handle_invalid;

	return (unsigned long) hws;
}

/**
 * @fn struct hwpa_ppe_session* hwpa_ppe_handle_to_hws(unsigned long)
 * @brief handle from higher layers to hw session
 *
 * @param handle [in] handle
 *
 * @return NULL in case of error, pointer to hw session otherwise
 */
static struct hwpa_ppe_session *hwpa_ppe_handle_to_hws(unsigned long handle)
{
	/* TODO: refine */
	if (!handle)
		return (struct hwpa_ppe_session *) NULL;

	return (struct hwpa_ppe_session *) handle;
}

/**
 * @fn enum hwpa_ppe_accelerator_type hwpa_ppe_determine_accelerator(const struct avm_pa_session*)
 * @brief determine accelerator type for avm_pa sessions
 *
 * @param sess_pa [in] avm_pa session to offload
 *
 * @return Accelerator type for session or HWPA_PPE_ACCELERATOR_MAX in case none is found
 */
static enum hwpa_ppe_accelerator_type hwpa_ppe_determine_accelerator(const struct avm_pa_session *sess_pa)
{
	enum hwpa_ppe_accelerator_type accel_type = HWPA_PPE_ACCELERATOR_MAX;

	if (sess_pa->bsession) {
		accel_type = HWPA_PPE_ACCELERATOR_PPE_MHT_L2;
	} else {
		switch (AVM_PA_PKTTYPE_IP_VERSION(sess_pa->ingress.pkttype)) {
		case 4:
			accel_type = HWPA_PPE_ACCELERATOR_PPE_IPV4;
			break;
		case 6:
			accel_type = HWPA_PPE_ACCELERATOR_PPE_IPV6;
			break;
		default:
			return HWPA_PPE_ACCELERATOR_MAX;
		}
	}

	PR_DEVEL("%p: determined accel_type %d\n", sess_pa, accel_type);

	return accel_type;
}
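/*
 * hwpa_backend_add_session() ties the pieces together: determine the
 * accelerator (bsession -> MHT L2, otherwise IPv4/IPv6 by ingress packet
 * type), fetch a free hws, let the accelerator program the flow via
 * hwpa_ppe_add_session() and only then publish the hws on the used list
 * under the hash returned by the accelerator.
 */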
/**
 * @fn enum hwpa_backend_rv hwpa_backend_add_session(const struct avm_pa_session*, unsigned long*)
 * @brief Determines accelerator and gathers free hw session. Initiates offload.
 *
 * @param sess_pa [in] avm_pa session to offload
 * @param handle_out [out] handle of the created hwpa_session
 *
 * @return success or error code
 */
enum hwpa_backend_rv hwpa_backend_add_session(const struct avm_pa_session *sess_pa,
					      unsigned long *handle_out)
{
	struct hwpa_ppe_context *ppe_ctx = hwpa_ppe_get_context();
	enum hwpa_backend_rv retval = HWPA_BACKEND_ERR_INTERNAL;
	enum hwpa_ppe_accelerator_type accel_type;
	struct hwpa_ppe_session *hws = NULL;
	uint32_t hash;

	accel_type = hwpa_ppe_determine_accelerator(sess_pa);
	if (accel_type == HWPA_PPE_ACCELERATOR_MAX) {
		PR_DEVEL("%p: No matching accelerator\n", sess_pa);
		goto failure_1;
	}

	PR_DEVEL("%p: Determined accel_type %d\n", sess_pa, accel_type);

	retval = hwpa_ppe_fetch_hws(&hws);
	if (retval != HWPA_BACKEND_SUCCESS) {
		PR_DEVEL("%p: No free HW Session\n", sess_pa);
		goto failure_1;
	}

	PR_DEVEL("%p: fetched hws %p\n", sess_pa, hws);

	hws->accel_type = accel_type;
	hws->sess_pa = sess_pa;

	retval = hwpa_ppe_add_session(hws, &hash);
	if (retval != HWPA_BACKEND_SUCCESS) {
		PR_DEVEL("%p: Offload failed for hws %p\n", sess_pa, hws);
		atomic_inc(&ppe_ctx->counter[PPE_OFFLOAD_FAILED]);
		goto failure_2;
	}

	spin_lock_bh(&ppe_ctx->hws_list_lock);
	hws->on_used_list = true;
	hash_add_rcu(ppe_ctx->used_hws_hlist, &hws->node, hash);
	spin_unlock_bh(&ppe_ctx->hws_list_lock);
	synchronize_rcu();

	*handle_out = hwpa_ppe_hws_to_handle(hws);

	return retval;

failure_2:
	hwpa_ppe_return_hws(hws);
failure_1:
	return retval;
}

/**
 * @fn enum hwpa_backend_rv hwpa_backend_rem_session(unsigned long)
 * @brief Remove session from hardware
 *
 * @param handle [in] the hwpa session
 *
 * @return success or error code
 */
enum hwpa_backend_rv hwpa_backend_rem_session(unsigned long handle)
{
	enum hwpa_backend_rv retval = HWPA_BACKEND_ERR_INTERNAL;
	struct hwpa_ppe_session *hws;

	hws = hwpa_ppe_handle_to_hws(handle);
	if (!hws) {
		pr_warn("Bad hws handle (0x%lx)\n", handle);
		goto failure_1;
	}

	PR_DEVEL("Removing session for hws %p\n", hws);

	retval = hwpa_ppe_rem_session(hws);
	if (retval != HWPA_BACKEND_SUCCESS) {
		PR_DEVEL("%p: Session removal failed for hws %p\n", hws->sess_pa, hws);
		goto failure_2;
	}

failure_2:
	hwpa_ppe_return_hws(hws);
failure_1:
	return retval;
}
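/*
 * Stats model: the periodic sync worker below asks each accelerator to copy
 * hardware counters into hws->stats and to set hws->stats_updated.
 * hwpa_backend_stats() then reports the accumulated values to avm_pa and
 * clears them, so every counted packet and byte is reported exactly once.
 */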
/**
 * @fn enum hwpa_backend_rv hwpa_backend_stats(unsigned long, struct avm_pa_session_stats*)
 * @brief Gather session stats from hardware
 *
 * @param handle [in] handle of the hwpa session
 * @param stats [out] avm_pa stats to fill
 * @return In case of bad handle error. Success otherwise.
 */
enum hwpa_backend_rv hwpa_backend_stats(unsigned long handle, struct avm_pa_session_stats *stats)
{
	struct hwpa_ppe_context *ppe_ctx = hwpa_ppe_get_context();
	struct hwpa_ppe_session *hws;
	struct hwpa_ppe_accelerator *accelerator;

	hws = hwpa_ppe_handle_to_hws(handle);
	if (!hws) {
		pr_warn("Bad hws handle (0x%lx)\n", handle);
		return HWPA_BACKEND_ERR_INTERNAL;
	}

	accelerator = hwpa_ppe_get_accelerator(hws);
	if (!accelerator) {
		pr_warn("Bad accelerator (0x%lx)\n", handle);
		return HWPA_BACKEND_ERR_INTERNAL;
	}

	spin_lock_bh(&ppe_ctx->hws_stats_lock);
	if (hws->stats_updated) {
		hws->stats_updated = false;

		stats->validflags |= accelerator->stats_updated_flags;
		stats->tx_pkts = hws->stats.tx_pkts;
		stats->tx_bytes = (u64) hws->stats.tx_bytes;

		hws->stats.tx_pkts = 0;
		hws->stats.tx_bytes = 0;

		spin_unlock_bh(&ppe_ctx->hws_stats_lock);
		return HWPA_BACKEND_SUCCESS;
	}
	spin_unlock_bh(&ppe_ctx->hws_stats_lock);

	stats->validflags &= ~AVM_PA_SESSION_STATS_VALID_BYTES;
	stats->validflags &= ~AVM_PA_SESSION_STATS_VALID_PKTS;

	return HWPA_BACKEND_SUCCESS;
}

/**
 * @fn void hwpa_ppe_sync_work(struct work_struct*)
 * @brief work function for the stats sync workqueue
 *
 * @param work [in] work struct
 */
static void hwpa_ppe_sync_work(struct work_struct *work)
{
	struct hwpa_ppe_context *ppe_ctx = hwpa_ppe_get_context();
	struct hwpa_ppe_accelerator *accelerator;
	struct hwpa_ppe_session *hws;
	int bkt;

	rcu_read_lock();
	hash_for_each_rcu(ppe_ctx->used_hws_hlist, bkt, hws, node) {
		accelerator = hwpa_ppe_get_accelerator(hws);
		if ((!accelerator || !accelerator->sync_session) || (hws->on_free_list)) {
			PR_DEVEL_SYNC("Error, no accelerator or not in hw for hws %p.\n", hws);
			continue;
		}

		accelerator->sync_session(hws);
	}
	rcu_read_unlock();

	queue_delayed_work(ppe_ctx->workqueue, &ppe_ctx->work, HWPA_PPE_STATS_SYNC_PERIOD);
}
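/*
 * Illustrative sketch of the device-tree layout the lookup below assumes
 * (node names and the MAC value are hypothetical, only the label follows
 * the documented example):
 *
 *	__symbols__ {
 *		avm_mac_addr_macrouter_0 = "/soc/ethernet@0";
 *	};
 *
 *	ethernet@0 {
 *		mac-address = [00 11 22 33 44 55];
 *	};
 *
 * The label property found under DTS_SYMBOLS_NODE resolves to a node path,
 * and of_get_mac_address() then reads that node's MAC address.
 */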
/**
 * @fn uint8_t *of_find_avm_mac_address(const char*)
 * @brief find avm mac in dts, eg. avm_mac_addr_macrouter_0
 *
 * @param label [in] DTS symbol label to look up
 * @return pointer to the MAC address or NULL if nothing found
 */
static uint8_t *of_find_avm_mac_address(const char *label)
{
	struct device_node *dev_node = NULL;
	uint8_t *maddr = NULL;

	dev_node = of_find_node_by_name(NULL, DTS_SYMBOLS_NODE);
	if (dev_node) {
		struct property *pp;

		pp = of_find_property(dev_node, label, NULL);
		if (pp) {
			char path[DTS_MAX_PATH] = {0};

			strncpy(path, pp->value, pp->length);
			dev_node = of_find_node_by_path(path);
			if (dev_node) {
				maddr = (uint8_t *)of_get_mac_address(dev_node);
				if (IS_ERR(maddr)) {
					maddr = NULL;
				} else {
					PR_DEVEL("Found internet MAC: %pM\n", maddr);
				}
			} else {
				pr_err("MAC not found on path %s\n", path);
			}
		} else {
			pr_err("MAC property %s not found\n", label);
		}
	} else {
		pr_err("%s not found\n", DTS_SYMBOLS_NODE);
	}

	return maddr;
}

/**
 * @fn enum hwpa_backend_rv hwpa_ppe_ctx_init()
 * @brief initializes hw sessions and puts them on the free list
 *
 * @return success or error code
 */
enum hwpa_backend_rv hwpa_ppe_ctx_init(void)
{
	uint32_t i = 0;
	struct hwpa_ppe_context *ppe_ctx = hwpa_ppe_get_context();
	struct hwpa_ppe_session *hws;
	struct ppe_wan_egress_info *wan_egress;
	uint8_t *dts_wan_mac;

	PR_DEVEL("Initialize HWPA Context\n");

	INIT_LIST_HEAD(&ppe_ctx->accelerators);

	hash_init(ppe_ctx->free_hws_hlist);
	hash_init(ppe_ctx->used_hws_hlist);
	spin_lock_init(&ppe_ctx->hws_list_lock);
	spin_lock_init(&ppe_ctx->hws_stats_lock);

	/* initialize all sessions and move them to free list */
	for (i = 0; i < ARRAY_SIZE(ppe_ctx->hws_storage); ++i) {
		hws = &ppe_ctx->hws_storage[i];
		hwpa_ppe_init_hws(hws);
		hash_add(ppe_ctx->free_hws_hlist, &hws->node, 0);
		hws->on_free_list = true;
	}

	for (i = 0; i < PPE_COUNTER_MAX; ++i)
		atomic_set(&ppe_ctx->counter[i], 0);

	PR_DEVEL("Initialized %u PPE counters\n", i);

	ppe_ctx->workqueue = create_singlethread_workqueue("hwpa_ppe_sync_workqueue");
	if (!ppe_ctx->workqueue) {
		PR_DEVEL("Error could not init workqueue.\n");
		return HWPA_BACKEND_ERR_NO_MEM;
	}

	wan_egress = &ppe_ctx->wan_egress;
	memset(wan_egress, 0, sizeof(*wan_egress));
	spin_lock_init(&wan_egress->lock);
	wan_egress->state = HWPA_PPE_WAN_EGRESS_STATE_NOT_INITIALIZED;
	wan_egress->ppe_ifnum = -1;

	dts_wan_mac = of_find_avm_mac_address(AVM_DTS_INTERNET_MAC);
	if (dts_wan_mac && is_valid_ether_addr(dts_wan_mac)) {
		ether_addr_copy(wan_egress->fos_mac, dts_wan_mac);
		wan_egress->state = HWPA_PPE_WAN_EGRESS_STATE_INITIALIZED;
	} else {
		pr_warn("No valid WAN egress MAC given -- WAN egress acceleration not permitted!\n");
	}

	hwpa_ppe_exception_init();

	INIT_DELAYED_WORK(&ppe_ctx->work, hwpa_ppe_sync_work);
	queue_delayed_work(ppe_ctx->workqueue, &ppe_ctx->work, HWPA_PPE_STATS_SYNC_PERIOD);

	return HWPA_BACKEND_SUCCESS;
}

/**
 * @fn enum hwpa_backend_rv hwpa_ppe_accelerators_exit(void)
 * @brief de-initializes all accelerators
 *
 * @return success or error code
 */
enum hwpa_backend_rv hwpa_ppe_accelerators_exit(void)
{
	struct hwpa_ppe_context *ppe_ctx = hwpa_ppe_get_context();
	struct hwpa_ppe_accelerator *accelerator;
	enum hwpa_backend_rv retval = HWPA_BACKEND_SUCCESS;

	if (ppe_ctx->workqueue) {
		cancel_delayed_work_sync(&ppe_ctx->work);
		destroy_workqueue(ppe_ctx->workqueue);
	}

	list_for_each_entry(accelerator, &ppe_ctx->accelerators, ctx_node) {
		retval = hwpa_ppe_accelerator_exit(accelerator);
		if (retval != HWPA_BACKEND_SUCCESS)
			pr_err("Could not de-initialize accelerator: %s\n", accelerator->label);
	}

	hwpa_ppe_exception_exit();

	return retval;
}
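/*
 * Accelerator lifecycle: each accelerator registers itself via
 * hwpa_ppe_register_accelerator() during hwpa_backend_init(), is brought up
 * by hwpa_ppe_accelerators_init() and torn down by
 * hwpa_ppe_accelerators_exit(); a failed init rolls back the accelerators
 * that were already initialized.
 */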
/**
 * @fn enum hwpa_backend_rv hwpa_ppe_accelerators_init(void)
 * @brief initializes all accelerators
 *
 * @return success or error code
 */
enum hwpa_backend_rv hwpa_ppe_accelerators_init(void)
{
	struct hwpa_ppe_context *ppe_ctx = hwpa_ppe_get_context();
	struct hwpa_ppe_accelerator *accelerator;
	enum hwpa_backend_rv retval = HWPA_BACKEND_SUCCESS;

	list_for_each_entry(accelerator, &ppe_ctx->accelerators, ctx_node) {
		retval = hwpa_ppe_accelerator_init(accelerator);
		if (retval != HWPA_BACKEND_SUCCESS) {
			pr_err("Could not initialize accelerator: %s\n", accelerator->label);
			break;
		}
	}

	/* In case of error de-initialize all active accelerators */
	if (retval != HWPA_BACKEND_SUCCESS)
		hwpa_ppe_accelerators_exit();

	return retval;
}

enum hwpa_backend_rv hwpa_ppe_register_accelerator(struct hwpa_ppe_accelerator *accelerator)
{
	struct hwpa_ppe_context *ppe_ctx = hwpa_ppe_get_context();
	int accelerator_count = 0;
	struct hwpa_ppe_accelerator *_unused_;

	BUG_ON(accelerator->counter_count > HWPA_PPE_ACCELERATOR_MAX_COUNTERS);

	list_for_each_entry(_unused_, &ppe_ctx->accelerators, ctx_node) {
		accelerator_count++;
	}

	if (accelerator_count >= HWPA_PPE_MAX_ACCELERATORS) {
		pr_err("Too many accelerators!\n");
		return HWPA_BACKEND_ERR_INTERNAL;
	}

	list_add(&accelerator->ctx_node, &ppe_ctx->accelerators);

	return HWPA_BACKEND_SUCCESS;
}
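/*
 * hwpa_backend_check_session() backs the HWPA_BACKEND_HAS_SESSION_CHECK
 * capability advertised in hwpa_backend_init(): once a flow has been flushed
 * by hardware, hws->session_flushed is set elsewhere in the backend and the
 * next check lets avm_pa retire the session.
 */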
/**
 * @fn enum hwpa_backend_rv hwpa_backend_check_session(unsigned long handle)
 * @brief checks if a certain session is already flushed
 * @return
 * @li HWPA_BACKEND_ERR_INTERNAL - Session already flushed
 * @li HWPA_BACKEND_SUCCESS - Session OK
 */
enum hwpa_backend_rv hwpa_backend_check_session(unsigned long handle)
{
	struct hwpa_ppe_context *ppe_ctx = hwpa_ppe_get_context();
	struct hwpa_ppe_session *hws;
	enum hwpa_backend_rv retval;

	hws = hwpa_ppe_handle_to_hws(handle);
	if (!hws) {
		pr_warn("Bad hws handle (0x%lx)\n", handle);
		return HWPA_BACKEND_ERR_INTERNAL;
	}

	spin_lock_bh(&ppe_ctx->hws_list_lock);
	retval = hws->session_flushed ? HWPA_BACKEND_ERR_INTERNAL : HWPA_BACKEND_SUCCESS;
	spin_unlock_bh(&ppe_ctx->hws_list_lock);

	if (retval == HWPA_BACKEND_ERR_INTERNAL) {
		PR_DEVEL("%p: Flow ID %d is marked as flushed\n", hws, hws->flow_index);
		atomic_inc(&ppe_ctx->counter[PPE_FLOW_FLUSHED]);
	}

	return retval;
}

/**
 * @fn enum hwpa_backend_rv hwpa_backend_init(struct hwpa_backend_config*)
 * @brief Initialize Backend
 *
 * @param hw_pa_config [in] backend configuration to fill in
 * @return success or error code
 */
enum hwpa_backend_rv hwpa_backend_init(struct hwpa_backend_config *hw_pa_config)
{
	enum hwpa_backend_rv retval = HWPA_BACKEND_ERR_INTERNAL;

	hw_pa_config->alloc_rx_channel = NULL;
	hw_pa_config->alloc_tx_channel = NULL;
	hw_pa_config->free_rx_channel = NULL;
	hw_pa_config->free_tx_channel = NULL;
	hw_pa_config->flags |= HWPA_BACKEND_HAS_SESSION_CHECK;

	retval = hwpa_ppe_ctx_init();
	if (retval != HWPA_BACKEND_SUCCESS) {
		pr_err("Could not initialize hwpa ppe context\n");
		goto failure_1;
	}

	retval = hwpa_ppe_ipv4_register_accelerator();
	if (retval != HWPA_BACKEND_SUCCESS) {
		pr_err("Could not register ipv4 ppe accelerator\n");
		goto failure_1;
	}

	retval = hwpa_ppe_ipv6_register_accelerator();
	if (retval != HWPA_BACKEND_SUCCESS) {
		pr_err("Could not register IPv6 PPE accelerator\n");
		goto failure_1;
	}

#ifdef CONFIG_QCA_PPE_AVM_QCA838X_FRITZ_BOX
	retval = hwpa_ppe_mht_l2_register_accelerator();
	if (retval != HWPA_BACKEND_SUCCESS) {
		pr_err("Could not register mht_l2 ppe accelerator\n");
		goto failure_1;
	}
#endif

	retval = hwpa_ppe_accelerators_init();
	if (retval != HWPA_BACKEND_SUCCESS) {
		pr_err("Could not initialize hwpa accelerators\n");
		goto failure_1;
	}

	hwpa_ppe_debugfs_init();

	retval = HWPA_BACKEND_SUCCESS;

	PR_DEVEL("Initialized HWPA Backend\n");

failure_1:
	return retval;
}

/**
 * @fn void hwpa_backend_exit(void)
 * @brief De-Init Backend
 */
void hwpa_backend_exit(void)
{
	struct hwpa_ppe_context *ppe_ctx = hwpa_ppe_get_context();

	clear_bit(HWPA_QCA_CTX_FLAG_ACTIVE, &ppe_ctx->flags);

	hwpa_ppe_accelerators_exit();
	hwpa_ppe_debugfs_exit();

	PR_DEVEL("De-initialized HWPA Backend\n");
}