// SPDX-License-Identifier: GPL-2.0
/**
 * @brief HWPA PPE IPv4 accelerator. Offloads IPv4 sessions to the PPE.
 */

#include "hwpa_ppe_internal.h"
#include
#include
#include
#include

struct hwpa_ppe_accelerator ipv4_accelerator;

enum HWPA_PPE_IPV4_COUNTER {
	IPV4_NO_FLOW_ID,
	IPV4_COUNTER_MAX,
};

static void hwpa_ppe_ipv4_fill_tuple(const struct avm_pa_pkt_match *match,
				     struct ppe_drv_v4_5tuple *tuple)
{
	const uint16_t *ports;
	const struct iphdr *ips;

	ips = hwpa_get_hdr(match, AVM_PA_IPV4);
	ports = hwpa_get_hdr(match, AVM_PA_PORTS);

	tuple->flow_ip = ntohl(ips->saddr);
	tuple->flow_ident = ntohs(ports[0]);
	tuple->return_ip = ntohl(ips->daddr);
	tuple->return_ident = ntohs(ports[1]);
	tuple->protocol = AVM_PA_PKTTYPE_IPPROTO(match->pkttype);
}

void hwpa_ppe_ipv4_dump_hws(hwpa_ppe_fprintf fprintffunc, void *arg,
			    const struct hwpa_ppe_session *hws)
{
	struct ppe_drv_v4_5tuple tuple = {0};

	hwpa_ppe_ipv4_fill_tuple(&hws->sess_pa->ingress, &tuple);

	fprintffunc(arg, " protocol: %d\n from ip: %pI4h:%d\n to ip: %pI4h:%d\n",
		    tuple.protocol,
		    &tuple.flow_ip, tuple.flow_ident,
		    &tuple.return_ip, tuple.return_ident);
}

static bool hwpa_ppe_ipv4_match_tuple(const struct ppe_drv_v4_5tuple *tuple1,
				      const struct ppe_drv_v4_5tuple *tuple2)
{
	/* AVM TLG: memcmp does not work here because of struct padding */
	return ((tuple1->flow_ip == tuple2->flow_ip) &&
		(tuple1->flow_ident == tuple2->flow_ident) &&
		(tuple1->return_ip == tuple2->return_ip) &&
		(tuple1->return_ident == tuple2->return_ident) &&
		(tuple1->protocol == tuple2->protocol));
}

/**
 * @fn static struct hwpa_ppe_session *hwpa_ppe_ipv4_find_connection
 *	(struct ppe_drv_v4_5tuple *tuple, bool only_unflushed)
 *
 * @brief search a session given by the 3/5 tuple
 * @param[in] tuple 3/5 tuple of the wanted session
 * @param[in] only_unflushed if set to true, only sessions that have not been flushed are returned
 * @retval hwpa session
 * @retval NULL - not found
 */
static struct hwpa_ppe_session *hwpa_ppe_ipv4_find_connection(struct ppe_drv_v4_5tuple *tuple,
							       bool only_unflushed)
{
	struct hwpa_ppe_context *ppe_ctx = hwpa_ppe_get_context();
	struct hwpa_ppe_session *hws;
	uint32_t hash = hwpa_ipv4_gen_session_hash_raw(tuple->flow_ip, tuple->flow_ident,
						       tuple->return_ip, tuple->return_ident,
						       tuple->protocol);
	struct ppe_drv_v4_5tuple hws_tuple;
	struct hwpa_ppe_accelerator *accelerator;

	rcu_read_lock();
	hash_for_each_possible_rcu(ppe_ctx->used_hws_hlist, hws, node, hash) {
		accelerator = hwpa_ppe_get_accelerator(hws);

		if (accelerator->accel_type != HWPA_PPE_ACCELERATOR_PPE_IPV4) {
			continue;
		}

		hwpa_ppe_ipv4_fill_tuple(&hws->sess_pa->ingress, &hws_tuple);

		if (hwpa_ppe_ipv4_match_tuple(tuple, &hws_tuple)) {
			if (hws->session_flushed && only_unflushed)
				continue;

			rcu_read_unlock();
			return hws;
		}
	}
	rcu_read_unlock();

	return NULL;
}

static uint32_t hwpa_ppe_ipv4_get_hash(struct hwpa_ppe_session *hws)
{
	const struct avm_pa_pkt_match *match = &hws->sess_pa->ingress;
	const uint16_t *ports;
	const struct iphdr *ips;

	ips = hwpa_get_hdr(match, AVM_PA_IPV4);
	ports = hwpa_get_hdr(match, AVM_PA_PORTS);

	BUG_ON(hws->on_used_list);

	return hwpa_ipv4_gen_session_hash_raw(ntohl(ips->saddr), ntohs(ports[0]),
					      ntohl(ips->daddr), ntohs(ports[1]),
					      AVM_PA_PKTTYPE_IPPROTO(match->pkttype));
}

enum hwpa_backend_rv hwpa_ppe_ipv4_add_session(struct hwpa_ppe_session *hws, uint32_t *hash)
{
	struct ppe_drv_v4_rule_create *create_rule;
	struct ppe_drv_v4_5tuple *tuple;
	struct ppe_drv_v4_connection_rule *conn_rule;
	ppe_drv_ret_t ppe_rv;
	enum hwpa_backend_rv retval = HWPA_BACKEND_SUCCESS;
	struct net_device *in, *out;
	int32_t ifnum_in, ifnum_out, i;
	const struct ethhdr *ethh_ig;
	const struct avm_pa_session *sess_pa;
	const struct avm_pa_egress *eg;
	const struct avm_pa_pkt_match *ig_match, *eg_match;
	struct ppe_drv_top_if_rule *top_rule;
	struct net_device *in_master, *out_master;
	struct net_device *out_qos_dev;
	int32_t ifnum_in_master, ifnum_out_master;
	bool is_routed;
	bool eg_mac_added = false;

	PR_DEVEL("Adding hws %p\n", hws);

	create_rule = kzalloc(sizeof(*create_rule), GFP_KERNEL);
	if (!create_rule) {
		retval = HWPA_BACKEND_ERR_NO_MEM;
		goto failure_1;
	}

	tuple = &create_rule->tuple;
	conn_rule = &create_rule->conn_rule;
	top_rule = &create_rule->top_rule;
	sess_pa = hws->sess_pa;
	eg = avm_pa_first_egress(sess_pa);
	ig_match = &sess_pa->ingress;
	eg_match = &eg->match;

	out = hwpa_get_netdev(eg->pid_handle);
	if (unlikely(!out)) {
		PR_DEVEL("Could not get out netdevice!\n");
		retval = HWPA_BACKEND_ERR_INTERNAL;
		goto failure_2;
	}

	in = hwpa_get_netdev(sess_pa->ingress_pid_handle);
	if (unlikely(!in)) {
		PR_DEVEL("Could not get in netdevice!\n");
		retval = HWPA_BACKEND_ERR_INTERNAL;
		goto failure_3;
	}

	retval = hwpa_ppe_get_and_hold_ppe_master(in, &in_master);
	if (retval != HWPA_BACKEND_SUCCESS) {
		PR_DEVEL("Bad hierarchy on ingress!\n");
		goto failure_4;
	}

	if (in_master) {
		PR_DEVEL("Found master %s for dev %s\n", in_master->name, in->name);
		ifnum_in_master = ppe_drv_iface_idx_get_by_dev(in_master);
		if (ifnum_in_master < 0) {
			PR_DEVEL("%s not a ppe port\n", in_master->name);
			retval = HWPA_BACKEND_ERR_INTERNAL;
			goto failure_5;
		}
	}

	retval = hwpa_ppe_get_and_hold_ppe_master(out, &out_master);
	if (retval != HWPA_BACKEND_SUCCESS) {
		PR_DEVEL("Bad hierarchy on egress!\n");
		goto failure_5;
	}

	if (out_master) {
		PR_DEVEL("Found out master %s for dev %s\n", out_master->name, out->name);
		ifnum_out_master = ppe_drv_iface_idx_get_by_dev(out_master);
		if (ifnum_out_master < 0) {
			PR_DEVEL("%s not a ppe port\n", out_master->name);
			retval = HWPA_BACKEND_ERR_INTERNAL;
			goto failure_6;
		}
	}

	ifnum_in = ppe_drv_iface_idx_get_by_dev(in);
	if (ifnum_in < 0) {
		PR_DEVEL("%s not a ppe port\n", in->name);
		retval = HWPA_BACKEND_ERR_INTERNAL;
		goto failure_6;
	}

	ifnum_out = ppe_drv_iface_idx_get_by_dev(out);
	if (ifnum_out < 0) {
		PR_DEVEL("%s not a ppe port\n", out->name);
		retval = HWPA_BACKEND_ERR_INTERNAL;
		goto failure_6;
	}

	/*
	 * Init create rule for PPE.
	 * No special fields (e.g. PPPoE or VLAN) valid by default.
	 */
	create_rule->valid_flags = 0;
	create_rule->rule_flags = 0;

	is_routed = !!(sess_pa->mod.modflags & AVM_PA_MOD_TTL);

	/*
	 * Initialize VLAN tag information
	 */
	create_rule->vlan_rule.primary_vlan.ingress_vlan_tag = HWPA_PPE_VLAN_ID_NOT_CONFIGURED;
	create_rule->vlan_rule.primary_vlan.egress_vlan_tag = HWPA_PPE_VLAN_ID_NOT_CONFIGURED;
	create_rule->vlan_rule.secondary_vlan.ingress_vlan_tag = HWPA_PPE_VLAN_ID_NOT_CONFIGURED;
	create_rule->vlan_rule.secondary_vlan.egress_vlan_tag = HWPA_PPE_VLAN_ID_NOT_CONFIGURED;

	/* Walk the ingress headers: collect the source MAC and any VLAN tags. */
	{
		int vlan_in_cnt = 0;

		for (i = 0; i < ig_match->nmatch; ++i) {
			const struct avm_pa_match_info *p = &ig_match->match[i];
			const void *hdr = &ig_match->hdrcopy[p->offset + ig_match->hdroff];

			PR_DEVEL("ingress %i type %x offset %x\n", i, p->type, p->offset);

			switch (p->type) {
			case AVM_PA_ETH: {
				ethh_ig = hdr;
				ether_addr_copy(conn_rule->flow_mac, (u8 *)ethh_ig->h_source);
				break;
			}
			case AVM_PA_IPV4:
			case AVM_PA_PORTS:
				/* handled by hwpa_ppe_ipv4_fill_tuple later */
				break;
			case AVM_PA_PPP:
			case AVM_PA_PPPOE:
				/* Not handled yet */
				break;
			case AVM_PA_VLAN: {
				uint32_t vlan_value;

				if (is_routed || vlan_in_cnt > 1) {
					retval = HWPA_BACKEND_ERR_BAD_VLAN;
					goto failure_6;
				}

				if (p->offset != AVM_PA_OFFSET_NOT_SET) {
					/* VLAN header needs offset correction by 2 bytes because it carries no ethertype */
					const void *vlanh = &ig_match->hdrcopy[p->offset + ig_match->hdroff - 2];

					vlan_value = ntohl(*((uint32_t *) vlanh));
				} else {
					vlan_value = ((ntohs(ig_match->vlan_proto)) << 16) | (ig_match->vlan_tci);
				}

				if (vlan_in_cnt == 0)
					create_rule->vlan_rule.primary_vlan.ingress_vlan_tag = vlan_value;
				else
					create_rule->vlan_rule.secondary_vlan.ingress_vlan_tag = vlan_value;

				create_rule->valid_flags |= PPE_DRV_V4_VALID_FLAG_VLAN;
				vlan_in_cnt++;
				break;
			}
			default:
				retval = HWPA_BACKEND_ERR_INTERNAL;
				PR_DEVEL("Header not supported for offload: 0x%02x", p->type);
				goto failure_6;
			}
		}
	}

	/* Walk the egress headers: collect the destination MAC, NAT translations and VLAN tags. */
	{
		int vlan_out_cnt = 0;

		for (i = 0; i < eg_match->nmatch; ++i) {
			const struct avm_pa_match_info *p = &eg_match->match[i];
			const void *hdr = &eg_match->hdrcopy[p->offset + eg_match->hdroff];

			PR_DEVEL("egress %i type %x offset %x\n", i, p->type, p->offset);

			switch (p->type) {
			case AVM_PA_ETH: {
				const struct ethhdr *ethh_eg = hdr;

				ether_addr_copy(conn_rule->return_mac, (u8 *)ethh_eg->h_dest);
				break;
			}
			case AVM_PA_IPV4: {
				const struct iphdr *ips_xlate = hdr;

				conn_rule->flow_ip_xlate = ntohl(ips_xlate->saddr);
				conn_rule->return_ip_xlate = ntohl(ips_xlate->daddr);
				break;
			}
			case AVM_PA_PORTS: {
				const uint16_t *ports_xlate = hdr;

				conn_rule->flow_ident_xlate = ntohs(ports_xlate[0]);
				conn_rule->return_ident_xlate = ntohs(ports_xlate[1]);
				break;
			}
			case AVM_PA_VLAN: {
				uint32_t vlan_value;

				if (is_routed || vlan_out_cnt > 1) {
					retval = HWPA_BACKEND_ERR_BAD_VLAN;
					goto failure_6;
				}

				if (p->offset != AVM_PA_OFFSET_NOT_SET) {
					/* VLAN header needs offset correction by 2 bytes because it carries no ethertype */
					const void *vlanh = &eg_match->hdrcopy[p->offset + eg_match->hdroff - 2];

					vlan_value = ntohl(*((uint32_t *) vlanh));
				} else {
					vlan_value = ((ntohs(eg_match->vlan_proto)) << 16) | (eg_match->vlan_tci);
				}

				if (vlan_out_cnt == 0)
					create_rule->vlan_rule.primary_vlan.egress_vlan_tag = vlan_value;
				else
					create_rule->vlan_rule.secondary_vlan.egress_vlan_tag = vlan_value;

				create_rule->valid_flags |= PPE_DRV_V4_VALID_FLAG_VLAN;
				vlan_out_cnt++;
				break;
			}
			case AVM_PA_PPP:
			case AVM_PA_PPPOE:
				/* Not supported yet */
				break;
			default:
				retval = HWPA_BACKEND_ERR_INTERNAL;
				PR_DEVEL("Header not supported for offload: 0x%02x", p->type);
				goto failure_6;
			}
		}
	}
	/*
	 * For unidirectional offload, do not enable PPE_DRV_V4_RULE_FLAG_RETURN_VALID.
	 */
	create_rule->rule_flags |= PPE_DRV_V4_RULE_FLAG_FLOW_VALID;

#if defined(CONFIG_ARCH_IPQ5332)
	create_rule->rule_flags |= PPE_DRV_V4_RULE_FLAG_SRC_INTERFACE_CHECK;
#endif

	hwpa_ppe_ipv4_fill_tuple(&hws->sess_pa->ingress, tuple);

	conn_rule->rx_if = ifnum_in;
	conn_rule->tx_if = ifnum_out;
	conn_rule->flow_mtu = in->mtu;
	conn_rule->return_mtu = out->mtu;

	/* QoS handling */
	out_qos_dev = out_master ? : out;
	if (hwpa_ppe_qdisc_enabled(out_qos_dev)) {
		create_rule->qos_rule.flow_qos_tag = eg->output.priority;
		create_rule->qos_rule.flow_int_pri = ppe_drv_qos_int_pri_get(out_qos_dev,
									     create_rule->qos_rule.flow_qos_tag);
		create_rule->qos_rule.qos_valid_flags |= PPE_DRV_VALID_FLAG_FLOW_PPE_QOS;
		create_rule->valid_flags |= PPE_DRV_V4_VALID_FLAG_QOS;
	}

	/* Special FOS MAC handling -- see JZ-117155 */
	if (is_routed && !in_master) {
		if (!ether_addr_equal(ethh_ig->h_dest, in->dev_addr)) {
			if (unlikely(!hwpa_ppe_mac_to_wan_if_op(ifnum_in, ethh_ig->h_dest,
								FAL_IP_INGRESS, true))) {
				retval = HWPA_BACKEND_ERR_WAN_IG_MAC;
				goto failure_6;
			}
			hws->hws_in_ifmac_added = true;
		}
	}

	if (is_routed && !out_master) {
		retval = hwpa_ppe_install_egress_mac(ifnum_out, out, &eg_mac_added);
		if (retval != HWPA_BACKEND_SUCCESS)
			goto failure_7;
	}
	hws->hws_eg_ifmac_added = eg_mac_added;

	if (!is_routed) {
		top_rule->tx_if = ifnum_out;
		top_rule->rx_if = ifnum_in;
		create_rule->rule_flags |= PPE_DRV_V4_RULE_FLAG_BRIDGE_FLOW;
	} else {
		/* Innermost interfaces */
		top_rule->tx_if = out_master ? ifnum_out_master : ifnum_out;
		top_rule->rx_if = in_master ? ifnum_in_master : ifnum_in;
		create_rule->rule_flags |= PPE_DRV_V4_RULE_FLAG_ROUTED_FLOW;
	}

	if (avm_ppe_drv_port_user_type_ds(ifnum_out) || avm_ppe_drv_port_user_type_ds(ifnum_in)) {
		create_rule->rule_flags |= PPE_DRV_V4_RULE_FLAG_DS_FLOW;
		PR_DEVEL("%p: In or out port is DS capable\n", hws);
	}

	PR_DEVEL("%p create ipv4 rule:\n"
		 " protocol: %d\n"
		 " from ip: %pI4h:%d\n"
		 " to ip: %pI4h:%d\n"
		 " rule flags: 0x%x\n"
		 " valid flags: 0x%x\n"
		 " rx_if: %d (%s)\n"
		 " tx_if: %d (%s)\n"
		 " from mac: %pM\n"
		 " to mac: %pM\n"
		 " ingress_inner_vlan_tag: 0x%x\n"
		 " egress_inner_vlan_tag: 0x%x\n"
		 " ingress_outer_vlan_tag: 0x%x\n"
		 " egress_outer_vlan_tag: 0x%x\n"
		 " flow mtu / return mtu: %d / %d\n"
		 " conn_rule rx_if: %d\n"
		 " conn_rule tx_if: %d\n"
		 " from xlate_ip: %pI4h:%d\n"
		 " to xlate_ip: %pI4h:%d\n"
		 " qos flags: 0x%hhx\n"
		 " qos_rule qos_tag: %x:%x flow_int_pri %u\n",
		 hws, tuple->protocol,
		 &tuple->flow_ip, tuple->flow_ident,
		 &tuple->return_ip, tuple->return_ident,
		 create_rule->rule_flags, create_rule->valid_flags,
		 top_rule->rx_if, in->name, top_rule->tx_if, out->name,
		 &conn_rule->flow_mac, &conn_rule->return_mac,
		 create_rule->vlan_rule.primary_vlan.ingress_vlan_tag,
		 create_rule->vlan_rule.primary_vlan.egress_vlan_tag,
		 create_rule->vlan_rule.secondary_vlan.ingress_vlan_tag,
		 create_rule->vlan_rule.secondary_vlan.egress_vlan_tag,
		 conn_rule->flow_mtu, conn_rule->return_mtu,
		 conn_rule->rx_if, conn_rule->tx_if,
		 &conn_rule->flow_ip_xlate, conn_rule->flow_ident_xlate,
		 &conn_rule->return_ip_xlate, conn_rule->return_ident_xlate,
		 create_rule->qos_rule.qos_valid_flags,
		 TC_H_MAJ(create_rule->qos_rule.flow_qos_tag),
		 TC_H_MIN(create_rule->qos_rule.flow_qos_tag),
		 create_rule->qos_rule.flow_int_pri);

	ppe_rv = ppe_drv_v4_create(create_rule);
	if (ppe_rv != PPE_DRV_RET_SUCCESS) {
		PR_DEVEL("Could not accelerate hws %p, error code: %u\n", hws, ppe_rv);
		retval = HWPA_BACKEND_ERR_CREATION;
		goto failure_8;
	}

	hws->flow_index = ppe_drv_avm_flow_v4_get_index(tuple);
	if (hws->flow_index < 0) {
		pr_warn("flow index %d invalid from %s to %s\n", hws->flow_index, in->name, out->name);
		atomic_inc(&ipv4_accelerator.counter[IPV4_NO_FLOW_ID]);
		retval = HWPA_BACKEND_ERR_NO_FLOW_ID;
		goto failure_9;
	}

	*hash = hwpa_ppe_ipv4_get_hash(hws);

	PR_DEVEL("%p hws ppe flow id: %d", hws, hws->flow_index);

	/*
	 * Cleanup path: resources are released in reverse order of acquisition.
	 * On success retval is HWPA_BACKEND_SUCCESS, so only the unconditional
	 * releases (dev_put/kfree) run.
	 */
failure_9:
	if (retval != HWPA_BACKEND_SUCCESS)
		ipv4_accelerator.remove_session(hws);
failure_8:
	if (retval != HWPA_BACKEND_SUCCESS && hws->hws_eg_ifmac_added) {
		hwpa_ppe_uninstall_egress_mac();
		hws->hws_eg_ifmac_added = false;
	}
failure_7:
	if (retval != HWPA_BACKEND_SUCCESS && hws->hws_in_ifmac_added &&
	    !hwpa_ppe_mac_to_wan_if_op(ifnum_in, ethh_ig->h_dest, FAL_IP_INGRESS, false))
		pr_warn("%p: failed to remove just added wan mac for if %s\n", hws, in->name);
failure_6:
	if (out_master)
		dev_put(out_master);
failure_5:
	if (in_master)
		dev_put(in_master);
failure_4:
	dev_put(in);
failure_3:
	dev_put(out);
failure_2:
	kfree(create_rule);
failure_1:
	return retval;
}

enum hwpa_backend_rv hwpa_ppe_ipv4_rem_session(struct hwpa_ppe_session *hws)
{
	struct ppe_drv_v4_rule_destroy *destroy_rule;
	struct ppe_drv_v4_5tuple *tuple;
	ppe_drv_ret_t ppe_rv;
	enum hwpa_backend_rv retval = HWPA_BACKEND_SUCCESS;

	PR_DEVEL("Removing hws %p\n", hws);

	destroy_rule = kzalloc(sizeof(*destroy_rule), GFP_KERNEL);
	if (!destroy_rule) {
		retval = HWPA_BACKEND_ERR_NO_MEM;
		goto failure_1;
	}

	tuple = &destroy_rule->tuple;

	/*
	 * Do not perform sanity checks on the avm_pa session.
	 * These were done during offload.
	 */
	hwpa_ppe_ipv4_fill_tuple(&hws->sess_pa->ingress, tuple);

	if (hws->hws_in_ifmac_added) {
		const struct ethhdr *ethh_ig;
		const struct avm_pa_session *sess_pa;
		const struct avm_pa_pkt_match *ig_match;
		struct net_device *in;
		int32_t ifnum_in;

		sess_pa = hws->sess_pa;
		ig_match = &sess_pa->ingress;

		in = hwpa_get_netdev(sess_pa->ingress_pid_handle);
		if (unlikely(!in)) {
			PR_DEVEL("Could not get in netdevice!\n");
			goto skip_mac_removal;
		}

		ifnum_in = ppe_drv_iface_idx_get_by_dev(in);
		if (unlikely(ifnum_in < 0)) {
			PR_DEVEL("%s not a ppe port\n", in->name);
			dev_put(in);
			goto skip_mac_removal;
		}

		ethh_ig = hwpa_get_hdr(ig_match, AVM_PA_ETH);
		hwpa_ppe_mac_to_wan_if_op(ifnum_in, ethh_ig->h_dest, FAL_IP_INGRESS, false);
		dev_put(in);
	}

skip_mac_removal:
	if (hws->hws_eg_ifmac_added) {
		hwpa_ppe_uninstall_egress_mac();
		hws->hws_eg_ifmac_added = false;
	}

	if (hws->session_flushed && NULL != hwpa_ppe_ipv4_find_connection(tuple, true)) {
		PR_DEVEL("%p: Already re-offloaded, do not deaccelerate (old avm session %p)\n",
			 hws, hws->sess_pa);
		goto skip_deaccelerate;
	}

	PR_DEVEL("%p destroy rule:\n"
		 " protocol: %d\n"
		 " from ip: %pI4h:%d\n"
		 " to ip: %pI4h:%d\n",
		 hws, tuple->protocol,
		 &tuple->flow_ip, tuple->flow_ident,
		 &tuple->return_ip, tuple->return_ident);

	ppe_rv = ppe_drv_v4_destroy(destroy_rule);
	switch (ppe_rv) {
	case PPE_DRV_RET_SUCCESS:
		PR_DEVEL("Session destroyed: flow_id: %d\n", hws->flow_index);
		break;
	case PPE_DRV_RET_FAILURE_DESTROY_NO_CONN:
		PR_DEVEL("Session already flushed: flow_id: %d\n", hws->flow_index);
		break;
	default:
		PR_DEVEL("Could not de-accelerate hws %p, flow id = %d, error code: %u\n",
			 hws, hws->flow_index, ppe_rv);
		retval = HWPA_BACKEND_ERR_REMOVAL;
	}

skip_deaccelerate:
	kfree(destroy_rule);
failure_1:
	return retval;
}

/**
 * @fn static void hwpa_ppe_ipv4_stats_callback(void *app_data,
 *	struct ppe_drv_v4_conn_sync *conn_sync)
 *
 * @brief gets the latest stats from session
 * @param app_data - not used
 * @param[in] conn_sync - contains the connection info and stats
 */
static void hwpa_ppe_ipv4_stats_callback(void *app_data,
					 struct ppe_drv_v4_conn_sync *conn_sync)
{
	struct ppe_drv_v4_5tuple v4_5tuple = {
		.flow_ip = conn_sync->flow_ip,
		.flow_ident = conn_sync->flow_ident,
		.return_ip = conn_sync->return_ip,
		.return_ident = conn_sync->return_ident,
		.protocol = conn_sync->protocol
	};
	struct hwpa_ppe_session *hws = hwpa_ppe_ipv4_find_connection(&v4_5tuple, false);

	if (hws == NULL) {
		PR_DEVEL_SYNC("No session found for stats sync\n"
			      " protocol: %d\n"
			      " from ip: %pI4h:%d\n"
			      " to ip: %pI4h:%d\n",
			      v4_5tuple.protocol,
			      &v4_5tuple.flow_ip, v4_5tuple.flow_ident,
			      &v4_5tuple.return_ip, v4_5tuple.return_ident);
		return;
	}

	PR_DEVEL_SYNC("%p IPv4 Stats by callback adding: tx_bytes %d, tx_pkts %d, reason %d\n",
		      hws, conn_sync->flow_tx_byte_count, conn_sync->flow_tx_packet_count,
		      conn_sync->reason);

	hwpa_ppe_calc_abs_tx_stats(hws, conn_sync->flow_tx_byte_count,
				   conn_sync->flow_tx_packet_count);

	PR_DEVEL_SYNC("%p IPv4 Stats by callback: tx_bytes %d, total_tx_bytes %lld, tx_pkts %d, total_tx_pkts %lld",
		      hws, hws->stats.tx_bytes, hws->stats.total_tx_bytes,
		      hws->stats.tx_pkts, hws->stats.total_tx_pkts);
}

enum hwpa_backend_rv hwpa_ppe_ipv4_sync_stats(struct hwpa_ppe_session *hws)
{
	fal_entry_counter_t flow_cntrs = {0};
	sw_error_t err;

	if (hws->flow_index < 0)
		return HWPA_BACKEND_ERR_INTERNAL;

	err = fal_flow_counter_get(PPE_SWITCH_ID, hws->flow_index, &flow_cntrs);
	if (err != SW_OK) {
		PR_DEVEL_SYNC("failed to get stats for ipv4 flow at index: %u", hws->flow_index);
		return HWPA_BACKEND_ERR_INTERNAL;
	}

	hwpa_ppe_calc_abs_tx_stats(hws, flow_cntrs.matched_bytes, flow_cntrs.matched_pkts);

	PR_DEVEL_SYNC("%p IPv4 Stats: tx_bytes %d, total_tx_bytes %lld, tx_pkts %d, total_tx_pkts %lld",
		      hws, hws->stats.tx_bytes, hws->stats.total_tx_bytes,
		      hws->stats.tx_pkts, hws->stats.total_tx_pkts);

	return HWPA_BACKEND_SUCCESS;
}

enum hwpa_backend_rv hwpa_ppe_ipv4_mark_flushed_connection(struct hwpa_ppe_session *hws,
							   void *tuple)
{
	struct hwpa_ppe_context *ppe_ctx = hwpa_ppe_get_context();
	struct ppe_drv_v4_5tuple *in_tuple = (struct ppe_drv_v4_5tuple *)tuple;
	struct ppe_drv_v4_5tuple hws_tuple;

	hwpa_ppe_ipv4_fill_tuple(&hws->sess_pa->ingress, &hws_tuple);

	if (hwpa_ppe_ipv4_match_tuple(&hws_tuple, in_tuple)) {
		if (!hws->session_flushed) {
			PR_DEVEL("%p: Mark flow ID %d as flushed, avm_pa session %p\n",
				 hws, hws->flow_index, hws->sess_pa);
		}

		spin_lock_bh(&ppe_ctx->hws_list_lock);
		hws->session_flushed = true;
		spin_unlock_bh(&ppe_ctx->hws_list_lock);

		return HWPA_BACKEND_SUCCESS;
	}

	return HWPA_BACKEND_ERR_NO_MATCH;
}

static enum hwpa_backend_rv hwpa_ppe_ipv4_init(void)
{
	if (!ppe_drv_v4_stats_callback_register(hwpa_ppe_ipv4_stats_callback, NULL)) {
		PR_DEVEL("Error while registering stats CB\n");
		return HWPA_BACKEND_ERR_INTERNAL;
	}

	PR_DEVEL("hwpa ipv4 accelerator init OK\n");

	return HWPA_BACKEND_SUCCESS;
}

static enum hwpa_backend_rv hwpa_ppe_ipv4_exit(void)
{
	ppe_drv_v4_stats_callback_unregister();

	return HWPA_BACKEND_SUCCESS;
}

struct hwpa_ppe_accelerator ipv4_accelerator = {
	.accel_type = HWPA_PPE_ACCELERATOR_PPE_IPV4,
	.label = "PPE IPV4",
	.init = hwpa_ppe_ipv4_init,
	.exit = hwpa_ppe_ipv4_exit,
	.add_session = hwpa_ppe_ipv4_add_session,
	.remove_session = hwpa_ppe_ipv4_rem_session,
	.sync_session = hwpa_ppe_ipv4_sync_stats,
	.set_flushed_session = hwpa_ppe_ipv4_mark_flushed_connection,
	.dump_hws = hwpa_ppe_ipv4_dump_hws,
	.stats_updated_flags = AVM_PA_SESSION_STATS_VALID_BYTES |
			       AVM_PA_SESSION_STATS_VALID_PKTS,
	.counter_label[IPV4_NO_FLOW_ID] = "No flow id",
	.counter_count = IPV4_COUNTER_MAX,
};
enum hwpa_backend_rv hwpa_ppe_ipv4_register_accelerator(void)
{
	return hwpa_ppe_register_accelerator(&ipv4_accelerator);
}