// SPDX-License-Identifier: GPL-2.0
/**
 * @brief HWPA Accelerator frontend for MHT bridging Sessions
 */

#include "hwpa_ppe_internal.h"

/* Note: the original include targets were not recoverable; these are the
 * kernel headers this file plainly relies on (vlan_ethhdr, ether_addr_copy,
 * workqueues, spinlocks).
 */
#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>

enum HWPA_PPE_MHT_L2_COUNTER {
	MHT_L2_NO_CAP,
	MHT_L2_ACL_ADD_HW_ERR,
	MHT_L2_COUNTER_MAX,
};

/* Time between each stat refresh of mht_l2 sessions */
#define MHT_L2_OFL_STAT_SYNC_PERIOD_MS 1000

/* Taken from ISISC_MAX_FILTER */
#define MHT_L2_OFL_MAX_RULES 96

/* See Datasheet -- ACL_COUNTER_MODE (0x09F4) has exactly 32 counters */
#define MHT_L2_OFL_MAX_COUNTERS 32

/* Prio: x/7 (fixed)
 * ALWAYS set bsession prio to a constant predefined value. No use
 * taking any priority from single packets as bridge sessions handle
 * multiple connections between two client systems. Choose lowest prio here.
 */
#define MHT_L2_OFL_ACL_DEFAULT_QUEUE 7

/* External switches always have dev_id 1 inside ssdk */
#define PPE_EXTERNAL_SWITCH_ID 1

/**
 * @enum mht_l2_sess_state
 * @brief state of a mht_l2 session
 */
enum mht_l2_sess_state {
	MHT_L2_SESS_STATE_OFF = 0,
	MHT_L2_SESS_STATE_ACTIVE,
	MHT_L2_SESS_STATE_MAX,
};

/**
 * @struct mht_l2_tuple
 * @brief tuple used for identification of a mht_l2 session. Will be held by hws.
 */
struct mht_l2_tuple {
	struct vlan_ethhdr hdr; /**< ethernet header infos */
};

/**
 * @struct hw_bsession_info
 * @brief hw bsession info held locally within this accelerator
 */
struct hw_bsession_info {
	enum mht_l2_sess_state state;
	struct mht_l2_tuple tuple;	/**< session information */
	a_uint64_t byte_cnt;		/**< byte traffic counter */
	int32_t flow_index;		/**< ACL rule ID */
	uint32_t src_port_bmp;		/**< src port */
	uint32_t dst_port_bmp;		/**< dest port */
};

/**
 * @struct mht_l2_ofl_priv
 * @brief private accelerator data
 */
struct mht_l2_ofl_priv {
	struct delayed_work stats_work_item;		/**< gathering stats from ppe */
	struct workqueue_struct *stats_workqueue;	/**< gathering stats from ppe */
	struct hw_bsession_info infos[MHT_L2_OFL_MAX_RULES]; /**< list of all hw bsessions */
	spinlock_t bsess_lock;				/**< bsession list lock */
	struct mutex acl_mutex;				/**< acl list/rule write access protection */
	DECLARE_BITMAP(acl_alloc_bmp, MHT_L2_OFL_MAX_RULES); /**< data for quick finding of free sessions */
};

static struct hwpa_ppe_accelerator mht_l2_accelerator;
static struct mht_l2_ofl_priv data = {0};

/**
 * @brief Allocate a rule in one of the rule bitmaps by setting a bit and return bit-nr.
 *
 * @param alloc_bmp Pointer to bitmap
 * @param size bitmap size in bits
 * @param offs Start offset in bitmap
 * @return bit-nr that was set / -1 if all bits were already set
 */
static int rule_alloc(unsigned long *alloc_bmp, unsigned int size, unsigned int offs)
{
	unsigned int unused;

	do {
		unused = find_next_zero_bit(alloc_bmp, size, offs);
		if (unlikely(unused >= size))
			return -1;
	} while (test_and_set_bit(unused, alloc_bmp));

	PR_DEVEL("alloc rule with (acl_alloc, %u, %u) = %u", size, offs, unused);

	return unused;
}
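/*
 * Usage sketch (illustrative only, mirroring how add_session()/rem_session()
 * below drive the allocator): rule_alloc() and rule_free() are paired under
 * data.acl_mutex, e.g.
 *
 *	mutex_lock(&data.acl_mutex);
 *	id = rule_alloc(data.acl_alloc_bmp, MHT_L2_OFL_MAX_RULES, 0);
 *	if (id >= 0) {
 *		// ... program hardware; on error:
 *		rule_free(data.acl_alloc_bmp, MHT_L2_OFL_MAX_RULES, id);
 *	}
 *	mutex_unlock(&data.acl_mutex);
 */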
/**
 * @brief Free a rule in one of the rule bitmaps by clearing one bit.
 *
 * @param alloc_bmp Pointer to bitmap
 * @param size bitmap size in bits
 * @param id bit to clear
 */
static inline void rule_free(unsigned long *alloc_bmp, unsigned int size, int id)
{
	PR_DEVEL("free rule with (acl_alloc, %d)", id);

	if (likely(id >= 0 && id < size))
		clear_bit(id, alloc_bmp);
}

/**
 * @brief Try to get a hw bsession after performing some sanity checks
 *
 * @param flow_index acl rule id
 * @return hw bsession pointer or NULL if flow_index invalid
 */
static struct hw_bsession_info *get_hw_bsession_info(int32_t flow_index)
{
	if (unlikely(flow_index < 0 || flow_index >= MHT_L2_OFL_MAX_RULES))
		return NULL;

	return &data.infos[flow_index];
}

/**
 * @brief Remove a bridge session from hardware
 *
 * This unbinds and deletes the ACL rule and associated list and clears the
 * rule from the rule allocation bitmap in software.
 *
 * @param acl_id the ACL rule ID to delete
 * @param src_port_bmp the port bitmap the ACL rule is bound to
 * @return SW_OK if all OK or SSDK error code if not
 */
static sw_error_t acl_remove(int acl_id, fal_pbmp_t src_port_bmp)
{
	sw_error_t rv = SW_OK;
	a_uint8_t src_port;

	if (unlikely(acl_id < 0))
		return SW_BAD_VALUE;

	rv = fal_acl_rule_delete(PPE_EXTERNAL_SWITCH_ID, acl_id, 0, 1);
	PR_DEVEL("fal_acl_rule_delete(%d, %d, 0, 1) = %d", PPE_EXTERNAL_SWITCH_ID, acl_id, rv);
	if (unlikely(rv != SW_OK)) {
		pr_err("fal_acl_rule_delete(%d, %d, 0, 1) failed (%d)",
		       PPE_EXTERNAL_SWITCH_ID, acl_id, rv);
	}

	src_port = ffs(src_port_bmp) - 1;
	rv = fal_acl_list_unbind(PPE_EXTERNAL_SWITCH_ID, acl_id, FAL_ACL_DIREC_IN,
				 FAL_ACL_BIND_PORT, src_port);
	PR_DEVEL("fal_acl_list_unbind(%d, %d, FAL_ACL_DIREC_IN, FAL_ACL_BIND_PORT, %d) = %d",
		 PPE_EXTERNAL_SWITCH_ID, acl_id, src_port, rv);
	if (unlikely(rv != SW_OK)) {
		pr_err("fal_acl_list_unbind(%d, %d, %d, %d, %d) failed (%d).",
		       PPE_EXTERNAL_SWITCH_ID, acl_id, FAL_ACL_DIREC_IN,
		       FAL_ACL_BIND_PORT, src_port, rv);
		return rv;
	}

	rv = fal_acl_list_destroy(PPE_EXTERNAL_SWITCH_ID, acl_id);
	PR_DEVEL("fal_acl_list_destroy(%d, %d) = %d", PPE_EXTERNAL_SWITCH_ID, acl_id, rv);
	if (unlikely(rv != SW_OK)) {
		pr_err("fal_acl_list_destroy(%d, %d) failed (%d)",
		       PPE_EXTERNAL_SWITCH_ID, acl_id, rv);
	}

	PR_DEVEL("freed acl rule id %d, src port bmp %02x", acl_id, src_port_bmp);

	return rv;
}
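/*
 * Note (derived from acl_remove() above and acl_setup() below): teardown runs
 * the setup steps in reverse -- delete the rule, unbind the list from the
 * ingress port, destroy the list. acl_remove() deliberately continues after a
 * failed rule delete so the list binding is still released.
 */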
/**
 * @brief Install ACL rule in hardware
 *
 * This installs an ACL rule into a new list with ID in acl_id parameter.
 * The list is bound to src_port.
 * The ACL rule is cleared from the rule bitmap and bsession list on error.
 *
 * @param acl_id The acl rule id to use
 * @param src_port_bmp source port bitmap
 * @param acl_rule The acl rule to hand to the fal api
 * @return fal api error code
 */
static sw_error_t acl_setup(int acl_id, fal_pbmp_t src_port_bmp, fal_acl_rule_t *acl_rule)
{
	sw_error_t rv = SW_OK;
	fal_pbmp_t tmp_pbmp = src_port_bmp;
	uint8_t src_port;

	if (unlikely(!tmp_pbmp))
		return SW_BAD_PARAM;

	/* Create a new acl list -- it really is fal_acl_list_creat without 'e'... */
	rv = fal_acl_list_creat(PPE_EXTERNAL_SWITCH_ID, acl_id, 0);
	PR_DEVEL("fal_acl_list_creat(%d, %d, 0) = %d", PPE_EXTERNAL_SWITCH_ID, acl_id, rv);
	if (unlikely(rv != SW_OK)) {
		pr_err("fal_acl_list_creat(%d, %d, 0) failed (%d).",
		       PPE_EXTERNAL_SWITCH_ID, acl_id, rv);
		goto err_acl_id;
	}

	src_port = ffs(tmp_pbmp) - 1;
	rv = fal_acl_list_bind(PPE_EXTERNAL_SWITCH_ID, acl_id, FAL_ACL_DIREC_IN,
			       FAL_ACL_BIND_PORT, src_port);
	PR_DEVEL("fal_acl_list_bind(%d, %d, FAL_ACL_DIREC_IN, FAL_ACL_BIND_PORT, %d) = %d",
		 PPE_EXTERNAL_SWITCH_ID, acl_id, src_port, rv);
	if (unlikely(rv != SW_OK)) {
		pr_err("fal_acl_list_bind(%d, %d, %d, %d, %d) failed (%d)",
		       PPE_EXTERNAL_SWITCH_ID, acl_id, FAL_ACL_DIREC_IN,
		       FAL_ACL_BIND_PORT, src_port, rv);
		goto err_acl_list;
	}

	rv = fal_acl_rule_add(PPE_EXTERNAL_SWITCH_ID, acl_id, 0, 1, acl_rule);
	PR_DEVEL("fal_acl_rule_add(%d, %d, 0, 1, acl_rule) = %d",
		 PPE_EXTERNAL_SWITCH_ID, acl_id, rv);
	if (unlikely(rv != SW_OK)) {
		pr_err("fal_acl_rule_add(%d, %d, 0, 1, acl_rule) failed (%d)",
		       PPE_EXTERNAL_SWITCH_ID, acl_id, rv);
		goto err_acl_unbind;
	}

	return rv;

err_acl_unbind:
	src_port = ffs(tmp_pbmp) - 1;
	rv = fal_acl_list_unbind(PPE_EXTERNAL_SWITCH_ID, acl_id, FAL_ACL_DIREC_IN,
				 FAL_ACL_BIND_PORT, src_port);
	PR_DEVEL("fal_acl_list_unbind(%d, %d, FAL_ACL_DIREC_IN, FAL_ACL_BIND_PORT) = %d",
		 PPE_EXTERNAL_SWITCH_ID, acl_id, rv);
	if (unlikely(rv != SW_OK)) {
		pr_err("fal_acl_list_unbind(%d, %d, %d, %d, %d) failed (%d)",
		       PPE_EXTERNAL_SWITCH_ID, acl_id, FAL_ACL_DIREC_IN,
		       FAL_ACL_BIND_PORT, src_port, rv);
		return rv;
	}

err_acl_list:
	rv = fal_acl_list_destroy(PPE_EXTERNAL_SWITCH_ID, acl_id);
	PR_DEVEL("fal_acl_list_destroy(%d, %d) = %d", PPE_EXTERNAL_SWITCH_ID, acl_id, rv);
	if (unlikely(rv != SW_OK)) {
		pr_err("Also fal_acl_list_destroy(%d, %d) failed (%d)",
		       PPE_EXTERNAL_SWITCH_ID, acl_id, rv);
	}

err_acl_id:
	return rv;
}
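/*
 * Design note (as implemented in this file): every bsession owns exactly one
 * ACL list holding a single rule at index 0. The list ID, the slot in
 * data.infos[] and -- for IDs below MHT_L2_OFL_MAX_COUNTERS -- the policer
 * counter ID are all the same value (acl_id == flow_index == cnt_id).
 */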
/**
 * @brief Install an ACL counter for an ACL rule
 *
 * This modifies the acl_rule struct and prepares an ACL "policer" as counter.
 * Needs to be called before installing the acl rule passed to this function.
 *
 * @param cnt_id ACL counter ID
 * @param acl_rule ACL rule to install counter to
 */
static void acl_cnt_setup(int cnt_id, fal_acl_rule_t *acl_rule)
{
	fal_acl_policer_t policer;
	sw_error_t rv;

	/* Alloc counter; if none available, bsessions just time out. */
	/* All acl rules with ID < MHT_L2_OFL_MAX_COUNTERS get a counter with the same ID. */
	if (cnt_id >= MHT_L2_OFL_MAX_COUNTERS || cnt_id < 0) {
		PR_DEVEL("Out of acl counters: %d", cnt_id);
		return;
	}

	PR_DEVEL("acl_cnt_id = %d", cnt_id);

	/* POLICER is used for rate limit / ACL counters. We have 32 (id=0..31). */
	rv = fal_rate_acl_policer_get(PPE_EXTERNAL_SWITCH_ID, cnt_id, &policer);
	PR_DEVEL("fal_rate_acl_policer_get(%d, %d, &policer) = %d",
		 PPE_EXTERNAL_SWITCH_ID, cnt_id, rv);
	if (likely(rv == SW_OK)) {
		policer.counter_mode = A_TRUE;
		policer.meter_unit = FAL_BYTE_BASED;
		rv = fal_rate_acl_policer_set(PPE_EXTERNAL_SWITCH_ID, cnt_id, &policer);
		PR_DEVEL("fal_rate_acl_policer_set(%d, %d, &policer) = %d",
			 PPE_EXTERNAL_SWITCH_ID, cnt_id, rv);
		if (rv != SW_OK) {
			pr_err("fal_rate_acl_policer_set(%d, %d, &policer) failed (%d).",
			       PPE_EXTERNAL_SWITCH_ID, cnt_id, rv);
		} else {
			acl_rule->policer_ptr = cnt_id;
			FAL_ACTION_FLG_SET(acl_rule->action_flg, FAL_ACL_ACTION_POLICER_EN);
		}
	} else {
		pr_err("fal_rate_acl_policer_get(%d, %d, &policer) failed (%d).",
		       PPE_EXTERNAL_SWITCH_ID, cnt_id, rv);
	}
}

/**
 * @brief Setup a bridge session by setting up an ACL rule
 *
 * @param info Pointer to a hw_bsession_info struct describing the session
 * @return fal api error code
 */
static sw_error_t bsession_setup(struct hw_bsession_info *info)
{
	sw_error_t rv;
	fal_acl_rule_t acl_rule;
	uint32_t acl_id = info->flow_index;

	memset((void *)&acl_rule, 0, sizeof(acl_rule));

	acl_rule.rule_type = FAL_ACL_RULE_MAC;
	FAL_FIELD_FLG_SET(acl_rule.field_flg, FAL_ACL_FIELD_MAC_DA);
	FAL_FIELD_FLG_SET(acl_rule.field_flg, FAL_ACL_FIELD_MAC_SA);
	ether_addr_copy((u8 *)acl_rule.src_mac_val.uc, (u8 *)info->tuple.hdr.h_source);
	ether_addr_copy((u8 *)acl_rule.dest_mac_val.uc, (u8 *)info->tuple.hdr.h_dest);
	FAL_ACTION_FLG_SET(acl_rule.action_flg, FAL_ACL_ACTION_PERMIT);
	FAL_ACTION_FLG_SET(acl_rule.action_flg, FAL_ACL_ACTION_REDPT);
	acl_rule.ports = info->dst_port_bmp;
	memset(acl_rule.src_mac_mask.uc, 0xff, ETH_ALEN);
	memset(acl_rule.dest_mac_mask.uc, 0xff, ETH_ALEN);
	acl_rule.queue = MHT_L2_OFL_ACL_DEFAULT_QUEUE;
	FAL_ACTION_FLG_SET(acl_rule.action_flg, FAL_ACL_ACTION_REMARK_QUEUE);

	acl_cnt_setup(acl_id, &acl_rule);

	rv = acl_setup(acl_id, info->src_port_bmp, &acl_rule);
	if (rv != SW_OK)
		return rv;

	PR_DEVEL("Created acl rule %d, Prio %u, src-Port-bmp %02x, dest-Port-bmp %02x, src MAC %pMF; dest MAC %pMF",
		 acl_id, acl_rule.queue, info->src_port_bmp, acl_rule.ports,
		 acl_rule.src_mac_val.uc, acl_rule.dest_mac_val.uc);

	return rv;
}
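/*
 * Note: the ACL rule installed by bsession_setup() matches on source and
 * destination MAC only (full /ff masks). The VLAN TCI carried in struct
 * mht_l2_tuple is used purely for session hashing and debug dumps below, not
 * for hardware matching.
 */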
/**
 * @brief Read statistics data from hardware and update sessions
 *
 * This is run in regular intervals from the WQ as delayed work to
 * gather statistics data from hardware and update session data.
 * Hardware counters are reset after reading.
 *
 * This reads ACL counters for bsessions (if an ACL counter was allocated).
 * Hit detection for bsessions is currently not supported.
 *
 * @param work Used to reschedule this delayed work
 */
static void mht_l2_stats_work(struct work_struct *work)
{
	sw_error_t rv;
	int i;
	int acl_id;
	fal_acl_policer_t policer;

	for (i = 0; i < MHT_L2_OFL_MAX_COUNTERS; i++) {
		spin_lock_bh(&data.bsess_lock);
		if (unlikely(data.infos[i].state != MHT_L2_SESS_STATE_ACTIVE)) {
			spin_unlock_bh(&data.bsess_lock);
			continue;
		}
		spin_unlock_bh(&data.bsess_lock);

		acl_id = i;
		rv = fal_rate_acl_policer_get(PPE_EXTERNAL_SWITCH_ID, acl_id, &policer);
		if (unlikely(rv != SW_OK)) {
			pr_warn("fal_rate_acl_policer_get(%d, %d, &policer) failed (%d)",
				PPE_EXTERNAL_SWITCH_ID, acl_id, rv);
			goto finished;
		}

		spin_lock_bh(&data.bsess_lock);
		data.infos[i].byte_cnt = policer.counter_low |
					 ((u64)policer.counter_high << 32);
		spin_unlock_bh(&data.bsess_lock);

		/* reset counter */
		policer.counter_low = 0;
		policer.counter_high = 0;
		rv = fal_rate_acl_policer_set(PPE_EXTERNAL_SWITCH_ID, acl_id, &policer);
		if (unlikely(rv != SW_OK)) {
			pr_warn("fal_rate_acl_policer_set(%d, %d, &policer) failed (%d)",
				PPE_EXTERNAL_SWITCH_ID, acl_id, rv);
		}
	}

finished:
	queue_delayed_work(data.stats_workqueue, &data.stats_work_item,
			   msecs_to_jiffies(MHT_L2_OFL_STAT_SYNC_PERIOD_MS));
}

static void hwpa_ppe_mht_l2_fill_tuple(const struct avm_pa_pkt_match *match,
				       struct mht_l2_tuple *tuple)
{
	const struct ethhdr *hdr;

	hdr = (const struct ethhdr *)hwpa_get_hdr(match, AVM_PA_ETH);

	ether_addr_copy((u8 *)tuple->hdr.h_source, (u8 *)hdr->h_source);
	ether_addr_copy((u8 *)tuple->hdr.h_dest, (u8 *)hdr->h_dest);
	tuple->hdr.h_vlan_TCI = match->vlan_tci;
}

static void hwpa_ppe_mht_l2_dump_hws(hwpa_ppe_fprintf fprintffunc, void *arg,
				     const struct hwpa_ppe_session *hws)
{
	struct mht_l2_tuple tuple = {0};

	hwpa_ppe_mht_l2_fill_tuple(&hws->sess_pa->ingress, &tuple);

	fprintffunc(arg, " vlan_TCI: %d\n from mac: %pM\n to mac: %pM\n",
		    tuple.hdr.h_vlan_TCI, tuple.hdr.h_source, tuple.hdr.h_dest);
}

static uint32_t hwpa_ppe_mht_l2_get_hash(struct hwpa_ppe_session *hws)
{
	const struct avm_pa_pkt_match *match = &hws->sess_pa->ingress;
	const struct ethhdr *hdr;

	hdr = (const struct ethhdr *)hwpa_get_hdr(match, AVM_PA_ETH);

	return hwpa_l2_gen_session_hash_raw(hdr->h_source, hdr->h_dest, match->vlan_tci);
}

static enum hwpa_backend_rv hwpa_ppe_mht_l2_sync_stats(struct hwpa_ppe_session *hws)
{
	struct hw_bsession_info *info;
	struct hwpa_ppe_stats *stats;

	info = get_hw_bsession_info(hws->flow_index);
	if (!info)
		return HWPA_BACKEND_ERR_INTERNAL;

	stats = &hws->stats;

	spin_lock_bh(&data.bsess_lock);
	if (likely(info->state == MHT_L2_SESS_STATE_ACTIVE)) {
		stats->tx_bytes += info->byte_cnt;
		stats->total_tx_bytes += info->byte_cnt;
		info->byte_cnt = 0;
		hws->stats_updated = true;
	} else {
		stats->tx_bytes = 0;
	}
	spin_unlock_bh(&data.bsess_lock);

	PR_DEVEL_SYNC("%p MHT_L2 Stats: tx_bytes %d, tx_pkts %d\n",
		      hws, hws->stats.tx_bytes, hws->stats.tx_pkts);

	return HWPA_BACKEND_SUCCESS;
}
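/*
 * Stats flow (as wired up above): mht_l2_stats_work() periodically copies the
 * per-rule policer byte counter into info->byte_cnt and clears the hardware
 * counter; hwpa_ppe_mht_l2_sync_stats() then drains byte_cnt into hws->stats
 * under bsess_lock. Packet counts are not available from the policer, hence
 * only AVM_PA_SESSION_STATS_VALID_BYTES is reported by this accelerator.
 */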
static enum hwpa_backend_rv hwpa_ppe_mht_l2_add_session(struct hwpa_ppe_session *hws,
							uint32_t *hash)
{
	enum hwpa_backend_rv retval = HWPA_BACKEND_SUCCESS;
	sw_error_t rv;
	int acl_id;
	struct hw_bsession_info *info;
	struct hwpa_ppe_context *ppe_ctx = hwpa_ppe_get_context();
	const struct avm_pa_session *sess_pa;
	const struct avm_pa_egress *eg;
	const struct avm_pa_pkt_match *ig_match, *eg_match;

	sess_pa = hws->sess_pa;
	eg = avm_pa_first_egress(sess_pa);
	ig_match = &sess_pa->ingress;
	eg_match = &eg->match;

	/* Mutex instead of spinlock is needed here as deep down in ssdk a mutex is used */
	mutex_lock(&data.acl_mutex);

	acl_id = rule_alloc(data.acl_alloc_bmp, MHT_L2_OFL_MAX_RULES, 0);
	if (unlikely(acl_id < 0 || acl_id >= MHT_L2_OFL_MAX_RULES)) {
		pr_warn("Could not allocate acl_id -- Out of entries!\n");
		retval = HWPA_BACKEND_ERR_CREATION;
		atomic_inc(&mht_l2_accelerator.counter[MHT_L2_NO_CAP]);
		goto failure_1;
	}

	info = &data.infos[acl_id];
	info->byte_cnt = 0;
	info->flow_index = acl_id;
	memset((void *)&hws->stats, 0, sizeof(hws->stats));

	hwpa_ppe_mht_l2_fill_tuple(ig_match, &info->tuple);
	info->dst_port_bmp = ppe_ctx->pid_info[eg->pid_handle].mht_port_bmp;
	info->src_port_bmp = ppe_ctx->pid_info[sess_pa->ingress_pid_handle].mht_port_bmp;

	rv = bsession_setup(info);
	if (rv != SW_OK) {
		PR_DEVEL("Could not offload mht_l2 session %d\n", acl_id);
		retval = HWPA_BACKEND_ERR_CREATION;
		atomic_inc(&mht_l2_accelerator.counter[MHT_L2_ACL_ADD_HW_ERR]);
		goto failure_2;
	}

	hws->flow_index = acl_id;
	*hash = hwpa_ppe_mht_l2_get_hash(hws);

	spin_lock_bh(&data.bsess_lock);
	info->state = MHT_L2_SESS_STATE_ACTIVE;
	spin_unlock_bh(&data.bsess_lock);

	PR_DEVEL("%p hws ppe flow id: %d", hws, hws->flow_index);

	retval = HWPA_BACKEND_SUCCESS;
	mutex_unlock(&data.acl_mutex);
	return retval;

failure_2:
	rule_free(data.acl_alloc_bmp, MHT_L2_OFL_MAX_RULES, acl_id);
failure_1:
	mutex_unlock(&data.acl_mutex);
	return retval;
}

static enum hwpa_backend_rv hwpa_ppe_mht_l2_rem_session(struct hwpa_ppe_session *hws)
{
	enum hwpa_backend_rv retval = HWPA_BACKEND_SUCCESS;
	uint32_t acl_id;
	struct hw_bsession_info *info;

	mutex_lock(&data.acl_mutex);

	info = get_hw_bsession_info(hws->flow_index);
	if (unlikely(!info)) {
		pr_err("cannot remove mht_l2 session with flow_id %d!\n", hws->flow_index);
		retval = HWPA_BACKEND_ERR_REMOVAL;
		goto failure_1;
	}

	spin_lock_bh(&data.bsess_lock);
	if (unlikely(info->state != MHT_L2_SESS_STATE_ACTIVE)) {
		spin_unlock_bh(&data.bsess_lock);
		pr_err("Bad mht_l2 session state for flow_id %d!\n", hws->flow_index);
		retval = HWPA_BACKEND_ERR_REMOVAL;
		goto failure_1;
	}
	info->state = MHT_L2_SESS_STATE_OFF;
	spin_unlock_bh(&data.bsess_lock);

	acl_id = hws->flow_index;
	hws->flow_index = -1;

	acl_remove(acl_id, info->src_port_bmp);
	rule_free(data.acl_alloc_bmp, MHT_L2_OFL_MAX_RULES, acl_id);

failure_1:
	mutex_unlock(&data.acl_mutex);
	return retval;
}
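/*
 * Note: hwpa_ppe_mht_l2_rem_session() frees the bitmap slot even when
 * acl_remove() reports a hardware error, so a failed teardown does not
 * permanently consume one of the MHT_L2_OFL_MAX_RULES rule IDs.
 */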
static enum hwpa_backend_rv hwpa_ppe_mht_l2_init(void)
{
	enum hwpa_backend_rv retval = HWPA_BACKEND_SUCCESS;

	spin_lock_init(&data.bsess_lock);
	mutex_init(&data.acl_mutex);
	bitmap_zero(data.acl_alloc_bmp, MHT_L2_OFL_MAX_RULES);

	data.stats_workqueue = create_singlethread_workqueue("mht_l2_stats");
	if (unlikely(!data.stats_workqueue)) {
		pr_err("OOM in init. No mht_l2 offload.\n");
		retval = HWPA_BACKEND_ERR_INTERNAL;
		goto failure_1;
	}

	/* Enable ACL on the external switch as it is off by default */
	fal_acl_status_set(PPE_EXTERNAL_SWITCH_ID, A_TRUE);

	INIT_DELAYED_WORK(&data.stats_work_item, mht_l2_stats_work);
	queue_delayed_work(data.stats_workqueue, &data.stats_work_item,
			   msecs_to_jiffies(MHT_L2_OFL_STAT_SYNC_PERIOD_MS));

failure_1:
	return retval;
}

static enum hwpa_backend_rv hwpa_ppe_mht_l2_exit(void)
{
	cancel_delayed_work_sync(&data.stats_work_item);
	destroy_workqueue(data.stats_workqueue);
	fal_acl_status_set(PPE_EXTERNAL_SWITCH_ID, A_FALSE);

	return HWPA_BACKEND_SUCCESS;
}

static struct hwpa_ppe_accelerator mht_l2_accelerator = {
	.accel_type = HWPA_PPE_ACCELERATOR_PPE_MHT_L2,
	.label = "PPE MHT L2",
	.init = hwpa_ppe_mht_l2_init,
	.exit = hwpa_ppe_mht_l2_exit,
	.add_session = hwpa_ppe_mht_l2_add_session,
	.remove_session = hwpa_ppe_mht_l2_rem_session,
	.sync_session = hwpa_ppe_mht_l2_sync_stats,
	.set_flushed_session = NULL, /* MHT cannot flush ACL rules */
	.dump_hws = hwpa_ppe_mht_l2_dump_hws,
	/* The ACL policer can only count packets or bytes. We choose bytes here. */
	.stats_updated_flags = AVM_PA_SESSION_STATS_VALID_BYTES,
	.counter_label[MHT_L2_NO_CAP] = "No ACL capacity",
	.counter_label[MHT_L2_ACL_ADD_HW_ERR] = "ACL Add HW Error",
	.counter_count = MHT_L2_COUNTER_MAX,
};

enum hwpa_backend_rv hwpa_ppe_mht_l2_register_accelerator(void)
{
	return hwpa_ppe_register_accelerator(&mht_l2_accelerator);
}
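/*
 * Usage sketch (assumption, not part of this file): the PPE frontend is
 * expected to call hwpa_ppe_mht_l2_register_accelerator() once during its own
 * setup, after which init/exit/add/remove/sync are driven through the
 * mht_l2_accelerator ops above, e.g.
 *
 *	if (hwpa_ppe_mht_l2_register_accelerator() != HWPA_BACKEND_SUCCESS)
 *		pr_err("hwpa_ppe: failed to register MHT L2 accelerator\n");
 */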