// SPDX-License-Identifier: GPL-2.0

/**
 * @brief HWPA PPE IPv6 accelerator. Offloads IPv6 avm_pa sessions to the PPE.
 *
 */

#include "hwpa_ppe_internal.h"
#include <ppe_drv_v6.h>
#include <ppe_drv_iface.h>
#include <ppe_drv_qos.h>
#include <linux/ipv6.h>
#include <net/sch_generic.h>

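/* Defined at the end of this file; referenced for counter updates and cleanup
 * in hwpa_ppe_ipv6_add_session().
 */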
struct hwpa_ppe_accelerator ipv6_accelerator;

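/* Debug counters of this accelerator; the matching labels are assigned in
 * ipv6_accelerator below.
 */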
enum HWPA_PPE_IPV6_COUNTER {
	IPV6_NO_FLOW_ID,
	IPV6_COUNTER_MAX,
};

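/**
 * @fn static void hwpa_ppe_ipv6_fill_tuple(const struct avm_pa_pkt_match *match,
 * 				struct ppe_drv_v6_5tuple *tuple)
 *
 * @brief fill a PPE IPv6 5-tuple from an avm_pa packet match
 * @param[in] match avm_pa packet match containing the IPv6 and port headers
 * @param[out] tuple 5-tuple to fill
 */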
static void hwpa_ppe_ipv6_fill_tuple(const struct avm_pa_pkt_match *match, struct ppe_drv_v6_5tuple *tuple)
{
	const uint16_t *ports;
	const struct ipv6hdr *ips;

	ips = hwpa_get_hdr(match, AVM_PA_IPV6);
	ports = hwpa_get_hdr(match, AVM_PA_PORTS);

	PPE_DRV_IN6_TO_IPV6(tuple->flow_ip, ips->saddr);
	tuple->flow_ident = ntohs(ports[0]);
	PPE_DRV_IN6_TO_IPV6(tuple->return_ip, ips->daddr);
	tuple->return_ident = ntohs(ports[1]);
	tuple->protocol = AVM_PA_PKTTYPE_IPPROTO(match->pkttype);
}

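/**
 * @fn void hwpa_ppe_ipv6_dump_hws(hwpa_ppe_fprintf fprintffunc, void *arg,
 * 				const struct hwpa_ppe_session *hws)
 *
 * @brief dump the 5-tuple of an offloaded session via the given print callback
 * @param[in] fprintffunc print callback
 * @param[in] arg opaque argument passed to fprintffunc
 * @param[in] hws hwpa session to dump
 */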
void hwpa_ppe_ipv6_dump_hws(hwpa_ppe_fprintf fprintffunc, void *arg, const struct hwpa_ppe_session *hws)
{
	struct ppe_drv_v6_5tuple tuple = {0};

	hwpa_ppe_ipv6_fill_tuple(&hws->sess_pa->ingress, &tuple);

	/* Note JHI: %pI6 only accepts network byte order, the printed
	 * addresses will be messed up here. */
	fprintffunc(arg, "  protocol: %d\n  from ip: %pI6:%d\n  to ip: %pI6:%d\n",
				tuple.protocol,
				tuple.flow_ip, tuple.flow_ident,
				tuple.return_ip, tuple.return_ident);
}

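/**
 * @fn static bool hwpa_ppe_ipv6_match_tuple(const struct ppe_drv_v6_5tuple *tuple1,
 * 				const struct ppe_drv_v6_5tuple *tuple2)
 *
 * @brief compare two IPv6 5-tuples field by field
 * @param[in] tuple1 first tuple
 * @param[in] tuple2 second tuple
 * @retval true - tuples are equal
 * @retval false - tuples differ
 */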
static bool hwpa_ppe_ipv6_match_tuple(const struct ppe_drv_v6_5tuple *tuple1, const struct ppe_drv_v6_5tuple *tuple2)
{
#define IPV6_EQ(a, b) (memcmp((a), (b), sizeof(a)) == 0)
	/* AVM TLG/JHI: memcmp() over the whole struct does not work here due
	 * to padding.
	 */
	return (IPV6_EQ(tuple1->flow_ip, tuple2->flow_ip) &&
		(tuple1->flow_ident == tuple2->flow_ident) &&
		IPV6_EQ(tuple1->return_ip, tuple2->return_ip) &&
		(tuple1->return_ident == tuple2->return_ident) &&
		(tuple1->protocol == tuple2->protocol));
#undef IPV6_EQ
}

/**
 * @fn static struct hwpa_ppe_session *hwpa_ppe_ipv6_find_connection
 * 				(struct ppe_drv_v6_5tuple *tuple, bool only_unflushed)
 *
 * @brief search for a session matching the given 3/5 tuple
 * @param[in] tuple 3/5 tuple of the wanted session
 * @param[in] only_unflushed if set to true, only sessions that have not been flushed are returned
 * @retval hwpa session
 * @retval NULL - not found
 */
static struct hwpa_ppe_session *hwpa_ppe_ipv6_find_connection(struct ppe_drv_v6_5tuple *tuple, bool only_unflushed)
{
	struct hwpa_ppe_context *ppe_ctx = hwpa_ppe_get_context();
	struct hwpa_ppe_session *hws;
	uint32_t hash = hwpa_ipv6_gen_session_hash_raw(tuple->flow_ip, tuple->flow_ident,
				tuple->return_ip, tuple->return_ident,
				tuple->protocol);
	struct ppe_drv_v6_5tuple hws_tuple;
	struct hwpa_ppe_accelerator *accelerator;

	rcu_read_lock();
	hash_for_each_possible_rcu(ppe_ctx->used_hws_hlist, hws, node, hash) {
		accelerator = hwpa_ppe_get_accelerator(hws);
		if (accelerator->accel_type != HWPA_PPE_ACCELERATOR_PPE_IPV6) {
			continue;
		}
		hwpa_ppe_ipv6_fill_tuple(&hws->sess_pa->ingress, &hws_tuple);
		if (hwpa_ppe_ipv6_match_tuple(tuple, &hws_tuple)) {
			if (hws->session_flushed && only_unflushed)
				continue;
			rcu_read_unlock();
			return hws;
		}
	}
	rcu_read_unlock();
	return NULL;
}


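/**
 * @fn static uint32_t hwpa_ppe_ipv6_get_hash(struct hwpa_ppe_session *hws)
 *
 * @brief calculate the hash of a session for the used-session hash list
 * @param[in] hws hwpa session
 * @retval session hash
 */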
static uint32_t hwpa_ppe_ipv6_get_hash(struct hwpa_ppe_session *hws)
{
	const struct avm_pa_pkt_match *match = &hws->sess_pa->ingress;
	const uint16_t *ports;
	const struct ipv6hdr *ips;
	uint32_t saddr[4];
	uint32_t daddr[4];

	ips = hwpa_get_hdr(match, AVM_PA_IPV6);
	PPE_DRV_IN6_TO_IPV6(saddr, ips->saddr);
	PPE_DRV_IN6_TO_IPV6(daddr, ips->daddr);
	ports = hwpa_get_hdr(match, AVM_PA_PORTS);

	BUG_ON(hws->on_used_list);
	return hwpa_ipv6_gen_session_hash_raw(saddr, ntohs(ports[0]), daddr,
		ntohs(ports[1]), AVM_PA_PKTTYPE_IPPROTO(match->pkttype));
}

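/**
 * @fn enum hwpa_backend_rv hwpa_ppe_ipv6_add_session(struct hwpa_ppe_session *hws,
 * 				uint32_t *hash)
 *
 * @brief offload an IPv6 session to the PPE
 * @param[in] hws hwpa session to offload
 * @param[out] hash session hash, valid on success
 * @retval HWPA_BACKEND_SUCCESS - session accelerated
 * @retval HWPA_BACKEND_ERR_* - offload failed
 */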
enum hwpa_backend_rv hwpa_ppe_ipv6_add_session(struct hwpa_ppe_session *hws, uint32_t *hash)
{
	struct ppe_drv_v6_rule_create *create_rule;
	struct ppe_drv_v6_5tuple *tuple;
	struct ppe_drv_v6_connection_rule *conn_rule;
	ppe_drv_ret_t ppe_rv;
	enum hwpa_backend_rv retval = HWPA_BACKEND_SUCCESS;
	struct net_device *in, *out;
	int32_t ifnum_in, ifnum_out;
	const struct ethhdr *ethh_ig, *ethh_eg;
	const struct avm_pa_session *sess_pa;
	const struct avm_pa_egress *eg;
	const struct avm_pa_pkt_match *ig_match, *eg_match;
	struct ppe_drv_top_if_rule *top_rule;
	struct net_device *in_master, *out_master;
	struct net_device *out_qos_dev;
	int32_t ifnum_in_master, ifnum_out_master;
	bool is_routed;
	bool eg_mac_added = false;

	PR_DEVEL("Adding hws %p\n", hws);

	create_rule = kzalloc(sizeof(*create_rule), GFP_KERNEL);
	if (!create_rule) {
		retval = HWPA_BACKEND_ERR_NO_MEM;
		goto failure_1;
	}

	sess_pa = hws->sess_pa;
	eg = avm_pa_first_egress(sess_pa);
	ig_match = &sess_pa->ingress;
	eg_match = &eg->match;

	out = hwpa_get_netdev(eg->pid_handle);
	if (unlikely(!out)) {
		PR_DEVEL("Could not get out netdevice!\n");
		retval = HWPA_BACKEND_ERR_INTERNAL;
		goto failure_2;
	}

	in = hwpa_get_netdev(sess_pa->ingress_pid_handle);
	if (unlikely(!in)) {
		PR_DEVEL("Could not get in netdevice!\n");
		retval = HWPA_BACKEND_ERR_INTERNAL;
		goto failure_3;
	}

	retval = hwpa_ppe_get_and_hold_ppe_master(in, &in_master);
	if (retval != HWPA_BACKEND_SUCCESS) {
		PR_DEVEL("Bad hierarchy on ingress!\n");
		goto failure_4;
	}

	if (in_master) {
		PR_DEVEL("Found master %s for dev %s\n", in_master->name, in->name);
		ifnum_in_master = ppe_drv_iface_idx_get_by_dev(in_master);
		if (ifnum_in_master < 0) {
			PR_DEVEL("%s not a ppe port\n", in_master->name);
			retval = HWPA_BACKEND_ERR_INTERNAL;
			goto failure_5;
		}
	}

	retval = hwpa_ppe_get_and_hold_ppe_master(out, &out_master);
	if (retval != HWPA_BACKEND_SUCCESS) {
		PR_DEVEL("Bad hierarchy on ingress!\n");
		goto failure_5;
	}

	if (out_master) {
		PR_DEVEL("Found out master %s for dev %s\n", out_master->name, out->name);
		ifnum_out_master = ppe_drv_iface_idx_get_by_dev(out_master);
		if (ifnum_out_master < 0) {
			PR_DEVEL("%s not a ppe port\n", out_master->name);
			retval = HWPA_BACKEND_ERR_INTERNAL;
			goto failure_6;
		}
	}

	ifnum_in = ppe_drv_iface_idx_get_by_dev(in);
	if (ifnum_in < 0) {
		PR_DEVEL("%s not a ppe port\n", in->name);
		retval = HWPA_BACKEND_ERR_INTERNAL;
		goto failure_6;
	}

	ifnum_out = ppe_drv_iface_idx_get_by_dev(out);
	if (ifnum_out < 0) {
		PR_DEVEL("%s not a ppe port\n", out->name);
		retval = HWPA_BACKEND_ERR_INTERNAL;
		goto failure_6;
	}

	ethh_ig = hwpa_get_hdr(ig_match, AVM_PA_ETH);
	ethh_eg = hwpa_get_hdr(eg_match, AVM_PA_ETH);

	tuple = &create_rule->tuple;
	conn_rule = &create_rule->conn_rule;
	top_rule = &create_rule->top_rule;

	/* No special fields (e.g. PPPoE or VLAN) are valid by default */
	create_rule->valid_flags = 0;

	/* For Unidirectional offload do not enable PPE_DRV_V6_RULE_FLAG_RETURN_VALID */
	create_rule->rule_flags = PPE_DRV_V6_RULE_FLAG_FLOW_VALID;
	is_routed = !!(sess_pa->mod.modflags & AVM_PA_MOD_TTL);
	if (is_routed)
		create_rule->rule_flags |= PPE_DRV_V6_RULE_FLAG_ROUTED_FLOW;
	else
		create_rule->rule_flags |= PPE_DRV_V6_RULE_FLAG_BRIDGE_FLOW;

#if defined(CONFIG_ARCH_IPQ5332)
	create_rule->rule_flags |= PPE_DRV_V6_RULE_FLAG_SRC_INTERFACE_CHECK;
#endif

	hwpa_ppe_ipv6_fill_tuple(&hws->sess_pa->ingress, tuple);

	conn_rule->rx_if = ifnum_in;
	conn_rule->tx_if = ifnum_out;
	ether_addr_copy(conn_rule->flow_mac, (u8 *)ethh_ig->h_source);
	ether_addr_copy(conn_rule->return_mac, (u8 *)ethh_eg->h_dest);
	conn_rule->flow_mtu = in->mtu;
	conn_rule->return_mtu = out->mtu;

	/* QoS Handling */
	out_qos_dev = out_master ? : out;
	if (hwpa_ppe_qdisc_enabled(out_qos_dev)) {
		create_rule->qos_rule.flow_qos_tag   = eg->output.priority;
		create_rule->qos_rule.flow_int_pri   = ppe_drv_qos_int_pri_get(out_qos_dev, create_rule->qos_rule.flow_qos_tag);
		create_rule->qos_rule.qos_valid_flags |= PPE_DRV_VALID_FLAG_FLOW_PPE_QOS;
		create_rule->valid_flags |= PPE_DRV_V6_VALID_FLAG_QOS;
	}

	/* Special FOS MAC Handling -- see JZ-117155 */
	if (is_routed && !in_master) {
		if (!ether_addr_equal(ethh_ig->h_dest, in->dev_addr)) {
			if (unlikely(!hwpa_ppe_mac_to_wan_if_op(ifnum_in, ethh_ig->h_dest, FAL_IP_INGRESS, true))) {
				retval = HWPA_BACKEND_ERR_WAN_IG_MAC;
				goto failure_6;
			}
			hws->hws_in_ifmac_added = true;
		}
	}

	if (is_routed && !out_master) {
		retval = hwpa_ppe_install_egress_mac(ifnum_out, out, &eg_mac_added);
		if (retval != HWPA_BACKEND_SUCCESS)
			goto failure_7;

		hws->hws_eg_ifmac_added = true;
	}

	if (!is_routed) {
		top_rule->tx_if = ifnum_out;
		top_rule->rx_if = ifnum_in;
	} else {
		/* Innermost interfaces */
		top_rule->tx_if = out_master ? ifnum_out_master : ifnum_out;
		top_rule->rx_if = in_master ? ifnum_in_master : ifnum_in;
	}


	if (avm_ppe_drv_port_user_type_ds(ifnum_out) || avm_ppe_drv_port_user_type_ds(ifnum_in)) {
		create_rule->rule_flags |= PPE_DRV_V6_RULE_FLAG_DS_FLOW;
		PR_DEVEL("%p: In or out port is DS capable\n", hws);
	}

	/* Note JHI: %pI6 only accepts network byte order, the printed
	 * addresses will be messed up here. */
	PR_DEVEL("%p create IPv6 rule:\n"
				"  protocol: %d\n"
				"  from ip: %pI6:%d\n"
				"  to ip: %pI6:%d\n"
				"  rule flags: 0x%x\n"
				"  valid flags: 0x%x\n"
				"  rx_if: %d (%s)\n"
				"  tx_if: %d (%s)\n"
				"  from mac: %pM\n"
				"  to mac: %pM\n"
				"  flow mtu / return mtu: %d / %d\n"
				"  conn_rule rx_if: %d\n"
				"  conn_rule tx_if: %d\n"
				"  qos flags: 0x%hhx\n"
				"  qos_rule qos_tag: %x:%x flow_int_pri %u\n",
				hws,
				tuple->protocol,
				tuple->flow_ip, tuple->flow_ident,
				tuple->return_ip, tuple->return_ident,
				create_rule->rule_flags,
				create_rule->valid_flags,
				top_rule->rx_if, in->name,
				top_rule->tx_if, out->name,
				&conn_rule->flow_mac,
				&conn_rule->return_mac,
				conn_rule->flow_mtu, conn_rule->return_mtu,
				conn_rule->rx_if,
				conn_rule->tx_if,
				create_rule->qos_rule.qos_valid_flags,
				TC_H_MAJ(create_rule->qos_rule.flow_qos_tag), TC_H_MIN(create_rule->qos_rule.flow_qos_tag),
				create_rule->qos_rule.flow_int_pri);

	ppe_rv = ppe_drv_v6_create(create_rule);
	if (ppe_rv != PPE_DRV_RET_SUCCESS) {
		PR_DEVEL("Could not accelerate hws %p, error code: %u\n", hws, ppe_rv);
		retval = HWPA_BACKEND_ERR_CREATION;
		goto failure_8;
	}

	hws->flow_index = ppe_drv_avm_flow_v6_get_index(tuple);
	if (hws->flow_index < 0) {
		pr_warn("flow index %d invalid from %s to %s\n", hws->flow_index, in->name, out->name);
		atomic_inc(&ipv6_accelerator.counter[IPV6_NO_FLOW_ID]);
		retval = HWPA_BACKEND_ERR_NO_FLOW_ID;
		goto failure_9;
	}
	*hash = hwpa_ppe_ipv6_get_hash(hws);
	PR_DEVEL("%p hws ppe flow id: %d", hws, hws->flow_index);

failure_9:
	if (retval != HWPA_BACKEND_SUCCESS)
		ipv6_accelerator.remove_session(hws);

failure_8:
	if (retval != HWPA_BACKEND_SUCCESS && eg_mac_added)
		hwpa_ppe_uninstall_egress_mac();

failure_7:
	if (retval != HWPA_BACKEND_SUCCESS && hws->hws_in_ifmac_added &&
				!hwpa_ppe_mac_to_wan_if_op(ifnum_in, ethh_ig->h_dest, FAL_IP_INGRESS, false))
		pr_warn("%p: failed to remove just added wan mac for if %s\n", hws, in->name);

failure_6:
	if (out_master)
		dev_put(out_master);

failure_5:
	if (in_master)
		dev_put(in_master);

failure_4:
	dev_put(in);

failure_3:
	dev_put(out);

failure_2:
	kfree(create_rule);

failure_1:
	return retval;
}

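/**
 * @fn enum hwpa_backend_rv hwpa_ppe_ipv6_rem_session(struct hwpa_ppe_session *hws)
 *
 * @brief remove an offloaded IPv6 session from the PPE and undo any added MAC entries
 * @param[in] hws hwpa session to remove
 * @retval HWPA_BACKEND_SUCCESS - session removed (or already flushed / re-offloaded)
 * @retval HWPA_BACKEND_ERR_* - removal failed
 */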
enum hwpa_backend_rv hwpa_ppe_ipv6_rem_session(struct hwpa_ppe_session *hws)
{
	struct ppe_drv_v6_rule_destroy *destroy_rule;
	struct ppe_drv_v6_5tuple *tuple;
	ppe_drv_ret_t ppe_rv;
	enum hwpa_backend_rv retval = HWPA_BACKEND_SUCCESS;

	PR_DEVEL("Removing hws %p\n", hws);

	destroy_rule = kzalloc(sizeof(*destroy_rule), GFP_KERNEL);
	if (!destroy_rule) {
		retval = HWPA_BACKEND_ERR_NO_MEM;
		goto failure_1;
	}

	tuple = &destroy_rule->tuple;

	/*
	 * Do not perform sanity checks in avm_pa session.
	 * These were done during offload
	 */
	hwpa_ppe_ipv6_fill_tuple(&hws->sess_pa->ingress, tuple);

	if (hws->hws_in_ifmac_added) {
		const struct ethhdr *ethh_ig;
		const struct avm_pa_session *sess_pa;
		const struct avm_pa_pkt_match *ig_match;
		struct net_device *in;
		int32_t ifnum_in;

		sess_pa = hws->sess_pa;
		ig_match = &sess_pa->ingress;

		in = hwpa_get_netdev(sess_pa->ingress_pid_handle);
		if (unlikely(!in)) {
			PR_DEVEL("Could not get in netdevice!\n");
			goto skip_mac_removal;
		}

		ifnum_in = ppe_drv_iface_idx_get_by_dev(in);
		if (unlikely(ifnum_in < 0)) {
			PR_DEVEL("%s not a ppe port\n", in->name);
			dev_put(in);
			goto skip_mac_removal;
		}

		ethh_ig = hwpa_get_hdr(ig_match, AVM_PA_ETH);

		hwpa_ppe_mac_to_wan_if_op(ifnum_in, ethh_ig->h_dest, FAL_IP_INGRESS, false);

		dev_put(in);
	}

skip_mac_removal:
	if (hws->hws_eg_ifmac_added)
		hwpa_ppe_uninstall_egress_mac();

	if (hws->session_flushed && hwpa_ppe_ipv6_find_connection(tuple, true)) {
		PR_DEVEL("%p: Already re-offloaded, do not deaccelerate (old avm session %p)\n",
			hws, hws->sess_pa);
		goto skip_deaccelerate;
	}

	/* Note JHI: %pI6 only accepts network byte order, the printed
	 * addresses will be messed up here. */
	PR_DEVEL("%p destroy rule:\n"
				"  protocol: %d\n"
				"  from ip: %pI6:%d\n"
				"  to ip: %pI6:%d\n",
				hws,
				tuple->protocol,
				tuple->flow_ip, tuple->flow_ident,
				tuple->return_ip, tuple->return_ident);

	ppe_rv = ppe_drv_v6_destroy(destroy_rule);
	switch (ppe_rv) {
	case PPE_DRV_RET_SUCCESS:
		PR_DEVEL("Session destroyed: flow_id: %d\n", hws->flow_index);
		break;
	case PPE_DRV_RET_FAILURE_DESTROY_NO_CONN:
		PR_DEVEL("Session already flushed: flow_id: %d\n", hws->flow_index);
		break;
	default:
		PR_DEVEL("Could not de-accelerate hws %p, flow id = %d, error code: %u\n",
			hws, hws->flow_index, ppe_rv);
		retval = HWPA_BACKEND_ERR_REMOVAL;
	}

skip_deaccelerate:
	kfree(destroy_rule);

failure_1:
	return retval;
}

/**
 * @fn static void hwpa_ppe_ipv6_stats_callback(void *app_data,
 * 					struct ppe_drv_v6_conn_sync *conn_sync)
 *
 * @brief gets the latest stats from session
 * @param app_data - not used
 * @param[in] conn_sync - contains the connection info and stats
 */
static void hwpa_ppe_ipv6_stats_callback(void *app_data, struct ppe_drv_v6_conn_sync *conn_sync)
{
#define INIT_IPV6_MEMBER(to, from) .to[0] = from[0], .to[1] = from[1], .to[2] = from[2], .to[3] = from[3]
	struct ppe_drv_v6_5tuple v6_5tuple = {
		INIT_IPV6_MEMBER(flow_ip, conn_sync->flow_ip),
		.flow_ident = conn_sync->flow_ident,
		INIT_IPV6_MEMBER(return_ip, conn_sync->return_ip),
		.return_ident = conn_sync->return_ident,
		.protocol = conn_sync->protocol
	};
#undef INIT_IPV6_MEMBER
	struct hwpa_ppe_session *hws = hwpa_ppe_ipv6_find_connection(&v6_5tuple, false);

	if (!hws) {
		/* Note JHI: %pI6 only accepts network byte order, the printed
		 * addresses will be messed up here. */
		PR_DEVEL_SYNC("No session found for stats sync\n"
				"  protocol: %d\n"
				"  from ip: %pI6:%d\n"
				"  to ip: %pI6:%d\n",
				v6_5tuple.protocol,
				v6_5tuple.flow_ip, v6_5tuple.flow_ident,
				v6_5tuple.return_ip, v6_5tuple.return_ident);
		return;
	}
	PR_DEVEL_SYNC("%p IPv6 Stats by callback adding: tx_bytes %d, tx_pkts %d, reason %d\n", hws,
			conn_sync->flow_tx_byte_count, conn_sync->flow_tx_packet_count,
			conn_sync->reason);

	hwpa_ppe_calc_abs_tx_stats(hws, conn_sync->flow_tx_byte_count, conn_sync->flow_tx_packet_count);

	PR_DEVEL_SYNC("%p IPv6 Stats by callback: tx_bytes %d, total_tx_bytes %lld, tx_pkts %d, total_tx_pkts %lld",
		hws,
		hws->stats.tx_bytes, hws->stats.total_tx_bytes,
		hws->stats.tx_pkts, hws->stats.total_tx_pkts);
}

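/**
 * @fn enum hwpa_backend_rv hwpa_ppe_ipv6_sync_stats(struct hwpa_ppe_session *hws)
 *
 * @brief read the flow counters of a session from the PPE and update its stats
 * @param[in] hws hwpa session to sync
 * @retval HWPA_BACKEND_SUCCESS - stats updated
 * @retval HWPA_BACKEND_ERR_INTERNAL - invalid flow index or counter read failed
 */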
enum hwpa_backend_rv hwpa_ppe_ipv6_sync_stats(struct hwpa_ppe_session *hws)
{
	fal_entry_counter_t flow_cntrs = {0};
	sw_error_t err;

	if (hws->flow_index < 0)
		return HWPA_BACKEND_ERR_INTERNAL;

	err = fal_flow_counter_get(PPE_SWITCH_ID,
			hws->flow_index, &flow_cntrs);

	if (err != SW_OK) {
		PR_DEVEL_SYNC("failed to get stats for ipv6 flow at index: %u", hws->flow_index);
		return HWPA_BACKEND_ERR_INTERNAL;
	}

	hwpa_ppe_calc_abs_tx_stats(hws, flow_cntrs.matched_bytes, flow_cntrs.matched_pkts);

	PR_DEVEL_SYNC("%p IPv6 Stats: tx_bytes %d, total_tx_bytes %lld, tx_pkts %d, total_tx_pkts %lld",
		hws,
		hws->stats.tx_bytes, hws->stats.total_tx_bytes,
		hws->stats.tx_pkts, hws->stats.total_tx_pkts);

	return HWPA_BACKEND_SUCCESS;
}

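/**
 * @fn enum hwpa_backend_rv hwpa_ppe_ipv6_mark_flushed_connection(
 * 				struct hwpa_ppe_session *hws, void *tuple)
 *
 * @brief mark a session as flushed if it matches the given 5-tuple
 * @param[in] hws hwpa session to check
 * @param[in] tuple pointer to the struct ppe_drv_v6_5tuple of the flushed connection
 * @retval HWPA_BACKEND_SUCCESS - session matched and marked as flushed
 * @retval HWPA_BACKEND_ERR_NO_MATCH - session does not match the tuple
 */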
enum hwpa_backend_rv hwpa_ppe_ipv6_mark_flushed_connection(
		struct hwpa_ppe_session *hws, void *tuple)
{
	struct hwpa_ppe_context *ppe_ctx = hwpa_ppe_get_context();
	struct ppe_drv_v6_5tuple *in_tuple = (struct ppe_drv_v6_5tuple *)tuple;
	struct ppe_drv_v6_5tuple hws_tuple;

	hwpa_ppe_ipv6_fill_tuple(&hws->sess_pa->ingress, &hws_tuple);
	if (hwpa_ppe_ipv6_match_tuple(&hws_tuple, in_tuple)) {
		spin_lock_bh(&ppe_ctx->hws_list_lock);
		hws->session_flushed = true;
		spin_unlock_bh(&ppe_ctx->hws_list_lock);
		return HWPA_BACKEND_SUCCESS;
	}
	return HWPA_BACKEND_ERR_NO_MATCH;
}

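/**
 * @fn static enum hwpa_backend_rv hwpa_ppe_ipv6_init(void)
 *
 * @brief register the IPv6 stats callback with the PPE driver
 * @retval HWPA_BACKEND_SUCCESS - initialization done
 * @retval HWPA_BACKEND_ERR_INTERNAL - callback registration failed
 */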
static enum hwpa_backend_rv hwpa_ppe_ipv6_init(void)
{
	if (!ppe_drv_v6_stats_callback_register(hwpa_ppe_ipv6_stats_callback, NULL)) {
		PR_DEVEL("Error while register stats CB\n");
		return HWPA_BACKEND_ERR_INTERNAL;
	}
	PR_DEVEL("hwpa ipv6 accelerator init OK\n");

	return HWPA_BACKEND_SUCCESS;
}

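/**
 * @fn static enum hwpa_backend_rv hwpa_ppe_ipv6_exit(void)
 *
 * @brief unregister the IPv6 stats callback from the PPE driver
 * @retval HWPA_BACKEND_SUCCESS
 */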
static enum hwpa_backend_rv hwpa_ppe_ipv6_exit(void)
{
	ppe_drv_v6_stats_callback_unregister();

	return HWPA_BACKEND_SUCCESS;
}

struct hwpa_ppe_accelerator ipv6_accelerator = {
	.accel_type = HWPA_PPE_ACCELERATOR_PPE_IPV6,
	.label = "PPE IPV6",
	.init = hwpa_ppe_ipv6_init,
	.exit = hwpa_ppe_ipv6_exit,
	.add_session = hwpa_ppe_ipv6_add_session,
	.remove_session = hwpa_ppe_ipv6_rem_session,
	.sync_session = hwpa_ppe_ipv6_sync_stats,
	.set_flushed_session = hwpa_ppe_ipv6_mark_flushed_connection,
	.dump_hws = hwpa_ppe_ipv6_dump_hws,
	.counter_label[IPV6_NO_FLOW_ID] = "No flow id",
	.counter_count = IPV6_COUNTER_MAX,
};

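/**
 * @fn enum hwpa_backend_rv hwpa_ppe_ipv6_register_accelerator(void)
 *
 * @brief register the IPv6 accelerator with the hwpa ppe core
 * @retval return value of hwpa_ppe_register_accelerator()
 */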
enum hwpa_backend_rv hwpa_ppe_ipv6_register_accelerator(void)
{
	return hwpa_ppe_register_accelerator(&ipv6_accelerator);
}