// SPDX-License-Identifier: GPL-2.0

/**
 * @brief AVM Hardware PA (hwpa) for NSS
 * @author Christos Mimidis
 *
 * This file implements the hwpa backend for the QCA NSS. It uses the
 * NSS API and realizes the interface between AVM_PA/HWPA and the NSS for
 * session offloading.
 */

/*
 *==============================================================================
 * HWPA NSS includes and global defines
 *==============================================================================
 */

/*
 * uncomment to enable some debugging mechanisms and more verbose output
 */
//#define HWPA_NSS_DEBUG

#ifdef HWPA_NSS_DEBUG
#define pr_fmt(fmt) "%s:%s: " fmt, KBUILD_MODNAME, __func__
#else
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#endif

#include <avm/pa/avm_pa.h>
#include <avm/pa/avm_pa_hw.h>

#include <linux/if_bridge.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/if_pppox.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/hashtable.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/proc_fs.h>
#include <nss_api_if.h>
#include "hwpa.h"
#include "hwpa_nss.h"
#include <linux/rhashtable.h>
#include <avm_ssdk_wrapper.h>

/*
 * Disable NAT or PT acceleration
 *
 * 0 - accelerate traffic involving NAT or PT
 * 1 - do not accelerate traffic involving NAT or PT
 */
#if defined(CONFIG_ARCH_IPQ5018)
#define HWPA_NSS_DISABLE_NAT_ACCEL 0
#elif defined(CONFIG_ARCH_IPQ8074)
/*
 * DO NOT disable config for Hawkeye (see JZ-117155)
 */
#define HWPA_NSS_DISABLE_NAT_ACCEL 1
#else
#error "Unsupported platform for nss offloading"
#endif
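
/*
 * A minimal sketch (an assumption, not code from this driver) of how this
 * knob would gate an offload; the nat mode values and the return code are
 * the ones defined further down in this file:
 *
 *	if (HWPA_NSS_DISABLE_NAT_ACCEL &&
 *	    (nat_mode == HWPA_NSS_IPV4_NAT_MODE_SNAT ||
 *	     nat_mode == HWPA_NSS_IPV4_NAT_MODE_DNAT))
 *		return HWPA_BACKEND_UNSUPPORTED_SESS_TYPE;
 */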


/* Period for sending a sync_many msg down to NSS */
#define HWPA_NSS_STATS_SYNC_PERIOD msecs_to_jiffies(1000)

/* Reduce sync frequency to keep activity on mdio a bit lower */
#define HWPA_NSS_QCA833X_STATS_SYNC_PERIOD msecs_to_jiffies(1000)

/*
 * The length of all label strings
 */
#define HWPA_NSS_LABEL_SIZE 32

/*
 * Maximum number of parallel offloads. Offloading sessions to and removing
 * them from the NSS takes time (a few ms), and as these actions are
 * interlocked by a mutex, adding new sessions can delay the removal of old
 * ones. To defuse this potential issue, the number of parallel offloads is
 * limited. A large value offloads as many sessions as possible, while a
 * small value avoids the session jam described above. The current value was
 * determined by creating as many iperf sessions as possible, each living
 * 1 second, repeated every 5 seconds; with this value the number of
 * sessions in the system reached zero again.
 */
#define HWPA_NSS_MAX_SIMULTANEOUS_OFFLOADS 100
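
/*
 * Sketch of how this limit pairs with the ratelimit_counter in struct
 * hwpa_nss_context below (an illustration of the intended pattern, not a
 * verbatim copy of the enforcement code):
 *
 *	if (atomic_inc_return(&hwpa_nss_ctx.ratelimit_counter) >
 *	    HWPA_NSS_MAX_SIMULTANEOUS_OFFLOADS) {
 *		atomic_dec(&hwpa_nss_ctx.ratelimit_counter);
 *		return HWPA_BACKEND_ERR_INTERNAL;
 *	}
 */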

/*
 * Timings for pending offloads.
 */
#define HWPA_NSS_PENDING_OFFLOAD_PERIOD msecs_to_jiffies(1000)
#define HWPA_NSS_UDP_MAX_WAITING_TIME msecs_to_jiffies(4000)

/* NSS Session Limits */
#define NSS_MAX_IPV4_SESSIONS		1024
#define NSS_MAX_IPV4_SESSIONS_LOG2	ilog2(NSS_MAX_IPV4_SESSIONS)
#define NSS_MAX_IPV6_SESSIONS		1024
#define NSS_MAX_IPV6_SESSIONS_LOG2	ilog2(NSS_MAX_IPV6_SESSIONS)
#define QCA_MAX_QCA833X_SESSIONS	96
#define QCA_MAX_833X_SESSIONS_LOG2	ilog2(roundup_pow_of_two(QCA_MAX_QCA833X_SESSIONS))

#ifdef HWPA_NSS_DEBUG
#define NSS_MAGIC_BASE			0xCAFE
#define NSS_MAGIC_NUMBER(n)		(NSS_MAGIC_BASE+(n))
#define NSS_SESSION_MAGIC		NSS_MAGIC_NUMBER(0)
#define HWPA_SESSION_MAGIC		NSS_MAGIC_NUMBER(1)
#define IPV4_SPECIFIC_MAGIC		NSS_MAGIC_NUMBER(2)
#define IPV4_SESSION_DATA_MAGIC		NSS_MAGIC_NUMBER(3)
#define IPV6_SPECIFIC_MAGIC		NSS_MAGIC_NUMBER(4)
#define IPV6_SESSION_DATA_MAGIC		NSS_MAGIC_NUMBER(5)
#define QCA833X_SPECIFIC_MAGIC		NSS_MAGIC_NUMBER(6)
#define QCA833X_SESSION_DATA_MAGIC	NSS_MAGIC_NUMBER(7)

#define PR_DEVEL(fmt, ...)		pr_err(fmt, ##__VA_ARGS__)
#else
#define PR_DEVEL(fmt, ...)
#endif

/* Default Value for VLAN IDs. Taken from ECM. */
#define HWPA_NSS_VLAN_ID_NOT_CONFIGURED 0xFFF

/* QoS Tag Mask -- Taken from ECM */
#define HWPA_NSS_QOS_TAG_MASK 0xFFF

/* Some ipv6 helpers */
#define IPV6_ADDR_MATCH(a, b) \
	((a[0] == b[0]) && (a[1] == b[1]) && (a[2] == b[2]) && (a[3] == b[3]))
#define IPV6_ADDR_XOR(a) (a[0] ^ a[1] ^ a[2] ^ a[3])
#define IPV6_COPY(from, to) \
	do { \
		to[0] = htonl(from[0]); \
		to[1] = htonl(from[1]); \
		to[2] = htonl(from[2]); \
		to[3] = htonl(from[3]); \
	} while (0)
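
/*
 * Usage example (illustrative only; ip6h and tuple are placeholder names):
 * IPV6_COPY applies htonl() to each of the four 32-bit words while copying,
 * so both helpers operate on the same representation:
 *
 *	uint32_t ip[4];
 *
 *	IPV6_COPY(ip6h->saddr.in6_u.u6_addr32, ip);
 *	if (IPV6_ADDR_MATCH(ip, tuple->flow_ip))
 *		...
 */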

#define MAC_ADDR_XOR(a) ((*((uint32_t *)(&((uint8_t *)a)[0]))) ^\
			 (*((uint32_t *)(&((uint8_t *)a)[2]))))

/*
 * Limits for building the session hierarchy
 */
#define HWPA_NSS_MAX_INTERFACES 10
#define HWPA_NSS_DIRS 2

/*
 * Forward definitions
 */
struct hwpa_nss_offloading_data;
struct hwpa_nss_offloader;
struct hwpa_nss_subsystem;

/*
 *==============================================================================
 * HWPA NSS session structs
 *==============================================================================
 */

/**
 * @enum hwpa_nss_nat_mode
 * @brief NAT Modes supported by NSS
 */
enum hwpa_nss_nat_mode {
	HWPA_NSS_NAT_ERROR = -1,            /* Error indication */
	HWPA_NSS_IPV4_NAT_MODE_BRIDGED = 0, /* Bridged Traffic */
	HWPA_NSS_IPV4_NAT_MODE_SNAT,       /* Egress NAT */
	HWPA_NSS_IPV4_NAT_MODE_DNAT,       /* Ingress NAT */
	HWPA_NSS_IPV4_NAT_MODE_PORT_NAT,   /* routing, modified ports only */
	HWPA_NSS_IPV4_NAT_MODE_NO_NAT,     /* routing, but no NAT */
	HWPA_NSS_IPV6_NAT_MODE_BRIDGED,    /* Bridged Traffic */
	/* No NAT support for IPV6 */
	HWPA_NSS_IPV6_NAT_MODE_PORT_NAT,   /* routing, modified ports only */
	HWPA_NSS_IPV6_NAT_MODE_NO_NAT,     /* routing, but no NAT */
	HWPA_NSS_QCA833X_NAT_MODE_BRIDGED, /* Bridged traffic on external switch */

	HWPA_NSS_NAT_MODE_MAX
};

/**
 * @struct hwpa_nss_ipv4_session_data
 * @brief data for an nss ipv4 session. Used for identification and removal of a
 * session.
 */
struct hwpa_nss_ipv4_session_data	{
	struct nss_ipv4_5tuple tuple;
	uint32_t flow_ident_xlate;
	uint32_t flow_ip_xlate;
	uint32_t return_ident_xlate;
	uint32_t return_ip_xlate;
};

/**
 * @struct hwpa_nss_ipv6_session_data
 * @brief data for an nss ipv6 session. Used for identification and removal of a
 * session.
 */
struct hwpa_nss_ipv6_session_data {
	struct nss_ipv6_5tuple tuple;
};

/**
 * @struct hwpa_qca833x_session_data
 * @brief data for a qca833x session. Used for identification and removal of a
 * session.
 */
struct hwpa_qca833x_session_data	{
	struct qca833x_ofl_session tuple;
};

/**
 * @struct hwpa_nss_stats
 * @brief Stats sync data for an nss session.
 */
struct hwpa_nss_stats	{
	uint32_t flow_rx_bytes;
	uint32_t flow_rx_pkts;
	uint32_t flow_tx_bytes;
	uint32_t flow_tx_pkts;
	uint32_t return_rx_bytes;
	uint32_t return_rx_pkts;
	uint32_t return_tx_bytes;
	uint32_t return_tx_pkts;
};

/**
 * @enum hwpa_nss_session_flag
 * @brief Enum for state, type and sync flags of an NSS session.
 *
 */
enum hwpa_nss_session_flag {
	/* Set if stats were read by AVM_PA (for flow and return direction) */
	HWPA_NSS_SESSION_SYNC_FLOW_UPDATED,
	HWPA_NSS_SESSION_SYNC_RETURN_UPDATED,

	/* Set if Session was flushed by NSS */
	HWPA_NSS_SESSION_FLUSHED,

	HWPA_NSS_SESSION_MAX
};
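
/*
 * These flags are bit numbers for the unsigned long "flags" member of
 * struct hwpa_nss_nss_session. A sketch, assuming the usual kernel bitops
 * pattern:
 *
 *	set_bit(HWPA_NSS_SESSION_FLUSHED, &hws_nss->flags);
 *	if (test_bit(HWPA_NSS_SESSION_SYNC_FLOW_UPDATED, &hws_nss->flags))
 *		...
 */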

/**
 * @enum hwpa_nss_session_state
 * @brief State of a nss session
 */
enum hwpa_nss_session_state	{
	HWPA_NSS_SESSION_STATE_INITIALIZED,
	HWPA_NSS_SESSION_STATE_PREPARED,
	HWPA_NSS_SESSION_STATE_READY_TO_OFFLOAD,
	HWPA_NSS_SESSION_STATE_PENDING_APPROVAL,
	HWPA_NSS_SESSION_STATE_ACTIVE,
	HWPA_NSS_SESSION_STATE_INVALID,
	HWPA_NSS_SESSION_STATE_BROKEN,
	HWPA_NSS_SESSION_STATE_MAX
};

/**
 * @struct hwpa_nss_nss_session
 * @brief a struct encapsulating all required data for a single NSS-Session
 *
 */
struct hwpa_nss_nss_session {
#ifdef HWPA_NSS_DEBUG
	uint16_t magic;
#endif

	/* Flags encoded with enum hwpa_nss_session_flag */
	unsigned long flags;
	enum hwpa_nss_session_state state;
	struct hwpa_nss_offloader *offloader;

	/* subsystem-specific data */
	union	{
		struct hwpa_nss_ipv4_session_data ipv4;
		struct hwpa_nss_ipv6_session_data ipv6;
		struct hwpa_qca833x_session_data qca833x;
	};

	/* hashlist node */
	struct hlist_node node;

	/* offloader node */
	struct list_head ofl_node;

	/* A list with all hwpa-sessions assigned to a specific NSS-Session */
	struct list_head hwpa_session_list;

	/* Session stats */
	struct hwpa_nss_stats stats;

	/* lock used for stat-accesses */
	spinlock_t sync_lock;

	/* A reference count for hwpa sessions attached */
	atomic_t pa_ref_count;
};

/**
 * @enum hwpa_nss_session_direction
 * @brief The direction of an hwpa session.
 *
 */
enum hwpa_nss_session_direction	{
	HWPA_NSS_SESSION_DIRECTION_FLOW,
	HWPA_NSS_SESSION_DIRECTION_RETURN,
	HWPA_NSS_SESSION_DIRECTION_DONT_CARE,
	HWPA_NSS_SESSION_DIRECTION_MAX,
};

/**
 * @struct hwpa_nss_hwpa_session
 * @brief An interface between avm_pa/hwpa and NSS for a session. Specifies
 * a direction for a hwpa_nss_session. This is what AVM_PA/HWPA "sees" from HWPA_NSS.
 */
struct hwpa_nss_hwpa_session {
#ifdef HWPA_NSS_DEBUG
	uint16_t magic;
#endif
	enum hwpa_nss_session_direction direction;
	struct list_head node;
	struct hwpa_nss_nss_session *hws_nss;
	struct hwpa_nss_offloader *offloader;
	const struct avm_pa_session *sess_pa;
};

/*
 *==============================================================================
 * HWPA NSS offloaders
 *==============================================================================
 */

/**
 * @struct hwpa_nss_offloader
 * @brief Data struct for each HWPA NSS Subsystem Offloader
 */
struct hwpa_nss_offloader	{
	/* The subsystem this offloader is assigned to */
	struct hwpa_nss_subsystem *subsys;

	/* a list of sessions handled by this offloader and a lock for this list */
	struct list_head session_list;
	spinlock_t list_lock;

	char label[HWPA_NSS_LABEL_SIZE];

	/* API functions */
	enum hwpa_backend_rv (*init)(struct hwpa_nss_subsystem *subsys);
	void		     (*exit)(struct hwpa_nss_subsystem *subsys);
	enum hwpa_backend_rv (*prepare_session)(struct hwpa_nss_offloading_data *ofl_data);
	enum hwpa_backend_rv (*add_session)(struct hwpa_nss_offloading_data *ofl_data);
	enum hwpa_backend_rv (*remove_session)(struct hwpa_nss_subsystem *subsys,
						struct hwpa_nss_nss_session *hws_nss);
	enum hwpa_backend_rv (*change_session)(struct hwpa_nss_subsystem *subsys,
						struct hwpa_nss_nss_session *hws_nss);

	/* Session Counters */
	spinlock_t lock;
	uint32_t active_nss_session_count, avm_pa_session_count;
	uint32_t pending_nss_session_count;
	uint32_t failed_nss_offloads, successful_nss_offloads;
	uint32_t flushed_sessions;
};

/*
 *==============================================================================
 * HWPA NSS subsystems
 *==============================================================================
 */

/**
 * @struct hwpa_nss_tracker
 * @brief A subsystem tracker
 */
struct hwpa_nss_tracker {
	uint16_t usage;
	uint16_t limit;
	spinlock_t lock;

	enum hwpa_backend_rv (*init)(struct hwpa_nss_subsystem *subsys);
};

/**
 * @struct hwpa_nss_subsys_msg
 * @brief subsystem-specific NSS message. Needed for synchronization with the NSS.
 *
 */
struct hwpa_nss_subsys_msg {
	union {
		struct nss_ipv4_msg *ipv4;
		struct nss_ipv6_msg *ipv6;
	};
};

/**
 * @struct hwpa_nss_ipv4_specific
 * @brief IPV4 NSS subsystem specific data.
 */
struct hwpa_nss_ipv4_specific {
#ifdef HWPA_NSS_DEBUG
	uint16_t magic;
#endif
	/*
	 * The Hashtable used for tracking all pending and offloaded nss ipv4
	 * sessions
	 */
	DECLARE_HASHTABLE(session_table, NSS_MAX_IPV4_SESSIONS_LOG2);

	unsigned long int next_req_time;
	unsigned long int roll_check_jiffies;
	uint32_t sync_info_len;
	uint32_t sync_info_idx;
	struct nss_ipv4_conn_sync sync_info[NSS_MAX_IPV4_SESSIONS];
};

/**
 * @struct hwpa_nss_ipv6_specific
 * @brief IPV6 NSS subsystem specific data.
 */
struct hwpa_nss_ipv6_specific {
#ifdef HWPA_NSS_DEBUG
	uint16_t magic;
#endif
	/*
	 * The Hashtable used for tracking all pending and offloaded nss ipv6
	 * sessions
	 */
	DECLARE_HASHTABLE(session_table, NSS_MAX_IPV6_SESSIONS_LOG2);

	unsigned long int next_req_time;
	unsigned long int roll_check_jiffies;
	uint32_t sync_info_len;
	uint32_t sync_info_idx;
	struct nss_ipv6_conn_sync sync_info[NSS_MAX_IPV6_SESSIONS];
};

/**
 * @struct hwpa_qca833x_specific
 * @brief QCA833X subsystem specific data.
 */
struct hwpa_qca833x_specific {
#ifdef HWPA_NSS_DEBUG
	uint16_t magic;
#endif
	struct qca833x_api *api;

	/*
	 * The Hashtable used for tracking all pending and offloaded qca833x
	 * sessions
	 */
	DECLARE_HASHTABLE(session_table, QCA_MAX_833X_SESSIONS_LOG2);
};

/**
 * @struct hwpa_nss_synchronizer
 * @brief Stats synchronization of an nss subsystem
 *
 */
struct hwpa_nss_synchronizer {
	/* The nss synchronization message */
	struct hwpa_nss_subsys_msg msg;

	/*
	 * A lock used for read and write accesses on the subsystem message
	 * above
	 */
	spinlock_t lock;

	/* workqueue for periodic stat synchronization */
	struct workqueue_struct *workqueue;
	struct delayed_work work;

	/* API functions */
	enum hwpa_backend_rv (*init)(struct hwpa_nss_subsystem *subsys);
	void (*exit)(struct hwpa_nss_subsystem *subsys);
};

/**
 * @enum hwpa_nss_subsystem_flag
 * @brief NSS Subsystem representation flags
 */
enum hwpa_nss_subsystem_flag {
	HWPA_NSS_SUBSYS_FLAG_BIDIRECTIONAL_SESSIONS,
	HWPA_NSS_SUBSYS_FLAG_FLOW_NO_PKT_STATS,
	HWPA_NSS_SUBSYS_FLAG_DISABLE_STAT_COLLECTION,
	HWPA_NSS_SUBSYS_FLAG_READY_TO_COLLECT_SYNC_INFO,
	HWPA_NSS_SUBSYS_FLAG_FLUSHABLE_SESSIONS,
	HWPA_NSS_SUBSYS_FLAG_MAX
};

/**
 * @struct hwpa_nss_subsystem
 * @brief Representation of a NSS Subsystem/Interface
 */
struct hwpa_nss_subsystem {
	unsigned long flags;
	char label[HWPA_NSS_LABEL_SIZE];

	/* NSS interface handle for subsystem */
	struct nss_ctx_instance *mgr;

	struct hwpa_nss_synchronizer *sync;
	struct hwpa_nss_tracker *tracker;

	/* API functions */
	enum hwpa_backend_rv (*init)(struct hwpa_nss_subsystem *subsys);
	void (*exit)(struct hwpa_nss_subsystem *subsys);
	uint32_t (*gen_hash)(struct hwpa_nss_nss_session *hws_nss);
	void (*register_nss_session)(struct hwpa_nss_subsystem *subsys,
							uint32_t hash, struct hwpa_nss_nss_session *hws_nss);
	struct hwpa_nss_nss_session* (*find_nss_session)(struct hwpa_nss_subsystem *subsys,
							uint32_t hash, struct hwpa_nss_nss_session *hws_nss, enum hwpa_nss_session_direction dir);
	void (*purge_sessions)(struct hwpa_nss_subsystem *subsys);

	/* Subsystem specific data */
	union	{
		struct hwpa_nss_ipv4_specific *ipv4_spec;
		struct hwpa_nss_ipv6_specific *ipv6_spec;
		struct hwpa_qca833x_specific  *qca833x_spec;
		void *spec;
	};
};
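
/*
 * The function pointers above form the per-subsystem dispatch interface.
 * A sketch of the typical lookup/registration sequence (assuming the caller
 * serializes access via the global mutex):
 *
 *	hash = subsys->gen_hash(hws_nss);
 *	found = subsys->find_nss_session(subsys, hash, hws_nss,
 *					 HWPA_NSS_SESSION_DIRECTION_DONT_CARE);
 *	if (!found)
 *		subsys->register_nss_session(subsys, hash, hws_nss);
 */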

/*
 *==============================================================================
 * HWPA NSS private global context
 *==============================================================================
 */

/**
 * @enum hwpa_nss_subsystem_idx
 * @brief Subsystem Index
 *
 */
enum hwpa_nss_subsystem_idx	{
	HWPA_NSS_SUBSYSTEM_IDX_IPV4,
	HWPA_NSS_SUBSYSTEM_IDX_IPV6,
	HWPA_NSS_SUBSYSTEM_IDX_QCA833X,
	HWPA_NSS_SUBSYSTEM_IDX_MAX,
};

/**
 * @enum hwpa_nss_offloader_idx
 * @brief Offloader Index
 *
 */
enum hwpa_nss_offloader_idx	{
	HWPA_NSS_OFFLOADER_IDX_IPV4,
	HWPA_NSS_OFFLOADER_IDX_IPV6,
	HWPA_NSS_OFFLOADER_IDX_QCA833X,
	HWPA_NSS_OFFLOADER_IDX_MAX,
};

/**
 * @enum hwpa_nss_ep_type
 * @brief Type of Endpoint
 *
 */
enum hwpa_nss_ep_type {
	EP_TYPE_UNDEFINED,
	EP_TYPE_NSS,
	EP_TYPE_QCA833X,
	EP_TYPE_MAX,
};

/**
 * @struct hwpa_nss_ep_info
 * @brief Info for Endpoint
 */
struct hwpa_nss_ep_info {
	enum hwpa_nss_ep_type type; /* type of endpoint */
	uint32_t port_bmp_833x; /* port bitmap for qca833x ports */
};

/**
 * @struct hwpa_nss_pending_offload_manager
 * @brief a manager for pending offloads
 *
 */
struct hwpa_nss_pending_offload_manager {
	/* a list of all currently pending offloads */
	struct list_head pending_offloads;

	/* a lock for the pending_offloads list */
	spinlock_t lock;

	/*
	 * A workqueue which lets the pending_offload_manager periodically check
	 * all pending offloads to see whether a timing condition was hit.
	 */
	struct workqueue_struct *workqueue;
	struct delayed_work work;
};

/**
 * @struct hwpa_nss_context
 * @brief private, global data struct for the hwpa_nss subsystem
 */
struct hwpa_nss_context	{
	/* contexts for nss subsystems */
	struct hwpa_nss_subsystem *subsystems[HWPA_NSS_SUBSYSTEM_IDX_MAX];

	/* the offloading instances */
	struct hwpa_nss_offloader *offloaders[HWPA_NSS_OFFLOADER_IDX_MAX];

	/* kmem caches for locally stored hwpa and nss sessions */
	struct kmem_cache *kmem_hwpa;
	struct kmem_cache *kmem_nss;
	spinlock_t lock;

	/*
	 * This mutex is used for locking the specific datasets for all subsystems
	 * (Basically the hashlist containing all nss sessions)
	 */
	struct mutex mutex;

	/* Manager for pending offloads */
	struct hwpa_nss_pending_offload_manager pending_offload_mgr;

	/* interface registry */
	struct hwpa_nss_ep_info if_reg[NSS_MAX_NET_INTERFACES];

	/* for limiting the maximum number of parallel offloads */
	atomic_t ratelimit_counter;
};

/**
 * hwpa_nss private data used globally in this file
 */
static struct hwpa_nss_context hwpa_nss_ctx;

/**
 * @enum hwpa_nss_if_type
 * @brief nss interface types supported by hwpa_nss
 */
enum hwpa_nss_if_type {
	HWPA_NSS_IF_TYPE_ETH,
	HWPA_NSS_IF_TYPE_BRIDGE,
	HWPA_NSS_IF_TYPE_MAX
};

/**
 * @struct hwpa_nss_if_data
 * @brief a struct describing the nss counterpart of a netdevice
 */
struct hwpa_nss_if_data	{
	int32_t ifnum;
	struct hwpa_nss_ep_info *ep_info;
	enum hwpa_nss_if_type type;
};

/**
 * @struct hwpa_nss_offloading_data
 * @brief encapsulation of all data required for a single offload
 */
struct hwpa_nss_offloading_data	{
	/* All information extracted from an avm_pa session during an offload */
	struct net_device *in, *out, *bridge;
	enum hwpa_nss_nat_mode nat_mode;
	enum generic_ct_dir ct_dir;
	struct hwpa_nss_hwpa_session *hws_hwpa;
	struct hwpa_nss_nss_session *hws_nss;
	const struct avm_pa_session *sess_pa;
	const struct avm_pa_egress *eg;
	const struct avm_pa_pkt_match *ig_match, *eg_match;
	void *nss_msg;
	bool is_routed;
	uint8_t protocol;
	/* interface hierarchy */
	struct hwpa_nss_if_data interfaces[HWPA_NSS_DIRS][HWPA_NSS_MAX_INTERFACES];
	uint8_t if_max_indices[HWPA_NSS_DIRS];

	/* A list node for the list of all pending offloads */
	struct list_head node;

	/*
	 * A timestamp for the pending offload manager to determine the age of a
	 * pending offload
	 */
	uint32_t timestamp;
};

/*
 *==============================================================================
 * channel allocation
 *==============================================================================
 */

/**
 * @fn int try_to_accelerate(avm_pid_handle, struct sk_buff*)
 * @brief avm_pa callback function
 *
 * @param pid_handle [in] corresponding endpoint pid
 * @param skb [in] the packet
 * @return AVM_PA_RX_OK
 */
int try_to_accelerate(avm_pid_handle pid_handle,
		      struct sk_buff *skb)
{
	return AVM_PA_RX_OK;
}

/*
 *==============================================================================
 * hwpa nss tracking
 *==============================================================================
 */

/**
 * @fn enum hwpa_backend_rv hwpa_nss_tracker_add_nss_session(struct hwpa_nss_subsystem*)
 * @brief increments tracker usage counter. If usage would exceed limit, counter
 * is not incremented and error is returned.
 *
 * @param subsys [in] subsystem containing the tracker
 * @return success or error code in case limit is reached
 */
static enum hwpa_backend_rv hwpa_nss_tracker_add_nss_session(struct hwpa_nss_subsystem *subsys)
{
	struct hwpa_nss_tracker *tracker = subsys->tracker;

	spin_lock_bh(&tracker->lock);
	if (unlikely(tracker->usage >= tracker->limit)) {
		spin_unlock_bh(&tracker->lock);
		return HWPA_BACKEND_ERR_TRACKER_LIMIT;
	}

	tracker->usage++;
	spin_unlock_bh(&tracker->lock);

	return HWPA_BACKEND_SUCCESS;
}

/**
 * @fn enum hwpa_backend_rv hwpa_nss_tracker_remove_nss_session(struct hwpa_nss_subsystem*)
 * @brief decrements usage counter.
 *
 * @param subsys [in] subsystem containing the tracker
 * @return success or error code if usage is zero
 */
static enum hwpa_backend_rv hwpa_nss_tracker_remove_nss_session(struct hwpa_nss_subsystem *subsys)
{
	struct hwpa_nss_tracker *tracker = subsys->tracker;

	spin_lock_bh(&tracker->lock);
	if (unlikely(tracker->usage == 0)) {
		spin_unlock_bh(&tracker->lock);
		pr_err("trying to remove session from tracker although there is none registered\n");
		return HWPA_BACKEND_ERR_TRACKER_LIMIT;
	}

	tracker->usage--;
	spin_unlock_bh(&tracker->lock);

	return HWPA_BACKEND_SUCCESS;
}

/**
 * @fn enum hwpa_backend_rv hwpa_nss_ipv4_init_limit(struct hwpa_nss_subsystem*)
 * @brief initializes ipv4 subsystem usage and limit counter. Limit is
 * min(HASH_SIZE(ipv4->session_table), nss_ipv4_max_conn_count).
 *
 * @param subsys [in] subsystem containing the tracker
 * @return success only
 */
static enum hwpa_backend_rv hwpa_nss_ipv4_init_limit(struct hwpa_nss_subsystem *subsys)
{
	struct hwpa_nss_tracker *tracker = subsys->tracker;
	struct hwpa_nss_ipv4_specific *ipv4 = subsys->ipv4_spec;
	int max_nss_sessions;

	PR_DEVEL("Init IPV4 Tracker\n");

	tracker->limit = HASH_SIZE(ipv4->session_table);
	max_nss_sessions = nss_ipv4_max_conn_count();
	PR_DEVEL("Max NSS IPV4 sessions: %u\n", max_nss_sessions);

	if (max_nss_sessions < tracker->limit) {
		pr_warn("FW limit (%d) of IPV4 NSS Sessions smaller than configured limit (%d). Reducing limit to %d",
			max_nss_sessions,  tracker->limit,
			max_nss_sessions);
		 tracker->limit = max_nss_sessions;
	}

	return HWPA_BACKEND_SUCCESS;
}

/**
 * @fn enum hwpa_backend_rv hwpa_nss_ipv6_init_limit(struct hwpa_nss_subsystem*)
 * @brief initializes ipv6 subsystem usage and limit counter. Limit is
 * min(HASH_SIZE(ipv6->session_table), nss_ipv6_max_conn_count).
 *
 * @param subsys [in] subsystem containing the tracker
 * @return success only
 */
static enum hwpa_backend_rv hwpa_nss_ipv6_init_limit(struct hwpa_nss_subsystem *subsys)
{
	struct hwpa_nss_tracker *tracker = subsys->tracker;
	struct hwpa_nss_ipv6_specific *ipv6 = subsys->ipv6_spec;
	int max_nss_sessions;

	PR_DEVEL("Init IPV6 Tracker\n");

	tracker->limit = HASH_SIZE(ipv6->session_table);
	max_nss_sessions = nss_ipv6_max_conn_count();
	PR_DEVEL("Max NSS IPV6 sessions: %u\n", max_nss_sessions);

	if (max_nss_sessions < tracker->limit) {
		pr_warn("FW limit (%d) of IPV6 NSS Sessions smaller than configured limit (%d). Reducing limit to %d",
			max_nss_sessions,  tracker->limit,
			max_nss_sessions);
		 tracker->limit = max_nss_sessions;
	}

	return HWPA_BACKEND_SUCCESS;
}

/**
 * @fn enum hwpa_backend_rv hwpa_qca833x_init_limit(struct hwpa_nss_subsystem*)
 * @brief initializes qca833x subsystem usage and limit counter.
 *
 * @param subsys [in] subsystem containing the tracker
 * @return success only
 */
static enum hwpa_backend_rv hwpa_qca833x_init_limit(struct hwpa_nss_subsystem *subsys)
{
	struct hwpa_nss_tracker *tracker = subsys->tracker;

	PR_DEVEL("Init QCA833X Tracker\n");

	tracker->limit = QCA_MAX_QCA833X_SESSIONS;

	return HWPA_BACKEND_SUCCESS;
}

/*
 *==============================================================================
 * Global Context / Subsystem / Offloader - Selection functions
 *==============================================================================
 */

/**
 * @fn struct hwpa_nss_subsystem hwpa_nss_get_subsys*(enum hwpa_nss_subsystem_idx)
 * @brief get subsystem from index
 *
 * @param idx [in] the index of the requested subsystem
 * @return subsystem or NULL in case of invalid index
 */
static struct hwpa_nss_subsystem *hwpa_nss_get_subsys(enum hwpa_nss_subsystem_idx idx)
{
	struct hwpa_nss_context *global_ctx = &hwpa_nss_ctx;

	if (idx >= HWPA_NSS_SUBSYSTEM_IDX_MAX)
		return NULL;

	return global_ctx->subsystems[idx];
}

/**
 * @fn struct hwpa_nss_offloader hwpa_nss_get_offloader*(enum hwpa_nss_offloader_idx)
 * @brief get offloader from index
 *
 * @param idx [in] index of the requested offloader
 * @return offloader or NULL in case of invalid index
 */
static struct hwpa_nss_offloader *hwpa_nss_get_offloader(enum hwpa_nss_offloader_idx idx)
{
	struct hwpa_nss_context *global_ctx = &hwpa_nss_ctx;

	if (idx >= HWPA_NSS_OFFLOADER_IDX_MAX)
		return NULL;

	return global_ctx->offloaders[idx];
}

/**
 * @fn bool hwpa_nss_invalid_hwpa_session(struct hwpa_nss_hwpa_session*)
 * @brief check if given hwpa session is valid
 *
 * @param hws_hwpa [in] hwpa session
 * @return True if invalid, false otherwise
 */
static bool hwpa_nss_invalid_hwpa_session(struct hwpa_nss_hwpa_session *hws_hwpa)
{
	if (hws_hwpa) {
#ifdef HWPA_NSS_DEBUG
		if (hws_hwpa->magic != HWPA_SESSION_MAGIC)
			return true;
#endif
		return !hws_hwpa->hws_nss;
	}
	return true;
}

/**
 * @fn unsigned long hwpa_nss_session_to_handle(struct hwpa_nss_hwpa_session*)
 * @brief Translates a hwpa-session to an hwpa-handle
 *
 * @param hwpa_sess [in] the hwpa Session to translate to a handle
 *
 * @return the generated handle
 */
unsigned long hwpa_nss_session_to_handle(struct hwpa_nss_hwpa_session *hwpa_sess)
{
	return (unsigned long) hwpa_sess;
}
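
/*
 * Usage note (illustrative only): this helper and hwpa_nss_handle_to_session
 * below form a lossless round trip, e.g.
 *
 *	unsigned long handle = hwpa_nss_session_to_handle(hws_hwpa);
 *	BUG_ON(hwpa_nss_handle_to_session(handle) != hws_hwpa);
 */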

/**
 * @fn struct hwpa_nss_hwpa_session hwpa_nss_handle_to_session*(unsigned long)
 * @brief Translates a hwpa handle to a hwpa session
 *
 * @param handle [in] the handle for the requested session
 *
 * @return the requested Session or NULL in case of error
 */
struct hwpa_nss_hwpa_session *hwpa_nss_handle_to_session(unsigned long handle)
{
	if (!handle || handle == -1) {
		PR_DEVEL("Invalid Session");
		return NULL;
	}

	return (struct hwpa_nss_hwpa_session *) handle;
}

/*
 *==============================================================================
 * HWPA and NSS Session init and deinit
 *==============================================================================
 */

/**
 * @fn void hwpa_nss_init_nss_session(struct hwpa_nss_nss_session*)
 * @brief initializes a nss session struct
 *
 * @param hws_nss [in] nss session
 */
static void hwpa_nss_init_nss_session(struct hwpa_nss_nss_session *hws_nss)
{
#ifdef HWPA_NSS_DEBUG
	hws_nss->magic = NSS_SESSION_MAGIC;
#endif
	spin_lock_init(&hws_nss->sync_lock);
	INIT_LIST_HEAD(&hws_nss->hwpa_session_list);
	INIT_LIST_HEAD(&hws_nss->ofl_node);
	INIT_HLIST_NODE(&hws_nss->node);
	hws_nss->flags = 0;
	hws_nss->state = HWPA_NSS_SESSION_STATE_INITIALIZED;
}

/**
 * @fn void hwpa_nss_init_hwpa_session(struct hwpa_nss_hwpa_session*)
 * @brief initializes a hwpa session struct
 *
 * @param hws_hwpa [in] hwpa session
 */
static void hwpa_nss_init_hwpa_session(struct hwpa_nss_hwpa_session *hws_hwpa)
{
#ifdef HWPA_NSS_DEBUG
	hws_hwpa->magic = HWPA_SESSION_MAGIC;
#endif
	hws_hwpa->direction = HWPA_NSS_SESSION_DIRECTION_MAX;
	INIT_LIST_HEAD(&hws_hwpa->node);
}

/**
 * @fn void hwpa_nss_unregister_nss_session(struct hwpa_nss_nss_session*)
 * @brief unregister nss session from subsystem by removing it from the hashlist.
 *
 * @param hws_nss [in] nss session to unregister
 */
static void hwpa_nss_unregister_nss_session(struct hwpa_nss_nss_session *hws_nss)
{
	spinlock_t *lock;

	PR_DEVEL("Unregistering hw session %p\n", hws_nss);

	lock = &hwpa_nss_ctx.lock;

	spin_lock_bh(lock);
	hash_del_rcu(&hws_nss->node);
	spin_unlock_bh(lock);
	synchronize_rcu();
}

/**
 * @fn void hwpa_nss_ipv4_register_nss_session(struct hwpa_nss_subsystem*, uint32_t, struct hwpa_nss_nss_session*)
 * @brief register ipv4 session by adding it to the ipv4 hashlist
 *
 * @param subsys [in] ipv4 subsystem
 * @param hash [in] hash of nss session
 * @param hws_nss [in] nss session
 */
static void hwpa_nss_ipv4_register_nss_session(struct hwpa_nss_subsystem *subsys,
							       uint32_t hash,
							       struct hwpa_nss_nss_session *hws_nss)
{
	spinlock_t *lock = &hwpa_nss_ctx.lock;

	spin_lock_bh(lock);
	hash_add_rcu(subsys->ipv4_spec->session_table, &hws_nss->node, hash);
	spin_unlock_bh(lock);
	synchronize_rcu();
}

/**
 * @fn void hwpa_nss_ipv6_register_nss_session(struct hwpa_nss_subsystem*, uint32_t, struct hwpa_nss_nss_session*)
 * @brief register ipv6 session by adding it to the ipv6 hashlist
 *
 * @param subsys [in] ipv6 subsystem
 * @param hash [in] hash of nss session
 * @param hws_nss [in] nss session
 */
static void hwpa_nss_ipv6_register_nss_session(struct hwpa_nss_subsystem *subsys,
							       uint32_t hash,
							       struct hwpa_nss_nss_session *hws_nss)
{
	spinlock_t *lock = &hwpa_nss_ctx.lock;

	spin_lock_bh(lock);
	hash_add_rcu(subsys->ipv6_spec->session_table, &hws_nss->node, hash);
	spin_unlock_bh(lock);
	synchronize_rcu();
}

/**
 * @fn void hwpa_qca833x_register_nss_session(struct hwpa_nss_subsystem*, uint32_t, struct hwpa_nss_nss_session*)
 * @brief register qca833x session by adding it to the qca833x hashlist
 *
 * @param subsys [in] qca833x subsystem
 * @param hash [in] hash of nss session
 * @param hws_nss [in] nss session
 */
static void hwpa_qca833x_register_nss_session(struct hwpa_nss_subsystem *subsys,
							       uint32_t hash,
							       struct hwpa_nss_nss_session *hws_nss)
{
	spinlock_t *lock = &hwpa_nss_ctx.lock;

	spin_lock_bh(lock);
	hash_add_rcu(subsys->qca833x_spec->session_table, &hws_nss->node, hash);
	spin_unlock_bh(lock);
	synchronize_rcu();
}

/**
 * @fn void hwpa_nss_register_nss_session(struct hwpa_nss_nss_session*, uint32_t)
 * @brief register nss session to hwpa_nss_context by adding it to the
 * corresponding hashlist
 *
 * @param hws_nss [in] nss session to register
 * @param hash [in] hash to register session with
 */
static void hwpa_nss_register_nss_session(struct hwpa_nss_nss_session *hws_nss,
					  uint32_t hash)
{
	struct hwpa_nss_subsystem *subsys;

	subsys = hws_nss->offloader->subsys;

	subsys->register_nss_session(subsys, hash, hws_nss);

	PR_DEVEL("Registered hw session %p with hash %x\n", hws_nss, hash);
}

/**
 * @fn void hwpa_nss_detach_from_nss_session(struct hwpa_nss_hwpa_session*)
 * @brief detach hwpa session from nss session
 *
 * @param hws_hwpa [in] the hwpa session to detach
 */
static void hwpa_nss_detach_from_nss_session(struct hwpa_nss_hwpa_session *hws_hwpa)
{
	struct hwpa_nss_nss_session *hws_nss;

	if (hwpa_nss_invalid_hwpa_session(hws_hwpa)) {
		pr_warn("Could not detach hwpa session from nss session");
		return;
	}

	hws_nss = hws_hwpa->hws_nss;

	if (atomic_read(&hws_nss->pa_ref_count) == 0) {
		pr_warn("BUG? Session could not be deregistered as there are non.\n");
		return;
	}

	atomic_dec(&hws_nss->pa_ref_count);
	list_del_init(&hws_hwpa->node); /* init to make reassignment possible */

	hws_hwpa->hws_nss = NULL;

	PR_DEVEL("Detached hwpa session %p from nss session %p\n", hws_hwpa, hws_nss);
}

/**
 * @fn void hwpa_nss_attach_to_nss_session(struct hwpa_nss_nss_session*, struct hwpa_nss_hwpa_session*)
 * @brief attach hwpa session to nss session.
 *
 * @param hws_nss [in] the nss session to attach to
 * @param hws_hwpa [in] the hwpa session
 */
static void hwpa_nss_attach_to_nss_session(struct hwpa_nss_nss_session *hws_nss,
					   struct hwpa_nss_hwpa_session *hws_hwpa)
{
	hws_hwpa->hws_nss = hws_nss;
	atomic_inc(&hws_nss->pa_ref_count);
	list_add_tail(&hws_hwpa->node, &hws_nss->hwpa_session_list);

	PR_DEVEL("Attached hwpa session %p to hw session %p\n", hws_hwpa, hws_nss);
}

/**
 * @fn void hwpa_nss_destroy_unregistered_nss_session(struct hwpa_nss_nss_session*)
 * @brief destroy unregistered nss session in hwpa_nss context.
 *
 * @param hws_nss [in] nss session to destroy
 */
static void hwpa_nss_destroy_unregistered_nss_session(struct hwpa_nss_nss_session *hws_nss)
{
	struct hwpa_nss_context *global_ctx = &hwpa_nss_ctx;

	WARN_ON(!list_empty(&hws_nss->hwpa_session_list));

	kmem_cache_free(global_ctx->kmem_nss, hws_nss);

	PR_DEVEL("Destroyed nss session %p\n", hws_nss);
}

/**
 * @fn void hwpa_nss_destroy_nss_session(struct hwpa_nss_nss_session*)
 * @brief destroy nss session in hwpa_nss context and also unregister it.
 *
 * @param hws_nss [in] nss session to destroy
 */
static void hwpa_nss_destroy_nss_session(struct hwpa_nss_nss_session *hws_nss)
{
	hwpa_nss_unregister_nss_session(hws_nss);

	hwpa_nss_destroy_unregistered_nss_session(hws_nss);
}

/**
 * @fn void hwpa_nss_destroy_unattached_hwpa_session(struct hwpa_nss_hwpa_session*)
 * @brief destroy hwpa session in hwpa_nss context
 *
 * @param hws_hwpa [in] hwpa session to destroy
 */
static void hwpa_nss_destroy_unattached_hwpa_session(struct hwpa_nss_hwpa_session *hws_hwpa)
{
	struct hwpa_nss_context *global_ctx = &hwpa_nss_ctx;

	kmem_cache_free(global_ctx->kmem_hwpa, hws_hwpa);

	PR_DEVEL("Destroyed hwpa session %p\n", hws_hwpa);
}

/**
 * @fn void hwpa_nss_destroy_hwpa_session(struct hwpa_nss_hwpa_session*)
 * @brief destroy hwpa session in hwpa_nss context and also detach it from
 * the nss session.
 *
 * @param hws_hwpa [in] hwpa session to destroy
 */
static void hwpa_nss_destroy_hwpa_session(struct hwpa_nss_hwpa_session *hws_hwpa)
{
	hwpa_nss_detach_from_nss_session(hws_hwpa);

	hwpa_nss_destroy_unattached_hwpa_session(hws_hwpa);
}

/**
 * @fn bool hwpa_nss_is_routed(const struct avm_pa_session*)
 * @brief figure out whether a session is routed or bridged from the nss point
 * of view.
 *
 * @param sess_pa [in] avm_pa session
 * @return true if the session is routed, false if not
 */
static bool hwpa_nss_is_routed(const struct avm_pa_session *sess_pa)
{
	return !!(sess_pa->mod.modflags & AVM_PA_MOD_TTL);
}

/*
 *==============================================================================
 * NSS Session Hash Generation
 *==============================================================================
 */

/**
 * @fn uint32_t hwpa_nss_ipv4_gen_session_hash_raw(uint32_t, uint32_t, uint32_t, uint32_t, uint8_t)
 * @brief generate hash for ipv4 session properties. Needs to be symmetric.
 *
 * @param flow_ip [in] flow_ip used to generate hash
 * @param flow_ident [in] flow_ident used to generate hash
 * @param return_ip_xlate [in] return_ip_xlate used to generate hash
 * @param return_ident_xlate [in] return_ident_xlate used to generate hash
 * @param protocol [in] protocol used to generate hash
 *
 * @return the generated hash value
 */
static uint32_t hwpa_nss_ipv4_gen_session_hash_raw(uint32_t flow_ip, uint32_t flow_ident,
				       uint32_t return_ip_xlate, uint32_t return_ident_xlate,
				       uint8_t protocol)
{
	return hwpa_ipv4_gen_session_hash_raw(flow_ip, flow_ident, return_ip_xlate,
						return_ident_xlate, protocol);
}

/**
 * @fn uint32_t hwpa_nss_ipv6_gen_session_hash_raw(uint32_t*, uint32_t, uint32_t*, uint32_t, uint8_t)
 * @brief generate hash for ipv6 session properties. Needs to be symmetric.
 *
 * @param flow_ip [in] flow_ip used to generate hash
 * @param flow_ident [in] flow_ident used to generate hash
 * @param return_ip [in] return_ip used to generate hash
 * @param return_ident [in] return_ident used to generate hash
 * @param protocol [in] protocol used to generate hash
 *
 * @return the generated hash value
 */
static uint32_t hwpa_nss_ipv6_gen_session_hash_raw(uint32_t *flow_ip, uint32_t flow_ident,
				       uint32_t *return_ip, uint32_t return_ident,
				       uint8_t protocol)
{
	return hwpa_ipv6_gen_session_hash_raw(flow_ip, flow_ident, return_ip,
						return_ident, protocol);
}

/**
 * @fn uint32_t hwpa_qca833x_gen_session_hash_raw(uint8_t*, uint8_t*, uint16_t)
 * @brief generate hash for qca833x session properties. Asymmetric, as these
 * sessions are not bidirectional.
 *
 * @param smac [in] source mac
 * @param dmac [in] destination mac
 * @param vlan_tci [in] vlan tag control information
 *
 * @return the generated hash value
 */
static uint32_t hwpa_qca833x_gen_session_hash_raw(uint8_t *smac, uint8_t *dmac, uint16_t vlan_tci)
{
	uint32_t hash = 0;

	/* Non-Bidirectional: use asymmetric hash function */
	hash += MAC_ADDR_XOR(smac);
	hash -= MAC_ADDR_XOR(dmac);
	hash += vlan_tci;

	return hash;
}

/**
 * @fn uint32_t hwpa_nss_ipv4_gen_session_hash(struct hwpa_nss_nss_session*)
 * @brief generate ipv4 session hash
 *
 * @param hws_nss [in] NSS session to generate hash for
 *
 * @return the generated hash value
 */
static uint32_t hwpa_nss_ipv4_gen_session_hash(struct hwpa_nss_nss_session *hws_nss)
{
	struct hwpa_nss_ipv4_session_data *data = &hws_nss->ipv4;

	return hwpa_nss_ipv4_gen_session_hash_raw(data->tuple.flow_ip,
		data->tuple.flow_ident, data->return_ip_xlate,
		data->return_ident_xlate, data->tuple.protocol);
}

/**
 * @fn uint32_t hwpa_nss_ipv6_gen_session_hash(struct hwpa_nss_nss_session*)
 * @brief generate ipv6 session hash
 *
 * @param hws_nss [in] NSS session to generate hash for
 *
 * @return the generated hash value
 */
static uint32_t hwpa_nss_ipv6_gen_session_hash(struct hwpa_nss_nss_session *hws_nss)
{
	struct nss_ipv6_5tuple *data_tuple = &hws_nss->ipv6.tuple;

	return hwpa_nss_ipv6_gen_session_hash_raw(data_tuple->flow_ip,
		data_tuple->flow_ident, data_tuple->return_ip,
		data_tuple->return_ident, data_tuple->protocol);
}

/**
 * @fn uint32_t hwpa_qca833x_gen_session_hash(struct hwpa_nss_nss_session*)
 * @brief generate qca833x session hash
 *
 * @param hws_nss [in] qca833x session to generate hash for
 *
 * @return the generated hash value
 */
static uint32_t hwpa_qca833x_gen_session_hash(struct hwpa_nss_nss_session *hws_nss)
{
	return hwpa_qca833x_gen_session_hash_raw(hws_nss->qca833x.tuple.hdr.h_source,
						  hws_nss->qca833x.tuple.hdr.h_dest,
						  hws_nss->qca833x.tuple.hdr.h_vlan_TCI);
}

/*
 *==============================================================================
 * NSS/HWPA-Session search for ipv4
 *==============================================================================
 */

/**
 * @fn struct hwpa_nss_nss_session hwpa_nss_ipv4_find_session_unidir*(struct hwpa_nss_subsystem*, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint8_t)
 * @brief find a unidirectional nss ipv4 session.
 *
 * @param subsys [in] subsystem containing the session
 * @param hash [in] hash assigned to searched session
 * @param flow_ip [in] flow_ip
 * @param flow_ident [in] flow_ident
 * @param return_ip_xlate [in] return_ip_xlate
 * @param return_ident_xlate [in] return_ident_xlate
 * @param protocol [in] protocol
 * @return the found session or NULL
 */
static struct hwpa_nss_nss_session *hwpa_nss_ipv4_find_session_unidir(struct hwpa_nss_subsystem *subsys, uint32_t hash,
							   uint32_t flow_ip, uint32_t flow_ident,
							   uint32_t return_ip_xlate, uint32_t return_ident_xlate,
							   uint8_t protocol)
{
	struct hwpa_nss_nss_session *find;
	struct hwpa_nss_ipv4_session_data *session_data;

	rcu_read_lock();
	hash_for_each_possible_rcu(subsys->ipv4_spec->session_table,
				   find, node, hash) {
		session_data = &find->ipv4;
		if (unlikely(session_data->tuple.protocol != protocol))
			continue;
		if (unlikely(session_data->tuple.flow_ip != flow_ip))
			continue;
		if (unlikely(session_data->tuple.flow_ident != flow_ident))
			continue;
		if (unlikely(session_data->return_ident_xlate != return_ident_xlate))
			continue;
		if (unlikely(session_data->return_ip_xlate != return_ip_xlate))
			continue;
		goto connection_found;
	}

	rcu_read_unlock();
	return NULL;

connection_found:
	rcu_read_unlock();
	return find;
}

/**
 * @fn struct hwpa_nss_nss_session hwpa_nss_ipv4_find_session_bidir*(struct hwpa_nss_subsystem*, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint8_t)
 * @brief find a bidirectional nss ipv4 session. Modeled after
 * ecm_db_connection_find_and_ref_chain from the ECM.
 *
 * @param subsys [in] subsystem containing the session
 * @param hash [in] hash assigned to searched session
 * @param flow_ip [in] flow_ip
 * @param flow_ident [in] flow_ident
 * @param return_ip_xlate [in] return_ip_xlate
 * @param return_ident_xlate [in] return_ident_xlate
 * @param protocol [in] protocol
 * @return the found session or NULL
 */
static struct hwpa_nss_nss_session *hwpa_nss_ipv4_find_session_bidir(struct hwpa_nss_subsystem *subsys, uint32_t hash,
							   uint32_t flow_ip, uint32_t flow_ident,
							   uint32_t return_ip_xlate, uint32_t return_ident_xlate,
							   uint8_t protocol)
{
	struct hwpa_nss_nss_session *find;
	struct hwpa_nss_ipv4_session_data *session_data;

	rcu_read_lock();

	hash_for_each_possible_rcu(subsys->ipv4_spec->session_table,
				   find, node, hash) {
		session_data = &find->ipv4;
		if (unlikely(session_data->tuple.protocol != protocol))
			continue;
		if (unlikely(session_data->tuple.flow_ip != flow_ip))
			goto try_reverse;
		if (unlikely(session_data->tuple.flow_ident != flow_ident))
			goto try_reverse;
		if (unlikely(session_data->return_ident_xlate != return_ident_xlate))
			goto try_reverse;
		if (unlikely(session_data->return_ip_xlate != return_ip_xlate))
			goto try_reverse;
		goto connection_found;
try_reverse:
		if (unlikely(session_data->tuple.flow_ip != return_ip_xlate))
			continue;
		if (unlikely(session_data->tuple.flow_ident != return_ident_xlate))
			continue;
		if (unlikely(session_data->return_ident_xlate != flow_ident))
			continue;
		if (unlikely(session_data->return_ip_xlate != flow_ip))
			continue;
		goto connection_found;
	}

	rcu_read_unlock();
	return NULL;

connection_found:
	rcu_read_unlock();
	return find;
}

/**
 * @fn struct hwpa_nss_nss_session hwpa_nss_ipv4_find_nss_session_from_sync*(struct hwpa_nss_subsystem*, struct nss_ipv4_conn_sync*)
 * @brief find a bidirectional nss session from sync.
 *
 * @param subsys [in] subsystem containing the session
 * @param sync [in] ipv4 sync data for session identification
 * @return the found session or NULL
 */
static struct hwpa_nss_nss_session *hwpa_nss_ipv4_find_nss_session_from_sync(struct hwpa_nss_subsystem *subsys, struct nss_ipv4_conn_sync *sync)
{
	uint32_t hash;

	hash = hwpa_nss_ipv4_gen_session_hash_raw(sync->flow_ip, sync->flow_ident,
						  sync->return_ip_xlate, sync->return_ident_xlate,
						  sync->protocol);

	return hwpa_nss_ipv4_find_session_bidir(subsys, hash, sync->flow_ip, sync->flow_ident,
							sync->return_ip_xlate, sync->return_ident_xlate,
							sync->protocol);
}

/**
 * @fn struct hwpa_nss_nss_session hwpa_nss_ipv4_find_nss_session(struct hwpa_nss_subsystem*, uint32_t, struct hwpa_nss_nss_session*, enum hwpa_nss_session_direction)
 * @brief find an offloaded nss session matching a not-yet-offloaded nss session, given its hash and a direction.
 *
 * @param subsys [in] subsystem containing the session
 * @param hash [in] hash of the searched session
 * @param hws_nss [in] nss session used for searching
 * @param dir [in] direction of the offloaded session
 * @return the found session or NULL
 */
static struct hwpa_nss_nss_session *hwpa_nss_ipv4_find_nss_session(struct hwpa_nss_subsystem *subsys,
					uint32_t hash, struct hwpa_nss_nss_session *hws_nss, enum hwpa_nss_session_direction dir)
{
	struct hwpa_nss_ipv4_session_data *data = &hws_nss->ipv4;
	struct hwpa_nss_nss_session *find = NULL;

	PR_DEVEL("Trying to find nss session with nss session %p with hash %x and direction %d\n", hws_nss, hash, dir);
	switch (dir) {
	case HWPA_NSS_SESSION_DIRECTION_FLOW:
		find = hwpa_nss_ipv4_find_session_unidir(subsys, hash, data->tuple.flow_ip, data->tuple.flow_ident,
				data->return_ip_xlate, data->return_ident_xlate,
				data->tuple.protocol);
		break;
	case HWPA_NSS_SESSION_DIRECTION_RETURN:
		find = hwpa_nss_ipv4_find_session_unidir(subsys, hash, data->return_ip_xlate, data->return_ident_xlate,
				data->tuple.flow_ip, data->tuple.flow_ident,
				data->tuple.protocol);
		break;
	case HWPA_NSS_SESSION_DIRECTION_DONT_CARE:
		find = hwpa_nss_ipv4_find_session_bidir(subsys, hash, data->tuple.flow_ip, data->tuple.flow_ident,
				data->return_ip_xlate, data->return_ident_xlate,
				data->tuple.protocol);
		break;
	default:
		pr_err("Bad Direction\n");
	}

	PR_DEVEL("Found nss session %p\n", find);

	return find;
}

/*
 *==============================================================================
 * NSS/HWPA-Session search for ipv6
 *==============================================================================
 */

/**
 * @fn struct hwpa_nss_nss_session hwpa_nss_ipv6_find_session_unidir*(struct hwpa_nss_subsystem*, uint32_t, uint32_t*, uint32_t, uint32_t*, uint32_t, uint8_t)
 * @brief find a unidirectional nss ipv6 session.
 *
 * @param subsys [in] subsystem containing the session
 * @param hash [in] hash assigned to searched session
 * @param flow_ip [in] flow_ip
 * @param flow_ident [in] flow_ident
 * @param return_ip [in] return_ip
 * @param return_ident [in] return_ident
 * @param protocol [in] protocol
 * @return the found session or NULL
 */
static struct hwpa_nss_nss_session *hwpa_nss_ipv6_find_session_unidir(struct hwpa_nss_subsystem *subsys, uint32_t hash,
							   uint32_t *flow_ip, uint32_t flow_ident,
							   uint32_t *return_ip, uint32_t return_ident,
							   uint8_t protocol)
{
	struct hwpa_nss_nss_session *find;
	struct nss_ipv6_5tuple *session_tuple;

	rcu_read_lock();

	hash_for_each_possible_rcu(subsys->ipv6_spec->session_table,
				   find, node, hash) {
		session_tuple = &find->ipv6.tuple;
		if (unlikely(session_tuple->protocol != protocol))
			continue;
		if (unlikely(!IPV6_ADDR_MATCH(session_tuple->flow_ip, flow_ip)))
			continue;
		if (unlikely(session_tuple->flow_ident != flow_ident))
			continue;
		if (unlikely(session_tuple->return_ident != return_ident))
			continue;
		if (unlikely(!IPV6_ADDR_MATCH(session_tuple->return_ip, return_ip)))
			continue;
		goto connection_found;
	}

	rcu_read_unlock();
	return NULL;

connection_found:
	rcu_read_unlock();
	return find;
}

/**
 * @fn struct hwpa_nss_nss_session hwpa_nss_ipv6_find_session_bidir*(struct hwpa_nss_subsystem*, uint32_t, uint32_t*, uint32_t, uint32_t*, uint32_t, uint8_t)
 * @brief find a bidirectional nss ipv6 session.
 *
 * @param subsys [in] subsystem containing the session
 * @param hash [in] hash assigned to searched session
 * @param flow_ip [in] flow_ip
 * @param flow_ident [in] flow_ident
 * @param return_ip [in] return_ip
 * @param return_ident [in] return_ident
 * @param protocol [in] protocol
 * @return the found session or NULL
 */
static struct hwpa_nss_nss_session *hwpa_nss_ipv6_find_session_bidir(struct hwpa_nss_subsystem *subsys, uint32_t hash,
							   uint32_t *flow_ip, uint32_t flow_ident,
							   uint32_t *return_ip, uint32_t return_ident,
							   uint8_t protocol)
{
	struct hwpa_nss_nss_session *find;
	struct nss_ipv6_5tuple *session_tuple;

	rcu_read_lock();

	hash_for_each_possible_rcu(subsys->ipv6_spec->session_table,
				   find, node, hash) {
		session_tuple = &find->ipv6.tuple;
		if (unlikely(session_tuple->protocol != protocol))
			continue;
		if (unlikely(!IPV6_ADDR_MATCH(session_tuple->flow_ip, flow_ip)))
			goto try_reverse;
		if (unlikely(session_tuple->flow_ident != flow_ident))
			goto try_reverse;
		if (unlikely(session_tuple->return_ident != return_ident))
			goto try_reverse;
		if (unlikely(!IPV6_ADDR_MATCH(session_tuple->return_ip, return_ip)))
			goto try_reverse;
		goto connection_found;
try_reverse:
		if (unlikely(!IPV6_ADDR_MATCH(session_tuple->flow_ip, return_ip)))
			continue;
		if (unlikely(session_tuple->flow_ident != return_ident))
			continue;
		if (unlikely(session_tuple->return_ident != flow_ident))
			continue;
		if (unlikely(!IPV6_ADDR_MATCH(session_tuple->return_ip, flow_ip)))
			continue;
		goto connection_found;
	}

	rcu_read_unlock();
	return NULL;

connection_found:
	rcu_read_unlock();
	return find;
}

/**
 * @fn struct hwpa_nss_nss_session hwpa_nss_ipv6_find_nss_session_from_sync*(struct hwpa_nss_subsystem*, struct nss_ipv6_conn_sync*)
 * @brief find a bidirectional nss session from sync.
 *
 * @param subsys [in] subsystem containing the session
 * @param sync [in] ipv6 sync data for session identification
 * @return the found session or NULL
 */
static struct hwpa_nss_nss_session *hwpa_nss_ipv6_find_nss_session_from_sync(struct hwpa_nss_subsystem *subsys, struct nss_ipv6_conn_sync *sync)
{
	uint32_t hash;

	hash = hwpa_nss_ipv6_gen_session_hash_raw(sync->flow_ip, sync->flow_ident,
						  sync->return_ip, sync->return_ident,
						  sync->protocol);

	return hwpa_nss_ipv6_find_session_bidir(subsys, hash, sync->flow_ip, sync->flow_ident,
							sync->return_ip, sync->return_ident,
							sync->protocol);
}

/**
 * @fn struct hwpa_nss_nss_session hwpa_nss_ipv6_find_nss_session(struct hwpa_nss_subsystem*, uint32_t, struct hwpa_nss_nss_session*, enum hwpa_nss_session_direction)
 * @brief find an offloaded nss session matching a not-yet-offloaded nss session, given its hash and a direction.
 *
 * @param subsys [in] subsystem containing the session
 * @param hash [in] hash of the searched session
 * @param hws_nss [in] nss session used for searching
 * @param dir [in] direction of the offloaded session
 * @return the found session or NULL
 */
static struct hwpa_nss_nss_session *hwpa_nss_ipv6_find_nss_session(struct hwpa_nss_subsystem *subsys,
					uint32_t hash, struct hwpa_nss_nss_session *hws_nss, enum hwpa_nss_session_direction dir)
{
	struct hwpa_nss_ipv6_session_data *data = &hws_nss->ipv6;
	struct hwpa_nss_nss_session *find = NULL;

	PR_DEVEL("Trying to find nss session with nss session %p with hash %x and direction %d\n", hws_nss, hash, dir);
	switch (dir) {
	case HWPA_NSS_SESSION_DIRECTION_FLOW:
		find = hwpa_nss_ipv6_find_session_unidir(subsys, hash, data->tuple.flow_ip, data->tuple.flow_ident,
				data->tuple.return_ip, data->tuple.return_ident,
				data->tuple.protocol);
		break;
	case HWPA_NSS_SESSION_DIRECTION_RETURN:
		find = hwpa_nss_ipv6_find_session_unidir(subsys, hash, data->tuple.return_ip, data->tuple.return_ident,
				data->tuple.flow_ip, data->tuple.flow_ident,
				data->tuple.protocol);
		break;
	case HWPA_NSS_SESSION_DIRECTION_DONT_CARE:
		find = hwpa_nss_ipv6_find_session_bidir(subsys, hash, data->tuple.flow_ip, data->tuple.flow_ident,
				data->tuple.return_ip, data->tuple.return_ident,
				data->tuple.protocol);
		break;
	default:
		pr_err("Bad Direction\n");
	}
	PR_DEVEL("Found nss session %p\n", find);

	return find;
}

/*
 *==============================================================================
 * NSS/HWPA-Session search for qca833x
 *==============================================================================
 */

/**
 * @fn struct hwpa_nss_nss_session hwpa_qca833x_find_nss_session(struct hwpa_nss_subsystem*, uint32_t, struct hwpa_nss_nss_session*, enum hwpa_nss_session_direction)
 * @brief find an offloaded nss session matching a not-yet-offloaded nss session, given its hash and a direction.
 *
 * @param subsys [in] subsystem containing the session
 * @param hash [in] hash of the searched session
 * @param hws_nss [in] nss session used for searching
 * @param dir [in] direction of the offloaded session
 * @return the found session or NULL
 */
static struct hwpa_nss_nss_session *hwpa_qca833x_find_nss_session(struct hwpa_nss_subsystem *subsys,
					uint32_t hash, struct hwpa_nss_nss_session *hws_nss, enum hwpa_nss_session_direction dir)
{
	struct vlan_ethhdr *hdr = &hws_nss->qca833x.tuple.hdr;
	struct hwpa_nss_nss_session *find = NULL;

	PR_DEVEL("Trying to find nss session with nss session %p with hash %x and direction %d\n", hws_nss, hash, dir);
	rcu_read_lock();

	hash_for_each_possible_rcu(subsys->qca833x_spec->session_table, find, node, hash) {
		if (unlikely(!ether_addr_equal(find->qca833x.tuple.hdr.h_source, hdr->h_source)))
			continue;
		if (unlikely(!ether_addr_equal(find->qca833x.tuple.hdr.h_dest, hdr->h_dest)))
			continue;
		if (unlikely(find->qca833x.tuple.hdr.h_vlan_TCI != hdr->h_vlan_TCI))
			continue;
		goto connection_found;
	}

	rcu_read_unlock();
	return NULL;

connection_found:
	rcu_read_unlock();
	PR_DEVEL("Found qca833x session %p\n", find);

	return find;
}

/*
 *==============================================================================
 * hwpa nss session preparation and destruction
 *==============================================================================
 */

/**
 * @fn enum hwpa_backend_rv hwpa_nss_set_protocol(uint8_t*, u16)
 * @brief set protocol field using pkttype from avm_pa
 *
 * @param protocol [out] field in hwpa_nss_session tuple
 * @param pkttype [in] pkttype from avm_pa
 * @return success or error code.
 */
static enum hwpa_backend_rv hwpa_nss_set_protocol(uint8_t *protocol, u16 pkttype)
{
	switch (AVM_PA_PKTTYPE_IPPROTO(pkttype)) {
	case IPPROTO_UDP:
		*protocol = (uint8_t)IPPROTO_UDP;
		break;
	case IPPROTO_UDPLITE:
		*protocol = (uint8_t)IPPROTO_UDPLITE;
		break;
	case IPPROTO_TCP:
		*protocol = (uint8_t)IPPROTO_TCP;
		break;
	default:
		return HWPA_BACKEND_UNSUPPORTED_L4_PROTOCOL;
	}
	return HWPA_BACKEND_SUCCESS;
}

/**
 * @fn enum hwpa_backend_rv hwpa_nss_ipv4_prepare_session(struct hwpa_nss_offloading_data *)
 * @brief prepare an nss session for the ipv4 subsystem using an avm_pa session.
 * Preparing means using the avm_pa session to fill the session data of the
 * nss session to make it ready for adding it to the hashlist.
 *
 * @param ofl_data [in] offloading data
 *
 * @return success or error code.
 */
static enum hwpa_backend_rv hwpa_nss_ipv4_prepare_session(struct hwpa_nss_offloading_data *ofl_data)
{
	enum hwpa_backend_rv retval = HWPA_BACKEND_SUCCESS;
	const struct avm_pa_session *sess_pa;
	struct hwpa_nss_nss_session *hws_nss;
	const uint16_t *ig_ports;
	const uint16_t *eg_ports;
	const struct iphdr *ip4_ig, *ip4_eg;
	const struct avm_pa_egress *eg;
	const struct avm_pa_pkt_match *ig_match, *eg_match;
	struct hwpa_nss_ipv4_session_data *hws_data;

	PR_DEVEL("Preparing IPv4 session\n");

	sess_pa = ofl_data->sess_pa;
	hws_nss = ofl_data->hws_nss;
	hws_data = &hws_nss->ipv4;

	eg = ofl_data->eg;
	ig_match = ofl_data->ig_match;
	eg_match = ofl_data->eg_match;

	retval = hwpa_nss_set_protocol(&hws_data->tuple.protocol, ig_match->pkttype);
	if (retval != HWPA_BACKEND_SUCCESS) {
		PR_DEVEL("Couldn't set protocol. Session preparation failed!\n");
		goto failure_1;
	}
	ofl_data->protocol = hws_data->tuple.protocol;

	ip4_ig = hwpa_get_hdr(ig_match, AVM_PA_IPV4);
	ip4_eg = hwpa_get_hdr(eg_match, AVM_PA_IPV4);
	ig_ports = hwpa_get_hdr(ig_match, AVM_PA_PORTS);
	eg_ports = hwpa_get_hdr(eg_match, AVM_PA_PORTS);

	/* Configure IPs */
	hws_data->tuple.flow_ip = htonl(ip4_ig->saddr);
	hws_data->flow_ip_xlate = htonl(ip4_eg->saddr);
	hws_data->tuple.return_ip = htonl(ip4_ig->daddr);
	hws_data->return_ip_xlate = htonl(ip4_eg->daddr);
	hws_data->tuple.flow_ident = htons(ig_ports[0]);
	hws_data->flow_ident_xlate = htons(eg_ports[0]);
	hws_data->tuple.return_ident = htons(ig_ports[1]);
	hws_data->return_ident_xlate = htons(eg_ports[1]);

	PR_DEVEL("Prepared IPv4 session\n");

failure_1:
	return retval;
}

/**
 * @fn enum hwpa_backend_rv hwpa_nss_ipv6_prepare_session(struct hwpa_nss_offloading_data*)
 * @brief prepare an nss session for the ipv6 subsystem using an avm_pa session.
 * Preparing means using the avm_pa session to fill the session data of the
 * nss session to make it ready for adding it to the hashlist.
 *
 * @param ofl_data [in] offloading data
 *
 * @return success or error code.
 */
static enum hwpa_backend_rv hwpa_nss_ipv6_prepare_session(struct hwpa_nss_offloading_data *ofl_data)
{
	enum hwpa_backend_rv retval = HWPA_BACKEND_SUCCESS;
	const struct avm_pa_session *s;
	struct hwpa_nss_nss_session *hws_nss;
	const uint16_t *ig_ports;
	const uint16_t *eg_ports;
	const struct ipv6hdr *ip6_ig, *ip6_eg;
	const struct avm_pa_egress *eg;
	const struct avm_pa_pkt_match *ig_match, *eg_match;
	struct hwpa_nss_ipv6_session_data *hws_data;

	PR_DEVEL("Preparing IPv6 session\n");

	s = ofl_data->sess_pa;
	hws_nss = ofl_data->hws_nss;
	hws_data = &hws_nss->ipv6;

	eg = ofl_data->eg;
	ig_match = ofl_data->ig_match;
	eg_match = ofl_data->eg_match;

	retval = hwpa_nss_set_protocol(&hws_data->tuple.protocol, ig_match->pkttype);
	if (retval != HWPA_BACKEND_SUCCESS) {
		PR_DEVEL("Couldn't set protocol. Session preparation failed!\n");
		goto failure_1;
	}
	ofl_data->protocol = hws_data->tuple.protocol;

	ip6_ig = hwpa_get_hdr(ig_match, AVM_PA_IPV6);
	ip6_eg = hwpa_get_hdr(eg_match, AVM_PA_IPV6);
	ig_ports = hwpa_get_hdr(ig_match, AVM_PA_PORTS);
	eg_ports = hwpa_get_hdr(eg_match, AVM_PA_PORTS);

	IPV6_COPY(ip6_eg->saddr.in6_u.u6_addr32, hws_data->tuple.flow_ip);
	IPV6_COPY(ip6_eg->daddr.in6_u.u6_addr32, hws_data->tuple.return_ip);
	hws_data->tuple.flow_ident = htons(eg_ports[0]);
	hws_data->tuple.return_ident = htons(eg_ports[1]);

failure_1:
	return retval;
}

/**
 * @fn enum hwpa_backend_rv hwpa_qca833x_prepare_session(struct hwpa_nss_offloading_data*)
 * @brief prepare a qca833x session
 *
 * @param ofl_data [in] offloading data
 *
 * @return success or error code.
 */
static enum hwpa_backend_rv hwpa_qca833x_prepare_session(struct hwpa_nss_offloading_data *ofl_data)
{
	enum hwpa_backend_rv retval = HWPA_BACKEND_SUCCESS;
	const struct avm_pa_session *s;
	struct hwpa_nss_nss_session *hws_nss;
	const struct ethhdr *ig_ethhdr, *eg_ethhdr;
	const struct avm_pa_egress *eg;
	const struct avm_pa_pkt_match *ig_match, *eg_match;
	struct hwpa_qca833x_session_data *hws_data;

	PR_DEVEL("Preparing qca833x session\n");

	s = ofl_data->sess_pa;
	hws_nss = ofl_data->hws_nss;
	hws_data = &hws_nss->qca833x;

	eg = ofl_data->eg;
	ig_match = ofl_data->ig_match;
	eg_match = ofl_data->eg_match;

	ig_ethhdr = (const struct ethhdr *) hwpa_get_hdr(ig_match, AVM_PA_ETH);
	eg_ethhdr = (const struct ethhdr *) hwpa_get_hdr(eg_match, AVM_PA_ETH);
	if (unlikely(!ig_ethhdr || !eg_ethhdr)) {
		PR_DEVEL("No ethernet header for l2 session\n");
		return HWPA_BACKEND_ERR_INTERNAL;
	}

	ether_addr_copy((u8 *) hws_data->tuple.hdr.h_source,
			(u8 *) ig_ethhdr->h_source);
	ether_addr_copy((u8 *) hws_data->tuple.hdr.h_dest,
			(u8 *) eg_ethhdr->h_dest);
	hws_data->tuple.hdr.h_vlan_TCI = ig_match->vlan_tci;

	return retval;
}

/**
 * @fn struct hwpa_nss_offloader *hwpa_nss_select_offloader(struct hwpa_nss_offloading_data*)
 * @brief select the offloader matching an avm_pa session
 *
 * @param ofl_data [in] data for a specific offload
 * @return the selected offloader or NULL in case of an unsupported session type
 */
static struct hwpa_nss_offloader *hwpa_nss_select_offloader(struct hwpa_nss_offloading_data *ofl_data)
{
	enum hwpa_nss_offloader_idx idx;
	const struct avm_pa_session *sess_pa = ofl_data->sess_pa;
	uint32_t flow_idx, return_idx;

	flow_idx = ofl_data->if_max_indices[HWPA_NSS_SESSION_DIRECTION_FLOW] - 1;
	return_idx = ofl_data->if_max_indices[HWPA_NSS_SESSION_DIRECTION_RETURN] - 1;

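	/*
	 * The interface lists are ordered outermost-first, so these indices
	 * point at the innermost interface (the actual endpoint) of each
	 * direction.
	 */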
	if (sess_pa->bsession) {
		struct hwpa_nss_if_data *flow_interfaces, *return_interfaces;

		flow_interfaces = ofl_data->interfaces[HWPA_NSS_SESSION_DIRECTION_FLOW];
		return_interfaces = ofl_data->interfaces[HWPA_NSS_SESSION_DIRECTION_RETURN];
		if (flow_interfaces[flow_idx].ep_info->type == EP_TYPE_QCA833X &&
		    return_interfaces[return_idx].ep_info->type == EP_TYPE_QCA833X) {
			idx = HWPA_NSS_OFFLOADER_IDX_QCA833X;
			goto finished;
		}
		idx = HWPA_NSS_OFFLOADER_IDX_MAX;
		goto finished;
	}

	switch (sess_pa->ingress.pkttype & AVM_PA_PKTTYPE_IP_MASK) {
	case AVM_PA_PKTTYPE_IPV4:
		idx = HWPA_NSS_OFFLOADER_IDX_IPV4;
		break;
	case AVM_PA_PKTTYPE_IPV6:
		idx = HWPA_NSS_OFFLOADER_IDX_IPV6;
		break;
	default:
		idx = HWPA_NSS_OFFLOADER_IDX_MAX;
	}

finished:
	PR_DEVEL("offloader index: %d\n", idx);

	return hwpa_nss_get_offloader(idx);
}

/**
 * @fn enum hwpa_backend_rv hwpa_nss_prepare_session(struct hwpa_nss_offloading_data*)
 * @brief prepare hwpa session and avm_pa session to make it searchable in
 * hashlist.
 *
 * @param ofl_data [in] offloading data
 *
 * @return success or error code.
 */
static enum hwpa_backend_rv hwpa_nss_prepare_session(struct hwpa_nss_offloading_data *ofl_data)
{
	const struct avm_pa_session *s;
	struct hwpa_nss_hwpa_session *hws_hwpa;
	struct hwpa_nss_nss_session *hws_nss;
	struct hwpa_nss_offloader *ofl;
	enum hwpa_backend_rv retval = HWPA_BACKEND_SUCCESS;

	PR_DEVEL("HWPA Preparing Session\n");
	s = ofl_data->sess_pa;
	hws_nss = ofl_data->hws_nss;
	hws_hwpa = ofl_data->hws_hwpa;

	ofl = hwpa_nss_select_offloader(ofl_data);
	if (ofl == NULL) {
		pr_err("unsupported packet-type\n");
		retval = HWPA_BACKEND_UNSUPPORTED_SESS_TYPE;
		goto failure_1;
	}

	hws_nss->offloader = ofl;
	hws_hwpa->offloader = ofl;

	retval = ofl->prepare_session(ofl_data);
	if (retval != HWPA_BACKEND_SUCCESS)
		goto failure_1;

	hws_nss->state = HWPA_NSS_SESSION_STATE_PREPARED;

	PR_DEVEL("Preparation finished!\n");

failure_1:
	return retval;
}

/*
 *==============================================================================
 * pending offload manager
 *==============================================================================
 */

/**
 * @fn void hwpa_nss_add_pending_offload(struct hwpa_nss_offloading_data*)
 * @brief adds a pending offload
 *
 * @param ofl_data [in] the offloading data describing the offload
 */
static void hwpa_nss_add_pending_offload(struct hwpa_nss_offloading_data *ofl_data)
{
	struct hwpa_nss_context *global_ctx = &hwpa_nss_ctx;
	struct hwpa_nss_pending_offload_manager *pom = &global_ctx->pending_offload_mgr;

	ofl_data->hws_nss->state = HWPA_NSS_SESSION_STATE_PENDING_APPROVAL;
	ofl_data->timestamp = jiffies;

	spin_lock_bh(&pom->lock);
	list_add_tail(&ofl_data->node, &pom->pending_offloads);
	spin_unlock_bh(&pom->lock);
}

/**
 * @fn void hwpa_nss_remove_pending_offload(struct hwpa_nss_offloading_data*)
 * @brief removes a pending offload
 *
 * @param ofl_data [in] the offloading data describing the offload
 */
static void hwpa_nss_remove_pending_offload(struct hwpa_nss_offloading_data *ofl_data)
{
	struct hwpa_nss_context *global_ctx = &hwpa_nss_ctx;
	struct hwpa_nss_pending_offload_manager *pom = &global_ctx->pending_offload_mgr;

	spin_lock_bh(&pom->lock);
	list_del(&ofl_data->node);
	spin_unlock_bh(&pom->lock);
}

/**
 * @fn struct hwpa_nss_offloading_data *hwpa_nss_pom_get_and_unregister_offloading_data(struct hwpa_nss_nss_session*)
 * @brief gets a registered pending offload and unregisters it
 *
 * @param hws_nss [in] the nss session to search the offload with
 * @return the found offloading data or NULL in case of error
 */
static struct hwpa_nss_offloading_data *hwpa_nss_pom_get_and_unregister_offloading_data(struct hwpa_nss_nss_session *hws_nss)
{
	struct hwpa_nss_pending_offload_manager *pom = &hwpa_nss_ctx.pending_offload_mgr;
	struct hwpa_nss_offloading_data *ofl_data, *t, *find = NULL;

	list_for_each_entry_safe(ofl_data, t, &pom->pending_offloads, node) {
		if (ofl_data->hws_nss == hws_nss) {
			hwpa_nss_remove_pending_offload(ofl_data);
			find = ofl_data;
			break;
		}
	}

	return find;
}

/*
 * Forward definition
 */
static enum hwpa_backend_rv hwpa_nss_offload_session(struct hwpa_nss_offloading_data *ofl_data);

/**
 * @fn void hwpa_nss_pending_offload_manager_work(struct work_struct*)
 * @brief work function for the pending offload manager workqueue. Performs
 * actions if offload was too long ago.
 *
 * @param work [in] work struct
 */
static void hwpa_nss_pending_offload_manager_work(struct work_struct *work)
{
	struct hwpa_nss_context *global_ctx = &hwpa_nss_ctx;
	struct hwpa_nss_pending_offload_manager *pom = &global_ctx->pending_offload_mgr;
	struct hwpa_nss_offloading_data *ofl_data, *t;
	struct list_head pom_to_offload;

	if (list_empty(&pom->pending_offloads))
		goto reschedule;

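	/*
	 * Expired offloads are first collected on a local list; the actual
	 * offloads then run under the global mutex without holding pom->lock.
	 */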
	INIT_LIST_HEAD(&pom_to_offload);

	mutex_lock(&global_ctx->mutex);

	/*
	 * Iterate over all pending offloads and find the ones that waited too
	 * long. TCP sessions stay queued (avm_pa decides when to drop them);
	 * UDP sessions that exceeded their maximum waiting time are offloaded
	 * now.
	 */
	list_for_each_entry_safe(ofl_data, t, &pom->pending_offloads, node) {
		switch (ofl_data->protocol) {
			/* AVM_PA decides when to drop session*/
		case IPPROTO_TCP:
			break;
		case IPPROTO_UDP:
		case IPPROTO_UDPLITE:
			if (time_after(jiffies, ofl_data->timestamp + HWPA_NSS_UDP_MAX_WAITING_TIME)) {
				hwpa_nss_remove_pending_offload(ofl_data);
				list_add_tail(&ofl_data->node, &pom_to_offload);
			}
			break;
		default:
			PR_DEVEL("Unsupported Protocol!\n");
		}
	}

	/* Offload everything on pom_to_offload and update the offloader counters */
	list_for_each_entry_safe(ofl_data, t, &pom_to_offload, node) {
		struct hwpa_nss_subsystem *subsys;
		struct hwpa_nss_offloader *ofl;
		enum hwpa_backend_rv retval;
		struct hwpa_nss_hwpa_session *hws_hwpa;

		ofl = ofl_data->hws_nss->offloader;
		subsys = ofl_data->hws_nss->offloader->subsys;

		PR_DEVEL("Offloading %p\n", ofl_data);

		/*
		 * If only a return-direction flow is attached, turn it into a
		 * flow-direction one. This is needed to be able to collect its
		 * stats.
		 */
		list_for_each_entry(hws_hwpa, &ofl_data->hws_nss->hwpa_session_list, node) {
			if (hws_hwpa->direction == HWPA_NSS_SESSION_DIRECTION_RETURN)
				hws_hwpa->direction = HWPA_NSS_SESSION_DIRECTION_FLOW;
		}

		retval = hwpa_nss_offload_session(ofl_data);

		spin_lock_bh(&ofl->lock);
		ofl->pending_nss_session_count--;
		if (retval == HWPA_BACKEND_SUCCESS) {
			ofl->successful_nss_offloads++;
			ofl->active_nss_session_count++;
		} else	{
			ofl->failed_nss_offloads++;
		}
		spin_unlock_bh(&ofl->lock);
		kfree(ofl_data);
	}

	mutex_unlock(&global_ctx->mutex);

reschedule:
	queue_delayed_work(pom->workqueue, &pom->work,
			   HWPA_NSS_PENDING_OFFLOAD_PERIOD);
}

/**
 * @fn enum hwpa_backend_rv hwpa_nss_pending_offload_manager_init()
 * @brief initializes pending offload manager to perform work periodically
 *
 * @return error code or success
 */
static enum hwpa_backend_rv hwpa_nss_pending_offload_manager_init(void)
{
	struct hwpa_nss_context *global_ctx = &hwpa_nss_ctx;
	struct hwpa_nss_pending_offload_manager *pom = &global_ctx->pending_offload_mgr;
	enum hwpa_backend_rv retval = HWPA_BACKEND_SUCCESS;

	PR_DEVEL("Init Pending Offload Workqueue\n");

	INIT_LIST_HEAD(&pom->pending_offloads);
	spin_lock_init(&pom->lock);

	pom->workqueue = create_singlethread_workqueue("hwpa_nss_pending_offload_manager");
	if (!pom->workqueue) {
		retval = HWPA_BACKEND_ERR_INTERNAL;
		goto failure_1;
	}

	INIT_DELAYED_WORK(&pom->work,
			  hwpa_nss_pending_offload_manager_work);

	queue_delayed_work(pom->workqueue, &pom->work,
			   HWPA_NSS_PENDING_OFFLOAD_PERIOD);

	return HWPA_BACKEND_SUCCESS;

failure_1:
	return retval;
}

/**
 * @fn void hwpa_nss_pending_offload_manager_exit()
 * @brief exits offload manager by stopping assigned workqueue
 *
 */
static void hwpa_nss_pending_offload_manager_exit(void)
{
	struct hwpa_nss_context *global_ctx = &hwpa_nss_ctx;
	struct hwpa_nss_pending_offload_manager *pom = &global_ctx->pending_offload_mgr;

	PR_DEVEL("Exit Pending Offload Manager\n");

	cancel_delayed_work_sync(&pom->work);
	destroy_workqueue(pom->workqueue);
}

/*
 *===============================================================================
 * hwpa nss offloading session classification
 *==============================================================================
 */

/**
 * @enum hwpa_nss_classification_result_value
 * @brief The result of a classification of a new nss session
 */
enum hwpa_nss_classification_result_value {
	// ignore the new nss session
	HWPA_NSS_CLASS_RES_NEW_IGNORE		= 0,
	// offload the new nss session
	HWPA_NSS_CLASS_RES_NEW_OFFLOAD		= 1,
	// queue the new session to the pending offload manager
	HWPA_NSS_CLASS_RES_NEW_QUEUE		= 2,
	// just attach the new hwpa session to an existing nss session
	HWPA_NSS_CLASS_RES_EST_ATTACH		= 3,
	// an established session is pending in the pom; offload the new nss session
	HWPA_NSS_CLASS_RES_EST_OFFLOAD_NEW	= 4,
	// an established session is pending in the pom; offload that one instead
	HWPA_NSS_CLASS_RES_EST_OFFLOAD_OLD	= 5,
	HWPA_NSS_CLASS_RES_MAX			= 6,
};

#ifdef HWPA_NSS_DEBUG
static const char * const hwpa_nss_class_val_strings[] = {
	"Ignore new session",
	"offload new session",
	"queue new session",
	"attach new session to established session",
	"found established session and offload new",
	"found established session and offload it",
	"error"
};
#endif

/**
 * @struct hwpa_nss_classification_result
 * @brief the result of a classification, containing a value and some information
 * for special offload scenarios
 */
struct hwpa_nss_classification_result {
	enum hwpa_nss_classification_result_value value;
	struct hwpa_nss_offloading_data *ofl_data_established;
	struct hwpa_nss_nss_session	*hws_nss_established;
	uint32_t hws_new_hash;
};

/**
 * @fn void hwpa_nss_classify(struct hwpa_nss_offloading_data*, struct hwpa_nss_classification_result*)
 * @brief links nss and hwpa session and decides whether to delay, offload or drop.
 *
 * @param ofl_data [in] offloading data
 * @param res [out] the result of the classification
 */
static void hwpa_nss_classify(struct hwpa_nss_offloading_data *ofl_data,
					      struct hwpa_nss_classification_result *res)
{
	uint32_t hash;
	struct hwpa_nss_nss_session *hws_nss_new, *hws_nss_established;
	struct hwpa_nss_hwpa_session *hws_hwpa, *hws_hwpa_established;
	struct hwpa_nss_subsystem *subsys;
	enum hwpa_nss_session_direction dir, rev_dir;

	hws_nss_new = ofl_data->hws_nss;
	hws_hwpa = ofl_data->hws_hwpa;
	subsys = hws_nss_new->offloader->subsys;

	if (ofl_data->ct_dir == GENERIC_CT_DIR_REPLY) {
		dir = HWPA_NSS_SESSION_DIRECTION_RETURN;
		rev_dir = HWPA_NSS_SESSION_DIRECTION_FLOW;
	} else {
		/* by default we always end up in flow direction */
		dir = HWPA_NSS_SESSION_DIRECTION_FLOW;
		rev_dir = HWPA_NSS_SESSION_DIRECTION_RETURN;
	}

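	/*
	 * The hash is computed once from the prepared tuple and reused for
	 * both the duplicate lookup and the reverse-direction lookup below.
	 */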
	hash = subsys->gen_hash(hws_nss_new);
	res->value = HWPA_NSS_CLASS_RES_MAX;

	/*
	 * There is a (low) chance that avm_pa offloads the same session twice.
	 * This gets handled here: in that case we just add a hwpa session and
	 * clean up.
	 */
	hws_nss_established = subsys->find_nss_session(subsys, hash, hws_nss_new, HWPA_NSS_SESSION_DIRECTION_FLOW);
	if (unlikely(hws_nss_established)) {
		switch (hws_nss_established->state) {
		case HWPA_NSS_SESSION_STATE_ACTIVE:
		case HWPA_NSS_SESSION_STATE_PENDING_APPROVAL:
			hws_hwpa->direction = dir;
			res->hws_nss_established = hws_nss_established;
			res->value = HWPA_NSS_CLASS_RES_EST_ATTACH;
			break;
		default:
			res->value = HWPA_NSS_CLASS_RES_NEW_IGNORE;
			break;
		}
		goto classification_done;
	}

	/*
	 * Handle bidirectional sessions here, if the subsystem uses them.
	 */
	if (test_bit(HWPA_NSS_SUBSYS_FLAG_BIDIRECTIONAL_SESSIONS, &subsys->flags)) {
		hws_nss_established = subsys->find_nss_session(subsys, hash, hws_nss_new, HWPA_NSS_SESSION_DIRECTION_RETURN);

		/*
		 * If we want to offload a return-direction flow, we offload
		 * the corresponding flow-direction flow.
		 */
		if (hws_nss_established) {
			switch (hws_nss_established->state) {
			case HWPA_NSS_SESSION_STATE_ACTIVE:
				hws_hwpa->direction = rev_dir;
				res->hws_nss_established = hws_nss_established;
				res->value = HWPA_NSS_CLASS_RES_EST_ATTACH;
				break;
			case HWPA_NSS_SESSION_STATE_PENDING_APPROVAL:
				res->hws_nss_established = hws_nss_established;
				res->ofl_data_established = hwpa_nss_pom_get_and_unregister_offloading_data(hws_nss_established);
				/* In case we lose the race to pom just do not offload the new session */
				if (!res->ofl_data_established) {
					res->value = HWPA_NSS_CLASS_RES_NEW_IGNORE;
					break;
				}
				if (dir == HWPA_NSS_SESSION_DIRECTION_FLOW) {
					res->value = HWPA_NSS_CLASS_RES_EST_OFFLOAD_NEW;
					hws_hwpa->direction = dir;
					list_for_each_entry(hws_hwpa_established, &hws_nss_established->hwpa_session_list, node) {
						hws_hwpa_established->direction = rev_dir;
					}
				} else {
					res->value = HWPA_NSS_CLASS_RES_EST_OFFLOAD_OLD;
					hws_hwpa->direction = rev_dir;
					list_for_each_entry(hws_hwpa_established, &hws_nss_established->hwpa_session_list, node) {
						hws_hwpa_established->direction = rev_dir;
					}
				}
				/* See JZ-111944 --> we do not want to offload this case */
				if (res->ofl_data_established->is_routed != ofl_data->is_routed) {
					PR_DEVEL("Not offloading a session where we have routing and bridging at the same time\n");
					res->value = HWPA_NSS_CLASS_RES_NEW_IGNORE;
				}
				break;
			default:
				res->value = HWPA_NSS_CLASS_RES_NEW_IGNORE;
				break;
			}
			goto classification_done;
		}
	}

	PR_DEVEL("Session not accelerated or pending yet!\n");

	hws_hwpa->direction = dir;
	if (test_bit(HWPA_NSS_SUBSYS_FLAG_BIDIRECTIONAL_SESSIONS, &subsys->flags)) {
		hws_nss_new->state = HWPA_NSS_SESSION_STATE_PENDING_APPROVAL;
		res->value = HWPA_NSS_CLASS_RES_NEW_QUEUE;
	} else {
		hws_nss_new->state = HWPA_NSS_SESSION_STATE_READY_TO_OFFLOAD;
		res->value = HWPA_NSS_CLASS_RES_NEW_OFFLOAD;
	}

	res->hws_new_hash = hash;

classification_done:
	PR_DEVEL("Classification finished with value %d [%s]!\n", res->value, hwpa_nss_class_val_strings[res->value]);
}

/*
 *===============================================================================
 * hwpa nss offloading session creation
 *==============================================================================
 */

/**
 * @fn void hwpa_nss_connection_create(struct hwpa_nss_nss_session*, struct nss_cmn_msg*)
 * @brief Protocol independent part of subsystem session creation
 *
 * @param hws_nss [in] NSS session just offloaded
 * @param cm [in] common message part of answer from nss
 */
static void hwpa_nss_connection_create(struct hwpa_nss_nss_session *hws_nss, struct nss_cmn_msg *cm)
{
	if (cm->response != NSS_CMN_RESPONSE_ACK) {
		pr_err("An Error occurred creating NSS connection acceleration\n");
		pr_err("Response is: %d, error code is: %d\n", (int) cm->response, cm->error);
		hws_nss->state = HWPA_NSS_SESSION_STATE_INVALID;
	} else	{
		hws_nss->state = HWPA_NSS_SESSION_STATE_ACTIVE;
	}
}

/**
 * @fn void hwpa_nss_ipv4_connection_create_callback(void*, struct nss_ipv4_msg*)
 * @brief Callback for ipv4 subsystem session creation in nss
 *
 * @param app_data [in] application specific data
 * @param nim [in] Answer from NSS after Offloading attempt
 */
static void hwpa_nss_ipv4_connection_create_callback(void *app_data, struct nss_ipv4_msg *nim)
{
	struct hwpa_nss_nss_session *hws_nss = (struct hwpa_nss_nss_session *) app_data;

	if (nim->cm.type != NSS_IPV4_TX_CREATE_RULE_MSG) {
		pr_err("%p: create callback with improper type: %d\n",
		       app_data, nim->cm.type);
		return;
	}

	hwpa_nss_connection_create(hws_nss, &nim->cm);
}

/**
 * @fn void hwpa_nss_ipv6_connection_create_callback(void*, struct nss_ipv6_msg*)
 * @brief Callback for ipv6 subsystem session creation in nss
 *
 * @param app_data [in] application specific data
 * @param nim [in] Answer from NSS after Offloading attempt
 */
static void hwpa_nss_ipv6_connection_create_callback(void *app_data, struct nss_ipv6_msg *nim)
{
	struct hwpa_nss_nss_session *hws_nss = (struct hwpa_nss_nss_session *) app_data;

	if (nim->cm.type != NSS_IPV6_TX_CREATE_RULE_MSG) {
		pr_err("%p: create callback with improper type: %d\n",
		       app_data, nim->cm.type);
		return;
	}

	hwpa_nss_connection_create(hws_nss, &nim->cm);
}

/**
 * @fn enum hwpa_backend_rv hwpa_nss_ipv4_add_session(struct hwpa_nss_offloading_data *)
 * @brief Translate an ipv4 avm_pa session to a nss rule and perform the actual offload
 *
 * @param ofl_data [in] offloading data
 *
 * @return success or error code
 */
enum hwpa_backend_rv hwpa_nss_ipv4_add_session(struct hwpa_nss_offloading_data *ofl_data)
{
	struct nss_ipv4_msg *create_msg;
	struct nss_ipv4_rule_create_msg *nircm;
	struct hwpa_nss_subsystem *subsys;
	const struct avm_pa_session *sess_pa;
	struct hwpa_nss_nss_session *hws_nss;
	int retval;
#if defined(CONFIG_ARCH_IPQ8074)
	struct hwpa_nss_if_data *flow_outermost, *return_outermost;
	struct net_device *dev;
#endif
	const struct avm_pa_egress *eg;
	const struct avm_pa_pkt_match *ig_match, *eg_match;
	struct nss_ipv4_src_mac_rule *smr;
	int i;
	int if_index;
	/* VLAN tag counters; must persist across the match loops below */
	int vlan_in_cnt = 0;
	int vlan_out_cnt = 0;
	bool ingress_has_l2_info = false;

	PR_DEVEL("Adding IPv4 session\n");

	sess_pa = ofl_data->sess_pa;
	hws_nss = ofl_data->hws_nss;
	ig_match = ofl_data->ig_match;
	eg_match = ofl_data->eg_match;
	eg = ofl_data->eg;
	subsys = hws_nss->offloader->subsys;

	create_msg = kzalloc(sizeof(struct nss_ipv4_msg), GFP_KERNEL);
	if (!create_msg) {
		retval = HWPA_BACKEND_ERR_MEMORY;
		goto failure_1;
	}

	/*
	 * Prepare message for sending to NSS. No return value.
	 */
	nss_ipv4_msg_init(create_msg, NSS_IPV4_RX_INTERFACE,
				   NSS_IPV4_TX_CREATE_RULE_MSG,
			sizeof(struct nss_ipv4_rule_create_msg),
			hwpa_nss_ipv4_connection_create_callback, hws_nss);

	/*
	 * Edit message to our needs
	 */
	nircm = &create_msg->msg.rule_create;
	nircm->valid_flags = 0;
	nircm->rule_flags = 0;

	/*
	 * VLAN init
	 */
	nircm->vlan_primary_rule.ingress_vlan_tag = HWPA_NSS_VLAN_ID_NOT_CONFIGURED;
	nircm->vlan_primary_rule.egress_vlan_tag = HWPA_NSS_VLAN_ID_NOT_CONFIGURED;
	nircm->vlan_secondary_rule.ingress_vlan_tag = HWPA_NSS_VLAN_ID_NOT_CONFIGURED;
	nircm->vlan_secondary_rule.egress_vlan_tag = HWPA_NSS_VLAN_ID_NOT_CONFIGURED;

	/*
	 * nexthop (Gateway)
	 */
	nircm->nexthop_rule.flow_nexthop = ofl_data->interfaces[HWPA_NSS_SESSION_DIRECTION_FLOW][0].ifnum;
	nircm->nexthop_rule.return_nexthop = ofl_data->interfaces[HWPA_NSS_SESSION_DIRECTION_RETURN][0].ifnum;
	nircm->valid_flags |= NSS_IPV4_RULE_CREATE_NEXTHOP_VALID;

	/*
	 * used interfaces
	 */
	if_index = ofl_data->if_max_indices[HWPA_NSS_SESSION_DIRECTION_FLOW] - 1;
	nircm->conn_rule.flow_interface_num = ofl_data->interfaces[HWPA_NSS_SESSION_DIRECTION_FLOW][if_index].ifnum;
	if_index = ofl_data->if_max_indices[HWPA_NSS_SESSION_DIRECTION_RETURN] - 1;
	nircm->conn_rule.return_interface_num = ofl_data->interfaces[HWPA_NSS_SESSION_DIRECTION_RETURN][if_index].ifnum;

	/*
	 * Set the mtu values.
	 */
	nircm->conn_rule.flow_mtu = eg->mtu;
	nircm->conn_rule.return_mtu = eg->mtu;

	smr = &nircm->src_mac_rule;

	/*
	 * Iterate over the ingress and egress headers to configure the
	 * offloading message. Error checks are not needed here as we only
	 * accept supported sessions via whitelist; others should not even get
	 * this far.
	 */
	for (i = 0; i < ig_match->nmatch; ++i) {
		const struct avm_pa_match_info *p = &ig_match->match[i];
		const void *hdr = &ig_match->hdrcopy[p->offset + ig_match->hdroff];

		switch (p->type) {
		case AVM_PA_ETH:	{
			const struct ethhdr *ethh = hdr;

			ether_addr_copy((u8 *) nircm->conn_rule.flow_mac,
					(u8 *) ethh->h_source);
			ether_addr_copy((u8 *) smr->flow_src_mac,
					(u8 *) ethh->h_dest);
			ingress_has_l2_info = true;
			break;
		}

		/* Already handled during preparation */
		case AVM_PA_IPV4:
		case AVM_PA_PORTS:
			break;

		case AVM_PA_PPPOE:	{
			const struct pppoe_hdr *ppph = hdr;

			nircm->pppoe_rule.flow_if_num = ppph->sid;
			nircm->pppoe_rule.flow_if_exist = 1;
			nircm->valid_flags |= NSS_IPV4_RULE_CREATE_PPPOE_VALID;
			break;
		}

		case AVM_PA_VLAN:	{
			uint32_t vlan_value;

			if (ofl_data->is_routed || vlan_in_cnt > 1) {
				retval = HWPA_BACKEND_ERR_BAD_VLAN;
				goto failure_2;
			}

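			/*
			 * NSS encodes a VLAN tag as one 32-bit value: the TPID
			 * (vlan_proto) in the upper 16 bits and the TCI in the
			 * lower 16 bits.
			 */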
			if (p->offset != AVM_PA_OFFSET_NOT_SET) {
				/* VLAN Header needs offset correction by 2 bytes due to no ethertype */
				const void *vlanh = &ig_match->hdrcopy[p->offset + ig_match->hdroff - 2];

				vlan_value = htonl(*((uint32_t *) vlanh));
			} else {
				vlan_value = ((htons(ig_match->vlan_proto)) << 16) | (ig_match->vlan_tci);
			}

			if (vlan_in_cnt == 0) {
				nircm->vlan_primary_rule.ingress_vlan_tag = vlan_value;
			} else {
				nircm->vlan_secondary_rule.ingress_vlan_tag = vlan_value;
			}

			nircm->valid_flags |= NSS_IPV4_RULE_CREATE_VLAN_VALID;
			vlan_in_cnt++;
			break;
		}

		case AVM_PA_IPV6:
			PR_DEVEL("IPV6 in IPV4 not implemented");
		default:
			retval = HWPA_BACKEND_ERR_BAD_MATCH;
			goto failure_2;
		}
	}

	for (i = 0; i < eg_match->nmatch; ++i) {
		const struct avm_pa_match_info *p = &eg_match->match[i];
		const void *hdr = &eg_match->hdrcopy[p->offset + eg_match->hdroff];

		PR_DEVEL("egress %i type %x offset %x\n", i, p->type, p->offset);

		switch (p->type) {
		case AVM_PA_ETH:	{
			const struct ethhdr *ethh = hdr;

			ether_addr_copy((u8 *) nircm->conn_rule.return_mac,
					(u8 *) ethh->h_dest);
			ether_addr_copy((u8 *) smr->return_src_mac,
					(u8 *) ethh->h_source);

			if (!ingress_has_l2_info) {
				ether_addr_copy((u8 *) nircm->conn_rule.flow_mac,
					(u8 *) ethh->h_source);
			}

			break;
		}

		/* Already handled during preparation */
		case AVM_PA_IPV4:
		case AVM_PA_PORTS:
			break;

		case AVM_PA_PPPOE:	{
			const struct pppoe_hdr *ppph = hdr;

			nircm->pppoe_rule.return_if_num = ppph->sid;
			nircm->pppoe_rule.return_if_exist = 1;
			nircm->valid_flags |= NSS_IPV4_RULE_CREATE_PPPOE_VALID;
			break;
		}

		case AVM_PA_VLAN:	{
			uint32_t vlan_value;

			if (ofl_data->is_routed || vlan_out_cnt > 1) {
				retval = HWPA_BACKEND_ERR_BAD_VLAN;
				goto failure_2;
			}

			if (p->offset != AVM_PA_OFFSET_NOT_SET) {
				/* VLAN Header needs offset correction by 2 bytes due to no ethertype */
				const void *vlanh = &eg_match->hdrcopy[p->offset + eg_match->hdroff - 2];

				vlan_value = htonl(*((uint32_t *) vlanh));
			} else {
				vlan_value = ((htons(eg_match->vlan_proto)) << 16) | (eg_match->vlan_tci);
			}

			if (vlan_out_cnt == 0) {
				nircm->vlan_primary_rule.egress_vlan_tag = vlan_value;
			} else {
				nircm->vlan_secondary_rule.egress_vlan_tag = vlan_value;
			}

			nircm->valid_flags |= NSS_IPV4_RULE_CREATE_VLAN_VALID;
			vlan_out_cnt++;
			break;
		}

		case AVM_PA_IPV6:
			PR_DEVEL("IPV6 in IPV4 not implemented");
		default:
			retval = HWPA_BACKEND_ERR_BAD_MATCH;
			goto failure_2;
		}
	}

	nircm->qos_rule.flow_qos_tag = eg->output.priority & HWPA_NSS_QOS_TAG_MASK;
	nircm->qos_rule.return_qos_tag = eg->output.priority & HWPA_NSS_QOS_TAG_MASK;
	nircm->valid_flags |= NSS_IPV4_RULE_CREATE_QOS_VALID;

	/*
	 * src_mac_rule: request source-MAC rewriting for a direction only when
	 * the source MAC actually changes between ingress and egress.
	 */
	if (!ether_addr_equal((u8 *) nircm->conn_rule.flow_mac,
				(u8 *) smr->return_src_mac)) {
		smr->mac_valid_flags |= NSS_IPV4_SRC_MAC_FLOW_VALID;
		nircm->valid_flags |= NSS_IPV4_RULE_CREATE_SRC_MAC_VALID;
	}

	if (!ether_addr_equal((u8 *) nircm->conn_rule.return_mac,
				(u8 *) smr->flow_src_mac)) {
		smr->mac_valid_flags |= NSS_IPV4_SRC_MAC_RETURN_VALID;
		nircm->valid_flags |= NSS_IPV4_RULE_CREATE_SRC_MAC_VALID;
	}
#if defined(CONFIG_ARCH_IPQ8074)
	flow_outermost = &ofl_data->interfaces[HWPA_NSS_SESSION_DIRECTION_FLOW][0];
	dev = nss_cmn_get_interface_dev(subsys->mgr, flow_outermost->ifnum);
	if (dev && (!ether_addr_equal((u8 *) dev->dev_addr, (u8 *) smr->flow_src_mac))
			&& (flow_outermost->ifnum < NSS_MAX_PHYSICAL_INTERFACES)
			&& (ofl_data->is_routed)) {
		/*
		 * AVM/TLG: different source mac addresses are not supported
		 *          by ppe yet, see JZ-117155
		 */
		PR_DEVEL("mac-address %pM differs to %pM from interface %d - could not offload"
				" the stream",
				dev->dev_addr, smr->flow_src_mac, flow_outermost->ifnum);

		retval = HWPA_BACKEND_ERR_INTERNAL;
		goto failure_2;
	}

	return_outermost = &ofl_data->interfaces[HWPA_NSS_SESSION_DIRECTION_RETURN][0];
	dev = nss_cmn_get_interface_dev(subsys->mgr, return_outermost->ifnum);
	if (dev && (!ether_addr_equal((u8 *) dev->dev_addr, (u8 *) smr->return_src_mac))
			&& (return_outermost->ifnum < NSS_MAX_PHYSICAL_INTERFACES)
			&& (ofl_data->is_routed)) {
		/*
		 * AVM/TLG: different source mac addresses are not supported
		 *          by ppe yet, see JZ-117155
		 */
		PR_DEVEL("mac-address %pM differs to %pM from interface %d - could not offload"
				" the stream",
				dev->dev_addr, smr->return_src_mac, return_outermost->ifnum);

		retval = HWPA_BACKEND_ERR_INTERNAL;
		goto failure_2;
	}
#endif /* defined(CONFIG_ARCH_IPQ8074) */
	/*
	 * Routed or bridged?
	 */
	if (ofl_data->is_routed)
		nircm->rule_flags |= NSS_IPV4_RULE_CREATE_FLAG_ROUTED;
	else
		nircm->rule_flags |= NSS_IPV4_RULE_CREATE_FLAG_BRIDGE_FLOW;

	/*
	 * Configure the IP-5-Tuple
	 * This is the central configuration
	 */
	nircm->tuple = hws_nss->ipv4.tuple;
	nircm->conn_rule.flow_ip_xlate = hws_nss->ipv4.flow_ip_xlate;
	nircm->conn_rule.flow_ident_xlate = hws_nss->ipv4.flow_ident_xlate;
	nircm->conn_rule.return_ip_xlate = hws_nss->ipv4.return_ip_xlate;
	nircm->conn_rule.return_ident_xlate = hws_nss->ipv4.return_ident_xlate;

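	/*
	 * No TCP window data is filled into tcp_rule, so sequence-number
	 * checking is disabled for TCP sessions.
	 */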
	if (nircm->tuple.protocol == IPPROTO_TCP) {
		nircm->rule_flags |= NSS_IPV4_RULE_CREATE_FLAG_NO_SEQ_CHECK;
		nircm->valid_flags |= NSS_IPV4_RULE_CREATE_TCP_VALID;
	}

	nircm->valid_flags |= NSS_IPV4_RULE_CREATE_CONN_VALID;
	nircm->rule_flags |= NSS_IPV4_RULE_CREATE_FLAG_SRC_INTERFACE_CHECK;

	PR_DEVEL("%p: Accelerate Session\n"
			"Protocol: %d\n"
			"from_mtu: %u\n"
			"to_mtu: %u\n"
			"from_ip: %pI4h:%d\n"
			"to_ip: %pI4h:%d\n"
			"from_ip_xlate: %pI4h:%d\n"
			"to_ip_xlate: %pI4h:%d\n"
			"from_mac: %pM\n"
			"to_mac: %pM\n"
			"src_iface_num: %u\n"
			"dest_iface_num: %u\n"
			"src_nexthop_num: %u\n"
			"dest_nexthop_num: %u\n"
			"mac_valid_flags: %x\n"
			"flow_src_mac: %pM\n"
			"return_src_mac: %pM\n"
			"ingress_inner_vlan_tag: %x\n"
			"egress_inner_vlan_tag: %x\n"
			"ingress_outer_vlan_tag: %x\n"
			"egress_outer_vlan_tag: %x\n"
			"rule_flags: %x\n"
			"valid_flags: %x\n"
			"pppoe_return_if_exist: %u\n"
			"pppoe_return_if_num: %u\n"
			"pppoe_flow_if_exist: %u\n"
			"pppoe_flow_if_num: %u\n"
			"flow_qos_tag: %x (%u)\n"
			"return_qos_tag: %x (%u)\n"
			"igs_flow_qos_tag: %x (%u)\n"
			"igs_return_qos_tag: %x (%u)\n"
			"flow_window_scale: %u\n"
			"flow_max_window: %u\n"
			"flow_end: %u\n"
			"flow_max_end: %u\n"
			"return_window_scale: %u\n"
			"return_max_window: %u\n"
			"return_end: %u\n"
			"return_max_end: %u\n"
			"flow_dscp: %x\n"
			"return_dscp: %x\n-------------\n",
			hws_nss,
			nircm->tuple.protocol,
			nircm->conn_rule.flow_mtu,
			nircm->conn_rule.return_mtu,
			&nircm->tuple.flow_ip, nircm->tuple.flow_ident,
			&nircm->tuple.return_ip, nircm->tuple.return_ident,
			&nircm->conn_rule.flow_ip_xlate, nircm->conn_rule.flow_ident_xlate,
			&nircm->conn_rule.return_ip_xlate, nircm->conn_rule.return_ident_xlate,
			nircm->conn_rule.flow_mac,
			nircm->conn_rule.return_mac,
			nircm->conn_rule.flow_interface_num,
			nircm->conn_rule.return_interface_num,
			nircm->nexthop_rule.flow_nexthop,
			nircm->nexthop_rule.return_nexthop,
			nircm->src_mac_rule.mac_valid_flags,
			nircm->src_mac_rule.flow_src_mac,
			nircm->src_mac_rule.return_src_mac,
			nircm->vlan_primary_rule.ingress_vlan_tag,
			nircm->vlan_primary_rule.egress_vlan_tag,
			nircm->vlan_secondary_rule.ingress_vlan_tag,
			nircm->vlan_secondary_rule.egress_vlan_tag,
			nircm->rule_flags,
			nircm->valid_flags,
			nircm->pppoe_rule.return_if_exist,
			nircm->pppoe_rule.return_if_num,
			nircm->pppoe_rule.flow_if_exist,
			nircm->pppoe_rule.flow_if_num,
			nircm->qos_rule.flow_qos_tag, nircm->qos_rule.flow_qos_tag,
			nircm->qos_rule.return_qos_tag, nircm->qos_rule.return_qos_tag,
			nircm->igs_rule.igs_flow_qos_tag, nircm->igs_rule.igs_flow_qos_tag,
			nircm->igs_rule.igs_return_qos_tag, nircm->igs_rule.igs_return_qos_tag,
			nircm->tcp_rule.flow_window_scale,
			nircm->tcp_rule.flow_max_window,
			nircm->tcp_rule.flow_end,
			nircm->tcp_rule.flow_max_end,
			nircm->tcp_rule.return_window_scale,
			nircm->tcp_rule.return_max_window,
			nircm->tcp_rule.return_end,
			nircm->tcp_rule.return_max_end,
			nircm->dscp_rule.flow_dscp,
			nircm->dscp_rule.return_dscp);

	/*
	 * Send message for rule creation
	 */
	retval = nss_ipv4_tx_sync(subsys->mgr, create_msg);
	if (retval != NSS_TX_SUCCESS) {
		pr_err("Session could not be created\n");
		retval = HWPA_BACKEND_ERR_SESS_CREATE;
		goto failure_2;
	}

	retval = HWPA_BACKEND_SUCCESS;

failure_2:
	kfree(create_msg);

failure_1:
	return retval;
}

/**
 * @fn enum hwpa_backend_rv hwpa_nss_ipv6_add_session(struct hwpa_nss_offloading_data *)
 * @brief Translate an ipv6 avm_pa session to a nss rule and perform the actual offload
 *
 * @param ofl_data [in] offloading data
 *
 * @return success or error code
 */
enum hwpa_backend_rv hwpa_nss_ipv6_add_session(struct hwpa_nss_offloading_data *ofl_data)
{
	struct nss_ipv6_msg *create_msg;
	struct nss_ipv6_rule_create_msg *nircm;
	struct hwpa_nss_subsystem *subsys;
	const struct avm_pa_session *s;
	struct hwpa_nss_nss_session *hws_nss;
	int retval;
#if defined(CONFIG_ARCH_IPQ8074)
	struct hwpa_nss_if_data *flow_outermost, *return_outermost;
	struct net_device *dev;
#endif
	const struct avm_pa_egress *eg;
	const struct avm_pa_pkt_match *ig_match, *eg_match;
	struct nss_ipv6_src_mac_rule *smr;
	int i;
	int if_index;
	/* VLAN tag counters; must persist across the match loops below */
	int vlan_in_cnt = 0;
	int vlan_out_cnt = 0;
	bool ingress_has_l2_info = false;

	PR_DEVEL("Adding IPv6 session\n");

	s = ofl_data->sess_pa;
	hws_nss = ofl_data->hws_nss;
	ig_match = ofl_data->ig_match;
	eg_match = ofl_data->eg_match;
	eg = ofl_data->eg;
	subsys = hws_nss->offloader->subsys;

	create_msg = kzalloc(sizeof(struct nss_ipv6_msg), GFP_KERNEL);
	if (!create_msg) {
		retval = HWPA_BACKEND_ERR_MEMORY;
		goto failure_1;
	}

	/*
	 * Prepare message for sending to NSS. No return value.
	 */
	nss_ipv6_msg_init(create_msg, NSS_IPV6_RX_INTERFACE,
				   NSS_IPV6_TX_CREATE_RULE_MSG,
			sizeof(struct nss_ipv6_rule_create_msg),
			hwpa_nss_ipv6_connection_create_callback, hws_nss);

	/*
	 * Edit message to our needs
	 */
	nircm = &create_msg->msg.rule_create;
	nircm->valid_flags = 0;
	nircm->rule_flags = 0;

	/*
	 * VLAN init
	 */
	nircm->vlan_primary_rule.ingress_vlan_tag = HWPA_NSS_VLAN_ID_NOT_CONFIGURED;
	nircm->vlan_primary_rule.egress_vlan_tag = HWPA_NSS_VLAN_ID_NOT_CONFIGURED;
	nircm->vlan_secondary_rule.ingress_vlan_tag = HWPA_NSS_VLAN_ID_NOT_CONFIGURED;
	nircm->vlan_secondary_rule.egress_vlan_tag = HWPA_NSS_VLAN_ID_NOT_CONFIGURED;

	/*
	 * nexthop (Gateway)
	 */
	nircm->nexthop_rule.flow_nexthop = ofl_data->interfaces[HWPA_NSS_SESSION_DIRECTION_FLOW][0].ifnum;
	nircm->nexthop_rule.return_nexthop = ofl_data->interfaces[HWPA_NSS_SESSION_DIRECTION_RETURN][0].ifnum;
	nircm->valid_flags |= NSS_IPV6_RULE_CREATE_NEXTHOP_VALID;

	/*
	 * used interfaces
	 */
	if_index = ofl_data->if_max_indices[HWPA_NSS_SESSION_DIRECTION_FLOW] - 1;
	nircm->conn_rule.flow_interface_num = ofl_data->interfaces[HWPA_NSS_SESSION_DIRECTION_FLOW][if_index].ifnum;
	if_index = ofl_data->if_max_indices[HWPA_NSS_SESSION_DIRECTION_RETURN] - 1;
	nircm->conn_rule.return_interface_num = ofl_data->interfaces[HWPA_NSS_SESSION_DIRECTION_RETURN][if_index].ifnum;

	/*
	 * Set the mtu values.
	 */
	nircm->conn_rule.flow_mtu = eg->mtu;
	nircm->conn_rule.return_mtu = eg->mtu;

	smr = &nircm->src_mac_rule;
	/*
	 * Iterate over the ingress and egress headers to configure the
	 * offloading message. Error checks are not needed here as we only
	 * accept supported sessions via whitelist; others should not even get
	 * this far.
	 */
	for (i = 0; i < ig_match->nmatch; ++i) {
		const struct avm_pa_match_info *p = &ig_match->match[i];
		const void *hdr = &ig_match->hdrcopy[p->offset + ig_match->hdroff];

		PR_DEVEL("ingress %i type %x offset %x\n", i, p->type, p->offset);

		switch (p->type) {
		case AVM_PA_ETH:	{/* TODO Special Treatment for Bridges? */
			const struct ethhdr *ethh = hdr;

			ether_addr_copy((u8 *) nircm->conn_rule.flow_mac,
					(u8 *) ethh->h_source);
			ether_addr_copy((u8 *) smr->flow_src_mac,
					(u8 *) ethh->h_dest);
			ingress_has_l2_info = true;
			break;
		}

		/* Already handled during preparation */
		case AVM_PA_IPV6:
		case AVM_PA_PORTS:
			break;

		case AVM_PA_PPPOE:	{
			const struct pppoe_hdr *ppph = hdr;

			nircm->pppoe_rule.flow_if_num = ppph->sid;
			nircm->pppoe_rule.flow_if_exist = 1;
			nircm->valid_flags |= NSS_IPV6_RULE_CREATE_PPPOE_VALID;
			break;
		}

		case AVM_PA_VLAN:	{
			uint32_t vlan_value;

			if (ofl_data->is_routed || vlan_in_cnt > 1) {
				retval = HWPA_BACKEND_ERR_BAD_VLAN;
				goto failure_2;
			}

			if (p->offset != AVM_PA_OFFSET_NOT_SET) {
				/* VLAN Header needs offset correction by 2 bytes due to no ethertype */
				const void *vlanh = &ig_match->hdrcopy[p->offset + eg_match->hdroff - 2];

				vlan_value = htonl(*((uint32_t *) vlanh));
			} else {
				vlan_value = ((htons(ig_match->vlan_proto)) << 16) | (ig_match->vlan_tci);
			}

			if (vlan_in_cnt == 0) {
				nircm->vlan_primary_rule.ingress_vlan_tag = vlan_value;
			} else {
				nircm->vlan_secondary_rule.ingress_vlan_tag = vlan_value;
			}

			nircm->valid_flags |= NSS_IPV6_RULE_CREATE_VLAN_VALID;
			vlan_in_cnt++;
			break;
		}

		case AVM_PA_IPV4:
			PR_DEVEL("IPV4 in IPV6 not implemented");
		default:
			retval = HWPA_BACKEND_ERR_BAD_MATCH;
			goto failure_2;
		}
	}

	for (i = 0; i < eg_match->nmatch; ++i) {
		const struct avm_pa_match_info *p = &eg_match->match[i];
		const void *hdr = &eg_match->hdrcopy[p->offset + eg_match->hdroff];

		PR_DEVEL("egress %i type %x offset %x\n", i, p->type, p->offset);

		switch (p->type) {
		case AVM_PA_ETH:	{/* TODO Special Treatment for Bridges? */
			const struct ethhdr *ethh = hdr;

			ether_addr_copy((u8 *) nircm->conn_rule.return_mac,
					(u8 *) ethh->h_dest);
			ether_addr_copy((u8 *) smr->return_src_mac,
					(u8 *) ethh->h_source);

			if (!ingress_has_l2_info) {
				ether_addr_copy((u8 *) nircm->conn_rule.flow_mac,
					(u8 *) ethh->h_source);
			}

			break;
		}

		/* Already handled during preparation */
		case AVM_PA_IPV6:
		case AVM_PA_PORTS:
			break;

		case AVM_PA_PPPOE:	{
			const struct pppoe_hdr *ppph = hdr;

			nircm->pppoe_rule.return_if_num = ppph->sid;
			nircm->pppoe_rule.return_if_exist = 1;
			nircm->valid_flags |= NSS_IPV6_RULE_CREATE_PPPOE_VALID;
			break;
		}

		case AVM_PA_VLAN:	{
			uint32_t vlan_value;

			if (ofl_data->is_routed || vlan_out_cnt > 1) {
				retval = HWPA_BACKEND_ERR_BAD_VLAN;
				goto failure_2;
			}

			if (p->offset != AVM_PA_OFFSET_NOT_SET) {
				/* VLAN Header needs offset correction by 2 bytes due to no ethertype */
				const void *vlanh = &eg_match->hdrcopy[p->offset + eg_match->hdroff - 2];

				vlan_value = htonl(*((uint32_t *) vlanh));
			} else {
				vlan_value = ((htons(eg_match->vlan_proto)) << 16) | (eg_match->vlan_tci);
			}

			if (vlan_out_cnt == 0) {
				nircm->vlan_primary_rule.egress_vlan_tag = vlan_value;
			} else {
				nircm->vlan_secondary_rule.egress_vlan_tag = vlan_value;
			}

			nircm->valid_flags |= NSS_IPV6_RULE_CREATE_VLAN_VALID;
			vlan_out_cnt++;
			break;
		}

		case AVM_PA_IPV4:
			PR_DEVEL("IPV4 in IPV6 not implemented");
		default:
			retval = HWPA_BACKEND_ERR_BAD_MATCH;
			goto failure_2;
		}
	}

	nircm->qos_rule.flow_qos_tag = eg->output.priority & HWPA_NSS_QOS_TAG_MASK;
	nircm->qos_rule.return_qos_tag = eg->output.priority & HWPA_NSS_QOS_TAG_MASK;
	nircm->valid_flags |= NSS_IPV6_RULE_CREATE_QOS_VALID;

	/*
	 * src_mac_rule: request source-MAC rewriting for a direction only when
	 * the source MAC actually changes between ingress and egress.
	 */
	if (!ether_addr_equal((u8 *) nircm->conn_rule.flow_mac,
				(u8 *) smr->return_src_mac)) {
		smr->mac_valid_flags |= NSS_IPV6_SRC_MAC_FLOW_VALID;
		nircm->valid_flags |= NSS_IPV6_RULE_CREATE_SRC_MAC_VALID;
	}

	if (!ether_addr_equal((u8 *) nircm->conn_rule.return_mac,
				(u8 *) smr->flow_src_mac)) {
		smr->mac_valid_flags |= NSS_IPV6_SRC_MAC_RETURN_VALID;
		nircm->valid_flags |= NSS_IPV6_RULE_CREATE_SRC_MAC_VALID;
	}
#if defined(CONFIG_ARCH_IPQ8074)
	flow_outermost = &ofl_data->interfaces[HWPA_NSS_SESSION_DIRECTION_FLOW][0];
	dev = nss_cmn_get_interface_dev(subsys->mgr, flow_outermost->ifnum);
	if (dev && (!ether_addr_equal((u8 *) dev->dev_addr, (u8 *) smr->flow_src_mac))
			&& (flow_outermost->ifnum < NSS_MAX_PHYSICAL_INTERFACES)
			&& (ofl_data->is_routed)) {

		/*
		 * AVM/TLG: different source mac addresses are not supported
		 *          by ppe yet, see JZ-117155
		 */
		PR_DEVEL("mac-address %pM differs to %pM from interface %d - could not offload"
				" the stream",
				dev->dev_addr, smr->flow_src_mac, flow_outermost->ifnum);

		retval = HWPA_BACKEND_ERR_INTERNAL;
		goto failure_2;
	}

	return_outermost = &ofl_data->interfaces[HWPA_NSS_SESSION_DIRECTION_RETURN][0];
	dev = nss_cmn_get_interface_dev(subsys->mgr, return_outermost->ifnum);
	if (dev && (!ether_addr_equal((u8 *) dev->dev_addr, (u8 *) smr->return_src_mac))
			&& (return_outermost->ifnum < NSS_MAX_PHYSICAL_INTERFACES)
			&& (ofl_data->is_routed)) {
		/*
		 * AVM/TLG: different source mac addresses are not supported
		 *          by ppe yet, see JZ-117155
		 */
		PR_DEVEL("mac-address %pM differs to %pM from interface %d - could not offload"
				" the stream",
				dev->dev_addr, smr->return_src_mac, return_outermost->ifnum);

		retval = HWPA_BACKEND_ERR_INTERNAL;
		goto failure_2;
	}
#endif /* defined(CONFIG_ARCH_IPQ8074) */
	/*
	 * Routed or bridged?
	 */
	if (ofl_data->is_routed)
		nircm->rule_flags |= NSS_IPV6_RULE_CREATE_FLAG_ROUTED;
	else
		nircm->rule_flags |= NSS_IPV6_RULE_CREATE_FLAG_BRIDGE_FLOW;

	/*
	 * Configure the IP-5-Tuple
	 * This is the central configuration
	 */
	nircm->tuple = hws_nss->ipv6.tuple;

	if (nircm->tuple.protocol == IPPROTO_TCP) {
		nircm->rule_flags |= NSS_IPV6_RULE_CREATE_FLAG_NO_SEQ_CHECK;
		nircm->valid_flags |= NSS_IPV6_RULE_CREATE_TCP_VALID;
	}

	nircm->valid_flags |= NSS_IPV6_RULE_CREATE_CONN_VALID;
	nircm->rule_flags |= NSS_IPV6_RULE_CREATE_FLAG_SRC_INTERFACE_CHECK;

	PR_DEVEL("%p: Accelerate Session\n"
				"Protocol: %d\n"
				"from_mtu: %u\n"
				"to_mtu: %u\n"
				"from_ip: %pI6h:%d\n"
				"to_ip: %pI6h:%d\n"
				"from_mac: %pM\n"
				"to_mac: %pM\n"
				"src_iface_num: %u\n"
				"dest_iface_num: %u\n"
				"src_nexthop_num: %u\n"
				"dest_nexthop_num: %u\n"
				"mac_valid_flags: %x\n"
				"flow_src_mac: %pM\n"
				"return_src_mac: %pM\n"
				"ingress_inner_vlan_tag: %x\n"
				"egress_inner_vlan_tag: %x\n"
				"ingress_outer_vlan_tag: %x\n"
				"egress_outer_vlan_tag: %x\n"
				"rule_flags: %x\n"
				"valid_flags: %x\n"
				"pppoe_return_if_exist: %u\n"
				"pppoe_return_if_num: %u\n"
				"pppoe_flow_if_exist: %u\n"
				"pppoe_flow_if_num: %u\n"
				"flow_qos_tag: %x (%u)\n"
				"return_qos_tag: %x (%u)\n"
				"igs_flow_qos_tag: %x (%u)\n"
				"igs_return_qos_tag: %x (%u)\n"
				"flow_window_scale: %u\n"
				"flow_max_window: %u\n"
				"flow_end: %u\n"
				"flow_max_end: %u\n"
				"return_window_scale: %u\n"
				"return_max_window: %u\n"
				"return_end: %u\n"
				"return_max_end: %u\n"
				"flow_dscp: %x\n"
				"return_dscp: %x\n",
				hws_nss,
				nircm->tuple.protocol,
				nircm->conn_rule.flow_mtu,
				nircm->conn_rule.return_mtu,
				nircm->tuple.flow_ip, nircm->tuple.flow_ident,
				nircm->tuple.return_ip, nircm->tuple.return_ident,
				nircm->conn_rule.flow_mac,
				nircm->conn_rule.return_mac,
				nircm->conn_rule.flow_interface_num,
				nircm->conn_rule.return_interface_num,
				nircm->nexthop_rule.flow_nexthop,
				nircm->nexthop_rule.return_nexthop,
				nircm->src_mac_rule.mac_valid_flags,
				nircm->src_mac_rule.flow_src_mac,
				nircm->src_mac_rule.return_src_mac,
				nircm->vlan_primary_rule.ingress_vlan_tag,
				nircm->vlan_primary_rule.egress_vlan_tag,
				nircm->vlan_secondary_rule.ingress_vlan_tag,
				nircm->vlan_secondary_rule.egress_vlan_tag,
				nircm->rule_flags,
				nircm->valid_flags,
				nircm->pppoe_rule.return_if_exist,
				nircm->pppoe_rule.return_if_num,
				nircm->pppoe_rule.flow_if_exist,
				nircm->pppoe_rule.flow_if_num,
				nircm->qos_rule.flow_qos_tag, nircm->qos_rule.flow_qos_tag,
				nircm->qos_rule.return_qos_tag, nircm->qos_rule.return_qos_tag,
				nircm->igs_rule.igs_flow_qos_tag, nircm->igs_rule.igs_flow_qos_tag,
				nircm->igs_rule.igs_return_qos_tag, nircm->igs_rule.igs_return_qos_tag,
				nircm->tcp_rule.flow_window_scale,
				nircm->tcp_rule.flow_max_window,
				nircm->tcp_rule.flow_end,
				nircm->tcp_rule.flow_max_end,
				nircm->tcp_rule.return_window_scale,
				nircm->tcp_rule.return_max_window,
				nircm->tcp_rule.return_end,
				nircm->tcp_rule.return_max_end,
				nircm->dscp_rule.flow_dscp,
				nircm->dscp_rule.return_dscp);

	/*
	 * Send message for rule creation
	 */
	retval = nss_ipv6_tx_sync(subsys->mgr, create_msg);
	if (retval != NSS_TX_SUCCESS) {
		pr_err("Session could not be created\n");
		retval = HWPA_BACKEND_ERR_SESS_CREATE;
		goto failure_2;
	}

	retval = HWPA_BACKEND_SUCCESS;

failure_2:
	kfree(create_msg);

failure_1:
	return retval;
}

/**
 * @fn enum hwpa_backend_rv hwpa_qca833x_add_session(struct hwpa_nss_offloading_data *)
 * @brief Translate a bridged l2 avm_pa session to a qca833x rule and perform the actual offload
 *
 * @param ofl_data [in] offloading data
 *
 * @return success or error code
 */
enum hwpa_backend_rv hwpa_qca833x_add_session(struct hwpa_nss_offloading_data *ofl_data)
{
	int retval = HWPA_BACKEND_ERR_HW_WRITE;
	struct hwpa_qca833x_session_data *data = &ofl_data->hws_nss->qca833x;
	struct vlan_ethhdr *hdr __maybe_unused = &data->tuple.hdr;
	uint32_t flow_idx, return_idx;
	struct hwpa_nss_if_data *flow_interfaces, *return_interfaces;
	struct qca833x_api *api;
	struct hwpa_nss_subsystem *subsys = hwpa_nss_get_subsys(HWPA_NSS_SUBSYSTEM_IDX_QCA833X);

	api = subsys->qca833x_spec->api;

	flow_idx = ofl_data->if_max_indices[HWPA_NSS_SESSION_DIRECTION_FLOW] - 1;
	return_idx = ofl_data->if_max_indices[HWPA_NSS_SESSION_DIRECTION_RETURN] - 1;
	flow_interfaces = ofl_data->interfaces[HWPA_NSS_SESSION_DIRECTION_FLOW];
	return_interfaces = ofl_data->interfaces[HWPA_NSS_SESSION_DIRECTION_RETURN];

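	/*
	 * The switch port bitmaps are taken from the innermost endpoint of
	 * each direction.
	 */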
	data->tuple.src_port_bmp = flow_interfaces[flow_idx].ep_info->port_bmp_833x;
	data->tuple.dst_port_bmp = return_interfaces[return_idx].ep_info->port_bmp_833x;

	PR_DEVEL("%p: Accelerate qca833x Session\n"
				"id: %d\n"
				"smac: %pM\n"
				"dmac: %pM\n"
				"src_port_bmp: %x\n"
				"dst_port_bmp: %x\n"
				"nss_src_ifnum: %d\n"
				"nss_dst_ifnum: %d\n"
				"vlan_tci: %x\n",
				ofl_data->hws_nss,
				data->tuple.id,
				hdr->h_source,
				hdr->h_dest,
				data->tuple.src_port_bmp,
				data->tuple.dst_port_bmp,
				flow_interfaces[flow_idx].ifnum,
				return_interfaces[return_idx].ifnum,
				hdr->h_vlan_TCI);

	if (api && api->add_session &&
			api->add_session(&data->tuple) != QCA833X_OFL_OK) {
		PR_DEVEL("offload failed for sessions %p\n", ofl_data->hws_nss);
		goto failure_1;
	}

	ofl_data->hws_nss->state = HWPA_NSS_SESSION_STATE_ACTIVE;

	retval = HWPA_BACKEND_SUCCESS;

failure_1:
	return retval;
}

/**
 * @fn enum hwpa_backend_rv hwpa_nss_offload_session(struct hwpa_nss_offloading_data*)
 * @brief perform the actual subsystem-specific offload and update the tracker
 *
 * @param ofl_data [in] offloading data
 *
 * @return success or error code
 */
static enum hwpa_backend_rv hwpa_nss_offload_session(struct hwpa_nss_offloading_data *ofl_data)
{
	enum hwpa_backend_rv retval = HWPA_BACKEND_SUCCESS;
	struct hwpa_nss_nss_session *hws_nss;
	const struct avm_pa_session *s;
	struct hwpa_nss_offloader *ofl;
	struct hwpa_nss_subsystem *subsys;

	hws_nss = ofl_data->hws_nss;
	s = ofl_data->sess_pa;
	ofl = hws_nss->offloader;

	subsys = ofl->subsys;

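	/*
	 * Reserve a session slot in the subsystem tracker first; it is
	 * released again on any failure below.
	 */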
	retval = hwpa_nss_tracker_add_nss_session(subsys);
	if (retval != HWPA_BACKEND_SUCCESS) {
		PR_DEVEL("Offloading limit for subsystem %s reached\n", subsys->label);
		goto failure_1;
	}

	PR_DEVEL("Adding hw session %p to subsystem %s with offloader %s\n", hws_nss, subsys->label, ofl->label);

	retval = ofl->add_session(ofl_data);
	if (retval != HWPA_BACKEND_SUCCESS)
		goto failure_2;

	if (hws_nss->state == HWPA_NSS_SESSION_STATE_ACTIVE) {
		PR_DEVEL("Session created successfully!\n");
		retval = HWPA_BACKEND_SUCCESS;
	} else	{
		pr_err("Session could not be offloaded!\n");
		retval = HWPA_BACKEND_ERR_SESS_CREATE;
		goto failure_2;
	}

	spin_lock_bh(&ofl->list_lock);
	list_add_rcu(&hws_nss->ofl_node, &ofl->session_list);
	spin_unlock_bh(&ofl->list_lock);

	return retval;

failure_2:
	hwpa_nss_tracker_remove_nss_session(subsys);

failure_1:
	return retval;
}

/**
 * @fn enum hwpa_backend_rv hwpa_nss_handle_bridged_pppoe_traffic(struct hwpa_nss_offloading_data*)
 * @brief Perform early checks for pppoe traffic over bridge
 *
 * @param ofl_data [in] all relevant information for the offloading process
 * @return success or error code
 */
static enum hwpa_backend_rv hwpa_nss_handle_bridged_pppoe_traffic(struct hwpa_nss_offloading_data *ofl_data)
{
	enum hwpa_backend_rv retval = HWPA_BACKEND_SUCCESS;
	const struct pppoe_hdr *eg_pppoe_hdr, *ig_pppoe_hdr;

	eg_pppoe_hdr = hwpa_get_hdr(ofl_data->eg_match, AVM_PA_PPPOE);
	ig_pppoe_hdr = hwpa_get_hdr(ofl_data->ig_match, AVM_PA_PPPOE);

	if (!eg_pppoe_hdr && !ig_pppoe_hdr)
		goto done;

	if (nss_pppoe_get_br_accel_mode() == NSS_PPPOE_BR_ACCEL_MODE_DIS) {
		PR_DEVEL("PPPoE bridge flow acceleration is disabled\n");
		retval = HWPA_BACKEND_ERR_BAD_PPPOE;
		goto failure_1;
	}

	if (ofl_data->eg_match->casttype == AVM_PA_IS_MULTICAST) {
		PR_DEVEL("Multicast in PPPoE bridge is not supported\n");
		retval = HWPA_BACKEND_ERR_BAD_PPPOE;
		goto done;
	}

failure_1:
done:
	return retval;
}

#ifdef HWPA_NSS_DEBUG
static inline void hwpa_nss_dump_hierarchy(struct hwpa_nss_offloading_data *ofl_data)
{
	int i;
	struct hwpa_nss_if_data *flow_interfaces, *return_interfaces;
	uint8_t flow_if_count, return_if_count;

	flow_interfaces = ofl_data->interfaces[HWPA_NSS_SESSION_DIRECTION_FLOW];
	return_interfaces = ofl_data->interfaces[HWPA_NSS_SESSION_DIRECTION_RETURN];
	flow_if_count = ofl_data->if_max_indices[HWPA_NSS_SESSION_DIRECTION_FLOW];
	return_if_count = ofl_data->if_max_indices[HWPA_NSS_SESSION_DIRECTION_RETURN];

	PR_DEVEL("%p: Interface Hierarchy FLOW direction:\n", ofl_data->sess_pa);
	for (i = 0; i < flow_if_count; ++i) {
		int32_t ifnum = flow_interfaces[i].ifnum;
		struct hwpa_nss_ep_info *ep_info = flow_interfaces[i].ep_info;
		enum hwpa_nss_if_type if_type = flow_interfaces[i].type;

		PR_DEVEL(" %-2d: IF= %-4d if_type= %-3d ep_type= %-3d port_bmp %-8x\n",
			 i, ifnum, if_type, ep_info ? ep_info->type : 0,
			 ep_info ? ep_info->port_bmp_833x : 0);
	}

	PR_DEVEL("%p: Interface Hierarchy RETURN direction:\n", ofl_data->sess_pa);
	for (i = 0; i < return_if_count; ++i) {
		int32_t ifnum = return_interfaces[i].ifnum;
		struct hwpa_nss_ep_info *ep_info = return_interfaces[i].ep_info;
		enum hwpa_nss_if_type if_type = return_interfaces[i].type;

		PR_DEVEL(" %-2d: IF= %-4d if_type= %-3d ep_type= %-3d port_bmp %-8x\n",
			 i, ifnum, if_type, ep_info ? ep_info->type : 0,
			 ep_info ? ep_info->port_bmp_833x : 0);
	}
}
#else
static inline void hwpa_nss_dump_hierarchy(struct hwpa_nss_offloading_data *ofl_data)
{

}
#endif /* HWPA_NSS_DEBUG */

/**
 * @fn enum hwpa_backend_rv hwpa_nss_build_hierarchy(struct hwpa_nss_offloading_data*)
 * @brief Build the interface hierarchy. So far we cheat here by only adding
 * the bridge (if any) and the in and out interfaces.
 *
 * @param ofl_data [in] offloading data
 * @return success or error code
 */
static enum hwpa_backend_rv hwpa_nss_build_hierarchy(struct hwpa_nss_offloading_data *ofl_data)
{
	enum hwpa_backend_rv retval = HWPA_BACKEND_SUCCESS;
	int32_t ifnum;
	struct hwpa_nss_subsystem *subsys;
	struct hwpa_nss_ep_info *ep_info;
	struct hwpa_nss_context *global_ctx = &hwpa_nss_ctx;
	struct net_device *in_bridge, *out_bridge;
	uint32_t index = 0;

	subsys = ofl_data->hws_nss->offloader->subsys;

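	/*
	 * Interfaces are stored outermost-first per direction: the bridge (if
	 * any) at index 0, the physical endpoint last. Rule creation later
	 * uses index 0 as the nexthop and the last index as the connection
	 * interface.
	 */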
	in_bridge = hwpa_get_and_hold_dev_master(ofl_data->in);
	if (in_bridge) {
		ifnum = nss_cmn_get_interface_number_by_dev(in_bridge);
		if (nss_bridge_verify_if_num(ifnum))
			ofl_data->interfaces[HWPA_NSS_SESSION_DIRECTION_FLOW][index++] =
				(struct hwpa_nss_if_data){.ifnum = ifnum, .type = HWPA_NSS_IF_TYPE_BRIDGE, .ep_info = NULL};
	}

	ifnum = nss_cmn_get_interface_number_by_dev(ofl_data->in);
	if (ifnum < 0) {
		retval = HWPA_BACKEND_ERR_BAD_HIERARCHY;
		goto failure_1;
	}

	ep_info = &global_ctx->if_reg[ifnum];

	ofl_data->interfaces[HWPA_NSS_SESSION_DIRECTION_FLOW][index] =
			(struct hwpa_nss_if_data){.ifnum = ifnum, .type = HWPA_NSS_IF_TYPE_ETH, .ep_info = ep_info};
	ofl_data->if_max_indices[HWPA_NSS_SESSION_DIRECTION_FLOW] = index + 1;

	index = 0;

	out_bridge = hwpa_get_and_hold_dev_master(ofl_data->out);
	if (out_bridge) {
		ifnum = nss_cmn_get_interface_number_by_dev(out_bridge);
		if (nss_bridge_verify_if_num(ifnum))
			ofl_data->interfaces[HWPA_NSS_SESSION_DIRECTION_RETURN][index++] =
				(struct hwpa_nss_if_data){.ifnum = ifnum, .type = HWPA_NSS_IF_TYPE_BRIDGE, .ep_info = NULL};
	}

	ifnum = nss_cmn_get_interface_number_by_dev(ofl_data->out);
	if (ifnum < 0) {
		retval = HWPA_BACKEND_ERR_BAD_HIERARCHY;
		goto failure_2;
	}

	ep_info = &global_ctx->if_reg[ifnum];

	ofl_data->interfaces[HWPA_NSS_SESSION_DIRECTION_RETURN][index] =
			(struct hwpa_nss_if_data){.ifnum = ifnum, .type = HWPA_NSS_IF_TYPE_ETH, .ep_info = ep_info};
	ofl_data->if_max_indices[HWPA_NSS_SESSION_DIRECTION_RETURN] = index + 1;

	hwpa_nss_dump_hierarchy(ofl_data);

failure_2:
	if (out_bridge)
		dev_put(out_bridge);

failure_1:
	if (in_bridge)
		dev_put(in_bridge);

	return retval;
}

/**
 * @fn enum hwpa_nss_nat_mode hwpa_nss_get_session_nat_mode(const struct avm_pa_session *)
 * @brief determine the NAT mode of an avm_pa session
 *
 * @param sess_pa [in] session to offload
 * @return one of hwpa_nss_nat_mode, incl. HWPA_NSS_NAT_ERROR on error
 */
static enum hwpa_nss_nat_mode hwpa_nss_get_session_nat_mode(const struct avm_pa_session *sess_pa)
{
	bool ip_snat, ip_dnat, port_snat, port_dnat, is_routed;
	int32_t ifnum_in, ifnum_out;
	struct hwpa_nss_context *global_ctx = &hwpa_nss_ctx;

	if (sess_pa->bsession) {
		struct net_device *in, *out;
		enum hwpa_nss_ep_type ep_type_in, ep_type_out;

		/* The caller already holds references, so these lookups must succeed. */
		in = hwpa_get_netdev(sess_pa->ingress_pid_handle);
		out = hwpa_get_netdev(avm_pa_first_egress(sess_pa)->pid_handle);

		ifnum_in = nss_cmn_get_interface_number_by_dev(in);
		if (ifnum_in < 0) {
			dev_put(in);
			dev_put(out);
			return HWPA_NSS_NAT_ERROR;
		}

		ifnum_out = nss_cmn_get_interface_number_by_dev(out);
		if (ifnum_out < 0) {
			dev_put(in);
			dev_put(out);
			return HWPA_NSS_NAT_ERROR;
		}

		ep_type_in = global_ctx->if_reg[ifnum_in].type;
		ep_type_out = global_ctx->if_reg[ifnum_out].type;

		dev_put(in);
		dev_put(out);

		if (ep_type_in == EP_TYPE_QCA833X && ep_type_out == EP_TYPE_QCA833X)
			return HWPA_NSS_QCA833X_NAT_MODE_BRIDGED;

		PR_DEVEL("Unsupported bsession!\n");
		return HWPA_NSS_NAT_ERROR;
	}

	is_routed = hwpa_nss_is_routed(sess_pa);
	ip_snat = sess_pa->mod.modflags & (AVM_PA_MOD_SADDR);
	ip_dnat = sess_pa->mod.modflags & (AVM_PA_MOD_DADDR);
	port_snat = sess_pa->mod.modflags & (AVM_PA_MOD_SPORT);
	port_dnat = sess_pa->mod.modflags & (AVM_PA_MOD_DPORT);

	if ((ip_snat && ip_dnat) || (port_snat && port_dnat)) {
		pr_err("Bad NAT Mode for session %p\n", sess_pa);
		return HWPA_NSS_NAT_ERROR;
	}

	if (unlikely(sess_pa->generic_ct_dir == GENERIC_CT_DIR_ORIGINAL && ip_dnat)) {
		PR_DEVEL("DNAT for original packets not supported\n");
		return HWPA_NSS_NAT_ERROR;
	}
	if (unlikely(sess_pa->generic_ct_dir == GENERIC_CT_DIR_REPLY && ip_snat)) {
		PR_DEVEL("SNAT for reply packets not supported\n");
		return HWPA_NSS_NAT_ERROR;
	}

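	/*
	 * Map the modification flags onto an NSS NAT mode per IP version.
	 * Address NAT is only available for ipv4.
	 */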
	switch (sess_pa->ingress.pkttype & AVM_PA_PKTTYPE_IP_MASK) {
	case AVM_PA_PKTTYPE_IPV4:
		if (!is_routed)
			return HWPA_NSS_IPV4_NAT_MODE_BRIDGED;
		else if (ip_dnat)
			return HWPA_NSS_IPV4_NAT_MODE_DNAT;
		else if (ip_snat)
			return HWPA_NSS_IPV4_NAT_MODE_SNAT;
		else if (port_snat || port_dnat)
			return HWPA_NSS_IPV4_NAT_MODE_PORT_NAT;
		else if (!ip_dnat && !ip_snat)
			return HWPA_NSS_IPV4_NAT_MODE_NO_NAT;
		else {
			PR_DEVEL("IPV4: Bad nat mode!\n");
			return HWPA_NSS_NAT_ERROR;
		}
	case AVM_PA_PKTTYPE_IPV6:
		if (!is_routed)
			return HWPA_NSS_IPV6_NAT_MODE_BRIDGED;
		else if (port_snat || port_dnat)
			return HWPA_NSS_IPV6_NAT_MODE_PORT_NAT;
		else if (!ip_dnat && !ip_snat)
			return HWPA_NSS_IPV6_NAT_MODE_NO_NAT;
		else {
			PR_DEVEL("IPV6: No NAT for IPV6!\n");
			return HWPA_NSS_NAT_ERROR;
		}
	default:
		PR_DEVEL("L2 Protocol not supported!\n");
		return HWPA_NSS_NAT_ERROR;
	}
}

/**
 * @fn enum hwpa_backend_rv hwpa_nss_fill_nat_mode(struct hwpa_nss_offloading_data*)
 * @brief determine the NAT mode as per hwpa_nss_get_session_nat_mode() and save it in the offloading data
 *
 * @param ofl_data [in] offloading data
 * @return success or error code
 */
static inline enum hwpa_backend_rv hwpa_nss_fill_nat_mode(struct hwpa_nss_offloading_data *ofl_data)
{
	const struct avm_pa_session *sess_pa = ofl_data->sess_pa;
	enum hwpa_nss_nat_mode nat_mode;

	nat_mode = hwpa_nss_get_session_nat_mode(sess_pa);
	if (nat_mode == HWPA_NSS_NAT_ERROR)
		return HWPA_BACKEND_ERR_BAD_NAT_MODE;

	ofl_data->nat_mode = nat_mode;
	ofl_data->ct_dir = sess_pa->generic_ct_dir;

	PR_DEVEL("Determined NAT Mode %d for session %p\n", ofl_data->nat_mode,
		 ofl_data->sess_pa);

	return HWPA_BACKEND_SUCCESS;
}

/**
 * @fn static bool hwpa_nss_determine_if_session_can_be_accelerated(struct hwpa_nss_offloading_data *)
 * @brief do a late analysis of the avm_pa session to sort out flows which
 * cannot be handled by nss.
 *
 * In contrast to hwpa_backend_probe_session(), flows that fail the checks here
 * are already considered offloaded by avm_pa; sorting them out at this point is
 * too late. As a result they take the software path while avm_pa assumes
 * offloading. This may lead to suboptimal data paths, e.g. thought-to-be-
 * offloaded flows skip some necessary QoS.
 *
 * If possible, sort out flows in hwpa_backend_probe_session()
 *
 * @see hwpa_backend_probe_session
 *
 * @param ofl_data [in/out]  all relevant information for the offloading process
 * @return true if session can potentially be accelerated by nss. false otherwise
 */
static bool hwpa_nss_determine_if_session_can_be_accelerated(struct hwpa_nss_offloading_data *ofl_data)
{
	const struct avm_pa_session *sess_pa;
	const struct avm_pa_egress *eg;
	const struct avm_pa_pkt_match *ig_match, *eg_match;
	struct net_device *in, *out, *bridge = NULL;
	bool is_routed;
	enum hwpa_backend_rv retval;

	/*
	 * @todo: Migrate these checks to hwpa_backend_probe_session() to sort
	 * out unacceleratable flows as early as possible.
	 */

	sess_pa = ofl_data->sess_pa;
	eg = ofl_data->eg;
	ig_match = ofl_data->ig_match;
	eg_match = ofl_data->eg_match;
	is_routed = hwpa_nss_is_routed(sess_pa);

	out = hwpa_get_netdev(eg->pid_handle);
	if (unlikely(!out)) {
		PR_DEVEL("out net_device could not be gathered\n");
		goto failure_1;
	}

	if (is_routed) {
		in = hwpa_get_netdev(sess_pa->ingress_pid_handle);
		if (unlikely(!in)) {
			PR_DEVEL("Could not get in netdevice!\n");
			goto failure_2;
		}
	} else	{
		const struct ethhdr *eg_ethhdr, *ig_ethhdr;

		eg_ethhdr = (const struct ethhdr *) hwpa_get_hdr(eg_match, AVM_PA_ETH);
		ig_ethhdr = (const struct ethhdr *) hwpa_get_hdr(ig_match, AVM_PA_ETH);
		if (unlikely(!ig_ethhdr || !eg_ethhdr)) {
			PR_DEVEL("No ethernet header for l2 session\n");
			goto failure_2;
		}

		bridge = hwpa_get_and_hold_dev_master(out);
		/*
		 * If traffic is going over a sta interface there is no master.
		 * So we need to diverge from the ECM approach, which assumes
		 * that every netdevice used for bridged traffic is assigned to
		 * a bridge.
		 */
		if (!bridge) {
			PR_DEVEL("No bridge for bridged traffic for netdev %s\n", out->name);

			in = hwpa_get_netdev(sess_pa->ingress_pid_handle);
			if (unlikely(!in)) {
				PR_DEVEL("Could not get in netdevice!\n");
				goto failure_2;
			}

			/*
			 * As mentioned above: don't treat a missing bridge as
			 * an error; just continue and skip the bridge-specific
			 * checks.
			 */
			goto skip_bridge_stuff;
		}

		in = hwpa_get_netdev(sess_pa->ingress_pid_handle);

		if (!in) {
			PR_DEVEL("Could not get in netdevice!\n");
			goto failure_3;
		}

		if (in == out) {
			if (!br_is_hairpin_enabled(in)) {
				PR_DEVEL("hairpin not enabled\n");
				goto failure_4;
			}
		}

		if (ether_addr_equal(eg_ethhdr->h_source, bridge->dev_addr)) {
			PR_DEVEL("Ignoring routed packet to bridge\n");
			goto failure_4;
		}

		if (eg_match->casttype == AVM_PA_IS_UNICAST) {
			if (!br_fdb_has_entry(out, eg_ethhdr->h_dest, 0)) {
				PR_DEVEL("No fdb entry for mac\n");
				goto failure_4;
			}
		}

skip_bridge_stuff:
		if (hwpa_nss_handle_bridged_pppoe_traffic(ofl_data) != HWPA_BACKEND_SUCCESS) {
			PR_DEVEL("Couldn't handle PPPoE flow\n");
			goto failure_4;
		}
	}

	PR_DEVEL("Valid session\n");

	ofl_data->is_routed = is_routed;
	ofl_data->in = in;
	ofl_data->out = out;
	ofl_data->bridge = bridge;

	retval = hwpa_nss_build_hierarchy(ofl_data);
	if (retval != HWPA_BACKEND_SUCCESS) {
		PR_DEVEL("Could not build hierarchy for session %p\n", sess_pa);
		goto failure_4;
	}

	/* Determine NAT Mode */
	retval = hwpa_nss_fill_nat_mode(ofl_data);
	if (retval != HWPA_BACKEND_SUCCESS) {
		PR_DEVEL("Invalid NAT Mode for session %p\n", sess_pa);
		goto failure_4;
	}

	if (ofl_data->nat_mode == HWPA_NSS_QCA833X_NAT_MODE_BRIDGED) {
		if (!hwpa_nss_get_subsys(HWPA_NSS_SUBSYSTEM_IDX_QCA833X)->qca833x_spec->api) {
			pr_warn("No external qca833x initialized (session %p) -- not accelerating\n", sess_pa);
			retval = HWPA_BACKEND_ERR_BAD_NAT_MODE;
			goto failure_4;
		}
	}

	dev_put(ofl_data->in);
	dev_put(ofl_data->out);
	if (ofl_data->bridge)
		dev_put(ofl_data->bridge);

	return (retval == HWPA_BACKEND_SUCCESS);

failure_4:
	dev_put(in);

failure_3:
	if (bridge)
		dev_put(bridge);

failure_2:
	dev_put(out);

failure_1:
	return false;
}
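
/*
 * Note on reference counting: hwpa_get_netdev() and
 * hwpa_get_and_hold_dev_master() return held references. The success path
 * above drops all of them before returning; the failure labels drop them
 * progressively, in reverse order of acquisition.
 */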

/**
 * @fn enum hwpa_backend_rv hwpa_backend_probe_session(const struct avm_pa_session*, unsigned long*)
 * @brief do an early analysis of the avm_pa session to sort out flows which
 * cannot be handled by nss.
 *
 * In contrast to hwpa_nss_determine_if_session_can_be_accelerated(), flows
 * that fail the checks here are not even attempted to be accelerated. Instead,
 * the avm_pa session will use the software path and *know about it*
 *
 * If possible, sort out flows here, but beware that this function runs in softirq
 * context.
 *
 * @see hwpa_nss_determine_if_session_can_be_accelerated
 *
 * @param sess_pa [in] avm_pa session to offload
 * @param handle_out [out] always set to hw_handle_invalid; no session is created here
 *
 * @return success or error code
 */
enum hwpa_backend_rv hwpa_backend_probe_session(const struct avm_pa_session *sess_pa,
							unsigned long *handle_out)
{
	struct net_device *in, *out;
	enum hwpa_backend_rv retval = HWPA_BACKEND_ERR_PROBE_FAILED;
	struct hwpa_nss_context *global_ctx = &hwpa_nss_ctx;
	enum hwpa_nss_nat_mode nat_mode;
	const struct avm_pa_egress *eg;

	/* Guaranteed to be single-egress as we don't implement change_session() */
	eg = avm_pa_first_egress(sess_pa);

	/* Only unicast traffic is supported */
	if (eg->match.casttype != AVM_PA_IS_UNICAST) {
		PR_DEVEL("Only accelerating unicast traffic\n");
		goto failure_1;
	}

	/* NSS cannot accelerate local traffic */
	if (eg->type != avm_pa_egresstype_output) {
		PR_DEVEL("Not Accelerating local traffic");
		goto failure_1;
	}

	out = hwpa_get_netdev(avm_pa_first_egress(sess_pa)->pid_handle);
	if (unlikely(!out)) {
		PR_DEVEL("Could not get out netdevice!\n");
		goto failure_1;
	}

	in = hwpa_get_netdev(sess_pa->ingress_pid_handle);
	if (unlikely(!in)) {
		dev_put(out);
		PR_DEVEL("Could not get in netdevice!\n");
		goto failure_1;
	}

	if (nss_cmn_get_interface_number_by_dev(in) < 0) {
		PR_DEVEL("IN netdev not registered in NSS!\n");
		goto failure_2;
	}

	if (nss_cmn_get_interface_number_by_dev(out) < 0) {
		PR_DEVEL("OUT netdev not registered in NSS!\n");
		goto failure_2;
	}

	nat_mode = hwpa_nss_get_session_nat_mode(sess_pa);
#if HWPA_NSS_DISABLE_NAT_ACCEL
	/* NAT/PT acceleration is disabled: only bridged NAT modes are accepted */
	if (!(nat_mode == HWPA_NSS_IPV4_NAT_MODE_BRIDGED ||
			nat_mode == HWPA_NSS_IPV6_NAT_MODE_BRIDGED ||
			nat_mode == HWPA_NSS_QCA833X_NAT_MODE_BRIDGED)) {
		PR_DEVEL("Unsupported NAT Mode (%d) for session %p\n", nat_mode, sess_pa);
		goto failure_2;
	}
#else
	if (nat_mode == HWPA_NSS_NAT_ERROR) {
		PR_DEVEL("Failed to determine NAT Mode for session %p\n", sess_pa);
		goto failure_2;
	}
#endif

	if (!hwpa_nss_is_routed(sess_pa)) {
		const struct ethhdr *ig_ethhdr, *eg_ethhdr;
		const struct avm_pa_pkt_match *ig_match, *eg_match;

		ig_match = &sess_pa->ingress;
		eg_match = &eg->match;

		ig_ethhdr = (const struct ethhdr *) hwpa_get_hdr(ig_match, AVM_PA_ETH);
		eg_ethhdr = (const struct ethhdr *) hwpa_get_hdr(eg_match, AVM_PA_ETH);
		if (unlikely(!ig_ethhdr || !eg_ethhdr)) {
			PR_DEVEL("No ethernet header for l2 session\n");
			goto failure_2;
		}

		if (!ether_addr_equal(eg_ethhdr->h_source, ig_ethhdr->h_source) ||
				!ether_addr_equal(eg_ethhdr->h_dest, ig_ethhdr->h_dest)) {
			PR_DEVEL("MAT Acceleration for bridged traffic not supported!\n");
			goto failure_2;
		}
	}

	if (atomic_read(&global_ctx->ratelimit_counter) > HWPA_NSS_MAX_SIMULTANEOUS_OFFLOADS) {
		PR_DEVEL("offload ratelimited!\n");
		goto failure_2;
	}

	retval = HWPA_BACKEND_SUCCESS;

failure_2:
	dev_put(in);
	dev_put(out);

failure_1:
	*handle_out = hw_handle_invalid;
	return retval;
}
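
/*
 * Sketch of the typical call sequence as driven by the hwpa core (the actual
 * caller lives outside this file):
 *
 *	unsigned long handle;
 *
 *	if (hwpa_backend_probe_session(sess_pa, &handle) != HWPA_BACKEND_SUCCESS)
 *		return;		// session stays on the software path
 *	if (hwpa_backend_add_session(sess_pa, &handle) != HWPA_BACKEND_SUCCESS)
 *		return;		// backend decided not to accelerate
 *	...
 *	hwpa_backend_rem_session(handle);
 */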

/**
 * @fn enum hwpa_backend_rv hwpa_backend_add_session(const struct avm_pa_session*, unsigned long*)
 * @brief Decide what to do with an avm_pa session and perform an action
 * accordingly. The possible outcomes are an error, ignoring the session,
 * registering the hwpa session to an existing nss session, offloading a newly
 * created nss session, queueing the new session to the pom or offloading a
 * pending session.
 *
 * @param sess_pa [in] avm_pa session to offload
 * @param handle_out [out] handle of the created hwpa_session
 *
 * @return success or error code
 */
enum hwpa_backend_rv hwpa_backend_add_session(const struct avm_pa_session *sess_pa,
							unsigned long *handle_out)
{
	struct hwpa_nss_offloader *ofl;
	struct hwpa_nss_hwpa_session *hws_hwpa;
	struct hwpa_nss_nss_session *hws_nss_new;
	enum hwpa_backend_rv retval = HWPA_BACKEND_ERR_INTERNAL;
	struct hwpa_nss_context *global_ctx = &hwpa_nss_ctx;
	struct hwpa_nss_subsystem *subsys;
	struct hwpa_nss_offloading_data *ofl_data;
	struct hwpa_nss_classification_result class_res;
	struct hwpa_nss_hwpa_session *hws_hwpa_est, *t;

	atomic_inc(&global_ctx->ratelimit_counter);

	ofl_data = kmalloc(sizeof(*ofl_data), GFP_KERNEL);
	if (!ofl_data) {
		retval = HWPA_BACKEND_ERR_MEMORY;
		goto failure_1;
	}

	ofl_data->sess_pa = sess_pa;
	ofl_data->eg = avm_pa_first_egress(sess_pa);
	ofl_data->eg_match = &ofl_data->eg->match;
	ofl_data->ig_match = &sess_pa->ingress;
	INIT_LIST_HEAD(&ofl_data->node);

	/* Sort out flows which are not acceleratable at an early stage */
	if (!hwpa_nss_determine_if_session_can_be_accelerated(ofl_data)) {
		PR_DEVEL("Not Accelerating Session %p\n", sess_pa);
		goto failure_2;
	}

	PR_DEVEL("Accelerating Session %p\n", sess_pa);

	/* The classification and registration of a session have to be executed
	 * within a single mutex subsystem context because otherwise there
	 * is a race condition between finding just registered sessions and
	 * registering/creating new sessions.
	 */
	mutex_lock(&global_ctx->mutex);

	hws_hwpa = (struct hwpa_nss_hwpa_session *) kmem_cache_zalloc(global_ctx->kmem_hwpa, GFP_KERNEL);
	if (!hws_hwpa) {
		retval = HWPA_BACKEND_ERR_CACHE;
		mutex_unlock(&global_ctx->mutex);
		goto failure_2;
	}
	hwpa_nss_init_hwpa_session(hws_hwpa);
	ofl_data->hws_hwpa = hws_hwpa;
	hws_hwpa->sess_pa = sess_pa;

	hws_nss_new = (struct hwpa_nss_nss_session *) kmem_cache_zalloc(global_ctx->kmem_nss, GFP_KERNEL);
	if (!hws_nss_new) {
		retval = HWPA_BACKEND_ERR_CACHE;
		mutex_unlock(&global_ctx->mutex);
		goto failure_3;
	}
	hwpa_nss_init_nss_session(hws_nss_new);
	ofl_data->hws_nss = hws_nss_new;

	retval = hwpa_nss_prepare_session(ofl_data);
	if (retval != HWPA_BACKEND_SUCCESS) {
		PR_DEVEL("Error during session preparation.\n");
		mutex_unlock(&global_ctx->mutex);
		goto failure_4;
	}

	/*
	 * Assumption: subsystem and offloader of hws_nss_new do not differ
	 * from those of the established hws_nss after classification.
	 */
	ofl = hws_nss_new->offloader;
	subsys = ofl->subsys;

	/*
	 * If many sessions are offloaded simultaneously, it may happen that a
	 * session has already been flushed by avm_pa once we reach this
	 * mutex-protected area. If that's the case, cancel the offload early
	 * to speed up cleanup.
	 */
	if (sess_pa->on_list != AVM_PA_LIST_ACTIVE) {
		mutex_unlock(&global_ctx->mutex);
		PR_DEVEL("%p: Session already flushed\n", sess_pa);
		retval = HWPA_BACKEND_ERR_FLUSHED_BY_AVM_PA;
		goto failure_4;
	}

	/* Decide what to do with the new avm_pa session */
	hwpa_nss_classify(ofl_data, &class_res);

#ifdef HWPA_NSS_DEBUG
	WARN_ON((uint32_t)class_res.value >= (uint32_t)HWPA_NSS_CLASS_RES_MAX);
#endif

	switch (class_res.value) {
	case HWPA_NSS_CLASS_RES_NEW_OFFLOAD:
		hwpa_nss_register_nss_session(hws_nss_new, class_res.hws_new_hash);
		hwpa_nss_attach_to_nss_session(hws_nss_new, hws_hwpa);
		retval = hwpa_nss_offload_session(ofl_data);
		if (retval != HWPA_BACKEND_SUCCESS) {
			hwpa_nss_destroy_hwpa_session(hws_hwpa);
			hwpa_nss_destroy_nss_session(hws_nss_new);
			kfree(ofl_data);
			goto failure_5;
		}

		kfree(ofl_data);
		*handle_out = hwpa_nss_session_to_handle(hws_hwpa);
		break;
	case HWPA_NSS_CLASS_RES_NEW_QUEUE:
		hwpa_nss_register_nss_session(hws_nss_new, class_res.hws_new_hash);
		hwpa_nss_attach_to_nss_session(hws_nss_new, hws_hwpa);
		hwpa_nss_add_pending_offload(ofl_data);

		*handle_out = hwpa_nss_session_to_handle(hws_hwpa);
		break;
	case HWPA_NSS_CLASS_RES_EST_ATTACH:
		hwpa_nss_attach_to_nss_session(class_res.hws_nss_established, hws_hwpa);

		*handle_out = hwpa_nss_session_to_handle(hws_hwpa);
		hwpa_nss_destroy_unregistered_nss_session(hws_nss_new);
		kfree(ofl_data);
		break;
	case HWPA_NSS_CLASS_RES_EST_OFFLOAD_NEW:
		hwpa_nss_register_nss_session(hws_nss_new, class_res.hws_new_hash);
		hwpa_nss_attach_to_nss_session(hws_nss_new, hws_hwpa);

		list_for_each_entry_safe(hws_hwpa_est, t, &class_res.hws_nss_established->hwpa_session_list, node) {
			hwpa_nss_detach_from_nss_session(hws_hwpa_est);
			hwpa_nss_attach_to_nss_session(hws_nss_new, hws_hwpa_est);
		}
		hwpa_nss_destroy_nss_session(class_res.hws_nss_established);

		retval = hwpa_nss_offload_session(ofl_data);
		if (retval != HWPA_BACKEND_SUCCESS) {
			list_for_each_entry_safe(hws_hwpa_est, t, &hws_nss_new->hwpa_session_list, node) {
				hwpa_nss_detach_from_nss_session(hws_hwpa_est);
			}
			hwpa_nss_destroy_unattached_hwpa_session(hws_hwpa);
			hwpa_nss_destroy_nss_session(hws_nss_new);
			kfree(class_res.ofl_data_established);
			kfree(ofl_data);
			goto failure_5;
		}

		kfree(class_res.ofl_data_established);
		kfree(ofl_data);
		*handle_out = hwpa_nss_session_to_handle(hws_hwpa);
		break;
	case HWPA_NSS_CLASS_RES_EST_OFFLOAD_OLD:
		hwpa_nss_destroy_unregistered_nss_session(hws_nss_new);
		hwpa_nss_attach_to_nss_session(class_res.hws_nss_established, hws_hwpa);

		retval = hwpa_nss_offload_session(class_res.ofl_data_established);
		if (retval != HWPA_BACKEND_SUCCESS) {
			list_for_each_entry_safe(hws_hwpa_est, t, &class_res.hws_nss_established->hwpa_session_list, node) {
				hwpa_nss_detach_from_nss_session(hws_hwpa_est);
			}
			hwpa_nss_destroy_unattached_hwpa_session(hws_hwpa);
			hwpa_nss_destroy_nss_session(class_res.hws_nss_established);
			kfree(class_res.ofl_data_established);
			kfree(ofl_data);
			goto failure_5;
		}
		kfree(class_res.ofl_data_established);

		kfree(ofl_data);
		*handle_out = hwpa_nss_session_to_handle(hws_hwpa);
		break;
	case HWPA_NSS_CLASS_RES_NEW_IGNORE:
	default:
		hwpa_nss_destroy_unattached_hwpa_session(hws_hwpa);
		hwpa_nss_destroy_unregistered_nss_session(hws_nss_new);

		kfree(ofl_data);
		*handle_out = -1;
	}

	spin_lock_bh(&ofl->lock);

	/* Update session counters */
	switch (class_res.value) {
	case HWPA_NSS_CLASS_RES_NEW_OFFLOAD:
		ofl->successful_nss_offloads++;
		ofl->avm_pa_session_count++;
		ofl->active_nss_session_count++;
		break;
	case HWPA_NSS_CLASS_RES_NEW_QUEUE:
		ofl->avm_pa_session_count++;
		ofl->pending_nss_session_count++;
		break;
	case HWPA_NSS_CLASS_RES_EST_ATTACH:
		ofl->avm_pa_session_count++;
		break;
	case HWPA_NSS_CLASS_RES_EST_OFFLOAD_OLD:
		ofl->successful_nss_offloads++;
		ofl->active_nss_session_count++;
		ofl->pending_nss_session_count--;
		ofl->avm_pa_session_count++;
		break;
	case HWPA_NSS_CLASS_RES_EST_OFFLOAD_NEW:
		ofl->successful_nss_offloads++;
		ofl->active_nss_session_count++;
		ofl->pending_nss_session_count--;
		ofl->avm_pa_session_count++;
		break;
	default:
		break;
	}
	spin_unlock_bh(&ofl->lock);
	mutex_unlock(&global_ctx->mutex);
	atomic_dec(&global_ctx->ratelimit_counter);

	return HWPA_BACKEND_SUCCESS;

failure_5:
	spin_lock_bh(&ofl->lock);

	/* So far we can only get here if classifier decides for
	 * HWPA_NSS_CLASS_RES_NEW_OFFLOAD or HWPA_NSS_CLASS_RES_EST_OFFLOAD_NEW
	 * or HWPA_NSS_CLASS_RES_EST_OFFLOAD_OLD.
	 * Here we also need a session counter update.
	 */
	switch (class_res.value) {
	case HWPA_NSS_CLASS_RES_NEW_OFFLOAD:
		ofl->failed_nss_offloads++;
		break;
	case HWPA_NSS_CLASS_RES_EST_OFFLOAD_OLD:
		ofl->failed_nss_offloads++;
		ofl->pending_nss_session_count--;
		break;
	case HWPA_NSS_CLASS_RES_EST_OFFLOAD_NEW:
		ofl->failed_nss_offloads++;
		ofl->pending_nss_session_count--;
		break;
	default:
		break;
	}
	spin_unlock_bh(&ofl->lock);

	*handle_out = -1;

	mutex_unlock(&global_ctx->mutex);
	atomic_dec(&global_ctx->ratelimit_counter);
	return (enum hwpa_backend_rv) retval;

failure_4:
	if (hws_nss_new)
		hwpa_nss_destroy_unregistered_nss_session(hws_nss_new);

failure_3:
	if (hws_hwpa)
		hwpa_nss_destroy_unattached_hwpa_session(hws_hwpa);

failure_2:
	kfree(ofl_data);

failure_1:
	*handle_out = -1;
	atomic_dec(&global_ctx->ratelimit_counter);
	return (enum hwpa_backend_rv) retval;
}

/*
 *===============================================================================
 * hwpa nss offloading session removal
 *==============================================================================
 */

/**
 * @fn void hwpa_nss_connection_destroy(struct hwpa_nss_nss_session*, struct nss_cmn_msg*)
 * @brief Protocol independent part of subsystem session destruction
 *
 * @param hws_nss [in] NSS session just destroyed
 * @param cm [in] common message part of answer from nss
 */
static void hwpa_nss_connection_destroy(struct hwpa_nss_nss_session *hws_nss, struct nss_cmn_msg *cm)
{
	if (cm->response != NSS_CMN_RESPONSE_ACK) {
		if (cm->error == NSS_IPV4_DR_NO_CONNECTION_ENTRY_ERROR || cm->error == NSS_IPV6_DR_NO_CONNECTION_ENTRY_ERROR)
			PR_DEVEL("Trying to remove non-existing session\n");
		else {
			pr_err("An error occurred while destroying NSS connection acceleration\n");
			pr_err("Error code: %d\n", cm->error);
		}
		hws_nss->state = HWPA_NSS_SESSION_STATE_INVALID;
	} else {
		hws_nss->state = HWPA_NSS_SESSION_STATE_BROKEN;
	}
}

/**
 * @fn void hwpa_nss_ipv4_connection_destroy_callback(void*, struct nss_ipv4_msg*)
 * @brief ipv4 rule destroy callback
 *
 * @param app_data [in] app data. The subsystem.
 * @param nim [in] the answer to a destroy_rule_msg for ipv4
 */
static void hwpa_nss_ipv4_connection_destroy_callback(void *app_data, struct nss_ipv4_msg *nim)
{
	struct hwpa_nss_nss_session *hws_nss = (struct hwpa_nss_nss_session *) app_data;

	if (nim->cm.type != NSS_IPV4_TX_DESTROY_RULE_MSG) {
		pr_err("%p: ported create callback with improper type: %d\n",
		       hws_nss, nim->cm.type);
		return;
	}

	hwpa_nss_connection_destroy(hws_nss, &nim->cm);
}

/**
 * @fn void hwpa_nss_ipv6_connection_destroy_callback(void*, struct nss_ipv6_msg*)
 * @brief ipv6 rule destroy callback
 *
 * @param app_data [in] app data. The subsystem.
 * @param nim [in] the answer to a destroy_rule_msg for ipv6
 */
static void hwpa_nss_ipv6_connection_destroy_callback(void *app_data, struct nss_ipv6_msg *nim)
{
	struct hwpa_nss_nss_session *hws_nss = (struct hwpa_nss_nss_session *) app_data;

	if (nim->cm.type != NSS_IPV6_TX_DESTROY_RULE_MSG) {
		pr_err("%p: ported create callback with improper type: %d\n",
		       hws_nss, nim->cm.type);
		return;
	}

	hwpa_nss_connection_destroy(hws_nss, &nim->cm);
}

/**
 * @fn enum hwpa_backend_rv hwpa_nss_ipv4_remove_session(struct hwpa_nss_subsystem*, struct hwpa_nss_nss_session*)
 * @brief Remove Session from ipv4 subsystem
 *
 * @param subsys [in] the subsystem
 * @param hws_nss [in] nss session to destroy
 *
 * @return success or error code
 */
static enum hwpa_backend_rv hwpa_nss_ipv4_remove_session(struct hwpa_nss_subsystem *subsys,
							 struct hwpa_nss_nss_session *hws_nss)
{
	uint32_t retval;
	struct nss_ipv4_msg *rem_msg;

	rem_msg = kzalloc(sizeof(struct nss_ipv4_msg), GFP_KERNEL);

	if (!rem_msg) {
		pr_err("Memory Error During Session Removal\n");
		retval = HWPA_BACKEND_ERR_MEMORY;
		goto failure_1;
	}

	nss_ipv4_msg_init(rem_msg, NSS_IPV4_RX_INTERFACE,
				   NSS_IPV4_TX_DESTROY_RULE_MSG,
				   sizeof(struct nss_ipv4_rule_destroy_msg),
				   hwpa_nss_ipv4_connection_destroy_callback, hws_nss);

	rem_msg->msg.rule_destroy.tuple = hws_nss->ipv4.tuple;

	PR_DEVEL("%p: Deaccelerate Session\n"
			"Protocol: %d\n"
			"from_ip: %pI4h:%d\n"
			"to_ip: %pI4h:%d\n",
			hws_nss,
			rem_msg->msg.rule_destroy.tuple.protocol,
			&rem_msg->msg.rule_destroy.tuple.flow_ip,
			rem_msg->msg.rule_destroy.tuple.flow_ident,
			&rem_msg->msg.rule_destroy.tuple.return_ip,
			rem_msg->msg.rule_destroy.tuple.return_ident);

	retval = nss_ipv4_tx(subsys->mgr, rem_msg);
	if (retval != NSS_TX_SUCCESS) {
		PR_DEVEL("IPV4 acceleration rule could not be removed\n");
		retval = HWPA_BACKEND_ERR_SESS_REM;
		goto failure_2;
	}
	retval = HWPA_BACKEND_SUCCESS;

failure_2:
	kfree(rem_msg);

failure_1:
	return (enum hwpa_backend_rv) retval;
}

/**
 * @fn enum hwpa_backend_rv hwpa_nss_ipv6_remove_session(struct hwpa_nss_subsystem*, struct hwpa_nss_nss_session*)
 * @brief Remove Session from ipv6 subsystem
 *
 * @param subsys [in] the subsystem
 * @param hws_nss [in] nss session to destroy
 *
 * @return success or error code
 */
static enum hwpa_backend_rv hwpa_nss_ipv6_remove_session(struct hwpa_nss_subsystem *subsys,
							 struct hwpa_nss_nss_session *hws_nss)
{
	uint32_t retval;
	struct nss_ipv6_msg *rem_msg;

	rem_msg = kzalloc(sizeof(struct nss_ipv6_msg), GFP_KERNEL);

	if (!rem_msg) {
		pr_err("Memory Error During Session Removal\n");
		retval = HWPA_BACKEND_ERR_MEMORY;
		goto failure_1;
	}

	nss_ipv6_msg_init(rem_msg, NSS_IPV6_RX_INTERFACE,
				   NSS_IPV6_TX_DESTROY_RULE_MSG,
				   sizeof(struct nss_ipv6_rule_destroy_msg),
				   hwpa_nss_ipv6_connection_destroy_callback, hws_nss);

	rem_msg->msg.rule_destroy.tuple = hws_nss->ipv6.tuple;

	retval = nss_ipv6_tx(subsys->mgr, rem_msg);
	if (retval != NSS_TX_SUCCESS) {
		PR_DEVEL("IPV6 acceleration rule could not be removed\n");
		retval = HWPA_BACKEND_ERR_SESS_REM;
		goto failure_2;
	}
	retval = HWPA_BACKEND_SUCCESS;

failure_2:
	kfree(rem_msg);

failure_1:
	return (enum hwpa_backend_rv) retval;
}

/**
 * @fn enum hwpa_backend_rv hwpa_qca833x_remove_session(struct hwpa_nss_subsystem*, struct hwpa_nss_nss_session*)
 * @brief Remove Session from qca833x subsystem
 *
 * @param subsys [in] the subsystem
 * @param hws_nss [in] nss session to destroy
 *
 * @return success or error code
 */
static enum hwpa_backend_rv hwpa_qca833x_remove_session(struct hwpa_nss_subsystem *subsys,
							 struct hwpa_nss_nss_session *hws_nss)
{
	enum hwpa_backend_rv retval = HWPA_BACKEND_SUCCESS;
	struct qca833x_api *api;

	api = subsys->qca833x_spec->api;

	PR_DEVEL("Remove qca833x session %d!\n", hws_nss->qca833x.tuple.id);

	if (api && api->del_session)
		api->del_session(&hws_nss->qca833x.tuple);

	return retval;
}

/**
 * @fn enum hwpa_backend_rv hwpa_backend_rem_session(unsigned long)
 * @brief implementation of the hwpa_backend-API function for session removal
 *
 * @param handle [in] the hwpa session
 *
 * @return success or error code
 */
enum hwpa_backend_rv hwpa_backend_rem_session(unsigned long handle)
{
	enum hwpa_backend_rv retval = HWPA_BACKEND_SUCCESS;
	struct hwpa_nss_hwpa_session *hws_hwpa;
	struct hwpa_nss_nss_session *hws_nss;
	struct hwpa_nss_subsystem *subsys;
	struct hwpa_nss_offloader *ofl;
	struct hwpa_nss_offloading_data *ofl_data;
	int32_t pa_ref_count;
	struct hwpa_nss_context *global_ctx = &hwpa_nss_ctx;

	/* get hwpa session from avm_pa session */
	hws_hwpa = hwpa_nss_handle_to_session(handle);
	if (!hws_hwpa) {
		retval = HWPA_BACKEND_ERR_BAD_HANDLE;
		goto finished;
	}

	ofl = hws_hwpa->offloader;

	subsys = ofl->subsys;

	PR_DEVEL("Removing HWPA session: %p\n", hws_hwpa);

	mutex_lock(&global_ctx->mutex);

	/* before destroying hwpa session obtain linked nss session */
	hws_nss = hws_hwpa->hws_nss;

	spin_lock_bh(&ofl->lock);
	ofl->avm_pa_session_count--;
	spin_unlock_bh(&ofl->lock);

	/* firstly destroy hwpa session */
	if (unlikely(!hws_nss)) {
		hwpa_nss_destroy_unattached_hwpa_session(hws_hwpa);
		goto finished_unlock;
	}

	/*
	 * There are very rare cases where the following condition is hit.
	 * This means we have an invalid nss session attached which was already freed
	 */
	if (unlikely(atomic_read(&hws_nss->pa_ref_count) == 0)) {
		hwpa_nss_destroy_unattached_hwpa_session(hws_hwpa);
		goto finished_unlock;
	}

	hwpa_nss_destroy_hwpa_session(hws_hwpa);

	/* if the nss session has more hwpa sessions attached cleanup and ret */
	pa_ref_count = atomic_read(&hws_nss->pa_ref_count);
	if (pa_ref_count > 0) {
		PR_DEVEL("Not removing HW session %p, as there are still %d hwpa sessions attached\n", hws_nss, pa_ref_count);
		retval = HWPA_BACKEND_SUCCESS;
		goto finished_unlock;
	}

	/*
	 * If the hwpa session list in the nss session is not empty although
	 * the pa_ref_counter says no sessions are attached anymore, something
	 * is going wrong.
	 */
	if (!list_empty(&hws_nss->hwpa_session_list)) {
		WARN_ON(1);
		goto finished_unlock;
	}

	/*
	 * According to nss session state perform action and update session
	 * counters
	 */
	switch (hws_nss->state) {
	case HWPA_NSS_SESSION_STATE_ACTIVE:
		PR_DEVEL("Removing HW session %p from subsystem %s with offloader %s\n",
			 hws_nss, subsys->label, ofl->label);

		spin_lock_bh(&ofl->list_lock);
		list_del_rcu(&hws_nss->ofl_node);
		spin_unlock_bh(&ofl->list_lock);
		synchronize_rcu();

		if (!test_bit(HWPA_NSS_SESSION_FLUSHED, &hws_nss->flags))
			retval = ofl->remove_session(subsys, hws_nss);

		hwpa_nss_tracker_remove_nss_session(subsys);

		if (hws_nss->state == HWPA_NSS_SESSION_STATE_INVALID) {
			PR_DEVEL("NSS Session removed successfully!\n");
			retval = HWPA_BACKEND_SUCCESS;
		} else	{
			PR_DEVEL("NSS Session could not be deaccelerated!\n");
			retval = HWPA_BACKEND_ERR_SESS_REM;
		}
		spin_lock_bh(&ofl->lock);
		ofl->active_nss_session_count--;
		spin_unlock_bh(&ofl->lock);
		break;
	case HWPA_NSS_SESSION_STATE_PENDING_APPROVAL:
		ofl_data = hwpa_nss_pom_get_and_unregister_offloading_data(hws_nss);
		kfree(ofl_data);
		spin_lock_bh(&ofl->lock);
		ofl->pending_nss_session_count--;
		spin_unlock_bh(&ofl->lock);
		break;
	case HWPA_NSS_SESSION_STATE_READY_TO_OFFLOAD:
		break;
	case HWPA_NSS_SESSION_STATE_INVALID:
		break;
	default:
		PR_DEVEL("Bad Session!\n");
	}

	hwpa_nss_destroy_nss_session(hws_nss);

finished_unlock:
	mutex_unlock(&global_ctx->mutex);

	PR_DEVEL("Removed NSS session: %p\n", hws_nss);

	retval = HWPA_BACKEND_SUCCESS;

finished:
	return retval;
}
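
/*
 * Note: several hwpa sessions may share a single nss session (e.g. both
 * directions of a bidirectional connection). pa_ref_count tracks the attached
 * hwpa sessions, so the nss rule is only deaccelerated once the last hwpa
 * session detaches.
 */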

/*
 *==============================================================================
 * hwpa nss purging
 *==============================================================================
 */

/**
 * @fn void hwpa_nss_ipv4_purge_sessions(struct hwpa_nss_subsystem*)
 * @brief purge all ipv4 sessions
 *
 * @param subsys [in] the subsystem
 */
void hwpa_nss_ipv4_purge_sessions(struct hwpa_nss_subsystem *subsys)
{
	struct hwpa_nss_ipv4_specific *ipv4_spec = subsys->ipv4_spec;
	struct hwpa_nss_nss_session *hws_nss;
	struct hwpa_nss_offloader *ofl;
	int i;

	rcu_read_lock();
	hash_for_each_rcu(ipv4_spec->session_table, i, hws_nss, node) {
		ofl = hws_nss->offloader;
		ofl->remove_session(subsys, hws_nss);
	}
	rcu_read_unlock();
}

/**
 * @fn void hwpa_nss_ipv6_purge_sessions(struct hwpa_nss_subsystem*)
 * @brief purge all ipv6 sessions
 *
 * @param subsys [in] the subsystem
 */
void hwpa_nss_ipv6_purge_sessions(struct hwpa_nss_subsystem *subsys)
{
	struct hwpa_nss_ipv6_specific *ipv6_spec = subsys->ipv6_spec;
	struct hwpa_nss_nss_session *hws_nss;
	struct hwpa_nss_offloader *ofl;
	int i;

	rcu_read_lock();
	hash_for_each_rcu(ipv6_spec->session_table, i, hws_nss, node) {
		ofl = hws_nss->offloader;
		ofl->remove_session(subsys, hws_nss);
	}
	rcu_read_unlock();
}

/**
 * @fn void hwpa_qca833x_purge_sessions(struct hwpa_nss_subsystem*)
 * @brief purge all qca833x sessions
 *
 * @param subsys [in] the subsystem
 */
void hwpa_qca833x_purge_sessions(struct hwpa_nss_subsystem *subsys)
{
	struct hwpa_qca833x_specific *qca833x_spec = subsys->qca833x_spec;
	struct hwpa_nss_nss_session *hws_nss;
	struct hwpa_nss_offloader *ofl;
	int i;

	rcu_read_lock();
	hash_for_each_rcu(qca833x_spec->session_table, i, hws_nss, node) {
		ofl = hws_nss->offloader;
		ofl->remove_session(subsys, hws_nss);
	}
	rcu_read_unlock();
}

/**
 * @fn void hwpa_nss_subsystem_purge_sessions(struct hwpa_nss_subsystem*)
 * @brief purge all sessions which are offloaded in a subsystem
 *
 * @param subsys [in] the subsystem
 */
void hwpa_nss_subsystem_purge_sessions(struct hwpa_nss_subsystem *subsys)
{
	if (subsys->tracker->usage == 0)
		return;

	PR_DEVEL("Purging sessions of subsystem %s", subsys->label);

	subsys->purge_sessions(subsys);
}

/**
 * @fn void hwpa_nss_purge_sessions(void)
 * @brief purge all still offloaded sessions from nss
 */
void hwpa_nss_purge_sessions(void)
{
	struct hwpa_nss_context *global_ctx = &hwpa_nss_ctx;
	int i;

	for (i = 0; i < ARRAY_SIZE(global_ctx->subsystems); ++i)
		hwpa_nss_subsystem_purge_sessions(global_ctx->subsystems[i]);
}

/*
 *===============================================================================
 * hwpa nss ipv4 synchronization
 *==============================================================================
 */

/**
 * @fn enum hwpa_backend_rv hwpa_nss_ipv4_sync_session(struct hwpa_nss_subsystem*, struct nss_ipv4_conn_sync*)
 * @brief sync an nss session. Finds the local nss session from the sync
 * message and updates its stats
 *
 * @param subsys [in] the subsystem which belongs to the sync message.
 * @param sync [in] ipv4 subsystem sync message.
 */
static enum hwpa_backend_rv hwpa_nss_ipv4_sync_session(struct hwpa_nss_subsystem *subsys,
						       struct nss_ipv4_conn_sync *sync)
{
	struct hwpa_nss_nss_session *hws_nss;
	enum hwpa_backend_rv retval = HWPA_BACKEND_SUCCESS;
	int index;

	PR_DEVEL("sync ipv4 session, reason: %d\n", sync->reason);

	hws_nss = hwpa_nss_ipv4_find_nss_session_from_sync(subsys, sync);
	if (!hws_nss) {
		retval = HWPA_BACKEND_ERR_INVALID_SYNC;
		goto failure_1;
	}
	/* We only want stats when we asked for them */
	switch (sync->reason) {
	case NSS_IPV4_RULE_SYNC_REASON_STATS:
		break;
	case NSS_IPV4_RULE_SYNC_REASON_FLUSH:
	case NSS_IPV4_RULE_SYNC_REASON_EVICT:
		PR_DEVEL("NSS Session got removed by NSS\n");
		set_bit(HWPA_NSS_SESSION_FLUSHED, &hws_nss->flags);
		/* fall through */
	case NSS_IPV4_RULE_SYNC_REASON_DESTROY:
	default:
		goto ignore_sync;
	}

	if (hws_nss->state != HWPA_NSS_SESSION_STATE_ACTIVE)
		goto ignore_sync;

	spin_lock_bh(&hws_nss->sync_lock);

	hws_nss->stats.flow_rx_bytes += sync->flow_rx_byte_count;
	hws_nss->stats.flow_rx_pkts += sync->flow_rx_packet_count;
	hws_nss->stats.flow_tx_bytes += sync->flow_tx_byte_count;
	hws_nss->stats.flow_tx_pkts += sync->flow_tx_packet_count;
	hws_nss->stats.return_rx_bytes += sync->return_rx_byte_count;
	hws_nss->stats.return_rx_pkts += sync->return_rx_packet_count;
	hws_nss->stats.return_tx_bytes += sync->return_tx_byte_count;
	hws_nss->stats.return_tx_pkts += sync->return_tx_packet_count;

	spin_unlock_bh(&hws_nss->sync_lock);

	PR_DEVEL("hws_nss->stats.flow_rx_bytes: %d\n"
		"hws_nss->stats.flow_tx_bytes: %d\n"
		"hws_nss->stats.return_rx_bytes: %d\n"
		"hws_nss->stats.return_tx_bytes: %d\n",
		hws_nss->stats.flow_rx_bytes,
		hws_nss->stats.flow_tx_bytes,
		hws_nss->stats.return_rx_bytes,
		hws_nss->stats.return_tx_bytes);

	set_bit(HWPA_NSS_SESSION_SYNC_FLOW_UPDATED, &hws_nss->flags);
	set_bit(HWPA_NSS_SESSION_SYNC_RETURN_UPDATED, &hws_nss->flags);

	spin_lock(&subsys->sync->lock);
	index = subsys->ipv4_spec->sync_info_idx;
	if (index >= NSS_MAX_IPV4_SESSIONS - 1) {
		spin_unlock(&subsys->sync->lock);
		goto failure_1;
	}
	subsys->ipv4_spec->sync_info_idx++;
	spin_unlock(&subsys->sync->lock);
	memcpy(&subsys->ipv4_spec->sync_info[index], sync, sizeof(*sync));

failure_1:
ignore_sync:
	return retval;
}

/**
 * @fn void hwpa_nss_ipv4_net_dev_callback(void*, struct nss_ipv4_msg*)
 * @brief ipv4 subsystem callback
 *
 * @param app_data [in] application specific data. Used for subsystem.
 * @param nim [in] reply message from nss
 */
static void hwpa_nss_ipv4_net_dev_callback(void *app_data,
					  struct nss_ipv4_msg *nim)
{
	struct nss_ipv4_conn_sync *sync = &nim->msg.conn_stats;
	struct hwpa_nss_subsystem *subsys = (struct hwpa_nss_subsystem *) app_data;

	if (nim->cm.type != NSS_IPV4_RX_CONN_STATS_SYNC_MSG)
		return;

	hwpa_nss_ipv4_sync_session(subsys, sync);
}

/**
 * @fn void hwpa_nss_ipv4_sync_many_callback(void*, struct nss_ipv4_msg*)
 * @brief callback function used as a reply from a sync_many message from nss
 *
 * @param app_data [in] application specific data. not used here.
 * @param nim [in] reply message from nss
 */
static void hwpa_nss_ipv4_sync_many_callback(void *app_data,
					  struct nss_ipv4_msg *nim)
{
	uint32_t index;
	struct hwpa_nss_subsystem *subsys = hwpa_nss_get_subsys(HWPA_NSS_SUBSYSTEM_IDX_IPV4);
	struct hwpa_nss_synchronizer *sync = subsys->sync;
	struct nss_ipv4_conn_sync_many_msg *sync_many_msg = &nim->msg.conn_stats_many;
	struct nss_ipv4_conn_sync_many_msg *global_sync_many_msg;

	if (nim->cm.type != NSS_IPV4_TX_CONN_STATS_SYNC_MANY_MSG)
		return;

	global_sync_many_msg = &sync->msg.ipv4->msg.conn_stats_many;

	if (nim->cm.response == NSS_CMN_RESPONSE_ACK) {
		for (index = 0; index < sync_many_msg->count; index++) {
			hwpa_nss_ipv4_sync_session(subsys,
					  &(sync_many_msg->conn_sync[index]));
		}

		spin_lock(&sync->lock);
		global_sync_many_msg->index = sync_many_msg->next;
		spin_unlock(&sync->lock);
		/* Send next sync_many-msg*/
		queue_delayed_work(sync->workqueue, &sync->work, 0);
	} else	{
		spin_lock(&sync->lock);
		global_sync_many_msg->index = 0;
		subsys->ipv4_spec->sync_info_len = subsys->ipv4_spec->sync_info_idx;
		subsys->ipv4_spec->sync_info_idx = 0;
		spin_unlock(&sync->lock);
		queue_delayed_work(sync->workqueue, &sync->work, HWPA_NSS_STATS_SYNC_PERIOD);
	}
}
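
/*
 * The sync_many exchange is a ping-pong between host and NSS:
 * hwpa_nss_ipv4_sync_work() sends a sync_many request starting at 'index',
 * NSS answers with up to 'count' connection stats plus the 'next' index, and
 * the callback above immediately requeues the work to fetch the next chunk.
 * Once NSS answers with a NACK, the index is reset and the cycle restarts
 * after HWPA_NSS_STATS_SYNC_PERIOD.
 */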

/**
 * @fn void hwpa_nss_ipv4_sync_work(struct work_struct*)
 * @brief work function for the ipv4 sync workqueue
 *
 * @param work [in] work struct
 */
static void hwpa_nss_ipv4_sync_work(struct work_struct *work)
{
	struct delayed_work *delayed_work_data = container_of(work, struct delayed_work, work);
	struct hwpa_nss_subsystem *subsys = hwpa_nss_get_subsys(HWPA_NSS_SUBSYSTEM_IDX_IPV4);
	struct hwpa_nss_ipv4_specific *ipv4_spec = subsys->ipv4_spec;
	struct hwpa_nss_synchronizer *sync;
	struct nss_ipv4_msg *msg;
	uint32_t retval;
	unsigned long int current_jiffies;

	sync = subsys->sync;
	msg = sync->msg.ipv4;

	if (subsys->tracker->usage == 0)
		goto reschedule;

	if (test_bit(HWPA_NSS_SUBSYS_FLAG_DISABLE_STAT_COLLECTION, &subsys->flags))
		goto reschedule;

	if (msg->msg.conn_stats_many.index == 0) {
		current_jiffies = jiffies;

		if (time_is_after_jiffies(ipv4_spec->roll_check_jiffies))  {
			ipv4_spec->next_req_time = jiffies + HWPA_NSS_STATS_SYNC_PERIOD;
		}

		if (time_after(ipv4_spec->next_req_time, current_jiffies)) {
			spin_lock(&sync->lock);
			ipv4_spec->sync_info_len = ipv4_spec->sync_info_idx;
			ipv4_spec->sync_info_idx = 0;
			spin_unlock(&sync->lock);
			set_bit(HWPA_NSS_SUBSYS_FLAG_READY_TO_COLLECT_SYNC_INFO, &subsys->flags);
			msleep(jiffies_to_msecs(ipv4_spec->next_req_time - current_jiffies));
			clear_bit(HWPA_NSS_SUBSYS_FLAG_READY_TO_COLLECT_SYNC_INFO, &subsys->flags);
		}
		ipv4_spec->roll_check_jiffies = jiffies;
		ipv4_spec->next_req_time = ipv4_spec->roll_check_jiffies + HWPA_NSS_STATS_SYNC_PERIOD;
	}

	retval = nss_ipv4_tx_with_size(subsys->mgr, msg, PAGE_SIZE);
	if (retval == NSS_TX_SUCCESS)
		return;

reschedule:
	spin_lock(&sync->lock);
	msg->msg.conn_stats_many.count = 0;
	msg->msg.conn_stats_many.index = 0;
	spin_unlock(&sync->lock);
	queue_delayed_work(sync->workqueue, delayed_work_data,
			   HWPA_NSS_STATS_SYNC_PERIOD);
}

/**
 * @fn void hwpa_nss_ipv4_sync_exit(struct hwpa_nss_subsystem*)
 * @brief exit ipv4 subsystem synchronization
 *
 * @param subsys [in] handle of the subsystem
 */
static void hwpa_nss_ipv4_sync_exit(struct hwpa_nss_subsystem *subsys)
{
	struct hwpa_nss_synchronizer *sync = subsys->sync;

	cancel_delayed_work_sync(&sync->work);
	destroy_workqueue(sync->workqueue);
	nss_ipv4_conn_sync_many_notify_unregister();
	kfree(sync->msg.ipv4);
}

/**
 * @fn enum hwpa_backend_rv hwpa_nss_ipv4_sync_init(struct hwpa_nss_subsystem*)
 * @brief initialize ipv4 subsystem synchronization
 *
 * @param subsys [in] handle of the subsystem
 *
 * @return success or error code
 */
static enum hwpa_backend_rv hwpa_nss_ipv4_sync_init(struct hwpa_nss_subsystem *subsys)
{
	enum hwpa_backend_rv retval = HWPA_BACKEND_SUCCESS;
	struct hwpa_nss_synchronizer *sync = subsys->sync;
	struct nss_ipv4_conn_sync_many_msg *nicsm;
	struct nss_ipv4_msg *msg;

	PR_DEVEL("IPV4 Sync init\n");

	spin_lock_init(&sync->lock);

	msg = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!msg) {
		retval = HWPA_BACKEND_ERR_MEMORY;
		goto failure_1;
	}

	sync->msg.ipv4 = msg;

	nss_ipv4_conn_sync_many_notify_register(hwpa_nss_ipv4_sync_many_callback);

	nss_ipv4_msg_init(msg, NSS_IPV4_RX_INTERFACE,
		NSS_IPV4_TX_CONN_STATS_SYNC_MANY_MSG,
		sizeof(struct nss_ipv4_conn_sync_many_msg),
		NULL,
		(void *) subsys);

	nicsm = &msg->msg.conn_stats_many;
	nicsm->index = 0;
	nicsm->size = PAGE_SIZE;

	sync->workqueue = create_singlethread_workqueue("hwpa_nss_ipv4_sync_workqueue");
	if (!sync->workqueue) {
		retval = HWPA_BACKEND_ERR_SYNC;
		goto failure_2;
	}

	INIT_DELAYED_WORK(&sync->work,
			  hwpa_nss_ipv4_sync_work);

	queue_delayed_work(sync->workqueue, &sync->work,
			   HWPA_NSS_STATS_SYNC_PERIOD);

	return HWPA_BACKEND_SUCCESS;

failure_2:
	nss_ipv4_conn_sync_many_notify_unregister();
	kfree(msg);

failure_1:
	return retval;
}

/*
 *===============================================================================
 * hwpa nss ipv6 synchronization
 *==============================================================================
 */

/**
 * @fn enum hwpa_backend_rv hwpa_nss_ipv6_sync_session(struct hwpa_nss_subsystem*, struct nss_ipv6_conn_sync*)
 * @brief sync an nss session. Finds local nss session from sync and updates its stats
 *
 * @param subsys [in] the subsystem which belongs to the sync message.
 * @param sync [in] ipv6 subsystem sync message.
 */
static enum hwpa_backend_rv hwpa_nss_ipv6_sync_session(struct hwpa_nss_subsystem *subsys,
						       struct nss_ipv6_conn_sync *sync)
{
	struct hwpa_nss_nss_session *hws_nss;
	enum hwpa_backend_rv retval = HWPA_BACKEND_SUCCESS;
	int index;

	PR_DEVEL("Syncing ipv6 session\n");

	hws_nss = hwpa_nss_ipv6_find_nss_session_from_sync(subsys, sync);
	if (!hws_nss) {
		retval = HWPA_BACKEND_ERR_INVALID_SYNC;
		goto failure_1;
	}

	/* We only want stats when we asked for them */
	switch (sync->reason) {
	case NSS_IPV6_RULE_SYNC_REASON_STATS:
		break;
	case NSS_IPV6_RULE_SYNC_REASON_FLUSH:
	case NSS_IPV6_RULE_SYNC_REASON_EVICT:
		PR_DEVEL("NSS Session got removed by NSS\n");
		set_bit(HWPA_NSS_SESSION_FLUSHED, &hws_nss->flags);
		/* fall through */
	case NSS_IPV6_RULE_SYNC_REASON_DESTROY:
	default:
		goto ignore_sync;
	}

	if (hws_nss->state != HWPA_NSS_SESSION_STATE_ACTIVE)
		goto ignore_sync;

	spin_lock_bh(&hws_nss->sync_lock);

	hws_nss->stats.flow_rx_bytes += sync->flow_rx_byte_count;
	hws_nss->stats.flow_rx_pkts += sync->flow_rx_packet_count;
	hws_nss->stats.flow_tx_bytes += sync->flow_tx_byte_count;
	hws_nss->stats.flow_tx_pkts += sync->flow_tx_packet_count;
	hws_nss->stats.return_rx_bytes += sync->return_rx_byte_count;
	hws_nss->stats.return_rx_pkts += sync->return_rx_packet_count;
	hws_nss->stats.return_tx_bytes += sync->return_tx_byte_count;
	hws_nss->stats.return_tx_pkts += sync->return_tx_packet_count;

	spin_unlock_bh(&hws_nss->sync_lock);

	PR_DEVEL("hws_nss->stats.flow_rx_bytes: %d\n"
		"hws_nss->stats.flow_tx_bytes: %d\n"
		"hws_nss->stats.return_rx_bytes: %d\n"
		"hws_nss->stats.return_tx_bytes: %d\n",
		hws_nss->stats.flow_rx_bytes,
		hws_nss->stats.flow_tx_bytes,
		hws_nss->stats.return_rx_bytes,
		hws_nss->stats.return_tx_bytes);

	set_bit(HWPA_NSS_SESSION_SYNC_FLOW_UPDATED, &hws_nss->flags);
	set_bit(HWPA_NSS_SESSION_SYNC_RETURN_UPDATED, &hws_nss->flags);

	spin_lock(&subsys->sync->lock);
	index = subsys->ipv6_spec->sync_info_idx;
	if (index >= NSS_MAX_IPV6_SESSIONS - 1) {
		spin_unlock(&subsys->sync->lock);
		goto failure_1;
	}
	subsys->ipv6_spec->sync_info_idx++;
	spin_unlock(&subsys->sync->lock);
	memcpy(&subsys->ipv6_spec->sync_info[index], sync, sizeof(*sync));

failure_1:
ignore_sync:
	return retval;
}

/**
 * @fn void hwpa_nss_ipv6_net_dev_callback(void*, struct nss_ipv6_msg*)
 * @brief ipv6 subsystem callback
 *
 * @param app_data [in] application specific data. Used for subsystem.
 * @param nim [in] reply message from nss
 */
static void hwpa_nss_ipv6_net_dev_callback(void *app_data,
					  struct nss_ipv6_msg *nim)
{
	struct nss_ipv6_conn_sync *sync = &nim->msg.conn_stats;
	struct hwpa_nss_subsystem *subsys = (struct hwpa_nss_subsystem *) app_data;

	if (nim->cm.type != NSS_IPV6_RX_CONN_STATS_SYNC_MSG)
		return;

	hwpa_nss_ipv6_sync_session(subsys, sync);
}

/**
 * @fn void hwpa_nss_ipv6_sync_many_callback(void*, struct nss_ipv6_msg*)
 * @brief callback function used as a reply from a sync_many message from nss
 *
 * @param app_data [in] application specific data. not used here.
 * @param nim [in] reply message from nss
 */
static void hwpa_nss_ipv6_sync_many_callback(void *app_data,
					  struct nss_ipv6_msg *nim)
{
	uint32_t index;
	struct hwpa_nss_subsystem *subsys = hwpa_nss_get_subsys(HWPA_NSS_SUBSYSTEM_IDX_IPV6);
	struct hwpa_nss_synchronizer *sync = subsys->sync;
	struct nss_ipv6_conn_sync_many_msg *sync_many_msg = &nim->msg.conn_stats_many;
	struct nss_ipv6_conn_sync_many_msg *global_sync_many_msg;

	if (nim->cm.type != NSS_IPV6_TX_CONN_STATS_SYNC_MANY_MSG)
		return;

	global_sync_many_msg = &sync->msg.ipv6->msg.conn_stats_many;

	if (nim->cm.response == NSS_CMN_RESPONSE_ACK) {
		for (index = 0; index < sync_many_msg->count; index++) {
			hwpa_nss_ipv6_sync_session(subsys,
					  &(sync_many_msg->conn_sync[index]));
		}

		spin_lock(&sync->lock);
		global_sync_many_msg->index = sync_many_msg->next;
		spin_unlock(&sync->lock);
		/* Send next sync_many-msg*/
		queue_delayed_work(sync->workqueue, &sync->work, 0);
	} else	{
		spin_lock(&sync->lock);
		global_sync_many_msg->index = 0;
		subsys->ipv6_spec->sync_info_len = subsys->ipv6_spec->sync_info_idx;
		subsys->ipv6_spec->sync_info_idx = 0;
		spin_unlock(&sync->lock);
		queue_delayed_work(sync->workqueue, &sync->work, HWPA_NSS_STATS_SYNC_PERIOD);
	}
}

/**
 * @fn void hwpa_nss_ipv6_sync_work(struct work_struct*)
 * @brief work function for the ipv6 sync workqueue
 *
 * @param work [in] work struct
 */
static void hwpa_nss_ipv6_sync_work(struct work_struct *work)
{
	struct delayed_work *delayed_work_data = container_of(work, struct delayed_work, work);
	struct hwpa_nss_subsystem *subsys = hwpa_nss_get_subsys(HWPA_NSS_SUBSYSTEM_IDX_IPV6);
	struct hwpa_nss_ipv6_specific *ipv6_spec = subsys->ipv6_spec;
	struct hwpa_nss_synchronizer *sync;
	struct nss_ipv6_msg *msg;
	uint32_t retval;
	unsigned long int current_jiffies;

	sync = subsys->sync;
	msg = sync->msg.ipv6;

	if (subsys->tracker->usage == 0)
		goto reschedule;

	if (test_bit(HWPA_NSS_SUBSYS_FLAG_DISABLE_STAT_COLLECTION, &subsys->flags))
		goto reschedule;

	if (msg->msg.conn_stats_many.index == 0) {
		current_jiffies = jiffies;

		if (time_is_after_jiffies(ipv6_spec->roll_check_jiffies))  {
			ipv6_spec->next_req_time = jiffies + HWPA_NSS_STATS_SYNC_PERIOD;
		}

		if (time_after(ipv6_spec->next_req_time, current_jiffies)) {
			spin_lock(&sync->lock);
			ipv6_spec->sync_info_len = ipv6_spec->sync_info_idx;
			ipv6_spec->sync_info_idx = 0;
			spin_unlock(&sync->lock);
			set_bit(HWPA_NSS_SUBSYS_FLAG_READY_TO_COLLECT_SYNC_INFO, &subsys->flags);
			msleep(jiffies_to_msecs(ipv6_spec->next_req_time - current_jiffies));
			clear_bit(HWPA_NSS_SUBSYS_FLAG_READY_TO_COLLECT_SYNC_INFO, &subsys->flags);
		}
		ipv6_spec->roll_check_jiffies = jiffies;
		ipv6_spec->next_req_time = ipv6_spec->roll_check_jiffies + HWPA_NSS_STATS_SYNC_PERIOD;
	}

	retval = nss_ipv6_tx_with_size(subsys->mgr, msg, PAGE_SIZE);
	if (retval  == NSS_TX_SUCCESS)
		return;

reschedule:
	spin_lock(&sync->lock);
	msg->msg.conn_stats_many.count = 0;
	msg->msg.conn_stats_many.index = 0;
	spin_unlock(&sync->lock);
	queue_delayed_work(sync->workqueue, delayed_work_data,
			   HWPA_NSS_STATS_SYNC_PERIOD);
}

/**
 * @fn void hwpa_nss_ipv6_sync_exit(struct hwpa_nss_subsystem*)
 * @brief exit ipv6 subsystem synchronization
 *
 * @param subsys [in] handle of the subsystem
 */
static void hwpa_nss_ipv6_sync_exit(struct hwpa_nss_subsystem *subsys)
{
	struct hwpa_nss_synchronizer *sync = subsys->sync;

	cancel_delayed_work_sync(&sync->work);
	destroy_workqueue(sync->workqueue);
	nss_ipv6_conn_sync_many_notify_unregister();
	kfree(sync->msg.ipv6);
}

/**
 * @fn enum hwpa_backend_rv hwpa_nss_ipv6_sync_init(struct hwpa_nss_subsystem*)
 * @brief initialize ipv6 subsystem synchronization
 *
 * @param subsys [in] handle of the subsystem
 *
 * @return success or error code
 */
static enum hwpa_backend_rv hwpa_nss_ipv6_sync_init(struct hwpa_nss_subsystem *subsys)
{
	enum hwpa_backend_rv retval = HWPA_BACKEND_SUCCESS;
	struct hwpa_nss_synchronizer *sync = subsys->sync;
	struct nss_ipv6_conn_sync_many_msg *nicsm;
	struct nss_ipv6_msg *msg;

	PR_DEVEL("IPV6 Sync init\n");

	spin_lock_init(&sync->lock);

	msg = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!msg) {
		retval = HWPA_BACKEND_ERR_MEMORY;
		goto failure_1;
	}

	sync->msg.ipv6 = msg;

	nss_ipv6_conn_sync_many_notify_register(hwpa_nss_ipv6_sync_many_callback);

	nss_ipv6_msg_init(msg, NSS_IPV6_RX_INTERFACE,
		NSS_IPV6_TX_CONN_STATS_SYNC_MANY_MSG,
		sizeof(struct nss_ipv6_conn_sync_many_msg),
		NULL,
		(void *) subsys);

	nicsm = &msg->msg.conn_stats_many;
	nicsm->index = 0;
	nicsm->size = PAGE_SIZE;

	sync->workqueue = create_singlethread_workqueue("hwpa_nss_ipv6_sync_workqueue");
	if (!sync->workqueue) {
		retval = HWPA_BACKEND_ERR_SYNC;
		goto failure_2;
	}

	INIT_DELAYED_WORK(&sync->work,
			  hwpa_nss_ipv6_sync_work);

	queue_delayed_work(sync->workqueue, &sync->work,
			   HWPA_NSS_STATS_SYNC_PERIOD);

	return HWPA_BACKEND_SUCCESS;

failure_2:
	nss_ipv6_conn_sync_many_notify_unregister();
	kfree(msg);

failure_1:
	return retval;
}

/*
 *===============================================================================
 * hwpa qca833x synchronization
 *==============================================================================
 */

/**
 * @fn void hwpa_qca833x_sync_work(struct work_struct*)
 * @brief work function for the qca833x sync workqueue
 *
 * @param work [in] work struct
 */
static void hwpa_qca833x_sync_work(struct work_struct *work)
{
	struct delayed_work *delayed_work_data = container_of(work, struct delayed_work, work);
	struct hwpa_nss_synchronizer *sync;
	struct hwpa_nss_nss_session *hws_nss;
	struct hwpa_nss_subsystem *subsys = hwpa_nss_get_subsys(HWPA_NSS_SUBSYSTEM_IDX_QCA833X);
	struct hwpa_nss_offloader *ofl = hwpa_nss_get_offloader(HWPA_NSS_OFFLOADER_IDX_QCA833X);
	struct qca833x_ofl_stats stats;
	struct qca833x_api *api;

	api = subsys->qca833x_spec->api;
	sync = subsys->sync;

	if (subsys->tracker->usage == 0)
		goto reschedule;

	/* Without an API there is nothing to fetch stats from */
	if (!api || !api->get_and_reset_stats)
		goto reschedule;

	PR_DEVEL("QCA833X Sync!\n");

	rcu_read_lock();
	list_for_each_entry_rcu(hws_nss, &ofl->session_list, ofl_node) {
		if (api->get_and_reset_stats(&hws_nss->qca833x.tuple,
					     &stats) != QCA833X_OFL_OK) {
			PR_DEVEL("Error fetching stats for %p\n", hws_nss);
			continue;
		}

		spin_lock_bh(&hws_nss->sync_lock);
		hws_nss->stats.flow_rx_bytes += stats.rx_tx_bytes;
		hws_nss->stats.flow_tx_bytes += stats.rx_tx_bytes;
		hws_nss->stats.flow_rx_pkts = 0;
		hws_nss->stats.flow_tx_pkts = 0;
		spin_unlock_bh(&hws_nss->sync_lock);
		set_bit(HWPA_NSS_SESSION_SYNC_FLOW_UPDATED, &hws_nss->flags);
	}
	rcu_read_unlock();

reschedule:
	queue_delayed_work(sync->workqueue, delayed_work_data,
			   HWPA_NSS_QCA833X_STATS_SYNC_PERIOD);
}

/**
 * @fn void hwpa_qca833x_sync_exit(struct hwpa_nss_subsystem*)
 * @brief exit qca833x subsystem synchronization
 *
 * @param subsys [in] handle of the subsystem
 */
static void hwpa_qca833x_sync_exit(struct hwpa_nss_subsystem *subsys)
{
	struct hwpa_nss_synchronizer *sync = subsys->sync;

	cancel_delayed_work_sync(&sync->work);
	destroy_workqueue(sync->workqueue);
}

/**
 * @fn enum hwpa_backend_rv hwpa_qca833x_sync_init(struct hwpa_nss_subsystem*)
 * @brief initialize qca833x subsystem synchronization
 *
 * @param subsys [in] handle of the subsystem
 *
 * @return success or error code
 */
static enum hwpa_backend_rv hwpa_qca833x_sync_init(struct hwpa_nss_subsystem *subsys)
{
	enum hwpa_backend_rv retval = HWPA_BACKEND_SUCCESS;
	struct hwpa_nss_synchronizer *sync = subsys->sync;

	PR_DEVEL("qca833x sync init\n");

	spin_lock_init(&sync->lock);

	sync->workqueue = create_singlethread_workqueue("hwpa_qca833x_sync_workqueue");
	if (!sync->workqueue) {
		retval = HWPA_BACKEND_ERR_SYNC;
		goto failure_1;
	}

	INIT_DELAYED_WORK(&sync->work,
			  hwpa_qca833x_sync_work);

	queue_delayed_work(sync->workqueue, &sync->work,
			   HWPA_NSS_QCA833X_STATS_SYNC_PERIOD);

	return HWPA_BACKEND_SUCCESS;

failure_1:
	return retval;
}

/*
 *===============================================================================
 * hwpa nss synchronization
 *==============================================================================
 */

/**
 * @fn enum hwpa_backend_rv hwpa_backend_stats(unsigned long, struct avm_pa_session_stats*)
 * @brief update avm_pa stats
 *
 * @param handle [in] handle of the hwpa session
 * @param stats [out] avm_pa stats to fill
 * @return success only
 */
enum hwpa_backend_rv hwpa_backend_stats(unsigned long handle,
							struct avm_pa_session_stats *stats)
{
	struct hwpa_nss_hwpa_session *hws_hwpa;
	struct hwpa_nss_nss_session *hws_nss;

	hws_hwpa = hwpa_nss_handle_to_session(handle);
	if (!hws_hwpa || !hws_hwpa->hws_nss) {
		PR_DEVEL("Requesting Stats from invalid session id: %p\n", (void *) handle);
		memset(stats, 0, sizeof(*stats));
		return HWPA_BACKEND_SUCCESS;
	}
	stats->validflags = 0;
	rcu_read_lock();
	hws_nss = rcu_dereference(hws_hwpa->hws_nss);

	if (hws_nss->state != HWPA_NSS_SESSION_STATE_ACTIVE) {
		memset(stats, 0, sizeof(*stats));
		goto done_unlock;
	}

	if (hws_hwpa->direction == HWPA_NSS_SESSION_DIRECTION_FLOW &&
			test_and_clear_bit(HWPA_NSS_SESSION_SYNC_FLOW_UPDATED, &hws_nss->flags)) {
		spin_lock_bh(&hws_nss->sync_lock);
		stats->tx_pkts = hws_nss->stats.flow_rx_pkts;
		stats->tx_bytes = (u64) hws_nss->stats.flow_rx_bytes;
		hws_nss->stats.flow_rx_pkts = 0;
		hws_nss->stats.flow_rx_bytes = 0;
		hws_nss->stats.return_tx_pkts = 0;
		hws_nss->stats.return_tx_bytes = 0;
		spin_unlock_bh(&hws_nss->sync_lock);

		stats->validflags |= AVM_PA_SESSION_STATS_VALID_BYTES;
		if (!test_bit(HWPA_NSS_SUBSYS_FLAG_FLOW_NO_PKT_STATS, &hws_nss->offloader->subsys->flags))
			stats->validflags |= AVM_PA_SESSION_STATS_VALID_PKTS;

	} else if (hws_hwpa->direction == HWPA_NSS_SESSION_DIRECTION_RETURN &&
			test_and_clear_bit(HWPA_NSS_SESSION_SYNC_RETURN_UPDATED, &hws_nss->flags)) {
		spin_lock_bh(&hws_nss->sync_lock);
		stats->tx_pkts = hws_nss->stats.return_rx_pkts;
		stats->tx_bytes = (u64) hws_nss->stats.return_rx_bytes;
		hws_nss->stats.flow_tx_pkts = 0;
		hws_nss->stats.flow_tx_bytes = 0;
		hws_nss->stats.return_rx_pkts = 0;
		hws_nss->stats.return_rx_bytes = 0;
		spin_unlock_bh(&hws_nss->sync_lock);

		stats->validflags |= AVM_PA_SESSION_STATS_VALID_BYTES;
		stats->validflags |= AVM_PA_SESSION_STATS_VALID_PKTS;
	} else {
		memset(stats, 0, sizeof(*stats));
	}


done_unlock:
	rcu_read_unlock();
	PR_DEVEL("%p (dir %d, nss sess %p) AVM_PA Read %d pkts and %lld bytes; flags: %x\n",
			hws_hwpa, hws_hwpa->direction, hws_hwpa->hws_nss, stats->tx_pkts,
			stats->tx_bytes, stats->validflags);

	return HWPA_BACKEND_SUCCESS;
}
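
/*
 * Note that hwpa_backend_stats() reports deltas: the per-direction counters
 * are cleared under sync_lock on every successful read, so avm_pa only sees
 * the traffic accumulated since its previous query.
 */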

/*
 *===============================================================================
 * hwpa nss session check
 *==============================================================================
 */

/**
 * @fn enum hwpa_backend_rv hwpa_backend_check_session(unsigned long)
 * @brief check if session was flushed by nss
 *
 * @param handle [in] handle for hws_hwpa
 * @return success if session was not flushed. Error otherwise
 */
enum hwpa_backend_rv hwpa_backend_check_session(unsigned long handle)
{
	struct hwpa_nss_hwpa_session *hws_hwpa;
	enum hwpa_backend_rv retval = HWPA_BACKEND_SUCCESS;
	struct hwpa_nss_offloader *ofl;

	hws_hwpa = hwpa_nss_handle_to_session(handle);
	if (!hws_hwpa || !hws_hwpa->hws_nss) {
		retval = HWPA_BACKEND_ERR_INTERNAL;
		goto finished;
	}

	ofl = hws_hwpa->offloader;

	if (!test_bit(HWPA_NSS_SUBSYS_FLAG_FLUSHABLE_SESSIONS,
				&ofl->subsys->flags)) {
		goto finished;
	}

	if (test_bit(HWPA_NSS_SESSION_FLUSHED, &hws_hwpa->hws_nss->flags)) {
		spin_lock_bh(&ofl->lock);
		ofl->flushed_sessions++;
		spin_unlock_bh(&ofl->lock);
		retval = HWPA_BACKEND_ERR_INTERNAL;
	}

finished:
	return retval;
}

/*
 *===============================================================================
 * hwpa nss offloaders init and exit
 *==============================================================================
 */

/**
 * @fn void hwpa_nss_offloader_exit(struct hwpa_nss_offloader*)
 * @brief exit offloader
 *
 * @param ofl [in] the offloader to exit
 */
static void hwpa_nss_offloader_exit(struct hwpa_nss_offloader *ofl)
{
	PR_DEVEL("Exit offloader: %s", ofl->label);
}

/**
 * @fn enum hwpa_backend_rv hwpa_nss_offloader_init(struct hwpa_nss_offloader*)
 * @brief initialize offloader
 *
 * @param ofl [in] the offloader to initialize
 *
 * @return success or error code
 */
static enum hwpa_backend_rv hwpa_nss_offloader_init(struct hwpa_nss_offloader *ofl)
{
	enum hwpa_backend_rv retval = HWPA_BACKEND_SUCCESS;

	PR_DEVEL("Initialize offoader: %s\n", ofl->label);

	INIT_LIST_HEAD(&ofl->session_list);
	spin_lock_init(&ofl->lock);
	spin_lock_init(&ofl->list_lock);

	return retval;
}

/*
 *===============================================================================
 * hwpa nss subsystems init and exit
 *==============================================================================
 */

/**
 * @fn void hwpa_nss_ipv4_exit(struct hwpa_nss_subsystem*)
 * @brief exit ipv4 subsystem
 *
 * @param subsys [in] the subsystem to exit
 */
static void hwpa_nss_ipv4_exit(struct hwpa_nss_subsystem *subsys)
{
	nss_ipv4_notify_unregister();
}

/**
 * @fn void hwpa_nss_ipv6_exit(struct hwpa_nss_subsystem*)
 * @brief exit ipv6 subsystem
 *
 * @param subsys [in] the subsystem to exit
 */
static void hwpa_nss_ipv6_exit(struct hwpa_nss_subsystem *subsys)
{
	nss_ipv6_notify_unregister();
}

/**
 * @fn void hwpa_qca833x_exit(struct hwpa_nss_subsystem*)
 * @brief exit qca833x subsystem
 *
 * @param subsys [in] the subsystem to exit
 */
static void hwpa_qca833x_exit(struct hwpa_nss_subsystem *subsys)
{
	struct qca833x_api *api;

	api = subsys->qca833x_spec->api;
	if (api && api->exit)
		api->exit();
}

/**
 * @fn enum hwpa_backend_rv hwpa_nss_ipv4_init(struct hwpa_nss_subsystem*)
 * @brief init ipv4 subsystem
 *
 * @param subsys [in] the subsystem to init
 *
 * @return success only
 */
static enum hwpa_backend_rv hwpa_nss_ipv4_init(struct hwpa_nss_subsystem *subsys)
{
	enum hwpa_backend_rv retval = HWPA_BACKEND_SUCCESS;
	struct hwpa_nss_ipv4_specific *ipv4_spec;

	ipv4_spec = subsys->ipv4_spec;
	hash_init(ipv4_spec->session_table);

	subsys->mgr = nss_ipv4_notify_register(hwpa_nss_ipv4_net_dev_callback,
					(void *) subsys);

	__set_bit(HWPA_NSS_SUBSYS_FLAG_BIDIRECTIONAL_SESSIONS, &subsys->flags);
	__set_bit(HWPA_NSS_SUBSYS_FLAG_FLUSHABLE_SESSIONS, &subsys->flags);

	ipv4_spec->sync_info_idx = 0;
	ipv4_spec->sync_info_len = 0;
	ipv4_spec->next_req_time = 0;
	ipv4_spec->roll_check_jiffies = 0;

	return retval;
}

/**
 * @fn enum hwpa_backend_rv hwpa_nss_ipv6_init(struct hwpa_nss_subsystem*)
 * @brief init ipv6 subsystem
 *
 * @param subsys [in] the subsystem to init
 *
 * @return success only
 */
static enum hwpa_backend_rv hwpa_nss_ipv6_init(struct hwpa_nss_subsystem *subsys)
{
	enum hwpa_backend_rv retval = HWPA_BACKEND_SUCCESS;
	struct hwpa_nss_ipv6_specific *ipv6_spec;

	ipv6_spec = subsys->ipv6_spec;
	hash_init(ipv6_spec->session_table);

	subsys->mgr = nss_ipv6_notify_register(hwpa_nss_ipv6_net_dev_callback,
					(void *) subsys);

	__set_bit(HWPA_NSS_SUBSYS_FLAG_BIDIRECTIONAL_SESSIONS, &subsys->flags);
	__set_bit(HWPA_NSS_SUBSYS_FLAG_FLUSHABLE_SESSIONS, &subsys->flags);

	ipv6_spec->sync_info_idx = 0;
	ipv6_spec->sync_info_len = 0;
	ipv6_spec->next_req_time = 0;
	ipv6_spec->roll_check_jiffies = 0;

	return retval;
}

/**
 * @fn enum hwpa_backend_rv hwpa_qca833x_init(struct hwpa_nss_subsystem*)
 * @brief init qca833x subsystem
 *
 * @param subsys [in] the subsystem to init
 *
 * @return success only
 */
static enum hwpa_backend_rv hwpa_qca833x_init(struct hwpa_nss_subsystem *subsys)
{
	enum hwpa_backend_rv retval = HWPA_BACKEND_SUCCESS;
	struct hwpa_qca833x_specific *qca833x_spec;

	qca833x_spec = subsys->qca833x_spec;
	hash_init(qca833x_spec->session_table);

	/* TODO: Could we put something useful here? */
	subsys->mgr = NULL;

	/* bsessions are unidirectional in avm_pa and hardware */
	__clear_bit(HWPA_NSS_SUBSYS_FLAG_BIDIRECTIONAL_SESSIONS, &subsys->flags);

	/* we only collect byte counters on qca833x */
	__set_bit(HWPA_NSS_SUBSYS_FLAG_FLOW_NO_PKT_STATS, &subsys->flags);

	return retval;
}

/**
 * @fn void hwpa_nss_subsys_exit(struct hwpa_nss_subsystem*)
 * @brief exit subsystem
 *
 * @param subsys [in] the subsystem to exit
 */
static void hwpa_nss_subsys_exit(struct hwpa_nss_subsystem *subsys)
{
	struct hwpa_nss_synchronizer *sync = subsys->sync;

	PR_DEVEL("Exit subsystem: %s", subsys->label);

	if (subsys->exit)
		subsys->exit(subsys);
	if (sync->exit)
		sync->exit(subsys);
}

/**
 * @fn enum hwpa_backend_rv hwpa_nss_subsys_init(struct hwpa_nss_subsystem*)
 * @brief initialize subsystem
 *
 * @param subsys [in] the subsystem to initialize
 *
 * @return success or error code
 */
static enum hwpa_backend_rv hwpa_nss_subsys_init(struct hwpa_nss_subsystem *subsys)
{
	enum hwpa_backend_rv retval = HWPA_BACKEND_SUCCESS;
	struct hwpa_nss_tracker *tracker = subsys->tracker;
	struct hwpa_nss_synchronizer *sync = subsys->sync;

	PR_DEVEL("Initialize subsystem: %s", subsys->label);

	subsys->flags = 0;

	if (subsys->init) {
		retval = subsys->init(subsys);
		if (retval != HWPA_BACKEND_SUCCESS) {
			pr_err("Couldn't initialize subsystem specific stuff: %s\n", subsys->label);
			goto failure_1;
		}
	}

	if (tracker->init) {
		retval = tracker->init(subsys);
		if (retval != HWPA_BACKEND_SUCCESS) {
			pr_err("Couldn't initialize tracking for subsystem: %s\n", subsys->label);
			goto failure_2;
		}
		spin_lock_init(&tracker->lock);
	}

	if (sync->init) {
		retval = sync->init(subsys);
		if (retval != HWPA_BACKEND_SUCCESS) {
			pr_err("Couldn't initialize sync for subsystem: %s\n", subsys->label);
			goto failure_2;
		}
	}

	return retval;

failure_2:
	if (subsys->exit)
		subsys->exit(subsys);

failure_1:
	return retval;
}

/*
 *==============================================================================
 * hwpa nss pid activation and registration
 *==============================================================================
 */

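/**
 * @fn enum hwpa_backend_rv backend_activate_hw(avm_pid_handle)
 * @brief activate hw acceleration for the net_device behind an avm_pa pid
 *
 * @param pid_handle [in] pid handle of the device to activate
 *
 * @return success or error code
 */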
enum hwpa_backend_rv backend_activate_hw(avm_pid_handle pid_handle)
{
	struct net_device *dev = hwpa_get_netdev(pid_handle);
	int32_t ifnum;
	enum hwpa_backend_rv retval = HWPA_BACKEND_ERR_HW_ACTIVATION;
	enum nss_dynamic_interface_type type;
	struct hwpa_nss_context *global_ctx = &hwpa_nss_ctx;
	bool bridging_ok = false;
	enum hwpa_nss_ep_type ep_type = EP_TYPE_NSS;

	ifnum = nss_cmn_get_interface_number_by_dev(dev);
	if (ifnum < 0) {
		PR_DEVEL("Interface %s not registered in NSS\n", dev->name);
		goto finished;
	}

	/*
	 * Here we assume that all contexts relevant for offloading are the
	 * same -- which is why we use the ipv4 context here. We also assume
	 * that a qca833x is always attached via a portid interface, which so
	 * far has held true.
	 */
	type = nss_dynamic_interface_get_type(nss_ipv4_get_mgr(), ifnum);
	if (type == NSS_DYNAMIC_INTERFACE_TYPE_PORTID) {
		bridging_ok = true;
		ep_type = EP_TYPE_QCA833X;
	}

	/*
	 * We need to explicitly configure bsessions for all interfaces:
	 * - On all Maple interfaces we cannot use them
	 * - On all Hawkeye PHYs we could use them, but we do not (yet)
	 * - On all Hawkeye Wifi interfaces we cannot use them
	 * - For sessions on an external QCA833X switch we can use them
	 * - On all others we cannot (yet)
	 * TODO: remove this call. AVM_PA will get a new feature where the
	 *	 datapath driver decides whether bsessions are allowed; this
	 *	 call alone is not 100% safe for disabling bsessions.
	 */
	avm_pa_pid_set_bridging(pid_handle, bridging_ok);

	global_ctx->if_reg[ifnum].type = ep_type;

	if (ep_type == EP_TYPE_QCA833X) {
		uint32_t port_bmp;
		struct hwpa_qca833x_specific *qca833x_spec;

		port_bmp = (uint32_t) avm_pa_pid_get_hwinfo(AVM_PA_DEVINFO(dev)->pid_handle)->hw;
		if (unlikely(!ffs(port_bmp & 0x7F))) {
			pr_err("Bad port_bmp (%x) for %s --> qca833x driver did not set it?\n",
			       port_bmp, dev->name);
			goto finished;
		}
		global_ctx->if_reg[ifnum].port_bmp_833x = port_bmp;

		qca833x_spec = hwpa_nss_get_subsys(HWPA_NSS_SUBSYSTEM_IDX_QCA833X)->qca833x_spec;
		if (!qca833x_spec->api) {
			qca833x_spec->api = (struct qca833x_api *) get_global_qca833x_api();

			if (qca833x_spec->api && qca833x_spec->api->init) {
				PR_DEVEL("Found QCA833X API (%p)\n", qca833x_spec->api);
				if (qca833x_spec->api->init() != QCA833X_OFL_OK) {
					goto finished;
				}
			}
		}
	}

	pr_info("Activate HW Acceleration for %s (ifnum = %d), bridging_ok = %d\n", dev->name, ifnum, bridging_ok);

	retval = HWPA_BACKEND_SUCCESS;

finished:
	dev_put(dev);
	return retval;
}

/*
 *==============================================================================
 * hwpa nss init and exit
 *==============================================================================
 */

#ifdef CONFIG_PROC_FS
static void __init hwpa_nss_proc_init(void);
static void __exit hwpa_nss_proc_exit(void);
#else
static inline void hwpa_nss_proc_init(void) {}
static inline void hwpa_nss_proc_exit(void) {}
#endif

/**
 * @fn void hwpa_nss_exit_offloaders_till(unsigned int)
 * @brief exit all offloaders from the global offloader list up to and
 * including last_element_idx.
 *
 * @param last_element_idx [in] the last index (inclusive) up to which
 * offloaders are exited.
 */
static void hwpa_nss_exit_offloaders_till(unsigned int last_element_idx)
{
	struct hwpa_nss_context *global_ctx = &hwpa_nss_ctx;
	int i;

	if (last_element_idx > ARRAY_SIZE(global_ctx->offloaders) - 1)
		return;

	for (i = 0; i <= last_element_idx; ++i)
		hwpa_nss_offloader_exit(global_ctx->offloaders[i]);
}

/**
 * @fn hwpa_nss_exit_offloaders()
 * @brief exit all offloaders from global offloader-list.
 */
static void hwpa_nss_exit_offloaders(void)
{
	struct hwpa_nss_context *global_ctx = &hwpa_nss_ctx;

	hwpa_nss_exit_offloaders_till(ARRAY_SIZE(global_ctx->offloaders) - 1);
}

/**
 * @fn hwpa_nss_init_offloaders()
 * @brief init all offloaders from global offloader-list.
 *
 * @return success or error code.
 */
static enum hwpa_backend_rv hwpa_nss_init_offloaders(void)
{
	struct hwpa_nss_context *global_ctx = &hwpa_nss_ctx;
	enum hwpa_backend_rv retval;
	int i;

	for (i = 0; i < ARRAY_SIZE(global_ctx->offloaders); ++i) {
		retval = hwpa_nss_offloader_init(global_ctx->offloaders[i]);
		if (retval != HWPA_BACKEND_SUCCESS) {
			pr_err("Could not initialize offloader: %s\n", global_ctx->offloaders[i]->label);
			/* Clean up only what was initialized so far */
			if (i > 0)
				hwpa_nss_exit_offloaders_till(i - 1);
			break;
		}
	}

	return retval;
}

/**
 * @fn void hwpa_nss_exit_subsystems_till(unsigned int)
 * @brief exit all subsystems from the global subsystem list up to and
 * including last_element_idx.
 *
 * @param last_element_idx [in] the last index (inclusive) up to which
 * subsystems are exited.
 */
static void hwpa_nss_exit_subsystems_till(unsigned int last_element_idx)
{
	struct hwpa_nss_context *global_ctx = &hwpa_nss_ctx;
	int i;

	if (last_element_idx > ARRAY_SIZE(global_ctx->subsystems) - 1)
		return;

	for (i = 0; i <= last_element_idx; ++i)
		hwpa_nss_subsys_exit(global_ctx->subsystems[i]);
}

/**
 * @fn hwpa_nss_exit_subsystems()
 * @brief exit all subsystems from global subsystem-list.
 */
static void hwpa_nss_exit_subsystems(void)
{
	struct hwpa_nss_context *global_ctx = &hwpa_nss_ctx;

	hwpa_nss_exit_subsystems_till(ARRAY_SIZE(global_ctx->subsystems) - 1);
}

/**
 * @fn hwpa_nss_init_subsystems()
 * @brief init all subsystems from global subsystem-list.
 *
 * @return success or error code.
 */
static enum hwpa_backend_rv hwpa_nss_init_subsystems(void)
{
	struct hwpa_nss_context *global_ctx = &hwpa_nss_ctx;
	enum hwpa_backend_rv retval;
	int i;

	for (i = 0; i < ARRAY_SIZE(global_ctx->subsystems); ++i) {
		retval = hwpa_nss_subsys_init(global_ctx->subsystems[i]);
		if (retval != HWPA_BACKEND_SUCCESS) {
			pr_err("Could not initialize subsystem: %s\n", global_ctx->subsystems[i]->label);
			/* Clean up only what was initialized so far */
			if (i > 0)
				hwpa_nss_exit_subsystems_till(i - 1);
			break;
		}
	}

	return retval;
}

static struct hwpa_nss_ipv4_specific ipv4_spec;
static struct hwpa_nss_ipv6_specific ipv6_spec;
static struct hwpa_qca833x_specific qca833x_spec;

#ifdef HWPA_NSS_DEBUG
/**
 * @fn void hwpa_nss_init_magic(void)
 * @brief Init global debug magic
 */
static void hwpa_nss_init_magic(void)
{
	ipv4_spec.magic = IPV4_SPECIFIC_MAGIC;
	ipv6_spec.magic = IPV6_SPECIFIC_MAGIC;
	qca833x_spec.magic = QCA833X_SPECIFIC_MAGIC;
}

/**
 * @fn enum hwpa_backend_rv hwpa_nss_check_magic(void)
 * @brief Check global debug magic
 *
 * @return success or error code
 */
static enum hwpa_backend_rv hwpa_nss_check_magic(void)
{
	struct hwpa_nss_ipv4_specific *ipv4;
	struct hwpa_nss_ipv6_specific *ipv6;
	struct hwpa_qca833x_specific *qca833x;
	enum hwpa_backend_rv retval = HWPA_BACKEND_SUCCESS;

	ipv4 = hwpa_nss_get_subsys(HWPA_NSS_SUBSYSTEM_IDX_IPV4)->ipv4_spec;
	ipv6 = hwpa_nss_get_subsys(HWPA_NSS_SUBSYSTEM_IDX_IPV6)->ipv6_spec;
	qca833x = hwpa_nss_get_subsys(HWPA_NSS_SUBSYSTEM_IDX_QCA833X)->qca833x_spec;

	if (ipv4->magic != IPV4_SPECIFIC_MAGIC)
		retval = HWPA_BACKEND_ERR_MAGIC;
	else if (ipv6->magic != IPV6_SPECIFIC_MAGIC)
		retval = HWPA_BACKEND_ERR_MAGIC;
	else if (qca833x->magic != QCA833X_SPECIFIC_MAGIC)
		retval = HWPA_BACKEND_ERR_MAGIC;

	return retval;
}
#endif

static struct hwpa_nss_tracker ipv4_tracker = {
	.init = hwpa_nss_ipv4_init_limit,
};

static struct hwpa_nss_synchronizer ipv4_sync = {
	.init = hwpa_nss_ipv4_sync_init,
	.exit = hwpa_nss_ipv4_sync_exit,
};

static struct hwpa_nss_subsystem ipv4_subsys = {
	.label = "ipv4",
	.spec = &ipv4_spec,
	.init = hwpa_nss_ipv4_init,
	.exit = hwpa_nss_ipv4_exit,
	.gen_hash = hwpa_nss_ipv4_gen_session_hash,
	.register_nss_session = hwpa_nss_ipv4_register_nss_session,
	.find_nss_session = hwpa_nss_ipv4_find_nss_session,
	.purge_sessions = hwpa_nss_ipv4_purge_sessions,
	.tracker = &ipv4_tracker,
	.sync = &ipv4_sync,
};

static struct hwpa_nss_offloader ipv4_offloader = {
	.label = "ipv4",
	.subsys = &ipv4_subsys,
	.prepare_session = hwpa_nss_ipv4_prepare_session,
	.add_session = hwpa_nss_ipv4_add_session,
	.remove_session = hwpa_nss_ipv4_remove_session,
};

static struct hwpa_nss_tracker ipv6_tracker = {
	.init = hwpa_nss_ipv6_init_limit,
};

static struct hwpa_nss_synchronizer ipv6_sync = {
	.init = hwpa_nss_ipv6_sync_init,
	.exit = hwpa_nss_ipv6_sync_exit,
};

static struct hwpa_nss_subsystem ipv6_subsys = {
	.label = "ipv6",
	.spec = &ipv6_spec,
	.init = hwpa_nss_ipv6_init,
	.exit = hwpa_nss_ipv6_exit,
	.gen_hash = hwpa_nss_ipv6_gen_session_hash,
	.register_nss_session = hwpa_nss_ipv6_register_nss_session,
	.find_nss_session = hwpa_nss_ipv6_find_nss_session,
	.purge_sessions = hwpa_nss_ipv6_purge_sessions,
	.tracker = &ipv6_tracker,
	.sync = &ipv6_sync,
};

static struct hwpa_nss_offloader ipv6_offloader = {
	.label = "ipv6",
	.subsys = &ipv6_subsys,
	.prepare_session = hwpa_nss_ipv6_prepare_session,
	.add_session = hwpa_nss_ipv6_add_session,
	.remove_session = hwpa_nss_ipv6_remove_session,
};

static struct hwpa_nss_tracker qca833x_tracker = {
	.init = hwpa_qca833x_init_limit,
};

static struct hwpa_nss_synchronizer qca833x_sync = {
	.init = hwpa_qca833x_sync_init,
	.exit = hwpa_qca833x_sync_exit,
};

static struct hwpa_nss_subsystem qca833x_subsys = {
	.label = "qca833x",
	.spec = &qca833x_spec,
	.init = hwpa_qca833x_init,
	.exit = hwpa_qca833x_exit,
	.gen_hash = hwpa_qca833x_gen_session_hash,
	.register_nss_session = hwpa_qca833x_register_nss_session,
	.find_nss_session = hwpa_qca833x_find_nss_session,
	.purge_sessions = hwpa_qca833x_purge_sessions,
	.tracker = &qca833x_tracker,
	.sync = &qca833x_sync,
};

static struct hwpa_nss_offloader qca833x_offloader = {
	.label = "qca833x",
	.subsys = &qca833x_subsys,
	.prepare_session = hwpa_qca833x_prepare_session,
	.add_session = hwpa_qca833x_add_session,
	.remove_session = hwpa_qca833x_remove_session,
};

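/*
 * Note: both arrays below are indexed by the HWPA_NSS_SUBSYSTEM_IDX_*
 * constants (see hwpa_nss_get_subsys() and the procfs helpers), so their
 * order must match those indices.
 */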
static struct hwpa_nss_context hwpa_nss_ctx = {
	.subsystems = {
		&ipv4_subsys,
		&ipv6_subsys,
		&qca833x_subsys,
	},
	.offloaders = {
		&ipv4_offloader,
		&ipv6_offloader,
		&qca833x_offloader,
	},
};

/**
 * @fn enum hwpa_backend_rv hwpa_backend_init(struct hwpa_backend_config*)
 * @brief Fill the HWPA configuration, create the kmem_caches, initialize
 * subsystems and offloaders, start the pending offload manager and create
 * the proc entries.
 *
 * @param hw_pa_config [out] the HWPA backend configuration to fill
 *
 * @return success or error code
 */
enum hwpa_backend_rv hwpa_backend_init(struct hwpa_backend_config *hw_pa_config)
{
	enum hwpa_backend_rv retval;
	struct hwpa_nss_context *global_ctx = &hwpa_nss_ctx;

	PR_DEVEL("HWPA backend init\n");

	hw_pa_config->flags = HWPA_BACKEND_HAS_SESSION_CHECK;
	hw_pa_config->alloc_rx_channel = NULL;
	hw_pa_config->alloc_tx_channel = NULL;
	hw_pa_config->free_rx_channel = NULL;
	hw_pa_config->free_tx_channel = NULL;

	if (!nss_cmn_get_nss_enabled()) {
		retval = HWPA_BACKEND_ERR_NO_NSS;
		goto failure_1;
	}

#ifdef HWPA_NSS_DEBUG
	hwpa_nss_init_magic();
	retval = hwpa_nss_check_magic();
	if (retval != HWPA_BACKEND_SUCCESS) {
		pr_err("Bad Magic!\n");
		goto failure_1;
	}
#endif
	mutex_init(&global_ctx->mutex);
	spin_lock_init(&global_ctx->lock);
	atomic_set(&global_ctx->ratelimit_counter, 0);

	global_ctx->kmem_nss = kmem_cache_create("hwpa_nss_nss_sess",
				sizeof(struct hwpa_nss_nss_session), 0,
				SLAB_HWCACHE_ALIGN | SLAB_RED_ZONE, NULL);
	if (!global_ctx->kmem_nss) {
		retval = HWPA_BACKEND_ERR_CACHE;
		pr_err("Could not create nss session cache!\n");
		goto failure_1;
	}

	global_ctx->kmem_hwpa = kmem_cache_create("hwpa_nss_hwpa_sess",
				sizeof(struct hwpa_nss_hwpa_session), 0,
				SLAB_HWCACHE_ALIGN | SLAB_RED_ZONE, NULL);
	if (!global_ctx->kmem_hwpa) {
		retval = HWPA_BACKEND_ERR_CACHE;
		pr_err("Could not create hwpa session cache!\n");
		goto failure_2;
	}

	retval = hwpa_nss_init_subsystems();
	if (retval != HWPA_BACKEND_SUCCESS) {
		pr_err("Couldn't initialize all subsystems\n");
		goto failure_3;
	}

	retval = hwpa_nss_init_offloaders();
	if (retval != HWPA_BACKEND_SUCCESS) {
		pr_err("Couldn't initialize all offloaders\n");
		goto failure_4;
	}

	retval = hwpa_nss_pending_offload_manager_init();
	if (retval != HWPA_BACKEND_SUCCESS) {
		pr_err("Couldn't initialize pending offload manager\n");
		goto failure_5;
	}

	hwpa_nss_proc_init();

	PR_DEVEL("HWPA_NSS init successful\n");

	return HWPA_BACKEND_SUCCESS;

failure_5:
	hwpa_nss_exit_offloaders();

failure_4:
	hwpa_nss_exit_subsystems();

failure_3:
	kmem_cache_destroy(global_ctx->kmem_hwpa);

failure_2:
	kmem_cache_destroy(global_ctx->kmem_nss);

failure_1:
#ifdef HWPA_NSS_DEBUG
	BUG();
#endif
	return retval;
}

/**
 * @fn void hwpa_backend_exit(void)
 * @brief Remove the proc entries, stop the pending offload manager, purge
 * sessions, exit offloaders and subsystems and release the kmem_caches.
 */
void hwpa_backend_exit(void)
{
	struct hwpa_nss_context *global_ctx = &hwpa_nss_ctx;

	PR_DEVEL("HWPA Backend Exit\n");

	hwpa_nss_proc_exit();

	hwpa_nss_pending_offload_manager_exit();

	hwpa_nss_purge_sessions();
	hwpa_nss_exit_offloaders();
	hwpa_nss_exit_subsystems();

	kmem_cache_destroy(global_ctx->kmem_nss);
	kmem_cache_destroy(global_ctx->kmem_hwpa);
}

/*
 *==============================================================================
 * procfs user interface
 *==============================================================================
 */

#ifdef CONFIG_PROC_FS
//TODO: introduce control interface
typedef int hwpa_nss_fprintf(void *, const char *, ...);

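/**
 * @fn void hwpa_nss_show_offloader_stats(hwpa_nss_fprintf, void*, struct hwpa_nss_offloader*)
 * @brief print the session counters of a single offloader
 *
 * @param fprintffunc [in] output function, e.g. seq_printf
 * @param arg [in] opaque argument passed through to fprintffunc
 * @param ofl [in] the offloader whose counters are printed
 */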
static void hwpa_nss_show_offloader_stats(hwpa_nss_fprintf fprintffunc, void *arg,
				struct hwpa_nss_offloader *ofl)
{
	if (!ofl)
		return;

	spin_lock_bh(&ofl->lock);
	(*fprintffunc)(arg, "Offloader %s:\n", ofl->label);
	(*fprintffunc)(arg, "  pending nss sessions: %d\n", ofl->pending_nss_session_count);
	(*fprintffunc)(arg, "  active nss sessions: %d\n", ofl->active_nss_session_count);
	(*fprintffunc)(arg, "  avm_pa sessions: %d\n", ofl->avm_pa_session_count);
	(*fprintffunc)(arg, "  successful NSS offloads: %d\n", ofl->successful_nss_offloads);
	(*fprintffunc)(arg, "  failed NSS offloads: %d\n", ofl->failed_nss_offloads);
	(*fprintffunc)(arg, "  flushed sessions: %d\n", ofl->flushed_sessions);
	spin_unlock_bh(&ofl->lock);
}

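/**
 * @fn void hwpa_nss_show_subsystem_stats(hwpa_nss_fprintf, void*, struct hwpa_nss_subsystem*)
 * @brief print usage and limit of a subsystem's session tracker
 *
 * @param fprintffunc [in] output function, e.g. seq_printf
 * @param arg [in] opaque argument passed through to fprintffunc
 * @param subsys [in] the subsystem whose tracker is printed
 */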
static void hwpa_nss_show_subsystem_stats(hwpa_nss_fprintf fprintffunc, void *arg,
				struct hwpa_nss_subsystem *subsys)
{
	struct hwpa_nss_tracker *tracker = subsys->tracker;

	if (!tracker)
		return;

	(*fprintffunc)(arg, "Subsystem %s:\n", subsys->label);
	spin_lock_bh(&tracker->lock);
	(*fprintffunc)(arg, "  usage: %d\n", tracker->usage);
	spin_unlock_bh(&tracker->lock);
	(*fprintffunc)(arg, "  limit: %d\n", tracker->limit);
}

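/**
 * @fn void hwpa_nss_show_brief(hwpa_nss_fprintf, void*)
 * @brief print a summary over all subsystems and offloaders
 *
 * @param fprintffunc [in] output function, e.g. seq_printf
 * @param arg [in] opaque argument passed through to fprintffunc
 */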
static void hwpa_nss_show_brief(hwpa_nss_fprintf fprintffunc, void *arg)
{
	struct hwpa_nss_context *global_ctx = &hwpa_nss_ctx;
	int i;

	(*fprintffunc)(arg, "HWPA_NSS summary\n");

	for (i = 0; i < ARRAY_SIZE(global_ctx->subsystems); ++i)
		hwpa_nss_show_subsystem_stats(fprintffunc, arg, global_ctx->subsystems[i]);

	for (i = 0; i < ARRAY_SIZE(global_ctx->offloaders); ++i)
		hwpa_nss_show_offloader_stats(fprintffunc, arg, global_ctx->offloaders[i]);

	(*fprintffunc)(arg, " ratelimit_counter: %d\n", global_ctx->ratelimit_counter);
}

static int brief_show(struct seq_file *m, void *v)
{
	hwpa_nss_show_brief((hwpa_nss_fprintf *)seq_printf, m);
	return 0;
}

static int brief_show_open(struct inode *inode, struct file *file)
{
	return single_open(file, brief_show, PDE_DATA(inode));
}

static const struct file_operations brief_show_fops = {
	.open    = brief_show_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

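/**
 * @fn void hwpa_nss_show_interfaces(hwpa_nss_fprintf, void*)
 * @brief list all net_devices with their avm_pa pid, NSS ifnum and ep_type
 *
 * @param fprintffunc [in] output function, e.g. seq_printf
 * @param arg [in] opaque argument passed through to fprintffunc
 */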
static void hwpa_nss_show_interfaces(hwpa_nss_fprintf fprintffunc, void *arg)
{
	struct net_device *dev;
	struct net *net;
	int32_t if_num;
	struct hwpa_nss_context *global_ctx = &hwpa_nss_ctx;

	(*fprintffunc)(arg, "%-20s%-10s%-10s%-10s%-10s\n", "Netdev", "type",
			"avm_pid", "nss_ifnum", "ep_type");

	rcu_read_lock();
	for_each_net_rcu(net) {
		for_each_netdev_rcu(net, dev) {
			if_num = nss_cmn_get_interface_number_by_dev(dev);
			(*fprintffunc)(arg, "%-20s%-10u%-10u%-10d%-10d\n",
					dev->name,
					(unsigned int)dev->type,
					(unsigned int)AVM_PA_DEVINFO(dev)->pid_handle,
					if_num,
					global_ctx->if_reg[if_num == -1 ? NSS_MAX_NET_INTERFACES - 1  : if_num]
					);

		}
	}
	rcu_read_unlock();
}

static int interfaces_show(struct seq_file *m, void *v)
{
	hwpa_nss_show_interfaces((hwpa_nss_fprintf *)seq_printf, m);
	return 0;
}

static int interfaces_show_open(struct inode *inode, struct file *file)
{
	return single_open(file, interfaces_show, PDE_DATA(inode));
}

static const struct file_operations interfaces_show_fops = {
	.open    = interfaces_show_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

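/**
 * @fn void hwpa_nss_show_synced_sessions_ipv4(hwpa_nss_fprintf, void*)
 * @brief dump the ipv4 sessions of the last sync with their avm_pa sessions
 *
 * @param fprintffunc [in] output function, e.g. seq_printf
 * @param arg [in] opaque argument passed through to fprintffunc
 */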
static void hwpa_nss_show_synced_sessions_ipv4(hwpa_nss_fprintf fprintffunc, void *arg)
{
	struct hwpa_nss_context *global_ctx = &hwpa_nss_ctx;
	struct hwpa_nss_subsystem *ipv4_subsys = global_ctx->subsystems[HWPA_NSS_SUBSYSTEM_IDX_IPV4];
	int i = 0;
	int retries = 3;
	bool ready = false;

	while (retries--) {
		if (test_bit(HWPA_NSS_SUBSYS_FLAG_READY_TO_COLLECT_SYNC_INFO,
			     &ipv4_subsys->flags)) {
			ready = true;
			break;
		}
		msleep(50);
	}

	if (!ready) {
		(*fprintffunc)(arg, "(IPV4) No synced sessions or not ready for sync stat collection. A retry could help!\n");
		(*fprintffunc)(arg, "ipv4 sync_info_len is %d\n", ipv4_subsys->ipv4_spec->sync_info_len);
		return;
	}

	set_bit(HWPA_NSS_SUBSYS_FLAG_DISABLE_STAT_COLLECTION, &ipv4_subsys->flags);

	(*fprintffunc)(arg, "Synced IPV4 Sessions: %d\n\n", ipv4_subsys->ipv4_spec->sync_info_len);

	for (i = 0; i < ipv4_subsys->ipv4_spec->sync_info_len; ++i) {
		struct nss_ipv4_conn_sync *sync = &ipv4_subsys->ipv4_spec->sync_info[i];
		struct hwpa_nss_nss_session *hws_nss;
		struct hwpa_nss_hwpa_session *hws_hwpa;

		hws_nss = hwpa_nss_ipv4_find_nss_session_from_sync(ipv4_subsys, sync);

		(*fprintffunc)(arg, "ID: %-5d protocol %-5u flags 0x%-8x qos_tag %-4u inc_ticks %-6u\n",
				i, sync->protocol, sync->flags, sync->qos_tag, sync->inc_ticks);

		if (hws_nss) {
			(*fprintffunc)(arg, "hws_nss %p (%d avm_pa sessions attached); avm_pa uniq_id(s): ",
					hws_nss, hws_nss->pa_ref_count);
			list_for_each_entry(hws_hwpa, &hws_nss->hwpa_session_list, node) {
				(*fprintffunc)(arg, "%d (flushed = %d) ", hws_hwpa->sess_pa->uniq_id,
						test_bit(HWPA_NSS_SESSION_FLUSHED, &hws_nss->flags));
			}
			(*fprintffunc)(arg, "\n");
		} else {
			(*fprintffunc)(arg, "no associated hws_nss\n");
		}

		(*fprintffunc)(arg, "flow_ip       %pI4h:%-10d return_ip       %pI4h:%-10d\n",
				&sync->flow_ip, sync->flow_ident, &sync->return_ip, sync->return_ident);
		(*fprintffunc)(arg, "flow_ip_xlate %pI4h:%-10d return_ip_xlate %pI4h:%-10d\n\n\n",
				&sync->flow_ip_xlate, sync->flow_ident_xlate, &sync->return_ip_xlate, sync->return_ident_xlate);
	}

	clear_bit(HWPA_NSS_SUBSYS_FLAG_DISABLE_STAT_COLLECTION, &ipv4_subsys->flags);
}

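/**
 * @fn void hwpa_nss_show_synced_sessions_ipv6(hwpa_nss_fprintf, void*)
 * @brief dump the ipv6 sessions of the last sync with their avm_pa sessions
 *
 * @param fprintffunc [in] output function, e.g. seq_printf
 * @param arg [in] opaque argument passed through to fprintffunc
 */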
static void hwpa_nss_show_synced_sessions_ipv6(hwpa_nss_fprintf fprintffunc, void *arg)
{
	struct hwpa_nss_context *global_ctx = &hwpa_nss_ctx;
	struct hwpa_nss_subsystem *ipv6_subsys = global_ctx->subsystems[HWPA_NSS_SUBSYSTEM_IDX_IPV6];
	int i = 0;
	int retries = 3;
	bool ready = false;

	while (retries--) {
		if (test_bit(HWPA_NSS_SUBSYS_FLAG_READY_TO_COLLECT_SYNC_INFO,
			     &ipv6_subsys->flags)) {
			ready = true;
			break;
		}
		msleep(50);
	}

	if (!ready) {
		(*fprintffunc)(arg, "(IPV6) No synced sessions or not ready for sync stat collection. A retry could help!\n");
		(*fprintffunc)(arg, "ipv6 sync_info_len is %d\n", ipv6_subsys->ipv6_spec->sync_info_len);
		return;
	}

	set_bit(HWPA_NSS_SUBSYS_FLAG_DISABLE_STAT_COLLECTION, &ipv6_subsys->flags);

	(*fprintffunc)(arg, "Synced IPV6 Sessions: %d\n\n", ipv6_subsys->ipv6_spec->sync_info_len);

	for (i = 0; i < ipv6_subsys->ipv6_spec->sync_info_len; ++i) {
		struct nss_ipv6_conn_sync *sync = &ipv6_subsys->ipv6_spec->sync_info[i];
		struct hwpa_nss_nss_session *hws_nss;
		struct hwpa_nss_hwpa_session *hws_hwpa;

		hws_nss = hwpa_nss_ipv6_find_nss_session_from_sync(ipv6_subsys, sync);

		(*fprintffunc)(arg, "ID: %-5d protocol %-5u flags 0x%-8x qos_tag %-4u inc_ticks %-6u\n",
				i, sync->protocol, sync->flags, sync->qos_tag, sync->inc_ticks);

		if (hws_nss) {
			(*fprintffunc)(arg, "hws_nss %p (%d avm_pa sessions attached); avm_pa uniq_id(s): ",
					hws_nss, hws_nss->pa_ref_count);
			list_for_each_entry(hws_hwpa, &hws_nss->hwpa_session_list, node) {
				(*fprintffunc)(arg, "%d (flushed = %d) ", hws_hwpa->sess_pa->uniq_id,
						test_bit(HWPA_NSS_SESSION_FLUSHED, &hws_nss->flags));
			}
			(*fprintffunc)(arg, "\n");
		} else {
			(*fprintffunc)(arg, "no associated hws_nss\n");
		}

		(*fprintffunc)(arg, "flow_ip       %pI6h:%-10d return_ip       %pI6h:%-10d\n\n",
				&sync->flow_ip[0], sync->flow_ident, &sync->return_ip[0], sync->return_ident);
	}

	clear_bit(HWPA_NSS_SUBSYS_FLAG_DISABLE_STAT_COLLECTION, &ipv6_subsys->flags);
}

static int synced_sessions_show(struct seq_file *m, void *v)
{
	hwpa_nss_show_synced_sessions_ipv4((hwpa_nss_fprintf *)seq_printf, m);
	hwpa_nss_show_synced_sessions_ipv6((hwpa_nss_fprintf *)seq_printf, m);
	return 0;
}

static int synced_sessions_show_open(struct inode *inode, struct file *file)
{
	return single_open(file, synced_sessions_show, PDE_DATA(inode));
}

static const struct file_operations synced_sessions_show_fops = {
	.open    = synced_sessions_show_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};


static struct proc_dir_entry *dir_entry;

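/**
 * @fn void hwpa_nss_proc_init(void)
 * @brief create the procfs entries under /proc/net/hwpa_nss
 *
 * Usage sketch (illustrative): the entries can then be read from userspace,
 * e.g. "cat /proc/net/hwpa_nss/brief" for the summary or
 * "cat /proc/net/hwpa_nss/synced_sessions" for the last session sync.
 */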
static void __init hwpa_nss_proc_init(void)
{
	dir_entry = proc_net_mkdir(&init_net, "hwpa_nss", init_net.proc_net);
	if (!dir_entry) {
		pr_err("Could not create hwpa_nss proc dir!\n");
		return;
	}

	proc_create("brief", 0444, dir_entry, &brief_show_fops);
	proc_create("interfaces", 0444, dir_entry, &interfaces_show_fops);
	proc_create("synced_sessions", 0444, dir_entry, &synced_sessions_show_fops);

	PR_DEVEL("Created proc entries!\n");
}

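/**
 * @fn void hwpa_nss_proc_exit(void)
 * @brief remove the procfs entries created by hwpa_nss_proc_init()
 */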
static void __exit hwpa_nss_proc_exit(void)
{
	remove_proc_entry("brief", dir_entry);
	remove_proc_entry("interfaces", dir_entry);
	remove_proc_entry("synced_sessions", dir_entry);

	remove_proc_entry("hwpa_nss", init_net.proc_net);

	PR_DEVEL("Removed proc entries!\n");
}

#endif