// SPDX-License-Identifier: GPL-2.0

/**
 * @brief AVM Hardware PA (hwpa) for NSS
 * @author Christos Mimidis
 *
 * This file implements the hwpa-backend for the qca NSS. It uses the
 * NSS-API. It realizes an interface between AVM_PA/HWPA and NSS for
 * session offloading.
 *
 * The NSS-API relies on L3 (IPs) and L4 (Ports, Protocol) information. If
 * avm_pa tries to offload sessions without that information, this currently
 * cannot work via the NSS API alone, because session removal relies on a
 * 5-tuple. QSDK 11.1 introduces a Match API, which allows offloading of L2
 * sessions (up to 4x32 bit of match data).
 */

/*
 *==============================================================================
 * HWPA NSS includes and global defines
 *==============================================================================
 */

/*
 * uncomment to enable some debugging mechanisms and more verbose output
 */
//#define HWPA_NSS_DEBUG

#ifdef HWPA_NSS_DEBUG
#define pr_fmt(fmt) "%s:%s: " fmt, KBUILD_MODNAME, __func__
#else
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#endif

#include <avm/pa/avm_pa.h>
#include <avm/pa/avm_pa_hw.h>

#include <linux/if_bridge.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/if_pppox.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/hashtable.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/proc_fs.h>
#include <nss_api_if.h>
#include "hwpa.h"
#include "hwpa_nss.h"
#include <linux/rhashtable.h>

#if defined(CONFIG_ARCH_IPQ5018)
#elif defined(CONFIG_ARCH_IPQ807x)
#else
#error "Unsupported platform for nss offloading"
#endif

/* Period for sending a sync_many msg down to NSS */
#define HWPA_NSS_STATS_SYNC_PERIOD msecs_to_jiffies(500)

/*
 * The length of all label strings
 */
#define HWPA_NSS_LABEL_SIZE 32

/*
 * Timings for pending offloads.
 */
#define HWPA_NSS_PENDING_OFFLOAD_PERIOD msecs_to_jiffies(1000)
#define HWPA_NSS_TCP_MAX_WAITING_TIME msecs_to_jiffies(10000)
#define HWPA_NSS_UDP_MAX_WAITING_TIME msecs_to_jiffies(10000)

/* NSS Session Limits */
#define NSS_MAX_IPV4_SESSIONS		1024
#define NSS_MAX_IPV4_SESSIONS_LOG2	ilog2(NSS_MAX_IPV4_SESSIONS)
#define NSS_MAX_IPV6_SESSIONS		1024
#define NSS_MAX_IPV6_SESSIONS_LOG2	ilog2(NSS_MAX_IPV6_SESSIONS)

#ifdef HWPA_NSS_DEBUG
#define NSS_MAGIC_BASE			0xCAFE
#define NSS_MAGIC_NUMBER(n)		(NSS_MAGIC_BASE+(n))
#define NSS_SESSION_MAGIC		NSS_MAGIC_NUMBER(0)
#define HWPA_SESSION_MAGIC		NSS_MAGIC_NUMBER(1)
#define IPV4_SPECIFIC_MAGIC		NSS_MAGIC_NUMBER(2)
#define IPV4_SESSION_DATA_MAGIC		NSS_MAGIC_NUMBER(3)
#define IPV6_SPECIFIC_MAGIC		NSS_MAGIC_NUMBER(4)
#define IPV6_SESSION_DATA_MAGIC		NSS_MAGIC_NUMBER(5)

#define PR_DEVEL(fmt, ...)		pr_err(fmt, ##__VA_ARGS__)
#else
#define PR_DEVEL(fmt, ...)
#endif

/* Default Value for VLAN IDs. Taken from ECM. */
/* TODO: Find Default F!OS Value */
#define HWPA_NSS_VLAN_ID_NOT_CONFIGURED 0xFFF

/* Some ipv6 helpers */
/* True if two IPv6 addresses (four 32-bit words each) are equal */
#define IPV6_ADDR_MATCH(a, b) \
	(((a)[0] == (b)[0]) && ((a)[1] == (b)[1]) && \
	 ((a)[2] == (b)[2]) && ((a)[3] == (b)[3]))
/* Fold an IPv6 address into a single 32-bit word (used for hashing) */
#define IPV6_ADDR_XOR(a) ((a)[0] ^ (a)[1] ^ (a)[2] ^ (a)[3])
/*
 * Copy an IPv6 address word-by-word, converting each word to network byte
 * order. Wrapped in do { } while (0) so the macro behaves like a single
 * statement (safe in unbraced if/else); arguments are parenthesized to
 * survive non-trivial expressions.
 */
#define IPV6_COPY(from, to) \
	do { \
		(to)[0] = htonl((from)[0]); \
		(to)[1] = htonl((from)[1]); \
		(to)[2] = htonl((from)[2]); \
		(to)[3] = htonl((from)[3]); \
	} while (0)

/*
 * Limits for building the session hierarchy
 */
#define HWPA_NSS_MAX_INTERFACES 10
#define HWPA_NSS_DIRS 2

/*
 * Forward definitions
 */
struct hwpa_nss_offloading_data;
struct hwpa_nss_offloader;
struct hwpa_nss_subsystem;

/*
 *==============================================================================
 * HWPA NSS session structs
 *==============================================================================
 */

/**
 * @enum hwpa_nss_nat_mode
 * @brief NAT Modes supported by NSS
 */
enum hwpa_nss_nat_mode {
	HWPA_NSS_IPV4_NAT_MODE_BRIDGED = 0, /* Bridged Traffic */
	HWPA_NSS_IPV4_NAT_MODE_SNAT,	     /* Egress NAT */
	HWPA_NSS_IPV4_NAT_MODE_DNAT,      /* Ingress NAT*/
	HWPA_NSS_IPV4_NAT_MODE_NO_NAT,    /* No NAT (routing, modified ports only)*/
	HWPA_NSS_IPV6_NAT_MODE_BRIDGED, /* Bridged Traffic */
		/* No NAT support for IPV6 */
	HWPA_NSS_IPV6_NAT_MODE_NO_NAT,    /* No NAT (routing, modified ports only)*/

	HWPA_NSS_NAT_MODE_MAX
};

/**
 * @struct hwpa_nss_ipv4_session_data
 * @brief data for an nss ipv4 session. Used for identification and removal of a
 * session.
 */
struct hwpa_nss_ipv4_session_data	{
	struct nss_ipv4_5tuple tuple;
	uint32_t flow_ident_xlate;
	uint32_t flow_ip_xlate;
	uint32_t return_ident_xlate;
	uint32_t return_ip_xlate;
};

/**
 * @struct hwpa_nss_ipv6_session_data
 * @brief data for an nss ipv6 session. Used for identification and removal of a
 * session.
 */
struct hwpa_nss_ipv6_session_data {
	struct nss_ipv6_5tuple tuple;
};

/**
 * @struct hwpa_nss_stats
 * @brief Sync data for stats for a nss session.
 */
struct hwpa_nss_stats	{
	uint32_t flow_rx_bytes;
	uint32_t flow_rx_pkts;
	uint32_t flow_tx_bytes;
	uint32_t flow_tx_pkts;
	uint32_t return_rx_bytes;
	uint32_t return_rx_pkts;
	uint32_t return_tx_bytes;
	uint32_t return_tx_pkts;
};

/**
 * @enum hwpa_nss_session_flag
 * @brief Enum for state, type and sync flags of a NSS session.
 *
 */
enum hwpa_nss_session_flag {
	/* Set if Stats were read by AVM_PA(For Flow and Return Direction) */
	HWPA_NSS_SESSION_SYNC_FLOW_UPDATED,
	HWPA_NSS_SESSION_SYNC_RETURN_UPDATED,

	HWPA_NSS_SESSION_MAX
};

/**
 * @enum hwpa_nss_session_state
 * @brief State of a nss session
 */
enum hwpa_nss_session_state	{
	HWPA_NSS_SESSION_STATE_INITIALIZED,
	HWPA_NSS_SESSION_STATE_PREPARED,
	HWPA_NSS_SESSION_STATE_READY_TO_OFFLOAD,
	HWPA_NSS_SESSION_STATE_PENDING_APPROVAL,
	HWPA_NSS_SESSION_STATE_ACTIVE,
	HWPA_NSS_SESSION_STATE_INVALID,
	HWPA_NSS_SESSION_STATE_BROKEN,
	HWPA_NSS_SESSION_STATE_MAX
};

/**
 * @struct hwpa_nss_nss_session
 * @brief a struct encapsulating all required data for a single NSS-Session
 *
 */
struct hwpa_nss_nss_session {
#ifdef HWPA_NSS_DEBUG
	uint16_t magic;
#endif

	/* Flags encoded with enum hwpa_nss_session_flag_bits */
	unsigned long flags;
	enum hwpa_nss_session_state state;
	struct hwpa_nss_offloader *offloader;

	/* subsystem-specific data */
	union	{
		struct hwpa_nss_ipv4_session_data ipv4;
		struct hwpa_nss_ipv6_session_data ipv6;
	};

	/* hashlist node */
	struct hlist_node node;

	/* A list with all hwpa-sessions assigned to a specific NSS-Session */
	struct list_head hwpa_session_list;

	/* Session stats */
	struct hwpa_nss_stats stats;

	/* lock used for stat-accesses */
	spinlock_t sync_lock;

	/* A reference count for hwpa sessions attached */
	uint16_t pa_ref_count;
};

/**
 * @enum hwpa_nss_session_direction
 * @brief The direction of an hwpa session.
 *
 */
enum hwpa_nss_session_direction	{
	HWPA_NSS_SESSION_DIRECTION_FLOW,
	HWPA_NSS_SESSION_DIRECTION_RETURN,
	HWPA_NSS_SESSION_DIRECTION_DONT_CARE,
	HWPA_NSS_SESSION_DIRECTION_MAX,
};

/**
 * @struct hwpa_nss_hwpa_session
 * @brief An interface between avm_pa/hwpa and NSS for a session. Specifies
 * a direction for a hwpa_nss_session. This is what AVM_PA/HWPA "sees" from HWPA_NSS.
 */
struct hwpa_nss_hwpa_session {
#ifdef HWPA_NSS_DEBUG
	uint16_t magic;
#endif
	enum hwpa_nss_session_direction direction;
	struct list_head node;
	struct hwpa_nss_nss_session *hws_nss;
};

/*
 *==============================================================================
 * HWPA NSS offloaders
 *==============================================================================
 */

/**
 * @struct hwpa_nss_offloader
 * @brief Data struct for each HWPA NSS Subsystem Offloader
 */
struct hwpa_nss_offloader	{
	/* The subsystem this offloader is assigned to */
	struct hwpa_nss_subsystem *subsys;

	char label[HWPA_NSS_LABEL_SIZE];

	/* API functions */
	enum hwpa_backend_rv (*init)(struct hwpa_nss_subsystem *subsys);
	void		     (*exit)(struct hwpa_nss_subsystem *subsys);
	enum hwpa_backend_rv (*prepare_session)(struct hwpa_nss_offloading_data *ofl_data);
	enum hwpa_backend_rv (*add_session)(struct hwpa_nss_offloading_data *ofl_data);
	enum hwpa_backend_rv (*remove_session)(struct hwpa_nss_subsystem *subsys,
						struct hwpa_nss_nss_session *hws_nss);
	enum hwpa_backend_rv (*change_session)(struct hwpa_nss_subsystem *subsys,
						struct hwpa_nss_nss_session *hws_nss);

	/* Session Counters */
	spinlock_t lock;
	uint32_t active_nss_session_count, active_avm_pa_session_count;
	uint32_t pending_nss_session_count, pending_avm_pa_session_count;
	uint32_t failed_nss_offloads, successful_nss_offloads;
};

/*
 *==============================================================================
 * HWPA NSS subsystems
 *==============================================================================
 */

/**
 * @struct hwpa_nss_tracker
 * @brief A subsystem tracker
 */
struct hwpa_nss_tracker {
	uint16_t usage;
	uint16_t limit;
	spinlock_t lock;

	enum hwpa_backend_rv (*init)(struct hwpa_nss_subsystem *subsys);
};

/**
 * @struct hwpa_nss_subsys_msg
 * @brief subsystem specific NSS message. Needed for Synchronization with NSS.
 *
 */
struct hwpa_nss_subsys_msg {
	union {
		struct nss_ipv4_msg *ipv4;
		struct nss_ipv6_msg *ipv6;
	};
};

/**
 * @struct hwpa_nss_ipv4_specific
 * @brief IPV4 NSS subsystem specific data.
 */
struct hwpa_nss_ipv4_specific {
#ifdef HWPA_NSS_DEBUG
	uint16_t magic;
#endif
	/*
	 * The Hashtable used for tracking all pending and offloaded nss ipv4
	 * sessions
	 */
	DECLARE_HASHTABLE(session_table, NSS_MAX_IPV4_SESSIONS_LOG2);
};

/**
 * @struct hwpa_nss_ipv6_specific
 * @brief IPV6 NSS subsystem specific data.
 */
struct hwpa_nss_ipv6_specific {
#ifdef HWPA_NSS_DEBUG
	uint16_t magic;
#endif
	/*
	 * The Hashtable used for tracking all pending and offloaded nss ipv6
	 * sessions
	 */
	DECLARE_HASHTABLE(session_table, NSS_MAX_IPV6_SESSIONS_LOG2);
};

/**
 * @struct hwpa_nss_synchronizer
 * @brief Stat synchonization of a nss subsystem
 *
 */
struct hwpa_nss_synchronizer {
	/* The nss synchronization message */
	struct hwpa_nss_subsys_msg msg;

	/*
	 * A lock used for read and write accesses on the subsystem message
	 * above
	 */
	spinlock_t lock;

	/* workqueue for periodic stat synchronization */
	struct workqueue_struct *workqueue;
	struct delayed_work work;

	/* API functions */
	enum hwpa_backend_rv (*init)(struct hwpa_nss_subsystem *subsys);
	void (*exit)(struct hwpa_nss_subsystem *subsys);
};

/**
 * @enum hwpa_nss_subsystem_flag
 * @brief NSS Subsystem representation flags
 */
enum hwpa_nss_subsystem_flag {
	HWPA_NSS_SUBSYS_FLAG_BIDIRECTIONAL_SESSIONS,
	HWPA_NSS_SUBSYS_FLAG_MAX
};

/**
 * @struct hwpa_nss_subsystem
 * @brief Representation of a NSS Subsystem/Interface
 */
struct hwpa_nss_subsystem {
	unsigned long flags;
	char label[HWPA_NSS_LABEL_SIZE];

	/* NSS interface handle for subsystem */
	struct nss_ctx_instance *mgr;

	struct hwpa_nss_synchronizer *sync;
	struct hwpa_nss_tracker *tracker;

	/* API functions */
	enum hwpa_backend_rv (*init)(struct hwpa_nss_subsystem *subsys);
	void (*exit)(struct hwpa_nss_subsystem *subsys);
	uint32_t (*gen_hash)(struct hwpa_nss_nss_session *hws_nss);
	void (*register_nss_session)(struct hwpa_nss_subsystem *subsys,
							uint32_t hash, struct hwpa_nss_nss_session *hws_nss);
	struct hwpa_nss_nss_session* (*find_nss_session)(struct hwpa_nss_subsystem *subsys,
							uint32_t hash, struct hwpa_nss_nss_session *hws_nss, enum hwpa_nss_session_direction dir);
	void (*purge_sessions)(struct hwpa_nss_subsystem *subsys);

	/* Subsystem specific data and a lock for it */
	spinlock_t lock;
	union	{
		struct hwpa_nss_ipv4_specific *ipv4_spec;
		struct hwpa_nss_ipv6_specific *ipv6_spec;
		void *spec;
	};

	/*
	 * This mutex is used for locking the specific data for a subsystem
	 * (Basically the hashlist containing all nss sessions)
	 */
	struct mutex mutex;
};

/*
 *==============================================================================
 * HWPA NSS private global context
 *==============================================================================
 */

/**
 * @enum hwpa_nss_subsystem_idx
 * @brief Subsystem Index
 *
 */
enum hwpa_nss_subsystem_idx	{
	HWPA_NSS_SUBSYSTEM_IDX_IPV4,
	HWPA_NSS_SUBSYSTEM_IDX_IPV6,
	HWPA_NSS_SUBSYSTEM_IDX_MAX,
};

/**
 * @enum hwpa_nss_offloader_idx
 * @brief Offloader Index
 *
 */
enum hwpa_nss_offloader_idx	{
	HWPA_NSS_OFFLOADER_IDX_IPV4,
	HWPA_NSS_OFFLOADER_IDX_IPV6,
	HWPA_NSS_OFFLOADER_IDX_MAX,
};

/**
 * @struct hwpa_nss_pending_offload_manager
 * @brief a manager for pending offloads
 *
 */
struct hwpa_nss_pending_offload_manager {
	/* a list of all currently pending offloads */
	struct list_head pending_offloads;

	/* a lock for the pending_offloads_list*/
	spinlock_t lock;

	/*
	 * A workqueue which lets the pending_offload_manager periodically check
	 * all pending offloads to see whether a timing condition was hit.
	 */
	struct workqueue_struct *workqueue;
	struct delayed_work work;
};

/**
 * @struct hwpa_nss_context
 * @brief private, global data struct for the hwpa_nss subsystem
 */
struct hwpa_nss_context	{
	/* contexts for nss subsystems */
	struct hwpa_nss_subsystem *subsystems[HWPA_NSS_SUBSYSTEM_IDX_MAX];

	/* the offloading instances */
	struct hwpa_nss_offloader *offloaders[HWPA_NSS_OFFLOADER_IDX_MAX];

	/* kmem cache for hwpa_sessions and nss_sessions saved locally*/
	struct kmem_cache *kmem_hwpa;
	struct kmem_cache *kmem_nss;

	/* Manager for pending offloads */
	struct hwpa_nss_pending_offload_manager pending_offload_mgr;
};

/**
 * hwpa_nss private data used globally in this file
 */
static struct hwpa_nss_context hwpa_nss_ctx;

/**
 * @struct hwpa_nss_if_data
 * @brief a struct describing a nss counterpart of a netdevice
 */
struct hwpa_nss_if_data	{
	int32_t ifnum;
};

/**
 * @struct hwpa_nss_offloading_data
 * @brief encapsulation of all data required for a single offload
 */
struct hwpa_nss_offloading_data	{
	/* All information extracted from an avm_pa session during an offload */
	struct net_device *in, *out, *bridge;
	enum hwpa_nss_nat_mode nat_mode;
	struct hwpa_nss_hwpa_session *hws_hwpa;
	struct hwpa_nss_nss_session *hws_nss;
	const struct avm_pa_session *sess_pa;
	const struct avm_pa_egress *eg;
	const struct avm_pa_pkt_match *ig_match, *eg_match;
	/* Subsystem-specific NSS rule message built for this offload */
	void *nss_msg;
	bool is_routed;
	uint8_t protocol;
	/* interface hierarchy, one row per direction (HWPA_NSS_DIRS) */
	struct hwpa_nss_if_data interfaces[HWPA_NSS_DIRS][HWPA_NSS_MAX_INTERFACES];

	/* A list node for the list of all pending offloads */
	struct list_head node;

	/*
	 * A timestamp for the pending offload manager to determine the age of a
	 * pending offload
	 */
	uint32_t timestamp;
};

/*
 *==============================================================================
 * channel allocation
 *==============================================================================
 */

/**
 * @fn int try_to_accelerate(avm_pid_handle, struct sk_buff*)
 * @brief avm_pa callback function
 *
 * @param pid_handle [in] corresponding endpoint pid
 * @param skb [in] the packet
 * @return AVM_PA_RX_OK
 */
int try_to_accelerate(avm_pid_handle pid_handle,
		      struct sk_buff *skb)
{
	/*
	 * No software-path acceleration is attempted here; the packet is
	 * simply handed back to the regular avm_pa receive path.
	 */
	return AVM_PA_RX_OK;
}

/*
 *==============================================================================
 * hwpa nss tracking
 *==============================================================================
 */

/**
 * @fn enum hwpa_backend_rv hwpa_nss_tracker_add_nss_session(struct hwpa_nss_subsystem*)
 * @brief increments tracker usage counter. If usage would exceed limit, counter
 * is not incremented and error is returned.
 *
 * @param subsys [in] subsystem containing the tracker
 * @return success or error code in case limit is reached
 */
static enum hwpa_backend_rv hwpa_nss_tracker_add_nss_session(struct hwpa_nss_subsystem *subsys)
{
	struct hwpa_nss_tracker *tracker = subsys->tracker;

	spin_lock_bh(&tracker->lock);
	if (unlikely(tracker->usage >= tracker->limit)) {
		spin_unlock_bh(&tracker->lock);
		return HWPA_BACKEND_ERR_TRACKER_LIMIT;
	}

	tracker->usage++;
	spin_unlock_bh(&tracker->lock);

	return HWPA_BACKEND_SUCCESS;
}

/**
 * @fn enum hwpa_backend_rv hwpa_nss_tracker_remove_nss_session(struct hwpa_nss_subsystem*)
 * @brief decrements usage counter.
 *
 * @param subsys [in] subsystem containig the tracker
 * @return success or error code if usage is zero
 */
static enum hwpa_backend_rv hwpa_nss_tracker_remove_nss_session(struct hwpa_nss_subsystem *subsys)
{
	struct hwpa_nss_tracker *tracker = subsys->tracker;

	spin_lock_bh(&tracker->lock);
	if (unlikely(tracker->usage == 0)) {
		spin_unlock_bh(&tracker->lock);
		pr_err("trying to remove session from tracker although there is none registered\n");
		return HWPA_BACKEND_ERR_TRACKER_LIMIT;
	}

	tracker->usage--;
	spin_unlock_bh(&tracker->lock);

	return HWPA_BACKEND_SUCCESS;
}

/**
 * @fn enum hwpa_backend_rv hwpa_nss_ipv4_init_limit(struct hwpa_nss_subsystem*)
 * @brief initializes ipv4 subsystem usage and limit counter. Limit is
 * min(HASH_SIZE(ipv4->session_table), nss_ipv4_max_conn_count).
 *
 * @param subsys [in] subsystem containing the tracker
 * @return success only
 */
/*
 * Fixes vs. original: "%u" was used for the signed int max_nss_sessions
 * (format/argument mismatch) and the pr_warn message lacked a trailing
 * newline; stray double spaces removed.
 */
static enum hwpa_backend_rv hwpa_nss_ipv4_init_limit(struct hwpa_nss_subsystem *subsys)
{
	struct hwpa_nss_tracker *tracker = subsys->tracker;
	struct hwpa_nss_ipv4_specific *ipv4 = subsys->ipv4_spec;
	int max_nss_sessions;

	PR_DEVEL("Init IPV4 Tracker\n");

	/* Start from the local hashtable capacity ... */
	tracker->limit = HASH_SIZE(ipv4->session_table);
	max_nss_sessions = nss_ipv4_max_conn_count();
	PR_DEVEL("Max NSS IPV4 sessions: %d\n", max_nss_sessions);

	/* ... and clamp it to what the NSS firmware actually supports */
	if (max_nss_sessions < tracker->limit) {
		pr_warn("FW limit (%d) of IPV4 NSS Sessions smaller than configured limit (%d). Reducing limit to %d\n",
			max_nss_sessions, tracker->limit,
			max_nss_sessions);
		tracker->limit = max_nss_sessions;
	}

	return HWPA_BACKEND_SUCCESS;
}

/**
 * @fn enum hwpa_backend_rv hwpa_nss_ipv6_init_limit(struct hwpa_nss_subsystem*)
 * @brief initializes ipv6 subsystem usage and limit counter. Limit is
 * min(HASH_SIZE(ipv6->session_table), nss_ipv6_max_conn_count).
 *
 * @param subsys [in] subsystem containing the tracker
 * @return success only
 */
/*
 * Fixes vs. original: "%u" was used for the signed int max_nss_sessions
 * (format/argument mismatch) and the pr_warn message lacked a trailing
 * newline; stray double spaces removed.
 */
static enum hwpa_backend_rv hwpa_nss_ipv6_init_limit(struct hwpa_nss_subsystem *subsys)
{
	struct hwpa_nss_tracker *tracker = subsys->tracker;
	struct hwpa_nss_ipv6_specific *ipv6 = subsys->ipv6_spec;
	int max_nss_sessions;

	PR_DEVEL("Init IPV6 Tracker\n");

	/* Start from the local hashtable capacity ... */
	tracker->limit = HASH_SIZE(ipv6->session_table);
	max_nss_sessions = nss_ipv6_max_conn_count();
	PR_DEVEL("Max NSS IPV6 sessions: %d\n", max_nss_sessions);

	/* ... and clamp it to what the NSS firmware actually supports */
	if (max_nss_sessions < tracker->limit) {
		pr_warn("FW limit (%d) of IPV6 NSS Sessions smaller than configured limit (%d). Reducing limit to %d\n",
			max_nss_sessions, tracker->limit,
			max_nss_sessions);
		tracker->limit = max_nss_sessions;
	}

	return HWPA_BACKEND_SUCCESS;
}

/*
 *==============================================================================
 * Global Context / Subsystem / Offloader - Selection functions
 *==============================================================================
 */

/**
 * @fn struct hwpa_nss_subsystem hwpa_nss_get_subsys*(enum hwpa_nss_subsystem_idx)
 * @brief get subsystem from index
 *
 * @param idx [in] the index of the requested subsystem
 * @return subsytem or NULL in case of invalid index
 */
static struct hwpa_nss_subsystem *hwpa_nss_get_subsys(enum hwpa_nss_subsystem_idx idx)
{
	struct hwpa_nss_context *global_ctx = &hwpa_nss_ctx;

	if (idx >= HWPA_NSS_SUBSYSTEM_IDX_MAX)
		return (struct hwpa_nss_subsystem *)NULL;

	return global_ctx->subsystems[idx];
}

/**
 * @fn struct hwpa_nss_subsystem hwpa_nss_get_offloader*(enum hwpa_nss_offloader_idx)
 * @brief get offloader from index
 *
 * @param idx [in] index of the requested offloader
 * @return offloader or NULL in case of invalid index
 */
static struct hwpa_nss_offloader *hwpa_nss_get_offloader(enum hwpa_nss_offloader_idx idx)
{
	struct hwpa_nss_context *global_ctx = &hwpa_nss_ctx;

	if (idx >= HWPA_NSS_OFFLOADER_IDX_MAX)
		return (struct hwpa_nss_offloader *)NULL;

	return global_ctx->offloaders[idx];
}

/**
 * @fn bool hwpa_nss_invalid_nss_session(struct hwpa_nss_nss_session*)
 * @brief check if given nss session is valid
 *
 * @param hws_nss [in] the session
 *
 * @return True if invalid, false otherwise
 */
static bool hwpa_nss_invalid_nss_session(struct hwpa_nss_nss_session *hws_nss)
{
#ifdef HWPA_NSS_DEBUG
	return hws_nss ? (hws_nss->magic != NSS_SESSION_MAGIC) ||
		hws_nss->state == HWPA_NSS_SESSION_STATE_INVALID : true;
#else

	return !hws_nss || hws_nss->state == HWPA_NSS_SESSION_STATE_INVALID;
#endif
}

/**
 * @fn bool hwpa_nss_invalid_hwpa_session(struct hwpa_nss_hwpa_session*)
 * @brief check if given hwpa session is valid
 *
 * @param hws_hwpa [in] hwpa session
 * @return True if invalid, false otherwise
 */
static bool hwpa_nss_invalid_hwpa_session(struct hwpa_nss_hwpa_session *hws_hwpa)
{
	if (!hws_hwpa)
		return true;

#ifdef HWPA_NSS_DEBUG
	/*
	 * NOTE(review): hwpa sessions are stamped with NSS_SESSION_MAGIC in
	 * hwpa_nss_init_hwpa_session, not with the HWPA_SESSION_MAGIC that is
	 * defined for this purpose. The check below matches the init code, so
	 * behavior is self-consistent, but the two session types cannot be
	 * told apart by magic — probably HWPA_SESSION_MAGIC was intended in
	 * both places.
	 */
	if (hws_hwpa->magic != NSS_SESSION_MAGIC)
		return true;
#endif

	/* A hwpa session without a backing nss session is unusable */
	return !hws_hwpa->hws_nss;
}

/**
 * @fn unsigned long hwpa_nss_session_to_handle(struct hwpa_nss_hwpa_session*)
 * @brief Translates a hwpa-session to an hwpa-handle
 *
 * @param hwpa_sess [in] the hwpa Session to translate to a handle
 *
 * @return the generated handle
 */
unsigned long hwpa_nss_session_to_handle(struct hwpa_nss_hwpa_session *hwpa_sess)
{
	/* The handle is simply the session pointer reinterpreted as an integer */
	return (unsigned long) hwpa_sess;
}

/**
 * @fn struct hwpa_nss_hwpa_session hwpa_nss_handle_to_session*(unsigned long)
 * @brief Translates a hwpa handle to a hwpa session
 *
 * @param handle [in] the handle for the requested session
 *
 * @return the requested Session or NULL in case of error
 */
struct hwpa_nss_hwpa_session *hwpa_nss_handle_to_session(unsigned long handle)
{
	struct hwpa_nss_hwpa_session *hws_hwpa;

	/* Inverse of hwpa_nss_session_to_handle(): handle is the pointer */
	hws_hwpa = (struct hwpa_nss_hwpa_session *) handle;

	/* Sanity-check both the hwpa session and its backing nss session */
	if (hwpa_nss_invalid_hwpa_session(hws_hwpa))
		goto invalid;
	if (hwpa_nss_invalid_nss_session(hws_hwpa->hws_nss))
		goto invalid;

	return hws_hwpa;

invalid:
	PR_DEVEL("Invalid Session");
	return NULL;
}

/*
 *==============================================================================
 * HWPA and NSS Session init and deinit
 *==============================================================================
 */

/**
 * @fn void hwpa_nss_init_nss_session(struct hwpa_nss_nss_session*)
 * @brief initializes a nss session struct
 *
 * @param hws_nss [in] nss session
 */
static void hwpa_nss_init_nss_session(struct hwpa_nss_nss_session *hws_nss)
{
#ifdef HWPA_NSS_DEBUG
	hws_nss->magic = NSS_SESSION_MAGIC;
#endif
	hws_nss->flags = 0;
	hws_nss->state = HWPA_NSS_SESSION_STATE_INITIALIZED;
	/* Prepare lock, hashtable linkage and the list of attached hwpa sessions */
	spin_lock_init(&hws_nss->sync_lock);
	INIT_HLIST_NODE(&hws_nss->node);
	INIT_LIST_HEAD(&hws_nss->hwpa_session_list);
}

/**
 * @fn void hwpa_nss_init_hwpa_session(struct hwpa_nss_hwpa_session*)
 * @brief initializes a hwpa session struct
 *
 * @param hws_hwpa [in] hwpa session
 */
static void hwpa_nss_init_hwpa_session(struct hwpa_nss_hwpa_session *hws_hwpa)
{
#ifdef HWPA_NSS_DEBUG
	/*
	 * NOTE(review): stamps NSS_SESSION_MAGIC although HWPA_SESSION_MAGIC
	 * exists for this type. hwpa_nss_invalid_hwpa_session checks the same
	 * value, so the code is self-consistent — but the two session types
	 * cannot be distinguished by magic. Probably HWPA_SESSION_MAGIC was
	 * intended (here and in the checker).
	 */
	hws_hwpa->magic = NSS_SESSION_MAGIC;
#endif
	/* DIRECTION_MAX marks "no direction assigned yet" */
	hws_hwpa->direction = HWPA_NSS_SESSION_DIRECTION_MAX;
	INIT_LIST_HEAD(&hws_hwpa->node);
}

/**
 * @fn void hwpa_nss_unregister_nss_session(struct hwpa_nss_session*)
 * @brief unregister nss session from subsystem, by removing it from hashlist.
 *
 * @param hws_nss [in] nss session to unregister
 */
static void hwpa_nss_unregister_nss_session(struct hwpa_nss_nss_session *hws_nss)
{
	struct hwpa_nss_subsystem *subsys;
	spinlock_t *lock;

	PR_DEVEL("Unregistering nss session %p\n", hws_nss);

	/* The owning subsystem holds the hashtable this session lives in */
	subsys = hws_nss->offloader->subsys;

	lock = &subsys->lock;

	/* Writers serialize on the subsystem lock; lookups are RCU readers */
	spin_lock_bh(lock);
	hash_del_rcu(&hws_nss->node);
	spin_unlock_bh(lock);
	/*
	 * Wait for all in-flight RCU readers before returning, so the caller
	 * may free the session immediately afterwards.
	 */
	synchronize_rcu();
}

/**
 * @fn void hwpa_nss_ipv4_register_nss_session(struct hwpa_nss_subsystem*, uint32_t, struct hwpa_nss_nss_session*)
 * @brief register ipv4 session by adding it to the ipv4 hashlist
 *
 * @param subsys [in] ipv4 subsystem
 * @param hash [in] hash of nss session
 * @param hws_nss [in] nss session
 */
static void hwpa_nss_ipv4_register_nss_session(struct hwpa_nss_subsystem *subsys,
							       uint32_t hash,
							       struct hwpa_nss_nss_session *hws_nss)
{
	spinlock_t *lock = &subsys->lock;

	/* Writers serialize on the subsystem lock; lookups are RCU readers */
	spin_lock_bh(lock);
	hash_add_rcu(subsys->ipv4_spec->session_table, &hws_nss->node, hash);
	spin_unlock_bh(lock);
	/*
	 * NOTE(review): an RCU grace period after *adding* an entry looks
	 * unnecessary (it is only needed between removal and freeing); kept
	 * as-is — confirm before removing.
	 */
	synchronize_rcu();
}

/**
 * @fn void hwpa_nss_ipv6_register_nss_session(struct hwpa_nss_subsystem*, uint32_t, struct hwpa_nss_nss_session*)
 * @brief register ipv6 session by adding it to the ipv6 hashlist
 *
 * @param subsys [in] ipv6 subsystem
 * @param hash [in] hash of nss session
 * @param hws_nss [in] nss session
 */
static void hwpa_nss_ipv6_register_nss_session(struct hwpa_nss_subsystem *subsys,
							       uint32_t hash,
							       struct hwpa_nss_nss_session *hws_nss)
{
	spinlock_t *lock = &subsys->lock;

	/* Writers serialize on the subsystem lock; lookups are RCU readers */
	spin_lock_bh(lock);
	hash_add_rcu(subsys->ipv6_spec->session_table, &hws_nss->node, hash);
	spin_unlock_bh(lock);
	/*
	 * NOTE(review): an RCU grace period after *adding* an entry looks
	 * unnecessary (it is only needed between removal and freeing); kept
	 * as-is — confirm before removing.
	 */
	synchronize_rcu();
}

/**
 * @fn enum hwpa_backend_rv hwpa_nss_register_nss_session(struct hwpa_nss_nss_session*, uint32_t)
 * @brief register nss session to hwpa_nss_context by adding it to the
 * corresponding hashlist
 *
 * @param hws_nss [in] nss session to register
 * @param hash [in] hash to register session with
 */
static void hwpa_nss_register_nss_session(struct hwpa_nss_nss_session *hws_nss,
					  uint32_t hash)
{
	/* Dispatch to the subsystem-specific (ipv4/ipv6) register hook */
	struct hwpa_nss_subsystem *subsys = hws_nss->offloader->subsys;

	subsys->register_nss_session(subsys, hash, hws_nss);

	PR_DEVEL("Registered nss session %p with hash %u\n", hws_nss, hash);
}

/**
 * @fn void hwpa_nss_detach_from_nss_session(struct hwpa_nss_hwpa_session*)
 * @brief detach hwpa session from nss session
 *
 * @param hws_hwpa the hwpa session to detach
 */
static void hwpa_nss_detach_from_nss_session(struct hwpa_nss_hwpa_session *hws_hwpa)
{
	struct hwpa_nss_nss_session *hws_nss;

	/* Nothing to do for sessions that were never validly attached */
	if (hwpa_nss_invalid_hwpa_session(hws_hwpa)) {
		pr_warn("Could not detach hwpa session from nss session");
		return;
	}

	hws_nss = hws_hwpa->hws_nss;

	/* Guard against reference-count underflow */
	if (!hws_nss->pa_ref_count) {
		pr_warn("BUG? Session could not be deregistered as there are non.\n");
		return;
	}

	list_del(&hws_hwpa->node);
	hws_nss->pa_ref_count--;

	PR_DEVEL("Detached hwpa session %p from nss session %p\n", hws_hwpa, hws_hwpa->hws_nss);
}

/**
 * @fn void hwpa_nss_attach_to_nss_session(struct hwpa_nss_nss_session*, struct hwpa_nss_hwpa_session*, enum hwpa_nss_session_direction)
 * @brief attach hwpa session to nss session.
 *
 * @param hws_nss [in] the nss session to attach to
 * @param hws_hwpa [in] the hwpa session
 */
static void hwpa_nss_attach_to_nss_session(struct hwpa_nss_nss_session *hws_nss,
					   struct hwpa_nss_hwpa_session *hws_hwpa)
{
	hws_hwpa->hws_nss = hws_nss;
	hws_nss->pa_ref_count++;
	/*
	 * list_add_tail(new, head): the hwpa session's node is the new entry,
	 * the nss session's hwpa_session_list is the list head. The previous
	 * code had the two arguments swapped — that happens to form a valid
	 * two-element ring for the first attach, but corrupts the list as
	 * soon as a second hwpa session attaches to the same nss session
	 * (the already-linked head would be re-added as an entry).
	 */
	list_add_tail(&hws_hwpa->node, &hws_nss->hwpa_session_list);

	PR_DEVEL("Attached hwpa session %p to nss session %p\n", hws_hwpa, hws_nss);
}

/**
 * @fn void hwpa_nss_destroy_nss_session(struct hwpa_nss_nss_session*)
 * @brief destroy nss session in hwpa_nss context and also unregisters it.
 *
 * @param hws_nss [in] nss session to destroy
 */
static void hwpa_nss_destroy_nss_session(struct hwpa_nss_nss_session *hws_nss)
{
	struct hwpa_nss_context *global_ctx = &hwpa_nss_ctx;

	/* All hwpa sessions must have been detached before destruction */
	WARN_ON(!list_empty(&hws_nss->hwpa_session_list));

	/*
	 * Removes the session from the subsystem hashtable; this includes an
	 * RCU grace period, so freeing immediately afterwards is safe.
	 */
	hwpa_nss_unregister_nss_session(hws_nss);

	kmem_cache_free(global_ctx->kmem_nss, hws_nss);

	PR_DEVEL("Destroyed nss session %p\n", hws_nss);
}

/**
 * @fn void hwpa_nss_destroy_hwpa_session(struct hwpa_nss_hwpa_session*)
 * @brief destroy hwpa session in hwpa_nss context and also detaches it from
 * nss session.
 *
 * @param hws_hwpa [in] hwpa session to destroy
 */
static void hwpa_nss_destroy_hwpa_session(struct hwpa_nss_hwpa_session *hws_hwpa)
{
	struct hwpa_nss_context *global_ctx = &hwpa_nss_ctx;

	/* Unlink from the nss session and drop its pa_ref_count first */
	hwpa_nss_detach_from_nss_session(hws_hwpa);

	kmem_cache_free(global_ctx->kmem_hwpa, hws_hwpa);

	PR_DEVEL("Destroyed hwpa session %p\n", hws_hwpa);
}

/**
 * @fn bool hwpa_nss_is_routed(const struct avm_pa_session*)
 * @brief figure out if session is routed or bridged in the nss point of view.
 *
 * @param s [in] avm_pa session
 * @return true if session is routed false if not
 */
static bool hwpa_nss_is_routed(const struct avm_pa_session *sess_pa)
{
	/* A TTL modification implies the session crosses a routing hop */
	return (sess_pa->mod.modflags & AVM_PA_MOD_TTL) != 0;
}

/*
 *==============================================================================
 * NSS Session Hash Generation
 *==============================================================================
 */

/**
 * @fn uint32_t hwpa_nss_ipv4_gen_session_hash_raw(uint32_t, uint32_t, uint32_t, uint32_t, uint8_t)
 * @brief generate hash for ipv4 session properties. Needs to be symmetric.
 *
 * @param flow_ip [in] flow_ip used to generate hash
 * @param flow_ident [in] flow_ident used to generate hash
 * @param return_ip_xlate [in] return_ip_xlate used to generate hash
 * @param return_ident_xlate [in] return_ident_xlate used to generate hash
 * @param protocol [in] protocol used to generate hash
 *
 * @return the generated hash value
 */
static uint32_t hwpa_nss_ipv4_gen_session_hash_raw(uint32_t flow_ip, uint32_t flow_ident,
				       uint32_t return_ip_xlate, uint32_t return_ident_xlate,
				       uint8_t protocol)
{
	/*
	 * XOR all 5-tuple components together. XOR is commutative, so
	 * exchanging the flow and return halves yields the same value —
	 * the symmetry this hash is required to have.
	 */
	return flow_ip ^ flow_ident ^
	       return_ip_xlate ^ return_ident_xlate ^
	       (uint32_t) protocol;
}

/**
 * @fn uint32_t hwpa_nss_ipv6_gen_session_hash_raw(uint32_t*, uint32_t, uint32_t*, uint32_t, uint8_t)
 * @brief generate hash for ipv6 session properties. Needs to be symmetric.
 *
 * @param flow_ip [in] flow_ip used to generate hash
 * @param flow_ident [in] flow_ident used to generate hash
 * @param return_ip [in] return_ip used to generate hash
 * @param return_ident [in] return_ident used to generate hash
 * @param protocol [in] protocol used to generate hash
 *
 * @return the generated hash value
 */
static uint32_t hwpa_nss_ipv6_gen_session_hash_raw(uint32_t *flow_ip, uint32_t flow_ident,
				       uint32_t *return_ip, uint32_t return_ident,
				       uint8_t protocol)
{
	uint32_t hash;

	/*
	 * Fold each 128-bit address into one 32-bit word, then XOR all
	 * components. XOR commutativity makes the hash symmetric in the
	 * flow/return halves, as required.
	 */
	hash = flow_ip[0] ^ flow_ip[1] ^ flow_ip[2] ^ flow_ip[3];
	hash ^= return_ip[0] ^ return_ip[1] ^ return_ip[2] ^ return_ip[3];
	hash ^= flow_ident ^ return_ident;
	hash ^= (uint32_t) protocol;

	return hash;
}

/**
 * @fn uint32_t hwpa_nss_ipv4_gen_session_hash(struct hwpa_nss_nss_session*)
 * @brief generate ipv4 session hash
 *
 * @param hws_nss [in] NSS session to generate hash for
 *
 * @return the generated hash value
 */
static uint32_t hwpa_nss_ipv4_gen_session_hash(struct hwpa_nss_nss_session *hws_nss)
{
	struct hwpa_nss_ipv4_session_data *data = &hws_nss->ipv4;

	return hwpa_nss_ipv4_gen_session_hash_raw(data->tuple.flow_ip,
		data->tuple.flow_ident, data->return_ip_xlate,
		data->return_ident_xlate, data->tuple.protocol);
}

/**
 * @fn uint32_t hwpa_nss_ipv6_gen_session_hash(struct hwpa_nss_nss_session*)
 * @brief generate ipv6 session hash
 *
 * @param hws_nss [in] NSS session to generate hash for
 *
 * @return the generated hash value
 */
static uint32_t hwpa_nss_ipv6_gen_session_hash(struct hwpa_nss_nss_session *hws_nss)
{
	struct nss_ipv6_5tuple *tuple = &hws_nss->ipv6.tuple;

	/* No NAT for IPv6, so the plain 5-tuple is hashed directly */
	return hwpa_nss_ipv6_gen_session_hash_raw(tuple->flow_ip,
						  tuple->flow_ident,
						  tuple->return_ip,
						  tuple->return_ident,
						  tuple->protocol);
}

/*
 *==============================================================================
 * NSS/HWPA-Session search for ipv4
 *==============================================================================
 */

/**
 * @fn struct hwpa_nss_nss_session hwpa_nss_ipv4_find_session_unidir*(struct hwpa_nss_subsystem*, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint8_t)
 * @brief find an unidirectional nss ipv4 session.
 *
 * @param subsys [in] subsystem containing the session
 * @param hash [in] hash assigned to searched session
 * @param flow_ip [in] flow_ip
 * @param flow_ident [in] flow_ident
 * @param return_ip_xlate [in] return_ip_xlate
 * @param return_ident_xlate [in] return_ident_xlate
 * @param protocol [in] protocol
 * @return the found session or NULL
 */
static struct hwpa_nss_nss_session *hwpa_nss_ipv4_find_session_unidir(struct hwpa_nss_subsystem *subsys, uint32_t hash,
							   uint32_t flow_ip, uint32_t flow_ident,
							   uint32_t return_ip_xlate, uint32_t return_ident_xlate,
							   uint8_t protocol)
{
	struct hwpa_nss_nss_session *cursor;
	struct hwpa_nss_nss_session *result = NULL;

	rcu_read_lock();
	hash_for_each_possible_rcu(subsys->ipv4_spec->session_table,
				   cursor, node, hash) {
		const struct hwpa_nss_ipv4_session_data *sd = &cursor->ipv4;

		/* Exact match in flow direction only. */
		if (sd->tuple.protocol == protocol &&
		    sd->tuple.flow_ip == flow_ip &&
		    sd->tuple.flow_ident == flow_ident &&
		    sd->return_ident_xlate == return_ident_xlate &&
		    sd->return_ip_xlate == return_ip_xlate) {
			result = cursor;
			break;
		}
	}
	rcu_read_unlock();

	return result;
}

/**
 * @fn struct hwpa_nss_nss_session hwpa_nss_ipv4_find_session_bidir*(struct hwpa_nss_subsystem*, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint8_t)
 * @brief find a bidirectional nss ipv4 session. It is constructed according to
 * ecm_db_connection_find_and_ref_chain from ecm.
 *
 * @param subsys [in] subsystem containing the session
 * @param hash [in] hash assigned to searched session
 * @param flow_ip [in] flow_ip
 * @param flow_ident [in] flow_ident
 * @param return_ip_xlate [in] return_ip_xlate
 * @param return_ident_xlate [in] return_ident_xlate
 * @param protocol [in] protocol
 * @return the found session or NULL
 */
static struct hwpa_nss_nss_session *hwpa_nss_ipv4_find_session_bidir(struct hwpa_nss_subsystem *subsys, uint32_t hash,
							   uint32_t flow_ip, uint32_t flow_ident,
							   uint32_t return_ip_xlate, uint32_t return_ident_xlate,
							   uint8_t protocol)
{
	struct hwpa_nss_nss_session *cursor;
	struct hwpa_nss_nss_session *result = NULL;

	rcu_read_lock();
	hash_for_each_possible_rcu(subsys->ipv4_spec->session_table,
				   cursor, node, hash) {
		const struct hwpa_nss_ipv4_session_data *sd = &cursor->ipv4;
		bool fwd_match, rev_match;

		if (sd->tuple.protocol != protocol)
			continue;

		/* Match the tuple as-is ... */
		fwd_match = sd->tuple.flow_ip == flow_ip &&
			    sd->tuple.flow_ident == flow_ident &&
			    sd->return_ident_xlate == return_ident_xlate &&
			    sd->return_ip_xlate == return_ip_xlate;

		/* ... or with flow and return side swapped. */
		rev_match = sd->tuple.flow_ip == return_ip_xlate &&
			    sd->tuple.flow_ident == return_ident_xlate &&
			    sd->return_ident_xlate == flow_ident &&
			    sd->return_ip_xlate == flow_ip;

		if (fwd_match || rev_match) {
			result = cursor;
			break;
		}
	}
	rcu_read_unlock();

	return result;
}

/**
 * @fn struct hwpa_nss_nss_session hwpa_nss_ipv4_find_nss_session_from_sync*(struct hwpa_nss_subsystem*, struct nss_ipv4_conn_sync*)
 * @brief find a bidirectional nss session from sync.
 *
 * @param subsys [in] subsystem containing the session
 * @param sync [in] ipv4 sync data for session identification
 * @return the found session or NULL
 */
static struct hwpa_nss_nss_session *hwpa_nss_ipv4_find_nss_session_from_sync(struct hwpa_nss_subsystem *subsys, struct nss_ipv4_conn_sync *sync)
{
	const uint32_t hash = hwpa_nss_ipv4_gen_session_hash_raw(sync->flow_ip,
								 sync->flow_ident,
								 sync->return_ip_xlate,
								 sync->return_ident_xlate,
								 sync->protocol);

	/* A sync message carries no direction info, so search both ways. */
	return hwpa_nss_ipv4_find_session_bidir(subsys, hash,
						sync->flow_ip, sync->flow_ident,
						sync->return_ip_xlate,
						sync->return_ident_xlate,
						sync->protocol);
}

/**
 * @fn struct hwpa_nss_nss_session hwpa_nss_ipv4_find_nss_session(struct hwpa_nss_subsystem*, uint32_t, struct hwpa_nss_nss_session*, enum hwpa_nss_session_direction)
 * @brief find an offloaded nss session from not-offloaded nss-session, its hash and for a specified direction.
 *
 * @param subsys [in] subsystem containing the session
 * @param hash [in] hash of the searched session
 * @param hws_nss [in] nss session used for searching
 * @param dir [in] direction of the offloaded session
 * @return the found session or NULL
 */
static struct hwpa_nss_nss_session *hwpa_nss_ipv4_find_nss_session(struct hwpa_nss_subsystem *subsys,
					uint32_t hash, struct hwpa_nss_nss_session *hws_nss, enum hwpa_nss_session_direction dir)
{
	struct hwpa_nss_ipv4_session_data *sd = &hws_nss->ipv4;
	struct hwpa_nss_nss_session *match = NULL;

	PR_DEVEL("Trying to find nss session with nss session %p with hash %x and direction %d\n", hws_nss, hash, dir);

	if (dir == HWPA_NSS_SESSION_DIRECTION_FLOW) {
		/* Tuple as stored in the candidate session. */
		match = hwpa_nss_ipv4_find_session_unidir(subsys, hash,
				sd->tuple.flow_ip, sd->tuple.flow_ident,
				sd->return_ip_xlate, sd->return_ident_xlate,
				sd->tuple.protocol);
	} else if (dir == HWPA_NSS_SESSION_DIRECTION_RETURN) {
		/* Flow and return side swapped. */
		match = hwpa_nss_ipv4_find_session_unidir(subsys, hash,
				sd->return_ip_xlate, sd->return_ident_xlate,
				sd->tuple.flow_ip, sd->tuple.flow_ident,
				sd->tuple.protocol);
	} else if (dir == HWPA_NSS_SESSION_DIRECTION_DONT_CARE) {
		match = hwpa_nss_ipv4_find_session_bidir(subsys, hash,
				sd->tuple.flow_ip, sd->tuple.flow_ident,
				sd->return_ip_xlate, sd->return_ident_xlate,
				sd->tuple.protocol);
	} else {
		pr_err("Bad Direction\n");
	}

	PR_DEVEL("Found nss session %p\n", match);

	return match;
}

/*
 *==============================================================================
 * NSS/HWPA-Session search for ipv6
 *==============================================================================
 */

/**
 * @fn struct hwpa_nss_nss_session hwpa_nss_ipv6_find_session_unidir*(struct hwpa_nss_subsystem*, uint32_t, uint32_t*, uint32_t, uint32_t*, uint32_t, uint8_t)
 * @brief find an unidirectional nss ipv6 session.
 *
 * @param subsys [in] subsystem containing the session
 * @param hash [in] hash assigned to searched session
 * @param flow_ip [in] flow_ip
 * @param flow_ident [in] flow_ident
 * @param return_ip [in] return_ip
 * @param return_ident [in] return_ident
 * @param protocol [in] protocol
 * @return the found session or NULL
 */
static struct hwpa_nss_nss_session *hwpa_nss_ipv6_find_session_unidir(struct hwpa_nss_subsystem *subsys, uint32_t hash,
							   uint32_t *flow_ip, uint32_t flow_ident,
							   uint32_t *return_ip, uint32_t return_ident,
							   uint8_t protocol)
{
	struct hwpa_nss_nss_session *cursor;
	struct hwpa_nss_nss_session *result = NULL;

	rcu_read_lock();
	hash_for_each_possible_rcu(subsys->ipv6_spec->session_table,
				   cursor, node, hash) {
		struct nss_ipv6_5tuple *t5 = &cursor->ipv6.tuple;

		/* Exact match in flow direction only. */
		if (t5->protocol == protocol &&
		    IPV6_ADDR_MATCH(t5->flow_ip, flow_ip) &&
		    t5->flow_ident == flow_ident &&
		    t5->return_ident == return_ident &&
		    IPV6_ADDR_MATCH(t5->return_ip, return_ip)) {
			result = cursor;
			break;
		}
	}
	rcu_read_unlock();

	return result;
}

/**
 * @fn struct hwpa_nss_nss_session hwpa_nss_ipv6_find_session_bidir*(struct hwpa_nss_subsystem*, uint32_t, uint32_t*, uint32_t, uint32_t*, uint32_t, uint8_t)
 * @brief find an bidirectional nss ipv6 session.
 *
 * @param subsys [in] subsystem containing the session
 * @param hash [in] hash assigned to searched session
 * @param flow_ip [in] flow_ip
 * @param flow_ident [in] flow_ident
 * @param return_ip [in] return_ip
 * @param return_ident [in] return_ident
 * @param protocol [in] protocol
 * @return the found session or NULL
 */
static struct hwpa_nss_nss_session *hwpa_nss_ipv6_find_session_bidir(struct hwpa_nss_subsystem *subsys, uint32_t hash,
							   uint32_t *flow_ip, uint32_t flow_ident,
							   uint32_t *return_ip, uint32_t return_ident,
							   uint8_t protocol)
{
	struct hwpa_nss_nss_session *cursor;
	struct hwpa_nss_nss_session *result = NULL;

	rcu_read_lock();
	hash_for_each_possible_rcu(subsys->ipv6_spec->session_table,
				   cursor, node, hash) {
		struct nss_ipv6_5tuple *t5 = &cursor->ipv6.tuple;
		bool fwd_match, rev_match;

		if (t5->protocol != protocol)
			continue;

		/* Match the tuple as-is ... */
		fwd_match = IPV6_ADDR_MATCH(t5->flow_ip, flow_ip) &&
			    t5->flow_ident == flow_ident &&
			    t5->return_ident == return_ident &&
			    IPV6_ADDR_MATCH(t5->return_ip, return_ip);

		/* ... or with flow and return side swapped. */
		rev_match = IPV6_ADDR_MATCH(t5->flow_ip, return_ip) &&
			    t5->flow_ident == return_ident &&
			    t5->return_ident == flow_ident &&
			    IPV6_ADDR_MATCH(t5->return_ip, flow_ip);

		if (fwd_match || rev_match) {
			result = cursor;
			break;
		}
	}
	rcu_read_unlock();

	return result;
}

/**
 * @fn struct hwpa_nss_nss_session hwpa_nss_ipv6_find_nss_session_from_sync*(struct hwpa_nss_subsystem*, struct nss_ipv6_conn_sync*)
 * @brief find an bidirectional nss session from sync.
 *
 * @param subsys [in] subsystem containing the session
 * @param sync [in] ipv6 sync data for session identification
 * @return the found session or NULL
 */
static struct hwpa_nss_nss_session *hwpa_nss_ipv6_find_nss_session_from_sync(struct hwpa_nss_subsystem *subsys, struct nss_ipv6_conn_sync *sync)
{
	const uint32_t hash = hwpa_nss_ipv6_gen_session_hash_raw(sync->flow_ip,
								 sync->flow_ident,
								 sync->return_ip,
								 sync->return_ident,
								 sync->protocol);

	/* A sync message carries no direction info, so search both ways. */
	return hwpa_nss_ipv6_find_session_bidir(subsys, hash,
						sync->flow_ip, sync->flow_ident,
						sync->return_ip,
						sync->return_ident,
						sync->protocol);
}

/**
 * @fn struct hwpa_nss_nss_session hwpa_nss_ipv6_find_nss_session(struct hwpa_nss_subsystem*, uint32_t, struct hwpa_nss_nss_session*, enum hwpa_nss_session_direction)
 * @brief find an offloaded nss session from not-offloaded nss-session, its hash and for a specified direction.
 *
 * @param subsys [in] subsystem containing the session
 * @param hash [in] hash of the searched session
 * @param hws_nss [in] nss session used for searching
 * @param dir [in] direction of the offloaded session
 * @return the found session or NULL
 */
static struct hwpa_nss_nss_session *hwpa_nss_ipv6_find_nss_session(struct hwpa_nss_subsystem *subsys,
					uint32_t hash, struct hwpa_nss_nss_session *hws_nss, enum hwpa_nss_session_direction dir)
{
	struct hwpa_nss_ipv6_session_data *sd = &hws_nss->ipv6;
	struct hwpa_nss_nss_session *match = NULL;

	PR_DEVEL("Trying to find nss session with nss session %p with hash %x and direction %d\n", hws_nss, hash, dir);

	if (dir == HWPA_NSS_SESSION_DIRECTION_FLOW) {
		/* Tuple as stored in the candidate session. */
		match = hwpa_nss_ipv6_find_session_unidir(subsys, hash,
				sd->tuple.flow_ip, sd->tuple.flow_ident,
				sd->tuple.return_ip, sd->tuple.return_ident,
				sd->tuple.protocol);
	} else if (dir == HWPA_NSS_SESSION_DIRECTION_RETURN) {
		/* Flow and return side swapped. */
		match = hwpa_nss_ipv6_find_session_unidir(subsys, hash,
				sd->tuple.return_ip, sd->tuple.return_ident,
				sd->tuple.flow_ip, sd->tuple.flow_ident,
				sd->tuple.protocol);
	} else if (dir == HWPA_NSS_SESSION_DIRECTION_DONT_CARE) {
		match = hwpa_nss_ipv6_find_session_bidir(subsys, hash,
				sd->tuple.flow_ip, sd->tuple.flow_ident,
				sd->tuple.return_ip, sd->tuple.return_ident,
				sd->tuple.protocol);
	} else {
		pr_err("Bad Direction\n");
	}

	PR_DEVEL("Found nss session %p\n", match);

	return match;
}

/*
 *==============================================================================
 * hwpa nss session preparation and destruction
 *==============================================================================
 */

/**
 * @fn enum hwpa_backend_rv hwpa_nss_set_protocol(uint8_t*, u16)
 * @brief set protocol field using pkttype from avm_pa
 *
 * @param protocol [out] field in hwpa_nss_session tuple
 * @param pkttype [in] pkttype from avm_pa
 * @return  success or error code.
 */
static enum hwpa_backend_rv hwpa_nss_set_protocol(uint8_t *protocol, u16 pkttype)
{
	const int ipproto = AVM_PA_PKTTYPE_IPPROTO(pkttype);

	/* Only the connection-oriented L4 protocols NSS can accelerate. */
	if (ipproto != IPPROTO_UDP && ipproto != IPPROTO_UDPLITE &&
	    ipproto != IPPROTO_TCP)
		return HWPA_BACKEND_UNSUPPORTED_L4_PROTOCOL;

	*protocol = (uint8_t)ipproto;

	return HWPA_BACKEND_SUCCESS;
}

/**
 * @fn enum hwpa_backend_rv hwpa_nss_ipv4_prepare_session(struct hwpa_nss_offloading_data *)
 * @brief prepare a nss session for the ipv4 subsystem using an avm_pa session.
 * Preparing means to use the avm_pa session to fill session data of the
 * nss session to make it ready for adding it to the hashlist.
 *
 * @param ofl_data [in] offloading data
 *
 * @return success or error code.
 */
static enum hwpa_backend_rv hwpa_nss_ipv4_prepare_session(struct hwpa_nss_offloading_data *ofl_data)
{
	enum hwpa_backend_rv retval = HWPA_BACKEND_SUCCESS;
	const struct avm_pa_session *sess_pa;
	struct hwpa_nss_nss_session *hws_nss;
	const uint16_t *ig_ports;
	const uint16_t *eg_ports;
	const struct iphdr *ip4_ig, *ip4_eg;
	const struct avm_pa_egress *eg;
	const struct avm_pa_pkt_match *ig_match, *eg_match;
	struct hwpa_nss_ipv4_session_data *hws_data;

	PR_DEVEL("Preparing IPv4 session\n");

	sess_pa = ofl_data->sess_pa;
	hws_nss = ofl_data->hws_nss;
	hws_data = &hws_nss->ipv4;

	eg = ofl_data->eg;
	ig_match = ofl_data->ig_match;
	eg_match = ofl_data->eg_match;

	/* Derive the L4 protocol from the ingress pkttype; unsupported
	 * protocols abort the preparation. */
	retval = hwpa_nss_set_protocol(&hws_data->tuple.protocol, ig_match->pkttype);
	if (retval != HWPA_BACKEND_SUCCESS) {
		PR_DEVEL("Couldn't set protocol. Session preparation failed!\n");
		goto failure_1;
	}
	ofl_data->protocol = hws_data->tuple.protocol;

	/* Locate the IPv4 and port headers in both match structures. */
	ip4_ig = hwpa_get_hdr(ig_match, AVM_PA_IPV4);
	ip4_eg = hwpa_get_hdr(eg_match, AVM_PA_IPV4);
	ig_ports = hwpa_get_hdr(ig_match, AVM_PA_PORTS);
	eg_ports = hwpa_get_hdr(eg_match, AVM_PA_PORTS);

	/* Configure IPs */
	switch (ofl_data->nat_mode) {
	case HWPA_NSS_IPV4_NAT_MODE_BRIDGED: /* Bridge */
		/*
		 * Ingress headers fill the plain tuple, egress headers the
		 * xlate side; in bridged mode both are presumably identical.
		 * NOTE(review): iphdr fields are big-endian on the wire;
		 * htonl() here acts as a swap to host order on little-endian
		 * hosts (htonl == ntohl) — presumably NSS expects host-order
		 * addresses; confirm against the NSS rule-create API.
		 */
		hws_data->tuple.flow_ip = htonl(ip4_ig->saddr);
		hws_data->flow_ip_xlate = htonl(ip4_eg->saddr);
		hws_data->tuple.return_ip = htonl(ip4_ig->daddr);
		hws_data->return_ip_xlate = htonl(ip4_eg->daddr);
		hws_data->tuple.flow_ident = htons(ig_ports[0]);
		hws_data->flow_ident_xlate = htons(eg_ports[0]);
		hws_data->tuple.return_ident = htons(ig_ports[1]);
		hws_data->return_ident_xlate = htons(eg_ports[1]);
		break;
	case HWPA_NSS_IPV4_NAT_MODE_DNAT: /* Ingress NAT */
		/* TODO */
		/* fall through - NAT modes not implemented, rejected below */
	case HWPA_NSS_IPV4_NAT_MODE_SNAT: /* Egress NAT */
		/* fall through */
	case HWPA_NSS_IPV4_NAT_MODE_NO_NAT: /* No NAT, port change only */
		/* TODO */
		/* fall through */
	default:
		pr_err("Bad NAT Mode!\n");
		retval = HWPA_BACKEND_ERR_INTERNAL;
		goto failure_1;
	}

	PR_DEVEL("Prepared IPv4 session\n");

failure_1:
	return retval;
}

/**
 * @fn enum hwpa_backend_rv hwpa_nss_ipv6_prepare_session(struct hwpa_nss_offloading_data*)
 * @brief prepare a nss session for the ipv6 subsystem using an avm_pa session.
 * Preparing means to use the avm_pa session to fill session data of the
 * nss session to make it ready for adding it to the hashlist.
 *
 * @param ofl_data [in] offloading data
 *
 * @return success or error code.
 */
static enum hwpa_backend_rv hwpa_nss_ipv6_prepare_session(struct hwpa_nss_offloading_data *ofl_data)
{

	enum hwpa_backend_rv retval = HWPA_BACKEND_SUCCESS;
	const struct avm_pa_session *s;
	struct hwpa_nss_nss_session *hws_nss;
	const uint16_t *ig_ports;
	const uint16_t *eg_ports;
	const struct ipv6hdr *ip6_ig, *ip6_eg;
	const struct avm_pa_egress *eg;
	const struct avm_pa_pkt_match *ig_match, *eg_match;
	struct hwpa_nss_ipv6_session_data *hws_data;

	PR_DEVEL("Preparing IPv6 session\n");

	s = ofl_data->sess_pa;
	hws_nss = ofl_data->hws_nss;
	hws_data = &hws_nss->ipv6;

	eg = ofl_data->eg;
	ig_match = ofl_data->ig_match;
	eg_match = ofl_data->eg_match;

	/* Derive the L4 protocol from the ingress pkttype; unsupported
	 * protocols abort the preparation. */
	retval = hwpa_nss_set_protocol(&hws_data->tuple.protocol, ig_match->pkttype);
	if (retval != HWPA_BACKEND_SUCCESS) {
		PR_DEVEL("Couldn't set protocol. Session preparation failed!\n");
		goto failure_1;
	}
	ofl_data->protocol = hws_data->tuple.protocol;

	/* Locate the IPv6 and port headers in both match structures.
	 * NOTE(review): ip6_ig and ig_ports are fetched but unused below —
	 * the bridged case fills the tuple solely from the egress header,
	 * presumably because both sides are identical in a bridge; confirm. */
	ip6_ig = hwpa_get_hdr(ig_match, AVM_PA_IPV6);
	ip6_eg = hwpa_get_hdr(eg_match, AVM_PA_IPV6);
	ig_ports = hwpa_get_hdr(ig_match, AVM_PA_PORTS);
	eg_ports = hwpa_get_hdr(eg_match, AVM_PA_PORTS);

	switch (ofl_data->nat_mode) {
	case HWPA_NSS_IPV6_NAT_MODE_BRIDGED: /* Bridge */
		IPV6_COPY(ip6_eg->saddr.in6_u.u6_addr32, hws_data->tuple.flow_ip);
		IPV6_COPY(ip6_eg->daddr.in6_u.u6_addr32, hws_data->tuple.return_ip);
		hws_data->tuple.flow_ident = htons(eg_ports[0]);
		hws_data->tuple.return_ident = htons(eg_ports[1]);
		break;
	case HWPA_NSS_IPV6_NAT_MODE_NO_NAT: /* No NAT */
		/* TODO */
		/* fall through - not implemented yet, rejected below */
	default:
		pr_err("Bad NAT Mode!\n");
		retval = HWPA_BACKEND_ERR_INTERNAL;
		goto failure_1;
	}

failure_1:
	return retval;
}

/**
 * @fn struct hwpa_nss_offloader *hwpa_nss_select_offloader(const struct avm_pa_session*)
 * @brief select the nss offloader matching the IP version of an avm_pa session
 *
 * @param sess_pa [in] the avm_pa session supposed to be offloaded
 *
 * @return the matching offloader or NULL for an unsupported packet type
 */
static struct hwpa_nss_offloader *hwpa_nss_select_offloader(const struct avm_pa_session *sess_pa)
{
	enum hwpa_nss_offloader_idx idx;

	/* Dispatch on the IP version encoded in the ingress pkttype. */
	switch (sess_pa->ingress.pkttype & AVM_PA_PKTTYPE_IP_MASK) {
	case AVM_PA_PKTTYPE_IPV4:
		idx = HWPA_NSS_OFFLOADER_IDX_IPV4;
		break;
	case AVM_PA_PKTTYPE_IPV6:
		idx = HWPA_NSS_OFFLOADER_IDX_IPV6;
		break;
	default:
		/*
		 * NOTE(review): HWPA_NSS_SESSION_MAX belongs to a different
		 * enum than hwpa_nss_offloader_idx. Presumably it is simply
		 * an out-of-range index so hwpa_nss_get_offloader() returns
		 * NULL — confirm and consider a dedicated sentinel of the
		 * offloader-idx enum instead.
		 */
		idx = HWPA_NSS_SESSION_MAX;
	}

	PR_DEVEL("offloader index: %d\n", idx);

	/* Returns NULL for an invalid index, signalling "unsupported". */
	return hwpa_nss_get_offloader(idx);
}

/**
 * @fn enum hwpa_backend_rv hwpa_nss_prepare_session(struct hwpa_nss_offloading_data*)
 * @brief prepare hwpa session and avm_pa session to make it searchable in
 * hashlist.
 *
 * @param ofl_data [in] offloading data
 *
 * @return success or error code.
 */
static enum hwpa_backend_rv hwpa_nss_prepare_session(struct hwpa_nss_offloading_data *ofl_data)
{
	const struct avm_pa_session *s;
	struct hwpa_nss_hwpa_session *hws_hwpa;
	struct hwpa_nss_nss_session *hws_nss;
	struct hwpa_nss_offloader *ofl;
	enum hwpa_backend_rv retval = HWPA_BACKEND_SUCCESS;

	PR_DEVEL("HWPA Preparing Session\n");
	s = ofl_data->sess_pa;
	hws_nss = ofl_data->hws_nss;
	hws_hwpa = ofl_data->hws_hwpa;

	ofl = hwpa_nss_select_offloader(s);
	if (ofl == NULL) {
		pr_err("unsupported packet-type\n");
		retval = HWPA_BACKEND_UNSUPPORTED_SESS_TYPE;
		goto failure_1;
	}

	hws_nss->offloader = ofl;

	retval = ofl->prepare_session(ofl_data);
	if (retval != HWPA_BACKEND_SUCCESS)
		goto failure_1;

	hws_nss->state = HWPA_NSS_SESSION_STATE_PREPARED;

	PR_DEVEL("Preparation finished!\n");

failure_1:
	return retval;
}

/*
 *==============================================================================
 * pending offload manager
 *==============================================================================
 */

/**
 * @fn void hwpa_nss_add_pending_offload(struct hwpa_nss_offloading_data*)
 * @brief adds a pending offload
 *
 * @param ofl_data [in] the offloading data describing the offload
 */
static void hwpa_nss_add_pending_offload(struct hwpa_nss_offloading_data *ofl_data)
{
	struct hwpa_nss_context *global_ctx = &hwpa_nss_ctx;
	struct hwpa_nss_pending_offload_manager *pom = &global_ctx->pending_offload_mgr;

	ofl_data->timestamp = jiffies;

	spin_lock_bh(&pom->lock);
	list_add_tail(&ofl_data->node, &pom->pending_offloads);
	spin_unlock_bh(&pom->lock);
}

/**
 * @fn void hwpa_nss_remove_pending_offload(struct hwpa_nss_offloading_data*)
 * @brief removes a pending offload
 *
 * @param ofl_data [in] the offloading data describing the offload
 */
static void hwpa_nss_remove_pending_offload(struct hwpa_nss_offloading_data *ofl_data)
{
	struct hwpa_nss_context *global_ctx = &hwpa_nss_ctx;
	struct hwpa_nss_pending_offload_manager *pom = &global_ctx->pending_offload_mgr;

	spin_lock_bh(&pom->lock);
	list_del(&ofl_data->node);
	spin_unlock_bh(&pom->lock);
}

/**
 * @fn struct hwpa_nss_offloading_data hwpa_nss_pom_get_and_unregister_offloading_data*(struct hwpa_nss_nss_session*)
 * @brief gets a registered pending offload and unregisters it
 *
 * @param hws_nss [in] the nss session to search the offload with
 * @return the found offloading data or NULL in case of error
 */
/*
 * Walk the pending list and unlink the entry belonging to hws_nss.
 *
 * The whole lookup runs under pom->lock: the previous implementation
 * traversed the list unlocked and only took the lock for the removal,
 * which races against concurrent add/remove from other contexts.
 * The entry is unlinked inline instead of via
 * hwpa_nss_remove_pending_offload() because that helper takes the
 * (non-recursive) lock itself.
 */
static struct hwpa_nss_offloading_data *hwpa_nss_pom_get_and_unregister_offloading_data(struct hwpa_nss_nss_session *hws_nss)
{
	struct hwpa_nss_pending_offload_manager *pom = &hwpa_nss_ctx.pending_offload_mgr;
	struct hwpa_nss_offloading_data *ofl_data, *t, *find = NULL;

	spin_lock_bh(&pom->lock);
	list_for_each_entry_safe(ofl_data, t, &pom->pending_offloads, node) {
		if (ofl_data->hws_nss == hws_nss) {
			list_del(&ofl_data->node);
			find = ofl_data;
			break;
		}
	}
	spin_unlock_bh(&pom->lock);

	return find;
}

/*
 * Forward definition
 */
static enum hwpa_backend_rv hwpa_nss_offload_session(struct hwpa_nss_offloading_data *ofl_data);

/**
 * @fn void hwpa_nss_pending_offload_manager_work(struct work_struct*)
 * @brief work function for the pending offload manager workqueue. Performs
 * actions if offload was too long ago.
 *
 * @param work [in] work struct
 */
/*
 * Fixes relative to the previous revision:
 *  - time_after_eq() arguments were reversed for both the TCP and the UDP
 *    check, so a freshly queued offload timed out immediately.
 *  - The drop loop freed ofl_data and afterwards dereferenced
 *    ofl_data->hws_nss (already destroyed) to read pa_ref_count.
 *  - The drop loop used list_for_each_entry() while freeing the current
 *    entry; the iterator then read freed memory.
 *  - The offload loop called mutex_lock() twice instead of unlocking,
 *    deadlocking the worker on the second pass.
 */
static void hwpa_nss_pending_offload_manager_work(struct work_struct *work)
{
	struct hwpa_nss_context *global_ctx = &hwpa_nss_ctx;
	struct hwpa_nss_pending_offload_manager *pom = &global_ctx->pending_offload_mgr;
	struct hwpa_nss_offloading_data *ofl_data, *t;
	struct list_head pom_to_drop, pom_to_offload;

	if (list_empty(&pom->pending_offloads))
		goto reschedule;

	INIT_LIST_HEAD(&pom_to_drop);
	INIT_LIST_HEAD(&pom_to_offload);

	/*
	 * iterate over all pending offloads and find out which ones are too
	 * old. In case the time limit for a TCP session is reached -> drop it.
	 * For UDP session perform an offload instead.
	 */
	list_for_each_entry_safe(ofl_data, t, &pom->pending_offloads, node) {
		switch (ofl_data->protocol) {
		case IPPROTO_TCP:
			/* time_after_eq(a, b) is true for a >= b, so jiffies
			 * comes first to test "deadline reached". */
			if (time_after_eq(jiffies, ofl_data->timestamp + HWPA_NSS_TCP_MAX_WAITING_TIME)) {
				hwpa_nss_remove_pending_offload(ofl_data);
				list_add_tail(&ofl_data->node, &pom_to_drop);
			}
			break;
		case IPPROTO_UDP:
		case IPPROTO_UDPLITE:
			if (time_after_eq(jiffies, ofl_data->timestamp + HWPA_NSS_UDP_MAX_WAITING_TIME)) {
				hwpa_nss_remove_pending_offload(ofl_data);
				list_add_tail(&ofl_data->node, &pom_to_offload);
			}
			break;
		default:
			PR_DEVEL("Unsupported Protocol!\n");
		}

	}

	// Drop all offloads in pom_to_drop and update counters accordingly
	list_for_each_entry_safe(ofl_data, t, &pom_to_drop, node) {
		struct hwpa_nss_subsystem *subsys;
		struct hwpa_nss_offloader *ofl;
		uint32_t pa_ref_count;

		ofl = ofl_data->hws_nss->offloader;
		subsys = ofl->subsys;

		/* Read the refcount before the sessions are torn down. */
		pa_ref_count = ofl_data->hws_nss->pa_ref_count;

		mutex_lock(&subsys->mutex);
		hwpa_nss_destroy_hwpa_session(ofl_data->hws_hwpa);
		hwpa_nss_destroy_nss_session(ofl_data->hws_nss);
		mutex_unlock(&subsys->mutex);

		spin_lock_bh(&ofl->lock);
		ofl->pending_avm_pa_session_count -= pa_ref_count;
		ofl->pending_nss_session_count--;
		spin_unlock_bh(&ofl->lock);

		/* _safe iteration allows freeing the current entry. */
		list_del(&ofl_data->node);
		kfree(ofl_data);
	}

	// Offload all offloads in pom_to_offload and update counters
	list_for_each_entry_safe(ofl_data, t, &pom_to_offload, node) {
		struct hwpa_nss_subsystem *subsys;
		struct hwpa_nss_offloader *ofl;
		bool success;
		uint32_t pa_ref_count;

		ofl = ofl_data->hws_nss->offloader;
		subsys = ofl->subsys;

		pa_ref_count = ofl_data->hws_nss->pa_ref_count;

		mutex_lock(&subsys->mutex);
		success = hwpa_nss_offload_session(ofl_data) == HWPA_BACKEND_SUCCESS;
		mutex_unlock(&subsys->mutex);

		spin_lock_bh(&ofl->lock);
		ofl->pending_avm_pa_session_count -= pa_ref_count;
		ofl->pending_nss_session_count--;
		if (success) {
			ofl->successful_nss_offloads++;
			ofl->active_nss_session_count++;
			ofl->active_avm_pa_session_count += pa_ref_count;
		} else	{
			ofl->failed_nss_offloads++;
		}
		spin_unlock_bh(&ofl->lock);
	}

reschedule:
	queue_delayed_work(pom->workqueue, &pom->work,
			   HWPA_NSS_PENDING_OFFLOAD_PERIOD);
}

/**
 * @fn enum hwpa_backend_rv hwpa_nss_pending_offload_manager_init()
 * @brief initializes pending offload manager to perform work periodically
 *
 * @return error code or success
 */
static enum hwpa_backend_rv hwpa_nss_pending_offload_manager_init(void)
{
	struct hwpa_nss_context *global_ctx = &hwpa_nss_ctx;
	struct hwpa_nss_pending_offload_manager *pom = &global_ctx->pending_offload_mgr;
	enum hwpa_backend_rv retval = HWPA_BACKEND_SUCCESS;

	PR_DEVEL("Init Pending Offload Workqueue\n");

	INIT_LIST_HEAD(&pom->pending_offloads);
	spin_lock_init(&pom->lock);

	pom->workqueue = create_singlethread_workqueue("hwpa_nss_pending_offload_manager");
	if (!pom->workqueue) {
		retval = HWPA_BACKEND_ERR_INTERNAL;
		goto failure_1;
	}

	INIT_DELAYED_WORK(&pom->work,
			  hwpa_nss_pending_offload_manager_work);

	queue_delayed_work(pom->workqueue, &pom->work,
			   HWPA_NSS_PENDING_OFFLOAD_PERIOD);

	return HWPA_BACKEND_SUCCESS;


failure_1:
	return retval;
}

/**
 * @fn void hwpa_nss_pending_offload_manager_exit()
 * @brief exits offload manager by stopping assigned workqueue
 *
 */
static void hwpa_nss_pending_offload_manager_exit(void)
{
	struct hwpa_nss_context *global_ctx = &hwpa_nss_ctx;
	struct hwpa_nss_pending_offload_manager *pom = &global_ctx->pending_offload_mgr;

	PR_DEVEL("Exit Pending Offload Manager\n");

	cancel_delayed_work_sync(&pom->work);
	destroy_workqueue(pom->workqueue);
}

/*
 *===============================================================================
 * hwpa nss offloading session classification
 *==============================================================================
 */

/**
 * @enum hwpa_nss_classification_result_value
 * @brief The result of a classification of a new nss session
 */
enum hwpa_nss_classification_result_value	{
	// ignore the new nss session (duplicate in an unusable state)
	HWPA_NSS_CLASS_RES_NEW_IGNORE		= 0,
	// offload the new nss session immediately
	HWPA_NSS_CLASS_RES_NEW_OFFLOAD		= 1,
	// queue the new session to the pending offload manager
	HWPA_NSS_CLASS_RES_NEW_QUEUE		= 2,
	// just attach the new hwpa session to an existing nss session
	HWPA_NSS_CLASS_RES_EST_ATTACH		= 3,
	// offload an older nss session from the pom and delete the new one
	HWPA_NSS_CLASS_RES_EST_OFFLOAD		= 4,
	// sentinel / "no result yet"; also indexes the "error" debug string
	HWPA_NSS_CLASS_RES_MAX			= 5,
};

#ifdef HWPA_NSS_DEBUG
/*
 * Human-readable names for debug output, indexed by
 * enum hwpa_nss_classification_result_value; the last entry ("error")
 * corresponds to HWPA_NSS_CLASS_RES_MAX.
 */
static const char * const hwpa_nss_class_val_strings[] = {
	"Ignore new session",
	"offload new session",
	"queue new session",
	"attach new session to established session",
	"offload established session",
	"error"
};
#endif

/**
 * @struct hwpa_nss_classification_result
 * @brief the result of a classification, containing a value and some information
 * for special offload scenarios
 */
struct hwpa_nss_classification_result {
	// the classification verdict
	enum hwpa_nss_classification_result_value value;
	// pending offload of the established session; set only for EST_OFFLOAD
	struct hwpa_nss_offloading_data *ofl_data_established;
	// existing nss session to attach to; set for EST_ATTACH/EST_OFFLOAD
	struct hwpa_nss_nss_session	*hws_nss_established;
	// hash of the new session; set only on the NEW_OFFLOAD path
	uint32_t hws_new_hash;
};

/**
 * @fn void hwpa_nss_classify(struct hwpa_nss_offloading_data*, struct hwpa_nss_classification_result*)
 * @brief links nss and hwpa session and decides whether to delay, offload or drop.
 *
 * @param ofl_data [in] offloading data
 * @param res [in] the result of the classification
 */
static void hwpa_nss_classify(struct hwpa_nss_offloading_data *ofl_data,
					      struct hwpa_nss_classification_result *res)
{
	uint32_t hash;
	struct hwpa_nss_nss_session *hws_nss_new, *hws_nss_established;
	struct hwpa_nss_hwpa_session *hws_hwpa;
	struct hwpa_nss_subsystem *subsys;

	hws_nss_new = ofl_data->hws_nss;
	hws_hwpa = ofl_data->hws_hwpa;
	subsys = hws_nss_new->offloader->subsys;

	/* Hash of the new session is used for all lookups below. */
	hash = subsys->gen_hash(hws_nss_new);
	res->value = HWPA_NSS_CLASS_RES_MAX;

	/*
	 * There is a (low) chance that avm_pa offloads the same session twice.
	 * this gets handled here. In that case we just add a hwpa session and
	 * clean up.
	 */
	hws_nss_established = subsys->find_nss_session(subsys, hash, hws_nss_new, HWPA_NSS_SESSION_DIRECTION_FLOW);
	if (unlikely(hws_nss_established)) {
		switch (hws_nss_established->state) {
		case HWPA_NSS_SESSION_STATE_ACTIVE:
		case HWPA_NSS_SESSION_STATE_PENDING_APPROVAL:
			/* Usable duplicate: attach to it instead of offloading. */
			hws_hwpa->direction = HWPA_NSS_SESSION_DIRECTION_FLOW;
			res->hws_nss_established = hws_nss_established;
			res->value = HWPA_NSS_CLASS_RES_EST_ATTACH;
			break;
		default:
			/* Duplicate exists but is unusable -> drop the new one. */
			res->value = HWPA_NSS_CLASS_RES_NEW_IGNORE;
			break;
		}
		goto classification_done;
	}

	/*
	 * Here bidirectional sessions, if the subsystem uses them, are handled
	 */
	if (test_bit(HWPA_NSS_SUBSYS_FLAG_BIDIRECTIONAL_SESSIONS, &subsys->flags)) {
		hws_nss_established = subsys->find_nss_session(subsys, hash, hws_nss_new, HWPA_NSS_SESSION_DIRECTION_RETURN);

		/*
		 * if we want to offload a return-direction-flow, we want to offload
		 * the corresponding flow-direction flow
		 */
		if (hws_nss_established) {
			switch (hws_nss_established->state) {
			case HWPA_NSS_SESSION_STATE_ACTIVE:
				hws_hwpa->direction = HWPA_NSS_SESSION_DIRECTION_RETURN;
				res->hws_nss_established = hws_nss_established;
				res->value = HWPA_NSS_CLASS_RES_EST_ATTACH;
				break;
			case HWPA_NSS_SESSION_STATE_PENDING_APPROVAL:
				/*
				 * Counterpart is still waiting in the pending
				 * offload manager: pull it out and offload it
				 * now instead of the new session.
				 */
				hws_hwpa->direction = HWPA_NSS_SESSION_DIRECTION_RETURN;
				res->hws_nss_established = hws_nss_established;
				res->ofl_data_established = hwpa_nss_pom_get_and_unregister_offloading_data(hws_nss_established);
				res->value = HWPA_NSS_CLASS_RES_EST_OFFLOAD;
				break;
			default:
				res->value = HWPA_NSS_CLASS_RES_NEW_IGNORE;
				break;
			}
			goto classification_done;
		}
	}

	PR_DEVEL("Session not accelerated or pending yet!\n");

	hws_hwpa->direction = HWPA_NSS_SESSION_DIRECTION_FLOW;
	hws_nss_new->state = HWPA_NSS_SESSION_STATE_READY_TO_OFFLOAD;

	// to enable pending offloading set res->value=HWPA_NSS_CLASS_RES_NEW_QUEUE;
	res->value = HWPA_NSS_CLASS_RES_NEW_OFFLOAD;

	res->hws_new_hash = hash;

classification_done:
	/*
	 * NOTE(review): hwpa_nss_class_val_strings only exists under
	 * HWPA_NSS_DEBUG; presumably PR_DEVEL discards its arguments
	 * otherwise — confirm the macro definition.
	 */
	PR_DEVEL("Classification finished with value %d [%s]!\n", res->value, hwpa_nss_class_val_strings[res->value]);
}

/*
 *===============================================================================
 * hwpa nss offloading session creation
 *==============================================================================
 */

/**
 * @fn void hwpa_nss_connection_create(struct hwpa_nss_nss_session*, struct nss_cmn_msg*)
 * @brief Protocol independent part of subsystem session creation
 *
 * @param hws_nss [in] NSS session just offloaded
 * @param cm [in] common message part of answer from nss
 */
static void hwpa_nss_connection_create(struct hwpa_nss_nss_session *hws_nss, struct nss_cmn_msg *cm)
{
	/* ACK from the firmware marks the session active. */
	if (cm->response == NSS_CMN_RESPONSE_ACK) {
		hws_nss->state = HWPA_NSS_SESSION_STATE_ACTIVE;
		return;
	}

	pr_err("An Error occurred creating NSS connection acceleration\n");
	pr_err("Response is: %d, error code is: %d\n", (int) cm->response, cm->error);
	hws_nss->state = HWPA_NSS_SESSION_STATE_INVALID;
}

/**
 * @fn void hwpa_nss_ipv4_connection_create_callback(void*, struct nss_ipv4_msg*)
 * @brief Callback for ipv4 subsystem session creation in nss
 *
 * @param app_data [in] application specific data
 * @param nim [in] Answer from NSS after Offloading attempt
 */
static void hwpa_nss_ipv4_connection_create_callback(void *app_data, struct nss_ipv4_msg *nim)
{
	/* app_data is the session pointer handed to nss_ipv4_msg_init(). */
	struct hwpa_nss_nss_session *hws_nss = app_data;

	/* Only rule-create answers are expected here. */
	if (nim->cm.type != NSS_IPV4_TX_CREATE_RULE_MSG) {
		pr_err("%p: create callback with improper type: %d\n",
		       app_data, nim->cm.type);
		return;
	}

	hwpa_nss_connection_create(hws_nss, &nim->cm);
}

/**
 * @fn void hwpa_nss_ipv6_connection_create_callback(void*, struct nss_ipv6_msg*)
 * @brief Callback for ipv6 subsystem session creation in nss
 *
 * @param app_data [in] application specific data
 * @param nim [in] Answer from NSS after Offloading attempt
 */
static void hwpa_nss_ipv6_connection_create_callback(void *app_data, struct nss_ipv6_msg *nim)
{
	/* app_data carries the session this create message was sent for */
	struct hwpa_nss_nss_session *session = app_data;

	if (nim->cm.type == NSS_IPV6_TX_CREATE_RULE_MSG) {
		hwpa_nss_connection_create(session, &nim->cm);
		return;
	}

	pr_err("%p: create callback with improper type: %d\n",
	       app_data, nim->cm.type);
}

/**
 * @fn hwpa_backend_rv hwpa_nss_ipv4_add_session(struct hwpa_nss_offloading_data *)
 * @brief Translate an ipv4 avm_pa session to a nss rule and perform the actual offload
 *
 * @param ofl_data [in] all relevant information for the offloading process
 *                      (avm_pa session, nss session to fill, matches, egress)
 *
 * @return success or error code
 */
enum hwpa_backend_rv hwpa_nss_ipv4_add_session(struct hwpa_nss_offloading_data *ofl_data)
{
	struct nss_ipv4_msg *create_msg;
	struct nss_ipv4_rule_create_msg *nircm;
	struct hwpa_nss_subsystem *subsys;
	struct hwpa_nss_nss_session *hws_nss;
	int retval;
	const struct avm_pa_egress *eg;
	const struct avm_pa_pkt_match *ig_match, *eg_match;
	/*
	 * VLAN tag counters must survive the whole match walk: the first tag
	 * goes to the primary rule, the second to the secondary rule and a
	 * third is an error. (Previously they were declared inside the loops
	 * and thus reset on every iteration, so a second tag silently
	 * overwrote the primary rule.)
	 */
	int vlan_in_cnt = 0;
	int vlan_out_cnt = 0;
	int i;

	PR_DEVEL("Adding IPv4 session\n");

	hws_nss = ofl_data->hws_nss;
	ig_match = ofl_data->ig_match;
	eg_match = ofl_data->eg_match;
	eg = ofl_data->eg;
	subsys = hws_nss->offloader->subsys;

	create_msg = kzalloc(sizeof(struct nss_ipv4_msg),
						    GFP_KERNEL);
	if (!create_msg) {
		retval = HWPA_BACKEND_ERR_MEMORY;
		goto failure_1;
	}

	/*
	 * Prepare message for sending to NSS. No return value.
	 */
	nss_ipv4_msg_init(create_msg, NSS_IPV4_RX_INTERFACE,
				   NSS_IPV4_TX_CREATE_RULE_MSG,
			sizeof(struct nss_ipv4_rule_create_msg),
			hwpa_nss_ipv4_connection_create_callback, hws_nss);

	/*
	 * Edit message to our needs
	 */
	nircm = &create_msg->msg.rule_create;
	nircm->valid_flags = 0;
	nircm->rule_flags = 0;

	/*
	 * VLAN init: mark all four slots as unconfigured; the match walk
	 * below fills in tags that are actually present.
	 */
	nircm->vlan_primary_rule.ingress_vlan_tag = HWPA_NSS_VLAN_ID_NOT_CONFIGURED;
	nircm->vlan_primary_rule.egress_vlan_tag = HWPA_NSS_VLAN_ID_NOT_CONFIGURED;
	nircm->vlan_secondary_rule.ingress_vlan_tag = HWPA_NSS_VLAN_ID_NOT_CONFIGURED;
	nircm->vlan_secondary_rule.egress_vlan_tag = HWPA_NSS_VLAN_ID_NOT_CONFIGURED;

	/*
	 * nexthop (Gateway)

	nircm->nexthop_rule.flow_nexthop = ofl_data->interfaces[HWPA_NSS_SESSION_DIRECTION_FLOW][0].ifnum;
	nircm->nexthop_rule.return_nexthop = ofl_data->interfaces[HWPA_NSS_SESSION_DIRECTION_RETURN][0].ifnum;
	nircm->valid_flags |= NSS_IPV4_RULE_CREATE_NEXTHOP_VALID;
	 */

	/*
	 * used interfaces
	 */
	nircm->conn_rule.flow_interface_num = ofl_data->interfaces[HWPA_NSS_SESSION_DIRECTION_FLOW][0].ifnum;
	nircm->conn_rule.return_interface_num = ofl_data->interfaces[HWPA_NSS_SESSION_DIRECTION_RETURN][0].ifnum;

	/*
	 * Set the mtu values.
	 */
	nircm->conn_rule.flow_mtu = eg->mtu;
	nircm->conn_rule.return_mtu = eg->mtu;

	/*
	 * Iterate over ingress and egress matches to configure the offloading
	 * message. Error checks are kept minimal here, as only whitelisted,
	 * supported sessions reach this point.
	 */
	for (i = 0; i < ig_match->nmatch; ++i) {
		const struct avm_pa_match_info *p = &ig_match->match[i];
		const void *hdr = &ig_match->hdrcopy[p->offset + ig_match->hdroff];

		PR_DEVEL("ingress %i type %x offset %x\n", i, p->type, p->offset);

		switch (p->type) {
		case AVM_PA_ETH:	{/* TODO Special Treatment for Bridges? */
			const struct ethhdr *ethh = hdr;

			ether_addr_copy((u8 *) nircm->conn_rule.flow_mac,
					(u8 *) ethh->h_source);
			break;
		}

		/* Already handled during preparation*/
		case AVM_PA_IPV4:
		case AVM_PA_PORTS:
			break;

		case AVM_PA_PPPOE:	{
			const struct pppoe_hdr *ppph = hdr;

			nircm->pppoe_rule.flow_if_num = ppph->sid;
			nircm->pppoe_rule.flow_if_exist = 1;
			nircm->valid_flags |= NSS_IPV4_RULE_CREATE_PPPOE_VALID;
			break;
		}

		case AVM_PA_VLAN:	{
			const struct vlan_hdr *vlanh = hdr;
			uint32_t vlan_value;

			/* At most two stacked VLAN tags are supported */
			if (vlan_in_cnt > 1) {
				retval = HWPA_BACKEND_ERR_INTERNAL;
				goto failure_2;
			}

			vlan_value = (ntohs(vlanh->h_vlan_encapsulated_proto) << 16) |
					(ntohs(vlanh->h_vlan_TCI));

			if (vlan_in_cnt == 0)
				nircm->vlan_primary_rule.ingress_vlan_tag = vlan_value;
			else
				nircm->vlan_secondary_rule.ingress_vlan_tag = vlan_value;

			vlan_in_cnt++;
			break;
		}

		case AVM_PA_IPV6:
			PR_DEVEL("IPV6 in IPV4 not implemented");
			/* fallthrough */
		default:
			retval = HWPA_BACKEND_ERR_INTERNAL;
			goto failure_2;
		}
	}

	for (i = 0; i < eg_match->nmatch; ++i) {
		const struct avm_pa_match_info *p = &eg_match->match[i];
		/*
		 * Use the egress header copy with the egress offsets here.
		 * (Previously the ingress hdrcopy was indexed with egress
		 * offsets; the IPv6 counterpart already reads eg_match.)
		 */
		const void *hdr = &eg_match->hdrcopy[p->offset + eg_match->hdroff];

		PR_DEVEL("egress %i type %x offset %x\n", i, p->type, p->offset);

		switch (p->type) {
		case AVM_PA_ETH:	{/* TODO Special Treatment for Bridges? */
			const struct ethhdr *ethh = hdr;

			ether_addr_copy((u8 *) nircm->conn_rule.return_mac,
					(u8 *) ethh->h_dest);
			break;
		}

		/* Already handled during preparation*/
		case AVM_PA_IPV4:
		case AVM_PA_PORTS:
			break;

		case AVM_PA_PPPOE:	{
			const struct pppoe_hdr *ppph = hdr;

			nircm->pppoe_rule.return_if_num = ppph->sid;
			/* This is the return side; flag return_if_exist, not flow_if_exist */
			nircm->pppoe_rule.return_if_exist = 1;
			nircm->valid_flags |= NSS_IPV4_RULE_CREATE_PPPOE_VALID;
			break;
		}

		case AVM_PA_VLAN:	{
			uint32_t vlan_value;
			const struct vlan_hdr *vlanh = (struct vlan_hdr *)hdr;

			/* At most two stacked VLAN tags are supported */
			if (vlan_out_cnt > 1) {
				retval = HWPA_BACKEND_ERR_INTERNAL;
				goto failure_2;
			}

			vlan_value = (ntohs(vlanh->h_vlan_encapsulated_proto) << 16) |
					(ntohs(vlanh->h_vlan_TCI));

			if (vlan_out_cnt == 0)
				nircm->vlan_primary_rule.egress_vlan_tag = vlan_value;
			else
				nircm->vlan_secondary_rule.egress_vlan_tag = vlan_value;

			vlan_out_cnt++;
			break;
		}

		case AVM_PA_IPV6:
			PR_DEVEL("IPV6 in IPV4 not implemented");
			/* fallthrough */
		default:
			retval = HWPA_BACKEND_ERR_INTERNAL;
			goto failure_2;
		}
	}

	nircm->qos_rule.flow_qos_tag = eg->output.priority;
	nircm->qos_rule.return_qos_tag = eg->output.priority;
	nircm->valid_flags |= NSS_IPV4_RULE_CREATE_QOS_VALID;

	/*
	 * Routed or bridged?
	 */
	if (ofl_data->is_routed)
		nircm->rule_flags |= NSS_IPV4_RULE_CREATE_FLAG_ROUTED;
	else
		nircm->rule_flags |= NSS_IPV4_RULE_CREATE_FLAG_BRIDGE_FLOW;

	/*
	 * Configure the IP-5-Tuple
	 * This is the central configuration
	 */
	nircm->tuple = hws_nss->ipv4.tuple;
	nircm->conn_rule.flow_ip_xlate = hws_nss->ipv4.flow_ip_xlate;
	nircm->conn_rule.flow_ident_xlate = hws_nss->ipv4.flow_ident_xlate;
	nircm->conn_rule.return_ip_xlate = hws_nss->ipv4.return_ip_xlate;
	nircm->conn_rule.return_ident_xlate = hws_nss->ipv4.return_ident_xlate;

	if (nircm->tuple.protocol == IPPROTO_TCP) {
		nircm->rule_flags |= NSS_IPV4_RULE_CREATE_FLAG_NO_SEQ_CHECK;
		nircm->valid_flags |= NSS_IPV4_RULE_CREATE_TCP_VALID;
	}

	nircm->valid_flags |= NSS_IPV4_RULE_CREATE_CONN_VALID;

	PR_DEVEL("%p: Accelerate Session\n"
			"Protocol: %d\n"
			"from_mtu: %u\n"
			"to_mtu: %u\n"
			"from_ip: %pI4h:%d\n"
			"to_ip: %pI4h:%d\n"
			"from_ip_xlate: %pI4h:%d\n"
			"to_ip_xlate: %pI4h:%d\n"
			"from_mac: %pM\n"
			"to_mac: %pM\n"
			"src_iface_num: %u\n"
			"dest_iface_num: %u\n"
			"src_nexthop_num: %u\n"
			"dest_nexthop_num: %u\n"
			"ingress_inner_vlan_tag: %x\n"
			"egress_inner_vlan_tag: %x\n"
			"ingress_outer_vlan_tag: %x\n"
			"egress_outer_vlan_tag: %x\n"
			"rule_flags: %x\n"
			"valid_flags: %x\n"
			"pppoe_return_if_exist: %u\n"
			"pppoe_return_if_num: %u\n"
			"pppoe_flow_if_exist: %u\n"
			"pppoe_flow_if_num: %u\n"
			"flow_qos_tag: %x (%u)\n"
			"return_qos_tag: %x (%u)\n"
			"igs_flow_qos_tag: %x (%u)\n"
			"igs_return_qos_tag: %x (%u)\n"
			"flow_window_scale: %u\n"
			"flow_max_window: %u\n"
			"flow_end: %u\n"
			"flow_max_end: %u\n"
			"return_window_scale: %u\n"
			"return_max_window: %u\n"
			"return_end: %u\n"
			"return_max_end: %u\n"
			"flow_dscp: %x\n"
			"return_dscp: %x\n",
			hws_nss,
			nircm->tuple.protocol,
			nircm->conn_rule.flow_mtu,
			nircm->conn_rule.return_mtu,
			&nircm->tuple.flow_ip, nircm->tuple.flow_ident,
			&nircm->tuple.return_ip, nircm->tuple.return_ident,
			&nircm->conn_rule.flow_ip_xlate, nircm->conn_rule.flow_ident_xlate,
			&nircm->conn_rule.return_ip_xlate, nircm->conn_rule.return_ident_xlate,
			nircm->conn_rule.flow_mac,
			nircm->conn_rule.return_mac,
			nircm->conn_rule.flow_interface_num,
			nircm->conn_rule.return_interface_num,
			nircm->nexthop_rule.flow_nexthop,
			nircm->nexthop_rule.return_nexthop,
			nircm->vlan_primary_rule.ingress_vlan_tag,
			nircm->vlan_primary_rule.egress_vlan_tag,
			nircm->vlan_secondary_rule.ingress_vlan_tag,
			nircm->vlan_secondary_rule.egress_vlan_tag,
			nircm->rule_flags,
			nircm->valid_flags,
			nircm->pppoe_rule.return_if_exist,
			nircm->pppoe_rule.return_if_num,
			nircm->pppoe_rule.flow_if_exist,
			nircm->pppoe_rule.flow_if_num,
			nircm->qos_rule.flow_qos_tag, nircm->qos_rule.flow_qos_tag,
			nircm->qos_rule.return_qos_tag, nircm->qos_rule.return_qos_tag,
			nircm->igs_rule.igs_flow_qos_tag, nircm->igs_rule.igs_flow_qos_tag,
			nircm->igs_rule.igs_return_qos_tag, nircm->igs_rule.igs_return_qos_tag,
			nircm->tcp_rule.flow_window_scale,
			nircm->tcp_rule.flow_max_window,
			nircm->tcp_rule.flow_end,
			nircm->tcp_rule.flow_max_end,
			nircm->tcp_rule.return_window_scale,
			nircm->tcp_rule.return_max_window,
			nircm->tcp_rule.return_end,
			nircm->tcp_rule.return_max_end,
			nircm->dscp_rule.flow_dscp,
			nircm->dscp_rule.return_dscp);

	/*
	 * Send message for rule creation
	 */
	retval = nss_ipv4_tx_sync(subsys->mgr, create_msg);
	if (retval != NSS_TX_SUCCESS) {
		pr_err("Session could not be created\n");
		retval = HWPA_BACKEND_ERR_SESS_CREATE;
		goto failure_2;
	}

	retval = HWPA_BACKEND_SUCCESS;

failure_2:
	kfree(create_msg);

failure_1:
	return retval;
}

/**
 * @fn hwpa_backend_rv hwpa_nss_ipv6_add_session(struct hwpa_nss_offloading_data *)
 * @brief Translate an ipv6 avm_pa session to a nss rule and perform the actual offload
 *
 * @param ofl_data [in] offloading data
 *
 * @return success or error code
 */
enum hwpa_backend_rv hwpa_nss_ipv6_add_session(struct hwpa_nss_offloading_data *ofl_data)
{
	struct nss_ipv6_msg *create_msg;
	struct nss_ipv6_rule_create_msg *nircm;
	struct hwpa_nss_subsystem *subsys;
	struct hwpa_nss_nss_session *hws_nss;
	int retval;
	const struct avm_pa_egress *eg;
	const struct avm_pa_pkt_match *ig_match, *eg_match;
	/*
	 * VLAN tag counters must survive the whole match walk: the first tag
	 * goes to the primary rule, the second to the secondary rule and a
	 * third is an error. (Previously they were declared inside the loops
	 * and thus reset on every iteration, so a second tag silently
	 * overwrote the primary rule.)
	 */
	int vlan_in_cnt = 0;
	int vlan_out_cnt = 0;
	int i;

	PR_DEVEL("Adding IPv6 session\n");

	hws_nss = ofl_data->hws_nss;
	ig_match = ofl_data->ig_match;
	eg_match = ofl_data->eg_match;
	eg = ofl_data->eg;
	subsys = hws_nss->offloader->subsys;

	create_msg = kzalloc(sizeof(struct nss_ipv6_msg),
						    GFP_KERNEL);
	if (!create_msg) {
		retval = HWPA_BACKEND_ERR_MEMORY;
		goto failure_1;
	}

	/*
	 * Prepare message for sending to NSS. No return value.
	 */
	nss_ipv6_msg_init(create_msg, NSS_IPV6_RX_INTERFACE,
				   NSS_IPV6_TX_CREATE_RULE_MSG,
			sizeof(struct nss_ipv6_rule_create_msg),
			hwpa_nss_ipv6_connection_create_callback, hws_nss);

	/*
	 * Edit message to our needs
	 */
	nircm = &create_msg->msg.rule_create;
	nircm->valid_flags = 0;
	nircm->rule_flags = 0;

	/*
	 * VLAN init: mark all four slots as unconfigured; the match walk
	 * below fills in tags that are actually present.
	 */
	nircm->vlan_primary_rule.ingress_vlan_tag = HWPA_NSS_VLAN_ID_NOT_CONFIGURED;
	nircm->vlan_primary_rule.egress_vlan_tag = HWPA_NSS_VLAN_ID_NOT_CONFIGURED;
	nircm->vlan_secondary_rule.ingress_vlan_tag = HWPA_NSS_VLAN_ID_NOT_CONFIGURED;
	nircm->vlan_secondary_rule.egress_vlan_tag = HWPA_NSS_VLAN_ID_NOT_CONFIGURED;

	/*
	 * nexthop (Gateway)
	nircm->nexthop_rule.flow_nexthop = ofl_data->interfaces[HWPA_NSS_SESSION_DIRECTION_FLOW][0].ifnum;
	nircm->nexthop_rule.return_nexthop = ofl_data->interfaces[HWPA_NSS_SESSION_DIRECTION_FLOW][1].ifnum;
	nircm->valid_flags |= NSS_IPV6_RULE_CREATE_NEXTHOP_VALID;
	 */

	/*
	 * used interfaces
	 */
	nircm->conn_rule.flow_interface_num = ofl_data->interfaces[HWPA_NSS_SESSION_DIRECTION_FLOW][0].ifnum;
	nircm->conn_rule.return_interface_num = ofl_data->interfaces[HWPA_NSS_SESSION_DIRECTION_RETURN][0].ifnum;

	/*
	 * Set the mtu values.
	 */
	nircm->conn_rule.flow_mtu = eg->mtu;
	nircm->conn_rule.return_mtu = eg->mtu;

	/*
	 * Iterate over ingress and egress matches to configure the offloading
	 * message. Error checks are kept minimal here, as only whitelisted,
	 * supported sessions reach this point.
	 */
	for (i = 0; i < ig_match->nmatch; ++i) {
		const struct avm_pa_match_info *p = &ig_match->match[i];
		const void *hdr = &ig_match->hdrcopy[p->offset + ig_match->hdroff];

		PR_DEVEL("ingress %i type %x offset %x\n", i, p->type, p->offset);

		switch (p->type) {
		case AVM_PA_ETH:	{/* TODO Special Treatment for Bridges? */
			const struct ethhdr *ethh = hdr;

			ether_addr_copy((u8 *) nircm->conn_rule.flow_mac,
					(u8 *) ethh->h_source);
			break;
		}

		/* Already handled during preparation*/
		case AVM_PA_IPV6:
		case AVM_PA_PORTS:
			break;

		case AVM_PA_PPPOE:	{
			const struct pppoe_hdr *ppph = hdr;

			nircm->pppoe_rule.flow_if_num = ppph->sid;
			nircm->pppoe_rule.flow_if_exist = 1;
			/* This is an IPv6 rule message; use the IPv6 flag constant */
			nircm->valid_flags |= NSS_IPV6_RULE_CREATE_PPPOE_VALID;
			break;
		}

		case AVM_PA_VLAN:	{
			const struct vlan_hdr *vlanh = hdr;
			uint32_t vlan_value;

			/* At most two stacked VLAN tags are supported */
			if (vlan_in_cnt > 1) {
				retval = HWPA_BACKEND_ERR_INTERNAL;
				goto failure_2;
			}

			vlan_value = (ntohs(vlanh->h_vlan_encapsulated_proto) << 16) |
					(ntohs(vlanh->h_vlan_TCI));

			if (vlan_in_cnt == 0)
				nircm->vlan_primary_rule.ingress_vlan_tag = vlan_value;
			else
				nircm->vlan_secondary_rule.ingress_vlan_tag = vlan_value;

			vlan_in_cnt++;
			break;
		}

		case AVM_PA_IPV4:
			PR_DEVEL("IPV4 in IPV6 not implemented");
			/* fallthrough */
		default:
			retval = HWPA_BACKEND_ERR_INTERNAL;
			goto failure_2;
		}
	}

	for (i = 0; i < eg_match->nmatch; ++i) {
		const struct avm_pa_match_info *p = &eg_match->match[i];
		const void *hdr = &eg_match->hdrcopy[p->offset + eg_match->hdroff];

		PR_DEVEL("egress %i type %x offset %x\n", i, p->type, p->offset);

		switch (p->type) {
		case AVM_PA_ETH:	{/* TODO Special Treatment for Bridges? */
			const struct ethhdr *ethh = hdr;

			ether_addr_copy((u8 *) nircm->conn_rule.return_mac,
					(u8 *) ethh->h_dest);
			break;
		}

		/* Already handled during preparation*/
		case AVM_PA_IPV6:
		case AVM_PA_PORTS:
			break;

		case AVM_PA_PPPOE:	{
			const struct pppoe_hdr *ppph = hdr;

			nircm->pppoe_rule.return_if_num = ppph->sid;
			/* This is the return side; flag return_if_exist, not flow_if_exist */
			nircm->pppoe_rule.return_if_exist = 1;
			/* This is an IPv6 rule message; use the IPv6 flag constant */
			nircm->valid_flags |= NSS_IPV6_RULE_CREATE_PPPOE_VALID;
			break;
		}

		case AVM_PA_VLAN:	{
			uint32_t vlan_value;
			const struct vlan_hdr *vlanh = (struct vlan_hdr *)hdr;

			/* At most two stacked VLAN tags are supported */
			if (vlan_out_cnt > 1) {
				retval = HWPA_BACKEND_ERR_INTERNAL;
				goto failure_2;
			}

			vlan_value = (ntohs(vlanh->h_vlan_encapsulated_proto) << 16) |
					(ntohs(vlanh->h_vlan_TCI));

			if (vlan_out_cnt == 0)
				nircm->vlan_primary_rule.egress_vlan_tag = vlan_value;
			else
				nircm->vlan_secondary_rule.egress_vlan_tag = vlan_value;

			vlan_out_cnt++;
			break;
		}

		case AVM_PA_IPV4:
			PR_DEVEL("IPV4 in IPV6 not implemented");
			/* fallthrough */
		default:
			retval = HWPA_BACKEND_ERR_INTERNAL;
			goto failure_2;
		}
	}

	nircm->qos_rule.flow_qos_tag = eg->output.priority;
	nircm->qos_rule.return_qos_tag = eg->output.priority;
	nircm->valid_flags |= NSS_IPV6_RULE_CREATE_QOS_VALID;

	/*
	 * Routed or bridged? Use the mode determined during the early
	 * analysis, as the IPv4 path does.
	 */
	if (ofl_data->is_routed)
		nircm->rule_flags |= NSS_IPV6_RULE_CREATE_FLAG_ROUTED;
	else
		nircm->rule_flags |= NSS_IPV6_RULE_CREATE_FLAG_BRIDGE_FLOW;

	/*
	 * Configure the IP-5-Tuple
	 * This is the central configuration
	 */
	nircm->tuple = hws_nss->ipv6.tuple;

	if (nircm->tuple.protocol == IPPROTO_TCP) {
		nircm->rule_flags |= NSS_IPV6_RULE_CREATE_FLAG_NO_SEQ_CHECK;
		nircm->valid_flags |= NSS_IPV6_RULE_CREATE_TCP_VALID;
	}

	nircm->valid_flags |= NSS_IPV6_RULE_CREATE_CONN_VALID;
	nircm->rule_flags |= NSS_IPV6_RULE_CREATE_FLAG_SRC_INTERFACE_CHECK;

	PR_DEVEL("%p: Accelerate Session\n"
				"Protocol: %d\n"
				"from_mtu: %u\n"
				"to_mtu: %u\n"
				"from_ip: %pI6h:%d\n"
				"to_ip: %pI6h:%d\n"
				"from_mac: %pM\n"
				"to_mac: %pM\n"
				"src_iface_num: %u\n"
				"dest_iface_num: %u\n"
				"src_nexthop_num: %u\n"
				"dest_nexthop_num: %u\n"
				"ingress_inner_vlan_tag: %x\n"
				"egress_inner_vlan_tag: %x\n"
				"ingress_outer_vlan_tag: %x\n"
				"egress_outer_vlan_tag: %x\n"
				"rule_flags: %x\n"
				"valid_flags: %x\n"
				"pppoe_return_if_exist: %u\n"
				"pppoe_return_if_num: %u\n"
				"pppoe_flow_if_exist: %u\n"
				"pppoe_flow_if_num: %u\n"
				"flow_qos_tag: %x (%u)\n"
				"return_qos_tag: %x (%u)\n"
				"igs_flow_qos_tag: %x (%u)\n"
				"igs_return_qos_tag: %x (%u)\n"
				"flow_window_scale: %u\n"
				"flow_max_window: %u\n"
				"flow_end: %u\n"
				"flow_max_end: %u\n"
				"return_window_scale: %u\n"
				"return_max_window: %u\n"
				"return_end: %u\n"
				"return_max_end: %u\n"
				"flow_dscp: %x\n"
				"return_dscp: %x\n",
				hws_nss,
				nircm->tuple.protocol,
				nircm->conn_rule.flow_mtu,
				nircm->conn_rule.return_mtu,
				&nircm->tuple.flow_ip, nircm->tuple.flow_ident,
				&nircm->tuple.return_ip, nircm->tuple.return_ident,
				nircm->conn_rule.flow_mac,
				nircm->conn_rule.return_mac,
				nircm->conn_rule.flow_interface_num,
				nircm->conn_rule.return_interface_num,
				nircm->nexthop_rule.flow_nexthop,
				nircm->nexthop_rule.return_nexthop,
				nircm->vlan_primary_rule.ingress_vlan_tag,
				nircm->vlan_primary_rule.egress_vlan_tag,
				nircm->vlan_secondary_rule.ingress_vlan_tag,
				nircm->vlan_secondary_rule.egress_vlan_tag,
				nircm->rule_flags,
				nircm->valid_flags,
				nircm->pppoe_rule.return_if_exist,
				nircm->pppoe_rule.return_if_num,
				nircm->pppoe_rule.flow_if_exist,
				nircm->pppoe_rule.flow_if_num,
				nircm->qos_rule.flow_qos_tag, nircm->qos_rule.flow_qos_tag,
				nircm->qos_rule.return_qos_tag, nircm->qos_rule.return_qos_tag,
				nircm->igs_rule.igs_flow_qos_tag, nircm->igs_rule.igs_flow_qos_tag,
				nircm->igs_rule.igs_return_qos_tag, nircm->igs_rule.igs_return_qos_tag,
				nircm->tcp_rule.flow_window_scale,
				nircm->tcp_rule.flow_max_window,
				nircm->tcp_rule.flow_end,
				nircm->tcp_rule.flow_max_end,
				nircm->tcp_rule.return_window_scale,
				nircm->tcp_rule.return_max_window,
				nircm->tcp_rule.return_end,
				nircm->tcp_rule.return_max_end,
				nircm->dscp_rule.flow_dscp,
				nircm->dscp_rule.return_dscp);

	/*
	 * Send message for rule creation
	 */
	retval = nss_ipv6_tx_sync(subsys->mgr, create_msg);
	if (retval != NSS_TX_SUCCESS) {
		pr_err("Session could not be created\n");
		retval = HWPA_BACKEND_ERR_SESS_CREATE;
		goto failure_2;
	}

	retval = HWPA_BACKEND_SUCCESS;

failure_2:
	kfree(create_msg);

failure_1:
	return retval;
}

/**
 * @fn enum hwpa_backend_rv hwpa_nss_offload_session(struct hwpa_nss_offloading_data*)
 * @brief perform the actual subsystem-specific offload and update tracker
 *
 * @param ofl_data [in] offloading_data
 *
 * @return success or error code
 */
static enum hwpa_backend_rv hwpa_nss_offload_session(struct hwpa_nss_offloading_data *ofl_data)
{
	enum hwpa_backend_rv retval = HWPA_BACKEND_SUCCESS;
	struct hwpa_nss_nss_session *hws_nss;
	const struct avm_pa_session *s;
	struct hwpa_nss_offloader *ofl;
	struct hwpa_nss_subsystem *subsys;

	hws_nss = ofl_data->hws_nss;
	s = ofl_data->sess_pa;
	ofl = hws_nss->offloader;

	subsys = ofl->subsys;

	retval = hwpa_nss_tracker_add_nss_session(subsys);
	if (retval != HWPA_BACKEND_SUCCESS) {
		pr_err("Offloading limit for subsystem %s reached", subsys->label);
		goto failure_1;
	}

	PR_DEVEL("Adding NSS session %p to subsystem %s with offloader %s\n", hws_nss, subsys->label, ofl->label);

	retval = ofl->add_session(ofl_data);
	if (retval != HWPA_BACKEND_SUCCESS)
		goto failure_2;

	if (hws_nss->state == HWPA_NSS_SESSION_STATE_ACTIVE) {
		PR_DEVEL("Session created successfully!\n");
		retval = HWPA_BACKEND_SUCCESS;
	} else	{
		pr_err("Session could not be offloaded!\n");
		retval = HWPA_BACKEND_ERR_SESS_CREATE;
		goto failure_2;
	}

	return retval;

failure_2:
	hwpa_nss_tracker_remove_nss_session(subsys);

failure_1:
	return retval;
}

/**
 * @fn struct net_device hwpa_nss_get_and_hold_dev_master*(struct net_device*)
 * @brief get master of net_device
 *
 * @param dev [in] net_device
 * @return master or NULL in case of error
 */
static struct net_device *hwpa_nss_get_and_hold_dev_master(struct net_device *dev)
{
	struct net_device *upper;

	/* The upper-device pointer is RCU-protected; take a reference
	 * before leaving the read-side critical section.
	 */
	rcu_read_lock();
	upper = netdev_master_upper_dev_get_rcu(dev);
	if (upper)
		dev_hold(upper);
	rcu_read_unlock();

	return upper;
}

/**
 * @fn enum hwpa_backend_rv hwpa_nss_handle_bridged_pppoe_traffic(struct hwpa_nss_offloading_data*)
 * @brief Perform early checks for pppoe traffic over bridge
 *
 * @param ofl_data [in] all relevant information for the offloading process
 * @return success or error code
 */
static enum hwpa_backend_rv hwpa_nss_handle_bridged_pppoe_traffic(struct hwpa_nss_offloading_data *ofl_data)
{
	const struct pppoe_hdr *ig_hdr, *eg_hdr;

	eg_hdr = hwpa_get_hdr(ofl_data->eg_match, AVM_PA_PPPOE);
	ig_hdr = hwpa_get_hdr(ofl_data->ig_match, AVM_PA_PPPOE);

	/* No PPPoE header on either side -> nothing to check */
	if (!eg_hdr && !ig_hdr)
		return HWPA_BACKEND_SUCCESS;

	/* PPPoE-over-bridge acceleration must be enabled in the NSS */
	if (nss_pppoe_get_br_accel_mode() == NSS_PPPOE_BR_ACCEL_MODE_DIS) {
		PR_DEVEL("PPPoE bridge flow acceleration is disabled\n");
		return HWPA_BACKEND_ERR_INTERNAL;
	}

	/* Multicast inside a PPPoE bridge flow cannot be accelerated */
	if (ofl_data->eg_match->casttype == AVM_PA_IS_MULTICAST) {
		PR_DEVEL("Multicast in PPPoE bridge is not supported\n");
		return HWPA_BACKEND_ERR_INTERNAL;
	}

	return HWPA_BACKEND_SUCCESS;
}

/**
 * @fn enum hwpa_backend_rv hwpa_nss_build_hierarchy(struct hwpa_nss_offloading_data*)
 * @brief Build Interface hierarchy. So far we are cheating here by only adding
 * the in and out interfaces.
 *
 * @param ofl_data [in] offloading data
 * @return success or error code
 */
static enum hwpa_backend_rv hwpa_nss_build_hierarchy(struct hwpa_nss_offloading_data *ofl_data)
{
	int32_t ifnum;

	/*
	 * The ingress device is the first hop of the flow direction and the
	 * last hop of the return direction; the egress device is the mirror
	 * image. A negative interface number means the device is unknown
	 * to the NSS.
	 */
	ifnum = nss_cmn_get_interface_number_by_dev(ofl_data->in);
	if (ifnum < 0)
		return HWPA_BACKEND_ERR_INTERNAL;

	ofl_data->interfaces[HWPA_NSS_SESSION_DIRECTION_FLOW][0].ifnum = ifnum;
	ofl_data->interfaces[HWPA_NSS_SESSION_DIRECTION_RETURN][1].ifnum = ifnum;

	ifnum = nss_cmn_get_interface_number_by_dev(ofl_data->out);
	if (ifnum < 0)
		return HWPA_BACKEND_ERR_INTERNAL;

	ofl_data->interfaces[HWPA_NSS_SESSION_DIRECTION_FLOW][1].ifnum = ifnum;
	ofl_data->interfaces[HWPA_NSS_SESSION_DIRECTION_RETURN][0].ifnum = ifnum;

	return HWPA_BACKEND_SUCCESS;
}

/**
 * @fn enum hwpa_backend_rv hwpa_nss_fill_nat_mode(struct hwpa_nss_offloading_data*)
 * @brief determine NAT-Mode of a avm_pa session and save it in the offloading_data
 *
 * @param ofl_data [in] offloading data
 * @return success or error code
 */
static inline enum hwpa_backend_rv  hwpa_nss_fill_nat_mode(struct hwpa_nss_offloading_data *ofl_data)
{
	const struct avm_pa_session *sess = ofl_data->sess_pa;
	bool snat_ip, dnat_ip, snat_port, dnat_port;

	snat_ip = !!(sess->mod.modflags & AVM_PA_MOD_SADDR);
	dnat_ip = !!(sess->mod.modflags & AVM_PA_MOD_DADDR);
	snat_port = !!(sess->mod.modflags & AVM_PA_MOD_SPORT);
	dnat_port = !!(sess->mod.modflags & AVM_PA_MOD_DPORT);

	/* Simultaneous source and destination NAT is not supported */
	if ((snat_ip && dnat_ip) || (snat_port && dnat_port)) {
		pr_err("Bad NAT Mode for session %p\n", sess);
		return HWPA_BACKEND_ERR_INTERNAL;
	}

	switch (sess->ingress.pkttype & AVM_PA_PKTTYPE_IP_MASK) {
	case AVM_PA_PKTTYPE_IPV4:
		if (!ofl_data->is_routed) {
			ofl_data->nat_mode = HWPA_NSS_IPV4_NAT_MODE_BRIDGED;
		} else if (dnat_ip) {
			ofl_data->nat_mode = HWPA_NSS_IPV4_NAT_MODE_DNAT;
		} else if (snat_ip) {
			ofl_data->nat_mode = HWPA_NSS_IPV4_NAT_MODE_SNAT;
		} else if (snat_port || dnat_port) {
			ofl_data->nat_mode = HWPA_NSS_IPV4_NAT_MODE_NO_NAT;
		} else {
			PR_DEVEL("IPV4: Bad nat mode!\n");
			return HWPA_BACKEND_ERR_INTERNAL;
		}
		break;

	case AVM_PA_PKTTYPE_IPV6:
		/* IPv6 knows no address translation here, only port rewrites */
		if (!ofl_data->is_routed) {
			ofl_data->nat_mode = HWPA_NSS_IPV6_NAT_MODE_BRIDGED;
		} else if (snat_port || dnat_port) {
			ofl_data->nat_mode = HWPA_NSS_IPV6_NAT_MODE_NO_NAT;
		} else {
			PR_DEVEL("IPV6: No NAT for IPV6!\n");
			return HWPA_BACKEND_ERR_INTERNAL;
		}
		break;

	default:
		PR_DEVEL("L2 Protocol not supported!\n");
		return HWPA_BACKEND_ERR_INTERNAL;
	}

	PR_DEVEL("Determined NAT Mode %d for session %p\n", ofl_data->nat_mode,
		 ofl_data->sess_pa);

	return HWPA_BACKEND_SUCCESS;
}

/**
 * @fn static bool hwpa_nss_determine_if_session_can_be_accelerated(struct hwpa_nss_offloading_data *)
 * @brief do an early analysis of the avm_pa session to sort out flows, which can
 * not be handled by nss.
 *
 * @param ofl_data [in/out]  all relevant information for the offloading process
 * @return true if session can potentially be accelerated by nss. false otherwise
 */
static bool hwpa_nss_determine_if_session_can_be_accelerated(struct hwpa_nss_offloading_data *ofl_data)
{
	const struct avm_pa_session *sess_pa;
	const struct avm_pa_egress *eg;
	const struct avm_pa_pkt_match *ig_match, *eg_match;
	struct net_device *in, *out, *bridge = NULL;
	bool is_routed;
	enum hwpa_backend_rv retval;

	sess_pa = ofl_data->sess_pa;
	eg = ofl_data->eg;
	ig_match = ofl_data->ig_match;
	eg_match = ofl_data->eg_match;
	is_routed = hwpa_nss_is_routed(sess_pa);

	/* NSS cannot accelerate local traffic */
	if (eg->type == avm_pa_egresstype_local) {
		PR_DEVEL("Not Accelerating local traffic");
		goto failure_1;
	}

	/* Broadcast Traffic is not supported either */
	if (eg_match->casttype == AVM_PA_IS_BROADCAST) {
		PR_DEVEL("Not accelerating broadcast traffic\n");
		goto failure_1;
	}

	/* Multicast Traffic is not supported either */
	if (eg_match->casttype == AVM_PA_IS_MULTICAST) {
		PR_DEVEL("Not accelerating multicast traffic\n");
		goto failure_1;
	}

	/* From here on we hold a reference on 'out'; every exit path below
	 * must drop it (see the failure_* ladder at the bottom).
	 */
	out = hwpa_get_netdev(eg->pid_handle);
	if (unlikely(!out)) {
		PR_DEVEL("out net_device could not be gathered\n");
		goto failure_1;
	}

	if (is_routed) {
		in = hwpa_get_netdev(sess_pa->ingress_pid_handle);
		if (unlikely(!in)) {
			PR_DEVEL("Could not get in netdevice!\n");
			goto failure_2;
		}
	} else	{
		/* Bridged path: perform additional bridge-level sanity checks.
		 * NOTE(review): eg_ethhdr/ig_ethhdr are assumed non-NULL for
		 * bridged traffic (an ETH match should always be present) --
		 * the dereferences below do not re-check; confirm against the
		 * whitelist in session preparation.
		 */
		const struct ethhdr *eg_ethhdr, *ig_ethhdr;

		eg_ethhdr = (const struct ethhdr *) hwpa_get_hdr(eg_match, AVM_PA_ETH);
		ig_ethhdr = (const struct ethhdr *) hwpa_get_hdr(ig_match, AVM_PA_ETH);

		bridge = hwpa_nss_get_and_hold_dev_master(out);
		/*
		 * If traffic is going over a sta interface there is no master.
		 * So we need to diverge from the ECM approach, which assumes
		 * that every netdevice used for bridged traffic is assigned to
		 * a bridge.
		 */
		if (!bridge) {
			PR_DEVEL("No bridge for bridged traffic for netdev %s\n", out->name);

			in = hwpa_get_netdev(sess_pa->ingress_pid_handle);
			if (unlikely(!in)) {
				PR_DEVEL("Could not get in netdevice!\n");
				goto failure_2;
			}

			/*
			 * As mentioned earlier: dont throw error when no bridge
			 * assigned to netdev and just continue and skip the
			 * bridge stuff.
			 */
			goto skip_bridge_stuff;
		}

		in = hwpa_get_netdev(sess_pa->ingress_pid_handle);

		if (!in) {
			PR_DEVEL("Could not get in netdevice!\n");
			goto failure_3;
		}

		/* Same device on both sides only works with hairpin mode */
		if (in == out) {
			if (!br_is_hairpin_enabled(in)) {
				PR_DEVEL("hairpin not enabled\n");
				goto failure_4;
			}
		}

		/* Packet sourced by the bridge itself is really routed traffic */
		if (ether_addr_equal(eg_ethhdr->h_source, bridge->dev_addr)) {
			PR_DEVEL("Ignoring routed packet to bridge\n");
			goto failure_4;
		}

		/* Destination must be known to the bridge's forwarding database */
		if (eg_match->casttype == AVM_PA_IS_UNICAST) {
			if (!br_fdb_has_entry(out, eg_ethhdr->h_dest, 0)) {
				PR_DEVEL("No fdb entry for mac\n");
				goto failure_4;
			}
		}

skip_bridge_stuff:
		/*
		 * We don't support acceleration of bridging flows with MAC
		 * Address Translation (MAT). So sort them out here.
		 */
		if (!ether_addr_equal(eg_ethhdr->h_source, ig_ethhdr->h_source) ||
		    !ether_addr_equal(eg_ethhdr->h_dest,   ig_ethhdr->h_dest)) {
			PR_DEVEL("MAT Acceleration not supported!\n");
			goto failure_4;
		}

		if (hwpa_nss_handle_bridged_pppoe_traffic(ofl_data) != HWPA_BACKEND_SUCCESS) {
			PR_DEVEL("Couldn't handle PPPoE flow\n");
			goto failure_4;
		}
	}

	PR_DEVEL("Valid session\n");

	/* Checks passed: record the gathered devices and flow classification */
	ofl_data->is_routed = is_routed;
	ofl_data->in = in;
	ofl_data->out = out;
	ofl_data->bridge = bridge;

	/* Determine NAT Mode */
	retval = hwpa_nss_fill_nat_mode(ofl_data);
	if (retval != HWPA_BACKEND_SUCCESS) {
		PR_DEVEL("Invalid NAT Mode for session %p\n", sess_pa);
		goto failure_4;
	}

	/* Temporarily only accelerate bridging traffic */
	if (ofl_data->nat_mode != HWPA_NSS_IPV4_NAT_MODE_BRIDGED &&
			ofl_data->nat_mode != HWPA_NSS_IPV6_NAT_MODE_BRIDGED) {
		PR_DEVEL("Only bridged sessions supported (session %p)\n", sess_pa);
		retval = HWPA_BACKEND_ERR_INTERNAL;
		goto failure_4;
	}

	retval = hwpa_nss_build_hierarchy(ofl_data);
	if (retval != HWPA_BACKEND_SUCCESS) {
		PR_DEVEL("Could not build hierarchy for session %p\n", sess_pa);
		goto failure_4;
	}

	/* Success: the device references were only needed for the analysis,
	 * so they are dropped here as well.
	 */
	dev_put(ofl_data->in);
	dev_put(ofl_data->out);
	if (ofl_data->bridge)
		dev_put(ofl_data->bridge);

	return (retval == HWPA_BACKEND_SUCCESS);

	/* Failure ladder: drop exactly the references acquired so far */
failure_4:
	dev_put(in);

failure_3:
	if (bridge)
		dev_put(bridge);

failure_2:
	dev_put(out);

failure_1:
	return false;
}

/**
 * @fn enum hwpa_backend_rv hwpa_backend_probe_session(const struct avm_pa_session*, unsigned long*)
 * @brief probe session
 *
 * @param sess_pa [in] avm_pa session to offload
 * @param handle_out [in] handle of the created hwpa_session
 *
 * @return success or error code
 */
enum hwpa_backend_rv hwpa_backend_probe_session(const struct avm_pa_session *sess_pa,
							unsigned long *handle_out)
{
	struct net_device *in, *out;
	int32_t ifnum;
	enum hwpa_backend_rv retval = HWPA_BACKEND_ERR_INTERNAL;

	out = hwpa_get_netdev(avm_pa_first_egress(sess_pa)->pid_handle);
	if (unlikely(!out)) {
		PR_DEVEL("Could not get out netdevice!\n");
		goto failure_1;
	}

	in = hwpa_get_netdev(sess_pa->ingress_pid_handle);
	if (unlikely(!in)) {
		dev_put(out);
		PR_DEVEL("Could not get in netdevice!\n");
		goto failure_1;
	}

	ifnum = nss_cmn_get_interface_number_by_dev(in);
	if (ifnum < 0)
		goto failure_2;

	ifnum = nss_cmn_get_interface_number_by_dev(out);
	if (ifnum < 0)
		goto failure_2;

	retval = HWPA_BACKEND_SUCCESS;

failure_2:
	dev_put(in);
	dev_put(out);

failure_1:
	*handle_out = hw_handle_invalid;
	return retval;
}

/**
 * @fn enum hwpa_backend_rv hwpa_backend_add_session(const struct avm_pa_session*, unsigned long*)
 * @brief Decide what to do with an avm_pa session and perform an action
 * accordingly. The possible outcomes are an error, to ignore the session, to
 * just register the hwpa-session to an existing nss session, to offload a newly
 * created nss session, to queue the new session to the pom or to offload a
 * peding session.
 *
 * @param sess_pa [in] avm_pa session to offload
 * @param handle_out [in] handle of the created hwpa_session
 *
 * @return success or error code
 */
enum hwpa_backend_rv hwpa_backend_add_session(const struct avm_pa_session *sess_pa,
							unsigned long *handle_out)
{
	struct hwpa_nss_offloader *ofl;
	struct hwpa_nss_hwpa_session *hws_hwpa;
	struct hwpa_nss_nss_session *hws_nss_new, *hws_nss_for_attach;
	enum hwpa_backend_rv retval;
	struct hwpa_nss_context *global_ctx = &hwpa_nss_ctx;
	struct hwpa_nss_subsystem *subsys;
	struct hwpa_nss_offloading_data *ofl_data;
	struct hwpa_nss_classification_result class_res;
	uint32_t pa_ref_count;

	ofl_data = kmalloc(sizeof(*ofl_data), GFP_KERNEL);
	if (!ofl_data) {
		retval = HWPA_BACKEND_ERR_MEMORY;
		goto failure_1;
	}

	ofl_data->sess_pa = sess_pa;
	ofl_data->eg = avm_pa_first_egress(sess_pa);
	ofl_data->eg_match = &ofl_data->eg->match;
	ofl_data->ig_match = &sess_pa->ingress;
	INIT_LIST_HEAD(&ofl_data->node);

	/* Sort out flows which are not accelerateable at an early stage */
	if (!hwpa_nss_determine_if_session_can_be_accelerated(ofl_data)) {
		PR_DEVEL("Not Accelerating Session %p\n", sess_pa);
		goto failure_2;
	}

	PR_DEVEL("Accelerating Session %p\n", sess_pa);

	hws_hwpa = (struct hwpa_nss_hwpa_session *) kmem_cache_zalloc(global_ctx->kmem_hwpa, GFP_KERNEL);
	if (!hws_hwpa) {
		retval = HWPA_BACKEND_ERR_CACHE;
		goto failure_4;
	}
	hwpa_nss_init_hwpa_session(hws_hwpa);
	ofl_data->hws_hwpa = hws_hwpa;

	hws_nss_new = (struct hwpa_nss_nss_session *) kmem_cache_zalloc(global_ctx->kmem_nss, GFP_KERNEL);
	if (!hws_nss_new) {
		retval = HWPA_BACKEND_ERR_CACHE;
		goto failure_5;
	}
	hwpa_nss_init_nss_session(hws_nss_new);
	ofl_data->hws_nss = hws_nss_new;

	retval = hwpa_nss_prepare_session(ofl_data);
	if (retval != HWPA_BACKEND_SUCCESS) {
		PR_DEVEL("Error during session preparation.\n");
		goto failure_6;
	}

	/*
	 * Assumption: Subsystem and offloader of hws_nss_new doesn't
	 * differ from subsystem of hws_nss after classification
	 */
	ofl = hws_nss_new->offloader;
	subsys = ofl->subsys;

	/* The classification and registration of a session have to be executed
	 * within a single mutex subsystem context because otherwise there
	 * is a race condition between finding just registered sessions and
	 * registering/creating new sessions.
	 */
	mutex_lock(&subsys->mutex);

	/* Decide what to do with the new avm_pa session */
	hwpa_nss_classify(ofl_data, &class_res);

#ifdef HWPA_NSS_DEBUG
	WARN_ON((uint32_t)class_res.value >= (uint32_t)HWPA_NSS_CLASS_RES_MAX);
#endif

	if (class_res.value == HWPA_NSS_CLASS_RES_EST_OFFLOAD ||
			class_res.value == HWPA_NSS_CLASS_RES_NEW_OFFLOAD ||
			class_res.value == HWPA_NSS_CLASS_RES_NEW_QUEUE) {
		PR_DEVEL("Registering NSS Session: %p\n", hws_nss_new);
		hwpa_nss_register_nss_session(hws_nss_new, class_res.hws_new_hash);
	}

	if (class_res.value != HWPA_NSS_CLASS_RES_NEW_IGNORE) {
		if (class_res.value == HWPA_NSS_CLASS_RES_EST_ATTACH)
			hws_nss_for_attach = class_res.hws_nss_established;
		else
			hws_nss_for_attach = hws_nss_new;
		PR_DEVEL("Attaching HWPA Session %p to NSS Session %p\n",
			hws_hwpa, hws_nss_for_attach);
		hwpa_nss_attach_to_nss_session(hws_nss_for_attach,
					       hws_hwpa);
	}

	if (class_res.value == HWPA_NSS_CLASS_RES_NEW_QUEUE) {
		PR_DEVEL("Registering ofl_data: %p\n", ofl_data);
		hwpa_nss_add_pending_offload(ofl_data);
		ofl->pending_avm_pa_session_count++;
		ofl->pending_nss_session_count++;
	}

	if (class_res.value == HWPA_NSS_CLASS_RES_NEW_OFFLOAD) {
		PR_DEVEL("Offloading ofl_data: %p\n", ofl_data);
		retval = hwpa_nss_offload_session(ofl_data);
		if (retval != HWPA_BACKEND_SUCCESS)
			goto failure_7;
	}

	if (class_res.value == HWPA_NSS_CLASS_RES_EST_OFFLOAD) {
		PR_DEVEL("Offloading ofl_data: %p\n", class_res.ofl_data_established);

		spin_lock_bh(&ofl->lock);
		ofl->pending_nss_session_count--;
		ofl->pending_avm_pa_session_count -= class_res.ofl_data_established->hws_nss->pa_ref_count;
		spin_unlock_bh(&ofl->lock);

		retval = hwpa_nss_offload_session(class_res.ofl_data_established);
		if (retval != HWPA_BACKEND_SUCCESS)
			goto failure_7;
	}

	if (class_res.value == HWPA_NSS_CLASS_RES_NEW_IGNORE) {
		PR_DEVEL("Dropping hwpa session: %p\n", hws_hwpa);
		hwpa_nss_destroy_hwpa_session(hws_hwpa);
		*handle_out = -1;
	} else	{
		*handle_out = hwpa_nss_session_to_handle(hws_hwpa);
	}

	if (class_res.value == HWPA_NSS_CLASS_RES_NEW_IGNORE ||
			class_res.value == HWPA_NSS_CLASS_RES_EST_OFFLOAD ||
			class_res.value == HWPA_NSS_CLASS_RES_EST_ATTACH) {
		PR_DEVEL("Dropping nss session: %p\n", hws_nss_new);
		hwpa_nss_destroy_nss_session(hws_nss_new);
	}

	if (class_res.value != HWPA_NSS_CLASS_RES_NEW_QUEUE) {
		PR_DEVEL("Dropping new ofl_data: %p\n", ofl_data);
		hwpa_nss_remove_pending_offload(ofl_data);
		kfree(ofl_data);
	}

	if (class_res.value == HWPA_NSS_CLASS_RES_EST_OFFLOAD) {
		PR_DEVEL("Dropping established ofl_data: %p\n", class_res.ofl_data_established);
		hwpa_nss_remove_pending_offload(class_res.ofl_data_established);
		kfree(class_res.ofl_data_established);
	}

	mutex_unlock(&ofl->subsys->mutex);

	spin_lock_bh(&ofl->lock);

	/* Update session counters */
	switch (class_res.value) {
	case HWPA_NSS_CLASS_RES_NEW_OFFLOAD:
		ofl->successful_nss_offloads++;
		ofl->active_avm_pa_session_count++;
		ofl->active_nss_session_count++;
		break;
	case HWPA_NSS_CLASS_RES_NEW_QUEUE:
		ofl->pending_avm_pa_session_count++;
		ofl->pending_nss_session_count++;
		break;
	case HWPA_NSS_CLASS_RES_EST_ATTACH:
		ofl->active_avm_pa_session_count++;
		break;
	case HWPA_NSS_CLASS_RES_EST_OFFLOAD:
		ofl->successful_nss_offloads++;
		ofl->active_nss_session_count++;
		ofl->pending_nss_session_count--;
		pa_ref_count = class_res.ofl_data_established->hws_nss->pa_ref_count;
		ofl->pending_avm_pa_session_count -= pa_ref_count;
		ofl->active_avm_pa_session_count += pa_ref_count;
		break;
	default:
		break;
	}
	spin_unlock_bh(&ofl->lock);

	return HWPA_BACKEND_SUCCESS;

failure_7:
	spin_lock_bh(&ofl->lock);

	/* So far we can only get here if classifier decides for
	 * HWPA_NSS_CLASS_RES_NEW_OFFLOAD or HWPA_NSS_CLASS_RES_EST_OFFLOAD.
	 * Here we also need a session counter update.
	 */
	switch (class_res.value) {
	case HWPA_NSS_CLASS_RES_NEW_OFFLOAD:
		ofl->failed_nss_offloads++;
		break;
	case HWPA_NSS_CLASS_RES_EST_OFFLOAD:
		ofl->failed_nss_offloads++;
		ofl->pending_nss_session_count--;
		pa_ref_count = class_res.ofl_data_established->hws_nss->pa_ref_count;
		ofl->pending_avm_pa_session_count -= pa_ref_count;
		break;
	default:
		break;
	}
	spin_unlock_bh(&ofl->lock);

	hwpa_nss_destroy_hwpa_session(hws_hwpa);
	hws_hwpa = NULL;
	hwpa_nss_destroy_nss_session(hws_nss_new);
	hws_nss_new = NULL;
	mutex_unlock(&ofl->subsys->mutex);

failure_6:
	if (hws_nss_new)
		kmem_cache_free(global_ctx->kmem_nss, hws_nss_new);

failure_5:
	if (hws_hwpa)
		kmem_cache_free(global_ctx->kmem_hwpa, hws_hwpa);

failure_4:

failure_2:
	kfree(ofl_data);

failure_1:
	*handle_out = -1;
	return (enum hwpa_backend_rv) retval;
}

/*
 *===============================================================================
 * hwpa nss offloading session removal
 *==============================================================================
 */

/**
 * @fn void hwpa_nss_connection_destroy(struct hwpa_nss_nss_session*, struct nss_cmn_msg*)
 * @brief Protocol independent part of subsystem session destruction
 *
 * @param hws_nss [in] NSS session just destroyed
 * @param cm [in] common message part of answer from nss
 */
static void hwpa_nss_connection_destroy(struct hwpa_nss_nss_session *hws_nss, struct nss_cmn_msg *cm)
{
	/* Map the firmware's reply onto the session state machine. */
	if (cm->response != NSS_CMN_RESPONSE_ACK) {
		/* A "no connection entry" error means the session was already
		 * gone in the NSS, so removing it is treated as harmless. */
		if (cm->error == NSS_IPV4_DR_NO_CONNECTION_ENTRY_ERROR || cm->error == NSS_IPV6_DR_NO_CONNECTION_ENTRY_ERROR)
			PR_DEVEL("Trying to remove non-existing session");
		else	{
			pr_err("An Error occurred destroying NSS connection acceleration\n");
			pr_err("Error Code: %d", cm->error);
		}
		/* NOTE(review): the mapping looks inverted — every NACK (even a
		 * real error) yields INVALID, which the removal path treats as
		 * "removed successfully", while an ACK yields BROKEN, which it
		 * treats as a failed deacceleration. Confirm the intended
		 * semantics of INVALID vs. BROKEN against the state enum. */
		hws_nss->state = HWPA_NSS_SESSION_STATE_INVALID;
	} else {
		hws_nss->state = HWPA_NSS_SESSION_STATE_BROKEN;
	}
}

/**
 * @fn void hwpa_nss_ipv4_connection_destroy_callback(void*, struct nss_ipv4_msg*)
 * @brief ipv4 rule destroy callback
 *
 * @param app_data [in] app data. The subsystem.
 * @param nim [in] the answer to a destroy_rule_msg for ipv4
 */
static void hwpa_nss_ipv4_connection_destroy_callback(void *app_data, struct nss_ipv4_msg *nim)
{
	/* app_data carries the session this destroy request was sent for. */
	struct hwpa_nss_nss_session *hws_nss = (struct hwpa_nss_nss_session *) app_data;

	if (nim->cm.type != NSS_IPV4_TX_DESTROY_RULE_MSG) {
		/* Fixed log text: this is the destroy callback, the old
		 * message wrongly claimed "ported create callback". */
		pr_err("%p: destroy callback with improper type: %d\n",
		       hws_nss, nim->cm.type);
		return;
	}

	/* Delegate the protocol-independent state handling. */
	hwpa_nss_connection_destroy(hws_nss, &nim->cm);
}

/**
 * @fn void hwpa_nss_ipv6_connection_destroy_callback(void*, struct nss_ipv6_msg*)
 * @brief ipv6 rule destroy callback
 *
 * @param app_data [in] app data. The subsystem.
 * @param nim [in] the answer to a destroy_rule_msg for ipv6
 */
static void hwpa_nss_ipv6_connection_destroy_callback(void *app_data, struct nss_ipv6_msg *nim)
{
	/* app_data carries the session this destroy request was sent for. */
	struct hwpa_nss_nss_session *hws_nss = (struct hwpa_nss_nss_session *) app_data;

	if (nim->cm.type != NSS_IPV6_TX_DESTROY_RULE_MSG) {
		/* Fixed log text: this is the destroy callback, the old
		 * message wrongly claimed "ported create callback". */
		pr_err("%p: destroy callback with improper type: %d\n",
		       hws_nss, nim->cm.type);
		return;
	}

	/* Delegate the protocol-independent state handling. */
	hwpa_nss_connection_destroy(hws_nss, &nim->cm);
}

/**
 * @fn enum hwpa_backend_rv hwpa_nss_ipv4_remove_session(struct hwpa_nss_subsystem*, struct hwpa_nss_nss_session*)
 * @brief Remove Session from ipv4 subsystem
 *
 * @param subsys [in] the subsystem
 * @param hws_nss [in] nss session to destroy
 *
 * @return success or error code
 */
static enum hwpa_backend_rv hwpa_nss_ipv4_remove_session(struct hwpa_nss_subsystem *subsys,
							 struct hwpa_nss_nss_session *hws_nss)
{
	uint32_t retval;
	struct nss_ipv4_msg *rem_msg;

	rem_msg = kzalloc(sizeof(struct nss_ipv4_msg),
						 GFP_KERNEL);

	if (!rem_msg) {
		pr_err("Memory Error During Session Removal\n");
		retval = HWPA_BACKEND_ERR_MEMORY;
		goto failure_1;
	}

	nss_ipv4_msg_init(rem_msg, NSS_IPV4_RX_INTERFACE,
				   NSS_IPV4_TX_DESTROY_RULE_MSG,
				   sizeof(struct nss_ipv4_rule_destroy_msg),
				   hwpa_nss_ipv4_connection_destroy_callback, hws_nss);

	rem_msg->msg.rule_destroy.tuple = hws_nss->ipv4.tuple;

	PR_DEVEL("%p: Deaccelerate Session\n"
			"Protocol: %d\n"
			"from_ip: %pI4h:%d\n"
			"to_ip: %pI4h:%d\n",
			hws_nss,
			rem_msg->msg.rule_destroy.tuple.protocol,
			&rem_msg->msg.rule_destroy.tuple.flow_ip,
			rem_msg->msg.rule_destroy.tuple.flow_ident,
			&rem_msg->msg.rule_destroy.tuple.return_ip,
			rem_msg->msg.rule_destroy.tuple.return_ident);

	retval = nss_ipv4_tx(subsys->mgr, rem_msg);
	if (retval != NSS_TX_SUCCESS) {
		PR_DEVEL("IPV4 acceleration rule could not be removed\n");
		retval = HWPA_BACKEND_ERR_SESS_REM;
		goto failure_2;
	}

failure_2:
	kfree(rem_msg);

failure_1:
	return (enum hwpa_backend_rv) retval;
}

/**
 * @fn enum hwpa_backend_rv hwpa_nss_ipv6_remove_session(struct hwpa_nss_subsystem*, struct hwpa_nss_nss_session*)
 * @brief Remove Session from ipv6 subsystem
 *
 * @param subsys [in] the subsystem
 * @param hws_nss [in] nss session to destroy
 *
 * @return success or error code
 */
static enum hwpa_backend_rv hwpa_nss_ipv6_remove_session(struct hwpa_nss_subsystem *subsys,
							 struct hwpa_nss_nss_session *hws_nss)
{
	uint32_t retval;
	struct nss_ipv6_msg *rem_msg;

	rem_msg = kzalloc(sizeof(struct nss_ipv6_msg),
						 GFP_KERNEL);

	if (!rem_msg) {
		pr_err("Memory Error During Session Removal\n");
		retval = HWPA_BACKEND_ERR_MEMORY;
		goto failure_1;
	}

	nss_ipv6_msg_init(rem_msg, NSS_IPV6_RX_INTERFACE,
				   NSS_IPV6_TX_DESTROY_RULE_MSG,
				   sizeof(struct nss_ipv6_rule_destroy_msg),
				   hwpa_nss_ipv6_connection_destroy_callback, hws_nss);

	rem_msg->msg.rule_destroy.tuple = hws_nss->ipv6.tuple;

	retval = nss_ipv6_tx(subsys->mgr, rem_msg);
	if (retval != NSS_TX_SUCCESS) {
		PR_DEVEL("IPV6 acceleration rule could not be removed\n");
		retval = HWPA_BACKEND_ERR_SESS_REM;
		goto failure_2;
	}

failure_2:
	kfree(rem_msg);

failure_1:
	return (enum hwpa_backend_rv) retval;
}

/**
 * @fn enum hwpa_backend_rv hwpa_backend_rem_session(unsigned long)
 * @brief implementation of the hwpa_backend-API function for session removal
 *
 * @param handle [in] the hwpa session
 *
 * @return success or error code
 */
enum hwpa_backend_rv hwpa_backend_rem_session(unsigned long handle)
{
	enum hwpa_backend_rv retval = HWPA_BACKEND_SUCCESS;
	struct hwpa_nss_hwpa_session *hws_hwpa;
	struct hwpa_nss_nss_session *hws_nss;
	struct hwpa_nss_subsystem *subsys;
	struct hwpa_nss_offloader *ofl;
	struct hwpa_nss_offloading_data *ofl_data;

	/* get hwpa session from avm_pa session */
	hws_hwpa = hwpa_nss_handle_to_session(handle);
	if (!hws_hwpa) {
		retval = HWPA_BACKEND_ERR_BAD_HANDLE;
		goto finished;
	}

	hws_nss = hws_hwpa->hws_nss;

	ofl = hws_nss->offloader;

	subsys = ofl->subsys;

	PR_DEVEL("Removing HWPA session: %p\n", hws_hwpa);

	/* Serializes against classification/registration in add_session. */
	mutex_lock(&subsys->mutex);

	/* firstly destroy hwpa session */
	hwpa_nss_destroy_hwpa_session(hws_hwpa);

	/*
	 * depending on whether the nss session is offloaded or pending update
	 * counters accordingly
	 */
	spin_lock_bh(&ofl->lock);
	switch (hws_nss->state) {
	case HWPA_NSS_SESSION_STATE_ACTIVE:
		ofl->active_avm_pa_session_count--;
		break;
	case HWPA_NSS_SESSION_STATE_PENDING_APPROVAL:
		ofl->pending_avm_pa_session_count--;
		break;
	default:
		break;
	}
	spin_unlock_bh(&ofl->lock);

	/* if the nss session has more hwpa sessions attached cleanup and ret */
	if (hws_nss->pa_ref_count > 0) {
		mutex_unlock(&subsys->mutex);
		retval = HWPA_BACKEND_SUCCESS;
		goto finished;
	}

	/*
	 * If hwps session list in nss session is empty but there are still
	 * sessions assigned to it according to the pa_ref_counter there is sth
	 * going wrong
	 */
	WARN_ON(!list_empty(&hws_nss->hwpa_session_list));

	/*
	 * According to nss session state perform action and update session
	 * counters
	 */
	switch (hws_nss->state) {
	case HWPA_NSS_SESSION_STATE_ACTIVE:
		PR_DEVEL("Removing NSS session %p from subsystem %s with offloader %s\n",
			 hws_nss, subsys->label, ofl->label);

		retval = ofl->remove_session(subsys, hws_nss);

		hwpa_nss_tracker_remove_nss_session(subsys);

		/* NOTE(review): remove_session() completes via an asynchronous
		 * NSS callback which updates hws_nss->state; checking the
		 * state immediately afterwards assumes the callback has
		 * already run — confirm this is guaranteed here. */
		if (hws_nss->state == HWPA_NSS_SESSION_STATE_INVALID) {
			PR_DEVEL("NSS Session removed successfully!\n");
			retval = HWPA_BACKEND_SUCCESS;
		} else	{
			PR_DEVEL("NSS Session could not be deaccelerated!\n");
			retval = HWPA_BACKEND_ERR_SESS_REM;
		}
		spin_lock_bh(&ofl->lock);
		ofl->active_nss_session_count--;
		spin_unlock_bh(&ofl->lock);
		break;
	case HWPA_NSS_SESSION_STATE_PENDING_APPROVAL:
		/* A queued-but-not-yet-offloaded session only needs its
		 * pending offload bookkeeping removed. */
		ofl_data = hwpa_nss_pom_get_and_unregister_offloading_data(hws_nss);
		kfree(ofl_data);
		spin_lock_bh(&ofl->lock);
		ofl->pending_nss_session_count--;
		spin_unlock_bh(&ofl->lock);
		break;
	case HWPA_NSS_SESSION_STATE_READY_TO_OFFLOAD:
		break;
	case HWPA_NSS_SESSION_STATE_INVALID:
		break;
	default:
		PR_DEVEL("Bad Session!\n");
	}

	/* Last reference gone: release the local session object itself. */
	hwpa_nss_destroy_nss_session(hws_nss);

	mutex_unlock(&subsys->mutex);

	/* Note: overrides any error from the deacceleration above. */
	retval = HWPA_BACKEND_SUCCESS;

finished:
	return retval;
}

/*
 *==============================================================================
 * hwpa nss purging
 *==============================================================================
 */

/**
 * @fn void hwpa_nss_ipv4_purge_sessions(struct hwpa_nss_subsystem*)
 * @brief purge all ipv4 sessions
 *
 * @param subsys [in] the subsytem
 */
void hwpa_nss_ipv4_purge_sessions(struct hwpa_nss_subsystem *subsys)
{
	struct hwpa_nss_ipv4_specific *ipv4_spec = subsys->ipv4_spec;
	struct hwpa_nss_nss_session *hws_nss;
	struct hwpa_nss_offloader *ofl;
	int i;

	/*
	 * remove_session() allocates with GFP_KERNEL and may therefore sleep,
	 * which is forbidden inside an RCU read-side critical section. Walk
	 * the table under the subsystem mutex instead, which also serializes
	 * against session registration and removal.
	 */
	mutex_lock(&subsys->mutex);
	hash_for_each(ipv4_spec->session_table, i, hws_nss, node) {
		ofl = hws_nss->offloader;
		ofl->remove_session(subsys, hws_nss);
	}
	mutex_unlock(&subsys->mutex);
}

/**
 * @fn void hwpa_nss_ipv6_purge_sessions(struct hwpa_nss_subsystem*)
 * @brief purge all ipv6 sessions
 *
 * @param subsys [in] the subsytem
 */
void hwpa_nss_ipv6_purge_sessions(struct hwpa_nss_subsystem *subsys)
{
	struct hwpa_nss_ipv6_specific *ipv6_spec = subsys->ipv6_spec;
	struct hwpa_nss_nss_session *hws_nss;
	struct hwpa_nss_offloader *ofl;
	int i;

	/*
	 * remove_session() allocates with GFP_KERNEL and may therefore sleep,
	 * which is forbidden inside an RCU read-side critical section. Walk
	 * the table under the subsystem mutex instead, which also serializes
	 * against session registration and removal.
	 */
	mutex_lock(&subsys->mutex);
	hash_for_each(ipv6_spec->session_table, i, hws_nss, node) {
		ofl = hws_nss->offloader;
		ofl->remove_session(subsys, hws_nss);
	}
	mutex_unlock(&subsys->mutex);
}

/**
 * @fn void hwpa_nss_subsystem_purge_sessions(struct hwpa_nss_subsystem*)
 * @brief purge all sessions which are offloaded in a subsystem
 *
 * @param subsys [in] the subsytem
 */
void hwpa_nss_subsystem_purge_sessions(struct hwpa_nss_subsystem *subsys)
{
	/* Nothing to purge for a subsystem without tracked sessions. */
	if (!subsys->tracker->usage)
		return;

	PR_DEVEL("Purging sessions of subsystem %s", subsys->label);

	/* Dispatch to the subsystem-specific purge implementation. */
	subsys->purge_sessions(subsys);
}

/**
 * @fn void hwpa_nss_purge_sessions(void)
 * @brief purge all still offloaded sessions from nss
 */
void hwpa_nss_purge_sessions(void)
{
	struct hwpa_nss_context *global_ctx = &hwpa_nss_ctx;
	int i;

	for (i = 0; i < ARRAY_SIZE(global_ctx->subsystems); ++i)
		hwpa_nss_subsystem_purge_sessions(global_ctx->subsystems[i]);
}

/*
 *===============================================================================
 * hwpa nss ipv4 synchronization
 *==============================================================================
 */

/**
 * @fn enum hwpa_backend_rv hwpa_nss_ipv4_sync_session(struct hwpa_nss_subsystem*, struct nss_ipv4_conn_sync*)
 * @brief sync a nss session. Finds local nss session from sync and updates its stats
 *
 * @param subsys [in] the subsytem which belongs to the sync message.
 * @param sync [in] ipv4 subsystem sync message.
 */
static enum hwpa_backend_rv hwpa_nss_ipv4_sync_session(struct hwpa_nss_subsystem *subsys,
						       struct nss_ipv4_conn_sync *sync)
{
	struct hwpa_nss_nss_session *hws_nss;
	/*
	 * Must not be static: a static local carried a stale error code over
	 * into later, unrelated calls and corrupted their return value.
	 */
	enum hwpa_backend_rv retval = HWPA_BACKEND_SUCCESS;

	PR_DEVEL("sync ipv4 session, reason: %d\n", sync->reason);
	/* We only want stats when we asked for them */
	switch (sync->reason) {
	case NSS_IPV4_RULE_SYNC_REASON_STATS:
		break;
	case NSS_IPV4_RULE_SYNC_REASON_FLUSH:
	case NSS_IPV4_RULE_SYNC_REASON_EVICT:
		pr_warn("NSS Session got removed by NSS\n");
		/* fall through */
	case NSS_IPV4_RULE_SYNC_REASON_DESTROY:
	default:
		goto ignore_sync;
	}

	hws_nss = hwpa_nss_ipv4_find_nss_session_from_sync(subsys, sync);
	if (!hws_nss) {
		retval = HWPA_BACKEND_ERR_INVALID_SYNC;
		goto failure_1;
	}

	if (hws_nss->state == HWPA_NSS_SESSION_STATE_INVALID)
		goto ignore_sync;

	/* Accumulate the per-direction deltas under the sync lock;
	 * hwpa_backend_stats() consumes and resets these counters. */
	spin_lock_bh(&hws_nss->sync_lock);

	hws_nss->stats.flow_rx_bytes += sync->flow_rx_byte_count;
	hws_nss->stats.flow_rx_pkts += sync->flow_rx_packet_count;
	hws_nss->stats.flow_tx_bytes += sync->flow_tx_byte_count;
	hws_nss->stats.flow_tx_pkts += sync->flow_tx_packet_count;
	hws_nss->stats.return_rx_bytes += sync->return_rx_byte_count;
	hws_nss->stats.return_rx_pkts += sync->return_rx_packet_count;
	hws_nss->stats.return_tx_bytes += sync->return_tx_byte_count;
	hws_nss->stats.return_tx_pkts += sync->return_tx_packet_count;

	spin_unlock_bh(&hws_nss->sync_lock);

	PR_DEVEL("hws_nss->stats.flow_rx_bytes: %d\n"
		"hws_nss->stats.flow_tx_bytes: %d\n"
		"hws_nss->stats.return_rx_bytes: %d\n"
		"hws_nss->stats.return_tx_bytes: %d\n",
		hws_nss->stats.flow_rx_bytes,
		hws_nss->stats.flow_tx_bytes,
		hws_nss->stats.return_rx_bytes,
		hws_nss->stats.return_tx_bytes);

	/* Signal hwpa_backend_stats() that fresh data is available. */
	set_bit(HWPA_NSS_SESSION_SYNC_FLOW_UPDATED, &hws_nss->flags);
	set_bit(HWPA_NSS_SESSION_SYNC_RETURN_UPDATED, &hws_nss->flags);

failure_1:
ignore_sync:
	return retval;
}

/**
 * @fn void hwpa_nss_ipv4_net_dev_callback(void*, struct nss_ipv4_msg*)
 * @brief ipv4 subsystem callback
 *
 * @param app_data [in] application specific data. Used for subsystem.
 * @param nim [in] reply message from nss
 */
static void hwpa_nss_ipv4_net_dev_callback(void *app_data,
					  struct nss_ipv4_msg *nim)
{
	struct hwpa_nss_subsystem *subsys = (struct hwpa_nss_subsystem *) app_data;

	/* Only single-connection stats sync messages are of interest here. */
	if (nim->cm.type != NSS_IPV4_RX_CONN_STATS_SYNC_MSG)
		return;

	hwpa_nss_ipv4_sync_session(subsys, &nim->msg.conn_stats);
}

/**
 * @fn void hwpa_nss_ipv4_sync_many_callback(void*, struct nss_ipv4_msg*)
 * @brief callback function used as a reply from a sync_many message from nss
 *
 * @param app_data [in] application specific data. not used here.
 * @param nim [in] reply message from nss
 */
static void hwpa_nss_ipv4_sync_many_callback(void *app_data,
					  struct nss_ipv4_msg *nim)
{
	uint32_t index;
	struct hwpa_nss_subsystem *subsys = hwpa_nss_get_subsys(HWPA_NSS_SUBSYSTEM_IDX_IPV4);
	struct hwpa_nss_synchronizer *sync = subsys->sync;
	struct nss_ipv4_conn_sync_many_msg *sync_many_msg = &nim->msg.conn_stats_many;
	struct nss_ipv4_conn_sync_many_msg *global_sync_many_msg;

	PR_DEVEL("IPV4 Sync Many Callback\n");

	if (nim->cm.type != NSS_IPV4_TX_CONN_STATS_SYNC_MANY_MSG)
		return;

	global_sync_many_msg = &sync->msg.ipv4->msg.conn_stats_many;

	if (nim->cm.response == NSS_CMN_RESPONSE_ACK) {
		for (index = 0; index < sync_many_msg->count; index++) {
			hwpa_nss_ipv4_sync_session(subsys,
					  &(sync_many_msg->conn_sync[index]));
		}

		spin_lock(&sync->lock);
		global_sync_many_msg->index = sync_many_msg->next;
		spin_unlock(&sync->lock);
		/* Send next sync_many-msg*/
		queue_delayed_work(sync->workqueue, &sync->work, 0);
	} else	{
		spin_lock(&sync->lock);
		global_sync_many_msg->index = 0;
		spin_unlock(&sync->lock);
		queue_delayed_work(sync->workqueue, &sync->work, HWPA_NSS_STATS_SYNC_PERIOD);
	}
}

/**
 * @fn void hwpa_nss_ipv4_sync_work(struct work_struct*)
 * @brief work function for the ipv4 sync workqueue
 *
 * @param work [in] work struct
 */
static void hwpa_nss_ipv4_sync_work(struct work_struct *work)
{
	struct delayed_work *dwork = container_of(work, struct delayed_work, work);
	struct hwpa_nss_subsystem *subsys = hwpa_nss_get_subsys(HWPA_NSS_SUBSYSTEM_IDX_IPV4);
	struct hwpa_nss_synchronizer *sync = subsys->sync;
	struct nss_ipv4_msg *msg = sync->msg.ipv4;

	/* Only poll while sessions are offloaded; on a successful tx the
	 * sync_many callback takes over the rescheduling. */
	if (subsys->tracker->usage != 0 &&
	    nss_ipv4_tx_with_size(subsys->mgr, msg, PAGE_SIZE) == NSS_TX_SUCCESS)
		return;

	/* Reset the request and retry after the regular sync period. */
	spin_lock(&sync->lock);
	msg->msg.conn_stats_many.count = 0;
	msg->msg.conn_stats_many.index = 0;
	spin_unlock(&sync->lock);
	queue_delayed_work(sync->workqueue, dwork,
			   HWPA_NSS_STATS_SYNC_PERIOD);
}

/**
 * @fn void hwpa_nss_ipv4_sync_exit(struct hwpa_nss_subsystem*)
 * @brief exit ipv4 subsystem synchronization
 *
 * @param subsys [in] handle of the subsystem
 */
static void hwpa_nss_ipv4_sync_exit(struct hwpa_nss_subsystem *subsys)
{
	struct hwpa_nss_synchronizer *synchronizer = subsys->sync;

	/* Stop the poller before tearing down the resources it uses. */
	cancel_delayed_work_sync(&synchronizer->work);
	destroy_workqueue(synchronizer->workqueue);
	nss_ipv4_conn_sync_many_notify_unregister();
	kfree(synchronizer->msg.ipv4);
}

/**
 * @fn enum hwpa_backend_rv hwpa_nss_ipv4_sync_init(struct hwpa_nss_subsystem*)
 * @brief initialize ipv4 subsystem synchronization
 *
 * @param subsys [in] handle of the subsystem
 *
 * @return success or error code
 */
static enum hwpa_backend_rv hwpa_nss_ipv4_sync_init(struct hwpa_nss_subsystem *subsys)
{
	enum hwpa_backend_rv retval = HWPA_BACKEND_SUCCESS;
	struct hwpa_nss_synchronizer *sync = subsys->sync;
	struct nss_ipv4_conn_sync_many_msg *nicsm;
	struct nss_ipv4_msg *msg;

	PR_DEVEL("IPV4 Sync init\n");

	spin_lock_init(&sync->lock);

	/* One page holds the message plus the conn_sync entries the NSS may
	 * fill, bounded by nicsm->size below. */
	msg = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!msg) {
		retval = HWPA_BACKEND_ERR_MEMORY;
		goto failure_1;
	}

	sync->msg.ipv4 = msg;

	nss_ipv4_conn_sync_many_notify_register(hwpa_nss_ipv4_sync_many_callback);

	nss_ipv4_msg_init(msg, NSS_IPV4_RX_INTERFACE,
		NSS_IPV4_TX_CONN_STATS_SYNC_MANY_MSG,
		sizeof(struct nss_ipv4_conn_sync_many_msg),
		NULL,
		(void *) subsys);

	nicsm = &msg->msg.conn_stats_many;
	nicsm->index = 0;
	nicsm->size = PAGE_SIZE;

	sync->workqueue = create_singlethread_workqueue("hwpa_nss_ipv4_sync_workqueue");
	if (!sync->workqueue) {
		retval = HWPA_BACKEND_ERR_SYNC;
		goto failure_2;
	}

	INIT_DELAYED_WORK(&sync->work,
			  hwpa_nss_ipv4_sync_work);

	/* Kick off the periodic stats polling. */
	queue_delayed_work(sync->workqueue, &sync->work,
			   HWPA_NSS_STATS_SYNC_PERIOD);

	return HWPA_BACKEND_SUCCESS;

failure_2:
	nss_ipv4_conn_sync_many_notify_unregister();
	kfree(msg);
	/* Don't leave a dangling pointer behind after freeing the message. */
	sync->msg.ipv4 = NULL;

failure_1:
	return retval;
}

/*
 *===============================================================================
 * hwpa nss ipv6 synchronization
 *==============================================================================
 */

/**
 * @fn enum hwpa_backend_rv hwpa_nss_ipv6_sync_session(struct hwpa_nss_subsystem*, struct nss_ipv6_conn_sync*)
 * @brief sync an nss session. Finds local nss session from sync and updates its stats
 *
 * @param subsys [in] the subsytem which belongs to the sync message.
 * @param sync [in] ipv6 subsystem sync message.
 */
static enum hwpa_backend_rv hwpa_nss_ipv6_sync_session(struct hwpa_nss_subsystem *subsys,
						       struct nss_ipv6_conn_sync *sync)
{
	struct hwpa_nss_nss_session *hws_nss;
	/*
	 * Must not be static: a static local carried a stale error code over
	 * into later, unrelated calls and corrupted their return value.
	 */
	enum hwpa_backend_rv retval = HWPA_BACKEND_SUCCESS;

	PR_DEVEL("Syncing ipv6 session\n");

	/* We only want stats when we asked for them */
	switch (sync->reason) {
	case NSS_IPV6_RULE_SYNC_REASON_STATS:
		break;
	case NSS_IPV6_RULE_SYNC_REASON_FLUSH:
	case NSS_IPV6_RULE_SYNC_REASON_EVICT:
		pr_warn("NSS Session got removed by NSS\n");
		/* fall through */
	case NSS_IPV6_RULE_SYNC_REASON_DESTROY:
	default:
		goto ignore_sync;
	}

	hws_nss = hwpa_nss_ipv6_find_nss_session_from_sync(subsys, sync);
	if (!hws_nss) {
		retval = HWPA_BACKEND_ERR_INVALID_SYNC;
		goto failure_1;
	}

	if (hws_nss->state == HWPA_NSS_SESSION_STATE_INVALID)
		goto ignore_sync;

	/* Accumulate the per-direction deltas under the sync lock;
	 * hwpa_backend_stats() consumes and resets these counters. */
	spin_lock_bh(&hws_nss->sync_lock);

	hws_nss->stats.flow_rx_bytes += sync->flow_rx_byte_count;
	hws_nss->stats.flow_rx_pkts += sync->flow_rx_packet_count;
	hws_nss->stats.flow_tx_bytes += sync->flow_tx_byte_count;
	hws_nss->stats.flow_tx_pkts += sync->flow_tx_packet_count;
	hws_nss->stats.return_rx_bytes += sync->return_rx_byte_count;
	hws_nss->stats.return_rx_pkts += sync->return_rx_packet_count;
	hws_nss->stats.return_tx_bytes += sync->return_tx_byte_count;
	hws_nss->stats.return_tx_pkts += sync->return_tx_packet_count;

	spin_unlock_bh(&hws_nss->sync_lock);

	PR_DEVEL("hws_nss->stats.flow_rx_bytes: %d\n"
		"hws_nss->stats.flow_tx_bytes: %d\n"
		"hws_nss->stats.return_rx_bytes: %d\n"
		"hws_nss->stats.return_tx_bytes: %d\n",
		hws_nss->stats.flow_rx_bytes,
		hws_nss->stats.flow_tx_bytes,
		hws_nss->stats.return_rx_bytes,
		hws_nss->stats.return_tx_bytes);

	/* Signal hwpa_backend_stats() that fresh data is available. */
	set_bit(HWPA_NSS_SESSION_SYNC_FLOW_UPDATED, &hws_nss->flags);
	set_bit(HWPA_NSS_SESSION_SYNC_RETURN_UPDATED, &hws_nss->flags);

failure_1:
ignore_sync:
	return retval;
}

/**
 * @fn void hwpa_nss_ipv6_net_dev_callback(void*, struct nss_ipv6_msg*)
 * @brief ipv6 subsystem callback
 *
 * @param app_data [in] application specific data. Used for subsystem.
 * @param nim [in] reply message from nss
 */
static void hwpa_nss_ipv6_net_dev_callback(void *app_data,
					  struct nss_ipv6_msg *nim)
{
	struct hwpa_nss_subsystem *subsys = (struct hwpa_nss_subsystem *) app_data;

	/* Only single-connection stats sync messages are of interest here. */
	if (nim->cm.type != NSS_IPV6_RX_CONN_STATS_SYNC_MSG)
		return;

	hwpa_nss_ipv6_sync_session(subsys, &nim->msg.conn_stats);
}

/**
 * @fn void hwpa_nss_ipv6_sync_many_callback(void*, struct nss_ipv6_msg*)
 * @brief callback function used as a reply from a sync_many message from nss
 *
 * @param app_data [in] application specific data. not used here.
 * @param nim [in] reply message from nss
 */
static void hwpa_nss_ipv6_sync_many_callback(void *app_data,
					  struct nss_ipv6_msg *nim)
{
	uint32_t index;
	struct hwpa_nss_subsystem *subsys = hwpa_nss_get_subsys(HWPA_NSS_SUBSYSTEM_IDX_IPV6);
	struct hwpa_nss_synchronizer *sync = subsys->sync;
	struct nss_ipv6_conn_sync_many_msg *sync_many_msg = &nim->msg.conn_stats_many;
	struct nss_ipv6_conn_sync_many_msg *global_sync_many_msg;

	PR_DEVEL("IPV6 Sync Many Callback\n");

	if (nim->cm.type != NSS_IPV6_TX_CONN_STATS_SYNC_MANY_MSG)
		return;

	global_sync_many_msg = &sync->msg.ipv6->msg.conn_stats_many;

	if (nim->cm.response == NSS_CMN_RESPONSE_ACK) {
		for (index = 0; index < sync_many_msg->count; index++) {
			hwpa_nss_ipv6_sync_session(subsys,
					  &(sync_many_msg->conn_sync[index]));
		}

		spin_lock(&sync->lock);
		global_sync_many_msg->index = sync_many_msg->next;
		spin_unlock(&sync->lock);
		/* Send next sync_many-msg*/
		queue_delayed_work(sync->workqueue, &sync->work, 0);
	} else	{
		spin_lock(&sync->lock);
		global_sync_many_msg->index = 0;
		spin_unlock(&sync->lock);
		queue_delayed_work(sync->workqueue, &sync->work, HWPA_NSS_STATS_SYNC_PERIOD);
	}
}

/**
 * @fn void hwpa_nss_ipv6_sync_work(struct work_struct*)
 * @brief work function for the ipv6 sync workqueue
 *
 * @param work [in] work struct
 */
static void hwpa_nss_ipv6_sync_work(struct work_struct *work)
{
	struct delayed_work *dwork = container_of(work, struct delayed_work, work);
	struct hwpa_nss_subsystem *subsys = hwpa_nss_get_subsys(HWPA_NSS_SUBSYSTEM_IDX_IPV6);
	struct hwpa_nss_synchronizer *sync = subsys->sync;
	struct nss_ipv6_msg *msg = sync->msg.ipv6;

	/* Only poll while sessions are offloaded; on a successful tx the
	 * sync_many callback takes over the rescheduling. */
	if (subsys->tracker->usage != 0 &&
	    nss_ipv6_tx_with_size(subsys->mgr, msg, PAGE_SIZE) == NSS_TX_SUCCESS)
		return;

	/* Reset the request and retry after the regular sync period. */
	spin_lock(&sync->lock);
	msg->msg.conn_stats_many.count = 0;
	msg->msg.conn_stats_many.index = 0;
	spin_unlock(&sync->lock);
	queue_delayed_work(sync->workqueue, dwork,
			   HWPA_NSS_STATS_SYNC_PERIOD);
}

/**
 * @fn void hwpa_nss_ipv6_sync_exit(struct hwpa_nss_subsystem*)
 * @brief exit ipv6 subsystem synchronization
 *
 * @param subsys [in] handle of the subsystem
 */
static void hwpa_nss_ipv6_sync_exit(struct hwpa_nss_subsystem *subsys)
{
	struct hwpa_nss_synchronizer *synchronizer = subsys->sync;

	/* Stop the poller before tearing down the resources it uses. */
	cancel_delayed_work_sync(&synchronizer->work);
	destroy_workqueue(synchronizer->workqueue);
	nss_ipv6_conn_sync_many_notify_unregister();
	kfree(synchronizer->msg.ipv6);
}

/**
 * @fn enum hwpa_backend_rv hwpa_nss_ipv6_sync_init(struct hwpa_nss_subsystem*)
 * @brief initialize ipv6 subsystem synchronization
 *
 * @param subsys [in] handle of the subsystem
 *
 * @return success or error code
 */
static enum hwpa_backend_rv hwpa_nss_ipv6_sync_init(struct hwpa_nss_subsystem *subsys)
{
	enum hwpa_backend_rv retval = HWPA_BACKEND_SUCCESS;
	struct hwpa_nss_synchronizer *sync = subsys->sync;
	struct nss_ipv6_conn_sync_many_msg *nicsm;
	struct nss_ipv6_msg *msg;

	PR_DEVEL("IPV6 Sync init\n");

	spin_lock_init(&sync->lock);

	/* One page holds the message plus the conn_sync entries the NSS may
	 * fill, bounded by nicsm->size below. */
	msg = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!msg) {
		retval = HWPA_BACKEND_ERR_MEMORY;
		goto failure_1;
	}

	sync->msg.ipv6 = msg;

	nss_ipv6_conn_sync_many_notify_register(hwpa_nss_ipv6_sync_many_callback);

	nss_ipv6_msg_init(msg, NSS_IPV6_RX_INTERFACE,
		NSS_IPV6_TX_CONN_STATS_SYNC_MANY_MSG,
		sizeof(struct nss_ipv6_conn_sync_many_msg),
		NULL,
		(void *) subsys);

	nicsm = &msg->msg.conn_stats_many;
	nicsm->index = 0;
	nicsm->size = PAGE_SIZE;

	sync->workqueue = create_singlethread_workqueue("hwpa_nss_ipv6_sync_workqueue");
	if (!sync->workqueue) {
		retval = HWPA_BACKEND_ERR_SYNC;
		goto failure_2;
	}

	INIT_DELAYED_WORK(&sync->work,
			  hwpa_nss_ipv6_sync_work);

	/* Kick off the periodic stats polling. */
	queue_delayed_work(sync->workqueue, &sync->work,
			   HWPA_NSS_STATS_SYNC_PERIOD);

	return HWPA_BACKEND_SUCCESS;

failure_2:
	nss_ipv6_conn_sync_many_notify_unregister();
	kfree(msg);
	/* Don't leave a dangling pointer behind after freeing the message. */
	sync->msg.ipv6 = NULL;

failure_1:
	return retval;
}

/*
 *===============================================================================
 * hwpa nss synchronization
 *==============================================================================
 */

/**
 * @fn enum hwpa_backend_rv hwpa_backend_stats(unsigned long, struct avm_pa_session_stats*)
 * @brief update avm_pa stats
 *
 * @param subsys [in] handle of the hwpa session
 * @param stats [out] avm_pa stats to fill
 * @return success only
 */
enum hwpa_backend_rv hwpa_backend_stats(unsigned long handle,
							struct avm_pa_session_stats *stats)
{
	struct hwpa_nss_hwpa_session *hws_hwpa;
	struct hwpa_nss_nss_session *hws_nss;

	/* Invalid handle: report zeroed stats rather than an error. */
	hws_hwpa = hwpa_nss_handle_to_session(handle);
	if (!hws_hwpa) {
		PR_DEVEL("Requesting Stats from invalid session id: %lu", handle);
		memset(stats, 0, sizeof(*stats));
		return HWPA_BACKEND_SUCCESS;
	}

	/*
	 * The NSS session is reached via RCU; hold the read lock for the
	 * whole access. NOTE(review): hws_nss is dereferenced without a
	 * NULL check — presumably the hwpa session always points at a
	 * live NSS session while the handle is valid; confirm.
	 */
	rcu_read_lock();
	hws_nss = rcu_dereference(hws_hwpa->hws_nss);
	if (hws_nss->state != HWPA_NSS_SESSION_STATE_ACTIVE) {
		memset(stats, 0, sizeof(*stats));
	} else	{
		/*
		 * One NSS session can back both directions; pick the
		 * counter set matching this hwpa session's direction and
		 * consume it (read-and-reset) only when the sync marked
		 * it as updated. The UPDATED bit is cleared before taking
		 * sync_lock; stats written between the two steps are
		 * picked up on the next sync round.
		 */
		if (hws_hwpa->direction == HWPA_NSS_SESSION_DIRECTION_FLOW &&
				test_and_clear_bit(HWPA_NSS_SESSION_SYNC_FLOW_UPDATED, &hws_nss->flags)) {
			spin_lock_bh(&hws_nss->sync_lock);
			stats->tx_pkts = hws_nss->stats.flow_rx_pkts;
			stats->tx_bytes = (u64) hws_nss->stats.flow_rx_bytes;
			hws_nss->stats.flow_rx_pkts = 0;
			hws_nss->stats.flow_rx_bytes = 0;
			spin_unlock_bh(&hws_nss->sync_lock);

			stats->validflags |= AVM_PA_SESSION_STATS_VALID_BYTES;
			stats->validflags |= AVM_PA_SESSION_STATS_VALID_PKTS;
		} else if (hws_hwpa->direction == HWPA_NSS_SESSION_DIRECTION_RETURN &&
				test_and_clear_bit(HWPA_NSS_SESSION_SYNC_RETURN_UPDATED, &hws_nss->flags)) {
			spin_lock_bh(&hws_nss->sync_lock);
			stats->tx_pkts = hws_nss->stats.return_rx_pkts;
			stats->tx_bytes = (u64) hws_nss->stats.return_rx_bytes;
			hws_nss->stats.return_rx_pkts = 0;
			hws_nss->stats.return_rx_bytes = 0;
			spin_unlock_bh(&hws_nss->sync_lock);

			stats->validflags |= AVM_PA_SESSION_STATS_VALID_BYTES;
			stats->validflags |= AVM_PA_SESSION_STATS_VALID_PKTS;
		} else {
			/* No fresh counters since the last query. */
			memset(stats, 0, sizeof(*stats));
		}
	}

	rcu_read_unlock();

	return HWPA_BACKEND_SUCCESS;
}

/*
 *===============================================================================
 * hwpa nss offloaders init and exit
 *==============================================================================
 */

/**
 * @fn void hwpa_nss_offloader_exit(struct hwpa_nss_offloader*)
 * @brief exit offloader
 *
 * @param ofl [in] the offloader to exit
 */
static void hwpa_nss_offloader_exit(struct hwpa_nss_offloader *ofl)
{
	const char *name = ofl->label;

	/* Nothing to tear down for an offloader yet; just log it. */
	PR_DEVEL("Exit offloader: %s", name);
}

/**
 * @fn enum hwpa_backend_rv hwpa_nss_offloader_init(struct hwpa_nss_offlaoder*)
 * @brief initialize offloader
 *
 * @param ofl [in] the offloader to initialize
 *
 * @return success or error code
 */
static enum hwpa_backend_rv hwpa_nss_offloader_init(struct hwpa_nss_offloader *ofl)
{
	PR_DEVEL("Initialize offoader: %s", ofl->label);

	/* Only the statistics lock needs setting up; this cannot fail. */
	spin_lock_init(&ofl->lock);

	return HWPA_BACKEND_SUCCESS;
}

/*
 *===============================================================================
 * hwpa nss subsystems init and exit
 *==============================================================================
 */

/**
 * @fn enum hwpa_backend_rv hwpa_nss_ipv4_exit(struct hwpa_nss_subsystem*)
 * @brief exit ipv4 subsystem
 *
 * @param subsys [in] the subsystem to exit
 */
static void hwpa_nss_ipv4_exit(struct hwpa_nss_subsystem *subsys)
{
	/* Stop receiving IPv4 connection notifications from NSS. */
	nss_ipv4_notify_unregister();
}

/**
 * @fn enum hwpa_backend_rv hwpa_nss_ipv6_exit(struct hwpa_nss_subsystem*)
 * @brief exit ipv6 subsystem
 *
 * @param subsys [in] the subsystem to exit
 */
static void hwpa_nss_ipv6_exit(struct hwpa_nss_subsystem *subsys)
{
	/* Stop receiving IPv6 connection notifications from NSS. */
	nss_ipv6_notify_unregister();
}

/**
 * @fn enum hwpa_backend_rv hwpa_nss_ipv4_init(struct hwpa_nss_subsystem*)
 * @brief init ipv4 subsystem
 *
 * @param subsys [in] the subsystem to init
 *
 * @return success only
 */
static enum hwpa_backend_rv hwpa_nss_ipv4_init(struct hwpa_nss_subsystem *subsys)
{
	struct hwpa_nss_ipv4_specific *spec = subsys->ipv4_spec;

	/* Empty session lookup table for this subsystem. */
	hash_init(spec->session_table);

	/* Register for IPv4 events; the returned manager context is kept. */
	subsys->mgr = nss_ipv4_notify_register(hwpa_nss_ipv4_net_dev_callback,
					       (void *)subsys);

	/* IPv4 sessions may carry both flow and return direction. */
	__set_bit(HWPA_NSS_SUBSYS_FLAG_BIDIRECTIONAL_SESSIONS, &subsys->flags);

	return HWPA_BACKEND_SUCCESS;
}

/**
 * @fn enum hwpa_backend_rv hwpa_nss_ipv6_init(struct hwpa_nss_subsystem*)
 * @brief init ipv6 subsystem
 *
 * @param subsys [in] the subsystem to init
 *
 * @return success only
 */
static enum hwpa_backend_rv hwpa_nss_ipv6_init(struct hwpa_nss_subsystem *subsys)
{
	struct hwpa_nss_ipv6_specific *spec = subsys->ipv6_spec;

	/* Empty session lookup table for this subsystem. */
	hash_init(spec->session_table);

	/* Register for IPv6 events; the returned manager context is kept. */
	subsys->mgr = nss_ipv6_notify_register(hwpa_nss_ipv6_net_dev_callback,
					       (void *)subsys);

	/* IPv6 sessions may carry both flow and return direction. */
	__set_bit(HWPA_NSS_SUBSYS_FLAG_BIDIRECTIONAL_SESSIONS, &subsys->flags);

	return HWPA_BACKEND_SUCCESS;
}

/**
 * @fn void hwpa_nss_subsys_exit(struct hwpa_nss_subsystem*)
 * @brief exit subsystem
 *
 * @param subsys [in] the subsystem to exit
 */
static void hwpa_nss_subsys_exit(struct hwpa_nss_subsystem *subsys)
{
	struct hwpa_nss_synchronizer *sync = subsys->sync;

	PR_DEVEL("Exit subsystem: %s", subsys->label);

	if (subsys->exit)
		subsys->exit(subsys);
	if (sync->exit)
		sync->exit(subsys);
}

/**
 * @fn enum hwpa_backend_rv hwpa_nss_subsys_init(struct hwpa_nss_subsystem*)
 * @brief initialize subsystem
 *
 * @param subsys [in] the subsystem to initialize
 *
 * @return success or error code
 */
static enum hwpa_backend_rv hwpa_nss_subsys_init(struct hwpa_nss_subsystem *subsys)
{
	enum hwpa_backend_rv retval;
	struct hwpa_nss_tracker *tracker = subsys->tracker;
	struct hwpa_nss_synchronizer *sync = subsys->sync;

	PR_DEVEL("Initialize subsystem: %s", subsys->label);

	mutex_init(&subsys->mutex);
	subsys->flags = 0;

	if (subsys->init) {
		retval = subsys->init(subsys);
		if (retval != HWPA_BACKEND_SUCCESS) {
			pr_err("Couldn't initialize subsystem specific stuff: %s\n", subsys->label);
			goto failure_1;
		}
	}

	if (tracker->init) {
		retval = tracker->init(subsys);
		if (retval != HWPA_BACKEND_SUCCESS) {
			pr_err("Couldn't initialize tracking for subsystem: %s\n", subsys->label);
			goto failure_2;
		}
		spin_lock_init(&tracker->lock);
	}

	if (sync->init) {
		retval = sync->init(subsys);
		if (retval != HWPA_BACKEND_SUCCESS) {
			pr_err("Couldn't initialize sync for subsystem: %s\n", subsys->label);
			goto failure_2;
		}
	}

failure_2:
	if (subsys->exit)
		subsys->exit(subsys);

failure_1:
	return retval;
}

/*
 *===============================================================================
 * hwpa nss init and exit
 *==============================================================================
 */

#ifdef CONFIG_PROC_FS
static void __init hwpa_nss_proc_init(void);
static void __exit hwpa_nss_proc_exit(void);
#endif

/**
 * @fn hwpa_nss_exit_offloaders_till(struct hwpa_nss_offloader*)
 * @brief exit all offloaders from global offloader list positioned before last_element_idx.
 *
 * @param last_element_idx [in] the index to which all offloaders are supposed to be exited.
 */
/*
 * Tear down offloaders [0 .. last_element_idx] of the global list.
 * An index beyond the list is silently ignored.
 */
static void hwpa_nss_exit_offloaders_till(unsigned int last_element_idx)
{
	struct hwpa_nss_context *ctx = &hwpa_nss_ctx;
	unsigned int idx;

	if (last_element_idx >= ARRAY_SIZE(ctx->offloaders))
		return;

	for (idx = 0; idx <= last_element_idx; ++idx)
		hwpa_nss_offloader_exit(ctx->offloaders[idx]);
}

/**
 * @fn hwpa_nss_exit_offloaders()
 * @brief exit all offloaders from global offloader-list.
 */
static void hwpa_nss_exit_offloaders(void)
{
	struct hwpa_nss_context *global_ctx = &hwpa_nss_ctx;

	hwpa_nss_exit_offloaders_till(ARRAY_SIZE(global_ctx->offloaders) - 1);
}

/**
 * @fn hwpa_nss_init_offloaders()
 * @brief init all offloaders from global offloader-list.
 *
 * @return success or error code.
 */
static enum hwpa_backend_rv hwpa_nss_init_offloaders(void)
{
	struct hwpa_nss_context *global_ctx = &hwpa_nss_ctx;
	enum hwpa_backend_rv retval;
	int i;

	for (i = 0; i < ARRAY_SIZE(global_ctx->offloaders); ++i) {
		retval = hwpa_nss_offloader_init(global_ctx->offloaders[i]);
		if (retval != HWPA_BACKEND_SUCCESS) {
			pr_err("Could not initialize offloader: %s\n", global_ctx->offloaders[i]->label);
			/* Clean up */
			hwpa_nss_exit_offloaders_till((i-1 >= 0) ? i-1 : 0);
			break;
		}
	}

	return retval;
}

/**
 * @fn hwpa_nss_exit_subsystems_till(struct hwpa_nss_subsystem*)
 * @brief exit all subsystems from global subsystem list positioned before last_element_idx.
 *
 * @param last_element_idx [in] the index to which all offloaders are supposed to be exited.
 */
/*
 * Tear down subsystems [0 .. last_element_idx] of the global list.
 * An index beyond the list is silently ignored.
 */
static void hwpa_nss_exit_subsystems_till(unsigned int last_element_idx)
{
	struct hwpa_nss_context *ctx = &hwpa_nss_ctx;
	unsigned int idx;

	if (last_element_idx >= ARRAY_SIZE(ctx->subsystems))
		return;

	for (idx = 0; idx <= last_element_idx; ++idx)
		hwpa_nss_subsys_exit(ctx->subsystems[idx]);
}

/**
 * @fn hwpa_nss_exit_subsystems()
 * @brief exit all subsystems from global subsystem-list.
 */
static void hwpa_nss_exit_subsystems(void)
{
	struct hwpa_nss_context *global_ctx = &hwpa_nss_ctx;

	hwpa_nss_exit_subsystems_till(ARRAY_SIZE(global_ctx->subsystems) - 1);
}

/**
 * @fn hwpa_nss_init_subsystems()
 * @brief init all subsystems from global subsystem-list.
 *
 * @return success or error code.
 */
static enum hwpa_backend_rv hwpa_nss_init_subsystems(void)
{
	struct hwpa_nss_context *global_ctx = &hwpa_nss_ctx;
	enum hwpa_backend_rv retval;
	int i;

	for (i = 0; i < ARRAY_SIZE(global_ctx->subsystems); ++i) {
		retval = hwpa_nss_subsys_init(global_ctx->subsystems[i]);
		if (retval != HWPA_BACKEND_SUCCESS) {
			pr_err("Could not initialize subsystem: %s\n", global_ctx->subsystems[i]->label);
			/* Clean up */
			hwpa_nss_exit_subsystems_till((i-1 >= 0) ? i-1 : 0);
			break;
		}
	}

	return retval;
}

/*
 * Per-protocol private state referenced by the ipv4/ipv6 subsystem
 * descriptors below (session hash tables, debug magic).
 * NOTE(review): not static — presumably unreferenced outside this file;
 * confirm before adding static.
 */
struct hwpa_nss_ipv4_specific ipv4_spec;
struct hwpa_nss_ipv6_specific ipv6_spec;

#ifdef HWPA_NSS_DEBUG
/**
 * @fn void hwpa_nss_init_magic(void)
 * @brief Init global debug magic
 */
/* Stamp both per-protocol structs with their debug magic values. */
static void hwpa_nss_init_magic(void)
{
	ipv6_spec.magic = IPV6_SPECIFIC_MAGIC;
	ipv4_spec.magic = IPV4_SPECIFIC_MAGIC;
}

/**
 * @fn enum hwpa_backend_rv hwpa_nss_check_magic(void)
 * @brief Check global debug magic
 *
 * @return success or error code
 */
static enum hwpa_backend_rv hwpa_nss_check_magic(void)
{
	struct hwpa_nss_ipv4_specific *ipv4;
	struct hwpa_nss_ipv6_specific *ipv6;
	enum hwpa_backend_rv retval = HWPA_BACKEND_SUCCESS;

	ipv4 = hwpa_nss_get_subsys(HWPA_NSS_SUBSYSTEM_IDX_IPV4)->ipv4_spec;
	ipv6 = hwpa_nss_get_subsys(HWPA_NSS_SUBSYSTEM_IDX_IPV6)->ipv6_spec;
PR_DEVEL("%x\n", ipv6->magic);
	if (ipv4->magic != IPV4_SPECIFIC_MAGIC)
		retval = HWPA_BACKEND_ERR_INTERNAL;
	else if (ipv6->magic != IPV6_SPECIFIC_MAGIC)
		retval = HWPA_BACKEND_ERR_INTERNAL;

	return retval;
}
#endif

/* Session-count tracking for the IPv4 subsystem (limit set at init). */
static struct hwpa_nss_tracker ipv4_tracker = {
	.init = hwpa_nss_ipv4_init_limit,
};

/* Periodic stats synchronization for the IPv4 subsystem. */
static struct hwpa_nss_synchronizer ipv4_sync = {
	.init = hwpa_nss_ipv4_sync_init,
	.exit = hwpa_nss_ipv4_sync_exit,
};

/*
 * IPv4 subsystem descriptor: session hashing, registration, lookup and
 * purge hooks plus tracker and synchronizer.
 * NOTE(review): .spec is assigned here while the init code reads
 * subsys->ipv4_spec — presumably the same storage (union/alias in
 * struct hwpa_nss_subsystem); confirm in hwpa_nss.h.
 */
static struct hwpa_nss_subsystem ipv4_subsys = {
	.label = "ipv4",
	.spec = &ipv4_spec,
	.init = hwpa_nss_ipv4_init,
	.exit = hwpa_nss_ipv4_exit,
	.gen_hash = hwpa_nss_ipv4_gen_session_hash,
	.register_nss_session = hwpa_nss_ipv4_register_nss_session,
	.find_nss_session = hwpa_nss_ipv4_find_nss_session,
	.purge_sessions = hwpa_nss_ipv4_purge_sessions,
	.tracker = &ipv4_tracker,
	.sync = &ipv4_sync,
};

/* Offloader translating avm_pa IPv4 sessions into NSS rules. */
static struct hwpa_nss_offloader ipv4_offloader = {
	.label = "ipv4",
	.subsys = &ipv4_subsys,
	.prepare_session = hwpa_nss_ipv4_prepare_session,
	.add_session = hwpa_nss_ipv4_add_session,
	.remove_session = hwpa_nss_ipv4_remove_session,
};

/* Session-count tracking for the IPv6 subsystem (limit set at init). */
static struct hwpa_nss_tracker ipv6_tracker = {
	.init = hwpa_nss_ipv6_init_limit,
};

/* Periodic stats synchronization for the IPv6 subsystem. */
static struct hwpa_nss_synchronizer ipv6_sync = {
	.init = hwpa_nss_ipv6_sync_init,
	.exit = hwpa_nss_ipv6_sync_exit,
};

/* IPv6 subsystem descriptor; mirrors ipv4_subsys. */
static struct hwpa_nss_subsystem ipv6_subsys = {
	.label = "ipv6",
	.spec = &ipv6_spec,
	.init = hwpa_nss_ipv6_init,
	.exit = hwpa_nss_ipv6_exit,
	.gen_hash = hwpa_nss_ipv6_gen_session_hash,
	.register_nss_session = hwpa_nss_ipv6_register_nss_session,
	.find_nss_session = hwpa_nss_ipv6_find_nss_session,
	.purge_sessions = hwpa_nss_ipv6_purge_sessions,
	.tracker = &ipv6_tracker,
	.sync = &ipv6_sync,
};

/* Offloader translating avm_pa IPv6 sessions into NSS rules. */
static struct hwpa_nss_offloader ipv6_offloader = {
	.label = "ipv6",
	.subsys = &ipv6_subsys,
	.prepare_session = hwpa_nss_ipv6_prepare_session,
	.add_session = hwpa_nss_ipv6_add_session,
	.remove_session = hwpa_nss_ipv6_remove_session,
};

/* Global backend context tying the subsystems and offloaders together. */
static struct hwpa_nss_context hwpa_nss_ctx = {
	.subsystems = {
		&ipv4_subsys,
		&ipv6_subsys,
	},
	.offloaders = {
		&ipv4_offloader,
		&ipv6_offloader,
	},
};

/**
 * @fn enum hwpa_backend_rv hwpa_backend_init(struct hwpa_backend_config*)
 * @brief Init Proc entries, Purge Sessions, init offloaders and subsystem and init kmem_caches.
 * Also fills a HWPA configuration.
 *
 * @return success or error code
 */
/**
 * @fn enum hwpa_backend_rv hwpa_backend_init(struct hwpa_backend_config*)
 * @brief Init Proc entries, Purge Sessions, init offloaders and subsystem and init kmem_caches.
 * Also fills a HWPA configuration.
 *
 * Fix: hwpa_nss_proc_init() only exists under CONFIG_PROC_FS (see the
 * forward declarations above); the call is now guarded accordingly so
 * the module still builds without procfs.
 *
 * @return success or error code
 */
enum hwpa_backend_rv hwpa_backend_init(struct hwpa_backend_config *hw_pa_config)
{
	enum hwpa_backend_rv retval;
	struct hwpa_nss_context *global_ctx = &hwpa_nss_ctx;

	PR_DEVEL("HWPA backend init\n");

	/* NSS handles sessions itself; no channels, no bsessions. */
	hw_pa_config->flags |= AVM_HW_F_NO_BSESSION;
	hw_pa_config->alloc_rx_channel = NULL;
	hw_pa_config->alloc_tx_channel = NULL;
	hw_pa_config->free_rx_channel = NULL;
	hw_pa_config->free_tx_channel = NULL;

	/* Without a running NSS there is nothing to offload to. */
	if (!nss_cmn_get_nss_enabled()) {
		retval = HWPA_BACKEND_ERR_INTERNAL;
		goto failure_1;
	}

#ifdef HWPA_NSS_DEBUG
	hwpa_nss_init_magic();
	retval = hwpa_nss_check_magic();
	if (retval != HWPA_BACKEND_SUCCESS) {
		pr_err("Bad Magic!\n");
		goto failure_1;
	}
#endif

	global_ctx->kmem_nss = kmem_cache_create("hwpa_nss_nss_sess",
				sizeof(struct hwpa_nss_nss_session), 0,
				SLAB_HWCACHE_ALIGN | SLAB_RED_ZONE, NULL);
	if (!global_ctx->kmem_nss) {
		retval = HWPA_BACKEND_ERR_CACHE;
		pr_err("Could not create nss session cache!\n");
		goto failure_1;
	}

	global_ctx->kmem_hwpa = kmem_cache_create("hwpa_nss_hwpa_sess",
				sizeof(struct hwpa_nss_hwpa_session), 0,
				SLAB_HWCACHE_ALIGN | SLAB_RED_ZONE, NULL);
	if (!global_ctx->kmem_hwpa) {
		retval = HWPA_BACKEND_ERR_CACHE;
		pr_err("Could not create hwpa session cache!\n");
		goto failure_2;
	}

	retval = hwpa_nss_init_subsystems();
	if (retval != HWPA_BACKEND_SUCCESS) {
		pr_err("Couldn't initialize all subsystems\n");
		goto failure_3;
	}

	retval = hwpa_nss_init_offloaders();
	if (retval != HWPA_BACKEND_SUCCESS) {
		pr_err("Couldn't initialize all offloaders\n");
		goto failure_4;
	}

	retval = hwpa_nss_pending_offload_manager_init();
	if (retval != HWPA_BACKEND_SUCCESS) {
		pr_err("Couldn't initialize pending offload manager\n");
		goto failure_5;
	}

#ifdef CONFIG_PROC_FS
	hwpa_nss_proc_init();
#endif

	PR_DEVEL("HWPA_NSS init successful\n");

	return HWPA_BACKEND_SUCCESS;

failure_5:
	hwpa_nss_exit_offloaders();

failure_4:
	hwpa_nss_exit_subsystems();

failure_3:
	kmem_cache_destroy(global_ctx->kmem_hwpa);

failure_2:
	kmem_cache_destroy(global_ctx->kmem_nss);

failure_1:
	return retval;
}

/**
 * @fn hwpa_backend_exit(void)
 * @brief Remove Proc entries, Purge Sessions, remove offloaders and subsystem and release kmem_caches
 */
/**
 * @fn hwpa_backend_exit(void)
 * @brief Remove Proc entries, Purge Sessions, remove offloaders and subsystem and release kmem_caches
 *
 * Fix: hwpa_nss_proc_exit() only exists under CONFIG_PROC_FS; guard the
 * call so the module still builds without procfs.
 */
void hwpa_backend_exit(void)
{
	struct hwpa_nss_context *global_ctx = &hwpa_nss_ctx;

	PR_DEVEL("HWPA Backend Exit\n");

#ifdef CONFIG_PROC_FS
	hwpa_nss_proc_exit();
#endif

	hwpa_nss_pending_offload_manager_exit();

	/* Drop all remaining sessions before tearing down the machinery. */
	hwpa_nss_purge_sessions();
	hwpa_nss_exit_offloaders();
	hwpa_nss_exit_subsystems();

	kmem_cache_destroy(global_ctx->kmem_nss);
	kmem_cache_destroy(global_ctx->kmem_hwpa);
}

/*
 *==============================================================================
 * procfs user interface
 *==============================================================================
 */

#ifdef CONFIG_PROC_FS
//TODO: introduce control interface
typedef int hwpa_nss_fprintf(void *, const char *, ...);

/* Print one offloader's counters; the lock keeps the snapshot coherent. */
static void hwpa_nss_show_offloader_stats(hwpa_nss_fprintf fprintffunc, void *arg,
				struct hwpa_nss_offloader *ofl)
{
	if (ofl == NULL)
		return;

	spin_lock_bh(&ofl->lock);
	fprintffunc(arg, "Offloader %s:\n", ofl->label);
	fprintffunc(arg, "  pending nss sessions: %d\n", ofl->pending_nss_session_count);
	fprintffunc(arg, "  pending avm_pa sessions: %d\n", ofl->pending_avm_pa_session_count);
	fprintffunc(arg, "  active nss sessions: %d\n", ofl->active_nss_session_count);
	fprintffunc(arg, "  active avm_pa sessions: %d\n", ofl->active_avm_pa_session_count);
	fprintffunc(arg, "  successful NSS offloads: %d\n", ofl->successful_nss_offloads);
	fprintffunc(arg, "  failed NSS offloads: %d\n", ofl->failed_nss_offloads);
	spin_unlock_bh(&ofl->lock);
}

/* Print one subsystem's tracker usage (locked) and limit (unlocked). */
static void hwpa_nss_show_subsystem_stats(hwpa_nss_fprintf fprintffunc, void *arg,
				struct hwpa_nss_subsystem *subsys)
{
	struct hwpa_nss_tracker *trk = subsys->tracker;

	if (trk == NULL)
		return;

	fprintffunc(arg, "Subsystem %s:\n", subsys->label);

	spin_lock_bh(&trk->lock);
	fprintffunc(arg, "  usage: %d\n", trk->usage);
	spin_unlock_bh(&trk->lock);

	/* The limit is fixed after init; no lock needed. */
	fprintffunc(arg, "  limit: %d\n", trk->limit);
}

/* Emit the summary: all subsystem stats, then all offloader stats. */
static void hwpa_nss_show_brief(hwpa_nss_fprintf fprintffunc, void *arg)
{
	struct hwpa_nss_context *ctx = &hwpa_nss_ctx;
	unsigned int idx;

	fprintffunc(arg, "HWPA_NSS summary\n");

	for (idx = 0; idx < ARRAY_SIZE(ctx->subsystems); ++idx)
		hwpa_nss_show_subsystem_stats(fprintffunc, arg, ctx->subsystems[idx]);

	for (idx = 0; idx < ARRAY_SIZE(ctx->offloaders); ++idx)
		hwpa_nss_show_offloader_stats(fprintffunc, arg, ctx->offloaders[idx]);
}

/* seq_file show handler for /proc/net/hwpa_nss/brief. */
static int brief_show(struct seq_file *m, void *v)
{
	/*
	 * NOTE(review): casting seq_printf to a variadic int-returning
	 * type — works only while the signatures stay call-compatible
	 * (seq_printf returns void on newer kernels); verify per kernel
	 * version.
	 */
	hwpa_nss_show_brief((hwpa_nss_fprintf *)seq_printf, m);
	return 0;
}

/* open() handler: bind brief_show to a single-shot seq_file. */
static int brief_show_open(struct inode *inode, struct file *file)
{
	return single_open(file, brief_show, PDE_DATA(inode));
}

/* File operations for the read-only "brief" proc entry. */
static const struct file_operations brief_show_fops = {
	.open    = brief_show_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

/*
 * List every netdev in every namespace with its type, avm_pa pid and
 * NSS interface number. Iteration happens under the RCU read lock.
 */
static void hwpa_nss_show_interfaces(hwpa_nss_fprintf fprintffunc, void *arg)
{
	struct net *net;
	struct net_device *dev;

	fprintffunc(arg, "%-20s%-10s%-10s%-10s\n", "Netdev", "type",
			"avm_pid", "nss_ifnum");

	rcu_read_lock();
	for_each_net_rcu(net) {
		for_each_netdev_rcu(net, dev) {
			int32_t nss_ifnum = nss_cmn_get_interface_number_by_dev(dev);

			fprintffunc(arg, "%-20s%-10u%-10u%-10d\n",
					dev->name,
					(unsigned int)dev->type,
					(unsigned int)AVM_PA_DEVINFO(dev)->pid_handle,
					nss_ifnum);
		}
	}
	rcu_read_unlock();
}

/* seq_file show handler for /proc/net/hwpa_nss/interfaces. */
static int interfaces_show(struct seq_file *m, void *v)
{
	/*
	 * NOTE(review): same seq_printf function-pointer cast as in
	 * brief_show — only valid while signatures stay call-compatible.
	 */
	hwpa_nss_show_interfaces((hwpa_nss_fprintf *)seq_printf, m);
	return 0;
}

/* open() handler: bind interfaces_show to a single-shot seq_file. */
static int interfaces_show_open(struct inode *inode, struct file *file)
{
	return single_open(file, interfaces_show, PDE_DATA(inode));
}

/* File operations for the read-only "interfaces" proc entry. */
static const struct file_operations interfaces_show_fops = {
	.open    = interfaces_show_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

/* /proc/net/hwpa_nss directory; NULL until hwpa_nss_proc_init() ran. */
static struct proc_dir_entry *dir_entry;

/*
 * Create /proc/net/hwpa_nss and its entries.
 *
 * Fix: the previous version ignored a failing proc_net_mkdir(); with a
 * NULL parent, proc_create() would drop the entries into /proc/net's
 * root. Failures are now checked and reported; the proc interface is
 * optional, so init continues regardless.
 */
static void __init hwpa_nss_proc_init(void)
{
	dir_entry = proc_net_mkdir(&init_net, "hwpa_nss", init_net.proc_net);
	if (!dir_entry) {
		pr_err("Could not create proc directory hwpa_nss\n");
		return;
	}

	if (!proc_create("brief", 0444, dir_entry, &brief_show_fops))
		pr_err("Could not create proc entry brief\n");
	if (!proc_create("interfaces", 0444, dir_entry, &interfaces_show_fops))
		pr_err("Could not create proc entry interfaces\n");

	PR_DEVEL("Created proc entries!\n");
}

/*
 * Remove the proc entries created by hwpa_nss_proc_init().
 *
 * Fix: if the proc directory was never created (mkdir failed), the old
 * code passed a NULL parent to remove_proc_entry(); now it bails out
 * early. dir_entry is cleared afterwards to keep the state consistent.
 */
static void __exit hwpa_nss_proc_exit(void)
{
	if (!dir_entry)
		return;

	remove_proc_entry("brief", dir_entry);
	remove_proc_entry("interfaces", dir_entry);

	remove_proc_entry("hwpa_nss", init_net.proc_net);
	dir_entry = NULL;

	PR_DEVEL("Removed proc entries!\n");
}

#endif