/*
 * Copyright (c) 2019 AVM GmbH.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <https://www.gnu.org/licenses/>.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/async.h>
#include <linux/spinlock.h>
#include <linux/in.h>		/* IPPROTOs */
#include <linux/kobject.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/atomic.h>

#include "hwpa.h"
#include "hwpa_backend.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul Hüber");
MODULE_DESCRIPTION("AVM compat layer for network offloading");

ASYNC_DOMAIN(hwpa_add_domain);
ASYNC_DOMAIN(hwpa_rem_domain);

/* Workaround for JZ-71010:
 * This lock makes sure that no asynchronous removal can be scheduled while
 * hwpa_session_stats() is running.
 */
DEFINE_SPINLOCK(async_rem_lock);

enum { hw_handle_invalid = ~(unsigned long)0 };

struct kobject *hwpa_kobj;

static struct errstat_domain_storage edom_storage;
static struct errstat_domain *edom;
static const char *hwpa_backend_rv_descriptions[] =
	HWPA_BACKEND_RV_DESC_INITIALIZER;
static atomic_t
	hwpa_backend_rv_counters[ARRAY_SIZE(hwpa_backend_rv_descriptions)];

/* avm_pa match whitelists
 *
 * valid combinations:
 * l2+l3
 * l2+l3_l3encap+l3
 * l2+l3_l2encap+l2+l3
 */
static const struct avm_pa_match_info valid_l2_matches[][AVM_PA_MAX_MATCH] = {
	HWPA_VALID_L2
};
static const struct avm_pa_match_info
	valid_l3_l2encap_matches[][AVM_PA_MAX_MATCH] = {
	HWPA_VALID_L3_L2ENCAP
};
static const struct avm_pa_match_info
	valid_l3_l3encap_matches[][AVM_PA_MAX_MATCH] = {
	HWPA_VALID_L3_L3ENCAP
};
static const struct avm_pa_match_info valid_l3_matches[][AVM_PA_MAX_MATCH] = {
	HWPA_VALID_L3
};

/* Return the length of the longest whitelist entry in haystack that is a
 * complete prefix of needle, or -1 if no entry matches.
 */
static int match_longest_seq(const struct avm_pa_match_info *needle,
			     int needle_len,
			     const struct avm_pa_match_info *haystack,
			     int haystack_size)
{
	int i;
	const struct avm_pa_match_info *haystack_match;
	int longest_match_len;

	longest_match_len = -1;
	for (i = 0; i < haystack_size; i++) {
		int j;
		int match_count;

		match_count = 0;
		haystack_match = &haystack[i * AVM_PA_MAX_MATCH];
		for (j = 0;
		     j < needle_len &&
		     haystack_match[j].type != AVM_PA_NUM_MATCH_TYPES;
		     j++) {
			if (needle[j].type == haystack_match[j].type) {
				match_count++;
			} else {
				match_count = -1;
				break;
			}
		}
		if (haystack_match[j].type == AVM_PA_NUM_MATCH_TYPES)
			/* Haystack entry fully compared, use result. */
			longest_match_len = max(longest_match_len, match_count);
	}
	return longest_match_len;
}

static int match_traverse(const struct avm_pa_match_info *valid_matches,
			  int valid_matches_size,
			  const struct avm_pa_match_info **info,
			  int *info_len_inout)
{
	int i;

	i = match_longest_seq(*info, *info_len_inout, valid_matches,
			      valid_matches_size);
	if (i >= 0) {
		*info += i;
		*info_len_inout -= i;
		return 1;
	} else {
		return 0;
	}
}
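
/* A match list is accepted only if it can be consumed completely by one of
 * the whitelisted header sequences above. Each TRAVERSE() step below eats
 * the longest whitelisted prefix and advances the cursor, so e.g. plain
 * traffic must decompose into l2 followed by l3, while tunnelled traffic
 * additionally consumes one of the encap whitelists plus the inner headers.
 * Any leftover match entries (or an unlisted header) leave the cursor short
 * of match->nmatch and mark the session as invalid for offloading.
 */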

static bool is_invalid_session_match(const struct avm_pa_pkt_match *match)
{
	const struct avm_pa_match_info *info;
	int info_len;
	bool is_matching;

	info = &match->match[0];
	info_len = match->nmatch;

#define TRAVERSE(v) match_traverse(&v[0][0], ARRAY_SIZE(v), &info, &info_len)
	/* l2 ... */
	is_matching = TRAVERSE(valid_l2_matches) &&
		      (/* ... + l3 */
		       (TRAVERSE(valid_l3_matches)) ||
		       /* ... + l2encap + l2 + l3 */
		       (TRAVERSE(valid_l3_l2encap_matches) &&
			TRAVERSE(valid_l2_matches) &&
			TRAVERSE(valid_l3_matches)) ||
		       /* ... + l3encap + l3 */
		       (TRAVERSE(valid_l3_l3encap_matches) &&
			TRAVERSE(valid_l3_matches)));
#undef TRAVERSE

	return (!is_matching || info != &match->match[match->nmatch]);
}

static bool is_invalid_session_matches(const struct avm_pa_session *s)
{
	return is_invalid_session_match(&s->ingress) ||
	       is_invalid_session_match(&avm_pa_first_egress(s)->match);
}

static u16 pkttype_encap_added(u16 ingress_pkttype, u16 egress_pkttype)
{
	u16 ig_encap = ingress_pkttype & AVM_PA_PKTTYPE_IPENCAP_MASK;
	u16 eg_encap = egress_pkttype & AVM_PA_PKTTYPE_IPENCAP_MASK;

	if (ig_encap == eg_encap)
		return 0;
	else
		return eg_encap;
}

static u16 pkttype_encap_removed(u16 ingress_pkttype, u16 egress_pkttype)
{
	/* swap ingress and egress */
	return pkttype_encap_added(egress_pkttype, ingress_pkttype);
}

static bool is_invalid_session_pkttype(const struct avm_pa_session *s)
{
	u16 egress_pkttype;

	egress_pkttype = avm_pa_first_egress(s)->match.pkttype;

	if (!s->ingress.pkttype || !egress_pkttype) {
		pr_debug("pkttype not set\n");
		return true;
	}

	if ((s->ingress.pkttype | egress_pkttype) &
	    (AVM_PA_PKTTYPE_LISP | AVM_PA_PKTTYPE_GRE)) {
		pr_debug("lisp or gre\n");
		return true;
	}

	if (AVM_PA_PKTTYPE_IP_VERSION(egress_pkttype) !=
	    AVM_PA_PKTTYPE_IP_VERSION(s->ingress.pkttype)) {
		pr_debug("innermost IP version changed\n");
		return true;
	}

	if (AVM_PA_PKTTYPE_IPPROTO(egress_pkttype) !=
	    AVM_PA_PKTTYPE_IPPROTO(s->ingress.pkttype)) {
		pr_debug("innermost transport protocol changed\n");
		return true;
	}

	switch (AVM_PA_PKTTYPE_IPPROTO(egress_pkttype)) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		break;
	default:
		pr_debug("innermost transport is neither udp nor tcp\n");
		return true;
	}

	if (pkttype_encap_added(s->ingress.pkttype, egress_pkttype) &&
	    pkttype_encap_removed(s->ingress.pkttype, egress_pkttype)) {
		pr_debug("different encapsulations terminated at once\n");
		return true;
	}

	return false;
}

static bool is_invalid_session(const struct avm_pa_session *s)
{
	if (is_invalid_session_pkttype(s)) {
		pr_debug("invalid pkttypes\n");
		return true;
	} else if (is_invalid_session_matches(s)) {
		pr_debug("invalid matches\n");
		return true;
	} else {
		return false;
	}
}

static void release_session(struct avm_pa_session *s)
{
	avm_pa_set_hw_session(s, NULL);
}

static void add_session_async(void *session_ptr, async_cookie_t cookie)
{
	enum hwpa_backend_rv backend_rv;
	unsigned long hw_handle;
	struct avm_pa_session *s = session_ptr;

	/* Wait for pending removals to finish. Avoid a situation where a
	 * newly added session is hit by an older removal request because its
	 * contents match.
	 */
	async_synchronize_cookie_domain(cookie, &hwpa_rem_domain);

	backend_rv = hwpa_backend_add_session(s, &hw_handle);
	if (errstat_track(edom, backend_rv) != HWPA_BACKEND_SUCCESS)
		return;

	BUILD_BUG_ON(sizeof(void *) < sizeof(unsigned long));
	avm_pa_set_hw_session(s, (void *)hw_handle);
}

static int hwpa_add_session(struct avm_pa_session *avm_session)
{
	if (is_invalid_session(avm_session))
		return AVM_PA_TX_ERROR_SESSION;

	avm_pa_set_hw_session(avm_session, (void *)hw_handle_invalid);
	async_schedule_domain(add_session_async, avm_session,
			      &hwpa_add_domain);

	return AVM_PA_TX_SESSION_ADDED;
}
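
/* Handle lifecycle: hwpa_add_session() stores hw_handle_invalid as a
 * synchronous placeholder; add_session_async() later replaces it with the
 * backend handle, or leaves the placeholder when the backend rejects the
 * session. remove_session_async() and hwpa_session_stats() therefore treat
 * hw_handle_invalid as "no backend session to tear down / no stats".
 * Ordering relies on globally increasing async cookies: an addition waits
 * for removals scheduled before it, a removal waits for additions scheduled
 * before it, and hwpa_exit() flushes both domains.
 */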

static void remove_session_async(void *session_ptr, async_cookie_t cookie)
{
	unsigned long hw_handle;
	struct avm_pa_session *avm_session = session_ptr;

	/* Make sure the session was added. */
	async_synchronize_cookie_domain(cookie, &hwpa_add_domain);

	hw_handle = (unsigned long)avm_pa_get_hw_session(avm_session);
	if (hw_handle != hw_handle_invalid)
		errstat_track(edom, hwpa_backend_rem_session(hw_handle));

	release_session(avm_session);
}

static int hwpa_remove_session(struct avm_pa_session *avm_session)
{
	/* Workaround for JZ-71010:
	 * Protect hwpa_session_stats().
	 */
	spin_lock_bh(&async_rem_lock);
	async_schedule_domain(remove_session_async, avm_session,
			      &hwpa_rem_domain);
	spin_unlock_bh(&async_rem_lock);

	return AVM_PA_TX_SESSION_ADDED;
}

static int hwpa_session_stats(struct avm_pa_session *avm_session,
			      struct avm_pa_session_stats *stats)
{
	unsigned long hw_handle;
	int rv = -1;

	/* Workaround for JZ-71010:
	 * Do not race with remove_session_async(). Make sure it's neither
	 * scheduled nor will be scheduled as long as hw_handle is being used
	 * here.
	 * TODO: bundle/replace hw_handle with a kobj to use per-session
	 * reference counting. This will likely involve implicit removal of a
	 * backend session and therefore a change of the backend interface.
	 */
	spin_lock_bh(&async_rem_lock);
	if (!list_empty(&hwpa_rem_domain.pending)) {
		memset(stats, 0, sizeof(*stats));
		goto err_unlock;
	}

	hw_handle = (unsigned long)avm_pa_get_hw_session(avm_session);

	/* Ignore invalid handles for pending add */
	if (hw_handle == hw_handle_invalid)
		goto err_unlock;

	if (errstat_track(edom, hwpa_backend_stats(hw_handle, stats)) ==
	    HWPA_BACKEND_SUCCESS)
		rv = 0;

err_unlock:
	spin_unlock_bh(&async_rem_lock);
	return rv;
}

static int notifier_fn(struct notifier_block *nb, unsigned long action,
		       void *data)
{
	struct netdev_notifier_info *info = data;
	avm_pid_handle pid;

	switch (action) {
	case NETDEV_REGISTER:
		pid = AVM_PA_DEVINFO(info->dev)->pid_handle;
		if (pid)
			avm_pa_pid_activate_hw_accelaration(pid);
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}
}

struct notifier_block notifier = { .notifier_call = notifier_fn };

/* These are optional for the backend to implement */
__attribute__((weak)) int alloc_rx_channel(avm_pid_handle pid_handle)
{
	return -1;
}

__attribute__((weak)) int alloc_tx_channel(avm_pid_handle pid_handle)
{
	return -1;
}

__attribute__((weak)) int free_rx_channel(avm_pid_handle pid_handle)
{
	return -1;
}

__attribute__((weak)) int free_tx_channel(avm_pid_handle pid_handle)
{
	return -1;
}

__attribute__((weak)) int try_to_accelerate(avm_pid_handle pid_handle,
					    struct sk_buff *skb)
{
	/* Pretend that ingress classification already happened so that
	 * avm_pa_pid_receive is skipped. The vep probe of offload_dp passes
	 * the packet to the offload hardware if the corresponding netdev is
	 * registered.
	 * Also disable avm_pa acceleration for this packet in case the probe
	 * does not redirect it. This should prevent any hiccups in
	 * avm_pa_pid_snoop_transmit due to incomplete classification.
	 */
	AVM_PKT_INFO(skb)->ingress_pid_handle = pid_handle;
	avm_pa_do_not_accelerate(skb);

	/* Let avm_dev_pid_receive continue to process the packet. */
	return AVM_PA_RX_BYPASS;
}
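
/* Callback table registered with avm_pa in hwpa_init(). Session add, remove
 * and stats are handled above; the channel hooks fall back to the weak stubs
 * (which just report failure) unless a backend overrides them.
 * AVM_HW_F_NO_BSESSION opts this driver out of bsession (bridge session)
 * offload.
 */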

static struct avm_hardware_pa hw_pa = {
	.add_session = hwpa_add_session,
	.remove_session = hwpa_remove_session,
	.session_stats = hwpa_session_stats,
	.alloc_rx_channel = alloc_rx_channel,
	.alloc_tx_channel = alloc_tx_channel,
	.free_rx_channel = free_rx_channel,
	.free_tx_channel = free_tx_channel,
	.try_to_accelerate = try_to_accelerate,
	.flags = AVM_HW_F_NO_BSESSION,
};

int __init hwpa_init(void)
{
	edom = errstat_domain_init(&edom_storage, hwpa_backend_rv_descriptions,
				   hwpa_backend_rv_counters,
				   ARRAY_SIZE(hwpa_backend_rv_counters));

	hwpa_kobj = kobject_create_and_add(THIS_MODULE->name, kernel_kobj);
	if (!hwpa_kobj)
		return -1;
	errstat_sysfs_attach(edom, hwpa_kobj, "backend_errors");

	hwpa_backend_init();
	register_netdevice_notifier(&notifier);
	avm_pa_register_hardware_pa(&hw_pa);

	return 0;
}

void __exit hwpa_exit(void)
{
	unregister_netdevice_notifier(&notifier);
#ifdef AVM_PA_UNREGISTER_HARDWARE_PA_SYNC
	avm_pa_unregister_hardware_pa_sync(&hw_pa);
#else
	avm_pa_register_hardware_pa(NULL);
#endif
	async_synchronize_full_domain(&hwpa_add_domain);
	async_synchronize_full_domain(&hwpa_rem_domain);
	async_unregister_domain(&hwpa_add_domain);
	async_unregister_domain(&hwpa_rem_domain);

	hwpa_backend_exit();
	errstat_sysfs_detach(edom);
	kobject_put(hwpa_kobj);
}

module_init(hwpa_init);
module_exit(hwpa_exit);