/*
 *	Forwarding decision
 *	Linux ethernet bridge
 *
 *	Authors:
 *	Lennert Buytenhek		<buytenh@gnu.org>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <linux/netfilter_bridge.h>
#include "br_private.h"
#include <linux/avm_pa.h>

static int deliver_clone(const struct net_bridge_port *prev,
			 struct sk_buff *skb,
			 void (*__packet_hook)(const struct net_bridge_port *p,
					       struct sk_buff *skb));

/* Don't forward packets to originating port or forwarding disabled */
static inline int should_deliver(const struct net_bridge_port *p,
				 const struct sk_buff *skb)
{
	struct net_bridge_vlan_group *vg;

	vg = nbp_vlan_group_rcu(p);
	return ((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) &&
		br_allowed_egress(vg, skb) && p->state == BR_STATE_FORWARDING;
}

#ifdef CONFIG_AVM_BRIDGE_MULTICAST_TO_UNICAST
static inline int br_forward_as_unicast(struct sk_buff *skb)
{
	struct net_bridge_port *port = br_port_get_rcu(skb->dev);
	struct net_bridge_port_group *pg = BR_OUTPUT_SKB_CB(skb)->pg;
	struct net_bridge_group_recipient *p;

	/* Nothing to do? */
	if (likely((port->flags & BR_MULTICAST_TO_UNICAST) == 0) || !pg)
		return 0;

	/* Should be BUG_ON() but we don't dare risk reboots now */
	if (WARN(pg->port != port, "Ports do not match for group %s vs %s\n",
		 pg->port->dev->name, port->dev->name)) {
		return 0;
	}

	/* Explicit MDB entries added by user space do not have snooped
	 * recipients, proceed as multicast. Likewise if there are too many
	 * recipients.
	 */
	if (pg->num_recipients == 0 ||
	    pg->num_recipients > port->multicast_to_unicast_threshold)
		return 0;

	/* CPA/TMA 2016/04/27: skb is cloned if transmitted to more than one
	 * port or if passed to the local system as well. If not cloned we try
	 * to re-use the original skb.
	 * We have to unclone the skb here as the packet must not go through
	 * anyone else, because we manipulate the actual data. skb_cow() does
	 * exactly that, but the clone must not be shared (this is the case
	 * here because we come from deliver_clone()).
	 */
	if (skb_cow(skb, 0) != 0 || !pskb_may_pull(skb, ETH_ALEN)) {
		/* proceed as multicast if out-of-memory */
		return 0;
	}

	list_for_each_entry_rcu(p, &pg->recipients, list) {
		struct net_bridge_fdb_entry *fdb = p->recipient;
		struct sk_buff *ucast_skb;
		struct ethhdr *ucast_eth;

		/* Re-use the original skb if possible. The skb must be copied
		 * for every recipient except the last one, where we can use
		 * the (possibly uncloned) original skb.
		 */
		if (list_is_last(&p->list, &pg->recipients))
			ucast_skb = skb;
		else {
			ucast_skb = pskb_copy(skb, GFP_ATOMIC);
			if (unlikely(!ucast_skb)) {
				/* Abort if out-of-memory. Here we prefer to
				 * drop instead of giving recipients
				 * duplicated packets.
				 */
				kfree_skb(skb);
				break;
			}
		}

		/* modify and send */
		ucast_eth = eth_hdr(ucast_skb);
		memcpy(ucast_eth->h_dest, &fdb->addr.addr, ETH_ALEN);
		dev_queue_xmit(ucast_skb);
	}

	return 1;
}

static inline int is_udp(struct sk_buff *skb)
{
	/*
	 * only change UDP packets to unicast
	 * 2014-12-12, calle
	 */
	switch (skb->protocol) {
	case htons(ETH_P_IP):
		return ip_hdr(skb)->protocol == IPPROTO_UDP;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	case htons(ETH_P_IPV6):
		return ipv6_hdr(skb)->nexthdr == IPPROTO_UDP;
#endif
	default:
		return 0;
	}
}

static inline int should_convert_to_unicast(struct sk_buff *skb,
					    const u8 *dest)
{
	return is_multicast_ether_addr(dest) &&
		!is_broadcast_ether_addr(dest) && is_udp(skb);
}
#else
static inline int br_forward_as_unicast(struct sk_buff *skb)
{
	return 0;
}

static inline int should_convert_to_unicast(struct sk_buff *skb,
					    const u8 *dest)
{
	return 0;
}
#endif
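/*
 * Restore the Ethernet header and hand the skb to the device layer. With
 * CONFIG_AVM_BRIDGE_MULTICAST_TO_UNICAST, eligible multicast UDP packets
 * are fanned out as unicast copies by br_forward_as_unicast() above;
 * everything else is transmitted unchanged.
 */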
int br_dev_queue_push_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	u8 *dest;

	dest = skb_push(skb, ETH_HLEN);
	if (!is_skb_forwardable(skb->dev, skb))
		goto drop;

	br_drop_fake_rtable(skb);
	skb_sender_cpu_clear(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    (skb->protocol == htons(ETH_P_8021Q) ||
	     skb->protocol == htons(ETH_P_8021AD))) {
		int depth;

		if (!__vlan_get_protocol(skb, skb->protocol, &depth))
			goto drop;

		skb_set_network_header(skb, depth);
	}

	if (!should_convert_to_unicast(skb, dest) ||
	    !br_forward_as_unicast(skb))
		dev_queue_xmit(skb);

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL_GPL(br_dev_queue_push_xmit);

int br_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	return BR_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING,
		       net, sk, skb, NULL, skb->dev,
		       br_dev_queue_push_xmit);
}
EXPORT_SYMBOL_GPL(br_forward_finish);

static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
{
	struct net_bridge_vlan_group *vg;

	vg = nbp_vlan_group_rcu(to);
	skb = br_handle_vlan(to->br, vg, skb);
	if (!skb)
		return;

	skb->dev = to->dev;

	if (unlikely(netpoll_tx_running(to->br->dev))) {
		skb_push(skb, ETH_HLEN);
		if (!is_skb_forwardable(skb->dev, skb))
			kfree_skb(skb);
		else
			br_netpoll_send_skb(to, skb);
		return;
	}

	BR_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT,
		dev_net(skb->dev), NULL, skb, NULL, skb->dev,
		br_forward_finish);
}

static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
{
	struct net_bridge_vlan_group *vg;
	struct net_device *indev;

	if (skb_warn_if_lro(skb)) {
		kfree_skb(skb);
		return;
	}

	vg = nbp_vlan_group_rcu(to);
	skb = br_handle_vlan(to->br, vg, skb);
	if (!skb)
		return;

	indev = skb->dev;
	skb->dev = to->dev;
	skb_forward_csum(skb);

	BR_HOOK(NFPROTO_BRIDGE, NF_BR_FORWARD,
		dev_net(indev), NULL, skb, indev, skb->dev,
		br_forward_finish);
}

/* called with rcu_read_lock */
void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
{
	if (to && should_deliver(to, skb)) {
		__br_deliver(to, skb);
		return;
	}

	kfree_skb(skb);
}

/* called with rcu_read_lock */
void br_forward(const struct net_bridge_port *to, struct sk_buff *skb,
		struct sk_buff *skb0)
{
	if (to && should_deliver(to, skb) && !(to->flags & BR_ISOLATE_MODE)) {
		if (skb0)
			deliver_clone(to, skb, __br_forward);
		else
			__br_forward(to, skb);
		return;
	}

	if (!skb0)
		kfree_skb(skb);
}
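/*
 * Deliver a clone of the skb through __packet_hook so that the original
 * skb remains available for later ports or for local delivery. Accounts
 * a tx_dropped on the bridge device if the clone cannot be allocated.
 */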
static int deliver_clone(const struct net_bridge_port *prev,
			 struct sk_buff *skb,
			 void (*__packet_hook)(const struct net_bridge_port *p,
					       struct sk_buff *skb))
{
	struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;

	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb) {
		dev->stats.tx_dropped++;
		return -ENOMEM;
	}

	__packet_hook(prev, skb);
	return 0;
}

static struct net_bridge_port *maybe_deliver(
	struct net_bridge_port *prev, struct net_bridge_port *p,
	struct sk_buff *skb,
	void (*__packet_hook)(const struct net_bridge_port *p,
			      struct sk_buff *skb))
{
	int err;

	if (!should_deliver(p, skb))
		return prev;

	if (!prev)
		goto out;

	err = deliver_clone(prev, skb, __packet_hook);
	if (err)
		return ERR_PTR(err);

out:
	return p;
}

/* called under bridge lock */
static void br_flood(struct net_bridge *br, struct sk_buff *skb,
		     struct sk_buff *skb0,
		     void (*__packet_hook)(const struct net_bridge_port *p,
					   struct sk_buff *skb),
		     bool unicast, bool forward)
{
	struct net_bridge_port *p;
	struct net_bridge_port *prev;
	enum br_pkt_type pkt_type = BR_PKT_UNICAST;
	const unsigned char *dest;

	dest = eth_hdr(skb)->h_dest;
	if (is_broadcast_ether_addr(dest))
		pkt_type = BR_PKT_BROADCAST;
	else if (is_multicast_ether_addr(dest))
		pkt_type = BR_PKT_MULTICAST;

	prev = NULL;

	/* AVM/TMA 20170315 JZ-33183
	 * There are some cases where we come here for multicast traffic, for
	 * example if all mdb entries are gone due to down'ing the bridge
	 * (multid may do this). In the past, br_forward_as_unicast() accessed
	 * an uninitialized pointer.
	 */
	BR_OUTPUT_SKB_CB(skb)->pg = NULL;

	list_for_each_entry_rcu(p, &br->port_list, list) {
		if (forward && (p->flags & BR_ISOLATE_MODE))
			continue;

		/* Do not flood unicast traffic to ports that turn it off, nor
		 * other traffic if flood off, except for traffic we originate
		 */
		switch (pkt_type) {
		case BR_PKT_UNICAST:
			if (!(p->flags & BR_FLOOD))
				continue;
			break;
		case BR_PKT_MULTICAST:
			if (!(p->flags & BR_MCAST_FLOOD) && skb->dev != br->dev)
				continue;
			break;
		case BR_PKT_BROADCAST:
			if (!(p->flags & BR_BCAST_FLOOD) && skb->dev != br->dev)
				continue;
			break;
		}

		/* Do not flood to ports that enable proxy ARP */
		if (p->flags & BR_PROXYARP)
			continue;
		if ((p->flags & BR_PROXYARP_WIFI) &&
		    BR_INPUT_SKB_CB(skb)->proxyarp_replied)
			continue;

		prev = maybe_deliver(prev, p, skb, __packet_hook);
		if (IS_ERR(prev))
			goto out;
	}

	if (!prev)
		goto out;

	if (skb0)
		deliver_clone(prev, skb, __packet_hook);
	else
		__packet_hook(prev, skb);
	return;

out:
	if (!skb0)
		kfree_skb(skb);
}

/* called with rcu_read_lock */
void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb, bool unicast)
{
	/* FIXME: Try to accelerate */
	avm_pa_do_not_accelerate(skb);
	br_flood(br, skb, NULL, __br_deliver, unicast, false);
}
EXPORT_SYMBOL_GPL(br_deliver);

/* called under bridge lock */
void br_flood_forward(struct net_bridge *br, struct sk_buff *skb,
		      struct sk_buff *skb2, bool unicast)
{
	/* FIXME: Try to accelerate */
	avm_pa_do_not_accelerate(skb);
	if (br_flood_rl(br, skb, skb->dev))
		br_flood(br, skb, skb2, __br_forward, unicast, true);
	else if (!skb2)
		kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(br_forward);

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
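/*
 * Deliver to every port subscribed to the group (mdst) and to every port
 * behind which a multicast router was seen. Both lists are kept sorted in
 * descending order of port pointer value, so they can be merged in a
 * single pass; a port that appears in both lists is delivered to once.
 * E.g. with group ports {0xc, 0xa} and router ports {0xb, 0xa}, the walk
 * visits 0xc, 0xb, 0xa and advances both lists when it reaches 0xa.
 */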
/* called with rcu_read_lock */
static void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
			       struct sk_buff *skb, struct sk_buff *skb0,
			       void (*__packet_hook)(
					const struct net_bridge_port *p,
					struct sk_buff *skb))
{
	struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *prev = NULL;
	struct net_bridge_port_group *p;
	struct hlist_node *rp;

	rp = rcu_dereference(hlist_first_rcu(&br->router_list));
	p = mdst ? rcu_dereference(mdst->ports) : NULL;

	BR_OUTPUT_SKB_CB(skb)->pg = NULL;

	while (p || rp) {
		struct net_bridge_port *port, *lport, *rport;

		lport = p ? p->port : NULL;
		rport = rp ? hlist_entry(rp, struct net_bridge_port, rlist) :
			     NULL;

		port = (unsigned long)lport > (unsigned long)rport ?
		       lport : rport;

		prev = maybe_deliver(prev, port, skb, __packet_hook);
		if (IS_ERR(prev))
			goto out;

		/* Store the port_group which is needed by
		 * br_forward_as_unicast() to avoid a costly lookup in the hot
		 * path. BR_OUTPUT_SKB_CB(skb)->pg must correspond to the prev
		 * port, because the skb will be delivered to that one next.
		 * Therefore a) assign BR_OUTPUT_SKB_CB(skb)->pg before p is
		 * updated and b) only change it when maybe_deliver() was not
		 * a no-op.
		 */
		if (prev == lport)
			BR_OUTPUT_SKB_CB(skb)->pg = p;
		else if (prev == rport)
			BR_OUTPUT_SKB_CB(skb)->pg = NULL;
		/* else: prev == (old) prev; i.e. should_deliver() was a no-op
		 * and the port was skipped
		 */

		if ((unsigned long)lport >= (unsigned long)port)
			p = rcu_dereference(p->next);
		if ((unsigned long)rport >= (unsigned long)port)
			rp = rcu_dereference(hlist_next_rcu(rp));
	}

	if (!prev)
		goto out;

	if (skb0)
		deliver_clone(prev, skb, __packet_hook);
	else
		__packet_hook(prev, skb);
	return;

out:
	if (!skb0)
		kfree_skb(skb);
}

/* called with rcu_read_lock */
void br_multicast_deliver(struct net_bridge_mdb_entry *mdst,
			  struct sk_buff *skb)
{
	br_multicast_flood(mdst, skb, NULL, __br_deliver);
}

/* called with rcu_read_lock */
void br_multicast_forward(struct net_bridge_mdb_entry *mdst,
			  struct sk_buff *skb, struct sk_buff *skb2)
{
	br_multicast_flood(mdst, skb, skb2, __br_forward);
}
#endif