--- zzzz-none-000/linux-2.6.32.61/net/bridge/br_forward.c	2013-06-10 09:43:48.000000000 +0000
+++ ar10-7272-687/linux-2.6.32.61/net/bridge/br_forward.c	2016-04-29 14:21:47.000000000 +0000
@@ -11,40 +11,148 @@
  *	2 of the License, or (at your option) any later version.
  */
 
+#include <linux/err.h>
+#include <linux/slab.h>
 #include <linux/kernel.h>
 #include <linux/netdevice.h>
+#include <linux/netpoll.h>
 #include <linux/skbuff.h>
 #include <linux/if_vlan.h>
 #include <linux/netfilter_bridge.h>
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#include <net/ipv6.h>
+#endif
+#ifdef CONFIG_AVM_PA
+#include <linux/avm_pa.h>
+#endif
+
 #include "br_private.h"
 
-/* Don't forward packets to originating port or forwarding diasabled */
+static int deliver_clone(const struct net_bridge_port *prev,
+			 struct sk_buff *skb,
+			 void (*__packet_hook)(const struct net_bridge_port *p,
+					       struct sk_buff *skb));
+
+/* Don't forward packets to originating port or forwarding disabled */
 static inline int should_deliver(const struct net_bridge_port *p,
 				 const struct sk_buff *skb)
 {
-	return (((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) &&
-		p->state == BR_STATE_FORWARDING);
+	return ((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) &&
+		br_allowed_egress(p->br, nbp_get_vlan_info(p), skb) &&
+		p->state == BR_STATE_FORWARDING;
+}
+
+
+#ifdef CONFIG_AVM_BRIDGE_MULTICAST_TO_UNICAST
+static inline int br_forward_as_unicast(struct sk_buff *skb)
+{
+	struct net_bridge_port *port = br_port_get_rcu(skb->dev);
+	struct net_bridge_port_group *pg = BR_INPUT_SKB_CB(skb)->pg;
+	struct net_bridge_group_recipient *p;
+
+	/* Nothing to do? */
+	if (likely((port->flags & BR_MULTICAST_TO_UNICAST) == 0) || !pg)
+		return 0;
+
+	/* Should be BUG_ON(), but I don't dare risk reboots now */
+	if (WARN(pg->port != port, "Ports do not match for group %s vs %s\n",
+		 pg->port->dev->name, port->dev->name)) {
+		return 0;
+	}
+
+	/* Explicit MDB entries added by user space do not have snooped
+	 * recipients; proceed as multicast. Likewise if there are too many recipients. */
+	if (pg->num_recipients == 0 || pg->num_recipients > port->multicast_to_unicast_threshold)
+		return 0;
+
+	/* CPA/TMA 2016/04/27: skb is cloned if transmitted to more than one port or
+	 * if passed to the local system as well. If not cloned, we try to re-use
+	 * the original skb.
+	 * We have to unclone the skb here as the packet must not go through anyone
+	 * else, because we manipulate the actual data. skb_cow() does
+	 * exactly that, but the clone must not be shared (this is the case here
+	 * because we come from deliver_clone()).
+	 */
+	if (skb_cow(skb, 0) != 0 || !pskb_may_pull(skb, ETH_ALEN)) {
+		/* proceed as multicast if out-of-memory */
+		return 0;
+	}
+
+	list_for_each_entry_rcu(p, &pg->recipients, list) {
+		struct net_bridge_fdb_entry *fdb = p->recipient;
+		struct sk_buff *ucast_skb;
+		struct ethhdr *ucast_eth;
+
+		/* Re-use the original skb if possible. The skb must be copied
+		 * for every recipient except the last one, where we can
+		 * use the (possibly uncloned) original skb. */
+		if (list_is_last(&p->list, &pg->recipients))
+			ucast_skb = skb;
+		else {
+			ucast_skb = pskb_copy(skb, GFP_ATOMIC);
+			if (unlikely(!ucast_skb)) {
+				/* Abort if out-of-memory. Here we prefer to drop
+				 * instead of giving recipients duplicated packets. */
+				kfree_skb(skb);
+				break;
+			}
+		}
+
+		/* modify and send */
+		ucast_eth = eth_hdr(ucast_skb);
+		memcpy(ucast_eth->h_dest, &fdb->addr.addr, ETH_ALEN);
+		dev_queue_xmit(ucast_skb);
+	}
+
+	return 1;
+}
+
+static inline int is_udp(struct sk_buff *skb)
+{
+	/*
+	 * only change UDP packets to unicast
+	 * 2014-12-12, calle
+	 */
+	switch (skb->protocol) {
+	case htons(ETH_P_IP):
+		return ip_hdr(skb)->protocol == IPPROTO_UDP;
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+	case htons(ETH_P_IPV6):
+		return ipv6_hdr(skb)->nexthdr == IPPROTO_UDP;
+#endif
+	default:
+		return 0;
+	}
 }
 
-static inline unsigned packet_length(const struct sk_buff *skb)
+static inline int should_convert_to_unicast(struct sk_buff *skb, const u8 *dest)
 {
-	return skb->len - (skb->protocol == htons(ETH_P_8021Q) ? VLAN_HLEN : 0);
+	return is_multicast_ether_addr(dest) && !is_broadcast_ether_addr(dest) && is_udp(skb);
 }
+#else
+static inline int br_forward_as_unicast(struct sk_buff *skb)
+{
+	return 0;
+}
+static inline int should_convert_to_unicast(struct sk_buff *skb, const u8 *dest)
+{
+	return 0;
+}
+#endif
+
 int br_dev_queue_push_xmit(struct sk_buff *skb)
 {
-	/* drop mtu oversized packets except gso */
-	if (packet_length(skb) > skb->dev->mtu && !skb_is_gso(skb))
+	/* ip_fragment doesn't copy the MAC header */
+	if (nf_bridge_maybe_copy_header(skb) ||
+	    !is_skb_forwardable(skb->dev, skb)) {
 		kfree_skb(skb);
-	else {
-		/* ip_refrag calls ip_fragment, doesn't copy the MAC header. */
-		if (nf_bridge_maybe_copy_header(skb))
-			kfree_skb(skb);
-		else {
-			skb_push(skb, ETH_HLEN);
-
+	} else {
+		/* dest points to ethhdr->h_dest */
+		u8 *dest = skb_push(skb, ETH_HLEN);
+		br_drop_fake_rtable(skb);
+		if (!should_convert_to_unicast(skb, dest) || !br_forward_as_unicast(skb))
 			dev_queue_xmit(skb);
-		}
 	}
 
 	return 0;
@@ -52,16 +160,32 @@
 
 int br_forward_finish(struct sk_buff *skb)
 {
-	return NF_HOOK(PF_BRIDGE, NF_BR_POST_ROUTING, skb, NULL, skb->dev,
+	skb_mark_priority(skb);
+	return NF_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING, skb, NULL, skb->dev,
 		       br_dev_queue_push_xmit);
 
 }
 
 static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
 {
+	skb = br_handle_vlan(to->br, nbp_get_vlan_info(to), skb);
+	if (!skb)
+		return;
+
 	skb->dev = to->dev;
-	NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
-			br_forward_finish);
+
+	if (unlikely(netpoll_tx_running(to->br->dev))) {
+		if (!is_skb_forwardable(skb->dev, skb))
+			kfree_skb(skb);
+		else {
+			skb_push(skb, ETH_HLEN);
+			br_netpoll_send_skb(to, skb);
+		}
+		return;
+	}
+
+	NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
+		br_forward_finish);
 }
 
 static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
@@ -73,18 +197,22 @@
 		return;
 	}
 
+	skb = br_handle_vlan(to->br, nbp_get_vlan_info(to), skb);
+	if (!skb)
+		return;
+
 	indev = skb->dev;
 	skb->dev = to->dev;
 	skb_forward_csum(skb);
 
-	NF_HOOK(PF_BRIDGE, NF_BR_FORWARD, skb, indev, skb->dev,
-			br_forward_finish);
+	NF_HOOK(NFPROTO_BRIDGE, NF_BR_FORWARD, skb, indev, skb->dev,
+		br_forward_finish);
 }
 
 /* called with rcu_read_lock */
 void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
 {
-	if (should_deliver(to, skb)) {
+	if (to && should_deliver(to, skb)) {
 		__br_deliver(to, skb);
 		return;
 	}
@@ -93,61 +221,187 @@
 }
 
 /* called with rcu_read_lock */
-void br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
+void br_forward(const struct net_bridge_port *to, struct sk_buff *skb, struct sk_buff *skb0)
 {
 	if (should_deliver(to, skb)) {
-		__br_forward(to, skb);
+		if (skb0)
+			deliver_clone(to, skb, __br_forward);
+		else
+			__br_forward(to, skb);
 		return;
 	}
 
-	kfree_skb(skb);
+	if (!skb0)
+		kfree_skb(skb);
 }
 
-/* called under bridge lock */
-static void br_flood(struct net_bridge *br, struct sk_buff *skb,
+static int deliver_clone(const struct net_bridge_port *prev,
+			 struct sk_buff *skb,
+			 void (*__packet_hook)(const struct net_bridge_port *p,
+					       struct sk_buff *skb))
+{
+	struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
+
+	skb = skb_clone(skb, GFP_ATOMIC);
+	if (!skb) {
+		dev->stats.tx_dropped++;
+		return -ENOMEM;
+	}
+
+	__packet_hook(prev, skb);
+	return 0;
+}
+
+static struct net_bridge_port *maybe_deliver(
+	struct net_bridge_port *prev, struct net_bridge_port *p,
+	struct sk_buff *skb,
 	void (*__packet_hook)(const struct net_bridge_port *p,
 			      struct sk_buff *skb))
 {
+	int err;
+
+	if (!should_deliver(p, skb))
+		return prev;
+
+	if (!prev)
+		goto out;
+
+	err = deliver_clone(prev, skb, __packet_hook);
+	if (err)
+		return ERR_PTR(err);
+
+out:
+	return p;
+}
+
+/* called under bridge lock */
+static void br_flood(struct net_bridge *br, struct sk_buff *skb,
+		     struct sk_buff *skb0,
+		     void (*__packet_hook)(const struct net_bridge_port *p,
+					   struct sk_buff *skb),
+		     bool unicast)
+{
 	struct net_bridge_port *p;
 	struct net_bridge_port *prev;
 
 	prev = NULL;
 
 	list_for_each_entry_rcu(p, &br->port_list, list) {
-		if (should_deliver(p, skb)) {
-			if (prev != NULL) {
-				struct sk_buff *skb2;
-
-				if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL) {
-					br->dev->stats.tx_dropped++;
-					kfree_skb(skb);
-					return;
-				}
-
-				__packet_hook(prev, skb2);
-			}
-
-			prev = p;
-		}
+		/* Do not flood unicast traffic to ports that turn it off */
+		if (unicast && !(p->flags & BR_FLOOD))
+			continue;
+		prev = maybe_deliver(prev, p, skb, __packet_hook);
+		if (IS_ERR(prev))
+			goto out;
 	}
 
-	if (prev != NULL) {
+	if (!prev)
+		goto out;
+
+	if (skb0)
+		deliver_clone(prev, skb, __packet_hook);
+	else
 		__packet_hook(prev, skb);
-		return;
-	}
+	return;
 
-	kfree_skb(skb);
+out:
+	if (!skb0)
+		kfree_skb(skb);
 }
 
 /* called with rcu_read_lock */
-void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb)
+void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb, bool unicast)
 {
-	br_flood(br, skb, __br_deliver);
+#ifdef CONFIG_AVM_PA
+	avm_pa_do_not_accelerate(skb);
+#endif
+	br_flood(br, skb, NULL, __br_deliver, unicast);
 }
 
 /* called under bridge lock */
-void br_flood_forward(struct net_bridge *br, struct sk_buff *skb)
+void br_flood_forward(struct net_bridge *br, struct sk_buff *skb,
+		      struct sk_buff *skb2, bool unicast)
+{
+#ifdef CONFIG_AVM_PA
+	avm_pa_do_not_accelerate(skb);
+#endif
+	br_flood(br, skb, skb2, __br_forward, unicast);
+}
+
+#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
+/* called with rcu_read_lock */
+static void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
+			       struct sk_buff *skb, struct sk_buff *skb0,
+			       void (*__packet_hook)(
+					const struct net_bridge_port *p,
+					struct sk_buff *skb))
+{
+	struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
+	struct net_bridge *br = netdev_priv(dev);
+	struct net_bridge_port *prev = NULL;
+	struct net_bridge_port_group *p;
+	struct hlist_node *rp;
+
+	rp = rcu_dereference(hlist_first_rcu(&br->router_list));
+	p = mdst ? rcu_dereference(mdst->ports) : NULL;
+	BR_INPUT_SKB_CB(skb)->pg = NULL;
+	while (p || rp) {
+		struct net_bridge_port *port, *lport, *rport;
+
+		lport = p ? p->port : NULL;
+		/*--- hlist_entry() does not return NULL if rp is set ---*/
+		rport = rp ? hlist_entry(rp, struct net_bridge_port, rlist) : NULL;
+
+		port = (unsigned long)lport > (unsigned long)rport ?
+		       lport : rport;
+
+		prev = maybe_deliver(prev, port, skb, __packet_hook);
+		if (IS_ERR(prev))
+			goto out;
+
+		/* Store the port group, which is needed by br_forward_as_unicast() to
+		 * avoid a costly lookup in the hot path. BR_INPUT_SKB_CB(skb)->pg must
+		 * correspond to the prev port, because the skb will be delivered to
+		 * that one next. Therefore a) assign BR_INPUT_SKB_CB(skb)->pg before p
		 * is updated and b) only change it when maybe_deliver() was not a no-op. */
+		if (prev == lport)
+			BR_INPUT_SKB_CB(skb)->pg = p;
+		else if (prev == rport)
+			BR_INPUT_SKB_CB(skb)->pg = NULL;
+		/* else: prev == (old) prev; i.e. should_deliver() was a no-op and port was skipped */
+
+		if ((unsigned long)lport >= (unsigned long)port)
+			p = rcu_dereference(p->next);
+		if ((unsigned long)rport >= (unsigned long)port)
+			rp = rcu_dereference(hlist_next_rcu(rp));
+	}
+
+	if (!prev)
+		goto out;
+
+	if (skb0)
+		deliver_clone(prev, skb, __packet_hook);
+	else
+		__packet_hook(prev, skb);
+	return;
+
+out:
+	if (!skb0)
+		kfree_skb(skb);
+}
+
+/* called with rcu_read_lock */
+void br_multicast_deliver(struct net_bridge_mdb_entry *mdst,
+			  struct sk_buff *skb)
+{
+	br_multicast_flood(mdst, skb, NULL, __br_deliver);
+}
+
+/* called with rcu_read_lock */
+void br_multicast_forward(struct net_bridge_mdb_entry *mdst,
+			  struct sk_buff *skb, struct sk_buff *skb2)
 {
-	br_flood(br, skb, __br_forward);
+	br_multicast_flood(mdst, skb, skb2, __br_forward);
 }
+#endif
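
For readers who want to see the core idea of the CONFIG_AVM_BRIDGE_MULTICAST_TO_UNICAST path in isolation, the following standalone C sketch mirrors the copy-per-recipient, rewrite-destination-MAC pattern of br_forward_as_unicast() above. It is an illustration only, not part of the patch; struct frame, the recipient list and send_frame() are made-up stand-ins for the kernel's sk_buff, snooped recipient list and dev_queue_xmit(), and all kernel-specific details (skb cloning, RCU, FDB lookup) are deliberately left out.

/* Illustrative sketch only, not part of the patch above. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define ETH_ALEN 6

struct frame {
	unsigned char dest[ETH_ALEN];	/* first six bytes of the Ethernet header */
	unsigned char payload[64];
	size_t len;
};

/* Stand-in for dev_queue_xmit(): just report where the copy would go. */
static void send_frame(const struct frame *f)
{
	printf("xmit to %02x:%02x:%02x:%02x:%02x:%02x (%zu bytes)\n",
	       f->dest[0], f->dest[1], f->dest[2],
	       f->dest[3], f->dest[4], f->dest[5], f->len);
}

/* Send one multicast frame as per-recipient unicast copies. Mirrors the
 * copy-all-but-last pattern: every recipient except the last gets a copy,
 * the last one re-uses the original buffer. On allocation failure the
 * frame is dropped rather than delivered twice to some recipients. */
static void multicast_to_unicast(struct frame *f,
				 const unsigned char (*recipients)[ETH_ALEN],
				 size_t n)
{
	size_t i;

	for (i = 0; i < n; i++) {
		struct frame *copy;

		if (i == n - 1) {
			copy = f;		/* last recipient: re-use the original */
		} else {
			copy = malloc(sizeof(*copy));
			if (!copy) {
				free(f);	/* prefer dropping over duplicates */
				return;
			}
			memcpy(copy, f, sizeof(*copy));
		}

		memcpy(copy->dest, recipients[i], ETH_ALEN);	/* rewrite destination MAC */
		send_frame(copy);
		if (copy != f)
			free(copy);
	}
	free(f);
}

int main(void)
{
	static const unsigned char recipients[][ETH_ALEN] = {
		{ 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
		{ 0x02, 0x00, 0x00, 0x00, 0x00, 0x02 },
	};
	struct frame *f = calloc(1, sizeof(*f));

	if (!f)
		return 1;
	/* 01:00:5e:... stands for the snooped multicast group address */
	memcpy(f->dest, "\x01\x00\x5e\x01\x02\x03", ETH_ALEN);
	f->len = ETH_ALEN + sizeof(f->payload);
	multicast_to_unicast(f, recipients, 2);
	return 0;
}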