--- zzzz-none-000/linux-4.4.271/net/bridge/br_forward.c	2021-06-03 06:22:09.000000000 +0000
+++ hawkeye-5590-750/linux-4.4.271/net/bridge/br_forward.c	2023-04-19 10:22:30.000000000 +0000
@@ -21,6 +21,8 @@
 #include <linux/netfilter_bridge.h>
 #include "br_private.h"
 
+#include <linux/avm_pa.h>
+
 static int deliver_clone(const struct net_bridge_port *prev,
 			 struct sk_buff *skb,
 			 void (*__packet_hook)(const struct net_bridge_port *p,
@@ -37,9 +39,123 @@
 	       br_allowed_egress(vg, skb) &&
 	       p->state == BR_STATE_FORWARDING;
 }
 
+#ifdef CONFIG_AVM_BRIDGE_MULTICAST_TO_UNICAST
+static inline int br_forward_as_unicast(struct sk_buff *skb)
+{
+	struct net_bridge_port *port = br_port_get_rcu(skb->dev);
+	struct net_bridge_port_group *pg = BR_OUTPUT_SKB_CB(skb)->pg;
+	struct net_bridge_group_recipient *p;
+
+	/* Nothing to do? */
+	if (likely((port->flags & BR_MULTICAST_TO_UNICAST) == 0) || !pg)
+		return 0;
+
+	/* Should be a BUG_ON(), but we don't dare risk reboots for now */
+	if (WARN(pg->port != port, "Ports do not match for group %s vs %s\n",
+		 pg->port->dev->name, port->dev->name)) {
+		return 0;
+	}
+
+	/* Explicit MDB entries added by user space do not have snooped
+	 * recipients; proceed as multicast. Likewise if there are too many recipients. */
+	if (pg->num_recipients == 0 || pg->num_recipients > port->multicast_to_unicast_threshold)
+		return 0;
+
+	/* CPA/TMA 2016/04/27: the skb is cloned if it is transmitted to more than one
+	 * port or if it is also passed to the local system. If it is not cloned,
+	 * we try to re-use the original skb.
+	 * We have to unclone the skb here, as the packet must not reach anyone
+	 * else once we manipulate the actual data. skb_cow() does exactly that,
+	 * but the clone must not be shared (which is the case here because
+	 * we come from deliver_clone()).
+	 */
+	if (skb_cow(skb, 0) != 0 || !pskb_may_pull(skb, ETH_ALEN)) {
+		/* proceed as multicast if out-of-memory */
+		return 0;
+	}
+
+	list_for_each_entry_rcu(p, &pg->recipients, list) {
+		struct net_bridge_fdb_entry *fdb = p->recipient;
+		struct sk_buff *ucast_skb;
+		struct ethhdr *ucast_eth;
+
+		/* Re-use the original skb if possible. The skb must be copied
+		 * for every recipient except the last one, which can use
+		 * the (possibly uncloned) original skb. */
+		if (list_is_last(&p->list, &pg->recipients))
+			ucast_skb = skb;
+		else {
+			ucast_skb = pskb_copy(skb, GFP_ATOMIC);
+			if (unlikely(!ucast_skb)) {
+				/* Abort if out-of-memory. Here we prefer to drop
+				 * instead of giving recipients duplicated packets.
+				 */
+				kfree_skb(skb);
+				break;
+			}
+		}
+
+		/* modify and send */
+		ucast_eth = eth_hdr(ucast_skb);
+		memcpy(ucast_eth->h_dest, &fdb->addr.addr, ETH_ALEN);
+		dev_queue_xmit(ucast_skb);
+	}
+
+	return 1;
+}
+
+static inline int is_udp(struct sk_buff *skb)
+{
+	/*
+	 * only convert UDP packets to unicast
+	 * 2014-12-12, calle
+	 */
+	switch (skb->protocol) {
+	case htons(ETH_P_IP):
+		return ip_hdr(skb)->protocol == IPPROTO_UDP;
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+	case htons(ETH_P_IPV6):
+		return ipv6_hdr(skb)->nexthdr == IPPROTO_UDP;
+#endif
+	default:
+		return 0;
+	}
+}
+
+static inline int should_convert_to_unicast(struct sk_buff *skb, const u8 *dest)
+{
+	return is_multicast_ether_addr(dest) && !is_broadcast_ether_addr(dest) && is_udp(skb);
+}
+
+#else
+static inline int br_forward_as_unicast(struct sk_buff *skb)
+{
+	return 0;
+}
+static inline int should_convert_to_unicast(struct sk_buff *skb, const u8 *dest)
+{
+	return 0;
+}
+#endif
+
 int br_dev_queue_push_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
-	skb_push(skb, ETH_HLEN);
+	u8 *dest;
+
+	dest = skb_push(skb, ETH_HLEN);
+
 	if (!is_skb_forwardable(skb->dev, skb))
 		goto drop;
 
@@ -57,7 +173,8 @@
 		skb_set_network_header(skb, depth);
 	}
 
-	dev_queue_xmit(skb);
+	if (!should_convert_to_unicast(skb, dest) || !br_forward_as_unicast(skb))
+		dev_queue_xmit(skb);
 
 	return 0;
 
@@ -69,7 +186,7 @@
 
 int br_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
-	return NF_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING,
+	return BR_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING,
 		       net, sk, skb, NULL, skb->dev,
 		       br_dev_queue_push_xmit);
 
@@ -96,7 +213,7 @@
 		return;
 	}
 
-	NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT,
+	BR_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT,
 		dev_net(skb->dev), NULL, skb,NULL, skb->dev,
 		br_forward_finish);
 }
@@ -120,7 +237,7 @@
 	skb->dev = to->dev;
 	skb_forward_csum(skb);
 
-	NF_HOOK(NFPROTO_BRIDGE, NF_BR_FORWARD,
+	BR_HOOK(NFPROTO_BRIDGE, NF_BR_FORWARD,
 		dev_net(indev), NULL, skb, indev, skb->dev,
 		br_forward_finish);
 }
@@ -135,12 +252,11 @@
 
 	kfree_skb(skb);
 }
-EXPORT_SYMBOL_GPL(br_deliver);
 
 /* called with rcu_read_lock */
 void br_forward(const struct net_bridge_port *to, struct sk_buff *skb, struct sk_buff *skb0)
 {
-	if (to && should_deliver(to, skb)) {
+	if (to && should_deliver(to, skb) && !(to->flags & BR_ISOLATE_MODE)) {
 		if (skb0)
 			deliver_clone(to, skb, __br_forward);
 		else
@@ -196,17 +312,47 @@
 		   struct sk_buff *skb0,
 		   void (*__packet_hook)(const struct net_bridge_port *p,
 					 struct sk_buff *skb),
-		   bool unicast)
+		   bool unicast, bool forward)
 {
 	struct net_bridge_port *p;
 	struct net_bridge_port *prev;
+	enum br_pkt_type pkt_type = BR_PKT_UNICAST;
+	const unsigned char *dest;
+
+	dest = eth_hdr(skb)->h_dest;
+	if (is_broadcast_ether_addr(dest))
+		pkt_type = BR_PKT_BROADCAST;
+	else if (is_multicast_ether_addr(dest))
+		pkt_type = BR_PKT_MULTICAST;
 
 	prev = NULL;
 
+	/* AVM/TMA 20170315 JZ-33183
+	 * There are some cases where we get here for multicast traffic, for example
+	 * if all mdb entries are gone after the bridge was brought down (multid may do this).
+	 * In the past, br_forward_as_unicast() then accessed an uninitialized pointer.
+	 */
+	BR_OUTPUT_SKB_CB(skb)->pg = NULL;
 	list_for_each_entry_rcu(p, &br->port_list, list) {
-		/* Do not flood unicast traffic to ports that turn it off */
-		if (unicast && !(p->flags & BR_FLOOD))
+		if (forward && (p->flags & BR_ISOLATE_MODE))
 			continue;
+		/* Do not flood unicast traffic to ports that turn it off, nor
+		 * other traffic if flooding is off, except for traffic we originate.
+		 */
+		switch (pkt_type) {
+		case BR_PKT_UNICAST:
+			if (!(p->flags & BR_FLOOD))
+				continue;
+			break;
+		case BR_PKT_MULTICAST:
+			if (!(p->flags & BR_MCAST_FLOOD) && skb->dev != br->dev)
+				continue;
+			break;
+		case BR_PKT_BROADCAST:
+			if (!(p->flags & BR_BCAST_FLOOD) && skb->dev != br->dev)
+				continue;
+			break;
+		}
 
 		/* Do not flood to ports that enable proxy ARP */
 		if (p->flags & BR_PROXYARP)
@@ -238,15 +384,24 @@
 /* called with rcu_read_lock */
 void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb, bool unicast)
 {
-	br_flood(br, skb, NULL, __br_deliver, unicast);
+	/* FIXME: Try to accelerate */
+	avm_pa_do_not_accelerate(skb);
+	br_flood(br, skb, NULL, __br_deliver, unicast, false);
 }
+EXPORT_SYMBOL_GPL(br_deliver);
 
 /* called under bridge lock */
 void br_flood_forward(struct net_bridge *br, struct sk_buff *skb,
 		      struct sk_buff *skb2, bool unicast)
 {
-	br_flood(br, skb, skb2, __br_forward, unicast);
+	/* FIXME: Try to accelerate */
+	avm_pa_do_not_accelerate(skb);
+	if (br_flood_rl(br, skb, skb->dev))
+		br_flood(br, skb, skb2, __br_forward, unicast, true);
+	else if (!skb2)
+		kfree_skb(skb);
 }
+EXPORT_SYMBOL_GPL(br_forward);
 
 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
 /* called with rcu_read_lock */
@@ -264,6 +419,8 @@
 	rp = rcu_dereference(hlist_first_rcu(&br->router_list));
 	p = mdst ? rcu_dereference(mdst->ports) : NULL;
 
+	BR_OUTPUT_SKB_CB(skb)->pg = NULL;
+
 	while (p || rp) {
 		struct net_bridge_port *port, *lport, *rport;
 
@@ -278,6 +435,17 @@
 		if (IS_ERR(prev))
 			goto out;
 
+		/* Store the port group, which br_forward_as_unicast() needs, to avoid a
+		 * costly lookup in the hot path. BR_OUTPUT_SKB_CB(skb)->pg must correspond
+		 * to the prev port, because the skb will be delivered to that one next.
+		 * Therefore a) assign BR_OUTPUT_SKB_CB(skb)->pg before p is updated, and
+		 * b) only change it when maybe_deliver() was not a no-op. */
+		if (prev == lport)
+			BR_OUTPUT_SKB_CB(skb)->pg = p;
+		else if (prev == rport)
+			BR_OUTPUT_SKB_CB(skb)->pg = NULL;
+		/* else: prev == (old) prev, i.e. should_deliver() was a no-op and the port was skipped */
+
 		if ((unsigned long)lport >= (unsigned long)port)
 			p = rcu_dereference(p->next);
 		if ((unsigned long)rport >= (unsigned long)port)
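
For readers without the AVM tree at hand, the following standalone sketch mirrors the buffer-handling pattern that br_forward_as_unicast() uses above: every recipient except the last gets its own copy of the frame, the last recipient re-uses the original buffer, and an allocation failure drops the frame rather than risk handing some recipients duplicates. This is userspace C with illustrative names (struct frame, send_as_unicast), not kernel code.

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	struct frame {
		char dest[18];		/* destination MAC as text, illustrative */
		char payload[64];
	};

	/* Mirrors the re-use rule from br_forward_as_unicast(): copy the frame
	 * for every recipient except the last, which takes over the original. */
	static void send_as_unicast(struct frame *f,
				    const char *const *recipients, size_t n)
	{
		size_t i;

		for (i = 0; i < n; i++) {
			struct frame *out;

			if (i == n - 1) {
				out = f;	/* last recipient re-uses the original */
			} else {
				out = malloc(sizeof(*out));
				if (!out) {
					/* prefer dropping over sending duplicates */
					free(f);
					return;
				}
				memcpy(out, f, sizeof(*out));
			}

			/* rewrite the destination, then "transmit" */
			snprintf(out->dest, sizeof(out->dest), "%s", recipients[i]);
			printf("xmit to %s: %s\n", out->dest, out->payload);
			free(out);
		}
	}

	int main(void)
	{
		static const char *const recipients[] = {
			"02:00:00:00:00:01",
			"02:00:00:00:00:02",
		};
		struct frame *f = malloc(sizeof(*f));

		if (!f)
			return 1;
		snprintf(f->payload, sizeof(f->payload), "multicast burst");
		send_as_unicast(f, recipients, 2);
		return 0;
	}

The same reasoning explains why the kernel version calls skb_cow() first: once the destination MAC is rewritten in place, the buffer must not be shared with any other consumer of the original multicast frame.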
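The br_flood() changes hinge on classifying a frame by its destination MAC before consulting the per-port flood flags. Below is a minimal sketch of that classification, using the same bit tests the kernel's is_broadcast_ether_addr()/is_multicast_ether_addr() helpers perform; the enum and function names here are illustrative, not the kernel's.

	#include <stdbool.h>
	#include <stdint.h>

	enum pkt_type { PKT_UNICAST, PKT_MULTICAST, PKT_BROADCAST };

	/* all six octets 0xff -> broadcast */
	static bool mac_is_broadcast(const uint8_t a[6])
	{
		return (a[0] & a[1] & a[2] & a[3] & a[4] & a[5]) == 0xff;
	}

	/* I/G bit (LSB of the first octet) set -> group address */
	static bool mac_is_multicast(const uint8_t a[6])
	{
		return a[0] & 0x01;
	}

	/* Broadcast must be tested first: it is also a multicast address. */
	static enum pkt_type classify(const uint8_t dest[6])
	{
		if (mac_is_broadcast(dest))
			return PKT_BROADCAST;
		if (mac_is_multicast(dest))
			return PKT_MULTICAST;
		return PKT_UNICAST;
	}

Each class then maps to its own per-port opt-out in the patch (BR_FLOOD, BR_MCAST_FLOOD, BR_BCAST_FLOOD), with locally originated traffic (skb->dev == br->dev) exempt from the multicast and broadcast checks.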