--- zzzz-none-000/linux-3.10.107/drivers/net/team/team.c	2017-06-27 09:49:32.000000000 +0000
+++ scorpion-7490-727/linux-3.10.107/drivers/net/team/team.c	2021-02-04 17:41:59.000000000 +0000
@@ -28,6 +28,7 @@
 #include <net/genetlink.h>
 #include <net/netlink.h>
 #include <net/sch_generic.h>
+#include <net/switchdev.h>
 #include <generated/utsrelease.h>
 #include <linux/if_team.h>
 
@@ -174,7 +175,6 @@
 static int __team_option_inst_add_option(struct team *team,
 					 struct team_option *option)
 {
-	struct team_port *port;
 	int err;
 
 	if (!option->per_port) {
@@ -182,12 +182,6 @@
 		if (err)
 			goto inst_del_option;
 	}
-
-	list_for_each_entry(port, &team->port_list, list) {
-		err = __team_option_inst_add(team, option, port);
-		if (err)
-			goto inst_del_option;
-	}
 	return 0;
 
 inst_del_option:
@@ -523,31 +517,26 @@
 	team->mode = &__team_no_mode;
 }
 
-static void __team_adjust_ops(struct team *team, int en_port_count)
+static void team_adjust_ops(struct team *team)
 {
 	/*
 	 * To avoid checks in rx/tx skb paths, ensure here that non-null and
 	 * correct ops are always set.
 	 */
 
-	if (!en_port_count || !team_is_mode_set(team) ||
+	if (!team->en_port_count || !team_is_mode_set(team) ||
 	    !team->mode->ops->transmit)
 		team->ops.transmit = team_dummy_transmit;
 	else
 		team->ops.transmit = team->mode->ops->transmit;
 
-	if (!en_port_count || !team_is_mode_set(team) ||
+	if (!team->en_port_count || !team_is_mode_set(team) ||
 	    !team->mode->ops->receive)
 		team->ops.receive = team_dummy_receive;
 	else
 		team->ops.receive = team->mode->ops->receive;
 }
 
-static void team_adjust_ops(struct team *team)
-{
-	__team_adjust_ops(team, team->en_port_count);
-}
-
 /*
  * We can benefit from the fact that it's ensured no port is present
  * at the time of mode change. Therefore no packets are in fly so there's no
@@ -625,6 +614,98 @@
 }
 
 
+/*********************
+ * Peers notification
+ *********************/
+
+static void team_notify_peers_work(struct work_struct *work)
+{
+	struct team *team;
+	int val;
+
+	team = container_of(work, struct team, notify_peers.dw.work);
+
+	if (!rtnl_trylock()) {
+		schedule_delayed_work(&team->notify_peers.dw, 0);
+		return;
+	}
+	val = atomic_dec_if_positive(&team->notify_peers.count_pending);
+	if (val < 0) {
+		rtnl_unlock();
+		return;
+	}
+	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, team->dev);
+	rtnl_unlock();
+	if (val)
+		schedule_delayed_work(&team->notify_peers.dw,
+				      msecs_to_jiffies(team->notify_peers.interval));
+}
+
+static void team_notify_peers(struct team *team)
+{
+	if (!team->notify_peers.count || !netif_running(team->dev))
+		return;
+	atomic_add(team->notify_peers.count, &team->notify_peers.count_pending);
+	schedule_delayed_work(&team->notify_peers.dw, 0);
+}
+
+static void team_notify_peers_init(struct team *team)
+{
+	INIT_DELAYED_WORK(&team->notify_peers.dw, team_notify_peers_work);
+}
+
+static void team_notify_peers_fini(struct team *team)
+{
+	cancel_delayed_work_sync(&team->notify_peers.dw);
+}
+
+
+/*******************************
+ * Send multicast group rejoins
+ *******************************/
+
+static void team_mcast_rejoin_work(struct work_struct *work)
+{
+	struct team *team;
+	int val;
+
+	team = container_of(work, struct team, mcast_rejoin.dw.work);
+
+	if (!rtnl_trylock()) {
+		schedule_delayed_work(&team->mcast_rejoin.dw, 0);
+		return;
+	}
+	val = atomic_dec_if_positive(&team->mcast_rejoin.count_pending);
+	if (val < 0) {
+		rtnl_unlock();
+		return;
+	}
+	call_netdevice_notifiers(NETDEV_RESEND_IGMP, team->dev);
+	rtnl_unlock();
+	if (val)
+		schedule_delayed_work(&team->mcast_rejoin.dw,
+				      msecs_to_jiffies(team->mcast_rejoin.interval));
+}
+
+static void team_mcast_rejoin(struct team *team)
+{
+	if (!team->mcast_rejoin.count || !netif_running(team->dev))
+		return;
+	atomic_add(team->mcast_rejoin.count, &team->mcast_rejoin.count_pending);
+	schedule_delayed_work(&team->mcast_rejoin.dw, 0);
+}
+
+static void team_mcast_rejoin_init(struct team *team)
+{
+	INIT_DELAYED_WORK(&team->mcast_rejoin.dw, team_mcast_rejoin_work);
+}
+
+static void team_mcast_rejoin_fini(struct team *team)
+{
+	cancel_delayed_work_sync(&team->mcast_rejoin.dw);
+}
+
+
 /************************
  * Rx path frame handler
  ************************/
@@ -723,9 +804,9 @@
 static void __team_queue_override_port_del(struct team *team,
 					   struct team_port *port)
 {
+	if (!port->queue_id)
+		return;
 	list_del_rcu(&port->qom_list);
-	synchronize_rcu();
-	INIT_LIST_HEAD(&port->qom_list);
 }
 
 static bool team_queue_override_port_has_gt_prio_than(struct team_port *port,
@@ -747,9 +828,8 @@
 	struct list_head *qom_list;
 	struct list_head *node;
 
-	if (!port->queue_id || !team_port_enabled(port))
+	if (!port->queue_id)
 		return;
-
 	qom_list = __team_get_qom_list(team, port->queue_id);
 	node = qom_list;
 	list_for_each_entry(cur, qom_list, qom_list) {
@@ -766,7 +846,7 @@
 	bool enabled = false;
 
 	list_for_each_entry(port, &team->port_list, list) {
-		if (!list_empty(&port->qom_list)) {
+		if (port->queue_id) {
 			enabled = true;
 			break;
 		}
@@ -778,14 +858,44 @@
 	team->queue_override_enabled = enabled;
 }
 
-static void team_queue_override_port_refresh(struct team *team,
-					     struct team_port *port)
+static void team_queue_override_port_prio_changed(struct team *team,
+						  struct team_port *port)
 {
+	if (!port->queue_id || team_port_enabled(port))
+		return;
 	__team_queue_override_port_del(team, port);
 	__team_queue_override_port_add(team, port);
 	__team_queue_override_enabled_check(team);
 }
 
+static void team_queue_override_port_change_queue_id(struct team *team,
+						     struct team_port *port,
+						     u16 new_queue_id)
+{
+	if (team_port_enabled(port)) {
+		__team_queue_override_port_del(team, port);
+		port->queue_id = new_queue_id;
+		__team_queue_override_port_add(team, port);
+		__team_queue_override_enabled_check(team);
+	} else {
+		port->queue_id = new_queue_id;
+	}
+}
+
+static void team_queue_override_port_add(struct team *team,
+					 struct team_port *port)
+{
+	__team_queue_override_port_add(team, port);
+	__team_queue_override_enabled_check(team);
+}
+
+static void team_queue_override_port_del(struct team *team,
+					 struct team_port *port)
+{
+	__team_queue_override_port_del(team, port);
+	__team_queue_override_enabled_check(team);
+}
+
 
 /****************
  * Port handling
@@ -817,9 +927,11 @@
 	hlist_add_head_rcu(&port->hlist,
 			   team_port_index_hash(team, port->index));
 	team_adjust_ops(team);
-	team_queue_override_port_refresh(team, port);
+	team_queue_override_port_add(team, port);
 	if (team->ops.port_enabled)
 		team->ops.port_enabled(team, port);
+	team_notify_peers(team);
+	team_mcast_rejoin(team);
 }
 
 static void __reconstruct_port_hlist(struct team *team, int rm_index)
@@ -846,26 +958,24 @@
 	hlist_del_rcu(&port->hlist);
 	__reconstruct_port_hlist(team, port->index);
 	port->index = -1;
-	team_queue_override_port_refresh(team, port);
-	__team_adjust_ops(team, team->en_port_count - 1);
-	/*
-	 * Wait until readers see adjusted ops. This ensures that
-	 * readers never see team->en_port_count == 0
-	 */
-	synchronize_rcu();
 	team->en_port_count--;
+	team_queue_override_port_del(team, port);
+	team_adjust_ops(team);
+	team_notify_peers(team);
+	team_mcast_rejoin(team);
 }
 
 #define TEAM_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \
 			    NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
 			    NETIF_F_HIGHDMA | NETIF_F_LRO)
 
-static void __team_compute_features(struct team *team)
+static void ___team_compute_features(struct team *team)
 {
 	struct team_port *port;
-	u32 vlan_features = TEAM_VLAN_FEATURES;
+	u32 vlan_features = TEAM_VLAN_FEATURES & NETIF_F_ALL_FOR_ALL;
 	unsigned short max_hard_header_len = ETH_HLEN;
-	unsigned int flags, dst_release_flag = IFF_XMIT_DST_RELEASE;
+	unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE |
+					IFF_XMIT_DST_RELEASE_PERM;
 
 	list_for_each_entry(port, &team->port_list, list) {
 		vlan_features = netdev_increment_features(vlan_features,
@@ -880,17 +990,23 @@
 	team->dev->vlan_features = vlan_features;
 	team->dev->hard_header_len = max_hard_header_len;
 
-	flags = team->dev->priv_flags & ~IFF_XMIT_DST_RELEASE;
-	team->dev->priv_flags = flags | dst_release_flag;
+	team->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
+	if (dst_release_flag == (IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM))
+		team->dev->priv_flags |= IFF_XMIT_DST_RELEASE;
+}
 
+static void __team_compute_features(struct team *team)
+{
+	___team_compute_features(team);
 	netdev_change_features(team->dev);
 }
 
 static void team_compute_features(struct team *team)
 {
 	mutex_lock(&team->lock);
-	__team_compute_features(team);
+	___team_compute_features(team);
 	mutex_unlock(&team->lock);
+	netdev_change_features(team->dev);
 }
 
 static int team_port_enter(struct team *team, struct team_port *port)
@@ -898,7 +1014,6 @@
 	int err = 0;
 
 	dev_hold(team->dev);
-	port->dev->priv_flags |= IFF_TEAM_PORT;
 	if (team->ops.port_enter) {
 		err = team->ops.port_enter(team, port);
 		if (err) {
@@ -911,7 +1026,6 @@
 	return 0;
 
 err_port_enter:
-	port->dev->priv_flags &= ~IFF_TEAM_PORT;
 	dev_put(team->dev);
 
 	return err;
@@ -921,22 +1035,23 @@
 {
 	if (team->ops.port_leave)
 		team->ops.port_leave(team, port);
-	port->dev->priv_flags &= ~IFF_TEAM_PORT;
 	dev_put(team->dev);
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
-static int team_port_enable_netpoll(struct team *team, struct team_port *port,
-				    gfp_t gfp)
+static int team_port_enable_netpoll(struct team *team, struct team_port *port)
 {
 	struct netpoll *np;
 	int err;
 
-	np = kzalloc(sizeof(*np), gfp);
+	if (!team->dev->npinfo)
+		return 0;
+
+	np = kzalloc(sizeof(*np), GFP_KERNEL);
 	if (!np)
 		return -ENOMEM;
-	err = __netpoll_setup(np, port->dev, gfp);
+	err = __netpoll_setup(np, port->dev);
 	if (err) {
 		kfree(np);
 		return err;
 	}
@@ -958,26 +1073,34 @@
 	__netpoll_cleanup(np);
 	kfree(np);
 }
-
-static struct netpoll_info *team_netpoll_info(struct team *team)
-{
-	return team->dev->npinfo;
-}
-
 #else
-static int team_port_enable_netpoll(struct team *team, struct team_port *port,
-				    gfp_t gfp)
+static int team_port_enable_netpoll(struct team *team, struct team_port *port)
 {
 	return 0;
 }
 static void team_port_disable_netpoll(struct team_port *port)
 {
 }
-static struct netpoll_info *team_netpoll_info(struct team *team)
+#endif
+
+static int team_upper_dev_link(struct net_device *dev,
+			       struct net_device *port_dev)
 {
-	return NULL;
+	int err;
+
+	err = netdev_master_upper_dev_link(port_dev, dev);
+	if (err)
+		return err;
+	port_dev->priv_flags |= IFF_TEAM_PORT;
+	return 0;
+}
+
+static void team_upper_dev_unlink(struct net_device *dev,
+				  struct net_device *port_dev)
+{
+	netdev_upper_dev_unlink(port_dev, dev);
+	port_dev->priv_flags &= ~IFF_TEAM_PORT;
 }
-#endif
 
 static void __team_port_change_port_added(struct team_port *port, bool linkup);
 static int team_dev_type_check_change(struct net_device *dev,
@@ -1058,22 +1181,16 @@
 		goto err_vids_add;
 	}
 
-	if (team_netpoll_info(team)) {
-		err = team_port_enable_netpoll(team, port, GFP_KERNEL);
-		if (err) {
-			netdev_err(dev, "Failed to enable netpoll on device %s\n",
-				   portname);
-			goto err_enable_netpoll;
-		}
-	}
-
-	err = netdev_master_upper_dev_link(port_dev, dev);
+	err = team_port_enable_netpoll(team, port);
 	if (err) {
-		netdev_err(dev, "Device %s failed to set upper link\n",
+		netdev_err(dev, "Failed to enable netpoll on device %s\n",
 			   portname);
-		goto err_set_upper_link;
+		goto err_enable_netpoll;
 	}
 
+	if (!(dev->features & NETIF_F_LRO))
+		dev_disable_lro(port_dev);
+
 	err = netdev_rx_handler_register(port_dev, team_handle_frame,
 					 port);
 	if (err) {
@@ -1082,6 +1199,13 @@
 		goto err_handler_register;
 	}
 
+	err = team_upper_dev_link(dev, port_dev);
+	if (err) {
+		netdev_err(dev, "Device %s failed to set upper link\n",
+			   portname);
+		goto err_set_upper_link;
+	}
+
 	err = __team_option_inst_add_port(team, port);
 	if (err) {
 		netdev_err(dev, "Device %s failed to add per-port options\n",
@@ -1101,12 +1225,12 @@
 	return 0;
 
 err_option_port_add:
+	team_upper_dev_unlink(dev, port_dev);
+
+err_set_upper_link:
 	netdev_rx_handler_unregister(port_dev);
 
 err_handler_register:
-	netdev_upper_dev_unlink(port_dev, dev);
-
-err_set_upper_link:
 	team_port_disable_netpoll(port);
 
 err_enable_netpoll:
@@ -1145,8 +1269,8 @@
 
 	team_port_disable(team, port);
 	list_del_rcu(&port->list);
+	team_upper_dev_unlink(dev, port_dev);
 	netdev_rx_handler_unregister(port_dev);
-	netdev_upper_dev_unlink(port_dev, dev);
 	team_port_disable_netpoll(port);
 	vlan_vids_del_by_dev(port_dev, dev);
 	dev_uc_unsync(port_dev, dev);
@@ -1161,8 +1285,7 @@
 
 	team_port_set_orig_dev_addr(port);
 	dev_set_mtu(port_dev, port->orig.mtu);
-	synchronize_rcu();
-	kfree(port);
+	kfree_rcu(port, rcu);
 	netdev_info(dev, "Port device %s removed\n", portname);
 	__team_compute_features(team);
 
@@ -1185,6 +1308,62 @@
 	return team_change_mode(team, ctx->data.str_val);
 }
 
+static int team_notify_peers_count_get(struct team *team,
+				       struct team_gsetter_ctx *ctx)
+{
+	ctx->data.u32_val = team->notify_peers.count;
+	return 0;
+}
+
+static int team_notify_peers_count_set(struct team *team,
+				       struct team_gsetter_ctx *ctx)
+{
+	team->notify_peers.count = ctx->data.u32_val;
+	return 0;
+}
+
+static int team_notify_peers_interval_get(struct team *team,
+					  struct team_gsetter_ctx *ctx)
+{
+	ctx->data.u32_val = team->notify_peers.interval;
+	return 0;
+}
+
+static int team_notify_peers_interval_set(struct team *team,
+					  struct team_gsetter_ctx *ctx)
+{
+	team->notify_peers.interval = ctx->data.u32_val;
+	return 0;
+}
+
+static int team_mcast_rejoin_count_get(struct team *team,
+				       struct team_gsetter_ctx *ctx)
+{
+	ctx->data.u32_val = team->mcast_rejoin.count;
+	return 0;
+}
+
+static int team_mcast_rejoin_count_set(struct team *team,
+				       struct team_gsetter_ctx *ctx)
+{
+	team->mcast_rejoin.count = ctx->data.u32_val;
+	return 0;
+}
+
+static int team_mcast_rejoin_interval_get(struct team *team,
+					  struct team_gsetter_ctx *ctx)
+{
+	ctx->data.u32_val = team->mcast_rejoin.interval;
+	return 0;
+}
+
+static int team_mcast_rejoin_interval_set(struct team *team,
+					  struct team_gsetter_ctx *ctx)
+{
+	team->mcast_rejoin.interval = ctx->data.u32_val;
+	return 0;
+}
+
 static int team_port_en_option_get(struct team *team,
 				   struct team_gsetter_ctx *ctx)
 {
@@ -1261,9 +1440,12 @@
 				      struct team_gsetter_ctx *ctx)
 {
 	struct team_port *port = ctx->info->port;
+	s32 priority = ctx->data.s32_val;
 
-	port->priority = ctx->data.s32_val;
-	team_queue_override_port_refresh(team, port);
+	if (port->priority == priority)
+		return 0;
+	port->priority = priority;
+	team_queue_override_port_prio_changed(team, port);
 	return 0;
 }
 
@@ -1280,17 +1462,16 @@
 				   struct team_gsetter_ctx *ctx)
 {
 	struct team_port *port = ctx->info->port;
+	u16 new_queue_id = ctx->data.u32_val;
 
-	if (port->queue_id == ctx->data.u32_val)
+	if (port->queue_id == new_queue_id)
 		return 0;
-	if (ctx->data.u32_val >= team->dev->real_num_tx_queues)
+	if (new_queue_id >= team->dev->real_num_tx_queues)
 		return -EINVAL;
-	port->queue_id = ctx->data.u32_val;
-	team_queue_override_port_refresh(team, port);
+	team_queue_override_port_change_queue_id(team, port, new_queue_id);
 	return 0;
 }
 
-
 static const struct team_option team_options[] = {
 	{
 		.name = "mode",
@@ -1299,6 +1480,30 @@
 		.setter = team_mode_option_set,
 	},
 	{
+		.name = "notify_peers_count",
+		.type = TEAM_OPTION_TYPE_U32,
+		.getter = team_notify_peers_count_get,
+		.setter = team_notify_peers_count_set,
+	},
+	{
+		.name = "notify_peers_interval",
+		.type = TEAM_OPTION_TYPE_U32,
+		.getter = team_notify_peers_interval_get,
+		.setter = team_notify_peers_interval_set,
+	},
+	{
+		.name = "mcast_rejoin_count",
+		.type = TEAM_OPTION_TYPE_U32,
+		.getter = team_mcast_rejoin_count_get,
+		.setter = team_mcast_rejoin_count_set,
+	},
+	{
+		.name = "mcast_rejoin_interval",
+		.type = TEAM_OPTION_TYPE_U32,
+		.getter = team_mcast_rejoin_interval_get,
+		.setter = team_mcast_rejoin_interval_set,
+	},
+	{
 		.name = "enabled",
 		.type = TEAM_OPTION_TYPE_BOOL,
 		.per_port = true,
@@ -1363,7 +1568,7 @@
 	mutex_init(&team->lock);
 	team_set_no_mode(team);
 
-	team->pcpu_stats = alloc_percpu(struct team_pcpu_stats);
+	team->pcpu_stats = netdev_alloc_pcpu_stats(struct team_pcpu_stats);
 	if (!team->pcpu_stats)
 		return -ENOMEM;
 
@@ -1378,6 +1583,10 @@
 
 	INIT_LIST_HEAD(&team->option_list);
 	INIT_LIST_HEAD(&team->option_inst_list);
+
+	team_notify_peers_init(team);
+	team_mcast_rejoin_init(team);
+
 	err = team_options_register(team, team_options, ARRAY_SIZE(team_options));
 	if (err)
 		goto err_options_register;
@@ -1388,6 +1597,8 @@
 	return 0;
 
 err_options_register:
+	team_mcast_rejoin_fini(team);
+	team_notify_peers_fini(team);
 	team_queue_override_fini(team);
 err_team_queue_override_init:
 	free_percpu(team->pcpu_stats);
@@ -1407,6 +1618,8 @@
 
 	__team_change_mode(team, NULL); /* cleanup */
 	__team_options_unregister(team, team_options, ARRAY_SIZE(team_options));
+	team_mcast_rejoin_fini(team);
+	team_notify_peers_fini(team);
 	team_queue_override_fini(team);
 	mutex_unlock(&team->lock);
 }
@@ -1456,7 +1669,8 @@
 	return NETDEV_TX_OK;
 }
 
-static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb)
+static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb,
+			     void *accel_priv, select_queue_fallback_t fallback)
 {
 	/*
 	 * This helper function exists to help dev_pick_tx get the correct
@@ -1578,13 +1792,13 @@
 	for_each_possible_cpu(i) {
 		p = per_cpu_ptr(team->pcpu_stats, i);
 		do {
-			start = u64_stats_fetch_begin_bh(&p->syncp);
+			start = u64_stats_fetch_begin_irq(&p->syncp);
 			rx_packets	= p->rx_packets;
 			rx_bytes	= p->rx_bytes;
 			rx_multicast	= p->rx_multicast;
 			tx_packets	= p->tx_packets;
 			tx_bytes	= p->tx_bytes;
-		} while (u64_stats_fetch_retry_bh(&p->syncp, start));
+		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
 
 		stats->rx_packets	+= rx_packets;
 		stats->rx_bytes		+= rx_bytes;
@@ -1667,7 +1881,7 @@
 }
 
 static int team_netpoll_setup(struct net_device *dev,
-			      struct netpoll_info *npifo, gfp_t gfp)
+			      struct netpoll_info *npifo)
 {
 	struct team *team = netdev_priv(dev);
 	struct team_port *port;
@@ -1675,7 +1889,7 @@
 	mutex_lock(&team->lock);
 	list_for_each_entry(port, &team->port_list, list) {
-		err = team_port_enable_netpoll(team, port, gfp);
+		err = team_port_enable_netpoll(team, port);
 		if (err) {
 			__team_netpoll_cleanup(team);
 			break;
 		}
@@ -1726,6 +1940,9 @@
 						     mask);
 	}
 	rcu_read_unlock();
+
+	features = netdev_add_tso_features(features, mask);
+
 	return features;
 }
 
@@ -1765,6 +1982,13 @@
 	.ndo_del_slave		= team_del_slave,
 	.ndo_fix_features	= team_fix_features,
 	.ndo_change_carrier	= team_change_carrier,
+	.ndo_bridge_setlink	= switchdev_port_bridge_setlink,
+	.ndo_bridge_getlink	= switchdev_port_bridge_getlink,
+	.ndo_bridge_dellink	= switchdev_port_bridge_dellink,
+	.ndo_fdb_add		= switchdev_port_fdb_add,
+	.ndo_fdb_del		= switchdev_port_fdb_del,
+	.ndo_fdb_dump		= switchdev_port_fdb_dump,
+	.ndo_features_check	= passthru_features_check,
 };
 
 /***********************
@@ -1796,7 +2020,7 @@
 	dev->addr_len = port_dev->addr_len;
 	dev->mtu = port_dev->mtu;
 	memcpy(dev->broadcast, port_dev->broadcast, port_dev->addr_len);
-	memcpy(dev->dev_addr, port_dev->dev_addr, port_dev->addr_len);
+	eth_hw_addr_inherit(dev, port_dev);
 }
 
 static int team_dev_type_check_change(struct net_device *dev,
@@ -1832,9 +2056,9 @@
 	dev->netdev_ops = &team_netdev_ops;
 	dev->ethtool_ops = &team_ethtool_ops;
 	dev->destructor = team_destructor;
-	dev->tx_queue_len = 0;
 	dev->flags |= IFF_MULTICAST;
 	dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
+	dev->priv_flags |= IFF_NO_QUEUE;
 
 	/*
 	 * Indicate we support unicast address filtering. That way core won't
@@ -1845,6 +2069,10 @@
 
 	dev->features |= NETIF_F_LLTX;
 	dev->features |= NETIF_F_GRO;
+
+	/* Don't allow team devices to change network namespaces. */
+	dev->features |= NETIF_F_NETNS_LOCAL;
+
 	dev->hw_features = TEAM_VLAN_FEATURES |
 			   NETIF_F_HW_VLAN_CTAG_TX |
 			   NETIF_F_HW_VLAN_CTAG_RX |
@@ -1857,16 +2085,10 @@
 static int team_newlink(struct net *src_net, struct net_device *dev,
 			struct nlattr *tb[], struct nlattr *data[])
 {
-	int err;
-
 	if (tb[IFLA_ADDRESS] == NULL)
 		eth_hw_addr_random(dev);
 
-	err = register_netdevice(dev);
-	if (err)
-		return err;
-
-	return 0;
+	return register_netdevice(dev);
 }
 
 static int team_validate(struct nlattr *tb[], struct nlattr *data[])
@@ -2466,7 +2688,7 @@
 	return err;
 }
 
-static struct genl_ops team_nl_ops[] = {
+static const struct genl_ops team_nl_ops[] = {
 	{
 		.cmd = TEAM_CMD_NOOP,
 		.doit = team_nl_cmd_noop,
@@ -2492,15 +2714,15 @@
 	},
 };
 
-static struct genl_multicast_group team_change_event_mcgrp = {
-	.name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME,
+static const struct genl_multicast_group team_nl_mcgrps[] = {
+	{ .name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME, },
 };
 
 static int team_nl_send_multicast(struct sk_buff *skb,
 				  struct team *team, u32 portid)
 {
-	return genlmsg_multicast_netns(dev_net(team->dev), skb, 0,
-				       team_change_event_mcgrp.id, GFP_KERNEL);
+	return genlmsg_multicast_netns(&team_nl_family, dev_net(team->dev),
+				       skb, 0, 0, GFP_KERNEL);
 }
 
 static int team_nl_send_event_options_get(struct team *team,
@@ -2519,23 +2741,8 @@
 
 static int team_nl_init(void)
 {
-	int err;
-
-	err = genl_register_family_with_ops(&team_nl_family, team_nl_ops,
-					    ARRAY_SIZE(team_nl_ops));
-	if (err)
-		return err;
-
-	err = genl_register_mc_group(&team_nl_family, &team_change_event_mcgrp);
-	if (err)
-		goto err_change_event_grp_reg;
-
-	return 0;
-
-err_change_event_grp_reg:
-	genl_unregister_family(&team_nl_family);
-
-	return err;
+	return genl_register_family_with_ops_groups(&team_nl_family, team_nl_ops,
+						    team_nl_mcgrps);
 }
 
 static void team_nl_fini(void)
@@ -2653,7 +2860,7 @@
 static int team_device_event(struct notifier_block *unused,
 			     unsigned long event, void *ptr)
 {
-	struct net_device *dev = (struct net_device *) ptr;
+	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 	struct team_port *port;
 
 	port = team_port_get_rtnl(dev);
@@ -2664,8 +2871,10 @@
 	case NETDEV_UP:
 		if (netif_carrier_ok(dev))
 			team_port_change_check(port, true);
+		break;
 	case NETDEV_DOWN:
 		team_port_change_check(port, false);
+		break;
 	case NETDEV_CHANGE:
 		if (netif_running(port->dev))
 			team_port_change_check(port,
@@ -2677,7 +2886,7 @@
 	case NETDEV_FEAT_CHANGE:
 		team_compute_features(port->team);
 		break;
-	case NETDEV_CHANGEMTU:
+	case NETDEV_PRECHANGEMTU:
 		/* Forbid to change mtu of underlaying device */
 		if (!port->team->port_mtu_change_allowed)
 			return NOTIFY_BAD;
@@ -2685,6 +2894,10 @@
 	case NETDEV_PRE_TYPE_CHANGE:
 		/* Forbid to change type of underlaying device */
 		return NOTIFY_BAD;
+	case NETDEV_RESEND_IGMP:
+		/* Propagate to master device */
+		call_netdevice_notifiers(event, port->team->dev);
+		break;
 	}
 	return NOTIFY_DONE;
 }