--- zzzz-none-000/linux-3.10.107/net/tipc/net.c	2017-06-27 09:49:32.000000000 +0000
+++ scorpion-7490-727/linux-3.10.107/net/tipc/net.c	2021-02-04 17:41:59.000000000 +0000
@@ -1,7 +1,7 @@
 /*
  * net/tipc/net.c: TIPC network routing code
  *
- * Copyright (c) 1995-2006, Ericsson AB
+ * Copyright (c) 1995-2006, 2014, Ericsson AB
  * Copyright (c) 2005, 2010-2011, Wind River Systems
  * All rights reserved.
  *
@@ -38,46 +38,46 @@
 #include "net.h"
 #include "name_distr.h"
 #include "subscr.h"
-#include "port.h"
+#include "socket.h"
 #include "node.h"
-#include "config.h"
+#include "bcast.h"
+
+static const struct nla_policy tipc_nl_net_policy[TIPC_NLA_NET_MAX + 1] = {
+	[TIPC_NLA_NET_UNSPEC]	= { .type = NLA_UNSPEC },
+	[TIPC_NLA_NET_ID]	= { .type = NLA_U32 }
+};
 
 /*
  * The TIPC locking policy is designed to ensure a very fine locking
  * granularity, permitting complete parallel access to individual
- * port and node/link instances. The code consists of three major
+ * port and node/link instances. The code consists of four major
  * locking domains, each protected with their own disjunct set of locks.
  *
- * 1: The routing hierarchy.
- *    Comprises the structures 'zone', 'cluster', 'node', 'link'
- *    and 'bearer'. The whole hierarchy is protected by a big
- *    read/write lock, tipc_net_lock, to enssure that nothing is added
- *    or removed while code is accessing any of these structures.
- *    This layer must not be called from the two others while they
- *    hold any of their own locks.
- *    Neither must it itself do any upcalls to the other two before
- *    it has released tipc_net_lock and other protective locks.
- *
- *    Within the tipc_net_lock domain there are two sub-domains;'node' and
- *    'bearer', where local write operations are permitted,
- *    provided that those are protected by individual spin_locks
- *    per instance. Code holding tipc_net_lock(read) and a node spin_lock
- *    is permitted to poke around in both the node itself and its
- *    subordinate links. I.e, it can update link counters and queues,
- *    change link state, send protocol messages, and alter the
- *    "active_links" array in the node; but it can _not_ remove a link
- *    or a node from the overall structure.
- *    Correspondingly, individual bearers may change status within a
- *    tipc_net_lock(read), protected by an individual spin_lock ber bearer
- *    instance, but it needs tipc_net_lock(write) to remove/add any bearers.
- *
- *
- * 2: The transport level of the protocol.
- *    This consists of the structures port, (and its user level
- *    representations, such as user_port and tipc_sock), reference and
- *    tipc_user (port.c, reg.c, socket.c).
+ * 1: The bearer level.
+ *    The RTNL lock is used to serialize bearer configuration on the
+ *    update side, while the RCU read lock is taken on the read side
+ *    so that bearer instances stay valid on both the transmission
+ *    and the reception path.
+ *
+ * 2: The node and link level.
+ *    All node instances are kept in two lists, tipc_node_list and
+ *    node_htable. Both lists are protected by node_list_lock on the
+ *    write side and guarded by RCU on the read side. A node instance
+ *    is destroyed only when the TIPC module is removed, and by that
+ *    point it is certain that no user is still accessing it.
+ *    Therefore, except when iterating the two lists under RCU
+ *    protection, there is no need to hold the RCU read lock while
+ *    accessing a node instance in other places.
+ *
+ *    In addition, all members of a node structure, including its link
+ *    instances, are protected by the node's spin lock.
+ *
+ * 3: The transport level of the protocol.
+ *    This consists of the structures port (and its user level
+ *    representations, such as user_port and tipc_sock), reference and
+ *    tipc_user (port.c, reg.c, socket.c).
  *
- * This layer has four different locks:
+ *    This layer has four different locks:
  *     - The tipc_port spin_lock. This is protecting each port instance
  *       from parallel data access and removal. Since we can not place
  *       this lock in the port itself, it has been placed in the
@@ -96,7 +96,7 @@
  *    There are two such lists; 'port_list', which is used for management,
  *    and 'wait_list', which is used to queue ports during congestion.
  *
- * 3: The name table (name_table.c, name_distr.c, subscription.c)
+ * 4: The name table (name_table.c, name_distr.c, subscription.c)
  *    - There is one big read/write-lock (tipc_nametbl_lock) protecting the
  *      overall name table structure. Nothing must be added/removed to
  *      this structure without holding write access to it.
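
The node-level rules described in locking domain 2 above boil down to a short read-side pattern. Below is a minimal sketch of such a lookup, assuming the node_htable/hash-chain layout that net/tipc/node.c has in this era; tipc_hashfn() and the field names are illustrative, not part of this patch:

/* Illustrative only: find a node by address following the domain-2
 * rules. The hash chain is walked under rcu_read_lock(); because a
 * node is freed only at module removal, the pointer remains usable
 * after the read-side section ends. Mutating per-node state (links,
 * counters) would still require taking the node's spin lock.
 */
static struct tipc_node *sketch_node_find(struct net *net, u32 addr)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_node *node;

	rcu_read_lock();
	hlist_for_each_entry_rcu(node, &tn->node_htable[tipc_hashfn(addr)],
				 hash) {
		if (node->addr == addr) {
			rcu_read_unlock();
			return node;
		}
	}
	rcu_read_unlock();
	return NULL;
}
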
@@ -108,98 +108,144 @@
  *    - A local spin_lock protecting the queue of subscriber events.
 */
 
-DEFINE_RWLOCK(tipc_net_lock);
-
-static void net_route_named_msg(struct sk_buff *buf)
+int tipc_net_start(struct net *net, u32 addr)
 {
-	struct tipc_msg *msg = buf_msg(buf);
-	u32 dnode;
-	u32 dport;
+	struct tipc_net *tn = net_generic(net, tipc_net_id);
+	char addr_string[16];
 
-	if (!msg_named(msg)) {
-		kfree_skb(buf);
-		return;
-	}
+	tn->own_addr = addr;
+	tipc_named_reinit(net);
+	tipc_sk_reinit(net);
+	tipc_bcast_reinit(net);
 
-	dnode = addr_domain(msg_lookup_scope(msg));
-	dport = tipc_nametbl_translate(msg_nametype(msg), msg_nameinst(msg), &dnode);
-	if (dport) {
-		msg_set_destnode(msg, dnode);
-		msg_set_destport(msg, dport);
-		tipc_net_route_msg(buf);
-		return;
-	}
-	tipc_reject_msg(buf, TIPC_ERR_NO_NAME);
+	tipc_nametbl_publish(net, TIPC_CFG_SRV, tn->own_addr, tn->own_addr,
+			     TIPC_ZONE_SCOPE, 0, tn->own_addr);
+
+	pr_info("Started in network mode\n");
+	pr_info("Own node address %s, network identity %u\n",
+		tipc_addr_string_fill(addr_string, tn->own_addr),
+		tn->net_id);
+	return 0;
 }
 
-void tipc_net_route_msg(struct sk_buff *buf)
+void tipc_net_stop(struct net *net)
 {
-	struct tipc_msg *msg;
-	u32 dnode;
+	struct tipc_net *tn = net_generic(net, tipc_net_id);
 
-	if (!buf)
+	if (!tn->own_addr)
 		return;
-	msg = buf_msg(buf);
 
-	/* Handle message for this node */
-	dnode = msg_short(msg) ? tipc_own_addr : msg_destnode(msg);
-	if (tipc_in_scope(dnode, tipc_own_addr)) {
-		if (msg_isdata(msg)) {
-			if (msg_mcast(msg))
-				tipc_port_recv_mcast(buf, NULL);
-			else if (msg_destport(msg))
-				tipc_port_recv_msg(buf);
-			else
-				net_route_named_msg(buf);
-			return;
-		}
-		switch (msg_user(msg)) {
-		case NAME_DISTRIBUTOR:
-			tipc_named_recv(buf);
-			break;
-		case CONN_MANAGER:
-			tipc_port_recv_proto_msg(buf);
-			break;
-		default:
-			kfree_skb(buf);
-		}
-		return;
-	}
+	tipc_nametbl_withdraw(net, TIPC_CFG_SRV, tn->own_addr, 0,
+			      tn->own_addr);
+	rtnl_lock();
+	tipc_bearer_stop(net);
+	tipc_node_stop(net);
+	rtnl_unlock();
 
-	/* Handle message for another node */
-	skb_trim(buf, msg_size(msg));
-	tipc_link_send(buf, dnode, msg_link_selector(msg));
+	pr_info("Left network mode\n");
 }
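
tipc_net_start() and tipc_net_stop() now take a struct net and look their state up via net_generic(), which only works once TIPC has registered per-namespace storage. Below is a hedged sketch of that wiring, modeled on net/tipc/core.c of the same era; the defaults and field set are illustrative:

/* Per-namespace setup/teardown sketch: .id/.size make net_generic(net,
 * tipc_net_id) return a zeroed struct tipc_net for each namespace.
 */
static int __net_init tipc_init_net(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	tn->net_id = 4711;	/* TIPC default; overridable via TIPC_NLA_NET_ID */
	tn->own_addr = 0;	/* zero means "not yet joined a network" */
	INIT_LIST_HEAD(&tn->node_list);
	spin_lock_init(&tn->node_list_lock);
	return 0;
}

static void __net_exit tipc_exit_net(struct net *net)
{
	tipc_net_stop(net);
}

static struct pernet_operations tipc_net_ops = {
	.init = tipc_init_net,
	.exit = tipc_exit_net,
	.id   = &tipc_net_id,
	.size = sizeof(struct tipc_net),
};

/* registered once at module init, e.g. register_pernet_subsys(&tipc_net_ops) */
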
 
-void tipc_net_start(u32 addr)
+static int __tipc_nl_add_net(struct net *net, struct tipc_nl_msg *msg)
 {
-	char addr_string[16];
+	struct tipc_net *tn = net_generic(net, tipc_net_id);
+	void *hdr;
+	struct nlattr *attrs;
 
-	write_lock_bh(&tipc_net_lock);
-	tipc_own_addr = addr;
-	tipc_named_reinit();
-	tipc_port_reinit();
-	tipc_bclink_init();
-	write_unlock_bh(&tipc_net_lock);
+	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
+			  NLM_F_MULTI, TIPC_NL_NET_GET);
+	if (!hdr)
+		return -EMSGSIZE;
 
-	tipc_cfg_reinit();
+	attrs = nla_nest_start(msg->skb, TIPC_NLA_NET);
+	if (!attrs)
+		goto msg_full;
 
-	pr_info("Started in network mode\n");
-	pr_info("Own node address %s, network identity %u\n",
-		tipc_addr_string_fill(addr_string, tipc_own_addr), tipc_net_id);
+	if (nla_put_u32(msg->skb, TIPC_NLA_NET_ID, tn->net_id))
+		goto attr_msg_full;
+
+	nla_nest_end(msg->skb, attrs);
+	genlmsg_end(msg->skb, hdr);
+
+	return 0;
+
+attr_msg_full:
+	nla_nest_cancel(msg->skb, attrs);
+msg_full:
+	genlmsg_cancel(msg->skb, hdr);
+
+	return -EMSGSIZE;
 }
 
-void tipc_net_stop(void)
+int tipc_nl_net_dump(struct sk_buff *skb, struct netlink_callback *cb)
 {
-	struct tipc_node *node, *t_node;
+	struct net *net = sock_net(skb->sk);
+	int err;
+	int done = cb->args[0];
+	struct tipc_nl_msg msg;
+
+	if (done)
+		return 0;
+
+	msg.skb = skb;
+	msg.portid = NETLINK_CB(cb->skb).portid;
+	msg.seq = cb->nlh->nlmsg_seq;
+
+	err = __tipc_nl_add_net(net, &msg);
+	if (err)
+		goto out;
+
+	done = 1;
+out:
+	cb->args[0] = done;
 
-	if (!tipc_own_addr)
-		return;
-	write_lock_bh(&tipc_net_lock);
-	tipc_bearer_stop();
-	tipc_bclink_stop();
-	list_for_each_entry_safe(node, t_node, &tipc_node_list, list)
-		tipc_node_delete(node);
-	write_unlock_bh(&tipc_net_lock);
-	pr_info("Left network mode\n");
+	return skb->len;
+}
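
For the two handlers in this file to be reachable, they have to be listed in TIPC's generic netlink operations table. A sketch of that hookup, patterned on net/tipc/netlink.c of this period (the family definition and per-op policy pointers are omitted here):

/* Illustrative ops-table entries: GET is served through the dumpit
 * path (one-shot dump, resumption flag kept in cb->args[0]), SET
 * through the doit path.
 */
static const struct genl_ops tipc_genl_v2_ops[] = {
	{
		.cmd	= TIPC_NL_NET_GET,
		.dumpit	= tipc_nl_net_dump,
	},
	{
		.cmd	= TIPC_NL_NET_SET,
		.doit	= tipc_nl_net_set,
	},
};
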
+
+int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info)
+{
+	struct net *net = sock_net(skb->sk);
+	struct tipc_net *tn = net_generic(net, tipc_net_id);
+	struct nlattr *attrs[TIPC_NLA_NET_MAX + 1];
+	int err;
+
+	if (!info->attrs[TIPC_NLA_NET])
+		return -EINVAL;
+
+	err = nla_parse_nested(attrs, TIPC_NLA_NET_MAX,
+			       info->attrs[TIPC_NLA_NET],
+			       tipc_nl_net_policy);
+	if (err)
+		return err;
+
+	if (attrs[TIPC_NLA_NET_ID]) {
+		u32 val;
+
+		/* Can't change net id once TIPC has joined a network */
+		if (tn->own_addr)
+			return -EPERM;
+
+		val = nla_get_u32(attrs[TIPC_NLA_NET_ID]);
+		if (val < 1 || val > 9999)
+			return -EINVAL;
+
+		tn->net_id = val;
+	}
+
+	if (attrs[TIPC_NLA_NET_ADDR]) {
+		u32 addr;
+
+		/* Can't change net addr once TIPC has joined a network */
+		if (tn->own_addr)
+			return -EPERM;
+
+		addr = nla_get_u32(attrs[TIPC_NLA_NET_ADDR]);
+		if (!tipc_addr_node_valid(addr))
+			return -EINVAL;
+
+		rtnl_lock();
+		tipc_net_start(net, addr);
+		rtnl_unlock();
+	}
+
+	return 0;
 }
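
From userspace, tipc_nl_net_set() is driven by a TIPC_NL_NET_SET request on the "TIPCv2" generic netlink family. A hedged libnl-3 sketch follows; error handling is trimmed, the constants come from linux/tipc_netlink.h, and the function name is made up for illustration:

#include <stdint.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/tipc_netlink.h>

/* Set the TIPC network id (1..9999) before the node joins a network;
 * the kernel rejects the request with EPERM once own_addr is set.
 */
int sketch_set_tipc_net_id(uint32_t net_id)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg = nlmsg_alloc();
	struct nlattr *nest;
	int family, err;

	genl_connect(sk);
	family = genl_ctrl_resolve(sk, TIPC_GENL_V2_NAME);

	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    TIPC_NL_NET_SET, TIPC_GENL_V2_VERSION);
	nest = nla_nest_start(msg, TIPC_NLA_NET);	/* nested TIPC_NLA_NET */
	nla_put_u32(msg, TIPC_NLA_NET_ID, net_id);
	nla_nest_end(msg, nest);

	err = nl_send_auto(sk, msg);	/* fire-and-forget; no ack wait */

	nlmsg_free(msg);
	nl_socket_free(sk);
	return err < 0 ? err : 0;
}
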