--- zzzz-none-000/linux-4.1.38/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c	2017-01-18 18:48:06.000000000 +0000
+++ bcm63-7582-715/linux-4.1.38/net/ipv4/netfilter/nf_nat_masquerade_ipv4.c	2020-11-25 10:06:48.000000000 +0000
@@ -22,6 +22,140 @@
 #include <net/netfilter/nf_nat.h>
 #include <net/netfilter/ipv4/nf_nat_masquerade.h>
+#if defined(CONFIG_BCM_KF_NETFILTER)
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_expect.h>
+#include <net/netfilter/nf_conntrack_zones.h>
+#endif
+
+#if defined(CONFIG_BCM_KF_NETFILTER)
+/****************************************************************************/
+static void bcm_nat_expect(struct nf_conn *ct,
+			   struct nf_conntrack_expect *exp)
+{
+	struct nf_nat_range range;
+
+	/* This must be a fresh one. */
+	BUG_ON(ct->status & IPS_NAT_DONE_MASK);
+
+	/* Change src to where new ct comes from */
+	range.flags = NF_NAT_RANGE_MAP_IPS;
+	range.min_addr = range.max_addr =
+		ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3;
+	nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC);
+
+	/* For DST manip, map port here to where it's expected. */
+	range.flags = (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED);
+	range.min_proto = range.max_proto = exp->saved_proto;
+	range.min_addr = range.max_addr = exp->saved_addr;
+	nf_nat_setup_info(ct, &range, NF_NAT_MANIP_DST);
+}
+
+/****************************************************************************/
+static int bcm_nat_help(struct sk_buff *skb, unsigned int protoff,
+			struct nf_conn *ct, enum ip_conntrack_info ctinfo)
+{
+	int dir = CTINFO2DIR(ctinfo);
+	struct nf_conn_help *help = nfct_help(ct);
+	struct nf_conntrack_expect *exp;
+
+	if (dir != IP_CT_DIR_ORIGINAL ||
+	    help->expecting[NF_CT_EXPECT_CLASS_DEFAULT])
+		return NF_ACCEPT;
+
+	pr_debug("bcm_nat: packet[%d bytes] ", skb->len);
+	nf_ct_dump_tuple(&ct->tuplehash[dir].tuple);
+	pr_debug("reply: ");
+	nf_ct_dump_tuple(&ct->tuplehash[!dir].tuple);
+
+	/* Create expect */
+	if ((exp = nf_ct_expect_alloc(ct)) == NULL)
+		return NF_ACCEPT;
+
+	nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, AF_INET, NULL,
+			  &ct->tuplehash[!dir].tuple.dst.u3, IPPROTO_UDP,
+			  NULL,
+			  &ct->tuplehash[!dir].tuple.dst.u.udp.port);
+	exp->flags = NF_CT_EXPECT_PERMANENT;
+	exp->saved_addr = ct->tuplehash[dir].tuple.src.u3;
+	exp->saved_proto.udp.port = ct->tuplehash[dir].tuple.src.u.udp.port;
+	exp->dir = !dir;
+	exp->expectfn = bcm_nat_expect;
+
+	/* Setup expect */
+	nf_ct_expect_related(exp);
+	nf_ct_expect_put(exp);
+	pr_debug("bcm_nat: expect setup\n");
+
+	return NF_ACCEPT;
+}
+
+/****************************************************************************/
+static struct nf_conntrack_expect_policy bcm_nat_exp_policy __read_mostly = {
+	.max_expected = 1000,
+	.timeout = 240,
+};
+
+/****************************************************************************/
+static struct nf_conntrack_helper nf_conntrack_helper_bcm_nat __read_mostly = {
+	.name = "BCM-NAT",
+	.me = THIS_MODULE,
+	.tuple.src.l3num = AF_INET,
+	.tuple.dst.protonum = IPPROTO_UDP,
+	.expect_policy = &bcm_nat_exp_policy,
+	.expect_class_max = 1,
+	.help = bcm_nat_help,
+};
+
+/****************************************************************************/
+static inline int find_exp(__be32 ip, __be16 port, struct nf_conn *ct)
+{
+	struct nf_conntrack_tuple tuple;
+	struct nf_conntrack_expect *i = NULL;
+
+
+	memset(&tuple, 0, sizeof(tuple));
+	tuple.src.l3num = AF_INET;
+	tuple.dst.protonum = IPPROTO_UDP;
+	tuple.dst.u3.ip = ip;
+	tuple.dst.u.udp.port = port;
+
+	rcu_read_lock();
+	i = __nf_ct_expect_find(nf_ct_net(ct), nf_ct_zone(ct), &tuple);
+	rcu_read_unlock();
+
+	return i != NULL;
+}
+
+/****************************************************************************/
+static inline struct nf_conntrack_expect *find_fullcone_exp(struct nf_conn *ct)
+{
+	struct nf_conntrack_tuple * tp =
+		&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
+	struct net *net = nf_ct_net(ct);
+	struct nf_conntrack_expect * exp = NULL;
+	struct nf_conntrack_expect * i;
+	unsigned int h;
+
+	rcu_read_lock();
+	for (h = 0; h < nf_ct_expect_hsize; h++) {
+		hlist_for_each_entry_rcu(i, &net->ct.expect_hash[h],
+					 hnode) {
+			if (nf_inet_addr_cmp(&i->saved_addr, &tp->src.u3) &&
+			    i->saved_proto.all == tp->src.u.all &&
+			    i->tuple.dst.protonum == tp->dst.protonum &&
+			    i->tuple.src.u3.ip == 0 &&
+			    i->tuple.src.u.udp.port == 0) {
+				exp = i;
+				break;
+			}
+		}
+	}
+	rcu_read_unlock();
+
+	return exp;
+}
+#endif /* CONFIG_BCM_KF_NETFILTER */
+
+
 unsigned int
 nf_nat_masquerade_ipv4(struct sk_buff *skb, unsigned int hooknum,
		       const struct nf_nat_range *range,
@@ -58,6 +192,76 @@
 		nat->masq_index = out->ifindex;
 
+#if defined(CONFIG_BCM_KF_NETFILTER)
+
+/* RFC 4787 - 4.2.2. Port Parity
+   i.e., an even port will be mapped to an even port, and an odd port will be mapped to an odd port.
+*/
+#define CHECK_PORT_PARITY(a, b) ((a%2)==(b%2))
+
+	if (range->min_addr.ip != 0 /* nat_mode == full cone */
+	    && (nfct_help(ct) == NULL || nfct_help(ct)->helper == NULL)
+	    && nf_ct_protonum(ct) == IPPROTO_UDP) {
+		unsigned int ret;
+		u_int16_t minport;
+		u_int16_t maxport;
+		struct nf_conntrack_expect *exp;
+
+		pr_debug("bcm_nat: need full cone NAT\n");
+
+		/* Choose port */
+		spin_lock_bh(&nf_conntrack_expect_lock);
+		/* Look for existing expectation */
+		exp = find_fullcone_exp(ct);
+		if (exp) {
+			minport = maxport = exp->tuple.dst.u.udp.port;
+			pr_debug("bcm_nat: existing mapped port = %hu\n",
+				 ntohs(minport));
+		} else { /* no previous expect */
+			u_int16_t newport, tmpport, orgport;
+
+			minport = range->min_proto.all == 0?
+				ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.
+				u.udp.port : range->min_proto.all;
+			maxport = range->max_proto.all == 0?
+				htons(65535) : range->max_proto.all;
+			orgport = ntohs(minport);
+			for (newport = ntohs(minport),tmpport = ntohs(maxport);
+			     newport <= tmpport; newport++) {
+				if (CHECK_PORT_PARITY(orgport, newport) && !find_exp(newsrc, htons(newport), ct)) {
+					pr_debug("bcm_nat: new mapped port = "
+						 "%hu\n", newport);
+					minport = maxport = htons(newport);
+					break;
+				}
+			}
+		}
+		spin_unlock_bh(&nf_conntrack_expect_lock);
+
+
+		memset(&newrange.min_addr, 0, sizeof(newrange.min_addr));
+		memset(&newrange.max_addr, 0, sizeof(newrange.max_addr));
+
+		newrange.flags = range->flags | NF_NAT_RANGE_MAP_IPS |
+			NF_NAT_RANGE_PROTO_SPECIFIED;
+		newrange.max_addr.ip = newrange.min_addr.ip = newsrc;
+		newrange.min_proto.udp.port = newrange.max_proto.udp.port = minport;
+
+		/* Set ct helper */
+		ret = nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_SRC);
+		if (ret == NF_ACCEPT) {
+			struct nf_conn_help *help = nfct_help(ct);
+			if (help == NULL)
+				help = nf_ct_helper_ext_add(ct, &nf_conntrack_helper_bcm_nat, GFP_ATOMIC);
+			if (help != NULL) {
+				help->helper = &nf_conntrack_helper_bcm_nat;
+				pr_debug("bcm_nat: helper set\n");
+			}
+		}
+		return ret;
+	}
+#endif /* CONFIG_BCM_KF_NETFILTER */
+
 	/* Transfer from original range. */
 	memset(&newrange.min_addr, 0, sizeof(newrange.min_addr));
 	memset(&newrange.max_addr, 0, sizeof(newrange.max_addr));
@@ -148,6 +352,9 @@
 void nf_nat_masquerade_ipv4_unregister_notifier(void)
 {
+#if defined(CONFIG_BCM_KF_NETFILTER)
+	nf_conntrack_helper_unregister(&nf_conntrack_helper_bcm_nat);
+#endif
 	/* check if the notifier still has clients */
 	if (atomic_dec_return(&masquerade_notifier_refcount) > 0)
 		return;