--- zzzz-none-000/linux-4.9.276/net/ipv4/netfilter/ip_tables.c	2021-07-20 14:21:16.000000000 +0000
+++ falcon-5530-750/linux-4.9.276/net/ipv4/netfilter/ip_tables.c	2023-04-05 08:19:02.000000000 +0000
@@ -58,10 +58,11 @@
 {
 	unsigned long ret;
 
-	if (NF_INVF(ipinfo, IPT_INV_SRCIP,
-		    (ip->saddr & ipinfo->smsk.s_addr) != ipinfo->src.s_addr) ||
-	    NF_INVF(ipinfo, IPT_INV_DSTIP,
-		    (ip->daddr & ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr))
+	if (ipinfo->flags & IPT_F_NO_DEF_MATCH)
+		return true;
+
+	if (NF_INVF(ipinfo, IPT_INV_SRCIP, ipinfo->smsk.s_addr && (ip->saddr & ipinfo->smsk.s_addr) != ipinfo->src.s_addr) ||
+	    NF_INVF(ipinfo, IPT_INV_DSTIP, ipinfo->dmsk.s_addr && (ip->daddr & ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr))
 		return false;
 
 	ret = ifname_compare_aligned(indev, ipinfo->iniface, ipinfo->iniface_mask);
@@ -88,6 +89,29 @@
 	return true;
 }
 
+static void
+ip_checkdefault(struct ipt_ip *ip)
+{
+	static const char iface_mask[IFNAMSIZ] = {};
+
+	if (ip->invflags || ip->flags & IPT_F_FRAG)
+		return;
+
+	if (memcmp(ip->iniface_mask, iface_mask, IFNAMSIZ) != 0)
+		return;
+
+	if (memcmp(ip->outiface_mask, iface_mask, IFNAMSIZ) != 0)
+		return;
+
+	if (ip->smsk.s_addr || ip->dmsk.s_addr)
+		return;
+
+	if (ip->proto)
+		return;
+
+	ip->flags |= IPT_F_NO_DEF_MATCH;
+}
+
 static bool
 ip_checkentry(const struct ipt_ip *ip)
 {
@@ -228,6 +252,33 @@
 	return (void *)entry + entry->next_offset;
 }
 
+static bool
+ipt_handle_default_rule(struct ipt_entry *e, unsigned int *verdict)
+{
+	struct xt_entry_target *t;
+	struct xt_standard_target *st;
+
+	if (e->target_offset != sizeof(struct ipt_entry))
+		return false;
+
+	if (!(e->ip.flags & IPT_F_NO_DEF_MATCH))
+		return false;
+
+	t = ipt_get_target(e);
+	if (t->u.kernel.target->target)
+		return false;
+
+	st = (struct xt_standard_target *)t;
+	if (st->verdict == XT_RETURN)
+		return false;
+
+	if (st->verdict >= 0)
+		return false;
+
+	*verdict = (unsigned)(-st->verdict) - 1;
+	return true;
+}
+
 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
 unsigned int
 ipt_do_table(struct sk_buff *skb,
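Reviewer note: the decode in ipt_handle_default_rule() relies on the x_tables convention that a standard target stores an absolute netfilter verdict v as the negative value -v - 1, while non-negative values are jump offsets and XT_RETURN is reserved for returning from a user-defined chain. A minimal sketch of that mapping, for illustration only (decode_std_verdict() is a hypothetical helper, not part of the patch):

	/* x_tables standard-target verdict encoding:
	 *   stored >= 0         -> jump offset into the table
	 *   stored == XT_RETURN -> return from the current chain
	 *   stored <  0         -> absolute verdict, stored as -(verdict) - 1,
	 *                          e.g. NF_DROP (0) -> -1, NF_ACCEPT (1) -> -2
	 */
	static unsigned int decode_std_verdict(int stored)
	{
		return (unsigned int)(-stored) - 1;	/* -1 -> NF_DROP, -2 -> NF_ACCEPT */
	}

Since the function bails out on XT_RETURN and on non-negative (jump) verdicts first, only absolute verdicts such as NF_ACCEPT or NF_DROP ever take the fast path.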
@@ -248,37 +299,34 @@
 	unsigned int addend;
 
 	/* Initialization */
-	stackidx = 0;
-	ip = ip_hdr(skb);
-	indev = state->in ? state->in->name : nulldevname;
-	outdev = state->out ? state->out->name : nulldevname;
-	/* We handle fragments by dealing with the first fragment as
-	 * if it was a normal packet. All other fragments are treated
-	 * normally, except that they will NEVER match rules that ask
-	 * things we don't know, ie. tcp syn flag or ports). If the
-	 * rule is also a fragment-specific rule, non-fragments won't
-	 * match it. */
-	acpar.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
-	acpar.thoff   = ip_hdrlen(skb);
-	acpar.hotdrop = false;
-	acpar.net     = state->net;
-	acpar.in      = state->in;
-	acpar.out     = state->out;
-	acpar.family  = NFPROTO_IPV4;
-	acpar.hooknum = hook;
-
 	IP_NF_ASSERT(table->valid_hooks & (1 << hook));
 	local_bh_disable();
-	addend = xt_write_recseq_begin();
-	private = table->private;
-	cpu        = smp_processor_id();
+	private = table->private;
+	cpu = smp_processor_id();
 	/*
 	 * Ensure we load private-> members after we've fetched the base
 	 * pointer.
 	 */
 	smp_read_barrier_depends();
 	table_base = private->entries;
-	jumpstack  = (struct ipt_entry **)private->jumpstack[cpu];
+
+	e = get_entry(table_base, private->hook_entry[hook]);
+	if (ipt_handle_default_rule(e, &verdict)) {
+		struct xt_counters *counter;
+
+		counter = xt_get_this_cpu_counter(&e->counters);
+		ADD_COUNTER(*counter, skb->len, 1);
+		local_bh_enable();
+		return verdict;
+	}
+
+	stackidx = 0;
+	ip = ip_hdr(skb);
+	indev = state->in ? state->in->name : nulldevname;
+	outdev = state->out ? state->out->name : nulldevname;
+
+	addend = xt_write_recseq_begin();
+	jumpstack  = (struct ipt_entry **)private->jumpstack[cpu];
 
 	/* Switch to alternate jumpstack if we're being invoked via TEE.
 	 * TEE issues XT_CONTINUE verdict on original skb so we must not
@@ -290,7 +339,20 @@
 	if (static_key_false(&xt_tee_enabled))
 		jumpstack += private->stacksize * __this_cpu_read(nf_skb_duplicated);
 
-	e = get_entry(table_base, private->hook_entry[hook]);
+	/* We handle fragments by dealing with the first fragment as
+	 * if it was a normal packet. All other fragments are treated
+	 * normally, except that they will NEVER match rules that ask
+	 * things we don't know, ie. tcp syn flag or ports). If the
+	 * rule is also a fragment-specific rule, non-fragments won't
+	 * match it. */
+	acpar.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
+	acpar.thoff = ip_hdrlen(skb);
+	acpar.hotdrop = false;
+	acpar.net = state->net;
+	acpar.in = state->in;
+	acpar.out = state->out;
+	acpar.family = NFPROTO_IPV4;
+	acpar.hooknum = hook;
 
 	do {
 		const struct xt_entry_target *t;
@@ -550,6 +612,8 @@
 	struct xt_mtchk_param mtpar;
 	struct xt_entry_match *ematch;
 
+	ip_checkdefault(&e->ip);
+
 	if (!xt_percpu_counter_alloc(alloc_state, &e->counters))
 		return -ENOMEM;
 
@@ -830,6 +894,7 @@
 	const struct xt_table_info *private = table->private;
 	int ret = 0;
 	const void *loc_cpu_entry;
+	u8 flags;
 
 	counters = alloc_counters(table);
 	if (IS_ERR(counters))
@@ -857,6 +922,13 @@
 			goto free_counters;
 		}
 
+		flags = e->ip.flags & IPT_F_MASK;
+		if (copy_to_user(userptr + off + offsetof(struct ipt_entry, ip.flags),
+				 &flags, sizeof(flags)) != 0) {
+			ret = -EFAULT;
+			goto free_counters;
+		}
+
 		for (i = sizeof(struct ipt_entry);
 		     i < e->target_offset;
 		     i += m->u.match_size) {
@@ -1123,6 +1195,58 @@
 	return ret;
 }
 
+#if IS_ENABLED(CONFIG_PPA_IPTABLE_EVENT_HANDLING)
+static RAW_NOTIFIER_HEAD(iptable_chain);
+
+static int call_iptable_notifier(struct notifier_block *nb, unsigned long val)
+{
+	return nb->notifier_call(nb, val, NULL);
+}
+
+int register_iptable_notifier(struct notifier_block *nb)
+{
+	int err;
+
+	rtnl_lock();
+	err = raw_notifier_chain_register(&iptable_chain, nb);
+	if (err)
+		goto unlock;
+
+	err = call_iptable_notifier(nb, IPTABLE_CHANGE);
+	err = notifier_to_errno(err);
+	if (err)
+		raw_notifier_chain_unregister(&iptable_chain, nb);
+
+unlock:
+	rtnl_unlock();
+	return err;
+}
+EXPORT_SYMBOL(register_iptable_notifier);
+
+int unregister_iptable_notifier(struct notifier_block *nb)
+{
+	int err;
+
+	rtnl_lock();
+	err = raw_notifier_chain_unregister(&iptable_chain, nb);
+	rtnl_unlock();
+
+	return err;
+}
+EXPORT_SYMBOL(unregister_iptable_notifier);
+
+static int call_iptable_notifiers_info(unsigned long val)
+{
+	return raw_notifier_call_chain(&iptable_chain, val, NULL);
+}
+
+int call_iptable_notifiers(void)
+{
+	return call_iptable_notifiers_info(IPTABLE_CHANGE);
+}
+EXPORT_SYMBOL(call_iptable_notifiers);
+#endif
+
 static int
 do_replace(struct net *net, const void __user *user, unsigned int len)
 {
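Reviewer note: the notifier API added above is meant to be consumed from the PPA side. A subscriber would look roughly like the following sketch; the handler name and module boilerplate are illustrative assumptions, not part of this patch:

	static int ppa_iptable_event(struct notifier_block *nb,
				     unsigned long event, void *ptr)
	{
		/* e.g. flush the sessions currently offloaded to the HW */
		return NOTIFY_DONE;
	}

	static struct notifier_block ppa_iptable_nb = {
		.notifier_call = ppa_iptable_event,
	};

	static int __init ppa_example_init(void)
	{
		/* registration also delivers one initial IPTABLE_CHANGE
		 * event to the new subscriber, see register_iptable_notifier()
		 */
		return register_iptable_notifier(&ppa_iptable_nb);
	}

	static void __exit ppa_example_exit(void)
	{
		unregister_iptable_notifier(&ppa_iptable_nb);
	}

	module_init(ppa_example_init);
	module_exit(ppa_example_exit);

Note that register/unregister take rtnl_lock(), so neither may be called from atomic context.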
@@ -1246,12 +1370,15 @@
 	compat_uint_t origsize;
 	const struct xt_entry_match *ematch;
 	int ret = 0;
+	u8 flags = e->ip.flags & IPT_F_MASK;
 
 	origsize = *size;
 	ce = (struct compat_ipt_entry __user *)*dstptr;
 	if (copy_to_user(ce, e, sizeof(struct ipt_entry)) != 0 ||
 	    copy_to_user(&ce->counters, &counters[i],
-			 sizeof(counters[i])) != 0)
+			 sizeof(counters[i])) != 0 ||
+	    copy_to_user(&ce->ip.flags, &flags,
+			 sizeof(flags)) != 0)
 		return -EFAULT;
 
 	*dstptr += sizeof(struct compat_ipt_entry);
@@ -1693,6 +1820,12 @@
 	switch (cmd) {
 	case IPT_SO_SET_REPLACE:
 		ret = do_replace(sock_net(sk), user, len);
+#if IS_ENABLED(CONFIG_PPA_IPTABLE_EVENT_HANDLING)
+		/* Invoke the PPA handler so that the sessions currently
+		 * programmed in the HW are flushed whenever the iptables
+		 * rules are modified. */
+		call_iptable_notifiers();
+#endif
 		break;
 
 	case IPT_SO_SET_ADD_COUNTERS:
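Reviewer note: besides the fast path, the first hunk also changes the address comparison itself: a zero mask now short-circuits to "match" via the added "ipinfo->smsk.s_addr &&" guard, instead of relying on userspace having canonicalized src to src & smsk. A standalone illustration of the difference (userspace sketch, not part of the patch):

	#include <stdio.h>
	#include <stdint.h>

	/* Old predicate: mismatch when (saddr & smsk) != src. */
	static int mismatch_old(uint32_t saddr, uint32_t smsk, uint32_t src)
	{
		return (saddr & smsk) != src;
	}

	/* New predicate: a zero mask can never mismatch. */
	static int mismatch_new(uint32_t saddr, uint32_t smsk, uint32_t src)
	{
		return smsk && (saddr & smsk) != src;
	}

	int main(void)
	{
		uint32_t saddr = 0xc0a80001;	/* 192.168.0.1 */

		/* mask 0, src 0: both variants match, prints "0 0" */
		printf("%d %d\n", mismatch_old(saddr, 0, 0),
				  mismatch_new(saddr, 0, 0));
		/* mask 0, src != 0: old mismatches, new still matches,
		 * prints "1 0" */
		printf("%d %d\n", mismatch_old(saddr, 0, 1),
				  mismatch_new(saddr, 0, 1));
		return 0;
	}

The zero-mask guard is also what lets ip_checkdefault() treat smsk/dmsk == 0 as "matches everything" without inspecting src/dst at all.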