--- zzzz-none-000/linux-5.4.213/net/ipv4/netfilter/ip_tables.c	2022-09-15 10:04:56.000000000 +0000
+++ miami-7690-761/linux-5.4.213/net/ipv4/netfilter/ip_tables.c	2024-05-29 11:20:02.000000000 +0000
@@ -50,9 +50,12 @@
 {
 	unsigned long ret;
 
-	if (NF_INVF(ipinfo, IPT_INV_SRCIP,
+	if (ipinfo->flags & IPT_F_NO_DEF_MATCH)
+		return true;
+
+	if (NF_INVF(ipinfo, IPT_INV_SRCIP, ipinfo->smsk.s_addr &&
 		    (ip->saddr & ipinfo->smsk.s_addr) != ipinfo->src.s_addr) ||
-	    NF_INVF(ipinfo, IPT_INV_DSTIP,
+	    NF_INVF(ipinfo, IPT_INV_DSTIP, ipinfo->dmsk.s_addr &&
 		    (ip->daddr & ipinfo->dmsk.s_addr) != ipinfo->dst.s_addr))
 		return false;
 
@@ -80,6 +83,29 @@
 	return true;
 }
 
+static void
+ip_checkdefault(struct ipt_ip *ip)
+{
+	static const char iface_mask[IFNAMSIZ] = {};
+
+	if (ip->invflags || ip->flags & IPT_F_FRAG)
+		return;
+
+	if (memcmp(ip->iniface_mask, iface_mask, IFNAMSIZ) != 0)
+		return;
+
+	if (memcmp(ip->outiface_mask, iface_mask, IFNAMSIZ) != 0)
+		return;
+
+	if (ip->smsk.s_addr || ip->dmsk.s_addr)
+		return;
+
+	if (ip->proto)
+		return;
+
+	ip->flags |= IPT_F_NO_DEF_MATCH;
+}
+
 static bool
 ip_checkentry(const struct ipt_ip *ip)
 {
@@ -220,6 +246,33 @@
 	return (void *)entry + entry->next_offset;
 }
 
+static bool
+ipt_handle_default_rule(struct ipt_entry *e, unsigned int *verdict)
+{
+	struct xt_entry_target *t;
+	struct xt_standard_target *st;
+
+	if (e->target_offset != sizeof(struct ipt_entry))
+		return false;
+
+	if (!(e->ip.flags & IPT_F_NO_DEF_MATCH))
+		return false;
+
+	t = ipt_get_target(e);
+	if (t->u.kernel.target->target)
+		return false;
+
+	st = (struct xt_standard_target *) t;
+	if (st->verdict == XT_RETURN)
+		return false;
+
+	if (st->verdict >= 0)
+		return false;
+
+	*verdict = (unsigned)(-st->verdict) - 1;
+	return true;
+}
+
 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
 unsigned int
 ipt_do_table(struct sk_buff *skb,
@@ -240,27 +293,28 @@
 	unsigned int addend;
 
 	/* Initialization */
+	WARN_ON(!(table->valid_hooks & (1 << hook)));
+	local_bh_disable();
+	private = READ_ONCE(table->private);
+	cpu = smp_processor_id();
+	table_base = private->entries;
+
+	e = get_entry(table_base, private->hook_entry[hook]);
+	if (ipt_handle_default_rule(e, &verdict)) {
+		struct xt_counters *counter;
+
+		counter = xt_get_this_cpu_counter(&e->counters);
+		ADD_COUNTER(*counter, skb->len, 1);
+		local_bh_enable();
+		return verdict;
+	}
+
 	stackidx = 0;
 	ip = ip_hdr(skb);
 	indev = state->in ? state->in->name : nulldevname;
 	outdev = state->out ? state->out->name : nulldevname;
-	/* We handle fragments by dealing with the first fragment as
-	 * if it was a normal packet. All other fragments are treated
-	 * normally, except that they will NEVER match rules that ask
-	 * things we don't know, ie. tcp syn flag or ports). If the
-	 * rule is also a fragment-specific rule, non-fragments won't
-	 * match it. */
-	acpar.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
-	acpar.thoff = ip_hdrlen(skb);
-	acpar.hotdrop = false;
-	acpar.state = state;
 
-	WARN_ON(!(table->valid_hooks & (1 << hook)));
-	local_bh_disable();
 	addend = xt_write_recseq_begin();
-	private = READ_ONCE(table->private); /* Address dependency. */
-	cpu = smp_processor_id();
-	table_base = private->entries;
 	jumpstack = (struct ipt_entry **)private->jumpstack[cpu];
 
 	/* Switch to alternate jumpstack if we're being invoked via TEE.
@@ -273,7 +327,16 @@
 	if (static_key_false(&xt_tee_enabled))
 		jumpstack += private->stacksize * __this_cpu_read(nf_skb_duplicated);
 
-	e = get_entry(table_base, private->hook_entry[hook]);
+	/* We handle fragments by dealing with the first fragment as
+	 * if it was a normal packet. All other fragments are treated
+	 * normally, except that they will NEVER match rules that ask
+	 * things we don't know, ie. tcp syn flag or ports). If the
+	 * rule is also a fragment-specific rule, non-fragments won't
+	 * match it. */
+	acpar.fragoff = ntohs(ip->frag_off) & IP_OFFSET;
+	acpar.thoff = ip_hdrlen(skb);
+	acpar.hotdrop = false;
+	acpar.state = state;
 
 	do {
 		const struct xt_entry_target *t;
@@ -462,6 +525,28 @@
 }
 
 static int
+check_entry(struct ipt_entry *e)
+{
+	const struct xt_entry_target *t;
+
+	if (!ip_checkentry(&e->ip))
+		return -EINVAL;
+
+	ip_checkdefault(&e->ip);
+
+	if (e->target_offset + sizeof(struct xt_entry_target) >
+	    e->next_offset)
+		return -EINVAL;
+
+	t = ipt_get_target_c(e);
+
+	if (e->target_offset + t->u.target_size > e->next_offset)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int
 check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
 {
 	const struct ipt_ip *ip = par->entryinfo;
@@ -524,6 +609,10 @@
 	struct xt_mtchk_param mtpar;
 	struct xt_entry_match *ematch;
 
+	ret = check_entry(e);
+	if (ret)
+		return ret;
+
 	if (!xt_percpu_counter_alloc(alloc_state, &e->counters))
 		return -ENOMEM;
 
@@ -818,6 +907,7 @@
 	const struct xt_table_info *private = table->private;
 	int ret = 0;
 	const void *loc_cpu_entry;
+	u8 flags;
 
 	counters = alloc_counters(table);
 	if (IS_ERR(counters))
@@ -845,6 +935,14 @@
 			goto free_counters;
 		}
 
+		flags = e->ip.flags & IPT_F_MASK;
+		if (copy_to_user(userptr + off
+				 + offsetof(struct ipt_entry, ip.flags),
+				 &flags, sizeof(flags)) != 0) {
+			ret = -EFAULT;
+			goto free_counters;
+		}
+
 		for (i = sizeof(struct ipt_entry);
 		     i < e->target_offset;
 		     i += m->u.match_size) {
@@ -1225,12 +1323,15 @@
 	compat_uint_t origsize;
 	const struct xt_entry_match *ematch;
 	int ret = 0;
+	u8 flags = e->ip.flags & IPT_F_MASK;
 
 	origsize = *size;
 	ce = *dstptr;
 	if (copy_to_user(ce, e, sizeof(struct ipt_entry)) != 0 ||
 	    copy_to_user(&ce->counters, &counters[i],
-			 sizeof(counters[i])) != 0)
+			 sizeof(counters[i])) != 0 ||
+	    copy_to_user(&ce->ip.flags, &flags,
+			 sizeof(flags)) != 0)
 		return -EFAULT;
 
 	*dstptr += sizeof(struct compat_ipt_entry);
@@ -1305,8 +1406,10 @@
 	    sizeof(struct compat_xt_entry_target))
 		return -EINVAL;
 
-	if (!ip_checkentry(&e->ip))
-		return -EINVAL;
+	/* For purposes of check_entry casting the compat entry is fine */
+	ret = check_entry((struct ipt_entry *)e);
+	if (ret)
+		return ret;
 
 	ret = xt_compat_check_entry_offsets(e, e->elems,
 					    e->target_offset, e->next_offset);
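
The ip_checkdefault() helper above sets IPT_F_NO_DEF_MATCH only when nothing in the rule's struct ipt_ip can discriminate between packets: no inversion flags, no IPT_F_FRAG, all-zero in/out interface masks, zero source and destination address masks, and protocol 0 (any). A stand-alone sketch of the same predicate, over a simplified rule struct whose layout and field names are illustrative rather than the kernel's:

#include <stdbool.h>
#include <string.h>

#define IFNAMSIZ 16

/* Simplified stand-in for the struct ipt_ip fields that
 * ip_checkdefault() inspects; names here are illustrative. */
struct ip_rule {
	unsigned char invflags;         /* IPT_INV_* bits */
	bool frag_only;                 /* IPT_F_FRAG */
	char iniface_mask[IFNAMSIZ];    /* all-zero == any in-interface */
	char outiface_mask[IFNAMSIZ];   /* all-zero == any out-interface */
	unsigned int smsk, dmsk;        /* address masks; 0 == wildcard */
	unsigned char proto;            /* 0 == any protocol */
};

/* True iff no field can reject a packet, i.e. the rule is a pure
 * default match whose evaluation the fast path may skip. */
static bool matches_everything(const struct ip_rule *r)
{
	static const char zero_mask[IFNAMSIZ];

	if (r->invflags || r->frag_only)
		return false;
	if (memcmp(r->iniface_mask, zero_mask, IFNAMSIZ) != 0 ||
	    memcmp(r->outiface_mask, zero_mask, IFNAMSIZ) != 0)
		return false;
	return !(r->smsk || r->dmsk || r->proto);
}

In iptables terms, a bare rule such as "-A FORWARD -j DROP" satisfies the predicate, while adding "-p tcp", "-i eth0", or "-s 10.0.0.0/8" fails one of the checks and keeps the rule on the slow path.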
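
The early exit in ipt_handle_default_rule() relies on how xtables standard targets encode builtin verdicts: NF_ACCEPT, NF_DROP and friends are stored as -(verdict + 1), leaving values >= 0 free to serve as jump offsets into the rule blob, and XT_RETURN (itself a negative constant) must be screened out before the sign test. A minimal user-space round trip of that encoding; the NF_* values match <linux/netfilter.h>, the helper names are illustrative:

#include <assert.h>
#include <stdio.h>

/* Verdict values as defined in <linux/netfilter.h>. */
#define NF_DROP   0
#define NF_ACCEPT 1

/* xtables standard targets store builtin verdicts as -(verdict + 1),
 * so that non-negative values remain usable as jump offsets. */
static int encode_verdict(unsigned int nf_verdict)
{
	return -(int)nf_verdict - 1;
}

/* Mirrors *verdict = (unsigned)(-st->verdict) - 1 in the patch. */
static unsigned int decode_verdict(int st_verdict)
{
	return (unsigned int)(-st_verdict) - 1;
}

int main(void)
{
	assert(decode_verdict(encode_verdict(NF_ACCEPT)) == NF_ACCEPT);
	assert(decode_verdict(encode_verdict(NF_DROP)) == NF_DROP);
	printf("NF_ACCEPT policy is stored as verdict %d\n",
	       encode_verdict(NF_ACCEPT));	/* prints -2 */
	return 0;
}

With NF_ACCEPT == 1, an ACCEPT policy is stored as -2, and (unsigned)(-(-2)) - 1 recovers 1, which ipt_do_table() can return directly after bumping the per-cpu counters.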