Diffstat (limited to 'target/linux/generic/patches-3.18/611-netfilter_match_bypass_default_table.patch')
-rw-r--r--  target/linux/generic/patches-3.18/611-netfilter_match_bypass_default_table.patch  94
1 file changed, 94 insertions, 0 deletions
diff --git a/target/linux/generic/patches-3.18/611-netfilter_match_bypass_default_table.patch b/target/linux/generic/patches-3.18/611-netfilter_match_bypass_default_table.patch
new file mode 100644
index 0000000..ef993c8
--- /dev/null
+++ b/target/linux/generic/patches-3.18/611-netfilter_match_bypass_default_table.patch
@@ -0,0 +1,94 @@
+--- a/net/ipv4/netfilter/ip_tables.c
++++ b/net/ipv4/netfilter/ip_tables.c
+@@ -310,6 +310,33 @@ struct ipt_entry *ipt_next_entry(const s
+ return (void *)entry + entry->next_offset;
+ }
+
++static bool
++ipt_handle_default_rule(struct ipt_entry *e, unsigned int *verdict)
++{
++ struct xt_entry_target *t;
++ struct xt_standard_target *st;
++
++ if (e->target_offset != sizeof(struct ipt_entry))
++ return false;
++
++ if (!(e->ip.flags & IPT_F_NO_DEF_MATCH))
++ return false;
++
++ t = ipt_get_target(e);
++ if (t->u.kernel.target->target)
++ return false;
++
++ st = (struct xt_standard_target *) t;
++ if (st->verdict == XT_RETURN)
++ return false;
++
++ if (st->verdict >= 0)
++ return false;
++
++ *verdict = (unsigned)(-st->verdict) - 1;
++ return true;
++}
++
+ /* Returns one of the generic firewall policies, like NF_ACCEPT. */
+ unsigned int
+ ipt_do_table(struct sk_buff *skb,
+@@ -331,9 +358,33 @@ ipt_do_table(struct sk_buff *skb,
+ unsigned int addend;
+
+ /* Initialization */
++ IP_NF_ASSERT(table->valid_hooks & (1 << hook));
++ local_bh_disable();
++ private = table->private;
++ cpu = smp_processor_id();
++ /*
++ * Ensure we load private-> members after we've fetched the base
++ * pointer.
++ */
++ smp_read_barrier_depends();
++ table_base = private->entries[cpu];
++
++ e = get_entry(table_base, private->hook_entry[hook]);
++ if (ipt_handle_default_rule(e, &verdict)) {
++ ADD_COUNTER(e->counters, skb->len, 1);
++ local_bh_enable();
++ return verdict;
++ }
++
+ ip = ip_hdr(skb);
+ indev = in ? in->name : nulldevname;
+ outdev = out ? out->name : nulldevname;
++
++ addend = xt_write_recseq_begin();
++ jumpstack = (struct ipt_entry **)private->jumpstack[cpu];
++ stackptr = per_cpu_ptr(private->stackptr, cpu);
++ origptr = *stackptr;
++
+ /* We handle fragments by dealing with the first fragment as
+ * if it was a normal packet. All other fragments are treated
+ * normally, except that they will NEVER match rules that ask
+@@ -348,23 +399,6 @@ ipt_do_table(struct sk_buff *skb,
+ acpar.family = NFPROTO_IPV4;
+ acpar.hooknum = hook;
+
+- IP_NF_ASSERT(table->valid_hooks & (1 << hook));
+- local_bh_disable();
+- addend = xt_write_recseq_begin();
+- private = table->private;
+- cpu = smp_processor_id();
+- /*
+- * Ensure we load private-> members after we've fetched the base
+- * pointer.
+- */
+- smp_read_barrier_depends();
+- table_base = private->entries[cpu];
+- jumpstack = (struct ipt_entry **)private->jumpstack[cpu];
+- stackptr = per_cpu_ptr(private->stackptr, cpu);
+- origptr = *stackptr;
+-
+- e = get_entry(table_base, private->hook_entry[hook]);
+-
+ pr_debug("Entering %s(hook %u); sp at %u (UF %p)\n",
+ table->name, hook, origptr,
+ get_entry(table_base, private->underflow[hook]));
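
The patch above adds a fast path to ipt_do_table(): when the rule at the hook's entry point is just the chain's default policy (its match area is empty, its IPT_F_NO_DEF_MATCH flag was set by an earlier patch in the same series, and its target is the standard target carrying a built-in verdict), the rule counters are bumped and the verdict is returned immediately, skipping the recseq/jumpstack setup and the full per-rule walk. Standard targets encode built-in verdicts as negative numbers, so (unsigned)(-st->verdict) - 1 recovers the NF_* value. The sketch below is a minimal userspace illustration of that decoding step only; fake_standard_target and decode_default_verdict are hypothetical stand-ins, not kernel types or APIs, although the NF_* and XT_RETURN constants follow the kernel's definitions.

/*
 * Standalone userspace illustration (not kernel code) of how the patch
 * decodes a standard target's verdict on the bypass path.  The struct is
 * a simplified stand-in for struct xt_standard_target, not the real layout.
 */
#include <stdio.h>

#define NF_DROP    0
#define NF_ACCEPT  1
#define NF_REPEAT  4
#define XT_RETURN  (-NF_REPEAT - 1)   /* -5: "return to calling chain" */

/* Simplified stand-in carrying only the verdict field. */
struct fake_standard_target {
	int verdict;   /* <0: built-in verdict, >=0: jump offset, XT_RETURN: return */
};

/*
 * Mirrors the tail of ipt_handle_default_rule(): only negative, non-RETURN
 * verdicts qualify for the bypass, and -verdict - 1 recovers the NF_* value.
 */
static int decode_default_verdict(const struct fake_standard_target *st,
				  unsigned int *verdict)
{
	if (st->verdict == XT_RETURN)
		return 0;            /* RETURN: must walk the chain normally */
	if (st->verdict >= 0)
		return 0;            /* jump to another rule: no bypass */

	*verdict = (unsigned int)(-st->verdict) - 1;
	return 1;
}

int main(void)
{
	/* A chain policy of ACCEPT is stored as -NF_ACCEPT - 1 == -2. */
	struct fake_standard_target policy_accept = { .verdict = -NF_ACCEPT - 1 };
	struct fake_standard_target policy_drop   = { .verdict = -NF_DROP - 1 };
	unsigned int v;

	if (decode_default_verdict(&policy_accept, &v))
		printf("ACCEPT policy decodes to NF_ verdict %u\n", v); /* prints 1 */
	if (decode_default_verdict(&policy_drop, &v))
		printf("DROP policy decodes to NF_ verdict %u\n", v);   /* prints 0 */
	return 0;
}

Built as plain C, this prints verdicts 1 (NF_ACCEPT) and 0 (NF_DROP), which is what the patched ipt_do_table() hands back on the bypass path right after re-enabling bottom halves; the reordering of the initialization code exists purely so that this check can run before the more expensive per-CPU jumpstack and sequence-counter setup.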