path: root/target/linux/generic-2.6/patches-2.6.27/130-netfilter_ipset.patch
Diffstat (limited to 'target/linux/generic-2.6/patches-2.6.27/130-netfilter_ipset.patch')
-rw-r--r--  target/linux/generic-2.6/patches-2.6.27/130-netfilter_ipset.patch  |  7749
1 file changed, 0 insertions, 7749 deletions
diff --git a/target/linux/generic-2.6/patches-2.6.27/130-netfilter_ipset.patch b/target/linux/generic-2.6/patches-2.6.27/130-netfilter_ipset.patch
deleted file mode 100644
index dd8dbd9fea..0000000000
--- a/target/linux/generic-2.6/patches-2.6.27/130-netfilter_ipset.patch
+++ /dev/null
@@ -1,7749 +0,0 @@
---- a/include/linux/netfilter_ipv4/Kbuild
-+++ b/include/linux/netfilter_ipv4/Kbuild
-@@ -45,3 +45,20 @@ header-y += ipt_ttl.h
-
- unifdef-y += ip_queue.h
- unifdef-y += ip_tables.h
-+
-+unifdef-y += ip_set.h
-+header-y += ip_set_iphash.h
-+unifdef-y += ip_set_bitmaps.h
-+unifdef-y += ip_set_getport.h
-+unifdef-y += ip_set_hashes.h
-+header-y += ip_set_ipmap.h
-+header-y += ip_set_ipporthash.h
-+header-y += ip_set_ipportiphash.h
-+header-y += ip_set_ipportnethash.h
-+unifdef-y += ip_set_iptree.h
-+unifdef-y += ip_set_iptreemap.h
-+header-y += ip_set_jhash.h
-+header-y += ip_set_macipmap.h
-+header-y += ip_set_nethash.h
-+header-y += ip_set_portmap.h
-+header-y += ip_set_setlist.h
---- /dev/null
-+++ b/include/linux/netfilter_ipv4/ip_set.h
-@@ -0,0 +1,574 @@
-+#ifndef _IP_SET_H
-+#define _IP_SET_H
-+
-+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
-+ * Patrick Schaaf <bof@bof.de>
-+ * Martin Josefsson <gandalf@wlug.westbo.se>
-+ * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+#if 0
-+#define IP_SET_DEBUG
-+#endif
-+
-+/*
-+ * A sockopt of such quality has hardly ever been seen before on the open
-+ * market! This little beauty, hardly ever used: above 64, so it's
-+ * traditionally used for firewalling, not touched (even once!) by the
-+ * 2.0, 2.2 and 2.4 kernels!
-+ *
-+ * Comes with its own certificate of authenticity, valid anywhere in the
-+ * Free world!
-+ *
-+ * Rusty, 19.4.2000
-+ */
-+#define SO_IP_SET 83
-+
-+/*
-+ * Heavily modified by Joakim Axelsson 08.03.2002
-+ * - Made it more module-based
-+ *
-+ * Additional heavy modifications by Jozsef Kadlecsik 22.02.2004
-+ * - bindings added
-+ * - in order to "deal with" backward compatibility, renamed to ipset
-+ */
-+
-+/*
-+ * Used so that the kernel module and ipset-binary can match their versions
-+ */
-+#define IP_SET_PROTOCOL_VERSION 3
-+
-+#define IP_SET_MAXNAMELEN 32 /* set names and set typenames */
-+
-+/* Lets work with our own typedef for representing an IP address.
-+ * We hope to make the code more portable, possibly to IPv6...
-+ *
-+ * The representation works in HOST byte order, because most set types
-+ * will perform arithmetic operations and compare operations.
-+ *
-+ * For now the type is an uint32_t.
-+ *
-+ * Make sure to ONLY use the functions when translating and parsing
-+ * in order to keep the host byte order and make it more portable:
-+ * parse_ip()
-+ * parse_mask()
-+ * parse_ipandmask()
-+ * ip_tostring()
-+ * (Joakim: where are they???)
-+ */
-+
-+typedef uint32_t ip_set_ip_t;
-+
-+/* Sets are identified by an id in kernel space. Tweak with ip_set_id_t
-+ * and IP_SET_INVALID_ID if you want to increase the max number of sets.
-+ */
-+typedef uint16_t ip_set_id_t;
-+
-+#define IP_SET_INVALID_ID 65535
-+
-+/* How deep we follow bindings */
-+#define IP_SET_MAX_BINDINGS 6
-+
-+/*
-+ * Option flags for kernel operations (ipt_set_info)
-+ */
-+#define IPSET_SRC 0x01 /* Source match/add */
-+#define IPSET_DST 0x02 /* Destination match/add */
-+#define IPSET_MATCH_INV 0x04 /* Inverse matching */
-+
-+/*
-+ * Set features
-+ */
-+#define IPSET_TYPE_IP 0x01 /* IP address type of set */
-+#define IPSET_TYPE_PORT 0x02 /* Port type of set */
-+#define IPSET_DATA_SINGLE 0x04 /* Single data storage */
-+#define IPSET_DATA_DOUBLE 0x08 /* Double data storage */
-+#define IPSET_DATA_TRIPLE 0x10 /* Triple data storage */
-+#define IPSET_TYPE_IP1 0x20 /* IP address type of set */
-+#define IPSET_TYPE_SETNAME 0x40 /* setname type of set */
-+
-+/* Reserved keywords */
-+#define IPSET_TOKEN_DEFAULT ":default:"
-+#define IPSET_TOKEN_ALL ":all:"
-+
-+/* SO_IP_SET operation constants, and their request struct types.
-+ *
-+ * Operation ids:
-+ * 0-99: commands with version checking
-+ * 100-199: add/del/test/bind/unbind
-+ * 200-299: list, save, restore
-+ */
-+
-+/* Single shot operations:
-+ * version, create, destroy, flush, rename and swap
-+ *
-+ * Sets are identified by name.
-+ */
-+
-+#define IP_SET_REQ_STD \
-+ unsigned op; \
-+ unsigned version; \
-+ char name[IP_SET_MAXNAMELEN]
-+
-+#define IP_SET_OP_CREATE 0x00000001 /* Create a new (empty) set */
-+struct ip_set_req_create {
-+ IP_SET_REQ_STD;
-+ char typename[IP_SET_MAXNAMELEN];
-+};
-+
-+#define IP_SET_OP_DESTROY 0x00000002 /* Remove a (empty) set */
-+struct ip_set_req_std {
-+ IP_SET_REQ_STD;
-+};
-+
-+#define IP_SET_OP_FLUSH 0x00000003 /* Remove all IPs in a set */
-+/* Uses ip_set_req_std */
-+
-+#define IP_SET_OP_RENAME 0x00000004 /* Rename a set */
-+/* Uses ip_set_req_create */
-+
-+#define IP_SET_OP_SWAP 0x00000005 /* Swap two sets */
-+/* Uses ip_set_req_create */
-+
-+union ip_set_name_index {
-+ char name[IP_SET_MAXNAMELEN];
-+ ip_set_id_t index;
-+};
-+
-+#define IP_SET_OP_GET_BYNAME 0x00000006 /* Get set index by name */
-+struct ip_set_req_get_set {
-+ unsigned op;
-+ unsigned version;
-+ union ip_set_name_index set;
-+};
-+
-+#define IP_SET_OP_GET_BYINDEX 0x00000007 /* Get set name by index */
-+/* Uses ip_set_req_get_set */
-+
-+#define IP_SET_OP_VERSION 0x00000100 /* Ask kernel version */
-+struct ip_set_req_version {
-+ unsigned op;
-+ unsigned version;
-+};
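
The request structures above are exchanged with the kernel through the SO_IP_SET socket option defined earlier in this header. As a rough illustration of the version handshake, here is a minimal userspace sketch, assuming the usual ipset 4.x convention of a raw AF_INET socket and getsockopt() at the SOL_IP level; treat the socket details as assumptions rather than part of this patch.

/* Hedged sketch: query the ipset protocol version over the SO_IP_SET
 * sockopt described above.  Assumes a Linux host with this kernel
 * patch applied; the raw socket and SOL_IP level follow the usual
 * ipset 4.x userspace convention and are stated here as assumptions. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>

#ifndef SOL_IP
#define SOL_IP IPPROTO_IP		/* both are 0 on Linux */
#endif

#define SO_IP_SET		83
#define IP_SET_OP_VERSION	0x00000100
#define IP_SET_PROTOCOL_VERSION	3

struct ip_set_req_version {
	unsigned op;
	unsigned version;
};

int main(void)
{
	struct ip_set_req_version req;
	socklen_t size = sizeof(req);
	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);

	if (fd < 0) {
		perror("socket (needs CAP_NET_RAW/root)");
		return 1;
	}
	memset(&req, 0, sizeof(req));
	req.op = IP_SET_OP_VERSION;
	if (getsockopt(fd, SOL_IP, SO_IP_SET, &req, &size) < 0) {
		perror("getsockopt SO_IP_SET");
		close(fd);
		return 1;
	}
	printf("kernel ipset protocol version: %u (tool built for %u)\n",
	       req.version, IP_SET_PROTOCOL_VERSION);
	close(fd);
	return 0;
}
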
-+
-+/* Double shots operations:
-+ * add, del, test, bind and unbind.
-+ *
-+ * First we query the kernel to get the index and type of the target set,
-+ * then issue the command. Validity of IP is checked in kernel in order
-+ * to minimize sockopt operations.
-+ */
-+
-+/* Get minimal set data for add/del/test/bind/unbind IP */
-+#define IP_SET_OP_ADT_GET 0x00000010 /* Get set and type */
-+struct ip_set_req_adt_get {
-+ unsigned op;
-+ unsigned version;
-+ union ip_set_name_index set;
-+ char typename[IP_SET_MAXNAMELEN];
-+};
-+
-+#define IP_SET_REQ_BYINDEX \
-+ unsigned op; \
-+ ip_set_id_t index;
-+
-+struct ip_set_req_adt {
-+ IP_SET_REQ_BYINDEX;
-+};
-+
-+#define IP_SET_OP_ADD_IP 0x00000101 /* Add an IP to a set */
-+/* Uses ip_set_req_adt, with type specific addage */
-+
-+#define IP_SET_OP_DEL_IP 0x00000102 /* Remove an IP from a set */
-+/* Uses ip_set_req_adt, with type specific addage */
-+
-+#define IP_SET_OP_TEST_IP 0x00000103 /* Test an IP in a set */
-+/* Uses ip_set_req_adt, with type specific addage */
-+
-+#define IP_SET_OP_BIND_SET 0x00000104 /* Bind an IP to a set */
-+/* Uses ip_set_req_bind, with type specific addage */
-+struct ip_set_req_bind {
-+ IP_SET_REQ_BYINDEX;
-+ char binding[IP_SET_MAXNAMELEN];
-+};
-+
-+#define IP_SET_OP_UNBIND_SET 0x00000105 /* Unbind an IP from a set */
-+/* Uses ip_set_req_bind, with type specific addage
-+ * index = 0 means unbinding for all sets */
-+
-+#define IP_SET_OP_TEST_BIND_SET 0x00000106 /* Test binding an IP to a set */
-+/* Uses ip_set_req_bind, with type specific addage */
-+
-+/* Multiple shots operations: list, save, restore.
-+ *
-+ * - check kernel version and query the max number of sets
-+ * - get the basic information on all sets
-+ * and size required for the next step
-+ * - get actual set data: header, data, bindings
-+ */
-+
-+/* Get max_sets and the index of a queried set
-+ */
-+#define IP_SET_OP_MAX_SETS 0x00000020
-+struct ip_set_req_max_sets {
-+ unsigned op;
-+ unsigned version;
-+ ip_set_id_t max_sets; /* max_sets */
-+ ip_set_id_t sets; /* real number of sets */
-+ union ip_set_name_index set; /* index of set if name used */
-+};
-+
-+/* Get the id and name of the sets plus size for next step */
-+#define IP_SET_OP_LIST_SIZE 0x00000201
-+#define IP_SET_OP_SAVE_SIZE 0x00000202
-+struct ip_set_req_setnames {
-+ unsigned op;
-+ ip_set_id_t index; /* set to list/save */
-+ u_int32_t size; /* size to get setdata/bindings */
-+ /* followed by sets number of struct ip_set_name_list */
-+};
-+
-+struct ip_set_name_list {
-+ char name[IP_SET_MAXNAMELEN];
-+ char typename[IP_SET_MAXNAMELEN];
-+ ip_set_id_t index;
-+ ip_set_id_t id;
-+};
-+
-+/* The actual list operation */
-+#define IP_SET_OP_LIST 0x00000203
-+struct ip_set_req_list {
-+ IP_SET_REQ_BYINDEX;
-+ /* sets number of struct ip_set_list in reply */
-+};
-+
-+struct ip_set_list {
-+ ip_set_id_t index;
-+ ip_set_id_t binding;
-+ u_int32_t ref;
-+ u_int32_t header_size; /* Set header data of header_size */
-+ u_int32_t members_size; /* Set members data of members_size */
-+ u_int32_t bindings_size;/* Set bindings data of bindings_size */
-+};
-+
-+struct ip_set_hash_list {
-+ ip_set_ip_t ip;
-+ ip_set_id_t binding;
-+};
-+
-+/* The save operation */
-+#define IP_SET_OP_SAVE 0x00000204
-+/* Uses ip_set_req_list, in the reply replaced by
-+ * sets number of struct ip_set_save plus a marker
-+ * ip_set_save followed by ip_set_hash_save structures.
-+ */
-+struct ip_set_save {
-+ ip_set_id_t index;
-+ ip_set_id_t binding;
-+ u_int32_t header_size; /* Set header data of header_size */
-+ u_int32_t members_size; /* Set members data of members_size */
-+};
-+
-+/* At restoring, ip == 0 means default binding for the given set: */
-+struct ip_set_hash_save {
-+ ip_set_ip_t ip;
-+ ip_set_id_t id;
-+ ip_set_id_t binding;
-+};
-+
-+/* The restore operation */
-+#define IP_SET_OP_RESTORE 0x00000205
-+/* Uses ip_set_req_setnames followed by ip_set_restore structures
-+ * plus a marker ip_set_restore, followed by ip_set_hash_save
-+ * structures.
-+ */
-+struct ip_set_restore {
-+ char name[IP_SET_MAXNAMELEN];
-+ char typename[IP_SET_MAXNAMELEN];
-+ ip_set_id_t index;
-+ u_int32_t header_size; /* Create data of header_size */
-+ u_int32_t members_size; /* Set members data of members_size */
-+};
-+
-+static inline int bitmap_bytes(ip_set_ip_t a, ip_set_ip_t b)
-+{
-+ return 4 * ((((b - a + 8) / 8) + 3) / 4);
-+}
-+
-+/* General limit for the elements in a set */
-+#define MAX_RANGE 0x0000FFFF
-+
-+#ifdef __KERNEL__
-+#include <linux/netfilter_ipv4/ip_set_compat.h>
-+#include <linux/netfilter_ipv4/ip_set_malloc.h>
-+
-+#define ip_set_printk(format, args...) \
-+ do { \
-+ printk("%s: %s: ", __FILE__, __FUNCTION__); \
-+ printk(format "\n" , ## args); \
-+ } while (0)
-+
-+#if defined(IP_SET_DEBUG)
-+#define DP(format, args...) \
-+ do { \
-+ printk("%s: %s (DBG): ", __FILE__, __FUNCTION__);\
-+ printk(format "\n" , ## args); \
-+ } while (0)
-+#define IP_SET_ASSERT(x) \
-+ do { \
-+ if (!(x)) \
-+ printk("IP_SET_ASSERT: %s:%i(%s)\n", \
-+ __FILE__, __LINE__, __FUNCTION__); \
-+ } while (0)
-+#else
-+#define DP(format, args...)
-+#define IP_SET_ASSERT(x)
-+#endif
-+
-+struct ip_set;
-+
-+/*
-+ * The ip_set_type definition - one per set type, e.g. "ipmap".
-+ *
-+ * Each individual set has a pointer, set->type, going to one
-+ * of these structures. Function pointers inside the structure implement
-+ * the real behaviour of the sets.
-+ *
-+ * If not mentioned differently, the implementation behind the function
-+ * pointers of a set_type, is expected to return 0 if ok, and a negative
-+ * errno (e.g. -EINVAL) on error.
-+ */
-+struct ip_set_type {
-+ struct list_head list; /* next in list of set types */
-+
-+ /* test for IP in set (kernel: iptables -m set src|dst)
-+ * return 0 if not in set, 1 if in set.
-+ */
-+ int (*testip_kernel) (struct ip_set *set,
-+ const struct sk_buff * skb,
-+ ip_set_ip_t *ip,
-+ const u_int32_t *flags,
-+ unsigned char index);
-+
-+ /* test for IP in set (userspace: ipset -T set IP)
-+ * return 0 if not in set, 1 if in set.
-+ */
-+ int (*testip) (struct ip_set *set,
-+ const void *data, u_int32_t size,
-+ ip_set_ip_t *ip);
-+
-+ /*
-+ * Size of the data structure passed in when
-+ * adding/deleting/testing an entry.
-+ */
-+ u_int32_t reqsize;
-+
-+ /* Add IP into set (userspace: ipset -A set IP)
-+ * Return -EEXIST if the address is already in the set,
-+ * and -ERANGE if the address lies outside the set bounds.
-+ * If the address was not already in the set, 0 is returned.
-+ */
-+ int (*addip) (struct ip_set *set,
-+ const void *data, u_int32_t size,
-+ ip_set_ip_t *ip);
-+
-+ /* Add IP into set (kernel: iptables ... -j SET set src|dst)
-+ * Return -EEXIST if the address is already in the set,
-+ * and -ERANGE if the address lies outside the set bounds.
-+ * If the address was not already in the set, 0 is returned.
-+ */
-+ int (*addip_kernel) (struct ip_set *set,
-+ const struct sk_buff * skb,
-+ ip_set_ip_t *ip,
-+ const u_int32_t *flags,
-+ unsigned char index);
-+
-+ /* remove IP from set (userspace: ipset -D set --entry x)
-+ * Return -EEXIST if the address is NOT in the set,
-+ * and -ERANGE if the address lies outside the set bounds.
-+ * If the address really was in the set, 0 is returned.
-+ */
-+ int (*delip) (struct ip_set *set,
-+ const void *data, u_int32_t size,
-+ ip_set_ip_t *ip);
-+
-+ /* remove IP from set (kernel: iptables ... -j SET --entry x)
-+ * Return -EEXIST if the address is NOT in the set,
-+ * and -ERANGE if the address lies outside the set bounds.
-+ * If the address really was in the set, 0 is returned.
-+ */
-+ int (*delip_kernel) (struct ip_set *set,
-+ const struct sk_buff * skb,
-+ ip_set_ip_t *ip,
-+ const u_int32_t *flags,
-+ unsigned char index);
-+
-+ /* new set creation - allocated type specific items
-+ */
-+ int (*create) (struct ip_set *set,
-+ const void *data, u_int32_t size);
-+
-+ /* retry the operation after successfully tweaking the set
-+ */
-+ int (*retry) (struct ip_set *set);
-+
-+ /* set destruction - free type specific items
-+ * There is no return value.
-+ * Can be called only when child sets are destroyed.
-+ */
-+ void (*destroy) (struct ip_set *set);
-+
-+extern int ip_set_testip_kernel(ip_set_id_t index,
-+ const struct sk_buff *skb,
-+ const u_int32_t *flags);
-+
-+/* Macros to generate functions */
-+
-+#define STRUCT(pre, type) CONCAT2(pre, type)
-+#define CONCAT2(pre, type) struct pre##type
-+
-+#define FNAME(pre, mid, post) CONCAT3(pre, mid, post)
-+#define CONCAT3(pre, mid, post) pre##mid##post
-+
-+#define UADT0(type, adt, args...) \
-+static int \
-+FNAME(type,_u,adt)(struct ip_set *set, const void *data, u_int32_t size,\
-+ ip_set_ip_t *hash_ip) \
-+{ \
-+ const STRUCT(ip_set_req_,type) *req = data; \
-+ \
-+ return FNAME(type,_,adt)(set, hash_ip , ## args); \
-+}
-+
-+#define UADT(type, adt, args...) \
-+ UADT0(type, adt, req->ip , ## args)
-+
-+#define KADT(type, adt, getfn, args...) \
-+static int \
-+FNAME(type,_k,adt)(struct ip_set *set, \
-+ const struct sk_buff *skb, \
-+ ip_set_ip_t *hash_ip, \
-+ const u_int32_t *flags, \
-+ unsigned char index) \
-+{ \
-+ ip_set_ip_t ip = getfn(skb, flags[index]); \
-+ \
-+ KADT_CONDITION \
-+ return FNAME(type,_,adt)(set, hash_ip, ip , ##args); \
-+}
-+
-+#define REGISTER_MODULE(type) \
-+static int __init ip_set_##type##_init(void) \
-+{ \
-+ init_max_page_size(); \
-+ return ip_set_register_set_type(&ip_set_##type); \
-+} \
-+ \
-+static void __exit ip_set_##type##_fini(void) \
-+{ \
-+ /* FIXME: possible race with ip_set_create() */ \
-+ ip_set_unregister_set_type(&ip_set_##type); \
-+} \
-+ \
-+module_init(ip_set_##type##_init); \
-+module_exit(ip_set_##type##_fini);
-+
-+/* Common functions */
-+
-+static inline ip_set_ip_t
-+ipaddr(const struct sk_buff *skb, u_int32_t flag)
-+{
-+ return ntohl(flag & IPSET_SRC ? ip_hdr(skb)->saddr : ip_hdr(skb)->daddr);
-+}
-+
-+#define jhash_ip(map, i, ip) jhash_1word(ip, *(map->initval + i))
-+
-+#define pack_ip_port(map, ip, port) \
-+ (port + ((ip - ((map)->first_ip)) << 16))
-+
-+#endif /* __KERNEL__ */
-+
-+#endif /*_IP_SET_H*/
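
Before moving on to the next header, a quick sanity check of bitmap_bytes() may help: it returns the bitmap size in bytes for the inclusive range [a, b], rounded up to whole 32-bit words. The snippet below simply copies the function into a standalone userspace program and evaluates it for a few ranges.

/* Standalone check of the bitmap_bytes() arithmetic from ip_set.h:
 * one bit per address in the inclusive range [a, b], rounded up to
 * whole 32-bit words. */
#include <stdio.h>
#include <stdint.h>

typedef uint32_t ip_set_ip_t;

static inline int bitmap_bytes(ip_set_ip_t a, ip_set_ip_t b)
{
	return 4 * ((((b - a + 8) / 8) + 3) / 4);
}

int main(void)
{
	/* a /24 (256 addresses) needs 256 bits = 32 bytes */
	printf("/24 range: %d bytes\n", bitmap_bytes(0, 255));
	/* a /16 (65536 addresses) needs 8192 bytes */
	printf("/16 range: %d bytes\n", bitmap_bytes(0, 65535));
	/* a single address still occupies one 32-bit word */
	printf("single IP: %d bytes\n", bitmap_bytes(10, 10));
	return 0;
}
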
---- /dev/null
-+++ b/include/linux/netfilter_ipv4/ip_set_bitmaps.h
-@@ -0,0 +1,121 @@
-+#ifndef __IP_SET_BITMAPS_H
-+#define __IP_SET_BITMAPS_H
-+
-+/* Macros to generate functions */
-+
-+#ifdef __KERNEL__
-+#define BITMAP_CREATE(type) \
-+static int \
-+type##_create(struct ip_set *set, const void *data, u_int32_t size) \
-+{ \
-+ int newbytes; \
-+ const struct ip_set_req_##type##_create *req = data; \
-+ struct ip_set_##type *map; \
-+ \
-+ if (req->from > req->to) { \
-+ DP("bad range"); \
-+ return -ENOEXEC; \
-+ } \
-+ \
-+ map = kmalloc(sizeof(struct ip_set_##type), GFP_KERNEL); \
-+ if (!map) { \
-+ DP("out of memory for %zu bytes", \
-+ sizeof(struct ip_set_##type)); \
-+ return -ENOMEM; \
-+ } \
-+ map->first_ip = req->from; \
-+ map->last_ip = req->to; \
-+ \
-+ newbytes = __##type##_create(req, map); \
-+ if (newbytes < 0) { \
-+ kfree(map); \
-+ return newbytes; \
-+ } \
-+ \
-+ map->size = newbytes; \
-+ map->members = ip_set_malloc(newbytes); \
-+ if (!map->members) { \
-+ DP("out of memory for %i bytes", newbytes); \
-+ kfree(map); \
-+ return -ENOMEM; \
-+ } \
-+ memset(map->members, 0, newbytes); \
-+ \
-+ set->data = map; \
-+ return 0; \
-+}
-+
-+#define BITMAP_DESTROY(type) \
-+static void \
-+type##_destroy(struct ip_set *set) \
-+{ \
-+ struct ip_set_##type *map = set->data; \
-+ \
-+ ip_set_free(map->members, map->size); \
-+ kfree(map); \
-+ \
-+ set->data = NULL; \
-+}
-+
-+#define BITMAP_FLUSH(type) \
-+static void \
-+type##_flush(struct ip_set *set) \
-+{ \
-+ struct ip_set_##type *map = set->data; \
-+ memset(map->members, 0, map->size); \
-+}
-+
-+#define BITMAP_LIST_HEADER(type) \
-+static void \
-+type##_list_header(const struct ip_set *set, void *data) \
-+{ \
-+ const struct ip_set_##type *map = set->data; \
-+ struct ip_set_req_##type##_create *header = data; \
-+ \
-+ header->from = map->first_ip; \
-+ header->to = map->last_ip; \
-+ __##type##_list_header(map, header); \
-+}
-+
-+#define BITMAP_LIST_MEMBERS_SIZE(type) \
-+static int \
-+type##_list_members_size(const struct ip_set *set) \
-+{ \
-+ const struct ip_set_##type *map = set->data; \
-+ \
-+ return map->size; \
-+}
-+
-+#define BITMAP_LIST_MEMBERS(type) \
-+static void \
-+type##_list_members(const struct ip_set *set, void *data) \
-+{ \
-+ const struct ip_set_##type *map = set->data; \
-+ \
-+ memcpy(data, map->members, map->size); \
-+}
-+
-+#define IP_SET_TYPE(type, __features) \
-+struct ip_set_type ip_set_##type = { \
-+ .typename = #type, \
-+ .features = __features, \
-+ .protocol_version = IP_SET_PROTOCOL_VERSION, \
-+ .create = &type##_create, \
-+ .destroy = &type##_destroy, \
-+ .flush = &type##_flush, \
-+ .reqsize = sizeof(struct ip_set_req_##type), \
-+ .addip = &type##_uadd, \
-+ .addip_kernel = &type##_kadd, \
-+ .delip = &type##_udel, \
-+ .delip_kernel = &type##_kdel, \
-+ .testip = &type##_utest, \
-+ .testip_kernel = &type##_ktest, \
-+ .header_size = sizeof(struct ip_set_req_##type##_create),\
-+ .list_header = &type##_list_header, \
-+ .list_members_size = &type##_list_members_size, \
-+ .list_members = &type##_list_members, \
-+ .me = THIS_MODULE, \
-+};
-+#endif /* __KERNEL */
-+
-+#endif /* __IP_SET_BITMAPS_H */
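
The BITMAP_* and IP_SET_TYPE macros above stamp out a complete <type>_create/<type>_destroy/... function family per bitmap set type by token-pasting the type name, the same CONCAT/FNAME trick used in ip_set.h. The userspace sketch below shows that pattern in isolation; the macro and type names in it are made up for the demo and are not part of the patch.

/* Illustrative userspace demo of the token-pasting pattern used by
 * BITMAP_CREATE()/IP_SET_TYPE(): one macro expansion per set type
 * generates a whole family of <type>_* functions.  The "demo" type
 * below is hypothetical and only exists for this sketch. */
#include <stdio.h>

#define MAKE_SIZE_FN(type)					\
static int type##_size(int from, int to)			\
{								\
	return to - from + 1;	/* elements in the range */	\
}

#define MAKE_NAME_FN(type)					\
static const char *type##_name(void)				\
{								\
	return #type;		/* stringified type name */	\
}

/* One pair of macro invocations per "set type" */
MAKE_SIZE_FN(demo)
MAKE_NAME_FN(demo)

int main(void)
{
	printf("%s set over [100,199] has %d elements\n",
	       demo_name(), demo_size(100, 199));
	return 0;
}
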
---- /dev/null
-+++ b/include/linux/netfilter_ipv4/ip_set_compat.h
-@@ -0,0 +1,71 @@
-+#ifndef _IP_SET_COMPAT_H
-+#define _IP_SET_COMPAT_H
-+
-+#ifdef __KERNEL__
-+#include <linux/version.h>
-+
-+/* Arrgh */
-+#ifdef MODULE
-+#define __MOD_INC(foo) __MOD_INC_USE_COUNT(foo)
-+#define __MOD_DEC(foo) __MOD_DEC_USE_COUNT(foo)
-+#else
-+#define __MOD_INC(foo) 1
-+#define __MOD_DEC(foo)
-+#endif
-+
-+/* Backward compatibility */
-+#ifndef __nocast
-+#define __nocast
-+#endif
-+#ifndef __bitwise__
-+#define __bitwise__
-+#endif
-+
-+/* Compatibility glue code */
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
-+#include <linux/interrupt.h>
-+#define DEFINE_RWLOCK(x) rwlock_t x = RW_LOCK_UNLOCKED
-+#define try_module_get(x) __MOD_INC(x)
-+#define module_put(x) __MOD_DEC(x)
-+#define __clear_bit(nr, addr) clear_bit(nr, addr)
-+#define __set_bit(nr, addr) set_bit(nr, addr)
-+#define __test_and_set_bit(nr, addr) test_and_set_bit(nr, addr)
-+#define __test_and_clear_bit(nr, addr) test_and_clear_bit(nr, addr)
-+
-+typedef unsigned __bitwise__ gfp_t;
-+
-+static inline void *kzalloc(size_t size, gfp_t flags)
-+{
-+ void *data = kmalloc(size, flags);
-+
-+ if (data)
-+ memset(data, 0, size);
-+
-+ return data;
-+}
-+#endif
-+
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
-+#define __KMEM_CACHE_T__ kmem_cache_t
-+#else
-+#define __KMEM_CACHE_T__ struct kmem_cache
-+#endif
-+
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
-+#define ip_hdr(skb) ((skb)->nh.iph)
-+#define skb_mac_header(skb) ((skb)->mac.raw)
-+#define eth_hdr(skb) ((struct ethhdr *)skb_mac_header(skb))
-+#endif
-+
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
-+#include <linux/netfilter.h>
-+#define KMEM_CACHE_CREATE(name, size) \
-+ kmem_cache_create(name, size, 0, 0, NULL, NULL)
-+#else
-+#define KMEM_CACHE_CREATE(name, size) \
-+ kmem_cache_create(name, size, 0, 0, NULL)
-+#endif
-+
-+
-+#endif /* __KERNEL__ */
-+#endif /* _IP_SET_COMPAT_H */
---- /dev/null
-+++ b/include/linux/netfilter_ipv4/ip_set_getport.h
-@@ -0,0 +1,48 @@
-+#ifndef _IP_SET_GETPORT_H
-+#define _IP_SET_GETPORT_H
-+
-+#ifdef __KERNEL__
-+
-+#define INVALID_PORT (MAX_RANGE + 1)
-+
-+/* We must handle non-linear skbs */
-+static inline ip_set_ip_t
-+get_port(const struct sk_buff *skb, u_int32_t flags)
-+{
-+ struct iphdr *iph = ip_hdr(skb);
-+ u_int16_t offset = ntohs(iph->frag_off) & IP_OFFSET;
-+ switch (iph->protocol) {
-+ case IPPROTO_TCP: {
-+ struct tcphdr tcph;
-+
-+ /* See comments at tcp_match in ip_tables.c */
-+ if (offset)
-+ return INVALID_PORT;
-+
-+ if (skb_copy_bits(skb, ip_hdr(skb)->ihl*4, &tcph, sizeof(tcph)) < 0)
-+ /* No choice either */
-+ return INVALID_PORT;
-+
-+ return ntohs(flags & IPSET_SRC ?
-+ tcph.source : tcph.dest);
-+ }
-+ case IPPROTO_UDP: {
-+ struct udphdr udph;
-+
-+ if (offset)
-+ return INVALID_PORT;
-+
-+ if (skb_copy_bits(skb, ip_hdr(skb)->ihl*4, &udph, sizeof(udph)) < 0)
-+ /* No choice either */
-+ return INVALID_PORT;
-+
-+ return ntohs(flags & IPSET_SRC ?
-+ udph.source : udph.dest);
-+ }
-+ default:
-+ return INVALID_PORT;
-+ }
-+}
-+#endif /* __KERNEL__ */
-+
-+#endif /*_IP_SET_GETPORT_H*/
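
get_port() refuses to look past the first fragment (non-zero IP fragment offset) and then selects the source or destination port according to the IPSET_SRC flag. Below is a minimal userspace sketch of that selection logic, assuming a flat header copy instead of an skb and structures trimmed to the fields actually used.

/* Hedged userspace sketch of the port-selection logic in get_port():
 * only the first fragment carries the transport header, and the
 * IPSET_SRC flag decides between source and destination port.
 * Structures are trimmed to the fields this sketch needs. */
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

#define IPSET_SRC	0x01
#define IP_OFFSET	0x1FFF		/* fragment offset mask */
#define INVALID_PORT	0x10000

struct mini_iphdr {
	uint16_t frag_off;		/* network byte order */
};

struct mini_tcphdr {
	uint16_t source;		/* network byte order */
	uint16_t dest;
};

static uint32_t get_port(const struct mini_iphdr *iph,
			 const struct mini_tcphdr *tcph, uint32_t flags)
{
	/* non-first fragments have no transport header to inspect */
	if (ntohs(iph->frag_off) & IP_OFFSET)
		return INVALID_PORT;
	return ntohs(flags & IPSET_SRC ? tcph->source : tcph->dest);
}

int main(void)
{
	struct mini_iphdr ip = { .frag_off = htons(0) };
	struct mini_tcphdr tcp = { .source = htons(12345), .dest = htons(80) };

	printf("src port: %u\n", get_port(&ip, &tcp, IPSET_SRC));
	printf("dst port: %u\n", get_port(&ip, &tcp, 0));

	ip.frag_off = htons(0x2000 | 100);	/* MF set, offset 100 */
	printf("later fragment: %s\n",
	       get_port(&ip, &tcp, IPSET_SRC) == INVALID_PORT
	       ? "INVALID_PORT" : "port found");
	return 0;
}
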
---- /dev/null
-+++ b/include/linux/netfilter_ipv4/ip_set_hashes.h
-@@ -0,0 +1,306 @@
-+#ifndef __IP_SET_HASHES_H
-+#define __IP_SET_HASHES_H
-+
-+#define initval_t uint32_t
-+
-+/* Macros to generate functions */
-+
-+#ifdef __KERNEL__
-+#define HASH_RETRY0(type, dtype, cond) \
-+static int \
-+type##_retry(struct ip_set *set) \
-+{ \
-+ struct ip_set_##type *map = set->data, *tmp; \
-+ dtype *elem; \
-+ void *members; \
-+ u_int32_t i, hashsize = map->hashsize; \
-+ int res; \
-+ \
-+ if (map->resize == 0) \
-+ return -ERANGE; \
-+ \
-+ again: \
-+ res = 0; \
-+ \
-+ /* Calculate new hash size */ \
-+ hashsize += (hashsize * map->resize)/100; \
-+ if (hashsize == map->hashsize) \
-+ hashsize++; \
-+ \
-+ ip_set_printk("rehashing of set %s triggered: " \
-+ "hashsize grows from %lu to %lu", \
-+ set->name, \
-+ (long unsigned)map->hashsize, \
-+ (long unsigned)hashsize); \
-+ \
-+ tmp = kmalloc(sizeof(struct ip_set_##type) \
-+ + map->probes * sizeof(initval_t), GFP_ATOMIC); \
-+ if (!tmp) { \
-+ DP("out of memory for %zu bytes", \
-+ sizeof(struct ip_set_##type) \
-+ + map->probes * sizeof(initval_t)); \
-+ return -ENOMEM; \
-+ } \
-+ tmp->members = harray_malloc(hashsize, sizeof(dtype), GFP_ATOMIC);\
-+ if (!tmp->members) { \
-+ DP("out of memory for %zu bytes", hashsize * sizeof(dtype));\
-+ kfree(tmp); \
-+ return -ENOMEM; \
-+ } \
-+ tmp->hashsize = hashsize; \
-+ tmp->elements = 0; \
-+ tmp->probes = map->probes; \
-+ tmp->resize = map->resize; \
-+ memcpy(tmp->initval, map->initval, map->probes * sizeof(initval_t));\
-+ __##type##_retry(tmp, map); \
-+ \
-+ write_lock_bh(&set->lock); \
-+ map = set->data; /* Play safe */ \
-+ for (i = 0; i < map->hashsize && res == 0; i++) { \
-+ elem = HARRAY_ELEM(map->members, dtype *, i); \
-+ if (cond) \
-+ res = __##type##_add(tmp, elem); \
-+ } \
-+ if (res) { \
-+ /* Failure, try again */ \
-+ write_unlock_bh(&set->lock); \
-+ harray_free(tmp->members); \
-+ kfree(tmp); \
-+ goto again; \
-+ } \
-+ \
-+ /* Success at resizing! */ \
-+ members = map->members; \
-+ \
-+ map->hashsize = tmp->hashsize; \
-+ map->members = tmp->members; \
-+ write_unlock_bh(&set->lock); \
-+ \
-+ harray_free(members); \
-+ kfree(tmp); \
-+ \
-+ return 0; \
-+}
-+
-+#define HASH_RETRY(type, dtype) \
-+ HASH_RETRY0(type, dtype, *elem)
-+
-+#define HASH_RETRY2(type, dtype) \
-+ HASH_RETRY0(type, dtype, elem->ip || elem->ip1)
-+
-+#define HASH_CREATE(type, dtype) \
-+static int \
-+type##_create(struct ip_set *set, const void *data, u_int32_t size) \
-+{ \
-+ const struct ip_set_req_##type##_create *req = data; \
-+ struct ip_set_##type *map; \
-+ uint16_t i; \
-+ \
-+ if (req->hashsize < 1) { \
-+ ip_set_printk("hashsize too small"); \
-+ return -ENOEXEC; \
-+ } \
-+ \
-+ if (req->probes < 1) { \
-+ ip_set_printk("probes too small"); \
-+ return -ENOEXEC; \
-+ } \
-+ \
-+ map = kmalloc(sizeof(struct ip_set_##type) \
-+ + req->probes * sizeof(initval_t), GFP_KERNEL); \
-+ if (!map) { \
-+ DP("out of memory for %zu bytes", \
-+ sizeof(struct ip_set_##type) \
-+ + req->probes * sizeof(initval_t)); \
-+ return -ENOMEM; \
-+ } \
-+ for (i = 0; i < req->probes; i++) \
-+ get_random_bytes(((initval_t *) map->initval)+i, 4); \
-+ map->elements = 0; \
-+ map->hashsize = req->hashsize; \
-+ map->probes = req->probes; \
-+ map->resize = req->resize; \
-+ if (__##type##_create(req, map)) { \
-+ kfree(map); \
-+ return -ENOEXEC; \
-+ } \
-+ map->members = harray_malloc(map->hashsize, sizeof(dtype), GFP_KERNEL);\
-+ if (!map->members) { \
-+ DP("out of memory for %zu bytes", map->hashsize * sizeof(dtype));\
-+ kfree(map); \
-+ return -ENOMEM; \
-+ } \
-+ \
-+ set->data = map; \
-+ return 0; \
-+}
-+
-+#define HASH_DESTROY(type) \
-+static void \
-+type##_destroy(struct ip_set *set) \
-+{ \
-+ struct ip_set_##type *map = set->data; \
-+ \
-+ harray_free(map->members); \
-+ kfree(map); \
-+ \
-+ set->data = NULL; \
-+}
-+
-+#define HASH_FLUSH(type, dtype) \
-+static void \
-+type##_flush(struct ip_set *set) \
-+{ \
-+ struct ip_set_##type *map = set->data; \
-+ harray_flush(map->members, map->hashsize, sizeof(dtype)); \
-+ map->elements = 0; \
-+}
-+
-+#define HASH_FLUSH_CIDR(type, dtype) \
-+static void \
-+type##_flush(struct ip_set *set) \
-+{ \
-+ struct ip_set_##type *map = set->data; \
-+ harray_flush(map->members, map->hashsize, sizeof(dtype)); \
-+ memset(map->cidr, 0, sizeof(map->cidr)); \
-+ memset(map->nets, 0, sizeof(map->nets)); \
-+ map->elements = 0; \
-+}
-+
-+#define HASH_LIST_HEADER(type) \
-+static void \
-+type##_list_header(const struct ip_set *set, void *data) \
-+{ \
-+ const struct ip_set_##type *map = set->data; \
-+ struct ip_set_req_##type##_create *header = data; \
-+ \
-+ header->hashsize = map->hashsize; \
-+ header->probes = map->probes; \
-+ header->resize = map->resize; \
-+ __##type##_list_header(map, header); \
-+}
-+
-+#define HASH_LIST_MEMBERS_SIZE(type, dtype) \
-+static int \
-+type##_list_members_size(const struct ip_set *set) \
-+{ \
-+ const struct ip_set_##type *map = set->data; \
-+ \
-+ return (map->hashsize * sizeof(dtype)); \
-+}
-+
-+#define HASH_LIST_MEMBERS(type, dtype) \
-+static void \
-+type##_list_members(const struct ip_set *set, void *data) \
-+{ \
-+ const struct ip_set_##type *map = set->data; \
-+ dtype *elem; \
-+ uint32_t i; \
-+ \
-+ for (i = 0; i < map->hashsize; i++) { \
-+ elem = HARRAY_ELEM(map->members, dtype *, i); \
-+ ((dtype *)data)[i] = *elem; \
-+ } \
-+}
-+
-+#define HASH_LIST_MEMBERS_MEMCPY(type, dtype) \
-+static void \
-+type##_list_members(const struct ip_set *set, void *data) \
-+{ \
-+ const struct ip_set_##type *map = set->data; \
-+ dtype *elem; \
-+ uint32_t i; \
-+ \
-+ for (i = 0; i < map->hashsize; i++) { \
-+ elem = HARRAY_ELEM(map->members, dtype *, i); \
-+ memcpy((((dtype *)data)+i), elem, sizeof(dtype)); \
-+ } \
-+}
-+
-+#define IP_SET_RTYPE(type, __features) \
-+struct ip_set_type ip_set_##type = { \
-+ .typename = #type, \
-+ .features = __features, \
-+ .protocol_version = IP_SET_PROTOCOL_VERSION, \
-+ .create = &type##_create, \
-+ .retry = &type##_retry, \
-+ .destroy = &type##_destroy, \
-+ .flush = &type##_flush, \
-+ .reqsize = sizeof(struct ip_set_req_##type), \
-+ .addip = &type##_uadd, \
-+ .addip_kernel = &type##_kadd, \
-+ .delip = &type##_udel, \
-+ .delip_kernel = &type##_kdel, \
-+ .testip = &type##_utest, \
-+ .testip_kernel = &type##_ktest, \
-+ .header_size = sizeof(struct ip_set_req_##type##_create),\
-+ .list_header = &type##_list_header, \
-+ .list_members_size = &type##_list_members_size, \
-+ .list_members = &type##_list_members, \
-+ .me = THIS_MODULE, \
-+};
-+
-+/* Helper functions */
-+static inline void
-+add_cidr_size(uint8_t *cidr, uint8_t size)
-+{
-+ uint8_t next;
-+ int i;
-+
-+ for (i = 0; i < 30 && cidr[i]; i++) {
-+ if (cidr[i] < size) {
-+ next = cidr[i];
-+ cidr[i] = size;
-+ size = next;
-+ }
-+ }
-+ if (i < 30)
-+ cidr[i] = size;
-+}
-+
-+static inline void
-+del_cidr_size(uint8_t *cidr, uint8_t size)
-+{
-+ int i;
-+
-+ for (i = 0; i < 29 && cidr[i]; i++) {
-+ if (cidr[i] == size)
-+ cidr[i] = size = cidr[i+1];
-+ }
-+ cidr[29] = 0;
-+}
-+#else
-+#include <arpa/inet.h>
-+#endif /* __KERNEL */
-+
-+#ifndef UINT16_MAX
-+#define UINT16_MAX 65535
-+#endif
-+
-+static unsigned char shifts[] = {255, 253, 249, 241, 225, 193, 129, 1};
-+
-+static inline ip_set_ip_t
-+pack_ip_cidr(ip_set_ip_t ip, unsigned char cidr)
-+{
-+ ip_set_ip_t addr, *paddr = &addr;
-+ unsigned char n, t, *a;
-+
-+ addr = htonl(ip & (0xFFFFFFFF << (32 - (cidr))));
-+#ifdef __KERNEL__
-+ DP("ip:%u.%u.%u.%u/%u", NIPQUAD(addr), cidr);
-+#endif
-+ n = cidr / 8;
-+ t = cidr % 8;
-+ a = &((unsigned char *)paddr)[n];
-+ *a = *a /(1 << (8 - t)) + shifts[t];
-+#ifdef __KERNEL__
-+ DP("n: %u, t: %u, a: %u", n, t, *a);
-+ DP("ip:%u.%u.%u.%u/%u, %u.%u.%u.%u",
-+ HIPQUAD(ip), cidr, NIPQUAD(addr));
-+#endif
-+
-+ return ntohl(addr);
-+}
-+
-+
-+#endif /* __IP_SET_HASHES_H */
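
add_cidr_size() and del_cidr_size() maintain the per-set cidr[] array in descending order, i.e. the most specific prefix length first, with unused slots left at zero. The standalone check below copies both helpers from this header and walks through a small add/delete sequence.

/* Standalone check of add_cidr_size()/del_cidr_size() from
 * ip_set_hashes.h: the cidr[] array is kept sorted in descending
 * order (largest prefix length first), with unused slots left at 0. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

static inline void add_cidr_size(uint8_t *cidr, uint8_t size)
{
	uint8_t next;
	int i;

	for (i = 0; i < 30 && cidr[i]; i++) {
		if (cidr[i] < size) {
			next = cidr[i];
			cidr[i] = size;
			size = next;
		}
	}
	if (i < 30)
		cidr[i] = size;
}

static inline void del_cidr_size(uint8_t *cidr, uint8_t size)
{
	int i;

	for (i = 0; i < 29 && cidr[i]; i++) {
		if (cidr[i] == size)
			cidr[i] = size = cidr[i+1];
	}
	cidr[29] = 0;
}

static void dump(const uint8_t *cidr)
{
	int i;

	for (i = 0; i < 30 && cidr[i]; i++)
		printf("/%u ", cidr[i]);
	printf("\n");
}

int main(void)
{
	uint8_t cidr[30];

	memset(cidr, 0, sizeof(cidr));
	add_cidr_size(cidr, 24);
	add_cidr_size(cidr, 16);
	add_cidr_size(cidr, 28);
	dump(cidr);		/* prints: /28 /24 /16 */

	del_cidr_size(cidr, 24);
	dump(cidr);		/* prints: /28 /16 */
	return 0;
}
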
---- /dev/null
-+++ b/include/linux/netfilter_ipv4/ip_set_iphash.h
-@@ -0,0 +1,30 @@
-+#ifndef __IP_SET_IPHASH_H
-+#define __IP_SET_IPHASH_H
-+
-+#include <linux/netfilter_ipv4/ip_set.h>
-+#include <linux/netfilter_ipv4/ip_set_hashes.h>
-+
-+#define SETTYPE_NAME "iphash"
-+
-+struct ip_set_iphash {
-+ ip_set_ip_t *members; /* the iphash proper */
-+ uint32_t elements; /* number of elements */
-+ uint32_t hashsize; /* hash size */
-+ uint16_t probes; /* max number of probes */
-+ uint16_t resize; /* resize factor in percent */
-+ ip_set_ip_t netmask; /* netmask */
-+ initval_t initval[0]; /* initvals for jhash_1word */
-+};
-+
-+struct ip_set_req_iphash_create {
-+ uint32_t hashsize;
-+ uint16_t probes;
-+ uint16_t resize;
-+ ip_set_ip_t netmask;
-+};
-+
-+struct ip_set_req_iphash {
-+ ip_set_ip_t ip;
-+};
-+
-+#endif /* __IP_SET_IPHASH_H */
---- /dev/null
-+++ b/include/linux/netfilter_ipv4/ip_set_ipmap.h
-@@ -0,0 +1,57 @@
-+#ifndef __IP_SET_IPMAP_H
-+#define __IP_SET_IPMAP_H
-+
-+#include <linux/netfilter_ipv4/ip_set.h>
-+#include <linux/netfilter_ipv4/ip_set_bitmaps.h>
-+
-+#define SETTYPE_NAME "ipmap"
-+
-+struct ip_set_ipmap {
-+ void *members; /* the ipmap proper */
-+ ip_set_ip_t first_ip; /* host byte order, included in range */
-+ ip_set_ip_t last_ip; /* host byte order, included in range */
-+ ip_set_ip_t netmask; /* subnet netmask */
-+ ip_set_ip_t sizeid; /* size of set in IPs */
-+ ip_set_ip_t hosts; /* number of hosts in a subnet */
-+ u_int32_t size; /* size of the ipmap proper */
-+};
-+
-+struct ip_set_req_ipmap_create {
-+ ip_set_ip_t from;
-+ ip_set_ip_t to;
-+ ip_set_ip_t netmask;
-+};
-+
-+struct ip_set_req_ipmap {
-+ ip_set_ip_t ip;
-+};
-+
-+static inline unsigned int
-+mask_to_bits(ip_set_ip_t mask)
-+{
-+ unsigned int bits = 32;
-+ ip_set_ip_t maskaddr;
-+
-+ if (mask == 0xFFFFFFFF)
-+ return bits;
-+
-+ maskaddr = 0xFFFFFFFE;
-+ while (--bits > 0 && maskaddr != mask)
-+ maskaddr <<= 1;
-+
-+ return bits;
-+}
-+
-+static inline ip_set_ip_t
-+range_to_mask(ip_set_ip_t from, ip_set_ip_t to, unsigned int *bits)
-+{
-+ ip_set_ip_t mask = 0xFFFFFFFE;
-+
-+ *bits = 32;
-+ while (--(*bits) > 0 && mask && (to & mask) != from)
-+ mask <<= 1;
-+
-+ return mask;
-+}
-+
-+#endif /* __IP_SET_IPMAP_H */
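
mask_to_bits() and range_to_mask() convert between a netmask, a prefix length and an aligned from/to range. The following standalone check copies both helpers and runs them on 192.168.0.0 - 192.168.0.255, which should yield 0xFFFFFF00 and /24.

/* Standalone check of mask_to_bits()/range_to_mask() from
 * ip_set_ipmap.h, using the 192.168.0.0 - 192.168.0.255 range. */
#include <stdio.h>
#include <stdint.h>

typedef uint32_t ip_set_ip_t;

static inline unsigned int mask_to_bits(ip_set_ip_t mask)
{
	unsigned int bits = 32;
	ip_set_ip_t maskaddr;

	if (mask == 0xFFFFFFFF)
		return bits;

	maskaddr = 0xFFFFFFFE;
	while (--bits > 0 && maskaddr != mask)
		maskaddr <<= 1;

	return bits;
}

static inline ip_set_ip_t
range_to_mask(ip_set_ip_t from, ip_set_ip_t to, unsigned int *bits)
{
	ip_set_ip_t mask = 0xFFFFFFFE;

	*bits = 32;
	while (--(*bits) > 0 && mask && (to & mask) != from)
		mask <<= 1;

	return mask;
}

int main(void)
{
	ip_set_ip_t from = 0xC0A80000;	/* 192.168.0.0, host byte order */
	ip_set_ip_t to   = 0xC0A800FF;	/* 192.168.0.255 */
	unsigned int bits;
	ip_set_ip_t mask = range_to_mask(from, to, &bits);

	printf("mask 0x%08X, /%u\n", mask, bits);	  /* 0xFFFFFF00, /24 */
	printf("mask_to_bits: %u\n", mask_to_bits(mask)); /* 24 */
	return 0;
}
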
---- /dev/null
-+++ b/include/linux/netfilter_ipv4/ip_set_ipporthash.h
-@@ -0,0 +1,33 @@
-+#ifndef __IP_SET_IPPORTHASH_H
-+#define __IP_SET_IPPORTHASH_H
-+
-+#include <linux/netfilter_ipv4/ip_set.h>
-+#include <linux/netfilter_ipv4/ip_set_hashes.h>
-+
-+#define SETTYPE_NAME "ipporthash"
-+
-+struct ip_set_ipporthash {
-+ ip_set_ip_t *members; /* the ipporthash proper */
-+ uint32_t elements; /* number of elements */
-+ uint32_t hashsize; /* hash size */
-+ uint16_t probes; /* max number of probes */
-+ uint16_t resize; /* resize factor in percent */
-+ ip_set_ip_t first_ip; /* host byte order, included in range */
-+ ip_set_ip_t last_ip; /* host byte order, included in range */
-+ initval_t initval[0]; /* initvals for jhash_1word */
-+};
-+
-+struct ip_set_req_ipporthash_create {
-+ uint32_t hashsize;
-+ uint16_t probes;
-+ uint16_t resize;
-+ ip_set_ip_t from;
-+ ip_set_ip_t to;
-+};
-+
-+struct ip_set_req_ipporthash {
-+ ip_set_ip_t ip;
-+ ip_set_ip_t port;
-+};
-+
-+#endif /* __IP_SET_IPPORTHASH_H */
---- /dev/null
-+++ b/include/linux/netfilter_ipv4/ip_set_ipportiphash.h
-@@ -0,0 +1,39 @@
-+#ifndef __IP_SET_IPPORTIPHASH_H
-+#define __IP_SET_IPPORTIPHASH_H
-+
-+#include <linux/netfilter_ipv4/ip_set.h>
-+#include <linux/netfilter_ipv4/ip_set_hashes.h>
-+
-+#define SETTYPE_NAME "ipportiphash"
-+
-+struct ipportip {
-+ ip_set_ip_t ip;
-+ ip_set_ip_t ip1;
-+};
-+
-+struct ip_set_ipportiphash {
-+ struct ipportip *members; /* the ipportip proper */
-+ uint32_t elements; /* number of elements */
-+ uint32_t hashsize; /* hash size */
-+ uint16_t probes; /* max number of probes */
-+ uint16_t resize; /* resize factor in percent */
-+ ip_set_ip_t first_ip; /* host byte order, included in range */
-+ ip_set_ip_t last_ip; /* host byte order, included in range */
-+ initval_t initval[0]; /* initvals for jhash_1word */
-+};
-+
-+struct ip_set_req_ipportiphash_create {
-+ uint32_t hashsize;
-+ uint16_t probes;
-+ uint16_t resize;
-+ ip_set_ip_t from;
-+ ip_set_ip_t to;
-+};
-+
-+struct ip_set_req_ipportiphash {
-+ ip_set_ip_t ip;
-+ ip_set_ip_t port;
-+ ip_set_ip_t ip1;
-+};
-+
-+#endif /* __IP_SET_IPPORTIPHASH_H */
---- /dev/null
-+++ b/include/linux/netfilter_ipv4/ip_set_ipportnethash.h
-@@ -0,0 +1,42 @@
-+#ifndef __IP_SET_IPPORTNETHASH_H
-+#define __IP_SET_IPPORTNETHASH_H
-+
-+#include <linux/netfilter_ipv4/ip_set.h>
-+#include <linux/netfilter_ipv4/ip_set_hashes.h>
-+
-+#define SETTYPE_NAME "ipportnethash"
-+
-+struct ipportip {
-+ ip_set_ip_t ip;
-+ ip_set_ip_t ip1;
-+};
-+
-+struct ip_set_ipportnethash {
-+ struct ipportip *members; /* the ipportip proper */
-+ uint32_t elements; /* number of elements */
-+ uint32_t hashsize; /* hash size */
-+ uint16_t probes; /* max number of probes */
-+ uint16_t resize; /* resize factor in percent */
-+ ip_set_ip_t first_ip; /* host byte order, included in range */
-+ ip_set_ip_t last_ip; /* host byte order, included in range */
-+ uint8_t cidr[30]; /* CIDR sizes */
-+ uint16_t nets[30]; /* nr of nets by CIDR sizes */
-+ initval_t initval[0]; /* initvals for jhash_1word */
-+};
-+
-+struct ip_set_req_ipportnethash_create {
-+ uint32_t hashsize;
-+ uint16_t probes;
-+ uint16_t resize;
-+ ip_set_ip_t from;
-+ ip_set_ip_t to;
-+};
-+
-+struct ip_set_req_ipportnethash {
-+ ip_set_ip_t ip;
-+ ip_set_ip_t port;
-+ ip_set_ip_t ip1;
-+ uint8_t cidr;
-+};
-+
-+#endif /* __IP_SET_IPPORTNETHASH_H */
---- /dev/null
-+++ b/include/linux/netfilter_ipv4/ip_set_iptree.h
-@@ -0,0 +1,39 @@
-+#ifndef __IP_SET_IPTREE_H
-+#define __IP_SET_IPTREE_H
-+
-+#include <linux/netfilter_ipv4/ip_set.h>
-+
-+#define SETTYPE_NAME "iptree"
-+
-+struct ip_set_iptreed {
-+ unsigned long expires[256]; /* x.x.x.ADDR */
-+};
-+
-+struct ip_set_iptreec {
-+ struct ip_set_iptreed *tree[256]; /* x.x.ADDR.* */
-+};
-+
-+struct ip_set_iptreeb {
-+ struct ip_set_iptreec *tree[256]; /* x.ADDR.*.* */
-+};
-+
-+struct ip_set_iptree {
-+ unsigned int timeout;
-+ unsigned int gc_interval;
-+#ifdef __KERNEL__
-+ uint32_t elements; /* number of elements */
-+ struct timer_list gc;
-+ struct ip_set_iptreeb *tree[256]; /* ADDR.*.*.* */
-+#endif
-+};
-+
-+struct ip_set_req_iptree_create {
-+ unsigned int timeout;
-+};
-+
-+struct ip_set_req_iptree {
-+ ip_set_ip_t ip;
-+ unsigned int timeout;
-+};
-+
-+#endif /* __IP_SET_IPTREE_H */
---- /dev/null
-+++ b/include/linux/netfilter_ipv4/ip_set_iptreemap.h
-@@ -0,0 +1,40 @@
-+#ifndef __IP_SET_IPTREEMAP_H
-+#define __IP_SET_IPTREEMAP_H
-+
-+#include <linux/netfilter_ipv4/ip_set.h>
-+
-+#define SETTYPE_NAME "iptreemap"
-+
-+#ifdef __KERNEL__
-+struct ip_set_iptreemap_d {
-+ unsigned char bitmap[32]; /* x.x.x.y */
-+};
-+
-+struct ip_set_iptreemap_c {
-+ struct ip_set_iptreemap_d *tree[256]; /* x.x.y.x */
-+};
-+
-+struct ip_set_iptreemap_b {
-+ struct ip_set_iptreemap_c *tree[256]; /* x.y.x.x */
-+ unsigned char dirty[32];
-+};
-+#endif
-+
-+struct ip_set_iptreemap {
-+ unsigned int gc_interval;
-+#ifdef __KERNEL__
-+ struct timer_list gc;
-+ struct ip_set_iptreemap_b *tree[256]; /* y.x.x.x */
-+#endif
-+};
-+
-+struct ip_set_req_iptreemap_create {
-+ unsigned int gc_interval;
-+};
-+
-+struct ip_set_req_iptreemap {
-+ ip_set_ip_t ip;
-+ ip_set_ip_t end;
-+};
-+
-+#endif /* __IP_SET_IPTREEMAP_H */
---- /dev/null
-+++ b/include/linux/netfilter_ipv4/ip_set_jhash.h
-@@ -0,0 +1,157 @@
-+#ifndef _LINUX_JHASH_H
-+#define _LINUX_JHASH_H
-+
-+/* jhash.h: Jenkins hash support.
-+ *
-+ * Copyright (C) 2006. Bob Jenkins (bob_jenkins@burtleburtle.net)
-+ *
-+ * http://burtleburtle.net/bob/hash/
-+ *
-+ * These are the credits from Bob's sources:
-+ *
-+ * lookup3.c, by Bob Jenkins, May 2006, Public Domain.
-+ *
-+ * These are functions for producing 32-bit hashes for hash table lookup.
-+ * hashword(), hashlittle(), hashlittle2(), hashbig(), mix(), and final()
-+ * are externally useful functions. Routines to test the hash are included
-+ * if SELF_TEST is defined. You can use this free for any purpose. It's in
-+ * the public domain. It has no warranty.
-+ *
-+ * Copyright (C) 2009 Jozsef Kadlecsik (kadlec@blackhole.kfki.hu)
-+ *
-+ * I've modified Bob's hash to be useful in the Linux kernel, and
-+ * any bugs present are my fault. Jozsef
-+ */
-+
-+#define __rot(x,k) (((x)<<(k)) | ((x)>>(32-(k))))
-+
-+/* __jhash_mix - mix 3 32-bit values reversibly. */
-+#define __jhash_mix(a,b,c) \
-+{ \
-+ a -= c; a ^= __rot(c, 4); c += b; \
-+ b -= a; b ^= __rot(a, 6); a += c; \
-+ c -= b; c ^= __rot(b, 8); b += a; \
-+ a -= c; a ^= __rot(c,16); c += b; \
-+ b -= a; b ^= __rot(a,19); a += c; \
-+ c -= b; c ^= __rot(b, 4); b += a; \
-+}
-+
-+/* __jhash_final - final mixing of 3 32-bit values (a,b,c) into c */
-+#define __jhash_final(a,b,c) \
-+{ \
-+ c ^= b; c -= __rot(b,14); \
-+ a ^= c; a -= __rot(c,11); \
-+ b ^= a; b -= __rot(a,25); \
-+ c ^= b; c -= __rot(b,16); \
-+ a ^= c; a -= __rot(c,4); \
-+ b ^= a; b -= __rot(a,14); \
-+ c ^= b; c -= __rot(b,24); \
-+}
-+
-+/* The golden ratio: an arbitrary value */
-+#define JHASH_GOLDEN_RATIO 0xdeadbeef
-+
-+/* The most generic version, hashes an arbitrary sequence
-+ * of bytes. No alignment or length assumptions are made about
-+ * the input key. The result depends on endianness.
-+ */
-+static inline u32 jhash(const void *key, u32 length, u32 initval)
-+{
-+ u32 a,b,c;
-+ const u8 *k = key;
-+
-+ /* Set up the internal state */
-+ a = b = c = JHASH_GOLDEN_RATIO + length + initval;
-+
-+ /* all but the last block: affect some 32 bits of (a,b,c) */
-+ while (length > 12) {
-+ a += (k[0] + ((u32)k[1]<<8) + ((u32)k[2]<<16) + ((u32)k[3]<<24));
-+ b += (k[4] + ((u32)k[5]<<8) + ((u32)k[6]<<16) + ((u32)k[7]<<24));
-+ c += (k[8] + ((u32)k[9]<<8) + ((u32)k[10]<<16) + ((u32)k[11]<<24));
-+ __jhash_mix(a, b, c);
-+ length -= 12;
-+ k += 12;
-+ }
-+
-+ /* last block: affect all 32 bits of (c) */
-+ /* all the case statements fall through */
-+ switch (length) {
-+ case 12: c += (u32)k[11]<<24;
-+ case 11: c += (u32)k[10]<<16;
-+ case 10: c += (u32)k[9]<<8;
-+ case 9 : c += k[8];
-+ case 8 : b += (u32)k[7]<<24;
-+ case 7 : b += (u32)k[6]<<16;
-+ case 6 : b += (u32)k[5]<<8;
-+ case 5 : b += k[4];
-+ case 4 : a += (u32)k[3]<<24;
-+ case 3 : a += (u32)k[2]<<16;
-+ case 2 : a += (u32)k[1]<<8;
-+ case 1 : a += k[0];
-+ __jhash_final(a, b, c);
-+ case 0 :
-+ break;
-+ }
-+
-+ return c;
-+}
-+
-+/* A special optimized version that handles 1 or more of u32s.
-+ * The length parameter here is the number of u32s in the key.
-+ */
-+static inline u32 jhash2(const u32 *k, u32 length, u32 initval)
-+{
-+ u32 a, b, c;
-+
-+ /* Set up the internal state */
-+ a = b = c = JHASH_GOLDEN_RATIO + (length<<2) + initval;
-+
-+ /* handle most of the key */
-+ while (length > 3) {
-+ a += k[0];
-+ b += k[1];
-+ c += k[2];
-+ __jhash_mix(a, b, c);
-+ length -= 3;
-+ k += 3;
-+ }
-+
-+ /* handle the last 3 u32's */
-+ /* all the case statements fall through */
-+ switch (length) {
-+ case 3: c += k[2];
-+ case 2: b += k[1];
-+ case 1: a += k[0];
-+ __jhash_final(a, b, c);
-+ case 0: /* case 0: nothing left to add */
-+ break;
-+ }
-+
-+ return c;
-+}
-+
-+/* A special ultra-optimized versions that knows they are hashing exactly
-+ * 3, 2 or 1 word(s).
-+ */
-+static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval)
-+{
-+ a += JHASH_GOLDEN_RATIO + initval;
-+ b += JHASH_GOLDEN_RATIO + initval;
-+ c += JHASH_GOLDEN_RATIO + initval;
-+
-+ __jhash_final(a, b, c);
-+
-+ return c;
-+}
-+
-+static inline u32 jhash_2words(u32 a, u32 b, u32 initval)
-+{
-+ return jhash_3words(0, a, b, initval);
-+}
-+
-+static inline u32 jhash_1word(u32 a, u32 initval)
-+{
-+ return jhash_3words(0, 0, a, initval);
-+}
-+
-+#endif /* _LINUX_JHASH_H */
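
The hash set types pick a bucket through the jhash_ip() macro from ip_set.h, i.e. jhash_1word(ip, initval) reduced modulo the hash size. The sketch below copies the relevant jhash pieces from this header; the hashsize and initval values are arbitrary demo values, not something mandated by the patch.

/* Hedged sketch of how the hash set types derive a bucket index:
 * jhash_ip() in ip_set.h is jhash_1word(ip, initval), and the result
 * is reduced modulo the hash size.  The jhash core below is copied
 * from ip_set_jhash.h; hashsize/initval are arbitrary demo values. */
#include <stdio.h>
#include <stdint.h>

typedef uint32_t u32;

#define __rot(x, k) (((x) << (k)) | ((x) >> (32 - (k))))

#define __jhash_final(a, b, c)			\
{						\
	c ^= b; c -= __rot(b, 14);		\
	a ^= c; a -= __rot(c, 11);		\
	b ^= a; b -= __rot(a, 25);		\
	c ^= b; c -= __rot(b, 16);		\
	a ^= c; a -= __rot(c, 4);		\
	b ^= a; b -= __rot(a, 14);		\
	c ^= b; c -= __rot(b, 24);		\
}

#define JHASH_GOLDEN_RATIO 0xdeadbeef

static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval)
{
	a += JHASH_GOLDEN_RATIO + initval;
	b += JHASH_GOLDEN_RATIO + initval;
	c += JHASH_GOLDEN_RATIO + initval;
	__jhash_final(a, b, c);
	return c;
}

static inline u32 jhash_1word(u32 a, u32 initval)
{
	return jhash_3words(0, 0, a, initval);
}

int main(void)
{
	u32 ip = 0xC0A80001;		/* 192.168.0.1, host byte order */
	u32 initval = 0x12345678;	/* per-set random seed (demo value) */
	u32 hashsize = 1024;		/* demo hash size */
	u32 id = jhash_1word(ip, initval) % hashsize;

	printf("bucket for 192.168.0.1: %u of %u\n", id, hashsize);
	return 0;
}
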
---- /dev/null
-+++ b/include/linux/netfilter_ipv4/ip_set_macipmap.h
-@@ -0,0 +1,39 @@
-+#ifndef __IP_SET_MACIPMAP_H
-+#define __IP_SET_MACIPMAP_H
-+
-+#include <linux/netfilter_ipv4/ip_set.h>
-+#include <linux/netfilter_ipv4/ip_set_bitmaps.h>
-+
-+#define SETTYPE_NAME "macipmap"
-+
-+/* general flags */
-+#define IPSET_MACIP_MATCHUNSET 1
-+
-+/* per ip flags */
-+#define IPSET_MACIP_ISSET 1
-+
-+struct ip_set_macipmap {
-+ void *members; /* the macipmap proper */
-+ ip_set_ip_t first_ip; /* host byte order, included in range */
-+ ip_set_ip_t last_ip; /* host byte order, included in range */
-+ u_int32_t flags;
-+ u_int32_t size; /* size of the ipmap proper */
-+};
-+
-+struct ip_set_req_macipmap_create {
-+ ip_set_ip_t from;
-+ ip_set_ip_t to;
-+ u_int32_t flags;
-+};
-+
-+struct ip_set_req_macipmap {
-+ ip_set_ip_t ip;
-+ unsigned char ethernet[ETH_ALEN];
-+};
-+
-+struct ip_set_macip {
-+ unsigned short match;
-+ unsigned char ethernet[ETH_ALEN];
-+};
-+
-+#endif /* __IP_SET_MACIPMAP_H */
---- /dev/null
-+++ b/include/linux/netfilter_ipv4/ip_set_malloc.h
-@@ -0,0 +1,153 @@
-+#ifndef _IP_SET_MALLOC_H
-+#define _IP_SET_MALLOC_H
-+
-+#ifdef __KERNEL__
-+#include <linux/vmalloc.h>
-+
-+static size_t max_malloc_size = 0, max_page_size = 0;
-+static size_t default_max_malloc_size = 131072; /* Guaranteed: slab.c */
-+
-+static inline int init_max_page_size(void)
-+{
-+/* Compatibility glues to support 2.4.36 */
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
-+#define __GFP_NOWARN 0
-+
-+ /* Guaranteed: slab.c */
-+ max_malloc_size = max_page_size = default_max_malloc_size;
-+#else
-+ size_t page_size = 0;
-+
-+#define CACHE(x) if (max_page_size == 0 || x < max_page_size) \
-+ page_size = x;
-+#include <linux/kmalloc_sizes.h>
-+#undef CACHE
-+ if (page_size) {
-+ if (max_malloc_size == 0)
-+ max_malloc_size = page_size;
-+
-+ max_page_size = page_size;
-+
-+ return 1;
-+ }
-+#endif
-+ return 0;
-+}
-+
-+struct harray {
-+ size_t max_elements;
-+ void *arrays[0];
-+};
-+
-+static inline void *
-+__harray_malloc(size_t hashsize, size_t typesize, gfp_t flags)
-+{
-+ struct harray *harray;
-+ size_t max_elements, size, i, j;
-+
-+ BUG_ON(max_page_size == 0);
-+
-+ if (typesize > max_page_size)
-+ return NULL;
-+
-+ max_elements = max_page_size/typesize;
-+ size = hashsize/max_elements;
-+ if (hashsize % max_elements)
-+ size++;
-+
-+ /* Last pointer signals end of arrays */
-+ harray = kmalloc(sizeof(struct harray) + (size + 1) * sizeof(void *),
-+ flags);
-+
-+ if (!harray)
-+ return NULL;
-+
-+ for (i = 0; i < size - 1; i++) {
-+ harray->arrays[i] = kmalloc(max_elements * typesize, flags);
-+ if (!harray->arrays[i])
-+ goto undo;
-+ memset(harray->arrays[i], 0, max_elements * typesize);
-+ }
-+ harray->arrays[i] = kmalloc((hashsize - i * max_elements) * typesize,
-+ flags);
-+ if (!harray->arrays[i])
-+ goto undo;
-+ memset(harray->arrays[i], 0, (hashsize - i * max_elements) * typesize);
-+
-+ harray->max_elements = max_elements;
-+ harray->arrays[size] = NULL;
-+
-+ return (void *)harray;
-+
-+ undo:
-+ for (j = 0; j < i; j++) {
-+ kfree(harray->arrays[j]);
-+ }
-+ kfree(harray);
-+ return NULL;
-+}
-+
-+static inline void *
-+harray_malloc(size_t hashsize, size_t typesize, gfp_t flags)
-+{
-+ void *harray;
-+
-+ do {
-+ harray = __harray_malloc(hashsize, typesize, flags|__GFP_NOWARN);
-+ } while (harray == NULL && init_max_page_size());
-+
-+ return harray;
-+}
-+
-+static inline void harray_free(void *h)
-+{
-+ struct harray *harray = (struct harray *) h;
-+ size_t i;
-+
-+ for (i = 0; harray->arrays[i] != NULL; i++)
-+ kfree(harray->arrays[i]);
-+ kfree(harray);
-+}
-+
-+static inline void harray_flush(void *h, size_t hashsize, size_t typesize)
-+{
-+ struct harray *harray = (struct harray *) h;
-+ size_t i;
-+
-+ for (i = 0; harray->arrays[i+1] != NULL; i++)
-+ memset(harray->arrays[i], 0, harray->max_elements * typesize);
-+ memset(harray->arrays[i], 0,
-+ (hashsize - i * harray->max_elements) * typesize);
-+}
-+
-+#define HARRAY_ELEM(h, type, which) \
-+({ \
-+ struct harray *__h = (struct harray *)(h); \
-+ ((type)((__h)->arrays[(which)/(__h)->max_elements]) \
-+ + (which)%(__h)->max_elements); \
-+})
-+
-+/* General memory allocation and deallocation */
-+static inline void * ip_set_malloc(size_t bytes)
-+{
-+ BUG_ON(max_malloc_size == 0);
-+
-+ if (bytes > default_max_malloc_size)
-+ return vmalloc(bytes);
-+ else
-+ return kmalloc(bytes, GFP_KERNEL | __GFP_NOWARN);
-+}
-+
-+static inline void ip_set_free(void * data, size_t bytes)
-+{
-+ BUG_ON(max_malloc_size == 0);
-+
-+ if (bytes > default_max_malloc_size)
-+ vfree(data);
-+ else
-+ kfree(data);
-+}
-+
-+#endif /* __KERNEL__ */
-+
-+#endif /*_IP_SET_MALLOC_H*/
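
A harray splits one logical hash array into several kmalloc'ed chunks of at most max_page_size bytes each, and HARRAY_ELEM() maps a logical index to a (chunk, offset) pair with a division and a modulo. The userspace sketch below reproduces only that index arithmetic; the chunk capacity is a hypothetical demo value, not the kernel's page-derived one.

/* Hedged sketch of the HARRAY_ELEM() index arithmetic from
 * ip_set_malloc.h: a logical element index is split into a chunk
 * number and an offset inside that chunk.  The chunk capacity below
 * (max_elements) is a hypothetical demo value. */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct harray_demo {
	size_t max_elements;	/* elements per chunk */
	uint32_t *arrays[8];	/* enough chunks for this demo */
};

/* equivalent of: HARRAY_ELEM(h, uint32_t *, which) */
static uint32_t *harray_elem(struct harray_demo *h, size_t which)
{
	return h->arrays[which / h->max_elements]
	       + which % h->max_elements;
}

int main(void)
{
	struct harray_demo h;
	size_t hashsize = 2500, i;

	h.max_elements = 1024;	/* pretend one "page" holds 1024 u32s */
	for (i = 0; i * h.max_elements < hashsize; i++)
		h.arrays[i] = calloc(h.max_elements, sizeof(uint32_t));

	*harray_elem(&h, 2499) = 42;	/* lands in chunk 2, offset 451 */
	printf("element 2499 = %u (chunk %zu, offset %zu)\n",
	       *harray_elem(&h, 2499), (size_t)2499 / h.max_elements,
	       (size_t)2499 % h.max_elements);

	for (i = 0; i * h.max_elements < hashsize; i++)
		free(h.arrays[i]);
	return 0;
}
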
---- /dev/null
-+++ b/include/linux/netfilter_ipv4/ip_set_nethash.h
-@@ -0,0 +1,31 @@
-+#ifndef __IP_SET_NETHASH_H
-+#define __IP_SET_NETHASH_H
-+
-+#include <linux/netfilter_ipv4/ip_set.h>
-+#include <linux/netfilter_ipv4/ip_set_hashes.h>
-+
-+#define SETTYPE_NAME "nethash"
-+
-+struct ip_set_nethash {
-+ ip_set_ip_t *members; /* the nethash proper */
-+ uint32_t elements; /* number of elements */
-+ uint32_t hashsize; /* hash size */
-+ uint16_t probes; /* max number of probes */
-+ uint16_t resize; /* resize factor in percent */
-+ uint8_t cidr[30]; /* CIDR sizes */
-+ uint16_t nets[30]; /* nr of nets by CIDR sizes */
-+ initval_t initval[0]; /* initvals for jhash_1word */
-+};
-+
-+struct ip_set_req_nethash_create {
-+ uint32_t hashsize;
-+ uint16_t probes;
-+ uint16_t resize;
-+};
-+
-+struct ip_set_req_nethash {
-+ ip_set_ip_t ip;
-+ uint8_t cidr;
-+};
-+
-+#endif /* __IP_SET_NETHASH_H */
---- /dev/null
-+++ b/include/linux/netfilter_ipv4/ip_set_portmap.h
-@@ -0,0 +1,25 @@
-+#ifndef __IP_SET_PORTMAP_H
-+#define __IP_SET_PORTMAP_H
-+
-+#include <linux/netfilter_ipv4/ip_set.h>
-+#include <linux/netfilter_ipv4/ip_set_bitmaps.h>
-+
-+#define SETTYPE_NAME "portmap"
-+
-+struct ip_set_portmap {
-+ void *members; /* the portmap proper */
-+ ip_set_ip_t first_ip; /* host byte order, included in range */
-+ ip_set_ip_t last_ip; /* host byte order, included in range */
-+ u_int32_t size; /* size of the ipmap proper */
-+};
-+
-+struct ip_set_req_portmap_create {
-+ ip_set_ip_t from;
-+ ip_set_ip_t to;
-+};
-+
-+struct ip_set_req_portmap {
-+ ip_set_ip_t ip;
-+};
-+
-+#endif /* __IP_SET_PORTMAP_H */
---- /dev/null
-+++ b/include/linux/netfilter_ipv4/ip_set_setlist.h
-@@ -0,0 +1,26 @@
-+#ifndef __IP_SET_SETLIST_H
-+#define __IP_SET_SETLIST_H
-+
-+#include <linux/netfilter_ipv4/ip_set.h>
-+
-+#define SETTYPE_NAME "setlist"
-+
-+#define IP_SET_SETLIST_ADD_AFTER 0
-+#define IP_SET_SETLIST_ADD_BEFORE 1
-+
-+struct ip_set_setlist {
-+ uint8_t size;
-+ ip_set_id_t index[0];
-+};
-+
-+struct ip_set_req_setlist_create {
-+ uint8_t size;
-+};
-+
-+struct ip_set_req_setlist {
-+ char name[IP_SET_MAXNAMELEN];
-+ char ref[IP_SET_MAXNAMELEN];
-+ uint8_t before;
-+};
-+
-+#endif /* __IP_SET_SETLIST_H */
---- /dev/null
-+++ b/include/linux/netfilter_ipv4/ipt_set.h
-@@ -0,0 +1,21 @@
-+#ifndef _IPT_SET_H
-+#define _IPT_SET_H
-+
-+#include <linux/netfilter_ipv4/ip_set.h>
-+
-+struct ipt_set_info {
-+ ip_set_id_t index;
-+ u_int32_t flags[IP_SET_MAX_BINDINGS + 1];
-+};
-+
-+/* match info */
-+struct ipt_set_info_match {
-+ struct ipt_set_info match_set;
-+};
-+
-+struct ipt_set_info_target {
-+ struct ipt_set_info add_set;
-+ struct ipt_set_info del_set;
-+};
-+
-+#endif /*_IPT_SET_H*/
---- /dev/null
-+++ b/net/ipv4/netfilter/ip_set.c
-@@ -0,0 +1,2076 @@
-+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
-+ * Patrick Schaaf <bof@bof.de>
-+ * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+/* Kernel module for IP set management */
-+
-+#include <linux/version.h>
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
-+#include <linux/config.h>
-+#endif
-+#include <linux/module.h>
-+#include <linux/moduleparam.h>
-+#include <linux/kmod.h>
-+#include <linux/ip.h>
-+#include <linux/skbuff.h>
-+#include <linux/random.h>
-+#include <linux/netfilter_ipv4/ip_set_jhash.h>
-+#include <linux/errno.h>
-+#include <linux/capability.h>
-+#include <asm/uaccess.h>
-+#include <asm/bitops.h>
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)
-+#include <asm/semaphore.h>
-+#else
-+#include <linux/semaphore.h>
-+#endif
-+#include <linux/spinlock.h>
-+
-+#define ASSERT_READ_LOCK(x)
-+#define ASSERT_WRITE_LOCK(x)
-+#include <linux/netfilter.h>
-+#include <linux/netfilter_ipv4/ip_set.h>
-+
-+static struct list_head set_type_list; /* all registered sets */
-+static struct ip_set **ip_set_list; /* all individual sets */
-+static DEFINE_RWLOCK(ip_set_lock); /* protects the lists and the hash */
-+static DECLARE_MUTEX(ip_set_app_mutex); /* serializes user access */
-+static ip_set_id_t ip_set_max = CONFIG_IP_NF_SET_MAX;
-+static ip_set_id_t ip_set_bindings_hash_size = CONFIG_IP_NF_SET_HASHSIZE;
-+static struct list_head *ip_set_hash; /* hash of bindings */
-+static unsigned int ip_set_hash_random; /* random seed */
-+
-+#define SETNAME_EQ(a,b) (strncmp(a,b,IP_SET_MAXNAMELEN) == 0)
-+
-+/*
-+ * Sets are identified either by the index in ip_set_list or by id.
-+ * The id never changes and is used to find a key in the hash.
-+ * The index may change by swapping and used at all other places
-+ * (set/SET netfilter modules, binding value, etc.)
-+ *
-+ * Userspace requests are serialized by ip_set_mutex and sets can
-+ * be deleted only from userspace. Therefore ip_set_list locking
-+ * must obey the following rules:
-+ *
-+ * - kernel requests: read and write locking mandatory
-+ * - user requests: read locking optional, write locking mandatory
-+ */
-+
-+static inline void
-+__ip_set_get(ip_set_id_t index)
-+{
-+ atomic_inc(&ip_set_list[index]->ref);
-+}
-+
-+static inline void
-+__ip_set_put(ip_set_id_t index)
-+{
-+ atomic_dec(&ip_set_list[index]->ref);
-+}
-+
-+/*
-+ * Binding routines
-+ */
-+
-+static inline struct ip_set_hash *
-+__ip_set_find(u_int32_t key, ip_set_id_t id, ip_set_ip_t ip)
-+{
-+ struct ip_set_hash *set_hash;
-+
-+ list_for_each_entry(set_hash, &ip_set_hash[key], list)
-+ if (set_hash->id == id && set_hash->ip == ip)
-+ return set_hash;
-+
-+ return NULL;
-+}
-+
-+static ip_set_id_t
-+ip_set_find_in_hash(ip_set_id_t id, ip_set_ip_t ip)
-+{
-+ u_int32_t key = jhash_2words(id, ip, ip_set_hash_random)
-+ % ip_set_bindings_hash_size;
-+ struct ip_set_hash *set_hash;
-+
-+ ASSERT_READ_LOCK(&ip_set_lock);
-+ IP_SET_ASSERT(ip_set_list[id]);
-+ DP("set: %s, ip: %u.%u.%u.%u", ip_set_list[id]->name, HIPQUAD(ip));
-+
-+ set_hash = __ip_set_find(key, id, ip);
-+
-+ DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
-+ HIPQUAD(ip),
-+ set_hash != NULL ? ip_set_list[set_hash->binding]->name : "");
-+
-+ return (set_hash != NULL ? set_hash->binding : IP_SET_INVALID_ID);
-+}
-+
-+static inline void
-+__set_hash_del(struct ip_set_hash *set_hash)
-+{
-+ ASSERT_WRITE_LOCK(&ip_set_lock);
-+ IP_SET_ASSERT(ip_set_list[set_hash->binding]);
-+
-+ __ip_set_put(set_hash->binding);
-+ list_del(&set_hash->list);
-+ kfree(set_hash);
-+}
-+
-+static int
-+ip_set_hash_del(ip_set_id_t id, ip_set_ip_t ip)
-+{
-+ u_int32_t key = jhash_2words(id, ip, ip_set_hash_random)
-+ % ip_set_bindings_hash_size;
-+ struct ip_set_hash *set_hash;
-+
-+ IP_SET_ASSERT(ip_set_list[id]);
-+ DP("set: %s, ip: %u.%u.%u.%u", ip_set_list[id]->name, HIPQUAD(ip));
-+ write_lock_bh(&ip_set_lock);
-+ set_hash = __ip_set_find(key, id, ip);
-+ DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
-+ HIPQUAD(ip),
-+ set_hash != NULL ? ip_set_list[set_hash->binding]->name : "");
-+
-+ if (set_hash != NULL)
-+ __set_hash_del(set_hash);
-+ write_unlock_bh(&ip_set_lock);
-+ return 0;
-+}
-+
-+static int
-+ip_set_hash_add(ip_set_id_t id, ip_set_ip_t ip, ip_set_id_t binding)
-+{
-+ u_int32_t key = jhash_2words(id, ip, ip_set_hash_random)
-+ % ip_set_bindings_hash_size;
-+ struct ip_set_hash *set_hash;
-+ int ret = 0;
-+
-+ IP_SET_ASSERT(ip_set_list[id]);
-+ IP_SET_ASSERT(ip_set_list[binding]);
-+ DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
-+ HIPQUAD(ip), ip_set_list[binding]->name);
-+ write_lock_bh(&ip_set_lock);
-+ set_hash = __ip_set_find(key, id, ip);
-+ if (!set_hash) {
-+ set_hash = kmalloc(sizeof(struct ip_set_hash), GFP_ATOMIC);
-+ if (!set_hash) {
-+ ret = -ENOMEM;
-+ goto unlock;
-+ }
-+ INIT_LIST_HEAD(&set_hash->list);
-+ set_hash->id = id;
-+ set_hash->ip = ip;
-+ list_add(&set_hash->list, &ip_set_hash[key]);
-+ } else {
-+ IP_SET_ASSERT(ip_set_list[set_hash->binding]);
-+ DP("overwrite binding: %s",
-+ ip_set_list[set_hash->binding]->name);
-+ __ip_set_put(set_hash->binding);
-+ }
-+ set_hash->binding = binding;
-+ __ip_set_get(set_hash->binding);
-+ DP("stored: key %u, id %u (%s), ip %u.%u.%u.%u, binding %u (%s)",
-+ key, id, ip_set_list[id]->name,
-+ HIPQUAD(ip), binding, ip_set_list[binding]->name);
-+ unlock:
-+ write_unlock_bh(&ip_set_lock);
-+ return ret;
-+}
-+
-+#define FOREACH_HASH_DO(fn, args...) \
-+({ \
-+ ip_set_id_t __key; \
-+ struct ip_set_hash *__set_hash; \
-+ \
-+ for (__key = 0; __key < ip_set_bindings_hash_size; __key++) { \
-+ list_for_each_entry(__set_hash, &ip_set_hash[__key], list) \
-+ fn(__set_hash , ## args); \
-+ } \
-+})
-+
-+#define FOREACH_HASH_RW_DO(fn, args...) \
-+({ \
-+ ip_set_id_t __key; \
-+ struct ip_set_hash *__set_hash, *__n; \
-+ \
-+ ASSERT_WRITE_LOCK(&ip_set_lock); \
-+ for (__key = 0; __key < ip_set_bindings_hash_size; __key++) { \
-+ list_for_each_entry_safe(__set_hash, __n, &ip_set_hash[__key], list)\
-+ fn(__set_hash , ## args); \
-+ } \
-+})
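-+
-+/* A minimal sketch of how the iteration macros above are used later in
-+ * this file: the _RW_ variant runs under the write lock because the
-+ * callback may unlink entries, the plain variant only reads.
-+ *
-+ *	write_lock_bh(&ip_set_lock);
-+ *	FOREACH_HASH_RW_DO(__set_hash_del);
-+ *	write_unlock_bh(&ip_set_lock);
-+ *
-+ *	FOREACH_HASH_DO(__set_hash_bindings_size_list, set->id, &size);
-+ */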
-+
-+/* Add, del and test set entries from kernel */
-+
-+#define follow_bindings(index, set, ip) \
-+((index = ip_set_find_in_hash((set)->id, ip)) != IP_SET_INVALID_ID \
-+ || (index = (set)->binding) != IP_SET_INVALID_ID)
-+
-+int
-+ip_set_testip_kernel(ip_set_id_t index,
-+ const struct sk_buff *skb,
-+ const u_int32_t *flags)
-+{
-+ struct ip_set *set;
-+ ip_set_ip_t ip;
-+ int res;
-+ unsigned char i = 0;
-+
-+ IP_SET_ASSERT(flags[i]);
-+ read_lock_bh(&ip_set_lock);
-+ do {
-+ set = ip_set_list[index];
-+ IP_SET_ASSERT(set);
-+ DP("set %s, index %u", set->name, index);
-+ read_lock_bh(&set->lock);
-+ res = set->type->testip_kernel(set, skb, &ip, flags, i++);
-+ read_unlock_bh(&set->lock);
-+ i += !!(set->type->features & IPSET_DATA_DOUBLE);
-+ } while (res > 0
-+ && flags[i]
-+ && follow_bindings(index, set, ip));
-+ read_unlock_bh(&ip_set_lock);
-+
-+ return (res < 0 ? 0 : res);
-+}
-+
-+int
-+ip_set_addip_kernel(ip_set_id_t index,
-+ const struct sk_buff *skb,
-+ const u_int32_t *flags)
-+{
-+ struct ip_set *set;
-+ ip_set_ip_t ip;
-+ int res;
-+ unsigned char i = 0;
-+
-+ IP_SET_ASSERT(flags[i]);
-+ retry:
-+ read_lock_bh(&ip_set_lock);
-+ do {
-+ set = ip_set_list[index];
-+ IP_SET_ASSERT(set);
-+ DP("set %s, index %u", set->name, index);
-+ write_lock_bh(&set->lock);
-+ res = set->type->addip_kernel(set, skb, &ip, flags, i++);
-+ write_unlock_bh(&set->lock);
-+ i += !!(set->type->features & IPSET_DATA_DOUBLE);
-+ } while ((res == 0 || res == -EEXIST)
-+ && flags[i]
-+ && follow_bindings(index, set, ip));
-+ read_unlock_bh(&ip_set_lock);
-+
-+ if (res == -EAGAIN
-+ && set->type->retry
-+ && (res = set->type->retry(set)) == 0)
-+ goto retry;
-+
-+ return res;
-+}
-+
-+int
-+ip_set_delip_kernel(ip_set_id_t index,
-+ const struct sk_buff *skb,
-+ const u_int32_t *flags)
-+{
-+ struct ip_set *set;
-+ ip_set_ip_t ip;
-+ int res;
-+ unsigned char i = 0;
-+
-+ IP_SET_ASSERT(flags[i]);
-+ read_lock_bh(&ip_set_lock);
-+ do {
-+ set = ip_set_list[index];
-+ IP_SET_ASSERT(set);
-+ DP("set %s, index %u", set->name, index);
-+ write_lock_bh(&set->lock);
-+ res = set->type->delip_kernel(set, skb, &ip, flags, i++);
-+ write_unlock_bh(&set->lock);
-+ i += !!(set->type->features & IPSET_DATA_DOUBLE);
-+ } while ((res == 0 || res == -EEXIST)
-+ && flags[i]
-+ && follow_bindings(index, set, ip));
-+ read_unlock_bh(&ip_set_lock);
-+
-+ return res;
-+}
-+
-+/* Register and deregister settype */
-+
-+static inline struct ip_set_type *
-+find_set_type(const char *name)
-+{
-+ struct ip_set_type *set_type;
-+
-+ list_for_each_entry(set_type, &set_type_list, list)
-+ if (!strncmp(set_type->typename, name, IP_SET_MAXNAMELEN - 1))
-+ return set_type;
-+ return NULL;
-+}
-+
-+int
-+ip_set_register_set_type(struct ip_set_type *set_type)
-+{
-+ int ret = 0;
-+
-+ if (set_type->protocol_version != IP_SET_PROTOCOL_VERSION) {
-+ ip_set_printk("'%s' uses wrong protocol version %u (want %u)",
-+ set_type->typename,
-+ set_type->protocol_version,
-+ IP_SET_PROTOCOL_VERSION);
-+ return -EINVAL;
-+ }
-+
-+ write_lock_bh(&ip_set_lock);
-+ if (find_set_type(set_type->typename)) {
-+ /* Duplicate! */
-+ ip_set_printk("'%s' already registered!",
-+ set_type->typename);
-+ ret = -EINVAL;
-+ goto unlock;
-+ }
-+ if (!try_module_get(THIS_MODULE)) {
-+ ret = -EFAULT;
-+ goto unlock;
-+ }
-+ list_add(&set_type->list, &set_type_list);
-+ DP("'%s' registered.", set_type->typename);
-+ unlock:
-+ write_unlock_bh(&ip_set_lock);
-+ return ret;
-+}
-+
-+void
-+ip_set_unregister_set_type(struct ip_set_type *set_type)
-+{
-+ write_lock_bh(&ip_set_lock);
-+ if (!find_set_type(set_type->typename)) {
-+ ip_set_printk("'%s' not registered?",
-+ set_type->typename);
-+ goto unlock;
-+ }
-+ list_del(&set_type->list);
-+ module_put(THIS_MODULE);
-+ DP("'%s' unregistered.", set_type->typename);
-+ unlock:
-+ write_unlock_bh(&ip_set_lock);
-+}
-+
-+ip_set_id_t
-+__ip_set_get_byname(const char *name, struct ip_set **set)
-+{
-+ ip_set_id_t i, index = IP_SET_INVALID_ID;
-+
-+ for (i = 0; i < ip_set_max; i++) {
-+ if (ip_set_list[i] != NULL
-+ && SETNAME_EQ(ip_set_list[i]->name, name)) {
-+ __ip_set_get(i);
-+ index = i;
-+ *set = ip_set_list[i];
-+ break;
-+ }
-+ }
-+ return index;
-+}
-+
-+void __ip_set_put_byindex(ip_set_id_t index)
-+{
-+ if (ip_set_list[index])
-+ __ip_set_put(index);
-+}
-+
-+/*
-+ * Userspace routines
-+ */
-+
-+/*
-+ * Find set by name, reference it once. The reference makes sure the
-+ * thing pointed to does not go away under our feet. Drop the reference
-+ * later, using ip_set_put_byindex().
-+ */
-+ip_set_id_t
-+ip_set_get_byname(const char *name)
-+{
-+ ip_set_id_t i, index = IP_SET_INVALID_ID;
-+
-+ down(&ip_set_app_mutex);
-+ for (i = 0; i < ip_set_max; i++) {
-+ if (ip_set_list[i] != NULL
-+ && SETNAME_EQ(ip_set_list[i]->name, name)) {
-+ __ip_set_get(i);
-+ index = i;
-+ break;
-+ }
-+ }
-+ up(&ip_set_app_mutex);
-+ return index;
-+}
-+
-+/*
-+ * Find set by index, reference it once. The reference makes sure the
-+ * thing pointed to does not go away under our feet. Drop the reference
-+ * later, using ip_set_put_byindex().
-+ */
-+ip_set_id_t
-+ip_set_get_byindex(ip_set_id_t index)
-+{
-+ down(&ip_set_app_mutex);
-+
-+ if (index >= ip_set_max) {
-+ up(&ip_set_app_mutex);
-+ return IP_SET_INVALID_ID;
-+ }
-+
-+ if (ip_set_list[index])
-+ __ip_set_get(index);
-+ else
-+ index = IP_SET_INVALID_ID;
-+
-+ up(&ip_set_app_mutex);
-+ return index;
-+}
-+
-+/*
-+ * Find the set id belonging to the index.
-+ * We are protected by the mutex, so we do not need to use
-+ * ip_set_lock. There is no need to reference the sets either.
-+ */
-+ip_set_id_t
-+ip_set_id(ip_set_id_t index)
-+{
-+ if (index >= ip_set_max || !ip_set_list[index])
-+ return IP_SET_INVALID_ID;
-+
-+ return ip_set_list[index]->id;
-+}
-+
-+/*
-+ * If the given set pointer points to a valid set, decrement
-+ * reference count by 1. The caller shall not assume the index
-+ * to be valid, after calling this function.
-+ */
-+void ip_set_put_byindex(ip_set_id_t index)
-+{
-+ down(&ip_set_app_mutex);
-+ if (ip_set_list[index])
-+ __ip_set_put(index);
-+ up(&ip_set_app_mutex);
-+}
-+
-+/* Find a set by name or index */
-+static ip_set_id_t
-+ip_set_find_byname(const char *name)
-+{
-+ ip_set_id_t i, index = IP_SET_INVALID_ID;
-+
-+ for (i = 0; i < ip_set_max; i++) {
-+ if (ip_set_list[i] != NULL
-+ && SETNAME_EQ(ip_set_list[i]->name, name)) {
-+ index = i;
-+ break;
-+ }
-+ }
-+ return index;
-+}
-+
-+static ip_set_id_t
-+ip_set_find_byindex(ip_set_id_t index)
-+{
-+ if (index >= ip_set_max || ip_set_list[index] == NULL)
-+ index = IP_SET_INVALID_ID;
-+
-+ return index;
-+}
-+
-+/*
-+ * Add, del, test, bind and unbind
-+ */
-+
-+static inline int
-+__ip_set_testip(struct ip_set *set,
-+ const void *data,
-+ u_int32_t size,
-+ ip_set_ip_t *ip)
-+{
-+ int res;
-+
-+ read_lock_bh(&set->lock);
-+ res = set->type->testip(set, data, size, ip);
-+ read_unlock_bh(&set->lock);
-+
-+ return res;
-+}
-+
-+static int
-+__ip_set_addip(ip_set_id_t index,
-+ const void *data,
-+ u_int32_t size)
-+{
-+ struct ip_set *set = ip_set_list[index];
-+ ip_set_ip_t ip;
-+ int res;
-+
-+ IP_SET_ASSERT(set);
-+ do {
-+ write_lock_bh(&set->lock);
-+ res = set->type->addip(set, data, size, &ip);
-+ write_unlock_bh(&set->lock);
-+ } while (res == -EAGAIN
-+ && set->type->retry
-+ && (res = set->type->retry(set)) == 0);
-+
-+ return res;
-+}
-+
-+static int
-+ip_set_addip(ip_set_id_t index,
-+ const void *data,
-+ u_int32_t size)
-+{
-+ struct ip_set *set = ip_set_list[index];
-+
-+ IP_SET_ASSERT(set);
-+
-+ if (size - sizeof(struct ip_set_req_adt) != set->type->reqsize) {
-+ ip_set_printk("data length wrong (want %lu, have %zu)",
-+ (long unsigned)set->type->reqsize,
-+ size - sizeof(struct ip_set_req_adt));
-+ return -EINVAL;
-+ }
-+ return __ip_set_addip(index,
-+ data + sizeof(struct ip_set_req_adt),
-+ size - sizeof(struct ip_set_req_adt));
-+}
-+
-+static int
-+ip_set_delip(ip_set_id_t index,
-+ const void *data,
-+ u_int32_t size)
-+{
-+ struct ip_set *set = ip_set_list[index];
-+ ip_set_ip_t ip;
-+ int res;
-+
-+ IP_SET_ASSERT(set);
-+
-+ if (size - sizeof(struct ip_set_req_adt) != set->type->reqsize) {
-+ ip_set_printk("data length wrong (want %lu, have %zu)",
-+ (long unsigned)set->type->reqsize,
-+ size - sizeof(struct ip_set_req_adt));
-+ return -EINVAL;
-+ }
-+ write_lock_bh(&set->lock);
-+ res = set->type->delip(set,
-+ data + sizeof(struct ip_set_req_adt),
-+ size - sizeof(struct ip_set_req_adt),
-+ &ip);
-+ write_unlock_bh(&set->lock);
-+
-+ return res;
-+}
-+
-+static int
-+ip_set_testip(ip_set_id_t index,
-+ const void *data,
-+ u_int32_t size)
-+{
-+ struct ip_set *set = ip_set_list[index];
-+ ip_set_ip_t ip;
-+ int res;
-+
-+ IP_SET_ASSERT(set);
-+
-+ if (size - sizeof(struct ip_set_req_adt) != set->type->reqsize) {
-+ ip_set_printk("data length wrong (want %lu, have %zu)",
-+ (long unsigned)set->type->reqsize,
-+ size - sizeof(struct ip_set_req_adt));
-+ return -EINVAL;
-+ }
-+ res = __ip_set_testip(set,
-+ data + sizeof(struct ip_set_req_adt),
-+ size - sizeof(struct ip_set_req_adt),
-+ &ip);
-+
-+ return (res > 0 ? -EEXIST : res);
-+}
-+
-+static int
-+ip_set_bindip(ip_set_id_t index,
-+ const void *data,
-+ u_int32_t size)
-+{
-+ struct ip_set *set = ip_set_list[index];
-+ const struct ip_set_req_bind *req_bind;
-+ ip_set_id_t binding;
-+ ip_set_ip_t ip;
-+ int res;
-+
-+ IP_SET_ASSERT(set);
-+ if (size < sizeof(struct ip_set_req_bind))
-+ return -EINVAL;
-+
-+ req_bind = data;
-+
-+ if (SETNAME_EQ(req_bind->binding, IPSET_TOKEN_DEFAULT)) {
-+ /* Default binding of a set */
-+ const char *binding_name;
-+
-+ if (size != sizeof(struct ip_set_req_bind) + IP_SET_MAXNAMELEN)
-+ return -EINVAL;
-+
-+ binding_name = data + sizeof(struct ip_set_req_bind);
-+
-+ binding = ip_set_find_byname(binding_name);
-+ if (binding == IP_SET_INVALID_ID)
-+ return -ENOENT;
-+
-+ write_lock_bh(&ip_set_lock);
-+ /* Sets as binding values are referenced */
-+ if (set->binding != IP_SET_INVALID_ID)
-+ __ip_set_put(set->binding);
-+ set->binding = binding;
-+ __ip_set_get(set->binding);
-+ write_unlock_bh(&ip_set_lock);
-+
-+ return 0;
-+ }
-+ binding = ip_set_find_byname(req_bind->binding);
-+ if (binding == IP_SET_INVALID_ID)
-+ return -ENOENT;
-+
-+ res = __ip_set_testip(set,
-+ data + sizeof(struct ip_set_req_bind),
-+ size - sizeof(struct ip_set_req_bind),
-+ &ip);
-+ DP("set %s, ip: %u.%u.%u.%u, binding %s",
-+ set->name, HIPQUAD(ip), ip_set_list[binding]->name);
-+
-+ if (res >= 0)
-+ res = ip_set_hash_add(set->id, ip, binding);
-+
-+ return res;
-+}
-+
-+#define FOREACH_SET_DO(fn, args...) \
-+({ \
-+ ip_set_id_t __i; \
-+ struct ip_set *__set; \
-+ \
-+ for (__i = 0; __i < ip_set_max; __i++) { \
-+ __set = ip_set_list[__i]; \
-+ if (__set != NULL) \
-+ fn(__set , ##args); \
-+ } \
-+})
-+
-+static inline void
-+__set_hash_del_byid(struct ip_set_hash *set_hash, ip_set_id_t id)
-+{
-+ if (set_hash->id == id)
-+ __set_hash_del(set_hash);
-+}
-+
-+static inline void
-+__unbind_default(struct ip_set *set)
-+{
-+ if (set->binding != IP_SET_INVALID_ID) {
-+ /* Sets as binding values are referenced */
-+ __ip_set_put(set->binding);
-+ set->binding = IP_SET_INVALID_ID;
-+ }
-+}
-+
-+static int
-+ip_set_unbindip(ip_set_id_t index,
-+ const void *data,
-+ u_int32_t size)
-+{
-+ struct ip_set *set;
-+ const struct ip_set_req_bind *req_bind;
-+ ip_set_ip_t ip;
-+ int res;
-+
-+ DP("");
-+ if (size < sizeof(struct ip_set_req_bind))
-+ return -EINVAL;
-+
-+ req_bind = data;
-+
-+ DP("%u %s", index, req_bind->binding);
-+ if (index == IP_SET_INVALID_ID) {
-+ /* unbind :all: */
-+ if (SETNAME_EQ(req_bind->binding, IPSET_TOKEN_DEFAULT)) {
-+ /* Default binding of sets */
-+ write_lock_bh(&ip_set_lock);
-+ FOREACH_SET_DO(__unbind_default);
-+ write_unlock_bh(&ip_set_lock);
-+ return 0;
-+ } else if (SETNAME_EQ(req_bind->binding, IPSET_TOKEN_ALL)) {
-+ /* Flush all bindings of all sets */
-+ write_lock_bh(&ip_set_lock);
-+ FOREACH_HASH_RW_DO(__set_hash_del);
-+ write_unlock_bh(&ip_set_lock);
-+ return 0;
-+ }
-+ DP("unreachable reached!");
-+ return -EINVAL;
-+ }
-+
-+ set = ip_set_list[index];
-+ IP_SET_ASSERT(set);
-+ if (SETNAME_EQ(req_bind->binding, IPSET_TOKEN_DEFAULT)) {
-+ /* Default binding of set */
-+ ip_set_id_t binding = ip_set_find_byindex(set->binding);
-+
-+ if (binding == IP_SET_INVALID_ID)
-+ return -ENOENT;
-+
-+ write_lock_bh(&ip_set_lock);
-+ /* Sets in hash values are referenced */
-+ __ip_set_put(set->binding);
-+ set->binding = IP_SET_INVALID_ID;
-+ write_unlock_bh(&ip_set_lock);
-+
-+ return 0;
-+ } else if (SETNAME_EQ(req_bind->binding, IPSET_TOKEN_ALL)) {
-+ /* Flush all bindings */
-+
-+ write_lock_bh(&ip_set_lock);
-+ FOREACH_HASH_RW_DO(__set_hash_del_byid, set->id);
-+ write_unlock_bh(&ip_set_lock);
-+ return 0;
-+ }
-+
-+ res = __ip_set_testip(set,
-+ data + sizeof(struct ip_set_req_bind),
-+ size - sizeof(struct ip_set_req_bind),
-+ &ip);
-+
-+ DP("set %s, ip: %u.%u.%u.%u", set->name, HIPQUAD(ip));
-+ if (res >= 0)
-+ res = ip_set_hash_del(set->id, ip);
-+
-+ return res;
-+}
-+
-+static int
-+ip_set_testbind(ip_set_id_t index,
-+ const void *data,
-+ u_int32_t size)
-+{
-+ struct ip_set *set = ip_set_list[index];
-+ const struct ip_set_req_bind *req_bind;
-+ ip_set_id_t binding;
-+ ip_set_ip_t ip;
-+ int res;
-+
-+ IP_SET_ASSERT(set);
-+ if (size < sizeof(struct ip_set_req_bind))
-+ return -EINVAL;
-+
-+ req_bind = data;
-+
-+ if (SETNAME_EQ(req_bind->binding, IPSET_TOKEN_DEFAULT)) {
-+ /* Default binding of set */
-+ const char *binding_name;
-+
-+ if (size != sizeof(struct ip_set_req_bind) + IP_SET_MAXNAMELEN)
-+ return -EINVAL;
-+
-+ binding_name = data + sizeof(struct ip_set_req_bind);
-+
-+ binding = ip_set_find_byname(binding_name);
-+ if (binding == IP_SET_INVALID_ID)
-+ return -ENOENT;
-+
-+ res = (set->binding == binding) ? -EEXIST : 0;
-+
-+ return res;
-+ }
-+ binding = ip_set_find_byname(req_bind->binding);
-+ if (binding == IP_SET_INVALID_ID)
-+ return -ENOENT;
-+
-+ res = __ip_set_testip(set,
-+ data + sizeof(struct ip_set_req_bind),
-+ size - sizeof(struct ip_set_req_bind),
-+ &ip);
-+ DP("set %s, ip: %u.%u.%u.%u, binding %s",
-+ set->name, HIPQUAD(ip), ip_set_list[binding]->name);
-+
-+ if (res >= 0)
-+ res = (ip_set_find_in_hash(set->id, ip) == binding)
-+ ? -EEXIST : 0;
-+
-+ return res;
-+}
-+
-+static struct ip_set_type *
-+find_set_type_rlock(const char *typename)
-+{
-+ struct ip_set_type *type;
-+
-+ read_lock_bh(&ip_set_lock);
-+ type = find_set_type(typename);
-+ if (type == NULL)
-+ read_unlock_bh(&ip_set_lock);
-+
-+ return type;
-+}
-+
-+static int
-+find_free_id(const char *name,
-+ ip_set_id_t *index,
-+ ip_set_id_t *id)
-+{
-+ ip_set_id_t i;
-+
-+ *id = IP_SET_INVALID_ID;
-+ for (i = 0; i < ip_set_max; i++) {
-+ if (ip_set_list[i] == NULL) {
-+ if (*id == IP_SET_INVALID_ID)
-+ *id = *index = i;
-+ } else if (SETNAME_EQ(name, ip_set_list[i]->name))
-+ /* Name clash */
-+ return -EEXIST;
-+ }
-+ if (*id == IP_SET_INVALID_ID)
-+ /* No free slot remained */
-+ return -ERANGE;
-+ /* Check that index is usable as id (swapping) */
-+ check:
-+ for (i = 0; i < ip_set_max; i++) {
-+ if (ip_set_list[i] != NULL
-+ && ip_set_list[i]->id == *id) {
-+ *id = i;
-+ goto check;
-+ }
-+ }
-+ return 0;
-+}
-+
-+/*
-+ * Create a set
-+ */
-+static int
-+ip_set_create(const char *name,
-+ const char *typename,
-+ ip_set_id_t restore,
-+ const void *data,
-+ u_int32_t size)
-+{
-+ struct ip_set *set;
-+ ip_set_id_t index = 0, id;
-+ int res = 0;
-+
-+ DP("setname: %s, typename: %s, id: %u", name, typename, restore);
-+
-+ /*
-+ * First, and without any locks, allocate and initialize
-+ * a normal base set structure.
-+ */
-+ set = kmalloc(sizeof(struct ip_set), GFP_KERNEL);
-+ if (!set)
-+ return -ENOMEM;
-+ rwlock_init(&set->lock);
-+ strncpy(set->name, name, IP_SET_MAXNAMELEN);
-+ set->binding = IP_SET_INVALID_ID;
-+ atomic_set(&set->ref, 0);
-+
-+ /*
-+ * Next, take the &ip_set_lock, check that we know the type,
-+ * and take a reference on the type, to make sure it
-+ * stays available while constructing our new set.
-+ *
-+ * After referencing the type, we drop the &ip_set_lock,
-+ * and let the new set construction run without locks.
-+ */
-+ set->type = find_set_type_rlock(typename);
-+ if (set->type == NULL) {
-+ /* Try loading the module */
-+ char modulename[IP_SET_MAXNAMELEN + strlen("ip_set_") + 1];
-+ strcpy(modulename, "ip_set_");
-+ strcat(modulename, typename);
-+ DP("try to load %s", modulename);
-+ request_module(modulename);
-+ set->type = find_set_type_rlock(typename);
-+ }
-+ if (set->type == NULL) {
-+ ip_set_printk("no set type '%s', set '%s' not created",
-+ typename, name);
-+ res = -ENOENT;
-+ goto out;
-+ }
-+ if (!try_module_get(set->type->me)) {
-+ read_unlock_bh(&ip_set_lock);
-+ res = -EFAULT;
-+ goto out;
-+ }
-+ read_unlock_bh(&ip_set_lock);
-+
-+ /* Check request size */
-+ if (size != set->type->header_size) {
-+ ip_set_printk("data length wrong (want %lu, have %lu)",
-+ (long unsigned)set->type->header_size,
-+ (long unsigned)size);
-+ goto put_out;
-+ }
-+
-+ /*
-+ * Without holding any locks, create private part.
-+ */
-+ res = set->type->create(set, data, size);
-+ if (res != 0)
-+ goto put_out;
-+
-+ /* BTW, res==0 here. */
-+
-+ /*
-+ * Here, we have a valid, constructed set. &ip_set_lock again,
-+ * find free id/index and check that it is not already in
-+ * ip_set_list.
-+ */
-+ write_lock_bh(&ip_set_lock);
-+ if ((res = find_free_id(set->name, &index, &id)) != 0) {
-+ DP("no free id!");
-+ goto cleanup;
-+ }
-+
-+ /* Make sure restore gets the same index */
-+ if (restore != IP_SET_INVALID_ID && index != restore) {
-+ DP("Can't restore, sets are screwed up");
-+ res = -ERANGE;
-+ goto cleanup;
-+ }
-+
-+ /*
-+ * Finally! Add our shiny new set to the list, and be done.
-+ */
-+ DP("create: '%s' created with index %u, id %u!", set->name, index, id);
-+ set->id = id;
-+ ip_set_list[index] = set;
-+ write_unlock_bh(&ip_set_lock);
-+ return res;
-+
-+ cleanup:
-+ write_unlock_bh(&ip_set_lock);
-+ set->type->destroy(set);
-+ put_out:
-+ module_put(set->type->me);
-+ out:
-+ kfree(set);
-+ return res;
-+}
-+
-+/*
-+ * Destroy a given existing set
-+ */
-+static void
-+ip_set_destroy_set(ip_set_id_t index)
-+{
-+ struct ip_set *set = ip_set_list[index];
-+
-+ IP_SET_ASSERT(set);
-+ DP("set: %s", set->name);
-+ write_lock_bh(&ip_set_lock);
-+ FOREACH_HASH_RW_DO(__set_hash_del_byid, set->id);
-+ if (set->binding != IP_SET_INVALID_ID)
-+ __ip_set_put(set->binding);
-+ ip_set_list[index] = NULL;
-+ write_unlock_bh(&ip_set_lock);
-+
-+ /* Must call it without holding any lock */
-+ set->type->destroy(set);
-+ module_put(set->type->me);
-+ kfree(set);
-+}
-+
-+/*
-+ * Destroy a set - or all sets
-+ * Sets must not be referenced/used.
-+ */
-+static int
-+ip_set_destroy(ip_set_id_t index)
-+{
-+ ip_set_id_t i;
-+
-+ /* ref modification always protected by the mutex */
-+ if (index != IP_SET_INVALID_ID) {
-+ if (atomic_read(&ip_set_list[index]->ref))
-+ return -EBUSY;
-+ ip_set_destroy_set(index);
-+ } else {
-+ for (i = 0; i < ip_set_max; i++) {
-+ if (ip_set_list[i] != NULL
-+ && (atomic_read(&ip_set_list[i]->ref)))
-+ return -EBUSY;
-+ }
-+
-+ for (i = 0; i < ip_set_max; i++) {
-+ if (ip_set_list[i] != NULL)
-+ ip_set_destroy_set(i);
-+ }
-+ }
-+ return 0;
-+}
-+
-+static void
-+ip_set_flush_set(struct ip_set *set)
-+{
-+ DP("set: %s %u", set->name, set->id);
-+
-+ write_lock_bh(&set->lock);
-+ set->type->flush(set);
-+ write_unlock_bh(&set->lock);
-+}
-+
-+/*
-+ * Flush data in a set - or in all sets
-+ */
-+static int
-+ip_set_flush(ip_set_id_t index)
-+{
-+ if (index != IP_SET_INVALID_ID) {
-+ IP_SET_ASSERT(ip_set_list[index]);
-+ ip_set_flush_set(ip_set_list[index]);
-+ } else
-+ FOREACH_SET_DO(ip_set_flush_set);
-+
-+ return 0;
-+}
-+
-+/* Rename a set */
-+static int
-+ip_set_rename(ip_set_id_t index, const char *name)
-+{
-+ struct ip_set *set = ip_set_list[index];
-+ ip_set_id_t i;
-+ int res = 0;
-+
-+ DP("set: %s to %s", set->name, name);
-+ write_lock_bh(&ip_set_lock);
-+ for (i = 0; i < ip_set_max; i++) {
-+ if (ip_set_list[i] != NULL
-+ && SETNAME_EQ(ip_set_list[i]->name, name)) {
-+ res = -EEXIST;
-+ goto unlock;
-+ }
-+ }
-+ strncpy(set->name, name, IP_SET_MAXNAMELEN);
-+ unlock:
-+ write_unlock_bh(&ip_set_lock);
-+ return res;
-+}
-+
-+/*
-+ * Swap two sets so that name/index points to the other.
-+ * References are also swapped.
-+ */
-+static int
-+ip_set_swap(ip_set_id_t from_index, ip_set_id_t to_index)
-+{
-+ struct ip_set *from = ip_set_list[from_index];
-+ struct ip_set *to = ip_set_list[to_index];
-+ char from_name[IP_SET_MAXNAMELEN];
-+ u_int32_t from_ref;
-+
-+ DP("set: %s to %s", from->name, to->name);
-+ /* Features must not change.
-+ * Not an artificial restriction anymore, as we must prevent
-+ * possible loops created by swapping in setlist type of sets. */
-+ if (from->type->features != to->type->features)
-+ return -ENOEXEC;
-+
-+ /* No magic here: ref munging protected by the mutex */
-+ write_lock_bh(&ip_set_lock);
-+ strncpy(from_name, from->name, IP_SET_MAXNAMELEN);
-+ from_ref = atomic_read(&from->ref);
-+
-+ strncpy(from->name, to->name, IP_SET_MAXNAMELEN);
-+ atomic_set(&from->ref, atomic_read(&to->ref));
-+ strncpy(to->name, from_name, IP_SET_MAXNAMELEN);
-+ atomic_set(&to->ref, from_ref);
-+
-+ ip_set_list[from_index] = to;
-+ ip_set_list[to_index] = from;
-+
-+ write_unlock_bh(&ip_set_lock);
-+ return 0;
-+}
-+
-+/*
-+ * List set data
-+ */
-+
-+static inline void
-+__set_hash_bindings_size_list(struct ip_set_hash *set_hash,
-+ ip_set_id_t id, u_int32_t *size)
-+{
-+ if (set_hash->id == id)
-+ *size += sizeof(struct ip_set_hash_list);
-+}
-+
-+static inline void
-+__set_hash_bindings_size_save(struct ip_set_hash *set_hash,
-+ ip_set_id_t id, u_int32_t *size)
-+{
-+ if (set_hash->id == id)
-+ *size += sizeof(struct ip_set_hash_save);
-+}
-+
-+static inline void
-+__set_hash_bindings(struct ip_set_hash *set_hash,
-+ ip_set_id_t id, void *data, int *used)
-+{
-+ if (set_hash->id == id) {
-+ struct ip_set_hash_list *hash_list = data + *used;
-+
-+ hash_list->ip = set_hash->ip;
-+ hash_list->binding = set_hash->binding;
-+ *used += sizeof(struct ip_set_hash_list);
-+ }
-+}
-+
-+static int ip_set_list_set(ip_set_id_t index,
-+ void *data,
-+ int *used,
-+ int len)
-+{
-+ struct ip_set *set = ip_set_list[index];
-+ struct ip_set_list *set_list;
-+
-+ /* Pointer to our header */
-+ set_list = data + *used;
-+
-+ DP("set: %s, used: %d %p %p", set->name, *used, data, data + *used);
-+
-+ /* Get and ensure header size */
-+ if (*used + sizeof(struct ip_set_list) > len)
-+ goto not_enough_mem;
-+ *used += sizeof(struct ip_set_list);
-+
-+ read_lock_bh(&set->lock);
-+ /* Get and ensure set specific header size */
-+ set_list->header_size = set->type->header_size;
-+ if (*used + set_list->header_size > len)
-+ goto unlock_set;
-+
-+ /* Fill in the header */
-+ set_list->index = index;
-+ set_list->binding = set->binding;
-+ set_list->ref = atomic_read(&set->ref);
-+
-+ /* Fill in set specific header data */
-+ set->type->list_header(set, data + *used);
-+ *used += set_list->header_size;
-+
-+ /* Get and ensure set specific members size */
-+ set_list->members_size = set->type->list_members_size(set);
-+ if (*used + set_list->members_size > len)
-+ goto unlock_set;
-+
-+ /* Fill in set specific members data */
-+ set->type->list_members(set, data + *used);
-+ *used += set_list->members_size;
-+ read_unlock_bh(&set->lock);
-+
-+ /* Bindings */
-+
-+ /* Get and ensure set specific bindings size */
-+ set_list->bindings_size = 0;
-+ FOREACH_HASH_DO(__set_hash_bindings_size_list,
-+ set->id, &set_list->bindings_size);
-+ if (*used + set_list->bindings_size > len)
-+ goto not_enough_mem;
-+
-+ /* Fill in set specific bindings data */
-+ FOREACH_HASH_DO(__set_hash_bindings, set->id, data, used);
-+
-+ return 0;
-+
-+ unlock_set:
-+ read_unlock_bh(&set->lock);
-+ not_enough_mem:
-+ DP("not enough mem, try again");
-+ return -EAGAIN;
-+}
-+
-+/*
-+ * Save sets
-+ */
-+static int ip_set_save_set(ip_set_id_t index,
-+ void *data,
-+ int *used,
-+ int len)
-+{
-+ struct ip_set *set;
-+ struct ip_set_save *set_save;
-+
-+ /* Pointer to our header */
-+ set_save = data + *used;
-+
-+ /* Get and ensure header size */
-+ if (*used + sizeof(struct ip_set_save) > len)
-+ goto not_enough_mem;
-+ *used += sizeof(struct ip_set_save);
-+
-+ set = ip_set_list[index];
-+ DP("set: %s, used: %d(%d) %p %p", set->name, *used, len,
-+ data, data + *used);
-+
-+ read_lock_bh(&set->lock);
-+ /* Get and ensure set specific header size */
-+ set_save->header_size = set->type->header_size;
-+ if (*used + set_save->header_size > len)
-+ goto unlock_set;
-+
-+ /* Fill in the header */
-+ set_save->index = index;
-+ set_save->binding = set->binding;
-+
-+ /* Fill in set specific header data */
-+ set->type->list_header(set, data + *used);
-+ *used += set_save->header_size;
-+
-+ DP("set header filled: %s, used: %d(%lu) %p %p", set->name, *used,
-+ (unsigned long)set_save->header_size, data, data + *used);
-+ /* Get and ensure set specific members size */
-+ set_save->members_size = set->type->list_members_size(set);
-+ if (*used + set_save->members_size > len)
-+ goto unlock_set;
-+
-+ /* Fill in set specific members data */
-+ set->type->list_members(set, data + *used);
-+ *used += set_save->members_size;
-+ read_unlock_bh(&set->lock);
-+ DP("set members filled: %s, used: %d(%lu) %p %p", set->name, *used,
-+ (unsigned long)set_save->members_size, data, data + *used);
-+ return 0;
-+
-+ unlock_set:
-+ read_unlock_bh(&set->lock);
-+ not_enough_mem:
-+ DP("not enough mem, try again");
-+ return -EAGAIN;
-+}
-+
-+static inline void
-+__set_hash_save_bindings(struct ip_set_hash *set_hash,
-+ ip_set_id_t id,
-+ void *data,
-+ int *used,
-+ int len,
-+ int *res)
-+{
-+ if (*res == 0
-+ && (id == IP_SET_INVALID_ID || set_hash->id == id)) {
-+ struct ip_set_hash_save *hash_save = data + *used;
-+ /* Ensure bindings size */
-+ if (*used + sizeof(struct ip_set_hash_save) > len) {
-+ *res = -ENOMEM;
-+ return;
-+ }
-+ hash_save->id = set_hash->id;
-+ hash_save->ip = set_hash->ip;
-+ hash_save->binding = set_hash->binding;
-+ *used += sizeof(struct ip_set_hash_save);
-+ }
-+}
-+
-+static int ip_set_save_bindings(ip_set_id_t index,
-+ void *data,
-+ int *used,
-+ int len)
-+{
-+ int res = 0;
-+ struct ip_set_save *set_save;
-+
-+ DP("used %u, len %u", *used, len);
-+ /* Get and ensure header size */
-+ if (*used + sizeof(struct ip_set_save) > len)
-+ return -ENOMEM;
-+
-+ /* Marker */
-+ set_save = data + *used;
-+ set_save->index = IP_SET_INVALID_ID;
-+ set_save->header_size = 0;
-+ set_save->members_size = 0;
-+ *used += sizeof(struct ip_set_save);
-+
-+ DP("marker added used %u, len %u", *used, len);
-+ /* Fill in bindings data */
-+ if (index != IP_SET_INVALID_ID)
-+ /* Sets are identified by id in hash */
-+ index = ip_set_list[index]->id;
-+ FOREACH_HASH_DO(__set_hash_save_bindings, index, data, used, len, &res);
-+
-+ return res;
-+}
-+
-+/*
-+ * Restore sets
-+ */
-+static int ip_set_restore(void *data,
-+ int len)
-+{
-+ int res = 0;
-+ int line = 0, used = 0, members_size;
-+ struct ip_set *set;
-+ struct ip_set_hash_save *hash_save;
-+ struct ip_set_restore *set_restore;
-+ ip_set_id_t index;
-+
-+ /* Loop to restore sets */
-+ while (1) {
-+ line++;
-+
-+ DP("%d %zu %d", used, sizeof(struct ip_set_restore), len);
-+ /* Get and ensure header size */
-+ if (used + sizeof(struct ip_set_restore) > len)
-+ return line;
-+ set_restore = data + used;
-+ used += sizeof(struct ip_set_restore);
-+
-+ /* Ensure data size */
-+ if (used
-+ + set_restore->header_size
-+ + set_restore->members_size > len)
-+ return line;
-+
-+ /* Check marker */
-+ if (set_restore->index == IP_SET_INVALID_ID) {
-+ line--;
-+ goto bindings;
-+ }
-+
-+ /* Try to create the set */
-+ DP("restore %s %s", set_restore->name, set_restore->typename);
-+ res = ip_set_create(set_restore->name,
-+ set_restore->typename,
-+ set_restore->index,
-+ data + used,
-+ set_restore->header_size);
-+
-+ if (res != 0)
-+ return line;
-+ used += set_restore->header_size;
-+
-+ index = ip_set_find_byindex(set_restore->index);
-+ DP("index %u, restore_index %u", index, set_restore->index);
-+ if (index != set_restore->index)
-+ return line;
-+ /* Try to restore members data */
-+ set = ip_set_list[index];
-+ members_size = 0;
-+ DP("members_size %lu reqsize %lu",
-+ (unsigned long)set_restore->members_size,
-+ (unsigned long)set->type->reqsize);
-+ while (members_size + set->type->reqsize <=
-+ set_restore->members_size) {
-+ line++;
-+ DP("members: %d, line %d", members_size, line);
-+ res = __ip_set_addip(index,
-+ data + used + members_size,
-+ set->type->reqsize);
-+ if (!(res == 0 || res == -EEXIST))
-+ return line;
-+ members_size += set->type->reqsize;
-+ }
-+
-+ DP("members_size %lu %d",
-+ (unsigned long)set_restore->members_size, members_size);
-+ if (members_size != set_restore->members_size)
-+ return line++;
-+ used += set_restore->members_size;
-+ }
-+
-+ bindings:
-+ /* Loop to restore bindings */
-+ while (used < len) {
-+ line++;
-+
-+ DP("restore binding, line %u", line);
-+ /* Get and ensure size */
-+ if (used + sizeof(struct ip_set_hash_save) > len)
-+ return line;
-+ hash_save = data + used;
-+ used += sizeof(struct ip_set_hash_save);
-+
-+ /* hash_save->id is used to store the index */
-+ index = ip_set_find_byindex(hash_save->id);
-+ DP("restore binding index %u, id %u, %u -> %u",
-+ index, hash_save->id, hash_save->ip, hash_save->binding);
-+ if (index != hash_save->id)
-+ return line;
-+ if (ip_set_find_byindex(hash_save->binding) == IP_SET_INVALID_ID) {
-+ DP("corrupt binding set index %u", hash_save->binding);
-+ return line;
-+ }
-+ set = ip_set_list[hash_save->id];
-+ /* Null valued IP means default binding */
-+ if (hash_save->ip)
-+ res = ip_set_hash_add(set->id,
-+ hash_save->ip,
-+ hash_save->binding);
-+ else {
-+ IP_SET_ASSERT(set->binding == IP_SET_INVALID_ID);
-+ write_lock_bh(&ip_set_lock);
-+ set->binding = hash_save->binding;
-+ __ip_set_get(set->binding);
-+ write_unlock_bh(&ip_set_lock);
-+ DP("default binding: %u", set->binding);
-+ }
-+ if (res != 0)
-+ return line;
-+ }
-+ if (used != len)
-+ return line;
-+
-+ return 0;
-+}
-+
-+static int
-+ip_set_sockfn_set(struct sock *sk, int optval, void *user, unsigned int len)
-+{
-+ void *data;
-+ int res = 0; /* Assume OK */
-+ unsigned *op;
-+ struct ip_set_req_adt *req_adt;
-+ ip_set_id_t index = IP_SET_INVALID_ID;
-+ int (*adtfn)(ip_set_id_t index,
-+ const void *data, u_int32_t size);
-+ struct fn_table {
-+ int (*fn)(ip_set_id_t index,
-+ const void *data, u_int32_t size);
-+ } adtfn_table[] =
-+ { { ip_set_addip }, { ip_set_delip }, { ip_set_testip},
-+ { ip_set_bindip}, { ip_set_unbindip }, { ip_set_testbind },
-+ };
-+
-+ DP("optval=%d, user=%p, len=%d", optval, user, len);
-+ if (!capable(CAP_NET_ADMIN))
-+ return -EPERM;
-+ if (optval != SO_IP_SET)
-+ return -EBADF;
-+ if (len <= sizeof(unsigned)) {
-+ ip_set_printk("short userdata (want >%zu, got %u)",
-+ sizeof(unsigned), len);
-+ return -EINVAL;
-+ }
-+ data = vmalloc(len);
-+ if (!data) {
-+ DP("out of mem for %u bytes", len);
-+ return -ENOMEM;
-+ }
-+ if (copy_from_user(data, user, len) != 0) {
-+ res = -EFAULT;
-+ goto done;
-+ }
-+ if (down_interruptible(&ip_set_app_mutex)) {
-+ res = -EINTR;
-+ goto done;
-+ }
-+
-+ op = (unsigned *)data;
-+ DP("op=%x", *op);
-+
-+ if (*op < IP_SET_OP_VERSION) {
-+ /* Check the version at the beginning of operations */
-+ struct ip_set_req_version *req_version = data;
-+ if (req_version->version != IP_SET_PROTOCOL_VERSION) {
-+ res = -EPROTO;
-+ goto done;
-+ }
-+ }
-+
-+ switch (*op) {
-+ case IP_SET_OP_CREATE:{
-+ struct ip_set_req_create *req_create = data;
-+
-+ if (len < sizeof(struct ip_set_req_create)) {
-+ ip_set_printk("short CREATE data (want >=%zu, got %u)",
-+ sizeof(struct ip_set_req_create), len);
-+ res = -EINVAL;
-+ goto done;
-+ }
-+ req_create->name[IP_SET_MAXNAMELEN - 1] = '\0';
-+ req_create->typename[IP_SET_MAXNAMELEN - 1] = '\0';
-+ res = ip_set_create(req_create->name,
-+ req_create->typename,
-+ IP_SET_INVALID_ID,
-+ data + sizeof(struct ip_set_req_create),
-+ len - sizeof(struct ip_set_req_create));
-+ goto done;
-+ }
-+ case IP_SET_OP_DESTROY:{
-+ struct ip_set_req_std *req_destroy = data;
-+
-+ if (len != sizeof(struct ip_set_req_std)) {
-+ ip_set_printk("invalid DESTROY data (want %zu, got %u)",
-+ sizeof(struct ip_set_req_std), len);
-+ res = -EINVAL;
-+ goto done;
-+ }
-+ if (SETNAME_EQ(req_destroy->name, IPSET_TOKEN_ALL)) {
-+ /* Destroy all sets */
-+ index = IP_SET_INVALID_ID;
-+ } else {
-+ req_destroy->name[IP_SET_MAXNAMELEN - 1] = '\0';
-+ index = ip_set_find_byname(req_destroy->name);
-+
-+ if (index == IP_SET_INVALID_ID) {
-+ res = -ENOENT;
-+ goto done;
-+ }
-+ }
-+
-+ res = ip_set_destroy(index);
-+ goto done;
-+ }
-+ case IP_SET_OP_FLUSH:{
-+ struct ip_set_req_std *req_flush = data;
-+
-+ if (len != sizeof(struct ip_set_req_std)) {
-+ ip_set_printk("invalid FLUSH data (want %zu, got %u)",
-+ sizeof(struct ip_set_req_std), len);
-+ res = -EINVAL;
-+ goto done;
-+ }
-+ if (SETNAME_EQ(req_flush->name, IPSET_TOKEN_ALL)) {
-+ /* Flush all sets */
-+ index = IP_SET_INVALID_ID;
-+ } else {
-+ req_flush->name[IP_SET_MAXNAMELEN - 1] = '\0';
-+ index = ip_set_find_byname(req_flush->name);
-+
-+ if (index == IP_SET_INVALID_ID) {
-+ res = -ENOENT;
-+ goto done;
-+ }
-+ }
-+ res = ip_set_flush(index);
-+ goto done;
-+ }
-+ case IP_SET_OP_RENAME:{
-+ struct ip_set_req_create *req_rename = data;
-+
-+ if (len != sizeof(struct ip_set_req_create)) {
-+ ip_set_printk("invalid RENAME data (want %zu, got %u)",
-+ sizeof(struct ip_set_req_create), len);
-+ res = -EINVAL;
-+ goto done;
-+ }
-+
-+ req_rename->name[IP_SET_MAXNAMELEN - 1] = '\0';
-+ req_rename->typename[IP_SET_MAXNAMELEN - 1] = '\0';
-+
-+ index = ip_set_find_byname(req_rename->name);
-+ if (index == IP_SET_INVALID_ID) {
-+ res = -ENOENT;
-+ goto done;
-+ }
-+ res = ip_set_rename(index, req_rename->typename);
-+ goto done;
-+ }
-+ case IP_SET_OP_SWAP:{
-+ struct ip_set_req_create *req_swap = data;
-+ ip_set_id_t to_index;
-+
-+ if (len != sizeof(struct ip_set_req_create)) {
-+ ip_set_printk("invalid SWAP data (want %zu, got %u)",
-+ sizeof(struct ip_set_req_create), len);
-+ res = -EINVAL;
-+ goto done;
-+ }
-+
-+ req_swap->name[IP_SET_MAXNAMELEN - 1] = '\0';
-+ req_swap->typename[IP_SET_MAXNAMELEN - 1] = '\0';
-+
-+ index = ip_set_find_byname(req_swap->name);
-+ if (index == IP_SET_INVALID_ID) {
-+ res = -ENOENT;
-+ goto done;
-+ }
-+ to_index = ip_set_find_byname(req_swap->typename);
-+ if (to_index == IP_SET_INVALID_ID) {
-+ res = -ENOENT;
-+ goto done;
-+ }
-+ res = ip_set_swap(index, to_index);
-+ goto done;
-+ }
-+ default:
-+ break; /* Set identified by id */
-+ }
-+
-+ /* There we may have add/del/test/bind/unbind/test_bind operations */
-+ if (*op < IP_SET_OP_ADD_IP || *op > IP_SET_OP_TEST_BIND_SET) {
-+ res = -EBADMSG;
-+ goto done;
-+ }
-+ adtfn = adtfn_table[*op - IP_SET_OP_ADD_IP].fn;
-+
-+ if (len < sizeof(struct ip_set_req_adt)) {
-+ ip_set_printk("short data in adt request (want >=%zu, got %u)",
-+ sizeof(struct ip_set_req_adt), len);
-+ res = -EINVAL;
-+ goto done;
-+ }
-+ req_adt = data;
-+
-+ /* -U :all: :all:|:default: uses IP_SET_INVALID_ID */
-+ if (!(*op == IP_SET_OP_UNBIND_SET
-+ && req_adt->index == IP_SET_INVALID_ID)) {
-+ index = ip_set_find_byindex(req_adt->index);
-+ if (index == IP_SET_INVALID_ID) {
-+ res = -ENOENT;
-+ goto done;
-+ }
-+ }
-+ res = adtfn(index, data, len);
-+
-+ done:
-+ up(&ip_set_app_mutex);
-+ vfree(data);
-+ if (res > 0)
-+ res = 0;
-+ DP("final result %d", res);
-+ return res;
-+}
-+
-+static int
-+ip_set_sockfn_get(struct sock *sk, int optval, void *user, int *len)
-+{
-+ int res = 0;
-+ unsigned *op;
-+ ip_set_id_t index = IP_SET_INVALID_ID;
-+ void *data;
-+ int copylen = *len;
-+
-+ DP("optval=%d, user=%p, len=%d", optval, user, *len);
-+ if (!capable(CAP_NET_ADMIN))
-+ return -EPERM;
-+ if (optval != SO_IP_SET)
-+ return -EBADF;
-+ if (*len < sizeof(unsigned)) {
-+ ip_set_printk("short userdata (want >=%zu, got %d)",
-+ sizeof(unsigned), *len);
-+ return -EINVAL;
-+ }
-+ data = vmalloc(*len);
-+ if (!data) {
-+ DP("out of mem for %d bytes", *len);
-+ return -ENOMEM;
-+ }
-+ if (copy_from_user(data, user, *len) != 0) {
-+ res = -EFAULT;
-+ goto done;
-+ }
-+ if (down_interruptible(&ip_set_app_mutex)) {
-+ res = -EINTR;
-+ goto done;
-+ }
-+
-+ op = (unsigned *) data;
-+ DP("op=%x", *op);
-+
-+ if (*op < IP_SET_OP_VERSION) {
-+ /* Check the version at the beginning of operations */
-+ struct ip_set_req_version *req_version = data;
-+ if (req_version->version != IP_SET_PROTOCOL_VERSION) {
-+ res = -EPROTO;
-+ goto done;
-+ }
-+ }
-+
-+ switch (*op) {
-+ case IP_SET_OP_VERSION: {
-+ struct ip_set_req_version *req_version = data;
-+
-+ if (*len != sizeof(struct ip_set_req_version)) {
-+ ip_set_printk("invalid VERSION (want %zu, got %d)",
-+ sizeof(struct ip_set_req_version),
-+ *len);
-+ res = -EINVAL;
-+ goto done;
-+ }
-+
-+ req_version->version = IP_SET_PROTOCOL_VERSION;
-+ res = copy_to_user(user, req_version,
-+ sizeof(struct ip_set_req_version));
-+ goto done;
-+ }
-+ case IP_SET_OP_GET_BYNAME: {
-+ struct ip_set_req_get_set *req_get = data;
-+
-+ if (*len != sizeof(struct ip_set_req_get_set)) {
-+ ip_set_printk("invalid GET_BYNAME (want %zu, got %d)",
-+ sizeof(struct ip_set_req_get_set), *len);
-+ res = -EINVAL;
-+ goto done;
-+ }
-+ req_get->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
-+ index = ip_set_find_byname(req_get->set.name);
-+ req_get->set.index = index;
-+ goto copy;
-+ }
-+ case IP_SET_OP_GET_BYINDEX: {
-+ struct ip_set_req_get_set *req_get = data;
-+
-+ if (*len != sizeof(struct ip_set_req_get_set)) {
-+ ip_set_printk("invalid GET_BYINDEX (want %zu, got %d)",
-+ sizeof(struct ip_set_req_get_set), *len);
-+ res = -EINVAL;
-+ goto done;
-+ }
-+ req_get->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
-+ index = ip_set_find_byindex(req_get->set.index);
-+ strncpy(req_get->set.name,
-+ index == IP_SET_INVALID_ID ? ""
-+ : ip_set_list[index]->name, IP_SET_MAXNAMELEN);
-+ goto copy;
-+ }
-+ case IP_SET_OP_ADT_GET: {
-+ struct ip_set_req_adt_get *req_get = data;
-+
-+ if (*len != sizeof(struct ip_set_req_adt_get)) {
-+ ip_set_printk("invalid ADT_GET (want %zu, got %d)",
-+ sizeof(struct ip_set_req_adt_get), *len);
-+ res = -EINVAL;
-+ goto done;
-+ }
-+ req_get->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
-+ index = ip_set_find_byname(req_get->set.name);
-+ if (index != IP_SET_INVALID_ID) {
-+ req_get->set.index = index;
-+ strncpy(req_get->typename,
-+ ip_set_list[index]->type->typename,
-+ IP_SET_MAXNAMELEN - 1);
-+ } else {
-+ res = -ENOENT;
-+ goto done;
-+ }
-+ goto copy;
-+ }
-+ case IP_SET_OP_MAX_SETS: {
-+ struct ip_set_req_max_sets *req_max_sets = data;
-+ ip_set_id_t i;
-+
-+ if (*len != sizeof(struct ip_set_req_max_sets)) {
-+ ip_set_printk("invalid MAX_SETS (want %zu, got %d)",
-+ sizeof(struct ip_set_req_max_sets), *len);
-+ res = -EINVAL;
-+ goto done;
-+ }
-+
-+ if (SETNAME_EQ(req_max_sets->set.name, IPSET_TOKEN_ALL)) {
-+ req_max_sets->set.index = IP_SET_INVALID_ID;
-+ } else {
-+ req_max_sets->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
-+ req_max_sets->set.index =
-+ ip_set_find_byname(req_max_sets->set.name);
-+ if (req_max_sets->set.index == IP_SET_INVALID_ID) {
-+ res = -ENOENT;
-+ goto done;
-+ }
-+ }
-+ req_max_sets->max_sets = ip_set_max;
-+ req_max_sets->sets = 0;
-+ for (i = 0; i < ip_set_max; i++) {
-+ if (ip_set_list[i] != NULL)
-+ req_max_sets->sets++;
-+ }
-+ goto copy;
-+ }
-+ case IP_SET_OP_LIST_SIZE:
-+ case IP_SET_OP_SAVE_SIZE: {
-+ struct ip_set_req_setnames *req_setnames = data;
-+ struct ip_set_name_list *name_list;
-+ struct ip_set *set;
-+ ip_set_id_t i;
-+ int used;
-+
-+ if (*len < sizeof(struct ip_set_req_setnames)) {
-+ ip_set_printk("short LIST_SIZE (want >=%zu, got %d)",
-+ sizeof(struct ip_set_req_setnames), *len);
-+ res = -EINVAL;
-+ goto done;
-+ }
-+
-+ req_setnames->size = 0;
-+ used = sizeof(struct ip_set_req_setnames);
-+ for (i = 0; i < ip_set_max; i++) {
-+ if (ip_set_list[i] == NULL)
-+ continue;
-+ name_list = data + used;
-+ used += sizeof(struct ip_set_name_list);
-+ if (used > copylen) {
-+ res = -EAGAIN;
-+ goto done;
-+ }
-+ set = ip_set_list[i];
-+ /* Fill in index, name, etc. */
-+ name_list->index = i;
-+ name_list->id = set->id;
-+ strncpy(name_list->name,
-+ set->name,
-+ IP_SET_MAXNAMELEN - 1);
-+ strncpy(name_list->typename,
-+ set->type->typename,
-+ IP_SET_MAXNAMELEN - 1);
-+ DP("filled %s of type %s, index %u\n",
-+ name_list->name, name_list->typename,
-+ name_list->index);
-+ if (!(req_setnames->index == IP_SET_INVALID_ID
-+ || req_setnames->index == i))
-+ continue;
-+ /* Update size */
-+ switch (*op) {
-+ case IP_SET_OP_LIST_SIZE: {
-+ req_setnames->size += sizeof(struct ip_set_list)
-+ + set->type->header_size
-+ + set->type->list_members_size(set);
-+ /* Sets are identified by id in the hash */
-+ FOREACH_HASH_DO(__set_hash_bindings_size_list,
-+ set->id, &req_setnames->size);
-+ break;
-+ }
-+ case IP_SET_OP_SAVE_SIZE: {
-+ req_setnames->size += sizeof(struct ip_set_save)
-+ + set->type->header_size
-+ + set->type->list_members_size(set);
-+ FOREACH_HASH_DO(__set_hash_bindings_size_save,
-+ set->id, &req_setnames->size);
-+ break;
-+ }
-+ default:
-+ break;
-+ }
-+ }
-+ if (copylen != used) {
-+ res = -EAGAIN;
-+ goto done;
-+ }
-+ goto copy;
-+ }
-+ case IP_SET_OP_LIST: {
-+ struct ip_set_req_list *req_list = data;
-+ ip_set_id_t i;
-+ int used;
-+
-+ if (*len < sizeof(struct ip_set_req_list)) {
-+ ip_set_printk("short LIST (want >=%zu, got %d)",
-+ sizeof(struct ip_set_req_list), *len);
-+ res = -EINVAL;
-+ goto done;
-+ }
-+ index = req_list->index;
-+ if (index != IP_SET_INVALID_ID
-+ && ip_set_find_byindex(index) != index) {
-+ res = -ENOENT;
-+ goto done;
-+ }
-+ used = 0;
-+ if (index == IP_SET_INVALID_ID) {
-+ /* List all sets */
-+ for (i = 0; i < ip_set_max && res == 0; i++) {
-+ if (ip_set_list[i] != NULL)
-+ res = ip_set_list_set(i, data, &used, *len);
-+ }
-+ } else {
-+ /* List an individual set */
-+ res = ip_set_list_set(index, data, &used, *len);
-+ }
-+ if (res != 0)
-+ goto done;
-+ else if (copylen != used) {
-+ res = -EAGAIN;
-+ goto done;
-+ }
-+ goto copy;
-+ }
-+ case IP_SET_OP_SAVE: {
-+ struct ip_set_req_list *req_save = data;
-+ ip_set_id_t i;
-+ int used;
-+
-+ if (*len < sizeof(struct ip_set_req_list)) {
-+ ip_set_printk("short SAVE (want >=%zu, got %d)",
-+ sizeof(struct ip_set_req_list), *len);
-+ res = -EINVAL;
-+ goto done;
-+ }
-+ index = req_save->index;
-+ if (index != IP_SET_INVALID_ID
-+ && ip_set_find_byindex(index) != index) {
-+ res = -ENOENT;
-+ goto done;
-+ }
-+
-+#define SETLIST(set) (strcmp(set->type->typename, "setlist") == 0)
-+
-+ used = 0;
-+ if (index == IP_SET_INVALID_ID) {
-+ /* Save all sets: ugly setlist type dependency */
-+ int setlist = 0;
-+ setlists:
-+ for (i = 0; i < ip_set_max && res == 0; i++) {
-+ if (ip_set_list[i] != NULL
-+ && !(setlist ^ SETLIST(ip_set_list[i])))
-+ res = ip_set_save_set(i, data, &used, *len);
-+ }
-+ if (!setlist) {
-+ setlist = 1;
-+ goto setlists;
-+ }
-+ } else {
-+ /* Save an individual set */
-+ res = ip_set_save_set(index, data, &used, *len);
-+ }
-+ if (res == 0)
-+ res = ip_set_save_bindings(index, data, &used, *len);
-+
-+ if (res != 0)
-+ goto done;
-+ else if (copylen != used) {
-+ res = -EAGAIN;
-+ goto done;
-+ }
-+ goto copy;
-+ }
-+ case IP_SET_OP_RESTORE: {
-+ struct ip_set_req_setnames *req_restore = data;
-+ int line;
-+
-+ if (*len < sizeof(struct ip_set_req_setnames)
-+ || *len != req_restore->size) {
-+ ip_set_printk("invalid RESTORE (want =%lu, got %d)",
-+ (long unsigned)req_restore->size, *len);
-+ res = -EINVAL;
-+ goto done;
-+ }
-+ line = ip_set_restore(data + sizeof(struct ip_set_req_setnames),
-+ req_restore->size - sizeof(struct ip_set_req_setnames));
-+ DP("ip_set_restore: %d", line);
-+ if (line != 0) {
-+ res = -EAGAIN;
-+ req_restore->size = line;
-+ copylen = sizeof(struct ip_set_req_setnames);
-+ goto copy;
-+ }
-+ goto done;
-+ }
-+ default:
-+ res = -EBADMSG;
-+ goto done;
-+ } /* end of switch(op) */
-+
-+ copy:
-+ DP("set %s, copylen %d", index != IP_SET_INVALID_ID
-+ && ip_set_list[index]
-+ ? ip_set_list[index]->name
-+ : ":all:", copylen);
-+ res = copy_to_user(user, data, copylen);
-+
-+ done:
-+ up(&ip_set_app_mutex);
-+ vfree(data);
-+ if (res > 0)
-+ res = 0;
-+ DP("final result %d", res);
-+ return res;
-+}
-+
-+static struct nf_sockopt_ops so_set = {
-+ .pf = PF_INET,
-+ .set_optmin = SO_IP_SET,
-+ .set_optmax = SO_IP_SET + 1,
-+ .set = &ip_set_sockfn_set,
-+ .get_optmin = SO_IP_SET,
-+ .get_optmax = SO_IP_SET + 1,
-+ .get = &ip_set_sockfn_get,
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
-+ .use = 0,
-+#else
-+ .owner = THIS_MODULE,
-+#endif
-+};
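-+
-+/* A minimal userspace sketch of how the sockopt handlers above are
-+ * reached (assuming the request layouts from ip_set.h; the first word of
-+ * every request buffer is the operation code examined by
-+ * ip_set_sockfn_set()/ip_set_sockfn_get()):
-+ *
-+ *	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_RAW);
-+ *	struct ip_set_req_version req = { .op = IP_SET_OP_VERSION };
-+ *	socklen_t size = sizeof(req);
-+ *	getsockopt(fd, SOL_IP, SO_IP_SET, &req, &size);
-+ *	// on success req.version holds IP_SET_PROTOCOL_VERSION
-+ */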
-+
-+static int max_sets, hash_size;
-+
-+module_param(max_sets, int, 0600);
-+MODULE_PARM_DESC(max_sets, "maximal number of sets");
-+module_param(hash_size, int, 0600);
-+MODULE_PARM_DESC(hash_size, "hash size for bindings");
-+MODULE_LICENSE("GPL");
-+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-+MODULE_DESCRIPTION("module implementing core IP set support");
-+
-+static int __init ip_set_init(void)
-+{
-+ int res;
-+ ip_set_id_t i;
-+
-+ get_random_bytes(&ip_set_hash_random, 4);
-+ if (max_sets)
-+ ip_set_max = max_sets;
-+ ip_set_list = vmalloc(sizeof(struct ip_set *) * ip_set_max);
-+ if (!ip_set_list) {
-+ printk(KERN_ERR "Unable to create ip_set_list\n");
-+ return -ENOMEM;
-+ }
-+ memset(ip_set_list, 0, sizeof(struct ip_set *) * ip_set_max);
-+ if (hash_size)
-+ ip_set_bindings_hash_size = hash_size;
-+ ip_set_hash = vmalloc(sizeof(struct list_head) * ip_set_bindings_hash_size);
-+ if (!ip_set_hash) {
-+ printk(KERN_ERR "Unable to create ip_set_hash\n");
-+ vfree(ip_set_list);
-+ return -ENOMEM;
-+ }
-+ for (i = 0; i < ip_set_bindings_hash_size; i++)
-+ INIT_LIST_HEAD(&ip_set_hash[i]);
-+
-+ INIT_LIST_HEAD(&set_type_list);
-+
-+ res = nf_register_sockopt(&so_set);
-+ if (res != 0) {
-+ ip_set_printk("SO_SET registry failed: %d", res);
-+ vfree(ip_set_list);
-+ vfree(ip_set_hash);
-+ return res;
-+ }
-+
-+ return 0;
-+}
-+
-+static void __exit ip_set_fini(void)
-+{
-+ /* There can't be any existing set or binding */
-+ nf_unregister_sockopt(&so_set);
-+ vfree(ip_set_list);
-+ vfree(ip_set_hash);
-+ DP("these are the famous last words");
-+}
-+
-+EXPORT_SYMBOL(ip_set_register_set_type);
-+EXPORT_SYMBOL(ip_set_unregister_set_type);
-+
-+EXPORT_SYMBOL(ip_set_get_byname);
-+EXPORT_SYMBOL(ip_set_get_byindex);
-+EXPORT_SYMBOL(ip_set_put_byindex);
-+EXPORT_SYMBOL(ip_set_id);
-+EXPORT_SYMBOL(__ip_set_get_byname);
-+EXPORT_SYMBOL(__ip_set_put_byindex);
-+
-+EXPORT_SYMBOL(ip_set_addip_kernel);
-+EXPORT_SYMBOL(ip_set_delip_kernel);
-+EXPORT_SYMBOL(ip_set_testip_kernel);
-+
-+module_init(ip_set_init);
-+module_exit(ip_set_fini);
---- /dev/null
-+++ b/net/ipv4/netfilter/ip_set_iphash.c
-@@ -0,0 +1,166 @@
-+/* Copyright (C) 2003-2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+/* Kernel module implementing an ip hash set */
-+
-+#include <linux/module.h>
-+#include <linux/moduleparam.h>
-+#include <linux/ip.h>
-+#include <linux/skbuff.h>
-+#include <linux/netfilter_ipv4/ip_set_jhash.h>
-+#include <linux/errno.h>
-+#include <asm/uaccess.h>
-+#include <asm/bitops.h>
-+#include <linux/spinlock.h>
-+#include <linux/random.h>
-+
-+#include <net/ip.h>
-+
-+#include <linux/netfilter_ipv4/ip_set_iphash.h>
-+
-+static int limit = MAX_RANGE;
-+
-+static inline __u32
-+iphash_id(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
-+{
-+ struct ip_set_iphash *map = set->data;
-+ __u32 id;
-+ u_int16_t i;
-+ ip_set_ip_t *elem;
-+
-+ *hash_ip = ip & map->netmask;
-+ DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u, %u.%u.%u.%u",
-+ set->name, HIPQUAD(ip), HIPQUAD(*hash_ip), HIPQUAD(map->netmask));
-+
-+ for (i = 0; i < map->probes; i++) {
-+ id = jhash_ip(map, i, *hash_ip) % map->hashsize;
-+ DP("hash key: %u", id);
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
-+ if (*elem == *hash_ip)
-+ return id;
-+ /* No shortcut - there can be deleted entries. */
-+ }
-+ return UINT_MAX;
-+}
-+
-+static inline int
-+iphash_test(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
-+{
-+ return (ip && iphash_id(set, hash_ip, ip) != UINT_MAX);
-+}
-+
-+#define KADT_CONDITION
-+
-+UADT(iphash, test)
-+KADT(iphash, test, ipaddr)
-+
-+static inline int
-+__iphash_add(struct ip_set_iphash *map, ip_set_ip_t *ip)
-+{
-+ __u32 probe;
-+ u_int16_t i;
-+ ip_set_ip_t *elem, *slot = NULL;
-+
-+ for (i = 0; i < map->probes; i++) {
-+ probe = jhash_ip(map, i, *ip) % map->hashsize;
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, probe);
-+ if (*elem == *ip)
-+ return -EEXIST;
-+ if (!(slot || *elem))
-+ slot = elem;
-+ /* There can be deleted entries, must check all slots */
-+ }
-+ if (slot) {
-+ *slot = *ip;
-+ map->elements++;
-+ return 0;
-+ }
-+ /* Trigger rehashing */
-+ return -EAGAIN;
-+}
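-+
-+/* When every probe slot is taken, the -EAGAIN above triggers rehashing:
-+ * a minimal sketch of the retry loop, as done by __ip_set_addip() in
-+ * ip_set.c together with HASH_RETRY() below:
-+ *
-+ *	do {
-+ *		write_lock_bh(&set->lock);
-+ *		res = set->type->addip(set, data, size, &ip);
-+ *		write_unlock_bh(&set->lock);
-+ *	} while (res == -EAGAIN
-+ *		 && set->type->retry
-+ *		 && (res = set->type->retry(set)) == 0);
-+ */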
-+
-+static inline int
-+iphash_add(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
-+{
-+ struct ip_set_iphash *map = set->data;
-+
-+ if (!ip || map->elements >= limit)
-+ return -ERANGE;
-+
-+ *hash_ip = ip & map->netmask;
-+
-+ return __iphash_add(map, hash_ip);
-+}
-+
-+UADT(iphash, add)
-+KADT(iphash, add, ipaddr)
-+
-+static inline void
-+__iphash_retry(struct ip_set_iphash *tmp, struct ip_set_iphash *map)
-+{
-+ tmp->netmask = map->netmask;
-+}
-+
-+HASH_RETRY(iphash, ip_set_ip_t)
-+
-+static inline int
-+iphash_del(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
-+{
-+ struct ip_set_iphash *map = set->data;
-+ ip_set_ip_t id, *elem;
-+
-+ if (!ip)
-+ return -ERANGE;
-+
-+ id = iphash_id(set, hash_ip, ip);
-+ if (id == UINT_MAX)
-+ return -EEXIST;
-+
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
-+ *elem = 0;
-+ map->elements--;
-+
-+ return 0;
-+}
-+
-+UADT(iphash, del)
-+KADT(iphash, del, ipaddr)
-+
-+static inline int
-+__iphash_create(const struct ip_set_req_iphash_create *req,
-+ struct ip_set_iphash *map)
-+{
-+ map->netmask = req->netmask;
-+
-+ return 0;
-+}
-+
-+HASH_CREATE(iphash, ip_set_ip_t)
-+HASH_DESTROY(iphash)
-+
-+HASH_FLUSH(iphash, ip_set_ip_t)
-+
-+static inline void
-+__iphash_list_header(const struct ip_set_iphash *map,
-+ struct ip_set_req_iphash_create *header)
-+{
-+ header->netmask = map->netmask;
-+}
-+
-+HASH_LIST_HEADER(iphash)
-+HASH_LIST_MEMBERS_SIZE(iphash, ip_set_ip_t)
-+HASH_LIST_MEMBERS(iphash, ip_set_ip_t)
-+
-+IP_SET_RTYPE(iphash, IPSET_TYPE_IP | IPSET_DATA_SINGLE)
-+
-+MODULE_LICENSE("GPL");
-+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-+MODULE_DESCRIPTION("iphash type of IP sets");
-+module_param(limit, int, 0600);
-+MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
-+
-+REGISTER_MODULE(iphash)
---- /dev/null
-+++ b/net/ipv4/netfilter/ip_set_ipmap.c
-@@ -0,0 +1,142 @@
-+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
-+ * Patrick Schaaf <bof@bof.de>
-+ * Copyright (C) 2003-2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+/* Kernel module implementing an IP set type: the single bitmap type */
-+
-+#include <linux/module.h>
-+#include <linux/ip.h>
-+#include <linux/skbuff.h>
-+#include <linux/errno.h>
-+#include <asm/uaccess.h>
-+#include <asm/bitops.h>
-+#include <linux/spinlock.h>
-+
-+#include <linux/netfilter_ipv4/ip_set_ipmap.h>
-+
-+static inline ip_set_ip_t
-+ip_to_id(const struct ip_set_ipmap *map, ip_set_ip_t ip)
-+{
-+ return (ip - map->first_ip)/map->hosts;
-+}
-+
-+static inline int
-+ipmap_test(const struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
-+{
-+ const struct ip_set_ipmap *map = set->data;
-+
-+ if (ip < map->first_ip || ip > map->last_ip)
-+ return -ERANGE;
-+
-+ *hash_ip = ip & map->netmask;
-+ DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u",
-+ set->name, HIPQUAD(ip), HIPQUAD(*hash_ip));
-+ return !!test_bit(ip_to_id(map, *hash_ip), map->members);
-+}
-+
-+#define KADT_CONDITION
-+
-+UADT(ipmap, test)
-+KADT(ipmap, test, ipaddr)
-+
-+static inline int
-+ipmap_add(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
-+{
-+ struct ip_set_ipmap *map = set->data;
-+
-+ if (ip < map->first_ip || ip > map->last_ip)
-+ return -ERANGE;
-+
-+ *hash_ip = ip & map->netmask;
-+ DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
-+ if (test_and_set_bit(ip_to_id(map, *hash_ip), map->members))
-+ return -EEXIST;
-+
-+ return 0;
-+}
-+
-+UADT(ipmap, add)
-+KADT(ipmap, add, ipaddr)
-+
-+static inline int
-+ipmap_del(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
-+{
-+ struct ip_set_ipmap *map = set->data;
-+
-+ if (ip < map->first_ip || ip > map->last_ip)
-+ return -ERANGE;
-+
-+ *hash_ip = ip & map->netmask;
-+ DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
-+ if (!test_and_clear_bit(ip_to_id(map, *hash_ip), map->members))
-+ return -EEXIST;
-+
-+ return 0;
-+}
-+
-+UADT(ipmap, del)
-+KADT(ipmap, del, ipaddr)
-+
-+static inline int
-+__ipmap_create(const struct ip_set_req_ipmap_create *req,
-+ struct ip_set_ipmap *map)
-+{
-+ map->netmask = req->netmask;
-+
-+ if (req->netmask == 0xFFFFFFFF) {
-+ map->hosts = 1;
-+ map->sizeid = map->last_ip - map->first_ip + 1;
-+ } else {
-+ unsigned int mask_bits, netmask_bits;
-+ ip_set_ip_t mask;
-+
-+ map->first_ip &= map->netmask; /* Should we better bark? */
-+
-+ mask = range_to_mask(map->first_ip, map->last_ip, &mask_bits);
-+ netmask_bits = mask_to_bits(map->netmask);
-+
-+ if ((!mask && (map->first_ip || map->last_ip != 0xFFFFFFFF))
-+ || netmask_bits <= mask_bits)
-+ return -ENOEXEC;
-+
-+ DP("mask_bits %u, netmask_bits %u",
-+ mask_bits, netmask_bits);
-+ map->hosts = 2 << (32 - netmask_bits - 1);
-+ map->sizeid = 2 << (netmask_bits - mask_bits - 1);
-+ }
-+ if (map->sizeid > MAX_RANGE + 1) {
-+ ip_set_printk("range too big, %d elements (max %d)",
-+ map->sizeid, MAX_RANGE+1);
-+ return -ENOEXEC;
-+ }
-+ DP("hosts %u, sizeid %u", map->hosts, map->sizeid);
-+ return bitmap_bytes(0, map->sizeid - 1);
-+}
-+
-+BITMAP_CREATE(ipmap)
-+BITMAP_DESTROY(ipmap)
-+BITMAP_FLUSH(ipmap)
-+
-+static inline void
-+__ipmap_list_header(const struct ip_set_ipmap *map,
-+ struct ip_set_req_ipmap_create *header)
-+{
-+ header->netmask = map->netmask;
-+}
-+
-+BITMAP_LIST_HEADER(ipmap)
-+BITMAP_LIST_MEMBERS_SIZE(ipmap)
-+BITMAP_LIST_MEMBERS(ipmap)
-+
-+IP_SET_TYPE(ipmap, IPSET_TYPE_IP | IPSET_DATA_SINGLE)
-+
-+MODULE_LICENSE("GPL");
-+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-+MODULE_DESCRIPTION("ipmap type of IP sets");
-+
-+REGISTER_MODULE(ipmap)
---- /dev/null
-+++ b/net/ipv4/netfilter/ip_set_ipporthash.c
-@@ -0,0 +1,203 @@
-+/* Copyright (C) 2003-2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+/* Kernel module implementing an ip+port hash set */
-+
-+#include <linux/module.h>
-+#include <linux/moduleparam.h>
-+#include <linux/ip.h>
-+#include <linux/tcp.h>
-+#include <linux/udp.h>
-+#include <linux/skbuff.h>
-+#include <linux/netfilter_ipv4/ip_set_jhash.h>
-+#include <linux/errno.h>
-+#include <asm/uaccess.h>
-+#include <asm/bitops.h>
-+#include <linux/spinlock.h>
-+#include <linux/random.h>
-+
-+#include <net/ip.h>
-+
-+#include <linux/netfilter_ipv4/ip_set_ipporthash.h>
-+#include <linux/netfilter_ipv4/ip_set_getport.h>
-+
-+static int limit = MAX_RANGE;
-+
-+static inline __u32
-+ipporthash_id(struct ip_set *set, ip_set_ip_t *hash_ip,
-+ ip_set_ip_t ip, ip_set_ip_t port)
-+{
-+ struct ip_set_ipporthash *map = set->data;
-+ __u32 id;
-+ u_int16_t i;
-+ ip_set_ip_t *elem;
-+
-+ *hash_ip = pack_ip_port(map, ip, port);
-+
-+ DP("set: %s, ipport:%u.%u.%u.%u:%u, %u.%u.%u.%u",
-+ set->name, HIPQUAD(ip), port, HIPQUAD(*hash_ip));
-+ if (!*hash_ip)
-+ return UINT_MAX;
-+
-+ for (i = 0; i < map->probes; i++) {
-+ id = jhash_ip(map, i, *hash_ip) % map->hashsize;
-+ DP("hash key: %u", id);
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
-+ if (*elem == *hash_ip)
-+ return id;
-+ /* No shortcut - there can be deleted entries. */
-+ }
-+ return UINT_MAX;
-+}
-+
-+static inline int
-+ipporthash_test(struct ip_set *set, ip_set_ip_t *hash_ip,
-+ ip_set_ip_t ip, ip_set_ip_t port)
-+{
-+ struct ip_set_ipporthash *map = set->data;
-+
-+ if (ip < map->first_ip || ip > map->last_ip)
-+ return -ERANGE;
-+
-+ return (ipporthash_id(set, hash_ip, ip, port) != UINT_MAX);
-+}
-+
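-+/* Extra precondition spliced into the kernel-side (KADT) wrappers below:
-+ * flags[index+1] tells get_port() how to extract the port from the packet,
-+ * and zero means no port dimension was given, so the lookup bails out. */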
-+#define KADT_CONDITION \
-+ ip_set_ip_t port; \
-+ \
-+ if (flags[index+1] == 0) \
-+ return 0; \
-+ \
-+ port = get_port(skb, flags[index+1]); \
-+ \
-+ if (port == INVALID_PORT) \
-+ return 0;
-+
-+UADT(ipporthash, test, req->port)
-+KADT(ipporthash, test, ipaddr, port)
-+
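-+/* Open addressing with map->probes hash probes: remember the first free
-+ * slot, fail with -EEXIST on a duplicate and return -EAGAIN when every
-+ * probed slot is taken, so the generic code retries with a rehashed table. */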
-+static inline int
-+__ipporthash_add(struct ip_set_ipporthash *map, ip_set_ip_t *ip)
-+{
-+ __u32 probe;
-+ u_int16_t i;
-+ ip_set_ip_t *elem, *slot = NULL;
-+
-+ for (i = 0; i < map->probes; i++) {
-+ probe = jhash_ip(map, i, *ip) % map->hashsize;
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, probe);
-+ if (*elem == *ip)
-+ return -EEXIST;
-+ if (!(slot || *elem))
-+ slot = elem;
-+ /* There can be deleted entries, must check all slots */
-+ }
-+ if (slot) {
-+ *slot = *ip;
-+ map->elements++;
-+ return 0;
-+ }
-+ /* Trigger rehashing */
-+ return -EAGAIN;
-+}
-+
-+static inline int
-+ipporthash_add(struct ip_set *set, ip_set_ip_t *hash_ip,
-+ ip_set_ip_t ip, ip_set_ip_t port)
-+{
-+ struct ip_set_ipporthash *map = set->data;
-+ if (map->elements > limit)
-+ return -ERANGE;
-+ if (ip < map->first_ip || ip > map->last_ip)
-+ return -ERANGE;
-+
-+ *hash_ip = pack_ip_port(map, ip, port);
-+
-+ if (!*hash_ip)
-+ return -ERANGE;
-+
-+ return __ipporthash_add(map, hash_ip);
-+}
-+
-+UADT(ipporthash, add, req->port)
-+KADT(ipporthash, add, ipaddr, port)
-+
-+static inline void
-+__ipporthash_retry(struct ip_set_ipporthash *tmp,
-+ struct ip_set_ipporthash *map)
-+{
-+ tmp->first_ip = map->first_ip;
-+ tmp->last_ip = map->last_ip;
-+}
-+
-+HASH_RETRY(ipporthash, ip_set_ip_t)
-+
-+static inline int
-+ipporthash_del(struct ip_set *set, ip_set_ip_t *hash_ip,
-+ ip_set_ip_t ip, ip_set_ip_t port)
-+{
-+ struct ip_set_ipporthash *map = set->data;
-+ ip_set_ip_t id;
-+ ip_set_ip_t *elem;
-+
-+ if (ip < map->first_ip || ip > map->last_ip)
-+ return -ERANGE;
-+
-+ id = ipporthash_id(set, hash_ip, ip, port);
-+
-+ if (id == UINT_MAX)
-+ return -EEXIST;
-+
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
-+ *elem = 0;
-+ map->elements--;
-+
-+ return 0;
-+}
-+
-+UADT(ipporthash, del, req->port)
-+KADT(ipporthash, del, ipaddr, port)
-+
-+static inline int
-+__ipporthash_create(const struct ip_set_req_ipporthash_create *req,
-+ struct ip_set_ipporthash *map)
-+{
-+ if (req->to - req->from > MAX_RANGE) {
-+ ip_set_printk("range too big, %d elements (max %d)",
-+ req->to - req->from + 1, MAX_RANGE+1);
-+ return -ENOEXEC;
-+ }
-+ map->first_ip = req->from;
-+ map->last_ip = req->to;
-+ return 0;
-+}
-+
-+HASH_CREATE(ipporthash, ip_set_ip_t)
-+HASH_DESTROY(ipporthash)
-+HASH_FLUSH(ipporthash, ip_set_ip_t)
-+
-+static inline void
-+__ipporthash_list_header(const struct ip_set_ipporthash *map,
-+ struct ip_set_req_ipporthash_create *header)
-+{
-+ header->from = map->first_ip;
-+ header->to = map->last_ip;
-+}
-+
-+HASH_LIST_HEADER(ipporthash)
-+HASH_LIST_MEMBERS_SIZE(ipporthash, ip_set_ip_t)
-+HASH_LIST_MEMBERS(ipporthash, ip_set_ip_t)
-+
-+IP_SET_RTYPE(ipporthash, IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_DATA_DOUBLE)
-+
-+MODULE_LICENSE("GPL");
-+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-+MODULE_DESCRIPTION("ipporthash type of IP sets");
-+module_param(limit, int, 0600);
-+MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
-+
-+REGISTER_MODULE(ipporthash)
---- /dev/null
-+++ b/net/ipv4/netfilter/ip_set_ipportiphash.c
-@@ -0,0 +1,216 @@
-+/* Copyright (C) 2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+/* Kernel module implementing an ip+port+ip hash set */
-+
-+#include <linux/module.h>
-+#include <linux/moduleparam.h>
-+#include <linux/ip.h>
-+#include <linux/tcp.h>
-+#include <linux/udp.h>
-+#include <linux/skbuff.h>
-+#include <linux/netfilter_ipv4/ip_set_jhash.h>
-+#include <linux/errno.h>
-+#include <asm/uaccess.h>
-+#include <asm/bitops.h>
-+#include <linux/spinlock.h>
-+#include <linux/random.h>
-+
-+#include <net/ip.h>
-+
-+#include <linux/netfilter_ipv4/ip_set_ipportiphash.h>
-+#include <linux/netfilter_ipv4/ip_set_getport.h>
-+
-+static int limit = MAX_RANGE;
-+
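-+/* Hash the packed ip:port word together with the second address, using the
-+ * i-th random init value so that every probe gets an independent hash. */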
-+#define jhash_ip2(map, i, ipport, ip1) \
-+ jhash_2words(ipport, ip1, *(map->initval + i))
-+
-+static inline __u32
-+ipportiphash_id(struct ip_set *set, ip_set_ip_t *hash_ip,
-+ ip_set_ip_t ip, ip_set_ip_t port, ip_set_ip_t ip1)
-+{
-+ struct ip_set_ipportiphash *map = set->data;
-+ __u32 id;
-+ u_int16_t i;
-+ struct ipportip *elem;
-+
-+ *hash_ip = pack_ip_port(map, ip, port);
-+ DP("set: %s, ipport:%u.%u.%u.%u:%u, %u.%u.%u.%u",
-+ set->name, HIPQUAD(ip), port, HIPQUAD(*hash_ip));
-+ if (!(*hash_ip || ip1))
-+ return UINT_MAX;
-+
-+ for (i = 0; i < map->probes; i++) {
-+ id = jhash_ip2(map, i, *hash_ip, ip1) % map->hashsize;
-+ DP("hash key: %u", id);
-+ elem = HARRAY_ELEM(map->members, struct ipportip *, id);
-+ if (elem->ip == *hash_ip && elem->ip1 == ip1)
-+ return id;
-+ /* No shortcut - there can be deleted entries. */
-+ }
-+ return UINT_MAX;
-+}
-+
-+static inline int
-+ipportiphash_test(struct ip_set *set, ip_set_ip_t *hash_ip,
-+ ip_set_ip_t ip, ip_set_ip_t port, ip_set_ip_t ip1)
-+{
-+ struct ip_set_ipportiphash *map = set->data;
-+
-+ if (ip < map->first_ip || ip > map->last_ip)
-+ return -ERANGE;
-+
-+ return (ipportiphash_id(set, hash_ip, ip, port, ip1) != UINT_MAX);
-+}
-+
-+#define KADT_CONDITION \
-+ ip_set_ip_t port, ip1; \
-+ \
-+ if (flags[index+2] == 0) \
-+ return 0; \
-+ \
-+ port = get_port(skb, flags[index+1]); \
-+ ip1 = ipaddr(skb, flags[index+2]); \
-+ \
-+ if (port == INVALID_PORT) \
-+ return 0;
-+
-+UADT(ipportiphash, test, req->port, req->ip1)
-+KADT(ipportiphash, test, ipaddr, port, ip1)
-+
-+static inline int
-+__ipportip_add(struct ip_set_ipportiphash *map,
-+ ip_set_ip_t hash_ip, ip_set_ip_t ip1)
-+{
-+ __u32 probe;
-+ u_int16_t i;
-+ struct ipportip *elem, *slot = NULL;
-+
-+ for (i = 0; i < map->probes; i++) {
-+ probe = jhash_ip2(map, i, hash_ip, ip1) % map->hashsize;
-+ elem = HARRAY_ELEM(map->members, struct ipportip *, probe);
-+ if (elem->ip == hash_ip && elem->ip1 == ip1)
-+ return -EEXIST;
-+ if (!(slot || elem->ip || elem->ip1))
-+ slot = elem;
-+ /* There can be deleted entries, must check all slots */
-+ }
-+ if (slot) {
-+ slot->ip = hash_ip;
-+ slot->ip1 = ip1;
-+ map->elements++;
-+ return 0;
-+ }
-+ /* Trigger rehashing */
-+ return -EAGAIN;
-+}
-+
-+static inline int
-+__ipportiphash_add(struct ip_set_ipportiphash *map,
-+ struct ipportip *elem)
-+{
-+ return __ipportip_add(map, elem->ip, elem->ip1);
-+}
-+
-+static inline int
-+ipportiphash_add(struct ip_set *set, ip_set_ip_t *hash_ip,
-+ ip_set_ip_t ip, ip_set_ip_t port, ip_set_ip_t ip1)
-+{
-+ struct ip_set_ipportiphash *map = set->data;
-+
-+ if (map->elements > limit)
-+ return -ERANGE;
-+ if (ip < map->first_ip || ip > map->last_ip)
-+ return -ERANGE;
-+
-+ *hash_ip = pack_ip_port(map, ip, port);
-+ if (!(*hash_ip || ip1))
-+ return -ERANGE;
-+
-+ return __ipportip_add(map, *hash_ip, ip1);
-+}
-+
-+UADT(ipportiphash, add, req->port, req->ip1)
-+KADT(ipportiphash, add, ipaddr, port, ip1)
-+
-+static inline void
-+__ipportiphash_retry(struct ip_set_ipportiphash *tmp,
-+ struct ip_set_ipportiphash *map)
-+{
-+ tmp->first_ip = map->first_ip;
-+ tmp->last_ip = map->last_ip;
-+}
-+
-+HASH_RETRY2(ipportiphash, struct ipportip)
-+
-+static inline int
-+ipportiphash_del(struct ip_set *set, ip_set_ip_t *hash_ip,
-+ ip_set_ip_t ip, ip_set_ip_t port, ip_set_ip_t ip1)
-+{
-+ struct ip_set_ipportiphash *map = set->data;
-+ ip_set_ip_t id;
-+ struct ipportip *elem;
-+
-+ if (ip < map->first_ip || ip > map->last_ip)
-+ return -ERANGE;
-+
-+ id = ipportiphash_id(set, hash_ip, ip, port, ip1);
-+
-+ if (id == UINT_MAX)
-+ return -EEXIST;
-+
-+ elem = HARRAY_ELEM(map->members, struct ipportip *, id);
-+ elem->ip = elem->ip1 = 0;
-+ map->elements--;
-+
-+ return 0;
-+}
-+
-+UADT(ipportiphash, del, req->port, req->ip1)
-+KADT(ipportiphash, del, ipaddr, port, ip1)
-+
-+static inline int
-+__ipportiphash_create(const struct ip_set_req_ipportiphash_create *req,
-+ struct ip_set_ipportiphash *map)
-+{
-+ if (req->to - req->from > MAX_RANGE) {
-+ ip_set_printk("range too big, %d elements (max %d)",
-+ req->to - req->from + 1, MAX_RANGE+1);
-+ return -ENOEXEC;
-+ }
-+ map->first_ip = req->from;
-+ map->last_ip = req->to;
-+ return 0;
-+}
-+
-+HASH_CREATE(ipportiphash, struct ipportip)
-+HASH_DESTROY(ipportiphash)
-+HASH_FLUSH(ipportiphash, struct ipportip)
-+
-+static inline void
-+__ipportiphash_list_header(const struct ip_set_ipportiphash *map,
-+ struct ip_set_req_ipportiphash_create *header)
-+{
-+ header->from = map->first_ip;
-+ header->to = map->last_ip;
-+}
-+
-+HASH_LIST_HEADER(ipportiphash)
-+HASH_LIST_MEMBERS_SIZE(ipportiphash, struct ipportip)
-+HASH_LIST_MEMBERS_MEMCPY(ipportiphash, struct ipportip)
-+
-+IP_SET_RTYPE(ipportiphash, IPSET_TYPE_IP | IPSET_TYPE_PORT
-+ | IPSET_TYPE_IP1 | IPSET_DATA_TRIPLE)
-+
-+MODULE_LICENSE("GPL");
-+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-+MODULE_DESCRIPTION("ipportiphash type of IP sets");
-+module_param(limit, int, 0600);
-+MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
-+
-+REGISTER_MODULE(ipportiphash)
---- /dev/null
-+++ b/net/ipv4/netfilter/ip_set_ipportnethash.c
-@@ -0,0 +1,304 @@
-+/* Copyright (C) 2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+/* Kernel module implementing an ip+port+net hash set */
-+
-+#include <linux/module.h>
-+#include <linux/moduleparam.h>
-+#include <linux/ip.h>
-+#include <linux/tcp.h>
-+#include <linux/udp.h>
-+#include <linux/skbuff.h>
-+#include <linux/netfilter_ipv4/ip_set_jhash.h>
-+#include <linux/errno.h>
-+#include <asm/uaccess.h>
-+#include <asm/bitops.h>
-+#include <linux/spinlock.h>
-+#include <linux/random.h>
-+
-+#include <net/ip.h>
-+
-+#include <linux/netfilter_ipv4/ip_set_ipportnethash.h>
-+#include <linux/netfilter_ipv4/ip_set_getport.h>
-+
-+static int limit = MAX_RANGE;
-+
-+#define jhash_ip2(map, i, ipport, ip1) \
-+ jhash_2words(ipport, ip1, *(map->initval + i))
-+
-+static inline __u32
-+ipportnethash_id_cidr(struct ip_set *set, ip_set_ip_t *hash_ip,
-+ ip_set_ip_t ip, ip_set_ip_t port,
-+ ip_set_ip_t ip1, uint8_t cidr)
-+{
-+ struct ip_set_ipportnethash *map = set->data;
-+ __u32 id;
-+ u_int16_t i;
-+ struct ipportip *elem;
-+
-+ *hash_ip = pack_ip_port(map, ip, port);
-+ DP("set: %s, ipport:%u.%u.%u.%u:%u, %u.%u.%u.%u",
-+ set->name, HIPQUAD(ip), port, HIPQUAD(*hash_ip));
-+ ip1 = pack_ip_cidr(ip1, cidr);
-+ if (!(*hash_ip || ip1))
-+ return UINT_MAX;
-+
-+ for (i = 0; i < map->probes; i++) {
-+ id = jhash_ip2(map, i, *hash_ip, ip1) % map->hashsize;
-+ DP("hash key: %u", id);
-+ elem = HARRAY_ELEM(map->members, struct ipportip *, id);
-+ if (elem->ip == *hash_ip && elem->ip1 == ip1)
-+ return id;
-+ /* No shortcut - there can be deleted entries. */
-+ }
-+ return UINT_MAX;
-+}
-+
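-+/* Look the entry up under every prefix length currently present in the set
-+ * (listed in map->cidr[]); the first prefix length that matches wins. */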
-+static inline __u32
-+ipportnethash_id(struct ip_set *set, ip_set_ip_t *hash_ip,
-+ ip_set_ip_t ip, ip_set_ip_t port, ip_set_ip_t ip1)
-+{
-+ struct ip_set_ipportnethash *map = set->data;
-+ __u32 id = UINT_MAX;
-+ int i;
-+
-+ for (i = 0; i < 30 && map->cidr[i]; i++) {
-+ id = ipportnethash_id_cidr(set, hash_ip, ip, port, ip1,
-+ map->cidr[i]);
-+ if (id != UINT_MAX)
-+ break;
-+ }
-+ return id;
-+}
-+
-+static inline int
-+ipportnethash_test_cidr(struct ip_set *set, ip_set_ip_t *hash_ip,
-+ ip_set_ip_t ip, ip_set_ip_t port,
-+ ip_set_ip_t ip1, uint8_t cidr)
-+{
-+ struct ip_set_ipportnethash *map = set->data;
-+
-+ if (ip < map->first_ip || ip > map->last_ip)
-+ return -ERANGE;
-+
-+ return (ipportnethash_id_cidr(set, hash_ip, ip, port, ip1,
-+ cidr) != UINT_MAX);
-+}
-+
-+static inline int
-+ipportnethash_test(struct ip_set *set, ip_set_ip_t *hash_ip,
-+ ip_set_ip_t ip, ip_set_ip_t port, ip_set_ip_t ip1)
-+{
-+ struct ip_set_ipportnethash *map = set->data;
-+
-+ if (ip < map->first_ip || ip > map->last_ip)
-+ return -ERANGE;
-+
-+ return (ipportnethash_id(set, hash_ip, ip, port, ip1) != UINT_MAX);
-+}
-+
-+static int
-+ipportnethash_utest(struct ip_set *set, const void *data, u_int32_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ const struct ip_set_req_ipportnethash *req = data;
-+
-+ if (req->cidr <= 0 || req->cidr > 32)
-+ return -EINVAL;
-+ return (req->cidr == 32
-+ ? ipportnethash_test(set, hash_ip, req->ip, req->port,
-+ req->ip1)
-+ : ipportnethash_test_cidr(set, hash_ip, req->ip, req->port,
-+ req->ip1, req->cidr));
-+}
-+
-+#define KADT_CONDITION \
-+ ip_set_ip_t port, ip1; \
-+ \
-+ if (flags[index+2] == 0) \
-+ return 0; \
-+ \
-+ port = get_port(skb, flags[index+1]); \
-+ ip1 = ipaddr(skb, flags[index+2]); \
-+ \
-+ if (port == INVALID_PORT) \
-+ return 0;
-+
-+KADT(ipportnethash, test, ipaddr, port, ip1)
-+
-+static inline int
-+__ipportnet_add(struct ip_set_ipportnethash *map,
-+ ip_set_ip_t hash_ip, ip_set_ip_t ip1)
-+{
-+ __u32 probe;
-+ u_int16_t i;
-+ struct ipportip *elem, *slot = NULL;
-+
-+ for (i = 0; i < map->probes; i++) {
-+ probe = jhash_ip2(map, i, hash_ip, ip1) % map->hashsize;
-+ elem = HARRAY_ELEM(map->members, struct ipportip *, probe);
-+ if (elem->ip == hash_ip && elem->ip1 == ip1)
-+ return -EEXIST;
-+ if (!(slot || elem->ip || elem->ip1))
-+ slot = elem;
-+ /* There can be deleted entries, must check all slots */
-+ }
-+ if (slot) {
-+ slot->ip = hash_ip;
-+ slot->ip1 = ip1;
-+ map->elements++;
-+ return 0;
-+ }
-+ /* Trigger rehashing */
-+ return -EAGAIN;
-+}
-+
-+static inline int
-+__ipportnethash_add(struct ip_set_ipportnethash *map,
-+ struct ipportip *elem)
-+{
-+ return __ipportnet_add(map, elem->ip, elem->ip1);
-+}
-+
-+static inline int
-+ipportnethash_add(struct ip_set *set, ip_set_ip_t *hash_ip,
-+ ip_set_ip_t ip, ip_set_ip_t port,
-+ ip_set_ip_t ip1, uint8_t cidr)
-+{
-+ struct ip_set_ipportnethash *map = set->data;
-+ struct ipportip;
-+ int ret;
-+
-+ if (map->elements > limit)
-+ return -ERANGE;
-+ if (ip < map->first_ip || ip > map->last_ip)
-+ return -ERANGE;
-+ if (cidr <= 0 || cidr >= 32)
-+ return -EINVAL;
-+ if (map->nets[cidr-1] == UINT16_MAX)
-+ return -ERANGE;
-+
-+ *hash_ip = pack_ip_port(map, ip, port);
-+ ip1 = pack_ip_cidr(ip1, cidr);
-+ if (!(*hash_ip || ip1))
-+ return -ERANGE;
-+
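-+	/* nets[cidr-1] counts entries per prefix length; when the first entry
-+	 * of a length is added, record it in map->cidr[] so lookups probe it. */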
-+	ret = __ipportnet_add(map, *hash_ip, ip1);
-+ if (ret == 0) {
-+ if (!map->nets[cidr-1]++)
-+ add_cidr_size(map->cidr, cidr);
-+ map->elements++;
-+ }
-+ return ret;
-+}
-+
-+#undef KADT_CONDITION
-+#define KADT_CONDITION \
-+ struct ip_set_ipportnethash *map = set->data; \
-+ uint8_t cidr = map->cidr[0] ? map->cidr[0] : 31; \
-+ ip_set_ip_t port, ip1; \
-+ \
-+ if (flags[index+2] == 0) \
-+ return 0; \
-+ \
-+ port = get_port(skb, flags[index+1]); \
-+ ip1 = ipaddr(skb, flags[index+2]); \
-+ \
-+ if (port == INVALID_PORT) \
-+ return 0;
-+
-+UADT(ipportnethash, add, req->port, req->ip1, req->cidr)
-+KADT(ipportnethash, add, ipaddr, port, ip1, cidr)
-+
-+static inline void
-+__ipportnethash_retry(struct ip_set_ipportnethash *tmp,
-+ struct ip_set_ipportnethash *map)
-+{
-+ tmp->first_ip = map->first_ip;
-+ tmp->last_ip = map->last_ip;
-+ memcpy(tmp->cidr, map->cidr, sizeof(tmp->cidr));
-+ memcpy(tmp->nets, map->nets, sizeof(tmp->nets));
-+}
-+
-+HASH_RETRY2(ipportnethash, struct ipportip)
-+
-+static inline int
-+ipportnethash_del(struct ip_set *set, ip_set_ip_t *hash_ip,
-+ ip_set_ip_t ip, ip_set_ip_t port,
-+ ip_set_ip_t ip1, uint8_t cidr)
-+{
-+ struct ip_set_ipportnethash *map = set->data;
-+ ip_set_ip_t id;
-+ struct ipportip *elem;
-+
-+ if (ip < map->first_ip || ip > map->last_ip)
-+ return -ERANGE;
-+ if (!ip)
-+ return -ERANGE;
-+ if (cidr <= 0 || cidr >= 32)
-+ return -EINVAL;
-+
-+ id = ipportnethash_id_cidr(set, hash_ip, ip, port, ip1, cidr);
-+
-+ if (id == UINT_MAX)
-+ return -EEXIST;
-+
-+ elem = HARRAY_ELEM(map->members, struct ipportip *, id);
-+ elem->ip = elem->ip1 = 0;
-+ map->elements--;
-+ if (!map->nets[cidr-1]--)
-+ del_cidr_size(map->cidr, cidr);
-+
-+ return 0;
-+}
-+
-+UADT(ipportnethash, del, req->port, req->ip1, req->cidr)
-+KADT(ipportnethash, del, ipaddr, port, ip1, cidr)
-+
-+static inline int
-+__ipportnethash_create(const struct ip_set_req_ipportnethash_create *req,
-+ struct ip_set_ipportnethash *map)
-+{
-+ if (req->to - req->from > MAX_RANGE) {
-+ ip_set_printk("range too big, %d elements (max %d)",
-+ req->to - req->from + 1, MAX_RANGE+1);
-+ return -ENOEXEC;
-+ }
-+ map->first_ip = req->from;
-+ map->last_ip = req->to;
-+ memset(map->cidr, 0, sizeof(map->cidr));
-+ memset(map->nets, 0, sizeof(map->nets));
-+ return 0;
-+}
-+
-+HASH_CREATE(ipportnethash, struct ipportip)
-+HASH_DESTROY(ipportnethash)
-+HASH_FLUSH_CIDR(ipportnethash, struct ipportip);
-+
-+static inline void
-+__ipportnethash_list_header(const struct ip_set_ipportnethash *map,
-+ struct ip_set_req_ipportnethash_create *header)
-+{
-+ header->from = map->first_ip;
-+ header->to = map->last_ip;
-+}
-+
-+HASH_LIST_HEADER(ipportnethash)
-+
-+HASH_LIST_MEMBERS_SIZE(ipportnethash, struct ipportip)
-+HASH_LIST_MEMBERS_MEMCPY(ipportnethash, struct ipportip)
-+
-+IP_SET_RTYPE(ipportnethash, IPSET_TYPE_IP | IPSET_TYPE_PORT
-+ | IPSET_TYPE_IP1 | IPSET_DATA_TRIPLE)
-+
-+MODULE_LICENSE("GPL");
-+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-+MODULE_DESCRIPTION("ipportnethash type of IP sets");
-+module_param(limit, int, 0600);
-+MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
-+
-+REGISTER_MODULE(ipportnethash)
---- /dev/null
-+++ b/net/ipv4/netfilter/ip_set_iptree.c
-@@ -0,0 +1,466 @@
-+/* Copyright (C) 2005-2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+/* Kernel module implementing an IP set type: the iptree type */
-+
-+#include <linux/module.h>
-+#include <linux/moduleparam.h>
-+#include <linux/ip.h>
-+#include <linux/skbuff.h>
-+#include <linux/slab.h>
-+#include <linux/delay.h>
-+#include <linux/errno.h>
-+#include <asm/uaccess.h>
-+#include <asm/bitops.h>
-+#include <linux/spinlock.h>
-+#include <linux/timer.h>
-+
-+#include <linux/netfilter_ipv4/ip_set.h>
-+#include <linux/netfilter_ipv4/ip_set_bitmaps.h>
-+#include <linux/netfilter_ipv4/ip_set_iptree.h>
-+
-+static int limit = MAX_RANGE;
-+
-+/* Garbage collection interval in seconds: */
-+#define IPTREE_GC_TIME 5*60
-+/* Sleep this many milliseconds before trying again
-+ * to delete the gc timer when destroying/flushing a set */
-+#define IPTREE_DESTROY_SLEEP 100
-+
-+static __KMEM_CACHE_T__ *branch_cachep;
-+static __KMEM_CACHE_T__ *leaf_cachep;
-+
-+
-+#if defined(__LITTLE_ENDIAN)
-+#define ABCD(a,b,c,d,addrp) do { \
-+ a = ((unsigned char *)addrp)[3]; \
-+ b = ((unsigned char *)addrp)[2]; \
-+ c = ((unsigned char *)addrp)[1]; \
-+ d = ((unsigned char *)addrp)[0]; \
-+} while (0)
-+#elif defined(__BIG_ENDIAN)
-+#define ABCD(a,b,c,d,addrp) do { \
-+ a = ((unsigned char *)addrp)[0]; \
-+ b = ((unsigned char *)addrp)[1]; \
-+ c = ((unsigned char *)addrp)[2]; \
-+ d = ((unsigned char *)addrp)[3]; \
-+} while (0)
-+#else
-+#error "Please fix asm/byteorder.h"
-+#endif /* __LITTLE_ENDIAN */
-+
-+#define TESTIP_WALK(map, elem, branch) do { \
-+ if ((map)->tree[elem]) { \
-+ branch = (map)->tree[elem]; \
-+ } else \
-+ return 0; \
-+} while (0)
-+
-+static inline int
-+iptree_test(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
-+{
-+ struct ip_set_iptree *map = set->data;
-+ struct ip_set_iptreeb *btree;
-+ struct ip_set_iptreec *ctree;
-+ struct ip_set_iptreed *dtree;
-+ unsigned char a,b,c,d;
-+
-+ if (!ip)
-+ return -ERANGE;
-+
-+ *hash_ip = ip;
-+ ABCD(a, b, c, d, hash_ip);
-+ DP("%u %u %u %u timeout %u", a, b, c, d, map->timeout);
-+ TESTIP_WALK(map, a, btree);
-+ TESTIP_WALK(btree, b, ctree);
-+ TESTIP_WALK(ctree, c, dtree);
-+ DP("%lu %lu", dtree->expires[d], jiffies);
-+ return dtree->expires[d]
-+ && (!map->timeout
-+ || time_after(dtree->expires[d], jiffies));
-+}
-+
-+#define KADT_CONDITION
-+
-+UADT(iptree, test)
-+KADT(iptree, test, ipaddr)
-+
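-+/* Descend one tree level, allocating a zeroed branch from the given slab
-+ * cache (GFP_ATOMIC) if it does not exist yet. */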
-+#define ADDIP_WALK(map, elem, branch, type, cachep) do { \
-+ if ((map)->tree[elem]) { \
-+ DP("found %u", elem); \
-+ branch = (map)->tree[elem]; \
-+ } else { \
-+ branch = (type *) \
-+ kmem_cache_alloc(cachep, GFP_ATOMIC); \
-+ if (branch == NULL) \
-+ return -ENOMEM; \
-+ memset(branch, 0, sizeof(*branch)); \
-+ (map)->tree[elem] = branch; \
-+ DP("alloc %u", elem); \
-+ } \
-+} while (0)
-+
-+static inline int
-+iptree_add(struct ip_set *set, ip_set_ip_t *hash_ip,
-+ ip_set_ip_t ip, unsigned int timeout)
-+{
-+ struct ip_set_iptree *map = set->data;
-+ struct ip_set_iptreeb *btree;
-+ struct ip_set_iptreec *ctree;
-+ struct ip_set_iptreed *dtree;
-+ unsigned char a,b,c,d;
-+ int ret = 0;
-+
-+ if (!ip || map->elements >= limit)
-+ /* We could call the garbage collector
-+ * but it's probably overkill */
-+ return -ERANGE;
-+
-+ *hash_ip = ip;
-+ ABCD(a, b, c, d, hash_ip);
-+ DP("%u %u %u %u timeout %u", a, b, c, d, timeout);
-+ ADDIP_WALK(map, a, btree, struct ip_set_iptreeb, branch_cachep);
-+ ADDIP_WALK(btree, b, ctree, struct ip_set_iptreec, branch_cachep);
-+ ADDIP_WALK(ctree, c, dtree, struct ip_set_iptreed, leaf_cachep);
-+ if (dtree->expires[d]
-+ && (!map->timeout || time_after(dtree->expires[d], jiffies)))
-+ ret = -EEXIST;
-+ if (map->timeout && timeout == 0)
-+ timeout = map->timeout;
-+ dtree->expires[d] = map->timeout ? (timeout * HZ + jiffies) : 1;
-+ /* Lottery: I won! */
-+ if (dtree->expires[d] == 0)
-+ dtree->expires[d] = 1;
-+ DP("%u %lu", d, dtree->expires[d]);
-+ if (ret == 0)
-+ map->elements++;
-+ return ret;
-+}
-+
-+UADT(iptree, add, req->timeout)
-+KADT(iptree, add, ipaddr, 0)
-+
-+#define DELIP_WALK(map, elem, branch) do { \
-+ if ((map)->tree[elem]) { \
-+ branch = (map)->tree[elem]; \
-+ } else \
-+ return -EEXIST; \
-+} while (0)
-+
-+static inline int
-+iptree_del(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
-+{
-+ struct ip_set_iptree *map = set->data;
-+ struct ip_set_iptreeb *btree;
-+ struct ip_set_iptreec *ctree;
-+ struct ip_set_iptreed *dtree;
-+ unsigned char a,b,c,d;
-+
-+ if (!ip)
-+ return -ERANGE;
-+
-+ *hash_ip = ip;
-+ ABCD(a, b, c, d, hash_ip);
-+ DELIP_WALK(map, a, btree);
-+ DELIP_WALK(btree, b, ctree);
-+ DELIP_WALK(ctree, c, dtree);
-+
-+ if (dtree->expires[d]) {
-+ dtree->expires[d] = 0;
-+ map->elements--;
-+ return 0;
-+ }
-+ return -EEXIST;
-+}
-+
-+UADT(iptree, del)
-+KADT(iptree, del, ipaddr)
-+
-+#define LOOP_WALK_BEGIN(map, i, branch) \
-+ for (i = 0; i < 256; i++) { \
-+ if (!(map)->tree[i]) \
-+ continue; \
-+ branch = (map)->tree[i]
-+
-+#define LOOP_WALK_END }
-+
-+static void
-+ip_tree_gc(unsigned long ul_set)
-+{
-+ struct ip_set *set = (struct ip_set *) ul_set;
-+ struct ip_set_iptree *map = set->data;
-+ struct ip_set_iptreeb *btree;
-+ struct ip_set_iptreec *ctree;
-+ struct ip_set_iptreed *dtree;
-+ unsigned int a,b,c,d;
-+ unsigned char i,j,k;
-+
-+ i = j = k = 0;
-+ DP("gc: %s", set->name);
-+ write_lock_bh(&set->lock);
-+ LOOP_WALK_BEGIN(map, a, btree);
-+ LOOP_WALK_BEGIN(btree, b, ctree);
-+ LOOP_WALK_BEGIN(ctree, c, dtree);
-+ for (d = 0; d < 256; d++) {
-+ if (dtree->expires[d]) {
-+ DP("gc: %u %u %u %u: expires %lu jiffies %lu",
-+ a, b, c, d,
-+ dtree->expires[d], jiffies);
-+ if (map->timeout
-+ && time_before(dtree->expires[d], jiffies)) {
-+ dtree->expires[d] = 0;
-+ map->elements--;
-+ } else
-+ k = 1;
-+ }
-+ }
-+ if (k == 0) {
-+ DP("gc: %s: leaf %u %u %u empty",
-+ set->name, a, b, c);
-+ kmem_cache_free(leaf_cachep, dtree);
-+ ctree->tree[c] = NULL;
-+ } else {
-+ DP("gc: %s: leaf %u %u %u not empty",
-+ set->name, a, b, c);
-+ j = 1;
-+ k = 0;
-+ }
-+ LOOP_WALK_END;
-+ if (j == 0) {
-+ DP("gc: %s: branch %u %u empty",
-+ set->name, a, b);
-+ kmem_cache_free(branch_cachep, ctree);
-+ btree->tree[b] = NULL;
-+ } else {
-+ DP("gc: %s: branch %u %u not empty",
-+ set->name, a, b);
-+ i = 1;
-+ j = k = 0;
-+ }
-+ LOOP_WALK_END;
-+ if (i == 0) {
-+ DP("gc: %s: branch %u empty",
-+ set->name, a);
-+ kmem_cache_free(branch_cachep, btree);
-+ map->tree[a] = NULL;
-+ } else {
-+ DP("gc: %s: branch %u not empty",
-+ set->name, a);
-+ i = j = k = 0;
-+ }
-+ LOOP_WALK_END;
-+ write_unlock_bh(&set->lock);
-+
-+ map->gc.expires = jiffies + map->gc_interval * HZ;
-+ add_timer(&map->gc);
-+}
-+
-+static inline void
-+init_gc_timer(struct ip_set *set)
-+{
-+ struct ip_set_iptree *map = set->data;
-+
-+ /* Even if there is no timeout for the entries,
-+ * we still have to call gc because delete
-+	 * does not clean up empty branches */
-+ map->gc_interval = IPTREE_GC_TIME;
-+ init_timer(&map->gc);
-+ map->gc.data = (unsigned long) set;
-+ map->gc.function = ip_tree_gc;
-+ map->gc.expires = jiffies + map->gc_interval * HZ;
-+ add_timer(&map->gc);
-+}
-+
-+static int
-+iptree_create(struct ip_set *set, const void *data, u_int32_t size)
-+{
-+ const struct ip_set_req_iptree_create *req = data;
-+ struct ip_set_iptree *map;
-+
-+ if (size != sizeof(struct ip_set_req_iptree_create)) {
-+ ip_set_printk("data length wrong (want %zu, have %lu)",
-+ sizeof(struct ip_set_req_iptree_create),
-+ (unsigned long)size);
-+ return -EINVAL;
-+ }
-+
-+ map = kmalloc(sizeof(struct ip_set_iptree), GFP_KERNEL);
-+ if (!map) {
-+ DP("out of memory for %zu bytes",
-+ sizeof(struct ip_set_iptree));
-+ return -ENOMEM;
-+ }
-+ memset(map, 0, sizeof(*map));
-+ map->timeout = req->timeout;
-+ map->elements = 0;
-+ set->data = map;
-+
-+ init_gc_timer(set);
-+
-+ return 0;
-+}
-+
-+static inline void
-+__flush(struct ip_set_iptree *map)
-+{
-+ struct ip_set_iptreeb *btree;
-+ struct ip_set_iptreec *ctree;
-+ struct ip_set_iptreed *dtree;
-+ unsigned int a,b,c;
-+
-+ LOOP_WALK_BEGIN(map, a, btree);
-+ LOOP_WALK_BEGIN(btree, b, ctree);
-+ LOOP_WALK_BEGIN(ctree, c, dtree);
-+ kmem_cache_free(leaf_cachep, dtree);
-+ LOOP_WALK_END;
-+ kmem_cache_free(branch_cachep, ctree);
-+ LOOP_WALK_END;
-+ kmem_cache_free(branch_cachep, btree);
-+ LOOP_WALK_END;
-+ map->elements = 0;
-+}
-+
-+static void
-+iptree_destroy(struct ip_set *set)
-+{
-+ struct ip_set_iptree *map = set->data;
-+
-+ /* gc might be running */
-+ while (!del_timer(&map->gc))
-+ msleep(IPTREE_DESTROY_SLEEP);
-+ __flush(map);
-+ kfree(map);
-+ set->data = NULL;
-+}
-+
-+static void
-+iptree_flush(struct ip_set *set)
-+{
-+ struct ip_set_iptree *map = set->data;
-+ unsigned int timeout = map->timeout;
-+
-+ /* gc might be running */
-+ while (!del_timer(&map->gc))
-+ msleep(IPTREE_DESTROY_SLEEP);
-+ __flush(map);
-+ memset(map, 0, sizeof(*map));
-+ map->timeout = timeout;
-+
-+ init_gc_timer(set);
-+}
-+
-+static void
-+iptree_list_header(const struct ip_set *set, void *data)
-+{
-+ const struct ip_set_iptree *map = set->data;
-+ struct ip_set_req_iptree_create *header = data;
-+
-+ header->timeout = map->timeout;
-+}
-+
-+static int
-+iptree_list_members_size(const struct ip_set *set)
-+{
-+ const struct ip_set_iptree *map = set->data;
-+ struct ip_set_iptreeb *btree;
-+ struct ip_set_iptreec *ctree;
-+ struct ip_set_iptreed *dtree;
-+ unsigned int a,b,c,d;
-+ unsigned int count = 0;
-+
-+ LOOP_WALK_BEGIN(map, a, btree);
-+ LOOP_WALK_BEGIN(btree, b, ctree);
-+ LOOP_WALK_BEGIN(ctree, c, dtree);
-+ for (d = 0; d < 256; d++) {
-+ if (dtree->expires[d]
-+ && (!map->timeout || time_after(dtree->expires[d], jiffies)))
-+ count++;
-+ }
-+ LOOP_WALK_END;
-+ LOOP_WALK_END;
-+ LOOP_WALK_END;
-+
-+ DP("members %u", count);
-+ return (count * sizeof(struct ip_set_req_iptree));
-+}
-+
-+static void
-+iptree_list_members(const struct ip_set *set, void *data)
-+{
-+ const struct ip_set_iptree *map = set->data;
-+ struct ip_set_iptreeb *btree;
-+ struct ip_set_iptreec *ctree;
-+ struct ip_set_iptreed *dtree;
-+ unsigned int a,b,c,d;
-+ size_t offset = 0;
-+ struct ip_set_req_iptree *entry;
-+
-+ LOOP_WALK_BEGIN(map, a, btree);
-+ LOOP_WALK_BEGIN(btree, b, ctree);
-+ LOOP_WALK_BEGIN(ctree, c, dtree);
-+ for (d = 0; d < 256; d++) {
-+ if (dtree->expires[d]
-+ && (!map->timeout || time_after(dtree->expires[d], jiffies))) {
-+ entry = data + offset;
-+ entry->ip = ((a << 24) | (b << 16) | (c << 8) | d);
-+ entry->timeout = !map->timeout ? 0
-+ : (dtree->expires[d] - jiffies)/HZ;
-+ offset += sizeof(struct ip_set_req_iptree);
-+ }
-+ }
-+ LOOP_WALK_END;
-+ LOOP_WALK_END;
-+ LOOP_WALK_END;
-+}
-+
-+IP_SET_TYPE(iptree, IPSET_TYPE_IP | IPSET_DATA_SINGLE)
-+
-+MODULE_LICENSE("GPL");
-+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-+MODULE_DESCRIPTION("iptree type of IP sets");
-+module_param(limit, int, 0600);
-+MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
-+
-+static int __init ip_set_iptree_init(void)
-+{
-+ int ret;
-+
-+ branch_cachep = KMEM_CACHE_CREATE("ip_set_iptreeb",
-+ sizeof(struct ip_set_iptreeb));
-+ if (!branch_cachep) {
-+ printk(KERN_ERR "Unable to create ip_set_iptreeb slab cache\n");
-+ ret = -ENOMEM;
-+ goto out;
-+ }
-+ leaf_cachep = KMEM_CACHE_CREATE("ip_set_iptreed",
-+ sizeof(struct ip_set_iptreed));
-+ if (!leaf_cachep) {
-+ printk(KERN_ERR "Unable to create ip_set_iptreed slab cache\n");
-+ ret = -ENOMEM;
-+ goto free_branch;
-+ }
-+ ret = ip_set_register_set_type(&ip_set_iptree);
-+ if (ret == 0)
-+ goto out;
-+
-+ kmem_cache_destroy(leaf_cachep);
-+ free_branch:
-+ kmem_cache_destroy(branch_cachep);
-+ out:
-+ return ret;
-+}
-+
-+static void __exit ip_set_iptree_fini(void)
-+{
-+ /* FIXME: possible race with ip_set_create() */
-+ ip_set_unregister_set_type(&ip_set_iptree);
-+ kmem_cache_destroy(leaf_cachep);
-+ kmem_cache_destroy(branch_cachep);
-+}
-+
-+module_init(ip_set_iptree_init);
-+module_exit(ip_set_iptree_fini);
---- /dev/null
-+++ b/net/ipv4/netfilter/ip_set_iptreemap.c
-@@ -0,0 +1,708 @@
-+/* Copyright (C) 2007 Sven Wegener <sven.wegener@stealer.net>
-+ *
-+ * This program is free software; you can redistribute it and/or modify it
-+ * under the terms of the GNU General Public License version 2 as published by
-+ * the Free Software Foundation.
-+ */
-+
-+/* This module implements the iptreemap ipset type. It uses bitmaps to
-+ * represent every single IPv4 address as a bit. The bitmaps are managed in a
-+ * tree structure, where the first three octets of an address are used as an
-+ * index to find the bitmap and the last octet is used as the bit number.
-+ */
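-+/* For example, 192.0.2.130 is stored as bit 130 of the bitmap reached via
-+ * tree[192]->tree[0]->tree[2]. */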
-+
-+#include <linux/kernel.h>
-+#include <linux/module.h>
-+#include <linux/ip.h>
-+#include <linux/skbuff.h>
-+#include <linux/slab.h>
-+#include <linux/delay.h>
-+#include <linux/errno.h>
-+#include <asm/uaccess.h>
-+#include <asm/bitops.h>
-+#include <linux/spinlock.h>
-+#include <linux/timer.h>
-+
-+#include <linux/netfilter_ipv4/ip_set.h>
-+#include <linux/netfilter_ipv4/ip_set_bitmaps.h>
-+#include <linux/netfilter_ipv4/ip_set_iptreemap.h>
-+
-+#define IPTREEMAP_DEFAULT_GC_TIME (5 * 60)
-+#define IPTREEMAP_DESTROY_SLEEP (100)
-+
-+static __KMEM_CACHE_T__ *cachep_b;
-+static __KMEM_CACHE_T__ *cachep_c;
-+static __KMEM_CACHE_T__ *cachep_d;
-+
-+static struct ip_set_iptreemap_d *fullbitmap_d;
-+static struct ip_set_iptreemap_c *fullbitmap_c;
-+static struct ip_set_iptreemap_b *fullbitmap_b;
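-+/* Shared sentinel nodes standing for completely filled subtrees: whole /8,
-+ * /16 or /24 blocks point at these instead of carrying private bitmaps, and
-+ * the gc pass swaps nodes to and from them as subtrees fill up or empty. */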
-+
-+#if defined(__LITTLE_ENDIAN)
-+#define ABCD(a, b, c, d, addr) \
-+ do { \
-+ a = ((unsigned char *)addr)[3]; \
-+ b = ((unsigned char *)addr)[2]; \
-+ c = ((unsigned char *)addr)[1]; \
-+ d = ((unsigned char *)addr)[0]; \
-+ } while (0)
-+#elif defined(__BIG_ENDIAN)
-+#define ABCD(a,b,c,d,addrp) do { \
-+ a = ((unsigned char *)addrp)[0]; \
-+ b = ((unsigned char *)addrp)[1]; \
-+ c = ((unsigned char *)addrp)[2]; \
-+ d = ((unsigned char *)addrp)[3]; \
-+} while (0)
-+#else
-+#error "Please fix asm/byteorder.h"
-+#endif /* __LITTLE_ENDIAN */
-+
-+#define TESTIP_WALK(map, elem, branch, full) \
-+ do { \
-+ branch = (map)->tree[elem]; \
-+ if (!branch) \
-+ return 0; \
-+ else if (branch == full) \
-+ return 1; \
-+ } while (0)
-+
-+#define ADDIP_WALK(map, elem, branch, type, cachep, full) \
-+ do { \
-+ branch = (map)->tree[elem]; \
-+ if (!branch) { \
-+ branch = (type *) kmem_cache_alloc(cachep, GFP_ATOMIC); \
-+ if (!branch) \
-+ return -ENOMEM; \
-+ memset(branch, 0, sizeof(*branch)); \
-+ (map)->tree[elem] = branch; \
-+ } else if (branch == full) { \
-+ return -EEXIST; \
-+ } \
-+ } while (0)
-+
-+#define ADDIP_RANGE_LOOP(map, a, a1, a2, hint, branch, full, cachep, free) \
-+ for (a = a1; a <= a2; a++) { \
-+ branch = (map)->tree[a]; \
-+ if (branch != full) { \
-+ if ((a > a1 && a < a2) || (hint)) { \
-+ if (branch) \
-+ free(branch); \
-+ (map)->tree[a] = full; \
-+ continue; \
-+ } else if (!branch) { \
-+ branch = kmem_cache_alloc(cachep, GFP_ATOMIC); \
-+ if (!branch) \
-+ return -ENOMEM; \
-+ memset(branch, 0, sizeof(*branch)); \
-+ (map)->tree[a] = branch; \
-+ }
-+
-+#define ADDIP_RANGE_LOOP_END() \
-+ } \
-+ }
-+
-+#define DELIP_WALK(map, elem, branch, cachep, full, flags) \
-+ do { \
-+ branch = (map)->tree[elem]; \
-+ if (!branch) { \
-+ return -EEXIST; \
-+ } else if (branch == full) { \
-+ branch = kmem_cache_alloc(cachep, flags); \
-+ if (!branch) \
-+ return -ENOMEM; \
-+ memcpy(branch, full, sizeof(*full)); \
-+ (map)->tree[elem] = branch; \
-+ } \
-+ } while (0)
-+
-+#define DELIP_RANGE_LOOP(map, a, a1, a2, hint, branch, full, cachep, free, flags) \
-+ for (a = a1; a <= a2; a++) { \
-+ branch = (map)->tree[a]; \
-+ if (branch) { \
-+ if ((a > a1 && a < a2) || (hint)) { \
-+ if (branch != full) \
-+ free(branch); \
-+ (map)->tree[a] = NULL; \
-+ continue; \
-+ } else if (branch == full) { \
-+ branch = kmem_cache_alloc(cachep, flags); \
-+ if (!branch) \
-+ return -ENOMEM; \
-+ memcpy(branch, full, sizeof(*branch)); \
-+ (map)->tree[a] = branch; \
-+ }
-+
-+#define DELIP_RANGE_LOOP_END() \
-+ } \
-+ }
-+
-+#define LOOP_WALK_BEGIN(map, i, branch) \
-+ for (i = 0; i < 256; i++) { \
-+ branch = (map)->tree[i]; \
-+ if (likely(!branch)) \
-+ continue;
-+
-+#define LOOP_WALK_END() \
-+ }
-+
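-+/* Child accounting for the gc walk: the counter starts at -256, gains one
-+ * for every allocated child and one more for every child equal to the
-+ * "full" sentinel.  A final value of -256 means the node has no children
-+ * left, +256 means all 256 children are full; LOOP_WALK_END_GC then frees
-+ * or collapses the node accordingly. */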
-+#define LOOP_WALK_BEGIN_GC(map, i, branch, full, cachep, count) \
-+ count = -256; \
-+ for (i = 0; i < 256; i++) { \
-+ branch = (map)->tree[i]; \
-+ if (likely(!branch)) \
-+ continue; \
-+ count++; \
-+ if (branch == full) { \
-+ count++; \
-+ continue; \
-+ }
-+
-+#define LOOP_WALK_END_GC(map, i, branch, full, cachep, count) \
-+ if (-256 == count) { \
-+ kmem_cache_free(cachep, branch); \
-+ (map)->tree[i] = NULL; \
-+ } else if (256 == count) { \
-+ kmem_cache_free(cachep, branch); \
-+ (map)->tree[i] = full; \
-+ } \
-+ }
-+
-+#define LOOP_WALK_BEGIN_COUNT(map, i, branch, inrange, count) \
-+ for (i = 0; i < 256; i++) { \
-+ if (!(map)->tree[i]) { \
-+ if (inrange) { \
-+ count++; \
-+ inrange = 0; \
-+ } \
-+ continue; \
-+ } \
-+ branch = (map)->tree[i];
-+
-+#define LOOP_WALK_END_COUNT() \
-+ }
-+
-+#define GETVALUE1(a, a1, b1, r) \
-+ (a == a1 ? b1 : r)
-+
-+#define GETVALUE2(a, b, a1, b1, c1, r) \
-+ (a == a1 && b == b1 ? c1 : r)
-+
-+#define GETVALUE3(a, b, c, a1, b1, c1, d1, r) \
-+ (a == a1 && b == b1 && c == c1 ? d1 : r)
-+
-+#define CHECK1(a, a1, a2, b1, b2, c1, c2, d1, d2) \
-+ ( \
-+ GETVALUE1(a, a1, b1, 0) == 0 \
-+ && GETVALUE1(a, a2, b2, 255) == 255 \
-+ && c1 == 0 \
-+ && c2 == 255 \
-+ && d1 == 0 \
-+ && d2 == 255 \
-+ )
-+
-+#define CHECK2(a, b, a1, a2, b1, b2, c1, c2, d1, d2) \
-+ ( \
-+ GETVALUE2(a, b, a1, b1, c1, 0) == 0 \
-+ && GETVALUE2(a, b, a2, b2, c2, 255) == 255 \
-+ && d1 == 0 \
-+ && d2 == 255 \
-+ )
-+
-+#define CHECK3(a, b, c, a1, a2, b1, b2, c1, c2, d1, d2) \
-+ ( \
-+ GETVALUE3(a, b, c, a1, b1, c1, d1, 0) == 0 \
-+ && GETVALUE3(a, b, c, a2, b2, c2, d2, 255) == 255 \
-+ )
-+
-+
-+static inline void
-+free_d(struct ip_set_iptreemap_d *map)
-+{
-+ kmem_cache_free(cachep_d, map);
-+}
-+
-+static inline void
-+free_c(struct ip_set_iptreemap_c *map)
-+{
-+ struct ip_set_iptreemap_d *dtree;
-+ unsigned int i;
-+
-+ LOOP_WALK_BEGIN(map, i, dtree) {
-+ if (dtree != fullbitmap_d)
-+ free_d(dtree);
-+ } LOOP_WALK_END();
-+
-+ kmem_cache_free(cachep_c, map);
-+}
-+
-+static inline void
-+free_b(struct ip_set_iptreemap_b *map)
-+{
-+ struct ip_set_iptreemap_c *ctree;
-+ unsigned int i;
-+
-+ LOOP_WALK_BEGIN(map, i, ctree) {
-+ if (ctree != fullbitmap_c)
-+ free_c(ctree);
-+ } LOOP_WALK_END();
-+
-+ kmem_cache_free(cachep_b, map);
-+}
-+
-+static inline int
-+iptreemap_test(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
-+{
-+ struct ip_set_iptreemap *map = set->data;
-+ struct ip_set_iptreemap_b *btree;
-+ struct ip_set_iptreemap_c *ctree;
-+ struct ip_set_iptreemap_d *dtree;
-+ unsigned char a, b, c, d;
-+
-+ *hash_ip = ip;
-+
-+ ABCD(a, b, c, d, hash_ip);
-+
-+ TESTIP_WALK(map, a, btree, fullbitmap_b);
-+ TESTIP_WALK(btree, b, ctree, fullbitmap_c);
-+ TESTIP_WALK(ctree, c, dtree, fullbitmap_d);
-+
-+ return !!test_bit(d, (void *) dtree->bitmap);
-+}
-+
-+#define KADT_CONDITION
-+
-+UADT(iptreemap, test)
-+KADT(iptreemap, test, ipaddr)
-+
-+static inline int
-+__addip_single(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
-+{
-+ struct ip_set_iptreemap *map = (struct ip_set_iptreemap *) set->data;
-+ struct ip_set_iptreemap_b *btree;
-+ struct ip_set_iptreemap_c *ctree;
-+ struct ip_set_iptreemap_d *dtree;
-+ unsigned char a, b, c, d;
-+
-+ *hash_ip = ip;
-+
-+ ABCD(a, b, c, d, hash_ip);
-+
-+ ADDIP_WALK(map, a, btree, struct ip_set_iptreemap_b, cachep_b, fullbitmap_b);
-+ ADDIP_WALK(btree, b, ctree, struct ip_set_iptreemap_c, cachep_c, fullbitmap_c);
-+ ADDIP_WALK(ctree, c, dtree, struct ip_set_iptreemap_d, cachep_d, fullbitmap_d);
-+
-+ if (__test_and_set_bit(d, (void *) dtree->bitmap))
-+ return -EEXIST;
-+
-+ __set_bit(b, (void *) btree->dirty);
-+
-+ return 0;
-+}
-+
-+static inline int
-+iptreemap_add(struct ip_set *set, ip_set_ip_t *hash_ip,
-+ ip_set_ip_t start, ip_set_ip_t end)
-+{
-+ struct ip_set_iptreemap *map = set->data;
-+ struct ip_set_iptreemap_b *btree;
-+ struct ip_set_iptreemap_c *ctree;
-+ struct ip_set_iptreemap_d *dtree;
-+ unsigned int a, b, c, d;
-+ unsigned char a1, b1, c1, d1;
-+ unsigned char a2, b2, c2, d2;
-+
-+ if (start == end)
-+ return __addip_single(set, hash_ip, start);
-+
-+ *hash_ip = start;
-+
-+ ABCD(a1, b1, c1, d1, &start);
-+ ABCD(a2, b2, c2, d2, &end);
-+
-+ /* This is sooo ugly... */
-+ ADDIP_RANGE_LOOP(map, a, a1, a2, CHECK1(a, a1, a2, b1, b2, c1, c2, d1, d2), btree, fullbitmap_b, cachep_b, free_b) {
-+ ADDIP_RANGE_LOOP(btree, b, GETVALUE1(a, a1, b1, 0), GETVALUE1(a, a2, b2, 255), CHECK2(a, b, a1, a2, b1, b2, c1, c2, d1, d2), ctree, fullbitmap_c, cachep_c, free_c) {
-+ ADDIP_RANGE_LOOP(ctree, c, GETVALUE2(a, b, a1, b1, c1, 0), GETVALUE2(a, b, a2, b2, c2, 255), CHECK3(a, b, c, a1, a2, b1, b2, c1, c2, d1, d2), dtree, fullbitmap_d, cachep_d, free_d) {
-+ for (d = GETVALUE3(a, b, c, a1, b1, c1, d1, 0); d <= GETVALUE3(a, b, c, a2, b2, c2, d2, 255); d++)
-+ __set_bit(d, (void *) dtree->bitmap);
-+ __set_bit(b, (void *) btree->dirty);
-+ } ADDIP_RANGE_LOOP_END();
-+ } ADDIP_RANGE_LOOP_END();
-+ } ADDIP_RANGE_LOOP_END();
-+
-+ return 0;
-+}
-+
-+UADT0(iptreemap, add, min(req->ip, req->end), max(req->ip, req->end))
-+KADT(iptreemap, add, ipaddr, ip)
-+
-+static inline int
-+__delip_single(struct ip_set *set, ip_set_ip_t *hash_ip,
-+ ip_set_ip_t ip, gfp_t flags)
-+{
-+ struct ip_set_iptreemap *map = set->data;
-+ struct ip_set_iptreemap_b *btree;
-+ struct ip_set_iptreemap_c *ctree;
-+ struct ip_set_iptreemap_d *dtree;
-+ unsigned char a,b,c,d;
-+
-+ *hash_ip = ip;
-+
-+ ABCD(a, b, c, d, hash_ip);
-+
-+ DELIP_WALK(map, a, btree, cachep_b, fullbitmap_b, flags);
-+ DELIP_WALK(btree, b, ctree, cachep_c, fullbitmap_c, flags);
-+ DELIP_WALK(ctree, c, dtree, cachep_d, fullbitmap_d, flags);
-+
-+ if (!__test_and_clear_bit(d, (void *) dtree->bitmap))
-+ return -EEXIST;
-+
-+ __set_bit(b, (void *) btree->dirty);
-+
-+ return 0;
-+}
-+
-+static inline int
-+iptreemap_del(struct ip_set *set, ip_set_ip_t *hash_ip,
-+ ip_set_ip_t start, ip_set_ip_t end, gfp_t flags)
-+{
-+ struct ip_set_iptreemap *map = set->data;
-+ struct ip_set_iptreemap_b *btree;
-+ struct ip_set_iptreemap_c *ctree;
-+ struct ip_set_iptreemap_d *dtree;
-+ unsigned int a, b, c, d;
-+ unsigned char a1, b1, c1, d1;
-+ unsigned char a2, b2, c2, d2;
-+
-+ if (start == end)
-+ return __delip_single(set, hash_ip, start, flags);
-+
-+ *hash_ip = start;
-+
-+ ABCD(a1, b1, c1, d1, &start);
-+ ABCD(a2, b2, c2, d2, &end);
-+
-+ /* This is sooo ugly... */
-+ DELIP_RANGE_LOOP(map, a, a1, a2, CHECK1(a, a1, a2, b1, b2, c1, c2, d1, d2), btree, fullbitmap_b, cachep_b, free_b, flags) {
-+ DELIP_RANGE_LOOP(btree, b, GETVALUE1(a, a1, b1, 0), GETVALUE1(a, a2, b2, 255), CHECK2(a, b, a1, a2, b1, b2, c1, c2, d1, d2), ctree, fullbitmap_c, cachep_c, free_c, flags) {
-+ DELIP_RANGE_LOOP(ctree, c, GETVALUE2(a, b, a1, b1, c1, 0), GETVALUE2(a, b, a2, b2, c2, 255), CHECK3(a, b, c, a1, a2, b1, b2, c1, c2, d1, d2), dtree, fullbitmap_d, cachep_d, free_d, flags) {
-+ for (d = GETVALUE3(a, b, c, a1, b1, c1, d1, 0); d <= GETVALUE3(a, b, c, a2, b2, c2, d2, 255); d++)
-+ __clear_bit(d, (void *) dtree->bitmap);
-+ __set_bit(b, (void *) btree->dirty);
-+ } DELIP_RANGE_LOOP_END();
-+ } DELIP_RANGE_LOOP_END();
-+ } DELIP_RANGE_LOOP_END();
-+
-+ return 0;
-+}
-+
-+UADT0(iptreemap, del, min(req->ip, req->end), max(req->ip, req->end), GFP_KERNEL)
-+KADT(iptreemap, del, ipaddr, ip, GFP_ATOMIC)
-+
-+/* Check the status of the bitmap
-+ * -1 == all bits cleared
-+ * 1 == all bits set
-+ * 0 == anything else
-+ */
-+static inline int
-+bitmap_status(struct ip_set_iptreemap_d *dtree)
-+{
-+ unsigned char first = dtree->bitmap[0];
-+ int a;
-+
-+ for (a = 1; a < 32; a++)
-+ if (dtree->bitmap[a] != first)
-+ return 0;
-+
-+ return (first == 0 ? -1 : (first == 255 ? 1 : 0));
-+}
-+
-+static void
-+gc(unsigned long addr)
-+{
-+ struct ip_set *set = (struct ip_set *) addr;
-+ struct ip_set_iptreemap *map = set->data;
-+ struct ip_set_iptreemap_b *btree;
-+ struct ip_set_iptreemap_c *ctree;
-+ struct ip_set_iptreemap_d *dtree;
-+ unsigned int a, b, c;
-+ int i, j, k;
-+
-+ write_lock_bh(&set->lock);
-+
-+ LOOP_WALK_BEGIN_GC(map, a, btree, fullbitmap_b, cachep_b, i) {
-+ LOOP_WALK_BEGIN_GC(btree, b, ctree, fullbitmap_c, cachep_c, j) {
-+ if (!__test_and_clear_bit(b, (void *) btree->dirty))
-+ continue;
-+ LOOP_WALK_BEGIN_GC(ctree, c, dtree, fullbitmap_d, cachep_d, k) {
-+ switch (bitmap_status(dtree)) {
-+ case -1:
-+ kmem_cache_free(cachep_d, dtree);
-+ ctree->tree[c] = NULL;
-+ k--;
-+ break;
-+ case 1:
-+ kmem_cache_free(cachep_d, dtree);
-+ ctree->tree[c] = fullbitmap_d;
-+ k++;
-+ break;
-+ }
-+ } LOOP_WALK_END();
-+ } LOOP_WALK_END_GC(btree, b, ctree, fullbitmap_c, cachep_c, k);
-+ } LOOP_WALK_END_GC(map, a, btree, fullbitmap_b, cachep_b, j);
-+
-+ write_unlock_bh(&set->lock);
-+
-+ map->gc.expires = jiffies + map->gc_interval * HZ;
-+ add_timer(&map->gc);
-+}
-+
-+static inline void
-+init_gc_timer(struct ip_set *set)
-+{
-+ struct ip_set_iptreemap *map = set->data;
-+
-+ init_timer(&map->gc);
-+ map->gc.data = (unsigned long) set;
-+ map->gc.function = gc;
-+ map->gc.expires = jiffies + map->gc_interval * HZ;
-+ add_timer(&map->gc);
-+}
-+
-+static int
-+iptreemap_create(struct ip_set *set, const void *data, u_int32_t size)
-+{
-+ const struct ip_set_req_iptreemap_create *req = data;
-+ struct ip_set_iptreemap *map;
-+
-+ map = kzalloc(sizeof(*map), GFP_KERNEL);
-+ if (!map)
-+ return -ENOMEM;
-+
-+ map->gc_interval = req->gc_interval ? req->gc_interval : IPTREEMAP_DEFAULT_GC_TIME;
-+ set->data = map;
-+
-+ init_gc_timer(set);
-+
-+ return 0;
-+}
-+
-+static inline void
-+__flush(struct ip_set_iptreemap *map)
-+{
-+ struct ip_set_iptreemap_b *btree;
-+ unsigned int a;
-+
-+ LOOP_WALK_BEGIN(map, a, btree);
-+ if (btree != fullbitmap_b)
-+ free_b(btree);
-+ LOOP_WALK_END();
-+}
-+
-+static void
-+iptreemap_destroy(struct ip_set *set)
-+{
-+ struct ip_set_iptreemap *map = set->data;
-+
-+ while (!del_timer(&map->gc))
-+ msleep(IPTREEMAP_DESTROY_SLEEP);
-+
-+ __flush(map);
-+ kfree(map);
-+
-+ set->data = NULL;
-+}
-+
-+static void
-+iptreemap_flush(struct ip_set *set)
-+{
-+ struct ip_set_iptreemap *map = set->data;
-+
-+ while (!del_timer(&map->gc))
-+ msleep(IPTREEMAP_DESTROY_SLEEP);
-+
-+ __flush(map);
-+
-+ memset(map, 0, sizeof(*map));
-+
-+ init_gc_timer(set);
-+}
-+
-+static void
-+iptreemap_list_header(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_iptreemap *map = set->data;
-+ struct ip_set_req_iptreemap_create *header = data;
-+
-+ header->gc_interval = map->gc_interval;
-+}
-+
-+static int
-+iptreemap_list_members_size(const struct ip_set *set)
-+{
-+ struct ip_set_iptreemap *map = set->data;
-+ struct ip_set_iptreemap_b *btree;
-+ struct ip_set_iptreemap_c *ctree;
-+ struct ip_set_iptreemap_d *dtree;
-+ unsigned int a, b, c, d, inrange = 0, count = 0;
-+
-+ LOOP_WALK_BEGIN_COUNT(map, a, btree, inrange, count) {
-+ LOOP_WALK_BEGIN_COUNT(btree, b, ctree, inrange, count) {
-+ LOOP_WALK_BEGIN_COUNT(ctree, c, dtree, inrange, count) {
-+ for (d = 0; d < 256; d++) {
-+ if (test_bit(d, (void *) dtree->bitmap)) {
-+ inrange = 1;
-+ } else if (inrange) {
-+ count++;
-+ inrange = 0;
-+ }
-+ }
-+ } LOOP_WALK_END_COUNT();
-+ } LOOP_WALK_END_COUNT();
-+ } LOOP_WALK_END_COUNT();
-+
-+ if (inrange)
-+ count++;
-+
-+ return (count * sizeof(struct ip_set_req_iptreemap));
-+}
-+
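-+/* Listing emits inclusive address ranges: runs of consecutive member
-+ * addresses are coalesced and written out as single (ip, end) entries. */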
-+static inline u_int32_t
-+add_member(void *data, size_t offset, ip_set_ip_t start, ip_set_ip_t end)
-+{
-+ struct ip_set_req_iptreemap *entry = data + offset;
-+
-+ entry->ip = start;
-+ entry->end = end;
-+
-+ return sizeof(*entry);
-+}
-+
-+static void
-+iptreemap_list_members(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_iptreemap *map = set->data;
-+ struct ip_set_iptreemap_b *btree;
-+ struct ip_set_iptreemap_c *ctree;
-+ struct ip_set_iptreemap_d *dtree;
-+ unsigned int a, b, c, d, inrange = 0;
-+ size_t offset = 0;
-+ ip_set_ip_t start = 0, end = 0, ip;
-+
-+ LOOP_WALK_BEGIN(map, a, btree) {
-+ LOOP_WALK_BEGIN(btree, b, ctree) {
-+ LOOP_WALK_BEGIN(ctree, c, dtree) {
-+ for (d = 0; d < 256; d++) {
-+ if (test_bit(d, (void *) dtree->bitmap)) {
-+ ip = ((a << 24) | (b << 16) | (c << 8) | d);
-+ if (!inrange) {
-+ inrange = 1;
-+ start = ip;
-+ } else if (end < ip - 1) {
-+ offset += add_member(data, offset, start, end);
-+ start = ip;
-+ }
-+ end = ip;
-+ } else if (inrange) {
-+ offset += add_member(data, offset, start, end);
-+ inrange = 0;
-+ }
-+ }
-+ } LOOP_WALK_END();
-+ } LOOP_WALK_END();
-+ } LOOP_WALK_END();
-+
-+ if (inrange)
-+ add_member(data, offset, start, end);
-+}
-+
-+IP_SET_TYPE(iptreemap, IPSET_TYPE_IP | IPSET_DATA_SINGLE)
-+
-+MODULE_LICENSE("GPL");
-+MODULE_AUTHOR("Sven Wegener <sven.wegener@stealer.net>");
-+MODULE_DESCRIPTION("iptreemap type of IP sets");
-+
-+static int __init ip_set_iptreemap_init(void)
-+{
-+ int ret = -ENOMEM;
-+ int a;
-+
-+ cachep_b = KMEM_CACHE_CREATE("ip_set_iptreemap_b",
-+ sizeof(struct ip_set_iptreemap_b));
-+ if (!cachep_b) {
-+ ip_set_printk("Unable to create ip_set_iptreemap_b slab cache");
-+ goto out;
-+ }
-+
-+ cachep_c = KMEM_CACHE_CREATE("ip_set_iptreemap_c",
-+ sizeof(struct ip_set_iptreemap_c));
-+ if (!cachep_c) {
-+ ip_set_printk("Unable to create ip_set_iptreemap_c slab cache");
-+ goto outb;
-+ }
-+
-+ cachep_d = KMEM_CACHE_CREATE("ip_set_iptreemap_d",
-+ sizeof(struct ip_set_iptreemap_d));
-+ if (!cachep_d) {
-+ ip_set_printk("Unable to create ip_set_iptreemap_d slab cache");
-+ goto outc;
-+ }
-+
-+ fullbitmap_d = kmem_cache_alloc(cachep_d, GFP_KERNEL);
-+ if (!fullbitmap_d)
-+ goto outd;
-+
-+ fullbitmap_c = kmem_cache_alloc(cachep_c, GFP_KERNEL);
-+ if (!fullbitmap_c)
-+ goto outbitmapd;
-+
-+ fullbitmap_b = kmem_cache_alloc(cachep_b, GFP_KERNEL);
-+ if (!fullbitmap_b)
-+ goto outbitmapc;
-+
-+ ret = ip_set_register_set_type(&ip_set_iptreemap);
-+ if (0 > ret)
-+ goto outbitmapb;
-+
-+ /* Now init our global bitmaps */
-+ memset(fullbitmap_d->bitmap, 0xff, sizeof(fullbitmap_d->bitmap));
-+
-+ for (a = 0; a < 256; a++)
-+ fullbitmap_c->tree[a] = fullbitmap_d;
-+
-+ for (a = 0; a < 256; a++)
-+ fullbitmap_b->tree[a] = fullbitmap_c;
-+ memset(fullbitmap_b->dirty, 0, sizeof(fullbitmap_b->dirty));
-+
-+ return 0;
-+
-+outbitmapb:
-+ kmem_cache_free(cachep_b, fullbitmap_b);
-+outbitmapc:
-+ kmem_cache_free(cachep_c, fullbitmap_c);
-+outbitmapd:
-+ kmem_cache_free(cachep_d, fullbitmap_d);
-+outd:
-+ kmem_cache_destroy(cachep_d);
-+outc:
-+ kmem_cache_destroy(cachep_c);
-+outb:
-+ kmem_cache_destroy(cachep_b);
-+out:
-+
-+ return ret;
-+}
-+
-+static void __exit ip_set_iptreemap_fini(void)
-+{
-+ ip_set_unregister_set_type(&ip_set_iptreemap);
-+ kmem_cache_free(cachep_d, fullbitmap_d);
-+ kmem_cache_free(cachep_c, fullbitmap_c);
-+ kmem_cache_free(cachep_b, fullbitmap_b);
-+ kmem_cache_destroy(cachep_d);
-+ kmem_cache_destroy(cachep_c);
-+ kmem_cache_destroy(cachep_b);
-+}
-+
-+module_init(ip_set_iptreemap_init);
-+module_exit(ip_set_iptreemap_fini);
---- /dev/null
-+++ b/net/ipv4/netfilter/ip_set_macipmap.c
-@@ -0,0 +1,164 @@
-+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
-+ * Patrick Schaaf <bof@bof.de>
-+ * Martin Josefsson <gandalf@wlug.westbo.se>
-+ * Copyright (C) 2003-2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+/* Kernel module implementing an IP set type: the macipmap type */
-+
-+#include <linux/module.h>
-+#include <linux/ip.h>
-+#include <linux/skbuff.h>
-+#include <linux/errno.h>
-+#include <asm/uaccess.h>
-+#include <asm/bitops.h>
-+#include <linux/spinlock.h>
-+#include <linux/if_ether.h>
-+
-+#include <linux/netfilter_ipv4/ip_set_macipmap.h>
-+
-+static int
-+macipmap_utest(struct ip_set *set, const void *data, u_int32_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ const struct ip_set_macipmap *map = set->data;
-+ const struct ip_set_macip *table = map->members;
-+ const struct ip_set_req_macipmap *req = data;
-+
-+ if (req->ip < map->first_ip || req->ip > map->last_ip)
-+ return -ERANGE;
-+
-+ *hash_ip = req->ip;
-+ DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u",
-+ set->name, HIPQUAD(req->ip), HIPQUAD(*hash_ip));
-+ if (table[req->ip - map->first_ip].match) {
-+ return (memcmp(req->ethernet,
-+ &table[req->ip - map->first_ip].ethernet,
-+ ETH_ALEN) == 0);
-+ } else {
-+ return (map->flags & IPSET_MACIP_MATCHUNSET ? 1 : 0);
-+ }
-+}
-+
-+static int
-+macipmap_ktest(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ const struct ip_set_macipmap *map = set->data;
-+ const struct ip_set_macip *table = map->members;
-+ ip_set_ip_t ip;
-+
-+ ip = ipaddr(skb, flags[index]);
-+
-+ if (ip < map->first_ip || ip > map->last_ip)
-+ return 0;
-+
-+ *hash_ip = ip;
-+ DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u",
-+ set->name, HIPQUAD(ip), HIPQUAD(*hash_ip));
-+ if (table[ip - map->first_ip].match) {
-+ /* Is mac pointer valid?
-+ * If so, compare... */
-+ return (skb_mac_header(skb) >= skb->head
-+ && (skb_mac_header(skb) + ETH_HLEN) <= skb->data
-+ && (memcmp(eth_hdr(skb)->h_source,
-+ &table[ip - map->first_ip].ethernet,
-+ ETH_ALEN) == 0));
-+ } else {
-+ return (map->flags & IPSET_MACIP_MATCHUNSET ? 1 : 0);
-+ }
-+}
-+
-+/* returns 0 on success */
-+static inline int
-+macipmap_add(struct ip_set *set, ip_set_ip_t *hash_ip,
-+ ip_set_ip_t ip, const unsigned char *ethernet)
-+{
-+ struct ip_set_macipmap *map = set->data;
-+ struct ip_set_macip *table = map->members;
-+
-+ if (ip < map->first_ip || ip > map->last_ip)
-+ return -ERANGE;
-+ if (table[ip - map->first_ip].match)
-+ return -EEXIST;
-+
-+ *hash_ip = ip;
-+ DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
-+ memcpy(&table[ip - map->first_ip].ethernet, ethernet, ETH_ALEN);
-+ table[ip - map->first_ip].match = IPSET_MACIP_ISSET;
-+ return 0;
-+}
-+
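-+/* The kernel-side add needs the source MAC, so bail out with -EINVAL unless
-+ * a complete Ethernet header sits in front of the packet data. */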
-+#define KADT_CONDITION \
-+ if (!(skb_mac_header(skb) >= skb->head \
-+ && (skb_mac_header(skb) + ETH_HLEN) <= skb->data))\
-+ return -EINVAL;
-+
-+UADT(macipmap, add, req->ethernet)
-+KADT(macipmap, add, ipaddr, eth_hdr(skb)->h_source)
-+
-+static inline int
-+macipmap_del(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
-+{
-+ struct ip_set_macipmap *map = set->data;
-+ struct ip_set_macip *table = map->members;
-+
-+ if (ip < map->first_ip || ip > map->last_ip)
-+ return -ERANGE;
-+ if (!table[ip - map->first_ip].match)
-+ return -EEXIST;
-+
-+ *hash_ip = ip;
-+ table[ip - map->first_ip].match = 0;
-+ DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
-+ return 0;
-+}
-+
-+#undef KADT_CONDITION
-+#define KADT_CONDITION
-+
-+UADT(macipmap, del)
-+KADT(macipmap, del, ipaddr)
-+
-+static inline int
-+__macipmap_create(const struct ip_set_req_macipmap_create *req,
-+ struct ip_set_macipmap *map)
-+{
-+ if (req->to - req->from > MAX_RANGE) {
-+ ip_set_printk("range too big, %d elements (max %d)",
-+ req->to - req->from + 1, MAX_RANGE+1);
-+ return -ENOEXEC;
-+ }
-+ map->flags = req->flags;
-+ return (req->to - req->from + 1) * sizeof(struct ip_set_macip);
-+}
-+
-+BITMAP_CREATE(macipmap)
-+BITMAP_DESTROY(macipmap)
-+BITMAP_FLUSH(macipmap)
-+
-+static inline void
-+__macipmap_list_header(const struct ip_set_macipmap *map,
-+ struct ip_set_req_macipmap_create *header)
-+{
-+ header->flags = map->flags;
-+}
-+
-+BITMAP_LIST_HEADER(macipmap)
-+BITMAP_LIST_MEMBERS_SIZE(macipmap)
-+BITMAP_LIST_MEMBERS(macipmap)
-+
-+IP_SET_TYPE(macipmap, IPSET_TYPE_IP | IPSET_DATA_SINGLE)
-+
-+MODULE_LICENSE("GPL");
-+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-+MODULE_DESCRIPTION("macipmap type of IP sets");
-+
-+REGISTER_MODULE(macipmap)
---- /dev/null
-+++ b/net/ipv4/netfilter/ip_set_nethash.c
-@@ -0,0 +1,225 @@
-+/* Copyright (C) 2003-2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+/* Kernel module implementing a cidr nethash set */
-+
-+#include <linux/module.h>
-+#include <linux/moduleparam.h>
-+#include <linux/ip.h>
-+#include <linux/skbuff.h>
-+#include <linux/netfilter_ipv4/ip_set_jhash.h>
-+#include <linux/errno.h>
-+#include <asm/uaccess.h>
-+#include <asm/bitops.h>
-+#include <linux/spinlock.h>
-+#include <linux/random.h>
-+
-+#include <net/ip.h>
-+
-+#include <linux/netfilter_ipv4/ip_set_nethash.h>
-+
-+static int limit = MAX_RANGE;
-+
-+static inline __u32
-+nethash_id_cidr(const struct ip_set_nethash *map,
-+ ip_set_ip_t *hash_ip,
-+ ip_set_ip_t ip,
-+ uint8_t cidr)
-+{
-+ __u32 id;
-+ u_int16_t i;
-+ ip_set_ip_t *elem;
-+
-+ *hash_ip = pack_ip_cidr(ip, cidr);
-+ if (!*hash_ip)
-+ return MAX_RANGE;
-+
-+ for (i = 0; i < map->probes; i++) {
-+ id = jhash_ip(map, i, *hash_ip) % map->hashsize;
-+ DP("hash key: %u", id);
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
-+ if (*elem == *hash_ip)
-+ return id;
-+ /* No shortcut - there can be deleted entries. */
-+ }
-+ return UINT_MAX;
-+}
-+
-+static inline __u32
-+nethash_id(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
-+{
-+ const struct ip_set_nethash *map = set->data;
-+ __u32 id = UINT_MAX;
-+ int i;
-+
-+ for (i = 0; i < 30 && map->cidr[i]; i++) {
-+ id = nethash_id_cidr(map, hash_ip, ip, map->cidr[i]);
-+ if (id != UINT_MAX)
-+ break;
-+ }
-+ return id;
-+}
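-+
-+/*
-+ * Lookups that come without an explicit prefix length (notably the
-+ * kernel-side packet test) try every prefix length currently present
-+ * in the set: map->cidr[] holds the active /cidr values, maintained by
-+ * add_cidr_size()/del_cidr_size() in the add/del paths below, and the
-+ * address is masked and probed once per entry until a match is found.
-+ */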
-+
-+static inline int
-+nethash_test_cidr(struct ip_set *set, ip_set_ip_t *hash_ip,
-+ ip_set_ip_t ip, uint8_t cidr)
-+{
-+ const struct ip_set_nethash *map = set->data;
-+
-+ return (nethash_id_cidr(map, hash_ip, ip, cidr) != UINT_MAX);
-+}
-+
-+static inline int
-+nethash_test(struct ip_set *set, ip_set_ip_t *hash_ip, ip_set_ip_t ip)
-+{
-+ return (nethash_id(set, hash_ip, ip) != UINT_MAX);
-+}
-+
-+static int
-+nethash_utest(struct ip_set *set, const void *data, u_int32_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ const struct ip_set_req_nethash *req = data;
-+
-+ if (req->cidr <= 0 || req->cidr > 32)
-+ return -EINVAL;
-+ return (req->cidr == 32 ? nethash_test(set, hash_ip, req->ip)
-+ : nethash_test_cidr(set, hash_ip, req->ip, req->cidr));
-+}
-+
-+#define KADT_CONDITION
-+
-+KADT(nethash, test, ipaddr)
-+
-+static inline int
-+__nethash_add(struct ip_set_nethash *map, ip_set_ip_t *ip)
-+{
-+ __u32 probe;
-+ u_int16_t i;
-+ ip_set_ip_t *elem, *slot = NULL;
-+
-+ for (i = 0; i < map->probes; i++) {
-+ probe = jhash_ip(map, i, *ip) % map->hashsize;
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, probe);
-+ if (*elem == *ip)
-+ return -EEXIST;
-+ if (!(slot || *elem))
-+ slot = elem;
-+ /* There can be deleted entries, must check all slots */
-+ }
-+ if (slot) {
-+ *slot = *ip;
-+ map->elements++;
-+ return 0;
-+ }
-+ /* Trigger rehashing */
-+ return -EAGAIN;
-+}
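-+
-+/*
-+ * Collision handling: every element is hashed map->probes times with
-+ * different random seeds and may end up in any of the probed slots.
-+ * When all candidate slots are occupied by other elements, -EAGAIN is
-+ * returned and the set core invokes the retry function generated by
-+ * HASH_RETRY() below, which rebuilds the hash table (typically growing
-+ * it) and re-adds the existing members.
-+ */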
-+
-+static inline int
-+nethash_add(struct ip_set *set, ip_set_ip_t *hash_ip,
-+ ip_set_ip_t ip, uint8_t cidr)
-+{
-+ struct ip_set_nethash *map = set->data;
-+ int ret;
-+
-+	if (cidr <= 0 || cidr >= 32)
-+		return -EINVAL;
-+	if (map->elements >= limit || map->nets[cidr-1] == UINT16_MAX)
-+		return -ERANGE;
-+
-+ *hash_ip = pack_ip_cidr(ip, cidr);
-+ DP("%u.%u.%u.%u/%u, %u.%u.%u.%u", HIPQUAD(ip), cidr, HIPQUAD(*hash_ip));
-+ if (!*hash_ip)
-+ return -ERANGE;
-+
-+ ret = __nethash_add(map, hash_ip);
-+	if (ret == 0) {
-+		/* __nethash_add() already counted the new element */
-+		if (!map->nets[cidr-1]++)
-+			add_cidr_size(map->cidr, cidr);
-+	}
-+
-+ return ret;
-+}
-+
-+#undef KADT_CONDITION
-+#define KADT_CONDITION \
-+ struct ip_set_nethash *map = set->data; \
-+ uint8_t cidr = map->cidr[0] ? map->cidr[0] : 31;
-+
-+UADT(nethash, add, req->cidr)
-+KADT(nethash, add, ipaddr, cidr)
-+
-+static inline void
-+__nethash_retry(struct ip_set_nethash *tmp, struct ip_set_nethash *map)
-+{
-+ memcpy(tmp->cidr, map->cidr, sizeof(tmp->cidr));
-+ memcpy(tmp->nets, map->nets, sizeof(tmp->nets));
-+}
-+
-+HASH_RETRY(nethash, ip_set_ip_t)
-+
-+static inline int
-+nethash_del(struct ip_set *set, ip_set_ip_t *hash_ip,
-+ ip_set_ip_t ip, uint8_t cidr)
-+{
-+ struct ip_set_nethash *map = set->data;
-+ ip_set_ip_t id, *elem;
-+
-+ if (cidr <= 0 || cidr >= 32)
-+ return -EINVAL;
-+
-+ id = nethash_id_cidr(map, hash_ip, ip, cidr);
-+ if (id == UINT_MAX)
-+ return -EEXIST;
-+
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
-+ *elem = 0;
-+ map->elements--;
-+	/* Drop the prefix length from the lookup list with its last entry */
-+	if (!--map->nets[cidr-1])
-+		del_cidr_size(map->cidr, cidr);
-+ return 0;
-+}
-+
-+UADT(nethash, del, req->cidr)
-+KADT(nethash, del, ipaddr, cidr)
-+
-+static inline int
-+__nethash_create(const struct ip_set_req_nethash_create *req,
-+ struct ip_set_nethash *map)
-+{
-+ memset(map->cidr, 0, sizeof(map->cidr));
-+ memset(map->nets, 0, sizeof(map->nets));
-+
-+ return 0;
-+}
-+
-+HASH_CREATE(nethash, ip_set_ip_t)
-+HASH_DESTROY(nethash)
-+
-+HASH_FLUSH_CIDR(nethash, ip_set_ip_t)
-+
-+static inline void
-+__nethash_list_header(const struct ip_set_nethash *map,
-+ struct ip_set_req_nethash_create *header)
-+{
-+}
-+
-+HASH_LIST_HEADER(nethash)
-+HASH_LIST_MEMBERS_SIZE(nethash, ip_set_ip_t)
-+HASH_LIST_MEMBERS(nethash, ip_set_ip_t)
-+
-+IP_SET_RTYPE(nethash, IPSET_TYPE_IP | IPSET_DATA_SINGLE)
-+
-+MODULE_LICENSE("GPL");
-+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-+MODULE_DESCRIPTION("nethash type of IP sets");
-+module_param(limit, int, 0600);
-+MODULE_PARM_DESC(limit, "maximal number of elements stored in the sets");
-+
-+REGISTER_MODULE(nethash)
---- /dev/null
-+++ b/net/ipv4/netfilter/ip_set_portmap.c
-@@ -0,0 +1,114 @@
-+/* Copyright (C) 2003-2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+/* Kernel module implementing a port set type as a bitmap */
-+
-+#include <linux/module.h>
-+#include <linux/ip.h>
-+#include <linux/tcp.h>
-+#include <linux/udp.h>
-+#include <linux/skbuff.h>
-+#include <linux/errno.h>
-+#include <asm/uaccess.h>
-+#include <asm/bitops.h>
-+#include <linux/spinlock.h>
-+
-+#include <net/ip.h>
-+
-+#include <linux/netfilter_ipv4/ip_set_portmap.h>
-+#include <linux/netfilter_ipv4/ip_set_getport.h>
-+
-+static inline int
-+portmap_test(const struct ip_set *set, ip_set_ip_t *hash_port,
-+ ip_set_ip_t port)
-+{
-+ const struct ip_set_portmap *map = set->data;
-+
-+ if (port < map->first_ip || port > map->last_ip)
-+ return -ERANGE;
-+
-+ *hash_port = port;
-+ DP("set: %s, port:%u, %u", set->name, port, *hash_port);
-+ return !!test_bit(port - map->first_ip, map->members);
-+}
-+
-+#define KADT_CONDITION \
-+ if (ip == INVALID_PORT) \
-+ return 0;
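-+
-+/*
-+ * get_port() comes from ip_set_getport.h (added earlier in this
-+ * patch): it extracts the source or destination TCP/UDP port selected
-+ * by the rule's flags and yields INVALID_PORT when the packet carries
-+ * no usable port (e.g. a different protocol), in which case the
-+ * condition above makes the lookup simply not match.
-+ */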
-+
-+UADT(portmap, test)
-+KADT(portmap, test, get_port)
-+
-+static inline int
-+portmap_add(struct ip_set *set, ip_set_ip_t *hash_port, ip_set_ip_t port)
-+{
-+ struct ip_set_portmap *map = set->data;
-+
-+ if (port < map->first_ip || port > map->last_ip)
-+ return -ERANGE;
-+ if (test_and_set_bit(port - map->first_ip, map->members))
-+ return -EEXIST;
-+
-+ *hash_port = port;
-+ DP("port %u", port);
-+ return 0;
-+}
-+
-+UADT(portmap, add)
-+KADT(portmap, add, get_port)
-+
-+static inline int
-+portmap_del(struct ip_set *set, ip_set_ip_t *hash_port, ip_set_ip_t port)
-+{
-+ struct ip_set_portmap *map = set->data;
-+
-+ if (port < map->first_ip || port > map->last_ip)
-+ return -ERANGE;
-+ if (!test_and_clear_bit(port - map->first_ip, map->members))
-+ return -EEXIST;
-+
-+ *hash_port = port;
-+ DP("port %u", port);
-+ return 0;
-+}
-+
-+UADT(portmap, del)
-+KADT(portmap, del, get_port)
-+
-+static inline int
-+__portmap_create(const struct ip_set_req_portmap_create *req,
-+ struct ip_set_portmap *map)
-+{
-+ if (req->to - req->from > MAX_RANGE) {
-+ ip_set_printk("range too big, %d elements (max %d)",
-+ req->to - req->from + 1, MAX_RANGE+1);
-+ return -ENOEXEC;
-+ }
-+ return bitmap_bytes(req->from, req->to);
-+}
-+
-+BITMAP_CREATE(portmap)
-+BITMAP_DESTROY(portmap)
-+BITMAP_FLUSH(portmap)
-+
-+static inline void
-+__portmap_list_header(const struct ip_set_portmap *map,
-+ struct ip_set_req_portmap_create *header)
-+{
-+}
-+
-+BITMAP_LIST_HEADER(portmap)
-+BITMAP_LIST_MEMBERS_SIZE(portmap)
-+BITMAP_LIST_MEMBERS(portmap)
-+
-+IP_SET_TYPE(portmap, IPSET_TYPE_PORT | IPSET_DATA_SINGLE)
-+
-+MODULE_LICENSE("GPL");
-+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-+MODULE_DESCRIPTION("portmap type of IP sets");
-+
-+REGISTER_MODULE(portmap)
---- /dev/null
-+++ b/net/ipv4/netfilter/ip_set_setlist.c
-@@ -0,0 +1,330 @@
-+/* Copyright (C) 2008 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+/* Kernel module implementing an IP set type: the setlist type */
-+
-+#include <linux/module.h>
-+#include <linux/ip.h>
-+#include <linux/skbuff.h>
-+#include <linux/errno.h>
-+
-+#include <linux/netfilter_ipv4/ip_set.h>
-+#include <linux/netfilter_ipv4/ip_set_bitmaps.h>
-+#include <linux/netfilter_ipv4/ip_set_setlist.h>
-+
-+/*
-+ * Ordering semantics of the "before"/"after" requests:
-+ *   before ==> the named set (index) directly precedes the reference set (ref)
-+ *   after  ==> the reference set (ref) directly precedes the named set (index)
-+ * E.g. in the list [a, b, c], adding d before b gives [a, d, b, c],
-+ * while adding d after b gives [a, b, d, c].
-+ */
-+
-+static inline int
-+next_index_eq(const struct ip_set_setlist *map, int i, ip_set_id_t index)
-+{
-+ return i < map->size && map->index[i] == index;
-+}
-+
-+static int
-+setlist_utest(struct ip_set *set, const void *data, u_int32_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ const struct ip_set_setlist *map = set->data;
-+ const struct ip_set_req_setlist *req = data;
-+ ip_set_id_t index, ref = IP_SET_INVALID_ID;
-+ int i, res = 0;
-+ struct ip_set *s;
-+
-+ if (req->before && req->ref[0] == '\0')
-+ return 0;
-+
-+ index = __ip_set_get_byname(req->name, &s);
-+ if (index == IP_SET_INVALID_ID)
-+ return 0;
-+ if (req->ref[0] != '\0') {
-+ ref = __ip_set_get_byname(req->ref, &s);
-+ if (ref == IP_SET_INVALID_ID)
-+ goto finish;
-+ }
-+ for (i = 0; i < map->size
-+ && map->index[i] != IP_SET_INVALID_ID; i++) {
-+ if (req->before && map->index[i] == index) {
-+ res = next_index_eq(map, i + 1, ref);
-+ break;
-+ } else if (!req->before) {
-+ if ((ref == IP_SET_INVALID_ID
-+ && map->index[i] == index)
-+ || (map->index[i] == ref
-+ && next_index_eq(map, i + 1, index))) {
-+ res = 1;
-+ break;
-+ }
-+ }
-+ }
-+ if (ref != IP_SET_INVALID_ID)
-+ __ip_set_put_byindex(ref);
-+finish:
-+ __ip_set_put_byindex(index);
-+ return res;
-+}
-+
-+static int
-+setlist_ktest(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ struct ip_set_setlist *map = set->data;
-+ int i, res = 0;
-+
-+ for (i = 0; i < map->size
-+ && map->index[i] != IP_SET_INVALID_ID
-+ && res == 0; i++)
-+ res = ip_set_testip_kernel(map->index[i], skb, flags);
-+ return res;
-+}
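-+
-+/*
-+ * A packet therefore matches a setlist if it matches any member set;
-+ * the members are consulted in list order and evaluation stops at the
-+ * first hit.  The same first-match rule applies to the kernel-side
-+ * add/del below, which is why the insertion order controlled by
-+ * before/after matters.
-+ */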
-+
-+static inline int
-+insert_setlist(struct ip_set_setlist *map, int i, ip_set_id_t index)
-+{
-+ ip_set_id_t tmp;
-+ int j;
-+
-+ DP("i: %u, last %u\n", i, map->index[map->size - 1]);
-+ if (i >= map->size || map->index[map->size - 1] != IP_SET_INVALID_ID)
-+ return -ERANGE;
-+
-+ for (j = i; j < map->size
-+ && index != IP_SET_INVALID_ID; j++) {
-+ tmp = map->index[j];
-+ map->index[j] = index;
-+ index = tmp;
-+ }
-+ return 0;
-+}
-+
-+static int
-+setlist_uadd(struct ip_set *set, const void *data, u_int32_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_setlist *map = set->data;
-+ const struct ip_set_req_setlist *req = data;
-+ ip_set_id_t index, ref = IP_SET_INVALID_ID;
-+ int i, res = -ERANGE;
-+ struct ip_set *s;
-+
-+ if (req->before && req->ref[0] == '\0')
-+ return -EINVAL;
-+
-+ index = __ip_set_get_byname(req->name, &s);
-+ if (index == IP_SET_INVALID_ID)
-+ return -EEXIST;
-+ /* "Loop detection" */
-+ if (strcmp(s->type->typename, "setlist") == 0)
-+ goto finish;
-+
-+ if (req->ref[0] != '\0') {
-+ ref = __ip_set_get_byname(req->ref, &s);
-+ if (ref == IP_SET_INVALID_ID) {
-+ res = -EEXIST;
-+ goto finish;
-+ }
-+ }
-+ for (i = 0; i < map->size; i++) {
-+ if (map->index[i] != ref)
-+ continue;
-+ if (req->before)
-+ res = insert_setlist(map, i, index);
-+ else
-+ res = insert_setlist(map,
-+ ref == IP_SET_INVALID_ID ? i : i + 1,
-+ index);
-+ break;
-+ }
-+ if (ref != IP_SET_INVALID_ID)
-+ __ip_set_put_byindex(ref);
-+ /* In case of success, we keep the reference to the set */
-+finish:
-+ if (res != 0)
-+ __ip_set_put_byindex(index);
-+ return res;
-+}
-+
-+static int
-+setlist_kadd(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ struct ip_set_setlist *map = set->data;
-+ int i, res = -EINVAL;
-+
-+ for (i = 0; i < map->size
-+ && map->index[i] != IP_SET_INVALID_ID
-+ && res != 0; i++)
-+ res = ip_set_addip_kernel(map->index[i], skb, flags);
-+ return res;
-+}
-+
-+static inline int
-+unshift_setlist(struct ip_set_setlist *map, int i)
-+{
-+ int j;
-+
-+ for (j = i; j < map->size - 1; j++)
-+ map->index[j] = map->index[j+1];
-+ map->index[map->size-1] = IP_SET_INVALID_ID;
-+ return 0;
-+}
-+
-+static int
-+setlist_udel(struct ip_set *set, const void *data, u_int32_t size,
-+ ip_set_ip_t *hash_ip)
-+{
-+ struct ip_set_setlist *map = set->data;
-+ const struct ip_set_req_setlist *req = data;
-+ ip_set_id_t index, ref = IP_SET_INVALID_ID;
-+ int i, res = -EEXIST;
-+ struct ip_set *s;
-+
-+ if (req->before && req->ref[0] == '\0')
-+ return -EINVAL;
-+
-+ index = __ip_set_get_byname(req->name, &s);
-+ if (index == IP_SET_INVALID_ID)
-+ return -EEXIST;
-+ if (req->ref[0] != '\0') {
-+ ref = __ip_set_get_byname(req->ref, &s);
-+ if (ref == IP_SET_INVALID_ID)
-+ goto finish;
-+ }
-+ for (i = 0; i < map->size
-+ && map->index[i] != IP_SET_INVALID_ID; i++) {
-+ if (req->before) {
-+ if (map->index[i] == index
-+ && next_index_eq(map, i + 1, ref)) {
-+ res = unshift_setlist(map, i);
-+ break;
-+ }
-+ } else if (ref == IP_SET_INVALID_ID) {
-+ if (map->index[i] == index) {
-+ res = unshift_setlist(map, i);
-+ break;
-+ }
-+ } else if (map->index[i] == ref
-+ && next_index_eq(map, i + 1, index)) {
-+ res = unshift_setlist(map, i + 1);
-+ break;
-+ }
-+ }
-+ if (ref != IP_SET_INVALID_ID)
-+ __ip_set_put_byindex(ref);
-+finish:
-+ __ip_set_put_byindex(index);
-+ /* In case of success, release the reference to the set */
-+ if (res == 0)
-+ __ip_set_put_byindex(index);
-+ return res;
-+}
-+
-+static int
-+setlist_kdel(struct ip_set *set,
-+ const struct sk_buff *skb,
-+ ip_set_ip_t *hash_ip,
-+ const u_int32_t *flags,
-+ unsigned char index)
-+{
-+ struct ip_set_setlist *map = set->data;
-+ int i, res = -EINVAL;
-+
-+ for (i = 0; i < map->size
-+ && map->index[i] != IP_SET_INVALID_ID
-+ && res != 0; i++)
-+ res = ip_set_delip_kernel(map->index[i], skb, flags);
-+ return res;
-+}
-+
-+static int
-+setlist_create(struct ip_set *set, const void *data, u_int32_t size)
-+{
-+ struct ip_set_setlist *map;
-+ const struct ip_set_req_setlist_create *req = data;
-+ int i;
-+
-+ map = kmalloc(sizeof(struct ip_set_setlist) +
-+ req->size * sizeof(ip_set_id_t), GFP_KERNEL);
-+ if (!map)
-+ return -ENOMEM;
-+ map->size = req->size;
-+ for (i = 0; i < map->size; i++)
-+ map->index[i] = IP_SET_INVALID_ID;
-+
-+ set->data = map;
-+ return 0;
-+}
-+
-+static void
-+setlist_destroy(struct ip_set *set)
-+{
-+ struct ip_set_setlist *map = set->data;
-+ int i;
-+
-+ for (i = 0; i < map->size
-+ && map->index[i] != IP_SET_INVALID_ID; i++)
-+ __ip_set_put_byindex(map->index[i]);
-+
-+ kfree(map);
-+ set->data = NULL;
-+}
-+
-+static void
-+setlist_flush(struct ip_set *set)
-+{
-+ struct ip_set_setlist *map = set->data;
-+ int i;
-+
-+ for (i = 0; i < map->size
-+ && map->index[i] != IP_SET_INVALID_ID; i++) {
-+ __ip_set_put_byindex(map->index[i]);
-+ map->index[i] = IP_SET_INVALID_ID;
-+ }
-+}
-+
-+static void
-+setlist_list_header(const struct ip_set *set, void *data)
-+{
-+ const struct ip_set_setlist *map = set->data;
-+ struct ip_set_req_setlist_create *header = data;
-+
-+ header->size = map->size;
-+}
-+
-+static int
-+setlist_list_members_size(const struct ip_set *set)
-+{
-+ const struct ip_set_setlist *map = set->data;
-+
-+ return map->size * sizeof(ip_set_id_t);
-+}
-+
-+static void
-+setlist_list_members(const struct ip_set *set, void *data)
-+{
-+ struct ip_set_setlist *map = set->data;
-+ int i;
-+
-+ for (i = 0; i < map->size; i++)
-+ *((ip_set_id_t *)data + i) = ip_set_id(map->index[i]);
-+}
-+
-+IP_SET_TYPE(setlist, IPSET_TYPE_SETNAME | IPSET_DATA_SINGLE)
-+
-+MODULE_LICENSE("GPL");
-+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-+MODULE_DESCRIPTION("setlist type of IP sets");
-+
-+REGISTER_MODULE(setlist)
---- /dev/null
-+++ b/net/ipv4/netfilter/ipt_set.c
-@@ -0,0 +1,238 @@
-+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
-+ * Patrick Schaaf <bof@bof.de>
-+ * Martin Josefsson <gandalf@wlug.westbo.se>
-+ * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+/* Kernel module to match an IP set. */
-+
-+#include <linux/module.h>
-+#include <linux/ip.h>
-+#include <linux/skbuff.h>
-+#include <linux/version.h>
-+
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#define xt_register_match ipt_register_match
-+#define xt_unregister_match ipt_unregister_match
-+#define xt_match ipt_match
-+#else
-+#include <linux/netfilter/x_tables.h>
-+#endif
-+#include <linux/netfilter_ipv4/ip_set.h>
-+#include <linux/netfilter_ipv4/ipt_set.h>
-+
-+static inline int
-+match_set(const struct ipt_set_info *info,
-+ const struct sk_buff *skb,
-+ int inv)
-+{
-+ if (ip_set_testip_kernel(info->index, skb, info->flags))
-+ inv = !inv;
-+ return inv;
-+}
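-+
-+/*
-+ * In other words: the rule matches when the packet is in the set and
-+ * the match is not inverted, or when the packet is not in the set and
-+ * the match is inverted (the negated "!" form of the rule); i.e.
-+ * result = in_set XOR inv.
-+ */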
-+
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
-+static int
-+match(const struct sk_buff *skb,
-+ const struct net_device *in,
-+ const struct net_device *out,
-+ const void *matchinfo,
-+ int offset,
-+ const void *hdr,
-+ u_int16_t datalen,
-+ int *hotdrop)
-+#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
-+static int
-+match(const struct sk_buff *skb,
-+ const struct net_device *in,
-+ const struct net_device *out,
-+ const void *matchinfo,
-+ int offset,
-+ int *hotdrop)
-+#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
-+static int
-+match(const struct sk_buff *skb,
-+ const struct net_device *in,
-+ const struct net_device *out,
-+ const void *matchinfo,
-+ int offset,
-+ unsigned int protoff,
-+ int *hotdrop)
-+#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
-+static int
-+match(const struct sk_buff *skb,
-+ const struct net_device *in,
-+ const struct net_device *out,
-+ const struct xt_match *match,
-+ const void *matchinfo,
-+ int offset,
-+ unsigned int protoff,
-+ int *hotdrop)
-+#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
-+static bool
-+match(const struct sk_buff *skb,
-+ const struct net_device *in,
-+ const struct net_device *out,
-+ const struct xt_match *match,
-+ const void *matchinfo,
-+ int offset,
-+ unsigned int protoff,
-+ bool *hotdrop)
-+#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28) */
-+static bool
-+match(const struct sk_buff *skb,
-+ const struct xt_match_param *par)
-+#endif
-+{
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
-+ const struct ipt_set_info_match *info = matchinfo;
-+#else
-+ const struct ipt_set_info_match *info = par->matchinfo;
-+#endif
-+
-+ return match_set(&info->match_set,
-+ skb,
-+ info->match_set.flags[0] & IPSET_MATCH_INV);
-+}
-+
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
-+static int
-+checkentry(const char *tablename,
-+ const struct ipt_ip *ip,
-+ void *matchinfo,
-+ unsigned int matchsize,
-+ unsigned int hook_mask)
-+#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
-+static int
-+checkentry(const char *tablename,
-+ const void *inf,
-+ void *matchinfo,
-+ unsigned int matchsize,
-+ unsigned int hook_mask)
-+#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
-+static int
-+checkentry(const char *tablename,
-+ const void *inf,
-+ const struct xt_match *match,
-+ void *matchinfo,
-+ unsigned int matchsize,
-+ unsigned int hook_mask)
-+#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
-+static int
-+checkentry(const char *tablename,
-+ const void *inf,
-+ const struct xt_match *match,
-+ void *matchinfo,
-+ unsigned int hook_mask)
-+#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
-+static bool
-+checkentry(const char *tablename,
-+ const void *inf,
-+ const struct xt_match *match,
-+ void *matchinfo,
-+ unsigned int hook_mask)
-+#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28) */
-+static bool
-+checkentry(const struct xt_mtchk_param *par)
-+#endif
-+{
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
-+ struct ipt_set_info_match *info = matchinfo;
-+#else
-+ struct ipt_set_info_match *info = par->matchinfo;
-+#endif
-+ ip_set_id_t index;
-+
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
-+ if (matchsize != IPT_ALIGN(sizeof(struct ipt_set_info_match))) {
-+ ip_set_printk("invalid matchsize %d", matchsize);
-+ return 0;
-+ }
-+#endif
-+
-+ index = ip_set_get_byindex(info->match_set.index);
-+
-+ if (index == IP_SET_INVALID_ID) {
-+		ip_set_printk("Cannot find set identified by id %u to match",
-+ info->match_set.index);
-+ return 0; /* error */
-+ }
-+ if (info->match_set.flags[IP_SET_MAX_BINDINGS] != 0) {
-+ ip_set_printk("That's nasty!");
-+ return 0; /* error */
-+ }
-+
-+ return 1;
-+}
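-+
-+/*
-+ * checkentry() above takes a reference on the set via
-+ * ip_set_get_byindex() and destroy() below drops it again with
-+ * ip_set_put_byindex(), keeping the set pinned for as long as an
-+ * iptables rule refers to it.
-+ */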
-+
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
-+static void destroy(void *matchinfo,
-+ unsigned int matchsize)
-+#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
-+static void destroy(const struct xt_match *match,
-+ void *matchinfo,
-+ unsigned int matchsize)
-+#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
-+static void destroy(const struct xt_match *match,
-+ void *matchinfo)
-+#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28) */
-+static void destroy(const struct xt_mtdtor_param *par)
-+#endif
-+{
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
-+ struct ipt_set_info_match *info = matchinfo;
-+#else
-+ struct ipt_set_info_match *info = par->matchinfo;
-+#endif
-+
-+
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
-+ if (matchsize != IPT_ALIGN(sizeof(struct ipt_set_info_match))) {
-+ ip_set_printk("invalid matchsize %d", matchsize);
-+ return;
-+ }
-+#endif
-+ ip_set_put_byindex(info->match_set.index);
-+}
-+
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
-+static struct xt_match set_match = {
-+ .name = "set",
-+ .match = &match,
-+ .checkentry = &checkentry,
-+ .destroy = &destroy,
-+ .me = THIS_MODULE
-+};
-+#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17) */
-+static struct xt_match set_match = {
-+ .name = "set",
-+ .family = AF_INET,
-+ .match = &match,
-+ .matchsize = sizeof(struct ipt_set_info_match),
-+ .checkentry = &checkentry,
-+ .destroy = &destroy,
-+ .me = THIS_MODULE
-+};
-+#endif
-+
-+MODULE_LICENSE("GPL");
-+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-+MODULE_DESCRIPTION("iptables IP set match module");
-+
-+static int __init ipt_ipset_init(void)
-+{
-+ return xt_register_match(&set_match);
-+}
-+
-+static void __exit ipt_ipset_fini(void)
-+{
-+ xt_unregister_match(&set_match);
-+}
-+
-+module_init(ipt_ipset_init);
-+module_exit(ipt_ipset_fini);
---- /dev/null
-+++ b/net/ipv4/netfilter/ipt_SET.c
-@@ -0,0 +1,242 @@
-+/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu>
-+ * Patrick Schaaf <bof@bof.de>
-+ * Martin Josefsson <gandalf@wlug.westbo.se>
-+ * Copyright (C) 2003-2004 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
-+ *
-+ * This program is free software; you can redistribute it and/or modify
-+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
-+ */
-+
-+/* ipt_SET.c - netfilter target to manipulate IP sets */
-+
-+#include <linux/module.h>
-+#include <linux/ip.h>
-+#include <linux/skbuff.h>
-+#include <linux/version.h>
-+
-+#include <linux/netfilter_ipv4.h>
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
-+#include <linux/netfilter_ipv4/ip_tables.h>
-+#define xt_register_target ipt_register_target
-+#define xt_unregister_target ipt_unregister_target
-+#define xt_target ipt_target
-+#define XT_CONTINUE IPT_CONTINUE
-+#else
-+#include <linux/netfilter/x_tables.h>
-+#endif
-+#include <linux/netfilter_ipv4/ipt_set.h>
-+
-+static unsigned int
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
-+target(struct sk_buff **pskb,
-+ unsigned int hooknum,
-+ const struct net_device *in,
-+ const struct net_device *out,
-+ const void *targinfo,
-+ void *userinfo)
-+#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
-+target(struct sk_buff **pskb,
-+ const struct net_device *in,
-+ const struct net_device *out,
-+ unsigned int hooknum,
-+ const void *targinfo,
-+ void *userinfo)
-+#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
-+target(struct sk_buff **pskb,
-+ const struct net_device *in,
-+ const struct net_device *out,
-+ unsigned int hooknum,
-+ const struct xt_target *target,
-+ const void *targinfo,
-+ void *userinfo)
-+#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
-+target(struct sk_buff **pskb,
-+ const struct net_device *in,
-+ const struct net_device *out,
-+ unsigned int hooknum,
-+ const struct xt_target *target,
-+ const void *targinfo)
-+#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
-+target(struct sk_buff *skb,
-+ const struct net_device *in,
-+ const struct net_device *out,
-+ unsigned int hooknum,
-+ const struct xt_target *target,
-+ const void *targinfo)
-+#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28) */
-+target(struct sk_buff *skb,
-+ const struct xt_target_param *par)
-+#endif
-+{
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
-+ const struct ipt_set_info_target *info = targinfo;
-+#else
-+ const struct ipt_set_info_target *info = par->targinfo;
-+#endif
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
-+ struct sk_buff *skb = *pskb;
-+#endif
-+
-+
-+ if (info->add_set.index != IP_SET_INVALID_ID)
-+ ip_set_addip_kernel(info->add_set.index,
-+ skb,
-+ info->add_set.flags);
-+ if (info->del_set.index != IP_SET_INVALID_ID)
-+ ip_set_delip_kernel(info->del_set.index,
-+ skb,
-+ info->del_set.flags);
-+
-+ return XT_CONTINUE;
-+}
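-+
-+/*
-+ * The target never terminates rule traversal: it adds the packet to
-+ * add_set and/or deletes it from del_set (whichever indexes are valid)
-+ * and then returns XT_CONTINUE so that later rules still see the
-+ * packet.  Which part of the packet is used (source/destination
-+ * address, port, ...) is encoded in the per-set flags arrays
-+ * configured from userspace.
-+ */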
-+
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
-+static int
-+checkentry(const char *tablename,
-+ const struct ipt_entry *e,
-+ void *targinfo,
-+ unsigned int targinfosize,
-+ unsigned int hook_mask)
-+#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
-+static int
-+checkentry(const char *tablename,
-+ const void *e,
-+ void *targinfo,
-+ unsigned int targinfosize,
-+ unsigned int hook_mask)
-+#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
-+static int
-+checkentry(const char *tablename,
-+ const void *e,
-+ const struct xt_target *target,
-+ void *targinfo,
-+ unsigned int targinfosize,
-+ unsigned int hook_mask)
-+#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
-+static int
-+checkentry(const char *tablename,
-+ const void *e,
-+ const struct xt_target *target,
-+ void *targinfo,
-+ unsigned int hook_mask)
-+#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
-+static bool
-+checkentry(const char *tablename,
-+ const void *e,
-+ const struct xt_target *target,
-+ void *targinfo,
-+ unsigned int hook_mask)
-+#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28) */
-+static bool
-+checkentry(const struct xt_tgchk_param *par)
-+#endif
-+{
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
-+ const struct ipt_set_info_target *info = targinfo;
-+#else
-+ const struct ipt_set_info_target *info = par->targinfo;
-+#endif
-+ ip_set_id_t index;
-+
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
-+ if (targinfosize != IPT_ALIGN(sizeof(*info))) {
-+ DP("bad target info size %u", targinfosize);
-+ return 0;
-+ }
-+#endif
-+
-+ if (info->add_set.index != IP_SET_INVALID_ID) {
-+ index = ip_set_get_byindex(info->add_set.index);
-+ if (index == IP_SET_INVALID_ID) {
-+ ip_set_printk("cannot find add_set index %u as target",
-+ info->add_set.index);
-+ return 0; /* error */
-+ }
-+ }
-+
-+ if (info->del_set.index != IP_SET_INVALID_ID) {
-+ index = ip_set_get_byindex(info->del_set.index);
-+ if (index == IP_SET_INVALID_ID) {
-+ ip_set_printk("cannot find del_set index %u as target",
-+ info->del_set.index);
-+ return 0; /* error */
-+ }
-+ }
-+ if (info->add_set.flags[IP_SET_MAX_BINDINGS] != 0
-+ || info->del_set.flags[IP_SET_MAX_BINDINGS] != 0) {
-+ ip_set_printk("That's nasty!");
-+ return 0; /* error */
-+ }
-+
-+ return 1;
-+}
-+
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
-+static void destroy(void *targetinfo,
-+ unsigned int targetsize)
-+#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
-+static void destroy(const struct xt_target *target,
-+ void *targetinfo,
-+ unsigned int targetsize)
-+#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
-+static void destroy(const struct xt_target *target,
-+ void *targetinfo)
-+#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28) */
-+static void destroy(const struct xt_tgdtor_param *par)
-+#endif
-+{
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
-+ const struct ipt_set_info_target *info = targetinfo;
-+#else
-+ const struct ipt_set_info_target *info = par->targinfo;
-+#endif
-+
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
-+ if (targetsize != IPT_ALIGN(sizeof(struct ipt_set_info_target))) {
-+ ip_set_printk("invalid targetsize %d", targetsize);
-+ return;
-+ }
-+#endif
-+ if (info->add_set.index != IP_SET_INVALID_ID)
-+ ip_set_put_byindex(info->add_set.index);
-+ if (info->del_set.index != IP_SET_INVALID_ID)
-+ ip_set_put_byindex(info->del_set.index);
-+}
-+
-+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
-+static struct xt_target SET_target = {
-+ .name = "SET",
-+ .target = target,
-+ .checkentry = checkentry,
-+ .destroy = destroy,
-+ .me = THIS_MODULE
-+};
-+#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17) */
-+static struct xt_target SET_target = {
-+ .name = "SET",
-+ .family = AF_INET,
-+ .target = target,
-+ .targetsize = sizeof(struct ipt_set_info_target),
-+ .checkentry = checkentry,
-+ .destroy = destroy,
-+ .me = THIS_MODULE
-+};
-+#endif
-+
-+MODULE_LICENSE("GPL");
-+MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>");
-+MODULE_DESCRIPTION("iptables IP set target module");
-+
-+static int __init ipt_SET_init(void)
-+{
-+ return xt_register_target(&SET_target);
-+}
-+
-+static void __exit ipt_SET_fini(void)
-+{
-+ xt_unregister_target(&SET_target);
-+}
-+
-+module_init(ipt_SET_init);
-+module_exit(ipt_SET_fini);
---- a/net/ipv4/netfilter/Kconfig
-+++ b/net/ipv4/netfilter/Kconfig
-@@ -406,5 +406,146 @@ config IP_NF_ARP_MANGLE
- Allows altering the ARP packet payload: source and destination
- hardware and network addresses.
-
-+config IP_NF_SET
-+ tristate "IP set support"
-+ depends on INET && NETFILTER
-+ help
-+ This option adds IP set support to the kernel.
-+ In order to define and use sets, you need the userspace utility
-+ ipset(8).
-+
-+ To compile it as a module, choose M here. If unsure, say N.
-+
-+config IP_NF_SET_MAX
-+ int "Maximum number of IP sets"
-+ default 256
-+ range 2 65534
-+ depends on IP_NF_SET
-+ help
-+	  Here you can define the default maximum number of IP sets
-+	  for the kernel.
-+
-+	  The value can be overridden by the 'max_sets' module
-+ parameter of the 'ip_set' module.
-+
-+config IP_NF_SET_HASHSIZE
-+ int "Hash size for bindings of IP sets"
-+ default 1024
-+ depends on IP_NF_SET
-+ help
-+	  Here you can define the default hash size for bindings
-+	  of IP sets.
-+
-+	  The value can be overridden by the 'hash_size' module
-+ parameter of the 'ip_set' module.
-+
-+config IP_NF_SET_IPMAP
-+ tristate "ipmap set support"
-+ depends on IP_NF_SET
-+ help
-+ This option adds the ipmap set type support.
-+
-+ To compile it as a module, choose M here. If unsure, say N.
-+
-+config IP_NF_SET_MACIPMAP
-+ tristate "macipmap set support"
-+ depends on IP_NF_SET
-+ help
-+ This option adds the macipmap set type support.
-+
-+ To compile it as a module, choose M here. If unsure, say N.
-+
-+config IP_NF_SET_PORTMAP
-+ tristate "portmap set support"
-+ depends on IP_NF_SET
-+ help
-+ This option adds the portmap set type support.
-+
-+ To compile it as a module, choose M here. If unsure, say N.
-+
-+config IP_NF_SET_IPHASH
-+ tristate "iphash set support"
-+ depends on IP_NF_SET
-+ help
-+ This option adds the iphash set type support.
-+
-+ To compile it as a module, choose M here. If unsure, say N.
-+
-+config IP_NF_SET_NETHASH
-+ tristate "nethash set support"
-+ depends on IP_NF_SET
-+ help
-+ This option adds the nethash set type support.
-+
-+ To compile it as a module, choose M here. If unsure, say N.
-+
-+config IP_NF_SET_IPPORTHASH
-+ tristate "ipporthash set support"
-+ depends on IP_NF_SET
-+ help
-+ This option adds the ipporthash set type support.
-+
-+ To compile it as a module, choose M here. If unsure, say N.
-+
-+config IP_NF_SET_IPPORTIPHASH
-+ tristate "ipportiphash set support"
-+ depends on IP_NF_SET
-+ help
-+ This option adds the ipportiphash set type support.
-+
-+ To compile it as a module, choose M here. If unsure, say N.
-+
-+config IP_NF_SET_IPPORTNETHASH
-+ tristate "ipportnethash set support"
-+ depends on IP_NF_SET
-+ help
-+ This option adds the ipportnethash set type support.
-+
-+ To compile it as a module, choose M here. If unsure, say N.
-+
-+config IP_NF_SET_IPTREE
-+ tristate "iptree set support"
-+ depends on IP_NF_SET
-+ help
-+ This option adds the iptree set type support.
-+
-+ To compile it as a module, choose M here. If unsure, say N.
-+
-+config IP_NF_SET_IPTREEMAP
-+ tristate "iptreemap set support"
-+ depends on IP_NF_SET
-+ help
-+ This option adds the iptreemap set type support.
-+
-+ To compile it as a module, choose M here. If unsure, say N.
-+
-+config IP_NF_SET_SETLIST
-+ tristate "setlist set support"
-+ depends on IP_NF_SET
-+ help
-+ This option adds the setlist set type support.
-+
-+ To compile it as a module, choose M here. If unsure, say N.
-+
-+config IP_NF_MATCH_SET
-+ tristate "set match support"
-+ depends on IP_NF_SET
-+ help
-+	  Set matching matches packets against the given IP sets.
-+ You need the ipset utility to create and set up the sets.
-+
-+ To compile it as a module, choose M here. If unsure, say N.
-+
-+config IP_NF_TARGET_SET
-+ tristate "SET target support"
-+ depends on IP_NF_SET
-+ help
-+	  The SET target makes it possible to add/delete entries
-+ in IP sets.
-+ You need the ipset utility to create and set up the sets.
-+
-+ To compile it as a module, choose M here. If unsure, say N.
-+
-+
- endmenu
-
---- a/net/ipv4/netfilter/Makefile
-+++ b/net/ipv4/netfilter/Makefile
-@@ -50,6 +50,7 @@ obj-$(CONFIG_IP_NF_MATCH_AH) += ipt_ah.o
- obj-$(CONFIG_IP_NF_MATCH_ECN) += ipt_ecn.o
- obj-$(CONFIG_IP_NF_MATCH_RECENT) += ipt_recent.o
- obj-$(CONFIG_IP_NF_MATCH_TTL) += ipt_ttl.o
-+obj-$(CONFIG_IP_NF_MATCH_SET) += ipt_set.o
-
- # targets
- obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o
-@@ -61,6 +62,21 @@ obj-$(CONFIG_IP_NF_TARGET_REDIRECT) += i
- obj-$(CONFIG_IP_NF_TARGET_REJECT) += ipt_REJECT.o
- obj-$(CONFIG_IP_NF_TARGET_TTL) += ipt_TTL.o
- obj-$(CONFIG_IP_NF_TARGET_ULOG) += ipt_ULOG.o
-+obj-$(CONFIG_IP_NF_TARGET_SET) += ipt_SET.o
-+
-+# sets
-+obj-$(CONFIG_IP_NF_SET) += ip_set.o
-+obj-$(CONFIG_IP_NF_SET_IPMAP) += ip_set_ipmap.o
-+obj-$(CONFIG_IP_NF_SET_PORTMAP) += ip_set_portmap.o
-+obj-$(CONFIG_IP_NF_SET_MACIPMAP) += ip_set_macipmap.o
-+obj-$(CONFIG_IP_NF_SET_IPHASH) += ip_set_iphash.o
-+obj-$(CONFIG_IP_NF_SET_NETHASH) += ip_set_nethash.o
-+obj-$(CONFIG_IP_NF_SET_IPPORTHASH) += ip_set_ipporthash.o
-+obj-$(CONFIG_IP_NF_SET_IPPORTIPHASH) += ip_set_ipportiphash.o
-+obj-$(CONFIG_IP_NF_SET_IPPORTNETHASH) += ip_set_ipportnethash.o
-+obj-$(CONFIG_IP_NF_SET_IPTREE) += ip_set_iptree.o
-+obj-$(CONFIG_IP_NF_SET_IPTREEMAP) += ip_set_iptreemap.o
-+obj-$(CONFIG_IP_NF_SET_SETLIST) += ip_set_setlist.o
-
- # generic ARP tables
- obj-$(CONFIG_IP_NF_ARPTABLES) += arp_tables.o