-rw-r--r--  target/linux/generic-2.6/patches-2.6.26/060-block2mtd_init.patch            |  11
-rw-r--r--  target/linux/generic-2.6/patches-2.6.26/065-rootfs_split.patch              |   2
-rw-r--r--  target/linux/generic-2.6/patches-2.6.26/100-netfilter_layer7_2.17.patch     |  54
-rw-r--r--  target/linux/generic-2.6/patches-2.6.26/101-netfilter_layer7_pktmatch.patch |  17
-rw-r--r--  target/linux/generic-2.6/patches-2.6.26/130-netfilter_ipset.patch           | 718
-rw-r--r--  target/linux/generic-2.6/patches-2.6.26/150-netfilter_imq.patch             |   4
-rw-r--r--  target/linux/generic-2.6/patches-2.6.26/200-sched_esfq.patch                |  48
-rw-r--r--  target/linux/generic-2.6/patches-2.6.26/204-jffs2_eofdetect.patch           |  88
-rw-r--r--  target/linux/generic-2.6/patches-2.6.26/213-kobject_uevent.patch            |   2
-rw-r--r--  target/linux/generic-2.6/patches-2.6.26/600-phy_extension.patch             |  24
-rw-r--r--  target/linux/generic-2.6/patches-2.6.26/903-hostap_txpower.patch            |   4
-rw-r--r--  target/linux/generic-2.6/patches-2.6.26/910-cryptodev_backport.patch        |  36
-rw-r--r--  target/linux/generic-2.6/patches-2.6.26/915-hso-backport.patch              |  15
-rw-r--r--  target/linux/generic-2.6/patches-2.6.26/970-ocf_20080704.patch              | 326
14 files changed, 726 insertions(+), 623 deletions(-)
diff --git a/target/linux/generic-2.6/patches-2.6.26/060-block2mtd_init.patch b/target/linux/generic-2.6/patches-2.6.26/060-block2mtd_init.patch
index a271efbcd2..6fb0be1010 100644
--- a/target/linux/generic-2.6/patches-2.6.26/060-block2mtd_init.patch
+++ b/target/linux/generic-2.6/patches-2.6.26/060-block2mtd_init.patch
@@ -35,23 +35,20 @@
+
if (!dev->mtd.name)
goto devinit_err;
-+
-+ strcpy(dev->mtd.name, mtdname);
- sprintf(dev->mtd.name, "block2mtd: %s", devname);
--
++ strcpy(dev->mtd.name, mtdname);
+
- dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK;
+ dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK & ~(erase_size - 1);
dev->mtd.erasesize = erase_size;
dev->mtd.writesize = 1;
dev->mtd.type = MTD_RAM;
-@@ -298,15 +304,18 @@
- dev->mtd.read = block2mtd_read;
+@@ -299,14 +305,17 @@
dev->mtd.priv = dev;
dev->mtd.owner = THIS_MODULE;
--
+
- if (add_mtd_device(&dev->mtd)) {
-+
+ part = kzalloc(sizeof(struct mtd_partition), GFP_KERNEL);
+ part->name = dev->mtd.name;
+ part->offset = 0;
diff --git a/target/linux/generic-2.6/patches-2.6.26/065-rootfs_split.patch b/target/linux/generic-2.6/patches-2.6.26/065-rootfs_split.patch
index 069b243990..c619cdad26 100644
--- a/target/linux/generic-2.6/patches-2.6.26/065-rootfs_split.patch
+++ b/target/linux/generic-2.6/patches-2.6.26/065-rootfs_split.patch
@@ -866,7 +866,7 @@
dev->mtd.priv = dev;
dev->mtd.owner = THIS_MODULE;
+ dev->mtd.refresh_device = block2mtd_refresh;
-
+
part = kzalloc(sizeof(struct mtd_partition), GFP_KERNEL);
part->name = dev->mtd.name;
--- a/drivers/mtd/mtdchar.c
diff --git a/target/linux/generic-2.6/patches-2.6.26/100-netfilter_layer7_2.17.patch b/target/linux/generic-2.6/patches-2.6.26/100-netfilter_layer7_2.17.patch
index 6d7fbd585a..58ebdc5329 100644
--- a/target/linux/generic-2.6/patches-2.6.26/100-netfilter_layer7_2.17.patch
+++ b/target/linux/generic-2.6/patches-2.6.26/100-netfilter_layer7_2.17.patch
@@ -170,7 +170,7 @@
+
+/* Use instead of regcomp. As we expect to be seeing the same regexps over and
+over again, it make sense to cache the results. */
-+static regexp * compile_and_cache(const char * regex_string,
++static regexp * compile_and_cache(const char * regex_string,
+ const char * protocol)
+{
+ struct pattern_cache * node = first_pattern_cache;
@@ -227,7 +227,7 @@
+ if ( !node->pattern ) {
+ if (net_ratelimit())
+ printk(KERN_ERR "layer7: Error compiling regexp "
-+ "\"%s\" (%s)\n",
++ "\"%s\" (%s)\n",
+ regex_string, protocol);
+ /* pattern is now cached as NULL, so we won't try again. */
+ }
@@ -275,8 +275,8 @@
+}
+
+/* handles whether there's a match when we aren't appending data anymore */
-+static int match_no_append(struct nf_conn * conntrack,
-+ struct nf_conn * master_conntrack,
++static int match_no_append(struct nf_conn * conntrack,
++ struct nf_conn * master_conntrack,
+ enum ip_conntrack_info ctinfo,
+ enum ip_conntrack_info master_ctinfo,
+ const struct xt_layer7_info * info)
@@ -286,9 +286,9 @@
+
+ #ifdef CONFIG_IP_NF_MATCH_LAYER7_DEBUG
+ if(!master_conntrack->layer7.app_proto) {
-+ char * f =
++ char * f =
+ friendly_print(master_conntrack->layer7.app_data);
-+ char * g =
++ char * g =
+ hex_print(master_conntrack->layer7.app_data);
+ DPRINTK("\nl7-filter gave up after %d bytes "
+ "(%d packets):\n%s\n",
@@ -306,8 +306,8 @@
+ if(master_conntrack->layer7.app_proto){
+ /* Here child connections set their .app_proto (for /proc) */
+ if(!conntrack->layer7.app_proto) {
-+ conntrack->layer7.app_proto =
-+ kmalloc(strlen(master_conntrack->layer7.app_proto)+1,
++ conntrack->layer7.app_proto =
++ kmalloc(strlen(master_conntrack->layer7.app_proto)+1,
+ GFP_ATOMIC);
+ if(!conntrack->layer7.app_proto){
+ if (net_ratelimit())
@@ -316,17 +316,17 @@
+ "bailing.\n");
+ return 1;
+ }
-+ strcpy(conntrack->layer7.app_proto,
++ strcpy(conntrack->layer7.app_proto,
+ master_conntrack->layer7.app_proto);
+ }
+
-+ return (!strcmp(master_conntrack->layer7.app_proto,
++ return (!strcmp(master_conntrack->layer7.app_proto,
+ info->protocol));
+ }
+ else {
+ /* If not classified, set to "unknown" to distinguish from
+ connections that are still being tested. */
-+ master_conntrack->layer7.app_proto =
++ master_conntrack->layer7.app_proto =
+ kmalloc(strlen("unknown")+1, GFP_ATOMIC);
+ if(!master_conntrack->layer7.app_proto){
+ if (net_ratelimit())
@@ -346,9 +346,9 @@
+ int length = 0, i;
+ int oldlength = master_conntrack->layer7.app_data_len;
+
-+ /* This is a fix for a race condition by Deti Fliegl. However, I'm not
-+ clear on whether the race condition exists or whether this really
-+ fixes it. I might just be being dense... Anyway, if it's not really
++ /* This is a fix for a race condition by Deti Fliegl. However, I'm not
++ clear on whether the race condition exists or whether this really
++ fixes it. I might just be being dense... Anyway, if it's not really
+ a fix, all it does is waste a very small amount of time. */
+ if(!master_conntrack->layer7.app_data) return 0;
+
@@ -359,7 +359,7 @@
+ if(app_data[i] != '\0') {
+ /* the kernel version of tolower mungs 'upper ascii' */
+ master_conntrack->layer7.app_data[length+oldlength] =
-+ isascii(app_data[i])?
++ isascii(app_data[i])?
+ tolower(app_data[i]) : app_data[i];
+ length++;
+ }
@@ -449,7 +449,7 @@
+ bool *hotdrop)
+{
+ /* sidestep const without getting a compiler warning... */
-+ struct sk_buff * skb = (struct sk_buff *)skbin;
++ struct sk_buff * skb = (struct sk_buff *)skbin;
+
+ const struct xt_layer7_info * info = matchinfo;
+ enum ip_conntrack_info master_ctinfo, ctinfo;
@@ -485,12 +485,12 @@
+ if(TOTAL_PACKETS > num_packets ||
+ master_conntrack->layer7.app_proto) {
+
-+ pattern_result = match_no_append(conntrack, master_conntrack,
++ pattern_result = match_no_append(conntrack, master_conntrack,
+ ctinfo, master_ctinfo, info);
+
-+ /* skb->cb[0] == seen. Don't do things twice if there are
-+ multiple l7 rules. I'm not sure that using cb for this purpose
-+ is correct, even though it says "put your private variables
++ /* skb->cb[0] == seen. Don't do things twice if there are
++ multiple l7 rules. I'm not sure that using cb for this purpose
++ is correct, even though it says "put your private variables
+ there". But it doesn't look like it is being used for anything
+ else in the skbs that make it here. */
+ skb->cb[0] = 1; /* marking it seen here's probably irrelevant */
@@ -517,9 +517,9 @@
+ comppattern = compile_and_cache(info->pattern, info->protocol);
+
+ /* On the first packet of a connection, allocate space for app data */
-+ if(TOTAL_PACKETS == 1 && !skb->cb[0] &&
++ if(TOTAL_PACKETS == 1 && !skb->cb[0] &&
+ !master_conntrack->layer7.app_data){
-+ master_conntrack->layer7.app_data =
++ master_conntrack->layer7.app_data =
+ kmalloc(maxdatalen, GFP_ATOMIC);
+ if(!master_conntrack->layer7.app_data){
+ if (net_ratelimit())
@@ -562,14 +562,14 @@
+ DPRINTK("layer7: matched unset: not yet classified "
+ "(%d/%d packets)\n", TOTAL_PACKETS, num_packets);
+ /* If the regexp failed to compile, don't bother running it */
-+ } else if(comppattern &&
++ } else if(comppattern &&
+ regexec(comppattern, master_conntrack->layer7.app_data)){
+ DPRINTK("layer7: matched %s\n", info->protocol);
+ pattern_result = 1;
+ } else pattern_result = 0;
+
+ if(pattern_result == 1) {
-+ master_conntrack->layer7.app_proto =
++ master_conntrack->layer7.app_proto =
+ kmalloc(strlen(info->protocol)+1, GFP_ATOMIC);
+ if(!master_conntrack->layer7.app_proto){
+ if (net_ratelimit())
@@ -914,7 +914,7 @@
+ register int len;
+ int flags;
+ struct match_globals g;
-+
++
+ /* commented out by ethan
+ extern char *malloc();
+ */
@@ -1041,7 +1041,7 @@
+ }
+
+ /* Make a closing node, and hook it on the end. */
-+ ender = regnode(g, (paren) ? CLOSE+parno : END);
++ ender = regnode(g, (paren) ? CLOSE+parno : END);
+ regtail(g, ret, ender);
+
+ /* Hook the tails of the branches to the closing node. */
@@ -1986,7 +1986,7 @@
+ register char c;
+ register int no;
+ register int len;
-+
++
+ /* Not necessary and gcc doesn't like it -MLS */
+ /*extern char *strncpy();*/
+
diff --git a/target/linux/generic-2.6/patches-2.6.26/101-netfilter_layer7_pktmatch.patch b/target/linux/generic-2.6/patches-2.6.26/101-netfilter_layer7_pktmatch.patch
index 167d578cf1..dff6d188aa 100644
--- a/target/linux/generic-2.6/patches-2.6.26/101-netfilter_layer7_pktmatch.patch
+++ b/target/linux/generic-2.6/patches-2.6.26/101-netfilter_layer7_pktmatch.patch
@@ -20,13 +20,12 @@
{
int length = 0, i;
- int oldlength = master_conntrack->layer7.app_data_len;
--
-- /* This is a fix for a race condition by Deti Fliegl. However, I'm not
-- clear on whether the race condition exists or whether this really
-- fixes it. I might just be being dense... Anyway, if it's not really
+
+- /* This is a fix for a race condition by Deti Fliegl. However, I'm not
+- clear on whether the race condition exists or whether this really
+- fixes it. I might just be being dense... Anyway, if it's not really
- a fix, all it does is waste a very small amount of time. */
- if(!master_conntrack->layer7.app_data) return 0;
-+
+ if (!target) return 0;
/* Strip nulls. Make everything lower case (our regex lib doesn't
@@ -38,13 +37,13 @@
/* the kernel version of tolower mungs 'upper ascii' */
- master_conntrack->layer7.app_data[length+oldlength] =
+ target[length+offset] =
- isascii(app_data[i])?
+ isascii(app_data[i])?
tolower(app_data[i]) : app_data[i];
length++;
}
}
+ target[length+offset] = '\0';
-+
++
+ return length;
+}
@@ -79,7 +78,7 @@
+ if(!info->pkt && (TOTAL_PACKETS > num_packets ||
+ master_conntrack->layer7.app_proto)) {
- pattern_result = match_no_append(conntrack, master_conntrack,
+ pattern_result = match_no_append(conntrack, master_conntrack,
ctinfo, master_ctinfo, info);
@@ -473,6 +475,25 @@
/* the return value gets checked later, when we're ready to use it */
@@ -105,5 +104,5 @@
+ }
+
/* On the first packet of a connection, allocate space for app data */
- if(TOTAL_PACKETS == 1 && !skb->cb[0] &&
+ if(TOTAL_PACKETS == 1 && !skb->cb[0] &&
!master_conntrack->layer7.app_data){
diff --git a/target/linux/generic-2.6/patches-2.6.26/130-netfilter_ipset.patch b/target/linux/generic-2.6/patches-2.6.26/130-netfilter_ipset.patch
index d3f5a5957f..31e62e250f 100644
--- a/target/linux/generic-2.6/patches-2.6.26/130-netfilter_ipset.patch
+++ b/target/linux/generic-2.6/patches-2.6.26/130-netfilter_ipset.patch
@@ -11,7 +11,7 @@
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
++ * published by the Free Software Foundation.
+ */
+
+#if 0
@@ -40,8 +40,8 @@
+ * - in order to "deal with" backward compatibility, renamed to ipset
+ */
+
-+/*
-+ * Used so that the kernel module and ipset-binary can match their versions
++/*
++ * Used so that the kernel module and ipset-binary can match their versions
+ */
+#define IP_SET_PROTOCOL_VERSION 2
+
@@ -52,7 +52,7 @@
+ *
+ * The representation works in HOST byte order, because most set types
+ * will perform arithmetic operations and compare operations.
-+ *
++ *
+ * For now the type is an uint32_t.
+ *
+ * Make sure to ONLY use the functions when translating and parsing
@@ -103,8 +103,8 @@
+ * 200-299: list, save, restore
+ */
+
-+/* Single shot operations:
-+ * version, create, destroy, flush, rename and swap
++/* Single shot operations:
++ * version, create, destroy, flush, rename and swap
+ *
+ * Sets are identified by name.
+ */
@@ -155,7 +155,7 @@
+ unsigned version;
+};
+
-+/* Double shots operations:
++/* Double shots operations:
+ * add, del, test, bind and unbind.
+ *
+ * First we query the kernel to get the index and type of the target set,
@@ -197,7 +197,7 @@
+};
+
+#define IP_SET_OP_UNBIND_SET 0x00000105 /* Unbind an IP from a set */
-+/* Uses ip_set_req_bind, with type speficic addage
++/* Uses ip_set_req_bind, with type speficic addage
+ * index = 0 means unbinding for all sets */
+
+#define IP_SET_OP_TEST_BIND_SET 0x00000106 /* Test binding an IP to a set */
@@ -243,7 +243,7 @@
+#define IP_SET_OP_LIST 0x00000203
+struct ip_set_req_list {
+ IP_SET_REQ_BYINDEX;
-+ /* sets number of struct ip_set_list in reply */
++ /* sets number of struct ip_set_list in reply */
+};
+
+struct ip_set_list {
@@ -283,7 +283,7 @@
+/* The restore operation */
+#define IP_SET_OP_RESTORE 0x00000205
+/* Uses ip_set_req_setnames followed by ip_set_restore structures
-+ * plus a marker ip_set_restore, followed by ip_set_hash_save
++ * plus a marker ip_set_restore, followed by ip_set_hash_save
+ * structures.
+ */
+struct ip_set_restore {
@@ -344,7 +344,7 @@
+ * return 0 if not in set, 1 if in set.
+ */
+ int (*testip_kernel) (struct ip_set *set,
-+ const struct sk_buff * skb,
++ const struct sk_buff * skb,
+ ip_set_ip_t *ip,
+ const u_int32_t *flags,
+ unsigned char index);
@@ -367,7 +367,7 @@
+ * and -ERANGE if the address lies outside the set bounds.
+ * If the address was not already in the set, 0 is returned.
+ */
-+ int (*addip) (struct ip_set *set,
++ int (*addip) (struct ip_set *set,
+ const void *data, size_t size,
+ ip_set_ip_t *ip);
+
@@ -377,7 +377,7 @@
+ * If the address was not already in the set, 0 is returned.
+ */
+ int (*addip_kernel) (struct ip_set *set,
-+ const struct sk_buff * skb,
++ const struct sk_buff * skb,
+ ip_set_ip_t *ip,
+ const u_int32_t *flags,
+ unsigned char index);
@@ -387,7 +387,7 @@
+ * and -ERANGE if the address lies outside the set bounds.
+ * If the address really was in the set, 0 is returned.
+ */
-+ int (*delip) (struct ip_set *set,
++ int (*delip) (struct ip_set *set,
+ const void *data, size_t size,
+ ip_set_ip_t *ip);
+
@@ -397,7 +397,7 @@
+ * If the address really was in the set, 0 is returned.
+ */
+ int (*delip_kernel) (struct ip_set *set,
-+ const struct sk_buff * skb,
++ const struct sk_buff * skb,
+ ip_set_ip_t *ip,
+ const u_int32_t *flags,
+ unsigned char index);
@@ -429,11 +429,11 @@
+ /* Listing: Get the header
+ *
+ * Fill in the information in "data".
-+ * This function is always run after list_header_size() under a
-+ * writelock on the set. Therefor is the length of "data" always
-+ * correct.
++ * This function is always run after list_header_size() under a
++ * writelock on the set. Therefor is the length of "data" always
++ * correct.
+ */
-+ void (*list_header) (const struct ip_set *set,
++ void (*list_header) (const struct ip_set *set,
+ void *data);
+
+ /* Listing: Get the size for the set members
@@ -443,9 +443,9 @@
+ /* Listing: Get the set members
+ *
+ * Fill in the information in "data".
-+ * This function is always run after list_member_size() under a
-+ * writelock on the set. Therefor is the length of "data" always
-+ * correct.
++ * This function is always run after list_member_size() under a
++ * writelock on the set. Therefor is the length of "data" always
++ * correct.
+ */
+ void (*list_members) (const struct ip_set *set,
+ void *data);
@@ -567,14 +567,14 @@
+{
+ unsigned int bits = 32;
+ ip_set_ip_t maskaddr;
-+
++
+ if (mask == 0xFFFFFFFF)
+ return bits;
-+
++
+ maskaddr = 0xFFFFFFFE;
+ while (--bits >= 0 && maskaddr != mask)
+ maskaddr <<= 1;
-+
++
+ return bits;
+}
+
@@ -582,14 +582,14 @@
+range_to_mask(ip_set_ip_t from, ip_set_ip_t to, unsigned int *bits)
+{
+ ip_set_ip_t mask = 0xFFFFFFFE;
-+
++
+ *bits = 32;
+ while (--(*bits) >= 0 && mask && (to & mask) != from)
+ mask <<= 1;
-+
++
+ return mask;
+}
-+
++
+#endif /* __IP_SET_IPMAP_H */
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ip_set_ipporthash.h
@@ -945,7 +945,7 @@
+ void *arrays[0];
+};
+
-+static inline void *
++static inline void *
+harray_malloc(size_t hashsize, size_t typesize, int flags)
+{
+ struct harray *harray;
@@ -961,21 +961,21 @@
+ size = hashsize/max_elements;
+ if (hashsize % max_elements)
+ size++;
-+
++
+ /* Last pointer signals end of arrays */
+ harray = kmalloc(sizeof(struct harray) + (size + 1) * sizeof(void *),
+ flags);
+
+ if (!harray)
+ return NULL;
-+
++
+ for (i = 0; i < size - 1; i++) {
+ harray->arrays[i] = kmalloc(max_elements * typesize, flags);
+ if (!harray->arrays[i])
+ goto undo;
+ memset(harray->arrays[i], 0, max_elements * typesize);
+ }
-+ harray->arrays[i] = kmalloc((hashsize - i * max_elements) * typesize,
++ harray->arrays[i] = kmalloc((hashsize - i * max_elements) * typesize,
+ flags);
+ if (!harray->arrays[i])
+ goto undo;
@@ -983,7 +983,7 @@
+
+ harray->max_elements = max_elements;
+ harray->arrays[size] = NULL;
-+
++
+ return (void *)harray;
+
+ undo:
@@ -998,7 +998,7 @@
+{
+ struct harray *harray = (struct harray *) h;
+ size_t i;
-+
++
+ for (i = 0; harray->arrays[i] != NULL; i++)
+ kfree(harray->arrays[i]);
+ kfree(harray);
@@ -1008,10 +1008,10 @@
+{
+ struct harray *harray = (struct harray *) h;
+ size_t i;
-+
++
+ for (i = 0; harray->arrays[i+1] != NULL; i++)
+ memset(harray->arrays[i], 0, harray->max_elements * typesize);
-+ memset(harray->arrays[i], 0,
++ memset(harray->arrays[i], 0,
+ (hashsize - i * harray->max_elements) * typesize);
+}
+
@@ -1059,7 +1059,7 @@
+
+static unsigned char shifts[] = {255, 253, 249, 241, 225, 193, 129, 1};
+
-+static inline ip_set_ip_t
++static inline ip_set_ip_t
+pack(ip_set_ip_t ip, unsigned char cidr)
+{
+ ip_set_ip_t addr, *paddr = &addr;
@@ -1070,7 +1070,7 @@
+ DP("ip:%u.%u.%u.%u/%u", NIPQUAD(addr), cidr);
+#endif
+ n = cidr / 8;
-+ t = cidr % 8;
++ t = cidr % 8;
+ a = &((unsigned char *)paddr)[n];
+ *a = *a /(1 << (8 - t)) + shifts[t];
+#ifdef __KERNEL__
@@ -1144,7 +1144,7 @@
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
++ * published by the Free Software Foundation.
+ */
+
+/* Kernel module for IP set management */
@@ -1183,12 +1183,12 @@
+
+/*
+ * Sets are identified either by the index in ip_set_list or by id.
-+ * The id never changes and is used to find a key in the hash.
-+ * The index may change by swapping and used at all other places
++ * The id never changes and is used to find a key in the hash.
++ * The index may change by swapping and used at all other places
+ * (set/SET netfilter modules, binding value, etc.)
+ *
+ * Userspace requests are serialized by ip_set_mutex and sets can
-+ * be deleted only from userspace. Therefore ip_set_list locking
++ * be deleted only from userspace. Therefore ip_set_list locking
+ * must obey the following rules:
+ *
+ * - kernel requests: read and write locking mandatory
@@ -1219,35 +1219,35 @@
+ list_for_each_entry(set_hash, &ip_set_hash[key], list)
+ if (set_hash->id == id && set_hash->ip == ip)
+ return set_hash;
-+
++
+ return NULL;
+}
+
+static ip_set_id_t
+ip_set_find_in_hash(ip_set_id_t id, ip_set_ip_t ip)
+{
-+ u_int32_t key = jhash_2words(id, ip, ip_set_hash_random)
++ u_int32_t key = jhash_2words(id, ip, ip_set_hash_random)
+ % ip_set_bindings_hash_size;
+ struct ip_set_hash *set_hash;
+
+ ASSERT_READ_LOCK(&ip_set_lock);
+ IP_SET_ASSERT(ip_set_list[id]);
-+ DP("set: %s, ip: %u.%u.%u.%u", ip_set_list[id]->name, HIPQUAD(ip));
-+
++ DP("set: %s, ip: %u.%u.%u.%u", ip_set_list[id]->name, HIPQUAD(ip));
++
+ set_hash = __ip_set_find(key, id, ip);
-+
-+ DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
++
++ DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
+ HIPQUAD(ip),
+ set_hash != NULL ? ip_set_list[set_hash->binding]->name : "");
+
+ return (set_hash != NULL ? set_hash->binding : IP_SET_INVALID_ID);
+}
+
-+static inline void
++static inline void
+__set_hash_del(struct ip_set_hash *set_hash)
+{
+ ASSERT_WRITE_LOCK(&ip_set_lock);
-+ IP_SET_ASSERT(ip_set_list[set_hash->binding]);
++ IP_SET_ASSERT(ip_set_list[set_hash->binding]);
+
+ __ip_set_put(set_hash->binding);
+ list_del(&set_hash->list);
@@ -1260,9 +1260,9 @@
+ u_int32_t key = jhash_2words(id, ip, ip_set_hash_random)
+ % ip_set_bindings_hash_size;
+ struct ip_set_hash *set_hash;
-+
++
+ IP_SET_ASSERT(ip_set_list[id]);
-+ DP("set: %s, ip: %u.%u.%u.%u", ip_set_list[id]->name, HIPQUAD(ip));
++ DP("set: %s, ip: %u.%u.%u.%u", ip_set_list[id]->name, HIPQUAD(ip));
+ write_lock_bh(&ip_set_lock);
+ set_hash = __ip_set_find(key, id, ip);
+ DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
@@ -1275,17 +1275,17 @@
+ return 0;
+}
+
-+static int
++static int
+ip_set_hash_add(ip_set_id_t id, ip_set_ip_t ip, ip_set_id_t binding)
+{
+ u_int32_t key = jhash_2words(id, ip, ip_set_hash_random)
+ % ip_set_bindings_hash_size;
+ struct ip_set_hash *set_hash;
+ int ret = 0;
-+
++
+ IP_SET_ASSERT(ip_set_list[id]);
+ IP_SET_ASSERT(ip_set_list[binding]);
-+ DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
++ DP("set: %s, ip: %u.%u.%u.%u, binding: %s", ip_set_list[id]->name,
+ HIPQUAD(ip), ip_set_list[binding]->name);
+ write_lock_bh(&ip_set_lock);
+ set_hash = __ip_set_find(key, id, ip);
@@ -1300,7 +1300,7 @@
+ set_hash->ip = ip;
+ list_add(&set_hash->list, &ip_set_hash[key]);
+ } else {
-+ IP_SET_ASSERT(ip_set_list[set_hash->binding]);
++ IP_SET_ASSERT(ip_set_list[set_hash->binding]);
+ DP("overwrite binding: %s",
+ ip_set_list[set_hash->binding]->name);
+ __ip_set_put(set_hash->binding);
@@ -1353,7 +1353,7 @@
+ ip_set_ip_t ip;
+ int res;
+ unsigned char i = 0;
-+
++
+ IP_SET_ASSERT(flags[i]);
+ read_lock_bh(&ip_set_lock);
+ do {
@@ -1364,8 +1364,8 @@
+ res = set->type->testip_kernel(set, skb, &ip, flags, i++);
+ read_unlock_bh(&set->lock);
+ i += !!(set->type->features & IPSET_DATA_DOUBLE);
-+ } while (res > 0
-+ && flags[i]
++ } while (res > 0
++ && flags[i]
+ && follow_bindings(index, set, ip));
+ read_unlock_bh(&ip_set_lock);
+
@@ -1394,7 +1394,7 @@
+ write_unlock_bh(&set->lock);
+ i += !!(set->type->features & IPSET_DATA_DOUBLE);
+ } while ((res == 0 || res == -EEXIST)
-+ && flags[i]
++ && flags[i]
+ && follow_bindings(index, set, ip));
+ read_unlock_bh(&ip_set_lock);
+
@@ -1425,7 +1425,7 @@
+ write_unlock_bh(&set->lock);
+ i += !!(set->type->features & IPSET_DATA_DOUBLE);
+ } while ((res == 0 || res == -EEXIST)
-+ && flags[i]
++ && flags[i]
+ && follow_bindings(index, set, ip));
+ read_unlock_bh(&ip_set_lock);
+}
@@ -1443,11 +1443,11 @@
+ return NULL;
+}
+
-+int
++int
+ip_set_register_set_type(struct ip_set_type *set_type)
+{
+ int ret = 0;
-+
++
+ if (set_type->protocol_version != IP_SET_PROTOCOL_VERSION) {
+ ip_set_printk("'%s' uses wrong protocol version %u (want %u)",
+ set_type->typename,
@@ -1459,7 +1459,7 @@
+ write_lock_bh(&ip_set_lock);
+ if (find_set_type(set_type->typename)) {
+ /* Duplicate! */
-+ ip_set_printk("'%s' already registered!",
++ ip_set_printk("'%s' already registered!",
+ set_type->typename);
+ ret = -EINVAL;
+ goto unlock;
@@ -1505,7 +1505,7 @@
+ip_set_get_byname(const char *name)
+{
+ ip_set_id_t i, index = IP_SET_INVALID_ID;
-+
++
+ down(&ip_set_app_mutex);
+ for (i = 0; i < ip_set_max; i++) {
+ if (ip_set_list[i] != NULL
@@ -1531,12 +1531,12 @@
+
+ if (index >= ip_set_max)
+ return IP_SET_INVALID_ID;
-+
++
+ if (ip_set_list[index])
+ __ip_set_get(index);
+ else
+ index = IP_SET_INVALID_ID;
-+
++
+ up(&ip_set_app_mutex);
+ return index;
+}
@@ -1559,7 +1559,7 @@
+ip_set_find_byname(const char *name)
+{
+ ip_set_id_t i, index = IP_SET_INVALID_ID;
-+
++
+ for (i = 0; i < ip_set_max; i++) {
+ if (ip_set_list[i] != NULL
+ && strcmp(ip_set_list[i]->name, name) == 0) {
@@ -1575,7 +1575,7 @@
+{
+ if (index >= ip_set_max || ip_set_list[index] == NULL)
+ index = IP_SET_INVALID_ID;
-+
++
+ return index;
+}
+
@@ -1606,7 +1606,7 @@
+ struct ip_set *set = ip_set_list[index];
+ ip_set_ip_t ip;
+ int res;
-+
++
+ IP_SET_ASSERT(set);
+ do {
+ write_lock_bh(&set->lock);
@@ -1638,7 +1638,7 @@
+ struct ip_set *set = ip_set_list[index];
+ ip_set_ip_t ip;
+ int res;
-+
++
+ IP_SET_ASSERT(set);
+ write_lock_bh(&set->lock);
+ res = set->type->delip(set,
@@ -1682,18 +1682,18 @@
+ IP_SET_ASSERT(set);
+ if (size < sizeof(struct ip_set_req_bind))
+ return -EINVAL;
-+
++
+ req_bind = (struct ip_set_req_bind *) data;
+ req_bind->binding[IP_SET_MAXNAMELEN - 1] = '\0';
+
+ if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
+ /* Default binding of a set */
+ char *binding_name;
-+
++
+ if (size != sizeof(struct ip_set_req_bind) + IP_SET_MAXNAMELEN)
+ return -EINVAL;
+
-+ binding_name = (char *)(data + sizeof(struct ip_set_req_bind));
++ binding_name = (char *)(data + sizeof(struct ip_set_req_bind));
+ binding_name[IP_SET_MAXNAMELEN - 1] = '\0';
+
+ binding = ip_set_find_byname(binding_name);
@@ -1720,7 +1720,7 @@
+ &ip);
+ DP("set %s, ip: %u.%u.%u.%u, binding %s",
+ set->name, HIPQUAD(ip), ip_set_list[binding]->name);
-+
++
+ if (res >= 0)
+ res = ip_set_hash_add(set->id, ip, binding);
+
@@ -1769,10 +1769,10 @@
+ DP("");
+ if (size < sizeof(struct ip_set_req_bind))
+ return -EINVAL;
-+
++
+ req_bind = (struct ip_set_req_bind *) data;
+ req_bind->binding[IP_SET_MAXNAMELEN - 1] = '\0';
-+
++
+ DP("%u %s", index, req_bind->binding);
+ if (index == IP_SET_INVALID_ID) {
+ /* unbind :all: */
@@ -1792,7 +1792,7 @@
+ DP("unreachable reached!");
+ return -EINVAL;
+ }
-+
++
+ set = ip_set_list[index];
+ IP_SET_ASSERT(set);
+ if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
@@ -1801,7 +1801,7 @@
+
+ if (binding == IP_SET_INVALID_ID)
+ return -ENOENT;
-+
++
+ write_lock_bh(&ip_set_lock);
+ /* Sets in hash values are referenced */
+ __ip_set_put(set->binding);
@@ -1817,7 +1817,7 @@
+ write_unlock_bh(&ip_set_lock);
+ return 0;
+ }
-+
++
+ res = __ip_set_testip(set,
+ data + sizeof(struct ip_set_req_bind),
+ size - sizeof(struct ip_set_req_bind),
@@ -1844,24 +1844,24 @@
+ IP_SET_ASSERT(set);
+ if (size < sizeof(struct ip_set_req_bind))
+ return -EINVAL;
-+
++
+ req_bind = (struct ip_set_req_bind *) data;
+ req_bind->binding[IP_SET_MAXNAMELEN - 1] = '\0';
+
+ if (strcmp(req_bind->binding, IPSET_TOKEN_DEFAULT) == 0) {
+ /* Default binding of set */
+ char *binding_name;
-+
++
+ if (size != sizeof(struct ip_set_req_bind) + IP_SET_MAXNAMELEN)
+ return -EINVAL;
+
-+ binding_name = (char *)(data + sizeof(struct ip_set_req_bind));
++ binding_name = (char *)(data + sizeof(struct ip_set_req_bind));
+ binding_name[IP_SET_MAXNAMELEN - 1] = '\0';
+
+ binding = ip_set_find_byname(binding_name);
+ if (binding == IP_SET_INVALID_ID)
+ return -ENOENT;
-+
++
+ res = (set->binding == binding) ? -EEXIST : 0;
+
+ return res;
@@ -1869,15 +1869,15 @@
+ binding = ip_set_find_byname(req_bind->binding);
+ if (binding == IP_SET_INVALID_ID)
+ return -ENOENT;
-+
-+
++
++
+ res = __ip_set_testip(set,
+ data + sizeof(struct ip_set_req_bind),
+ size - sizeof(struct ip_set_req_bind),
+ &ip);
+ DP("set %s, ip: %u.%u.%u.%u, binding %s",
+ set->name, HIPQUAD(ip), ip_set_list[binding]->name);
-+
++
+ if (res >= 0)
+ res = (ip_set_find_in_hash(set->id, ip) == binding)
+ ? -EEXIST : 0;
@@ -1889,7 +1889,7 @@
+find_set_type_rlock(const char *typename)
+{
+ struct ip_set_type *type;
-+
++
+ read_lock_bh(&ip_set_lock);
+ type = find_set_type(typename);
+ if (type == NULL)
@@ -1918,7 +1918,7 @@
+ /* No free slot remained */
+ return -ERANGE;
+ /* Check that index is usable as id (swapping) */
-+ check:
++ check:
+ for (i = 0; i < ip_set_max; i++) {
+ if (ip_set_list[i] != NULL
+ && ip_set_list[i]->id == *id) {
@@ -1998,7 +1998,7 @@
+
+ /*
+ * Here, we have a valid, constructed set. &ip_set_lock again,
-+ * find free id/index and check that it is not already in
++ * find free id/index and check that it is not already in
+ * ip_set_list.
+ */
+ write_lock_bh(&ip_set_lock);
@@ -2013,7 +2013,7 @@
+ res = -ERANGE;
+ goto cleanup;
+ }
-+
++
+ /*
+ * Finally! Add our shiny new set to the list, and be done.
+ */
@@ -2022,7 +2022,7 @@
+ ip_set_list[index] = set;
+ write_unlock_bh(&ip_set_lock);
+ return res;
-+
++
+ cleanup:
+ write_unlock_bh(&ip_set_lock);
+ set->type->destroy(set);
@@ -2072,7 +2072,7 @@
+ ip_set_destroy_set(index);
+ } else {
+ for (i = 0; i < ip_set_max; i++) {
-+ if (ip_set_list[i] != NULL
++ if (ip_set_list[i] != NULL
+ && (atomic_read(&ip_set_list[i]->ref)))
+ return -EBUSY;
+ }
@@ -2095,7 +2095,7 @@
+ write_unlock_bh(&set->lock);
+}
+
-+/*
++/*
+ * Flush data in a set - or in all sets
+ */
+static int
@@ -2122,7 +2122,7 @@
+ write_lock_bh(&ip_set_lock);
+ for (i = 0; i < ip_set_max; i++) {
+ if (ip_set_list[i] != NULL
-+ && strncmp(ip_set_list[i]->name,
++ && strncmp(ip_set_list[i]->name,
+ name,
+ IP_SET_MAXNAMELEN - 1) == 0) {
+ res = -EEXIST;
@@ -2152,7 +2152,7 @@
+ if (from->type->features != to->type->features)
+ return -ENOEXEC;
+
-+ /* No magic here: ref munging protected by the mutex */
++ /* No magic here: ref munging protected by the mutex */
+ write_lock_bh(&ip_set_lock);
+ strncpy(from_name, from->name, IP_SET_MAXNAMELEN);
+ from_ref = atomic_read(&from->ref);
@@ -2161,10 +2161,10 @@
+ atomic_set(&from->ref, atomic_read(&to->ref));
+ strncpy(to->name, from_name, IP_SET_MAXNAMELEN);
+ atomic_set(&to->ref, from_ref);
-+
++
+ ip_set_list[from_index] = to;
+ ip_set_list[to_index] = from;
-+
++
+ write_unlock_bh(&ip_set_lock);
+ return 0;
+}
@@ -2194,7 +2194,7 @@
+ ip_set_id_t id, void *data, int *used)
+{
+ if (set_hash->id == id) {
-+ struct ip_set_hash_list *hash_list =
++ struct ip_set_hash_list *hash_list =
+ (struct ip_set_hash_list *)(data + *used);
+
+ hash_list->ip = set_hash->ip;
@@ -2257,7 +2257,7 @@
+
+ /* Fill in set spefific bindings data */
+ FOREACH_HASH_DO(__set_hash_bindings, set->id, data, used);
-+
++
+ return 0;
+
+ unlock_set:
@@ -2287,7 +2287,7 @@
+ *used += sizeof(struct ip_set_save);
+
+ set = ip_set_list[index];
-+ DP("set: %s, used: %u(%u) %p %p", set->name, *used, len,
++ DP("set: %s, used: %u(%u) %p %p", set->name, *used, len,
+ data, data + *used);
+
+ read_lock_bh(&set->lock);
@@ -2336,7 +2336,7 @@
+{
+ if (*res == 0
+ && (id == IP_SET_INVALID_ID || set_hash->id == id)) {
-+ struct ip_set_hash_save *hash_save =
++ struct ip_set_hash_save *hash_save =
+ (struct ip_set_hash_save *)(data + *used);
+ /* Ensure bindings size */
+ if (*used + sizeof(struct ip_set_hash_save) > len) {
@@ -2377,7 +2377,7 @@
+ index = ip_set_list[index]->id;
+ FOREACH_HASH_DO(__set_hash_save_bindings, index, data, used, len, &res);
+
-+ return res;
++ return res;
+}
+
+/*
@@ -2396,7 +2396,7 @@
+ /* Loop to restore sets */
+ while (1) {
+ line++;
-+
++
+ DP("%u %u %u", used, sizeof(struct ip_set_restore), len);
+ /* Get and ensure header size */
+ if (used + sizeof(struct ip_set_restore) > len)
@@ -2405,8 +2405,8 @@
+ used += sizeof(struct ip_set_restore);
+
+ /* Ensure data size */
-+ if (used
-+ + set_restore->header_size
++ if (used
++ + set_restore->header_size
+ + set_restore->members_size > len)
+ return line;
+
@@ -2415,7 +2415,7 @@
+ line--;
+ goto bindings;
+ }
-+
++
+ /* Try to create the set */
+ DP("restore %s %s", set_restore->name, set_restore->typename);
+ res = ip_set_create(set_restore->name,
@@ -2423,7 +2423,7 @@
+ set_restore->index,
+ data + used,
+ set_restore->header_size);
-+
++
+ if (res != 0)
+ return line;
+ used += set_restore->header_size;
@@ -2444,7 +2444,7 @@
+ res = __ip_set_addip(index,
+ data + used + members_size,
+ set->type->reqsize);
-+ if (!(res == 0 || res == -EEXIST))
++ if (!(res == 0 || res == -EEXIST))
+ return line;
+ members_size += set->type->reqsize;
+ }
@@ -2453,25 +2453,25 @@
+ set_restore->members_size, members_size);
+ if (members_size != set_restore->members_size)
+ return line++;
-+ used += set_restore->members_size;
++ used += set_restore->members_size;
+ }
-+
++
+ bindings:
+ /* Loop to restore bindings */
+ while (used < len) {
+ line++;
+
-+ DP("restore binding, line %u", line);
++ DP("restore binding, line %u", line);
+ /* Get and ensure size */
+ if (used + sizeof(struct ip_set_hash_save) > len)
+ return line;
+ hash_save = (struct ip_set_hash_save *) (data + used);
+ used += sizeof(struct ip_set_hash_save);
-+
++
+ /* hash_save->id is used to store the index */
+ index = ip_set_find_byindex(hash_save->id);
+ DP("restore binding index %u, id %u, %u -> %u",
-+ index, hash_save->id, hash_save->ip, hash_save->binding);
++ index, hash_save->id, hash_save->ip, hash_save->binding);
+ if (index != hash_save->id)
+ return line;
+ if (ip_set_find_byindex(hash_save->binding) == IP_SET_INVALID_ID) {
@@ -2481,7 +2481,7 @@
+ set = ip_set_list[hash_save->id];
+ /* Null valued IP means default binding */
+ if (hash_save->ip)
-+ res = ip_set_hash_add(set->id,
++ res = ip_set_hash_add(set->id,
+ hash_save->ip,
+ hash_save->binding);
+ else {
@@ -2497,8 +2497,8 @@
+ }
+ if (used != len)
+ return line;
-+
-+ return 0;
++
++ return 0;
+}
+
+static int
@@ -2545,7 +2545,7 @@
+
+ op = (unsigned *)data;
+ DP("op=%x", *op);
-+
++
+ if (*op < IP_SET_OP_VERSION) {
+ /* Check the version at the beginning of operations */
+ struct ip_set_req_version *req_version =
@@ -2560,7 +2560,7 @@
+ case IP_SET_OP_CREATE:{
+ struct ip_set_req_create *req_create
+ = (struct ip_set_req_create *) data;
-+
++
+ if (len < sizeof(struct ip_set_req_create)) {
+ ip_set_printk("short CREATE data (want >=%zu, got %u)",
+ sizeof(struct ip_set_req_create), len);
@@ -2579,7 +2579,7 @@
+ case IP_SET_OP_DESTROY:{
+ struct ip_set_req_std *req_destroy
+ = (struct ip_set_req_std *) data;
-+
++
+ if (len != sizeof(struct ip_set_req_std)) {
+ ip_set_printk("invalid DESTROY data (want %zu, got %u)",
+ sizeof(struct ip_set_req_std), len);
@@ -2598,7 +2598,7 @@
+ goto done;
+ }
+ }
-+
++
+ res = ip_set_destroy(index);
+ goto done;
+ }
@@ -2640,7 +2640,7 @@
+
+ req_rename->name[IP_SET_MAXNAMELEN - 1] = '\0';
+ req_rename->typename[IP_SET_MAXNAMELEN - 1] = '\0';
-+
++
+ index = ip_set_find_byname(req_rename->name);
+ if (index == IP_SET_INVALID_ID) {
+ res = -ENOENT;
@@ -2677,10 +2677,10 @@
+ res = ip_set_swap(index, to_index);
+ goto done;
+ }
-+ default:
++ default:
+ break; /* Set identified by id */
+ }
-+
++
+ /* There we may have add/del/test/bind/unbind/test_bind operations */
+ if (*op < IP_SET_OP_ADD_IP || *op > IP_SET_OP_TEST_BIND_SET) {
+ res = -EBADMSG;
@@ -2697,7 +2697,7 @@
+ req_adt = (struct ip_set_req_adt *) data;
+
+ /* -U :all: :all:|:default: uses IP_SET_INVALID_ID */
-+ if (!(*op == IP_SET_OP_UNBIND_SET
++ if (!(*op == IP_SET_OP_UNBIND_SET
+ && req_adt->index == IP_SET_INVALID_ID)) {
+ index = ip_set_find_byindex(req_adt->index);
+ if (index == IP_SET_INVALID_ID) {
@@ -2716,7 +2716,7 @@
+ return res;
+}
+
-+static int
++static int
+ip_set_sockfn_get(struct sock *sk, int optval, void *user, int *len)
+{
+ int res = 0;
@@ -2851,7 +2851,7 @@
+ req_max_sets->set.index = IP_SET_INVALID_ID;
+ } else {
+ req_max_sets->set.name[IP_SET_MAXNAMELEN - 1] = '\0';
-+ req_max_sets->set.index =
++ req_max_sets->set.index =
+ ip_set_find_byname(req_max_sets->set.name);
+ if (req_max_sets->set.index == IP_SET_INVALID_ID) {
+ res = -ENOENT;
@@ -2866,7 +2866,7 @@
+ }
+ goto copy;
+ }
-+ case IP_SET_OP_LIST_SIZE:
++ case IP_SET_OP_LIST_SIZE:
+ case IP_SET_OP_SAVE_SIZE: {
+ struct ip_set_req_setnames *req_setnames
+ = (struct ip_set_req_setnames *) data;
@@ -2887,7 +2887,7 @@
+ for (i = 0; i < ip_set_max; i++) {
+ if (ip_set_list[i] == NULL)
+ continue;
-+ name_list = (struct ip_set_name_list *)
++ name_list = (struct ip_set_name_list *)
+ (data + used);
+ used += sizeof(struct ip_set_name_list);
+ if (used > copylen) {
@@ -2917,7 +2917,7 @@
+ + set->type->header_size
+ + set->type->list_members_size(set);
+ /* Sets are identified by id in the hash */
-+ FOREACH_HASH_DO(__set_hash_bindings_size_list,
++ FOREACH_HASH_DO(__set_hash_bindings_size_list,
+ set->id, &req_setnames->size);
+ break;
+ }
@@ -3007,7 +3007,7 @@
+ }
+ if (res == 0)
+ res = ip_set_save_bindings(index, data, &used, *len);
-+
++
+ if (res != 0)
+ goto done;
+ else if (copylen != used) {
@@ -3050,7 +3050,7 @@
+ ? ip_set_list[index]->name
+ : ":all:", copylen);
+ res = copy_to_user(user, data, copylen);
-+
++
+ done:
+ up(&ip_set_app_mutex);
+ vfree(data);
@@ -3148,7 +3148,7 @@
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
++ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an ip hash set */
@@ -3191,7 +3191,7 @@
+ *hash_ip = ip & map->netmask;
+ DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u, %u.%u.%u.%u",
+ set->name, HIPQUAD(ip), HIPQUAD(*hash_ip), HIPQUAD(map->netmask));
-+
++
+ for (i = 0; i < map->probes; i++) {
+ id = jhash_ip(map, i, *hash_ip) % map->hashsize;
+ DP("hash key: %u", id);
@@ -3214,7 +3214,7 @@
+testip(struct ip_set *set, const void *data, size_t size,
+ ip_set_ip_t *hash_ip)
+{
-+ struct ip_set_req_iphash *req =
++ struct ip_set_req_iphash *req =
+ (struct ip_set_req_iphash *) data;
+
+ if (size != sizeof(struct ip_set_req_iphash)) {
@@ -3227,19 +3227,19 @@
+}
+
+static int
-+testip_kernel(struct ip_set *set,
++testip_kernel(struct ip_set *set,
+ const struct sk_buff *skb,
+ ip_set_ip_t *hash_ip,
+ const u_int32_t *flags,
+ unsigned char index)
+{
+ return __testip(set,
-+ ntohl(flags[index] & IPSET_SRC
++ ntohl(flags[index] & IPSET_SRC
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
++ ? ip_hdr(skb)->saddr
+ : ip_hdr(skb)->daddr),
+#else
-+ ? skb->nh.iph->saddr
++ ? skb->nh.iph->saddr
+ : skb->nh.iph->daddr),
+#endif
+ hash_ip);
@@ -3251,12 +3251,12 @@
+ __u32 probe;
+ u_int16_t i;
+ ip_set_ip_t *elem;
-+
++
+ if (!ip || map->elements >= limit)
+ return -ERANGE;
+
+ *hash_ip = ip & map->netmask;
-+
++
+ for (i = 0; i < map->probes; i++) {
+ probe = jhash_ip(map, i, *hash_ip) % map->hashsize;
+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, probe);
@@ -3276,7 +3276,7 @@
+addip(struct ip_set *set, const void *data, size_t size,
+ ip_set_ip_t *hash_ip)
+{
-+ struct ip_set_req_iphash *req =
++ struct ip_set_req_iphash *req =
+ (struct ip_set_req_iphash *) data;
+
+ if (size != sizeof(struct ip_set_req_iphash)) {
@@ -3289,19 +3289,19 @@
+}
+
+static int
-+addip_kernel(struct ip_set *set,
++addip_kernel(struct ip_set *set,
+ const struct sk_buff *skb,
+ ip_set_ip_t *hash_ip,
+ const u_int32_t *flags,
+ unsigned char index)
+{
+ return __addip((struct ip_set_iphash *) set->data,
-+ ntohl(flags[index] & IPSET_SRC
++ ntohl(flags[index] & IPSET_SRC
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
++ ? ip_hdr(skb)->saddr
+ : ip_hdr(skb)->daddr),
+#else
-+ ? skb->nh.iph->saddr
++ ? skb->nh.iph->saddr
+ : skb->nh.iph->daddr),
+#endif
+ hash_ip);
@@ -3315,23 +3315,23 @@
+ u_int32_t i, hashsize = map->hashsize;
+ int res;
+ struct ip_set_iphash *tmp;
-+
++
+ if (map->resize == 0)
+ return -ERANGE;
+
+ again:
+ res = 0;
-+
++
+ /* Calculate new hash size */
+ hashsize += (hashsize * map->resize)/100;
+ if (hashsize == map->hashsize)
+ hashsize++;
-+
++
+ ip_set_printk("rehashing of set %s triggered: "
+ "hashsize grows from %u to %u",
+ set->name, map->hashsize, hashsize);
+
-+ tmp = kmalloc(sizeof(struct ip_set_iphash)
++ tmp = kmalloc(sizeof(struct ip_set_iphash)
+ + map->probes * sizeof(uint32_t), GFP_ATOMIC);
+ if (!tmp) {
+ DP("out of memory for %d bytes",
@@ -3351,11 +3351,11 @@
+ tmp->resize = map->resize;
+ tmp->netmask = map->netmask;
+ memcpy(tmp->initval, map->initval, map->probes * sizeof(uint32_t));
-+
++
+ write_lock_bh(&set->lock);
+ map = (struct ip_set_iphash *) set->data; /* Play safe */
+ for (i = 0; i < map->hashsize && res == 0; i++) {
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
++ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
+ if (*elem)
+ res = __addip(tmp, *elem, &hash_ip);
+ }
@@ -3366,7 +3366,7 @@
+ kfree(tmp);
+ goto again;
+ }
-+
++
+ /* Success at resizing! */
+ members = map->members;
+
@@ -3392,7 +3392,7 @@
+ id = hash_id(set, ip, hash_ip);
+ if (id == UINT_MAX)
+ return -EEXIST;
-+
++
+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
+ *elem = 0;
+ map->elements--;
@@ -3417,19 +3417,19 @@
+}
+
+static int
-+delip_kernel(struct ip_set *set,
++delip_kernel(struct ip_set *set,
+ const struct sk_buff *skb,
+ ip_set_ip_t *hash_ip,
+ const u_int32_t *flags,
+ unsigned char index)
+{
+ return __delip(set,
-+ ntohl(flags[index] & IPSET_SRC
++ ntohl(flags[index] & IPSET_SRC
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
++ ? ip_hdr(skb)->saddr
+ : ip_hdr(skb)->daddr),
+#else
-+ ? skb->nh.iph->saddr
++ ? skb->nh.iph->saddr
+ : skb->nh.iph->daddr),
+#endif
+ hash_ip);
@@ -3459,7 +3459,7 @@
+ return -ENOEXEC;
+ }
+
-+ map = kmalloc(sizeof(struct ip_set_iphash)
++ map = kmalloc(sizeof(struct ip_set_iphash)
+ + req->probes * sizeof(uint32_t), GFP_KERNEL);
+ if (!map) {
+ DP("out of memory for %d bytes",
@@ -3527,7 +3527,7 @@
+ ip_set_ip_t i, *elem;
+
+ for (i = 0; i < map->hashsize; i++) {
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
++ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
+ ((ip_set_ip_t *)data)[i] = *elem;
+ }
+}
@@ -3582,7 +3582,7 @@
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
++ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the single bitmap type */
@@ -3610,7 +3610,7 @@
+__testip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
+{
+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
-+
++
+ if (ip < map->first_ip || ip > map->last_ip)
+ return -ERANGE;
+
@@ -3624,7 +3624,7 @@
+testip(struct ip_set *set, const void *data, size_t size,
+ ip_set_ip_t *hash_ip)
+{
-+ struct ip_set_req_ipmap *req =
++ struct ip_set_req_ipmap *req =
+ (struct ip_set_req_ipmap *) data;
+
+ if (size != sizeof(struct ip_set_req_ipmap)) {
@@ -3637,7 +3637,7 @@
+}
+
+static int
-+testip_kernel(struct ip_set *set,
++testip_kernel(struct ip_set *set,
+ const struct sk_buff *skb,
+ ip_set_ip_t *hash_ip,
+ const u_int32_t *flags,
@@ -3646,10 +3646,10 @@
+ int res = __testip(set,
+ ntohl(flags[index] & IPSET_SRC
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
++ ? ip_hdr(skb)->saddr
+ : ip_hdr(skb)->daddr),
+#else
-+ ? skb->nh.iph->saddr
++ ? skb->nh.iph->saddr
+ : skb->nh.iph->daddr),
+#endif
+ hash_ip);
@@ -3676,7 +3676,7 @@
+addip(struct ip_set *set, const void *data, size_t size,
+ ip_set_ip_t *hash_ip)
+{
-+ struct ip_set_req_ipmap *req =
++ struct ip_set_req_ipmap *req =
+ (struct ip_set_req_ipmap *) data;
+
+ if (size != sizeof(struct ip_set_req_ipmap)) {
@@ -3690,25 +3690,25 @@
+}
+
+static int
-+addip_kernel(struct ip_set *set,
++addip_kernel(struct ip_set *set,
+ const struct sk_buff *skb,
+ ip_set_ip_t *hash_ip,
+ const u_int32_t *flags,
+ unsigned char index)
+{
+ return __addip(set,
-+ ntohl(flags[index] & IPSET_SRC
++ ntohl(flags[index] & IPSET_SRC
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
++ ? ip_hdr(skb)->saddr
+ : ip_hdr(skb)->daddr),
+#else
-+ ? skb->nh.iph->saddr
++ ? skb->nh.iph->saddr
+ : skb->nh.iph->daddr),
+#endif
+ hash_ip);
+}
+
-+static inline int
++static inline int
+__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
+{
+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
@@ -3720,7 +3720,7 @@
+ DP("%u.%u.%u.%u, %u.%u.%u.%u", HIPQUAD(ip), HIPQUAD(*hash_ip));
+ if (!test_and_clear_bit(ip_to_id(map, *hash_ip), map->members))
+ return -EEXIST;
-+
++
+ return 0;
+}
+
@@ -3748,12 +3748,12 @@
+ unsigned char index)
+{
+ return __delip(set,
-+ ntohl(flags[index] & IPSET_SRC
++ ntohl(flags[index] & IPSET_SRC
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
++ ? ip_hdr(skb)->saddr
+ : ip_hdr(skb)->daddr),
+#else
-+ ? skb->nh.iph->saddr
++ ? skb->nh.iph->saddr
+ : skb->nh.iph->daddr),
+#endif
+ hash_ip);
@@ -3797,12 +3797,12 @@
+ } else {
+ unsigned int mask_bits, netmask_bits;
+ ip_set_ip_t mask;
-+
++
+ map->first_ip &= map->netmask; /* Should we better bark? */
-+
++
+ mask = range_to_mask(map->first_ip, map->last_ip, &mask_bits);
+ netmask_bits = mask_to_bits(map->netmask);
-+
++
+ if ((!mask && (map->first_ip || map->last_ip != 0xFFFFFFFF))
+ || netmask_bits <= mask_bits)
+ return -ENOEXEC;
@@ -3827,7 +3827,7 @@
+ return -ENOMEM;
+ }
+ memset(map->members, 0, newbytes);
-+
++
+ set->data = map;
+ return 0;
+}
@@ -3835,10 +3835,10 @@
+static void destroy(struct ip_set *set)
+{
+ struct ip_set_ipmap *map = (struct ip_set_ipmap *) set->data;
-+
++
+ kfree(map->members);
+ kfree(map);
-+
++
+ set->data = NULL;
+}
+
@@ -3919,7 +3919,7 @@
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
++ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an ip+port hash set */
@@ -3961,7 +3961,7 @@
+ switch (iph->protocol) {
+ case IPPROTO_TCP: {
+ struct tcphdr tcph;
-+
++
+ /* See comments at tcp_match in ip_tables.c */
+ if (offset)
+ return INVALID_PORT;
@@ -3973,7 +3973,7 @@
+#endif
+ /* No choice either */
+ return INVALID_PORT;
-+
++
+ return ntohs(flags & IPSET_SRC ?
+ tcph.source : tcph.dest);
+ }
@@ -3990,7 +3990,7 @@
+#endif
+ /* No choice either */
+ return INVALID_PORT;
-+
++
+ return ntohs(flags & IPSET_SRC ?
+ udph.source : udph.dest);
+ }
@@ -4011,7 +4011,7 @@
+hash_id(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t port,
+ ip_set_ip_t *hash_ip)
+{
-+ struct ip_set_ipporthash *map =
++ struct ip_set_ipporthash *map =
+ (struct ip_set_ipporthash *) set->data;
+ __u32 id;
+ u_int16_t i;
@@ -4020,7 +4020,7 @@
+ *hash_ip = HASH_IP(map, ip, port);
+ DP("set: %s, ipport:%u.%u.%u.%u:%u, %u.%u.%u.%u",
+ set->name, HIPQUAD(ip), port, HIPQUAD(*hash_ip));
-+
++
+ for (i = 0; i < map->probes; i++) {
+ id = jhash_ip(map, i, *hash_ip) % map->hashsize;
+ DP("hash key: %u", id);
@@ -4038,7 +4038,7 @@
+ ip_set_ip_t *hash_ip)
+{
+ struct ip_set_ipporthash *map = (struct ip_set_ipporthash *) set->data;
-+
++
+ if (ip < map->first_ip || ip > map->last_ip)
+ return -ERANGE;
+
@@ -4049,7 +4049,7 @@
+testip(struct ip_set *set, const void *data, size_t size,
+ ip_set_ip_t *hash_ip)
+{
-+ struct ip_set_req_ipporthash *req =
++ struct ip_set_req_ipporthash *req =
+ (struct ip_set_req_ipporthash *) data;
+
+ if (size != sizeof(struct ip_set_req_ipporthash)) {
@@ -4062,7 +4062,7 @@
+}
+
+static int
-+testip_kernel(struct ip_set *set,
++testip_kernel(struct ip_set *set,
+ const struct sk_buff *skb,
+ ip_set_ip_t *hash_ip,
+ const u_int32_t *flags,
@@ -4073,7 +4073,7 @@
+
+ if (flags[index+1] == 0)
+ return 0;
-+
++
+ port = get_port(skb, flags[index+1]);
+
+ DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
@@ -4086,24 +4086,24 @@
+ NIPQUAD(skb->nh.iph->daddr));
+#endif
+ DP("flag %s port %u",
-+ flags[index+1] & IPSET_SRC ? "SRC" : "DST",
-+ port);
++ flags[index+1] & IPSET_SRC ? "SRC" : "DST",
++ port);
+ if (port == INVALID_PORT)
-+ return 0;
++ return 0;
+
+ res = __testip(set,
-+ ntohl(flags[index] & IPSET_SRC
++ ntohl(flags[index] & IPSET_SRC
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
++ ? ip_hdr(skb)->saddr
+ : ip_hdr(skb)->daddr),
+#else
-+ ? skb->nh.iph->saddr
++ ? skb->nh.iph->saddr
+ : skb->nh.iph->daddr),
+#endif
+ port,
+ hash_ip);
+ return (res < 0 ? 0 : res);
-+
++
+}
+
+static inline int
@@ -4138,7 +4138,7 @@
+ return -ERANGE;
+
+ *hash_ip = HASH_IP(map, ip, port);
-+
++
+ return __add_haship(map, *hash_ip);
+}
+
@@ -4146,7 +4146,7 @@
+addip(struct ip_set *set, const void *data, size_t size,
+ ip_set_ip_t *hash_ip)
+{
-+ struct ip_set_req_ipporthash *req =
++ struct ip_set_req_ipporthash *req =
+ (struct ip_set_req_ipporthash *) data;
+
+ if (size != sizeof(struct ip_set_req_ipporthash)) {
@@ -4155,12 +4155,12 @@
+ size);
+ return -EINVAL;
+ }
-+ return __addip((struct ip_set_ipporthash *) set->data,
++ return __addip((struct ip_set_ipporthash *) set->data,
+ req->ip, req->port, hash_ip);
+}
+
+static int
-+addip_kernel(struct ip_set *set,
++addip_kernel(struct ip_set *set,
+ const struct sk_buff *skb,
+ ip_set_ip_t *hash_ip,
+ const u_int32_t *flags,
@@ -4170,7 +4170,7 @@
+
+ if (flags[index+1] == 0)
+ return -EINVAL;
-+
++
+ port = get_port(skb, flags[index+1]);
+
+ DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
@@ -4182,19 +4182,19 @@
+ NIPQUAD(skb->nh.iph->saddr),
+ NIPQUAD(skb->nh.iph->daddr));
+#endif
-+ DP("flag %s port %u",
-+ flags[index+1] & IPSET_SRC ? "SRC" : "DST",
-+ port);
++ DP("flag %s port %u",
++ flags[index+1] & IPSET_SRC ? "SRC" : "DST",
++ port);
+ if (port == INVALID_PORT)
-+ return -EINVAL;
++ return -EINVAL;
+
+ return __addip((struct ip_set_ipporthash *) set->data,
-+ ntohl(flags[index] & IPSET_SRC
++ ntohl(flags[index] & IPSET_SRC
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
++ ? ip_hdr(skb)->saddr
+ : ip_hdr(skb)->daddr),
+#else
-+ ? skb->nh.iph->saddr
++ ? skb->nh.iph->saddr
+ : skb->nh.iph->daddr),
+#endif
+ port,
@@ -4209,23 +4209,23 @@
+ u_int32_t i, hashsize = map->hashsize;
+ int res;
+ struct ip_set_ipporthash *tmp;
-+
++
+ if (map->resize == 0)
+ return -ERANGE;
+
+ again:
+ res = 0;
-+
++
+ /* Calculate new hash size */
+ hashsize += (hashsize * map->resize)/100;
+ if (hashsize == map->hashsize)
+ hashsize++;
-+
++
+ ip_set_printk("rehashing of set %s triggered: "
+ "hashsize grows from %u to %u",
+ set->name, map->hashsize, hashsize);
+
-+ tmp = kmalloc(sizeof(struct ip_set_ipporthash)
++ tmp = kmalloc(sizeof(struct ip_set_ipporthash)
+ + map->probes * sizeof(uint32_t), GFP_ATOMIC);
+ if (!tmp) {
+ DP("out of memory for %d bytes",
@@ -4246,11 +4246,11 @@
+ tmp->first_ip = map->first_ip;
+ tmp->last_ip = map->last_ip;
+ memcpy(tmp->initval, map->initval, map->probes * sizeof(uint32_t));
-+
++
+ write_lock_bh(&set->lock);
+ map = (struct ip_set_ipporthash *) set->data; /* Play safe */
+ for (i = 0; i < map->hashsize && res == 0; i++) {
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
++ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
+ if (*elem)
+ res = __add_haship(tmp, *elem);
+ }
@@ -4261,7 +4261,7 @@
+ kfree(tmp);
+ goto again;
+ }
-+
++
+ /* Success at resizing! */
+ members = map->members;
+
@@ -4290,7 +4290,7 @@
+
+ if (id == UINT_MAX)
+ return -EEXIST;
-+
++
+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
+ *elem = 0;
+ map->elements--;
@@ -4315,7 +4315,7 @@
+}
+
+static int
-+delip_kernel(struct ip_set *set,
++delip_kernel(struct ip_set *set,
+ const struct sk_buff *skb,
+ ip_set_ip_t *hash_ip,
+ const u_int32_t *flags,
@@ -4325,7 +4325,7 @@
+
+ if (flags[index+1] == 0)
+ return -EINVAL;
-+
++
+ port = get_port(skb, flags[index+1]);
+
+ DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
@@ -4338,18 +4338,18 @@
+ NIPQUAD(skb->nh.iph->daddr));
+#endif
+ DP("flag %s port %u",
-+ flags[index+1] & IPSET_SRC ? "SRC" : "DST",
-+ port);
++ flags[index+1] & IPSET_SRC ? "SRC" : "DST",
++ port);
+ if (port == INVALID_PORT)
-+ return -EINVAL;
++ return -EINVAL;
+
+ return __delip(set,
-+ ntohl(flags[index] & IPSET_SRC
++ ntohl(flags[index] & IPSET_SRC
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
++ ? ip_hdr(skb)->saddr
+ : ip_hdr(skb)->daddr),
+#else
-+ ? skb->nh.iph->saddr
++ ? skb->nh.iph->saddr
+ : skb->nh.iph->daddr),
+#endif
+ port,
@@ -4380,7 +4380,7 @@
+ return -ENOEXEC;
+ }
+
-+ map = kmalloc(sizeof(struct ip_set_ipporthash)
++ map = kmalloc(sizeof(struct ip_set_ipporthash)
+ + req->probes * sizeof(uint32_t), GFP_KERNEL);
+ if (!map) {
+ DP("out of memory for %d bytes",
@@ -4450,7 +4450,7 @@
+ ip_set_ip_t i, *elem;
+
+ for (i = 0; i < map->hashsize; i++) {
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
++ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
+ ((ip_set_ip_t *)data)[i] = *elem;
+ }
+}
@@ -4503,7 +4503,7 @@
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
++ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the iptree type */
@@ -4532,8 +4532,8 @@
+
+/* Garbage collection interval in seconds: */
+#define IPTREE_GC_TIME 5*60
-+/* Sleep so many milliseconds before trying again
-+ * to delete the gc timer at destroying/flushing a set */
++/* Sleep so many milliseconds before trying again
++ * to delete the gc timer at destroying/flushing a set */
+#define IPTREE_DESTROY_SLEEP 100
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
@@ -4580,7 +4580,7 @@
+
+ if (!ip)
+ return -ERANGE;
-+
++
+ *hash_ip = ip;
+ ABCD(a, b, c, d, hash_ip);
+ DP("%u %u %u %u timeout %u", a, b, c, d, map->timeout);
@@ -4597,7 +4597,7 @@
+testip(struct ip_set *set, const void *data, size_t size,
+ ip_set_ip_t *hash_ip)
+{
-+ struct ip_set_req_iptree *req =
++ struct ip_set_req_iptree *req =
+ (struct ip_set_req_iptree *) data;
+
+ if (size != sizeof(struct ip_set_req_iptree)) {
@@ -4610,14 +4610,14 @@
+}
+
+static int
-+testip_kernel(struct ip_set *set,
++testip_kernel(struct ip_set *set,
+ const struct sk_buff *skb,
+ ip_set_ip_t *hash_ip,
+ const u_int32_t *flags,
+ unsigned char index)
+{
+ int res;
-+
++
+ DP("flag: %s src: %u.%u.%u.%u dst: %u.%u.%u.%u",
+ flags[index] & IPSET_SRC ? "SRC" : "DST",
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
@@ -4629,12 +4629,12 @@
+#endif
+
+ res = __testip(set,
-+ ntohl(flags[index] & IPSET_SRC
++ ntohl(flags[index] & IPSET_SRC
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
++ ? ip_hdr(skb)->saddr
+ : ip_hdr(skb)->daddr),
+#else
-+ ? skb->nh.iph->saddr
++ ? skb->nh.iph->saddr
+ : skb->nh.iph->daddr),
+#endif
+ hash_ip);
@@ -4654,7 +4654,7 @@
+ (map)->tree[elem] = branch; \
+ DP("alloc %u", elem); \
+ } \
-+} while (0)
++} while (0)
+
+static inline int
+__addip(struct ip_set *set, ip_set_ip_t ip, unsigned int timeout,
@@ -4666,12 +4666,12 @@
+ struct ip_set_iptreed *dtree;
+ unsigned char a,b,c,d;
+ int ret = 0;
-+
++
+ if (!ip || map->elements >= limit)
+ /* We could call the garbage collector
+ * but it's probably overkill */
+ return -ERANGE;
-+
++
+ *hash_ip = ip;
+ ABCD(a, b, c, d, hash_ip);
+ DP("%u %u %u %u timeout %u", a, b, c, d, timeout);
@@ -4696,7 +4696,7 @@
+ ip_set_ip_t *hash_ip)
+{
+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
-+ struct ip_set_req_iptree *req =
++ struct ip_set_req_iptree *req =
+ (struct ip_set_req_iptree *) data;
+
+ if (size != sizeof(struct ip_set_req_iptree)) {
@@ -4712,7 +4712,7 @@
+}
+
+static int
-+addip_kernel(struct ip_set *set,
++addip_kernel(struct ip_set *set,
+ const struct sk_buff *skb,
+ ip_set_ip_t *hash_ip,
+ const u_int32_t *flags,
@@ -4721,12 +4721,12 @@
+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
+
+ return __addip(set,
-+ ntohl(flags[index] & IPSET_SRC
++ ntohl(flags[index] & IPSET_SRC
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
++ ? ip_hdr(skb)->saddr
+ : ip_hdr(skb)->daddr),
+#else
-+ ? skb->nh.iph->saddr
++ ? skb->nh.iph->saddr
+ : skb->nh.iph->daddr),
+#endif
+ map->timeout,
@@ -4740,7 +4740,7 @@
+ return -EEXIST; \
+} while (0)
+
-+static inline int
++static inline int
+__delip(struct ip_set *set, ip_set_ip_t ip, ip_set_ip_t *hash_ip)
+{
+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
@@ -4748,10 +4748,10 @@
+ struct ip_set_iptreec *ctree;
+ struct ip_set_iptreed *dtree;
+ unsigned char a,b,c,d;
-+
++
+ if (!ip)
+ return -ERANGE;
-+
++
+ *hash_ip = ip;
+ ABCD(a, b, c, d, hash_ip);
+ DELIP_WALK(map, a, btree);
@@ -4783,19 +4783,19 @@
+}
+
+static int
-+delip_kernel(struct ip_set *set,
++delip_kernel(struct ip_set *set,
+ const struct sk_buff *skb,
+ ip_set_ip_t *hash_ip,
+ const u_int32_t *flags,
+ unsigned char index)
+{
+ return __delip(set,
-+ ntohl(flags[index] & IPSET_SRC
++ ntohl(flags[index] & IPSET_SRC
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
++ ? ip_hdr(skb)->saddr
+ : ip_hdr(skb)->daddr),
+#else
-+ ? skb->nh.iph->saddr
++ ? skb->nh.iph->saddr
+ : skb->nh.iph->daddr),
+#endif
+ hash_ip);
@@ -4874,7 +4874,7 @@
+ }
+ LOOP_WALK_END;
+ write_unlock_bh(&set->lock);
-+
++
+ map->gc.expires = jiffies + map->gc_interval * HZ;
+ add_timer(&map->gc);
+}
@@ -4958,7 +4958,7 @@
+{
+ struct ip_set_iptree *map = (struct ip_set_iptree *) set->data;
+ unsigned int timeout = map->timeout;
-+
++
+ /* gc might be running */
+ while (!del_timer(&map->gc))
+ msleep(IPTREE_DESTROY_SLEEP);
@@ -5021,7 +5021,7 @@
+ && (!map->timeout || time_after(dtree->expires[d], jiffies))) {
+ entry = (struct ip_set_req_iptree *)(data + offset);
+ entry->ip = ((a << 24) | (b << 16) | (c << 8) | d);
-+ entry->timeout = !map->timeout ? 0
++ entry->timeout = !map->timeout ? 0
+ : (dtree->expires[d] - jiffies)/HZ;
+ offset += sizeof(struct ip_set_req_iptree);
+ }
@@ -5061,7 +5061,7 @@
+static int __init ip_set_iptree_init(void)
+{
+ int ret;
-+
++
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
+ branch_cachep = kmem_cache_create("ip_set_iptreeb",
+ sizeof(struct ip_set_iptreeb),
@@ -5095,7 +5095,7 @@
+ goto out;
+
+ kmem_cache_destroy(leaf_cachep);
-+ free_branch:
++ free_branch:
+ kmem_cache_destroy(branch_cachep);
+ out:
+ return ret;
@@ -5411,13 +5411,13 @@
+{
+ int res;
+
-+ res = __testip(set,
-+ ntohl(flags[index] & IPSET_SRC
++ res = __testip(set,
++ ntohl(flags[index] & IPSET_SRC
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
++ ? ip_hdr(skb)->saddr
+ : ip_hdr(skb)->daddr),
+#else
-+ ? skb->nh.iph->saddr
++ ? skb->nh.iph->saddr
+ : skb->nh.iph->daddr),
+#endif
+ hash_ip);
@@ -5501,12 +5501,12 @@
+{
+
+ return __addip_single(set,
-+ ntohl(flags[index] & IPSET_SRC
++ ntohl(flags[index] & IPSET_SRC
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
++ ? ip_hdr(skb)->saddr
+ : ip_hdr(skb)->daddr),
+#else
-+ ? skb->nh.iph->saddr
++ ? skb->nh.iph->saddr
+ : skb->nh.iph->daddr),
+#endif
+ hash_ip);
@@ -5586,13 +5586,13 @@
+static int
+delip_kernel(struct ip_set *set, const struct sk_buff *skb, ip_set_ip_t *hash_ip, const u_int32_t *flags, unsigned char index)
+{
-+ return __delip_single(set,
-+ ntohl(flags[index] & IPSET_SRC
++ return __delip_single(set,
++ ntohl(flags[index] & IPSET_SRC
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
++ ? ip_hdr(skb)->saddr
+ : ip_hdr(skb)->daddr),
+#else
-+ ? skb->nh.iph->saddr
++ ? skb->nh.iph->saddr
+ : skb->nh.iph->daddr),
+#endif
+ hash_ip,
@@ -5844,12 +5844,12 @@
+ int a;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
-+ cachep_b = kmem_cache_create("ip_set_iptreemap_b",
-+ sizeof(struct ip_set_iptreemap_b),
++ cachep_b = kmem_cache_create("ip_set_iptreemap_b",
++ sizeof(struct ip_set_iptreemap_b),
+ 0, 0, NULL);
+#else
-+ cachep_b = kmem_cache_create("ip_set_iptreemap_b",
-+ sizeof(struct ip_set_iptreemap_b),
++ cachep_b = kmem_cache_create("ip_set_iptreemap_b",
++ sizeof(struct ip_set_iptreemap_b),
+ 0, 0, NULL, NULL);
+#endif
+ if (!cachep_b) {
@@ -5858,11 +5858,11 @@
+ }
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)
-+ cachep_c = kmem_cache_create("ip_set_iptreemap_c",
++ cachep_c = kmem_cache_create("ip_set_iptreemap_c",
+ sizeof(struct ip_set_iptreemap_c),
+ 0, 0, NULL);
+#else
-+ cachep_c = kmem_cache_create("ip_set_iptreemap_c",
++ cachep_c = kmem_cache_create("ip_set_iptreemap_c",
+ sizeof(struct ip_set_iptreemap_c),
+ 0, 0, NULL, NULL);
+#endif
@@ -5953,7 +5953,7 @@
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
++ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing an IP set type: the macipmap type */
@@ -5978,7 +5978,7 @@
+testip(struct ip_set *set, const void *data, size_t size, ip_set_ip_t *hash_ip)
+{
+ struct ip_set_macipmap *map = (struct ip_set_macipmap *) set->data;
-+ struct ip_set_macip *table = (struct ip_set_macip *) map->members;
++ struct ip_set_macip *table = (struct ip_set_macip *) map->members;
+ struct ip_set_req_macipmap *req = (struct ip_set_req_macipmap *) data;
+
+ if (size != sizeof(struct ip_set_req_macipmap)) {
@@ -5993,7 +5993,7 @@
+
+ *hash_ip = req->ip;
+ DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u",
-+ set->name, HIPQUAD(req->ip), HIPQUAD(*hash_ip));
++ set->name, HIPQUAD(req->ip), HIPQUAD(*hash_ip));
+ if (test_bit(IPSET_MACIP_ISSET,
+ (void *) &table[req->ip - map->first_ip].flags)) {
+ return (memcmp(req->ethernet,
@@ -6005,7 +6005,7 @@
+}
+
+static int
-+testip_kernel(struct ip_set *set,
++testip_kernel(struct ip_set *set,
+ const struct sk_buff *skb,
+ ip_set_ip_t *hash_ip,
+ const u_int32_t *flags,
@@ -6016,10 +6016,10 @@
+ struct ip_set_macip *table =
+ (struct ip_set_macip *) map->members;
+ ip_set_ip_t ip;
-+
++
+ ip = ntohl(flags[index] & IPSET_SRC
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
++ ? ip_hdr(skb)->saddr
+ : ip_hdr(skb)->daddr);
+#else
+ ? skb->nh.iph->saddr
@@ -6029,9 +6029,9 @@
+ if (ip < map->first_ip || ip > map->last_ip)
+ return 0;
+
-+ *hash_ip = ip;
++ *hash_ip = ip;
+ DP("set: %s, ip:%u.%u.%u.%u, %u.%u.%u.%u",
-+ set->name, HIPQUAD(ip), HIPQUAD(*hash_ip));
++ set->name, HIPQUAD(ip), HIPQUAD(*hash_ip));
+ if (test_bit(IPSET_MACIP_ISSET,
+ (void *) &table[ip - map->first_ip].flags)) {
+ /* Is mac pointer valid?
@@ -6053,7 +6053,7 @@
+
+/* returns 0 on success */
+static inline int
-+__addip(struct ip_set *set,
++__addip(struct ip_set *set,
+ ip_set_ip_t ip, unsigned char *ethernet, ip_set_ip_t *hash_ip)
+{
+ struct ip_set_macipmap *map =
@@ -6063,7 +6063,7 @@
+
+ if (ip < map->first_ip || ip > map->last_ip)
+ return -ERANGE;
-+ if (test_and_set_bit(IPSET_MACIP_ISSET,
++ if (test_and_set_bit(IPSET_MACIP_ISSET,
+ (void *) &table[ip - map->first_ip].flags))
+ return -EEXIST;
+
@@ -6090,17 +6090,17 @@
+}
+
+static int
-+addip_kernel(struct ip_set *set,
++addip_kernel(struct ip_set *set,
+ const struct sk_buff *skb,
+ ip_set_ip_t *hash_ip,
+ const u_int32_t *flags,
+ unsigned char index)
+{
+ ip_set_ip_t ip;
-+
++
+ ip = ntohl(flags[index] & IPSET_SRC
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
++ ? ip_hdr(skb)->saddr
+ : ip_hdr(skb)->daddr);
+#else
+ ? skb->nh.iph->saddr
@@ -6129,7 +6129,7 @@
+
+ if (ip < map->first_ip || ip > map->last_ip)
+ return -ERANGE;
-+ if (!test_and_clear_bit(IPSET_MACIP_ISSET,
++ if (!test_and_clear_bit(IPSET_MACIP_ISSET,
+ (void *)&table[ip - map->first_ip].flags))
+ return -EEXIST;
+
@@ -6162,12 +6162,12 @@
+ unsigned char index)
+{
+ return __delip(set,
-+ ntohl(flags[index] & IPSET_SRC
++ ntohl(flags[index] & IPSET_SRC
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
++ ? ip_hdr(skb)->saddr
+ : ip_hdr(skb)->daddr),
+#else
-+ ? skb->nh.iph->saddr
++ ? skb->nh.iph->saddr
+ : skb->nh.iph->daddr),
+#endif
+ hash_ip);
@@ -6224,7 +6224,7 @@
+ return -ENOMEM;
+ }
+ memset(map->members, 0, newbytes);
-+
++
+ set->data = map;
+ return 0;
+}
@@ -6328,7 +6328,7 @@
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
++ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing a cidr nethash set */
@@ -6371,7 +6371,7 @@
+ ip_set_ip_t *elem;
+
+ *hash_ip = pack(ip, cidr);
-+
++
+ for (i = 0; i < map->probes; i++) {
+ id = jhash_ip(map, i, *hash_ip) % map->hashsize;
+ DP("hash key: %u", id);
@@ -6416,7 +6416,7 @@
+testip(struct ip_set *set, const void *data, size_t size,
+ ip_set_ip_t *hash_ip)
+{
-+ struct ip_set_req_nethash *req =
++ struct ip_set_req_nethash *req =
+ (struct ip_set_req_nethash *) data;
+
+ if (size != sizeof(struct ip_set_req_nethash)) {
@@ -6430,19 +6430,19 @@
+}
+
+static int
-+testip_kernel(struct ip_set *set,
++testip_kernel(struct ip_set *set,
+ const struct sk_buff *skb,
+ ip_set_ip_t *hash_ip,
+ const u_int32_t *flags,
+ unsigned char index)
+{
+ return __testip(set,
-+ ntohl(flags[index] & IPSET_SRC
++ ntohl(flags[index] & IPSET_SRC
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
++ ? ip_hdr(skb)->saddr
+ : ip_hdr(skb)->daddr),
+#else
-+ ? skb->nh.iph->saddr
++ ? skb->nh.iph->saddr
+ : skb->nh.iph->daddr),
+#endif
+ hash_ip);
@@ -6454,7 +6454,7 @@
+ __u32 probe;
+ u_int16_t i;
+ ip_set_ip_t *elem;
-+
++
+ for (i = 0; i < map->probes; i++) {
+ probe = jhash_ip(map, i, ip) % map->hashsize;
+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, probe);
@@ -6476,10 +6476,10 @@
+{
+ if (!ip || map->elements >= limit)
+ return -ERANGE;
-+
++
+ *hash_ip = pack(ip, cidr);
+ DP("%u.%u.%u.%u/%u, %u.%u.%u.%u", HIPQUAD(ip), cidr, HIPQUAD(*hash_ip));
-+
++
+ return __addip_base(map, *hash_ip);
+}
+
@@ -6488,7 +6488,7 @@
+{
+ unsigned char next;
+ int i;
-+
++
+ for (i = 0; i < 30 && map->cidr[i]; i++) {
+ if (map->cidr[i] == cidr) {
+ return;
@@ -6506,7 +6506,7 @@
+addip(struct ip_set *set, const void *data, size_t size,
+ ip_set_ip_t *hash_ip)
+{
-+ struct ip_set_req_nethash *req =
++ struct ip_set_req_nethash *req =
+ (struct ip_set_req_nethash *) data;
+ int ret;
+
@@ -6516,18 +6516,18 @@
+ size);
+ return -EINVAL;
+ }
-+ ret = __addip((struct ip_set_nethash *) set->data,
++ ret = __addip((struct ip_set_nethash *) set->data,
+ req->ip, req->cidr, hash_ip);
-+
++
+ if (ret == 0)
+ update_cidr_sizes((struct ip_set_nethash *) set->data,
+ req->cidr);
-+
++
+ return ret;
+}
+
+static int
-+addip_kernel(struct ip_set *set,
++addip_kernel(struct ip_set *set,
+ const struct sk_buff *skb,
+ ip_set_ip_t *hash_ip,
+ const u_int32_t *flags,
@@ -6535,18 +6535,18 @@
+{
+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
+ int ret = -ERANGE;
-+ ip_set_ip_t ip = ntohl(flags[index] & IPSET_SRC
++ ip_set_ip_t ip = ntohl(flags[index] & IPSET_SRC
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
++ ? ip_hdr(skb)->saddr
+ : ip_hdr(skb)->daddr);
+#else
+ ? skb->nh.iph->saddr
+ : skb->nh.iph->daddr);
+#endif
-+
++
+ if (map->cidr[0])
+ ret = __addip(map, ip, map->cidr[0], hash_ip);
-+
++
+ return ret;
+}
+
@@ -6558,23 +6558,23 @@
+ u_int32_t i, hashsize = map->hashsize;
+ int res;
+ struct ip_set_nethash *tmp;
-+
++
+ if (map->resize == 0)
+ return -ERANGE;
+
+ again:
+ res = 0;
-+
++
+ /* Calculate new parameters */
+ hashsize += (hashsize * map->resize)/100;
+ if (hashsize == map->hashsize)
+ hashsize++;
-+
++
+ ip_set_printk("rehashing of set %s triggered: "
+ "hashsize grows from %u to %u",
+ set->name, map->hashsize, hashsize);
+
-+ tmp = kmalloc(sizeof(struct ip_set_nethash)
++ tmp = kmalloc(sizeof(struct ip_set_nethash)
+ + map->probes * sizeof(uint32_t), GFP_ATOMIC);
+ if (!tmp) {
+ DP("out of memory for %d bytes",
@@ -6594,11 +6594,11 @@
+ tmp->resize = map->resize;
+ memcpy(tmp->initval, map->initval, map->probes * sizeof(uint32_t));
+ memcpy(tmp->cidr, map->cidr, 30 * sizeof(unsigned char));
-+
++
+ write_lock_bh(&set->lock);
+ map = (struct ip_set_nethash *) set->data; /* Play safe */
+ for (i = 0; i < map->hashsize && res == 0; i++) {
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
++ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
+ if (*elem)
+ res = __addip_base(tmp, *elem);
+ }
@@ -6609,10 +6609,10 @@
+ kfree(tmp);
+ goto again;
+ }
-+
++
+ /* Success at resizing! */
+ members = map->members;
-+
++
+ map->hashsize = tmp->hashsize;
+ map->members = tmp->members;
+ write_unlock_bh(&set->lock);
@@ -6631,11 +6631,11 @@
+
+ if (!ip)
+ return -ERANGE;
-+
++
+ id = hash_id_cidr(map, ip, cidr, hash_ip);
+ if (id == UINT_MAX)
+ return -EEXIST;
-+
++
+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, id);
+ *elem = 0;
+ map->elements--;
@@ -6655,13 +6655,13 @@
+ size);
+ return -EINVAL;
+ }
-+ /* TODO: no garbage collection in map->cidr */
-+ return __delip((struct ip_set_nethash *) set->data,
++ /* TODO: no garbage collection in map->cidr */
++ return __delip((struct ip_set_nethash *) set->data,
+ req->ip, req->cidr, hash_ip);
+}
+
+static int
-+delip_kernel(struct ip_set *set,
++delip_kernel(struct ip_set *set,
+ const struct sk_buff *skb,
+ ip_set_ip_t *hash_ip,
+ const u_int32_t *flags,
@@ -6669,18 +6669,18 @@
+{
+ struct ip_set_nethash *map = (struct ip_set_nethash *) set->data;
+ int ret = -ERANGE;
-+ ip_set_ip_t ip = ntohl(flags[index] & IPSET_SRC
++ ip_set_ip_t ip = ntohl(flags[index] & IPSET_SRC
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
-+ ? ip_hdr(skb)->saddr
++ ? ip_hdr(skb)->saddr
+ : ip_hdr(skb)->daddr);
+#else
+ ? skb->nh.iph->saddr
+ : skb->nh.iph->daddr);
+#endif
-+
++
+ if (map->cidr[0])
+ ret = __delip(map, ip, map->cidr[0], hash_ip);
-+
++
+ return ret;
+}
+
@@ -6728,7 +6728,7 @@
+ kfree(map);
+ return -ENOMEM;
+ }
-+
++
+ set->data = map;
+ return 0;
+}
@@ -6775,7 +6775,7 @@
+ ip_set_ip_t i, *elem;
+
+ for (i = 0; i < map->hashsize; i++) {
-+ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
++ elem = HARRAY_ELEM(map->members, ip_set_ip_t *, i);
+ ((ip_set_ip_t *)data)[i] = *elem;
+ }
+}
@@ -6828,7 +6828,7 @@
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
++ * published by the Free Software Foundation.
+ */
+
+/* Kernel module implementing a port set type as a bitmap */
@@ -6863,7 +6863,7 @@
+ switch (iph->protocol) {
+ case IPPROTO_TCP: {
+ struct tcphdr tcph;
-+
++
+ /* See comments at tcp_match in ip_tables.c */
+ if (offset)
+ return INVALID_PORT;
@@ -6875,7 +6875,7 @@
+#endif
+ /* No choice either */
+ return INVALID_PORT;
-+
++
+ return ntohs(flags & IPSET_SRC ?
+ tcph.source : tcph.dest);
+ }
@@ -6892,7 +6892,7 @@
+#endif
+ /* No choice either */
+ return INVALID_PORT;
-+
++
+ return ntohs(flags & IPSET_SRC ?
+ udph.source : udph.dest);
+ }
@@ -6908,7 +6908,7 @@
+
+ if (port < map->first_port || port > map->last_port)
+ return -ERANGE;
-+
++
+ *hash_port = port;
+ DP("set: %s, port:%u, %u", set->name, port, *hash_port);
+ return !!test_bit(port - map->first_port, map->members);
@@ -6918,7 +6918,7 @@
+testport(struct ip_set *set, const void *data, size_t size,
+ ip_set_ip_t *hash_port)
+{
-+ struct ip_set_req_portmap *req =
++ struct ip_set_req_portmap *req =
+ (struct ip_set_req_portmap *) data;
+
+ if (size != sizeof(struct ip_set_req_portmap)) {
@@ -6931,7 +6931,7 @@
+}
+
+static int
-+testport_kernel(struct ip_set *set,
++testport_kernel(struct ip_set *set,
+ const struct sk_buff *skb,
+ ip_set_ip_t *hash_port,
+ const u_int32_t *flags,
@@ -6940,12 +6940,12 @@
+ int res;
+ ip_set_ip_t port = get_port(skb, flags[index]);
+
-+ DP("flag %s port %u", flags[index] & IPSET_SRC ? "SRC" : "DST", port);
++ DP("flag %s port %u", flags[index] & IPSET_SRC ? "SRC" : "DST", port);
+ if (port == INVALID_PORT)
-+ return 0;
++ return 0;
+
+ res = __testport(set, port, hash_port);
-+
++
+ return (res < 0 ? 0 : res);
+}
+
@@ -6958,7 +6958,7 @@
+ return -ERANGE;
+ if (test_and_set_bit(port - map->first_port, map->members))
+ return -EEXIST;
-+
++
+ *hash_port = port;
+ DP("port %u", port);
+ return 0;
@@ -6968,7 +6968,7 @@
+addport(struct ip_set *set, const void *data, size_t size,
+ ip_set_ip_t *hash_port)
+{
-+ struct ip_set_req_portmap *req =
++ struct ip_set_req_portmap *req =
+ (struct ip_set_req_portmap *) data;
+
+ if (size != sizeof(struct ip_set_req_portmap)) {
@@ -6981,14 +6981,14 @@
+}
+
+static int
-+addport_kernel(struct ip_set *set,
++addport_kernel(struct ip_set *set,
+ const struct sk_buff *skb,
+ ip_set_ip_t *hash_port,
+ const u_int32_t *flags,
+ unsigned char index)
+{
+ ip_set_ip_t port = get_port(skb, flags[index]);
-+
++
+ if (port == INVALID_PORT)
+ return -EINVAL;
+
@@ -7004,7 +7004,7 @@
+ return -ERANGE;
+ if (!test_and_clear_bit(port - map->first_port, map->members))
+ return -EEXIST;
-+
++
+ *hash_port = port;
+ DP("port %u", port);
+ return 0;
@@ -7027,14 +7027,14 @@
+}
+
+static int
-+delport_kernel(struct ip_set *set,
++delport_kernel(struct ip_set *set,
+ const struct sk_buff *skb,
+ ip_set_ip_t *hash_port,
+ const u_int32_t *flags,
+ unsigned char index)
+{
+ ip_set_ip_t port = get_port(skb, flags[index]);
-+
++
+ if (port == INVALID_PORT)
+ return -EINVAL;
+
@@ -7180,7 +7180,7 @@
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
++ * published by the Free Software Foundation.
+ */
+
+/* Kernel module to match an IP set. */
@@ -7198,7 +7198,7 @@
+match_set(const struct ipt_set_info *info,
+ const struct sk_buff *skb,
+ int inv)
-+{
++{
+ if (ip_set_testip_kernel(info->index, skb, info->flags))
+ inv = !inv;
+ return inv;
@@ -7225,7 +7225,7 @@
+#endif
+{
+ const struct ipt_set_info_match *info = matchinfo;
-+
++
+ return match_set(&info->match_set,
+ skb,
+ info->match_set.flags[0] & IPSET_MATCH_INV);
@@ -7251,7 +7251,7 @@
+#endif
+ unsigned int hook_mask)
+{
-+ struct ipt_set_info_match *info =
++ struct ipt_set_info_match *info =
+ (struct ipt_set_info_match *) matchinfo;
+ ip_set_id_t index;
+
@@ -7263,7 +7263,7 @@
+#endif
+
+ index = ip_set_get_byindex(info->match_set.index);
-+
++
+ if (index == IP_SET_INVALID_ID) {
+ ip_set_printk("Cannot find set indentified by id %u to match",
+ info->match_set.index);
@@ -7343,7 +7343,7 @@
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
-+ * published by the Free Software Foundation.
++ * published by the Free Software Foundation.
+ */
+
+/* ipt_SET.c - netfilter target to manipulate IP sets */
@@ -7386,7 +7386,7 @@
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
+ struct sk_buff *skb = *pskb;
+#endif
-+
++
+ if (info->add_set.index != IP_SET_INVALID_ID)
+ ip_set_addip_kernel(info->add_set.index,
+ skb,
@@ -7415,11 +7415,11 @@
+#endif
+ void *targinfo,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
-+ unsigned int targinfosize,
++ unsigned int targinfosize,
+#endif
+ unsigned int hook_mask)
+{
-+ struct ipt_set_info_target *info =
++ struct ipt_set_info_target *info =
+ (struct ipt_set_info_target *) targinfo;
+ ip_set_id_t index;
+
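The ipset hunks above repeat one compatibility idiom in every *_kernel() hook: on 2.6.22 and later kernels the IPv4 header is reached through ip_hdr(skb), while older kernels used the skb->nh.iph field that 2.6.22 removed. A minimal sketch of that guard follows; the helper name pkt_v4_addr is illustrative only and does not appear in the patch, which open-codes the expression at each call site.

#include <linux/version.h>
#include <linux/skbuff.h>
#include <linux/ip.h>

/* Select the source or destination IPv4 address with the same
 * compile-time version guard the ipset patch uses in its hooks. */
static inline u32 pkt_v4_addr(const struct sk_buff *skb, int want_src)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
        return ntohl(want_src ? ip_hdr(skb)->saddr : ip_hdr(skb)->daddr);
#else
        return ntohl(want_src ? skb->nh.iph->saddr : skb->nh.iph->daddr);
#endif
}

The 2.6.23 guard around kmem_cache_create() in the same file follows the same pattern: that release dropped the trailing destructor argument, which is why the pre-2.6.23 branch passes one extra NULL.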
diff --git a/target/linux/generic-2.6/patches-2.6.26/150-netfilter_imq.patch b/target/linux/generic-2.6/patches-2.6.26/150-netfilter_imq.patch
index 830fb5d80e..ba9a93c135 100644
--- a/target/linux/generic-2.6/patches-2.6.26/150-netfilter_imq.patch
+++ b/target/linux/generic-2.6/patches-2.6.26/150-netfilter_imq.patch
@@ -50,11 +50,11 @@
+ * I didn't forget anybody). I apologize again for my lack of time.
+ *
+ *
-+ * 2008/06/17 - 2.6.25 - Changed imq.c to use qdisc_run() instead
++ * 2008/06/17 - 2.6.25 - Changed imq.c to use qdisc_run() instead
+ * of qdisc_restart() and moved qdisc_run() to tasklet to avoid
+ * recursive locking. New initialization routines to fix 'rmmod' not
+ * working anymore. Used code from ifb.c. (Jussi Kivilinna)
-+ *
++ *
+ * Also, many thanks to pablo Sebastian Greco for making the initial
+ * patch and to those who helped the testing.
+ *
diff --git a/target/linux/generic-2.6/patches-2.6.26/200-sched_esfq.patch b/target/linux/generic-2.6/patches-2.6.26/200-sched_esfq.patch
index 6d97dc356e..4a336a9596 100644
--- a/target/linux/generic-2.6/patches-2.6.26/200-sched_esfq.patch
+++ b/target/linux/generic-2.6/patches-2.6.26/200-sched_esfq.patch
@@ -4,7 +4,7 @@
*
* The only reason for this is efficiency, it is possible
* to change these parameters in compile time.
-+ *
++ *
+ * If you need to play with these values, use esfq instead.
*/
@@ -60,7 +60,7 @@
+ flows. The original SFQ discipline hashes by connection; ESFQ add
+ several other hashing methods, such as by src IP or by dst IP, which
+ can be more fair to users in some networking situations.
-+
++
+ To compile this code as a module, choose M here: the
+ module will be called sch_esfq.
+
@@ -149,7 +149,7 @@
+ For more comments look at sch_sfq.c.
+ The difference is that you can change limit, depth,
+ hash table size and choose alternate hash types.
-+
++
+ classic: same as in sch_sfq.c
+ dst: destination IP address
+ src: source IP address
@@ -157,8 +157,8 @@
+ ctorigdst: original destination IP address
+ ctorigsrc: original source IP address
+ ctrepldst: reply destination IP address
-+ ctreplsrc: reply source IP
-+
++ ctreplsrc: reply source IP
++
+*/
+
+#define ESFQ_HEAD 0
@@ -231,7 +231,7 @@
+ enum ip_conntrack_info ctinfo;
+ struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
+#endif
-+
++
+ switch (skb->protocol) {
+ case __constant_htons(ETH_P_IP):
+ {
@@ -489,13 +489,13 @@
+ /* No active slots */
+ if (q->tail == depth)
+ return NULL;
-+
++
+ a = old_a = q->next[q->tail];
-+
++
+ /* Grab packet */
+ skb = __skb_dequeue(&q->qs[a]);
+ esfq_dec(q, a);
-+
++
+ /* Is the slot empty? */
+ if (q->qs[a].qlen == 0) {
+ q->ht[q->hash[a]] = depth;
@@ -511,7 +511,7 @@
+ a = q->next[a];
+ q->allot[a] += q->quantum;
+ }
-+
++
+ return skb;
+}
+
@@ -601,13 +601,13 @@
+ }
+ }
+}
-+
++
+static int esfq_q_init(struct esfq_sched_data *q, struct rtattr *opt)
+{
+ struct tc_esfq_qopt *ctl = RTA_DATA(opt);
+ esfq_index p = ~0U/2;
+ int i;
-+
++
+ if (opt && opt->rta_len < RTA_LENGTH(sizeof(*ctl)))
+ return -EINVAL;
+
@@ -618,7 +618,7 @@
+ q->perturb_period = 0;
+ q->hash_divisor = 1024;
+ q->tail = q->limit = q->depth = 128;
-+
++
+ } else {
+ struct tc_esfq_qopt *ctl = RTA_DATA(opt);
+ if (ctl->quantum)
@@ -626,18 +626,18 @@
+ q->perturb_period = ctl->perturb_period*HZ;
+ q->hash_divisor = ctl->divisor ? : 1024;
+ q->tail = q->limit = q->depth = ctl->flows ? : 128;
-+
++
+ if ( q->depth > p - 1 )
+ return -EINVAL;
-+
++
+ if (ctl->limit)
+ q->limit = min_t(u32, ctl->limit, q->depth);
-+
++
+ if (ctl->hash_kind) {
+ q->hash_kind = esfq_check_hash(ctl->hash_kind);
+ }
+ }
-+
++
+ q->ht = kmalloc(q->hash_divisor*sizeof(esfq_index), GFP_KERNEL);
+ if (!q->ht)
+ goto err_case;
@@ -656,7 +656,7 @@
+ q->qs = kmalloc(q->depth*sizeof(struct sk_buff_head), GFP_KERNEL);
+ if (!q->qs)
+ goto err_case;
-+
++
+ for (i=0; i< q->hash_divisor; i++)
+ q->ht[i] = q->depth;
+ for (i=0; i<q->depth; i++) {
@@ -664,7 +664,7 @@
+ q->dep[i+q->depth].next = i+q->depth;
+ q->dep[i+q->depth].prev = i+q->depth;
+ }
-+
++
+ for (i=0; i<q->depth; i++)
+ esfq_link(q, i);
+ return 0;
@@ -677,7 +677,7 @@
+{
+ struct esfq_sched_data *q = qdisc_priv(sch);
+ int err;
-+
++
+ q->quantum = psched_mtu(sch->dev); /* default */
+ if ((err = esfq_q_init(q, opt)))
+ return err;
@@ -689,7 +689,7 @@
+ q->perturb_timer.expires = jiffies + q->perturb_period;
+ add_timer(&q->perturb_timer);
+ }
-+
++
+ return 0;
+}
+
@@ -699,7 +699,7 @@
+ struct esfq_sched_data new;
+ struct sk_buff *skb;
+ int err;
-+
++
+ /* set up new queue */
+ memset(&new, 0, sizeof(struct esfq_sched_data));
+ new.quantum = psched_mtu(sch->dev); /* default */
@@ -710,7 +710,7 @@
+ sch_tree_lock(sch);
+ while ((skb = esfq_q_dequeue(q)) != NULL)
+ esfq_q_enqueue(skb, &new, ESFQ_TAIL);
-+
++
+ /* clean up the old queue */
+ esfq_q_destroy(q);
+
@@ -786,7 +786,7 @@
+{
+ return register_qdisc(&esfq_qdisc_ops);
+}
-+static void __exit esfq_module_exit(void)
++static void __exit esfq_module_exit(void)
+{
+ unregister_qdisc(&esfq_qdisc_ops);
+}
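The sch_esfq diff ends at the module entry points shown above. For orientation, the registration boilerplate they belong to has roughly the following shape; the Qdisc_ops table itself (enqueue, dequeue, init and friends) is assumed to be filled in earlier in sch_esfq.c and is not reproduced here.

#include <linux/module.h>
#include <linux/init.h>
#include <net/pkt_sched.h>

/* Assumed to be populated with esfq_enqueue/esfq_dequeue/... above. */
static struct Qdisc_ops esfq_qdisc_ops;

static int __init esfq_module_init(void)
{
        return register_qdisc(&esfq_qdisc_ops);
}

static void __exit esfq_module_exit(void)
{
        unregister_qdisc(&esfq_qdisc_ops);
}

module_init(esfq_module_init);
module_exit(esfq_module_exit);
MODULE_LICENSE("GPL");

The module_init()/module_exit()/MODULE_LICENSE() lines are the conventional ending of such a module and are assumed here rather than quoted from the patch.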
diff --git a/target/linux/generic-2.6/patches-2.6.26/204-jffs2_eofdetect.patch b/target/linux/generic-2.6/patches-2.6.26/204-jffs2_eofdetect.patch
index de554ad01f..17a082d2c2 100644
--- a/target/linux/generic-2.6/patches-2.6.26/204-jffs2_eofdetect.patch
+++ b/target/linux/generic-2.6/patches-2.6.26/204-jffs2_eofdetect.patch
@@ -9,7 +9,7 @@
+ if (c->mtd->unlock)
+ c->mtd->unlock(c->mtd, 0, c->mtd->size);
+ printk("done.\n");
-+
++
+ printk("%s(): erasing all blocks after the end marker... ", __func__);
+ jffs2_erase_pending_blocks(c, -1);
+ printk("done.\n");
@@ -20,14 +20,21 @@
/* Now scan the directory tree, increasing nlink according to every dirent found. */
--- a/fs/jffs2/scan.c
+++ b/fs/jffs2/scan.c
-@@ -143,9 +143,12 @@
-
+@@ -72,7 +72,7 @@
+ return ret;
+ if ((ret = jffs2_scan_dirty_space(c, jeb, jeb->free_size)))
+ return ret;
+- /* Turned wasted size into dirty, since we apparently
++ /* Turned wasted size into dirty, since we apparently
+ think it's recoverable now. */
+ jeb->dirty_size += jeb->wasted_size;
+ c->dirty_size += jeb->wasted_size;
+@@ -144,8 +144,11 @@
/* reset summary info for next eraseblock scan */
jffs2_sum_reset_collected(s);
--
+
- ret = jffs2_scan_eraseblock(c, jeb, buf_size?flashbuf:(flashbuf+jeb->offset),
- buf_size, s);
-+
+ if (c->flags & (1 << 7))
+ ret = BLK_STATE_ALLFF;
+ else
@@ -36,6 +43,57 @@
if (ret < 0)
goto out;
+@@ -400,7 +403,7 @@
+ if (!ref)
+ return -ENOMEM;
+
+- /* BEFORE jffs2_build_xattr_subsystem() called,
++ /* BEFORE jffs2_build_xattr_subsystem() called,
+ * and AFTER xattr_ref is marked as a dead xref,
+ * ref->xid is used to store 32bit xid, xd is not used
+ * ref->ino is used to store 32bit inode-number, ic is not used
+@@ -473,7 +476,7 @@
+ struct jffs2_sum_marker *sm;
+ void *sumptr = NULL;
+ uint32_t sumlen;
+-
++
+ if (!buf_size) {
+ /* XIP case. Just look, point at the summary if it's there */
+ sm = (void *)buf + c->sector_size - sizeof(*sm);
+@@ -489,9 +492,9 @@
+ buf_len = sizeof(*sm);
+
+ /* Read as much as we want into the _end_ of the preallocated buffer */
+- err = jffs2_fill_scan_buf(c, buf + buf_size - buf_len,
++ err = jffs2_fill_scan_buf(c, buf + buf_size - buf_len,
+ jeb->offset + c->sector_size - buf_len,
+- buf_len);
++ buf_len);
+ if (err)
+ return err;
+
+@@ -510,9 +513,9 @@
+ }
+ if (buf_len < sumlen) {
+ /* Need to read more so that the entire summary node is present */
+- err = jffs2_fill_scan_buf(c, sumptr,
++ err = jffs2_fill_scan_buf(c, sumptr,
+ jeb->offset + c->sector_size - sumlen,
+- sumlen - buf_len);
++ sumlen - buf_len);
+ if (err)
+ return err;
+ }
+@@ -525,7 +528,7 @@
+
+ if (buf_size && sumlen > buf_size)
+ kfree(sumptr);
+- /* If it returns with a real error, bail.
++ /* If it returns with a real error, bail.
+ If it returns positive, that's a block classification
+ (i.e. BLK_STATE_xxx) so return that too.
+ If it returns zero, fall through to full scan. */
@@ -546,6 +549,17 @@
return err;
}
@@ -50,7 +108,25 @@
+
+ return BLK_STATE_ALLFF;
+ }
-+
++
/* We temporarily use 'ofs' as a pointer into the buffer/jeb */
ofs = 0;
+@@ -671,7 +685,7 @@
+ scan_end = buf_len;
+ goto more_empty;
+ }
+-
++
+ /* See how much more there is to read in this eraseblock... */
+ buf_len = min_t(uint32_t, buf_size, jeb->offset + c->sector_size - ofs);
+ if (!buf_len) {
+@@ -907,7 +921,7 @@
+
+ D1(printk(KERN_DEBUG "Block at 0x%08x: free 0x%08x, dirty 0x%08x, unchecked 0x%08x, used 0x%08x, wasted 0x%08x\n",
+ jeb->offset,jeb->free_size, jeb->dirty_size, jeb->unchecked_size, jeb->used_size, jeb->wasted_size));
+-
++
+ /* mark_node_obsolete can add to wasted !! */
+ if (jeb->wasted_size) {
+ jeb->dirty_size += jeb->wasted_size;
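The jffs2_eofdetect hunks wire an end-of-filesystem marker into the scan loop: once the marker has been seen, bit 7 of c->flags makes every remaining eraseblock be reported as BLK_STATE_ALLFF (blank) instead of being read from flash. A sketch of that short-circuit, relying on jffs2's internal types from fs/jffs2/nodelist.h; the macro name below is invented for readability, the patch uses the bare (1 << 7).

/* Sketch only: mirrors the branch the patch inserts into
 * jffs2_scan_medium() ahead of each per-eraseblock scan. */
#define JFFS2_SB_FLAG_EOF_SEEN (1 << 7)   /* illustrative name */

static int scan_one_block(struct jffs2_sb_info *c,
                          struct jffs2_eraseblock *jeb,
                          unsigned char *flashbuf, uint32_t buf_size,
                          struct jffs2_summary *s)
{
        if (c->flags & JFFS2_SB_FLAG_EOF_SEEN)
                return BLK_STATE_ALLFF;  /* past the end marker */

        return jffs2_scan_eraseblock(c, jeb,
                                     buf_size ? flashbuf
                                              : (flashbuf + jeb->offset),
                                     buf_size, s);
}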
diff --git a/target/linux/generic-2.6/patches-2.6.26/213-kobject_uevent.patch b/target/linux/generic-2.6/patches-2.6.26/213-kobject_uevent.patch
index 7450689082..93ed1d035c 100644
--- a/target/linux/generic-2.6/patches-2.6.26/213-kobject_uevent.patch
+++ b/target/linux/generic-2.6/patches-2.6.26/213-kobject_uevent.patch
@@ -21,7 +21,7 @@
+ spin_lock(&sequence_lock);
+ seq = ++uevent_seqnum;
+ spin_unlock(&sequence_lock);
-+
++
+ return seq;
+}
+EXPORT_SYMBOL_GPL(uevent_next_seqnum);
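The kobject_uevent hunk only reflows whitespace inside uevent_next_seqnum(), but the helper reads better as one unit together with the state it guards. A sketch with the supporting declarations; both variables already exist in lib/kobject_uevent.c, only the includes are added here.

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/module.h>

static DEFINE_SPINLOCK(sequence_lock);
static u64 uevent_seqnum;

/* Return the next uevent sequence number; the spinlock serialises
 * concurrent callers so the counter increases monotonically. */
u64 uevent_next_seqnum(void)
{
        u64 seq;

        spin_lock(&sequence_lock);
        seq = ++uevent_seqnum;
        spin_unlock(&sequence_lock);

        return seq;
}
EXPORT_SYMBOL_GPL(uevent_next_seqnum);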
diff --git a/target/linux/generic-2.6/patches-2.6.26/600-phy_extension.patch b/target/linux/generic-2.6/patches-2.6.26/600-phy_extension.patch
index ec0a402b66..39f22bbdef 100644
--- a/target/linux/generic-2.6/patches-2.6.26/600-phy_extension.patch
+++ b/target/linux/generic-2.6/patches-2.6.26/600-phy_extension.patch
@@ -20,7 +20,7 @@
+ if (copy_to_user(useraddr, &ecmd, sizeof(ecmd)))
+ return -EFAULT;
+ return 0;
-+
++
+ case ETHTOOL_SSET:
+ if (copy_from_user(&ecmd, useraddr, sizeof(ecmd)))
+ return -EFAULT;
@@ -33,7 +33,7 @@
+ tmp |= (BMCR_ANRESTART);
+ phy_write(phydev, MII_BMCR, tmp);
+ return 0;
-+ }
++ }
+ return -EINVAL;
+
+ case ETHTOOL_GLINK:
@@ -51,6 +51,26 @@
/**
* phy_mii_ioctl - generic PHY MII ioctl interface
* @phydev: the phy_device struct
+@@ -403,8 +447,8 @@
+ }
+
+ phy_write(phydev, mii_data->reg_num, val);
+-
+- if (mii_data->reg_num == MII_BMCR
++
++ if (mii_data->reg_num == MII_BMCR
+ && val & BMCR_RESET
+ && phydev->drv->config_init) {
+ phy_scan_fixups(phydev);
+@@ -524,7 +568,7 @@
+ int idx;
+
+ idx = phy_find_setting(phydev->speed, phydev->duplex);
+-
++
+ idx++;
+
+ idx = phy_find_valid(idx, phydev->supported);
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -434,6 +434,7 @@
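The phy_extension hunks extend phylib's ioctl path with ethtool sub-commands (ETHTOOL_GSET, ETHTOOL_SSET, ETHTOOL_GLINK) and touch the MII register path. The body of the ETHTOOL_GLINK case is named above but falls outside the quoted context; the sketch below is a plausible shape only, assuming phylib's phy_read() and the standard struct ethtool_value, and is not copied from the patch.

#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/uaccess.h>

/* Assumed handler shape: report link state from the BMSR status bit.
 * PHYs latch link-down, so production code may read BMSR twice. */
static int phy_ethtool_glink(struct phy_device *phydev, void __user *useraddr)
{
        struct ethtool_value edata = { .cmd = ETHTOOL_GLINK };
        int bmsr = phy_read(phydev, MII_BMSR);

        if (bmsr < 0)
                return bmsr;
        edata.data = (bmsr & BMSR_LSTATUS) ? 1 : 0;

        if (copy_to_user(useraddr, &edata, sizeof(edata)))
                return -EFAULT;
        return 0;
}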
diff --git a/target/linux/generic-2.6/patches-2.6.26/903-hostap_txpower.patch b/target/linux/generic-2.6/patches-2.6.26/903-hostap_txpower.patch
index 81e10834e3..3d6d5d0976 100644
--- a/target/linux/generic-2.6/patches-2.6.26/903-hostap_txpower.patch
+++ b/target/linux/generic-2.6/patches-2.6.26/903-hostap_txpower.patch
@@ -80,7 +80,7 @@
/* Get BSSID if we have a valid AP address */
+
+ if ( val == HFA384X_LINKSTATUS_CONNECTED ||
-+ val == HFA384X_LINKSTATUS_DISCONNECTED )
++ val == HFA384X_LINKSTATUS_DISCONNECTED )
+ hostap_restore_power(local->dev);
+
if (connected) {
@@ -127,7 +127,7 @@
+{
+ struct hostap_interface *iface = dev->priv;
+ local_info_t *local = iface->local;
-+
++
+ u16 val;
+ int ret = 0;
+
diff --git a/target/linux/generic-2.6/patches-2.6.26/910-cryptodev_backport.patch b/target/linux/generic-2.6/patches-2.6.26/910-cryptodev_backport.patch
index bca461ed47..682d2a13fd 100644
--- a/target/linux/generic-2.6/patches-2.6.26/910-cryptodev_backport.patch
+++ b/target/linux/generic-2.6/patches-2.6.26/910-cryptodev_backport.patch
@@ -573,7 +573,12 @@
/* max = 24: 128bit encrypt, max = 32: 256bit encrypt */
--- a/crypto/crc32c.c
+++ b/crypto/crc32c.c
-@@ -5,20 +5,23 @@
+@@ -1,24 +1,27 @@
+-/*
++/*
+ * Cryptographic API.
+ *
+ * CRC32C chksum
*
* This module file is a wrapper to invoke the lib/crc32c routines.
*
@@ -581,7 +586,8 @@
+ *
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
+- * Software Foundation; either version 2 of the License, or (at your option)
++ * Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
@@ -599,7 +605,21 @@
#define CHKSUM_DIGEST_SIZE 4
struct chksum_ctx {
-@@ -71,7 +74,7 @@
+@@ -27,7 +30,7 @@
+ };
+
+ /*
+- * Steps through buffer one byte at at time, calculates reflected
++ * Steps through buffer one byte at at time, calculates reflected
+ * crc using table.
+ */
+
+@@ -67,11 +70,11 @@
+ static void chksum_final(struct crypto_tfm *tfm, u8 *out)
+ {
+ struct chksum_ctx *mctx = crypto_tfm_ctx(tfm);
+-
++
*(__le32 *)out = ~cpu_to_le32(mctx->crc);
}
@@ -675,7 +695,7 @@
+static int crc32c_final(struct ahash_request *req)
+{
+ u32 *crcp = ahash_request_ctx(req);
-+
++
+ *(__le32 *)req->result = ~cpu_to_le32p(crcp);
+ return 0;
+}
@@ -8329,12 +8349,12 @@
@@ -0,0 +1,154 @@
+/*
+ * Hash: Hash algorithms under the crypto API
-+ *
++ *
+ * Copyright (c) 2008 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
-+ * Software Foundation; either version 2 of the License, or (at your option)
++ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
@@ -8486,12 +8506,12 @@
@@ -0,0 +1,78 @@
+/*
+ * Hash algorithms.
-+ *
++ *
+ * Copyright (c) 2008 Herbert Xu <herbert@gondor.apana.org.au>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
-+ * Software Foundation; either version 2 of the License, or (at your option)
++ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
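Several of the crc32c hunks touch the finalisation step, where the accumulated value is complemented before being stored (~cpu_to_le32(...)). For reference, the underlying CRC-32C math in plain userspace C, not code from the patch: seed with all ones, process each byte LSB-first against the reflected Castagnoli polynomial 0x82F63B78, and complement the result at the end, which is the '~' seen in chksum_final() and crc32c_final().

#include <stdint.h>
#include <stddef.h>

/* One-shot bitwise CRC-32C over a buffer. */
static uint32_t crc32c(const uint8_t *buf, size_t len)
{
        uint32_t crc = 0xFFFFFFFFu;

        while (len--) {
                crc ^= *buf++;
                for (int k = 0; k < 8; k++)
                        crc = (crc >> 1) ^ ((crc & 1) ? 0x82F63B78u : 0);
        }
        return ~crc;
}

As a sanity check, crc32c((const uint8_t *)"123456789", 9) evaluates to 0xE3069283, the standard CRC-32C check value.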
diff --git a/target/linux/generic-2.6/patches-2.6.26/915-hso-backport.patch b/target/linux/generic-2.6/patches-2.6.26/915-hso-backport.patch
index 34c2d94259..e60ca2ea47 100644
--- a/target/linux/generic-2.6/patches-2.6.26/915-hso-backport.patch
+++ b/target/linux/generic-2.6/patches-2.6.26/915-hso-backport.patch
@@ -26,11 +26,9 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
---
-diff --git a/drivers/net/Makefile b/drivers/net/Makefile
-index c52738a..c96fe20 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
-@@ -237,6 +237,7 @@ obj-$(CONFIG_USB_CATC) += usb/
+@@ -237,6 +237,7 @@
obj-$(CONFIG_USB_KAWETH) += usb/
obj-$(CONFIG_USB_PEGASUS) += usb/
obj-$(CONFIG_USB_RTL8150) += usb/
@@ -38,11 +36,9 @@ index c52738a..c96fe20 100644
obj-$(CONFIG_USB_USBNET) += usb/
obj-$(CONFIG_USB_ZD1201) += usb/
-diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
-index 0604f3f..68e198b 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
-@@ -154,6 +154,16 @@ config USB_NET_AX8817X
+@@ -154,6 +154,16 @@
This driver creates an interface named "ethX", where X depends on
what other networking devices you have in use.
@@ -59,11 +55,9 @@ index 0604f3f..68e198b 100644
config USB_NET_CDCETHER
tristate "CDC Ethernet support (smart devices such as cable modems)"
-diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
-index 595a539..24800c1 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
-@@ -6,6 +6,7 @@ obj-$(CONFIG_USB_CATC) += catc.o
+@@ -6,6 +6,7 @@
obj-$(CONFIG_USB_KAWETH) += kaweth.o
obj-$(CONFIG_USB_PEGASUS) += pegasus.o
obj-$(CONFIG_USB_RTL8150) += rtl8150.o
@@ -71,9 +65,6 @@ index 595a539..24800c1 100644
obj-$(CONFIG_USB_NET_AX8817X) += asix.o
obj-$(CONFIG_USB_NET_CDCETHER) += cdc_ether.o
obj-$(CONFIG_USB_NET_DM9601) += dm9601.o
-diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
-new file mode 100644
-index 0000000..031d07b
--- /dev/null
+++ b/drivers/net/usb/hso.c
@@ -0,0 +1,2836 @@
diff --git a/target/linux/generic-2.6/patches-2.6.26/970-ocf_20080704.patch b/target/linux/generic-2.6/patches-2.6.26/970-ocf_20080704.patch
index a227284c63..627f3538af 100644
--- a/target/linux/generic-2.6/patches-2.6.26/970-ocf_20080704.patch
+++ b/target/linux/generic-2.6/patches-2.6.26/970-ocf_20080704.patch
@@ -86,13 +86,13 @@
+{
+ int count;
+
-+ wait_event_interruptible(random_write_wait,
++ wait_event_interruptible(random_write_wait,
+ input_pool.entropy_count < random_write_wakeup_thresh);
+
+ count = random_write_wakeup_thresh - input_pool.entropy_count;
+
+ /* likely we got woken up due to a signal */
-+ if (count <= 0) count = random_read_wakeup_thresh;
++ if (count <= 0) count = random_read_wakeup_thresh;
+
+ DEBUG_ENT("requesting %d bits from input_wait()er %d<%d\n",
+ count,
@@ -643,7 +643,7 @@
+
+ cd linux-2.4*; gunzip < ocf-linux-24-XXXXXXXX.patch.gz | patch -p1
+ cd linux-2.6*; gunzip < ocf-linux-26-XXXXXXXX.patch.gz | patch -p1
-+
++
+ if you do one of the above, then you can proceed to the next step,
+ or you can do the above process by hand with using the patches against
+ linux-2.4.35 and 2.6.23 to include the ocf code under crypto/ocf.
@@ -666,7 +666,7 @@
+ It should be easy to take this patch and apply it to other more
+ recent versions of the kernels. The same patches should also work
+ relatively easily on kernels as old as 2.6.11 and 2.4.18.
-+
++
+ * under 2.4 if you are on a non-x86 platform, you may need to:
+
+ cp linux-2.X.x/include/asm-i386/kmap_types.h linux-2.X.x/include/asm-YYY
@@ -869,7 +869,7 @@
+ * MAX_COMMAND = base command + mac command + encrypt command +
+ * mac-key + rc4-key
+ * MAX_RESULT = base result + mac result + mac + encrypt result
-+ *
++ *
+ *
+ */
+#define HIFN_MAX_COMMAND (8 + 8 + 8 + 64 + 260)
@@ -1227,7 +1227,7 @@
+
+
+/*********************************************************************
-+ * Structs for board commands
++ * Structs for board commands
+ *
+ *********************************************************************/
+
@@ -1437,7 +1437,7 @@
+
+ /*
+ * Our current positions for insertion and removal from the desriptor
-+ * rings.
++ * rings.
+ */
+ int cmdi, srci, dsti, resi;
+ volatile int cmdu, srcu, dstu, resu;
@@ -1559,7 +1559,7 @@
+ *
+ * session_num
+ * -----------
-+ * A number between 0 and 2048 (for DRAM models) or a number between
++ * A number between 0 and 2048 (for DRAM models) or a number between
+ * 0 and 768 (for SRAM models). Those who don't want to use session
+ * numbers should leave value at zero and send a new crypt key and/or
+ * new MAC key on every command. If you use session numbers and
@@ -1573,7 +1573,7 @@
+ * ----
+ * Either fill in the mbuf pointer and npa=0 or
+ * fill packp[] and packl[] and set npa to > 0
-+ *
++ *
+ * mac_header_skip
+ * ---------------
+ * The number of bytes of the source_buf that are skipped over before
@@ -1661,7 +1661,7 @@
+ * 0 for success, negative values on error
+ *
+ * Defines for negative error codes are:
-+ *
++ *
+ * HIFN_CRYPTO_BAD_INPUT : The passed in command had invalid settings.
+ * HIFN_CRYPTO_RINGS_FULL : All DMA rings were full and non-blocking
+ * behaviour was requested.
@@ -2465,7 +2465,7 @@
+ sc->sc_dmaier |= HIFN_DMAIER_PUBDONE;
+ WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
+#ifdef HIFN_VULCANDEV
-+ sc->sc_pkdev = make_dev(&vulcanpk_cdevsw, 0,
++ sc->sc_pkdev = make_dev(&vulcanpk_cdevsw, 0,
+ UID_ROOT, GID_WHEEL, 0666,
+ "vulcanpk");
+ sc->sc_pkdev->si_drv1 = sc;
@@ -2664,7 +2664,7 @@
+ * "hifn_enable_crypto" is called to enable it. The check is important,
+ * as enabling crypto twice will lock the board.
+ */
-+static int
++static int
+hifn_enable_crypto(struct hifn_softc *sc)
+{
+ u_int32_t dmacfg, ramcfg, encl, addr, i;
@@ -2756,7 +2756,7 @@
+ * Give initial values to the registers listed in the "Register Space"
+ * section of the HIFN Software Development reference manual.
+ */
-+static void
++static void
+hifn_init_pci_registers(struct hifn_softc *sc)
+{
+ DPRINTF("%s()\n", __FUNCTION__);
@@ -3141,7 +3141,7 @@
+/*
+ * Initialize the descriptor rings.
+ */
-+static void
++static void
+hifn_init_dma(struct hifn_softc *sc)
+{
+ struct hifn_dma *dma = sc->sc_dma;
@@ -3429,10 +3429,10 @@
+ dma->srci = idx;
+ dma->srcu += src->nsegs;
+ return (idx);
-+}
++}
+
+
-+static int
++static int
+hifn_crypto(
+ struct hifn_softc *sc,
+ struct hifn_command *cmd,
@@ -4301,7 +4301,7 @@
+ cmd->cklen = enccrd->crd_klen >> 3;
+ cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;
+
-+ /*
++ /*
+ * Need to specify the size for the AES key in the masks.
+ */
+ if ((cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) ==
@@ -4858,9 +4858,9 @@
+static ssize_t
+cryptoid_show(struct device *dev,
+ struct device_attribute *attr,
-+ char *buf)
-+{
-+ struct hipp_softc *sc;
++ char *buf)
++{
++ struct hipp_softc *sc;
+
+ sc = pci_get_drvdata(to_pci_dev (dev));
+ return sprintf (buf, "%d\n", sc->sc_cid);
@@ -4992,13 +4992,13 @@
+ crypto_unregister_all(sc->sc_cid);
+ if (sc->sc_irq != -1)
+ free_irq(sc->sc_irq, sc);
-+
++
+#if 0
+ if (sc->sc_dma) {
+ /* Turn off DMA polling */
+ WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
+ HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
-+
++
+ pci_free_consistent(sc->sc_pcidev,
+ sizeof(*sc->sc_dma),
+ sc->sc_dma, sc->sc_dma_physaddr);
@@ -5151,7 +5151,7 @@
@@ -0,0 +1,93 @@
+/*
+ * Hifn HIPP-I/HIPP-II (7855/8155) driver.
-+ * Copyright (c) 2006 Michael Richardson <mcr@xelerance.com> *
++ * Copyright (c) 2006 Michael Richardson <mcr@xelerance.com> *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
@@ -5374,7 +5374,7 @@
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
-+ 0, 0, 0, 0, 0, 0, 0, 0,
++ 0, 0, 0, 0, 0, 0, 0, 0,
+};
+
+static void md5_calc(u_int8_t *, md5_ctxt *);
@@ -5409,7 +5409,7 @@
+ for (i = gap; i + MD5_BUFLEN <= len; i += MD5_BUFLEN) {
+ md5_calc((u_int8_t *)(input + i), ctxt);
+ }
-+
++
+ ctxt->md5_i = len - i;
+ bcopy((void *)(input + i), (void *)ctxt->md5_buf, ctxt->md5_i);
+ } else {
@@ -5424,7 +5424,7 @@
+{
+ u_int gap;
+
-+ /* Don't count up padding. Keep md5_n. */
++ /* Don't count up padding. Keep md5_n. */
+ gap = MD5_BUFLEN - ctxt->md5_i;
+ if (gap > 8) {
+ bcopy(md5_paddat,
@@ -5440,7 +5440,7 @@
+ MD5_BUFLEN - sizeof(ctxt->md5_n));
+ }
+
-+ /* 8 byte word */
++ /* 8 byte word */
+#if BYTE_ORDER == LITTLE_ENDIAN
+ bcopy(&ctxt->md5_n8[0], &ctxt->md5_buf[56], 8);
+#endif
@@ -5488,7 +5488,7 @@
+ u_int32_t D = ctxt->md5_std;
+#if BYTE_ORDER == LITTLE_ENDIAN
+ u_int32_t *X = (u_int32_t *)b64;
-+#endif
++#endif
+#if BYTE_ORDER == BIG_ENDIAN
+ /* 4 byte words */
+ /* what a brute force but fast! */
@@ -5520,7 +5520,7 @@
+ ROUND1(C, D, A, B, 10, Sc, 11); ROUND1(B, C, D, A, 11, Sd, 12);
+ ROUND1(A, B, C, D, 12, Sa, 13); ROUND1(D, A, B, C, 13, Sb, 14);
+ ROUND1(C, D, A, B, 14, Sc, 15); ROUND1(B, C, D, A, 15, Sd, 16);
-+
++
+ ROUND2(A, B, C, D, 1, Se, 17); ROUND2(D, A, B, C, 6, Sf, 18);
+ ROUND2(C, D, A, B, 11, Sg, 19); ROUND2(B, C, D, A, 0, Sh, 20);
+ ROUND2(A, B, C, D, 5, Se, 21); ROUND2(D, A, B, C, 10, Sf, 22);
@@ -5538,14 +5538,14 @@
+ ROUND3(C, D, A, B, 3, Sk, 43); ROUND3(B, C, D, A, 6, Sl, 44);
+ ROUND3(A, B, C, D, 9, Si, 45); ROUND3(D, A, B, C, 12, Sj, 46);
+ ROUND3(C, D, A, B, 15, Sk, 47); ROUND3(B, C, D, A, 2, Sl, 48);
-+
-+ ROUND4(A, B, C, D, 0, Sm, 49); ROUND4(D, A, B, C, 7, Sn, 50);
-+ ROUND4(C, D, A, B, 14, So, 51); ROUND4(B, C, D, A, 5, Sp, 52);
-+ ROUND4(A, B, C, D, 12, Sm, 53); ROUND4(D, A, B, C, 3, Sn, 54);
-+ ROUND4(C, D, A, B, 10, So, 55); ROUND4(B, C, D, A, 1, Sp, 56);
-+ ROUND4(A, B, C, D, 8, Sm, 57); ROUND4(D, A, B, C, 15, Sn, 58);
-+ ROUND4(C, D, A, B, 6, So, 59); ROUND4(B, C, D, A, 13, Sp, 60);
-+ ROUND4(A, B, C, D, 4, Sm, 61); ROUND4(D, A, B, C, 11, Sn, 62);
++
++ ROUND4(A, B, C, D, 0, Sm, 49); ROUND4(D, A, B, C, 7, Sn, 50);
++ ROUND4(C, D, A, B, 14, So, 51); ROUND4(B, C, D, A, 5, Sp, 52);
++ ROUND4(A, B, C, D, 12, Sm, 53); ROUND4(D, A, B, C, 3, Sn, 54);
++ ROUND4(C, D, A, B, 10, So, 55); ROUND4(B, C, D, A, 1, Sp, 56);
++ ROUND4(A, B, C, D, 8, Sm, 57); ROUND4(D, A, B, C, 15, Sn, 58);
++ ROUND4(C, D, A, B, 6, So, 59); ROUND4(B, C, D, A, 13, Sp, 60);
++ ROUND4(A, B, C, D, 4, Sm, 61); ROUND4(D, A, B, C, 11, Sn, 62);
+ ROUND4(C, D, A, B, 2, So, 63); ROUND4(B, C, D, A, 9, Sp, 64);
+
+ ctxt->md5_sta += A;
@@ -6004,7 +6004,7 @@
+ sc->sc_needwakeup &= ~wakeup;
+ crypto_unblock(sc->sc_cid, wakeup);
+ }
-+
++
+ return IRQ_HANDLED;
+}
+
@@ -6540,7 +6540,7 @@
+ /*
+ * Tell the hardware to copy the header to the output.
+ * The header is defined as the data from the end of
-+ * the bypass to the start of data to be encrypted.
++ * the bypass to the start of data to be encrypted.
+ * Typically this is the inline IV. Note that you need
+ * to do this even if src+dst are the same; it appears
+ * that w/o this bit the crypted data is written
@@ -6639,7 +6639,7 @@
+ * destination wil result in a
+ * destination particle list that does
+ * the necessary scatter DMA.
-+ */
++ */
+ safestats.st_iovnotuniform++;
+ err = EINVAL;
+ goto errout;
@@ -6752,7 +6752,7 @@
+ pci_unmap_operand(sc, &re->re_dst);
+ pci_unmap_operand(sc, &re->re_src);
+
-+ /*
++ /*
+ * If result was written to a differet mbuf chain, swap
+ * it in as the return value and reclaim the original.
+ */
@@ -6802,14 +6802,14 @@
+ */
+ re->re_sastate.sa_saved_indigest[0] =
+ cpu_to_be32(re->re_sastate.sa_saved_indigest[0]);
-+ re->re_sastate.sa_saved_indigest[1] =
++ re->re_sastate.sa_saved_indigest[1] =
+ cpu_to_be32(re->re_sastate.sa_saved_indigest[1]);
+ re->re_sastate.sa_saved_indigest[2] =
+ cpu_to_be32(re->re_sastate.sa_saved_indigest[2]);
+ } else {
+ re->re_sastate.sa_saved_indigest[0] =
+ cpu_to_le32(re->re_sastate.sa_saved_indigest[0]);
-+ re->re_sastate.sa_saved_indigest[1] =
++ re->re_sastate.sa_saved_indigest[1] =
+ cpu_to_le32(re->re_sastate.sa_saved_indigest[1]);
+ re->re_sastate.sa_saved_indigest[2] =
+ cpu_to_le32(re->re_sastate.sa_saved_indigest[2]);
@@ -6851,7 +6851,7 @@
+ * status reg in the read in case it is initialized. Then read
+ * the data register until it changes from the first read.
+ * Once it changes read the data register until it changes
-+ * again. At this time the RNG is considered initialized.
++ * again. At this time the RNG is considered initialized.
+ * This could take between 750ms - 1000ms in time.
+ */
+ i = 0;
@@ -6889,7 +6889,7 @@
+{
+ DPRINTF(("%s()\n", __FUNCTION__));
+
-+ WRITE_REG(sc, SAFE_RNG_CTRL,
++ WRITE_REG(sc, SAFE_RNG_CTRL,
+ READ_REG(sc, SAFE_RNG_CTRL) | SAFE_RNG_CTRL_SHORTEN);
+}
+
@@ -6911,7 +6911,7 @@
+ int i, rc;
+
+ DPRINTF(("%s()\n", __FUNCTION__));
-+
++
+ safestats.st_rng++;
+ /*
+ * Fetch the next block of data.
@@ -7131,9 +7131,9 @@
+#endif
+
+ crp = (struct cryptop *)re->re_crp;
-+
++
+ re->re_desc.d_csr = 0;
-+
++
+ crp->crp_etype = EFAULT;
+ crypto_done(crp);
+ return(0);
@@ -7295,7 +7295,7 @@
+ ((base_bits + 7) / 8) - 1;
+ modp = krp->krp_param[SAFE_CRK_PARAM_MOD].crp_p +
+ ((mod_bits + 7) / 8) - 1;
-+
++
+ for (i = 0; i < (mod_bits + 7) / 8; i++, basep--, modp--) {
+ if (*modp < *basep)
+ goto too_small;
@@ -8695,7 +8695,7 @@
+#define SAFE_SA_CMD1_AES192 0x03000000 /* 192-bit AES key */
+#define SAFE_SA_CMD1_AES256 0x04000000 /* 256-bit AES key */
+
-+/*
++/*
+ * Security Associate State Record (Rev 1).
+ */
+struct safe_sastate {
@@ -10642,7 +10642,7 @@
+
+ /* XXX flush queues??? */
+
-+ /*
++ /*
+ * Reclaim dynamically allocated resources.
+ */
+ if (crypto_drivers != NULL)
@@ -11001,12 +11001,12 @@
+ * The Freescale SEC (also known as 'talitos') resides on the
+ * internal bus, and runs asynchronous to the processor core. It has
+ * a wide gamut of cryptographic acceleration features, including single-
-+ * pass IPsec (also known as algorithm chaining). To properly utilize
-+ * all of the SEC's performance enhancing features, further reworking
++ * pass IPsec (also known as algorithm chaining). To properly utilize
++ * all of the SEC's performance enhancing features, further reworking
+ * of higher level code (framework, applications) will be necessary.
+ *
+ * The following table shows which SEC version is present in which devices:
-+ *
++ *
+ * Devices SEC version
+ *
+ * 8272, 8248 SEC 1.0
@@ -11050,13 +11050,13 @@
+ *
+ * Channel ch0 may drive an aes operation to the aes unit (AESU),
+ * and, at the same time, ch1 may drive a message digest operation
-+ * to the mdeu. Each channel has an input descriptor FIFO, and the
++ * to the mdeu. Each channel has an input descriptor FIFO, and the
+ * FIFO can contain, e.g. on the 8541E, up to 24 entries, before a
+ * a buffer overrun error is triggered. The controller is responsible
-+ * for fetching the data from descriptor pointers, and passing the
-+ * data to the appropriate EUs. The controller also writes the
-+ * cryptographic operation's result to memory. The SEC notifies
-+ * completion by triggering an interrupt and/or setting the 1st byte
++ * for fetching the data from descriptor pointers, and passing the
++ * data to the appropriate EUs. The controller also writes the
++ * cryptographic operation's result to memory. The SEC notifies
++ * completion by triggering an interrupt and/or setting the 1st byte
+ * of the hdr field to 0xff.
+ *
+ * TODO:
@@ -11093,7 +11093,7 @@
+#include <cryptodev.h>
+#include <uio.h>
+
-+#define DRV_NAME "talitos"
++#define DRV_NAME "talitos"
+
+#include "talitos_dev.h"
+#include "talitos_soft.h"
@@ -11108,7 +11108,7 @@
+static int talitos_freesession(device_t dev, u_int64_t tid);
+static int talitos_process(device_t dev, struct cryptop *crp, int hint);
+static void dump_talitos_status(struct talitos_softc *sc);
-+static int talitos_submit(struct talitos_softc *sc, struct talitos_desc *td,
++static int talitos_submit(struct talitos_softc *sc, struct talitos_desc *td,
+ int chsel);
+static void talitos_doneprocessing(struct talitos_softc *sc);
+static void talitos_init_device(struct talitos_softc *sc);
@@ -11166,26 +11166,26 @@
+ v_hi = talitos_read(sc->sc_base_addr + TALITOS_ISR_HI);
+ printk(KERN_INFO "%s: ISR 0x%08x_%08x\n",
+ device_get_nameunit(sc->sc_cdev), v, v_hi);
-+ for (i = 0; i < sc->sc_num_channels; i++) {
-+ v = talitos_read(sc->sc_base_addr + i*TALITOS_CH_OFFSET +
++ for (i = 0; i < sc->sc_num_channels; i++) {
++ v = talitos_read(sc->sc_base_addr + i*TALITOS_CH_OFFSET +
+ TALITOS_CH_CDPR);
-+ v_hi = talitos_read(sc->sc_base_addr + i*TALITOS_CH_OFFSET +
++ v_hi = talitos_read(sc->sc_base_addr + i*TALITOS_CH_OFFSET +
+ TALITOS_CH_CDPR_HI);
-+ printk(KERN_INFO "%s: CDPR ch%d 0x%08x_%08x\n",
++ printk(KERN_INFO "%s: CDPR ch%d 0x%08x_%08x\n",
+ device_get_nameunit(sc->sc_cdev), i, v, v_hi);
+ }
-+ for (i = 0; i < sc->sc_num_channels; i++) {
-+ v = talitos_read(sc->sc_base_addr + i*TALITOS_CH_OFFSET +
++ for (i = 0; i < sc->sc_num_channels; i++) {
++ v = talitos_read(sc->sc_base_addr + i*TALITOS_CH_OFFSET +
+ TALITOS_CH_CCPSR);
-+ v_hi = talitos_read(sc->sc_base_addr + i*TALITOS_CH_OFFSET +
++ v_hi = talitos_read(sc->sc_base_addr + i*TALITOS_CH_OFFSET +
+ TALITOS_CH_CCPSR_HI);
-+ printk(KERN_INFO "%s: CCPSR ch%d 0x%08x_%08x\n",
++ printk(KERN_INFO "%s: CCPSR ch%d 0x%08x_%08x\n",
+ device_get_nameunit(sc->sc_cdev), i, v, v_hi);
+ }
+ ptr = sc->sc_base_addr + TALITOS_CH_DESCBUF;
-+ for (i = 0; i < 16; i++) {
++ for (i = 0; i < 16; i++) {
+ v = talitos_read(ptr++); v_hi = talitos_read(ptr++);
-+ printk(KERN_INFO "%s: DESCBUF ch0 0x%08x_%08x (tdp%02d)\n",
++ printk(KERN_INFO "%s: DESCBUF ch0 0x%08x_%08x (tdp%02d)\n",
+ device_get_nameunit(sc->sc_cdev), v, v_hi, i);
+ }
+ return;
@@ -11193,7 +11193,7 @@
+
+
+#ifdef CONFIG_OCF_RANDOMHARVEST
-+/*
++/*
+ * pull random numbers off the RNG FIFO, not exceeding amount available
+ */
+static int
@@ -11213,7 +11213,7 @@
+ return 0;
+ }
+ /*
-+ * OFL is number of available 64-bit words,
++ * OFL is number of available 64-bit words,
+ * shift and convert to a 32-bit word count
+ */
+ v = talitos_read(sc->sc_base_addr + TALITOS_RNGSR_HI);
@@ -11221,16 +11221,16 @@
+ if (maxwords > v)
+ maxwords = v;
+ for (rc = 0; rc < maxwords; rc++) {
-+ buf[rc] = talitos_read(sc->sc_base_addr +
++ buf[rc] = talitos_read(sc->sc_base_addr +
+ TALITOS_RNG_FIFO + rc*sizeof(u_int32_t));
+ }
+ if (maxwords & 1) {
-+ /*
++ /*
+ * RNG will complain with an AE in the RNGISR
+ * if we don't complete the pairs of 32-bit reads
+ * to its 64-bit register based FIFO
+ */
-+ v = talitos_read(sc->sc_base_addr +
++ v = talitos_read(sc->sc_base_addr +
+ TALITOS_RNG_FIFO + rc*sizeof(u_int32_t));
+ }
+
@@ -11247,18 +11247,18 @@
+ v = talitos_read(sc->sc_base_addr + TALITOS_RNGRCR_HI);
+ v |= TALITOS_RNGRCR_HI_SR;
+ talitos_write(sc->sc_base_addr + TALITOS_RNGRCR_HI, v);
-+ while ((talitos_read(sc->sc_base_addr + TALITOS_RNGSR_HI)
++ while ((talitos_read(sc->sc_base_addr + TALITOS_RNGSR_HI)
+ & TALITOS_RNGSR_HI_RD) == 0)
+ cpu_relax();
+ /*
+ * we tell the RNG to start filling the RNG FIFO
-+ * by writing the RNGDSR
++ * by writing the RNGDSR
+ */
+ v = talitos_read(sc->sc_base_addr + TALITOS_RNGDSR_HI);
+ talitos_write(sc->sc_base_addr + TALITOS_RNGDSR_HI, v);
+ /*
-+ * 64 bits of data will be pushed onto the FIFO every
-+ * 256 SEC cycles until the FIFO is full. The RNG then
++ * 64 bits of data will be pushed onto the FIFO every
++ * 256 SEC cycles until the FIFO is full. The RNG then
+ * attempts to keep the FIFO full.
+ */
+ v = talitos_read(sc->sc_base_addr + TALITOS_RNGISR_HI);
@@ -11268,7 +11268,7 @@
+ return;
+ }
+ /*
-+ * n.b. we need to add a FIPS test here - if the RNG is going
++ * n.b. we need to add a FIPS test here - if the RNG is going
+ * to fail, it's going to fail at reset time
+ */
+ return;
@@ -11314,7 +11314,7 @@
+ }
+ if (encini == NULL && macini == NULL)
+ return EINVAL;
-+ if (encini) {
++ if (encini) {
+ /* validate key length */
+ switch (encini->cri_alg) {
+ case CRYPTO_DES_CBC:
@@ -11333,7 +11333,7 @@
+ return EINVAL;
+ break;
+ default:
-+ DPRINTF("UNKNOWN encini->cri_alg %d\n",
++ DPRINTF("UNKNOWN encini->cri_alg %d\n",
+ encini->cri_alg);
+ return EINVAL;
+ }
@@ -11359,13 +11359,13 @@
+ /* allocating session */
+ sesn = sc->sc_nsessions;
+ ses = (struct talitos_session *) kmalloc(
-+ (sesn + 1) * sizeof(struct talitos_session),
++ (sesn + 1) * sizeof(struct talitos_session),
+ SLAB_ATOMIC);
+ if (ses == NULL)
+ return ENOMEM;
+ memset(ses, 0,
+ (sesn + 1) * sizeof(struct talitos_session));
-+ memcpy(ses, sc->sc_sessions,
++ memcpy(ses, sc->sc_sessions,
+ sesn * sizeof(struct talitos_session));
+ memset(sc->sc_sessions, 0,
+ sesn * sizeof(struct talitos_session));
@@ -11408,7 +11408,7 @@
+ }
+ }
+
-+ /* really should make up a template td here,
++ /* really should make up a template td here,
+ * and only fill things like i/o and direction in process() */
+
+ /* assign session ID */
@@ -11439,10 +11439,10 @@
+}
+
+/*
-+ * launch device processing - it will come back with done notification
-+ * in the form of an interrupt and/or HDR_DONE_BITS in header
++ * launch device processing - it will come back with done notification
++ * in the form of an interrupt and/or HDR_DONE_BITS in header
+ */
-+static int
++static int
+talitos_submit(
+ struct talitos_softc *sc,
+ struct talitos_desc *td,
@@ -11451,9 +11451,9 @@
+ u_int32_t v;
+
+ v = dma_map_single(NULL, td, sizeof(*td), DMA_TO_DEVICE);
-+ talitos_write(sc->sc_base_addr +
++ talitos_write(sc->sc_base_addr +
+ chsel*TALITOS_CH_OFFSET + TALITOS_CH_FF, 0);
-+ talitos_write(sc->sc_base_addr +
++ talitos_write(sc->sc_base_addr +
+ chsel*TALITOS_CH_OFFSET + TALITOS_CH_FF_HI, v);
+ return 0;
+}
@@ -11469,7 +11469,7 @@
+ struct talitos_desc *td;
+ unsigned long flags;
+ /* descriptor mappings */
-+ int hmac_key, hmac_data, cipher_iv, cipher_key,
++ int hmac_key, hmac_data, cipher_iv, cipher_key,
+ in_fifo, out_fifo, cipher_iv_out;
+ static int chsel = -1;
+
@@ -11485,7 +11485,7 @@
+
+ ses = &sc->sc_sessions[TALITOS_SESSION(crp->crp_sid)];
+
-+ /* enter the channel scheduler */
++ /* enter the channel scheduler */
+ spin_lock_irqsave(&sc->sc_chnfifolock[sc->sc_num_channels], flags);
+
+ /* reuse channel that already had/has requests for the required EU */
@@ -11497,19 +11497,19 @@
+ /*
+ * haven't seen this algo the last sc_num_channels or more
+ * use round robin in this case
-+ * nb: sc->sc_num_channels must be power of 2
++ * nb: sc->sc_num_channels must be power of 2
+ */
+ chsel = (chsel + 1) & (sc->sc_num_channels - 1);
+ } else {
+ /*
-+ * matches channel with same target execution unit;
++ * matches channel with same target execution unit;
+ * use same channel in this case
+ */
+ chsel = i;
+ }
+ sc->sc_chnlastalg[chsel] = crp->crp_desc->crd_alg;
+
-+ /* release the channel scheduler lock */
++ /* release the channel scheduler lock */
+ spin_unlock_irqrestore(&sc->sc_chnfifolock[sc->sc_num_channels], flags);
+
+ /* acquire the selected channel fifo lock */
@@ -11518,7 +11518,7 @@
+ /* find and reserve next available descriptor-cryptop pair */
+ for (i = 0; i < sc->sc_chfifo_len; i++) {
+ if (sc->sc_chnfifo[chsel][i].cf_desc.hdr == 0) {
-+ /*
++ /*
+ * ensure correct descriptor formation by
+ * avoiding inadvertently setting "optional" entries
+ * e.g. not using "optional" dptr2 for MD/HMAC descs
@@ -11526,7 +11526,7 @@
+ memset(&sc->sc_chnfifo[chsel][i].cf_desc,
+ 0, sizeof(*td));
+ /* reserve it with done notification request bit */
-+ sc->sc_chnfifo[chsel][i].cf_desc.hdr |=
++ sc->sc_chnfifo[chsel][i].cf_desc.hdr |=
+ TALITOS_DONE_NOTIFY;
+ break;
+ }
@@ -11538,7 +11538,7 @@
+ err = ERESTART;
+ goto errout;
+ }
-+
++
+ td = &sc->sc_chnfifo[chsel][i].cf_desc;
+ sc->sc_chnfifo[chsel][i].cf_crp = crp;
+
@@ -11633,10 +11633,10 @@
+ err = EINVAL;
+ goto errout;
+ }
-+ td->ptr[in_fifo].ptr = dma_map_single(NULL, skb->data,
++ td->ptr[in_fifo].ptr = dma_map_single(NULL, skb->data,
+ skb->len, DMA_TO_DEVICE);
+ td->ptr[in_fifo].len = skb->len;
-+ td->ptr[out_fifo].ptr = dma_map_single(NULL, skb->data,
++ td->ptr[out_fifo].ptr = dma_map_single(NULL, skb->data,
+ skb->len, DMA_TO_DEVICE);
+ td->ptr[out_fifo].len = skb->len;
+ td->ptr[hmac_data].ptr = dma_map_single(NULL, skb->data,
@@ -11709,7 +11709,7 @@
+ * copy both the header+IV.
+ */
+ if (enccrd->crd_flags & CRD_F_ENCRYPT) {
-+ td->hdr |= TALITOS_DIR_OUTBOUND;
++ td->hdr |= TALITOS_DIR_OUTBOUND;
+ if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
+ iv = enccrd->crd_iv;
+ else
@@ -11719,7 +11719,7 @@
+ enccrd->crd_inject, ivsize, iv);
+ }
+ } else {
-+ td->hdr |= TALITOS_DIR_INBOUND;
++ td->hdr |= TALITOS_DIR_INBOUND;
+ if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) {
+ iv = enccrd->crd_iv;
+ bcopy(enccrd->crd_iv, iv, ivsize);
@@ -11729,7 +11729,7 @@
+ enccrd->crd_inject, ivsize, iv);
+ }
+ }
-+ td->ptr[cipher_iv].ptr = dma_map_single(NULL, iv, ivsize,
++ td->ptr[cipher_iv].ptr = dma_map_single(NULL, iv, ivsize,
+ DMA_TO_DEVICE);
+ td->ptr[cipher_iv].len = ivsize;
+ /*
@@ -11747,16 +11747,16 @@
+ | TALITOS_MODE1_MDEU_INIT
+ | TALITOS_MODE1_MDEU_PAD;
+ switch (maccrd->crd_alg) {
-+ case CRYPTO_MD5:
++ case CRYPTO_MD5:
+ td->hdr |= TALITOS_MODE1_MDEU_MD5;
+ break;
-+ case CRYPTO_MD5_HMAC:
++ case CRYPTO_MD5_HMAC:
+ td->hdr |= TALITOS_MODE1_MDEU_MD5_HMAC;
+ break;
-+ case CRYPTO_SHA1:
++ case CRYPTO_SHA1:
+ td->hdr |= TALITOS_MODE1_MDEU_SHA1;
+ break;
-+ case CRYPTO_SHA1_HMAC:
++ case CRYPTO_SHA1_HMAC:
+ td->hdr |= TALITOS_MODE1_MDEU_SHA1_HMAC;
+ break;
+ default:
@@ -11773,7 +11773,7 @@
+ * crypt data is the difference in the skips.
+ */
+ /* ipsec only for now */
-+ td->ptr[hmac_key].ptr = dma_map_single(NULL,
++ td->ptr[hmac_key].ptr = dma_map_single(NULL,
+ ses->ses_hmac, ses->ses_hmac_len, DMA_TO_DEVICE);
+ td->ptr[hmac_key].len = ses->ses_hmac_len;
+ td->ptr[in_fifo].ptr += enccrd->crd_skip;
@@ -11782,7 +11782,7 @@
+ td->ptr[out_fifo].len = enccrd->crd_len;
+ /* bytes of HMAC to postpend to ciphertext */
+ td->ptr[out_fifo].extent = ses->ses_mlen;
-+ td->ptr[hmac_data].ptr += maccrd->crd_skip;
++ td->ptr[hmac_data].ptr += maccrd->crd_skip;
+ td->ptr[hmac_data].len = enccrd->crd_skip - maccrd->crd_skip;
+ }
+ if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT) {
@@ -11796,22 +11796,22 @@
+ | TALITOS_MODE0_MDEU_INIT
+ | TALITOS_MODE0_MDEU_PAD;
+ switch (maccrd->crd_alg) {
-+ case CRYPTO_MD5:
++ case CRYPTO_MD5:
+ td->hdr |= TALITOS_MODE0_MDEU_MD5;
+ DPRINTF("MD5 ses %d ch %d len %d\n",
-+ (u32)TALITOS_SESSION(crp->crp_sid),
++ (u32)TALITOS_SESSION(crp->crp_sid),
+ chsel, td->ptr[in_fifo].len);
+ break;
-+ case CRYPTO_MD5_HMAC:
++ case CRYPTO_MD5_HMAC:
+ td->hdr |= TALITOS_MODE0_MDEU_MD5_HMAC;
+ break;
-+ case CRYPTO_SHA1:
++ case CRYPTO_SHA1:
+ td->hdr |= TALITOS_MODE0_MDEU_SHA1;
+ DPRINTF("SHA1 ses %d ch %d len %d\n",
-+ (u32)TALITOS_SESSION(crp->crp_sid),
++ (u32)TALITOS_SESSION(crp->crp_sid),
+ chsel, td->ptr[in_fifo].len);
+ break;
-+ case CRYPTO_SHA1_HMAC:
++ case CRYPTO_SHA1_HMAC:
+ td->hdr |= TALITOS_MODE0_MDEU_SHA1_HMAC;
+ break;
+ default:
@@ -11826,16 +11826,16 @@
+
+ if ((maccrd->crd_alg == CRYPTO_MD5_HMAC) ||
+ (maccrd->crd_alg == CRYPTO_SHA1_HMAC)) {
-+ td->ptr[hmac_key].ptr = dma_map_single(NULL,
-+ ses->ses_hmac, ses->ses_hmac_len,
++ td->ptr[hmac_key].ptr = dma_map_single(NULL,
++ ses->ses_hmac, ses->ses_hmac_len,
+ DMA_TO_DEVICE);
+ td->ptr[hmac_key].len = ses->ses_hmac_len;
+ }
-+ }
++ }
+ else {
+ /* using process key (session data has duplicate) */
-+ td->ptr[cipher_key].ptr = dma_map_single(NULL,
-+ enccrd->crd_key, (enccrd->crd_klen + 7) / 8,
++ td->ptr[cipher_key].ptr = dma_map_single(NULL,
++ enccrd->crd_key, (enccrd->crd_klen + 7) / 8,
+ DMA_TO_DEVICE);
+ td->ptr[cipher_key].len = (enccrd->crd_klen + 7) / 8;
+ }
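
The expression (enccrd->crd_klen + 7) / 8 used twice in the key mapping above converts a key length given in bits into whole bytes, rounding up so lengths that are not multiples of 8 still get enough space. For example:

    #include <stdio.h>

    /* Round a bit count up to whole bytes, as done for crd_klen above. */
    static unsigned int bits_to_bytes(unsigned int bits)
    {
        return (bits + 7) / 8;
    }

    int main(void)
    {
        printf("128 bits -> %u bytes\n", bits_to_bytes(128));  /* 16 */
        printf(" 56 bits -> %u bytes\n", bits_to_bytes(56));   /*  7 */
        printf(" 33 bits -> %u bytes\n", bits_to_bytes(33));   /*  5 */
        return 0;
    }
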
@@ -11850,8 +11850,8 @@
+ return err;
+}
+
-+/* go through all channels descriptors, notifying OCF what has
-+ * _and_hasn't_ successfully completed and reset the device
++/* go through all channels descriptors, notifying OCF what has
++ * _and_hasn't_ successfully completed and reset the device
+ * (otherwise it's up to decoding desc hdrs!)
+ */
+static void talitos_errorprocessing(struct talitos_softc *sc)
@@ -11863,19 +11863,19 @@
+ spin_lock_irqsave(&sc->sc_chnfifolock[sc->sc_num_channels], flags);
+
+ if (debug) dump_talitos_status(sc);
-+ /* go through descriptors, try and salvage those successfully done,
++ /* go through descriptors, try and salvage those successfully done,
+ * and EIO those that weren't
+ */
+ for (i = 0; i < sc->sc_num_channels; i++) {
+ spin_lock_irqsave(&sc->sc_chnfifolock[i], flags);
+ for (j = 0; j < sc->sc_chfifo_len; j++) {
+ if (sc->sc_chnfifo[i][j].cf_desc.hdr) {
-+ if ((sc->sc_chnfifo[i][j].cf_desc.hdr
-+ & TALITOS_HDR_DONE_BITS)
++ if ((sc->sc_chnfifo[i][j].cf_desc.hdr
++ & TALITOS_HDR_DONE_BITS)
+ != TALITOS_HDR_DONE_BITS) {
+ /* this one didn't finish */
+ /* signify in crp->etype */
-+ sc->sc_chnfifo[i][j].cf_crp->crp_etype
++ sc->sc_chnfifo[i][j].cf_crp->crp_etype
+ = EIO;
+ }
+ } else
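
talitos_errorprocessing() walks every channel FIFO after a fault: descriptors whose header carries the full done-bit pattern are treated as salvageable, while everything still pending gets its crp_etype set to EIO before the device is reset. A rough model of that classification (the types and bit pattern are placeholders for this sketch):

    #include <errno.h>
    #include <stdint.h>

    #define HDR_DONE_BITS 0xff000000u       /* placeholder for TALITOS_HDR_DONE_BITS */

    struct pending { uint32_t hdr; int etype; };

    /* Flag unfinished requests with EIO; finished ones keep etype == 0 and
     * can still be completed normally once the reset is over. */
    static void classify_after_error(struct pending *q, int n)
    {
        for (int i = 0; i < n; i++) {
            if (q[i].hdr == 0)
                continue;                                   /* free slot */
            if ((q[i].hdr & HDR_DONE_BITS) != HDR_DONE_BITS)
                q[i].etype = EIO;                           /* never finished */
        }
    }
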
@@ -11918,8 +11918,8 @@
+ spin_lock_irqsave(&sc->sc_chnfifolock[i], flags);
+ for (j = 0; j < sc->sc_chfifo_len; j++) {
+ /* descriptor has done bits set? */
-+ if ((sc->sc_chnfifo[i][j].cf_desc.hdr
-+ & TALITOS_HDR_DONE_BITS)
++ if ((sc->sc_chnfifo[i][j].cf_desc.hdr
++ & TALITOS_HDR_DONE_BITS)
+ == TALITOS_HDR_DONE_BITS) {
+ /* notify ocf */
+ crypto_done(sc->sc_chnfifo[i][j].cf_crp);
@@ -11947,7 +11947,7 @@
+{
+ struct talitos_softc *sc = arg;
+ u_int32_t v, v_hi;
-+
++
+ /* ack */
+ v = talitos_read(sc->sc_base_addr + TALITOS_ISR);
+ v_hi = talitos_read(sc->sc_base_addr + TALITOS_ISR_HI);
@@ -11979,11 +11979,11 @@
+
+ /* init all channels */
+ for (i = 0; i < sc->sc_num_channels; i++) {
-+ v = talitos_read(sc->sc_base_addr +
++ v = talitos_read(sc->sc_base_addr +
+ i*TALITOS_CH_OFFSET + TALITOS_CH_CCCR_HI);
+ v |= TALITOS_CH_CCCR_HI_CDWE
+ | TALITOS_CH_CCCR_HI_CDIE; /* invoke interrupt if done */
-+ talitos_write(sc->sc_base_addr +
++ talitos_write(sc->sc_base_addr +
+ i*TALITOS_CH_OFFSET + TALITOS_CH_CCCR_HI, v);
+ }
+ /* enable all interrupts */
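
Channel setup above is a read-modify-write sequence: CCCR_HI is read, the write-back-enable and done-interrupt bits are ORed in, and the value is written back so any other configuration bits stay untouched. The same pattern in isolation (accessor and bit names are placeholders, not the SEC register definitions):

    #include <stdint.h>

    #define CH_WRITEBACK_ENABLE (1u << 0)   /* placeholder for TALITOS_CH_CCCR_HI_CDWE */
    #define CH_DONE_IRQ_ENABLE  (1u << 1)   /* placeholder for TALITOS_CH_CCCR_HI_CDIE */

    /* Read-modify-write so bits already set in the register survive. */
    static void enable_done_interrupt(volatile uint32_t *cccr_hi)
    {
        uint32_t v = *cccr_hi;                              /* read current config */
        v |= CH_WRITEBACK_ENABLE | CH_DONE_IRQ_ENABLE;      /* set only what we need */
        *cccr_hi = v;                                       /* write it back */
    }
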
@@ -12028,13 +12028,13 @@
+
+ /*
+ * Master reset
-+ * errata documentation: warning: certain SEC interrupts
-+ * are not fully cleared by writing the MCR:SWR bit,
-+ * set bit twice to completely reset
++ * errata documentation: warning: certain SEC interrupts
++ * are not fully cleared by writing the MCR:SWR bit,
++ * set bit twice to completely reset
+ */
+ talitos_reset_device_master(sc); /* once */
+ talitos_reset_device_master(sc); /* and once again */
-+
++
+ /* reset all channels */
+ for (i = 0; i < sc->sc_num_channels; i++) {
+ v = talitos_read(sc->sc_base_addr + i*TALITOS_CH_OFFSET +
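
The errata note above is the reason talitos_reset_device_master() is called twice in a row: a single write of MCR:SWR can leave some SEC interrupt state latched. One way to keep that requirement from being dropped later is to fold the repetition into a helper, as in this illustrative fragment (the callback shape is invented for the sketch):

    /* Erratum workaround: one software reset may not clear every latched
     * interrupt, so always issue the reset back to back. */
    static void soft_reset_twice(void (*reset_once)(void *ctx), void *ctx)
    {
        reset_once(ctx);    /* first pass resets the block */
        reset_once(ctx);    /* second pass clears what the first one missed */
    }
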
@@ -12104,7 +12104,7 @@
+ rc = request_irq(sc->sc_irq, talitos_intr, 0,
+ device_get_nameunit(sc->sc_cdev), sc);
+ if (rc) {
-+ printk(KERN_ERR "%s: failed to hook irq %d\n",
++ printk(KERN_ERR "%s: failed to hook irq %d\n",
+ device_get_nameunit(sc->sc_cdev), sc->sc_irq);
+ sc->sc_irq = -1;
+ goto out;
@@ -12166,17 +12166,17 @@
+ memset(sc->sc_chnlastalg, 0, sc->sc_num_channels * sizeof(int));
+
+ sc->sc_chnfifo = (struct desc_cryptop_pair **) kmalloc(
-+ sc->sc_num_channels * sizeof(struct desc_cryptop_pair *),
++ sc->sc_num_channels * sizeof(struct desc_cryptop_pair *),
+ GFP_KERNEL);
+ if (!sc->sc_chnfifo)
+ goto out;
+ for (i = 0; i < sc->sc_num_channels; i++) {
+ sc->sc_chnfifo[i] = (struct desc_cryptop_pair *) kmalloc(
-+ sc->sc_chfifo_len * sizeof(struct desc_cryptop_pair),
++ sc->sc_chfifo_len * sizeof(struct desc_cryptop_pair),
+ GFP_KERNEL);
+ if (!sc->sc_chnfifo[i])
+ goto out;
-+ memset(sc->sc_chnfifo[i], 0,
++ memset(sc->sc_chnfifo[i], 0,
+ sc->sc_chfifo_len * sizeof(struct desc_cryptop_pair));
+ }
+
@@ -12436,7 +12436,7 @@
+#define TALITOS_ID_SEC_2_1 0x40 /* cross ref with IP block revision reg */
+
+/*
-+ * following num_channels, channel-fifo-depth, exec-unit-mask, and
++ * following num_channels, channel-fifo-depth, exec-unit-mask, and
+ * descriptor-types-mask are for forward-compatibility with openfirmware
+ * flat device trees
+ */
@@ -12464,11 +12464,11 @@
+#define TALITOS_CHFIFOLEN_SEC_2_1 24
+#define TALITOS_CHFIFOLEN_SEC_2_4 24
+
-+/*
++/*
+ * exec-unit-mask : The bitmask representing what Execution Units (EUs)
-+ * are available. EU information should be encoded following the SEC's
++ * are available. EU information should be encoded following the SEC's
+ * EU_SEL0 bitfield documentation, i.e. as follows:
-+ *
++ *
+ * bit 31 = set if SEC permits no-EU selection (should be always set)
+ * bit 30 = set if SEC has the ARC4 EU (AFEU)
+ * bit 29 = set if SEC has the des/3des EU (DEU)
@@ -12477,7 +12477,7 @@
+ * bit 26 = set if SEC has the public key EU (PKEU)
+ * bit 25 = set if SEC has the aes EU (AESU)
+ * bit 24 = set if SEC has the Kasumi EU (KEU)
-+ *
++ *
+ */
+#define TALITOS_HAS_EU_NONE (1<<0)
+#define TALITOS_HAS_EU_AFEU (1<<1)
@@ -12498,8 +12498,8 @@
+
+/*
+ * descriptor-types-mask : The bitmask representing what descriptors
-+ * are available. Descriptor type information should be encoded
-+ * following the SEC's Descriptor Header Dword DESC_TYPE field
++ * are available. Descriptor type information should be encoded
++ * following the SEC's Descriptor Header Dword DESC_TYPE field
+ * documentation, i.e. as follows:
+ *
+ * bit 0 = set if SEC supports the aesu_ctr_nonsnoop desc. type
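
Both the exec-unit mask documented above and this descriptor-types mask are plain capability bitmaps: probe code tests individual bits before advertising an algorithm or descriptor type. A small illustration of such a test (bit positions are placeholders chosen for the example, not values from the SEC manual):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define HAS_EU_DES (1u << 2)    /* placeholder position for the DES/3DES EU bit */
    #define HAS_EU_AES (1u << 5)    /* placeholder position for the AES EU bit */

    static bool eu_present(uint32_t exec_unit_mask, uint32_t eu_bit)
    {
        return (exec_unit_mask & eu_bit) != 0;
    }

    int main(void)
    {
        uint32_t mask = HAS_EU_DES;     /* pretend only the DES/3DES EU is fitted */
        printf("DES/3DES: %s\n", eu_present(mask, HAS_EU_DES) ? "yes" : "no");
        printf("AES:      %s\n", eu_present(mask, HAS_EU_AES) ? "yes" : "no");
        return 0;
    }
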
@@ -12525,7 +12525,7 @@
+#define TALITOS_HAS_DESCTYPES_SEC_2_0 0x01010ebf
+#define TALITOS_HAS_DESCTYPES_SEC_2_1 0x012b0ebf
+
-+/*
++/*
+ * a TALITOS_xxx_HI address points to the low data bits (32-63) of the register
+ */
+
@@ -12564,7 +12564,7 @@
+#define TALITOS_CH_FF_HI 0x114c /* Fetch FIFO's FETCH_ADRS */
+#define TALITOS_CH_CDPR 0x1140 /* Crypto-Channel Pointer Status Reg */
+#define TALITOS_CH_CDPR_HI 0x1144 /* Crypto-Channel Pointer Status Reg */
-+#define TALITOS_CH_DESCBUF 0x1180 /* (thru 11bf) Crypto-Channel
++#define TALITOS_CH_DESCBUF 0x1180 /* (thru 11bf) Crypto-Channel
+ * Descriptor Buffer (debug) */
+
+/* execution unit register offset addresses and bits */
@@ -12986,7 +12986,7 @@
+#endif
+ }
+ }
-+
++
+ kfree(buf);
+
+bad_alloc:
@@ -13963,7 +13963,7 @@
+ IX_MBUF_MLEN(&q->ixp_q_mbuf) = IX_MBUF_PKT_LEN(&q->ixp_q_mbuf) =
+ ((IX_MBUF_MLEN(&q->ixp_q_mbuf) * 8) + 72 + 511) / 8;
+ tbuf = kmalloc(IX_MBUF_MLEN(&q->ixp_q_mbuf), SLAB_ATOMIC);
-+
++
+ if (IX_MBUF_MDATA(&q->ixp_q_mbuf) == NULL) {
+ printk("ixp: kmalloc(%u, SLAB_ATOMIC) failed\n",
+ IX_MBUF_MLEN(&q->ixp_q_mbuf));
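
The resize expression above works in bits: the current mbuf length is multiplied by 8, 72 + 511 extra bits of headroom are added, and the total is converted back to whole bytes by integer division. The patch itself does not say where the constants come from, but the effect is easy to see by evaluating the formula:

    #include <stdio.h>

    /* Same arithmetic as the resize above: grow in bits, truncate to bytes. */
    static unsigned int grown_len(unsigned int len_bytes)
    {
        return ((len_bytes * 8) + 72 + 511) / 8;
    }

    int main(void)
    {
        unsigned int sizes[] = { 40, 100, 1500 };
        for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
            printf("%4u bytes -> %4u bytes\n", sizes[i], grown_len(sizes[i]));
        return 0;
    }
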
@@ -14530,7 +14530,7 @@
+ &q->pkq_op,
+ ixp_kperform_cb,
+ &q->pkq_result);
-+
++
+ if (status == IX_CRYPTO_ACC_STATUS_SUCCESS) {
+ dprintk("%s() - ixCryptoAccPkeEauPerform SUCCESS\n", __FUNCTION__);
+ return; /* callback will return here for callback */
@@ -14907,7 +14907,7 @@
+ int hid = crid & ~(CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_HARDWARE);
+ int typ = crid & (CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_HARDWARE);
+ int caps = 0;
-+
++
+ /* if the user hasn't selected a driver, then just call newsession */
+ if (hid == 0 && typ != 0)
+ return 0;
@@ -14919,7 +14919,7 @@
+ dprintk("%s: hid=%x typ=%x not matched\n", __FUNCTION__, hid, typ);
+ return EINVAL;
+ }
-+
++
+ /* the user didn't specify SW or HW, so the driver is ok */
+ if (typ == 0)
+ return 0;
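
The checks above split the caller's crid into a driver id (hid) and a SOFTWARE/HARDWARE preference (typ), then let the request through when nothing specific was asked for and otherwise compare the preference against the driver's capabilities. A compact model of that decision, with the flag values made up for the example:

    #include <errno.h>
    #include <stdint.h>

    #define F_SOFTWARE (1u << 25)   /* placeholder for CRYPTOCAP_F_SOFTWARE */
    #define F_HARDWARE (1u << 26)   /* placeholder for CRYPTOCAP_F_HARDWARE */

    /* 0 means the request may proceed, EINVAL means the caller asked for a
     * driver type this driver cannot provide. */
    static int check_driver_choice(uint32_t crid, uint32_t driver_caps)
    {
        uint32_t hid = crid & ~(F_SOFTWARE | F_HARDWARE);   /* which driver */
        uint32_t typ = crid &  (F_SOFTWARE | F_HARDWARE);   /* SW/HW preference */

        if (hid == 0 && typ != 0)
            return 0;               /* no driver named: let newsession pick one */
        if (typ == 0)
            return 0;               /* driver named, but no SW/HW constraint */
        return (driver_caps & typ) ? 0 : EINVAL;
    }
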
@@ -15256,7 +15256,7 @@
+ } while ((krp->krp_flags & CRYPTO_KF_DONE) == 0);
+
+ dprintk("%s finished WAITING error=%d\n", __FUNCTION__, error);
-+
++
+ kop->crk_crid = krp->krp_crid; /* device that did the work */
+ if (krp->krp_status != 0) {
+ error = krp->krp_status;
@@ -15330,7 +15330,7 @@
+ }
+ return (0);
+}
-+
++
+static struct csession *
+cseadd(struct fcrypt *fcr, struct csession *cse)
+{
@@ -16008,7 +16008,7 @@
+ int mackeylen; /* mac key */
+ caddr_t mackey;
+
-+ u_int32_t ses; /* returns: session # */
++ u_int32_t ses; /* returns: session # */
+};
+
+struct session2_op {
@@ -16020,7 +16020,7 @@
+ int mackeylen; /* mac key */
+ caddr_t mackey;
+
-+ u_int32_t ses; /* returns: session # */
++ u_int32_t ses; /* returns: session # */
+ int crid; /* driver id + flags (rw) */
+ int pad[4]; /* for future expansion */
+};
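
session_op (and session2_op, which adds a driver id plus padding for future expansion) is what a userspace program hands to the cryptodev character device to open a session. A hedged usage sketch, assuming the usual /dev/crypto node and the CIOCGSESSION ioctl that this framework's cryptodev.h normally exports (header path and error handling may differ per installation):

    /* Illustrative only: assumes OCF's cryptodev.h providing session_op,
     * CRYPTO_AES_CBC and CIOCGSESSION as declared in the patch above. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/types.h>
    #include <unistd.h>
    #include <crypto/cryptodev.h>

    int main(void)
    {
        unsigned char key[] = "0123456789abcdef";  /* 16-byte demo key, not secret */
        struct session_op sess;
        int fd = open("/dev/crypto", O_RDWR);

        if (fd < 0) {
            perror("open /dev/crypto");
            return 1;
        }
        memset(&sess, 0, sizeof(sess));
        sess.cipher = CRYPTO_AES_CBC;   /* symmetric algorithm for the session */
        sess.keylen = 16;               /* AES-128 */
        sess.key    = (caddr_t)key;
        if (ioctl(fd, CIOCGSESSION, &sess) < 0)
            perror("CIOCGSESSION");
        else
            printf("session id %u\n", sess.ses);    /* the "returns: session #" field */
        close(fd);
        return 0;
    }
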
@@ -16310,7 +16310,7 @@
+ * since it does no crypto at all.
+ *
+ * Written by David McCullough <david_mccullough@securecomputing.com>
-+ * Copyright (C) 2006-2007 David McCullough
++ * Copyright (C) 2006-2007 David McCullough
+ *
+ * LICENSE TERMS
+ *
@@ -17087,7 +17087,7 @@
+ offset_in_page(uiop->uio_iov[sg_num].iov_base+skip));
+ sg_len += len;
+ skip = 0;
-+ } else
++ } else
+ skip -= uiop->uio_iov[sg_num].iov_len;
+ }
+ } else {
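
The uio branch above walks the iovec list while honoring the skip offset: iovecs that fit entirely inside the remaining skip are stepped over, and the first useful iovec is entered partway in. A plain-C model of just the skip handling (no scatterlist, only the index and offset the walk would start from):

    #include <stddef.h>

    struct iovec_like { void *base; size_t len; };

    /* Find the iovec index and in-iovec offset after 'skip' bytes have been
     * consumed; returns -1 if skip is larger than all the data. */
    static int seek_skip(const struct iovec_like *iov, int iovcnt,
                         size_t skip, size_t *offset)
    {
        for (int i = 0; i < iovcnt; i++) {
            if (skip < iov[i].len) {
                *offset = skip;             /* start partway into this iovec */
                return i;
            }
            skip -= iov[i].len;             /* whole iovec skipped */
        }
        return -1;
    }
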
@@ -17104,7 +17104,7 @@
+ case SW_TYPE_BLKCIPHER: {
+ unsigned char iv[EALG_MAX_BLOCK_LEN];
+ unsigned char *ivp = iv;
-+ int ivsize =
++ int ivsize =
+ crypto_blkcipher_ivsize(crypto_blkcipher_cast(sw->sw_tfm));
+ struct blkcipher_desc desc;
+
@@ -17205,7 +17205,7 @@
+ sw->u.hmac.sw_klen);
+ crypto_hash_digest(&desc, sg, sg_len, result);
+#endif /* #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) */
-+
++
+ } else { /* SW_TYPE_HASH */
+ crypto_hash_digest(&desc, sg, sg_len, result);
+ }
@@ -17314,7 +17314,7 @@
+
+ for (i = CRYPTO_ALGORITHM_MIN; i <= CRYPTO_ALGORITHM_MAX; ++i)
+ {
-+
++
+ algo = crypto_details[i].alg_name;
+ if (!algo || !*algo)
+ {