author | Jo-Philipp Wich <jow@openwrt.org> | 2009-05-07 03:09:04 +0000
committer | Jo-Philipp Wich <jow@openwrt.org> | 2009-05-07 03:09:04 +0000
commit | f6bf23b9239dd1514bd0aa7bf857a16d1a7f35ed (patch)
tree | 60b86ca9840078ffc27a9889354c6df446b92485 /target/linux/generic-2.6
parent | 3a8000d301a2be1b7314fa7daab82bae88e67620 (diff)
download | upstream-f6bf23b9239dd1514bd0aa7bf857a16d1a7f35ed.tar.gz, upstream-f6bf23b9239dd1514bd0aa7bf857a16d1a7f35ed.tar.bz2, upstream-f6bf23b9239dd1514bd0aa7bf857a16d1a7f35ed.zip
[kernel] update imq patches for 2.6.26, 2.6.27, 2.6.28, 2.6.29 and 2.6.30
git-svn-id: svn://svn.openwrt.org/openwrt/trunk@15655 3c298f89-4303-0410-b956-a3cf2f4a3e73
Diffstat (limited to 'target/linux/generic-2.6')
22 files changed, 3022 insertions, 1727 deletions
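The bulk of the update replaces the old tasklet-based IMQ implementation with one that backs up skb->cb before the qdisc layer reuses it, via the new skb_save_cb()/skb_restore_cb() helpers visible in the net/core/skbuff.c hunks below. The following is a minimal user-space sketch of that backup idea, assuming a fixed 48-byte control buffer and a plain malloc-backed store; the struct and function names here are illustrative only and are not the kernel symbols added by the patch.

```c
/*
 * Simplified illustration of the control-buffer backup scheme used by the
 * updated IMQ patches: before a lower layer (the qdisc layer in the kernel)
 * reuses the 48-byte cb[] scratch area, its current contents are pushed onto
 * a per-buffer chain and copied back afterwards.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define CB_SIZE 48

struct cb_backup {
	struct cb_backup *next;    /* previous backup in the chain */
	char cb[CB_SIZE];          /* saved copy of the control buffer */
};

struct fake_skb {
	char cb[CB_SIZE];          /* scratch area other layers may overwrite */
	struct cb_backup *cb_next; /* head of the backup chain */
};

/* Push a copy of skb->cb so a lower layer can safely overwrite it. */
static int save_cb(struct fake_skb *skb)
{
	struct cb_backup *bk = malloc(sizeof(*bk));

	if (!bk)
		return -1;
	memcpy(bk->cb, skb->cb, CB_SIZE);
	bk->next = skb->cb_next;
	skb->cb_next = bk;
	return 0;
}

/* Pop the most recent backup back into skb->cb and release it. */
static int restore_cb(struct fake_skb *skb)
{
	struct cb_backup *bk = skb->cb_next;

	if (!bk)
		return 0;              /* nothing saved, nothing to do */
	memcpy(skb->cb, bk->cb, CB_SIZE);
	skb->cb_next = bk->next;
	free(bk);
	return 0;
}

int main(void)
{
	struct fake_skb skb = { .cb = "netfilter state", .cb_next = NULL };

	save_cb(&skb);                          /* hand the buffer to the "qdisc" */
	strcpy(skb.cb, "qdisc scribbled here"); /* lower layer reuses cb */
	restore_cb(&skb);                       /* back on the IMQ device side */

	printf("cb after restore: %s\n", skb.cb);
	return 0;
}
```

In the real patches the backup records come from a dedicated kmem_cache and carry a reference count so cloned skbs can share one backup; the sketch above keeps only the chaining and copy-back behaviour.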
diff --git a/target/linux/generic-2.6/patches-2.6.26/150-netfilter_imq.patch b/target/linux/generic-2.6/patches-2.6.26/150-netfilter_imq.patch index d18e0cb5d6..29485c1efa 100644 --- a/target/linux/generic-2.6/patches-2.6.26/150-netfilter_imq.patch +++ b/target/linux/generic-2.6/patches-2.6.26/150-netfilter_imq.patch @@ -1,6 +1,6 @@ --- /dev/null +++ b/drivers/net/imq.c -@@ -0,0 +1,474 @@ +@@ -0,0 +1,565 @@ +/* + * Pseudo-driver for the intermediate queue device. + * @@ -51,10 +51,30 @@ + * + * + * 2008/06/17 - 2.6.25 - Changed imq.c to use qdisc_run() instead -+ * of qdisc_restart() and moved qdisc_run() to tasklet to avoid ++ * of qdisc_restart() and moved qdisc_run() to tasklet to avoid + * recursive locking. New initialization routines to fix 'rmmod' not + * working anymore. Used code from ifb.c. (Jussi Kivilinna) + * ++ * 2008/08/06 - 2.6.26 - (JK) ++ * - Replaced tasklet with 'netif_schedule()'. ++ * - Cleaned up and added comments for imq_nf_queue(). ++ * ++ * 2009/05/02 - Backported 2.6.27 fixes to 2.6.26 (Jussi Kivilinna) ++ * - Add skb_save_cb/skb_restore_cb helper functions for backuping ++ * control buffer. This is needed because some networking layers ++ * on kernels before 2.6.27 overwrite control buffer when they ++ * should not. These errornous uses (wireless for example) of cb ++ * were found when qdisc-layer started using cb in 2.6.27. As we ++ * don't want to break up any code, even if it's buggy, use ++ * same backup-cb trick as used with 2.6.27-patch. ++ * - Add better locking for IMQ device by using spin_lock_bh ++ * instead of spin_lock. There was problem where NIC-interrupt ++ * would happen while IMQ-spin_lock was held which could lead to ++ * deadlock. Hopefully this will solve the SMP issues. ++ * - Fix rmmod not working. ++ * - Use netdevice feature flags to avoid extra packet handling ++ * by core networking layer and possibly increase performance. ++ * + * Also, many thanks to pablo Sebastian Greco for making the initial + * patch and to those who helped the testing. 
+ * @@ -64,8 +84,10 @@ +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/moduleparam.h> ++#include <linux/list.h> +#include <linux/skbuff.h> +#include <linux/netdevice.h> ++#include <linux/etherdevice.h> +#include <linux/rtnetlink.h> +#include <linux/if_arp.h> +#include <linux/netfilter.h> @@ -77,11 +99,6 @@ +#include <net/pkt_sched.h> +#include <net/netfilter/nf_queue.h> + -+struct imq_private { -+ struct tasklet_struct tasklet; -+ unsigned long tasklet_pending; -+}; -+ +static nf_hookfn imq_nf_hook; + +static struct nf_hook_ops imq_ingress_ipv4 = { @@ -140,6 +157,8 @@ +static unsigned int numdevs = IMQ_MAX_DEVS; +#endif + ++static DEFINE_SPINLOCK(imq_nf_queue_lock); ++ +static struct net_device *imq_devs_cache[IMQ_MAX_DEVS]; + +static struct net_device_stats *imq_get_stats(struct net_device *dev) @@ -153,12 +172,35 @@ + struct nf_queue_entry *entry = skb->nf_queue_entry; + + if (entry) { -+ if (entry->indev) -+ dev_put(entry->indev); -+ if (entry->outdev) -+ dev_put(entry->outdev); ++ nf_queue_entry_release_refs(entry); + kfree(entry); + } ++ ++ skb_restore_cb(skb); /* kfree backup */ ++} ++ ++static void imq_nf_reinject(struct nf_queue_entry *entry, unsigned int verdict) ++{ ++ int status; ++ ++ if (!entry->next_outfn) { ++ spin_lock_bh(&imq_nf_queue_lock); ++ nf_reinject(entry, verdict); ++ spin_unlock_bh(&imq_nf_queue_lock); ++ return; ++ } ++ ++ rcu_read_lock(); ++ local_bh_disable(); ++ status = entry->next_outfn(entry, entry->next_queuenum); ++ local_bh_enable(); ++ if (status < 0) { ++ nf_queue_entry_release_refs(entry); ++ kfree_skb(entry->skb); ++ kfree(entry); ++ } ++ ++ rcu_read_unlock(); +} + +static int imq_dev_xmit(struct sk_buff *skb, struct net_device *dev) @@ -169,26 +211,35 @@ + skb->imq_flags = 0; + skb->destructor = NULL; + ++ skb_restore_cb(skb); /* restore skb->cb */ ++ + dev->trans_start = jiffies; -+ nf_reinject(skb->nf_queue_entry, NF_ACCEPT); ++ imq_nf_reinject(skb->nf_queue_entry, NF_ACCEPT); + return 0; +} + +static int imq_nf_queue(struct nf_queue_entry *entry, unsigned queue_num) +{ + struct net_device *dev; -+ struct imq_private *priv; -+ struct sk_buff *skb2 = NULL; ++ struct sk_buff *skb_orig, *skb, *skb_shared; + struct Qdisc *q; -+ unsigned int index = entry->skb->imq_flags & IMQ_F_IFMASK; -+ int ret = -1; -+ -+ if (index > numdevs) -+ return -1; ++ /*spinlock_t *root_lock;*/ ++ int users, index; ++ int retval = -EINVAL; ++ ++ index = entry->skb->imq_flags & IMQ_F_IFMASK; ++ if (unlikely(index > numdevs - 1)) { ++ if (net_ratelimit()) ++ printk(KERN_WARNING ++ "IMQ: invalid device specified, highest is %u\n", ++ numdevs - 1); ++ retval = -EINVAL; ++ goto out; ++ } + + /* check for imq device by index from cache */ + dev = imq_devs_cache[index]; -+ if (!dev) { ++ if (unlikely(!dev)) { + char buf[8]; + + /* get device by name and cache result */ @@ -197,49 +248,87 @@ + if (!dev) { + /* not found ?!*/ + BUG(); -+ return -1; ++ retval = -ENODEV; ++ goto out; + } + + imq_devs_cache[index] = dev; ++ dev_put(dev); + } + -+ priv = netdev_priv(dev); -+ if (!(dev->flags & IFF_UP)) { ++ if (unlikely(!(dev->flags & IFF_UP))) { + entry->skb->imq_flags = 0; -+ nf_reinject(entry, NF_ACCEPT); -+ return 0; ++ imq_nf_reinject(entry, NF_ACCEPT); ++ retval = 0; ++ goto out; + } + dev->last_rx = jiffies; + -+ if (entry->skb->destructor) { -+ skb2 = entry->skb; -+ entry->skb = skb_clone(entry->skb, GFP_ATOMIC); -+ if (!entry->skb) -+ return -1; ++ skb = entry->skb; ++ skb_orig = NULL; ++ ++ /* skb has owner? 
=> make clone */ ++ if (unlikely(skb->destructor)) { ++ skb_orig = skb; ++ skb = skb_clone(skb, GFP_ATOMIC); ++ if (!skb) { ++ retval = -ENOMEM; ++ goto out; ++ } ++ entry->skb = skb; + } -+ entry->skb->nf_queue_entry = entry; + -+ dev->stats.rx_bytes += entry->skb->len; ++ skb->nf_queue_entry = entry; ++ ++ dev->stats.rx_bytes += skb->len; + dev->stats.rx_packets++; + ++ q = rcu_dereference(dev->qdisc); ++ if (unlikely(!q->enqueue)) ++ goto packet_not_eaten_by_imq_dev; ++ + spin_lock_bh(&dev->queue_lock); -+ q = dev->qdisc; -+ if (q->enqueue) { -+ q->enqueue(skb_get(entry->skb), q); -+ if (skb_shared(entry->skb)) { -+ entry->skb->destructor = imq_skb_destructor; -+ kfree_skb(entry->skb); -+ ret = 0; -+ } -+ } -+ if (!test_and_set_bit(1, &priv->tasklet_pending)) -+ tasklet_schedule(&priv->tasklet); -+ spin_unlock_bh(&dev->queue_lock); + -+ if (skb2) -+ kfree_skb(ret ? entry->skb : skb2); ++ users = atomic_read(&skb->users); + -+ return ret; ++ skb_shared = skb_get(skb); /* increase reference count by one */ ++ skb_save_cb(skb_shared); /* backup skb->cb, as qdisc layer will ++ overwrite it */ ++ q->enqueue(skb_shared, q); /* might kfree_skb */ ++ ++ if (likely(atomic_read(&skb_shared->users) == users + 1)) { ++ kfree_skb(skb_shared); /* decrease reference count by one */ ++ ++ skb->destructor = &imq_skb_destructor; ++ ++ /* cloned? */ ++ if (skb_orig) ++ kfree_skb(skb_orig); /* free original */ ++ ++ /* schedule qdisc dequeue */ ++ netif_schedule(dev); ++ ++ spin_unlock_bh(&dev->queue_lock); ++ retval = 0; ++ goto out; ++ } else { ++ skb_restore_cb(skb_shared); /* restore skb->cb */ ++ /* qdisc dropped packet and decreased skb reference count of ++ * skb, so we don't really want to and try refree as that would ++ * actually destroy the skb. */ ++ spin_unlock_bh(&dev->queue_lock); ++ goto packet_not_eaten_by_imq_dev; ++ } ++ ++packet_not_eaten_by_imq_dev: ++ /* cloned? 
restore original */ ++ if (skb_orig) { ++ kfree_skb(skb); ++ entry->skb = skb_orig; ++ } ++ retval = -1; ++out: ++ return retval; +} + +static struct nf_queue_handler nfqh = { @@ -247,17 +336,6 @@ + .outfn = imq_nf_queue, +}; + -+static void qdisc_run_tasklet(unsigned long arg) -+{ -+ struct net_device *dev = (struct net_device *)arg; -+ struct imq_private *priv = netdev_priv(dev); -+ -+ spin_lock(&dev->queue_lock); -+ qdisc_run(dev); -+ clear_bit(1, &priv->tasklet_pending); -+ spin_unlock(&dev->queue_lock); -+} -+ +static unsigned int imq_nf_hook(unsigned int hook, struct sk_buff *pskb, + const struct net_device *indev, + const struct net_device *outdev, @@ -271,21 +349,13 @@ + +static int imq_close(struct net_device *dev) +{ -+ struct imq_private *priv = netdev_priv(dev); -+ -+ tasklet_kill(&priv->tasklet); + netif_stop_queue(dev); -+ + return 0; +} + +static int imq_open(struct net_device *dev) +{ -+ struct imq_private *priv = netdev_priv(dev); -+ -+ tasklet_init(&priv->tasklet, qdisc_run_tasklet, (unsigned long)dev); + netif_start_queue(dev); -+ + return 0; +} + @@ -299,59 +369,74 @@ + dev->mtu = 16000; + dev->tx_queue_len = 11000; + dev->flags = IFF_NOARP; ++ dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | ++ NETIF_F_GSO | NETIF_F_HW_CSUM | ++ NETIF_F_HIGHDMA; ++} ++ ++static int imq_validate(struct nlattr *tb[], struct nlattr *data[]) ++{ ++ int ret = 0; ++ ++ if (tb[IFLA_ADDRESS]) { ++ if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) { ++ ret = -EINVAL; ++ goto end; ++ } ++ if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) { ++ ret = -EADDRNOTAVAIL; ++ goto end; ++ } ++ } ++ return 0; ++end: ++ printk(KERN_WARNING "IMQ: imq_validate failed (%d)\n", ret); ++ return ret; +} + +static struct rtnl_link_ops imq_link_ops __read_mostly = { + .kind = "imq", -+ .priv_size = sizeof(struct imq_private), ++ .priv_size = 0, + .setup = imq_setup, ++ .validate = imq_validate, +}; + +static int __init imq_init_hooks(void) +{ + int err; + -+ err = nf_register_queue_handler(PF_INET, &nfqh); -+ if (err) -+ goto err1; ++ nf_register_queue_imq_handler(&nfqh); + + err = nf_register_hook(&imq_ingress_ipv4); + if (err) -+ goto err2; ++ goto err1; + + err = nf_register_hook(&imq_egress_ipv4); + if (err) -+ goto err3; ++ goto err2; + +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) -+ err = nf_register_queue_handler(PF_INET6, &nfqh); -+ if (err) -+ goto err4; -+ + err = nf_register_hook(&imq_ingress_ipv6); + if (err) -+ goto err5; ++ goto err3; + + err = nf_register_hook(&imq_egress_ipv6); + if (err) -+ goto err6; ++ goto err4; +#endif + + return 0; + +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) -+err6: -+ nf_unregister_hook(&imq_ingress_ipv6); -+err5: -+ nf_unregister_queue_handler(PF_INET6, &nfqh); +err4: ++ nf_unregister_hook(&imq_ingress_ipv6); ++err3: + nf_unregister_hook(&imq_egress_ipv4); +#endif -+err3: -+ nf_unregister_hook(&imq_ingress_ipv4); +err2: -+ nf_unregister_queue_handler(PF_INET, &nfqh); ++ nf_unregister_hook(&imq_ingress_ipv4); +err1: ++ nf_unregister_queue_imq_handler(); + return err; +} + @@ -360,7 +445,7 @@ + struct net_device *dev; + int ret; + -+ dev = alloc_netdev(sizeof(struct imq_private), "imq%d", imq_setup); ++ dev = alloc_netdev(0, "imq%d", imq_setup); + if (!dev) + return -ENOMEM; + @@ -383,7 +468,7 @@ +{ + int err, i; + -+ if (!numdevs || numdevs > IMQ_MAX_DEVS) { ++ if (numdevs < 1 || numdevs > IMQ_MAX_DEVS) { + printk(KERN_ERR "IMQ: numdevs has to be betweed 1 and %u\n", + IMQ_MAX_DEVS); + return -EINVAL; @@ -408,6 +493,12 @@ +{ + int err; + ++#if 
defined(CONFIG_IMQ_NUM_DEVS) ++ BUILD_BUG_ON(CONFIG_IMQ_NUM_DEVS > 16); ++ BUILD_BUG_ON(CONFIG_IMQ_NUM_DEVS < 2); ++ BUILD_BUG_ON(CONFIG_IMQ_NUM_DEVS - 1 > IMQ_F_IFMASK); ++#endif ++ + err = imq_init_devs(); + if (err) { + printk(KERN_ERR "IMQ: Error trying imq_init_devs(net)\n"); @@ -443,11 +534,11 @@ +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) + nf_unregister_hook(&imq_ingress_ipv6); + nf_unregister_hook(&imq_egress_ipv6); -+ nf_unregister_queue_handler(PF_INET6, &nfqh); +#endif + nf_unregister_hook(&imq_ingress_ipv4); + nf_unregister_hook(&imq_egress_ipv4); -+ nf_unregister_queue_handler(PF_INET, &nfqh); ++ ++ nf_unregister_queue_imq_handler(); +} + +static void __exit imq_cleanup_devs(void) @@ -477,7 +568,7 @@ + --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig -@@ -117,6 +117,129 @@ config EQUALIZER +@@ -117,6 +117,129 @@ To compile this driver as a module, choose M here: the module will be called eql. If unsure, say N. @@ -609,7 +700,7 @@ select CRC32 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile -@@ -142,6 +142,7 @@ obj-$(CONFIG_SLHC) += slhc.o +@@ -142,6 +142,7 @@ obj-$(CONFIG_XEN_NETDEV_FRONTEND) += xen-netfront.o obj-$(CONFIG_DUMMY) += dummy.o @@ -619,52 +710,102 @@ obj-$(CONFIG_DE600) += de600.o --- /dev/null +++ b/include/linux/imq.h -@@ -0,0 +1,9 @@ +@@ -0,0 +1,13 @@ +#ifndef _IMQ_H +#define _IMQ_H + -+#define IMQ_MAX_DEVS 16 ++/* IFMASK (16 device indexes, 0 to 15) and flag(s) fit in 5 bits */ ++#define IMQ_F_BITS 5 ++ ++#define IMQ_F_IFMASK 0x0f ++#define IMQ_F_ENQUEUE 0x10 + -+#define IMQ_F_IFMASK 0x7f -+#define IMQ_F_ENQUEUE 0x80 ++#define IMQ_MAX_DEVS (IMQ_F_IFMASK + 1) + +#endif /* _IMQ_H */ ++ --- /dev/null +++ b/include/linux/netfilter_ipv4/ipt_IMQ.h -@@ -0,0 +1,8 @@ +@@ -0,0 +1,10 @@ +#ifndef _IPT_IMQ_H +#define _IPT_IMQ_H + -+struct ipt_imq_info { -+ unsigned int todev; /* target imq device */ -+}; ++/* Backwards compatibility for old userspace */ ++#include <linux/netfilter/xt_IMQ.h> ++ ++#define ipt_imq_info xt_imq_info + +#endif /* _IPT_IMQ_H */ ++ --- /dev/null +++ b/include/linux/netfilter_ipv6/ip6t_IMQ.h -@@ -0,0 +1,8 @@ +@@ -0,0 +1,10 @@ +#ifndef _IP6T_IMQ_H +#define _IP6T_IMQ_H + -+struct ip6t_imq_info { -+ unsigned int todev; /* target imq device */ -+}; ++/* Backwards compatibility for old userspace */ ++#include <linux/netfilter/xt_IMQ.h> ++ ++#define ip6t_imq_info xt_imq_info + +#endif /* _IP6T_IMQ_H */ ++ --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h -@@ -300,6 +300,10 @@ struct sk_buff { +@@ -28,6 +28,9 @@ + #include <linux/rcupdate.h> + #include <linux/dmaengine.h> + #include <linux/hrtimer.h> ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++#include <linux/imq.h> ++#endif + + #define HAVE_ALLOC_SKB /* For the drivers to know */ + #define HAVE_ALIGNABLE_SKB /* Ditto 8) */ +@@ -270,6 +273,9 @@ + * first. This is owned by whoever has the skb queued ATM. 
+ */ + char cb[48]; ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++ void *cb_next; ++#endif + + unsigned int len, + data_len; +@@ -300,6 +306,9 @@ struct nf_conntrack *nfct; struct sk_buff *nfct_reasm; #endif +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) -+ unsigned char imq_flags; + struct nf_queue_entry *nf_queue_entry; +#endif #ifdef CONFIG_BRIDGE_NETFILTER struct nf_bridge_info *nf_bridge; #endif -@@ -1633,6 +1637,10 @@ static inline void __nf_copy(struct sk_b +@@ -318,6 +327,9 @@ + __u8 ndisc_nodetype:2; + #endif + /* 14 bit hole */ ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++ __u8 imq_flags:IMQ_F_BITS; ++#endif + + #ifdef CONFIG_NET_DMA + dma_cookie_t dma_cookie; +@@ -348,6 +360,12 @@ + + #include <asm/system.h> + ++ ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++extern int skb_save_cb(struct sk_buff *skb); ++extern int skb_restore_cb(struct sk_buff *skb); ++#endif ++ + extern void kfree_skb(struct sk_buff *skb); + extern void __kfree_skb(struct sk_buff *skb); + extern struct sk_buff *__alloc_skb(unsigned int size, +@@ -1633,6 +1651,10 @@ dst->nfct_reasm = src->nfct_reasm; nf_conntrack_get_reasm(src->nfct_reasm); #endif @@ -687,7 +828,7 @@ #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/stat.h> -@@ -1569,7 +1572,11 @@ static int dev_gso_segment(struct sk_buf +@@ -1569,7 +1572,11 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) { if (likely(!skb->next)) { @@ -701,117 +842,331 @@ if (netif_needs_gso(dev, skb)) { --- /dev/null -+++ b/net/ipv4/netfilter/ipt_IMQ.c -@@ -0,0 +1,69 @@ -+/* -+ * This target marks packets to be enqueued to an imq device -+ */ -+#include <linux/module.h> -+#include <linux/skbuff.h> -+#include <linux/netfilter_ipv4/ip_tables.h> -+#include <linux/netfilter_ipv4/ipt_IMQ.h> -+#include <linux/imq.h> ++++ b/include/linux/netfilter/xt_IMQ.h +@@ -0,0 +1,9 @@ ++#ifndef _XT_IMQ_H ++#define _XT_IMQ_H + -+static unsigned int imq_target(struct sk_buff *pskb, -+ const struct net_device *in, -+ const struct net_device *out, -+ unsigned int hooknum, -+ const struct xt_target *target, -+ const void *targinfo) ++struct xt_imq_info { ++ unsigned int todev; /* target imq device */ ++}; ++ ++#endif /* _XT_IMQ_H */ ++ +--- a/include/net/netfilter/nf_queue.h ++++ b/include/net/netfilter/nf_queue.h +@@ -13,6 +13,12 @@ + struct net_device *indev; + struct net_device *outdev; + int (*okfn)(struct sk_buff *); ++ ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++ int (*next_outfn)(struct nf_queue_entry *entry, ++ unsigned int queuenum); ++ unsigned int next_queuenum; ++#endif + }; + + #define nf_queue_entry_reroute(x) ((void *)x + sizeof(struct nf_queue_entry)) +@@ -30,5 +36,11 @@ + const struct nf_queue_handler *qh); + extern void nf_unregister_queue_handlers(const struct nf_queue_handler *qh); + extern void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict); ++extern void nf_queue_entry_release_refs(struct nf_queue_entry *entry); ++ ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++extern void nf_register_queue_imq_handler(const struct nf_queue_handler *qh); ++extern void nf_unregister_queue_imq_handler(void); ++#endif + + #endif /* _NF_QUEUE_H */ +--- a/net/core/skbuff.c ++++ b/net/core/skbuff.c +@@ -71,6 +71,9 @@ + + static struct kmem_cache *skbuff_head_cache __read_mostly; + static struct kmem_cache *skbuff_fclone_cache __read_mostly; ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++static struct kmem_cache *skbuff_cb_store_cache __read_mostly; ++#endif + 
+ static void sock_pipe_buf_release(struct pipe_inode_info *pipe, + struct pipe_buffer *buf) +@@ -94,6 +97,81 @@ + return 1; + } + ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++/* Control buffer save/restore for IMQ devices */ ++struct skb_cb_table { ++ void *cb_next; ++ atomic_t refcnt; ++ char cb[48]; ++}; ++ ++static DEFINE_SPINLOCK(skb_cb_store_lock); ++ ++int skb_save_cb(struct sk_buff *skb) +{ -+ struct ipt_imq_info *mr = (struct ipt_imq_info *)targinfo; ++ struct skb_cb_table *next; + -+ pskb->imq_flags = mr->todev | IMQ_F_ENQUEUE; ++ next = kmem_cache_alloc(skbuff_cb_store_cache, GFP_ATOMIC); ++ if (!next) ++ return -ENOMEM; + -+ return XT_CONTINUE; ++ BUILD_BUG_ON(sizeof(skb->cb) != sizeof(next->cb)); ++ ++ memcpy(next->cb, skb->cb, sizeof(skb->cb)); ++ next->cb_next = skb->cb_next; ++ ++ atomic_set(&next->refcnt, 1); ++ ++got_next: ++ skb->cb_next = next; ++ return 0; +} ++EXPORT_SYMBOL(skb_save_cb); + -+static bool imq_checkentry(const char *tablename, -+ const void *e, -+ const struct xt_target *target, -+ void *targinfo, -+ unsigned int hook_mask) ++int skb_restore_cb(struct sk_buff *skb) +{ -+ struct ipt_imq_info *mr; ++ struct skb_cb_table *next; + -+ mr = (struct ipt_imq_info *)targinfo; -+ -+ if (mr->todev > IMQ_MAX_DEVS) { -+ printk(KERN_WARNING -+ "IMQ: invalid device specified, highest is %u\n", -+ IMQ_MAX_DEVS); ++ if (!skb->cb_next) + return 0; ++ ++ next = skb->cb_next; ++ ++ BUILD_BUG_ON(sizeof(skb->cb) != sizeof(next->cb)); ++ ++ memcpy(skb->cb, next->cb, sizeof(skb->cb)); ++ skb->cb_next = next->cb_next; ++ ++ spin_lock(&skb_cb_store_lock); ++ ++ if (atomic_dec_and_test(&next->refcnt)) { ++ kmem_cache_free(skbuff_cb_store_cache, next); + } + -+ return 1; ++ spin_unlock(&skb_cb_store_lock); ++ ++ return 0; +} ++EXPORT_SYMBOL(skb_restore_cb); + -+static struct xt_target ipt_imq_reg = { -+ .name = "IMQ", -+ .family = AF_INET, -+ .target = imq_target, -+ .targetsize = sizeof(struct ipt_imq_info), -+ .checkentry = imq_checkentry, -+ .me = THIS_MODULE, -+ .table = "mangle" -+}; ++static void skb_copy_stored_cb(struct sk_buff *new, struct sk_buff *old) ++{ ++ struct skb_cb_table *next; ++ ++ if (!old->cb_next) { ++ new->cb_next = 0; ++ return; ++ } ++ ++ spin_lock(&skb_cb_store_lock); ++ ++ next = old->cb_next; ++ atomic_inc(&next->refcnt); ++ new->cb_next = next; ++ ++ spin_unlock(&skb_cb_store_lock); ++} ++#endif + + /* Pipe buffer operations for a socket. */ + static struct pipe_buf_operations sock_pipe_buf_ops = { +@@ -376,6 +454,15 @@ + WARN_ON(in_irq()); + skb->destructor(skb); + } ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++ /* This should not happen. When it does, avoid memleak by restoring ++ the chain of cb-backups. 
*/ ++ while(skb->cb_next != NULL) { ++ printk(KERN_WARNING "kfree_skb: skb->cb_next: %08x\n", ++ skb->cb_next); ++ skb_restore_cb(skb); ++ } ++#endif + #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) + nf_conntrack_put(skb->nfct); + nf_conntrack_put_reasm(skb->nfct_reasm); +@@ -438,6 +525,9 @@ + new->sp = secpath_get(old->sp); + #endif + memcpy(new->cb, old->cb, sizeof(old->cb)); ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++ skb_copy_stored_cb(new, old); ++#endif + new->csum_start = old->csum_start; + new->csum_offset = old->csum_offset; + new->local_df = old->local_df; +@@ -2290,6 +2380,7 @@ + nskb->protocol = skb->protocol; + nskb->dst = dst_clone(skb->dst); + memcpy(nskb->cb, skb->cb, sizeof(skb->cb)); ++ skb_copy_stored_cb(nskb, skb); + nskb->pkt_type = skb->pkt_type; + nskb->mac_len = skb->mac_len; + +@@ -2371,6 +2462,13 @@ + 0, + SLAB_HWCACHE_ALIGN|SLAB_PANIC, + NULL); ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++ skbuff_cb_store_cache = kmem_cache_create("skbuff_cb_store_cache", ++ sizeof(struct skb_cb_table), ++ 0, ++ SLAB_HWCACHE_ALIGN|SLAB_PANIC, ++ NULL); ++#endif + } + + /** +--- a/net/netfilter/Kconfig ++++ b/net/netfilter/Kconfig +@@ -334,6 +334,18 @@ + + To compile it as a module, choose M here. If unsure, say N. + ++config NETFILTER_XT_TARGET_IMQ ++ tristate '"IMQ" target support' ++ depends on NETFILTER_XTABLES ++ depends on IP_NF_MANGLE || IP6_NF_MANGLE ++ select IMQ ++ default m if NETFILTER_ADVANCED=n ++ help ++ This option adds a `IMQ' target which is used to specify if and ++ to which imq device packets should get enqueued/dequeued. ++ ++ To compile it as a module, choose M here. If unsure, say N. + -+static int __init init(void) + config NETFILTER_XT_TARGET_MARK + tristate '"MARK" target support' + depends on NETFILTER_XTABLES +--- a/net/netfilter/Makefile ++++ b/net/netfilter/Makefile +@@ -42,6 +42,7 @@ + obj-$(CONFIG_NETFILTER_XT_TARGET_CONNMARK) += xt_CONNMARK.o + obj-$(CONFIG_NETFILTER_XT_TARGET_CONNSECMARK) += xt_CONNSECMARK.o + obj-$(CONFIG_NETFILTER_XT_TARGET_DSCP) += xt_DSCP.o ++obj-$(CONFIG_NETFILTER_XT_TARGET_IMQ) += xt_IMQ.o + obj-$(CONFIG_NETFILTER_XT_TARGET_MARK) += xt_MARK.o + obj-$(CONFIG_NETFILTER_XT_TARGET_NFLOG) += xt_NFLOG.o + obj-$(CONFIG_NETFILTER_XT_TARGET_NFQUEUE) += xt_NFQUEUE.o +--- a/net/netfilter/nf_queue.c ++++ b/net/netfilter/nf_queue.c +@@ -20,6 +20,26 @@ + + static DEFINE_MUTEX(queue_handler_mutex); + ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++static const struct nf_queue_handler *queue_imq_handler; ++ ++void nf_register_queue_imq_handler(const struct nf_queue_handler *qh) +{ -+ return xt_register_target(&ipt_imq_reg); ++ mutex_lock(&queue_handler_mutex); ++ rcu_assign_pointer(queue_imq_handler, qh); ++ mutex_unlock(&queue_handler_mutex); +} ++EXPORT_SYMBOL(nf_register_queue_imq_handler); + -+static void __exit fini(void) ++void nf_unregister_queue_imq_handler(void) +{ -+ xt_unregister_target(&ipt_imq_reg); ++ mutex_lock(&queue_handler_mutex); ++ rcu_assign_pointer(queue_imq_handler, NULL); ++ mutex_unlock(&queue_handler_mutex); +} ++EXPORT_SYMBOL(nf_unregister_queue_imq_handler); ++#endif + -+module_init(init); -+module_exit(fini); + /* return EBUSY when somebody else is registered, return EEXIST if the + * same handler is registered, return 0 in case of success. 
*/ + int nf_register_queue_handler(int pf, const struct nf_queue_handler *qh) +@@ -80,7 +100,7 @@ + } + EXPORT_SYMBOL_GPL(nf_unregister_queue_handlers); + +-static void nf_queue_entry_release_refs(struct nf_queue_entry *entry) ++void nf_queue_entry_release_refs(struct nf_queue_entry *entry) + { + /* Release those devices we held, or Alexey will kill me. */ + if (entry->indev) +@@ -100,6 +120,7 @@ + /* Drop reference to owner of hook which queued us. */ + module_put(entry->elem->owner); + } ++EXPORT_SYMBOL_GPL(nf_queue_entry_release_refs); + + /* + * Any packet that leaves via this function must come back +@@ -121,12 +142,26 @@ + #endif + const struct nf_afinfo *afinfo; + const struct nf_queue_handler *qh; ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++ const struct nf_queue_handler *qih = NULL; ++#endif + + /* QUEUE == DROP if noone is waiting, to be safe. */ + rcu_read_lock(); + + qh = rcu_dereference(queue_handler[pf]); ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) ++ if (pf == PF_INET || pf == PF_INET6) ++#else ++ if (pf == PF_INET) ++#endif ++ qih = rcu_dereference(queue_imq_handler); + -+MODULE_AUTHOR("http://www.linuximq.net"); -+MODULE_DESCRIPTION("Pseudo-driver for the intermediate queue device. See http://www.linuximq.net/ for more information."); -+MODULE_LICENSE("GPL"); ---- a/net/ipv4/netfilter/Kconfig -+++ b/net/ipv4/netfilter/Kconfig -@@ -123,6 +123,17 @@ config IP_NF_FILTER ++ if (!qh && !qih) ++#else /* !IMQ */ + if (!qh) ++#endif + goto err_unlock; - To compile it as a module, choose M here. If unsure, say N. + afinfo = nf_get_afinfo(pf); +@@ -145,6 +180,10 @@ + .indev = indev, + .outdev = outdev, + .okfn = okfn, ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++ .next_outfn = qh ? qh->outfn : NULL, ++ .next_queuenum = queuenum, ++#endif + }; -+config IP_NF_TARGET_IMQ -+ tristate "IMQ target support" -+ depends on IP_NF_MANGLE && IMQ -+ help -+ This option adds a `IMQ' target which is used to specify if and -+ to which IMQ device packets should get enqueued/dequeued. -+ -+ For more information visit: http://www.linuximq.net/ -+ -+ To compile it as a module, choose M here. If unsure, say N. -+ - config IP_NF_TARGET_REJECT - tristate "REJECT target support" - depends on IP_NF_FILTER ---- a/net/ipv4/netfilter/Makefile -+++ b/net/ipv4/netfilter/Makefile -@@ -55,6 +55,7 @@ obj-$(CONFIG_IP_NF_MATCH_SET) += ipt_set - obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o - obj-$(CONFIG_IP_NF_TARGET_ECN) += ipt_ECN.o - obj-$(CONFIG_IP_NF_TARGET_LOG) += ipt_LOG.o -+obj-$(CONFIG_IP_NF_TARGET_IMQ) += ipt_IMQ.o - obj-$(CONFIG_IP_NF_TARGET_MASQUERADE) += ipt_MASQUERADE.o - obj-$(CONFIG_IP_NF_TARGET_NETMAP) += ipt_NETMAP.o - obj-$(CONFIG_IP_NF_TARGET_REDIRECT) += ipt_REDIRECT.o + /* If it's going away, ignore hook. 
*/ +@@ -170,8 +209,19 @@ + } + #endif + afinfo->saveroute(skb, entry); ++ ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++ if (qih) { ++ status = qih->outfn(entry, queuenum); ++ goto imq_skip_queue; ++ } ++#endif ++ + status = qh->outfn(entry, queuenum); + ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++imq_skip_queue: ++#endif + rcu_read_unlock(); + + if (status < 0) { --- /dev/null -+++ b/net/ipv6/netfilter/ip6t_IMQ.c -@@ -0,0 +1,69 @@ ++++ b/net/netfilter/xt_IMQ.c +@@ -0,0 +1,81 @@ +/* + * This target marks packets to be enqueued to an imq device + */ +#include <linux/module.h> +#include <linux/skbuff.h> -+#include <linux/netfilter_ipv6/ip6_tables.h> -+#include <linux/netfilter_ipv6/ip6t_IMQ.h> ++#include <linux/netfilter/x_tables.h> ++#include <linux/netfilter/xt_IMQ.h> +#include <linux/imq.h> + +static unsigned int imq_target(struct sk_buff *pskb, @@ -821,9 +1176,9 @@ + const struct xt_target *target, + const void *targinfo) +{ -+ struct ip6t_imq_info *mr = (struct ip6t_imq_info *)targinfo; ++ const struct xt_imq_info *mr = targinfo; + -+ pskb->imq_flags = mr->todev | IMQ_F_ENQUEUE; ++ pskb->imq_flags = (mr->todev & IMQ_F_IFMASK) | IMQ_F_ENQUEUE; + + return XT_CONTINUE; +} @@ -834,81 +1189,55 @@ + void *targinfo, + unsigned int hook_mask) +{ -+ struct ip6t_imq_info *mr; -+ -+ mr = (struct ip6t_imq_info *)targinfo; ++ struct xt_imq_info *mr = targinfo; + -+ if (mr->todev > IMQ_MAX_DEVS) { ++ if (mr->todev > IMQ_MAX_DEVS - 1) { + printk(KERN_WARNING + "IMQ: invalid device specified, highest is %u\n", -+ IMQ_MAX_DEVS); ++ IMQ_MAX_DEVS - 1); + return 0; + } + + return 1; +} + -+static struct xt_target ip6t_imq_reg = { -+ .name = "IMQ", -+ .family = AF_INET6, -+ .target = imq_target, -+ .targetsize = sizeof(struct ip6t_imq_info), -+ .table = "mangle", -+ .checkentry = imq_checkentry, -+ .me = THIS_MODULE ++static struct xt_target xt_imq_reg[] __read_mostly = { ++ { ++ .name = "IMQ", ++ .family = AF_INET, ++ .target = imq_target, ++ .targetsize = sizeof(struct xt_imq_info), ++ .table = "mangle", ++ .checkentry = imq_checkentry, ++ .me = THIS_MODULE ++ }, ++ { ++ .name = "IMQ", ++ .family = AF_INET6, ++ .target = imq_target, ++ .targetsize = sizeof(struct xt_imq_info), ++ .table = "mangle", ++ .checkentry = imq_checkentry, ++ .me = THIS_MODULE ++ }, +}; + -+static int __init init(void) ++static int __init imq_init(void) +{ -+ return xt_register_target(&ip6t_imq_reg); ++ return xt_register_targets(xt_imq_reg, ARRAY_SIZE(xt_imq_reg)); +} + -+static void __exit fini(void) ++static void __exit imq_fini(void) +{ -+ xt_unregister_target(&ip6t_imq_reg); ++ xt_unregister_targets(xt_imq_reg, ARRAY_SIZE(xt_imq_reg)); +} + -+module_init(init); -+module_exit(fini); ++module_init(imq_init); ++module_exit(imq_fini); + +MODULE_AUTHOR("http://www.linuximq.net"); +MODULE_DESCRIPTION("Pseudo-driver for the intermediate queue device. See http://www.linuximq.net/ for more information."); +MODULE_LICENSE("GPL"); ---- a/net/ipv6/netfilter/Kconfig -+++ b/net/ipv6/netfilter/Kconfig -@@ -179,6 +179,15 @@ config IP6_NF_MANGLE - - To compile it as a module, choose M here. If unsure, say N. - -+config IP6_NF_TARGET_IMQ -+ tristate "IMQ target support" -+ depends on IP6_NF_MANGLE && IMQ -+ help -+ This option adds a `IMQ' target which is used to specify if and -+ to which imq device packets should get enqueued/dequeued. ++MODULE_ALIAS("ipt_IMQ"); ++MODULE_ALIAS("ip6t_IMQ"); + -+ To compile it as a module, choose M here. If unsure, say N. 
-+ - config IP6_NF_TARGET_HL - tristate 'HL (hoplimit) target support' - depends on IP6_NF_MANGLE ---- a/net/ipv6/netfilter/Makefile -+++ b/net/ipv6/netfilter/Makefile -@@ -6,6 +6,7 @@ - obj-$(CONFIG_IP6_NF_IPTABLES) += ip6_tables.o - obj-$(CONFIG_IP6_NF_FILTER) += ip6table_filter.o - obj-$(CONFIG_IP6_NF_MANGLE) += ip6table_mangle.o -+obj-$(CONFIG_IP6_NF_TARGET_IMQ) += ip6t_IMQ.o - obj-$(CONFIG_IP6_NF_QUEUE) += ip6_queue.o - obj-$(CONFIG_IP6_NF_RAW) += ip6table_raw.o - ---- a/net/sched/sch_generic.c -+++ b/net/sched/sch_generic.c -@@ -203,6 +203,7 @@ void __qdisc_run(struct net_device *dev) - - clear_bit(__LINK_STATE_QDISC_RUNNING, &dev->state); - } -+EXPORT_SYMBOL(__qdisc_run); - - static void dev_watchdog(unsigned long arg) - { diff --git a/target/linux/generic-2.6/patches-2.6.26/180-netfilter_depends.patch b/target/linux/generic-2.6/patches-2.6.26/180-netfilter_depends.patch index e495b3db41..e7d666e22d 100644 --- a/target/linux/generic-2.6/patches-2.6.26/180-netfilter_depends.patch +++ b/target/linux/generic-2.6/patches-2.6.26/180-netfilter_depends.patch @@ -1,6 +1,6 @@ --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig -@@ -157,7 +157,7 @@ config NF_CONNTRACK_FTP +@@ -157,7 +157,7 @@ config NF_CONNTRACK_H323 tristate "H.323 protocol support" @@ -9,7 +9,7 @@ depends on NETFILTER_ADVANCED help H.323 is a VoIP signalling protocol from ITU-T. As one of the most -@@ -435,7 +435,7 @@ config NETFILTER_XT_TARGET_CONNSECMARK +@@ -447,7 +447,7 @@ config NETFILTER_XT_TARGET_TCPMSS tristate '"TCPMSS" target support' diff --git a/target/linux/generic-2.6/patches-2.6.26/190-netfilter_rtsp.patch b/target/linux/generic-2.6/patches-2.6.26/190-netfilter_rtsp.patch index 73ce330db5..097cbb651c 100644 --- a/target/linux/generic-2.6/patches-2.6.26/190-netfilter_rtsp.patch +++ b/target/linux/generic-2.6/patches-2.6.26/190-netfilter_rtsp.patch @@ -294,7 +294,7 @@ +#endif /* _NETFILTER_MIME_H */ --- a/net/ipv4/netfilter/Makefile +++ b/net/ipv4/netfilter/Makefile -@@ -23,6 +23,7 @@ obj-$(CONFIG_NF_NAT_AMANDA) += nf_nat_am +@@ -23,6 +23,7 @@ obj-$(CONFIG_NF_NAT_FTP) += nf_nat_ftp.o obj-$(CONFIG_NF_NAT_H323) += nf_nat_h323.o obj-$(CONFIG_NF_NAT_IRC) += nf_nat_irc.o @@ -304,7 +304,7 @@ obj-$(CONFIG_NF_NAT_SNMP_BASIC) += nf_nat_snmp_basic.o --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig -@@ -270,6 +270,16 @@ config NF_CONNTRACK_TFTP +@@ -270,6 +270,16 @@ To compile it as a module, choose M here. If unsure, say N. @@ -323,7 +323,7 @@ depends on NF_CONNTRACK --- a/net/netfilter/Makefile +++ b/net/netfilter/Makefile -@@ -33,6 +33,7 @@ obj-$(CONFIG_NF_CONNTRACK_PPTP) += nf_co +@@ -33,6 +33,7 @@ obj-$(CONFIG_NF_CONNTRACK_SANE) += nf_conntrack_sane.o obj-$(CONFIG_NF_CONNTRACK_SIP) += nf_conntrack_sip.o obj-$(CONFIG_NF_CONNTRACK_TFTP) += nf_conntrack_tftp.o @@ -333,7 +333,7 @@ obj-$(CONFIG_NETFILTER_XTABLES) += x_tables.o xt_tcpudp.o --- a/net/ipv4/netfilter/Kconfig +++ b/net/ipv4/netfilter/Kconfig -@@ -282,6 +282,11 @@ config NF_NAT_IRC +@@ -271,6 +271,11 @@ depends on IP_NF_IPTABLES && NF_CONNTRACK && NF_NAT default NF_NAT && NF_CONNTRACK_IRC diff --git a/target/linux/generic-2.6/patches-2.6.27/150-netfilter_imq.patch b/target/linux/generic-2.6/patches-2.6.27/150-netfilter_imq.patch index 46adc66e45..0814520f9a 100644 --- a/target/linux/generic-2.6/patches-2.6.27/150-netfilter_imq.patch +++ b/target/linux/generic-2.6/patches-2.6.27/150-netfilter_imq.patch @@ -1,6 +1,6 @@ --- /dev/null +++ b/drivers/net/imq.c -@@ -0,0 +1,474 @@ +@@ -0,0 +1,566 @@ +/* + * Pseudo-driver for the intermediate queue device. 
+ * @@ -51,10 +51,27 @@ + * + * + * 2008/06/17 - 2.6.25 - Changed imq.c to use qdisc_run() instead -+ * of qdisc_restart() and moved qdisc_run() to tasklet to avoid ++ * of qdisc_restart() and moved qdisc_run() to tasklet to avoid + * recursive locking. New initialization routines to fix 'rmmod' not + * working anymore. Used code from ifb.c. (Jussi Kivilinna) + * ++ * 2008/08/06 - 2.6.26 - (JK) ++ * - Replaced tasklet with 'netif_schedule()'. ++ * - Cleaned up and added comments for imq_nf_queue(). ++ * ++ * 2009/04/12 ++ * - Add skb_save_cb/skb_restore_cb helper functions for backuping ++ * control buffer. This is needed because qdisc-layer on kernels ++ * 2.6.27 and newer overwrite control buffer. (Jussi Kivilinna) ++ * - Add better locking for IMQ device. Hopefully this will solve ++ * SMP issues. (Jussi Kivilinna) ++ * - Port to 2.6.27 ++ * ++ * 2009/04/20 - (Jussi Kivilinna) ++ * - Fix rmmod not working ++ * - Use netdevice feature flags to avoid extra packet handling ++ * by core networking layer and possibly increase performance. ++ * + * Also, many thanks to pablo Sebastian Greco for making the initial + * patch and to those who helped the testing. + * @@ -64,8 +81,10 @@ +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/moduleparam.h> ++#include <linux/list.h> +#include <linux/skbuff.h> +#include <linux/netdevice.h> ++#include <linux/etherdevice.h> +#include <linux/rtnetlink.h> +#include <linux/if_arp.h> +#include <linux/netfilter.h> @@ -77,11 +96,6 @@ +#include <net/pkt_sched.h> +#include <net/netfilter/nf_queue.h> + -+struct imq_private { -+ struct tasklet_struct tasklet; -+ unsigned long tasklet_pending; -+}; -+ +static nf_hookfn imq_nf_hook; + +static struct nf_hook_ops imq_ingress_ipv4 = { @@ -140,8 +154,11 @@ +static unsigned int numdevs = IMQ_MAX_DEVS; +#endif + ++static DEFINE_SPINLOCK(imq_nf_queue_lock); ++ +static struct net_device *imq_devs_cache[IMQ_MAX_DEVS]; + ++ +static struct net_device_stats *imq_get_stats(struct net_device *dev) +{ + return &dev->stats; @@ -153,12 +170,35 @@ + struct nf_queue_entry *entry = skb->nf_queue_entry; + + if (entry) { -+ if (entry->indev) -+ dev_put(entry->indev); -+ if (entry->outdev) -+ dev_put(entry->outdev); ++ nf_queue_entry_release_refs(entry); + kfree(entry); + } ++ ++ skb_restore_cb(skb); /* kfree backup */ ++} ++ ++static void imq_nf_reinject(struct nf_queue_entry *entry, unsigned int verdict) ++{ ++ int status; ++ ++ if (!entry->next_outfn) { ++ spin_lock_bh(&imq_nf_queue_lock); ++ nf_reinject(entry, verdict); ++ spin_unlock_bh(&imq_nf_queue_lock); ++ return; ++ } ++ ++ rcu_read_lock(); ++ local_bh_disable(); ++ status = entry->next_outfn(entry, entry->next_queuenum); ++ local_bh_enable(); ++ if (status < 0) { ++ nf_queue_entry_release_refs(entry); ++ kfree_skb(entry->skb); ++ kfree(entry); ++ } ++ ++ rcu_read_unlock(); +} + +static int imq_dev_xmit(struct sk_buff *skb, struct net_device *dev) @@ -169,26 +209,35 @@ + skb->imq_flags = 0; + skb->destructor = NULL; + ++ skb_restore_cb(skb); /* restore skb->cb */ ++ + dev->trans_start = jiffies; -+ nf_reinject(skb->nf_queue_entry, NF_ACCEPT); ++ imq_nf_reinject(skb->nf_queue_entry, NF_ACCEPT); + return 0; +} + +static int imq_nf_queue(struct nf_queue_entry *entry, unsigned queue_num) +{ + struct net_device *dev; -+ struct imq_private *priv; -+ struct sk_buff *skb2 = NULL; ++ struct sk_buff *skb_orig, *skb, *skb_shared; + struct Qdisc *q; -+ unsigned int index = entry->skb->imq_flags & IMQ_F_IFMASK; -+ int ret = -1; -+ -+ if (index > numdevs) -+ return -1; ++ 
struct netdev_queue *txq; ++ int users, index; ++ int retval = -EINVAL; ++ ++ index = entry->skb->imq_flags & IMQ_F_IFMASK; ++ if (unlikely(index > numdevs - 1)) { ++ if (net_ratelimit()) ++ printk(KERN_WARNING ++ "IMQ: invalid device specified, highest is %u\n", ++ numdevs - 1); ++ retval = -EINVAL; ++ goto out; ++ } + + /* check for imq device by index from cache */ + dev = imq_devs_cache[index]; -+ if (!dev) { ++ if (unlikely(!dev)) { + char buf[8]; + + /* get device by name and cache result */ @@ -197,49 +246,90 @@ + if (!dev) { + /* not found ?!*/ + BUG(); -+ return -1; ++ retval = -ENODEV; ++ goto out; + } + + imq_devs_cache[index] = dev; ++ dev_put(dev); + } + -+ priv = netdev_priv(dev); -+ if (!(dev->flags & IFF_UP)) { ++ if (unlikely(!(dev->flags & IFF_UP))) { + entry->skb->imq_flags = 0; -+ nf_reinject(entry, NF_ACCEPT); -+ return 0; ++ imq_nf_reinject(entry, NF_ACCEPT); ++ retval = 0; ++ goto out; + } + dev->last_rx = jiffies; + -+ if (entry->skb->destructor) { -+ skb2 = entry->skb; -+ entry->skb = skb_clone(entry->skb, GFP_ATOMIC); -+ if (!entry->skb) -+ return -1; ++ skb = entry->skb; ++ skb_orig = NULL; ++ ++ /* skb has owner? => make clone */ ++ if (unlikely(skb->destructor)) { ++ skb_orig = skb; ++ skb = skb_clone(skb, GFP_ATOMIC); ++ if (!skb) { ++ retval = -ENOMEM; ++ goto out; ++ } ++ entry->skb = skb; + } -+ entry->skb->nf_queue_entry = entry; + -+ dev->stats.rx_bytes += entry->skb->len; ++ skb->nf_queue_entry = entry; ++ ++ dev->stats.rx_bytes += skb->len; + dev->stats.rx_packets++; + -+ spin_lock_bh(&dev->queue_lock); -+ q = dev->qdisc; -+ if (q->enqueue) { -+ q->enqueue(skb_get(entry->skb), q); -+ if (skb_shared(entry->skb)) { -+ entry->skb->destructor = imq_skb_destructor; -+ kfree_skb(entry->skb); -+ ret = 0; -+ } -+ } -+ if (!test_and_set_bit(1, &priv->tasklet_pending)) -+ tasklet_schedule(&priv->tasklet); -+ spin_unlock_bh(&dev->queue_lock); ++ txq = dev_pick_tx(dev, skb); + -+ if (skb2) -+ kfree_skb(ret ? entry->skb : skb2); ++ q = rcu_dereference(txq->qdisc); ++ if (unlikely(!q->enqueue)) ++ goto packet_not_eaten_by_imq_dev; + -+ return ret; ++ spin_lock_bh(qdisc_lock(q)); ++ ++ users = atomic_read(&skb->users); ++ ++ skb_shared = skb_get(skb); /* increase reference count by one */ ++ skb_save_cb(skb_shared); /* backup skb->cb, as qdisc layer will ++ overwrite it */ ++ qdisc_enqueue_root(skb_shared, q); /* might kfree_skb */ ++ ++ if (likely(atomic_read(&skb_shared->users) == users + 1)) { ++ kfree_skb(skb_shared); /* decrease reference count by one */ ++ ++ skb->destructor = &imq_skb_destructor; ++ ++ /* cloned? */ ++ if (skb_orig) ++ kfree_skb(skb_orig); /* free original */ ++ ++ spin_unlock_bh(qdisc_lock(q)); ++ ++ /* schedule qdisc dequeue */ ++ __netif_schedule(q); ++ ++ retval = 0; ++ goto out; ++ } else { ++ skb_restore_cb(skb_shared); /* restore skb->cb */ ++ /* qdisc dropped packet and decreased skb reference count of ++ * skb, so we don't really want to and try refree as that would ++ * actually destroy the skb. */ ++ spin_unlock_bh(qdisc_lock(q)); ++ goto packet_not_eaten_by_imq_dev; ++ } ++ ++packet_not_eaten_by_imq_dev: ++ /* cloned? 
restore original */ ++ if (skb_orig) { ++ kfree_skb(skb); ++ entry->skb = skb_orig; ++ } ++ retval = -1; ++out: ++ return retval; +} + +static struct nf_queue_handler nfqh = { @@ -247,17 +337,6 @@ + .outfn = imq_nf_queue, +}; + -+static void qdisc_run_tasklet(unsigned long arg) -+{ -+ struct net_device *dev = (struct net_device *)arg; -+ struct imq_private *priv = netdev_priv(dev); -+ -+ spin_lock(&dev->queue_lock); -+ qdisc_run(dev); -+ clear_bit(1, &priv->tasklet_pending); -+ spin_unlock(&dev->queue_lock); -+} -+ +static unsigned int imq_nf_hook(unsigned int hook, struct sk_buff *pskb, + const struct net_device *indev, + const struct net_device *outdev, @@ -271,21 +350,13 @@ + +static int imq_close(struct net_device *dev) +{ -+ struct imq_private *priv = netdev_priv(dev); -+ -+ tasklet_kill(&priv->tasklet); + netif_stop_queue(dev); -+ + return 0; +} + +static int imq_open(struct net_device *dev) +{ -+ struct imq_private *priv = netdev_priv(dev); -+ -+ tasklet_init(&priv->tasklet, qdisc_run_tasklet, (unsigned long)dev); + netif_start_queue(dev); -+ + return 0; +} + @@ -299,59 +370,74 @@ + dev->mtu = 16000; + dev->tx_queue_len = 11000; + dev->flags = IFF_NOARP; ++ dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | ++ NETIF_F_GSO | NETIF_F_HW_CSUM | ++ NETIF_F_HIGHDMA; ++} ++ ++static int imq_validate(struct nlattr *tb[], struct nlattr *data[]) ++{ ++ int ret = 0; ++ ++ if (tb[IFLA_ADDRESS]) { ++ if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) { ++ ret = -EINVAL; ++ goto end; ++ } ++ if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) { ++ ret = -EADDRNOTAVAIL; ++ goto end; ++ } ++ } ++ return 0; ++end: ++ printk(KERN_WARNING "IMQ: imq_validate failed (%d)\n", ret); ++ return ret; +} + +static struct rtnl_link_ops imq_link_ops __read_mostly = { + .kind = "imq", -+ .priv_size = sizeof(struct imq_private), ++ .priv_size = 0, + .setup = imq_setup, ++ .validate = imq_validate, +}; + +static int __init imq_init_hooks(void) +{ + int err; + -+ err = nf_register_queue_handler(PF_INET, &nfqh); -+ if (err) -+ goto err1; ++ nf_register_queue_imq_handler(&nfqh); + + err = nf_register_hook(&imq_ingress_ipv4); + if (err) -+ goto err2; ++ goto err1; + + err = nf_register_hook(&imq_egress_ipv4); + if (err) -+ goto err3; ++ goto err2; + +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) -+ err = nf_register_queue_handler(PF_INET6, &nfqh); -+ if (err) -+ goto err4; -+ + err = nf_register_hook(&imq_ingress_ipv6); + if (err) -+ goto err5; ++ goto err3; + + err = nf_register_hook(&imq_egress_ipv6); + if (err) -+ goto err6; ++ goto err4; +#endif + + return 0; + +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) -+err6: -+ nf_unregister_hook(&imq_ingress_ipv6); -+err5: -+ nf_unregister_queue_handler(PF_INET6, &nfqh); +err4: ++ nf_unregister_hook(&imq_ingress_ipv6); ++err3: + nf_unregister_hook(&imq_egress_ipv4); +#endif -+err3: -+ nf_unregister_hook(&imq_ingress_ipv4); +err2: -+ nf_unregister_queue_handler(PF_INET, &nfqh); ++ nf_unregister_hook(&imq_ingress_ipv4); +err1: ++ nf_unregister_queue_imq_handler(); + return err; +} + @@ -360,7 +446,7 @@ + struct net_device *dev; + int ret; + -+ dev = alloc_netdev(sizeof(struct imq_private), "imq%d", imq_setup); ++ dev = alloc_netdev(0, "imq%d", imq_setup); + if (!dev) + return -ENOMEM; + @@ -383,7 +469,7 @@ +{ + int err, i; + -+ if (!numdevs || numdevs > IMQ_MAX_DEVS) { ++ if (numdevs < 1 || numdevs > IMQ_MAX_DEVS) { + printk(KERN_ERR "IMQ: numdevs has to be betweed 1 and %u\n", + IMQ_MAX_DEVS); + return -EINVAL; @@ -408,6 +494,12 @@ +{ + int err; + ++#if 
defined(CONFIG_IMQ_NUM_DEVS) ++ BUILD_BUG_ON(CONFIG_IMQ_NUM_DEVS > 16); ++ BUILD_BUG_ON(CONFIG_IMQ_NUM_DEVS < 2); ++ BUILD_BUG_ON(CONFIG_IMQ_NUM_DEVS - 1 > IMQ_F_IFMASK); ++#endif ++ + err = imq_init_devs(); + if (err) { + printk(KERN_ERR "IMQ: Error trying imq_init_devs(net)\n"); @@ -443,11 +535,11 @@ +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) + nf_unregister_hook(&imq_ingress_ipv6); + nf_unregister_hook(&imq_egress_ipv6); -+ nf_unregister_queue_handler(PF_INET6, &nfqh); +#endif + nf_unregister_hook(&imq_ingress_ipv4); + nf_unregister_hook(&imq_egress_ipv4); -+ nf_unregister_queue_handler(PF_INET, &nfqh); ++ ++ nf_unregister_queue_imq_handler(); +} + +static void __exit imq_cleanup_devs(void) @@ -477,7 +569,7 @@ + --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig -@@ -109,6 +109,129 @@ config EQUALIZER +@@ -109,6 +109,129 @@ To compile this driver as a module, choose M here: the module will be called eql. If unsure, say N. @@ -609,7 +701,7 @@ select CRC32 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile -@@ -144,6 +144,7 @@ obj-$(CONFIG_SLHC) += slhc.o +@@ -144,6 +144,7 @@ obj-$(CONFIG_XEN_NETDEV_FRONTEND) += xen-netfront.o obj-$(CONFIG_DUMMY) += dummy.o @@ -619,52 +711,102 @@ obj-$(CONFIG_DE600) += de600.o --- /dev/null +++ b/include/linux/imq.h -@@ -0,0 +1,9 @@ +@@ -0,0 +1,13 @@ +#ifndef _IMQ_H +#define _IMQ_H + -+#define IMQ_MAX_DEVS 16 ++/* IFMASK (16 device indexes, 0 to 15) and flag(s) fit in 5 bits */ ++#define IMQ_F_BITS 5 + -+#define IMQ_F_IFMASK 0x7f -+#define IMQ_F_ENQUEUE 0x80 ++#define IMQ_F_IFMASK 0x0f ++#define IMQ_F_ENQUEUE 0x10 ++ ++#define IMQ_MAX_DEVS (IMQ_F_IFMASK + 1) + +#endif /* _IMQ_H */ ++ --- /dev/null +++ b/include/linux/netfilter_ipv4/ipt_IMQ.h -@@ -0,0 +1,8 @@ +@@ -0,0 +1,10 @@ +#ifndef _IPT_IMQ_H +#define _IPT_IMQ_H + -+struct ipt_imq_info { -+ unsigned int todev; /* target imq device */ -+}; ++/* Backwards compatibility for old userspace */ ++#include <linux/netfilter/xt_IMQ.h> ++ ++#define ipt_imq_info xt_imq_info + +#endif /* _IPT_IMQ_H */ ++ --- /dev/null +++ b/include/linux/netfilter_ipv6/ip6t_IMQ.h -@@ -0,0 +1,8 @@ +@@ -0,0 +1,10 @@ +#ifndef _IP6T_IMQ_H +#define _IP6T_IMQ_H + -+struct ip6t_imq_info { -+ unsigned int todev; /* target imq device */ -+}; ++/* Backwards compatibility for old userspace */ ++#include <linux/netfilter/xt_IMQ.h> ++ ++#define ip6t_imq_info xt_imq_info + +#endif /* _IP6T_IMQ_H */ ++ --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h -@@ -302,6 +302,10 @@ struct sk_buff { +@@ -28,6 +28,9 @@ + #include <linux/rcupdate.h> + #include <linux/dmaengine.h> + #include <linux/hrtimer.h> ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++#include <linux/imq.h> ++#endif + + #define HAVE_ALLOC_SKB /* For the drivers to know */ + #define HAVE_ALIGNABLE_SKB /* Ditto 8) */ +@@ -272,6 +275,9 @@ + * first. This is owned by whoever has the skb queued ATM. 
+ */ + char cb[48]; ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++ void *cb_next; ++#endif + + unsigned int len, + data_len; +@@ -302,6 +308,9 @@ struct nf_conntrack *nfct; struct sk_buff *nfct_reasm; #endif +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) -+ unsigned char imq_flags; + struct nf_queue_entry *nf_queue_entry; +#endif #ifdef CONFIG_BRIDGE_NETFILTER struct nf_bridge_info *nf_bridge; #endif -@@ -1633,6 +1637,10 @@ static inline void __nf_copy(struct sk_b +@@ -321,6 +330,9 @@ + __u8 do_not_encrypt:1; + #endif + /* 0/13/14 bit hole */ ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++ __u8 imq_flags:IMQ_F_BITS; ++#endif + + #ifdef CONFIG_NET_DMA + dma_cookie_t dma_cookie; +@@ -353,6 +365,12 @@ + + #include <asm/system.h> + ++ ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++extern int skb_save_cb(struct sk_buff *skb); ++extern int skb_restore_cb(struct sk_buff *skb); ++#endif ++ + extern void kfree_skb(struct sk_buff *skb); + extern void __kfree_skb(struct sk_buff *skb); + extern struct sk_buff *__alloc_skb(unsigned int size, +@@ -1633,6 +1651,10 @@ dst->nfct_reasm = src->nfct_reasm; nf_conntrack_get_reasm(src->nfct_reasm); #endif @@ -687,7 +829,7 @@ #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/stat.h> -@@ -1624,7 +1627,11 @@ int dev_hard_start_xmit(struct sk_buff * +@@ -1624,7 +1627,11 @@ struct netdev_queue *txq) { if (likely(!skb->next)) { @@ -700,118 +842,351 @@ dev_queue_xmit_nit(skb, dev); if (netif_needs_gso(dev, skb)) { +@@ -1715,8 +1722,7 @@ + return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32); + } + +-static struct netdev_queue *dev_pick_tx(struct net_device *dev, +- struct sk_buff *skb) ++struct netdev_queue *dev_pick_tx(struct net_device *dev, struct sk_buff *skb) + { + u16 queue_index = 0; + +@@ -1728,6 +1734,7 @@ + skb_set_queue_mapping(skb, queue_index); + return netdev_get_tx_queue(dev, queue_index); + } ++EXPORT_SYMBOL(dev_pick_tx); + + /** + * dev_queue_xmit - transmit a buffer +--- a/include/linux/netdevice.h ++++ b/include/linux/netdevice.h +@@ -915,6 +915,7 @@ + extern int dev_open(struct net_device *dev); + extern int dev_close(struct net_device *dev); + extern void dev_disable_lro(struct net_device *dev); ++extern struct netdev_queue *dev_pick_tx(struct net_device *dev, struct sk_buff *skb); + extern int dev_queue_xmit(struct sk_buff *skb); + extern int register_netdevice(struct net_device *dev); + extern void unregister_netdevice(struct net_device *dev); --- /dev/null -+++ b/net/ipv4/netfilter/ipt_IMQ.c -@@ -0,0 +1,69 @@ -+/* -+ * This target marks packets to be enqueued to an imq device -+ */ -+#include <linux/module.h> -+#include <linux/skbuff.h> -+#include <linux/netfilter_ipv4/ip_tables.h> -+#include <linux/netfilter_ipv4/ipt_IMQ.h> -+#include <linux/imq.h> ++++ b/include/linux/netfilter/xt_IMQ.h +@@ -0,0 +1,9 @@ ++#ifndef _XT_IMQ_H ++#define _XT_IMQ_H + -+static unsigned int imq_target(struct sk_buff *pskb, -+ const struct net_device *in, -+ const struct net_device *out, -+ unsigned int hooknum, -+ const struct xt_target *target, -+ const void *targinfo) ++struct xt_imq_info { ++ unsigned int todev; /* target imq device */ ++}; ++ ++#endif /* _XT_IMQ_H */ ++ +--- a/include/net/netfilter/nf_queue.h ++++ b/include/net/netfilter/nf_queue.h +@@ -13,6 +13,12 @@ + struct net_device *indev; + struct net_device *outdev; + int (*okfn)(struct sk_buff *); ++ ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++ int (*next_outfn)(struct nf_queue_entry *entry, ++ unsigned int queuenum); ++ 
unsigned int next_queuenum; ++#endif + }; + + #define nf_queue_entry_reroute(x) ((void *)x + sizeof(struct nf_queue_entry)) +@@ -30,5 +36,11 @@ + const struct nf_queue_handler *qh); + extern void nf_unregister_queue_handlers(const struct nf_queue_handler *qh); + extern void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict); ++extern void nf_queue_entry_release_refs(struct nf_queue_entry *entry); ++ ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++extern void nf_register_queue_imq_handler(const struct nf_queue_handler *qh); ++extern void nf_unregister_queue_imq_handler(void); ++#endif + + #endif /* _NF_QUEUE_H */ +--- a/net/core/skbuff.c ++++ b/net/core/skbuff.c +@@ -69,6 +69,9 @@ + + static struct kmem_cache *skbuff_head_cache __read_mostly; + static struct kmem_cache *skbuff_fclone_cache __read_mostly; ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++static struct kmem_cache *skbuff_cb_store_cache __read_mostly; ++#endif + + static void sock_pipe_buf_release(struct pipe_inode_info *pipe, + struct pipe_buffer *buf) +@@ -88,6 +91,80 @@ + return 1; + } + ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++/* Control buffer save/restore for IMQ devices */ ++struct skb_cb_table { ++ void *cb_next; ++ atomic_t refcnt; ++ char cb[48]; ++}; ++ ++static DEFINE_SPINLOCK(skb_cb_store_lock); ++ ++int skb_save_cb(struct sk_buff *skb) +{ -+ struct ipt_imq_info *mr = (struct ipt_imq_info *)targinfo; ++ struct skb_cb_table *next; + -+ pskb->imq_flags = mr->todev | IMQ_F_ENQUEUE; ++ next = kmem_cache_alloc(skbuff_cb_store_cache, GFP_ATOMIC); ++ if (!next) ++ return -ENOMEM; + -+ return XT_CONTINUE; ++ BUILD_BUG_ON(sizeof(skb->cb) != sizeof(next->cb)); ++ ++ memcpy(next->cb, skb->cb, sizeof(skb->cb)); ++ next->cb_next = skb->cb_next; ++ ++ atomic_set(&next->refcnt, 1); ++ ++ skb->cb_next = next; ++ return 0; +} ++EXPORT_SYMBOL(skb_save_cb); + -+static bool imq_checkentry(const char *tablename, -+ const void *e, -+ const struct xt_target *target, -+ void *targinfo, -+ unsigned int hook_mask) ++int skb_restore_cb(struct sk_buff *skb) +{ -+ struct ipt_imq_info *mr; ++ struct skb_cb_table *next; + -+ mr = (struct ipt_imq_info *)targinfo; -+ -+ if (mr->todev > IMQ_MAX_DEVS) { -+ printk(KERN_WARNING -+ "IMQ: invalid device specified, highest is %u\n", -+ IMQ_MAX_DEVS); ++ if (!skb->cb_next) + return 0; ++ ++ next = skb->cb_next; ++ ++ BUILD_BUG_ON(sizeof(skb->cb) != sizeof(next->cb)); ++ ++ memcpy(skb->cb, next->cb, sizeof(skb->cb)); ++ skb->cb_next = next->cb_next; ++ ++ spin_lock(&skb_cb_store_lock); ++ ++ if (atomic_dec_and_test(&next->refcnt)) { ++ kmem_cache_free(skbuff_cb_store_cache, next); + } + -+ return 1; ++ spin_unlock(&skb_cb_store_lock); ++ ++ return 0; +} ++EXPORT_SYMBOL(skb_restore_cb); + -+static struct xt_target ipt_imq_reg = { -+ .name = "IMQ", -+ .family = AF_INET, -+ .target = imq_target, -+ .targetsize = sizeof(struct ipt_imq_info), -+ .checkentry = imq_checkentry, -+ .me = THIS_MODULE, -+ .table = "mangle" -+}; ++static void skb_copy_stored_cb(struct sk_buff *new, struct sk_buff *old) ++{ ++ struct skb_cb_table *next; ++ ++ if (!old->cb_next) { ++ new->cb_next = 0; ++ return; ++ } ++ ++ spin_lock(&skb_cb_store_lock); ++ ++ next = old->cb_next; ++ atomic_inc(&next->refcnt); ++ new->cb_next = next; ++ ++ spin_unlock(&skb_cb_store_lock); ++} ++#endif + + /* Pipe buffer operations for a socket. 
*/ + static struct pipe_buf_operations sock_pipe_buf_ops = { +@@ -362,6 +439,15 @@ + WARN_ON(in_irq()); + skb->destructor(skb); + } ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++ /* This should not happen. When it does, avoid memleak by restoring ++ the chain of cb-backups. */ ++ while(skb->cb_next != NULL) { ++ printk(KERN_WARNING "kfree_skb: skb->cb_next: %08x\n", ++ skb->cb_next); ++ skb_restore_cb(skb); ++ } ++#endif + #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) + nf_conntrack_put(skb->nfct); + nf_conntrack_put_reasm(skb->nfct_reasm); +@@ -424,6 +510,9 @@ + new->sp = secpath_get(old->sp); + #endif + memcpy(new->cb, old->cb, sizeof(old->cb)); ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++ skb_copy_stored_cb(new, old); ++#endif + new->csum_start = old->csum_start; + new->csum_offset = old->csum_offset; + new->local_df = old->local_df; +@@ -2326,6 +2415,13 @@ + 0, + SLAB_HWCACHE_ALIGN|SLAB_PANIC, + NULL); ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++ skbuff_cb_store_cache = kmem_cache_create("skbuff_cb_store_cache", ++ sizeof(struct skb_cb_table), ++ 0, ++ SLAB_HWCACHE_ALIGN|SLAB_PANIC, ++ NULL); ++#endif + } + + /** +--- a/net/netfilter/Kconfig ++++ b/net/netfilter/Kconfig +@@ -342,6 +342,18 @@ + + To compile it as a module, choose M here. If unsure, say N. + ++config NETFILTER_XT_TARGET_IMQ ++ tristate '"IMQ" target support' ++ depends on NETFILTER_XTABLES ++ depends on IP_NF_MANGLE || IP6_NF_MANGLE ++ select IMQ ++ default m if NETFILTER_ADVANCED=n ++ help ++ This option adds a `IMQ' target which is used to specify if and ++ to which imq device packets should get enqueued/dequeued. ++ ++ To compile it as a module, choose M here. If unsure, say N. ++ + config NETFILTER_XT_TARGET_MARK + tristate '"MARK" target support' + depends on NETFILTER_XTABLES +--- a/net/netfilter/Makefile ++++ b/net/netfilter/Makefile +@@ -42,6 +42,7 @@ + obj-$(CONFIG_NETFILTER_XT_TARGET_CONNMARK) += xt_CONNMARK.o + obj-$(CONFIG_NETFILTER_XT_TARGET_CONNSECMARK) += xt_CONNSECMARK.o + obj-$(CONFIG_NETFILTER_XT_TARGET_DSCP) += xt_DSCP.o ++obj-$(CONFIG_NETFILTER_XT_TARGET_IMQ) += xt_IMQ.o + obj-$(CONFIG_NETFILTER_XT_TARGET_MARK) += xt_MARK.o + obj-$(CONFIG_NETFILTER_XT_TARGET_NFLOG) += xt_NFLOG.o + obj-$(CONFIG_NETFILTER_XT_TARGET_NFQUEUE) += xt_NFQUEUE.o +--- a/net/netfilter/nf_queue.c ++++ b/net/netfilter/nf_queue.c +@@ -20,6 +20,26 @@ + + static DEFINE_MUTEX(queue_handler_mutex); + ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++static const struct nf_queue_handler *queue_imq_handler; + -+static int __init init(void) ++void nf_register_queue_imq_handler(const struct nf_queue_handler *qh) +{ -+ return xt_register_target(&ipt_imq_reg); ++ mutex_lock(&queue_handler_mutex); ++ rcu_assign_pointer(queue_imq_handler, qh); ++ mutex_unlock(&queue_handler_mutex); +} ++EXPORT_SYMBOL(nf_register_queue_imq_handler); + -+static void __exit fini(void) ++void nf_unregister_queue_imq_handler(void) +{ -+ xt_unregister_target(&ipt_imq_reg); ++ mutex_lock(&queue_handler_mutex); ++ rcu_assign_pointer(queue_imq_handler, NULL); ++ mutex_unlock(&queue_handler_mutex); +} ++EXPORT_SYMBOL(nf_unregister_queue_imq_handler); ++#endif + -+module_init(init); -+module_exit(fini); + /* return EBUSY when somebody else is registered, return EEXIST if the + * same handler is registered, return 0 in case of success. 
*/ + int nf_register_queue_handler(int pf, const struct nf_queue_handler *qh) +@@ -80,7 +100,7 @@ + } + EXPORT_SYMBOL_GPL(nf_unregister_queue_handlers); + +-static void nf_queue_entry_release_refs(struct nf_queue_entry *entry) ++void nf_queue_entry_release_refs(struct nf_queue_entry *entry) + { + /* Release those devices we held, or Alexey will kill me. */ + if (entry->indev) +@@ -100,6 +120,7 @@ + /* Drop reference to owner of hook which queued us. */ + module_put(entry->elem->owner); + } ++EXPORT_SYMBOL_GPL(nf_queue_entry_release_refs); + + /* + * Any packet that leaves via this function must come back +@@ -121,12 +142,26 @@ + #endif + const struct nf_afinfo *afinfo; + const struct nf_queue_handler *qh; ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++ const struct nf_queue_handler *qih = NULL; ++#endif + + /* QUEUE == DROP if noone is waiting, to be safe. */ + rcu_read_lock(); + + qh = rcu_dereference(queue_handler[pf]); ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) ++ if (pf == PF_INET || pf == PF_INET6) ++#else ++ if (pf == PF_INET) ++#endif ++ qih = rcu_dereference(queue_imq_handler); + -+MODULE_AUTHOR("http://www.linuximq.net"); -+MODULE_DESCRIPTION("Pseudo-driver for the intermediate queue device. See http://www.linuximq.net/ for more information."); -+MODULE_LICENSE("GPL"); ---- a/net/ipv4/netfilter/Kconfig -+++ b/net/ipv4/netfilter/Kconfig -@@ -123,6 +123,17 @@ config IP_NF_FILTER ++ if (!qh && !qih) ++#else /* !IMQ */ + if (!qh) ++#endif + goto err_unlock; - To compile it as a module, choose M here. If unsure, say N. + afinfo = nf_get_afinfo(pf); +@@ -145,6 +180,10 @@ + .indev = indev, + .outdev = outdev, + .okfn = okfn, ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++ .next_outfn = qh ? qh->outfn : NULL, ++ .next_queuenum = queuenum, ++#endif + }; + + /* If it's going away, ignore hook. */ +@@ -170,8 +209,19 @@ + } + #endif + afinfo->saveroute(skb, entry); ++ ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++ if (qih) { ++ status = qih->outfn(entry, queuenum); ++ goto imq_skip_queue; ++ } ++#endif ++ + status = qh->outfn(entry, queuenum); + ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++imq_skip_queue: ++#endif + rcu_read_unlock(); -+config IP_NF_TARGET_IMQ -+ tristate "IMQ target support" -+ depends on IP_NF_MANGLE && IMQ -+ help -+ This option adds a `IMQ' target which is used to specify if and -+ to which IMQ device packets should get enqueued/dequeued. -+ -+ For more information visit: http://www.linuximq.net/ -+ -+ To compile it as a module, choose M here. If unsure, say N. 
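
[Illustration only, not part of the patch.] A minimal sketch of the usage pattern the skb_save_cb()/skb_restore_cb() helpers above are designed for: back up skb->cb before handing the skb to a qdisc that may overwrite it, and restore it on transmit or free. The example_enqueue() name is hypothetical; skb_save_cb(), skb_restore_cb() and the Qdisc enqueue hook are the ones added/used in the hunks above.

	#include <linux/skbuff.h>
	#include <net/pkt_sched.h>

	/* hypothetical caller, mirroring the pattern imq.c uses around qdisc enqueue */
	static int example_enqueue(struct sk_buff *skb, struct Qdisc *q)
	{
		int err;

		err = skb_save_cb(skb);		/* back up skb->cb into the cb-store cache */
		if (err)
			return err;		/* -ENOMEM if the backup allocation fails */

		return q->enqueue(skb, q);	/* the qdisc is now free to clobber skb->cb */
	}

	/* on dequeue/transmit (or in the skb destructor), skb_restore_cb(skb) copies
	 * the backup back into skb->cb and releases the backup entry */
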
-+ - config IP_NF_TARGET_REJECT - tristate "REJECT target support" - depends on IP_NF_FILTER ---- a/net/ipv4/netfilter/Makefile -+++ b/net/ipv4/netfilter/Makefile -@@ -56,6 +56,7 @@ obj-$(CONFIG_IP_NF_MATCH_SET) += ipt_set - obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o - obj-$(CONFIG_IP_NF_TARGET_ECN) += ipt_ECN.o - obj-$(CONFIG_IP_NF_TARGET_LOG) += ipt_LOG.o -+obj-$(CONFIG_IP_NF_TARGET_IMQ) += ipt_IMQ.o - obj-$(CONFIG_IP_NF_TARGET_MASQUERADE) += ipt_MASQUERADE.o - obj-$(CONFIG_IP_NF_TARGET_NETMAP) += ipt_NETMAP.o - obj-$(CONFIG_IP_NF_TARGET_REDIRECT) += ipt_REDIRECT.o + if (status < 0) { --- /dev/null -+++ b/net/ipv6/netfilter/ip6t_IMQ.c -@@ -0,0 +1,69 @@ ++++ b/net/netfilter/xt_IMQ.c +@@ -0,0 +1,81 @@ +/* + * This target marks packets to be enqueued to an imq device + */ +#include <linux/module.h> +#include <linux/skbuff.h> -+#include <linux/netfilter_ipv6/ip6_tables.h> -+#include <linux/netfilter_ipv6/ip6t_IMQ.h> ++#include <linux/netfilter/x_tables.h> ++#include <linux/netfilter/xt_IMQ.h> +#include <linux/imq.h> + +static unsigned int imq_target(struct sk_buff *pskb, @@ -821,9 +1196,9 @@ + const struct xt_target *target, + const void *targinfo) +{ -+ struct ip6t_imq_info *mr = (struct ip6t_imq_info *)targinfo; ++ const struct xt_imq_info *mr = targinfo; + -+ pskb->imq_flags = mr->todev | IMQ_F_ENQUEUE; ++ pskb->imq_flags = (mr->todev & IMQ_F_IFMASK) | IMQ_F_ENQUEUE; + + return XT_CONTINUE; +} @@ -834,81 +1209,55 @@ + void *targinfo, + unsigned int hook_mask) +{ -+ struct ip6t_imq_info *mr; -+ -+ mr = (struct ip6t_imq_info *)targinfo; ++ struct xt_imq_info *mr = targinfo; + -+ if (mr->todev > IMQ_MAX_DEVS) { ++ if (mr->todev > IMQ_MAX_DEVS - 1) { + printk(KERN_WARNING + "IMQ: invalid device specified, highest is %u\n", -+ IMQ_MAX_DEVS); ++ IMQ_MAX_DEVS - 1); + return 0; + } + + return 1; +} + -+static struct xt_target ip6t_imq_reg = { -+ .name = "IMQ", -+ .family = AF_INET6, -+ .target = imq_target, -+ .targetsize = sizeof(struct ip6t_imq_info), -+ .table = "mangle", -+ .checkentry = imq_checkentry, -+ .me = THIS_MODULE ++static struct xt_target xt_imq_reg[] __read_mostly = { ++ { ++ .name = "IMQ", ++ .family = AF_INET, ++ .target = imq_target, ++ .targetsize = sizeof(struct xt_imq_info), ++ .table = "mangle", ++ .checkentry = imq_checkentry, ++ .me = THIS_MODULE ++ }, ++ { ++ .name = "IMQ", ++ .family = AF_INET6, ++ .target = imq_target, ++ .targetsize = sizeof(struct xt_imq_info), ++ .table = "mangle", ++ .checkentry = imq_checkentry, ++ .me = THIS_MODULE ++ }, +}; + -+static int __init init(void) ++static int __init imq_init(void) +{ -+ return xt_register_target(&ip6t_imq_reg); ++ return xt_register_targets(xt_imq_reg, ARRAY_SIZE(xt_imq_reg)); +} + -+static void __exit fini(void) ++static void __exit imq_fini(void) +{ -+ xt_unregister_target(&ip6t_imq_reg); ++ xt_unregister_targets(xt_imq_reg, ARRAY_SIZE(xt_imq_reg)); +} + -+module_init(init); -+module_exit(fini); ++module_init(imq_init); ++module_exit(imq_fini); + +MODULE_AUTHOR("http://www.linuximq.net"); +MODULE_DESCRIPTION("Pseudo-driver for the intermediate queue device. See http://www.linuximq.net/ for more information."); +MODULE_LICENSE("GPL"); ---- a/net/ipv6/netfilter/Kconfig -+++ b/net/ipv6/netfilter/Kconfig -@@ -179,6 +179,15 @@ config IP6_NF_MANGLE - - To compile it as a module, choose M here. If unsure, say N. 
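
[Illustration only, not part of the patch.] The consolidated xt_IMQ target above packs the destination device index and an enqueue flag into skb->imq_flags; imq_nf_queue() later unpacks the index to pick the imqN device. A short sketch using the macros from include/linux/imq.h (the example_* helpers are hypothetical, added here for clarity):

	#include <linux/skbuff.h>
	#include <linux/imq.h>

	/* what imq_target() does with the device number from struct xt_imq_info */
	static inline void example_mark_for_imq(struct sk_buff *skb, unsigned int todev)
	{
		skb->imq_flags = (todev & IMQ_F_IFMASK) | IMQ_F_ENQUEUE;
	}

	/* what imq_nf_queue() does to recover the index (0 -> imq0, 1 -> imq1, ...) */
	static inline unsigned int example_imq_index(const struct sk_buff *skb)
	{
		return skb->imq_flags & IMQ_F_IFMASK;
	}
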
- -+config IP6_NF_TARGET_IMQ -+ tristate "IMQ target support" -+ depends on IP6_NF_MANGLE && IMQ -+ help -+ This option adds a `IMQ' target which is used to specify if and -+ to which imq device packets should get enqueued/dequeued. -+ -+ To compile it as a module, choose M here. If unsure, say N. ++MODULE_ALIAS("ipt_IMQ"); ++MODULE_ALIAS("ip6t_IMQ"); + - config IP6_NF_TARGET_HL - tristate 'HL (hoplimit) target support' - depends on IP6_NF_MANGLE ---- a/net/ipv6/netfilter/Makefile -+++ b/net/ipv6/netfilter/Makefile -@@ -6,6 +6,7 @@ - obj-$(CONFIG_IP6_NF_IPTABLES) += ip6_tables.o - obj-$(CONFIG_IP6_NF_FILTER) += ip6table_filter.o - obj-$(CONFIG_IP6_NF_MANGLE) += ip6table_mangle.o -+obj-$(CONFIG_IP6_NF_TARGET_IMQ) += ip6t_IMQ.o - obj-$(CONFIG_IP6_NF_QUEUE) += ip6_queue.o - obj-$(CONFIG_IP6_NF_RAW) += ip6table_raw.o - obj-$(CONFIG_IP6_NF_SECURITY) += ip6table_security.o ---- a/net/sched/sch_generic.c -+++ b/net/sched/sch_generic.c -@@ -188,6 +188,7 @@ void __qdisc_run(struct Qdisc *q) - - clear_bit(__QDISC_STATE_RUNNING, &q->state); - } -+EXPORT_SYMBOL(__qdisc_run); - - static void dev_watchdog(unsigned long arg) - { diff --git a/target/linux/generic-2.6/patches-2.6.27/151-netfilter_imq_2.6.27.patch b/target/linux/generic-2.6/patches-2.6.27/151-netfilter_imq_2.6.27.patch deleted file mode 100644 index 9390db7424..0000000000 --- a/target/linux/generic-2.6/patches-2.6.27/151-netfilter_imq_2.6.27.patch +++ /dev/null @@ -1,75 +0,0 @@ ---- a/drivers/net/imq.c -+++ b/drivers/net/imq.c -@@ -178,10 +178,11 @@ static int imq_nf_queue(struct nf_queue_ - struct sk_buff *skb2 = NULL; - struct Qdisc *q; - unsigned int index = entry->skb->imq_flags & IMQ_F_IFMASK; -- int ret = -1; -+ struct netdev_queue *txq; -+ int ret = -EINVAL; - - if (index > numdevs) -- return -1; -+ return ret; - - /* check for imq device by index from cache */ - dev = imq_devs_cache[index]; -@@ -194,7 +195,7 @@ static int imq_nf_queue(struct nf_queue_ - if (!dev) { - /* not found ?!*/ - BUG(); -- return -1; -+ return ret; - } - - imq_devs_cache[index] = dev; -@@ -212,17 +213,19 @@ static int imq_nf_queue(struct nf_queue_ - skb2 = entry->skb; - entry->skb = skb_clone(entry->skb, GFP_ATOMIC); - if (!entry->skb) -- return -1; -+ return -ENOMEM; - } - entry->skb->nf_queue_entry = entry; - - dev->stats.rx_bytes += entry->skb->len; - dev->stats.rx_packets++; - -- spin_lock_bh(&dev->queue_lock); -- q = dev->qdisc; -+ txq = netdev_get_tx_queue(dev, 0); -+ __netif_tx_lock_bh(txq); -+ q = txq->qdisc; -+ - if (q->enqueue) { -- q->enqueue(skb_get(entry->skb), q); -+ qdisc_enqueue_root(skb_get(entry->skb), q); - if (skb_shared(entry->skb)) { - entry->skb->destructor = imq_skb_destructor; - kfree_skb(entry->skb); -@@ -231,7 +234,7 @@ static int imq_nf_queue(struct nf_queue_ - } - if (!test_and_set_bit(1, &priv->tasklet_pending)) - tasklet_schedule(&priv->tasklet); -- spin_unlock_bh(&dev->queue_lock); -+ __netif_tx_unlock_bh(txq); - - if (skb2) - kfree_skb(ret ? 
entry->skb : skb2); -@@ -248,11 +251,13 @@ static void qdisc_run_tasklet(unsigned l - { - struct net_device *dev = (struct net_device *)arg; - struct imq_private *priv = netdev_priv(dev); -+ struct netdev_queue *txq; - -- spin_lock(&dev->queue_lock); -- qdisc_run(dev); -+ netif_tx_lock(dev); -+ txq = netdev_get_tx_queue(dev, 0); -+ qdisc_run(txq->qdisc); - clear_bit(1, &priv->tasklet_pending); -- spin_unlock(&dev->queue_lock); -+ netif_tx_unlock(dev); - } - - static unsigned int imq_nf_hook(unsigned int hook, struct sk_buff *pskb, diff --git a/target/linux/generic-2.6/patches-2.6.27/180-netfilter_depends.patch b/target/linux/generic-2.6/patches-2.6.27/180-netfilter_depends.patch index 1914818744..0897def326 100644 --- a/target/linux/generic-2.6/patches-2.6.27/180-netfilter_depends.patch +++ b/target/linux/generic-2.6/patches-2.6.27/180-netfilter_depends.patch @@ -1,6 +1,6 @@ --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig -@@ -165,7 +165,7 @@ config NF_CONNTRACK_FTP +@@ -165,7 +165,7 @@ config NF_CONNTRACK_H323 tristate "H.323 protocol support" @@ -9,7 +9,7 @@ depends on NETFILTER_ADVANCED help H.323 is a VoIP signalling protocol from ITU-T. As one of the most -@@ -443,7 +443,7 @@ config NETFILTER_XT_TARGET_CONNSECMARK +@@ -455,7 +455,7 @@ config NETFILTER_XT_TARGET_TCPMSS tristate '"TCPMSS" target support' diff --git a/target/linux/generic-2.6/patches-2.6.27/190-netfilter_rtsp.patch b/target/linux/generic-2.6/patches-2.6.27/190-netfilter_rtsp.patch index 858f60ce39..2b439cdddd 100644 --- a/target/linux/generic-2.6/patches-2.6.27/190-netfilter_rtsp.patch +++ b/target/linux/generic-2.6/patches-2.6.27/190-netfilter_rtsp.patch @@ -294,7 +294,7 @@ +#endif /* _NETFILTER_MIME_H */ --- a/net/ipv4/netfilter/Makefile +++ b/net/ipv4/netfilter/Makefile -@@ -23,6 +23,7 @@ obj-$(CONFIG_NF_NAT_AMANDA) += nf_nat_am +@@ -23,6 +23,7 @@ obj-$(CONFIG_NF_NAT_FTP) += nf_nat_ftp.o obj-$(CONFIG_NF_NAT_H323) += nf_nat_h323.o obj-$(CONFIG_NF_NAT_IRC) += nf_nat_irc.o @@ -304,7 +304,7 @@ obj-$(CONFIG_NF_NAT_SNMP_BASIC) += nf_nat_snmp_basic.o --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig -@@ -278,6 +278,16 @@ config NF_CONNTRACK_TFTP +@@ -278,6 +278,16 @@ To compile it as a module, choose M here. If unsure, say N. @@ -323,7 +323,7 @@ depends on NF_CONNTRACK --- a/net/netfilter/Makefile +++ b/net/netfilter/Makefile -@@ -33,6 +33,7 @@ obj-$(CONFIG_NF_CONNTRACK_PPTP) += nf_co +@@ -33,6 +33,7 @@ obj-$(CONFIG_NF_CONNTRACK_SANE) += nf_conntrack_sane.o obj-$(CONFIG_NF_CONNTRACK_SIP) += nf_conntrack_sip.o obj-$(CONFIG_NF_CONNTRACK_TFTP) += nf_conntrack_tftp.o @@ -333,7 +333,7 @@ obj-$(CONFIG_NETFILTER_XTABLES) += x_tables.o xt_tcpudp.o --- a/net/ipv4/netfilter/Kconfig +++ b/net/ipv4/netfilter/Kconfig -@@ -281,6 +281,11 @@ config NF_NAT_IRC +@@ -270,6 +270,11 @@ depends on IP_NF_IPTABLES && NF_CONNTRACK && NF_NAT default NF_NAT && NF_CONNTRACK_IRC diff --git a/target/linux/generic-2.6/patches-2.6.28/150-netfilter_imq.patch b/target/linux/generic-2.6/patches-2.6.28/150-netfilter_imq.patch index 310d0fc966..415e775a4c 100644 --- a/target/linux/generic-2.6/patches-2.6.28/150-netfilter_imq.patch +++ b/target/linux/generic-2.6/patches-2.6.28/150-netfilter_imq.patch @@ -1,6 +1,6 @@ --- /dev/null +++ b/drivers/net/imq.c -@@ -0,0 +1,474 @@ +@@ -0,0 +1,567 @@ +/* + * Pseudo-driver for the intermediate queue device. 
+ * @@ -51,10 +51,28 @@ + * + * + * 2008/06/17 - 2.6.25 - Changed imq.c to use qdisc_run() instead -+ * of qdisc_restart() and moved qdisc_run() to tasklet to avoid ++ * of qdisc_restart() and moved qdisc_run() to tasklet to avoid + * recursive locking. New initialization routines to fix 'rmmod' not + * working anymore. Used code from ifb.c. (Jussi Kivilinna) + * ++ * 2008/08/06 - 2.6.26 - (JK) ++ * - Replaced tasklet with 'netif_schedule()'. ++ * - Cleaned up and added comments for imq_nf_queue(). ++ * ++ * 2009/04/12 ++ * - Add skb_save_cb/skb_restore_cb helper functions for backuping ++ * control buffer. This is needed because qdisc-layer on kernels ++ * 2.6.27 and newer overwrite control buffer. (Jussi Kivilinna) ++ * - Add better locking for IMQ device. Hopefully this will solve ++ * SMP issues. (Jussi Kivilinna) ++ * - Port to 2.6.27 ++ * - Port to 2.6.28 ++ * ++ * 2009/04/20 - (Jussi Kivilinna) ++ * - Fix rmmod not working ++ * - Use netdevice feature flags to avoid extra packet handling ++ * by core networking layer and possibly increase performance. ++ * + * Also, many thanks to pablo Sebastian Greco for making the initial + * patch and to those who helped the testing. + * @@ -64,8 +82,10 @@ +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/moduleparam.h> ++#include <linux/list.h> +#include <linux/skbuff.h> +#include <linux/netdevice.h> ++#include <linux/etherdevice.h> +#include <linux/rtnetlink.h> +#include <linux/if_arp.h> +#include <linux/netfilter.h> @@ -77,11 +97,6 @@ +#include <net/pkt_sched.h> +#include <net/netfilter/nf_queue.h> + -+struct imq_private { -+ struct tasklet_struct tasklet; -+ unsigned long tasklet_pending; -+}; -+ +static nf_hookfn imq_nf_hook; + +static struct nf_hook_ops imq_ingress_ipv4 = { @@ -140,8 +155,11 @@ +static unsigned int numdevs = IMQ_MAX_DEVS; +#endif + ++static DEFINE_SPINLOCK(imq_nf_queue_lock); ++ +static struct net_device *imq_devs_cache[IMQ_MAX_DEVS]; + ++ +static struct net_device_stats *imq_get_stats(struct net_device *dev) +{ + return &dev->stats; @@ -153,12 +171,35 @@ + struct nf_queue_entry *entry = skb->nf_queue_entry; + + if (entry) { -+ if (entry->indev) -+ dev_put(entry->indev); -+ if (entry->outdev) -+ dev_put(entry->outdev); ++ nf_queue_entry_release_refs(entry); ++ kfree(entry); ++ } ++ ++ skb_restore_cb(skb); /* kfree backup */ ++} ++ ++static void imq_nf_reinject(struct nf_queue_entry *entry, unsigned int verdict) ++{ ++ int status; ++ ++ if (!entry->next_outfn) { ++ spin_lock_bh(&imq_nf_queue_lock); ++ nf_reinject(entry, verdict); ++ spin_unlock_bh(&imq_nf_queue_lock); ++ return; ++ } ++ ++ rcu_read_lock(); ++ local_bh_disable(); ++ status = entry->next_outfn(entry, entry->next_queuenum); ++ local_bh_enable(); ++ if (status < 0) { ++ nf_queue_entry_release_refs(entry); ++ kfree_skb(entry->skb); + kfree(entry); + } ++ ++ rcu_read_unlock(); +} + +static int imq_dev_xmit(struct sk_buff *skb, struct net_device *dev) @@ -169,26 +210,35 @@ + skb->imq_flags = 0; + skb->destructor = NULL; + ++ skb_restore_cb(skb); /* restore skb->cb */ ++ + dev->trans_start = jiffies; -+ nf_reinject(skb->nf_queue_entry, NF_ACCEPT); ++ imq_nf_reinject(skb->nf_queue_entry, NF_ACCEPT); + return 0; +} + +static int imq_nf_queue(struct nf_queue_entry *entry, unsigned queue_num) +{ + struct net_device *dev; -+ struct imq_private *priv; -+ struct sk_buff *skb2 = NULL; ++ struct sk_buff *skb_orig, *skb, *skb_shared; + struct Qdisc *q; -+ unsigned int index = entry->skb->imq_flags & IMQ_F_IFMASK; -+ int ret = -1; -+ -+ if (index > 
numdevs) -+ return -1; ++ struct netdev_queue *txq; ++ int users, index; ++ int retval = -EINVAL; ++ ++ index = entry->skb->imq_flags & IMQ_F_IFMASK; ++ if (unlikely(index > numdevs - 1)) { ++ if (net_ratelimit()) ++ printk(KERN_WARNING ++ "IMQ: invalid device specified, highest is %u\n", ++ numdevs - 1); ++ retval = -EINVAL; ++ goto out; ++ } + + /* check for imq device by index from cache */ + dev = imq_devs_cache[index]; -+ if (!dev) { ++ if (unlikely(!dev)) { + char buf[8]; + + /* get device by name and cache result */ @@ -197,49 +247,90 @@ + if (!dev) { + /* not found ?!*/ + BUG(); -+ return -1; ++ retval = -ENODEV; ++ goto out; + } + + imq_devs_cache[index] = dev; ++ dev_put(dev); + } + -+ priv = netdev_priv(dev); -+ if (!(dev->flags & IFF_UP)) { ++ if (unlikely(!(dev->flags & IFF_UP))) { + entry->skb->imq_flags = 0; -+ nf_reinject(entry, NF_ACCEPT); -+ return 0; ++ imq_nf_reinject(entry, NF_ACCEPT); ++ retval = 0; ++ goto out; + } + dev->last_rx = jiffies; + -+ if (entry->skb->destructor) { -+ skb2 = entry->skb; -+ entry->skb = skb_clone(entry->skb, GFP_ATOMIC); -+ if (!entry->skb) -+ return -1; ++ skb = entry->skb; ++ skb_orig = NULL; ++ ++ /* skb has owner? => make clone */ ++ if (unlikely(skb->destructor)) { ++ skb_orig = skb; ++ skb = skb_clone(skb, GFP_ATOMIC); ++ if (!skb) { ++ retval = -ENOMEM; ++ goto out; ++ } ++ entry->skb = skb; + } -+ entry->skb->nf_queue_entry = entry; + -+ dev->stats.rx_bytes += entry->skb->len; ++ skb->nf_queue_entry = entry; ++ ++ dev->stats.rx_bytes += skb->len; + dev->stats.rx_packets++; + -+ spin_lock_bh(&dev->queue_lock); -+ q = dev->qdisc; -+ if (q->enqueue) { -+ q->enqueue(skb_get(entry->skb), q); -+ if (skb_shared(entry->skb)) { -+ entry->skb->destructor = imq_skb_destructor; -+ kfree_skb(entry->skb); -+ ret = 0; -+ } -+ } -+ if (!test_and_set_bit(1, &priv->tasklet_pending)) -+ tasklet_schedule(&priv->tasklet); -+ spin_unlock_bh(&dev->queue_lock); ++ txq = dev_pick_tx(dev, skb); + -+ if (skb2) -+ kfree_skb(ret ? entry->skb : skb2); ++ q = rcu_dereference(txq->qdisc); ++ if (unlikely(!q->enqueue)) ++ goto packet_not_eaten_by_imq_dev; + -+ return ret; ++ spin_lock_bh(qdisc_lock(q)); ++ ++ users = atomic_read(&skb->users); ++ ++ skb_shared = skb_get(skb); /* increase reference count by one */ ++ skb_save_cb(skb_shared); /* backup skb->cb, as qdisc layer will ++ overwrite it */ ++ qdisc_enqueue_root(skb_shared, q); /* might kfree_skb */ ++ ++ if (likely(atomic_read(&skb_shared->users) == users + 1)) { ++ kfree_skb(skb_shared); /* decrease reference count by one */ ++ ++ skb->destructor = &imq_skb_destructor; ++ ++ /* cloned? */ ++ if (skb_orig) ++ kfree_skb(skb_orig); /* free original */ ++ ++ spin_unlock_bh(qdisc_lock(q)); ++ ++ /* schedule qdisc dequeue */ ++ __netif_schedule(q); ++ ++ retval = 0; ++ goto out; ++ } else { ++ skb_restore_cb(skb_shared); /* restore skb->cb */ ++ /* qdisc dropped packet and decreased skb reference count of ++ * skb, so we don't really want to and try refree as that would ++ * actually destroy the skb. */ ++ spin_unlock_bh(qdisc_lock(q)); ++ goto packet_not_eaten_by_imq_dev; ++ } ++ ++packet_not_eaten_by_imq_dev: ++ /* cloned? 
restore original */ ++ if (skb_orig) { ++ kfree_skb(skb); ++ entry->skb = skb_orig; ++ } ++ retval = -1; ++out: ++ return retval; +} + +static struct nf_queue_handler nfqh = { @@ -247,17 +338,6 @@ + .outfn = imq_nf_queue, +}; + -+static void qdisc_run_tasklet(unsigned long arg) -+{ -+ struct net_device *dev = (struct net_device *)arg; -+ struct imq_private *priv = netdev_priv(dev); -+ -+ spin_lock(&dev->queue_lock); -+ qdisc_run(dev); -+ clear_bit(1, &priv->tasklet_pending); -+ spin_unlock(&dev->queue_lock); -+} -+ +static unsigned int imq_nf_hook(unsigned int hook, struct sk_buff *pskb, + const struct net_device *indev, + const struct net_device *outdev, @@ -271,21 +351,13 @@ + +static int imq_close(struct net_device *dev) +{ -+ struct imq_private *priv = netdev_priv(dev); -+ -+ tasklet_kill(&priv->tasklet); + netif_stop_queue(dev); -+ + return 0; +} + +static int imq_open(struct net_device *dev) +{ -+ struct imq_private *priv = netdev_priv(dev); -+ -+ tasklet_init(&priv->tasklet, qdisc_run_tasklet, (unsigned long)dev); + netif_start_queue(dev); -+ + return 0; +} + @@ -299,59 +371,74 @@ + dev->mtu = 16000; + dev->tx_queue_len = 11000; + dev->flags = IFF_NOARP; ++ dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | ++ NETIF_F_GSO | NETIF_F_HW_CSUM | ++ NETIF_F_HIGHDMA; ++} ++ ++static int imq_validate(struct nlattr *tb[], struct nlattr *data[]) ++{ ++ int ret = 0; ++ ++ if (tb[IFLA_ADDRESS]) { ++ if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) { ++ ret = -EINVAL; ++ goto end; ++ } ++ if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) { ++ ret = -EADDRNOTAVAIL; ++ goto end; ++ } ++ } ++ return 0; ++end: ++ printk(KERN_WARNING "IMQ: imq_validate failed (%d)\n", ret); ++ return ret; +} + +static struct rtnl_link_ops imq_link_ops __read_mostly = { + .kind = "imq", -+ .priv_size = sizeof(struct imq_private), ++ .priv_size = 0, + .setup = imq_setup, ++ .validate = imq_validate, +}; + +static int __init imq_init_hooks(void) +{ + int err; + -+ err = nf_register_queue_handler(PF_INET, &nfqh); -+ if (err) -+ goto err1; ++ nf_register_queue_imq_handler(&nfqh); + + err = nf_register_hook(&imq_ingress_ipv4); + if (err) -+ goto err2; ++ goto err1; + + err = nf_register_hook(&imq_egress_ipv4); + if (err) -+ goto err3; ++ goto err2; + +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) -+ err = nf_register_queue_handler(PF_INET6, &nfqh); -+ if (err) -+ goto err4; -+ + err = nf_register_hook(&imq_ingress_ipv6); + if (err) -+ goto err5; ++ goto err3; + + err = nf_register_hook(&imq_egress_ipv6); + if (err) -+ goto err6; ++ goto err4; +#endif + + return 0; + +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) -+err6: -+ nf_unregister_hook(&imq_ingress_ipv6); -+err5: -+ nf_unregister_queue_handler(PF_INET6, &nfqh); +err4: ++ nf_unregister_hook(&imq_ingress_ipv6); ++err3: + nf_unregister_hook(&imq_egress_ipv4); +#endif -+err3: -+ nf_unregister_hook(&imq_ingress_ipv4); +err2: -+ nf_unregister_queue_handler(PF_INET, &nfqh); ++ nf_unregister_hook(&imq_ingress_ipv4); +err1: ++ nf_unregister_queue_imq_handler(); + return err; +} + @@ -360,7 +447,7 @@ + struct net_device *dev; + int ret; + -+ dev = alloc_netdev(sizeof(struct imq_private), "imq%d", imq_setup); ++ dev = alloc_netdev(0, "imq%d", imq_setup); + if (!dev) + return -ENOMEM; + @@ -383,7 +470,7 @@ +{ + int err, i; + -+ if (!numdevs || numdevs > IMQ_MAX_DEVS) { ++ if (numdevs < 1 || numdevs > IMQ_MAX_DEVS) { + printk(KERN_ERR "IMQ: numdevs has to be betweed 1 and %u\n", + IMQ_MAX_DEVS); + return -EINVAL; @@ -408,6 +495,12 @@ +{ + int err; + ++#if 
defined(CONFIG_IMQ_NUM_DEVS) ++ BUILD_BUG_ON(CONFIG_IMQ_NUM_DEVS > 16); ++ BUILD_BUG_ON(CONFIG_IMQ_NUM_DEVS < 2); ++ BUILD_BUG_ON(CONFIG_IMQ_NUM_DEVS - 1 > IMQ_F_IFMASK); ++#endif ++ + err = imq_init_devs(); + if (err) { + printk(KERN_ERR "IMQ: Error trying imq_init_devs(net)\n"); @@ -443,11 +536,11 @@ +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) + nf_unregister_hook(&imq_ingress_ipv6); + nf_unregister_hook(&imq_egress_ipv6); -+ nf_unregister_queue_handler(PF_INET6, &nfqh); +#endif + nf_unregister_hook(&imq_ingress_ipv4); + nf_unregister_hook(&imq_egress_ipv4); -+ nf_unregister_queue_handler(PF_INET, &nfqh); ++ ++ nf_unregister_queue_imq_handler(); +} + +static void __exit imq_cleanup_devs(void) @@ -477,7 +570,7 @@ + --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig -@@ -109,6 +109,129 @@ config EQUALIZER +@@ -109,6 +109,129 @@ To compile this driver as a module, choose M here: the module will be called eql. If unsure, say N. @@ -609,7 +702,7 @@ select CRC32 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile -@@ -148,6 +148,7 @@ obj-$(CONFIG_SLHC) += slhc.o +@@ -148,6 +148,7 @@ obj-$(CONFIG_XEN_NETDEV_FRONTEND) += xen-netfront.o obj-$(CONFIG_DUMMY) += dummy.o @@ -619,52 +712,102 @@ obj-$(CONFIG_DE600) += de600.o --- /dev/null +++ b/include/linux/imq.h -@@ -0,0 +1,9 @@ +@@ -0,0 +1,13 @@ +#ifndef _IMQ_H +#define _IMQ_H + -+#define IMQ_MAX_DEVS 16 ++/* IFMASK (16 device indexes, 0 to 15) and flag(s) fit in 5 bits */ ++#define IMQ_F_BITS 5 ++ ++#define IMQ_F_IFMASK 0x0f ++#define IMQ_F_ENQUEUE 0x10 + -+#define IMQ_F_IFMASK 0x7f -+#define IMQ_F_ENQUEUE 0x80 ++#define IMQ_MAX_DEVS (IMQ_F_IFMASK + 1) + +#endif /* _IMQ_H */ ++ --- /dev/null +++ b/include/linux/netfilter_ipv4/ipt_IMQ.h -@@ -0,0 +1,8 @@ +@@ -0,0 +1,10 @@ +#ifndef _IPT_IMQ_H +#define _IPT_IMQ_H + -+struct ipt_imq_info { -+ unsigned int todev; /* target imq device */ -+}; ++/* Backwards compatibility for old userspace */ ++#include <linux/netfilter/xt_IMQ.h> ++ ++#define ipt_imq_info xt_imq_info + +#endif /* _IPT_IMQ_H */ ++ --- /dev/null +++ b/include/linux/netfilter_ipv6/ip6t_IMQ.h -@@ -0,0 +1,8 @@ +@@ -0,0 +1,10 @@ +#ifndef _IP6T_IMQ_H +#define _IP6T_IMQ_H + -+struct ip6t_imq_info { -+ unsigned int todev; /* target imq device */ -+}; ++/* Backwards compatibility for old userspace */ ++#include <linux/netfilter/xt_IMQ.h> ++ ++#define ip6t_imq_info xt_imq_info + +#endif /* _IP6T_IMQ_H */ ++ --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h -@@ -308,6 +308,10 @@ struct sk_buff { +@@ -28,6 +28,9 @@ + #include <linux/rcupdate.h> + #include <linux/dmaengine.h> + #include <linux/hrtimer.h> ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++#include <linux/imq.h> ++#endif + + #define HAVE_ALLOC_SKB /* For the drivers to know */ + #define HAVE_ALIGNABLE_SKB /* Ditto 8) */ +@@ -278,6 +281,9 @@ + * first. This is owned by whoever has the skb queued ATM. 
+ */ + char cb[48]; ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++ void *cb_next; ++#endif + + unsigned int len, + data_len; +@@ -308,6 +314,9 @@ struct nf_conntrack *nfct; struct sk_buff *nfct_reasm; #endif +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) -+ unsigned char imq_flags; + struct nf_queue_entry *nf_queue_entry; +#endif #ifdef CONFIG_BRIDGE_NETFILTER struct nf_bridge_info *nf_bridge; #endif -@@ -1804,6 +1808,10 @@ static inline void __nf_copy(struct sk_b +@@ -327,6 +336,9 @@ + __u8 do_not_encrypt:1; + #endif + /* 0/13/14 bit hole */ ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++ __u8 imq_flags:IMQ_F_BITS; ++#endif + + #ifdef CONFIG_NET_DMA + dma_cookie_t dma_cookie; +@@ -367,6 +379,12 @@ + enum dma_data_direction dir); + #endif + ++ ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++extern int skb_save_cb(struct sk_buff *skb); ++extern int skb_restore_cb(struct sk_buff *skb); ++#endif ++ + extern void kfree_skb(struct sk_buff *skb); + extern void __kfree_skb(struct sk_buff *skb); + extern struct sk_buff *__alloc_skb(unsigned int size, +@@ -1804,6 +1822,10 @@ dst->nfct_reasm = src->nfct_reasm; nf_conntrack_get_reasm(src->nfct_reasm); #endif @@ -687,7 +830,7 @@ #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/stat.h> -@@ -1655,7 +1658,11 @@ int dev_hard_start_xmit(struct sk_buff * +@@ -1655,7 +1658,11 @@ struct netdev_queue *txq) { if (likely(!skb->next)) { @@ -700,215 +843,414 @@ dev_queue_xmit_nit(skb, dev); if (netif_needs_gso(dev, skb)) { +@@ -1746,8 +1753,7 @@ + return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32); + } + +-static struct netdev_queue *dev_pick_tx(struct net_device *dev, +- struct sk_buff *skb) ++struct netdev_queue *dev_pick_tx(struct net_device *dev, struct sk_buff *skb) + { + u16 queue_index = 0; + +@@ -1759,6 +1765,7 @@ + skb_set_queue_mapping(skb, queue_index); + return netdev_get_tx_queue(dev, queue_index); + } ++EXPORT_SYMBOL(dev_pick_tx); + + /** + * dev_queue_xmit - transmit a buffer +--- a/include/linux/netdevice.h ++++ b/include/linux/netdevice.h +@@ -950,6 +950,7 @@ + extern int dev_open(struct net_device *dev); + extern int dev_close(struct net_device *dev); + extern void dev_disable_lro(struct net_device *dev); ++extern struct netdev_queue *dev_pick_tx(struct net_device *dev, struct sk_buff *skb); + extern int dev_queue_xmit(struct sk_buff *skb); + extern int register_netdevice(struct net_device *dev); + extern void unregister_netdevice(struct net_device *dev); --- /dev/null -+++ b/net/ipv4/netfilter/ipt_IMQ.c -@@ -0,0 +1,69 @@ -+/* -+ * This target marks packets to be enqueued to an imq device -+ */ -+#include <linux/module.h> -+#include <linux/skbuff.h> -+#include <linux/netfilter_ipv4/ip_tables.h> -+#include <linux/netfilter_ipv4/ipt_IMQ.h> -+#include <linux/imq.h> ++++ b/include/linux/netfilter/xt_IMQ.h +@@ -0,0 +1,9 @@ ++#ifndef _XT_IMQ_H ++#define _XT_IMQ_H + -+static unsigned int imq_target(struct sk_buff *pskb, -+ const struct net_device *in, -+ const struct net_device *out, -+ unsigned int hooknum, -+ const struct xt_target *target, -+ const void *targinfo) ++struct xt_imq_info { ++ unsigned int todev; /* target imq device */ ++}; ++ ++#endif /* _XT_IMQ_H */ ++ +--- a/include/net/netfilter/nf_queue.h ++++ b/include/net/netfilter/nf_queue.h +@@ -13,6 +13,12 @@ + struct net_device *indev; + struct net_device *outdev; + int (*okfn)(struct sk_buff *); ++ ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++ int (*next_outfn)(struct nf_queue_entry *entry, ++ unsigned int 
queuenum); ++ unsigned int next_queuenum; ++#endif + }; + + #define nf_queue_entry_reroute(x) ((void *)x + sizeof(struct nf_queue_entry)) +@@ -30,5 +36,11 @@ + const struct nf_queue_handler *qh); + extern void nf_unregister_queue_handlers(const struct nf_queue_handler *qh); + extern void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict); ++extern void nf_queue_entry_release_refs(struct nf_queue_entry *entry); ++ ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++extern void nf_register_queue_imq_handler(const struct nf_queue_handler *qh); ++extern void nf_unregister_queue_imq_handler(void); ++#endif + + #endif /* _NF_QUEUE_H */ +--- a/net/core/skbuff.c ++++ b/net/core/skbuff.c +@@ -69,6 +69,9 @@ + + static struct kmem_cache *skbuff_head_cache __read_mostly; + static struct kmem_cache *skbuff_fclone_cache __read_mostly; ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++static struct kmem_cache *skbuff_cb_store_cache __read_mostly; ++#endif + + static void sock_pipe_buf_release(struct pipe_inode_info *pipe, + struct pipe_buffer *buf) +@@ -88,6 +91,80 @@ + return 1; + } + ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++/* Control buffer save/restore for IMQ devices */ ++struct skb_cb_table { ++ void *cb_next; ++ atomic_t refcnt; ++ char cb[48]; ++}; ++ ++static DEFINE_SPINLOCK(skb_cb_store_lock); ++ ++int skb_save_cb(struct sk_buff *skb) +{ -+ struct ipt_imq_info *mr = (struct ipt_imq_info *)targinfo; ++ struct skb_cb_table *next; + -+ pskb->imq_flags = mr->todev | IMQ_F_ENQUEUE; ++ next = kmem_cache_alloc(skbuff_cb_store_cache, GFP_ATOMIC); ++ if (!next) ++ return -ENOMEM; + -+ return XT_CONTINUE; ++ BUILD_BUG_ON(sizeof(skb->cb) != sizeof(next->cb)); ++ ++ memcpy(next->cb, skb->cb, sizeof(skb->cb)); ++ next->cb_next = skb->cb_next; ++ ++ atomic_set(&next->refcnt, 1); ++ ++ skb->cb_next = next; ++ return 0; +} ++EXPORT_SYMBOL(skb_save_cb); + -+static bool imq_checkentry(const char *tablename, -+ const void *e, -+ const struct xt_target *target, -+ void *targinfo, -+ unsigned int hook_mask) ++int skb_restore_cb(struct sk_buff *skb) +{ -+ struct ipt_imq_info *mr; -+ -+ mr = (struct ipt_imq_info *)targinfo; ++ struct skb_cb_table *next; + -+ if (mr->todev > IMQ_MAX_DEVS) { -+ printk(KERN_WARNING -+ "IMQ: invalid device specified, highest is %u\n", -+ IMQ_MAX_DEVS); ++ if (!skb->cb_next) + return 0; ++ ++ next = skb->cb_next; ++ ++ BUILD_BUG_ON(sizeof(skb->cb) != sizeof(next->cb)); ++ ++ memcpy(skb->cb, next->cb, sizeof(skb->cb)); ++ skb->cb_next = next->cb_next; ++ ++ spin_lock(&skb_cb_store_lock); ++ ++ if (atomic_dec_and_test(&next->refcnt)) { ++ kmem_cache_free(skbuff_cb_store_cache, next); + } + -+ return 1; ++ spin_unlock(&skb_cb_store_lock); ++ ++ return 0; +} ++EXPORT_SYMBOL(skb_restore_cb); + -+static struct xt_target ipt_imq_reg = { -+ .name = "IMQ", -+ .family = AF_INET, -+ .target = imq_target, -+ .targetsize = sizeof(struct ipt_imq_info), -+ .checkentry = imq_checkentry, -+ .me = THIS_MODULE, -+ .table = "mangle" -+}; ++static void skb_copy_stored_cb(struct sk_buff *new, struct sk_buff *old) ++{ ++ struct skb_cb_table *next; ++ ++ if (!old->cb_next) { ++ new->cb_next = 0; ++ return; ++ } ++ ++ spin_lock(&skb_cb_store_lock); + -+static int __init init(void) ++ next = old->cb_next; ++ atomic_inc(&next->refcnt); ++ new->cb_next = next; ++ ++ spin_unlock(&skb_cb_store_lock); ++} ++#endif + + /* Pipe buffer operations for a socket. 
*/ + static struct pipe_buf_operations sock_pipe_buf_ops = { +@@ -381,6 +458,15 @@ + WARN_ON(in_irq()); + skb->destructor(skb); + } ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++ /* This should not happen. When it does, avoid memleak by restoring ++ the chain of cb-backups. */ ++ while(skb->cb_next != NULL) { ++ printk(KERN_WARNING "kfree_skb: skb->cb_next: %08x\n", ++ skb->cb_next); ++ skb_restore_cb(skb); ++ } ++#endif + #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) + nf_conntrack_put(skb->nfct); + nf_conntrack_put_reasm(skb->nfct_reasm); +@@ -493,6 +579,9 @@ + new->sp = secpath_get(old->sp); + #endif + memcpy(new->cb, old->cb, sizeof(old->cb)); ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++ skb_copy_stored_cb(new, old); ++#endif + new->csum_start = old->csum_start; + new->csum_offset = old->csum_offset; + new->local_df = old->local_df; +@@ -2397,6 +2486,13 @@ + 0, + SLAB_HWCACHE_ALIGN|SLAB_PANIC, + NULL); ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++ skbuff_cb_store_cache = kmem_cache_create("skbuff_cb_store_cache", ++ sizeof(struct skb_cb_table), ++ 0, ++ SLAB_HWCACHE_ALIGN|SLAB_PANIC, ++ NULL); ++#endif + } + + /** +--- a/net/netfilter/Kconfig ++++ b/net/netfilter/Kconfig +@@ -357,6 +357,18 @@ + + To compile it as a module, choose M here. If unsure, say N. + ++config NETFILTER_XT_TARGET_IMQ ++ tristate '"IMQ" target support' ++ depends on NETFILTER_XTABLES ++ depends on IP_NF_MANGLE || IP6_NF_MANGLE ++ select IMQ ++ default m if NETFILTER_ADVANCED=n ++ help ++ This option adds a `IMQ' target which is used to specify if and ++ to which imq device packets should get enqueued/dequeued. ++ ++ To compile it as a module, choose M here. If unsure, say N. ++ + config NETFILTER_XT_TARGET_MARK + tristate '"MARK" target support' + default m if NETFILTER_ADVANCED=n +--- a/net/netfilter/Makefile ++++ b/net/netfilter/Makefile +@@ -45,6 +45,7 @@ + obj-$(CONFIG_NETFILTER_XT_TARGET_CONNMARK) += xt_CONNMARK.o + obj-$(CONFIG_NETFILTER_XT_TARGET_CONNSECMARK) += xt_CONNSECMARK.o + obj-$(CONFIG_NETFILTER_XT_TARGET_DSCP) += xt_DSCP.o ++obj-$(CONFIG_NETFILTER_XT_TARGET_IMQ) += xt_IMQ.o + obj-$(CONFIG_NETFILTER_XT_TARGET_MARK) += xt_MARK.o + obj-$(CONFIG_NETFILTER_XT_TARGET_NFLOG) += xt_NFLOG.o + obj-$(CONFIG_NETFILTER_XT_TARGET_NFQUEUE) += xt_NFQUEUE.o +--- a/net/netfilter/nf_queue.c ++++ b/net/netfilter/nf_queue.c +@@ -20,6 +20,26 @@ + + static DEFINE_MUTEX(queue_handler_mutex); + ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++static const struct nf_queue_handler *queue_imq_handler; ++ ++void nf_register_queue_imq_handler(const struct nf_queue_handler *qh) +{ -+ return xt_register_target(&ipt_imq_reg); ++ mutex_lock(&queue_handler_mutex); ++ rcu_assign_pointer(queue_imq_handler, qh); ++ mutex_unlock(&queue_handler_mutex); +} ++EXPORT_SYMBOL(nf_register_queue_imq_handler); + -+static void __exit fini(void) ++void nf_unregister_queue_imq_handler(void) +{ -+ xt_unregister_target(&ipt_imq_reg); ++ mutex_lock(&queue_handler_mutex); ++ rcu_assign_pointer(queue_imq_handler, NULL); ++ mutex_unlock(&queue_handler_mutex); +} ++EXPORT_SYMBOL(nf_unregister_queue_imq_handler); ++#endif + -+module_init(init); -+module_exit(fini); + /* return EBUSY when somebody else is registered, return EEXIST if the + * same handler is registered, return 0 in case of success. 
*/ + int nf_register_queue_handler(u_int8_t pf, const struct nf_queue_handler *qh) +@@ -80,7 +100,7 @@ + } + EXPORT_SYMBOL_GPL(nf_unregister_queue_handlers); + +-static void nf_queue_entry_release_refs(struct nf_queue_entry *entry) ++void nf_queue_entry_release_refs(struct nf_queue_entry *entry) + { + /* Release those devices we held, or Alexey will kill me. */ + if (entry->indev) +@@ -100,6 +120,7 @@ + /* Drop reference to owner of hook which queued us. */ + module_put(entry->elem->owner); + } ++EXPORT_SYMBOL_GPL(nf_queue_entry_release_refs); + + /* + * Any packet that leaves via this function must come back +@@ -121,12 +142,26 @@ + #endif + const struct nf_afinfo *afinfo; + const struct nf_queue_handler *qh; ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++ const struct nf_queue_handler *qih = NULL; ++#endif + + /* QUEUE == DROP if noone is waiting, to be safe. */ + rcu_read_lock(); + + qh = rcu_dereference(queue_handler[pf]); ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) ++ if (pf == PF_INET || pf == PF_INET6) ++#else ++ if (pf == PF_INET) ++#endif ++ qih = rcu_dereference(queue_imq_handler); + -+MODULE_AUTHOR("http://www.linuximq.net"); -+MODULE_DESCRIPTION("Pseudo-driver for the intermediate queue device. See http://www.linuximq.net/ for more information."); -+MODULE_LICENSE("GPL"); ---- a/net/ipv4/netfilter/Kconfig -+++ b/net/ipv4/netfilter/Kconfig -@@ -112,6 +112,17 @@ config IP_NF_FILTER ++ if (!qh && !qih) ++#else /* !IMQ */ + if (!qh) ++#endif + goto err_unlock; - To compile it as a module, choose M here. If unsure, say N. + afinfo = nf_get_afinfo(pf); +@@ -145,6 +180,10 @@ + .indev = indev, + .outdev = outdev, + .okfn = okfn, ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++ .next_outfn = qh ? qh->outfn : NULL, ++ .next_queuenum = queuenum, ++#endif + }; + + /* If it's going away, ignore hook. */ +@@ -170,8 +209,19 @@ + } + #endif + afinfo->saveroute(skb, entry); ++ ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++ if (qih) { ++ status = qih->outfn(entry, queuenum); ++ goto imq_skip_queue; ++ } ++#endif ++ + status = qh->outfn(entry, queuenum); + ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++imq_skip_queue: ++#endif + rcu_read_unlock(); -+config IP_NF_TARGET_IMQ -+ tristate "IMQ target support" -+ depends on IP_NF_MANGLE && IMQ -+ help -+ This option adds a `IMQ' target which is used to specify if and -+ to which IMQ device packets should get enqueued/dequeued. -+ -+ For more information visit: http://www.linuximq.net/ -+ -+ To compile it as a module, choose M here. If unsure, say N. 
-+ - config IP_NF_TARGET_REJECT - tristate "REJECT target support" - depends on IP_NF_FILTER ---- a/net/ipv4/netfilter/Makefile -+++ b/net/ipv4/netfilter/Makefile -@@ -58,6 +58,7 @@ obj-$(CONFIG_IP_NF_MATCH_SET) += ipt_set - obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o - obj-$(CONFIG_IP_NF_TARGET_ECN) += ipt_ECN.o - obj-$(CONFIG_IP_NF_TARGET_LOG) += ipt_LOG.o -+obj-$(CONFIG_IP_NF_TARGET_IMQ) += ipt_IMQ.o - obj-$(CONFIG_IP_NF_TARGET_MASQUERADE) += ipt_MASQUERADE.o - obj-$(CONFIG_IP_NF_TARGET_NETMAP) += ipt_NETMAP.o - obj-$(CONFIG_IP_NF_TARGET_REDIRECT) += ipt_REDIRECT.o + if (status < 0) { --- /dev/null -+++ b/net/ipv6/netfilter/ip6t_IMQ.c -@@ -0,0 +1,69 @@ ++++ b/net/netfilter/xt_IMQ.c +@@ -0,0 +1,73 @@ +/* + * This target marks packets to be enqueued to an imq device + */ +#include <linux/module.h> +#include <linux/skbuff.h> -+#include <linux/netfilter_ipv6/ip6_tables.h> -+#include <linux/netfilter_ipv6/ip6t_IMQ.h> ++#include <linux/netfilter/x_tables.h> ++#include <linux/netfilter/xt_IMQ.h> +#include <linux/imq.h> + +static unsigned int imq_target(struct sk_buff *pskb, -+ const struct net_device *in, -+ const struct net_device *out, -+ unsigned int hooknum, -+ const struct xt_target *target, -+ const void *targinfo) ++ const struct xt_target_param *par) +{ -+ struct ip6t_imq_info *mr = (struct ip6t_imq_info *)targinfo; ++ const struct xt_imq_info *mr = par->targinfo; + -+ pskb->imq_flags = mr->todev | IMQ_F_ENQUEUE; ++ pskb->imq_flags = (mr->todev & IMQ_F_IFMASK) | IMQ_F_ENQUEUE; + + return XT_CONTINUE; +} + -+static bool imq_checkentry(const char *tablename, -+ const void *entry, -+ const struct xt_target *target, -+ void *targinfo, -+ unsigned int hook_mask) ++static bool imq_checkentry(const struct xt_tgchk_param *par) +{ -+ struct ip6t_imq_info *mr; ++ struct xt_imq_info *mr = par->targinfo; + -+ mr = (struct ip6t_imq_info *)targinfo; -+ -+ if (mr->todev > IMQ_MAX_DEVS) { ++ if (mr->todev > IMQ_MAX_DEVS - 1) { + printk(KERN_WARNING + "IMQ: invalid device specified, highest is %u\n", -+ IMQ_MAX_DEVS); ++ IMQ_MAX_DEVS - 1); + return 0; + } + + return 1; +} + -+static struct xt_target ip6t_imq_reg = { -+ .name = "IMQ", -+ .family = AF_INET6, -+ .target = imq_target, -+ .targetsize = sizeof(struct ip6t_imq_info), -+ .table = "mangle", -+ .checkentry = imq_checkentry, -+ .me = THIS_MODULE ++static struct xt_target xt_imq_reg[] __read_mostly = { ++ { ++ .name = "IMQ", ++ .family = AF_INET, ++ .checkentry = imq_checkentry, ++ .target = imq_target, ++ .targetsize = sizeof(struct xt_imq_info), ++ .table = "mangle", ++ .me = THIS_MODULE ++ }, ++ { ++ .name = "IMQ", ++ .family = AF_INET6, ++ .checkentry = imq_checkentry, ++ .target = imq_target, ++ .targetsize = sizeof(struct xt_imq_info), ++ .table = "mangle", ++ .me = THIS_MODULE ++ }, +}; + -+static int __init init(void) ++static int __init imq_init(void) +{ -+ return xt_register_target(&ip6t_imq_reg); ++ return xt_register_targets(xt_imq_reg, ARRAY_SIZE(xt_imq_reg)); +} + -+static void __exit fini(void) ++static void __exit imq_fini(void) +{ -+ xt_unregister_target(&ip6t_imq_reg); ++ xt_unregister_targets(xt_imq_reg, ARRAY_SIZE(xt_imq_reg)); +} + -+module_init(init); -+module_exit(fini); ++module_init(imq_init); ++module_exit(imq_fini); + +MODULE_AUTHOR("http://www.linuximq.net"); +MODULE_DESCRIPTION("Pseudo-driver for the intermediate queue device. 
See http://www.linuximq.net/ for more information."); +MODULE_LICENSE("GPL"); ---- a/net/ipv6/netfilter/Kconfig -+++ b/net/ipv6/netfilter/Kconfig -@@ -170,6 +170,15 @@ config IP6_NF_MANGLE - - To compile it as a module, choose M here. If unsure, say N. - -+config IP6_NF_TARGET_IMQ -+ tristate "IMQ target support" -+ depends on IP6_NF_MANGLE && IMQ -+ help -+ This option adds a `IMQ' target which is used to specify if and -+ to which imq device packets should get enqueued/dequeued. -+ -+ To compile it as a module, choose M here. If unsure, say N. ++MODULE_ALIAS("ipt_IMQ"); ++MODULE_ALIAS("ip6t_IMQ"); + - config IP6_NF_TARGET_HL - tristate 'HL (hoplimit) target support' - depends on IP6_NF_MANGLE ---- a/net/ipv6/netfilter/Makefile -+++ b/net/ipv6/netfilter/Makefile -@@ -6,6 +6,7 @@ - obj-$(CONFIG_IP6_NF_IPTABLES) += ip6_tables.o - obj-$(CONFIG_IP6_NF_FILTER) += ip6table_filter.o - obj-$(CONFIG_IP6_NF_MANGLE) += ip6table_mangle.o -+obj-$(CONFIG_IP6_NF_TARGET_IMQ) += ip6t_IMQ.o - obj-$(CONFIG_IP6_NF_QUEUE) += ip6_queue.o - obj-$(CONFIG_IP6_NF_RAW) += ip6table_raw.o - obj-$(CONFIG_IP6_NF_SECURITY) += ip6table_security.o ---- a/net/sched/sch_generic.c -+++ b/net/sched/sch_generic.c -@@ -195,6 +195,7 @@ void __qdisc_run(struct Qdisc *q) - - clear_bit(__QDISC_STATE_RUNNING, &q->state); - } -+EXPORT_SYMBOL(__qdisc_run); - - static void dev_watchdog(unsigned long arg) - { diff --git a/target/linux/generic-2.6/patches-2.6.28/151-netfilter_imq_2.6.28.patch b/target/linux/generic-2.6/patches-2.6.28/151-netfilter_imq_2.6.28.patch deleted file mode 100644 index d4ed15129d..0000000000 --- a/target/linux/generic-2.6/patches-2.6.28/151-netfilter_imq_2.6.28.patch +++ /dev/null @@ -1,114 +0,0 @@ ---- a/drivers/net/imq.c -+++ b/drivers/net/imq.c -@@ -178,10 +178,11 @@ static int imq_nf_queue(struct nf_queue_ - struct sk_buff *skb2 = NULL; - struct Qdisc *q; - unsigned int index = entry->skb->imq_flags & IMQ_F_IFMASK; -- int ret = -1; -+ struct netdev_queue *txq; -+ int ret = -EINVAL; - - if (index > numdevs) -- return -1; -+ return ret; - - /* check for imq device by index from cache */ - dev = imq_devs_cache[index]; -@@ -194,7 +195,7 @@ static int imq_nf_queue(struct nf_queue_ - if (!dev) { - /* not found ?!*/ - BUG(); -- return -1; -+ return ret; - } - - imq_devs_cache[index] = dev; -@@ -212,17 +213,19 @@ static int imq_nf_queue(struct nf_queue_ - skb2 = entry->skb; - entry->skb = skb_clone(entry->skb, GFP_ATOMIC); - if (!entry->skb) -- return -1; -+ return -ENOMEM; - } - entry->skb->nf_queue_entry = entry; - - dev->stats.rx_bytes += entry->skb->len; - dev->stats.rx_packets++; - -- spin_lock_bh(&dev->queue_lock); -- q = dev->qdisc; -+ txq = netdev_get_tx_queue(dev, 0); -+ __netif_tx_lock_bh(txq); -+ q = txq->qdisc; -+ - if (q->enqueue) { -- q->enqueue(skb_get(entry->skb), q); -+ qdisc_enqueue_root(skb_get(entry->skb), q); - if (skb_shared(entry->skb)) { - entry->skb->destructor = imq_skb_destructor; - kfree_skb(entry->skb); -@@ -231,7 +234,7 @@ static int imq_nf_queue(struct nf_queue_ - } - if (!test_and_set_bit(1, &priv->tasklet_pending)) - tasklet_schedule(&priv->tasklet); -- spin_unlock_bh(&dev->queue_lock); -+ __netif_tx_unlock_bh(txq); - - if (skb2) - kfree_skb(ret ? 
entry->skb : skb2); -@@ -248,11 +251,13 @@ static void qdisc_run_tasklet(unsigned l - { - struct net_device *dev = (struct net_device *)arg; - struct imq_private *priv = netdev_priv(dev); -+ struct netdev_queue *txq; - -- spin_lock(&dev->queue_lock); -- qdisc_run(dev); -+ netif_tx_lock(dev); -+ txq = netdev_get_tx_queue(dev, 0); -+ qdisc_run(txq->qdisc); - clear_bit(1, &priv->tasklet_pending); -- spin_unlock(&dev->queue_lock); -+ netif_tx_unlock(dev); - } - - static unsigned int imq_nf_hook(unsigned int hook, struct sk_buff *pskb, ---- a/net/ipv4/netfilter/ipt_IMQ.c -+++ b/net/ipv4/netfilter/ipt_IMQ.c -@@ -7,29 +7,23 @@ - #include <linux/netfilter_ipv4/ipt_IMQ.h> - #include <linux/imq.h> - --static unsigned int imq_target(struct sk_buff *pskb, -- const struct net_device *in, -- const struct net_device *out, -- unsigned int hooknum, -- const struct xt_target *target, -- const void *targinfo) -+static unsigned int -+imq_target(struct sk_buff *pskb, -+ const struct xt_target_param *par) - { -- struct ipt_imq_info *mr = (struct ipt_imq_info *)targinfo; -+ struct ipt_imq_info *mr = (struct ipt_imq_info *)par->targinfo; - - pskb->imq_flags = mr->todev | IMQ_F_ENQUEUE; - - return XT_CONTINUE; - } - --static bool imq_checkentry(const char *tablename, -- const void *e, -- const struct xt_target *target, -- void *targinfo, -- unsigned int hook_mask) -+static bool -+imq_checkentry(const struct xt_tgchk_param *par) - { - struct ipt_imq_info *mr; - -- mr = (struct ipt_imq_info *)targinfo; -+ mr = (struct ipt_imq_info *)par->targinfo; - - if (mr->todev > IMQ_MAX_DEVS) { - printk(KERN_WARNING diff --git a/target/linux/generic-2.6/patches-2.6.28/180-netfilter_depends.patch b/target/linux/generic-2.6/patches-2.6.28/180-netfilter_depends.patch index 9144417129..b5a2615d24 100644 --- a/target/linux/generic-2.6/patches-2.6.28/180-netfilter_depends.patch +++ b/target/linux/generic-2.6/patches-2.6.28/180-netfilter_depends.patch @@ -1,6 +1,6 @@ --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig -@@ -160,7 +160,6 @@ config NF_CONNTRACK_FTP +@@ -160,7 +160,6 @@ config NF_CONNTRACK_H323 tristate "H.323 protocol support" @@ -8,7 +8,7 @@ depends on NETFILTER_ADVANCED help H.323 is a VoIP signalling protocol from ITU-T. As one of the most -@@ -455,7 +454,6 @@ config NETFILTER_XT_TARGET_SECMARK +@@ -467,7 +466,6 @@ config NETFILTER_XT_TARGET_TCPMSS tristate '"TCPMSS" target support' diff --git a/target/linux/generic-2.6/patches-2.6.28/190-netfilter_rtsp.patch b/target/linux/generic-2.6/patches-2.6.28/190-netfilter_rtsp.patch index 62d4cf2e95..7aff0e6ddc 100644 --- a/target/linux/generic-2.6/patches-2.6.28/190-netfilter_rtsp.patch +++ b/target/linux/generic-2.6/patches-2.6.28/190-netfilter_rtsp.patch @@ -294,7 +294,7 @@ +#endif /* _NETFILTER_MIME_H */ --- a/net/ipv4/netfilter/Makefile +++ b/net/ipv4/netfilter/Makefile -@@ -26,6 +26,7 @@ obj-$(CONFIG_NF_NAT_AMANDA) += nf_nat_am +@@ -26,6 +26,7 @@ obj-$(CONFIG_NF_NAT_FTP) += nf_nat_ftp.o obj-$(CONFIG_NF_NAT_H323) += nf_nat_h323.o obj-$(CONFIG_NF_NAT_IRC) += nf_nat_irc.o @@ -304,7 +304,7 @@ obj-$(CONFIG_NF_NAT_SNMP_BASIC) += nf_nat_snmp_basic.o --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig -@@ -267,6 +267,16 @@ config NF_CONNTRACK_TFTP +@@ -267,6 +267,16 @@ To compile it as a module, choose M here. If unsure, say N. 
@@ -323,7 +323,7 @@ select NETFILTER_NETLINK --- a/net/netfilter/Makefile +++ b/net/netfilter/Makefile -@@ -33,6 +33,7 @@ obj-$(CONFIG_NF_CONNTRACK_PPTP) += nf_co +@@ -33,6 +33,7 @@ obj-$(CONFIG_NF_CONNTRACK_SANE) += nf_conntrack_sane.o obj-$(CONFIG_NF_CONNTRACK_SIP) += nf_conntrack_sip.o obj-$(CONFIG_NF_CONNTRACK_TFTP) += nf_conntrack_tftp.o @@ -333,7 +333,7 @@ obj-$(CONFIG_NETFILTER_TPROXY) += nf_tproxy_core.o --- a/net/ipv4/netfilter/Kconfig +++ b/net/ipv4/netfilter/Kconfig -@@ -268,6 +268,11 @@ config NF_NAT_IRC +@@ -257,6 +257,11 @@ depends on NF_CONNTRACK && NF_NAT default NF_NAT && NF_CONNTRACK_IRC diff --git a/target/linux/generic-2.6/patches-2.6.28/205-skb_padding.patch b/target/linux/generic-2.6/patches-2.6.28/205-skb_padding.patch index b47696af87..07e730a206 100644 --- a/target/linux/generic-2.6/patches-2.6.28/205-skb_padding.patch +++ b/target/linux/generic-2.6/patches-2.6.28/205-skb_padding.patch @@ -1,6 +1,6 @@ --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h -@@ -1256,9 +1256,12 @@ static inline int skb_network_offset(con +@@ -1270,9 +1270,12 @@ * * Various parts of the networking layer expect at least 16 bytes of * headroom, you should not reduce this. diff --git a/target/linux/generic-2.6/patches-2.6.29/150-netfilter_imq.patch b/target/linux/generic-2.6/patches-2.6.29/150-netfilter_imq.patch index cfbd996060..64cf22a017 100644 --- a/target/linux/generic-2.6/patches-2.6.29/150-netfilter_imq.patch +++ b/target/linux/generic-2.6/patches-2.6.29/150-netfilter_imq.patch @@ -1,6 +1,6 @@ --- /dev/null +++ b/drivers/net/imq.c -@@ -0,0 +1,474 @@ +@@ -0,0 +1,571 @@ +/* + * Pseudo-driver for the intermediate queue device. + * @@ -51,10 +51,28 @@ + * + * + * 2008/06/17 - 2.6.25 - Changed imq.c to use qdisc_run() instead -+ * of qdisc_restart() and moved qdisc_run() to tasklet to avoid ++ * of qdisc_restart() and moved qdisc_run() to tasklet to avoid + * recursive locking. New initialization routines to fix 'rmmod' not + * working anymore. Used code from ifb.c. (Jussi Kivilinna) + * ++ * 2008/08/06 - 2.6.26 - (JK) ++ * - Replaced tasklet with 'netif_schedule()'. ++ * - Cleaned up and added comments for imq_nf_queue(). ++ * ++ * 2009/04/12 ++ * - Add skb_save_cb/skb_restore_cb helper functions for backuping ++ * control buffer. This is needed because qdisc-layer on kernels ++ * 2.6.27 and newer overwrite control buffer. (Jussi Kivilinna) ++ * - Add better locking for IMQ device. Hopefully this will solve ++ * SMP issues. (Jussi Kivilinna) ++ * - Port to 2.6.27 ++ * - Port to 2.6.28 ++ * - Port to 2.6.29 + fix rmmod not working ++ * ++ * 2009/04/20 - (Jussi Kivilinna) ++ * - Use netdevice feature flags to avoid extra packet handling ++ * by core networking layer and possibly increase performance. ++ * + * Also, many thanks to pablo Sebastian Greco for making the initial + * patch and to those who helped the testing. 
+ * @@ -64,8 +82,10 @@ +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/moduleparam.h> ++#include <linux/list.h> +#include <linux/skbuff.h> +#include <linux/netdevice.h> ++#include <linux/etherdevice.h> +#include <linux/rtnetlink.h> +#include <linux/if_arp.h> +#include <linux/netfilter.h> @@ -77,11 +97,6 @@ +#include <net/pkt_sched.h> +#include <net/netfilter/nf_queue.h> + -+struct imq_private { -+ struct tasklet_struct tasklet; -+ unsigned long tasklet_pending; -+}; -+ +static nf_hookfn imq_nf_hook; + +static struct nf_hook_ops imq_ingress_ipv4 = { @@ -140,8 +155,11 @@ +static unsigned int numdevs = IMQ_MAX_DEVS; +#endif + ++static DEFINE_SPINLOCK(imq_nf_queue_lock); ++ +static struct net_device *imq_devs_cache[IMQ_MAX_DEVS]; + ++ +static struct net_device_stats *imq_get_stats(struct net_device *dev) +{ + return &dev->stats; @@ -153,12 +171,35 @@ + struct nf_queue_entry *entry = skb->nf_queue_entry; + + if (entry) { -+ if (entry->indev) -+ dev_put(entry->indev); -+ if (entry->outdev) -+ dev_put(entry->outdev); ++ nf_queue_entry_release_refs(entry); ++ kfree(entry); ++ } ++ ++ skb_restore_cb(skb); /* kfree backup */ ++} ++ ++static void imq_nf_reinject(struct nf_queue_entry *entry, unsigned int verdict) ++{ ++ int status; ++ ++ if (!entry->next_outfn) { ++ spin_lock_bh(&imq_nf_queue_lock); ++ nf_reinject(entry, verdict); ++ spin_unlock_bh(&imq_nf_queue_lock); ++ return; ++ } ++ ++ rcu_read_lock(); ++ local_bh_disable(); ++ status = entry->next_outfn(entry, entry->next_queuenum); ++ local_bh_enable(); ++ if (status < 0) { ++ nf_queue_entry_release_refs(entry); ++ kfree_skb(entry->skb); + kfree(entry); + } ++ ++ rcu_read_unlock(); +} + +static int imq_dev_xmit(struct sk_buff *skb, struct net_device *dev) @@ -169,26 +210,35 @@ + skb->imq_flags = 0; + skb->destructor = NULL; + ++ skb_restore_cb(skb); /* restore skb->cb */ ++ + dev->trans_start = jiffies; -+ nf_reinject(skb->nf_queue_entry, NF_ACCEPT); ++ imq_nf_reinject(skb->nf_queue_entry, NF_ACCEPT); + return 0; +} + +static int imq_nf_queue(struct nf_queue_entry *entry, unsigned queue_num) +{ + struct net_device *dev; -+ struct imq_private *priv; -+ struct sk_buff *skb2 = NULL; ++ struct sk_buff *skb_orig, *skb, *skb_shared; + struct Qdisc *q; -+ unsigned int index = entry->skb->imq_flags & IMQ_F_IFMASK; -+ int ret = -1; -+ -+ if (index > numdevs) -+ return -1; ++ struct netdev_queue *txq; ++ int users, index; ++ int retval = -EINVAL; ++ ++ index = entry->skb->imq_flags & IMQ_F_IFMASK; ++ if (unlikely(index > numdevs - 1)) { ++ if (net_ratelimit()) ++ printk(KERN_WARNING ++ "IMQ: invalid device specified, highest is %u\n", ++ numdevs - 1); ++ retval = -EINVAL; ++ goto out; ++ } + + /* check for imq device by index from cache */ + dev = imq_devs_cache[index]; -+ if (!dev) { ++ if (unlikely(!dev)) { + char buf[8]; + + /* get device by name and cache result */ @@ -197,49 +247,90 @@ + if (!dev) { + /* not found ?!*/ + BUG(); -+ return -1; ++ retval = -ENODEV; ++ goto out; + } + + imq_devs_cache[index] = dev; ++ dev_put(dev); + } + -+ priv = netdev_priv(dev); -+ if (!(dev->flags & IFF_UP)) { ++ if (unlikely(!(dev->flags & IFF_UP))) { + entry->skb->imq_flags = 0; -+ nf_reinject(entry, NF_ACCEPT); -+ return 0; ++ imq_nf_reinject(entry, NF_ACCEPT); ++ retval = 0; ++ goto out; + } + dev->last_rx = jiffies; + -+ if (entry->skb->destructor) { -+ skb2 = entry->skb; -+ entry->skb = skb_clone(entry->skb, GFP_ATOMIC); -+ if (!entry->skb) -+ return -1; ++ skb = entry->skb; ++ skb_orig = NULL; ++ ++ /* skb has owner? 
=> make clone */ ++ if (unlikely(skb->destructor)) { ++ skb_orig = skb; ++ skb = skb_clone(skb, GFP_ATOMIC); ++ if (!skb) { ++ retval = -ENOMEM; ++ goto out; ++ } ++ entry->skb = skb; + } -+ entry->skb->nf_queue_entry = entry; + -+ dev->stats.rx_bytes += entry->skb->len; ++ skb->nf_queue_entry = entry; ++ ++ dev->stats.rx_bytes += skb->len; + dev->stats.rx_packets++; + -+ spin_lock_bh(&dev->queue_lock); -+ q = dev->qdisc; -+ if (q->enqueue) { -+ q->enqueue(skb_get(entry->skb), q); -+ if (skb_shared(entry->skb)) { -+ entry->skb->destructor = imq_skb_destructor; -+ kfree_skb(entry->skb); -+ ret = 0; -+ } -+ } -+ if (!test_and_set_bit(1, &priv->tasklet_pending)) -+ tasklet_schedule(&priv->tasklet); -+ spin_unlock_bh(&dev->queue_lock); ++ txq = dev_pick_tx(dev, skb); + -+ if (skb2) -+ kfree_skb(ret ? entry->skb : skb2); ++ q = rcu_dereference(txq->qdisc); ++ if (unlikely(!q->enqueue)) ++ goto packet_not_eaten_by_imq_dev; + -+ return ret; ++ spin_lock_bh(qdisc_lock(q)); ++ ++ users = atomic_read(&skb->users); ++ ++ skb_shared = skb_get(skb); /* increase reference count by one */ ++ skb_save_cb(skb_shared); /* backup skb->cb, as qdisc layer will ++ overwrite it */ ++ qdisc_enqueue_root(skb_shared, q); /* might kfree_skb */ ++ ++ if (likely(atomic_read(&skb_shared->users) == users + 1)) { ++ kfree_skb(skb_shared); /* decrease reference count by one */ ++ ++ skb->destructor = &imq_skb_destructor; ++ ++ /* cloned? */ ++ if (skb_orig) ++ kfree_skb(skb_orig); /* free original */ ++ ++ spin_unlock_bh(qdisc_lock(q)); ++ ++ /* schedule qdisc dequeue */ ++ __netif_schedule(q); ++ ++ retval = 0; ++ goto out; ++ } else { ++ skb_restore_cb(skb_shared); /* restore skb->cb */ ++ /* qdisc dropped packet and decreased skb reference count of ++ * skb, so we don't really want to and try refree as that would ++ * actually destroy the skb. */ ++ spin_unlock_bh(qdisc_lock(q)); ++ goto packet_not_eaten_by_imq_dev; ++ } ++ ++packet_not_eaten_by_imq_dev: ++ /* cloned? 
restore original */ ++ if (skb_orig) { ++ kfree_skb(skb); ++ entry->skb = skb_orig; ++ } ++ retval = -1; ++out: ++ return retval; +} + +static struct nf_queue_handler nfqh = { @@ -247,17 +338,6 @@ + .outfn = imq_nf_queue, +}; + -+static void qdisc_run_tasklet(unsigned long arg) -+{ -+ struct net_device *dev = (struct net_device *)arg; -+ struct imq_private *priv = netdev_priv(dev); -+ -+ spin_lock(&dev->queue_lock); -+ qdisc_run(dev); -+ clear_bit(1, &priv->tasklet_pending); -+ spin_unlock(&dev->queue_lock); -+} -+ +static unsigned int imq_nf_hook(unsigned int hook, struct sk_buff *pskb, + const struct net_device *indev, + const struct net_device *outdev, @@ -271,87 +351,98 @@ + +static int imq_close(struct net_device *dev) +{ -+ struct imq_private *priv = netdev_priv(dev); -+ -+ tasklet_kill(&priv->tasklet); + netif_stop_queue(dev); -+ + return 0; +} + +static int imq_open(struct net_device *dev) +{ -+ struct imq_private *priv = netdev_priv(dev); -+ -+ tasklet_init(&priv->tasklet, qdisc_run_tasklet, (unsigned long)dev); + netif_start_queue(dev); -+ + return 0; +} + ++static const struct net_device_ops imq_netdev_ops = { ++ .ndo_open = imq_open, ++ .ndo_stop = imq_close, ++ .ndo_start_xmit = imq_dev_xmit, ++ .ndo_get_stats = imq_get_stats, ++}; ++ +static void imq_setup(struct net_device *dev) +{ -+ dev->hard_start_xmit = imq_dev_xmit; -+ dev->open = imq_open; -+ dev->get_stats = imq_get_stats; -+ dev->stop = imq_close; ++ dev->netdev_ops = &imq_netdev_ops; + dev->type = ARPHRD_VOID; + dev->mtu = 16000; + dev->tx_queue_len = 11000; + dev->flags = IFF_NOARP; ++ dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | ++ NETIF_F_GSO | NETIF_F_HW_CSUM | ++ NETIF_F_HIGHDMA; ++} ++ ++static int imq_validate(struct nlattr *tb[], struct nlattr *data[]) ++{ ++ int ret = 0; ++ ++ if (tb[IFLA_ADDRESS]) { ++ if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) { ++ ret = -EINVAL; ++ goto end; ++ } ++ if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) { ++ ret = -EADDRNOTAVAIL; ++ goto end; ++ } ++ } ++ return 0; ++end: ++ printk(KERN_WARNING "IMQ: imq_validate failed (%d)\n", ret); ++ return ret; +} + +static struct rtnl_link_ops imq_link_ops __read_mostly = { + .kind = "imq", -+ .priv_size = sizeof(struct imq_private), ++ .priv_size = 0, + .setup = imq_setup, ++ .validate = imq_validate, +}; + +static int __init imq_init_hooks(void) +{ + int err; + -+ err = nf_register_queue_handler(PF_INET, &nfqh); -+ if (err) -+ goto err1; ++ nf_register_queue_imq_handler(&nfqh); + + err = nf_register_hook(&imq_ingress_ipv4); + if (err) -+ goto err2; ++ goto err1; + + err = nf_register_hook(&imq_egress_ipv4); + if (err) -+ goto err3; ++ goto err2; + +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) -+ err = nf_register_queue_handler(PF_INET6, &nfqh); -+ if (err) -+ goto err4; -+ + err = nf_register_hook(&imq_ingress_ipv6); + if (err) -+ goto err5; ++ goto err3; + + err = nf_register_hook(&imq_egress_ipv6); + if (err) -+ goto err6; ++ goto err4; +#endif + + return 0; + +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) -+err6: -+ nf_unregister_hook(&imq_ingress_ipv6); -+err5: -+ nf_unregister_queue_handler(PF_INET6, &nfqh); +err4: ++ nf_unregister_hook(&imq_ingress_ipv6); ++err3: + nf_unregister_hook(&imq_egress_ipv4); +#endif -+err3: -+ nf_unregister_hook(&imq_ingress_ipv4); +err2: -+ nf_unregister_queue_handler(PF_INET, &nfqh); ++ nf_unregister_hook(&imq_ingress_ipv4); +err1: ++ nf_unregister_queue_imq_handler(); + return err; +} + @@ -360,7 +451,7 @@ + struct net_device *dev; + int ret; + -+ dev = 
alloc_netdev(sizeof(struct imq_private), "imq%d", imq_setup); ++ dev = alloc_netdev(0, "imq%d", imq_setup); + if (!dev) + return -ENOMEM; + @@ -383,7 +474,7 @@ +{ + int err, i; + -+ if (!numdevs || numdevs > IMQ_MAX_DEVS) { ++ if (numdevs < 1 || numdevs > IMQ_MAX_DEVS) { + printk(KERN_ERR "IMQ: numdevs has to be betweed 1 and %u\n", + IMQ_MAX_DEVS); + return -EINVAL; @@ -408,6 +499,12 @@ +{ + int err; + ++#if defined(CONFIG_IMQ_NUM_DEVS) ++ BUILD_BUG_ON(CONFIG_IMQ_NUM_DEVS > 16); ++ BUILD_BUG_ON(CONFIG_IMQ_NUM_DEVS < 2); ++ BUILD_BUG_ON(CONFIG_IMQ_NUM_DEVS - 1 > IMQ_F_IFMASK); ++#endif ++ + err = imq_init_devs(); + if (err) { + printk(KERN_ERR "IMQ: Error trying imq_init_devs(net)\n"); @@ -443,11 +540,11 @@ +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) + nf_unregister_hook(&imq_ingress_ipv6); + nf_unregister_hook(&imq_egress_ipv6); -+ nf_unregister_queue_handler(PF_INET6, &nfqh); +#endif + nf_unregister_hook(&imq_ingress_ipv4); + nf_unregister_hook(&imq_egress_ipv4); -+ nf_unregister_queue_handler(PF_INET, &nfqh); ++ ++ nf_unregister_queue_imq_handler(); +} + +static void __exit imq_cleanup_devs(void) @@ -477,7 +574,7 @@ + --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig -@@ -110,6 +110,129 @@ config EQUALIZER +@@ -110,6 +110,129 @@ To compile this driver as a module, choose M here: the module will be called eql. If unsure, say N. @@ -609,7 +706,7 @@ select CRC32 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile -@@ -150,6 +150,7 @@ obj-$(CONFIG_SLHC) += slhc.o +@@ -150,6 +150,7 @@ obj-$(CONFIG_XEN_NETDEV_FRONTEND) += xen-netfront.o obj-$(CONFIG_DUMMY) += dummy.o @@ -619,52 +716,102 @@ obj-$(CONFIG_DE600) += de600.o --- /dev/null +++ b/include/linux/imq.h -@@ -0,0 +1,9 @@ +@@ -0,0 +1,13 @@ +#ifndef _IMQ_H +#define _IMQ_H + -+#define IMQ_MAX_DEVS 16 ++/* IFMASK (16 device indexes, 0 to 15) and flag(s) fit in 5 bits */ ++#define IMQ_F_BITS 5 ++ ++#define IMQ_F_IFMASK 0x0f ++#define IMQ_F_ENQUEUE 0x10 + -+#define IMQ_F_IFMASK 0x7f -+#define IMQ_F_ENQUEUE 0x80 ++#define IMQ_MAX_DEVS (IMQ_F_IFMASK + 1) + +#endif /* _IMQ_H */ ++ --- /dev/null +++ b/include/linux/netfilter_ipv4/ipt_IMQ.h -@@ -0,0 +1,8 @@ +@@ -0,0 +1,10 @@ +#ifndef _IPT_IMQ_H +#define _IPT_IMQ_H + -+struct ipt_imq_info { -+ unsigned int todev; /* target imq device */ -+}; ++/* Backwards compatibility for old userspace */ ++#include <linux/netfilter/xt_IMQ.h> ++ ++#define ipt_imq_info xt_imq_info + +#endif /* _IPT_IMQ_H */ ++ --- /dev/null +++ b/include/linux/netfilter_ipv6/ip6t_IMQ.h -@@ -0,0 +1,8 @@ +@@ -0,0 +1,10 @@ +#ifndef _IP6T_IMQ_H +#define _IP6T_IMQ_H + -+struct ip6t_imq_info { -+ unsigned int todev; /* target imq device */ -+}; ++/* Backwards compatibility for old userspace */ ++#include <linux/netfilter/xt_IMQ.h> ++ ++#define ip6t_imq_info xt_imq_info + +#endif /* _IP6T_IMQ_H */ ++ --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h -@@ -312,6 +312,10 @@ struct sk_buff { +@@ -28,6 +28,9 @@ + #include <linux/rcupdate.h> + #include <linux/dmaengine.h> + #include <linux/hrtimer.h> ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++#include <linux/imq.h> ++#endif + + #define HAVE_ALLOC_SKB /* For the drivers to know */ + #define HAVE_ALIGNABLE_SKB /* Ditto 8) */ +@@ -282,6 +285,9 @@ + * first. This is owned by whoever has the skb queued ATM. 
+ */ + char cb[48]; ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++ void *cb_next; ++#endif + + unsigned int len, + data_len; +@@ -312,6 +318,9 @@ struct nf_conntrack *nfct; struct sk_buff *nfct_reasm; #endif +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) -+ unsigned char imq_flags; + struct nf_queue_entry *nf_queue_entry; +#endif #ifdef CONFIG_BRIDGE_NETFILTER struct nf_bridge_info *nf_bridge; #endif -@@ -1844,6 +1848,10 @@ static inline void __nf_copy(struct sk_b +@@ -332,6 +341,9 @@ + __u8 requeue:1; + #endif + /* 0/13/14 bit hole */ ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++ __u8 imq_flags:IMQ_F_BITS; ++#endif + + #ifdef CONFIG_NET_DMA + dma_cookie_t dma_cookie; +@@ -372,6 +384,12 @@ + enum dma_data_direction dir); + #endif + ++ ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++extern int skb_save_cb(struct sk_buff *skb); ++extern int skb_restore_cb(struct sk_buff *skb); ++#endif ++ + extern void kfree_skb(struct sk_buff *skb); + extern void __kfree_skb(struct sk_buff *skb); + extern struct sk_buff *__alloc_skb(unsigned int size, +@@ -1844,6 +1862,10 @@ dst->nfct_reasm = src->nfct_reasm; nf_conntrack_get_reasm(src->nfct_reasm); #endif @@ -687,7 +834,7 @@ #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/stat.h> -@@ -1671,7 +1674,11 @@ int dev_hard_start_xmit(struct sk_buff * +@@ -1671,7 +1674,11 @@ prefetch(&dev->netdev_ops->ndo_start_xmit); if (likely(!skb->next)) { @@ -700,215 +847,414 @@ dev_queue_xmit_nit(skb, dev); if (netif_needs_gso(dev, skb)) { +@@ -1762,8 +1769,7 @@ + return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32); + } + +-static struct netdev_queue *dev_pick_tx(struct net_device *dev, +- struct sk_buff *skb) ++struct netdev_queue *dev_pick_tx(struct net_device *dev, struct sk_buff *skb) + { + const struct net_device_ops *ops = dev->netdev_ops; + u16 queue_index = 0; +@@ -1776,6 +1782,7 @@ + skb_set_queue_mapping(skb, queue_index); + return netdev_get_tx_queue(dev, queue_index); + } ++EXPORT_SYMBOL(dev_pick_tx); + + /** + * dev_queue_xmit - transmit a buffer +--- a/include/linux/netdevice.h ++++ b/include/linux/netdevice.h +@@ -1071,6 +1071,7 @@ + extern int dev_open(struct net_device *dev); + extern int dev_close(struct net_device *dev); + extern void dev_disable_lro(struct net_device *dev); ++extern struct netdev_queue *dev_pick_tx(struct net_device *dev, struct sk_buff *skb); + extern int dev_queue_xmit(struct sk_buff *skb); + extern int register_netdevice(struct net_device *dev); + extern void unregister_netdevice(struct net_device *dev); --- /dev/null -+++ b/net/ipv4/netfilter/ipt_IMQ.c -@@ -0,0 +1,69 @@ -+/* -+ * This target marks packets to be enqueued to an imq device -+ */ -+#include <linux/module.h> -+#include <linux/skbuff.h> -+#include <linux/netfilter_ipv4/ip_tables.h> -+#include <linux/netfilter_ipv4/ipt_IMQ.h> -+#include <linux/imq.h> ++++ b/include/linux/netfilter/xt_IMQ.h +@@ -0,0 +1,9 @@ ++#ifndef _XT_IMQ_H ++#define _XT_IMQ_H + -+static unsigned int imq_target(struct sk_buff *pskb, -+ const struct net_device *in, -+ const struct net_device *out, -+ unsigned int hooknum, -+ const struct xt_target *target, -+ const void *targinfo) ++struct xt_imq_info { ++ unsigned int todev; /* target imq device */ ++}; ++ ++#endif /* _XT_IMQ_H */ ++ +--- a/include/net/netfilter/nf_queue.h ++++ b/include/net/netfilter/nf_queue.h +@@ -13,6 +13,12 @@ + struct net_device *indev; + struct net_device *outdev; + int (*okfn)(struct sk_buff *); ++ ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++ 
int (*next_outfn)(struct nf_queue_entry *entry, ++ unsigned int queuenum); ++ unsigned int next_queuenum; ++#endif + }; + + #define nf_queue_entry_reroute(x) ((void *)x + sizeof(struct nf_queue_entry)) +@@ -30,5 +36,11 @@ + const struct nf_queue_handler *qh); + extern void nf_unregister_queue_handlers(const struct nf_queue_handler *qh); + extern void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict); ++extern void nf_queue_entry_release_refs(struct nf_queue_entry *entry); ++ ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++extern void nf_register_queue_imq_handler(const struct nf_queue_handler *qh); ++extern void nf_unregister_queue_imq_handler(void); ++#endif + + #endif /* _NF_QUEUE_H */ +--- a/net/core/skbuff.c ++++ b/net/core/skbuff.c +@@ -69,6 +69,9 @@ + + static struct kmem_cache *skbuff_head_cache __read_mostly; + static struct kmem_cache *skbuff_fclone_cache __read_mostly; ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++static struct kmem_cache *skbuff_cb_store_cache __read_mostly; ++#endif + + static void sock_pipe_buf_release(struct pipe_inode_info *pipe, + struct pipe_buffer *buf) +@@ -88,6 +91,80 @@ + return 1; + } + ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++/* Control buffer save/restore for IMQ devices */ ++struct skb_cb_table { ++ void *cb_next; ++ atomic_t refcnt; ++ char cb[48]; ++}; ++ ++static DEFINE_SPINLOCK(skb_cb_store_lock); ++ ++int skb_save_cb(struct sk_buff *skb) +{ -+ struct ipt_imq_info *mr = (struct ipt_imq_info *)targinfo; ++ struct skb_cb_table *next; + -+ pskb->imq_flags = mr->todev | IMQ_F_ENQUEUE; ++ next = kmem_cache_alloc(skbuff_cb_store_cache, GFP_ATOMIC); ++ if (!next) ++ return -ENOMEM; + -+ return XT_CONTINUE; ++ BUILD_BUG_ON(sizeof(skb->cb) != sizeof(next->cb)); ++ ++ memcpy(next->cb, skb->cb, sizeof(skb->cb)); ++ next->cb_next = skb->cb_next; ++ ++ atomic_set(&next->refcnt, 1); ++ ++ skb->cb_next = next; ++ return 0; +} ++EXPORT_SYMBOL(skb_save_cb); + -+static bool imq_checkentry(const char *tablename, -+ const void *e, -+ const struct xt_target *target, -+ void *targinfo, -+ unsigned int hook_mask) ++int skb_restore_cb(struct sk_buff *skb) +{ -+ struct ipt_imq_info *mr; -+ -+ mr = (struct ipt_imq_info *)targinfo; ++ struct skb_cb_table *next; + -+ if (mr->todev > IMQ_MAX_DEVS) { -+ printk(KERN_WARNING -+ "IMQ: invalid device specified, highest is %u\n", -+ IMQ_MAX_DEVS); ++ if (!skb->cb_next) + return 0; ++ ++ next = skb->cb_next; ++ ++ BUILD_BUG_ON(sizeof(skb->cb) != sizeof(next->cb)); ++ ++ memcpy(skb->cb, next->cb, sizeof(skb->cb)); ++ skb->cb_next = next->cb_next; ++ ++ spin_lock(&skb_cb_store_lock); ++ ++ if (atomic_dec_and_test(&next->refcnt)) { ++ kmem_cache_free(skbuff_cb_store_cache, next); + } + -+ return 1; ++ spin_unlock(&skb_cb_store_lock); ++ ++ return 0; +} ++EXPORT_SYMBOL(skb_restore_cb); + -+static struct xt_target ipt_imq_reg = { -+ .name = "IMQ", -+ .family = AF_INET, -+ .target = imq_target, -+ .targetsize = sizeof(struct ipt_imq_info), -+ .checkentry = imq_checkentry, -+ .me = THIS_MODULE, -+ .table = "mangle" -+}; ++static void skb_copy_stored_cb(struct sk_buff *new, struct sk_buff *old) ++{ ++ struct skb_cb_table *next; ++ ++ if (!old->cb_next) { ++ new->cb_next = 0; ++ return; ++ } ++ ++ spin_lock(&skb_cb_store_lock); ++ ++ next = old->cb_next; ++ atomic_inc(&next->refcnt); ++ new->cb_next = next; ++ ++ spin_unlock(&skb_cb_store_lock); ++} ++#endif + + /* Pipe buffer operations for a socket. 
*/ + static struct pipe_buf_operations sock_pipe_buf_ops = { +@@ -381,6 +458,15 @@ + WARN_ON(in_irq()); + skb->destructor(skb); + } ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++ /* This should not happen. When it does, avoid memleak by restoring ++ the chain of cb-backups. */ ++ while(skb->cb_next != NULL) { ++ printk(KERN_WARNING "kfree_skb: skb->cb_next: %08x\n", ++ skb->cb_next); ++ skb_restore_cb(skb); ++ } ++#endif + #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) + nf_conntrack_put(skb->nfct); + nf_conntrack_put_reasm(skb->nfct_reasm); +@@ -493,6 +579,9 @@ + new->sp = secpath_get(old->sp); + #endif + memcpy(new->cb, old->cb, sizeof(old->cb)); ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++ skb_copy_stored_cb(new, old); ++#endif + new->csum_start = old->csum_start; + new->csum_offset = old->csum_offset; + new->local_df = old->local_df; +@@ -2664,6 +2753,13 @@ + 0, + SLAB_HWCACHE_ALIGN|SLAB_PANIC, + NULL); ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++ skbuff_cb_store_cache = kmem_cache_create("skbuff_cb_store_cache", ++ sizeof(struct skb_cb_table), ++ 0, ++ SLAB_HWCACHE_ALIGN|SLAB_PANIC, ++ NULL); ++#endif + } + + /** +--- a/net/netfilter/Kconfig ++++ b/net/netfilter/Kconfig +@@ -357,6 +357,18 @@ + + To compile it as a module, choose M here. If unsure, say N. + ++config NETFILTER_XT_TARGET_IMQ ++ tristate '"IMQ" target support' ++ depends on NETFILTER_XTABLES ++ depends on IP_NF_MANGLE || IP6_NF_MANGLE ++ select IMQ ++ default m if NETFILTER_ADVANCED=n ++ help ++ This option adds a `IMQ' target which is used to specify if and ++ to which imq device packets should get enqueued/dequeued. + -+static int __init init(void) ++ To compile it as a module, choose M here. If unsure, say N. ++ + config NETFILTER_XT_TARGET_MARK + tristate '"MARK" target support' + default m if NETFILTER_ADVANCED=n +--- a/net/netfilter/Makefile ++++ b/net/netfilter/Makefile +@@ -45,6 +45,7 @@ + obj-$(CONFIG_NETFILTER_XT_TARGET_CONNMARK) += xt_CONNMARK.o + obj-$(CONFIG_NETFILTER_XT_TARGET_CONNSECMARK) += xt_CONNSECMARK.o + obj-$(CONFIG_NETFILTER_XT_TARGET_DSCP) += xt_DSCP.o ++obj-$(CONFIG_NETFILTER_XT_TARGET_IMQ) += xt_IMQ.o + obj-$(CONFIG_NETFILTER_XT_TARGET_MARK) += xt_MARK.o + obj-$(CONFIG_NETFILTER_XT_TARGET_NFLOG) += xt_NFLOG.o + obj-$(CONFIG_NETFILTER_XT_TARGET_NFQUEUE) += xt_NFQUEUE.o +--- a/net/netfilter/nf_queue.c ++++ b/net/netfilter/nf_queue.c +@@ -20,6 +20,26 @@ + + static DEFINE_MUTEX(queue_handler_mutex); + ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++static const struct nf_queue_handler *queue_imq_handler; ++ ++void nf_register_queue_imq_handler(const struct nf_queue_handler *qh) +{ -+ return xt_register_target(&ipt_imq_reg); ++ mutex_lock(&queue_handler_mutex); ++ rcu_assign_pointer(queue_imq_handler, qh); ++ mutex_unlock(&queue_handler_mutex); +} ++EXPORT_SYMBOL(nf_register_queue_imq_handler); + -+static void __exit fini(void) ++void nf_unregister_queue_imq_handler(void) +{ -+ xt_unregister_target(&ipt_imq_reg); ++ mutex_lock(&queue_handler_mutex); ++ rcu_assign_pointer(queue_imq_handler, NULL); ++ mutex_unlock(&queue_handler_mutex); +} ++EXPORT_SYMBOL(nf_unregister_queue_imq_handler); ++#endif + -+module_init(init); -+module_exit(fini); + /* return EBUSY when somebody else is registered, return EEXIST if the + * same handler is registered, return 0 in case of success. 
*/ + int nf_register_queue_handler(u_int8_t pf, const struct nf_queue_handler *qh) +@@ -80,7 +100,7 @@ + } + EXPORT_SYMBOL_GPL(nf_unregister_queue_handlers); + +-static void nf_queue_entry_release_refs(struct nf_queue_entry *entry) ++void nf_queue_entry_release_refs(struct nf_queue_entry *entry) + { + /* Release those devices we held, or Alexey will kill me. */ + if (entry->indev) +@@ -100,6 +120,7 @@ + /* Drop reference to owner of hook which queued us. */ + module_put(entry->elem->owner); + } ++EXPORT_SYMBOL_GPL(nf_queue_entry_release_refs); + + /* + * Any packet that leaves via this function must come back +@@ -121,12 +142,26 @@ + #endif + const struct nf_afinfo *afinfo; + const struct nf_queue_handler *qh; ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++ const struct nf_queue_handler *qih = NULL; ++#endif + + /* QUEUE == DROP if noone is waiting, to be safe. */ + rcu_read_lock(); + + qh = rcu_dereference(queue_handler[pf]); ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) ++ if (pf == PF_INET || pf == PF_INET6) ++#else ++ if (pf == PF_INET) ++#endif ++ qih = rcu_dereference(queue_imq_handler); + -+MODULE_AUTHOR("http://www.linuximq.net"); -+MODULE_DESCRIPTION("Pseudo-driver for the intermediate queue device. See http://www.linuximq.net/ for more information."); -+MODULE_LICENSE("GPL"); ---- a/net/ipv4/netfilter/Kconfig -+++ b/net/ipv4/netfilter/Kconfig -@@ -112,6 +112,17 @@ config IP_NF_FILTER ++ if (!qh && !qih) ++#else /* !IMQ */ + if (!qh) ++#endif + goto err_unlock; - To compile it as a module, choose M here. If unsure, say N. + afinfo = nf_get_afinfo(pf); +@@ -145,6 +180,10 @@ + .indev = indev, + .outdev = outdev, + .okfn = okfn, ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++ .next_outfn = qh ? qh->outfn : NULL, ++ .next_queuenum = queuenum, ++#endif + }; + + /* If it's going away, ignore hook. */ +@@ -170,8 +209,19 @@ + } + #endif + afinfo->saveroute(skb, entry); ++ ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++ if (qih) { ++ status = qih->outfn(entry, queuenum); ++ goto imq_skip_queue; ++ } ++#endif ++ + status = qh->outfn(entry, queuenum); + ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++imq_skip_queue: ++#endif + rcu_read_unlock(); -+config IP_NF_TARGET_IMQ -+ tristate "IMQ target support" -+ depends on IP_NF_MANGLE && IMQ -+ help -+ This option adds a `IMQ' target which is used to specify if and -+ to which IMQ device packets should get enqueued/dequeued. -+ -+ For more information visit: http://www.linuximq.net/ -+ -+ To compile it as a module, choose M here. If unsure, say N. 
-+ - config IP_NF_TARGET_REJECT - tristate "REJECT target support" - depends on IP_NF_FILTER ---- a/net/ipv4/netfilter/Makefile -+++ b/net/ipv4/netfilter/Makefile -@@ -58,6 +58,7 @@ obj-$(CONFIG_IP_NF_MATCH_SET) += ipt_set - obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o - obj-$(CONFIG_IP_NF_TARGET_ECN) += ipt_ECN.o - obj-$(CONFIG_IP_NF_TARGET_LOG) += ipt_LOG.o -+obj-$(CONFIG_IP_NF_TARGET_IMQ) += ipt_IMQ.o - obj-$(CONFIG_IP_NF_TARGET_MASQUERADE) += ipt_MASQUERADE.o - obj-$(CONFIG_IP_NF_TARGET_NETMAP) += ipt_NETMAP.o - obj-$(CONFIG_IP_NF_TARGET_REDIRECT) += ipt_REDIRECT.o + if (status < 0) { --- /dev/null -+++ b/net/ipv6/netfilter/ip6t_IMQ.c -@@ -0,0 +1,69 @@ ++++ b/net/netfilter/xt_IMQ.c +@@ -0,0 +1,73 @@ +/* + * This target marks packets to be enqueued to an imq device + */ +#include <linux/module.h> +#include <linux/skbuff.h> -+#include <linux/netfilter_ipv6/ip6_tables.h> -+#include <linux/netfilter_ipv6/ip6t_IMQ.h> ++#include <linux/netfilter/x_tables.h> ++#include <linux/netfilter/xt_IMQ.h> +#include <linux/imq.h> + +static unsigned int imq_target(struct sk_buff *pskb, -+ const struct net_device *in, -+ const struct net_device *out, -+ unsigned int hooknum, -+ const struct xt_target *target, -+ const void *targinfo) ++ const struct xt_target_param *par) +{ -+ struct ip6t_imq_info *mr = (struct ip6t_imq_info *)targinfo; ++ const struct xt_imq_info *mr = par->targinfo; + -+ pskb->imq_flags = mr->todev | IMQ_F_ENQUEUE; ++ pskb->imq_flags = (mr->todev & IMQ_F_IFMASK) | IMQ_F_ENQUEUE; + + return XT_CONTINUE; +} + -+static bool imq_checkentry(const char *tablename, -+ const void *entry, -+ const struct xt_target *target, -+ void *targinfo, -+ unsigned int hook_mask) ++static bool imq_checkentry(const struct xt_tgchk_param *par) +{ -+ struct ip6t_imq_info *mr; ++ struct xt_imq_info *mr = par->targinfo; + -+ mr = (struct ip6t_imq_info *)targinfo; -+ -+ if (mr->todev > IMQ_MAX_DEVS) { ++ if (mr->todev > IMQ_MAX_DEVS - 1) { + printk(KERN_WARNING + "IMQ: invalid device specified, highest is %u\n", -+ IMQ_MAX_DEVS); ++ IMQ_MAX_DEVS - 1); + return 0; + } + + return 1; +} + -+static struct xt_target ip6t_imq_reg = { -+ .name = "IMQ", -+ .family = AF_INET6, -+ .target = imq_target, -+ .targetsize = sizeof(struct ip6t_imq_info), -+ .table = "mangle", -+ .checkentry = imq_checkentry, -+ .me = THIS_MODULE ++static struct xt_target xt_imq_reg[] __read_mostly = { ++ { ++ .name = "IMQ", ++ .family = AF_INET, ++ .checkentry = imq_checkentry, ++ .target = imq_target, ++ .targetsize = sizeof(struct xt_imq_info), ++ .table = "mangle", ++ .me = THIS_MODULE ++ }, ++ { ++ .name = "IMQ", ++ .family = AF_INET6, ++ .checkentry = imq_checkentry, ++ .target = imq_target, ++ .targetsize = sizeof(struct xt_imq_info), ++ .table = "mangle", ++ .me = THIS_MODULE ++ }, +}; + -+static int __init init(void) ++static int __init imq_init(void) +{ -+ return xt_register_target(&ip6t_imq_reg); ++ return xt_register_targets(xt_imq_reg, ARRAY_SIZE(xt_imq_reg)); +} + -+static void __exit fini(void) ++static void __exit imq_fini(void) +{ -+ xt_unregister_target(&ip6t_imq_reg); ++ xt_unregister_targets(xt_imq_reg, ARRAY_SIZE(xt_imq_reg)); +} + -+module_init(init); -+module_exit(fini); ++module_init(imq_init); ++module_exit(imq_fini); + +MODULE_AUTHOR("http://www.linuximq.net"); +MODULE_DESCRIPTION("Pseudo-driver for the intermediate queue device. 
See http://www.linuximq.net/ for more information."); +MODULE_LICENSE("GPL"); ---- a/net/ipv6/netfilter/Kconfig -+++ b/net/ipv6/netfilter/Kconfig -@@ -170,6 +170,15 @@ config IP6_NF_MANGLE - - To compile it as a module, choose M here. If unsure, say N. - -+config IP6_NF_TARGET_IMQ -+ tristate "IMQ target support" -+ depends on IP6_NF_MANGLE && IMQ -+ help -+ This option adds a `IMQ' target which is used to specify if and -+ to which imq device packets should get enqueued/dequeued. -+ -+ To compile it as a module, choose M here. If unsure, say N. ++MODULE_ALIAS("ipt_IMQ"); ++MODULE_ALIAS("ip6t_IMQ"); + - config IP6_NF_TARGET_HL - tristate 'HL (hoplimit) target support' - depends on IP6_NF_MANGLE ---- a/net/ipv6/netfilter/Makefile -+++ b/net/ipv6/netfilter/Makefile -@@ -6,6 +6,7 @@ - obj-$(CONFIG_IP6_NF_IPTABLES) += ip6_tables.o - obj-$(CONFIG_IP6_NF_FILTER) += ip6table_filter.o - obj-$(CONFIG_IP6_NF_MANGLE) += ip6table_mangle.o -+obj-$(CONFIG_IP6_NF_TARGET_IMQ) += ip6t_IMQ.o - obj-$(CONFIG_IP6_NF_QUEUE) += ip6_queue.o - obj-$(CONFIG_IP6_NF_RAW) += ip6table_raw.o - obj-$(CONFIG_IP6_NF_SECURITY) += ip6table_security.o ---- a/net/sched/sch_generic.c -+++ b/net/sched/sch_generic.c -@@ -195,6 +195,7 @@ void __qdisc_run(struct Qdisc *q) - - clear_bit(__QDISC_STATE_RUNNING, &q->state); - } -+EXPORT_SYMBOL(__qdisc_run); - - static void dev_watchdog(unsigned long arg) - { diff --git a/target/linux/generic-2.6/patches-2.6.29/151-netfilter_imq_2.6.28.patch b/target/linux/generic-2.6/patches-2.6.29/151-netfilter_imq_2.6.28.patch deleted file mode 100644 index d4ed15129d..0000000000 --- a/target/linux/generic-2.6/patches-2.6.29/151-netfilter_imq_2.6.28.patch +++ /dev/null @@ -1,114 +0,0 @@ ---- a/drivers/net/imq.c -+++ b/drivers/net/imq.c -@@ -178,10 +178,11 @@ static int imq_nf_queue(struct nf_queue_ - struct sk_buff *skb2 = NULL; - struct Qdisc *q; - unsigned int index = entry->skb->imq_flags & IMQ_F_IFMASK; -- int ret = -1; -+ struct netdev_queue *txq; -+ int ret = -EINVAL; - - if (index > numdevs) -- return -1; -+ return ret; - - /* check for imq device by index from cache */ - dev = imq_devs_cache[index]; -@@ -194,7 +195,7 @@ static int imq_nf_queue(struct nf_queue_ - if (!dev) { - /* not found ?!*/ - BUG(); -- return -1; -+ return ret; - } - - imq_devs_cache[index] = dev; -@@ -212,17 +213,19 @@ static int imq_nf_queue(struct nf_queue_ - skb2 = entry->skb; - entry->skb = skb_clone(entry->skb, GFP_ATOMIC); - if (!entry->skb) -- return -1; -+ return -ENOMEM; - } - entry->skb->nf_queue_entry = entry; - - dev->stats.rx_bytes += entry->skb->len; - dev->stats.rx_packets++; - -- spin_lock_bh(&dev->queue_lock); -- q = dev->qdisc; -+ txq = netdev_get_tx_queue(dev, 0); -+ __netif_tx_lock_bh(txq); -+ q = txq->qdisc; -+ - if (q->enqueue) { -- q->enqueue(skb_get(entry->skb), q); -+ qdisc_enqueue_root(skb_get(entry->skb), q); - if (skb_shared(entry->skb)) { - entry->skb->destructor = imq_skb_destructor; - kfree_skb(entry->skb); -@@ -231,7 +234,7 @@ static int imq_nf_queue(struct nf_queue_ - } - if (!test_and_set_bit(1, &priv->tasklet_pending)) - tasklet_schedule(&priv->tasklet); -- spin_unlock_bh(&dev->queue_lock); -+ __netif_tx_unlock_bh(txq); - - if (skb2) - kfree_skb(ret ? 
entry->skb : skb2); -@@ -248,11 +251,13 @@ static void qdisc_run_tasklet(unsigned l - { - struct net_device *dev = (struct net_device *)arg; - struct imq_private *priv = netdev_priv(dev); -+ struct netdev_queue *txq; - -- spin_lock(&dev->queue_lock); -- qdisc_run(dev); -+ netif_tx_lock(dev); -+ txq = netdev_get_tx_queue(dev, 0); -+ qdisc_run(txq->qdisc); - clear_bit(1, &priv->tasklet_pending); -- spin_unlock(&dev->queue_lock); -+ netif_tx_unlock(dev); - } - - static unsigned int imq_nf_hook(unsigned int hook, struct sk_buff *pskb, ---- a/net/ipv4/netfilter/ipt_IMQ.c -+++ b/net/ipv4/netfilter/ipt_IMQ.c -@@ -7,29 +7,23 @@ - #include <linux/netfilter_ipv4/ipt_IMQ.h> - #include <linux/imq.h> - --static unsigned int imq_target(struct sk_buff *pskb, -- const struct net_device *in, -- const struct net_device *out, -- unsigned int hooknum, -- const struct xt_target *target, -- const void *targinfo) -+static unsigned int -+imq_target(struct sk_buff *pskb, -+ const struct xt_target_param *par) - { -- struct ipt_imq_info *mr = (struct ipt_imq_info *)targinfo; -+ struct ipt_imq_info *mr = (struct ipt_imq_info *)par->targinfo; - - pskb->imq_flags = mr->todev | IMQ_F_ENQUEUE; - - return XT_CONTINUE; - } - --static bool imq_checkentry(const char *tablename, -- const void *e, -- const struct xt_target *target, -- void *targinfo, -- unsigned int hook_mask) -+static bool -+imq_checkentry(const struct xt_tgchk_param *par) - { - struct ipt_imq_info *mr; - -- mr = (struct ipt_imq_info *)targinfo; -+ mr = (struct ipt_imq_info *)par->targinfo; - - if (mr->todev > IMQ_MAX_DEVS) { - printk(KERN_WARNING diff --git a/target/linux/generic-2.6/patches-2.6.29/180-netfilter_depends.patch b/target/linux/generic-2.6/patches-2.6.29/180-netfilter_depends.patch index cf402f53dc..3694709ed1 100644 --- a/target/linux/generic-2.6/patches-2.6.29/180-netfilter_depends.patch +++ b/target/linux/generic-2.6/patches-2.6.29/180-netfilter_depends.patch @@ -1,6 +1,6 @@ --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig -@@ -160,7 +160,6 @@ config NF_CONNTRACK_FTP +@@ -160,7 +160,6 @@ config NF_CONNTRACK_H323 tristate "H.323 protocol support" @@ -8,7 +8,7 @@ depends on NETFILTER_ADVANCED help H.323 is a VoIP signalling protocol from ITU-T. As one of the most -@@ -454,7 +453,6 @@ config NETFILTER_XT_TARGET_SECMARK +@@ -466,7 +465,6 @@ config NETFILTER_XT_TARGET_TCPMSS tristate '"TCPMSS" target support' diff --git a/target/linux/generic-2.6/patches-2.6.29/190-netfilter_rtsp.patch b/target/linux/generic-2.6/patches-2.6.29/190-netfilter_rtsp.patch index 62d4cf2e95..7aff0e6ddc 100644 --- a/target/linux/generic-2.6/patches-2.6.29/190-netfilter_rtsp.patch +++ b/target/linux/generic-2.6/patches-2.6.29/190-netfilter_rtsp.patch @@ -294,7 +294,7 @@ +#endif /* _NETFILTER_MIME_H */ --- a/net/ipv4/netfilter/Makefile +++ b/net/ipv4/netfilter/Makefile -@@ -26,6 +26,7 @@ obj-$(CONFIG_NF_NAT_AMANDA) += nf_nat_am +@@ -26,6 +26,7 @@ obj-$(CONFIG_NF_NAT_FTP) += nf_nat_ftp.o obj-$(CONFIG_NF_NAT_H323) += nf_nat_h323.o obj-$(CONFIG_NF_NAT_IRC) += nf_nat_irc.o @@ -304,7 +304,7 @@ obj-$(CONFIG_NF_NAT_SNMP_BASIC) += nf_nat_snmp_basic.o --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig -@@ -267,6 +267,16 @@ config NF_CONNTRACK_TFTP +@@ -267,6 +267,16 @@ To compile it as a module, choose M here. If unsure, say N. 
@@ -323,7 +323,7 @@ select NETFILTER_NETLINK --- a/net/netfilter/Makefile +++ b/net/netfilter/Makefile -@@ -33,6 +33,7 @@ obj-$(CONFIG_NF_CONNTRACK_PPTP) += nf_co +@@ -33,6 +33,7 @@ obj-$(CONFIG_NF_CONNTRACK_SANE) += nf_conntrack_sane.o obj-$(CONFIG_NF_CONNTRACK_SIP) += nf_conntrack_sip.o obj-$(CONFIG_NF_CONNTRACK_TFTP) += nf_conntrack_tftp.o @@ -333,7 +333,7 @@ obj-$(CONFIG_NETFILTER_TPROXY) += nf_tproxy_core.o --- a/net/ipv4/netfilter/Kconfig +++ b/net/ipv4/netfilter/Kconfig -@@ -268,6 +268,11 @@ config NF_NAT_IRC +@@ -257,6 +257,11 @@ depends on NF_CONNTRACK && NF_NAT default NF_NAT && NF_CONNTRACK_IRC diff --git a/target/linux/generic-2.6/patches-2.6.29/205-skb_padding.patch b/target/linux/generic-2.6/patches-2.6.29/205-skb_padding.patch index d87160acfb..c11bffe550 100644 --- a/target/linux/generic-2.6/patches-2.6.29/205-skb_padding.patch +++ b/target/linux/generic-2.6/patches-2.6.29/205-skb_padding.patch @@ -1,6 +1,6 @@ --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h -@@ -1292,9 +1292,12 @@ static inline int skb_network_offset(con +@@ -1306,9 +1306,12 @@ * * Various parts of the networking layer expect at least 16 bytes of * headroom, you should not reduce this. diff --git a/target/linux/generic-2.6/patches-2.6.30/150-netfilter_imq.patch b/target/linux/generic-2.6/patches-2.6.30/150-netfilter_imq.patch index af85c65513..d1248ac154 100644 --- a/target/linux/generic-2.6/patches-2.6.30/150-netfilter_imq.patch +++ b/target/linux/generic-2.6/patches-2.6.30/150-netfilter_imq.patch @@ -1,6 +1,6 @@ --- /dev/null +++ b/drivers/net/imq.c -@@ -0,0 +1,474 @@ +@@ -0,0 +1,571 @@ +/* + * Pseudo-driver for the intermediate queue device. + * @@ -51,10 +51,28 @@ + * + * + * 2008/06/17 - 2.6.25 - Changed imq.c to use qdisc_run() instead -+ * of qdisc_restart() and moved qdisc_run() to tasklet to avoid ++ * of qdisc_restart() and moved qdisc_run() to tasklet to avoid + * recursive locking. New initialization routines to fix 'rmmod' not + * working anymore. Used code from ifb.c. (Jussi Kivilinna) + * ++ * 2008/08/06 - 2.6.26 - (JK) ++ * - Replaced tasklet with 'netif_schedule()'. ++ * - Cleaned up and added comments for imq_nf_queue(). ++ * ++ * 2009/04/12 ++ * - Add skb_save_cb/skb_restore_cb helper functions for backuping ++ * control buffer. This is needed because qdisc-layer on kernels ++ * 2.6.27 and newer overwrite control buffer. (Jussi Kivilinna) ++ * - Add better locking for IMQ device. Hopefully this will solve ++ * SMP issues. (Jussi Kivilinna) ++ * - Port to 2.6.27 ++ * - Port to 2.6.28 ++ * - Port to 2.6.29 + fix rmmod not working ++ * ++ * 2009/04/20 - (Jussi Kivilinna) ++ * - Use netdevice feature flags to avoid extra packet handling ++ * by core networking layer and possibly increase performance. ++ * + * Also, many thanks to pablo Sebastian Greco for making the initial + * patch and to those who helped the testing. 
+ * @@ -64,8 +82,10 @@ +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/moduleparam.h> ++#include <linux/list.h> +#include <linux/skbuff.h> +#include <linux/netdevice.h> ++#include <linux/etherdevice.h> +#include <linux/rtnetlink.h> +#include <linux/if_arp.h> +#include <linux/netfilter.h> @@ -77,11 +97,6 @@ +#include <net/pkt_sched.h> +#include <net/netfilter/nf_queue.h> + -+struct imq_private { -+ struct tasklet_struct tasklet; -+ unsigned long tasklet_pending; -+}; -+ +static nf_hookfn imq_nf_hook; + +static struct nf_hook_ops imq_ingress_ipv4 = { @@ -140,8 +155,11 @@ +static unsigned int numdevs = IMQ_MAX_DEVS; +#endif + ++static DEFINE_SPINLOCK(imq_nf_queue_lock); ++ +static struct net_device *imq_devs_cache[IMQ_MAX_DEVS]; + ++ +static struct net_device_stats *imq_get_stats(struct net_device *dev) +{ + return &dev->stats; @@ -153,12 +171,35 @@ + struct nf_queue_entry *entry = skb->nf_queue_entry; + + if (entry) { -+ if (entry->indev) -+ dev_put(entry->indev); -+ if (entry->outdev) -+ dev_put(entry->outdev); ++ nf_queue_entry_release_refs(entry); + kfree(entry); + } ++ ++ skb_restore_cb(skb); /* kfree backup */ ++} ++ ++static void imq_nf_reinject(struct nf_queue_entry *entry, unsigned int verdict) ++{ ++ int status; ++ ++ if (!entry->next_outfn) { ++ spin_lock_bh(&imq_nf_queue_lock); ++ nf_reinject(entry, verdict); ++ spin_unlock_bh(&imq_nf_queue_lock); ++ return; ++ } ++ ++ rcu_read_lock(); ++ local_bh_disable(); ++ status = entry->next_outfn(entry, entry->next_queuenum); ++ local_bh_enable(); ++ if (status < 0) { ++ nf_queue_entry_release_refs(entry); ++ kfree_skb(entry->skb); ++ kfree(entry); ++ } ++ ++ rcu_read_unlock(); +} + +static int imq_dev_xmit(struct sk_buff *skb, struct net_device *dev) @@ -169,26 +210,35 @@ + skb->imq_flags = 0; + skb->destructor = NULL; + ++ skb_restore_cb(skb); /* restore skb->cb */ ++ + dev->trans_start = jiffies; -+ nf_reinject(skb->nf_queue_entry, NF_ACCEPT); ++ imq_nf_reinject(skb->nf_queue_entry, NF_ACCEPT); + return 0; +} + +static int imq_nf_queue(struct nf_queue_entry *entry, unsigned queue_num) +{ + struct net_device *dev; -+ struct imq_private *priv; -+ struct sk_buff *skb2 = NULL; ++ struct sk_buff *skb_orig, *skb, *skb_shared; + struct Qdisc *q; -+ unsigned int index = entry->skb->imq_flags & IMQ_F_IFMASK; -+ int ret = -1; -+ -+ if (index > numdevs) -+ return -1; ++ struct netdev_queue *txq; ++ int users, index; ++ int retval = -EINVAL; ++ ++ index = entry->skb->imq_flags & IMQ_F_IFMASK; ++ if (unlikely(index > numdevs - 1)) { ++ if (net_ratelimit()) ++ printk(KERN_WARNING ++ "IMQ: invalid device specified, highest is %u\n", ++ numdevs - 1); ++ retval = -EINVAL; ++ goto out; ++ } + + /* check for imq device by index from cache */ + dev = imq_devs_cache[index]; -+ if (!dev) { ++ if (unlikely(!dev)) { + char buf[8]; + + /* get device by name and cache result */ @@ -197,49 +247,90 @@ + if (!dev) { + /* not found ?!*/ + BUG(); -+ return -1; ++ retval = -ENODEV; ++ goto out; + } + + imq_devs_cache[index] = dev; ++ dev_put(dev); + } + -+ priv = netdev_priv(dev); -+ if (!(dev->flags & IFF_UP)) { ++ if (unlikely(!(dev->flags & IFF_UP))) { + entry->skb->imq_flags = 0; -+ nf_reinject(entry, NF_ACCEPT); -+ return 0; ++ imq_nf_reinject(entry, NF_ACCEPT); ++ retval = 0; ++ goto out; + } + dev->last_rx = jiffies; + -+ if (entry->skb->destructor) { -+ skb2 = entry->skb; -+ entry->skb = skb_clone(entry->skb, GFP_ATOMIC); -+ if (!entry->skb) -+ return -1; ++ skb = entry->skb; ++ skb_orig = NULL; ++ ++ /* skb has owner? 
=> make clone */ ++ if (unlikely(skb->destructor)) { ++ skb_orig = skb; ++ skb = skb_clone(skb, GFP_ATOMIC); ++ if (!skb) { ++ retval = -ENOMEM; ++ goto out; ++ } ++ entry->skb = skb; + } -+ entry->skb->nf_queue_entry = entry; + -+ dev->stats.rx_bytes += entry->skb->len; ++ skb->nf_queue_entry = entry; ++ ++ dev->stats.rx_bytes += skb->len; + dev->stats.rx_packets++; + -+ spin_lock_bh(&dev->queue_lock); -+ q = dev->qdisc; -+ if (q->enqueue) { -+ q->enqueue(skb_get(entry->skb), q); -+ if (skb_shared(entry->skb)) { -+ entry->skb->destructor = imq_skb_destructor; -+ kfree_skb(entry->skb); -+ ret = 0; -+ } -+ } -+ if (!test_and_set_bit(1, &priv->tasklet_pending)) -+ tasklet_schedule(&priv->tasklet); -+ spin_unlock_bh(&dev->queue_lock); ++ txq = dev_pick_tx(dev, skb); + -+ if (skb2) -+ kfree_skb(ret ? entry->skb : skb2); ++ q = rcu_dereference(txq->qdisc); ++ if (unlikely(!q->enqueue)) ++ goto packet_not_eaten_by_imq_dev; + -+ return ret; ++ spin_lock_bh(qdisc_lock(q)); ++ ++ users = atomic_read(&skb->users); ++ ++ skb_shared = skb_get(skb); /* increase reference count by one */ ++ skb_save_cb(skb_shared); /* backup skb->cb, as qdisc layer will ++ overwrite it */ ++ qdisc_enqueue_root(skb_shared, q); /* might kfree_skb */ ++ ++ if (likely(atomic_read(&skb_shared->users) == users + 1)) { ++ kfree_skb(skb_shared); /* decrease reference count by one */ ++ ++ skb->destructor = &imq_skb_destructor; ++ ++ /* cloned? */ ++ if (skb_orig) ++ kfree_skb(skb_orig); /* free original */ ++ ++ spin_unlock_bh(qdisc_lock(q)); ++ ++ /* schedule qdisc dequeue */ ++ __netif_schedule(q); ++ ++ retval = 0; ++ goto out; ++ } else { ++ skb_restore_cb(skb_shared); /* restore skb->cb */ ++ /* qdisc dropped packet and decreased skb reference count of ++ * skb, so we don't really want to and try refree as that would ++ * actually destroy the skb. */ ++ spin_unlock_bh(qdisc_lock(q)); ++ goto packet_not_eaten_by_imq_dev; ++ } ++ ++packet_not_eaten_by_imq_dev: ++ /* cloned? 
restore original */ ++ if (skb_orig) { ++ kfree_skb(skb); ++ entry->skb = skb_orig; ++ } ++ retval = -1; ++out: ++ return retval; +} + +static struct nf_queue_handler nfqh = { @@ -247,17 +338,6 @@ + .outfn = imq_nf_queue, +}; + -+static void qdisc_run_tasklet(unsigned long arg) -+{ -+ struct net_device *dev = (struct net_device *)arg; -+ struct imq_private *priv = netdev_priv(dev); -+ -+ spin_lock(&dev->queue_lock); -+ qdisc_run(dev); -+ clear_bit(1, &priv->tasklet_pending); -+ spin_unlock(&dev->queue_lock); -+} -+ +static unsigned int imq_nf_hook(unsigned int hook, struct sk_buff *pskb, + const struct net_device *indev, + const struct net_device *outdev, @@ -271,87 +351,98 @@ + +static int imq_close(struct net_device *dev) +{ -+ struct imq_private *priv = netdev_priv(dev); -+ -+ tasklet_kill(&priv->tasklet); + netif_stop_queue(dev); -+ + return 0; +} + +static int imq_open(struct net_device *dev) +{ -+ struct imq_private *priv = netdev_priv(dev); -+ -+ tasklet_init(&priv->tasklet, qdisc_run_tasklet, (unsigned long)dev); + netif_start_queue(dev); -+ + return 0; +} + ++static const struct net_device_ops imq_netdev_ops = { ++ .ndo_open = imq_open, ++ .ndo_stop = imq_close, ++ .ndo_start_xmit = imq_dev_xmit, ++ .ndo_get_stats = imq_get_stats, ++}; ++ +static void imq_setup(struct net_device *dev) +{ -+ dev->hard_start_xmit = imq_dev_xmit; -+ dev->open = imq_open; -+ dev->get_stats = imq_get_stats; -+ dev->stop = imq_close; ++ dev->netdev_ops = &imq_netdev_ops; + dev->type = ARPHRD_VOID; + dev->mtu = 16000; + dev->tx_queue_len = 11000; + dev->flags = IFF_NOARP; ++ dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | ++ NETIF_F_GSO | NETIF_F_HW_CSUM | ++ NETIF_F_HIGHDMA; ++} ++ ++static int imq_validate(struct nlattr *tb[], struct nlattr *data[]) ++{ ++ int ret = 0; ++ ++ if (tb[IFLA_ADDRESS]) { ++ if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) { ++ ret = -EINVAL; ++ goto end; ++ } ++ if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) { ++ ret = -EADDRNOTAVAIL; ++ goto end; ++ } ++ } ++ return 0; ++end: ++ printk(KERN_WARNING "IMQ: imq_validate failed (%d)\n", ret); ++ return ret; +} + +static struct rtnl_link_ops imq_link_ops __read_mostly = { + .kind = "imq", -+ .priv_size = sizeof(struct imq_private), ++ .priv_size = 0, + .setup = imq_setup, ++ .validate = imq_validate, +}; + +static int __init imq_init_hooks(void) +{ + int err; + -+ err = nf_register_queue_handler(PF_INET, &nfqh); -+ if (err) -+ goto err1; ++ nf_register_queue_imq_handler(&nfqh); + + err = nf_register_hook(&imq_ingress_ipv4); + if (err) -+ goto err2; ++ goto err1; + + err = nf_register_hook(&imq_egress_ipv4); + if (err) -+ goto err3; ++ goto err2; + +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) -+ err = nf_register_queue_handler(PF_INET6, &nfqh); -+ if (err) -+ goto err4; -+ + err = nf_register_hook(&imq_ingress_ipv6); + if (err) -+ goto err5; ++ goto err3; + + err = nf_register_hook(&imq_egress_ipv6); + if (err) -+ goto err6; ++ goto err4; +#endif + + return 0; + +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) -+err6: -+ nf_unregister_hook(&imq_ingress_ipv6); -+err5: -+ nf_unregister_queue_handler(PF_INET6, &nfqh); +err4: ++ nf_unregister_hook(&imq_ingress_ipv6); ++err3: + nf_unregister_hook(&imq_egress_ipv4); +#endif -+err3: -+ nf_unregister_hook(&imq_ingress_ipv4); +err2: -+ nf_unregister_queue_handler(PF_INET, &nfqh); ++ nf_unregister_hook(&imq_ingress_ipv4); +err1: ++ nf_unregister_queue_imq_handler(); + return err; +} + @@ -360,7 +451,7 @@ + struct net_device *dev; + int ret; + -+ dev = 
alloc_netdev(sizeof(struct imq_private), "imq%d", imq_setup); ++ dev = alloc_netdev(0, "imq%d", imq_setup); + if (!dev) + return -ENOMEM; + @@ -383,7 +474,7 @@ +{ + int err, i; + -+ if (!numdevs || numdevs > IMQ_MAX_DEVS) { ++ if (numdevs < 1 || numdevs > IMQ_MAX_DEVS) { + printk(KERN_ERR "IMQ: numdevs has to be betweed 1 and %u\n", + IMQ_MAX_DEVS); + return -EINVAL; @@ -408,6 +499,12 @@ +{ + int err; + ++#if defined(CONFIG_IMQ_NUM_DEVS) ++ BUILD_BUG_ON(CONFIG_IMQ_NUM_DEVS > 16); ++ BUILD_BUG_ON(CONFIG_IMQ_NUM_DEVS < 2); ++ BUILD_BUG_ON(CONFIG_IMQ_NUM_DEVS - 1 > IMQ_F_IFMASK); ++#endif ++ + err = imq_init_devs(); + if (err) { + printk(KERN_ERR "IMQ: Error trying imq_init_devs(net)\n"); @@ -443,11 +540,11 @@ +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) + nf_unregister_hook(&imq_ingress_ipv6); + nf_unregister_hook(&imq_egress_ipv6); -+ nf_unregister_queue_handler(PF_INET6, &nfqh); +#endif + nf_unregister_hook(&imq_ingress_ipv4); + nf_unregister_hook(&imq_egress_ipv4); -+ nf_unregister_queue_handler(PF_INET, &nfqh); ++ ++ nf_unregister_queue_imq_handler(); +} + +static void __exit imq_cleanup_devs(void) @@ -477,7 +574,7 @@ + --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig -@@ -119,6 +119,129 @@ config EQUALIZER +@@ -119,6 +119,129 @@ To compile this driver as a module, choose M here: the module will be called eql. If unsure, say N. @@ -609,7 +706,7 @@ select CRC32 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile -@@ -152,6 +152,7 @@ obj-$(CONFIG_SLHC) += slhc.o +@@ -152,6 +152,7 @@ obj-$(CONFIG_XEN_NETDEV_FRONTEND) += xen-netfront.o obj-$(CONFIG_DUMMY) += dummy.o @@ -619,52 +716,102 @@ obj-$(CONFIG_DE600) += de600.o --- /dev/null +++ b/include/linux/imq.h -@@ -0,0 +1,9 @@ +@@ -0,0 +1,13 @@ +#ifndef _IMQ_H +#define _IMQ_H + -+#define IMQ_MAX_DEVS 16 ++/* IFMASK (16 device indexes, 0 to 15) and flag(s) fit in 5 bits */ ++#define IMQ_F_BITS 5 ++ ++#define IMQ_F_IFMASK 0x0f ++#define IMQ_F_ENQUEUE 0x10 + -+#define IMQ_F_IFMASK 0x7f -+#define IMQ_F_ENQUEUE 0x80 ++#define IMQ_MAX_DEVS (IMQ_F_IFMASK + 1) + +#endif /* _IMQ_H */ ++ --- /dev/null +++ b/include/linux/netfilter_ipv4/ipt_IMQ.h -@@ -0,0 +1,8 @@ +@@ -0,0 +1,10 @@ +#ifndef _IPT_IMQ_H +#define _IPT_IMQ_H + -+struct ipt_imq_info { -+ unsigned int todev; /* target imq device */ -+}; ++/* Backwards compatibility for old userspace */ ++#include <linux/netfilter/xt_IMQ.h> ++ ++#define ipt_imq_info xt_imq_info + +#endif /* _IPT_IMQ_H */ ++ --- /dev/null +++ b/include/linux/netfilter_ipv6/ip6t_IMQ.h -@@ -0,0 +1,8 @@ +@@ -0,0 +1,10 @@ +#ifndef _IP6T_IMQ_H +#define _IP6T_IMQ_H + -+struct ip6t_imq_info { -+ unsigned int todev; /* target imq device */ -+}; ++/* Backwards compatibility for old userspace */ ++#include <linux/netfilter/xt_IMQ.h> ++ ++#define ip6t_imq_info xt_imq_info + +#endif /* _IP6T_IMQ_H */ ++ --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h -@@ -363,6 +363,10 @@ struct sk_buff { +@@ -28,6 +28,9 @@ + #include <linux/rcupdate.h> + #include <linux/dmaengine.h> + #include <linux/hrtimer.h> ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++#include <linux/imq.h> ++#endif + + /* Don't change this without changing skb_csum_unnecessary! */ + #define CHECKSUM_NONE 0 +@@ -333,6 +336,9 @@ + * first. This is owned by whoever has the skb queued ATM. 
+ */ + char cb[48]; ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++ void *cb_next; ++#endif + + unsigned int len, + data_len; +@@ -363,6 +369,9 @@ struct nf_conntrack *nfct; struct sk_buff *nfct_reasm; #endif +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) -+ unsigned char imq_flags; + struct nf_queue_entry *nf_queue_entry; +#endif #ifdef CONFIG_BRIDGE_NETFILTER struct nf_bridge_info *nf_bridge; #endif -@@ -1931,6 +1935,10 @@ static inline void __nf_copy(struct sk_b +@@ -383,6 +392,9 @@ + __u8 requeue:1; + #endif + /* 0/13/14 bit hole */ ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++ __u8 imq_flags:IMQ_F_BITS; ++#endif + + #ifdef CONFIG_NET_DMA + dma_cookie_t dma_cookie; +@@ -423,6 +435,12 @@ + enum dma_data_direction dir); + #endif + ++ ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++extern int skb_save_cb(struct sk_buff *skb); ++extern int skb_restore_cb(struct sk_buff *skb); ++#endif ++ + extern void kfree_skb(struct sk_buff *skb); + extern void consume_skb(struct sk_buff *skb); + extern void __kfree_skb(struct sk_buff *skb); +@@ -1931,6 +1949,10 @@ dst->nfct_reasm = src->nfct_reasm; nf_conntrack_get_reasm(src->nfct_reasm); #endif @@ -687,7 +834,7 @@ #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/stat.h> -@@ -1678,7 +1681,11 @@ int dev_hard_start_xmit(struct sk_buff * +@@ -1678,7 +1681,11 @@ int rc; if (likely(!skb->next)) { @@ -700,215 +847,414 @@ dev_queue_xmit_nit(skb, dev); if (netif_needs_gso(dev, skb)) { +@@ -1748,8 +1755,7 @@ + } + EXPORT_SYMBOL(skb_tx_hash); + +-static struct netdev_queue *dev_pick_tx(struct net_device *dev, +- struct sk_buff *skb) ++struct netdev_queue *dev_pick_tx(struct net_device *dev, struct sk_buff *skb) + { + const struct net_device_ops *ops = dev->netdev_ops; + u16 queue_index = 0; +@@ -1762,6 +1768,7 @@ + skb_set_queue_mapping(skb, queue_index); + return netdev_get_tx_queue(dev, queue_index); + } ++EXPORT_SYMBOL(dev_pick_tx); + + /** + * dev_queue_xmit - transmit a buffer +--- a/include/linux/netdevice.h ++++ b/include/linux/netdevice.h +@@ -1102,6 +1102,7 @@ + extern int dev_open(struct net_device *dev); + extern int dev_close(struct net_device *dev); + extern void dev_disable_lro(struct net_device *dev); ++extern struct netdev_queue *dev_pick_tx(struct net_device *dev, struct sk_buff *skb); + extern int dev_queue_xmit(struct sk_buff *skb); + extern int register_netdevice(struct net_device *dev); + extern void unregister_netdevice(struct net_device *dev); --- /dev/null -+++ b/net/ipv4/netfilter/ipt_IMQ.c -@@ -0,0 +1,69 @@ -+/* -+ * This target marks packets to be enqueued to an imq device -+ */ -+#include <linux/module.h> -+#include <linux/skbuff.h> -+#include <linux/netfilter_ipv4/ip_tables.h> -+#include <linux/netfilter_ipv4/ipt_IMQ.h> -+#include <linux/imq.h> ++++ b/include/linux/netfilter/xt_IMQ.h +@@ -0,0 +1,9 @@ ++#ifndef _XT_IMQ_H ++#define _XT_IMQ_H + -+static unsigned int imq_target(struct sk_buff *pskb, -+ const struct net_device *in, -+ const struct net_device *out, -+ unsigned int hooknum, -+ const struct xt_target *target, -+ const void *targinfo) ++struct xt_imq_info { ++ unsigned int todev; /* target imq device */ ++}; ++ ++#endif /* _XT_IMQ_H */ ++ +--- a/include/net/netfilter/nf_queue.h ++++ b/include/net/netfilter/nf_queue.h +@@ -13,6 +13,12 @@ + struct net_device *indev; + struct net_device *outdev; + int (*okfn)(struct sk_buff *); ++ ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++ int (*next_outfn)(struct nf_queue_entry *entry, ++ unsigned int queuenum); ++ 
unsigned int next_queuenum; ++#endif + }; + + #define nf_queue_entry_reroute(x) ((void *)x + sizeof(struct nf_queue_entry)) +@@ -30,5 +36,11 @@ + const struct nf_queue_handler *qh); + extern void nf_unregister_queue_handlers(const struct nf_queue_handler *qh); + extern void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict); ++extern void nf_queue_entry_release_refs(struct nf_queue_entry *entry); ++ ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++extern void nf_register_queue_imq_handler(const struct nf_queue_handler *qh); ++extern void nf_unregister_queue_imq_handler(void); ++#endif + + #endif /* _NF_QUEUE_H */ +--- a/net/core/skbuff.c ++++ b/net/core/skbuff.c +@@ -71,6 +71,9 @@ + + static struct kmem_cache *skbuff_head_cache __read_mostly; + static struct kmem_cache *skbuff_fclone_cache __read_mostly; ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++static struct kmem_cache *skbuff_cb_store_cache __read_mostly; ++#endif + + static void sock_pipe_buf_release(struct pipe_inode_info *pipe, + struct pipe_buffer *buf) +@@ -90,6 +93,80 @@ + return 1; + } + ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++/* Control buffer save/restore for IMQ devices */ ++struct skb_cb_table { ++ void *cb_next; ++ atomic_t refcnt; ++ char cb[48]; ++}; ++ ++static DEFINE_SPINLOCK(skb_cb_store_lock); ++ ++int skb_save_cb(struct sk_buff *skb) +{ -+ struct ipt_imq_info *mr = (struct ipt_imq_info *)targinfo; ++ struct skb_cb_table *next; + -+ pskb->imq_flags = mr->todev | IMQ_F_ENQUEUE; ++ next = kmem_cache_alloc(skbuff_cb_store_cache, GFP_ATOMIC); ++ if (!next) ++ return -ENOMEM; + -+ return XT_CONTINUE; ++ BUILD_BUG_ON(sizeof(skb->cb) != sizeof(next->cb)); ++ ++ memcpy(next->cb, skb->cb, sizeof(skb->cb)); ++ next->cb_next = skb->cb_next; ++ ++ atomic_set(&next->refcnt, 1); ++ ++ skb->cb_next = next; ++ return 0; +} ++EXPORT_SYMBOL(skb_save_cb); + -+static bool imq_checkentry(const char *tablename, -+ const void *e, -+ const struct xt_target *target, -+ void *targinfo, -+ unsigned int hook_mask) ++int skb_restore_cb(struct sk_buff *skb) +{ -+ struct ipt_imq_info *mr; ++ struct skb_cb_table *next; + -+ mr = (struct ipt_imq_info *)targinfo; -+ -+ if (mr->todev > IMQ_MAX_DEVS) { -+ printk(KERN_WARNING -+ "IMQ: invalid device specified, highest is %u\n", -+ IMQ_MAX_DEVS); ++ if (!skb->cb_next) + return 0; ++ ++ next = skb->cb_next; ++ ++ BUILD_BUG_ON(sizeof(skb->cb) != sizeof(next->cb)); ++ ++ memcpy(skb->cb, next->cb, sizeof(skb->cb)); ++ skb->cb_next = next->cb_next; ++ ++ spin_lock(&skb_cb_store_lock); ++ ++ if (atomic_dec_and_test(&next->refcnt)) { ++ kmem_cache_free(skbuff_cb_store_cache, next); + } + -+ return 1; ++ spin_unlock(&skb_cb_store_lock); ++ ++ return 0; +} ++EXPORT_SYMBOL(skb_restore_cb); + -+static struct xt_target ipt_imq_reg = { -+ .name = "IMQ", -+ .family = AF_INET, -+ .target = imq_target, -+ .targetsize = sizeof(struct ipt_imq_info), -+ .checkentry = imq_checkentry, -+ .me = THIS_MODULE, -+ .table = "mangle" -+}; ++static void skb_copy_stored_cb(struct sk_buff *new, struct sk_buff *old) ++{ ++ struct skb_cb_table *next; ++ ++ if (!old->cb_next) { ++ new->cb_next = 0; ++ return; ++ } ++ ++ spin_lock(&skb_cb_store_lock); ++ ++ next = old->cb_next; ++ atomic_inc(&next->refcnt); ++ new->cb_next = next; ++ ++ spin_unlock(&skb_cb_store_lock); ++} ++#endif + + /* Pipe buffer operations for a socket. 
*/ + static struct pipe_buf_operations sock_pipe_buf_ops = { +@@ -389,6 +466,15 @@ + WARN_ON(in_irq()); + skb->destructor(skb); + } ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++ /* This should not happen. When it does, avoid memleak by restoring ++ the chain of cb-backups. */ ++ while(skb->cb_next != NULL) { ++ printk(KERN_WARNING "kfree_skb: skb->cb_next: %08x\n", ++ skb->cb_next); ++ skb_restore_cb(skb); ++ } ++#endif + #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) + nf_conntrack_put(skb->nfct); + nf_conntrack_put_reasm(skb->nfct_reasm); +@@ -524,6 +610,9 @@ + new->sp = secpath_get(old->sp); + #endif + memcpy(new->cb, old->cb, sizeof(old->cb)); ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++ skb_copy_stored_cb(new, old); ++#endif + new->csum_start = old->csum_start; + new->csum_offset = old->csum_offset; + new->local_df = old->local_df; +@@ -2766,6 +2855,13 @@ + 0, + SLAB_HWCACHE_ALIGN|SLAB_PANIC, + NULL); ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++ skbuff_cb_store_cache = kmem_cache_create("skbuff_cb_store_cache", ++ sizeof(struct skb_cb_table), ++ 0, ++ SLAB_HWCACHE_ALIGN|SLAB_PANIC, ++ NULL); ++#endif + } + + /** +--- a/net/netfilter/Kconfig ++++ b/net/netfilter/Kconfig +@@ -396,6 +396,18 @@ + For more information on the LEDs available on your system, see + Documentation/leds-class.txt + ++config NETFILTER_XT_TARGET_IMQ ++ tristate '"IMQ" target support' ++ depends on NETFILTER_XTABLES ++ depends on IP_NF_MANGLE || IP6_NF_MANGLE ++ select IMQ ++ default m if NETFILTER_ADVANCED=n ++ help ++ This option adds a `IMQ' target which is used to specify if and ++ to which imq device packets should get enqueued/dequeued. + -+static int __init init(void) ++ To compile it as a module, choose M here. If unsure, say N. ++ + config NETFILTER_XT_TARGET_MARK + tristate '"MARK" target support' + default m if NETFILTER_ADVANCED=n +--- a/net/netfilter/Makefile ++++ b/net/netfilter/Makefile +@@ -46,6 +46,7 @@ + obj-$(CONFIG_NETFILTER_XT_TARGET_CONNSECMARK) += xt_CONNSECMARK.o + obj-$(CONFIG_NETFILTER_XT_TARGET_DSCP) += xt_DSCP.o + obj-$(CONFIG_NETFILTER_XT_TARGET_HL) += xt_HL.o ++obj-$(CONFIG_NETFILTER_XT_TARGET_IMQ) += xt_IMQ.o + obj-$(CONFIG_NETFILTER_XT_TARGET_LED) += xt_LED.o + obj-$(CONFIG_NETFILTER_XT_TARGET_MARK) += xt_MARK.o + obj-$(CONFIG_NETFILTER_XT_TARGET_NFLOG) += xt_NFLOG.o +--- a/net/netfilter/nf_queue.c ++++ b/net/netfilter/nf_queue.c +@@ -20,6 +20,26 @@ + + static DEFINE_MUTEX(queue_handler_mutex); + ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++static const struct nf_queue_handler *queue_imq_handler; ++ ++void nf_register_queue_imq_handler(const struct nf_queue_handler *qh) +{ -+ return xt_register_target(&ipt_imq_reg); ++ mutex_lock(&queue_handler_mutex); ++ rcu_assign_pointer(queue_imq_handler, qh); ++ mutex_unlock(&queue_handler_mutex); +} ++EXPORT_SYMBOL(nf_register_queue_imq_handler); + -+static void __exit fini(void) ++void nf_unregister_queue_imq_handler(void) +{ -+ xt_unregister_target(&ipt_imq_reg); ++ mutex_lock(&queue_handler_mutex); ++ rcu_assign_pointer(queue_imq_handler, NULL); ++ mutex_unlock(&queue_handler_mutex); +} ++EXPORT_SYMBOL(nf_unregister_queue_imq_handler); ++#endif ++ + /* return EBUSY when somebody else is registered, return EEXIST if the + * same handler is registered, return 0 in case of success. 
*/ + int nf_register_queue_handler(u_int8_t pf, const struct nf_queue_handler *qh) +@@ -80,7 +100,7 @@ + } + EXPORT_SYMBOL_GPL(nf_unregister_queue_handlers); + +-static void nf_queue_entry_release_refs(struct nf_queue_entry *entry) ++void nf_queue_entry_release_refs(struct nf_queue_entry *entry) + { + /* Release those devices we held, or Alexey will kill me. */ + if (entry->indev) +@@ -100,6 +120,7 @@ + /* Drop reference to owner of hook which queued us. */ + module_put(entry->elem->owner); + } ++EXPORT_SYMBOL_GPL(nf_queue_entry_release_refs); + + /* + * Any packet that leaves via this function must come back +@@ -121,12 +142,26 @@ + #endif + const struct nf_afinfo *afinfo; + const struct nf_queue_handler *qh; ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++ const struct nf_queue_handler *qih = NULL; ++#endif + + /* QUEUE == DROP if noone is waiting, to be safe. */ + rcu_read_lock(); + + qh = rcu_dereference(queue_handler[pf]); ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) ++ if (pf == PF_INET || pf == PF_INET6) ++#else ++ if (pf == PF_INET) ++#endif ++ qih = rcu_dereference(queue_imq_handler); + -+module_init(init); -+module_exit(fini); ++ if (!qh && !qih) ++#else /* !IMQ */ + if (!qh) ++#endif + goto err_unlock; + + afinfo = nf_get_afinfo(pf); +@@ -145,6 +180,10 @@ + .indev = indev, + .outdev = outdev, + .okfn = okfn, ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++ .next_outfn = qh ? qh->outfn : NULL, ++ .next_queuenum = queuenum, ++#endif + }; + + /* If it's going away, ignore hook. */ +@@ -170,8 +209,19 @@ + } + #endif + afinfo->saveroute(skb, entry); + -+MODULE_AUTHOR("http://www.linuximq.net"); -+MODULE_DESCRIPTION("Pseudo-driver for the intermediate queue device. See http://www.linuximq.net/ for more information."); -+MODULE_LICENSE("GPL"); ---- a/net/ipv4/netfilter/Kconfig -+++ b/net/ipv4/netfilter/Kconfig -@@ -112,6 +112,17 @@ config IP_NF_FILTER ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++ if (qih) { ++ status = qih->outfn(entry, queuenum); ++ goto imq_skip_queue; ++ } ++#endif ++ + status = qh->outfn(entry, queuenum); - To compile it as a module, choose M here. If unsure, say N. ++#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE) ++imq_skip_queue: ++#endif + rcu_read_unlock(); -+config IP_NF_TARGET_IMQ -+ tristate "IMQ target support" -+ depends on IP_NF_MANGLE && IMQ -+ help -+ This option adds a `IMQ' target which is used to specify if and -+ to which IMQ device packets should get enqueued/dequeued. -+ -+ For more information visit: http://www.linuximq.net/ -+ -+ To compile it as a module, choose M here. If unsure, say N. 
-+ - config IP_NF_TARGET_REJECT - tristate "REJECT target support" - depends on IP_NF_FILTER ---- a/net/ipv4/netfilter/Makefile -+++ b/net/ipv4/netfilter/Makefile -@@ -57,6 +57,7 @@ obj-$(CONFIG_IP_NF_MATCH_SET) += ipt_set - obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o - obj-$(CONFIG_IP_NF_TARGET_ECN) += ipt_ECN.o - obj-$(CONFIG_IP_NF_TARGET_LOG) += ipt_LOG.o -+obj-$(CONFIG_IP_NF_TARGET_IMQ) += ipt_IMQ.o - obj-$(CONFIG_IP_NF_TARGET_MASQUERADE) += ipt_MASQUERADE.o - obj-$(CONFIG_IP_NF_TARGET_NETMAP) += ipt_NETMAP.o - obj-$(CONFIG_IP_NF_TARGET_REDIRECT) += ipt_REDIRECT.o + if (status < 0) { --- /dev/null -+++ b/net/ipv6/netfilter/ip6t_IMQ.c -@@ -0,0 +1,69 @@ ++++ b/net/netfilter/xt_IMQ.c +@@ -0,0 +1,73 @@ +/* + * This target marks packets to be enqueued to an imq device + */ +#include <linux/module.h> +#include <linux/skbuff.h> -+#include <linux/netfilter_ipv6/ip6_tables.h> -+#include <linux/netfilter_ipv6/ip6t_IMQ.h> ++#include <linux/netfilter/x_tables.h> ++#include <linux/netfilter/xt_IMQ.h> +#include <linux/imq.h> + +static unsigned int imq_target(struct sk_buff *pskb, -+ const struct net_device *in, -+ const struct net_device *out, -+ unsigned int hooknum, -+ const struct xt_target *target, -+ const void *targinfo) ++ const struct xt_target_param *par) +{ -+ struct ip6t_imq_info *mr = (struct ip6t_imq_info *)targinfo; ++ const struct xt_imq_info *mr = par->targinfo; + -+ pskb->imq_flags = mr->todev | IMQ_F_ENQUEUE; ++ pskb->imq_flags = (mr->todev & IMQ_F_IFMASK) | IMQ_F_ENQUEUE; + + return XT_CONTINUE; +} + -+static bool imq_checkentry(const char *tablename, -+ const void *entry, -+ const struct xt_target *target, -+ void *targinfo, -+ unsigned int hook_mask) ++static bool imq_checkentry(const struct xt_tgchk_param *par) +{ -+ struct ip6t_imq_info *mr; -+ -+ mr = (struct ip6t_imq_info *)targinfo; ++ struct xt_imq_info *mr = par->targinfo; + -+ if (mr->todev > IMQ_MAX_DEVS) { ++ if (mr->todev > IMQ_MAX_DEVS - 1) { + printk(KERN_WARNING + "IMQ: invalid device specified, highest is %u\n", -+ IMQ_MAX_DEVS); ++ IMQ_MAX_DEVS - 1); + return 0; + } + + return 1; +} + -+static struct xt_target ip6t_imq_reg = { -+ .name = "IMQ", -+ .family = AF_INET6, -+ .target = imq_target, -+ .targetsize = sizeof(struct ip6t_imq_info), -+ .table = "mangle", -+ .checkentry = imq_checkentry, -+ .me = THIS_MODULE ++static struct xt_target xt_imq_reg[] __read_mostly = { ++ { ++ .name = "IMQ", ++ .family = AF_INET, ++ .checkentry = imq_checkentry, ++ .target = imq_target, ++ .targetsize = sizeof(struct xt_imq_info), ++ .table = "mangle", ++ .me = THIS_MODULE ++ }, ++ { ++ .name = "IMQ", ++ .family = AF_INET6, ++ .checkentry = imq_checkentry, ++ .target = imq_target, ++ .targetsize = sizeof(struct xt_imq_info), ++ .table = "mangle", ++ .me = THIS_MODULE ++ }, +}; + -+static int __init init(void) ++static int __init imq_init(void) +{ -+ return xt_register_target(&ip6t_imq_reg); ++ return xt_register_targets(xt_imq_reg, ARRAY_SIZE(xt_imq_reg)); +} + -+static void __exit fini(void) ++static void __exit imq_fini(void) +{ -+ xt_unregister_target(&ip6t_imq_reg); ++ xt_unregister_targets(xt_imq_reg, ARRAY_SIZE(xt_imq_reg)); +} + -+module_init(init); -+module_exit(fini); ++module_init(imq_init); ++module_exit(imq_fini); + +MODULE_AUTHOR("http://www.linuximq.net"); +MODULE_DESCRIPTION("Pseudo-driver for the intermediate queue device. 
See http://www.linuximq.net/ for more information.");
+MODULE_LICENSE("GPL");
---- a/net/ipv6/netfilter/Kconfig
-+++ b/net/ipv6/netfilter/Kconfig
-@@ -129,6 +129,15 @@ config IP6_NF_MATCH_RT
-
- To compile it as a module, choose M here.  If unsure, say N.
-
-+config IP6_NF_TARGET_IMQ
-+ tristate "IMQ target support"
-+ depends on IP6_NF_MANGLE && IMQ
-+ help
-+ This option adds a `IMQ' target which is used to specify if and
-+ to which imq device packets should get enqueued/dequeued.
-+
-+ To compile it as a module, choose M here.  If unsure, say N.
++MODULE_ALIAS("ipt_IMQ");
++MODULE_ALIAS("ip6t_IMQ");
+
- # The targets
- config IP6_NF_TARGET_HL
- tristate '"HL" hoplimit target support'
---- a/net/ipv6/netfilter/Makefile
-+++ b/net/ipv6/netfilter/Makefile
-@@ -6,6 +6,7 @@
- obj-$(CONFIG_IP6_NF_IPTABLES) += ip6_tables.o
- obj-$(CONFIG_IP6_NF_FILTER) += ip6table_filter.o
- obj-$(CONFIG_IP6_NF_MANGLE) += ip6table_mangle.o
-+obj-$(CONFIG_IP6_NF_TARGET_IMQ) += ip6t_IMQ.o
- obj-$(CONFIG_IP6_NF_QUEUE) += ip6_queue.o
- obj-$(CONFIG_IP6_NF_RAW) += ip6table_raw.o
- obj-$(CONFIG_IP6_NF_SECURITY) += ip6table_security.o
---- a/net/sched/sch_generic.c
-+++ b/net/sched/sch_generic.c
-@@ -195,6 +195,7 @@ void __qdisc_run(struct Qdisc *q)
-
- clear_bit(__QDISC_STATE_RUNNING, &q->state);
- }
-+EXPORT_SYMBOL(__qdisc_run);
-
- static void dev_watchdog(unsigned long arg)
- {
diff --git a/target/linux/generic-2.6/patches-2.6.30/151-netfilter_imq_2.6.28.patch b/target/linux/generic-2.6/patches-2.6.30/151-netfilter_imq_2.6.28.patch
deleted file mode 100644
index d4ed15129d..0000000000
--- a/target/linux/generic-2.6/patches-2.6.30/151-netfilter_imq_2.6.28.patch
+++ /dev/null
@@ -1,114 +0,0 @@
---- a/drivers/net/imq.c
-+++ b/drivers/net/imq.c
-@@ -178,10 +178,11 @@ static int imq_nf_queue(struct nf_queue_
- struct sk_buff *skb2 = NULL;
- struct Qdisc *q;
- unsigned int index = entry->skb->imq_flags & IMQ_F_IFMASK;
-- int ret = -1;
-+ struct netdev_queue *txq;
-+ int ret = -EINVAL;
-
- if (index > numdevs)
-- return -1;
-+ return ret;
-
- /* check for imq device by index from cache */
- dev = imq_devs_cache[index];
-@@ -194,7 +195,7 @@ static int imq_nf_queue(struct nf_queue_
- if (!dev) {
- /* not found ?!*/
- BUG();
-- return -1;
-+ return ret;
- }
-
- imq_devs_cache[index] = dev;
-@@ -212,17 +213,19 @@ static int imq_nf_queue(struct nf_queue_
- skb2 = entry->skb;
- entry->skb = skb_clone(entry->skb, GFP_ATOMIC);
- if (!entry->skb)
-- return -1;
-+ return -ENOMEM;
- }
- entry->skb->nf_queue_entry = entry;
-
- dev->stats.rx_bytes += entry->skb->len;
- dev->stats.rx_packets++;
-
-- spin_lock_bh(&dev->queue_lock);
-- q = dev->qdisc;
-+ txq = netdev_get_tx_queue(dev, 0);
-+ __netif_tx_lock_bh(txq);
-+ q = txq->qdisc;
-+
- if (q->enqueue) {
-- q->enqueue(skb_get(entry->skb), q);
-+ qdisc_enqueue_root(skb_get(entry->skb), q);
- if (skb_shared(entry->skb)) {
- entry->skb->destructor = imq_skb_destructor;
- kfree_skb(entry->skb);
-@@ -231,7 +234,7 @@ static int imq_nf_queue(struct nf_queue_
- }
- if (!test_and_set_bit(1, &priv->tasklet_pending))
- tasklet_schedule(&priv->tasklet);
-- spin_unlock_bh(&dev->queue_lock);
-+ __netif_tx_unlock_bh(txq);
-
- if (skb2)
- kfree_skb(ret ?
entry->skb : skb2);
-@@ -248,11 +251,13 @@ static void qdisc_run_tasklet(unsigned l
- {
- struct net_device *dev = (struct net_device *)arg;
- struct imq_private *priv = netdev_priv(dev);
-+ struct netdev_queue *txq;
-
-- spin_lock(&dev->queue_lock);
-- qdisc_run(dev);
-+ netif_tx_lock(dev);
-+ txq = netdev_get_tx_queue(dev, 0);
-+ qdisc_run(txq->qdisc);
- clear_bit(1, &priv->tasklet_pending);
-- spin_unlock(&dev->queue_lock);
-+ netif_tx_unlock(dev);
- }
-
- static unsigned int imq_nf_hook(unsigned int hook, struct sk_buff *pskb,
---- a/net/ipv4/netfilter/ipt_IMQ.c
-+++ b/net/ipv4/netfilter/ipt_IMQ.c
-@@ -7,29 +7,23 @@
- #include <linux/netfilter_ipv4/ipt_IMQ.h>
- #include <linux/imq.h>
-
--static unsigned int imq_target(struct sk_buff *pskb,
-- const struct net_device *in,
-- const struct net_device *out,
-- unsigned int hooknum,
-- const struct xt_target *target,
-- const void *targinfo)
-+static unsigned int
-+imq_target(struct sk_buff *pskb,
-+ const struct xt_target_param *par)
- {
-- struct ipt_imq_info *mr = (struct ipt_imq_info *)targinfo;
-+ struct ipt_imq_info *mr = (struct ipt_imq_info *)par->targinfo;
-
- pskb->imq_flags = mr->todev | IMQ_F_ENQUEUE;
-
- return XT_CONTINUE;
- }
-
--static bool imq_checkentry(const char *tablename,
-- const void *e,
-- const struct xt_target *target,
-- void *targinfo,
-- unsigned int hook_mask)
-+static bool
-+imq_checkentry(const struct xt_tgchk_param *par)
- {
- struct ipt_imq_info *mr;
-
-- mr = (struct ipt_imq_info *)targinfo;
-+ mr = (struct ipt_imq_info *)par->targinfo;
-
- if (mr->todev > IMQ_MAX_DEVS) {
- printk(KERN_WARNING
diff --git a/target/linux/generic-2.6/patches-2.6.30/180-netfilter_depends.patch b/target/linux/generic-2.6/patches-2.6.30/180-netfilter_depends.patch
index e05ec77514..e430cd78f1 100644
--- a/target/linux/generic-2.6/patches-2.6.30/180-netfilter_depends.patch
+++ b/target/linux/generic-2.6/patches-2.6.30/180-netfilter_depends.patch
@@ -1,6 +1,6 @@
 --- a/net/netfilter/Kconfig
 +++ b/net/netfilter/Kconfig
-@@ -160,7 +160,6 @@ config NF_CONNTRACK_FTP
+@@ -160,7 +160,6 @@
 
 config NF_CONNTRACK_H323
 tristate "H.323 protocol support"
@@ -8,7 +8,7 @@
 depends on NETFILTER_ADVANCED
 help
 H.323 is a VoIP signalling protocol from ITU-T. As one of the most
-@@ -493,7 +492,6 @@ config NETFILTER_XT_TARGET_SECMARK
+@@ -505,7 +504,6 @@
 
 config NETFILTER_XT_TARGET_TCPMSS
 tristate '"TCPMSS" target support'
diff --git a/target/linux/generic-2.6/patches-2.6.30/190-netfilter_rtsp.patch b/target/linux/generic-2.6/patches-2.6.30/190-netfilter_rtsp.patch
index 62d4cf2e95..7aff0e6ddc 100644
--- a/target/linux/generic-2.6/patches-2.6.30/190-netfilter_rtsp.patch
+++ b/target/linux/generic-2.6/patches-2.6.30/190-netfilter_rtsp.patch
@@ -294,7 +294,7 @@
 +#endif /* _NETFILTER_MIME_H */
 --- a/net/ipv4/netfilter/Makefile
 +++ b/net/ipv4/netfilter/Makefile
-@@ -26,6 +26,7 @@ obj-$(CONFIG_NF_NAT_AMANDA) += nf_nat_am
+@@ -26,6 +26,7 @@
 obj-$(CONFIG_NF_NAT_FTP) += nf_nat_ftp.o
 obj-$(CONFIG_NF_NAT_H323) += nf_nat_h323.o
 obj-$(CONFIG_NF_NAT_IRC) += nf_nat_irc.o
@@ -304,7 +304,7 @@
 obj-$(CONFIG_NF_NAT_SNMP_BASIC) += nf_nat_snmp_basic.o
 --- a/net/netfilter/Kconfig
 +++ b/net/netfilter/Kconfig
-@@ -267,6 +267,16 @@ config NF_CONNTRACK_TFTP
+@@ -267,6 +267,16 @@
 
 To compile it as a module, choose M here.  If unsure, say N.
@@ -323,7 +323,7 @@
 select NETFILTER_NETLINK
 --- a/net/netfilter/Makefile
 +++ b/net/netfilter/Makefile
-@@ -33,6 +33,7 @@ obj-$(CONFIG_NF_CONNTRACK_PPTP) += nf_co
+@@ -33,6 +33,7 @@
 obj-$(CONFIG_NF_CONNTRACK_SANE) += nf_conntrack_sane.o
 obj-$(CONFIG_NF_CONNTRACK_SIP) += nf_conntrack_sip.o
 obj-$(CONFIG_NF_CONNTRACK_TFTP) += nf_conntrack_tftp.o
 +
 obj-$(CONFIG_NETFILTER_TPROXY) += nf_tproxy_core.o
 --- a/net/ipv4/netfilter/Kconfig
 +++ b/net/ipv4/netfilter/Kconfig
-@@ -268,6 +268,11 @@ config NF_NAT_IRC
+@@ -257,6 +257,11 @@
 depends on NF_CONNTRACK && NF_NAT
 default NF_NAT && NF_CONNTRACK_IRC
diff --git a/target/linux/generic-2.6/patches-2.6.30/205-skb_padding.patch b/target/linux/generic-2.6/patches-2.6.30/205-skb_padding.patch
index f1802fe1ec..9f527548f5 100644
--- a/target/linux/generic-2.6/patches-2.6.30/205-skb_padding.patch
+++ b/target/linux/generic-2.6/patches-2.6.30/205-skb_padding.patch
@@ -1,6 +1,6 @@
 --- a/include/linux/skbuff.h
 +++ b/include/linux/skbuff.h
-@@ -1355,9 +1355,12 @@ static inline int skb_network_offset(con
+@@ -1369,9 +1369,12 @@
 *
 * Various parts of the networking layer expect at least 32 bytes of
 * headroom, you should not reduce this.