Diffstat (limited to 'xen-2.4.16/include/xeno/skbuff.h')
-rw-r--r--  xen-2.4.16/include/xeno/skbuff.h  979
1 file changed, 150 insertions(+), 829 deletions(-)
diff --git a/xen-2.4.16/include/xeno/skbuff.h b/xen-2.4.16/include/xeno/skbuff.h
index 5be56a7cc7..519328a679 100644
--- a/xen-2.4.16/include/xeno/skbuff.h
+++ b/xen-2.4.16/include/xeno/skbuff.h
@@ -19,21 +19,23 @@
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/cache.h>
-
+#include <linux/slab.h>
+#include <asm/system.h>
#include <asm/atomic.h>
#include <asm/types.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
-// vif special values.
+/* vif special values */
#define VIF_PHYSICAL_INTERFACE -1
#define VIF_UNKNOWN_INTERFACE -2
#define VIF_DROP -3
#define VIF_ANY_INTERFACE -4
-//skb_type values:
-#define SKB_NORMAL 0
-#define SKB_ZERO_COPY 1
+/* skb_type values */
+#define SKB_NORMAL 0 /* A Linux-style skbuff: no strangeness */
+#define SKB_ZERO_COPY 1 /* Zero copy skbs are used for receive */
+#define SKB_NODATA 2 /* Data allocation not handled by us */
#define HAVE_ALLOC_SKB /* For the drivers to know */
#define HAVE_ALIGNABLE_SKB /* Ditto 8) */
@@ -44,9 +46,6 @@
#define CHECKSUM_UNNECESSARY 2
#define SKB_DATA_ALIGN(X) (((X) + (SMP_CACHE_BYTES-1)) & ~(SMP_CACHE_BYTES-1))
-#define SKB_MAX_ORDER(X,ORDER) (((PAGE_SIZE<<(ORDER)) - (X) - sizeof(struct skb_shared_info))&~(SMP_CACHE_BYTES-1))
-#define SKB_MAX_HEAD(X) (SKB_MAX_ORDER((X),0))
-#define SKB_MAX_ALLOC (SKB_MAX_ORDER(0,2))
/* A. Checksumming of received packets by device.
*
@@ -82,8 +81,6 @@
* TCP/UDP over IPv4. Sigh. Vendors like this
* way by an unknown reason. Though, see comment above
* about CHECKSUM_UNNECESSARY. 8)
- *
- * Any questions? No questions, good. --ANK
*/
#ifdef __i386__
@@ -92,171 +89,98 @@
#define NET_CALLER(arg) __builtin_return_address(0)
#endif
-#ifdef CONFIG_NETFILTER
-struct nf_conntrack {
- atomic_t use;
- void (*destroy)(struct nf_conntrack *);
-};
-
-struct nf_ct_info {
- struct nf_conntrack *master;
-};
-#endif
-
struct sk_buff_head {
- /* These two members must be first. */
- struct sk_buff * next;
- struct sk_buff * prev;
+ /* These two members must be first. */
+ struct sk_buff * next;
+ struct sk_buff * prev;
- __u32 qlen;
- spinlock_t lock;
+ __u32 qlen;
+ spinlock_t lock;
};
-struct sk_buff;
-
-#define MAX_SKB_FRAGS 6
+#define MAX_SKB_FRAGS 1 /* KAF: was 6 */
-typedef struct skb_frag_struct skb_frag_t;
+typedef struct skb_frag_struct {
+ struct pfn_info *page;
+ __u16 page_offset;
+ __u16 size;
+} skb_frag_t;
-struct skb_frag_struct
-{
- struct pfn_info *page;
- __u16 page_offset;
- __u16 size;
-};
-
-/* This data is invariant across clones and lives at
- * the end of the header data, ie. at skb->end.
- */
struct skb_shared_info {
- atomic_t dataref;
- unsigned int nr_frags;
- struct sk_buff *frag_list;
- skb_frag_t frags[MAX_SKB_FRAGS];
+ unsigned int nr_frags;
+ skb_frag_t frags[MAX_SKB_FRAGS];
};
struct sk_buff {
- /* These two members must be first. */
- struct sk_buff * next; /* Next buffer in list */
- struct sk_buff * prev; /* Previous buffer in list */
-
- struct sk_buff_head * list; /* List we are on */
- struct net_device *dev; /* Device we arrived on/are leaving by */
-
- /* Transport layer header */
- union
- {
- struct tcphdr *th;
- struct udphdr *uh;
- struct icmphdr *icmph;
- struct igmphdr *igmph;
- struct iphdr *ipiph;
- struct spxhdr *spxh;
- unsigned char *raw;
- } h;
-
- /* Network layer header */
- union
- {
- struct iphdr *iph;
- struct ipv6hdr *ipv6h;
- struct arphdr *arph;
- struct ipxhdr *ipxh;
- unsigned char *raw;
- } nh;
+ /* These two members must be first. */
+ struct sk_buff * next; /* Next buffer in list */
+ struct sk_buff * prev; /* Previous buffer in list */
+
+ struct sk_buff_head * list; /* List we are on */
+ struct net_device *dev; /* Device we arrived on/are leaving by */
+
+ /* Transport layer header */
+ union
+ {
+ struct tcphdr *th;
+ struct udphdr *uh;
+ struct icmphdr *icmph;
+ struct igmphdr *igmph;
+ struct iphdr *ipiph;
+ struct spxhdr *spxh;
+ unsigned char *raw;
+ } h;
+
+ /* Network layer header */
+ union
+ {
+ struct iphdr *iph;
+ struct ipv6hdr *ipv6h;
+ struct arphdr *arph;
+ struct ipxhdr *ipxh;
+ unsigned char *raw;
+ } nh;
- /* Link layer header */
- union
- {
- struct ethhdr *ethernet;
- unsigned char *raw;
- } mac;
-
- unsigned int len; /* Length of actual data */
- unsigned int data_len;
- unsigned int csum; /* Checksum */
- unsigned char skb_type,
- cloned, /* head may be cloned (check refcnt to be sure) */
- pkt_type, /* Packet class */
- ip_summed; /* Driver fed us an IP checksum */
- atomic_t users; /* User count - see datagram.c,tcp.c */
- unsigned short protocol; /* Packet protocol from driver. */
- unsigned char *head; /* Head of buffer */
- unsigned char *data; /* Data head pointer */
- unsigned char *tail; /* Tail pointer */
- unsigned char *end; /* End pointer */
-
- void (*destructor)(struct sk_buff *); /* Destruct function */
- struct pfn_info *pf; /* record of physical pf address for freeing */
- int src_vif; /* vif we came from */
- int dst_vif; /* vif we are bound for */
- struct skb_shared_info shinfo; /* shared info is no longer shared in Xen. */
+ /* Link layer header */
+ union
+ {
+ struct ethhdr *ethernet;
+ unsigned char *raw;
+ } mac;
+
+ unsigned int len; /* Length of actual data */
+ unsigned int data_len;
+ unsigned int csum; /* Checksum */
+ unsigned char skb_type,
+ pkt_type, /* Packet class */
+ ip_summed; /* Driver fed us an IP checksum */
+ unsigned short protocol; /* Packet protocol from driver. */
+ unsigned char *head; /* Head of buffer */
+ unsigned char *data; /* Data head pointer */
+ unsigned char *tail; /* Tail pointer */
+ unsigned char *end; /* End pointer */
+
+ void (*destructor)(struct sk_buff *); /* Destruct function */
+ struct pfn_info *pf; /* record of physical pf address for freeing */
+ int src_vif; /* vif we came from */
+ int dst_vif; /* vif we are bound for */
+ struct skb_shared_info shinfo; /* shared info is no longer shared in Xen. */
};
-#define SK_WMEM_MAX 65535
-#define SK_RMEM_MAX 65535
-
-#ifdef __KERNEL__
-/*
- * Handling routines are only of interest to the kernel
- */
-#include <linux/slab.h>
-
-#include <asm/system.h>
-
-extern void __kfree_skb(struct sk_buff *skb);
-extern struct sk_buff * alloc_skb(unsigned int size, int priority);
-extern struct sk_buff * alloc_zc_skb(unsigned int size, int priority);
-extern void kfree_skbmem(struct sk_buff *skb);
-extern struct sk_buff * skb_clone(struct sk_buff *skb, int priority);
-extern struct sk_buff * skb_copy(const struct sk_buff *skb, int priority);
-extern struct sk_buff * pskb_copy(struct sk_buff *skb, int gfp_mask);
-extern int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, int gfp_mask);
-extern struct sk_buff * skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom);
-extern struct sk_buff * skb_copy_expand(const struct sk_buff *skb,
- int newheadroom,
- int newtailroom,
- int priority);
+extern void __kfree_skb(struct sk_buff *skb);
+extern struct sk_buff *alloc_skb(unsigned int size, int priority);
+extern struct sk_buff *alloc_skb_nodata(int priority);
+extern struct sk_buff *alloc_zc_skb(unsigned int size, int priority);
+extern void kfree_skbmem(struct sk_buff *skb);
+extern struct sk_buff *skb_copy(const struct sk_buff *skb, int priority);
#define dev_kfree_skb(a) kfree_skb(a)
extern void skb_over_panic(struct sk_buff *skb, int len, void *here);
extern void skb_under_panic(struct sk_buff *skb, int len, void *here);
-/* Internal */
-//#define skb_shinfo(SKB) ((struct skb_shared_info *)((SKB)->end))
+/* In Xen, we don't clone skbs, so shared data can go in the sk_buff struct. */
#define skb_shinfo(SKB) ((struct skb_shared_info *)(&(SKB)->shinfo))
/**
- * skb_queue_empty - check if a queue is empty
- * @list: queue head
- *
- * Returns true if the queue is empty, false otherwise.
- */
-
-static inline int skb_queue_empty(struct sk_buff_head *list)
-{
- return (list->next == (struct sk_buff *) list);
-}
-
-/**
- * skb_get - reference buffer
- * @skb: buffer to reference
- *
- * Makes another reference to a socket buffer and returns a pointer
- * to the buffer.
- */
-
-static inline struct sk_buff *skb_get(struct sk_buff *skb)
-{
- atomic_inc(&skb->users);
- return skb;
-}
-
-/*
- * If users==1, we are the only owner and are can avoid redundant
- * atomic change.
- */
-
-/**
* kfree_skb - free an sk_buff
* @skb: buffer to free
*
@@ -266,143 +190,7 @@ static inline struct sk_buff *skb_get(struct sk_buff *skb)
static inline void kfree_skb(struct sk_buff *skb)
{
- if (atomic_read(&skb->users) == 1 || atomic_dec_and_test(&skb->users))
- __kfree_skb(skb);
-}
-
-/* Use this if you didn't touch the skb state [for fast switching] */
-static inline void kfree_skb_fast(struct sk_buff *skb)
-{
- if (atomic_read(&skb->users) == 1 || atomic_dec_and_test(&skb->users))
- kfree_skbmem(skb);
-}
-
-/**
- * skb_cloned - is the buffer a clone
- * @skb: buffer to check
- *
- * Returns true if the buffer was generated with skb_clone() and is
- * one of multiple shared copies of the buffer. Cloned buffers are
- * shared data so must not be written to under normal circumstances.
- */
-
-static inline int skb_cloned(struct sk_buff *skb)
-{
- return skb->cloned && atomic_read(&skb_shinfo(skb)->dataref) != 1;
-}
-
-/**
- * skb_shared - is the buffer shared
- * @skb: buffer to check
- *
- * Returns true if more than one person has a reference to this
- * buffer.
- */
-
-static inline int skb_shared(struct sk_buff *skb)
-{
- return (atomic_read(&skb->users) != 1);
-}
-
-/**
- * skb_share_check - check if buffer is shared and if so clone it
- * @skb: buffer to check
- * @pri: priority for memory allocation
- *
- * If the buffer is shared the buffer is cloned and the old copy
- * drops a reference. A new clone with a single reference is returned.
- * If the buffer is not shared the original buffer is returned. When
- * being called from interrupt status or with spinlocks held pri must
- * be GFP_ATOMIC.
- *
- * NULL is returned on a memory allocation failure.
- */
-
-static inline struct sk_buff *skb_share_check(struct sk_buff *skb, int pri)
-{
- if (skb_shared(skb)) {
- struct sk_buff *nskb;
- nskb = skb_clone(skb, pri);
- kfree_skb(skb);
- return nskb;
- }
- return skb;
-}
-
-
-/*
- * Copy shared buffers into a new sk_buff. We effectively do COW on
- * packets to handle cases where we have a local reader and forward
- * and a couple of other messy ones. The normal one is tcpdumping
- * a packet thats being forwarded.
- */
-
-/**
- * skb_unshare - make a copy of a shared buffer
- * @skb: buffer to check
- * @pri: priority for memory allocation
- *
- * If the socket buffer is a clone then this function creates a new
- * copy of the data, drops a reference count on the old copy and returns
- * the new copy with the reference count at 1. If the buffer is not a clone
- * the original buffer is returned. When called with a spinlock held or
- * from interrupt state @pri must be %GFP_ATOMIC
- *
- * %NULL is returned on a memory allocation failure.
- */
-
-static inline struct sk_buff *skb_unshare(struct sk_buff *skb, int pri)
-{
- struct sk_buff *nskb;
- if(!skb_cloned(skb))
- return skb;
- nskb=skb_copy(skb, pri);
- kfree_skb(skb); /* Free our shared copy */
- return nskb;
-}
-
-/**
- * skb_peek
- * @list_: list to peek at
- *
- * Peek an &sk_buff. Unlike most other operations you _MUST_
- * be careful with this one. A peek leaves the buffer on the
- * list and someone else may run off with it. You must hold
- * the appropriate locks or have a private queue to do this.
- *
- * Returns %NULL for an empty list or a pointer to the head element.
- * The reference count is not incremented and the reference is therefore
- * volatile. Use with caution.
- */
-
-static inline struct sk_buff *skb_peek(struct sk_buff_head *list_)
-{
- struct sk_buff *list = ((struct sk_buff *)list_)->next;
- if (list == (struct sk_buff *)list_)
- list = NULL;
- return list;
-}
-
-/**
- * skb_peek_tail
- * @list_: list to peek at
- *
- * Peek an &sk_buff. Unlike most other operations you _MUST_
- * be careful with this one. A peek leaves the buffer on the
- * list and someone else may run off with it. You must hold
- * the appropriate locks or have a private queue to do this.
- *
- * Returns %NULL for an empty list or a pointer to the tail element.
- * The reference count is not incremented and the reference is therefore
- * volatile. Use with caution.
- */
-
-static inline struct sk_buff *skb_peek_tail(struct sk_buff_head *list_)
-{
- struct sk_buff *list = ((struct sk_buff *)list_)->prev;
- if (list == (struct sk_buff *)list_)
- list = NULL;
- return list;
+ __kfree_skb(skb);
}
/**
@@ -414,24 +202,17 @@ static inline struct sk_buff *skb_peek_tail(struct sk_buff_head *list_)
static inline __u32 skb_queue_len(struct sk_buff_head *list_)
{
- return(list_->qlen);
+ return(list_->qlen);
}
static inline void skb_queue_head_init(struct sk_buff_head *list)
{
- spin_lock_init(&list->lock);
- list->prev = (struct sk_buff *)list;
- list->next = (struct sk_buff *)list;
- list->qlen = 0;
+ spin_lock_init(&list->lock);
+ list->prev = (struct sk_buff *)list;
+ list->next = (struct sk_buff *)list;
+ list->qlen = 0;
}
-/*
- * Insert an sk_buff at the start of a list.
- *
- * The "__skb_xxxx()" functions are the non-atomic ones that
- * can only be called with interrupts disabled.
- */
-
/**
* __skb_queue_head - queue a buffer at the list head
* @list: list to use
@@ -445,85 +226,16 @@ static inline void skb_queue_head_init(struct sk_buff_head *list)
static inline void __skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
{
- struct sk_buff *prev, *next;
+ struct sk_buff *prev, *next;
- newsk->list = list;
- list->qlen++;
- prev = (struct sk_buff *)list;
- next = prev->next;
- newsk->next = next;
- newsk->prev = prev;
- next->prev = newsk;
- prev->next = newsk;
-}
-
-
-/**
- * skb_queue_head - queue a buffer at the list head
- * @list: list to use
- * @newsk: buffer to queue
- *
- * Queue a buffer at the start of the list. This function takes the
- * list lock and can be used safely with other locking &sk_buff functions
- * safely.
- *
- * A buffer cannot be placed on two lists at the same time.
- */
-
-static inline void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&list->lock, flags);
- __skb_queue_head(list, newsk);
- spin_unlock_irqrestore(&list->lock, flags);
-}
-
-/**
- * __skb_queue_tail - queue a buffer at the list tail
- * @list: list to use
- * @newsk: buffer to queue
- *
- * Queue a buffer at the end of a list. This function takes no locks
- * and you must therefore hold required locks before calling it.
- *
- * A buffer cannot be placed on two lists at the same time.
- */
-
-
-static inline void __skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
-{
- struct sk_buff *prev, *next;
-
- newsk->list = list;
- list->qlen++;
- next = (struct sk_buff *)list;
- prev = next->prev;
- newsk->next = next;
- newsk->prev = prev;
- next->prev = newsk;
- prev->next = newsk;
-}
-
-/**
- * skb_queue_tail - queue a buffer at the list tail
- * @list: list to use
- * @newsk: buffer to queue
- *
- * Queue a buffer at the tail of the list. This function takes the
- * list lock and can be used safely with other locking &sk_buff functions
- * safely.
- *
- * A buffer cannot be placed on two lists at the same time.
- */
-
-static inline void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&list->lock, flags);
- __skb_queue_tail(list, newsk);
- spin_unlock_irqrestore(&list->lock, flags);
+ newsk->list = list;
+ list->qlen++;
+ prev = (struct sk_buff *)list;
+ next = prev->next;
+ newsk->next = next;
+ newsk->prev = prev;
+ next->prev = newsk;
+ prev->next = newsk;
}
/**
@@ -537,205 +249,29 @@ static inline void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *new
static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
- struct sk_buff *next, *prev, *result;
-
- prev = (struct sk_buff *) list;
- next = prev->next;
- result = NULL;
- if (next != prev) {
- result = next;
- next = next->next;
- list->qlen--;
- next->prev = prev;
- prev->next = next;
- result->next = NULL;
- result->prev = NULL;
- result->list = NULL;
- }
- return result;
-}
-
-/**
- * skb_dequeue - remove from the head of the queue
- * @list: list to dequeue from
- *
- * Remove the head of the list. The list lock is taken so the function
- * may be used safely with other locking list functions. The head item is
- * returned or %NULL if the list is empty.
- */
-
-static inline struct sk_buff *skb_dequeue(struct sk_buff_head *list)
-{
- long flags;
- struct sk_buff *result;
-
- spin_lock_irqsave(&list->lock, flags);
- result = __skb_dequeue(list);
- spin_unlock_irqrestore(&list->lock, flags);
- return result;
-}
-
-/*
- * Insert a packet on a list.
- */
-
-static inline void __skb_insert(struct sk_buff *newsk,
- struct sk_buff * prev, struct sk_buff *next,
- struct sk_buff_head * list)
-{
- newsk->next = next;
- newsk->prev = prev;
- next->prev = newsk;
- prev->next = newsk;
- newsk->list = list;
- list->qlen++;
-}
-
-/**
- * skb_insert - insert a buffer
- * @old: buffer to insert before
- * @newsk: buffer to insert
- *
- * Place a packet before a given packet in a list. The list locks are taken
- * and this function is atomic with respect to other list locked calls
- * A buffer cannot be placed on two lists at the same time.
- */
-
-static inline void skb_insert(struct sk_buff *old, struct sk_buff *newsk)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&old->list->lock, flags);
- __skb_insert(newsk, old->prev, old, old->list);
- spin_unlock_irqrestore(&old->list->lock, flags);
-}
+ struct sk_buff *next, *prev, *result;
-/*
- * Place a packet after a given packet in a list.
- */
-
-static inline void __skb_append(struct sk_buff *old, struct sk_buff *newsk)
-{
- __skb_insert(newsk, old, old->next, old->list);
-}
-
-/**
- * skb_append - append a buffer
- * @old: buffer to insert after
- * @newsk: buffer to insert
- *
- * Place a packet after a given packet in a list. The list locks are taken
- * and this function is atomic with respect to other list locked calls.
- * A buffer cannot be placed on two lists at the same time.
- */
-
-
-static inline void skb_append(struct sk_buff *old, struct sk_buff *newsk)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&old->list->lock, flags);
- __skb_append(old, newsk);
- spin_unlock_irqrestore(&old->list->lock, flags);
-}
-
-/*
- * remove sk_buff from list. _Must_ be called atomically, and with
- * the list known..
- */
-
-static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
-{
- struct sk_buff * next, * prev;
-
- list->qlen--;
- next = skb->next;
- prev = skb->prev;
- skb->next = NULL;
- skb->prev = NULL;
- skb->list = NULL;
- next->prev = prev;
- prev->next = next;
-}
-
-/**
- * skb_unlink - remove a buffer from a list
- * @skb: buffer to remove
- *
- * Place a packet after a given packet in a list. The list locks are taken
- * and this function is atomic with respect to other list locked calls
- *
- * Works even without knowing the list it is sitting on, which can be
- * handy at times. It also means that THE LIST MUST EXIST when you
- * unlink. Thus a list must have its contents unlinked before it is
- * destroyed.
- */
-
-static inline void skb_unlink(struct sk_buff *skb)
-{
- struct sk_buff_head *list = skb->list;
-
- if(list) {
- unsigned long flags;
-
- spin_lock_irqsave(&list->lock, flags);
- if(skb->list == list)
- __skb_unlink(skb, skb->list);
- spin_unlock_irqrestore(&list->lock, flags);
- }
-}
-
-/* XXX: more streamlined implementation */
-
-/**
- * __skb_dequeue_tail - remove from the tail of the queue
- * @list: list to dequeue from
- *
- * Remove the tail of the list. This function does not take any locks
- * so must be used with appropriate locks held only. The tail item is
- * returned or %NULL if the list is empty.
- */
-
-static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
-{
- struct sk_buff *skb = skb_peek_tail(list);
- if (skb)
- __skb_unlink(skb, list);
- return skb;
-}
-
-/**
- * skb_dequeue - remove from the head of the queue
- * @list: list to dequeue from
- *
- * Remove the head of the list. The list lock is taken so the function
- * may be used safely with other locking list functions. The tail item is
- * returned or %NULL if the list is empty.
- */
-
-static inline struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
-{
- long flags;
- struct sk_buff *result;
-
- spin_lock_irqsave(&list->lock, flags);
- result = __skb_dequeue_tail(list);
- spin_unlock_irqrestore(&list->lock, flags);
- return result;
+ prev = (struct sk_buff *) list;
+ next = prev->next;
+ result = NULL;
+ if (next != prev) {
+ result = next;
+ next = next->next;
+ list->qlen--;
+ next->prev = prev;
+ prev->next = next;
+ result->next = NULL;
+ result->prev = NULL;
+ result->list = NULL;
+ }
+ return result;
}
static inline int skb_is_nonlinear(const struct sk_buff *skb)
{
- return skb->data_len;
-}
-
-static inline int skb_headlen(const struct sk_buff *skb)
-{
- return skb->len - skb->data_len;
+ return skb->data_len;
}
-#define SKB_PAGE_ASSERT(skb) do { if (skb_shinfo(skb)->nr_frags) BUG(); } while (0)
-#define SKB_FRAG_ASSERT(skb) do { if (skb_shinfo(skb)->frag_list) BUG(); } while (0)
#define SKB_LINEAR_ASSERT(skb) do { if (skb_is_nonlinear(skb)) BUG(); } while (0)
/*
@@ -744,11 +280,11 @@ static inline int skb_headlen(const struct sk_buff *skb)
static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
{
- unsigned char *tmp=skb->tail;
- SKB_LINEAR_ASSERT(skb);
- skb->tail+=len;
- skb->len+=len;
- return tmp;
+ unsigned char *tmp=skb->tail;
+ SKB_LINEAR_ASSERT(skb);
+ skb->tail+=len;
+ skb->len+=len;
+ return tmp;
}
/**
@@ -763,21 +299,21 @@ static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
static inline unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
{
- unsigned char *tmp=skb->tail;
- SKB_LINEAR_ASSERT(skb);
- skb->tail+=len;
- skb->len+=len;
- if(skb->tail>skb->end) {
- skb_over_panic(skb, len, current_text_addr());
- }
- return tmp;
+ unsigned char *tmp=skb->tail;
+ SKB_LINEAR_ASSERT(skb);
+ skb->tail+=len;
+ skb->len+=len;
+ if(skb->tail>skb->end) {
+ skb_over_panic(skb, len, current_text_addr());
+ }
+ return tmp;
}
static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
{
- skb->data-=len;
- skb->len+=len;
- return skb->data;
+ skb->data-=len;
+ skb->len+=len;
+ return skb->data;
}
/**
@@ -792,20 +328,20 @@ static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
static inline unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
{
- skb->data-=len;
- skb->len+=len;
- if(skb->data<skb->head) {
- skb_under_panic(skb, len, current_text_addr());
- }
- return skb->data;
+ skb->data-=len;
+ skb->len+=len;
+ if(skb->data<skb->head) {
+ skb_under_panic(skb, len, current_text_addr());
+ }
+ return skb->data;
}
static inline char *__skb_pull(struct sk_buff *skb, unsigned int len)
{
- skb->len-=len;
- if (skb->len < skb->data_len)
- BUG();
- return skb->data+=len;
+ skb->len-=len;
+ if (skb->len < skb->data_len)
+ BUG();
+ return skb->data+=len;
}
/**
@@ -821,60 +357,9 @@ static inline char *__skb_pull(struct sk_buff *skb, unsigned int len)
static inline unsigned char * skb_pull(struct sk_buff *skb, unsigned int len)
{
- if (len > skb->len)
- return NULL;
- return __skb_pull(skb,len);
-}
-
-extern unsigned char * __pskb_pull_tail(struct sk_buff *skb, int delta);
-
-static inline char *__pskb_pull(struct sk_buff *skb, unsigned int len)
-{
- if (len > skb_headlen(skb) &&
- __pskb_pull_tail(skb, len-skb_headlen(skb)) == NULL)
- return NULL;
- skb->len -= len;
- return skb->data += len;
-}
-
-static inline unsigned char * pskb_pull(struct sk_buff *skb, unsigned int len)
-{
- if (len > skb->len)
- return NULL;
- return __pskb_pull(skb,len);
-}
-
-static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
-{
- if (len <= skb_headlen(skb))
- return 1;
- if (len > skb->len)
- return 0;
- return (__pskb_pull_tail(skb, len-skb_headlen(skb)) != NULL);
-}
-
-/**
- * skb_headroom - bytes at buffer head
- * @skb: buffer to check
- *
- * Return the number of bytes of free space at the head of an &sk_buff.
- */
-
-static inline int skb_headroom(const struct sk_buff *skb)
-{
- return skb->data-skb->head;
-}
-
-/**
- * skb_tailroom - bytes at buffer end
- * @skb: buffer to check
- *
- * Return the number of bytes of free space at the tail of an sk_buff
- */
-
-static inline int skb_tailroom(const struct sk_buff *skb)
-{
- return skb_is_nonlinear(skb) ? 0 : skb->end-skb->tail;
+ if (len > skb->len)
+ return NULL;
+ return __skb_pull(skb,len);
}
/**
@@ -888,106 +373,8 @@ static inline int skb_tailroom(const struct sk_buff *skb)
static inline void skb_reserve(struct sk_buff *skb, unsigned int len)
{
- skb->data+=len;
- skb->tail+=len;
-}
-
-extern int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc);
-
-static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
-{
- if (!skb->data_len) {
- skb->len = len;
- skb->tail = skb->data+len;
- } else {
- ___pskb_trim(skb, len, 0);
- }
-}
-
-/**
- * skb_trim - remove end from a buffer
- * @skb: buffer to alter
- * @len: new length
- *
- * Cut the length of a buffer down by removing data from the tail. If
- * the buffer is already under the length specified it is not modified.
- */
-
-static inline void skb_trim(struct sk_buff *skb, unsigned int len)
-{
- if (skb->len > len) {
- __skb_trim(skb, len);
- }
-}
-
-
-static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
-{
- if (!skb->data_len) {
- skb->len = len;
- skb->tail = skb->data+len;
- return 0;
- } else {
- return ___pskb_trim(skb, len, 1);
- }
-}
-
-static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
-{
- if (len < skb->len)
- return __pskb_trim(skb, len);
- return 0;
-}
-
-/**
- * skb_orphan - orphan a buffer
- * @skb: buffer to orphan
- *
- * If a buffer currently has an owner then we call the owner's
- * destructor function and make the @skb unowned. The buffer continues
- * to exist but is no longer charged to its former owner.
- */
-
-
-static inline void skb_orphan(struct sk_buff *skb)
-{
- if (skb->destructor)
- skb->destructor(skb);
- skb->destructor = NULL;
-}
-
-/**
- * skb_purge - empty a list
- * @list: list to empty
- *
- * Delete all buffers on an &sk_buff list. Each buffer is removed from
- * the list and one reference dropped. This function takes the list
- * lock and is atomic with respect to other list locking functions.
- */
-
-
-static inline void skb_queue_purge(struct sk_buff_head *list)
-{
- struct sk_buff *skb;
- while ((skb=skb_dequeue(list))!=NULL)
- kfree_skb(skb);
-}
-
-/**
- * __skb_purge - empty a list
- * @list: list to empty
- *
- * Delete all buffers on an &sk_buff list. Each buffer is removed from
- * the list and one reference dropped. This function does not take the
- * list lock and the caller must hold the relevant locks to use it.
- */
-
-
-static inline void __skb_queue_purge(struct sk_buff_head *list)
-{
- struct sk_buff *skb;
- while ((skb=__skb_dequeue(list))!=NULL)
- kfree_skb(skb);
+ skb->data+=len;
+ skb->tail+=len;
}
/**
@@ -1006,13 +393,11 @@ static inline void __skb_queue_purge(struct sk_buff_head *list)
static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
int gfp_mask)
{
- struct sk_buff *skb;
-
- //skb = alloc_skb(length+16, gfp_mask);
- skb = alloc_zc_skb(length+16, gfp_mask);
- if (skb)
- skb_reserve(skb,16);
- return skb;
+ struct sk_buff *skb;
+ skb = alloc_zc_skb(length+16, gfp_mask);
+ if (skb)
+ skb_reserve(skb,16);
+ return skb;
}
/**
@@ -1030,84 +415,20 @@ static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
static inline struct sk_buff *dev_alloc_skb(unsigned int length)
{
- return __dev_alloc_skb(length, GFP_ATOMIC);
-}
-
-/**
- * skb_cow - copy header of skb when it is required
- * @skb: buffer to cow
- * @headroom: needed headroom
- *
- * If the skb passed lacks sufficient headroom or its data part
- * is shared, data is reallocated. If reallocation fails, an error
- * is returned and original skb is not changed.
- *
- * The result is skb with writable area skb->head...skb->tail
- * and at least @headroom of space at head.
- */
-
-static inline int
-skb_cow(struct sk_buff *skb, unsigned int headroom)
-{
- int delta = (headroom > 16 ? headroom : 16) - skb_headroom(skb);
-
- if (delta < 0)
- delta = 0;
-
- if (delta || skb_cloned(skb))
- return pskb_expand_head(skb, (delta+15)&~15, 0, GFP_ATOMIC);
- return 0;
+ return __dev_alloc_skb(length, GFP_ATOMIC);
}
-/**
- * skb_linearize - convert paged skb to linear one
- * @skb: buffer to linarize
- * @gfp: allocation mode
- *
- * If there is no free memory -ENOMEM is returned, otherwise zero
- * is returned and the old skb data released. */
-int skb_linearize(struct sk_buff *skb, int gfp);
-
static inline void *kmap_skb_frag(const skb_frag_t *frag)
{
- return page_address(frag->page);
+ return page_address(frag->page);
}
static inline void kunmap_skb_frag(void *vaddr)
{
}
-#define skb_queue_walk(queue, skb) \
- for (skb = (queue)->next; \
- (skb != (struct sk_buff *)(queue)); \
- skb=skb->next)
-
-
-extern int skb_copy_datagram(const struct sk_buff *from, int offset, char *to,int size);
-extern int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset, u8 *to, int len, unsigned int *csump);
-
-extern unsigned int skb_checksum(const struct sk_buff *skb, int offset, int len, unsigned int csum);
-extern int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
-extern unsigned int skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to, int len, unsigned int csum);
-extern void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
-
+extern int skb_copy_bits(const struct sk_buff *skb,
+ int offset, void *to, int len);
extern void skb_init(void);
-extern void skb_add_mtu(int mtu);
-
-#ifdef CONFIG_NETFILTER
-static inline void
-nf_conntrack_put(struct nf_ct_info *nfct)
-{
- if (nfct && atomic_dec_and_test(&nfct->master->use))
- nfct->master->destroy(nfct->master);
-}
-static inline void
-nf_conntrack_get(struct nf_ct_info *nfct)
-{
- if (nfct)
- atomic_inc(&nfct->master->use);
-}
-#endif
-#endif /* __KERNEL__ */
#endif /* _LINUX_SKBUFF_H */
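
For context, below is a minimal usage sketch; it is not part of the commit above. It exercises only the declarations that survive this diff: dev_alloc_skb() (which now routes through alloc_zc_skb()), skb_put(), the non-atomic queue helpers, the vif constants, and kfree_skb(), which after this patch frees unconditionally because Xen dropped skb reference counting. All demo_* names are hypothetical, as is the assumption that <linux/string.h> supplies memcpy() in this tree.

/*
 * Illustrative sketch only; NOT part of the patch above.
 */
#include <linux/string.h>   /* assumed to provide memcpy() here */
#include <xeno/skbuff.h>

static struct sk_buff_head demo_rxq;    /* hypothetical receive queue */

static void demo_init(void)
{
    skb_queue_head_init(&demo_rxq);     /* resets lock, qlen, list ptrs */
}

static int demo_rx(const void *frame, unsigned int len)
{
    struct sk_buff *skb;
    unsigned long flags;

    /* After this patch dev_alloc_skb() allocates a zero-copy skb and
       already reserves 16 bytes of headroom for link-layer headers. */
    skb = dev_alloc_skb(len);
    if (skb == NULL)
        return -1;

    skb->src_vif = VIF_PHYSICAL_INTERFACE;  /* came in off the wire */
    skb->dst_vif = VIF_UNKNOWN_INTERFACE;   /* not yet demultiplexed */

    /* skb_put() extends the data area and returns the old tail. */
    memcpy(skb_put(skb, len), frame, len);

    /* The locking skb_queue_head() wrapper was removed by this patch,
       so take the queue lock around the non-atomic helper ourselves. */
    spin_lock_irqsave(&demo_rxq.lock, flags);
    __skb_queue_head(&demo_rxq, skb);
    spin_unlock_irqrestore(&demo_rxq.lock, flags);
    return 0;
}

static void demo_drain(void)
{
    struct sk_buff *skb;
    unsigned long flags;

    spin_lock_irqsave(&demo_rxq.lock, flags);
    while ((skb = __skb_dequeue(&demo_rxq)) != NULL)
        kfree_skb(skb);     /* unconditional __kfree_skb(): no refcounts */
    spin_unlock_irqrestore(&demo_rxq.lock, flags);
}

The sketch reflects the patch's design point: with cloning, sharing, and nonlinear-skb support stripped out, callers own each skb outright, so freeing is immediate and the caller is responsible for any queue locking the removed wrappers used to provide.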