author      mjw@wray-m-3.hpl.hp.com <mjw@wray-m-3.hpl.hp.com>   2004-08-20 09:21:37 +0000
committer   mjw@wray-m-3.hpl.hp.com <mjw@wray-m-3.hpl.hp.com>   2004-08-20 09:21:37 +0000
commit      d5ff51612766126c1c1ed761df18d35006d117da (patch)
tree        d1467fc0c53c707fa14a68a85fbdca9cc8460c90
parent      fbd62525cf0973a912a7e90d350aac94c1188349 (diff)
parent      50726278a5d66fcb044ea50cd053de0f445124d0 (diff)
bitkeeper revision 1.1159.1.92 (4125c2a1hkXswFK5ZlWTGYbKQihTmQ)
Merge xenbk@gandalf:/var/bk/xeno-unstable.bk into wray-m-3.hpl.hp.com:/home/mjw/repos-bk/xeno-unstable.bk
-rw-r--r--  .rootkeys | 9
-rw-r--r--  BitKeeper/etc/ignore | 1
-rw-r--r--  linux-2.4.26-xen-sparse/arch/xen/config.in | 7
-rw-r--r--  linux-2.4.26-xen-sparse/arch/xen/defconfig-xen0 | 3
-rw-r--r--  linux-2.4.26-xen-sparse/arch/xen/defconfig-xenU | 2
-rw-r--r--  linux-2.4.26-xen-sparse/arch/xen/drivers/balloon/balloon.c | 45
-rw-r--r--  linux-2.4.26-xen-sparse/arch/xen/mm/ioremap.c | 15
-rw-r--r--  linux-2.4.26-xen-sparse/drivers/char/mem.c | 3
-rw-r--r--  linux-2.4.26-xen-sparse/include/asm-xen/pgtable-2level.h | 12
-rw-r--r--  linux-2.4.26-xen-sparse/include/linux/mm.h | 703
-rw-r--r--  linux-2.4.26-xen-sparse/mm/page_alloc.c | 5
-rw-r--r--  linux-2.6.7-xen-sparse/arch/xen/Kconfig | 14
-rw-r--r--  linux-2.6.7-xen-sparse/arch/xen/configs/xen0_defconfig | 3
-rw-r--r--  linux-2.6.7-xen-sparse/arch/xen/configs/xenU_defconfig | 3
-rw-r--r--  linux-2.6.7-xen-sparse/arch/xen/i386/kernel/pci-dma.c | 3
-rw-r--r--  linux-2.6.7-xen-sparse/arch/xen/i386/mm/hypervisor.c | 2
-rw-r--r--  linux-2.6.7-xen-sparse/arch/xen/i386/mm/ioremap.c | 15
-rw-r--r--  linux-2.6.7-xen-sparse/drivers/char/mem.c | 3
-rw-r--r--  linux-2.6.7-xen-sparse/drivers/xen/blkback/blkback.c | 13
-rw-r--r--  linux-2.6.7-xen-sparse/drivers/xen/blkfront/blkfront.c | 78
-rw-r--r--  linux-2.6.7-xen-sparse/drivers/xen/netback/netback.c | 39
-rw-r--r--  linux-2.6.7-xen-sparse/drivers/xen/netfront/netfront.c | 13
-rw-r--r--  linux-2.6.7-xen-sparse/drivers/xen/privcmd/privcmd.c | 15
-rw-r--r--  linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/io.h | 7
-rw-r--r--  linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/pgtable-2level.h | 33
-rw-r--r--  linux-2.6.7-xen-sparse/include/linux/bio.h | 304
-rw-r--r--  linux-2.6.7-xen-sparse/include/linux/page-flags.h | 343
-rw-r--r--  linux-2.6.7-xen-sparse/include/linux/skbuff.h | 1073
-rw-r--r--  linux-2.6.7-xen-sparse/mm/page_alloc.c | 5
-rw-r--r--  tools/examples/Makefile | 5
-rw-r--r--  tools/examples/xmdefconfig-netbsd | 123
-rw-r--r--  tools/examples/xmexample1 (renamed from tools/examples/xmdefconfig) | 0
-rw-r--r--  tools/examples/xmexample2 (renamed from tools/examples/xmdefconfig-example) | 0
-rw-r--r--  tools/libxc/xc.h | 2
-rw-r--r--  tools/libxc/xc_linux_save.c | 7
-rw-r--r--  tools/python/xen/xm/opts.py | 6
-rw-r--r--  xen/arch/x86/memory.c | 81
-rw-r--r--  xen/arch/x86/setup.c | 3
-rw-r--r--  xen/arch/x86/shadow.c | 84
-rw-r--r--  xen/arch/x86/smp.c | 2
-rw-r--r--  xen/common/dom0_ops.c | 9
-rw-r--r--  xen/common/domain.c | 17
-rw-r--r--  xen/common/kernel.c | 3
-rw-r--r--  xen/common/memory.c | 19
-rw-r--r--  xen/common/page_alloc.c | 10
-rw-r--r--  xen/include/asm-x86/mm.h | 2
-rw-r--r--  xen/include/hypervisor-ifs/hypervisor-if.h | 33
-rw-r--r--  xen/include/xen/sched.h | 15
48 files changed, 2763 insertions, 429 deletions
diff --git a/.rootkeys b/.rootkeys
index 2a7b97b63d..6089d9aa83 100644
--- a/.rootkeys
+++ b/.rootkeys
@@ -111,6 +111,7 @@
3f1056a9L_kqHcFheV00KbKBzv9j5w linux-2.4.26-xen-sparse/include/asm-xen/vga.h
40659defgWA92arexpMGn8X3QMDj3w linux-2.4.26-xen-sparse/include/asm-xen/xor.h
3f056927gMHl7mWB89rb73JahbhQIA linux-2.4.26-xen-sparse/include/linux/blk.h
+4124f66fPHG6yvB_vXmesjvzrJ3yMg linux-2.4.26-xen-sparse/include/linux/mm.h
401c0590D_kwJDU59X8NyvqSv_Cl2A linux-2.4.26-xen-sparse/include/linux/sched.h
40a248afgI0_JKthdYAe8beVfXSTpQ linux-2.4.26-xen-sparse/include/linux/skbuff.h
401c0592pLrp_aCbQRo9GXiYQQaVVA linux-2.4.26-xen-sparse/include/linux/timer.h
@@ -242,6 +243,9 @@
4122466356eIBnC9ot44WSVVIFyhQA linux-2.6.7-xen-sparse/include/asm-xen/queues.h
3fa8e3f0kBLeE4To2vpdi3cpJbIkbQ linux-2.6.7-xen-sparse/include/asm-xen/suspend.h
3f689063BoW-HWV3auUJ-OqXfcGArw linux-2.6.7-xen-sparse/include/asm-xen/xen_proc.h
+4124d8c4aocX7A-jIbuGraWN84pxGQ linux-2.6.7-xen-sparse/include/linux/bio.h
+4124f66fp5QwbDHEfoUIa7pqO5Xhag linux-2.6.7-xen-sparse/include/linux/page-flags.h
+4124f66f4NaKNa0xPiGGykn9QaZk3w linux-2.6.7-xen-sparse/include/linux/skbuff.h
40f56a0ddHCSs3501MY4hRf22tctOw linux-2.6.7-xen-sparse/mkbuildtree
410a94a4KT6I6X0LVc7djB39tRDp4g linux-2.6.7-xen-sparse/mm/page_alloc.c
40e1b09db5mN69Ijj0X_Eol-S7dXiw tools/Make.defs
@@ -264,9 +268,8 @@
40ee75a9xFz6S05sDKu-JCLqyVTkDA tools/examples/network
40ee75a967sxgcRY4Q7zXoVUaJ4flA tools/examples/vif-bridge
40ee75a93cqxHp6MiYXxxwR5j2_8QQ tools/examples/xend-config.sxp
-41090ec8Pj_bkgCBpg2W7WfmNkumEA tools/examples/xmdefconfig
-40cf2937oKlROYOJTN8GWwWM5AmjBg tools/examples/xmdefconfig-example
-40dfd40auJwNnb8NoiSnRkvZaaXkUg tools/examples/xmdefconfig-netbsd
+41090ec8Pj_bkgCBpg2W7WfmNkumEA tools/examples/xmexample1
+40cf2937oKlROYOJTN8GWwWM5AmjBg tools/examples/xmexample2
3fbba6dbDfYvJSsw9500b4SZyUhxjQ tools/libxc/Makefile
3fbba6dc1uU7U3IFeF6A-XEOYF2MkQ tools/libxc/rpm.spec
3fbba6dcrNxtygEcgJYAJJ1gCQqfsA tools/libxc/xc.h
diff --git a/BitKeeper/etc/ignore b/BitKeeper/etc/ignore
index c548c71cca..98eb5b8337 100644
--- a/BitKeeper/etc/ignore
+++ b/BitKeeper/etc/ignore
@@ -35,6 +35,7 @@ linux-xen-sparse
patches/*
tools/*/build/lib*/*.py
tools/balloon/balloon
+tools/check/.*
tools/misc/miniterm/miniterm
tools/misc/xen_cpuperf
tools/web-shutdown.tap
diff --git a/linux-2.4.26-xen-sparse/arch/xen/config.in b/linux-2.4.26-xen-sparse/arch/xen/config.in
index b77b979c11..8fa004a16b 100644
--- a/linux-2.4.26-xen-sparse/arch/xen/config.in
+++ b/linux-2.4.26-xen-sparse/arch/xen/config.in
@@ -20,7 +20,10 @@ endmenu
# The IBM S/390 patch needs this.
define_bool CONFIG_NO_IDLE_HZ y
-if [ "$CONFIG_XEN_PHYSDEV_ACCESS" != "y" ]; then
+if [ "$CONFIG_XEN_PHYSDEV_ACCESS" == "y" ]; then
+ define_bool CONFIG_FOREIGN_PAGES y
+else
+ define_bool CONFIG_FOREIGN_PAGES n
define_bool CONFIG_NETDEVICES y
define_bool CONFIG_VT n
fi
@@ -103,7 +106,7 @@ if [ "$CONFIG_HIGHMEM" = "y" ]; then
bool 'HIGHMEM I/O support' CONFIG_HIGHIO
fi
-define_int CONFIG_FORCE_MAX_ZONEORDER 12
+define_int CONFIG_FORCE_MAX_ZONEORDER 11
#bool 'Symmetric multi-processing support' CONFIG_SMP
#if [ "$CONFIG_SMP" = "y" -a "$CONFIG_X86_CMPXCHG" = "y" ]; then
diff --git a/linux-2.4.26-xen-sparse/arch/xen/defconfig-xen0 b/linux-2.4.26-xen-sparse/arch/xen/defconfig-xen0
index 558c70d78c..06ae1e972c 100644
--- a/linux-2.4.26-xen-sparse/arch/xen/defconfig-xen0
+++ b/linux-2.4.26-xen-sparse/arch/xen/defconfig-xen0
@@ -13,6 +13,7 @@ CONFIG_UID16=y
CONFIG_XEN_PRIVILEGED_GUEST=y
CONFIG_XEN_PHYSDEV_ACCESS=y
CONFIG_NO_IDLE_HZ=y
+CONFIG_FOREIGN_PAGES=y
#
# Code maturity level options
@@ -50,7 +51,7 @@ CONFIG_X86_TSC=y
CONFIG_X86_L1_CACHE_SHIFT=5
CONFIG_NOHIGHMEM=y
# CONFIG_HIGHMEM4G is not set
-CONFIG_FORCE_MAX_ZONEORDER=12
+CONFIG_FORCE_MAX_ZONEORDER=11
#
# General setup
diff --git a/linux-2.4.26-xen-sparse/arch/xen/defconfig-xenU b/linux-2.4.26-xen-sparse/arch/xen/defconfig-xenU
index 5a1d4803e3..9d5fdccdf8 100644
--- a/linux-2.4.26-xen-sparse/arch/xen/defconfig-xenU
+++ b/linux-2.4.26-xen-sparse/arch/xen/defconfig-xenU
@@ -13,6 +13,7 @@ CONFIG_UID16=y
# CONFIG_XEN_PRIVILEGED_GUEST is not set
# CONFIG_XEN_PHYSDEV_ACCESS is not set
CONFIG_NO_IDLE_HZ=y
+# CONFIG_FOREIGN_PAGES is not set
CONFIG_NETDEVICES=y
# CONFIG_VT is not set
@@ -52,7 +53,6 @@ CONFIG_X86_TSC=y
CONFIG_X86_L1_CACHE_SHIFT=5
CONFIG_NOHIGHMEM=y
# CONFIG_HIGHMEM4G is not set
-CONFIG_FORCE_MAX_ZONEORDER=12
#
# General setup
diff --git a/linux-2.4.26-xen-sparse/arch/xen/drivers/balloon/balloon.c b/linux-2.4.26-xen-sparse/arch/xen/drivers/balloon/balloon.c
index b72d0efe11..b13e3d75ef 100644
--- a/linux-2.4.26-xen-sparse/arch/xen/drivers/balloon/balloon.c
+++ b/linux-2.4.26-xen-sparse/arch/xen/drivers/balloon/balloon.c
@@ -36,13 +36,16 @@ typedef struct user_balloon_op {
} user_balloon_op_t;
/* END OF USER DEFINE */
-/* Dead entry written into balloon-owned entries in the PMT. */
-#define DEAD 0xdeadbeef
-
static struct proc_dir_entry *balloon_pde;
unsigned long credit;
static unsigned long current_pages, most_seen_pages;
+/*
+ * Dead entry written into balloon-owned entries in the PMT.
+ * It is deliberately different to INVALID_P2M_ENTRY.
+ */
+#define DEAD 0xdead1234
+
static inline pte_t *get_ptep(unsigned long addr)
{
pgd_t *pgd; pmd_t *pmd; pte_t *ptep;
@@ -79,17 +82,16 @@ static unsigned long inflate_balloon(unsigned long num_pages)
for ( i = 0; i < num_pages; i++, currp++ )
{
struct page *page = alloc_page(GFP_HIGHUSER);
- unsigned long pfn = page - mem_map;
+ unsigned long pfn = page - mem_map;
/* If allocation fails then free all reserved pages. */
- if ( page == 0 )
+ if ( page == NULL )
{
- printk(KERN_ERR "Unable to inflate balloon by %ld, only %ld pages free.",
- num_pages, i);
+ printk(KERN_ERR "Unable to inflate balloon by %ld, only"
+ " %ld pages free.", num_pages, i);
currp = parray;
- for(j = 0; j < i; j++, ++currp){
+ for ( j = 0; j < i; j++, currp++ )
__free_page((struct page *) (mem_map + *currp));
- }
ret = -EFAULT;
goto cleanup;
}
@@ -102,9 +104,8 @@ static unsigned long inflate_balloon(unsigned long num_pages)
{
unsigned long mfn = phys_to_machine_mapping[*currp];
curraddr = (unsigned long)page_address(mem_map + *currp);
- if (curraddr)
+ if ( curraddr != 0 )
queue_l1_entry_update(get_ptep(curraddr), 0);
-
phys_to_machine_mapping[*currp] = DEAD;
*currp = mfn;
}
@@ -313,17 +314,18 @@ claim_new_pages(unsigned long num_pages)
XEN_flush_page_update_queue();
new_page_cnt = HYPERVISOR_dom_mem_op(MEMOP_increase_reservation,
parray, num_pages, 0);
- if (new_page_cnt != num_pages)
+ if ( new_page_cnt != num_pages )
{
printk(KERN_WARNING
"claim_new_pages: xen granted only %lu of %lu requested pages\n",
new_page_cnt, num_pages);
- /* XXX
- * avoid xen lockup when user forgot to setdomainmaxmem. xen
- * usually can dribble out a few pages and then hangs
+ /*
+ * Avoid xen lockup when user forgot to setdomainmaxmem. Xen
+ * usually can dribble out a few pages and then hangs.
*/
- if (new_page_cnt < 1000) {
+ if ( new_page_cnt < 1000 )
+ {
printk(KERN_WARNING "Remember to use setdomainmaxmem\n");
HYPERVISOR_dom_mem_op(MEMOP_decrease_reservation,
parray, new_page_cnt, 0);
@@ -331,7 +333,7 @@ claim_new_pages(unsigned long num_pages)
}
}
memcpy(phys_to_machine_mapping+most_seen_pages, parray,
- new_page_cnt * sizeof(unsigned long));
+ new_page_cnt * sizeof(unsigned long));
pagetable_extend(most_seen_pages,new_page_cnt);
@@ -465,12 +467,15 @@ static int __init init_module(void)
/*
* make a new phys map if mem= says xen can give us memory to grow
*/
- if (max_pfn > start_info.nr_pages) {
+ if ( max_pfn > start_info.nr_pages )
+ {
extern unsigned long *phys_to_machine_mapping;
unsigned long *newmap;
newmap = (unsigned long *)vmalloc(max_pfn * sizeof(unsigned long));
- phys_to_machine_mapping = memcpy(newmap, phys_to_machine_mapping,
- start_info.nr_pages * sizeof(unsigned long));
+ memset(newmap, ~0, max_pfn * sizeof(unsigned long));
+ memcpy(newmap, phys_to_machine_mapping,
+ start_info.nr_pages * sizeof(unsigned long));
+ phys_to_machine_mapping = newmap;
}
return 0;
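
The init_module() hunk above grows the phys-to-machine table when mem= lets the domain expand beyond start_info.nr_pages, and it now memsets the enlarged table with ~0 before copying the old entries, so every not-yet-populated slot reads back as INVALID_P2M_ENTRY (~0UL, defined in the pgtable-2level.h hunk below) rather than stale data. A minimal user-space sketch of that idiom follows; grow_p2m() and the sizes are illustrative and not taken from the patch.

    /* Editorial sketch, not part of the patch: grow a p2m table while keeping
     * unpopulated slots at INVALID_P2M_ENTRY.  Names and sizes are made up. */
    #include <stdlib.h>
    #include <string.h>

    #define INVALID_P2M_ENTRY (~0UL)

    unsigned long *grow_p2m(unsigned long *old, unsigned long old_pages,
                            unsigned long max_pages)
    {
        unsigned long *newmap = malloc(max_pages * sizeof(unsigned long));
        if (newmap == NULL)
            return old;                 /* keep the old table on failure */
        /* memset() writes 0xff into every byte, so each unsigned long slot
         * becomes ~0UL, i.e. INVALID_P2M_ENTRY. */
        memset(newmap, ~0, max_pages * sizeof(unsigned long));
        memcpy(newmap, old, old_pages * sizeof(unsigned long));
        /* As in the driver, the old table is left in place for its owner. */
        return newmap;
    }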
diff --git a/linux-2.4.26-xen-sparse/arch/xen/mm/ioremap.c b/linux-2.4.26-xen-sparse/arch/xen/mm/ioremap.c
index 9cd34cd925..34c95c84b5 100644
--- a/linux-2.4.26-xen-sparse/arch/xen/mm/ioremap.c
+++ b/linux-2.4.26-xen-sparse/arch/xen/mm/ioremap.c
@@ -115,17 +115,10 @@ int direct_remap_area_pages(struct mm_struct *mm,
#define MAX_DIRECTMAP_MMU_QUEUE 130
mmu_update_t u[MAX_DIRECTMAP_MMU_QUEUE], *w, *v;
- if ( domid != 0 )
- {
- u[0].ptr = MMU_EXTENDED_COMMAND;
- u[0].val = MMUEXT_SET_FOREIGNDOM;
- u[0].val |= (unsigned long)domid << 16;
- v = w = &u[1];
- }
- else
- {
- v = w = &u[0];
- }
+ u[0].ptr = MMU_EXTENDED_COMMAND;
+ u[0].val = MMUEXT_SET_FOREIGNDOM;
+ u[0].val |= (unsigned long)domid << 16;
+ v = w = &u[1];
start_address = address;
diff --git a/linux-2.4.26-xen-sparse/drivers/char/mem.c b/linux-2.4.26-xen-sparse/drivers/char/mem.c
index 5635b269aa..f0d8502190 100644
--- a/linux-2.4.26-xen-sparse/drivers/char/mem.c
+++ b/linux-2.4.26-xen-sparse/drivers/char/mem.c
@@ -237,6 +237,9 @@ static int mmap_mem(struct file * file, struct vm_area_struct * vma)
if (!(start_info.flags & SIF_PRIVILEGED))
return -ENXIO;
+ if (file->private_data == NULL)
+ file->private_data = (void *)(unsigned long)DOMID_IO;
+
/* DONTCOPY is essential for Xen as copy_page_range is broken. */
vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
diff --git a/linux-2.4.26-xen-sparse/include/asm-xen/pgtable-2level.h b/linux-2.4.26-xen-sparse/include/asm-xen/pgtable-2level.h
index e6845abc86..9ddd30bf73 100644
--- a/linux-2.4.26-xen-sparse/include/asm-xen/pgtable-2level.h
+++ b/linux-2.4.26-xen-sparse/include/asm-xen/pgtable-2level.h
@@ -58,7 +58,19 @@ static inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
* then we'll have p2m(m2p(MFN))==MFN.
* If we detect a special mapping then it doesn't have a 'struct page'.
* We force !VALID_PAGE() by returning an out-of-range pointer.
+ *
+ * NB. These checks require that, for any MFN that is not in our reservation,
+ * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if
+ * we are foreign-mapping the MFN, and the other domain has m2p(MFN) == PFN.
+ * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety.
+ *
+ * NB2. When deliberately mapping foreign pages into the p2m table, you *must*
+ * use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we
+ * require. In all the cases we care about, the high bit gets shifted out
+ * (e.g., phys_to_machine()) so behaviour there is correct.
*/
+#define INVALID_P2M_ENTRY (~0UL)
+#define FOREIGN_FRAME(_m) ((_m) | (1UL<<((sizeof(unsigned long)*8)-1)))
#define pte_page(_pte) \
({ \
unsigned long mfn = (_pte).pte_low >> PAGE_SHIFT; \
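
The comment added above states the invariant the rest of this changeset relies on: a p2m slot that refers to another domain's memory must be written with FOREIGN_FRAME(mfn) so that pte_page()/VALID_PAGE() reject it, and empty slots must hold INVALID_P2M_ENTRY rather than a plausible-looking frame number. The standalone check below is an editorial illustration of the two macros' bit-level behaviour, not code from the patch.

    /* Editorial sketch: bit-level behaviour of FOREIGN_FRAME() and
     * INVALID_P2M_ENTRY.  Compiles and runs on its own. */
    #include <assert.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12                      /* x86 page size */
    #define INVALID_P2M_ENTRY (~0UL)
    #define FOREIGN_FRAME(_m) ((_m) | (1UL<<((sizeof(unsigned long)*8)-1)))

    int main(void)
    {
        unsigned long mfn = 0x12345UL;         /* made-up machine frame */
        unsigned long entry = FOREIGN_FRAME(mfn);
        unsigned long high_bit = 1UL << (sizeof(unsigned long)*8 - 1);

        assert(entry & high_bit);              /* marked as foreign */
        assert((entry & ~high_bit) == mfn);    /* frame still recoverable */
        assert(INVALID_P2M_ENTRY == ~0UL);     /* empty slots are all-ones */

        /* Building a machine address (mfn << PAGE_SHIFT) shifts the marker
         * bit out, which is why phys_to_machine()-style users are unaffected. */
        assert(((entry << PAGE_SHIFT) >> PAGE_SHIFT) == mfn);

        printf("foreign p2m entry for mfn %#lx: %#lx\n", mfn, entry);
        return 0;
    }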
diff --git a/linux-2.4.26-xen-sparse/include/linux/mm.h b/linux-2.4.26-xen-sparse/include/linux/mm.h
new file mode 100644
index 0000000000..b24c6d6163
--- /dev/null
+++ b/linux-2.4.26-xen-sparse/include/linux/mm.h
@@ -0,0 +1,703 @@
+#ifndef _LINUX_MM_H
+#define _LINUX_MM_H
+
+#include <linux/sched.h>
+#include <linux/errno.h>
+
+#ifdef __KERNEL__
+
+#include <linux/config.h>
+#include <linux/string.h>
+#include <linux/list.h>
+#include <linux/mmzone.h>
+#include <linux/swap.h>
+#include <linux/rbtree.h>
+
+extern unsigned long max_mapnr;
+extern unsigned long num_physpages;
+extern unsigned long num_mappedpages;
+extern void * high_memory;
+extern int page_cluster;
+/* The inactive_clean lists are per zone. */
+extern struct list_head active_list;
+extern struct list_head inactive_list;
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/atomic.h>
+
+/*
+ * Linux kernel virtual memory manager primitives.
+ * The idea being to have a "virtual" mm in the same way
+ * we have a virtual fs - giving a cleaner interface to the
+ * mm details, and allowing different kinds of memory mappings
+ * (from shared memory to executable loading to arbitrary
+ * mmap() functions).
+ */
+
+/*
+ * This struct defines a memory VMM memory area. There is one of these
+ * per VM-area/task. A VM area is any part of the process virtual memory
+ * space that has a special rule for the page-fault handlers (ie a shared
+ * library, the executable area etc).
+ */
+struct vm_area_struct {
+ struct mm_struct * vm_mm; /* The address space we belong to. */
+ unsigned long vm_start; /* Our start address within vm_mm. */
+ unsigned long vm_end; /* The first byte after our end address
+ within vm_mm. */
+
+ /* linked list of VM areas per task, sorted by address */
+ struct vm_area_struct *vm_next;
+
+ pgprot_t vm_page_prot; /* Access permissions of this VMA. */
+ unsigned long vm_flags; /* Flags, listed below. */
+
+ rb_node_t vm_rb;
+
+ /*
+ * For areas with an address space and backing store,
+ * one of the address_space->i_mmap{,shared} lists,
+ * for shm areas, the list of attaches, otherwise unused.
+ */
+ struct vm_area_struct *vm_next_share;
+ struct vm_area_struct **vm_pprev_share;
+
+ /* Function pointers to deal with this struct. */
+ struct vm_operations_struct * vm_ops;
+
+ /* Information about our backing store: */
+ unsigned long vm_pgoff; /* Offset (within vm_file) in PAGE_SIZE
+ units, *not* PAGE_CACHE_SIZE */
+ struct file * vm_file; /* File we map to (can be NULL). */
+ unsigned long vm_raend; /* XXX: put full readahead info here. */
+ void * vm_private_data; /* was vm_pte (shared mem) */
+};
+
+/*
+ * vm_flags..
+ */
+#define VM_READ 0x00000001 /* currently active flags */
+#define VM_WRITE 0x00000002
+#define VM_EXEC 0x00000004
+#define VM_SHARED 0x00000008
+
+#define VM_MAYREAD 0x00000010 /* limits for mprotect() etc */
+#define VM_MAYWRITE 0x00000020
+#define VM_MAYEXEC 0x00000040
+#define VM_MAYSHARE 0x00000080
+
+#define VM_GROWSDOWN 0x00000100 /* general info on the segment */
+#define VM_GROWSUP 0x00000200
+#define VM_SHM 0x00000400 /* shared memory area, don't swap out */
+#define VM_DENYWRITE 0x00000800 /* ETXTBSY on write attempts.. */
+
+#define VM_EXECUTABLE 0x00001000
+#define VM_LOCKED 0x00002000
+#define VM_IO 0x00004000 /* Memory mapped I/O or similar */
+
+ /* Used by sys_madvise() */
+#define VM_SEQ_READ 0x00008000 /* App will access data sequentially */
+#define VM_RAND_READ 0x00010000 /* App will not benefit from clustered reads */
+
+#define VM_DONTCOPY 0x00020000 /* Do not copy this vma on fork */
+#define VM_DONTEXPAND 0x00040000 /* Cannot expand with mremap() */
+#define VM_RESERVED 0x00080000 /* Don't unmap it from swap_out */
+
+#ifndef VM_STACK_FLAGS
+#define VM_STACK_FLAGS 0x00000177
+#endif
+
+#define VM_READHINTMASK (VM_SEQ_READ | VM_RAND_READ)
+#define VM_ClearReadHint(v) (v)->vm_flags &= ~VM_READHINTMASK
+#define VM_NormalReadHint(v) (!((v)->vm_flags & VM_READHINTMASK))
+#define VM_SequentialReadHint(v) ((v)->vm_flags & VM_SEQ_READ)
+#define VM_RandomReadHint(v) ((v)->vm_flags & VM_RAND_READ)
+
+/* read ahead limits */
+extern int vm_min_readahead;
+extern int vm_max_readahead;
+
+/*
+ * mapping from the currently active vm_flags protection bits (the
+ * low four bits) to a page protection mask..
+ */
+extern pgprot_t protection_map[16];
+
+
+/*
+ * These are the virtual MM functions - opening of an area, closing and
+ * unmapping it (needed to keep files on disk up-to-date etc), pointer
+ * to the functions called when a no-page or a wp-page exception occurs.
+ */
+struct vm_operations_struct {
+ void (*open)(struct vm_area_struct * area);
+ void (*close)(struct vm_area_struct * area);
+ struct page * (*nopage)(struct vm_area_struct * area, unsigned long address, int unused);
+};
+
+/*
+ * Each physical page in the system has a struct page associated with
+ * it to keep track of whatever it is we are using the page for at the
+ * moment. Note that we have no way to track which tasks are using
+ * a page.
+ *
+ * Try to keep the most commonly accessed fields in single cache lines
+ * here (16 bytes or greater). This ordering should be particularly
+ * beneficial on 32-bit processors.
+ *
+ * The first line is data used in page cache lookup, the second line
+ * is used for linear searches (eg. clock algorithm scans).
+ *
+ * TODO: make this structure smaller, it could be as small as 32 bytes.
+ */
+typedef struct page {
+ struct list_head list; /* ->mapping has some page lists. */
+ struct address_space *mapping; /* The inode (or ...) we belong to. */
+ unsigned long index; /* Our offset within mapping. */
+ struct page *next_hash; /* Next page sharing our hash bucket in
+ the pagecache hash table. */
+ atomic_t count; /* Usage count, see below. */
+ unsigned long flags; /* atomic flags, some possibly
+ updated asynchronously */
+ struct list_head lru; /* Pageout list, eg. active_list;
+ protected by pagemap_lru_lock !! */
+ struct page **pprev_hash; /* Complement to *next_hash. */
+ struct buffer_head * buffers; /* Buffer maps us to a disk block. */
+
+ /*
+ * On machines where all RAM is mapped into kernel address space,
+ * we can simply calculate the virtual address. On machines with
+ * highmem some memory is mapped into kernel virtual memory
+ * dynamically, so we need a place to store that address.
+ * Note that this field could be 16 bits on x86 ... ;)
+ *
+ * Architectures with slow multiplication can define
+ * WANT_PAGE_VIRTUAL in asm/page.h
+ */
+#if defined(CONFIG_HIGHMEM) || defined(WANT_PAGE_VIRTUAL)
+ void *virtual; /* Kernel virtual address (NULL if
+ not kmapped, ie. highmem) */
+#endif /* CONFIG_HIGHMEM || WANT_PAGE_VIRTUAL */
+} mem_map_t;
+
+/*
+ * Methods to modify the page usage count.
+ *
+ * What counts for a page usage:
+ * - cache mapping (page->mapping)
+ * - disk mapping (page->buffers)
+ * - page mapped in a task's page tables, each mapping
+ * is counted separately
+ *
+ * Also, many kernel routines increase the page count before a critical
+ * routine so they can be sure the page doesn't go away from under them.
+ */
+#define get_page(p) atomic_inc(&(p)->count)
+#define put_page(p) __free_page(p)
+#define put_page_testzero(p) atomic_dec_and_test(&(p)->count)
+#define page_count(p) atomic_read(&(p)->count)
+#define set_page_count(p,v) atomic_set(&(p)->count, v)
+
+/*
+ * Various page->flags bits:
+ *
+ * PG_reserved is set for special pages, which can never be swapped
+ * out. Some of them might not even exist (eg empty_bad_page)...
+ *
+ * Multiple processes may "see" the same page. E.g. for untouched
+ * mappings of /dev/null, all processes see the same page full of
+ * zeroes, and text pages of executables and shared libraries have
+ * only one copy in memory, at most, normally.
+ *
+ * For the non-reserved pages, page->count denotes a reference count.
+ * page->count == 0 means the page is free.
+ * page->count == 1 means the page is used for exactly one purpose
+ * (e.g. a private data page of one process).
+ *
+ * A page may be used for kmalloc() or anyone else who does a
+ * __get_free_page(). In this case the page->count is at least 1, and
+ * all other fields are unused but should be 0 or NULL. The
+ * management of this page is the responsibility of the one who uses
+ * it.
+ *
+ * The other pages (we may call them "process pages") are completely
+ * managed by the Linux memory manager: I/O, buffers, swapping etc.
+ * The following discussion applies only to them.
+ *
+ * A page may belong to an inode's memory mapping. In this case,
+ * page->mapping is the pointer to the inode, and page->index is the
+ * file offset of the page, in units of PAGE_CACHE_SIZE.
+ *
+ * A page may have buffers allocated to it. In this case,
+ * page->buffers is a circular list of these buffer heads. Else,
+ * page->buffers == NULL.
+ *
+ * For pages belonging to inodes, the page->count is the number of
+ * attaches, plus 1 if buffers are allocated to the page, plus one
+ * for the page cache itself.
+ *
+ * All pages belonging to an inode are in these doubly linked lists:
+ * mapping->clean_pages, mapping->dirty_pages and mapping->locked_pages;
+ * using the page->list list_head. These fields are also used for
+ * freelist management (when page->count==0).
+ *
+ * There is also a hash table mapping (mapping,index) to the page
+ * in memory if present. The lists for this hash table use the fields
+ * page->next_hash and page->pprev_hash.
+ *
+ * All process pages can do I/O:
+ * - inode pages may need to be read from disk,
+ * - inode pages which have been modified and are MAP_SHARED may need
+ * to be written to disk,
+ * - private pages which have been modified may need to be swapped out
+ * to swap space and (later) to be read back into memory.
+ * During disk I/O, PG_locked is used. This bit is set before I/O
+ * and reset when I/O completes. page_waitqueue(page) is a wait queue of all
+ * tasks waiting for the I/O on this page to complete.
+ * PG_uptodate tells whether the page's contents is valid.
+ * When a read completes, the page becomes uptodate, unless a disk I/O
+ * error happened.
+ *
+ * For choosing which pages to swap out, inode pages carry a
+ * PG_referenced bit, which is set any time the system accesses
+ * that page through the (mapping,index) hash table. This referenced
+ * bit, together with the referenced bit in the page tables, is used
+ * to manipulate page->age and move the page across the active,
+ * inactive_dirty and inactive_clean lists.
+ *
+ * Note that the referenced bit, the page->lru list_head and the
+ * active, inactive_dirty and inactive_clean lists are protected by
+ * the pagemap_lru_lock, and *NOT* by the usual PG_locked bit!
+ *
+ * PG_skip is used on sparc/sparc64 architectures to "skip" certain
+ * parts of the address space.
+ *
+ * PG_error is set to indicate that an I/O error occurred on this page.
+ *
+ * PG_arch_1 is an architecture specific page state bit. The generic
+ * code guarantees that this bit is cleared for a page when it first
+ * is entered into the page cache.
+ *
+ * PG_highmem pages are not permanently mapped into the kernel virtual
+ * address space, they need to be kmapped separately for doing IO on
+ * the pages. The struct page (these bits with information) are always
+ * mapped into kernel address space...
+ */
+#define PG_locked 0 /* Page is locked. Don't touch. */
+#define PG_error 1
+#define PG_referenced 2
+#define PG_uptodate 3
+#define PG_dirty 4
+#define PG_unused 5
+#define PG_lru 6
+#define PG_active 7
+#define PG_slab 8
+#define PG_skip 10
+#define PG_highmem 11
+#define PG_checked 12 /* kill me in 2.5.<early>. */
+#define PG_arch_1 13
+#define PG_reserved 14
+#define PG_launder 15 /* written out by VM pressure.. */
+#define PG_fs_1 16 /* Filesystem specific */
+#define PG_foreign 21 /* Page belongs to foreign allocator */
+
+#ifndef arch_set_page_uptodate
+#define arch_set_page_uptodate(page)
+#endif
+
+/* Make it prettier to test the above... */
+#define UnlockPage(page) unlock_page(page)
+#define Page_Uptodate(page) test_bit(PG_uptodate, &(page)->flags)
+#define SetPageUptodate(page) \
+ do { \
+ arch_set_page_uptodate(page); \
+ set_bit(PG_uptodate, &(page)->flags); \
+ } while (0)
+#define ClearPageUptodate(page) clear_bit(PG_uptodate, &(page)->flags)
+#define PageDirty(page) test_bit(PG_dirty, &(page)->flags)
+#define SetPageDirty(page) set_bit(PG_dirty, &(page)->flags)
+#define ClearPageDirty(page) clear_bit(PG_dirty, &(page)->flags)
+#define PageLocked(page) test_bit(PG_locked, &(page)->flags)
+#define LockPage(page) set_bit(PG_locked, &(page)->flags)
+#define TryLockPage(page) test_and_set_bit(PG_locked, &(page)->flags)
+#define PageChecked(page) test_bit(PG_checked, &(page)->flags)
+#define SetPageChecked(page) set_bit(PG_checked, &(page)->flags)
+#define ClearPageChecked(page) clear_bit(PG_checked, &(page)->flags)
+#define PageLaunder(page) test_bit(PG_launder, &(page)->flags)
+#define SetPageLaunder(page) set_bit(PG_launder, &(page)->flags)
+#define ClearPageLaunder(page) clear_bit(PG_launder, &(page)->flags)
+#define ClearPageArch1(page) clear_bit(PG_arch_1, &(page)->flags)
+
+/* A foreign page uses a custom destructor rather than the buddy allocator. */
+#ifdef CONFIG_FOREIGN_PAGES
+#define PageForeign(page) test_bit(PG_foreign, &(page)->flags)
+#define SetPageForeign(page) set_bit(PG_foreign, &(page)->flags)
+#define ClearPageForeign(page) clear_bit(PG_foreign, &(page)->flags)
+#define PageForeignDestructor(page) \
+ ( (void (*) (struct page *)) (page)->mapping )
+#else
+#define PageForeign(page) 0
+#define PageForeignDestructor(page) void
+#endif
+
+/*
+ * The zone field is never updated after free_area_init_core()
+ * sets it, so none of the operations on it need to be atomic.
+ */
+#define NODE_SHIFT 4
+#define ZONE_SHIFT (BITS_PER_LONG - 8)
+
+struct zone_struct;
+extern struct zone_struct *zone_table[];
+
+static inline zone_t *page_zone(struct page *page)
+{
+ return zone_table[page->flags >> ZONE_SHIFT];
+}
+
+static inline void set_page_zone(struct page *page, unsigned long zone_num)
+{
+ page->flags &= ~(~0UL << ZONE_SHIFT);
+ page->flags |= zone_num << ZONE_SHIFT;
+}
+
+/*
+ * In order to avoid #ifdefs within C code itself, we define
+ * set_page_address to a noop for non-highmem machines, where
+ * the field isn't useful.
+ * The same is true for page_address() in arch-dependent code.
+ */
+#if defined(CONFIG_HIGHMEM) || defined(WANT_PAGE_VIRTUAL)
+
+#define set_page_address(page, address) \
+ do { \
+ (page)->virtual = (address); \
+ } while(0)
+
+#else /* CONFIG_HIGHMEM || WANT_PAGE_VIRTUAL */
+#define set_page_address(page, address) do { } while(0)
+#endif /* CONFIG_HIGHMEM || WANT_PAGE_VIRTUAL */
+
+/*
+ * Permanent address of a page. Obviously must never be
+ * called on a highmem page.
+ */
+#if defined(CONFIG_HIGHMEM) || defined(WANT_PAGE_VIRTUAL)
+
+#define page_address(page) ((page)->virtual)
+
+#else /* CONFIG_HIGHMEM || WANT_PAGE_VIRTUAL */
+
+#define page_address(page) \
+ __va( (((page) - page_zone(page)->zone_mem_map) << PAGE_SHIFT) \
+ + page_zone(page)->zone_start_paddr)
+
+#endif /* CONFIG_HIGHMEM || WANT_PAGE_VIRTUAL */
+
+extern void FASTCALL(set_page_dirty(struct page *));
+
+/*
+ * The first mb is necessary to safely close the critical section opened by the
+ * TryLockPage(), the second mb is necessary to enforce ordering between
+ * the clear_bit and the read of the waitqueue (to avoid SMP races with a
+ * parallel wait_on_page).
+ */
+#define PageError(page) test_bit(PG_error, &(page)->flags)
+#define SetPageError(page) set_bit(PG_error, &(page)->flags)
+#define ClearPageError(page) clear_bit(PG_error, &(page)->flags)
+#define PageReferenced(page) test_bit(PG_referenced, &(page)->flags)
+#define SetPageReferenced(page) set_bit(PG_referenced, &(page)->flags)
+#define ClearPageReferenced(page) clear_bit(PG_referenced, &(page)->flags)
+#define PageTestandClearReferenced(page) test_and_clear_bit(PG_referenced, &(page)->flags)
+#define PageSlab(page) test_bit(PG_slab, &(page)->flags)
+#define PageSetSlab(page) set_bit(PG_slab, &(page)->flags)
+#define PageClearSlab(page) clear_bit(PG_slab, &(page)->flags)
+#define PageReserved(page) test_bit(PG_reserved, &(page)->flags)
+
+#define PageActive(page) test_bit(PG_active, &(page)->flags)
+#define SetPageActive(page) set_bit(PG_active, &(page)->flags)
+#define ClearPageActive(page) clear_bit(PG_active, &(page)->flags)
+
+#define PageLRU(page) test_bit(PG_lru, &(page)->flags)
+#define TestSetPageLRU(page) test_and_set_bit(PG_lru, &(page)->flags)
+#define TestClearPageLRU(page) test_and_clear_bit(PG_lru, &(page)->flags)
+
+#ifdef CONFIG_HIGHMEM
+#define PageHighMem(page) test_bit(PG_highmem, &(page)->flags)
+#else
+#define PageHighMem(page) 0 /* needed to optimize away at compile time */
+#endif
+
+#define SetPageReserved(page) set_bit(PG_reserved, &(page)->flags)
+#define ClearPageReserved(page) clear_bit(PG_reserved, &(page)->flags)
+
+/*
+ * Error return values for the *_nopage functions
+ */
+#define NOPAGE_SIGBUS (NULL)
+#define NOPAGE_OOM ((struct page *) (-1))
+
+/* The array of struct pages */
+extern mem_map_t * mem_map;
+
+/*
+ * There is only one page-allocator function, and two main namespaces to
+ * it. The alloc_page*() variants return 'struct page *' and as such
+ * can allocate highmem pages, the *get*page*() variants return
+ * virtual kernel addresses to the allocated page(s).
+ */
+extern struct page * FASTCALL(_alloc_pages(unsigned int gfp_mask, unsigned int order));
+extern struct page * FASTCALL(__alloc_pages(unsigned int gfp_mask, unsigned int order, zonelist_t *zonelist));
+extern struct page * alloc_pages_node(int nid, unsigned int gfp_mask, unsigned int order);
+
+static inline struct page * alloc_pages(unsigned int gfp_mask, unsigned int order)
+{
+ /*
+ * Gets optimized away by the compiler.
+ */
+ if (order >= MAX_ORDER)
+ return NULL;
+ return _alloc_pages(gfp_mask, order);
+}
+
+#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
+
+extern unsigned long FASTCALL(__get_free_pages(unsigned int gfp_mask, unsigned int order));
+extern unsigned long FASTCALL(get_zeroed_page(unsigned int gfp_mask));
+
+#define __get_free_page(gfp_mask) \
+ __get_free_pages((gfp_mask),0)
+
+#define __get_dma_pages(gfp_mask, order) \
+ __get_free_pages((gfp_mask) | GFP_DMA,(order))
+
+/*
+ * The old interface name will be removed in 2.5:
+ */
+#define get_free_page get_zeroed_page
+
+/*
+ * There is only one 'core' page-freeing function.
+ */
+extern void FASTCALL(__free_pages(struct page *page, unsigned int order));
+extern void FASTCALL(free_pages(unsigned long addr, unsigned int order));
+
+#define __free_page(page) __free_pages((page), 0)
+#define free_page(addr) free_pages((addr),0)
+
+extern void show_free_areas(void);
+extern void show_free_areas_node(pg_data_t *pgdat);
+
+extern void clear_page_tables(struct mm_struct *, unsigned long, int);
+
+extern int fail_writepage(struct page *);
+struct page * shmem_nopage(struct vm_area_struct * vma, unsigned long address, int unused);
+struct file *shmem_file_setup(char * name, loff_t size);
+extern void shmem_lock(struct file * file, int lock);
+extern int shmem_zero_setup(struct vm_area_struct *);
+
+extern void zap_page_range(struct mm_struct *mm, unsigned long address, unsigned long size);
+extern int copy_page_range(struct mm_struct *dst, struct mm_struct *src, struct vm_area_struct *vma);
+extern int remap_page_range(unsigned long from, unsigned long to, unsigned long size, pgprot_t prot);
+extern int zeromap_page_range(unsigned long from, unsigned long size, pgprot_t prot);
+
+extern int vmtruncate(struct inode * inode, loff_t offset);
+extern pmd_t *FASTCALL(__pmd_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address));
+extern pte_t *FASTCALL(pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address));
+extern int handle_mm_fault(struct mm_struct *mm,struct vm_area_struct *vma, unsigned long address, int write_access);
+extern int make_pages_present(unsigned long addr, unsigned long end);
+extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
+extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char *dst, int len);
+extern int ptrace_writedata(struct task_struct *tsk, char * src, unsigned long dst, int len);
+extern int ptrace_attach(struct task_struct *tsk);
+extern int ptrace_detach(struct task_struct *, unsigned int);
+extern void ptrace_disable(struct task_struct *);
+extern int ptrace_check_attach(struct task_struct *task, int kill);
+
+int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start,
+ int len, int write, int force, struct page **pages, struct vm_area_struct **vmas);
+
+/*
+ * On a two-level page table, this ends up being trivial. Thus the
+ * inlining and the symmetry break with pte_alloc() that does all
+ * of this out-of-line.
+ */
+static inline pmd_t *pmd_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
+{
+ if (pgd_none(*pgd))
+ return __pmd_alloc(mm, pgd, address);
+ return pmd_offset(pgd, address);
+}
+
+extern int pgt_cache_water[2];
+extern int check_pgt_cache(void);
+
+extern void free_area_init(unsigned long * zones_size);
+extern void free_area_init_node(int nid, pg_data_t *pgdat, struct page *pmap,
+ unsigned long * zones_size, unsigned long zone_start_paddr,
+ unsigned long *zholes_size);
+extern void mem_init(void);
+extern void show_mem(void);
+extern void si_meminfo(struct sysinfo * val);
+extern void swapin_readahead(swp_entry_t);
+
+extern struct address_space swapper_space;
+#define PageSwapCache(page) ((page)->mapping == &swapper_space)
+
+static inline int is_page_cache_freeable(struct page * page)
+{
+ return page_count(page) - !!page->buffers == 1;
+}
+
+extern int FASTCALL(can_share_swap_page(struct page *));
+extern int FASTCALL(remove_exclusive_swap_page(struct page *));
+
+extern void __free_pte(pte_t);
+
+/* mmap.c */
+extern void lock_vma_mappings(struct vm_area_struct *);
+extern void unlock_vma_mappings(struct vm_area_struct *);
+extern void insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
+extern void __insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
+extern void build_mmap_rb(struct mm_struct *);
+extern void exit_mmap(struct mm_struct *);
+
+extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
+
+extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+ unsigned long len, unsigned long prot,
+ unsigned long flag, unsigned long pgoff);
+
+static inline unsigned long do_mmap(struct file *file, unsigned long addr,
+ unsigned long len, unsigned long prot,
+ unsigned long flag, unsigned long offset)
+{
+ unsigned long ret = -EINVAL;
+ if ((offset + PAGE_ALIGN(len)) < offset)
+ goto out;
+ if (!(offset & ~PAGE_MASK))
+ ret = do_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
+out:
+ return ret;
+}
+
+extern int do_munmap(struct mm_struct *, unsigned long, size_t);
+
+extern unsigned long do_brk(unsigned long, unsigned long);
+
+static inline void __vma_unlink(struct mm_struct * mm, struct vm_area_struct * vma, struct vm_area_struct * prev)
+{
+ prev->vm_next = vma->vm_next;
+ rb_erase(&vma->vm_rb, &mm->mm_rb);
+ if (mm->mmap_cache == vma)
+ mm->mmap_cache = prev;
+}
+
+static inline int can_vma_merge(struct vm_area_struct * vma, unsigned long vm_flags)
+{
+ if (!vma->vm_file && vma->vm_flags == vm_flags)
+ return 1;
+ else
+ return 0;
+}
+
+struct zone_t;
+/* filemap.c */
+extern void remove_inode_page(struct page *);
+extern unsigned long page_unuse(struct page *);
+extern void truncate_inode_pages(struct address_space *, loff_t);
+
+/* generic vm_area_ops exported for stackable file systems */
+extern int filemap_sync(struct vm_area_struct *, unsigned long, size_t, unsigned int);
+extern struct page *filemap_nopage(struct vm_area_struct *, unsigned long, int);
+
+/*
+ * GFP bitmasks..
+ */
+/* Zone modifiers in GFP_ZONEMASK (see linux/mmzone.h - low four bits) */
+#define __GFP_DMA 0x01
+#define __GFP_HIGHMEM 0x02
+
+/* Action modifiers - doesn't change the zoning */
+#define __GFP_WAIT 0x10 /* Can wait and reschedule? */
+#define __GFP_HIGH 0x20 /* Should access emergency pools? */
+#define __GFP_IO 0x40 /* Can start low memory physical IO? */
+#define __GFP_HIGHIO 0x80 /* Can start high mem physical IO? */
+#define __GFP_FS 0x100 /* Can call down to low-level FS? */
+
+#define GFP_NOHIGHIO (__GFP_HIGH | __GFP_WAIT | __GFP_IO)
+#define GFP_NOIO (__GFP_HIGH | __GFP_WAIT)
+#define GFP_NOFS (__GFP_HIGH | __GFP_WAIT | __GFP_IO | __GFP_HIGHIO)
+#define GFP_ATOMIC (__GFP_HIGH)
+#define GFP_USER ( __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS)
+#define GFP_HIGHUSER ( __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS | __GFP_HIGHMEM)
+#define GFP_KERNEL (__GFP_HIGH | __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS)
+#define GFP_NFS (__GFP_HIGH | __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS)
+#define GFP_KSWAPD ( __GFP_WAIT | __GFP_IO | __GFP_HIGHIO | __GFP_FS)
+
+/* Flag - indicates that the buffer will be suitable for DMA. Ignored on some
+ platforms, used as appropriate on others */
+
+#define GFP_DMA __GFP_DMA
+
+static inline unsigned int pf_gfp_mask(unsigned int gfp_mask)
+{
+ /* avoid all memory balancing I/O methods if this task cannot block on I/O */
+ if (current->flags & PF_NOIO)
+ gfp_mask &= ~(__GFP_IO | __GFP_HIGHIO | __GFP_FS);
+
+ return gfp_mask;
+}
+
+/* vma is the first one with address < vma->vm_end,
+ * and even address < vma->vm_start. Have to extend vma. */
+static inline int expand_stack(struct vm_area_struct * vma, unsigned long address)
+{
+ unsigned long grow;
+
+ /*
+ * vma->vm_start/vm_end cannot change under us because the caller is required
+ * to hold the mmap_sem in write mode. We need to get the spinlock only
+ * before relocating the vma range ourself.
+ */
+ address &= PAGE_MASK;
+ spin_lock(&vma->vm_mm->page_table_lock);
+ grow = (vma->vm_start - address) >> PAGE_SHIFT;
+ if (vma->vm_end - address > current->rlim[RLIMIT_STACK].rlim_cur ||
+ ((vma->vm_mm->total_vm + grow) << PAGE_SHIFT) > current->rlim[RLIMIT_AS].rlim_cur) {
+ spin_unlock(&vma->vm_mm->page_table_lock);
+ return -ENOMEM;
+ }
+ vma->vm_start = address;
+ vma->vm_pgoff -= grow;
+ vma->vm_mm->total_vm += grow;
+ if (vma->vm_flags & VM_LOCKED)
+ vma->vm_mm->locked_vm += grow;
+ spin_unlock(&vma->vm_mm->page_table_lock);
+ return 0;
+}
+
+/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
+extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
+extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
+ struct vm_area_struct **pprev);
+
+/* Look up the first VMA which intersects the interval start_addr..end_addr-1,
+ NULL if none. Assume start_addr < end_addr. */
+static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
+{
+ struct vm_area_struct * vma = find_vma(mm,start_addr);
+
+ if (vma && end_addr <= vma->vm_start)
+ vma = NULL;
+ return vma;
+}
+
+extern struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr);
+
+extern struct page * vmalloc_to_page(void *addr);
+
+#endif /* __KERNEL__ */
+
+#endif
diff --git a/linux-2.4.26-xen-sparse/mm/page_alloc.c b/linux-2.4.26-xen-sparse/mm/page_alloc.c
index 62ed7751a5..fda37e1929 100644
--- a/linux-2.4.26-xen-sparse/mm/page_alloc.c
+++ b/linux-2.4.26-xen-sparse/mm/page_alloc.c
@@ -89,6 +89,9 @@ static void __free_pages_ok (struct page *page, unsigned int order)
struct page *base;
zone_t *zone;
+ if (PageForeign(page))
+ return (PageForeignDestructor(page))(page);
+
/*
* Yes, think what happens when other parts of the kernel take
* a reference to a page in order to pin it for io. -ben
@@ -102,7 +105,7 @@ static void __free_pages_ok (struct page *page, unsigned int order)
if (page->buffers)
BUG();
if (page->mapping)
- return (*(void(*)(struct page *))page->mapping)(page);
+ BUG();
if (!VALID_PAGE(page))
BUG();
if (PageLocked(page))
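
This hunk, together with the PG_foreign machinery added in linux/mm.h above, replaces the previous convention in which a non-NULL page->mapping was silently treated as a destructor: __free_pages_ok() now diverts only pages explicitly flagged with SetPageForeign() to their registered destructor, and BUGs on any other page that still has a mapping. The netback_init() hunk further down shows the real registration; the sketch below is a self-contained user-space model of the same flow, with all names hypothetical.

    /* Editorial sketch (user-space model, not kernel code) of the PG_foreign
     * hand-off introduced by this patch.  All names here are illustrative. */
    #include <stdio.h>

    struct page {
        unsigned long flags;
        void (*foreign_dtor)(struct page *);   /* plays the role of ->mapping */
    };

    #define PG_foreign 21
    #define PageForeign(p)            (((p)->flags >> PG_foreign) & 1UL)
    #define SetPageForeign(p)         ((p)->flags |= 1UL << PG_foreign)
    #define PageForeignDestructor(p)  ((p)->foreign_dtor)

    static void netif_page_release_mock(struct page *page)
    {
        /* The real destructor requeues the page for the network backend. */
        printf("foreign destructor called for page %p\n", (void *)page);
    }

    static void free_page_mock(struct page *page)
    {
        /* Mirrors the new check at the top of __free_pages_ok(): foreign
         * pages never reach the buddy allocator. */
        if (PageForeign(page)) {
            (PageForeignDestructor(page))(page);
            return;
        }
        printf("page %p returned to the buddy allocator\n", (void *)page);
    }

    int main(void)
    {
        struct page pg = { 0, NULL };
        SetPageForeign(&pg);
        PageForeignDestructor(&pg) = netif_page_release_mock;
        free_page_mock(&pg);
        return 0;
    }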
diff --git a/linux-2.6.7-xen-sparse/arch/xen/Kconfig b/linux-2.6.7-xen-sparse/arch/xen/Kconfig
index 72733f2efc..9f4c1d8002 100644
--- a/linux-2.6.7-xen-sparse/arch/xen/Kconfig
+++ b/linux-2.6.7-xen-sparse/arch/xen/Kconfig
@@ -44,11 +44,15 @@ config XEN_WRITABLE_PAGETABLES
endmenu
-# Xen's block device backend driver needs 2^12 pages
-config FORCE_MAX_ZONEORDER
- int
- default "12" if XEN_PHYSDEV_ACCESS
- default "11" if !XEN_PHYSDEV_ACCESS
+config FOREIGN_PAGES
+ bool
+ default y if XEN_PHYSDEV_ACCESS
+ default n if !XEN_PHYSDEV_ACCESS
+
+config PAGESIZED_SKBS
+ bool
+ default y if XEN_PHYSDEV_ACCESS
+ default n if !XEN_PHYSDEV_ACCESS
#config VT
# bool
diff --git a/linux-2.6.7-xen-sparse/arch/xen/configs/xen0_defconfig b/linux-2.6.7-xen-sparse/arch/xen/configs/xen0_defconfig
index 0a70e9bb26..bda8feb206 100644
--- a/linux-2.6.7-xen-sparse/arch/xen/configs/xen0_defconfig
+++ b/linux-2.6.7-xen-sparse/arch/xen/configs/xen0_defconfig
@@ -10,7 +10,8 @@ CONFIG_NO_IDLE_HZ=y
#
CONFIG_XEN_PRIVILEGED_GUEST=y
CONFIG_XEN_PHYSDEV_ACCESS=y
-CONFIG_FORCE_MAX_ZONEORDER=12
+CONFIG_FOREIGN_PAGES=y
+CONFIG_PAGESIZED_SKBS=y
CONFIG_X86=y
# CONFIG_X86_64 is not set
diff --git a/linux-2.6.7-xen-sparse/arch/xen/configs/xenU_defconfig b/linux-2.6.7-xen-sparse/arch/xen/configs/xenU_defconfig
index fb55077199..a8cc83f752 100644
--- a/linux-2.6.7-xen-sparse/arch/xen/configs/xenU_defconfig
+++ b/linux-2.6.7-xen-sparse/arch/xen/configs/xenU_defconfig
@@ -10,7 +10,8 @@ CONFIG_NO_IDLE_HZ=y
#
# CONFIG_XEN_PRIVILEGED_GUEST is not set
# CONFIG_XEN_PHYSDEV_ACCESS is not set
-CONFIG_FORCE_MAX_ZONEORDER=11
+# CONFIG_FOREIGN_PAGES is not set
+# CONFIG_PAGESIZED_SKBS is not set
CONFIG_X86=y
# CONFIG_X86_64 is not set
diff --git a/linux-2.6.7-xen-sparse/arch/xen/i386/kernel/pci-dma.c b/linux-2.6.7-xen-sparse/arch/xen/i386/kernel/pci-dma.c
index 6f5e1b2c73..46702c5795 100644
--- a/linux-2.6.7-xen-sparse/arch/xen/i386/kernel/pci-dma.c
+++ b/linux-2.6.7-xen-sparse/arch/xen/i386/kernel/pci-dma.c
@@ -61,6 +61,8 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
pfn = pte->pte_low >> PAGE_SHIFT;
queue_l1_entry_update(pte, 0);
+ phys_to_machine_mapping[(__pa(ret)>>PAGE_SHIFT)+i] =
+ INVALID_P2M_ENTRY;
flush_page_update_queue();
if (HYPERVISOR_dom_mem_op(MEMOP_decrease_reservation,
&pfn, 1, 0) != 1) BUG();
@@ -79,7 +81,6 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
pfn+i, (__pa(ret)>>PAGE_SHIFT)+i);
phys_to_machine_mapping[(__pa(ret)>>PAGE_SHIFT)+i] =
pfn+i;
- flush_page_update_queue();
}
flush_page_update_queue();
}
diff --git a/linux-2.6.7-xen-sparse/arch/xen/i386/mm/hypervisor.c b/linux-2.6.7-xen-sparse/arch/xen/i386/mm/hypervisor.c
index fc7bc3e523..957555f92a 100644
--- a/linux-2.6.7-xen-sparse/arch/xen/i386/mm/hypervisor.c
+++ b/linux-2.6.7-xen-sparse/arch/xen/i386/mm/hypervisor.c
@@ -299,7 +299,7 @@ unsigned long allocate_empty_lowmem_region(unsigned long pages)
pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
pfn_array[i] = pte->pte_low >> PAGE_SHIFT;
queue_l1_entry_update(pte, 0);
- phys_to_machine_mapping[__pa(vstart)>>PAGE_SHIFT] = 0xdeadbeef;
+ phys_to_machine_mapping[__pa(vstart)>>PAGE_SHIFT] = INVALID_P2M_ENTRY;
}
flush_page_update_queue();
diff --git a/linux-2.6.7-xen-sparse/arch/xen/i386/mm/ioremap.c b/linux-2.6.7-xen-sparse/arch/xen/i386/mm/ioremap.c
index e6d1e95cb4..3fb6ea9aa0 100644
--- a/linux-2.6.7-xen-sparse/arch/xen/i386/mm/ioremap.c
+++ b/linux-2.6.7-xen-sparse/arch/xen/i386/mm/ioremap.c
@@ -415,17 +415,10 @@ int direct_remap_area_pages(struct mm_struct *mm,
#define MAX_DIRECTMAP_MMU_QUEUE 130
mmu_update_t u[MAX_DIRECTMAP_MMU_QUEUE], *w, *v;
- if ( domid != 0 )
- {
- u[0].ptr = MMU_EXTENDED_COMMAND;
- u[0].val = MMUEXT_SET_FOREIGNDOM;
- u[0].val |= (unsigned long)domid << 16;
- v = w = &u[1];
- }
- else
- {
- v = w = &u[0];
- }
+ u[0].ptr = MMU_EXTENDED_COMMAND;
+ u[0].val = MMUEXT_SET_FOREIGNDOM;
+ u[0].val |= (unsigned long)domid << 16;
+ v = w = &u[1];
start_address = address;
diff --git a/linux-2.6.7-xen-sparse/drivers/char/mem.c b/linux-2.6.7-xen-sparse/drivers/char/mem.c
index ceea2b5ffd..ed3df13f65 100644
--- a/linux-2.6.7-xen-sparse/drivers/char/mem.c
+++ b/linux-2.6.7-xen-sparse/drivers/char/mem.c
@@ -247,6 +247,9 @@ static int mmap_mem(struct file * file, struct vm_area_struct * vma)
if (!(start_info.flags & SIF_PRIVILEGED))
return -ENXIO;
+ if (file->private_data == NULL)
+ file->private_data = (void *)(unsigned long)DOMID_IO;
+
/* DONTCOPY is essential for Xen as copy_page_range is broken. */
vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
diff --git a/linux-2.6.7-xen-sparse/drivers/xen/blkback/blkback.c b/linux-2.6.7-xen-sparse/drivers/xen/blkback/blkback.c
index f26387f305..0af48d6e79 100644
--- a/linux-2.6.7-xen-sparse/drivers/xen/blkback/blkback.c
+++ b/linux-2.6.7-xen-sparse/drivers/xen/blkback/blkback.c
@@ -24,22 +24,15 @@
#define MAX_PENDING_REQS 64
#define BATCH_PER_DOMAIN 16
-/*
- * NB. We place a page of padding between each buffer page to avoid incorrect
- * merging of requests by the IDE and SCSI merging routines. Otherwise, two
- * adjacent buffers in a scatter-gather request would have adjacent page
- * numbers: since the merge routines don't realise that this is in *pseudophys*
- * space, not real space, they may collapse the s-g elements!
- */
static unsigned long mmap_vstart;
#define MMAP_PAGES_PER_REQUEST \
- (2 * (BLKIF_MAX_SEGMENTS_PER_REQUEST + 1))
+ (BLKIF_MAX_SEGMENTS_PER_REQUEST + 1)
#define MMAP_PAGES \
(MAX_PENDING_REQS * MMAP_PAGES_PER_REQUEST)
#define MMAP_VADDR(_req,_seg) \
(mmap_vstart + \
((_req) * MMAP_PAGES_PER_REQUEST * PAGE_SIZE) + \
- ((_seg) * 2 * PAGE_SIZE))
+ ((_seg) * PAGE_SIZE))
/*
* Each outstanding request that we've passed to the lower device layers has a
@@ -415,7 +408,7 @@ static void dispatch_rw_block_io(blkif_t *blkif, blkif_request_t *req)
mcl[i].args[3] = blkif->domid;
phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx, i))>>PAGE_SHIFT] =
- phys_seg[i].buffer >> PAGE_SHIFT;
+ FOREIGN_FRAME(phys_seg[i].buffer >> PAGE_SHIFT);
}
if ( unlikely(HYPERVISOR_multicall(mcl, nr_psegs) != 0) )
diff --git a/linux-2.6.7-xen-sparse/drivers/xen/blkfront/blkfront.c b/linux-2.6.7-xen-sparse/drivers/xen/blkfront/blkfront.c
index 5a3a45873f..e28274a457 100644
--- a/linux-2.6.7-xen-sparse/drivers/xen/blkfront/blkfront.c
+++ b/linux-2.6.7-xen-sparse/drivers/xen/blkfront/blkfront.c
@@ -1,5 +1,5 @@
/******************************************************************************
- * block.c
+ * blkfront.c
*
* XenLinux virtual block-device driver.
*
@@ -67,11 +67,12 @@ static inline int GET_ID_FROM_FREELIST( void )
{
unsigned long free = rec_ring_free;
- if(free>BLKIF_RING_SIZE) BUG();
+ if ( free > BLKIF_RING_SIZE )
+ BUG();
rec_ring_free = rec_ring[free].id;
- rec_ring[free].id = 0x0fffffee; // debug
+ rec_ring[free].id = 0x0fffffee; /* debug */
return free;
}
@@ -253,8 +254,6 @@ static int blkif_queue_request(struct request *req)
id = GET_ID_FROM_FREELIST();
rec_ring[id].id = (unsigned long) req;
-//printk(KERN_ALERT"r: %d req %p (%ld)\n",req_prod,req,id);
-
ring_req->id = id;
ring_req->operation = rq_data_dir(req) ? BLKIF_OP_WRITE :
BLKIF_OP_READ;
@@ -300,8 +299,6 @@ void do_blkif_request(request_queue_t *rq)
DPRINTK("Entered do_blkif_request\n");
-//printk(KERN_ALERT"r: %d req\n",req_prod);
-
queued = 0;
while ((req = elv_next_request(rq)) != NULL) {
@@ -310,7 +307,8 @@ void do_blkif_request(request_queue_t *rq)
continue;
}
- if (BLKIF_RING_FULL) {
+ if ( BLKIF_RING_FULL )
+ {
blk_stop_queue(rq);
break;
}
@@ -358,11 +356,9 @@ static irqreturn_t blkif_int(int irq, void *dev_id, struct pt_regs *ptregs)
id = bret->id;
req = (struct request *)rec_ring[id].id;
-//printk(KERN_ALERT"i: %d req %p (%ld)\n",i,req,id);
-
blkif_completion( &rec_ring[id] );
- ADD_ID_TO_FREELIST(id); // overwrites req
+ ADD_ID_TO_FREELIST(id); /* overwrites req */
switch ( bret->operation )
{
@@ -772,8 +768,6 @@ static int blkif_queue_request(unsigned long id,
req->nr_segments = 1;
req->frame_and_sects[0] = buffer_ma | (fsect<<3) | lsect;
-//printk("N: %d req %p (%ld)\n",req_prod,rec_ring[xid].id,xid);
-
req_prod++;
/* Keep a private copy so we can reissue requests when recovering. */
@@ -892,8 +886,6 @@ static void blkif_int(int irq, void *dev_id, struct pt_regs *ptregs)
id = bret->id;
bh = (struct buffer_head *)rec_ring[id].id;
-//printk("i: %d req %p (%ld)\n",i,bh,id);
-
blkif_completion( &rec_ring[id] );
ADD_ID_TO_FREELIST(id);
@@ -942,16 +934,11 @@ static inline void translate_req_to_pfn(blkif_request_t *xreq,
xreq->operation = req->operation;
xreq->nr_segments = req->nr_segments;
xreq->device = req->device;
- // preserve id
+ /* preserve id */
xreq->sector_number = req->sector_number;
for ( i = 0; i < req->nr_segments; i++ )
- {
- xreq->frame_and_sects[i] = (req->frame_and_sects[i] & ~PAGE_MASK) |
- (machine_to_phys_mapping[req->frame_and_sects[i] >> PAGE_SHIFT] <<
- PAGE_SHIFT);
- }
-
+ xreq->frame_and_sects[i] = machine_to_phys(req->frame_and_sects[i]);
}
static inline void translate_req_to_mfn(blkif_request_t *xreq,
@@ -962,15 +949,11 @@ static inline void translate_req_to_mfn(blkif_request_t *xreq,
xreq->operation = req->operation;
xreq->nr_segments = req->nr_segments;
xreq->device = req->device;
- xreq->id = req->id; // copy id (unlike above)
+ xreq->id = req->id; /* copy id (unlike above) */
xreq->sector_number = req->sector_number;
for ( i = 0; i < req->nr_segments; i++ )
- {
- xreq->frame_and_sects[i] = (req->frame_and_sects[i] & ~PAGE_MASK) |
- (phys_to_machine_mapping[req->frame_and_sects[i] >> PAGE_SHIFT] <<
- PAGE_SHIFT);
- }
+ xreq->frame_and_sects[i] = phys_to_machine(req->frame_and_sects[i]);
}
@@ -978,7 +961,6 @@ static inline void translate_req_to_mfn(blkif_request_t *xreq,
static inline void flush_requests(void)
{
DISABLE_SCATTERGATHER();
-//printk(KERN_ALERT"flush %d\n",req_prod);
wmb(); /* Ensure that the frontend can see the requests. */
blk_ring->req_prod = req_prod;
notify_via_evtchn(blkif_evtchn);
@@ -1010,8 +992,6 @@ void blkif_control_send(blkif_request_t *req, blkif_response_t *rsp)
blk_ring->ring[MASK_BLKIF_IDX(req_prod)].req.id = id;
rec_ring[id].id = (unsigned long) req;
-//printk("c: %d req %p (%ld)\n",req_prod,req,id);
-
translate_req_to_pfn( &rec_ring[id], req );
req_prod++;
@@ -1094,13 +1074,13 @@ static void blkif_status_change(blkif_fe_interface_status_changed_t *status)
" in state %d\n", blkif_state);
break;
}
+
blkif_evtchn = status->evtchn;
- blkif_irq = bind_evtchn_to_irq(blkif_evtchn);
- if ( (rc=request_irq(blkif_irq, blkif_int,
- SA_SAMPLE_RANDOM, "blkif", NULL)) )
- {
+ blkif_irq = bind_evtchn_to_irq(blkif_evtchn);
+
+ if ( (rc = request_irq(blkif_irq, blkif_int,
+ SA_SAMPLE_RANDOM, "blkif", NULL)) )
printk(KERN_ALERT"blkfront request_irq failed (%ld)\n",rc);
- }
if ( recovery )
{
@@ -1109,31 +1089,28 @@ static void blkif_status_change(blkif_fe_interface_status_changed_t *status)
/* Hmm, requests might be re-ordered when we re-issue them.
This will need to be fixed once we have barriers */
- // req_prod = 0; : already is zero
-
- // stage 1 : find active and move to safety
- for ( i=0; i <BLKIF_RING_SIZE; i++ )
+ /* Stage 1 : Find active and move to safety. */
+ for ( i = 0; i < BLKIF_RING_SIZE; i++ )
{
if ( rec_ring[i].id >= PAGE_OFFSET )
{
translate_req_to_mfn(
- &blk_ring->ring[req_prod].req, &rec_ring[i] );
-
+ &blk_ring->ring[req_prod].req, &rec_ring[i]);
req_prod++;
}
}
-printk(KERN_ALERT"blkfront: recovered %d descriptors\n",req_prod);
+ printk(KERN_ALERT"blkfront: recovered %d descriptors\n",req_prod);
- // stage 2 : set up shadow list
- for ( i=0; i<req_prod; i++ )
+ /* Stage 2 : Set up shadow list. */
+ for ( i = 0; i < req_prod; i++ )
{
rec_ring[i].id = blk_ring->ring[i].req.id;
blk_ring->ring[i].req.id = i;
- translate_req_to_pfn( &rec_ring[i], &blk_ring->ring[i].req );
+ translate_req_to_pfn(&rec_ring[i], &blk_ring->ring[i].req);
}
- // stage 3 : set up free list
+ /* Stage 3 : Set up free list. */
for ( ; i < BLKIF_RING_SIZE; i++ )
rec_ring[i].id = i+1;
rec_ring_free = req_prod;
@@ -1150,9 +1127,6 @@ printk(KERN_ALERT"blkfront: recovered %d descriptors\n",req_prod);
/* Kicks things back into life. */
flush_requests();
-
-
-
}
else
{
@@ -1270,7 +1244,7 @@ void blkdev_resume(void)
/* XXXXX THIS IS A TEMPORARY FUNCTION UNTIL WE GET GRANT TABLES */
-void blkif_completion( blkif_request_t *req )
+void blkif_completion(blkif_request_t *req)
{
int i;
@@ -1281,10 +1255,8 @@ void blkif_completion( blkif_request_t *req )
{
unsigned long pfn = req->frame_and_sects[i] >> PAGE_SHIFT;
unsigned long mfn = phys_to_machine_mapping[pfn];
-
queue_machphys_update(mfn, pfn);
}
-
break;
}
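
The translate_req_to_pfn()/translate_req_to_mfn() hunks above collapse the open-coded frame translation into phys_to_machine()/machine_to_phys() helpers; the removed lines show what those helpers must do, namely translate only the frame-number part of frame_and_sects while leaving the sector bits in the low-order bits untouched. The following self-contained illustration of that packing uses mock translation tables and hypothetical helper names, not code from the patch.

    /* Editorial sketch: translate the frame part of a frame_and_sects word
     * while preserving the sector bits, as the removed open-coded lines do. */
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_MASK  (~((1UL << PAGE_SHIFT) - 1))

    /* Tiny mock tables; the real ones are maintained by the kernel and Xen. */
    static unsigned long phys_to_machine_mapping[4] = { 7, 9, 3, 5 }; /* pfn -> mfn */
    static unsigned long machine_to_phys_mapping[16];                 /* mfn -> pfn */

    static unsigned long fas_to_machine(unsigned long fas)
    {
        /* Low bits (sector info) pass through; only the frame is translated. */
        return (fas & ~PAGE_MASK) |
               (phys_to_machine_mapping[fas >> PAGE_SHIFT] << PAGE_SHIFT);
    }

    static unsigned long fas_to_phys(unsigned long fas)
    {
        return (fas & ~PAGE_MASK) |
               (machine_to_phys_mapping[fas >> PAGE_SHIFT] << PAGE_SHIFT);
    }

    int main(void)
    {
        unsigned long pfn, fas;

        for (pfn = 0; pfn < 4; pfn++)
            machine_to_phys_mapping[phys_to_machine_mapping[pfn]] = pfn;

        fas = (2UL << PAGE_SHIFT) | 0x13;      /* pfn 2, sector bits 0x13 */
        printf("pfn-based %#lx -> mfn-based %#lx -> back %#lx\n",
               fas, fas_to_machine(fas), fas_to_phys(fas_to_machine(fas)));
        return 0;
    }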
diff --git a/linux-2.6.7-xen-sparse/drivers/xen/netback/netback.c b/linux-2.6.7-xen-sparse/drivers/xen/netback/netback.c
index 23b0f87130..a28115fe0c 100644
--- a/linux-2.6.7-xen-sparse/drivers/xen/netback/netback.c
+++ b/linux-2.6.7-xen-sparse/drivers/xen/netback/netback.c
@@ -204,6 +204,12 @@ static void net_rx_action(unsigned long unused)
mdata = virt_to_machine(vdata);
new_mfn = get_new_mfn();
+ /*
+ * Set the new P2M table entry before reassigning the old data page.
+ * Heed the comment in pgtable-2level.h:pte_page(). :-)
+ */
+ phys_to_machine_mapping[__pa(skb->data) >> PAGE_SHIFT] = new_mfn;
+
mmu[0].ptr = (new_mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
mmu[0].val = __pa(vdata) >> PAGE_SHIFT;
mmu[1].ptr = MMU_EXTENDED_COMMAND;
@@ -250,8 +256,6 @@ static void net_rx_action(unsigned long unused)
mdata = ((mmu[2].ptr & PAGE_MASK) |
((unsigned long)skb->data & ~PAGE_MASK));
- phys_to_machine_mapping[__pa(skb->data) >> PAGE_SHIFT] = new_mfn;
-
atomic_set(&(skb_shinfo(skb)->dataref), 1);
skb_shinfo(skb)->nr_frags = 0;
skb_shinfo(skb)->frag_list = NULL;
@@ -372,7 +376,6 @@ static void net_tx_action(unsigned long unused)
netif_tx_request_t txreq;
u16 pending_idx;
NETIF_RING_IDX i;
- struct page *page;
multicall_entry_t *mcl;
PEND_RING_IDX dc, dp;
@@ -556,17 +559,16 @@ static void net_tx_action(unsigned long unused)
}
phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx)) >> PAGE_SHIFT] =
- txreq.addr >> PAGE_SHIFT;
+ FOREIGN_FRAME(txreq.addr >> PAGE_SHIFT);
__skb_put(skb, PKT_PROT_LEN);
memcpy(skb->data,
(void *)(MMAP_VADDR(pending_idx)|(txreq.addr&~PAGE_MASK)),
PKT_PROT_LEN);
- page = virt_to_page(MMAP_VADDR(pending_idx));
-
/* Append the packet payload as a fragment. */
- skb_shinfo(skb)->frags[0].page = page;
+ skb_shinfo(skb)->frags[0].page =
+ virt_to_page(MMAP_VADDR(pending_idx));
skb_shinfo(skb)->frags[0].size = txreq.size - PKT_PROT_LEN;
skb_shinfo(skb)->frags[0].page_offset =
(txreq.addr + PKT_PROT_LEN) & ~PAGE_MASK;
@@ -577,17 +579,6 @@ static void net_tx_action(unsigned long unused)
skb->dev = netif->dev;
skb->protocol = eth_type_trans(skb, skb->dev);
- /*
- * Destructor information. We hideously abuse the 'mapping' pointer,
- * which isn't otherwise used by us. The page deallocator is modified
- * to interpret a non-NULL value as a destructor function to be called.
- * This works okay because in all other cases the pointer must be NULL
- * when the page is freed (normally Linux will explicitly bug out if
- * it sees otherwise.
- */
- page->mapping = (struct address_space *)netif_page_release;
- set_page_count(page, 1);
-
netif->stats.tx_bytes += txreq.size;
netif->stats.tx_packets++;
@@ -603,8 +594,8 @@ static void netif_page_release(struct page *page)
unsigned long flags;
u16 pending_idx = page - virt_to_page(mmap_vstart);
- /* Stop the abuse. */
- page->mapping = NULL;
+ /* Ready for next use. */
+ set_page_count(page, 1);
spin_lock_irqsave(&dealloc_lock, flags);
dealloc_ring[MASK_PEND_IDX(dealloc_prod++)] = pending_idx;
@@ -738,6 +729,7 @@ static irqreturn_t netif_be_dbg(int irq, void *dev_id, struct pt_regs *regs)
static int __init netback_init(void)
{
int i;
+ struct page *page;
if ( !(start_info.flags & SIF_NET_BE_DOMAIN) &&
!(start_info.flags & SIF_INITDOMAIN) )
@@ -753,6 +745,13 @@ static int __init netback_init(void)
if ( (mmap_vstart = allocate_empty_lowmem_region(MAX_PENDING_REQS)) == 0 )
BUG();
+ for ( i = 0; i < MAX_PENDING_REQS; i++ )
+ {
+ page = virt_to_page(MMAP_VADDR(i));
+ SetPageForeign(page);
+ PageForeignDestructor(page) = netif_page_release;
+ }
+
pending_cons = 0;
pending_prod = MAX_PENDING_REQS;
for ( i = 0; i < MAX_PENDING_REQS; i++ )
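
The netback change above marks the pages behind MMAP_VADDR() as foreign once at initialisation and re-arms them in netif_page_release(), instead of abusing page->mapping per packet. A condensed sketch of that pairing, relying on the PG_foreign helpers added in page-flags.h further down (the function names here are illustrative):

/* Sketch only: mirrors the netback_init()/netif_page_release() pairing above. */
static void example_page_release(struct page *page);   /* illustrative name */

static void register_foreign_pages(unsigned long vstart, int nr_pages)
{
    int i;
    for ( i = 0; i < nr_pages; i++ )
    {
        struct page *page = virt_to_page(vstart + (i << PAGE_SHIFT));
        SetPageForeign(page);                    /* bypass the buddy allocator */
        PageForeignDestructor(page) = example_page_release;
    }
}

static void example_page_release(struct page *page)
{
    set_page_count(page, 1);    /* ready for the next use */
    /* ... hand the page back to the driver's own pending/dealloc ring ... */
}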
diff --git a/linux-2.6.7-xen-sparse/drivers/xen/netfront/netfront.c b/linux-2.6.7-xen-sparse/drivers/xen/netfront/netfront.c
index b2b63441d5..0011273abd 100644
--- a/linux-2.6.7-xen-sparse/drivers/xen/netfront/netfront.c
+++ b/linux-2.6.7-xen-sparse/drivers/xen/netfront/netfront.c
@@ -263,9 +263,9 @@ static void network_alloc_rx_buffers(struct net_device *dev)
rx_pfn_array[nr_pfns] = virt_to_machine(skb->head) >> PAGE_SHIFT;
- /* remove this page from pseudo phys map (migration optimization) */
+ /* Remove this page from pseudo phys map before passing back to Xen. */
phys_to_machine_mapping[virt_to_phys(skb->head) >> PAGE_SHIFT]
- = 0x80000001;
+ = INVALID_P2M_ENTRY;
rx_mcl[nr_pfns].op = __HYPERVISOR_update_va_mapping;
rx_mcl[nr_pfns].args[0] = (unsigned long)skb->head >> PAGE_SHIFT;
@@ -478,15 +478,6 @@ static int netif_poll(struct net_device *dev, int *pbudget)
mcl->args[2] = 0;
mcl++;
(void)HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl);
-
-#if 0
- if (unlikely(rx_mcl[0].args[5] != 0))
- printk(KERN_ALERT"Hypercall0 failed %u\n",np->rx->resp_prod);
-
- if (unlikely(rx_mcl[1].args[5] != 0))
- printk(KERN_ALERT"Hypercall1 failed %u\n",np->rx->resp_prod);
-#endif
-
}
while ( (skb = __skb_dequeue(&rxq)) != NULL )
diff --git a/linux-2.6.7-xen-sparse/drivers/xen/privcmd/privcmd.c b/linux-2.6.7-xen-sparse/drivers/xen/privcmd/privcmd.c
index aa7a1d9a01..c57bdf6b23 100644
--- a/linux-2.6.7-xen-sparse/drivers/xen/privcmd/privcmd.c
+++ b/linux-2.6.7-xen-sparse/drivers/xen/privcmd/privcmd.c
@@ -138,17 +138,10 @@ static int privcmd_ioctl(struct inode *inode, struct file *file,
if ( (m.addr + (m.num<<PAGE_SHIFT)) > vma->vm_end )
{ ret = -EFAULT; goto batch_err; }
- if ( m.dom != 0 )
- {
- u[0].ptr = MMU_EXTENDED_COMMAND;
- u[0].val = MMUEXT_SET_FOREIGNDOM;
- u[0].val |= (unsigned long)m.dom << 16;
- v = w = &u[1];
- }
- else
- {
- v = w = &u[0];
- }
+ u[0].ptr = MMU_EXTENDED_COMMAND;
+ u[0].val = MMUEXT_SET_FOREIGNDOM;
+ u[0].val |= (unsigned long)m.dom << 16;
+ v = w = &u[1];
p = m.arr;
addr = m.addr;
diff --git a/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/io.h b/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/io.h
index 5b6604b696..81de51c1b5 100644
--- a/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/io.h
+++ b/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/io.h
@@ -88,6 +88,13 @@ static inline void * phys_to_virt(unsigned long address)
#define page_to_pseudophys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
#define page_to_phys(page) (phys_to_machine(page_to_pseudophys(page)))
+#define bio_to_pseudophys(bio) (page_to_pseudophys(bio_page((bio))) + (unsigned long) bio_offset((bio)))
+#define bvec_to_pseudophys(bv) (page_to_pseudophys((bv)->bv_page) + (unsigned long) (bv)->bv_offset)
+
+#define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
+ (((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2))) && \
+ ((bvec_to_pseudophys((vec1)) + (vec1)->bv_len) == bvec_to_pseudophys((vec2))))
+
extern void * __ioremap(unsigned long offset, unsigned long size, unsigned long flags);
/**
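
The override above only lets the block layer merge two bio_vecs when they are contiguous in both machine-physical and pseudo-physical space, since contiguous machine frames may be scattered in the domain's pseudo-physical layout (and vice versa). A rough restatement of the predicate as a function, using the bvec_to_phys/bvec_to_pseudophys helpers above (the function name is illustrative):

/* Sketch: the double contiguity test behind Xen's BIOVEC_PHYS_MERGEABLE. */
static inline int xen_biovec_mergeable(struct bio_vec *v1, struct bio_vec *v2)
{
    int machine_contig = (bvec_to_phys(v1) + v1->bv_len) == bvec_to_phys(v2);
    int pseudo_contig  = (bvec_to_pseudophys(v1) + v1->bv_len) ==
                         bvec_to_pseudophys(v2);
    return machine_contig && pseudo_contig;
}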
diff --git a/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/pgtable-2level.h b/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/pgtable-2level.h
index 760569f95d..f30bd2b83d 100644
--- a/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/pgtable-2level.h
+++ b/linux-2.6.7-xen-sparse/include/asm-xen/asm-i386/pgtable-2level.h
@@ -88,30 +88,33 @@ static inline pte_t ptep_get_and_clear(pte_t *xp)
* not have MFN in our p2m table. Conversely, if the page is ours,
* then we'll have p2m(m2p(MFN))==MFN.
* If we detect a special mapping then it doesn't have a 'struct page'.
- * We force !VALID_PAGE() by returning an out-of-range pointer.
+ * We force !pfn_valid() by returning an out-of-range pointer.
+ *
+ * NB. These checks require that, for any MFN that is not in our reservation,
+ * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if
+ * we are foreign-mapping the MFN, and the other domain has m2p(MFN) == PFN.
+ * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety.
+ *
+ * NB2. When deliberately mapping foreign pages into the p2m table, you *must*
+ * use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we
+ * require. In all the cases we care about, the high bit gets shifted out
+ * (e.g., phys_to_machine()) so behaviour there is correct.
*/
-#define pte_page(_pte) \
-({ \
- unsigned long mfn = (_pte).pte_low >> PAGE_SHIFT; \
- unsigned long pfn = mfn_to_pfn(mfn); \
- if ( (pfn >= max_mapnr) || (pfn_to_mfn(pfn) != mfn) ) \
- pfn = max_mapnr; /* special: force !VALID_PAGE() */ \
- pfn_to_page(pfn); \
-})
-
-#define pte_none(x) (!(x).pte_low)
-/* See comments above pte_page */
-/* XXXcl check pte_present because msync.c:filemap_sync_pte calls
- * without pte_present check */
+#define INVALID_P2M_ENTRY (~0UL)
+#define FOREIGN_FRAME(_m) ((_m) | (1UL<<((sizeof(unsigned long)*8)-1)))
#define pte_pfn(_pte) \
({ \
unsigned long mfn = (_pte).pte_low >> PAGE_SHIFT; \
- unsigned long pfn = pte_present(_pte) ? mfn_to_pfn(mfn) : mfn; \
+ unsigned long pfn = mfn_to_pfn(mfn); \
if ( (pfn >= max_mapnr) || (pfn_to_mfn(pfn) != mfn) ) \
pfn = max_mapnr; /* special: force !pfn_valid() */ \
pfn; \
})
+#define pte_page(_pte) pfn_to_page(pte_pfn(_pte))
+
+#define pte_none(x) (!(x).pte_low)
+
#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define pfn_pte_ma(pfn, prot) __pte_ma(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define pfn_pmd(pfn, prot) __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
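
The rewritten pte_pfn() relies on the round trip p2m(m2p(MFN)) == MFN to decide whether a frame belongs to this domain, and FOREIGN_FRAME() sets the top bit of an MFN precisely so that deliberately mapped foreign frames fail that test. The same check written out as a function, using mfn_to_pfn/pfn_to_mfn and max_mapnr as in the header above (the function name is illustrative):

/* Sketch: the ownership check folded into pte_pfn() above. */
static inline unsigned long mfn_to_local_pfn(unsigned long mfn)
{
    unsigned long pfn = mfn_to_pfn(mfn);
    if ( (pfn >= max_mapnr) || (pfn_to_mfn(pfn) != mfn) )
        pfn = max_mapnr;        /* not ours (or foreign): force !pfn_valid() */
    return pfn;
}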
diff --git a/linux-2.6.7-xen-sparse/include/linux/bio.h b/linux-2.6.7-xen-sparse/include/linux/bio.h
new file mode 100644
index 0000000000..e4d50adaf2
--- /dev/null
+++ b/linux-2.6.7-xen-sparse/include/linux/bio.h
@@ -0,0 +1,304 @@
+/*
+ * 2.5 block I/O model
+ *
+ * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef __LINUX_BIO_H
+#define __LINUX_BIO_H
+
+#include <linux/highmem.h>
+#include <linux/mempool.h>
+
+/* Platforms may set this to teach the BIO layer about IOMMU hardware. */
+#include <asm/io.h>
+#ifndef BIO_VMERGE_BOUNDARY
+#define BIO_VMERGE_BOUNDARY 0
+#endif
+
+#define BIO_DEBUG
+
+#ifdef BIO_DEBUG
+#define BIO_BUG_ON BUG_ON
+#else
+#define BIO_BUG_ON
+#endif
+
+#define BIO_MAX_PAGES (256)
+#define BIO_MAX_SIZE (BIO_MAX_PAGES << PAGE_CACHE_SHIFT)
+#define BIO_MAX_SECTORS (BIO_MAX_SIZE >> 9)
+
+/*
+ * was unsigned short, but we might as well be ready for > 64kB I/O pages
+ */
+struct bio_vec {
+ struct page *bv_page;
+ unsigned int bv_len;
+ unsigned int bv_offset;
+};
+
+struct bio;
+typedef int (bio_end_io_t) (struct bio *, unsigned int, int);
+typedef void (bio_destructor_t) (struct bio *);
+
+/*
+ * main unit of I/O for the block layer and lower layers (ie drivers and
+ * stacking drivers)
+ */
+struct bio {
+ sector_t bi_sector;
+ struct bio *bi_next; /* request queue link */
+ struct block_device *bi_bdev;
+ unsigned long bi_flags; /* status, command, etc */
+ unsigned long bi_rw; /* bottom bits READ/WRITE,
+ * top bits priority
+ */
+
+ unsigned short bi_vcnt; /* how many bio_vec's */
+ unsigned short bi_idx; /* current index into bvl_vec */
+
+ /* Number of segments in this BIO after
+ * physical address coalescing is performed.
+ */
+ unsigned short bi_phys_segments;
+
+ /* Number of segments after physical and DMA remapping
+ * hardware coalescing is performed.
+ */
+ unsigned short bi_hw_segments;
+
+ unsigned int bi_size; /* residual I/O count */
+ unsigned int bi_max_vecs; /* max bvl_vecs we can hold */
+
+ struct bio_vec *bi_io_vec; /* the actual vec list */
+
+ bio_end_io_t *bi_end_io;
+ atomic_t bi_cnt; /* pin count */
+
+ void *bi_private;
+
+ bio_destructor_t *bi_destructor; /* destructor */
+};
+
+/*
+ * bio flags
+ */
+#define BIO_UPTODATE 0 /* ok after I/O completion */
+#define BIO_RW_BLOCK 1 /* RW_AHEAD set, and read/write would block */
+#define BIO_EOF 2 /* out-of-bounds error */
+#define BIO_SEG_VALID 3 /* nr_hw_seg valid */
+#define BIO_CLONED 4 /* doesn't own data */
+#define BIO_BOUNCED 5 /* bio is a bounce bio */
+#define bio_flagged(bio, flag) ((bio)->bi_flags & (1 << (flag)))
+
+/*
+ * top 4 bits of bio flags indicate the pool this bio came from
+ */
+#define BIO_POOL_BITS (4)
+#define BIO_POOL_OFFSET (BITS_PER_LONG - BIO_POOL_BITS)
+#define BIO_POOL_MASK (1UL << BIO_POOL_OFFSET)
+#define BIO_POOL_IDX(bio) ((bio)->bi_flags >> BIO_POOL_OFFSET)
+
+/*
+ * bio bi_rw flags
+ *
+ * bit 0 -- read (not set) or write (set)
+ * bit 1 -- rw-ahead when set
+ * bit 2 -- barrier
+ * bit 3 -- fail fast, don't want low level driver retries
+ * bit 4 -- synchronous I/O hint: the block layer will unplug immediately
+ */
+#define BIO_RW 0
+#define BIO_RW_AHEAD 1
+#define BIO_RW_BARRIER 2
+#define BIO_RW_FAILFAST 3
+#define BIO_RW_SYNC 4
+
+/*
+ * various member access, note that bio_data should of course not be used
+ * on highmem page vectors
+ */
+#define bio_iovec_idx(bio, idx) (&((bio)->bi_io_vec[(idx)]))
+#define bio_iovec(bio) bio_iovec_idx((bio), (bio)->bi_idx)
+#define bio_page(bio) bio_iovec((bio))->bv_page
+#define bio_offset(bio) bio_iovec((bio))->bv_offset
+#define bio_segments(bio) ((bio)->bi_vcnt - (bio)->bi_idx)
+#define bio_sectors(bio) ((bio)->bi_size >> 9)
+#define bio_cur_sectors(bio) (bio_iovec(bio)->bv_len >> 9)
+#define bio_data(bio) (page_address(bio_page((bio))) + bio_offset((bio)))
+#define bio_barrier(bio) ((bio)->bi_rw & (1 << BIO_RW_BARRIER))
+#define bio_sync(bio) ((bio)->bi_rw & (1 << BIO_RW_SYNC))
+
+/*
+ * will die
+ */
+#define bio_to_phys(bio) (page_to_phys(bio_page((bio))) + (unsigned long) bio_offset((bio)))
+#define bvec_to_phys(bv) (page_to_phys((bv)->bv_page) + (unsigned long) (bv)->bv_offset)
+
+/*
+ * queues that have highmem support enabled may still need to revert to
+ * PIO transfers occasionally and thus map high pages temporarily. For
+ * permanent PIO fall back, user is probably better off disabling highmem
+ * I/O completely on that queue (see ide-dma for example)
+ */
+#define __bio_kmap_atomic(bio, idx, kmtype) \
+ (kmap_atomic(bio_iovec_idx((bio), (idx))->bv_page, kmtype) + \
+ bio_iovec_idx((bio), (idx))->bv_offset)
+
+#define __bio_kunmap_atomic(addr, kmtype) kunmap_atomic(addr, kmtype)
+
+/*
+ * merge helpers etc
+ */
+
+#define __BVEC_END(bio) bio_iovec_idx((bio), (bio)->bi_vcnt - 1)
+#define __BVEC_START(bio) bio_iovec_idx((bio), (bio)->bi_idx)
+/* Platforms may set this to restrict multi-page buffer merging. */
+#ifndef BIOVEC_PHYS_MERGEABLE
+#define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
+ ((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))
+#endif
+#define BIOVEC_VIRT_MERGEABLE(vec1, vec2) \
+ ((((bvec_to_phys((vec1)) + (vec1)->bv_len) | bvec_to_phys((vec2))) & (BIO_VMERGE_BOUNDARY - 1)) == 0)
+#define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \
+ (((addr1) | (mask)) == (((addr2) - 1) | (mask)))
+#define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
+ __BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, (q)->seg_boundary_mask)
+#define BIO_SEG_BOUNDARY(q, b1, b2) \
+ BIOVEC_SEG_BOUNDARY((q), __BVEC_END((b1)), __BVEC_START((b2)))
+
+#define bio_io_error(bio, bytes) bio_endio((bio), (bytes), -EIO)
+
+/*
+ * drivers should not use the __ version unless they _really_ want to
+ * run through the entire bio and not just pending pieces
+ */
+#define __bio_for_each_segment(bvl, bio, i, start_idx) \
+ for (bvl = bio_iovec_idx((bio), (start_idx)), i = (start_idx); \
+ i < (bio)->bi_vcnt; \
+ bvl++, i++)
+
+#define bio_for_each_segment(bvl, bio, i) \
+ __bio_for_each_segment(bvl, bio, i, (bio)->bi_idx)
+
+/*
+ * get a reference to a bio, so it won't disappear. the intended use is
+ * something like:
+ *
+ * bio_get(bio);
+ * submit_bio(rw, bio);
+ * if (bio->bi_flags ...)
+ * do_something
+ * bio_put(bio);
+ *
+ * without the bio_get(), it could potentially complete I/O before submit_bio
+ * returns, and the bio would then already be freed memory by the time the
+ * if (bio->bi_flags ...) test runs
+ */
+#define bio_get(bio) atomic_inc(&(bio)->bi_cnt)
+
+
+/*
+ * A bio_pair is used when we need to split a bio.
+ * This can only happen for a bio that refers to just one
+ * page of data, and in the unusual situation when the
+ * page crosses a chunk/device boundary
+ *
+ * The address of the master bio is stored in bio1.bi_private
+ * The address of the pool the pair was allocated from is stored
+ * in bio2.bi_private
+ */
+struct bio_pair {
+ struct bio bio1, bio2;
+ struct bio_vec bv1, bv2;
+ atomic_t cnt;
+ int error;
+};
+extern struct bio_pair *bio_split(struct bio *bi, mempool_t *pool,
+ int first_sectors);
+extern mempool_t *bio_split_pool;
+extern void bio_pair_release(struct bio_pair *dbio);
+
+extern struct bio *bio_alloc(int, int);
+extern void bio_put(struct bio *);
+
+extern void bio_endio(struct bio *, unsigned int, int);
+struct request_queue;
+extern int bio_phys_segments(struct request_queue *, struct bio *);
+extern int bio_hw_segments(struct request_queue *, struct bio *);
+
+extern void __bio_clone(struct bio *, struct bio *);
+extern struct bio *bio_clone(struct bio *, int);
+
+extern void bio_init(struct bio *);
+
+extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int);
+extern int bio_get_nr_vecs(struct block_device *);
+extern struct bio *bio_map_user(struct request_queue *, struct block_device *,
+ unsigned long, unsigned int, int);
+extern void bio_unmap_user(struct bio *, int);
+extern void bio_set_pages_dirty(struct bio *bio);
+extern void bio_check_pages_dirty(struct bio *bio);
+
+#ifdef CONFIG_HIGHMEM
+/*
+ * remember to add offset! and never ever reenable interrupts between a
+ * bvec_kmap_irq and bvec_kunmap_irq!!
+ *
+ * This function MUST be inlined - it plays with the CPU interrupt flags.
+ * Hence the `extern inline'.
+ */
+extern inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
+{
+ unsigned long addr;
+
+ /*
+ * might not be a highmem page, but the preempt/irq count
+ * balancing is a lot nicer this way
+ */
+ local_irq_save(*flags);
+ addr = (unsigned long) kmap_atomic(bvec->bv_page, KM_BIO_SRC_IRQ);
+
+ BUG_ON(addr & ~PAGE_MASK);
+
+ return (char *) addr + bvec->bv_offset;
+}
+
+extern inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
+{
+ unsigned long ptr = (unsigned long) buffer & PAGE_MASK;
+
+ kunmap_atomic((void *) ptr, KM_BIO_SRC_IRQ);
+ local_irq_restore(*flags);
+}
+
+#else
+#define bvec_kmap_irq(bvec, flags) (page_address((bvec)->bv_page) + (bvec)->bv_offset)
+#define bvec_kunmap_irq(buf, flags) do { *(flags) = 0; } while (0)
+#endif
+
+extern inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx,
+ unsigned long *flags)
+{
+ return bvec_kmap_irq(bio_iovec_idx(bio, idx), flags);
+}
+#define __bio_kunmap_irq(buf, flags) bvec_kunmap_irq(buf, flags)
+
+#define bio_kmap_irq(bio, flags) \
+ __bio_kmap_irq((bio), (bio)->bi_idx, (flags))
+#define bio_kunmap_irq(buf,flags) __bio_kunmap_irq(buf, flags)
+
+#endif /* __LINUX_BIO_H */
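
As a usage note for the iterator macros above: bio_for_each_segment() walks only the segments still pending (from bi_idx onwards), which is what drivers normally want, while the __ variant starts from an explicit index. A small sketch (the function name is illustrative):

/* Sketch: summing the bytes still pending in a bio. */
static unsigned int bio_pending_bytes(struct bio *bio)
{
    struct bio_vec *bvec;
    unsigned int bytes = 0;
    int i;

    bio_for_each_segment(bvec, bio, i)
        bytes += bvec->bv_len;
    return bytes;
}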
diff --git a/linux-2.6.7-xen-sparse/include/linux/page-flags.h b/linux-2.6.7-xen-sparse/include/linux/page-flags.h
new file mode 100644
index 0000000000..951844c30e
--- /dev/null
+++ b/linux-2.6.7-xen-sparse/include/linux/page-flags.h
@@ -0,0 +1,343 @@
+/*
+ * Macros for manipulating and testing page->flags
+ */
+
+#ifndef PAGE_FLAGS_H
+#define PAGE_FLAGS_H
+
+#include <linux/percpu.h>
+#include <linux/cache.h>
+#include <asm/pgtable.h>
+
+/*
+ * Various page->flags bits:
+ *
+ * PG_reserved is set for special pages, which can never be swapped out. Some
+ * of them might not even exist (eg empty_bad_page)...
+ *
+ * The PG_private bitflag is set if page->private contains a valid value.
+ *
+ * During disk I/O, PG_locked is used. This bit is set before I/O and
+ * reset when I/O completes. page_waitqueue(page) is a wait queue of all tasks
+ * waiting for the I/O on this page to complete.
+ *
+ * PG_uptodate tells whether the page's contents is valid. When a read
+ * completes, the page becomes uptodate, unless a disk I/O error happened.
+ *
+ * For choosing which pages to swap out, inode pages carry a PG_referenced bit,
+ * which is set any time the system accesses that page through the (mapping,
+ * index) hash table. This referenced bit, together with the referenced bit
+ * in the page tables, is used to manipulate page->age and move the page across
+ * the active, inactive_dirty and inactive_clean lists.
+ *
+ * Note that the referenced bit, the page->lru list_head and the active,
+ * inactive_dirty and inactive_clean lists are protected by the
+ * zone->lru_lock, and *NOT* by the usual PG_locked bit!
+ *
+ * PG_error is set to indicate that an I/O error occurred on this page.
+ *
+ * PG_arch_1 is an architecture specific page state bit. The generic code
+ * guarantees that this bit is cleared for a page when it first is entered into
+ * the page cache.
+ *
+ * PG_highmem pages are not permanently mapped into the kernel virtual address
+ * space, they need to be kmapped separately for doing IO on the pages. The
+ * struct page (which holds these flag bits) is always mapped into kernel
+ * address space...
+ */
+
+/*
+ * Don't use the *_dontuse flags. Use the macros. Otherwise you'll break
+ * locked- and dirty-page accounting. The top eight bits of page->flags are
+ * used for page->zone, so putting flag bits there doesn't work.
+ */
+#define PG_locked 0 /* Page is locked. Don't touch. */
+#define PG_error 1
+#define PG_referenced 2
+#define PG_uptodate 3
+
+#define PG_dirty 4
+#define PG_lru 5
+#define PG_active 6
+#define PG_slab 7 /* slab debug (Suparna wants this) */
+
+#define PG_highmem 8
+#define PG_checked 9 /* kill me in 2.5.<early>. */
+#define PG_arch_1 10
+#define PG_reserved 11
+
+#define PG_private 12 /* Has something at ->private */
+#define PG_writeback 13 /* Page is under writeback */
+#define PG_nosave 14 /* Used for system suspend/resume */
+#define PG_maplock 15 /* Lock bit for rmap to ptes */
+
+#define PG_swapcache 16 /* Swap page: swp_entry_t in private */
+#define PG_mappedtodisk 17 /* Has blocks allocated on-disk */
+#define PG_reclaim 18 /* To be reclaimed asap */
+#define PG_compound 19 /* Part of a compound page */
+
+#define PG_anon 20 /* Anonymous: anon_vma in mapping */
+#define PG_foreign 21 /* Page belongs to foreign allocator */
+
+
+/*
+ * Global page accounting. One instance per CPU. Only unsigned longs are
+ * allowed.
+ */
+struct page_state {
+ unsigned long nr_dirty; /* Dirty writeable pages */
+ unsigned long nr_writeback; /* Pages under writeback */
+ unsigned long nr_unstable; /* NFS unstable pages */
+ unsigned long nr_page_table_pages;/* Pages used for pagetables */
+ unsigned long nr_mapped; /* mapped into pagetables */
+ unsigned long nr_slab; /* In slab */
+#define GET_PAGE_STATE_LAST nr_slab
+
+ /*
+ * The below are zeroed by get_page_state(). Use get_full_page_state()
+ * to add up all these.
+ */
+ unsigned long pgpgin; /* Disk reads */
+ unsigned long pgpgout; /* Disk writes */
+ unsigned long pswpin; /* swap reads */
+ unsigned long pswpout; /* swap writes */
+ unsigned long pgalloc_high; /* page allocations */
+
+ unsigned long pgalloc_normal;
+ unsigned long pgalloc_dma;
+ unsigned long pgfree; /* page freeings */
+ unsigned long pgactivate; /* pages moved inactive->active */
+ unsigned long pgdeactivate; /* pages moved active->inactive */
+
+ unsigned long pgfault; /* faults (major+minor) */
+ unsigned long pgmajfault; /* faults (major only) */
+ unsigned long pgrefill_high; /* inspected in refill_inactive_zone */
+ unsigned long pgrefill_normal;
+ unsigned long pgrefill_dma;
+
+ unsigned long pgsteal_high; /* total highmem pages reclaimed */
+ unsigned long pgsteal_normal;
+ unsigned long pgsteal_dma;
+ unsigned long pgscan_kswapd_high;/* total highmem pages scanned */
+ unsigned long pgscan_kswapd_normal;
+
+ unsigned long pgscan_kswapd_dma;
+ unsigned long pgscan_direct_high;/* total highmem pages scanned */
+ unsigned long pgscan_direct_normal;
+ unsigned long pgscan_direct_dma;
+ unsigned long pginodesteal; /* pages reclaimed via inode freeing */
+
+ unsigned long slabs_scanned; /* slab objects scanned */
+ unsigned long kswapd_steal; /* pages reclaimed by kswapd */
+ unsigned long kswapd_inodesteal;/* reclaimed via kswapd inode freeing */
+ unsigned long pageoutrun; /* kswapd's calls to page reclaim */
+ unsigned long allocstall; /* direct reclaim calls */
+
+ unsigned long pgrotated; /* pages rotated to tail of the LRU */
+};
+
+DECLARE_PER_CPU(struct page_state, page_states);
+
+extern void get_page_state(struct page_state *ret);
+extern void get_full_page_state(struct page_state *ret);
+extern unsigned long __read_page_state(unsigned offset);
+
+#define read_page_state(member) \
+ __read_page_state(offsetof(struct page_state, member))
+
+#define mod_page_state(member, delta) \
+ do { \
+ unsigned long flags; \
+ local_irq_save(flags); \
+ __get_cpu_var(page_states).member += (delta); \
+ local_irq_restore(flags); \
+ } while (0)
+
+
+#define inc_page_state(member) mod_page_state(member, 1UL)
+#define dec_page_state(member) mod_page_state(member, 0UL - 1)
+#define add_page_state(member,delta) mod_page_state(member, (delta))
+#define sub_page_state(member,delta) mod_page_state(member, 0UL - (delta))
+
+#define mod_page_state_zone(zone, member, delta) \
+ do { \
+ unsigned long flags; \
+ local_irq_save(flags); \
+ if (is_highmem(zone)) \
+ __get_cpu_var(page_states).member##_high += (delta);\
+ else if (is_normal(zone)) \
+ __get_cpu_var(page_states).member##_normal += (delta);\
+ else \
+ __get_cpu_var(page_states).member##_dma += (delta);\
+ local_irq_restore(flags); \
+ } while (0)
+
+/*
+ * Manipulation of page state flags
+ */
+#define PageLocked(page) \
+ test_bit(PG_locked, &(page)->flags)
+#define SetPageLocked(page) \
+ set_bit(PG_locked, &(page)->flags)
+#define TestSetPageLocked(page) \
+ test_and_set_bit(PG_locked, &(page)->flags)
+#define ClearPageLocked(page) \
+ clear_bit(PG_locked, &(page)->flags)
+#define TestClearPageLocked(page) \
+ test_and_clear_bit(PG_locked, &(page)->flags)
+
+#define PageError(page) test_bit(PG_error, &(page)->flags)
+#define SetPageError(page) set_bit(PG_error, &(page)->flags)
+#define ClearPageError(page) clear_bit(PG_error, &(page)->flags)
+
+#define PageReferenced(page) test_bit(PG_referenced, &(page)->flags)
+#define SetPageReferenced(page) set_bit(PG_referenced, &(page)->flags)
+#define ClearPageReferenced(page) clear_bit(PG_referenced, &(page)->flags)
+#define TestClearPageReferenced(page) test_and_clear_bit(PG_referenced, &(page)->flags)
+
+#ifndef arch_set_page_uptodate
+#define arch_set_page_uptodate(page) do { } while (0)
+#endif
+
+#define PageUptodate(page) test_bit(PG_uptodate, &(page)->flags)
+#define SetPageUptodate(page) \
+ do { \
+ arch_set_page_uptodate(page); \
+ set_bit(PG_uptodate, &(page)->flags); \
+ } while (0)
+#define ClearPageUptodate(page) clear_bit(PG_uptodate, &(page)->flags)
+
+#define PageDirty(page) test_bit(PG_dirty, &(page)->flags)
+#define SetPageDirty(page) set_bit(PG_dirty, &(page)->flags)
+#define TestSetPageDirty(page) test_and_set_bit(PG_dirty, &(page)->flags)
+#define ClearPageDirty(page) clear_bit(PG_dirty, &(page)->flags)
+#define TestClearPageDirty(page) test_and_clear_bit(PG_dirty, &(page)->flags)
+
+#define SetPageLRU(page) set_bit(PG_lru, &(page)->flags)
+#define PageLRU(page) test_bit(PG_lru, &(page)->flags)
+#define TestSetPageLRU(page) test_and_set_bit(PG_lru, &(page)->flags)
+#define TestClearPageLRU(page) test_and_clear_bit(PG_lru, &(page)->flags)
+
+#define PageActive(page) test_bit(PG_active, &(page)->flags)
+#define SetPageActive(page) set_bit(PG_active, &(page)->flags)
+#define ClearPageActive(page) clear_bit(PG_active, &(page)->flags)
+#define TestClearPageActive(page) test_and_clear_bit(PG_active, &(page)->flags)
+#define TestSetPageActive(page) test_and_set_bit(PG_active, &(page)->flags)
+
+#define PageSlab(page) test_bit(PG_slab, &(page)->flags)
+#define SetPageSlab(page) set_bit(PG_slab, &(page)->flags)
+#define ClearPageSlab(page) clear_bit(PG_slab, &(page)->flags)
+#define TestClearPageSlab(page) test_and_clear_bit(PG_slab, &(page)->flags)
+#define TestSetPageSlab(page) test_and_set_bit(PG_slab, &(page)->flags)
+
+#ifdef CONFIG_HIGHMEM
+#define PageHighMem(page) test_bit(PG_highmem, &(page)->flags)
+#else
+#define PageHighMem(page) 0 /* needed to optimize away at compile time */
+#endif
+
+#define PageChecked(page) test_bit(PG_checked, &(page)->flags)
+#define SetPageChecked(page) set_bit(PG_checked, &(page)->flags)
+#define ClearPageChecked(page) clear_bit(PG_checked, &(page)->flags)
+
+#define PageReserved(page) test_bit(PG_reserved, &(page)->flags)
+#define SetPageReserved(page) set_bit(PG_reserved, &(page)->flags)
+#define ClearPageReserved(page) clear_bit(PG_reserved, &(page)->flags)
+
+#define SetPagePrivate(page) set_bit(PG_private, &(page)->flags)
+#define ClearPagePrivate(page) clear_bit(PG_private, &(page)->flags)
+#define PagePrivate(page) test_bit(PG_private, &(page)->flags)
+
+#define PageWriteback(page) test_bit(PG_writeback, &(page)->flags)
+#define SetPageWriteback(page) \
+ do { \
+ if (!test_and_set_bit(PG_writeback, \
+ &(page)->flags)) \
+ inc_page_state(nr_writeback); \
+ } while (0)
+#define TestSetPageWriteback(page) \
+ ({ \
+ int ret; \
+ ret = test_and_set_bit(PG_writeback, \
+ &(page)->flags); \
+ if (!ret) \
+ inc_page_state(nr_writeback); \
+ ret; \
+ })
+#define ClearPageWriteback(page) \
+ do { \
+ if (test_and_clear_bit(PG_writeback, \
+ &(page)->flags)) \
+ dec_page_state(nr_writeback); \
+ } while (0)
+#define TestClearPageWriteback(page) \
+ ({ \
+ int ret; \
+ ret = test_and_clear_bit(PG_writeback, \
+ &(page)->flags); \
+ if (ret) \
+ dec_page_state(nr_writeback); \
+ ret; \
+ })
+
+#define PageNosave(page) test_bit(PG_nosave, &(page)->flags)
+#define SetPageNosave(page) set_bit(PG_nosave, &(page)->flags)
+#define TestSetPageNosave(page) test_and_set_bit(PG_nosave, &(page)->flags)
+#define ClearPageNosave(page) clear_bit(PG_nosave, &(page)->flags)
+#define TestClearPageNosave(page) test_and_clear_bit(PG_nosave, &(page)->flags)
+
+#define PageMappedToDisk(page) test_bit(PG_mappedtodisk, &(page)->flags)
+#define SetPageMappedToDisk(page) set_bit(PG_mappedtodisk, &(page)->flags)
+#define ClearPageMappedToDisk(page) clear_bit(PG_mappedtodisk, &(page)->flags)
+
+#define PageReclaim(page) test_bit(PG_reclaim, &(page)->flags)
+#define SetPageReclaim(page) set_bit(PG_reclaim, &(page)->flags)
+#define ClearPageReclaim(page) clear_bit(PG_reclaim, &(page)->flags)
+#define TestClearPageReclaim(page) test_and_clear_bit(PG_reclaim, &(page)->flags)
+
+#define PageCompound(page) test_bit(PG_compound, &(page)->flags)
+#define SetPageCompound(page) set_bit(PG_compound, &(page)->flags)
+#define ClearPageCompound(page) clear_bit(PG_compound, &(page)->flags)
+
+#define PageAnon(page) test_bit(PG_anon, &(page)->flags)
+#define SetPageAnon(page) set_bit(PG_anon, &(page)->flags)
+#define ClearPageAnon(page) clear_bit(PG_anon, &(page)->flags)
+
+/* A foreign page uses a custom destructor rather than the buddy allocator. */
+#ifdef CONFIG_FOREIGN_PAGES
+#define PageForeign(page) test_bit(PG_foreign, &(page)->flags)
+#define SetPageForeign(page) set_bit(PG_foreign, &(page)->flags)
+#define ClearPageForeign(page) clear_bit(PG_foreign, &(page)->flags)
+#define PageForeignDestructor(page) \
+ ( (void (*) (struct page *)) (page)->mapping )
+#else
+#define PageForeign(page) 0
+#define PageForeignDestructor(page) void
+#endif
+
+#ifdef CONFIG_SWAP
+#define PageSwapCache(page) test_bit(PG_swapcache, &(page)->flags)
+#define SetPageSwapCache(page) set_bit(PG_swapcache, &(page)->flags)
+#define ClearPageSwapCache(page) clear_bit(PG_swapcache, &(page)->flags)
+#else
+#define PageSwapCache(page) 0
+#endif
+
+struct page; /* forward declaration */
+
+int test_clear_page_dirty(struct page *page);
+int __clear_page_dirty(struct page *page);
+int test_clear_page_writeback(struct page *page);
+int test_set_page_writeback(struct page *page);
+
+static inline void clear_page_dirty(struct page *page)
+{
+ test_clear_page_dirty(page);
+}
+
+static inline void set_page_writeback(struct page *page)
+{
+ test_set_page_writeback(page);
+}
+
+#endif /* PAGE_FLAGS_H */
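
The PG_foreign bit above is only useful if the page-freeing path checks it and diverts such pages to their registered destructor instead of the buddy allocator. A hedged sketch of what such a hook looks like (the function name is illustrative; the real check sits in the kernel's page-freeing code):

/* Sketch: diverting foreign pages away from the buddy allocator on free. */
static inline int try_free_foreign_page(struct page *page)
{
    if (!PageForeign(page))
        return 0;                             /* normal freeing path applies */
    (*PageForeignDestructor(page))(page);     /* owner reclaims the page */
    return 1;
}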
diff --git a/linux-2.6.7-xen-sparse/include/linux/skbuff.h b/linux-2.6.7-xen-sparse/include/linux/skbuff.h
new file mode 100644
index 0000000000..37c4342b9b
--- /dev/null
+++ b/linux-2.6.7-xen-sparse/include/linux/skbuff.h
@@ -0,0 +1,1073 @@
+/*
+ * Definitions for the 'struct sk_buff' memory handlers.
+ *
+ * Authors:
+ * Alan Cox, <gw4pts@gw4pts.ampr.org>
+ * Florian La Roche, <rzsfl@rz.uni-sb.de>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_SKBUFF_H
+#define _LINUX_SKBUFF_H
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/compiler.h>
+#include <linux/time.h>
+#include <linux/cache.h>
+
+#include <asm/atomic.h>
+#include <asm/types.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/highmem.h>
+#include <linux/poll.h>
+#include <linux/net.h>
+
+#define HAVE_ALLOC_SKB /* For the drivers to know */
+#define HAVE_ALIGNABLE_SKB /* Ditto 8) */
+#define SLAB_SKB /* Slabified skbuffs */
+
+#define CHECKSUM_NONE 0
+#define CHECKSUM_HW 1
+#define CHECKSUM_UNNECESSARY 2
+
+#define SKB_DATA_ALIGN(X) (((X) + (SMP_CACHE_BYTES - 1)) & \
+ ~(SMP_CACHE_BYTES - 1))
+#define SKB_MAX_ORDER(X, ORDER) (((PAGE_SIZE << (ORDER)) - (X) - \
+ sizeof(struct skb_shared_info)) & \
+ ~(SMP_CACHE_BYTES - 1))
+#define SKB_MAX_HEAD(X) (SKB_MAX_ORDER((X), 0))
+#define SKB_MAX_ALLOC (SKB_MAX_ORDER(0, 2))
+
+/* A. Checksumming of received packets by device.
+ *
+ * NONE: device failed to checksum this packet.
+ * skb->csum is undefined.
+ *
+ * UNNECESSARY: device parsed packet and the checksum is taken as verified.
+ * skb->csum is undefined.
+ * It is bad option, but, unfortunately, many of vendors do this.
+ * Apparently with secret goal to sell you new device, when you
+ * will add new protocol to your host. F.e. IPv6. 8)
+ *
+ * HW: the most generic way. Device supplied checksum of _all_
+ * the packet as seen by netif_rx in skb->csum.
+ * NOTE: Even if device supports only some protocols, but
+ * is able to produce some skb->csum, it MUST use HW,
+ * not UNNECESSARY.
+ *
+ * B. Checksumming on output.
+ *
+ * NONE: skb is checksummed by protocol or csum is not required.
+ *
+ * HW: device is required to csum packet as seen by hard_start_xmit
+ * from skb->h.raw to the end and to record the checksum
+ * at skb->h.raw+skb->csum.
+ *
+ * Device must show its capabilities in dev->features, set
+ * at device setup time.
+ * NETIF_F_HW_CSUM - it is clever device, it is able to checksum
+ * everything.
+ * NETIF_F_NO_CSUM - loopback or reliable single hop media.
+ * NETIF_F_IP_CSUM - device is dumb. It is able to csum only
+ * TCP/UDP over IPv4. Sigh. Vendors like this
+ * way by an unknown reason. Though, see comment above
+ * about CHECKSUM_UNNECESSARY. 8)
+ *
+ * Any questions? No questions, good. --ANK
+ */
+
+#ifdef __i386__
+#define NET_CALLER(arg) (*(((void **)&arg) - 1))
+#else
+#define NET_CALLER(arg) __builtin_return_address(0)
+#endif
+
+#ifdef CONFIG_NETFILTER
+struct nf_conntrack {
+ atomic_t use;
+ void (*destroy)(struct nf_conntrack *);
+};
+
+struct nf_ct_info {
+ struct nf_conntrack *master;
+};
+
+#ifdef CONFIG_BRIDGE_NETFILTER
+struct nf_bridge_info {
+ atomic_t use;
+ struct net_device *physindev;
+ struct net_device *physoutdev;
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+ struct net_device *netoutdev;
+#endif
+ unsigned int mask;
+ unsigned long data[32 / sizeof(unsigned long)];
+};
+#endif
+
+#endif
+
+struct sk_buff_head {
+ /* These two members must be first. */
+ struct sk_buff *next;
+ struct sk_buff *prev;
+
+ __u32 qlen;
+ spinlock_t lock;
+};
+
+struct sk_buff;
+
+/* To allow 64K frame to be packed as single skb without frag_list */
+#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 2)
+
+typedef struct skb_frag_struct skb_frag_t;
+
+struct skb_frag_struct {
+ struct page *page;
+ __u16 page_offset;
+ __u16 size;
+};
+
+/* This data is invariant across clones and lives at
+ * the end of the header data, ie. at skb->end.
+ */
+struct skb_shared_info {
+ atomic_t dataref;
+ unsigned int nr_frags;
+ unsigned short tso_size;
+ unsigned short tso_segs;
+ struct sk_buff *frag_list;
+ skb_frag_t frags[MAX_SKB_FRAGS];
+};
+
+/**
+ * struct sk_buff - socket buffer
+ * @next: Next buffer in list
+ * @prev: Previous buffer in list
+ * @list: List we are on
+ * @sk: Socket we are owned by
+ * @stamp: Time we arrived
+ * @dev: Device we arrived on/are leaving by
+ * @real_dev: The real device we are using
+ * @h: Transport layer header
+ * @nh: Network layer header
+ * @mac: Link layer header
+ * @dst: FIXME: Describe this field
+ * @cb: Control buffer. Free for use by every layer. Put private vars here
+ * @len: Length of actual data
+ * @data_len: Data length
+ * @mac_len: Length of link layer header
+ * @csum: Checksum
+ * @__unused: Dead field, may be reused
+ * @cloned: Head may be cloned (check refcnt to be sure)
+ * @pkt_type: Packet class
+ * @ip_summed: Driver fed us an IP checksum
+ * @priority: Packet queueing priority
+ * @users: User count - see {datagram,tcp}.c
+ * @protocol: Packet protocol from driver
+ * @security: Security level of packet
+ * @truesize: Buffer size
+ * @head: Head of buffer
+ * @data: Data head pointer
+ * @tail: Tail pointer
+ * @end: End pointer
+ * @destructor: Destruct function
+ * @nfmark: Can be used for communication between hooks
+ * @nfcache: Cache info
+ * @nfct: Associated connection, if any
+ * @nf_debug: Netfilter debugging
+ * @nf_bridge: Saved data about a bridged frame - see br_netfilter.c
+ * @private: Data which is private to the HIPPI implementation
+ * @tc_index: Traffic control index
+ */
+
+struct sk_buff {
+ /* These two members must be first. */
+ struct sk_buff *next;
+ struct sk_buff *prev;
+
+ struct sk_buff_head *list;
+ struct sock *sk;
+ struct timeval stamp;
+ struct net_device *dev;
+ struct net_device *real_dev;
+
+ union {
+ struct tcphdr *th;
+ struct udphdr *uh;
+ struct icmphdr *icmph;
+ struct igmphdr *igmph;
+ struct iphdr *ipiph;
+ struct ipv6hdr *ipv6h;
+ unsigned char *raw;
+ } h;
+
+ union {
+ struct iphdr *iph;
+ struct ipv6hdr *ipv6h;
+ struct arphdr *arph;
+ unsigned char *raw;
+ } nh;
+
+ union {
+ struct ethhdr *ethernet;
+ unsigned char *raw;
+ } mac;
+
+ struct dst_entry *dst;
+ struct sec_path *sp;
+
+ /*
+ * This is the control buffer. It is free to use for every
+ * layer. Please put your private variables there. If you
+ * want to keep them across layers you have to do a skb_clone()
+ * first. This is owned by whoever has the skb queued ATM.
+ */
+ char cb[48];
+
+ unsigned int len,
+ data_len,
+ mac_len,
+ csum;
+ unsigned char local_df,
+ cloned,
+ pkt_type,
+ ip_summed;
+ __u32 priority;
+ unsigned short protocol,
+ security;
+
+ void (*destructor)(struct sk_buff *skb);
+#ifdef CONFIG_NETFILTER
+ unsigned long nfmark;
+ __u32 nfcache;
+ struct nf_ct_info *nfct;
+#ifdef CONFIG_NETFILTER_DEBUG
+ unsigned int nf_debug;
+#endif
+#ifdef CONFIG_BRIDGE_NETFILTER
+ struct nf_bridge_info *nf_bridge;
+#endif
+#endif /* CONFIG_NETFILTER */
+#if defined(CONFIG_HIPPI)
+ union {
+ __u32 ifield;
+ } private;
+#endif
+#ifdef CONFIG_NET_SCHED
+ __u32 tc_index; /* traffic control index */
+#endif
+
+ /* These elements must be at the end, see alloc_skb() for details. */
+ unsigned int truesize;
+ atomic_t users;
+ unsigned char *head,
+ *data,
+ *tail,
+ *end;
+};
+
+#ifdef __KERNEL__
+/*
+ * Handling routines are only of interest to the kernel
+ */
+#include <linux/slab.h>
+
+#include <asm/system.h>
+
+extern void __kfree_skb(struct sk_buff *skb);
+extern struct sk_buff *alloc_skb(unsigned int size, int priority);
+extern void kfree_skbmem(struct sk_buff *skb);
+extern struct sk_buff *skb_clone(struct sk_buff *skb, int priority);
+extern struct sk_buff *skb_copy(const struct sk_buff *skb, int priority);
+extern struct sk_buff *pskb_copy(struct sk_buff *skb, int gfp_mask);
+extern int pskb_expand_head(struct sk_buff *skb,
+ int nhead, int ntail, int gfp_mask);
+extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
+ unsigned int headroom);
+extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
+ int newheadroom, int newtailroom,
+ int priority);
+extern struct sk_buff * skb_pad(struct sk_buff *skb, int pad);
+#define dev_kfree_skb(a) kfree_skb(a)
+extern void skb_over_panic(struct sk_buff *skb, int len,
+ void *here);
+extern void skb_under_panic(struct sk_buff *skb, int len,
+ void *here);
+
+/* Internal */
+#define skb_shinfo(SKB) ((struct skb_shared_info *)((SKB)->end))
+
+/**
+ * skb_queue_empty - check if a queue is empty
+ * @list: queue head
+ *
+ * Returns true if the queue is empty, false otherwise.
+ */
+static inline int skb_queue_empty(const struct sk_buff_head *list)
+{
+ return list->next == (struct sk_buff *)list;
+}
+
+/**
+ * skb_get - reference buffer
+ * @skb: buffer to reference
+ *
+ * Makes another reference to a socket buffer and returns a pointer
+ * to the buffer.
+ */
+static inline struct sk_buff *skb_get(struct sk_buff *skb)
+{
+ atomic_inc(&skb->users);
+ return skb;
+}
+
+/*
+ * If users == 1, we are the only owner and can avoid redundant
+ * atomic changes.
+ */
+
+/**
+ * kfree_skb - free an sk_buff
+ * @skb: buffer to free
+ *
+ * Drop a reference to the buffer and free it if the usage count has
+ * hit zero.
+ */
+static inline void kfree_skb(struct sk_buff *skb)
+{
+ if (atomic_read(&skb->users) == 1 || atomic_dec_and_test(&skb->users))
+ __kfree_skb(skb);
+}
+
+/* Use this if you didn't touch the skb state [for fast switching] */
+static inline void kfree_skb_fast(struct sk_buff *skb)
+{
+ if (atomic_read(&skb->users) == 1 || atomic_dec_and_test(&skb->users))
+ kfree_skbmem(skb);
+}
+
+/**
+ * skb_cloned - is the buffer a clone
+ * @skb: buffer to check
+ *
+ * Returns true if the buffer was generated with skb_clone() and is
+ * one of multiple shared copies of the buffer. Cloned buffers are
+ * shared data so must not be written to under normal circumstances.
+ */
+static inline int skb_cloned(const struct sk_buff *skb)
+{
+ return skb->cloned && atomic_read(&skb_shinfo(skb)->dataref) != 1;
+}
+
+/**
+ * skb_shared - is the buffer shared
+ * @skb: buffer to check
+ *
+ * Returns true if more than one person has a reference to this
+ * buffer.
+ */
+static inline int skb_shared(const struct sk_buff *skb)
+{
+ return atomic_read(&skb->users) != 1;
+}
+
+/**
+ * skb_share_check - check if buffer is shared and if so clone it
+ * @skb: buffer to check
+ * @pri: priority for memory allocation
+ *
+ * If the buffer is shared the buffer is cloned and the old copy
+ * drops a reference. A new clone with a single reference is returned.
+ * If the buffer is not shared the original buffer is returned. When
+ * called from interrupt state or with spinlocks held, pri must
+ * be GFP_ATOMIC.
+ *
+ * NULL is returned on a memory allocation failure.
+ */
+static inline struct sk_buff *skb_share_check(struct sk_buff *skb, int pri)
+{
+ might_sleep_if(pri & __GFP_WAIT);
+ if (skb_shared(skb)) {
+ struct sk_buff *nskb = skb_clone(skb, pri);
+ kfree_skb(skb);
+ skb = nskb;
+ }
+ return skb;
+}
+
+/*
+ * Copy shared buffers into a new sk_buff. We effectively do COW on
+ * packets to handle cases where we have a local reader and forward
+ * and a couple of other messy ones. The normal one is tcpdumping
+ * a packet that's being forwarded.
+ */
+
+/**
+ * skb_unshare - make a copy of a shared buffer
+ * @skb: buffer to check
+ * @pri: priority for memory allocation
+ *
+ * If the socket buffer is a clone then this function creates a new
+ * copy of the data, drops a reference count on the old copy and returns
+ * the new copy with the reference count at 1. If the buffer is not a clone
+ * the original buffer is returned. When called with a spinlock held or
+ * from interrupt state @pri must be %GFP_ATOMIC
+ *
+ * %NULL is returned on a memory allocation failure.
+ */
+static inline struct sk_buff *skb_unshare(struct sk_buff *skb, int pri)
+{
+ might_sleep_if(pri & __GFP_WAIT);
+ if (skb_cloned(skb)) {
+ struct sk_buff *nskb = skb_copy(skb, pri);
+ kfree_skb(skb); /* Free our shared copy */
+ skb = nskb;
+ }
+ return skb;
+}
+
+/**
+ * skb_peek
+ * @list_: list to peek at
+ *
+ * Peek an &sk_buff. Unlike most other operations you _MUST_
+ * be careful with this one. A peek leaves the buffer on the
+ * list and someone else may run off with it. You must hold
+ * the appropriate locks or have a private queue to do this.
+ *
+ * Returns %NULL for an empty list or a pointer to the head element.
+ * The reference count is not incremented and the reference is therefore
+ * volatile. Use with caution.
+ */
+static inline struct sk_buff *skb_peek(struct sk_buff_head *list_)
+{
+ struct sk_buff *list = ((struct sk_buff *)list_)->next;
+ if (list == (struct sk_buff *)list_)
+ list = NULL;
+ return list;
+}
+
+/**
+ * skb_peek_tail
+ * @list_: list to peek at
+ *
+ * Peek an &sk_buff. Unlike most other operations you _MUST_
+ * be careful with this one. A peek leaves the buffer on the
+ * list and someone else may run off with it. You must hold
+ * the appropriate locks or have a private queue to do this.
+ *
+ * Returns %NULL for an empty list or a pointer to the tail element.
+ * The reference count is not incremented and the reference is therefore
+ * volatile. Use with caution.
+ */
+static inline struct sk_buff *skb_peek_tail(struct sk_buff_head *list_)
+{
+ struct sk_buff *list = ((struct sk_buff *)list_)->prev;
+ if (list == (struct sk_buff *)list_)
+ list = NULL;
+ return list;
+}
+
+/**
+ * skb_queue_len - get queue length
+ * @list_: list to measure
+ *
+ * Return the length of an &sk_buff queue.
+ */
+static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
+{
+ return list_->qlen;
+}
+
+static inline void skb_queue_head_init(struct sk_buff_head *list)
+{
+ spin_lock_init(&list->lock);
+ list->prev = list->next = (struct sk_buff *)list;
+ list->qlen = 0;
+}
+
+/*
+ * Insert an sk_buff at the start of a list.
+ *
+ * The "__skb_xxxx()" functions are the non-atomic ones that
+ * can only be called with interrupts disabled.
+ */
+
+/**
+ * __skb_queue_head - queue a buffer at the list head
+ * @list: list to use
+ * @newsk: buffer to queue
+ *
+ * Queue a buffer at the start of a list. This function takes no locks
+ * and you must therefore hold required locks before calling it.
+ *
+ * A buffer cannot be placed on two lists at the same time.
+ */
+extern void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
+static inline void __skb_queue_head(struct sk_buff_head *list,
+ struct sk_buff *newsk)
+{
+ struct sk_buff *prev, *next;
+
+ newsk->list = list;
+ list->qlen++;
+ prev = (struct sk_buff *)list;
+ next = prev->next;
+ newsk->next = next;
+ newsk->prev = prev;
+ next->prev = prev->next = newsk;
+}
+
+/**
+ * __skb_queue_tail - queue a buffer at the list tail
+ * @list: list to use
+ * @newsk: buffer to queue
+ *
+ * Queue a buffer at the end of a list. This function takes no locks
+ * and you must therefore hold required locks before calling it.
+ *
+ * A buffer cannot be placed on two lists at the same time.
+ */
+extern void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
+static inline void __skb_queue_tail(struct sk_buff_head *list,
+ struct sk_buff *newsk)
+{
+ struct sk_buff *prev, *next;
+
+ newsk->list = list;
+ list->qlen++;
+ next = (struct sk_buff *)list;
+ prev = next->prev;
+ newsk->next = next;
+ newsk->prev = prev;
+ next->prev = prev->next = newsk;
+}
+
+
+/**
+ * __skb_dequeue - remove from the head of the queue
+ * @list: list to dequeue from
+ *
+ * Remove the head of the list. This function does not take any locks
+ * so must be used with appropriate locks held only. The head item is
+ * returned or %NULL if the list is empty.
+ */
+extern struct sk_buff *skb_dequeue(struct sk_buff_head *list);
+static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
+{
+ struct sk_buff *next, *prev, *result;
+
+ prev = (struct sk_buff *) list;
+ next = prev->next;
+ result = NULL;
+ if (next != prev) {
+ result = next;
+ next = next->next;
+ list->qlen--;
+ next->prev = prev;
+ prev->next = next;
+ result->next = result->prev = NULL;
+ result->list = NULL;
+ }
+ return result;
+}
+
+
+/*
+ * Insert a packet on a list.
+ */
+extern void skb_insert(struct sk_buff *old, struct sk_buff *newsk);
+static inline void __skb_insert(struct sk_buff *newsk,
+ struct sk_buff *prev, struct sk_buff *next,
+ struct sk_buff_head *list)
+{
+ newsk->next = next;
+ newsk->prev = prev;
+ next->prev = prev->next = newsk;
+ newsk->list = list;
+ list->qlen++;
+}
+
+/*
+ * Place a packet after a given packet in a list.
+ */
+extern void skb_append(struct sk_buff *old, struct sk_buff *newsk);
+static inline void __skb_append(struct sk_buff *old, struct sk_buff *newsk)
+{
+ __skb_insert(newsk, old, old->next, old->list);
+}
+
+/*
+ * remove sk_buff from list. _Must_ be called atomically, and with
+ * the list known..
+ */
+extern void skb_unlink(struct sk_buff *skb);
+static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
+{
+ struct sk_buff *next, *prev;
+
+ list->qlen--;
+ next = skb->next;
+ prev = skb->prev;
+ skb->next = skb->prev = NULL;
+ skb->list = NULL;
+ next->prev = prev;
+ prev->next = next;
+}
+
+
+/* XXX: more streamlined implementation */
+
+/**
+ * __skb_dequeue_tail - remove from the tail of the queue
+ * @list: list to dequeue from
+ *
+ * Remove the tail of the list. This function does not take any locks
+ * so must be used with appropriate locks held only. The tail item is
+ * returned or %NULL if the list is empty.
+ */
+extern struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
+static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
+{
+ struct sk_buff *skb = skb_peek_tail(list);
+ if (skb)
+ __skb_unlink(skb, list);
+ return skb;
+}
+
+
+static inline int skb_is_nonlinear(const struct sk_buff *skb)
+{
+ return skb->data_len;
+}
+
+static inline unsigned int skb_headlen(const struct sk_buff *skb)
+{
+ return skb->len - skb->data_len;
+}
+
+static inline int skb_pagelen(const struct sk_buff *skb)
+{
+ int i, len = 0;
+
+ for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--)
+ len += skb_shinfo(skb)->frags[i].size;
+ return len + skb_headlen(skb);
+}
+
+static inline void skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page, int off, int size)
+{
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ frag->page = page;
+ frag->page_offset = off;
+ frag->size = size;
+ skb_shinfo(skb)->nr_frags = i+1;
+}
+
+#define SKB_PAGE_ASSERT(skb) BUG_ON(skb_shinfo(skb)->nr_frags)
+#define SKB_FRAG_ASSERT(skb) BUG_ON(skb_shinfo(skb)->frag_list)
+#define SKB_LINEAR_ASSERT(skb) BUG_ON(skb_is_nonlinear(skb))
+
+/*
+ * Add data to an sk_buff
+ */
+static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
+{
+ unsigned char *tmp = skb->tail;
+ SKB_LINEAR_ASSERT(skb);
+ skb->tail += len;
+ skb->len += len;
+ return tmp;
+}
+
+/**
+ * skb_put - add data to a buffer
+ * @skb: buffer to use
+ * @len: amount of data to add
+ *
+ * This function extends the used data area of the buffer. If this would
+ * exceed the total buffer size the kernel will panic. A pointer to the
+ * first byte of the extra data is returned.
+ */
+static inline unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
+{
+ unsigned char *tmp = skb->tail;
+ SKB_LINEAR_ASSERT(skb);
+ skb->tail += len;
+ skb->len += len;
+ if (unlikely(skb->tail>skb->end))
+ skb_over_panic(skb, len, current_text_addr());
+ return tmp;
+}
+
+static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
+{
+ skb->data -= len;
+ skb->len += len;
+ return skb->data;
+}
+
+/**
+ * skb_push - add data to the start of a buffer
+ * @skb: buffer to use
+ * @len: amount of data to add
+ *
+ * This function extends the used data area of the buffer at the buffer
+ * start. If this would exceed the total buffer headroom the kernel will
+ * panic. A pointer to the first byte of the extra data is returned.
+ */
+static inline unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
+{
+ skb->data -= len;
+ skb->len += len;
+ if (unlikely(skb->data<skb->head))
+ skb_under_panic(skb, len, current_text_addr());
+ return skb->data;
+}
+
+static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
+{
+ skb->len -= len;
+ BUG_ON(skb->len < skb->data_len);
+ return skb->data += len;
+}
+
+/**
+ * skb_pull - remove data from the start of a buffer
+ * @skb: buffer to use
+ * @len: amount of data to remove
+ *
+ * This function removes data from the start of a buffer, returning
+ * the memory to the headroom. A pointer to the next data in the buffer
+ * is returned. Once the data has been pulled future pushes will overwrite
+ * the old data.
+ */
+static inline unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
+{
+ return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
+}
+
+extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);
+
+static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
+{
+ if (len > skb_headlen(skb) &&
+ !__pskb_pull_tail(skb, len-skb_headlen(skb)))
+ return NULL;
+ skb->len -= len;
+ return skb->data += len;
+}
+
+static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len)
+{
+ return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
+}
+
+static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
+{
+ if (likely(len <= skb_headlen(skb)))
+ return 1;
+ if (unlikely(len > skb->len))
+ return 0;
+ return __pskb_pull_tail(skb, len-skb_headlen(skb)) != NULL;
+}
+
+/**
+ * skb_headroom - bytes at buffer head
+ * @skb: buffer to check
+ *
+ * Return the number of bytes of free space at the head of an &sk_buff.
+ */
+static inline int skb_headroom(const struct sk_buff *skb)
+{
+ return skb->data - skb->head;
+}
+
+/**
+ * skb_tailroom - bytes at buffer end
+ * @skb: buffer to check
+ *
+ * Return the number of bytes of free space at the tail of an sk_buff
+ */
+static inline int skb_tailroom(const struct sk_buff *skb)
+{
+ return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
+}
+
+/**
+ * skb_reserve - adjust headroom
+ * @skb: buffer to alter
+ * @len: bytes to move
+ *
+ * Increase the headroom of an empty &sk_buff by reducing the tail
+ * room. This is only allowed for an empty buffer.
+ */
+static inline void skb_reserve(struct sk_buff *skb, unsigned int len)
+{
+ skb->data += len;
+ skb->tail += len;
+}
+
+extern int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc);
+
+static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
+{
+ if (!skb->data_len) {
+ skb->len = len;
+ skb->tail = skb->data + len;
+ } else
+ ___pskb_trim(skb, len, 0);
+}
+
+/**
+ * skb_trim - remove end from a buffer
+ * @skb: buffer to alter
+ * @len: new length
+ *
+ * Cut the length of a buffer down by removing data from the tail. If
+ * the buffer is already under the length specified it is not modified.
+ */
+static inline void skb_trim(struct sk_buff *skb, unsigned int len)
+{
+ if (skb->len > len)
+ __skb_trim(skb, len);
+}
+
+
+static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
+{
+ if (!skb->data_len) {
+ skb->len = len;
+ skb->tail = skb->data+len;
+ return 0;
+ }
+ return ___pskb_trim(skb, len, 1);
+}
+
+static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
+{
+ return (len < skb->len) ? __pskb_trim(skb, len) : 0;
+}
+
+/**
+ * skb_orphan - orphan a buffer
+ * @skb: buffer to orphan
+ *
+ * If a buffer currently has an owner then we call the owner's
+ * destructor function and make the @skb unowned. The buffer continues
+ * to exist but is no longer charged to its former owner.
+ */
+static inline void skb_orphan(struct sk_buff *skb)
+{
+ if (skb->destructor)
+ skb->destructor(skb);
+ skb->destructor = NULL;
+ skb->sk = NULL;
+}
+
+/**
+ * __skb_queue_purge - empty a list
+ * @list: list to empty
+ *
+ * Delete all buffers on an &sk_buff list. Each buffer is removed from
+ * the list and one reference dropped. This function does not take the
+ * list lock and the caller must hold the relevant locks to use it.
+ */
+extern void skb_queue_purge(struct sk_buff_head *list);
+static inline void __skb_queue_purge(struct sk_buff_head *list)
+{
+ struct sk_buff *skb;
+ while ((skb = __skb_dequeue(list)) != NULL)
+ kfree_skb(skb);
+}
+
+/**
+ * __dev_alloc_skb - allocate an skbuff for sending
+ * @length: length to allocate
+ * @gfp_mask: get_free_pages mask, passed to alloc_skb
+ *
+ * Allocate a new &sk_buff and assign it a usage count of one. The
+ * buffer has unspecified headroom built in. Users should allocate
+ * the headroom they think they need without accounting for the
+ * built in space. The built in space is used for optimisations.
+ *
+ * %NULL is returned if there is no free memory.
+ */
+static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
+ int gfp_mask)
+{
+ struct sk_buff *skb;
+#ifdef CONFIG_PAGESIZED_SKBS
+ length = max(length, (unsigned int)(PAGE_SIZE - 16));
+#endif
+ skb = alloc_skb(length + 16, gfp_mask);
+ if (likely(skb))
+ skb_reserve(skb, 16);
+ return skb;
+}
+
+/**
+ * dev_alloc_skb - allocate an skbuff for sending
+ * @length: length to allocate
+ *
+ * Allocate a new &sk_buff and assign it a usage count of one. The
+ * buffer has unspecified headroom built in. Users should allocate
+ * the headroom they think they need without accounting for the
+ * built in space. The built in space is used for optimisations.
+ *
+ * %NULL is returned if there is no free memory. Although this function
+ * allocates memory it can be called from an interrupt.
+ */
+static inline struct sk_buff *dev_alloc_skb(unsigned int length)
+{
+ return __dev_alloc_skb(length, GFP_ATOMIC);
+}
+
+/**
+ * skb_cow - copy header of skb when it is required
+ * @skb: buffer to cow
+ * @headroom: needed headroom
+ *
+ * If the skb passed lacks sufficient headroom or its data part
+ * is shared, data is reallocated. If reallocation fails, an error
+ * is returned and original skb is not changed.
+ *
+ * The result is skb with writable area skb->head...skb->tail
+ * and at least @headroom of space at head.
+ */
+static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
+{
+ int delta = (headroom > 16 ? headroom : 16) - skb_headroom(skb);
+
+ if (delta < 0)
+ delta = 0;
+
+ if (delta || skb_cloned(skb))
+ return pskb_expand_head(skb, (delta + 15) & ~15, 0, GFP_ATOMIC);
+ return 0;
+}
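
    A minimal sketch of the usual calling pattern (the function name is
    hypothetical): make the header private before modifying it in place, and
    propagate the error untouched on failure.

    static int edit_header_in_place(struct sk_buff *skb)
    {
        int err = skb_cow(skb, 0);       /* may reallocate if cloned/short */

        if (err)
            return err;                  /* -ENOMEM; skb itself is unchanged */

        skb->data[0] ^= 0xff;            /* now safe: data is no longer shared */
        return 0;
    }
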
+
+/**
+ * skb_padto - pad an skbuff up to a minimal size
+ * @skb: buffer to pad
+ * @len: minimal length
+ *
+ * Pads up a buffer to ensure the trailing bytes exist and are
+ * blanked. If the buffer already contains sufficient data it
+ * is untouched. Returns the buffer, which may be a replacement
+ * for the original, or NULL for out of memory - in which case
+ * the original buffer is still freed.
+ */
+
+static inline struct sk_buff *skb_padto(struct sk_buff *skb, unsigned int len)
+{
+ unsigned int size = skb->len;
+ if (likely(size >= len))
+ return skb;
+ return skb_pad(skb, len-size);
+}
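
    The ownership rule in the comment above is the part callers most often get
    wrong: on failure the original buffer has already been freed. A hedged
    sketch of a transmit routine (the function name is hypothetical; ETH_ZLEN
    comes from <linux/if_ether.h>):

    static int my_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
    {
        if (skb->len < ETH_ZLEN) {
            skb = skb_padto(skb, ETH_ZLEN);
            if (skb == NULL)
                return 0;                /* original skb already freed */
        }
        /* ... hand skb to the hardware here ... */
        return 0;
    }
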
+
+/**
+ * skb_linearize - convert paged skb to linear one
+ * @skb: buffer to linearize
+ * @gfp: allocation mode
+ *
+ * If there is no free memory -ENOMEM is returned, otherwise zero
+ * is returned and the old skb data released.
+ */
+extern int __skb_linearize(struct sk_buff *skb, int gfp);
+static inline int skb_linearize(struct sk_buff *skb, int gfp)
+{
+ return __skb_linearize(skb, gfp);
+}
+
+static inline void *kmap_skb_frag(const skb_frag_t *frag)
+{
+#ifdef CONFIG_HIGHMEM
+ BUG_ON(in_irq());
+
+ local_bh_disable();
+#endif
+ return kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ);
+}
+
+static inline void kunmap_skb_frag(void *vaddr)
+{
+ kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
+#ifdef CONFIG_HIGHMEM
+ local_bh_enable();
+#endif
+}
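
    A sketch of the intended pairing (illustrative only; the function name is
    hypothetical): every page fragment is mapped, used, and unmapped again
    without sleeping in between, which is why the helpers disable bottom halves
    on highmem configurations.

    static unsigned int sum_frag_bytes(const struct sk_buff *skb)
    {
        unsigned int i, j, sum = 0;

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
            const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
            unsigned char *vaddr = kmap_skb_frag(frag);

            for (j = 0; j < frag->size; j++)
                sum += vaddr[frag->page_offset + j];

            kunmap_skb_frag(vaddr);      /* must pair with every kmap */
        }
        return sum;
    }
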
+
+#define skb_queue_walk(queue, skb) \
+ for (skb = (queue)->next, prefetch(skb->next); \
+ (skb != (struct sk_buff *)(queue)); \
+ skb = skb->next, prefetch(skb->next))
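
    Since the macro above takes no locks itself, a caller walking a shared
    queue normally brackets it with the list's own spinlock; a small sketch
    (function name hypothetical):

    static unsigned int count_queued(struct sk_buff_head *list)
    {
        struct sk_buff *skb;
        unsigned long flags;
        unsigned int n = 0;

        spin_lock_irqsave(&list->lock, flags);
        skb_queue_walk(list, skb)
            n++;
        spin_unlock_irqrestore(&list->lock, flags);

        return n;
    }
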
+
+
+extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
+ int noblock, int *err);
+extern unsigned int datagram_poll(struct file *file, struct socket *sock,
+ struct poll_table_struct *wait);
+extern int skb_copy_datagram(const struct sk_buff *from,
+ int offset, char __user *to, int size);
+extern int skb_copy_datagram_iovec(const struct sk_buff *from,
+ int offset, struct iovec *to,
+ int size);
+extern int skb_copy_and_csum_datagram(const struct sk_buff *skb,
+ int offset, u8 __user *to,
+ int len, unsigned int *csump);
+extern int skb_copy_and_csum_datagram_iovec(const
+ struct sk_buff *skb,
+ int hlen,
+ struct iovec *iov);
+extern void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
+extern unsigned int skb_checksum(const struct sk_buff *skb, int offset,
+ int len, unsigned int csum);
+extern int skb_copy_bits(const struct sk_buff *skb, int offset,
+ void *to, int len);
+extern unsigned int skb_copy_and_csum_bits(const struct sk_buff *skb,
+ int offset, u8 *to, int len,
+ unsigned int csum);
+extern void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
+
+extern void skb_init(void);
+extern void skb_add_mtu(int mtu);
+
+#ifdef CONFIG_NETFILTER
+static inline void nf_conntrack_put(struct nf_ct_info *nfct)
+{
+ if (nfct && atomic_dec_and_test(&nfct->master->use))
+ nfct->master->destroy(nfct->master);
+}
+static inline void nf_conntrack_get(struct nf_ct_info *nfct)
+{
+ if (nfct)
+ atomic_inc(&nfct->master->use);
+}
+
+#ifdef CONFIG_BRIDGE_NETFILTER
+static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
+{
+ if (nf_bridge && atomic_dec_and_test(&nf_bridge->use))
+ kfree(nf_bridge);
+}
+static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
+{
+ if (nf_bridge)
+ atomic_inc(&nf_bridge->use);
+}
+#endif
+
+#endif
+
+#endif /* __KERNEL__ */
+#endif /* _LINUX_SKBUFF_H */
diff --git a/linux-2.6.7-xen-sparse/mm/page_alloc.c b/linux-2.6.7-xen-sparse/mm/page_alloc.c
index 5d9b765d39..6671262ae0 100644
--- a/linux-2.6.7-xen-sparse/mm/page_alloc.c
+++ b/linux-2.6.7-xen-sparse/mm/page_alloc.c
@@ -497,9 +497,8 @@ static void fastcall free_hot_cold_page(struct page *page, int cold)
struct per_cpu_pages *pcp;
unsigned long flags;
- /* XXX Xen: use mapping pointer as skb/data-page destructor */
- if (page->mapping)
- return (*(void(*)(struct page *))page->mapping)(page);
+ if (PageForeign(page))
+ return (PageForeignDestructor(page))(page);
kernel_map_pages(page, 1, 0);
inc_page_state(pgfree);
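
    For context on the hunk above: PageForeign() marks pages whose storage is on
    loan from a driver (for example the backend drivers touched by this patch),
    and PageForeignDestructor() yields the destructor that driver registered, so
    the final free is diverted away from the buddy allocator. A heavily hedged
    sketch of what such a destructor might look like; the function name and the
    recycling helper are hypothetical, not taken from this patch:

    /* Hypothetical foreign-page destructor, invoked by free_hot_cold_page()
     * once the last reference to the page is dropped. */
    static void my_backend_page_release(struct page *page)
    {
        /* Return the page to the driver's private pool instead of the
         * allocator; clearing the foreign state is left to the helpers in
         * the page-flags.h file added elsewhere in this patch. */
        my_backend_recycle_page(page);   /* hypothetical helper */
    }
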
diff --git a/tools/examples/Makefile b/tools/examples/Makefile
index 0aa958f685..56a24880f9 100644
--- a/tools/examples/Makefile
+++ b/tools/examples/Makefile
@@ -5,9 +5,8 @@ XEND_INITD = init.d/xend
# Xen configuration dir and configs to go there.
XEN_CONFIG_DIR = /etc/xen
XEN_CONFIGS = xend-config.sxp
-XEN_CONFIGS += xmdefconfig
-XEN_CONFIGS += xmdefconfig-example
-XEN_CONFIGS += xmdefconfig-netbsd
+XEN_CONFIGS += xmexample1
+XEN_CONFIGS += xmexample2
# Xen script dir and scripts to go there.
XEN_SCRIPT_DIR = /etc/xen/scripts
diff --git a/tools/examples/xmdefconfig-netbsd b/tools/examples/xmdefconfig-netbsd
deleted file mode 100644
index 8d1662483a..0000000000
--- a/tools/examples/xmdefconfig-netbsd
+++ /dev/null
@@ -1,123 +0,0 @@
-# -*- mode: python; -*-
-#============================================================================
-# Example Python setup script for 'xm create'.
-# This script sets the parameters used when a domain is created using 'xm create'.
-#
-# This is a relatively advanced script that uses a parameter, vmid, to control
-# the settings. So this script can be used to start a set of domains by
-# setting the vmid parameter on the 'xm create' command line. For example:
-#
-# xm create vmid=1
-# xm create vmid=2
-# xm create vmid=3
-#
-# The vmid is purely a script variable, and has no effect on the the domain
-# id assigned to the new domain.
-#============================================================================
-
-# Define script variables here.
-# xm_vars is defined automatically, use xm_vars.var() to define a variable.
-
-# This function checks that 'vmid' has been given a valid value.
-# It is called automatically by 'xm create'.
-def vmid_check(var, val):
- val = int(val)
- if val <= 0:
- raise ValueError
- return val
-
-# Define the 'vmid' variable so that 'xm create' knows about it.
-xm_vars.var('vmid',
- use="Virtual machine id. Integer greater than 0.",
- check=vmid_check)
-
-# Check the defined variables have valid values..
-xm_vars.check()
-
-#----------------------------------------------------------------------------
-# Kernel image file.
-image = "/boot/netbsd"
-
-# The domain build function.
-builder='netbsd'
-
-# Initial memory allocation (in megabytes) for the new domain.
-memory = 16
-
-# A name for the new domain. All domains have to have different names,
-# so we use the vmid to create a name.
-name = "NETBSD%d" % vmid
-
-#----------------------------------------------------------------------------
-# Define network interfaces.
-
-# Number of network interfaces. Default is 1.
-#nics=1
-
-# Optionally define mac and/or bridge for the network interfaces.
-# Random MACs are assigned if not given.
-#vif = [ 'mac=aa:00:00:00:00:11, bridge=xen-br0' ]
-
-# Specify IP address(es), for the new domain. You need to
-# configure IP addrs within the domain just as you do normally. This
-# is just to let Xen know about them so it can route packets
-# appropriately.
-
-#ipaddr = [ xenctl.utils.add_offset_to_ip(xenctl.utils.get_current_ipaddr(),vmid),
-# xenctl.utils.add_offset_to_ip('169.254.1.0',vmid),
-# ]
-
-#----------------------------------------------------------------------------
-# Define the disk devices you want the domain to have access to, and
-# what you want them accessible as.
-# Each disk entry is of the form phy:UNAME,DEV,MODE
-# where UNAME is the device, DEV is the device name the domain will see,
-# and MODE is r for read-only, w for read-write.
-
-# This makes the disk device depend on the vmid - assuming
-# that devices sda7, sda8 etc. exist. The device is exported
-# to all domains as sda1.
-# All domains get sda6 read-only (to use for /usr, see below).
-disk = [ 'phy:sda%d,sda1,w' % (7+vmid),
- 'phy:sda6,sda6,r' ]
-
-#----------------------------------------------------------------------------
-# Set the kernel command line for the new domain.
-# You only need to define the IP parameters and hostname if the domain's
-# IP config doesn't, e.g. in ifcfg-eth0 or via DHCP.
-# You can use 'extra' to set the runlevel and custom environment
-# variables used by custom rc scripts (e.g. VMID=, usr= ).
-
-# Set if you want dhcp to allocate the IP address.
-#dhcp="dhcp"
-# Set netmask.
-#netmask=
-# Set default gateway.
-#gateway=
-# Set the hostname.
-#hostname= "vm%d" % vmid
-
-# Set root device.
-root = "/dev/sda1 ro"
-
-# Root device for nfs.
-#root = "/dev/nfs"
-# The nfs server.
-#nfs_server = '169.254.1.0'
-# Root directory on the nfs server.
-#nfs_root = '/full/path/to/root/directory'
-
-# Sets runlevel 4 and the device for /usr.
-#extra = "4 VMID=%d usr=/dev/sda6" % vmid
-extra = "4 VMID=%d bootdev=xennet0" % vmid
-
-
-#----------------------------------------------------------------------------
-# Set according to whether you want the domain restarted when it exits.
-# The default is 'onreboot', which restarts the domain when it shuts down
-# with exit code reboot.
-# Other values are 'always', and 'never'.
-#
-#restart = 'onreboot'
-
-#============================================================================
diff --git a/tools/examples/xmdefconfig b/tools/examples/xmexample1
index b297d1d846..b297d1d846 100644
--- a/tools/examples/xmdefconfig
+++ b/tools/examples/xmexample1
diff --git a/tools/examples/xmdefconfig-example b/tools/examples/xmexample2
index d6df731c45..d6df731c45 100644
--- a/tools/examples/xmdefconfig-example
+++ b/tools/examples/xmexample2
diff --git a/tools/libxc/xc.h b/tools/libxc/xc.h
index 974ac975f2..8b54ed0207 100644
--- a/tools/libxc/xc.h
+++ b/tools/libxc/xc.h
@@ -154,6 +154,8 @@ int xc_rrobin_global_set(int xc_handle, u64 slice);
int xc_rrobin_global_get(int xc_handle, u64 *slice);
#define DOMID_SELF (0x7FF0U)
+#define DOMID_IO (0x7FF1U)
+#define DOMID_XEN (0x7FF2U)
typedef struct {
#define EVTCHNSTAT_closed 0 /* Chennel is not in use. */
diff --git a/tools/libxc/xc_linux_save.c b/tools/libxc/xc_linux_save.c
index 5a47b30f56..cfcc6cd0c8 100644
--- a/tools/libxc/xc_linux_save.c
+++ b/tools/libxc/xc_linux_save.c
@@ -295,7 +295,7 @@ int xc_linux_save(int xc_handle, XcIOContext *ioctxt)
int rc = 1, i, j, k, last_iter, iter = 0;
unsigned long mfn;
u32 domid = ioctxt->domain;
- int live = 0; // (ioctxt->flags & XCFLAGS_LIVE);
+ int live = (ioctxt->flags & XCFLAGS_LIVE);
int debug = (ioctxt->flags & XCFLAGS_DEBUG);
int sent_last_iter, skip_this_iter;
@@ -423,7 +423,7 @@ int xc_linux_save(int xc_handle, XcIOContext *ioctxt)
mfn_to_pfn_table_start_mfn = xc_get_m2p_start_mfn( xc_handle );
live_mfn_to_pfn_table =
- mfn_mapper_map_single(xc_handle, 0x7FFFU,
+ mfn_mapper_map_single(xc_handle, DOMID_XEN,
PAGE_SIZE*1024, PROT_READ,
mfn_to_pfn_table_start_mfn );
@@ -440,7 +440,8 @@ int xc_linux_save(int xc_handle, XcIOContext *ioctxt)
/* Domain is still running at this point */
- if( live ){
+ if( live ){
+printf("GO LIVE!!\n");
if ( xc_shadow_control( xc_handle, domid,
DOM0_SHADOW_CONTROL_OP_ENABLE_LOGDIRTY,
NULL, 0, NULL ) < 0 ) {
diff --git a/tools/python/xen/xm/opts.py b/tools/python/xen/xm/opts.py
index b206d6ce5a..5e7d69a398 100644
--- a/tools/python/xen/xm/opts.py
+++ b/tools/python/xen/xm/opts.py
@@ -347,7 +347,7 @@ class Opts:
def var_usage(self):
if self.vars:
- print 'The defconfig defines the following variables:'
+ print 'The config file defines the following variables:'
for var in self.vars:
var.show()
print
@@ -372,11 +372,11 @@ class Opts:
else:
p = self.vals.defconfig
if os.path.exists(p):
- self.info('Using defconfig file %s.' % p)
+ self.info('Using config file "%s".' % p)
self.load(p, help)
break
else:
- self.err("Cannot open defconfig file %s" % self.vals.defconfig)
+ self.err('Cannot open config file "%s"' % self.vals.defconfig)
def load(self, defconfig, help):
"""Load a defconfig file. Local variables in the file
diff --git a/xen/arch/x86/memory.c b/xen/arch/x86/memory.c
index cb0cf2f19a..5152e39648 100644
--- a/xen/arch/x86/memory.c
+++ b/xen/arch/x86/memory.c
@@ -137,14 +137,49 @@ static struct {
*/
#define FOREIGNDOM (percpu_info[smp_processor_id()].foreign ? : current)
-void ptwr_init_backpointers(void);
+/* Private domain structs for DOMID_XEN and DOMID_IO. */
+static struct domain *dom_xen, *dom_io;
void arch_init_memory(void)
{
+ static void ptwr_init_backpointers(void);
+ unsigned long mfn;
+
memset(percpu_info, 0, sizeof(percpu_info));
vm_assist_info[VMASST_TYPE_writeable_pagetables].enable =
ptwr_init_backpointers;
+
+ /* Initialise to a magic of 0x55555555 so easier to spot bugs later. */
+ memset(machine_to_phys_mapping, 0x55, 4<<20);
+
+ /*
+ * Initialise our DOMID_XEN domain.
+ * Any Xen-heap pages that we will allow to be mapped will have
+ * their domain field set to dom_xen.
+ */
+ dom_xen = alloc_domain_struct();
+ atomic_set(&dom_xen->refcnt, 1);
+ dom_xen->domain = DOMID_XEN;
+
+ /*
+ * Initialise our DOMID_IO domain.
+ * This domain owns no pages but is considered a special case when
+ * mapping I/O pages, as the mappings occur at the privilege level of the caller.
+ */
+ dom_io = alloc_domain_struct();
+ atomic_set(&dom_io->refcnt, 1);
+ dom_io->domain = DOMID_IO;
+
+ /* M2P table is mappable read-only by privileged domains. */
+ for ( mfn = virt_to_phys(&machine_to_phys_mapping[0<<20])>>PAGE_SHIFT;
+ mfn < virt_to_phys(&machine_to_phys_mapping[1<<20])>>PAGE_SHIFT;
+ mfn++ )
+ {
+ frame_table[mfn].u.inuse.count_info = 1 | PGC_allocated;
+ frame_table[mfn].u.inuse.type_info = 1 | PGT_gdt_page; /* non-RW */
+ frame_table[mfn].u.inuse.domain = dom_xen;
+ }
}
static void __invalidate_shadow_ldt(struct domain *d)
@@ -178,7 +213,7 @@ static inline void invalidate_shadow_ldt(struct domain *d)
}
-int alloc_segdesc_page(struct pfn_info *page)
+static int alloc_segdesc_page(struct pfn_info *page)
{
unsigned long *descs = map_domain_mem((page-frame_table) << PAGE_SHIFT);
int i;
@@ -345,11 +380,15 @@ get_page_from_l1e(
if ( unlikely(!pfn_is_ram(pfn)) )
{
- if ( IS_PRIV(current) )
+ /* Revert to caller privileges if FD == DOMID_IO. */
+ if ( d == dom_io )
+ d = current;
+
+ if ( IS_PRIV(d) )
return 1;
- if ( IS_CAPABLE_PHYSDEV(current) )
- return domain_iomem_in_pfn(current, pfn);
+ if ( IS_CAPABLE_PHYSDEV(d) )
+ return domain_iomem_in_pfn(d, pfn);
MEM_LOG("Non-privileged attempt to map I/O space %08lx", pfn);
return 0;
@@ -827,9 +866,16 @@ static int do_extended_command(unsigned long ptr, unsigned long val)
if ( !IS_PRIV(d) )
{
- MEM_LOG("Dom %u has no privilege to set subject domain",
- d->domain);
- okay = 0;
+ switch ( domid )
+ {
+ case DOMID_IO:
+ get_knownalive_domain(e = dom_io);
+ break;
+ default:
+ MEM_LOG("Dom %u cannot set foreign dom\n", d->domain);
+ okay = 0;
+ break;
+ }
}
else
{
@@ -839,8 +885,19 @@ static int do_extended_command(unsigned long ptr, unsigned long val)
percpu_info[cpu].foreign = e = find_domain_by_id(domid);
if ( e == NULL )
{
- MEM_LOG("Unknown domain '%u'", domid);
- okay = 0;
+ switch ( domid )
+ {
+ case DOMID_XEN:
+ get_knownalive_domain(e = dom_xen);
+ break;
+ case DOMID_IO:
+ get_knownalive_domain(e = dom_io);
+ break;
+ default:
+ MEM_LOG("Unknown domain '%u'", domid);
+ okay = 0;
+ break;
+ }
}
}
break;
@@ -926,7 +983,7 @@ static int do_extended_command(unsigned long ptr, unsigned long val)
* the lock so they'll spin waiting for us.
*/
if ( unlikely(e->tot_pages++ == 0) )
- get_domain(e);
+ get_knownalive_domain(e);
list_add_tail(&page->list, &e->page_list);
reassign_fail:
@@ -1493,7 +1550,7 @@ int ptwr_do_page_fault(unsigned long addr)
return 0;
}
-void ptwr_init_backpointers(void)
+static void ptwr_init_backpointers(void)
{
struct pfn_info *page;
unsigned long pde;
diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c
index e629930ab6..3d18ebd4ee 100644
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -16,6 +16,7 @@
#include <asm/domain_page.h>
#include <asm/pdb.h>
+extern void arch_init_memory(void);
extern void init_IRQ(void);
extern void trap_init(void);
extern void time_init(void);
@@ -360,6 +361,8 @@ void __init start_of_day(void)
time_init(); /* installs software handler for HZ clock. */
init_apic_mappings(); /* make APICs addressable in our pagetables. */
+ arch_init_memory();
+
#ifndef CONFIG_SMP
APIC_init_uniprocessor();
#else
diff --git a/xen/arch/x86/shadow.c b/xen/arch/x86/shadow.c
index 362b0f4560..06445943ab 100644
--- a/xen/arch/x86/shadow.c
+++ b/xen/arch/x86/shadow.c
@@ -29,41 +29,6 @@ hypercall lock anyhow (at least initially).
********/
-/**
-
-FIXME:
-
-The shadow table flush command is dangerous on SMP systems as the
-guest may be using the L2 on one CPU while the other is trying to
-blow the table away.
-
-The current save restore code works around this by not calling FLUSH,
-but by calling CLEAN2 which leaves all L2s in tact (this is probably
-quicker anyhow).
-
-Even so, we have to be very careful. The flush code may need to cause
-a TLB flush on another CPU. It needs to do this while holding the
-shadow table lock. The trouble is, the guest may be in the shadow page
-fault handler spinning waiting to grab the shadow lock. It may have
-intterupts disabled, hence we can't use the normal flush_tlb_cpu
-mechanism.
-
-For the moment, we have a grim race whereby the spinlock in the shadow
-fault handler is actually a try lock, in a loop with a helper for the
-tlb flush code.
-
-A better soloution would be to take a new flush lock, then raise a
-per-domain soft irq on the other CPU. The softirq will switch to
-init's PTs, then do an atomic inc of a variable to count himself in,
-then spin on a lock. Having noticed that the other guy has counted
-in, flush the shadow table, then release him by dropping the lock. He
-will then reload cr3 from mm.page_table on the way out of the softirq.
-
-In domian-softirq context we know that the guy holds no locks and has
-interrupts enabled. Nothing can go wrong ;-)
-
-**/
-
static inline void free_shadow_page(struct mm_struct *m,
struct pfn_info *page)
{
@@ -381,9 +346,9 @@ static int shadow_mode_table_op(struct domain *d,
d->mm.shadow_dirty_net_count = 0;
d->mm.shadow_dirty_block_count = 0;
- sc->pages = d->tot_pages;
+ sc->pages = d->max_pages;
- if( d->tot_pages > sc->pages ||
+ if( d->max_pages > sc->pages ||
!sc->dirty_bitmap || !d->mm.shadow_dirty_bitmap )
{
rc = -EINVAL;
@@ -393,10 +358,10 @@ static int shadow_mode_table_op(struct domain *d,
#define chunk (8*1024) // do this in 1KB chunks for L1 cache
- for(i=0;i<d->tot_pages;i+=chunk)
+ for(i=0;i<d->max_pages;i+=chunk)
{
- int bytes = (( ((d->tot_pages-i) > (chunk))?
- (chunk):(d->tot_pages-i) ) + 7) / 8;
+ int bytes = (( ((d->max_pages-i) > (chunk))?
+ (chunk):(d->max_pages-i) ) + 7) / 8;
copy_to_user( sc->dirty_bitmap + (i/(8*sizeof(unsigned long))),
d->mm.shadow_dirty_bitmap +(i/(8*sizeof(unsigned long))),
@@ -428,21 +393,21 @@ static int shadow_mode_table_op(struct domain *d,
sc->stats.dirty_net_count = d->mm.shadow_dirty_net_count;
sc->stats.dirty_block_count = d->mm.shadow_dirty_block_count;
- if( d->tot_pages > sc->pages ||
+ if( d->max_pages > sc->pages ||
!sc->dirty_bitmap || !d->mm.shadow_dirty_bitmap )
{
rc = -EINVAL;
goto out;
}
- sc->pages = d->tot_pages;
+ sc->pages = d->max_pages;
#define chunk (8*1024) // do this in 1KB chunks for L1 cache
- for(i=0;i<d->tot_pages;i+=chunk)
+ for(i=0;i<d->max_pages;i+=chunk)
{
- int bytes = (( ((d->tot_pages-i) > (chunk))?
- (chunk):(d->tot_pages-i) ) + 7) / 8;
+ int bytes = (( ((d->max_pages-i) > (chunk))?
+ (chunk):(d->max_pages-i) ) + 7) / 8;
copy_to_user( sc->dirty_bitmap + (i/(8*sizeof(unsigned long))),
d->mm.shadow_dirty_bitmap +(i/(8*sizeof(unsigned long))),
@@ -475,7 +440,13 @@ int shadow_mode_control(struct domain *d, dom0_shadow_control_t *sc)
unsigned int cmd = sc->op;
int rc = 0;
- spin_lock(&d->mm.shadow_lock);
+ if (d == current)
+ printk("Attempt to control your _own_ shadow tables. I hope you know what you're doing!\n");
+
+ domain_pause(d);
+ synchronise_pagetables(d->processor);
+
+ spin_lock(&d->mm.shadow_lock);
if ( cmd == DOM0_SHADOW_CONTROL_OP_OFF )
{
@@ -502,10 +473,10 @@ int shadow_mode_control(struct domain *d, dom0_shadow_control_t *sc)
rc = -EINVAL;
}
- flush_tlb_cpu(d->processor);
-
spin_unlock(&d->mm.shadow_lock);
+ domain_unpause(d);
+
return rc;
}
@@ -518,6 +489,7 @@ static inline struct pfn_info *alloc_shadow_page(struct mm_struct *m)
void unshadow_table( unsigned long gpfn, unsigned int type )
{
unsigned long spfn;
+ struct domain *d = frame_table[gpfn].u.inuse.domain;
SH_VLOG("unshadow_table type=%08x gpfn=%08lx",
type,
@@ -530,11 +502,11 @@ void unshadow_table( unsigned long gpfn, unsigned int type )
// even in the SMP guest case, there won't be a race here as
// this CPU was the one that cmpxchg'ed the page to invalid
- spfn = __shadow_status(&current->mm, gpfn) & PSH_pfn_mask;
+ spfn = __shadow_status(&d->mm, gpfn) & PSH_pfn_mask;
- delete_shadow_status(&current->mm, gpfn);
+ delete_shadow_status(&d->mm, gpfn);
- free_shadow_page( &current->mm, &frame_table[spfn] );
+ free_shadow_page(&d->mm, &frame_table[spfn] );
}
@@ -651,15 +623,7 @@ int shadow_fault( unsigned long va, long error_code )
// take the lock and reread gpte
- while( unlikely(!spin_trylock(&current->mm.shadow_lock)) )
- {
- extern volatile unsigned long flush_cpumask;
- if ( test_and_clear_bit(smp_processor_id(), &flush_cpumask) )
- local_flush_tlb();
- rep_nop();
- }
-
- ASSERT(spin_is_locked(&current->mm.shadow_lock));
+ spin_lock(&current->mm.shadow_lock);
if ( unlikely(__get_user(gpte, (unsigned long*)&linear_pg_table[va>>PAGE_SHIFT])) )
{
diff --git a/xen/arch/x86/smp.c b/xen/arch/x86/smp.c
index b9a0fec0ad..ef2a1e062a 100644
--- a/xen/arch/x86/smp.c
+++ b/xen/arch/x86/smp.c
@@ -212,7 +212,7 @@ static inline void send_IPI_allbutself(int vector)
*/
static spinlock_t flush_lock = SPIN_LOCK_UNLOCKED;
-volatile unsigned long flush_cpumask;
+static unsigned long flush_cpumask;
asmlinkage void smp_invalidate_interrupt(void)
{
diff --git a/xen/common/dom0_ops.c b/xen/common/dom0_ops.c
index 6c8abcb8f8..1f1e911b73 100644
--- a/xen/common/dom0_ops.c
+++ b/xen/common/dom0_ops.c
@@ -25,13 +25,14 @@
extern unsigned int alloc_new_dom_mem(struct domain *, unsigned int);
extern long arch_do_dom0_op(dom0_op_t *op, dom0_op_t *u_dom0_op);
-extern void arch_getdomaininfo_ctxt(struct domain *, full_execution_context_t *);
+extern void arch_getdomaininfo_ctxt(
+ struct domain *, full_execution_context_t *);
static inline int is_free_domid(domid_t dom)
{
struct domain *d;
- if ( dom >= DOMID_SELF )
+ if ( dom >= DOMID_FIRST_RESERVED )
return 0;
if ( (d = find_domain_by_id(dom)) == NULL )
@@ -66,7 +67,7 @@ static int allocate_domid(domid_t *pdom)
}
/* Couldn't find a free domain id in 0..topdom, try higher. */
- for ( dom = topdom; dom < DOMID_SELF; dom++ )
+ for ( dom = topdom; dom < DOMID_FIRST_RESERVED; dom++ )
{
if ( is_free_domid(dom) )
{
@@ -167,7 +168,7 @@ long do_dom0_op(dom0_op_t *u_dom0_op)
domid_t dom;
dom = op->u.createdomain.domain;
- if ( (dom > 0) && (dom < DOMID_SELF) )
+ if ( (dom > 0) && (dom < DOMID_FIRST_RESERVED) )
{
ret = -EINVAL;
if ( !is_free_domid(dom) )
diff --git a/xen/common/domain.c b/xen/common/domain.c
index cac4c2edf0..4ca9f58135 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -39,11 +39,18 @@ struct domain *do_createdomain(domid_t dom_id, unsigned int cpu)
d->domain = dom_id;
d->processor = cpu;
d->create_time = NOW();
- /* Initialise the sleep_lock */
spin_lock_init(&d->sleep_lock);
memcpy(&d->thread, &idle0_task.thread, sizeof(d->thread));
+ spin_lock_init(&d->page_alloc_lock);
+ INIT_LIST_HEAD(&d->page_list);
+ d->max_pages = d->tot_pages = 0;
+
+ /* Per-domain PCI-device list. */
+ spin_lock_init(&d->pcidev_lock);
+ INIT_LIST_HEAD(&d->pcidev_list);
+
if ( d->domain != IDLE_DOMAIN_ID )
{
if ( init_event_channels(d) != 0 )
@@ -59,16 +66,8 @@ struct domain *do_createdomain(domid_t dom_id, unsigned int cpu)
d->addr_limit = USER_DS;
- spin_lock_init(&d->page_alloc_lock);
- INIT_LIST_HEAD(&d->page_list);
- d->max_pages = d->tot_pages = 0;
-
arch_do_createdomain(d);
- /* Per-domain PCI-device list. */
- spin_lock_init(&d->pcidev_lock);
- INIT_LIST_HEAD(&d->pcidev_list);
-
sched_add_domain(d);
write_lock_irqsave(&tasklist_lock, flags);
diff --git a/xen/common/kernel.c b/xen/common/kernel.c
index d4cd43ae9d..707cede7f1 100644
--- a/xen/common/kernel.c
+++ b/xen/common/kernel.c
@@ -304,9 +304,6 @@ void cmain(multiboot_info_t *mbi)
start_of_day();
- /* Add CPU0 idle task to the task hash list */
- task_hash[TASK_HASH(IDLE_DOMAIN_ID)] = &idle0_task;
-
/* Create initial domain 0. */
new_dom = do_createdomain(0, 0);
if ( new_dom == NULL )
diff --git a/xen/common/memory.c b/xen/common/memory.c
index 8e513b55e3..2dfdd10e07 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -37,14 +37,8 @@ struct pfn_info *frame_table;
unsigned long frame_table_size;
unsigned long max_page;
-extern void arch_init_memory(void);
-
void __init init_frametable(void *frametable_vstart, unsigned long nr_pages)
{
- unsigned long mfn;
-
- arch_init_memory();
-
max_page = nr_pages;
frame_table_size = nr_pages * sizeof(struct pfn_info);
frame_table_size = (frame_table_size + PAGE_SIZE - 1) & PAGE_MASK;
@@ -54,17 +48,4 @@ void __init init_frametable(void *frametable_vstart, unsigned long nr_pages)
panic("Not enough memory for frame table - reduce Xen heap size?\n");
memset(frame_table, 0, frame_table_size);
-
- /* Initialise to a magic of 0x55555555 so easier to spot bugs later. */
- memset(machine_to_phys_mapping, 0x55, 4<<20);
-
- /* Pin the ownership of the MP table so that DOM0 can map it later. */
- for ( mfn = virt_to_phys(&machine_to_phys_mapping[0<<20])>>PAGE_SHIFT;
- mfn < virt_to_phys(&machine_to_phys_mapping[1<<20])>>PAGE_SHIFT;
- mfn++ )
- {
- frame_table[mfn].u.inuse.count_info = 1 | PGC_allocated;
- frame_table[mfn].u.inuse.type_info = 1 | PGT_gdt_page; /* non-RW */
- frame_table[mfn].u.inuse.domain = &idle0_task;
- }
}
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index a57d43b9c3..323ecd244e 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -393,7 +393,7 @@ struct pfn_info *alloc_domheap_pages(struct domain *d, int order)
}
if ( unlikely(d->tot_pages == 0) )
- get_domain(d);
+ get_knownalive_domain(d);
d->tot_pages += 1 << order;
@@ -422,7 +422,7 @@ void free_domheap_pages(struct pfn_info *pg, int order)
drop_dom_ref = (d->xenheap_pages == 0);
spin_unlock_recursive(&d->page_alloc_lock);
}
- else
+ else if ( likely(d != NULL) )
{
/* NB. May recursively lock from domain_relinquish_memory(). */
spin_lock_recursive(&d->page_alloc_lock);
@@ -442,6 +442,12 @@ void free_domheap_pages(struct pfn_info *pg, int order)
free_heap_pages(MEMZONE_DOM, pg, order);
}
+ else
+ {
+ /* Freeing an anonymous domain-heap page. */
+ free_heap_pages(MEMZONE_DOM, pg, order);
+ drop_dom_ref = 0;
+ }
if ( drop_dom_ref )
put_domain(d);
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index c0143352e0..a2fc40e0b3 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -108,7 +108,7 @@ struct pfn_info
/* _dom holds an allocation reference */ \
(_pfn)->u.inuse.count_info = PGC_allocated | 1; \
if ( unlikely((_dom)->xenheap_pages++ == 0) ) \
- get_domain(_dom); \
+ get_knownalive_domain(_dom); \
spin_unlock(&(_dom)->page_alloc_lock); \
} while ( 0 )
diff --git a/xen/include/hypervisor-ifs/hypervisor-if.h b/xen/include/hypervisor-ifs/hypervisor-if.h
index 1c28062c5b..efcaeb8abb 100644
--- a/xen/include/hypervisor-ifs/hypervisor-if.h
+++ b/xen/include/hypervisor-ifs/hypervisor-if.h
@@ -87,8 +87,10 @@
* ptr[1:0] == MMU_NORMAL_PT_UPDATE:
* Updates an entry in a page table. If updating an L1 table, and the new
* table entry is valid/present, the mapped frame must belong to the FD, if
- * an FD has been specified. If attempting to map an I/O page, then the FD
- * is ignored, but the calling domain must have sufficient privilege.
+ * an FD has been specified. If attempting to map an I/O page then the
+ * caller assumes the privilege of the FD.
+ * FD == DOMID_IO: Permit /only/ I/O mappings, at the priv level of the caller.
+ * FD == DOMID_XEN: Map restricted areas of Xen's heap space.
* ptr[:2] -- Machine address of the page-table entry to modify.
* val -- Value to write.
*
@@ -121,6 +123,7 @@
* val[7:0] == MMUEXT_SET_FOREIGNDOM:
* val[31:15] -- Domain to set as the Foreign Domain (FD).
* (NB. DOMID_SELF is not recognised)
+ * If FD != DOMID_IO then the caller must be privileged.
*
* val[7:0] == MMUEXT_REASSIGN_PAGE:
* ptr[:2] -- A machine address within the page to be reassigned to the FD.
@@ -186,9 +189,31 @@
#ifndef __ASSEMBLY__
typedef u16 domid_t;
+
+/* Domain ids >= DOMID_FIRST_RESERVED cannot be used for ordinary domains. */
+#define DOMID_FIRST_RESERVED (0x7FF0U)
+
/* DOMID_SELF is used in certain contexts to refer to oneself. */
-#define DOMID_SELF (0x7FF0U)
-/* NB. IDs >= 0x7FF1 are reserved for future use. */
+#define DOMID_SELF (0x7FF0U)
+
+/*
+ * DOMID_IO is used to restrict page-table updates to mapping I/O memory.
+ * Although no Foreign Domain need be specified to map I/O pages, DOMID_IO
+ * is useful to ensure that no mappings to the OS's own heap are accidentally
+ * installed. (e.g., in Linux this could cause havoc as reference counts
+ * aren't adjusted on the I/O-mapping code path).
+ * This only makes sense in MMUEXT_SET_FOREIGNDOM, but in that context can
+ * be specified by any calling domain.
+ */
+#define DOMID_IO (0x7FF1U)
+
+/*
+ * DOMID_XEN is used to allow privileged domains to map restricted parts of
+ * Xen's heap space (e.g., the machine_to_phys table).
+ * This only makes sense in MMUEXT_SET_FOREIGNDOM, and is only permitted if
+ * the caller is privileged.
+ */
+#define DOMID_XEN (0x7FF2U)
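
    As a rough illustration of the split (a sketch modelled on the
    xc_linux_save.c hunk earlier in this patch; the guest-frame mapping and all
    parameter names are assumptions): privileged tools name DOMID_XEN to reach
    Xen-owned data such as the machine-to-physical table, and name the guest's
    real domain id for the guest's own frames, while DOMID_IO is presumably what
    the guest-side ioremap changes in this patch use to keep unprivileged
    page-table updates restricted to I/O memory.

    /* Sketch only: map the M2P table (owned by Xen) and one guest frame.
     * All parameter names are illustrative. */
    static int map_examples(int xc_handle, domid_t guest_domid,
                            unsigned long m2p_start_mfn, unsigned long guest_mfn)
    {
        unsigned long *m2p = mfn_mapper_map_single(xc_handle, DOMID_XEN,
                                                   PAGE_SIZE * 1024, PROT_READ,
                                                   m2p_start_mfn);
        void *guest_page   = mfn_mapper_map_single(xc_handle, guest_domid,
                                                   PAGE_SIZE, PROT_READ,
                                                   guest_mfn);
        return (m2p != NULL && guest_page != NULL) ? 0 : -1;
    }
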
/*
* Send an array of these to HYPERVISOR_mmu_update().
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index 06b6faf6cc..af74cf5380 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -164,11 +164,26 @@ struct domain *alloc_domain_struct();
#define DOMAIN_DESTRUCTED (1<<31) /* assumes atomic_t is >= 32 bits */
#define put_domain(_d) \
if ( atomic_dec_and_test(&(_d)->refcnt) ) domain_destruct(_d)
+
+/*
+ * Use this when you don't have an existing reference to @d. It returns
+ * FALSE if @d is being destructed.
+ */
static inline int get_domain(struct domain *d)
{
atomic_inc(&d->refcnt);
return !(atomic_read(&d->refcnt) & DOMAIN_DESTRUCTED);
}
+
+/*
+ * Use this when you already have, or are borrowing, a reference to @d.
+ * In this case we know that @d cannot be destructed under our feet.
+ */
+static inline void get_knownalive_domain(struct domain *d)
+{
+ atomic_inc(&d->refcnt);
+ ASSERT(!(atomic_read(&d->refcnt) & DOMAIN_DESTRUCTED));
+}
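
    A minimal sketch of the intended split (illustrative only; the function name
    and the stash pointer are hypothetical): callers that already hold or borrow
    a reference, such as the allocator hunks above that charge pages to a live
    domain, can use the cheap assertion-only variant, whereas code with no
    reference of its own must call get_domain() and check the result.

    /* Sketch only: take an extra reference on a domain we are already
     * holding a reference to, e.g. one we were passed by a caller. */
    static void stash_domain(struct domain *d, struct domain **slot)
    {
        get_knownalive_domain(d);   /* cannot race with domain_destruct() */
        *slot = d;                  /* later released with put_domain(*slot) */
    }
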
extern struct domain *do_createdomain(
domid_t dom_id, unsigned int cpu);