-rw-r--r--  extras/mini-os/h/hypervisor.h                                 10
-rw-r--r--  extras/mini-os/head.S                                         13
-rw-r--r--  tools/internal/xi_build.c                                     20
-rw-r--r--  xen/arch/i386/entry.S                                          2
-rw-r--r--  xen/common/domain.c                                           50
-rw-r--r--  xen/common/network.c                                          36
-rw-r--r--  xen/drivers/block/xen_block.c                                 23
-rw-r--r--  xen/include/hypervisor-ifs/block.h                             8
-rw-r--r--  xen/include/hypervisor-ifs/dom0_ops.h                          1
-rw-r--r--  xen/include/hypervisor-ifs/hypervisor-if.h                    74
-rw-r--r--  xen/include/hypervisor-ifs/network.h                           8
-rw-r--r--  xen/net/dev.c                                                288
-rw-r--r--  xenolinux-2.4.22-sparse/arch/xeno/drivers/block/xl_block.c     2
-rw-r--r--  xenolinux-2.4.22-sparse/arch/xeno/drivers/network/network.c    4
-rw-r--r--  xenolinux-2.4.22-sparse/arch/xeno/kernel/head.S               18
-rw-r--r--  xenolinux-2.4.22-sparse/arch/xeno/kernel/setup.c               6
-rw-r--r--  xenolinux-2.4.22-sparse/arch/xeno/mm/init.c                  453
-rw-r--r--  xenolinux-2.4.22-sparse/include/asm-xeno/fixmap.h              5
-rw-r--r--  xenolinux-2.4.22-sparse/include/asm-xeno/hypervisor.h         10
19 files changed, 546 insertions, 485 deletions
diff --git a/extras/mini-os/h/hypervisor.h b/extras/mini-os/h/hypervisor.h
index 3519b6faff..23c39134f3 100644
--- a/extras/mini-os/h/hypervisor.h
+++ b/extras/mini-os/h/hypervisor.h
@@ -103,12 +103,13 @@ static inline int HYPERVISOR_set_callbacks(
return ret;
}
-static inline int HYPERVISOR_net_update(void)
+static inline int HYPERVISOR_net_io_op(unsigned int op, unsigned int idx)
{
int ret;
__asm__ __volatile__ (
TRAP_INSTR
- : "=a" (ret) : "0" (__HYPERVISOR_net_update) );
+ : "=a" (ret) : "0" (__HYPERVISOR_net_io_op),
+ "b" (op), "c" (idx) );
return ret;
}
@@ -165,12 +166,13 @@ static inline int HYPERVISOR_network_op(void *network_op)
return ret;
}
-static inline int HYPERVISOR_block_io_op(void)
+static inline int HYPERVISOR_block_io_op(unsigned int op)
{
int ret;
__asm__ __volatile__ (
TRAP_INSTR
- : "=a" (ret) : "0" (__HYPERVISOR_block_io_op) );
+ : "=a" (ret) : "0" (__HYPERVISOR_block_io_op),
+ "b" (op) );
return ret;
}
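
For context, a minimal sketch of how a guest might drive the reworked hypercalls, using the BLKOP_*/NETOP_* command values introduced by the headers below (the helper names here are illustrative, not part of the patch):

    /* Push newly queued network buffers to Xen; idx selects the VIF. */
    static void net_kick(unsigned int vif_idx)
    {
        (void)HYPERVISOR_net_io_op(NETOP_PUSH_BUFFERS, vif_idx);
    }

    /* Tell Xen to start processing the block-device request ring. */
    static void blk_kick(void)
    {
        (void)HYPERVISOR_block_io_op(BLKOP_PUSH_BUFFERS);
    }
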
diff --git a/extras/mini-os/head.S b/extras/mini-os/head.S
index 3f4e6670c3..d5cb19f574 100644
--- a/extras/mini-os/head.S
+++ b/extras/mini-os/head.S
@@ -1,9 +1,8 @@
#include <os.h>
/* Offsets in start_info structure */
-#define SHARED_INFO 4
-#define MOD_START 12
-#define MOD_LEN 16
+#define MOD_START 4
+#define MOD_LEN 8
#define ENTRY(X) .globl X ; X :
@@ -31,10 +30,10 @@ _start:
/* Clear BSS first so that there are no surprises... */
2: xorl %eax,%eax
- movl $__bss_start,%edi
- movl $_end,%ecx
- subl %edi,%ecx
- rep stosb
+ movl $__bss_start,%edi
+ movl $_end,%ecx
+ subl %edi,%ecx
+ rep stosb
push %esi
call start_kernel
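
The new MOD_START/MOD_LEN values simply track the reordered start_info_t (see hypervisor-if.h below): on ia32, pt_base sits at offset 0, mod_start at 4, and mod_len at 8. A hedged compile-time check of that assumption might look like:

    /* Sketch: assert the asm offsets against the C layout (ia32 only). */
    #include <stddef.h>
    typedef int chk_mod_start[offsetof(start_info_t, mod_start) == 4 ? 1 : -1];
    typedef int chk_mod_len  [offsetof(start_info_t, mod_len)   == 8 ? 1 : -1];
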
diff --git a/tools/internal/xi_build.c b/tools/internal/xi_build.c
index 24863744fb..429048ec94 100644
--- a/tools/internal/xi_build.c
+++ b/tools/internal/xi_build.c
@@ -230,12 +230,8 @@ static int setup_guestos(
alloc_index = tot_pages - 1;
- /*
- * Count bottom-level PTs, rounding up. Include one PTE for shared info. We
- * therefore add 1024 because 1 is for shared_info, 1023 is to round up.
- */
- num_pt_pages =
- (l1_table_offset(virt_load_addr) + tot_pages + 1024) / 1024;
+ /* Count bottom-level PTs, rounding up. */
+ num_pt_pages = (l1_table_offset(virt_load_addr) + tot_pages + 1023) / 1024;
/* We must also count the page directory. */
num_pt_pages++;
@@ -250,7 +246,6 @@ static int setup_guestos(
l2tab = page_array[alloc_index] << PAGE_SHIFT;
alloc_index--;
meminfo->l2_pgt_addr = l2tab;
- meminfo->virt_shinfo_addr = virt_load_addr + (tot_pages << PAGE_SHIFT);
/*
* Pin down l2tab addr as page dir page - causes hypervisor to provide
@@ -261,16 +256,12 @@ static int setup_guestos(
pgt_updates++;
num_pgt_updates++;
- /*
- * Initialise the page tables. The final iteration is for the shared_info
- * PTE -- we break out before filling in the entry, as that is done by
- * Xen during final setup.
- */
+ /* Initialise the page tables. */
if ( (vl2tab = map_pfn(l2tab >> PAGE_SHIFT)) == NULL )
goto error_out;
memset(vl2tab, 0, PAGE_SIZE);
vl2e = vl2tab + l2_table_offset(virt_load_addr);
- for ( count = 0; count < (tot_pages + 1); count++ )
+ for ( count = 0; count < tot_pages; count++ )
{
if ( ((unsigned long)vl1e & (PAGE_SIZE-1)) == 0 )
{
@@ -291,9 +282,6 @@ static int setup_guestos(
vl2e++;
}
- /* The last PTE we consider is filled in later by Xen. */
- if ( count == tot_pages ) break;
-
if ( count < pt_start )
{
pgt_updates->ptr = (unsigned long)vl1e;
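
With the shared-info PTE gone, the rounding term drops from +1024 to +1023 (pure round-up). As a worked example, assuming virt_load_addr is 4MB-aligned (so l1_table_offset() is 0) and tot_pages = 5000:

    /* Illustrative arithmetic for the corrected PT-page count. */
    unsigned long pts   = (0 + 5000 + 1023) / 1024;  /* = 5 L1 pages      */
    unsigned long total = pts + 1;                   /* + page directory = 6 */
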
diff --git a/xen/arch/i386/entry.S b/xen/arch/i386/entry.S
index 2ce1c30784..77ad757d62 100644
--- a/xen/arch/i386/entry.S
+++ b/xen/arch/i386/entry.S
@@ -710,7 +710,7 @@ ENTRY(hypervisor_call_table)
.long SYMBOL_NAME(do_set_gdt)
.long SYMBOL_NAME(do_stack_switch)
.long SYMBOL_NAME(do_set_callbacks)
- .long SYMBOL_NAME(do_net_update)
+ .long SYMBOL_NAME(do_net_io_op)
.long SYMBOL_NAME(do_fpu_taskswitch)
.long SYMBOL_NAME(do_yield)
.long SYMBOL_NAME(kill_domain)
diff --git a/xen/common/domain.c b/xen/common/domain.c
index f16a25f30c..b817e0f36f 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -347,7 +347,6 @@ void release_task(struct task_struct *p)
int final_setup_guestos(struct task_struct * p, dom_meminfo_t * meminfo)
{
l2_pgentry_t * l2tab;
- l1_pgentry_t * l1tab;
start_info_t * virt_startinfo_addr;
unsigned long virt_stack_addr;
unsigned long phys_l2tab;
@@ -374,19 +373,9 @@ int final_setup_guestos(struct task_struct * p, dom_meminfo_t * meminfo)
p->mm.pagetable = mk_pagetable(phys_l2tab);
unmap_domain_mem(l2tab);
- /* map in the shared info structure */
- phys_l2tab = pagetable_val(p->mm.pagetable);
- l2tab = map_domain_mem(phys_l2tab);
- l2tab += l2_table_offset(meminfo->virt_shinfo_addr);
- l1tab = map_domain_mem(l2_pgentry_to_phys(*l2tab));
- l1tab += l1_table_offset(meminfo->virt_shinfo_addr);
- *l1tab = mk_l1_pgentry(__pa(p->shared_info) | L1_PROT);
- unmap_domain_mem((void *)((unsigned long)l2tab & PAGE_MASK));
- unmap_domain_mem((void *)((unsigned long)l1tab & PAGE_MASK));
-
/* set up the shared info structure */
update_dom_time(p->shared_info);
- p->shared_info->domain_time = 0;
+ p->shared_info->domain_time = 0;
/* we pass start info struct to guest os as function parameter on stack */
virt_startinfo_addr = (start_info_t *)meminfo->virt_startinfo_addr;
@@ -401,7 +390,7 @@ int final_setup_guestos(struct task_struct * p, dom_meminfo_t * meminfo)
memset(virt_startinfo_addr, 0, sizeof(*virt_startinfo_addr));
virt_startinfo_addr->nr_pages = p->tot_pages;
- virt_startinfo_addr->shared_info = (shared_info_t *)meminfo->virt_shinfo_addr;
+ virt_startinfo_addr->shared_info = virt_to_phys(p->shared_info);
virt_startinfo_addr->pt_base = meminfo->virt_load_addr +
((p->tot_pages - 1) << PAGE_SHIFT);
@@ -474,7 +463,7 @@ int setup_guestos(struct task_struct *p, dom0_newdomain_t *params,
int i, dom = p->domain;
unsigned long phys_l1tab, phys_l2tab;
unsigned long cur_address, alloc_address;
- unsigned long virt_load_address, virt_stack_address, virt_shinfo_address;
+ unsigned long virt_load_address, virt_stack_address;
start_info_t *virt_startinfo_address;
unsigned long count;
unsigned long alloc_index;
@@ -551,16 +540,11 @@ int setup_guestos(struct task_struct *p, dom0_newdomain_t *params,
memset(l2tab, 0, DOMAIN_ENTRIES_PER_L2_PAGETABLE*sizeof(l2_pgentry_t));
p->mm.pagetable = mk_pagetable(phys_l2tab);
- /*
- * NB. The upper limit on this loop does one extra page. This is to make
- * sure a pte exists when we want to map the shared_info struct.
- */
-
l2tab += l2_table_offset(virt_load_address);
cur_address = list_entry(p->pg_head.next, struct pfn_info, list) -
frame_table;
cur_address <<= PAGE_SHIFT;
- for ( count = 0; count < p->tot_pages + 1; count++ )
+ for ( count = 0; count < p->tot_pages; count++ )
{
if ( !((unsigned long)l1tab & (PAGE_SIZE-1)) )
{
@@ -574,14 +558,11 @@ int setup_guestos(struct task_struct *p, dom0_newdomain_t *params,
}
*l1tab++ = mk_l1_pgentry(cur_address|L1_PROT);
- if ( count < p->tot_pages )
- {
- page = frame_table + (cur_address >> PAGE_SHIFT);
- page->flags = dom | PGT_writeable_page | PG_need_flush;
- page->type_count = page->tot_count = 1;
- /* Set up the MPT entry. */
- machine_to_phys_mapping[cur_address >> PAGE_SHIFT] = count;
- }
+ page = frame_table + (cur_address >> PAGE_SHIFT);
+ page->flags = dom | PGT_writeable_page | PG_need_flush;
+ page->type_count = page->tot_count = 1;
+ /* Set up the MPT entry. */
+ machine_to_phys_mapping[cur_address >> PAGE_SHIFT] = count;
list_ent = frame_table[cur_address >> PAGE_SHIFT].list.next;
cur_address = list_entry(list_ent, struct pfn_info, list) -
@@ -630,17 +611,9 @@ int setup_guestos(struct task_struct *p, dom0_newdomain_t *params,
page->flags = dom | PGT_l2_page_table;
unmap_domain_mem(l1start);
- /* Map in the the shared info structure. */
- virt_shinfo_address = virt_load_address + (p->tot_pages << PAGE_SHIFT);
- l2tab = l2start + l2_table_offset(virt_shinfo_address);
- l1start = l1tab = map_domain_mem(l2_pgentry_to_phys(*l2tab));
- l1tab += l1_table_offset(virt_shinfo_address);
- *l1tab = mk_l1_pgentry(__pa(p->shared_info)|L1_PROT);
- unmap_domain_mem(l1start);
-
/* Set up shared info area. */
update_dom_time(p->shared_info);
- p->shared_info->domain_time = 0;
+ p->shared_info->domain_time = 0;
virt_startinfo_address = (start_info_t *)
(virt_load_address + ((alloc_index - 1) << PAGE_SHIFT));
@@ -671,8 +644,7 @@ int setup_guestos(struct task_struct *p, dom0_newdomain_t *params,
/* Set up start info area. */
memset(virt_startinfo_address, 0, sizeof(*virt_startinfo_address));
virt_startinfo_address->nr_pages = p->tot_pages;
- virt_startinfo_address->shared_info =
- (shared_info_t *)virt_shinfo_address;
+ virt_startinfo_address->shared_info = virt_to_phys(p->shared_info);
virt_startinfo_address->pt_base = virt_load_address +
((p->tot_pages - 1) << PAGE_SHIFT);
diff --git a/xen/common/network.c b/xen/common/network.c
index d51195059e..367790644a 100644
--- a/xen/common/network.c
+++ b/xen/common/network.c
@@ -159,40 +159,10 @@ net_vif_t *create_net_vif(int domain)
void destroy_net_vif(net_vif_t *vif)
{
- int i;
- unsigned long *pte, flags;
- struct pfn_info *page;
+ extern long flush_bufs_for_vif(net_vif_t *vif);
struct task_struct *p = vif->domain;
-
- /* Return any outstanding receive buffers to the guest OS. */
- spin_lock_irqsave(&p->page_lock, flags);
- for ( i = vif->rx_cons; i != vif->rx_prod; i = ((i+1) & (RX_RING_SIZE-1)) )
- {
- rx_shadow_entry_t *rx = vif->rx_shadow_ring + i;
-
- /* Release the page-table page. */
- page = frame_table + (rx->pte_ptr >> PAGE_SHIFT);
- put_page_type(page);
- put_page_tot(page);
-
- /* Give the buffer page back to the domain. */
- page = frame_table + rx->buf_pfn;
- list_add(&page->list, &p->pg_head);
- page->flags = vif->domain->domain;
-
- /* Patch up the PTE if it hasn't changed under our feet. */
- pte = map_domain_mem(rx->pte_ptr);
- if ( !(*pte & _PAGE_PRESENT) )
- {
- *pte = (rx->buf_pfn<<PAGE_SHIFT) | (*pte & ~PAGE_MASK) |
- _PAGE_RW | _PAGE_PRESENT;
- page->flags |= PGT_writeable_page | PG_need_flush;
- page->type_count = page->tot_count = 1;
- }
- unmap_domain_mem(pte);
- }
- spin_unlock_irqrestore(&p->page_lock, flags);
-
+ (void)flush_bufs_for_vif(vif);
+ UNSHARE_PFN(virt_to_page(vif->shared_rings));
kmem_cache_free(net_vif_cache, vif);
put_task_struct(p);
}

diff --git a/xen/drivers/block/xen_block.c b/xen/drivers/block/xen_block.c
index 3a56144007..addadcf7dd 100644
--- a/xen/drivers/block/xen_block.c
+++ b/xen/drivers/block/xen_block.c
@@ -241,11 +241,26 @@ static void end_block_io_op(struct buffer_head *bh, int uptodate)
* GUEST-OS SYSCALL -- Indicates there are requests outstanding.
*/
-long do_block_io_op(void)
+long do_block_io_op(unsigned int op)
{
- add_to_blkdev_list_tail(current);
- maybe_trigger_io_schedule();
- return 0L;
+ long ret = 0;
+
+ switch ( op )
+ {
+ case BLKOP_PUSH_BUFFERS:
+ add_to_blkdev_list_tail(current);
+ maybe_trigger_io_schedule();
+ break;
+
+ case BLKOP_FLUSH_BUFFERS:
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
}
diff --git a/xen/include/hypervisor-ifs/block.h b/xen/include/hypervisor-ifs/block.h
index 5c9a0caa77..9a5dd753e8 100644
--- a/xen/include/hypervisor-ifs/block.h
+++ b/xen/include/hypervisor-ifs/block.h
@@ -9,6 +9,14 @@
#define __BLOCK_H__
/*
+ * Command values for block_io_op()
+ */
+
+#define BLKOP_PUSH_BUFFERS 0 /* Notify Xen of new requests on the ring. */
+#define BLKOP_FLUSH_BUFFERS 1 /* Flush all pending request buffers. */
+
+
+/*
* Device numbers
*/
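
BLKOP_PUSH_BUFFERS is used by the xenolinux block driver below; BLKOP_FLUSH_BUFFERS is accepted by Xen in this patch but is a no-op so far. A plausible (hypothetical) guest-side use would be quiescing the ring, e.g. before suspend:

    /* Hypothetical suspend-path sketch: drop pending block buffers. */
    static void blkdev_quiesce(void)
    {
        (void)HYPERVISOR_block_io_op(BLKOP_FLUSH_BUFFERS);
    }
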
diff --git a/xen/include/hypervisor-ifs/dom0_ops.h b/xen/include/hypervisor-ifs/dom0_ops.h
index 1e7ab9a489..55544e1626 100644
--- a/xen/include/hypervisor-ifs/dom0_ops.h
+++ b/xen/include/hypervisor-ifs/dom0_ops.h
@@ -57,7 +57,6 @@ typedef struct domain_launch
unsigned int domain;
unsigned long l2_pgt_addr;
unsigned long virt_load_addr;
- unsigned long virt_shinfo_addr;
unsigned long virt_startinfo_addr;
unsigned int num_vifs;
char cmd_line[MAX_CMD_LEN];
diff --git a/xen/include/hypervisor-ifs/hypervisor-if.h b/xen/include/hypervisor-ifs/hypervisor-if.h
index 3213d0b6c6..33115a7941 100644
--- a/xen/include/hypervisor-ifs/hypervisor-if.h
+++ b/xen/include/hypervisor-ifs/hypervisor-if.h
@@ -19,9 +19,9 @@
* NB. The reserved range is inclusive (that is, both FIRST_RESERVED_GDT_ENTRY
* and LAST_RESERVED_GDT_ENTRY are reserved).
*/
-#define NR_RESERVED_GDT_ENTRIES 40
-#define FIRST_RESERVED_GDT_ENTRY 256
-#define LAST_RESERVED_GDT_ENTRY \
+#define NR_RESERVED_GDT_ENTRIES 40
+#define FIRST_RESERVED_GDT_ENTRY 256
+#define LAST_RESERVED_GDT_ENTRY \
(FIRST_RESERVED_GDT_ENTRY + NR_RESERVED_GDT_ENTRIES - 1)
/*
@@ -29,10 +29,10 @@
* are also present in the initial GDT, many OSes will be able to avoid
* installing their own GDT.
*/
-#define FLAT_RING1_CS 0x0819
-#define FLAT_RING1_DS 0x0821
-#define FLAT_RING3_CS 0x082b
-#define FLAT_RING3_DS 0x0833
+#define FLAT_RING1_CS 0x0819
+#define FLAT_RING1_DS 0x0821
+#define FLAT_RING3_CS 0x082b
+#define FLAT_RING3_DS 0x0833
/*
@@ -40,25 +40,25 @@
*/
/* EAX = vector; EBX, ECX, EDX, ESI, EDI = args 1, 2, 3, 4, 5. */
-#define __HYPERVISOR_set_trap_table 0
-#define __HYPERVISOR_mmu_update 1
-#define __HYPERVISOR_console_write 2
-#define __HYPERVISOR_set_gdt 3
+#define __HYPERVISOR_set_trap_table 0
+#define __HYPERVISOR_mmu_update 1
+#define __HYPERVISOR_console_write 2
+#define __HYPERVISOR_set_gdt 3
#define __HYPERVISOR_stack_switch 4
#define __HYPERVISOR_set_callbacks 5
-#define __HYPERVISOR_net_update 6
-#define __HYPERVISOR_fpu_taskswitch 7
-#define __HYPERVISOR_yield 8
-#define __HYPERVISOR_exit 9
-#define __HYPERVISOR_dom0_op 10
-#define __HYPERVISOR_network_op 11
-#define __HYPERVISOR_block_io_op 12
-#define __HYPERVISOR_set_debugreg 13
-#define __HYPERVISOR_get_debugreg 14
-#define __HYPERVISOR_update_descriptor 15
-#define __HYPERVISOR_set_fast_trap 16
-#define __HYPERVISOR_dom_mem_op 17
-#define __HYPERVISOR_multicall 18
+#define __HYPERVISOR_net_io_op 6
+#define __HYPERVISOR_fpu_taskswitch 7
+#define __HYPERVISOR_yield 8
+#define __HYPERVISOR_exit 9
+#define __HYPERVISOR_dom0_op 10
+#define __HYPERVISOR_network_op 11
+#define __HYPERVISOR_block_io_op 12
+#define __HYPERVISOR_set_debugreg 13
+#define __HYPERVISOR_get_debugreg 14
+#define __HYPERVISOR_update_descriptor 15
+#define __HYPERVISOR_set_fast_trap 16
+#define __HYPERVISOR_dom_mem_op 17
+#define __HYPERVISOR_multicall 18
#define __HYPERVISOR_kbd_op 19
#define __HYPERVISOR_update_va_mapping 20
@@ -276,19 +276,19 @@ typedef struct shared_info_st {
* NB. We expect that this struct is smaller than a page.
*/
typedef struct start_info_st {
- unsigned long nr_pages; /* total pages allocated to this domain */
- shared_info_t *shared_info; /* VIRTUAL address of shared info struct */
- unsigned long pt_base; /* VIRTUAL address of page directory */
- unsigned long mod_start; /* VIRTUAL address of pre-loaded module */
- unsigned long mod_len; /* size (bytes) of pre-loaded module */
- /* Machine address of net rings for each VIF. Will be page aligned. */
- unsigned long net_rings[MAX_DOMAIN_VIFS];
- unsigned char net_vmac[MAX_DOMAIN_VIFS][6];
- /* Machine address of block-device ring. Will be page aligned. */
- unsigned long blk_ring;
- unsigned int dom_id;
- unsigned long flags;
- unsigned char cmd_line[1]; /* variable-length */
+ /* THE FOLLOWING ARE ONLY FILLED IN ON INITIAL BOOT (NOT RESUME). */
+ unsigned long pt_base; /* VIRTUAL address of page directory. */
+ unsigned long mod_start; /* VIRTUAL address of pre-loaded module. */
+ unsigned long mod_len; /* Size (bytes) of pre-loaded module. */
+ /* THE FOLLOWING ARE FILLED IN BOTH ON INITIAL BOOT AND ON RESUME. */
+ unsigned long nr_pages; /* total pages allocated to this domain. */
+ unsigned long shared_info; /* MACHINE address of shared info struct.*/
+ unsigned int dom_id; /* Domain identifier. */
+ unsigned long flags; /* SIF_xxx flags. */
+ unsigned long net_rings[MAX_DOMAIN_VIFS]; /* MACHINE address of ring.*/
+ unsigned char net_vmac[MAX_DOMAIN_VIFS][6]; /* MAC address of VIF. */
+ unsigned long blk_ring; /* MACHINE address of blkdev ring. */
+ unsigned char cmd_line[1]; /* Variable-length options. */
} start_info_t;
/* These flags are passed in the 'flags' field of start_info_t. */
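
The boot/resume split in the comments hints at how a resume path would consume this structure: the fields from nr_pages down are refreshed on resume, so a guest re-latches ring and shared-info addresses but keeps its page tables. A hedged sketch (the hook name is invented; set_fixmap/fix_to_virt are the xenolinux helpers shown in mm/init.c below):

    /* Hypothetical resume handler: re-read only the resume-time fields. */
    static void relatch_start_info(start_info_t *si)
    {
        /* pt_base/mod_start/mod_len are boot-only; leave them alone. */
        set_fixmap(FIX_SHARED_INFO, si->shared_info);  /* MACHINE address */
        HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
    }
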
diff --git a/xen/include/hypervisor-ifs/network.h b/xen/include/hypervisor-ifs/network.h
index c35f1bab15..8681b5253e 100644
--- a/xen/include/hypervisor-ifs/network.h
+++ b/xen/include/hypervisor-ifs/network.h
@@ -12,6 +12,14 @@
#ifndef __RING_H__
#define __RING_H__
+/*
+ * Command values for net_io_op()
+ */
+
+#define NETOP_PUSH_BUFFERS 0 /* Notify Xen of new buffers on the rings. */
+#define NETOP_FLUSH_BUFFERS 1 /* Flush all pending request buffers. */
+
+
typedef struct tx_req_entry_st
{
unsigned short id;
diff --git a/xen/net/dev.c b/xen/net/dev.c
index 2fcf935319..b2a4212e3e 100644
--- a/xen/net/dev.c
+++ b/xen/net/dev.c
@@ -1877,7 +1877,7 @@ static int get_tx_bufs(net_vif_t *vif)
tx = shared_rings->tx_ring[i].req;
target = VIF_DROP;
- if ( (tx.size < PKT_PROT_LEN) || (tx.size > ETH_FRAME_LEN) )
+ if ( (tx.size <= PKT_PROT_LEN) || (tx.size > ETH_FRAME_LEN) )
{
DPRINTK("Bad packet size: %d\n", tx.size);
__make_tx_response(vif, tx.id, RING_STATUS_BAD_PAGE);
@@ -2019,133 +2019,225 @@ static int get_tx_bufs(net_vif_t *vif)
}
-/*
- * do_net_update:
- *
- * Called from guest OS to notify updates to its transmit and/or receive
- * descriptor rings.
- */
-
-long do_net_update(void)
+static long get_bufs_from_vif(net_vif_t *vif)
{
net_ring_t *shared_rings;
- net_vif_t *vif;
net_idx_t *shared_idxs;
- unsigned int i, j, idx;
+ unsigned int i, j;
rx_req_entry_t rx;
unsigned long pte_pfn, buf_pfn;
struct pfn_info *pte_page, *buf_page;
+ struct task_struct *p = vif->domain;
unsigned long *ptep;
- perfc_incr(net_hypercalls);
-
- for ( idx = 0; idx < MAX_DOMAIN_VIFS; idx++ )
- {
- if ( (vif = current->net_vif_list[idx]) == NULL )
- break;
-
- shared_idxs = vif->shared_idxs;
- shared_rings = vif->shared_rings;
+ shared_idxs = vif->shared_idxs;
+ shared_rings = vif->shared_rings;
- /*
- * PHASE 1 -- TRANSMIT RING
- */
+ /*
+ * PHASE 1 -- TRANSMIT RING
+ */
- if ( get_tx_bufs(vif) )
- {
- add_to_net_schedule_list_tail(vif);
- maybe_schedule_tx_action();
- }
+ if ( get_tx_bufs(vif) )
+ {
+ add_to_net_schedule_list_tail(vif);
+ maybe_schedule_tx_action();
+ }
- /*
- * PHASE 2 -- RECEIVE RING
- */
+ /*
+ * PHASE 2 -- RECEIVE RING
+ */
- /*
- * Collect up new receive buffers. We collect up to the guest OS's
- * new producer index, but take care not to catch up with our own
- * consumer index.
- */
- j = vif->rx_prod;
- for ( i = vif->rx_req_cons;
- (i != shared_idxs->rx_req_prod) &&
- (((vif->rx_resp_prod-i) & (RX_RING_SIZE-1)) != 1);
- i = RX_RING_INC(i) )
- {
- rx = shared_rings->rx_ring[i].req;
+ /*
+ * Collect up new receive buffers. We collect up to the guest OS's new
+ * producer index, but take care not to catch up with our own consumer
+ * index.
+ */
+ j = vif->rx_prod;
+ for ( i = vif->rx_req_cons;
+ (i != shared_idxs->rx_req_prod) &&
+ (((vif->rx_resp_prod-i) & (RX_RING_SIZE-1)) != 1);
+ i = RX_RING_INC(i) )
+ {
+ rx = shared_rings->rx_ring[i].req;
- pte_pfn = rx.addr >> PAGE_SHIFT;
- pte_page = frame_table + pte_pfn;
+ pte_pfn = rx.addr >> PAGE_SHIFT;
+ pte_page = frame_table + pte_pfn;
- spin_lock_irq(&current->page_lock);
- if ( (pte_pfn >= max_page) ||
- ((pte_page->flags & (PG_type_mask | PG_domain_mask)) !=
- (PGT_l1_page_table | current->domain)) )
- {
- DPRINTK("Bad page frame for ppte %d,%08lx,%08lx,%08lx\n",
- current->domain, pte_pfn, max_page, pte_page->flags);
- spin_unlock_irq(&current->page_lock);
- make_rx_response(vif, rx.id, 0, RING_STATUS_BAD_PAGE, 0);
- continue;
- }
+ spin_lock_irq(&p->page_lock);
+ if ( (pte_pfn >= max_page) ||
+ ((pte_page->flags & (PG_type_mask | PG_domain_mask)) !=
+ (PGT_l1_page_table | p->domain)) )
+ {
+ DPRINTK("Bad page frame for ppte %d,%08lx,%08lx,%08lx\n",
+ p->domain, pte_pfn, max_page, pte_page->flags);
+ spin_unlock_irq(&p->page_lock);
+ make_rx_response(vif, rx.id, 0, RING_STATUS_BAD_PAGE, 0);
+ continue;
+ }
- ptep = map_domain_mem(rx.addr);
+ ptep = map_domain_mem(rx.addr);
- if ( !(*ptep & _PAGE_PRESENT) )
- {
- DPRINTK("Invalid PTE passed down (not present)\n");
- make_rx_response(vif, rx.id, 0, RING_STATUS_BAD_PAGE, 0);
- goto rx_unmap_and_continue;
- }
+ if ( !(*ptep & _PAGE_PRESENT) )
+ {
+ DPRINTK("Invalid PTE passed down (not present)\n");
+ make_rx_response(vif, rx.id, 0, RING_STATUS_BAD_PAGE, 0);
+ goto rx_unmap_and_continue;
+ }
- buf_pfn = *ptep >> PAGE_SHIFT;
- buf_page = frame_table + buf_pfn;
+ buf_pfn = *ptep >> PAGE_SHIFT;
+ buf_page = frame_table + buf_pfn;
- if ( ((buf_page->flags & (PG_type_mask | PG_domain_mask)) !=
- (PGT_writeable_page | current->domain)) ||
- (buf_page->tot_count != 1) )
- {
- DPRINTK("Need a mapped-once writeable page (%ld/%ld/%08lx)\n",
- buf_page->type_count, buf_page->tot_count, buf_page->flags);
- make_rx_response(vif, rx.id, 0, RING_STATUS_BAD_PAGE, 0);
- goto rx_unmap_and_continue;
- }
+ if ( ((buf_page->flags & (PG_type_mask | PG_domain_mask)) !=
+ (PGT_writeable_page | p->domain)) ||
+ (buf_page->tot_count != 1) )
+ {
+ DPRINTK("Need a mapped-once writeable page (%ld/%ld/%08lx)\n",
+ buf_page->type_count, buf_page->tot_count,
+ buf_page->flags);
+ make_rx_response(vif, rx.id, 0, RING_STATUS_BAD_PAGE, 0);
+ goto rx_unmap_and_continue;
+ }
- /*
- * The pte they passed was good, so take it away from them. We
- * also lock down the page-table page, so it doesn't go away.
- */
- get_page_type(pte_page);
- get_page_tot(pte_page);
- *ptep &= ~_PAGE_PRESENT;
- buf_page->flags = buf_page->type_count = buf_page->tot_count = 0;
- list_del(&buf_page->list);
-
- vif->rx_shadow_ring[j].id = rx.id;
- vif->rx_shadow_ring[j].pte_ptr = rx.addr;
- vif->rx_shadow_ring[j].buf_pfn = buf_pfn;
- vif->rx_shadow_ring[j].flush_count = (unsigned short)
- atomic_read(&tlb_flush_count[smp_processor_id()]);
- j = RX_RING_INC(j);
+ /*
+ * The pte they passed was good, so take it away from them. We also
+ * lock down the page-table page, so it doesn't go away.
+ */
+ get_page_type(pte_page);
+ get_page_tot(pte_page);
+ *ptep &= ~_PAGE_PRESENT;
+ buf_page->flags = buf_page->type_count = buf_page->tot_count = 0;
+ list_del(&buf_page->list);
+
+ vif->rx_shadow_ring[j].id = rx.id;
+ vif->rx_shadow_ring[j].pte_ptr = rx.addr;
+ vif->rx_shadow_ring[j].buf_pfn = buf_pfn;
+ vif->rx_shadow_ring[j].flush_count = (unsigned short)
+ atomic_read(&tlb_flush_count[smp_processor_id()]);
+ j = RX_RING_INC(j);
- rx_unmap_and_continue:
- unmap_domain_mem(ptep);
- spin_unlock_irq(&current->page_lock);
- }
+ rx_unmap_and_continue:
+ unmap_domain_mem(ptep);
+ spin_unlock_irq(&p->page_lock);
+ }
+
+ vif->rx_req_cons = i;
+
+ if ( vif->rx_prod != j )
+ {
+ smp_mb(); /* Let other CPUs see new descriptors first. */
+ vif->rx_prod = j;
+ }
+
+ return 0;
+}
+
+
+long flush_bufs_for_vif(net_vif_t *vif)
+{
+ int i;
+ unsigned long *pte, flags;
+ struct pfn_info *page;
+ struct task_struct *p = vif->domain;
+ rx_shadow_entry_t *rx;
+ net_ring_t *shared_rings = vif->shared_rings;
+ net_idx_t *shared_idxs = vif->shared_idxs;
+
+ /* Return any outstanding receive buffers to the guest OS. */
+ spin_lock_irqsave(&p->page_lock, flags);
+ for ( i = vif->rx_req_cons;
+ (i != shared_idxs->rx_req_prod) &&
+ (((vif->rx_resp_prod-i) & (RX_RING_SIZE-1)) != 1);
+ i = RX_RING_INC(i) )
+ {
+ make_rx_response(vif, shared_rings->rx_ring[i].req.id, 0,
+ RING_STATUS_DROPPED, 0);
+ }
+ vif->rx_req_cons = i;
+ for ( i = vif->rx_cons; i != vif->rx_prod; i = RX_RING_INC(i) )
+ {
+ rx = &vif->rx_shadow_ring[i];
+
+ /* Release the page-table page. */
+ page = frame_table + (rx->pte_ptr >> PAGE_SHIFT);
+ put_page_type(page);
+ put_page_tot(page);
- vif->rx_req_cons = i;
+ /* Give the buffer page back to the domain. */
+ page = frame_table + rx->buf_pfn;
+ list_add(&page->list, &p->pg_head);
+ page->flags = vif->domain->domain;
- if ( vif->rx_prod != j )
+ /* Patch up the PTE if it hasn't changed under our feet. */
+ pte = map_domain_mem(rx->pte_ptr);
+ if ( !(*pte & _PAGE_PRESENT) )
{
- smp_mb(); /* Let other CPUs see new descriptors first. */
- vif->rx_prod = j;
+ *pte = (rx->buf_pfn<<PAGE_SHIFT) | (*pte & ~PAGE_MASK) |
+ _PAGE_RW | _PAGE_PRESENT;
+ page->flags |= PGT_writeable_page | PG_need_flush;
+ page->type_count = page->tot_count = 1;
}
+ unmap_domain_mem(pte);
+
+ make_rx_response(vif, rx->id, 0, RING_STATUS_DROPPED, 0);
}
+ vif->rx_cons = i;
+ spin_unlock_irqrestore(&p->page_lock, flags);
+
+ /*
+ * Flush pending transmit buffers. The guest may still have to wait for
+ * buffers that are queued at a physical NIC.
+ */
+ spin_lock_irqsave(&vif->tx_lock, flags);
+ for ( i = vif->tx_req_cons;
+ (i != shared_idxs->tx_req_prod) &&
+ (((vif->tx_resp_prod-i) & (TX_RING_SIZE-1)) != 1);
+ i = TX_RING_INC(i) )
+ {
+ __make_tx_response(vif, shared_rings->tx_ring[i].req.id,
+ RING_STATUS_DROPPED);
+ }
+ vif->tx_req_cons = i;
+ spin_unlock_irqrestore(&vif->tx_lock, flags);
return 0;
}
+/*
+ * do_net_io_op:
+ *
+ * Called from guest OS to notify updates to its transmit and/or receive
+ * descriptor rings.
+ */
+long do_net_io_op(unsigned int op, unsigned int idx)
+{
+ net_vif_t *vif;
+ long ret;
+
+ perfc_incr(net_hypercalls);
+
+ if ( (vif = current->net_vif_list[idx]) == NULL )
+ return -EINVAL;
+
+ switch ( op )
+ {
+ case NETOP_PUSH_BUFFERS:
+ ret = get_bufs_from_vif(vif);
+ break;
+
+ case NETOP_FLUSH_BUFFERS:
+ ret = flush_bufs_for_vif(vif);
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+
static void __make_tx_response(net_vif_t *vif,
unsigned short id,
unsigned char st)
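
On the guest side only NETOP_PUSH_BUFFERS is exercised by this patch; NETOP_FLUSH_BUFFERS is invoked from destroy_net_vif() within Xen itself. A hypothetical guest-initiated teardown path might look like:

    /* Hypothetical: reclaim posted rx/tx buffers when taking a VIF down. */
    static void vif_drain(unsigned int vif_idx)
    {
        (void)HYPERVISOR_net_io_op(NETOP_FLUSH_BUFFERS, vif_idx);
    }
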
diff --git a/xenolinux-2.4.22-sparse/arch/xeno/drivers/block/xl_block.c b/xenolinux-2.4.22-sparse/arch/xeno/drivers/block/xl_block.c
index 2592b88ec3..cc3c91d704 100644
--- a/xenolinux-2.4.22-sparse/arch/xeno/drivers/block/xl_block.c
+++ b/xenolinux-2.4.22-sparse/arch/xeno/drivers/block/xl_block.c
@@ -40,7 +40,7 @@ static inline void signal_requests_to_xen(void)
{
DISABLE_SCATTERGATHER();
blk_ring->req_prod = req_prod;
- HYPERVISOR_block_io_op();
+ HYPERVISOR_block_io_op(BLKOP_PUSH_BUFFERS);
}
diff --git a/xenolinux-2.4.22-sparse/arch/xeno/drivers/network/network.c b/xenolinux-2.4.22-sparse/arch/xeno/drivers/network/network.c
index 51d86c8fb3..be46c4c28f 100644
--- a/xenolinux-2.4.22-sparse/arch/xeno/drivers/network/network.c
+++ b/xenolinux-2.4.22-sparse/arch/xeno/drivers/network/network.c
@@ -249,7 +249,7 @@ static void network_alloc_rx_buffers(struct net_device *dev)
/* Batch Xen notifications. */
if ( np->rx_bufs_to_notify > (RX_MAX_ENTRIES/4) )
{
- HYPERVISOR_net_update();
+ HYPERVISOR_net_io_op(NETOP_PUSH_BUFFERS, np->idx);
np->rx_bufs_to_notify = 0;
}
}
@@ -322,7 +322,7 @@ static int network_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Only notify Xen if there are no outstanding responses. */
mb();
if ( np->net_idx->tx_resp_prod == i )
- HYPERVISOR_net_update();
+ HYPERVISOR_net_io_op(NETOP_PUSH_BUFFERS, np->idx);
return 0;
}
diff --git a/xenolinux-2.4.22-sparse/arch/xeno/kernel/head.S b/xenolinux-2.4.22-sparse/arch/xeno/kernel/head.S
index a89fd8eda4..11a1324c74 100644
--- a/xenolinux-2.4.22-sparse/arch/xeno/kernel/head.S
+++ b/xenolinux-2.4.22-sparse/arch/xeno/kernel/head.S
@@ -9,9 +9,8 @@
#include <asm/desc.h>
/* Offsets in start_info structure */
-#define SHARED_INFO 4
-#define MOD_START 12
-#define MOD_LEN 16
+#define MOD_START 4
+#define MOD_LEN 8
startup_32:
cld
@@ -35,19 +34,16 @@ startup_32:
/* Clear BSS first so that there are no surprises... */
2: xorl %eax,%eax
- movl $SYMBOL_NAME(__bss_start),%edi
- movl $SYMBOL_NAME(_end),%ecx
- subl %edi,%ecx
- rep stosb
+ movl $SYMBOL_NAME(__bss_start),%edi
+ movl $SYMBOL_NAME(_end),%ecx
+ subl %edi,%ecx
+ rep stosb
/* Copy the necessary stuff from start_info structure. */
- /* We need to copy shared_info early, so that sti/cli work */
- mov SHARED_INFO(%esi),%eax
- mov %eax,SYMBOL_NAME(HYPERVISOR_shared_info)
mov $SYMBOL_NAME(start_info_union),%edi
mov $128,%ecx
rep movsl
-
+
jmp SYMBOL_NAME(start_kernel)
ENTRY(stack_start)
diff --git a/xenolinux-2.4.22-sparse/arch/xeno/kernel/setup.c b/xenolinux-2.4.22-sparse/arch/xeno/kernel/setup.c
index b2532d7b58..df9a6d4531 100644
--- a/xenolinux-2.4.22-sparse/arch/xeno/kernel/setup.c
+++ b/xenolinux-2.4.22-sparse/arch/xeno/kernel/setup.c
@@ -46,7 +46,11 @@
#include <asm/hypervisor.h>
#include <asm/hypervisor-ifs/dom0_ops.h>
-shared_info_t *HYPERVISOR_shared_info;
+/*
+ * Point at the empty zero page to start with. We map the real shared_info
+ * page as soon as fixmap is up and running.
+ */
+shared_info_t *HYPERVISOR_shared_info = empty_zero_page;
unsigned long *phys_to_machine_mapping;
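
The dummy pointer keeps early event-mask accesses harmless: until paging_init() installs the FIX_SHARED_INFO mapping (see mm/init.c below), reads and writes through HYPERVISOR_shared_info land in empty_zero_page, which is wiped again right after the switch. A sketch of the kind of access this window must tolerate (the field and bit names are recalled from this era's interface and should be treated as assumptions):

    /* Assumed-era sketch: a pre-fixmap interrupt-disable hits the dummy
     * page rather than real Xen state, so it is safely ignored. */
    static inline void early_cli(void)
    {
        clear_bit(EVENTS_MASTER_ENABLE_BIT,
                  &HYPERVISOR_shared_info->events_mask);
    }
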
diff --git a/xenolinux-2.4.22-sparse/arch/xeno/mm/init.c b/xenolinux-2.4.22-sparse/arch/xeno/mm/init.c
index 7f9967531e..633472b9d0 100644
--- a/xenolinux-2.4.22-sparse/arch/xeno/mm/init.c
+++ b/xenolinux-2.4.22-sparse/arch/xeno/mm/init.c
@@ -43,50 +43,50 @@ static unsigned long totalhigh_pages;
int do_check_pgt_cache(int low, int high)
{
- int freed = 0;
- if(pgtable_cache_size > high) {
- do {
+ int freed = 0;
+ if(pgtable_cache_size > high) {
+ do {
if (!QUICKLIST_EMPTY(pgd_quicklist)) {
- free_pgd_slow(get_pgd_fast());
- freed++;
- }
+ free_pgd_slow(get_pgd_fast());
+ freed++;
+ }
if (!QUICKLIST_EMPTY(pte_quicklist)) {
- pte_free_slow(pte_alloc_one_fast(NULL, 0));
- freed++;
- }
- } while(pgtable_cache_size > low);
- }
- return freed;
+ pte_free_slow(pte_alloc_one_fast(NULL, 0));
+ freed++;
+ }
+ } while(pgtable_cache_size > low);
+ }
+ return freed;
}
void show_mem(void)
{
- int i, total = 0, reserved = 0;
- int shared = 0, cached = 0;
- int highmem = 0;
-
- printk("Mem-info:\n");
- show_free_areas();
- printk("Free swap: %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
- i = max_mapnr;
- while (i-- > 0) {
- total++;
- if (PageHighMem(mem_map+i))
- highmem++;
- if (PageReserved(mem_map+i))
- reserved++;
- else if (PageSwapCache(mem_map+i))
- cached++;
- else if (page_count(mem_map+i))
- shared += page_count(mem_map+i) - 1;
- }
- printk("%d pages of RAM\n", total);
- printk("%d pages of HIGHMEM\n",highmem);
- printk("%d reserved pages\n",reserved);
- printk("%d pages shared\n",shared);
- printk("%d pages swap cached\n",cached);
- printk("%ld pages in page table cache\n",pgtable_cache_size);
- show_buffers();
+ int i, total = 0, reserved = 0;
+ int shared = 0, cached = 0;
+ int highmem = 0;
+
+ printk("Mem-info:\n");
+ show_free_areas();
+ printk("Free swap: %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
+ i = max_mapnr;
+ while (i-- > 0) {
+ total++;
+ if (PageHighMem(mem_map+i))
+ highmem++;
+ if (PageReserved(mem_map+i))
+ reserved++;
+ else if (PageSwapCache(mem_map+i))
+ cached++;
+ else if (page_count(mem_map+i))
+ shared += page_count(mem_map+i) - 1;
+ }
+ printk("%d pages of RAM\n", total);
+ printk("%d pages of HIGHMEM\n",highmem);
+ printk("%d reserved pages\n",reserved);
+ printk("%d pages shared\n",shared);
+ printk("%d pages swap cached\n",cached);
+ printk("%ld pages in page table cache\n",pgtable_cache_size);
+ show_buffers();
}
/* References to section boundaries */
@@ -95,118 +95,118 @@ extern char _text, _etext, _edata, __bss_start, _end;
extern char __init_begin, __init_end;
static inline void set_pte_phys (unsigned long vaddr,
- unsigned long phys, pgprot_t flags)
+ unsigned long phys, pgprot_t flags)
{
- pgprot_t prot;
- pgd_t *pgd;
- pmd_t *pmd;
- pte_t *pte;
-
- pgd = init_mm.pgd + __pgd_offset(vaddr);
- if (pgd_none(*pgd)) {
- printk("PAE BUG #00!\n");
- return;
- }
- pmd = pmd_offset(pgd, vaddr);
- if (pmd_none(*pmd)) {
- printk("PAE BUG #01!\n");
- return;
- }
- pte = pte_offset(pmd, vaddr);
-
- if (pte_val(*pte))
- pte_ERROR(*pte);
-
- pgprot_val(prot) = pgprot_val(PAGE_KERNEL) | pgprot_val(flags);
-
- /* We queue directly, avoiding hidden phys->machine translation. */
- queue_l1_entry_update(pte, phys | pgprot_val(prot));
-
- /*
- * It's enough to flush this one mapping.
- * (PGE mappings get flushed as well)
- */
- __flush_tlb_one(vaddr);
+ pgprot_t prot;
+ pgd_t *pgd;
+ pmd_t *pmd;
+ pte_t *pte;
+
+ pgd = init_mm.pgd + __pgd_offset(vaddr);
+ if (pgd_none(*pgd)) {
+ printk("PAE BUG #00!\n");
+ return;
+ }
+ pmd = pmd_offset(pgd, vaddr);
+ if (pmd_none(*pmd)) {
+ printk("PAE BUG #01!\n");
+ return;
+ }
+ pte = pte_offset(pmd, vaddr);
+
+ if (pte_val(*pte))
+ pte_ERROR(*pte);
+
+ pgprot_val(prot) = pgprot_val(PAGE_KERNEL) | pgprot_val(flags);
+
+ /* We queue directly, avoiding hidden phys->machine translation. */
+ queue_l1_entry_update(pte, phys | pgprot_val(prot));
+
+ /*
+ * It's enough to flush this one mapping.
+ * (PGE mappings get flushed as well)
+ */
+ __flush_tlb_one(vaddr);
}
void __set_fixmap (enum fixed_addresses idx, unsigned long phys,
pgprot_t flags)
{
- unsigned long address = __fix_to_virt(idx);
+ unsigned long address = __fix_to_virt(idx);
- if (idx >= __end_of_fixed_addresses) {
- printk("Invalid __set_fixmap\n");
- return;
- }
- set_pte_phys(address, phys, flags);
+ if (idx >= __end_of_fixed_addresses) {
+ printk("Invalid __set_fixmap\n");
+ return;
+ }
+ set_pte_phys(address, phys, flags);
}
static void __init fixrange_init (unsigned long start,
unsigned long end, pgd_t *pgd_base)
{
- pgd_t *pgd, *kpgd;
- pmd_t *pmd, *kpmd;
- pte_t *pte, *kpte;
- int i, j;
- unsigned long vaddr;
-
- vaddr = start;
- i = __pgd_offset(vaddr);
- j = __pmd_offset(vaddr);
- pgd = pgd_base + i;
-
- for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
+ pgd_t *pgd, *kpgd;
+ pmd_t *pmd, *kpmd;
+ pte_t *pte, *kpte;
+ int i, j;
+ unsigned long vaddr;
+
+ vaddr = start;
+ i = __pgd_offset(vaddr);
+ j = __pmd_offset(vaddr);
+ pgd = pgd_base + i;
+
+ for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
#if CONFIG_X86_PAE
- if (pgd_none(*pgd)) {
- pmd = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
- set_pgd(pgd, __pgd(__pa(pmd) + 0x1));
- if (pmd != pmd_offset(pgd, 0))
- printk("PAE BUG #02!\n");
- }
- pmd = pmd_offset(pgd, vaddr);
+ if (pgd_none(*pgd)) {
+ pmd = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
+ set_pgd(pgd, __pgd(__pa(pmd) + 0x1));
+ if (pmd != pmd_offset(pgd, 0))
+ printk("PAE BUG #02!\n");
+ }
+ pmd = pmd_offset(pgd, vaddr);
#else
- pmd = (pmd_t *)pgd;
+ pmd = (pmd_t *)pgd;
#endif
- for (; (j < PTRS_PER_PMD) && (vaddr != end); pmd++, j++) {
- if (pmd_none(*pmd)) {
- pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
- clear_page(pte);
- kpgd = pgd_offset_k((unsigned long)pte);
- kpmd = pmd_offset(kpgd, (unsigned long)pte);
- kpte = pte_offset(kpmd, (unsigned long)pte);
- queue_l1_entry_update(kpte,
+ for (; (j < PTRS_PER_PMD) && (vaddr != end); pmd++, j++) {
+ if (pmd_none(*pmd)) {
+ pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
+ clear_page(pte);
+ kpgd = pgd_offset_k((unsigned long)pte);
+ kpmd = pmd_offset(kpgd, (unsigned long)pte);
+ kpte = pte_offset(kpmd, (unsigned long)pte);
+ queue_l1_entry_update(kpte,
(*(unsigned long *)kpte)&~_PAGE_RW);
- set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(pte)));
- }
- vaddr += PMD_SIZE;
- }
- j = 0;
- }
+ set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(pte)));
+ }
+ vaddr += PMD_SIZE;
+ }
+ j = 0;
+ }
- XENO_flush_page_update_queue();
+ XENO_flush_page_update_queue();
}
static void __init zone_sizes_init(void)
{
- unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
- unsigned int max_dma, high, low;
-
- max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
- low = max_low_pfn;
- high = highend_pfn;
-
- if (low < max_dma)
- zones_size[ZONE_DMA] = low;
- else {
- zones_size[ZONE_DMA] = max_dma;
- zones_size[ZONE_NORMAL] = low - max_dma;
+ unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
+ unsigned int max_dma, high, low;
+
+ max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
+ low = max_low_pfn;
+ high = highend_pfn;
+
+ if (low < max_dma)
+ zones_size[ZONE_DMA] = low;
+ else {
+ zones_size[ZONE_DMA] = max_dma;
+ zones_size[ZONE_NORMAL] = low - max_dma;
#ifdef CONFIG_HIGHMEM
- zones_size[ZONE_HIGHMEM] = high - low;
+ zones_size[ZONE_HIGHMEM] = high - low;
#endif
- }
- free_area_init(zones_size);
+ }
+ free_area_init(zones_size);
}
/*
@@ -218,60 +218,65 @@ static void __init zone_sizes_init(void)
*/
void __init paging_init(void)
{
- unsigned long vaddr;
+ unsigned long vaddr;
- zone_sizes_init();
+ zone_sizes_init();
- /*
- * Fixed mappings, only the page table structure has to be created -
- * mappings will be set by set_fixmap():
- */
- vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
- fixrange_init(vaddr, HYPERVISOR_VIRT_START, init_mm.pgd);
+ /*
+ * Fixed mappings, only the page table structure has to be created -
+ * mappings will be set by set_fixmap():
+ */
+ vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
+ fixrange_init(vaddr, HYPERVISOR_VIRT_START, init_mm.pgd);
+
+ /* Cheesy: this can probably be moved to the blkdev driver. */
+ set_fixmap(FIX_BLKRING_BASE, start_info.blk_ring);
- /* Cheesy: this can probably be moved to the blkdev driver. */
- set_fixmap(FIX_BLKRING_BASE, start_info.blk_ring);
+ /* Switch to the real shared_info page, and clear the dummy page. */
+ set_fixmap(FIX_SHARED_INFO, start_info.shared_info);
+ HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
+ memset(empty_zero_page, 0, sizeof(empty_zero_page));
#ifdef CONFIG_HIGHMEM
#error
- kmap_init();
+ kmap_init();
#endif
}
static inline int page_is_ram (unsigned long pagenr)
{
- return 1;
+ return 1;
}
#ifdef CONFIG_HIGHMEM
void __init one_highpage_init(struct page *page, int pfn, int bad_ppro)
{
- if (!page_is_ram(pfn)) {
- SetPageReserved(page);
- return;
- }
+ if (!page_is_ram(pfn)) {
+ SetPageReserved(page);
+ return;
+ }
- if (bad_ppro && page_kills_ppro(pfn)) {
- SetPageReserved(page);
- return;
- }
+ if (bad_ppro && page_kills_ppro(pfn)) {
+ SetPageReserved(page);
+ return;
+ }
- ClearPageReserved(page);
- set_bit(PG_highmem, &page->flags);
- atomic_set(&page->count, 1);
- __free_page(page);
- totalhigh_pages++;
+ ClearPageReserved(page);
+ set_bit(PG_highmem, &page->flags);
+ atomic_set(&page->count, 1);
+ __free_page(page);
+ totalhigh_pages++;
}
#endif /* CONFIG_HIGHMEM */
static void __init set_max_mapnr_init(void)
{
#ifdef CONFIG_HIGHMEM
- highmem_start_page = mem_map + highstart_pfn;
- max_mapnr = num_physpages = highend_pfn;
- num_mappedpages = max_low_pfn;
+ highmem_start_page = mem_map + highstart_pfn;
+ max_mapnr = num_physpages = highend_pfn;
+ num_mappedpages = max_low_pfn;
#else
- max_mapnr = num_mappedpages = num_physpages = max_low_pfn;
+ max_mapnr = num_mappedpages = num_physpages = max_low_pfn;
#endif
}
@@ -279,112 +284,112 @@ static int __init free_pages_init(void)
{
#ifdef CONFIG_HIGHMEM
#error Where is this supposed to be initialised?
- int bad_ppro;
+ int bad_ppro;
#endif
- int reservedpages, pfn;
-
- /* this will put all low memory onto the freelists */
- totalram_pages += free_all_bootmem();
-
- reservedpages = 0;
- for (pfn = 0; pfn < max_low_pfn; pfn++) {
- /*
- * Only count reserved RAM pages
- */
- if (page_is_ram(pfn) && PageReserved(mem_map+pfn))
- reservedpages++;
- }
+ int reservedpages, pfn;
+
+ /* this will put all low memory onto the freelists */
+ totalram_pages += free_all_bootmem();
+
+ reservedpages = 0;
+ for (pfn = 0; pfn < max_low_pfn; pfn++) {
+ /*
+ * Only count reserved RAM pages
+ */
+ if (page_is_ram(pfn) && PageReserved(mem_map+pfn))
+ reservedpages++;
+ }
#ifdef CONFIG_HIGHMEM
- for (pfn = highend_pfn-1; pfn >= highstart_pfn; pfn--)
- one_highpage_init((struct page *) (mem_map + pfn), pfn, bad_ppro);
- totalram_pages += totalhigh_pages;
+ for (pfn = highend_pfn-1; pfn >= highstart_pfn; pfn--)
+ one_highpage_init((struct page *) (mem_map + pfn), pfn, bad_ppro);
+ totalram_pages += totalhigh_pages;
#endif
- return reservedpages;
+ return reservedpages;
}
void __init mem_init(void)
{
- int codesize, reservedpages, datasize, initsize;
+ int codesize, reservedpages, datasize, initsize;
- if (!mem_map)
- BUG();
+ if (!mem_map)
+ BUG();
- set_max_mapnr_init();
+ set_max_mapnr_init();
- high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
+ high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
- /* clear the zero-page */
- memset(empty_zero_page, 0, PAGE_SIZE);
+ /* clear the zero-page */
+ memset(empty_zero_page, 0, PAGE_SIZE);
- reservedpages = free_pages_init();
+ reservedpages = free_pages_init();
- codesize = (unsigned long) &_etext - (unsigned long) &_text;
- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
- initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
+ codesize = (unsigned long) &_etext - (unsigned long) &_text;
+ datasize = (unsigned long) &_edata - (unsigned long) &_etext;
+ initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
- printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
- (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
- max_mapnr << (PAGE_SHIFT-10),
- codesize >> 10,
- reservedpages << (PAGE_SHIFT-10),
- datasize >> 10,
- initsize >> 10,
- (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
- );
+ printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
+ (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
+ max_mapnr << (PAGE_SHIFT-10),
+ codesize >> 10,
+ reservedpages << (PAGE_SHIFT-10),
+ datasize >> 10,
+ initsize >> 10,
+ (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
+ );
boot_cpu_data.wp_works_ok = 1;
}
void free_initmem(void)
{
- unsigned long addr;
-
- addr = (unsigned long)(&__init_begin);
- for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
- ClearPageReserved(virt_to_page(addr));
- set_page_count(virt_to_page(addr), 1);
- free_page(addr);
- totalram_pages++;
- }
- printk (KERN_INFO "Freeing unused kernel memory: %dk freed\n", (&__init_end - &__init_begin) >> 10);
+ unsigned long addr;
+
+ addr = (unsigned long)(&__init_begin);
+ for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
+ ClearPageReserved(virt_to_page(addr));
+ set_page_count(virt_to_page(addr), 1);
+ free_page(addr);
+ totalram_pages++;
+ }
+ printk (KERN_INFO "Freeing unused kernel memory: %dk freed\n", (&__init_end - &__init_begin) >> 10);
}
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
- if (start < end)
- printk (KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
- for (; start < end; start += PAGE_SIZE) {
- ClearPageReserved(virt_to_page(start));
- set_page_count(virt_to_page(start), 1);
- free_page(start);
- totalram_pages++;
- }
+ if (start < end)
+ printk (KERN_INFO "Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
+ for (; start < end; start += PAGE_SIZE) {
+ ClearPageReserved(virt_to_page(start));
+ set_page_count(virt_to_page(start), 1);
+ free_page(start);
+ totalram_pages++;
+ }
}
#endif
void si_meminfo(struct sysinfo *val)
{
- val->totalram = totalram_pages;
- val->sharedram = 0;
- val->freeram = nr_free_pages();
- val->bufferram = atomic_read(&buffermem_pages);
- val->totalhigh = totalhigh_pages;
- val->freehigh = nr_free_highpages();
- val->mem_unit = PAGE_SIZE;
- return;
+ val->totalram = totalram_pages;
+ val->sharedram = 0;
+ val->freeram = nr_free_pages();
+ val->bufferram = atomic_read(&buffermem_pages);
+ val->totalhigh = totalhigh_pages;
+ val->freehigh = nr_free_highpages();
+ val->mem_unit = PAGE_SIZE;
+ return;
}
#if defined(CONFIG_X86_PAE)
struct kmem_cache_s *pae_pgd_cachep;
void __init pgtable_cache_init(void)
{
- /*
- * PAE pgds must be 16-byte aligned:
+ /*
+ * PAE pgds must be 16-byte aligned:
*/
- pae_pgd_cachep = kmem_cache_create("pae_pgd", 32, 0,
- SLAB_HWCACHE_ALIGN | SLAB_MUST_HWCACHE_ALIGN, NULL, NULL);
- if (!pae_pgd_cachep)
- panic("init_pae(): Cannot alloc pae_pgd SLAB cache");
+ pae_pgd_cachep = kmem_cache_create("pae_pgd", 32, 0,
+ SLAB_HWCACHE_ALIGN | SLAB_MUST_HWCACHE_ALIGN, NULL, NULL);
+ if (!pae_pgd_cachep)
+ panic("init_pae(): Cannot alloc pae_pgd SLAB cache");
}
#endif /* CONFIG_X86_PAE */
diff --git a/xenolinux-2.4.22-sparse/include/asm-xeno/fixmap.h b/xenolinux-2.4.22-sparse/include/asm-xeno/fixmap.h
index 590ecf9961..ec7083a359 100644
--- a/xenolinux-2.4.22-sparse/include/asm-xeno/fixmap.h
+++ b/xenolinux-2.4.22-sparse/include/asm-xeno/fixmap.h
@@ -47,15 +47,16 @@ enum fixed_addresses {
FIX_NETRING1_BASE,
FIX_NETRING2_BASE,
FIX_NETRING3_BASE,
+ FIX_SHARED_INFO,
#ifdef CONFIG_VGA_CONSOLE
#define NR_FIX_BTMAPS 32 /* 128KB For the Dom0 VGA Console A0000-C0000 */
#else
-#define NR_FIX_BTMAPS 1 /* have on page incase anyone wants it in future */
+#define NR_FIX_BTMAPS 1 /* in case anyone wants it in future... */
#endif
FIX_BTMAP_END,
FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS - 1,
- /* our bt_ioremap is permenant unlike other architectures */
+ /* our bt_ioremap is permanent, unlike other architectures */
__end_of_permanent_fixed_addresses,
__end_of_fixed_addresses = __end_of_permanent_fixed_addresses
diff --git a/xenolinux-2.4.22-sparse/include/asm-xeno/hypervisor.h b/xenolinux-2.4.22-sparse/include/asm-xeno/hypervisor.h
index 80b2c28da0..57a0e30d79 100644
--- a/xenolinux-2.4.22-sparse/include/asm-xeno/hypervisor.h
+++ b/xenolinux-2.4.22-sparse/include/asm-xeno/hypervisor.h
@@ -219,12 +219,13 @@ static inline int HYPERVISOR_set_callbacks(
return ret;
}
-static inline int HYPERVISOR_net_update(void)
+static inline int HYPERVISOR_net_io_op(unsigned int op, unsigned int idx)
{
int ret;
__asm__ __volatile__ (
TRAP_INSTR
- : "=a" (ret) : "0" (__HYPERVISOR_net_update) );
+ : "=a" (ret) : "0" (__HYPERVISOR_net_io_op),
+ "b" (op), "c" (idx) );
return ret;
}
@@ -281,12 +282,13 @@ static inline int HYPERVISOR_network_op(void *network_op)
return ret;
}
-static inline int HYPERVISOR_block_io_op(void)
+static inline int HYPERVISOR_block_io_op(unsigned int op)
{
int ret;
__asm__ __volatile__ (
TRAP_INSTR
- : "=a" (ret) : "0" (__HYPERVISOR_block_io_op) );
+ : "=a" (ret) : "0" (__HYPERVISOR_block_io_op),
+ "b" (op) );
return ret;
}