aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authoriap10@labyrinth.cl.cam.ac.uk <iap10@labyrinth.cl.cam.ac.uk>2004-04-27 16:15:55 +0000
committeriap10@labyrinth.cl.cam.ac.uk <iap10@labyrinth.cl.cam.ac.uk>2004-04-27 16:15:55 +0000
commit33e4a0c224e38eefd0654f8627363d928247b3a6 (patch)
treee3ccf8ea870f598b8dd78ee1a48e2e2b94c1bda2
parentf57f5ec70d3e1b9d8047b14e0d5deafc2c549e5c (diff)
parent901fa6de04309bed46b8d3ad84aa77c5baabb9cc (diff)
downloadxen-33e4a0c224e38eefd0654f8627363d928247b3a6.tar.gz
xen-33e4a0c224e38eefd0654f8627363d928247b3a6.tar.bz2
xen-33e4a0c224e38eefd0654f8627363d928247b3a6.zip
bitkeeper revision 1.880 (408e873bJZUD6CJ5wPeA5HUnW78lhQ)
Merge
-rw-r--r--tools/xc/lib/xc_linux_save.c233
-rw-r--r--tools/xc/lib/xc_private.c106
-rw-r--r--tools/xc/lib/xc_private.h45
-rw-r--r--xen/common/memory.c6
-rw-r--r--xen/net/dev.c5
-rw-r--r--xenolinux-2.4.26-sparse/arch/xen/drivers/dom0/core.c58
-rw-r--r--xenolinux-2.4.26-sparse/arch/xen/mm/ioremap.c5
-rw-r--r--xenolinux-2.4.26-sparse/include/asm-xen/proc_cmd.h13
8 files changed, 390 insertions, 81 deletions
diff --git a/tools/xc/lib/xc_linux_save.c b/tools/xc/lib/xc_linux_save.c
index dc759f546c..e6e8f8864d 100644
--- a/tools/xc/lib/xc_linux_save.c
+++ b/tools/xc/lib/xc_linux_save.c
@@ -84,6 +84,9 @@ int xc_linux_save(int xc_handle,
unsigned long mfn;
unsigned int prev_pc, this_pc;
+ /* state of the new MFN mapper */
+ mfn_mapper_t *mapper_handle1, *mapper_handle2;
+
/* Remember if we stopped the guest, so we can restart it on exit. */
int we_stopped_it = 0;
@@ -102,18 +105,23 @@ int xc_linux_save(int xc_handle,
/* A temporary mapping, and a copy, of one frame of guest memory. */
unsigned long *ppage, page[1024];
- /* A temporary mapping, and a copy, of the pfn-to-mfn table frame list. */
- unsigned long *p_pfn_to_mfn_frame_list, pfn_to_mfn_frame_list[1024];
- /* A temporary mapping of one frame in the above list. */
- unsigned long *pfn_to_mfn_frame;
+ /* A copy of the pfn-to-mfn table frame list. */
+ unsigned long *pfn_to_mfn_frame_list;
+ /* A live mapping of the pfn-to-mfn table frame list. */
+ unsigned long *live_pfn_to_mfn_frame_list;
- /* A table mapping each PFN to its current MFN. */
+ /* A table translating each PFN to its current MFN. */
unsigned long *pfn_to_mfn_table = NULL;
- /* A table mapping each current MFN to its canonical PFN. */
+ /* Live mapping of the table mapping each PFN to its current MFN. */
+ unsigned long *live_pfn_to_mfn_table = NULL;
+ /* A table translating each current MFN to its canonical PFN. */
unsigned long *mfn_to_pfn_table = NULL;
+ /* Live mapping of shared info structure */
+ unsigned long *live_shinfo;
+
/* A temporary mapping, and a copy, of the guest's suspend record. */
- suspend_record_t *p_srec, srec;
+ suspend_record_t *srec;
/* The name and descriptor of the file that we are writing to. */
int fd;
@@ -178,8 +186,10 @@ int xc_linux_save(int xc_handle,
goto out;
}
- if ( (pm_handle = init_pfn_mapper((domid_t)domid)) < 0 )
- goto out;
+
+ /* Map the suspend-record MFN to pin it */
+ srec = mfn_mapper_map_single(xc_handle, PROT_READ,
+ ctxt.cpu_ctxt.esi, sizeof(*srec));
/* Is the suspend-record MFN actually valid for this domain? */
if ( !check_pfn_ownership(xc_handle, ctxt.cpu_ctxt.esi, domid) )
@@ -188,33 +198,65 @@ int xc_linux_save(int xc_handle,
goto out;
}
- /* If the suspend-record MFN is okay then grab a copy of it to @srec. */
- p_srec = map_pfn_readonly(pm_handle, ctxt.cpu_ctxt.esi);
- memcpy(&srec, p_srec, sizeof(srec));
- unmap_pfn(pm_handle, p_srec);
-
- if ( srec.nr_pfns > 1024*1024 )
+ /* cheesy sanity check */
+ if ( srec->nr_pfns > 1024*1024 )
{
ERROR("Invalid state record -- pfn count out of range");
goto out;
}
- if ( !check_pfn_ownership(xc_handle, srec.pfn_to_mfn_frame_list, domid) )
+
+ /* the pfn_to_mfn_frame_list fits in a single page */
+ live_pfn_to_mfn_frame_list =
+ mfn_mapper_map_single(xc_handle, PROT_WRITE,
+ srec->pfn_to_mfn_frame_list,
+ PAGE_SIZE);
+
+ if ( !check_pfn_ownership(xc_handle,
+ srec->pfn_to_mfn_frame_list, domid) )
{
- ERROR("Invalid pfn-to-mfn frame list pointer");
+ ERROR("Invalid pfn-to-mfn frame list pointer");
+ goto out;
+ }
+
+ memcpy( pfn_to_mfn_frame_list, live_pfn_to_mfn_frame_list, PAGE_SIZE );
+
+ if ( (mapper_handle1 = mfn_mapper_init(xc_handle, 1024*1024, PROT_READ ))
+ == NULL )
goto out;
+
+ for ( i = 0; i < (srec->nr_pfns+1023)/1024; i++ )
+ {
+ /* Grab a copy of the pfn-to-mfn table frame list.
+ This has the effect of preventing the page from being freed and
+ given to another domain. (though the domain is stopped anyway...) */
+ mfn_mapper_queue_entry( mapper_handle1, i<<PAGE_SHIFT,
+ pfn_to_mfn_frame_list[i],
+ PAGE_SIZE );
}
+
+ mfn_mapper_flush_queue(mapper_handle1);
+
+    /* Now they're pinned, check they're in the right domain. We assume
+       they're not going to change, otherwise the suspend is going to fail,
+       with only itself to blame. */
- /* Grab a copy of the pfn-to-mfn table frame list. */
- p_pfn_to_mfn_frame_list = map_pfn_readonly(
- pm_handle, srec.pfn_to_mfn_frame_list);
- memcpy(pfn_to_mfn_frame_list, p_pfn_to_mfn_frame_list, PAGE_SIZE);
- unmap_pfn(pm_handle, p_pfn_to_mfn_frame_list);
+ for ( i = 0; i < (srec->nr_pfns+1023)/1024; i++ )
+ {
+ if ( !check_pfn_ownership(xc_handle,
+ pfn_to_mfn_frame_list[i], domid) )
+ {
+ ERROR("Invalid pfn-to-mfn frame list pointer");
+ goto out;
+ }
+ }
+
+ live_pfn_to_mfn_table = mfn_mapper_base( mapper_handle1 );
/* We want zeroed memory so use calloc rather than malloc. */
mfn_to_pfn_table = calloc(1, 4 * 1024 * 1024);
- pfn_to_mfn_table = calloc(1, 4 * srec.nr_pfns);
- pfn_type = calloc(1, 4 * srec.nr_pfns);
+ pfn_to_mfn_table = calloc(1, 4 * srec->nr_pfns);
+ pfn_type = calloc(1, 4 * srec->nr_pfns);
if ( (mfn_to_pfn_table == NULL) ||
(pfn_to_mfn_table == NULL) ||
@@ -230,24 +272,10 @@ int xc_linux_save(int xc_handle,
* loop we have each MFN mapped at most once. Note that there may be MFNs
* that aren't mapped at all: we detect these by MFN_IS_IN_PSEUDOPHYS_MAP.
*/
- pfn_to_mfn_frame = NULL;
- for ( i = 0; i < srec.nr_pfns; i++ )
+
+ for ( i = 0; i < srec->nr_pfns; i++ )
{
- /* Each frameful of table frames must be checked & mapped on demand. */
- if ( (i & 1023) == 0 )
- {
- mfn = pfn_to_mfn_frame_list[i/1024];
- if ( !check_pfn_ownership(xc_handle, mfn, domid) )
- {
- ERROR("Invalid frame number if pfn-to-mfn frame list");
- goto out;
- }
- if ( pfn_to_mfn_frame != NULL )
- unmap_pfn(pm_handle, pfn_to_mfn_frame);
- pfn_to_mfn_frame = map_pfn_readonly(pm_handle, mfn);
- }
-
- mfn = pfn_to_mfn_frame[i & 1023];
+ mfn = live_pfn_to_mfn_table[i];
if ( !check_pfn_ownership(xc_handle, mfn, domid) )
{
@@ -256,14 +284,14 @@ int xc_linux_save(int xc_handle,
}
/* Did we map this MFN already? That would be invalid! */
- if ( MFN_IS_IN_PSEUDOPHYS_MAP(mfn) )
+ if ( mfn_to_pfn_table[mfn] )
{
ERROR("A machine frame appears twice in pseudophys space");
goto out;
}
- pfn_to_mfn_table[i] = mfn;
mfn_to_pfn_table[mfn] = i;
+ pfn_to_mfn_table[i] = live_pfn_to_mfn_table[i];
/* Query page type by MFN, but store it by PFN. */
if ( (pfn_type[i] = get_pfn_type(xc_handle, mfn, domid)) ==
@@ -307,63 +335,110 @@ int xc_linux_save(int xc_handle,
}
/* Start writing out the saved-domain record. */
- ppage = map_pfn_readonly(pm_handle, shared_info_frame);
+ live_shinfo = mfn_mapper_map_single(xc_handle, PROT_READ,
+ shared_info_frame, PAGE_SIZE);
+
+ /* Belts and braces safety check on the shared info record */
+ if ( !check_pfn_ownership(xc_handle, shared_info_frame, domid) )
+ {
+ ERROR("Invalid shared_info_frame");
+ goto out;
+ }
+
if ( !checked_write(gfd, "LinuxGuestRecord", 16) ||
!checked_write(gfd, name, sizeof(name)) ||
!checked_write(gfd, &srec.nr_pfns, sizeof(unsigned long)) ||
!checked_write(gfd, &ctxt, sizeof(ctxt)) ||
- !checked_write(gfd, ppage, PAGE_SIZE) ||
+ !checked_write(gfd, live_shinfo, PAGE_SIZE) ||
!checked_write(gfd, pfn_to_mfn_frame_list, PAGE_SIZE) ||
!checked_write(gfd, pfn_type, 4 * srec.nr_pfns) )
{
ERROR("Error when writing to state file");
goto out;
}
- unmap_pfn(pm_handle, ppage);
+ munmap(live_shinfo, PAGE_SIZE);
verbose_printf("Saving memory pages: 0%%");
+ if ( (mapper_handle2 = mfn_mapper_init(xc_handle,
+ BATCH_SIZE*4096, PROT_READ ))
+ == NULL )
+ goto out;
+
+ region_base = mfn_mapper_base( mapper_handle2 );
+
/* Now write out each data page, canonicalising page tables as we go... */
prev_pc = 0;
- for ( i = 0; i < srec.nr_pfns; i++ )
+ for ( n = 0; n < srec.nr_pfns; )
{
- this_pc = (i * 100) / srec.nr_pfns;
+ this_pc = (n * 100) / srec.nr_pfns;
if ( (this_pc - prev_pc) >= 5 )
{
verbose_printf("\b\b\b\b%3d%%", this_pc);
prev_pc = this_pc;
}
- mfn = pfn_to_mfn_table[i];
-
- ppage = map_pfn_readonly(pm_handle, mfn);
- memcpy(page, ppage, PAGE_SIZE);
- unmap_pfn(pm_handle, ppage);
- if ( (pfn_type[i] == L1TAB) || (pfn_type[i] == L2TAB) )
- {
- for ( j = 0;
- j < ((pfn_type[i] == L2TAB) ?
- (HYPERVISOR_VIRT_START >> L2_PAGETABLE_SHIFT) : 1024);
- j++ )
- {
- if ( !(page[j] & _PAGE_PRESENT) ) continue;
- mfn = page[j] >> PAGE_SHIFT;
- if ( !MFN_IS_IN_PSEUDOPHYS_MAP(mfn) )
- {
- ERROR("Frame number in pagetable page is invalid");
- goto out;
- }
- page[j] &= PAGE_SIZE - 1;
- page[j] |= mfn_to_pfn_table[mfn] << PAGE_SHIFT;
- }
- }
-
- if ( !checked_write(gfd, page, PAGE_SIZE) )
- {
- ERROR("Error when writing to state file");
- goto out;
- }
+ for( j = 0, i = n; j < BATCH_SIZE && i < srec.nr_pfns ; j++, i++ )
+ {
+ /* queue up mappings for all of the pages in this batch */
+
+ mfn_mapper_queue_entry( mapper_handle2, j<<PAGE_SHIFT,
+ pfn_to_mfn_frame_list[i],
+ PAGE_SIZE );
+ }
+ mfn_mapper_flush_queue(mapper_handle2);
+
+ typer_handle = get_type_init( xc_handle, BATCH_SIZE )
+
+ for( j = 0, i = n; j < BATCH_SIZE && i < srec.nr_pfns ; j++, i++ )
+ {
+ /* queue up ownership and type checks for all pages in batch */
+
+ get_type_queue_entry( typer_handle, domain,
+ pfn_to_mfn_frame_list[i] );
+ }
+
+
+ region_type = get_type;
+
+ for( j = 0, i = n; j < BATCH_SIZE && i < srec.nr_pfns ; j++, i++ )
+ {
+ /* write out pages in batch */
+
+ mfn = pfn_to_mfn_table[i];
+
+ ppage = map_pfn_readonly(pm_handle, mfn);
+ memcpy(page, ppage, PAGE_SIZE);
+ unmap_pfn(pm_handle, ppage);
+
+ if ( (pfn_type[i] == L1TAB) || (pfn_type[i] == L2TAB) )
+ {
+ for ( j = 0;
+ j < ((pfn_type[i] == L2TAB) ?
+ (HYPERVISOR_VIRT_START >> L2_PAGETABLE_SHIFT) : 1024);
+ j++ )
+ {
+ if ( !(page[j] & _PAGE_PRESENT) ) continue;
+ mfn = page[j] >> PAGE_SHIFT;
+ if ( !MFN_IS_IN_PSEUDOPHYS_MAP(mfn) )
+ {
+ ERROR("Frame number in pagetable page is invalid");
+ goto out;
+ }
+ page[j] &= PAGE_SIZE - 1;
+ page[j] |= mfn_to_pfn_table[mfn] << PAGE_SHIFT;
+ }
+ }
+
+ if ( !checked_write(gfd, page, PAGE_SIZE) )
+ {
+ ERROR("Error when writing to state file");
+ goto out;
+ }
+ }
+
+ n+=j; /* i is the master loop counter */
}
verbose_printf("\b\b\b\b100%%\nMemory saved.\n");
@@ -371,7 +446,7 @@ int xc_linux_save(int xc_handle,
/* Success! */
rc = 0;
- out:
+out:
/* Restart the domain if we had to stop it to save its state. */
if ( we_stopped_it )
{
@@ -397,4 +472,6 @@ int xc_linux_save(int xc_handle,
unlink(state_file);
return !!rc;
+
+
}
diff --git a/tools/xc/lib/xc_private.c b/tools/xc/lib/xc_private.c
index 485aa58754..796da64ad7 100644
--- a/tools/xc/lib/xc_private.c
+++ b/tools/xc/lib/xc_private.c
@@ -45,6 +45,112 @@ void unmap_pfn(int pm_handle, void *vaddr)
(void)munmap(vaddr, PAGE_SIZE);
}
+/*******************/
+
+void * mfn_mapper_map_single(int xc_handle, int prot,
+ unsigned long mfn, int size)
+{
+ privcmd_mmap_t ioctlx;
+ privcmd_mmap_entry_t entry;
+ void *addr;
+ addr = mmap( NULL, size, prot, MAP_SHARED, xc_handle, 0 );
+ if (addr)
+ {
+ ioctlx.num=1;
+ ioctlx.entry=&entry;
+ entry.va=(unsigned long) addr;
+ entry.mfn=mfn;
+ entry.npages=(size+PAGE_SIZE-1)>>PAGE_SHIFT;
+ if ( ioctl( xc_handle, IOCTL_PRIVCMD_MMAP, &ioctlx ) <0 )
+ return 0;
+ }
+ return addr;
+}
+
+mfn_mapper_t * mfn_mapper_init(int xc_handle, int size, int prot)
+{
+ mfn_mapper_t * t;
+ t = calloc( 1, sizeof(mfn_mapper_t)+
+ mfn_mapper_queue_size*sizeof(privcmd_mmap_entry_t) );
+ if (!t) return NULL;
+ t->xc_handle = xc_handle;
+ t->size = size;
+ t->prot = prot;
+ t->max_queue_size = mfn_mapper_queue_size;
+ t->addr = mmap( NULL, size, prot, MAP_SHARED, xc_handle, 0 );
+ if (!t->addr)
+ {
+ free(t);
+ return NULL;
+ }
+ t->ioctl.num = 0;
+ t->ioctl.entry = (privcmd_mmap_entry_t *) &t[1];
+ return t;
+}
+
+void * mfn_mapper_base(mfn_mapper_t *t)
+{
+ return t->addr;
+}
+
+void mfn_mapper_close(mfn_mapper_t *t)
+{
+ if(t->addr) munmap( t->addr, t->size );
+ free(t);
+}
+
+int mfn_mapper_flush_queue(mfn_mapper_t *t)
+{
+ int rc;
+
+ rc = ioctl( t->xc_handle, IOCTL_PRIVCMD_MMAP, &t->ioctl );
+ if (rc<0) return rc;
+ t->ioctl.num = 0;
+ return 0;
+}
+
+void * mfn_mapper_queue_entry(mfn_mapper_t *t, int offset,
+ unsigned long mfn, int size)
+{
+ privcmd_mmap_entry_t *entry, *prev;
+ int pages;
+
+ offset &= PAGE_MASK;
+ pages =(size+PAGE_SIZE-1)>>PAGE_SHIFT;
+ entry = &t->ioctl.entry[t->ioctl.num];
+
+ if ( t->ioctl.num > 0 )
+ {
+ prev = &t->ioctl.entry[t->ioctl.num-1];
+
+ if ( (prev->va+(prev->npages*PAGE_SIZE)) == (t->addr+offset) &&
+ (prev->mfn+prev->npages) == mfn )
+ {
+ prev->npages += pages;
+printf("merge\n");
+ return t->addr+offset;
+ }
+ }
+
+ entry->va = t->addr+offset;
+ entry->mfn = mfn;
+ entry->npages = pages;
+ t->ioctl.num++;
+
+ if(t->ioctl.num == t->max_queue_size)
+ {
+ if ( mfn_mapper_flush_queue(t) )
+ return 0;
+ }
+
+ return t->addr+offset;
+}
+
+
+
+
+/*******************/
+
#define FIRST_MMU_UPDATE 2
static int flush_mmu_updates(int xc_handle, mmu_t *mmu)
diff --git a/tools/xc/lib/xc_private.h b/tools/xc/lib/xc_private.h
index d4299109e5..d173e58e46 100644
--- a/tools/xc/lib/xc_private.h
+++ b/tools/xc/lib/xc_private.h
@@ -174,4 +174,49 @@ int add_mmu_update(int xc_handle, mmu_t *mmu,
unsigned long ptr, unsigned long val);
int finish_mmu_updates(int xc_handle, mmu_t *mmu);
+
+/*
+ * ioctl-based mfn mapping interface
+ */
+
+/*
+typedef struct privcmd_mmap_entry {
+ unsigned long va;
+ unsigned long mfn;
+ unsigned long npages;
+} privcmd_mmap_entry_t;
+
+typedef struct privcmd_mmap {
+ int num;
+ privcmd_mmap_entry_t *entry;
+} privcmd_mmap_t;
+*/
+
+#define mfn_mapper_queue_size 128
+
+typedef struct mfn_mapper {
+ int xc_handle;
+ int size;
+ int prot;
+ int max_queue_size;
+ void * addr;
+ privcmd_mmap_t ioctl;
+
+} mfn_mapper_t;
+
+void * mfn_mapper_map_single(int xc_handle, int prot,
+ unsigned long mfn, int size);
+
+mfn_mapper_t * mfn_mapper_init(int xc_handle, int size, int prot);
+
+void * mfn_mapper_base(mfn_mapper_t *t);
+
+void mfn_mapper_close(mfn_mapper_t *t);
+
+int mfn_mapper_flush_queue(mfn_mapper_t *t);
+
+void * mfn_mapper_queue_entry(mfn_mapper_t *t, int offset,
+ unsigned long mfn, int size );
+
+
#endif /* __XC_PRIVATE_H__ */
diff --git a/xen/common/memory.c b/xen/common/memory.c
index 6d3dc9ead4..1ffc207bdf 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -1041,6 +1041,12 @@ int do_mmu_update(mmu_update_t *ureqs, int count)
machine_to_phys_mapping[pfn] = req.val;
okay = 1;
+
+        /* if in log dirty shadow mode, mark the corresponding
+           pseudo-physical page as dirty */
+ if( unlikely(current->mm.shadow_mode == SHM_logdirty) )
+ mark_dirty( &current->mm, pfn );
+
put_page(&frame_table[pfn]);
break;
diff --git a/xen/net/dev.c b/xen/net/dev.c
index 69ed0e399a..e4e1092840 100644
--- a/xen/net/dev.c
+++ b/xen/net/dev.c
@@ -555,8 +555,11 @@ void deliver_packet(struct sk_buff *skb, net_vif_t *vif)
/* Avoid the fault later. */
*sptr = new_pte;
-
unmap_domain_mem(sptr);
+
+ if( p->mm.shadow_mode == SHM_logdirty )
+ mark_dirty( &p->mm, new_page-frame_table );
+
put_shadow_status(&p->mm);
}
diff --git a/xenolinux-2.4.26-sparse/arch/xen/drivers/dom0/core.c b/xenolinux-2.4.26-sparse/arch/xen/drivers/dom0/core.c
index b59f3e8a84..a94b016fb7 100644
--- a/xenolinux-2.4.26-sparse/arch/xen/drivers/dom0/core.c
+++ b/xenolinux-2.4.26-sparse/arch/xen/drivers/dom0/core.c
@@ -62,16 +62,70 @@ static int privcmd_ioctl(struct inode *inode, struct file *file,
}
break;
+ case IOCTL_PRIVCMD_MMAP:
+ {
+#define PRIVCMD_MMAP_SZ 32
+ privcmd_mmap_t mmapcmd;
+ privcmd_mmap_entry_t msg[PRIVCMD_MMAP_SZ], *p;
+ int i;
+
+ if ( copy_from_user(&mmapcmd, (void *)data, sizeof(mmapcmd)) )
+ return -EFAULT;
+
+ p = mmapcmd.entry;
+
+ for (i=0; i<mmapcmd.num; i+=PRIVCMD_MMAP_SZ, p+=PRIVCMD_MMAP_SZ)
+ {
+ int j, n = ((mmapcmd.num-i)>PRIVCMD_MMAP_SZ)?
+ PRIVCMD_MMAP_SZ:(mmapcmd.num-i);
+ if ( copy_from_user(&msg, p, n*sizeof(privcmd_mmap_entry_t)) )
+ return -EFAULT;
+
+ for (j=0;j<n;j++)
+ {
+ struct vm_area_struct *vma =
+ find_vma( current->mm, msg[j].va );
+
+ if (!vma)
+ return -EINVAL;
+
+ if (msg[j].va > PAGE_OFFSET)
+ return -EINVAL;
+
+ if (msg[j].va + (msg[j].npages<<PAGE_SHIFT) > vma->vm_end)
+ return -EINVAL;
+
+ if (direct_remap_area_pages(vma->vm_mm,
+ msg[j].va&PAGE_MASK,
+ msg[j].mfn<<PAGE_SHIFT,
+ msg[j].npages<<PAGE_SHIFT,
+ vma->vm_page_prot))
+ return -EINVAL;
+ }
+ }
+ ret = 0;
+ }
+ break;
+
default:
ret = -EINVAL;
break;
- }
+ }
return ret;
}
+static int privcmd_mmap(struct file * file, struct vm_area_struct * vma)
+{
+printk(KERN_ALERT"privcmd_mmap\n");
+ /* DONTCOPY is essential for Xen as copy_page_range is broken. */
+ vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY;
+
+ return 0;
+}
static struct file_operations privcmd_file_ops = {
- ioctl : privcmd_ioctl
+ ioctl : privcmd_ioctl,
+ mmap: privcmd_mmap
};
diff --git a/xenolinux-2.4.26-sparse/arch/xen/mm/ioremap.c b/xenolinux-2.4.26-sparse/arch/xen/mm/ioremap.c
index 665357d4bc..6474d4c6ed 100644
--- a/xenolinux-2.4.26-sparse/arch/xen/mm/ioremap.c
+++ b/xenolinux-2.4.26-sparse/arch/xen/mm/ioremap.c
@@ -60,10 +60,12 @@ static inline void direct_remap_area_pte(pte_t *pte,
if (address >= end)
BUG();
do {
+#if 0 /* thanks to new ioctl mmapping interface this is no longer a bug */
if (!pte_none(*pte)) {
printk("direct_remap_area_pte: page already exists\n");
BUG();
}
+#endif
v->ptr = virt_to_machine(pte);
v->val = (machine_addr & PAGE_MASK) | pgprot_val(prot) | _PAGE_IO;
v++;
@@ -118,6 +120,9 @@ int direct_remap_area_pages(struct mm_struct *mm,
pgd_t * dir;
unsigned long end = address + size;
+printk("direct_remap_area_pages va=%08lx ma=%08lx size=%d\n",
+ address, machine_addr, size);
+
machine_addr -= address;
dir = pgd_offset(mm, address);
flush_cache_all();
diff --git a/xenolinux-2.4.26-sparse/include/asm-xen/proc_cmd.h b/xenolinux-2.4.26-sparse/include/asm-xen/proc_cmd.h
index 4ce2930daa..205355fa9c 100644
--- a/xenolinux-2.4.26-sparse/include/asm-xen/proc_cmd.h
+++ b/xenolinux-2.4.26-sparse/include/asm-xen/proc_cmd.h
@@ -13,6 +13,17 @@ typedef struct privcmd_hypercall
unsigned long arg[5];
} privcmd_hypercall_t;
+typedef struct privcmd_mmap_entry {
+ unsigned long va;
+ unsigned long mfn;
+ unsigned long npages;
+} privcmd_mmap_entry_t;
+
+typedef struct privcmd_mmap {
+ int num;
+ privcmd_mmap_entry_t *entry;
+} privcmd_mmap_t;
+
typedef struct privcmd_blkmsg
{
unsigned long op;
@@ -24,5 +35,7 @@ typedef struct privcmd_blkmsg
_IOC(_IOC_NONE, 'P', 0, sizeof(privcmd_hypercall_t))
#define IOCTL_PRIVCMD_BLKMSG \
_IOC(_IOC_NONE, 'P', 1, sizeof(privcmd_blkmsg_t))
+#define IOCTL_PRIVCMD_MMAP \
+ _IOC(_IOC_NONE, 'P', 2, sizeof(privcmd_mmap_t))
#endif /* __PROC_CMD_H__ */