author    kfraser@localhost.localdomain <kfraser@localhost.localdomain>  2007-08-30 14:58:40 +0100
committer kfraser@localhost.localdomain <kfraser@localhost.localdomain>  2007-08-30 14:58:40 +0100
commit    0e0204500eb2bb9499ca6ff0a60ccf7e3612c917 (patch)
tree      e4a9b4ccfd06b44a1ae9d1e91aedf9ff5eb266c1 /tools/libxc/xc_core.c
parent    c5836257128829a52ee816d81b7b2c9cda6a95b9 (diff)
Optimize xc_core.c implementation.
Optimize out an unnecessary map/unmap of each foreign domain page by moving the p2m/pfn table after the pages array. This patch doesn't change the xm dump-core format.

Signed-off-by: Isaku Yamahata <yamahata@valinux.co.jp>
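For illustration only (not part of the commit): a minimal, self-contained C sketch of the single-pass structure this patch adopts. Each page is mapped once, copied into the dump buffer, and its p2m/pfn table entry is recorded in the same iteration; because .xen_pages now precedes .xen_p2m/.xen_pfn in the core file, the table built during the copy loop can be written out afterwards, instead of being assembled in a separate probing pass that mapped and unmapped every page. mock_map_page() and emit() below are hypothetical stand-ins for xc_map_foreign_range() and the dump_rtn callback, and the pfn range and DUMP_INCREMENT value are made up for the example.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE      4096
#define DUMP_INCREMENT 16          /* pages buffered per flush; xc_core.c uses 4 * 1024 */

static char page_buf[PAGE_SIZE];

/* Stand-in for xc_map_foreign_range(): returns NULL for unbacked pfns. */
static void *mock_map_page(uint64_t pfn)
{
    if ( pfn % 7 == 0 )            /* pretend some pfns have no backing page */
        return NULL;
    memset(page_buf, (int)(pfn & 0xff), PAGE_SIZE);
    return page_buf;
}

/* Stand-in for the dump_rtn() callback: just count the bytes "written". */
static int emit(const void *buf, size_t len, uint64_t *total)
{
    (void)buf;
    *total += len;
    return 0;
}

int main(void)
{
    /* One fake memory-map entry covering pfns [0, 512). */
    const uint64_t pfn_start = 0, pfn_end = 512;
    uint64_t *pfn_array = calloc(pfn_end - pfn_start, sizeof(*pfn_array));
    char *dump_mem_start = malloc((size_t)DUMP_INCREMENT * PAGE_SIZE);
    char *dump_mem = dump_mem_start;
    uint64_t written = 0, j = 0;

    if ( pfn_array == NULL || dump_mem_start == NULL )
        return 1;

    for ( uint64_t i = pfn_start; i < pfn_end; i++ )
    {
        void *vaddr = mock_map_page(i);      /* single map per page */
        if ( vaddr == NULL )
            continue;                        /* skip holes, as the patch does */

        pfn_array[j] = i;                    /* table entry recorded in-line */
        memcpy(dump_mem, vaddr, PAGE_SIZE);  /* page copied from the same mapping */
        dump_mem += PAGE_SIZE;

        if ( (j + 1) % DUMP_INCREMENT == 0 )
        {
            emit(dump_mem_start, (size_t)(dump_mem - dump_mem_start), &written);
            dump_mem = dump_mem_start;
        }
        j++;
    }
    emit(dump_mem_start, (size_t)(dump_mem - dump_mem_start), &written);

    /* Because .xen_pages now precedes .xen_p2m/.xen_pfn, the table built
     * during the copy loop can be emitted afterwards in one go. */
    emit(pfn_array, j * sizeof(*pfn_array), &written);

    printf("dumped %llu pages, %llu bytes\n",
           (unsigned long long)j, (unsigned long long)written);

    free(pfn_array);
    free(dump_mem_start);
    return 0;
}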
Diffstat (limited to 'tools/libxc/xc_core.c')
-rw-r--r--  tools/libxc/xc_core.c  233
1 file changed, 121 insertions(+), 112 deletions(-)
diff --git a/tools/libxc/xc_core.c b/tools/libxc/xc_core.c
index 22cb8dcb6b..9ddbfef418 100644
--- a/tools/libxc/xc_core.c
+++ b/tools/libxc/xc_core.c
@@ -17,8 +17,8 @@
* | .xen_prstatus |
* | .xen_ia64_mmapped_regs if ia64 |
* | .xen_shared_info if present |
- * | .xen_p2m or .xen_pfn |
* | .xen_pages |
+ * | .xen_p2m or .xen_pfn |
* +--------------------------------------------------------+
* |.note.Xen:note section |
* | "Xen" is used as note name, |
@@ -37,13 +37,13 @@
* +--------------------------------------------------------+
* |.xen_shared_info if possible |
* +--------------------------------------------------------+
+ * |.xen_pages |
+ * | page * nr_pages |
+ * +--------------------------------------------------------+
* |.xen_p2m or .xen_pfn |
* | .xen_p2m: struct xen_dumpcore_p2m[nr_pages] |
* | .xen_pfn: uint64_t[nr_pages] |
* +--------------------------------------------------------+
- * |.xen_pages |
- * | page * nr_pages |
- * +--------------------------------------------------------+
* |.shstrtab: section header string table |
* +--------------------------------------------------------+
*
@@ -58,21 +58,6 @@
/* number of pages to write at a time */
#define DUMP_INCREMENT (4 * 1024)
-static int
-copy_from_domain_page(int xc_handle,
- uint32_t domid,
- unsigned long mfn,
- void *dst_page)
-{
- void *vaddr = xc_map_foreign_range(
- xc_handle, domid, PAGE_SIZE, PROT_READ, mfn);
- if ( vaddr == NULL )
- return -1;
- memcpy(dst_page, vaddr, PAGE_SIZE);
- munmap(vaddr, PAGE_SIZE);
- return 0;
-}
-
/* string table */
struct xc_core_strtab {
char *strings;
@@ -520,47 +505,6 @@ xc_domain_dumpcore_via_callback(int xc_handle,
}
}
- /* create .xen_p2m or .xen_pfn */
- j = 0;
- for ( map_idx = 0; map_idx < nr_memory_map; map_idx++ )
- {
- uint64_t pfn_start;
- uint64_t pfn_end;
-
- pfn_start = memory_map[map_idx].addr >> PAGE_SHIFT;
- pfn_end = pfn_start + (memory_map[map_idx].size >> PAGE_SHIFT);
- for ( i = pfn_start; i < pfn_end; i++ )
- {
- if ( !auto_translated_physmap )
- {
- if ( p2m[i] == INVALID_P2M_ENTRY )
- continue;
- p2m_array[j].pfn = i;
- p2m_array[j].gmfn = p2m[i];
- }
- else
- {
- /* try to map page to determin wheter it has underlying page */
- void *vaddr = xc_map_foreign_range(xc_handle, domid,
- PAGE_SIZE, PROT_READ, i);
- if ( vaddr == NULL )
- continue;
- munmap(vaddr, PAGE_SIZE);
- pfn_array[j] = i;
- }
-
- j++;
- }
- }
- if ( j != nr_pages )
- {
- PERROR("j (%ld) != nr_pages (%ld)", j , nr_pages);
- /* When live dump-mode (-L option) is specified,
- * guest domain may change its mapping.
- */
- nr_pages = j;
- }
-
/* ehdr.e_shnum and ehdr.e_shstrndx aren't known here yet. fill it later*/
xc_core_ehdr_init(&ehdr);
@@ -660,6 +604,33 @@ xc_domain_dumpcore_via_callback(int xc_handle,
offset += filesz;
}
+ /*
+ * pages and p2m/pfn are the last section to allocate section headers
+ * so that we know the number of section headers here.
+ * 2 = pages section and p2m/pfn table section
+ */
+ fixup = (sheaders->num + 2) * sizeof(*shdr);
+ /* zeroth section should have zero offset */
+ for ( i = 1; i < sheaders->num; i++ )
+ sheaders->shdrs[i].sh_offset += fixup;
+ offset += fixup;
+ dummy_len = ROUNDUP(offset, PAGE_SHIFT) - offset; /* padding length */
+ offset += dummy_len;
+
+ /* pages */
+ shdr = xc_core_shdr_get(sheaders);
+ if ( shdr == NULL )
+ {
+ PERROR("could not get section headers for .xen_pages");
+ goto out;
+ }
+ filesz = nr_pages * PAGE_SIZE;
+ sts = xc_core_shdr_set(shdr, strtab, XEN_DUMPCORE_SEC_PAGES, SHT_PROGBITS,
+ offset, filesz, PAGE_SIZE, PAGE_SIZE);
+ if ( sts != 0 )
+ goto out;
+ offset += filesz;
+
/* p2m/pfn table */
shdr = xc_core_shdr_get(sheaders);
if ( shdr == NULL )
@@ -674,8 +645,6 @@ xc_domain_dumpcore_via_callback(int xc_handle,
SHT_PROGBITS,
offset, filesz, __alignof__(p2m_array[0]),
sizeof(p2m_array[0]));
- if ( sts != 0 )
- goto out;
}
else
{
@@ -684,34 +653,7 @@ xc_domain_dumpcore_via_callback(int xc_handle,
SHT_PROGBITS,
offset, filesz, __alignof__(pfn_array[0]),
sizeof(pfn_array[0]));
- if ( sts != 0 )
- goto out;
- }
- offset += filesz;
-
- /* pages */
- shdr = xc_core_shdr_get(sheaders);
- if ( shdr == NULL )
- {
- PERROR("could not get section headers for .xen_pages");
- goto out;
}
-
- /*
- * pages are the last section to allocate section headers
- * so that we know the number of section headers here.
- */
- fixup = sheaders->num * sizeof(*shdr);
- /* zeroth section should have zero offset */
- for ( i = 1; i < sheaders->num; i++ )
- sheaders->shdrs[i].sh_offset += fixup;
- offset += fixup;
- dummy_len = ROUNDUP(offset, PAGE_SHIFT) - offset; /* padding length */
- offset += dummy_len;
-
- filesz = nr_pages * PAGE_SIZE;
- sts = xc_core_shdr_set(shdr, strtab, XEN_DUMPCORE_SEC_PAGES, SHT_PROGBITS,
- offset, filesz, PAGE_SIZE, PAGE_SIZE);
if ( sts != 0 )
goto out;
offset += filesz;
@@ -736,7 +678,7 @@ xc_domain_dumpcore_via_callback(int xc_handle,
/* elf note section: xen core header */
sts = elfnote_dump_none(args, dump_rtn);
- if ( sts != 0)
+ if ( sts != 0 )
goto out;
/* elf note section: xen core header */
@@ -772,16 +714,6 @@ xc_domain_dumpcore_via_callback(int xc_handle,
if ( sts != 0 )
goto out;
- /* p2m/pfn table: .xen_p2m/.xen_pfn */
- if ( !auto_translated_physmap )
- sts = dump_rtn(args, (char *)p2m_array,
- sizeof(p2m_array[0]) * nr_pages);
- else
- sts = dump_rtn(args, (char *)pfn_array,
- sizeof(pfn_array[0]) * nr_pages);
- if ( sts != 0 )
- goto out;
-
/* Pad the output data to page alignment. */
memset(dummy, 0, PAGE_SIZE);
sts = dump_rtn(args, dummy, dummy_len);
@@ -789,25 +721,100 @@ xc_domain_dumpcore_via_callback(int xc_handle,
goto out;
/* dump pages: .xen_pages */
- for ( dump_mem = dump_mem_start, i = 0; i < nr_pages; i++ )
+ j = 0;
+ dump_mem = dump_mem_start;
+ for ( map_idx = 0; map_idx < nr_memory_map; map_idx++ )
{
- uint64_t gmfn;
- if ( !auto_translated_physmap )
- gmfn = p2m_array[i].gmfn;
- else
- gmfn = pfn_array[i];
-
- copy_from_domain_page(xc_handle, domid, gmfn, dump_mem);
- dump_mem += PAGE_SIZE;
- if ( ((i + 1) % DUMP_INCREMENT == 0) || ((i + 1) == nr_pages) )
+ uint64_t pfn_start;
+ uint64_t pfn_end;
+
+ pfn_start = memory_map[map_idx].addr >> PAGE_SHIFT;
+ pfn_end = pfn_start + (memory_map[map_idx].size >> PAGE_SHIFT);
+ for ( i = pfn_start; i < pfn_end; i++ )
{
- sts = dump_rtn(args, dump_mem_start, dump_mem - dump_mem_start);
+ uint64_t gmfn;
+ void *vaddr;
+
+ if ( j >= nr_pages )
+ {
+ /*
+ * When live dump-mode (-L option) is specified,
+ * guest domain may increase memory.
+ */
+ IPRINTF("exceeded nr_pages (%ld) losing pages", nr_pages);
+ goto copy_done;
+ }
+
+ if ( !auto_translated_physmap )
+ {
+ gmfn = p2m[i];
+ if ( gmfn == INVALID_P2M_ENTRY )
+ continue;
+
+ p2m_array[j].pfn = i;
+ p2m_array[j].gmfn = gmfn;
+ }
+ else
+ {
+ gmfn = i;
+ pfn_array[j] = i;
+ }
+
+ vaddr = xc_map_foreign_range(
+ xc_handle, domid, PAGE_SIZE, PROT_READ, gmfn);
+ if ( vaddr == NULL )
+ continue;
+ memcpy(dump_mem, vaddr, PAGE_SIZE);
+ munmap(vaddr, PAGE_SIZE);
+ dump_mem += PAGE_SIZE;
+ if ( (j + 1) % DUMP_INCREMENT == 0 )
+ {
+ sts = dump_rtn(
+ args, dump_mem_start, dump_mem - dump_mem_start);
+ if ( sts != 0 )
+ goto out;
+ dump_mem = dump_mem_start;
+ }
+
+ j++;
+ }
+ }
+
+copy_done:
+ sts = dump_rtn(args, dump_mem_start, dump_mem - dump_mem_start);
+ if ( sts != 0 )
+ goto out;
+ if ( j < nr_pages )
+ {
+ /* When live dump-mode (-L option) is specified,
+ * guest domain may reduce memory. pad with zero pages.
+ */
+ IPRINTF("j (%ld) != nr_pages (%ld)", j , nr_pages);
+ memset(dump_mem_start, 0, PAGE_SIZE);
+ for (; j < nr_pages; j++) {
+ sts = dump_rtn(args, dump_mem_start, PAGE_SIZE);
if ( sts != 0 )
goto out;
- dump_mem = dump_mem_start;
+ if ( !auto_translated_physmap )
+ {
+ p2m_array[j].pfn = XC_CORE_INVALID_PFN;
+ p2m_array[j].gmfn = XC_CORE_INVALID_GMFN;
+ }
+ else
+ pfn_array[j] = XC_CORE_INVALID_PFN;
}
}
+ /* p2m/pfn table: .xen_p2m/.xen_pfn */
+ if ( !auto_translated_physmap )
+ sts = dump_rtn(
+ args, (char *)p2m_array, sizeof(p2m_array[0]) * nr_pages);
+ else
+ sts = dump_rtn(
+ args, (char *)pfn_array, sizeof(pfn_array[0]) * nr_pages);
+ if ( sts != 0 )
+ goto out;
+
/* elf section header string table: .shstrtab */
sts = dump_rtn(args, strtab->strings, strtab->current);
if ( sts != 0 )
@@ -816,6 +823,8 @@ xc_domain_dumpcore_via_callback(int xc_handle,
sts = 0;
out:
+ if ( memory_map != NULL )
+ free(memory_map);
if ( p2m != NULL )
munmap(p2m, PAGE_SIZE * P2M_FL_ENTRIES);
if ( p2m_array != NULL )