author    Keir Fraser <keir@xen.org>  2011-02-07 09:58:11 +0000
committer Keir Fraser <keir@xen.org>  2011-02-07 09:58:11 +0000
commit    00cee37d3759218c28856d092713b44633fde1f3
tree      0190b6c7348f4f3a80e14af27a444a3771996fec
parent    462236bac932a5016dc0c2907054b5517a51728a
Pack some hvmop memory structures better
Some of the hvmop memory structures have a shocking amount of unnecessary
padding in them.  Elements which can have only 3 values are given 64 bits of
memory, and then aligned (so that there is padding behind them).

This patch resizes and reorganizes in the following way, (hopefully) without
introducing any differences between the layout for 32- and 64-bit:

xen_hvm_set_mem_type:
  hvmmem_type -> 16 bits
  nr -> 32 bits (limiting us to setting 16TB at a time)
xen_hvm_set_mem_access:
  hvmmem_access -> 16 bits
  nr -> 32 bits
xen_hvm_get_mem_access:
  hvmmem_access -> 16 bits

Signed-off-by: George Dunlap <george.dunlap@eu.citrix.com>
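To make the wasted space concrete, here is a minimal standalone sketch (not the Xen headers themselves) comparing the old and new xen_hvm_set_mem_type layouts. It uses plain uint64_t in place of uint64_aligned_t, and the asserted sizes assume a typical x86-64 ABI where uint64_t is 8-byte aligned:

/* Standalone sketch, not Xen code: approximate old vs. new layouts of
 * xen_hvm_set_mem_type to show where the padding goes. */
#include <stdint.h>

typedef uint16_t domid_t;              /* domid_t is 16 bits in Xen */

struct old_set_mem_type {              /* before the patch */
    domid_t  domid;                    /* 2 bytes, then 6 bytes of padding */
    uint64_t hvmmem_type;              /* only a handful of values, yet 64 bits */
    uint64_t first_pfn;
    uint64_t nr;
};

struct new_set_mem_type {              /* after the patch */
    domid_t  domid;                    /* 2 bytes */
    uint16_t hvmmem_access;            /* 2 bytes, enough for the enum values */
    uint32_t nr;                       /* 4 bytes: up to 2^32 pages per call */
    uint64_t first_pfn;                /* 8 bytes, already naturally aligned */
};

_Static_assert(sizeof(struct old_set_mem_type) == 32, "old layout wastes 6 bytes");
_Static_assert(sizeof(struct new_set_mem_type) == 16, "new layout has no padding");

With 4KB pages, the 32-bit nr field bounds a single call at 2^32 * 4KB = 16TB, which is the limit the message refers to.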
-rw-r--r--  xen/include/public/hvm/hvm_op.h  22
1 file changed, 10 insertions(+), 12 deletions(-)
diff --git a/xen/include/public/hvm/hvm_op.h b/xen/include/public/hvm/hvm_op.h
index 6f846fb250..d0849247a3 100644
--- a/xen/include/public/hvm/hvm_op.h
+++ b/xen/include/public/hvm/hvm_op.h
@@ -119,11 +119,11 @@ struct xen_hvm_set_mem_type {
/* Domain to be updated. */
domid_t domid;
/* Memory type */
- uint64_aligned_t hvmmem_type;
+ uint16_t hvmmem_type;
+ /* Number of pages. */
+ uint32_t nr;
/* First pfn. */
uint64_aligned_t first_pfn;
- /* Number of pages. */
- uint64_aligned_t nr;
};
typedef struct xen_hvm_set_mem_type xen_hvm_set_mem_type_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_mem_type_t);
@@ -179,13 +179,12 @@ typedef enum {
struct xen_hvm_set_mem_access {
/* Domain to be updated. */
domid_t domid;
- uint16_t pad[3]; /* align next field on 8-byte boundary */
/* Memory type */
- uint64_t hvmmem_access; /* hvm_access_t */
- /* First pfn, or ~0ull to set the default access for new pages */
- uint64_t first_pfn;
+ uint16_t hvmmem_access; /* hvm_access_t */
/* Number of pages, ignored on setting default access */
- uint64_t nr;
+ uint32_t nr;
+ /* First pfn, or ~0ull to set the default access for new pages */
+ uint64_aligned_t first_pfn;
};
typedef struct xen_hvm_set_mem_access xen_hvm_set_mem_access_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_mem_access_t);
@@ -195,11 +194,10 @@ DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_mem_access_t);
struct xen_hvm_get_mem_access {
/* Domain to be queried. */
domid_t domid;
- uint16_t pad[3]; /* align next field on 8-byte boundary */
/* Memory type: OUT */
- uint64_t hvmmem_access; /* hvm_access_t */
+ uint16_t hvmmem_access; /* hvm_access_t */
/* pfn, or ~0ull for default access for new pages. IN */
- uint64_t pfn;
+ uint64_aligned_t pfn;
};
typedef struct xen_hvm_get_mem_access xen_hvm_get_mem_access_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_mem_access_t);
@@ -220,7 +218,7 @@ struct xen_hvm_inject_trap {
/* Error code, or -1 to skip */
uint32_t error_code;
/* CR2 for page faults */
- uint64_t cr2;
+ uint64_aligned_t cr2;
};
typedef struct xen_hvm_inject_trap xen_hvm_inject_trap_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_inject_trap_t);
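Keeping the 32- and 64-bit layouts identical rests on uint64_aligned_t forcing 8-byte alignment even on 32-bit x86, where a plain uint64_t is only 4-byte aligned. A hypothetical compile-time check, not part of this patch, could pin down the repacked xen_hvm_set_mem_access layout; the mirror struct and offsets below are assumptions read off the hunk above, again with uint64_t standing in for uint64_aligned_t:

/* Hypothetical sketch, not Xen code: mirror the repacked
 * xen_hvm_set_mem_access and assert its layout at compile time, so a later
 * edit that reintroduces padding or shifts first_pfn fails the build. */
#include <stddef.h>
#include <stdint.h>

typedef uint16_t domid_t;            /* domid_t is 16 bits in Xen */

struct mirror_set_mem_access {
    domid_t  domid;                  /* expected at offset 0 */
    uint16_t hvmmem_access;          /* expected at offset 2 */
    uint32_t nr;                     /* expected at offset 4 */
    uint64_t first_pfn;              /* expected at offset 8 */
};

_Static_assert(offsetof(struct mirror_set_mem_access, hvmmem_access) == 2,
               "hvmmem_access moved");
_Static_assert(offsetof(struct mirror_set_mem_access, nr) == 4, "nr moved");
_Static_assert(offsetof(struct mirror_set_mem_access, first_pfn) == 8,
               "padding crept back in before first_pfn");
_Static_assert(sizeof(struct mirror_set_mem_access) == 16,
               "trailing padding changed the struct size");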