author     Keir Fraser <keir@xensource.com>    2007-10-23 14:38:47 +0100
committer  Keir Fraser <keir@xensource.com>    2007-10-23 14:38:47 +0100
commit     907e0a6046567e32113433fddea5b55c67961391 (patch)
tree       e9c1ff31b07abf40d243b76cdecf65b1b3b81f11 /xen/arch/x86/hvm/mtrr.c
parent     eab576f8d60b834e34f5fe9beed90d2cd09b9fd5 (diff)
hvm, vt-d: Add memory cache-attribute pinning domctl for HVM guests.

Use this to pin virtual framebuffer VRAM as attribute WB, even if the guest
tries to map it with other attributes.

Signed-off-by: Disheng Su <disheng.su@intel.com>
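The domctl interface itself lives outside this file. As a rough illustration of how a handler might forward such a request to the helper added by this patch, here is a minimal sketch; the request layout and the names pin_mem_cacheattr_req / handle_pin_mem_cacheattr are assumptions for illustration only, not part of the patch:

/* Hypothetical glue code -- struct layout and names are assumed; only
 * hvm_set_mem_pinned_cacheattr() comes from this patch. */
struct pin_mem_cacheattr_req {
    uint64_t start;   /* first guest frame number of the range */
    uint64_t end;     /* last guest frame number, inclusive */
    uint32_t type;    /* a PAT_TYPE_* value, e.g. PAT_TYPE_WRBACK for VRAM */
};

static int handle_pin_mem_cacheattr(struct domain *d,
                                    const struct pin_mem_cacheattr_req *req)
{
    /* PAT-type and HVM-domain validation happens inside the helper. */
    return hvm_set_mem_pinned_cacheattr(d, req->start, req->end, req->type);
}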
Diffstat (limited to 'xen/arch/x86/hvm/mtrr.c')
-rw-r--r--  xen/arch/x86/hvm/mtrr.c | 81
1 file changed, 81 insertions(+), 0 deletions(-)
diff --git a/xen/arch/x86/hvm/mtrr.c b/xen/arch/x86/hvm/mtrr.c
index 016652723d..0c23778ced 100644
--- a/xen/arch/x86/hvm/mtrr.c
+++ b/xen/arch/x86/hvm/mtrr.c
@@ -26,6 +26,7 @@
 #include <stdbool.h>
 #include <asm/mtrr.h>
 #include <asm/hvm/support.h>
+#include <asm/hvm/cacheattr.h>
 
 /* Xen holds the native MTRR MSRs */
 extern struct mtrr_state mtrr_state;
@@ -685,3 +686,83 @@ bool_t mtrr_pat_not_equal(struct vcpu *vd, struct vcpu *vs)
     return 0;
 }
+
+void hvm_init_cacheattr_region_list(
+    struct domain *d)
+{
+    INIT_LIST_HEAD(&d->arch.hvm_domain.pinned_cacheattr_ranges);
+}
+
+void hvm_destroy_cacheattr_region_list(
+    struct domain *d)
+{
+    struct list_head *head = &d->arch.hvm_domain.pinned_cacheattr_ranges;
+    struct hvm_mem_pinned_cacheattr_range *range;
+
+    while ( !list_empty(head) )
+    {
+        range = list_entry(head->next,
+                           struct hvm_mem_pinned_cacheattr_range,
+                           list);
+        list_del(&range->list);
+        xfree(range);
+    }
+}
+
+int hvm_get_mem_pinned_cacheattr(
+    struct domain *d,
+    unsigned long guest_fn,
+    unsigned int *type)
+{
+    struct hvm_mem_pinned_cacheattr_range *range;
+
+    *type = 0;
+
+    if ( !is_hvm_domain(d) )
+        return 0;
+
+    list_for_each_entry_rcu ( range,
+                              &d->arch.hvm_domain.pinned_cacheattr_ranges,
+                              list )
+    {
+        if ( (guest_fn >= range->start) && (guest_fn <= range->end) )
+        {
+            *type = range->type;
+            return 1;
+        }
+    }
+
+    return 0;
+}
+
+int hvm_set_mem_pinned_cacheattr(
+    struct domain *d,
+    unsigned long gfn_start,
+    unsigned long gfn_end,
+    unsigned int type)
+{
+    struct hvm_mem_pinned_cacheattr_range *range;
+
+    if ( !((type == PAT_TYPE_UNCACHABLE) ||
+           (type == PAT_TYPE_WRCOMB) ||
+           (type == PAT_TYPE_WRTHROUGH) ||
+           (type == PAT_TYPE_WRPROT) ||
+           (type == PAT_TYPE_WRBACK) ||
+           (type == PAT_TYPE_UC_MINUS)) ||
+         !is_hvm_domain(d) )
+        return -EINVAL;
+
+    range = xmalloc(struct hvm_mem_pinned_cacheattr_range);
+    if ( range == NULL )
+        return -ENOMEM;
+
+    memset(range, 0, sizeof(*range));
+
+    range->start = gfn_start;
+    range->end = gfn_end;
+    range->type = type;
+
+    list_add_rcu(&range->list, &d->arch.hvm_domain.pinned_cacheattr_ranges);
+
+    return 0;
+}
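On the read side, a consumer deriving the effective memory type for a guest frame would give a pinned range priority over whatever the guest's own MTRR/PAT configuration yields. A minimal sketch, assuming a hypothetical caller effective_cache_type() and a precomputed guest_derived_type; only hvm_get_mem_pinned_cacheattr() comes from this patch:

/* Illustrative only: effective_cache_type() and guest_derived_type are
 * assumed names; the pinned-range lookup is the function added above. */
static uint8_t effective_cache_type(struct domain *d, unsigned long gfn,
                                    uint8_t guest_derived_type)
{
    unsigned int pinned_type;

    /* Returns 1 and fills pinned_type only when gfn falls in a pinned range. */
    if ( hvm_get_mem_pinned_cacheattr(d, gfn, &pinned_type) )
        return pinned_type;

    return guest_derived_type;
}

Because readers walk the list with list_for_each_entry_rcu() and writers insert with list_add_rcu(), such lookups can proceed without locking against concurrent insertions; the patch never removes individual ranges except all at once in hvm_destroy_cacheattr_region_list() at domain teardown.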