diff options
author | kfraser@localhost.localdomain <kfraser@localhost.localdomain> | 2006-08-03 15:22:25 +0100 |
---|---|---|
committer | kfraser@localhost.localdomain <kfraser@localhost.localdomain> | 2006-08-03 15:22:25 +0100 |
commit | 931e1a3a502ec16cd8c7ba154f13620c534e96dd (patch) | |
tree | 11959aef0b01c2dbc61c993051d7e84b838b3ee1 | |
parent | 72879391fb78a641b946e8b9e4edcb67e0945278 (diff) | |
download | xen-931e1a3a502ec16cd8c7ba154f13620c534e96dd.tar.gz xen-931e1a3a502ec16cd8c7ba154f13620c534e96dd.tar.bz2 xen-931e1a3a502ec16cd8c7ba154f13620c534e96dd.zip |
[HVM] Make copy_{to,from}_guest work for HVM domains.
Signed-off-by: Steven Smith <ssmith@xensource.com>
-rw-r--r-- | xen/arch/x86/hvm/hvm.c | 9 | ||||
-rw-r--r-- | xen/arch/x86/hvm/platform.c | 14 | ||||
-rw-r--r-- | xen/include/asm-x86/guest_access.h | 20 | ||||
-rw-r--r-- | xen/include/asm-x86/hvm/guest_access.h | 7 | ||||
-rw-r--r-- | xen/include/asm-x86/shadow.h | 7 |
5 files changed, 51 insertions(+), 6 deletions(-)
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c index 9dd0fee66f..7b348e32dd 100644 --- a/xen/arch/x86/hvm/hvm.c +++ b/xen/arch/x86/hvm/hvm.c @@ -254,7 +254,7 @@ int cpu_get_interrupt(struct vcpu *v, int *type) int hvm_copy(void *buf, unsigned long vaddr, int size, int dir) { - unsigned long gpa, mfn; + unsigned long mfn; char *addr; int count; @@ -263,10 +263,9 @@ hvm_copy(void *buf, unsigned long vaddr, int size, int dir) if (count > size) count = size; - if (hvm_paging_enabled(current)) { - gpa = gva_to_gpa(vaddr); - mfn = get_mfn_from_gpfn(gpa >> PAGE_SHIFT); - } else + if (hvm_paging_enabled(current)) + mfn = gva_to_mfn(vaddr); + else mfn = get_mfn_from_gpfn(vaddr >> PAGE_SHIFT); if (mfn == INVALID_MFN) return 0; diff --git a/xen/arch/x86/hvm/platform.c b/xen/arch/x86/hvm/platform.c index 1b5c5966af..efe443a8a0 100644 --- a/xen/arch/x86/hvm/platform.c +++ b/xen/arch/x86/hvm/platform.c @@ -1034,6 +1034,20 @@ void handle_mmio(unsigned long va, unsigned long gpa) } } +/* Note that copy_{to,from}_user_hvm don't set the A and D bits on + PTEs, and require the PTE to be writable even when they're only + trying to read from it. The guest is expected to deal with + this. */ +unsigned long copy_to_user_hvm(void *to, const void *from, unsigned len) +{ + return !hvm_copy((void *)from, (unsigned long)to, len, HVM_COPY_OUT); +} + +unsigned long copy_from_user_hvm(void *to, const void *from, unsigned len) +{ + return !hvm_copy(to, (unsigned long)from, len, HVM_COPY_IN); +} + /* * Local variables: * mode: C diff --git a/xen/include/asm-x86/guest_access.h b/xen/include/asm-x86/guest_access.h index 7be0f3efe6..7b88aad86b 100644 --- a/xen/include/asm-x86/guest_access.h +++ b/xen/include/asm-x86/guest_access.h @@ -8,6 +8,8 @@ #define __ASM_X86_GUEST_ACCESS_H__ #include <asm/uaccess.h> +#include <asm/hvm/support.h> +#include <asm/hvm/guest_access.h> /* Is the guest handle a NULL reference? 
*/ #define guest_handle_is_null(hnd) ((hnd).p == NULL) @@ -28,6 +30,8 @@ #define copy_to_guest_offset(hnd, off, ptr, nr) ({ \ const typeof(ptr) _x = (hnd).p; \ const typeof(ptr) _y = (ptr); \ + hvm_guest(current) ? \ + copy_to_user_hvm(_x+(off), _y, sizeof(*_x)*(nr)) : \ copy_to_user(_x+(off), _y, sizeof(*_x)*(nr)); \ }) @@ -38,6 +42,8 @@ #define copy_from_guest_offset(ptr, hnd, off, nr) ({ \ const typeof(ptr) _x = (hnd).p; \ const typeof(ptr) _y = (ptr); \ + hvm_guest(current) ? \ + copy_from_user_hvm(_y, _x+(off), sizeof(*_x)*(nr)) :\ copy_from_user(_y, _x+(off), sizeof(*_x)*(nr)); \ }) @@ -45,6 +51,8 @@ #define copy_field_to_guest(hnd, ptr, field) ({ \ const typeof(&(ptr)->field) _x = &(hnd).p->field; \ const typeof(&(ptr)->field) _y = &(ptr)->field; \ + hvm_guest(current) ? \ + copy_to_user_hvm(_x, _y, sizeof(*_x)) : \ copy_to_user(_x, _y, sizeof(*_x)); \ }) @@ -52,6 +60,8 @@ #define copy_field_from_guest(ptr, hnd, field) ({ \ const typeof(&(ptr)->field) _x = &(hnd).p->field; \ const typeof(&(ptr)->field) _y = &(ptr)->field; \ + hvm_guest(current) ? \ + copy_from_user_hvm(_y, _x, sizeof(*_x)) : \ copy_from_user(_y, _x, sizeof(*_x)); \ }) @@ -60,29 +70,37 @@ * Allows use of faster __copy_* functions. */ #define guest_handle_okay(hnd, nr) \ - array_access_ok((hnd).p, (nr), sizeof(*(hnd).p)) + (hvm_guest(current) || array_access_ok((hnd).p, (nr), sizeof(*(hnd).p))) #define __copy_to_guest_offset(hnd, off, ptr, nr) ({ \ const typeof(ptr) _x = (hnd).p; \ const typeof(ptr) _y = (ptr); \ + hvm_guest(current) ? \ + copy_to_user_hvm(_x+(off), _y, sizeof(*_x)*(nr)) : \ __copy_to_user(_x+(off), _y, sizeof(*_x)*(nr)); \ }) #define __copy_from_guest_offset(ptr, hnd, off, nr) ({ \ const typeof(ptr) _x = (hnd).p; \ const typeof(ptr) _y = (ptr); \ + hvm_guest(current) ? 
\ + copy_from_user_hvm(_y, _x+(off),sizeof(*_x)*(nr)) : \ __copy_from_user(_y, _x+(off), sizeof(*_x)*(nr)); \ }) #define __copy_field_to_guest(hnd, ptr, field) ({ \ const typeof(&(ptr)->field) _x = &(hnd).p->field; \ const typeof(&(ptr)->field) _y = &(ptr)->field; \ + hvm_guest(current) ? \ + copy_to_user_hvm(_x, _y, sizeof(*_x)) : \ __copy_to_user(_x, _y, sizeof(*_x)); \ }) #define __copy_field_from_guest(ptr, hnd, field) ({ \ const typeof(&(ptr)->field) _x = &(hnd).p->field; \ const typeof(&(ptr)->field) _y = &(ptr)->field; \ + hvm_guest(current) ? \ + copy_from_user_hvm(_y, _x, sizeof(*_x)) : \ __copy_from_user(_y, _x, sizeof(*_x)); \ }) diff --git a/xen/include/asm-x86/hvm/guest_access.h b/xen/include/asm-x86/hvm/guest_access.h new file mode 100644 index 0000000000..7a89e81536 --- /dev/null +++ b/xen/include/asm-x86/hvm/guest_access.h @@ -0,0 +1,7 @@ +#ifndef __ASM_X86_HVM_GUEST_ACCESS_H__ +#define __ASM_X86_HVM_GUEST_ACCESS_H__ + +unsigned long copy_to_user_hvm(void *to, const void *from, unsigned len); +unsigned long copy_from_user_hvm(void *to, const void *from, unsigned len); + +#endif /* __ASM_X86_HVM_GUEST_ACCESS_H__ */ diff --git a/xen/include/asm-x86/shadow.h b/xen/include/asm-x86/shadow.h index 397d4beb6b..f7d76af88e 100644 --- a/xen/include/asm-x86/shadow.h +++ b/xen/include/asm-x86/shadow.h @@ -1734,6 +1734,13 @@ static inline unsigned long gva_to_gpa(unsigned long gva) return l1e_get_paddr(gpte) + (gva & ~PAGE_MASK); } #endif + +static inline unsigned long gva_to_mfn(unsigned long gva) +{ + unsigned long gpa = gva_to_gpa(gva); + return get_mfn_from_gpfn(gpa >> PAGE_SHIFT); +} + /************************************************************************/ extern void __update_pagetables(struct vcpu *v); |