/*
 * hvm.c: Common hardware virtual machine abstractions.
 *
 * Copyright (c) 2004, Intel Corporation.
 * Copyright (c) 2005, International Business Machines Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include

int hvm_enabled __read_mostly;

unsigned int opt_hvm_debug_level __read_mostly;
integer_param("hvm_debug", opt_hvm_debug_level);

struct hvm_function_table hvm_funcs __read_mostly;

/* I/O permission bitmap is globally shared by all HVM guests. */
char __attribute__ ((__section__ (".bss.page_aligned")))
    hvm_io_bitmap[3*PAGE_SIZE];

void hvm_enable(struct hvm_function_table *fns)
{
    BUG_ON(hvm_enabled);
    printk("HVM: %s enabled\n", fns->name);

    /*
     * Allow direct access to the PC debug port (it is often used for I/O
     * delays, but the vmexits simply slow things down).
     */
    memset(hvm_io_bitmap, ~0, sizeof(hvm_io_bitmap));
    clear_bit(0x80, hvm_io_bitmap);

    hvm_funcs   = *fns;
    hvm_enabled = 1;
}

void hvm_set_guest_time(struct vcpu *v, u64 gtime)
{
    u64 host_tsc;

    rdtscll(host_tsc);

    v->arch.hvm_vcpu.cache_tsc_offset = gtime - host_tsc;
    hvm_funcs.set_tsc_offset(v, v->arch.hvm_vcpu.cache_tsc_offset);
}

u64 hvm_get_guest_time(struct vcpu *v)
{
    u64 host_tsc;

    rdtscll(host_tsc);
    return host_tsc + v->arch.hvm_vcpu.cache_tsc_offset;
}

void hvm_migrate_timers(struct vcpu *v)
{
    rtc_migrate_timers(v);
    hpet_migrate_timers(v);
    pt_migrate(v);
}

void hvm_do_resume(struct vcpu *v)
{
    ioreq_t *p;

    if ( !v->fpu_dirtied )
        hvm_funcs.stts(v);

    pt_thaw_time(v);

    /* NB. Optimised for common case (p->state == STATE_IOREQ_NONE). */
    p = &get_ioreq(v)->vp_ioreq;
    while ( p->state != STATE_IOREQ_NONE )
    {
        switch ( p->state )
        {
        case STATE_IORESP_READY: /* IORESP_READY -> NONE */
            hvm_io_assist();
            break;
        case STATE_IOREQ_READY:  /* IOREQ_{READY,INPROCESS} -> IORESP_READY */
        case STATE_IOREQ_INPROCESS:
            wait_on_xen_event_channel(v->arch.hvm_vcpu.xen_port,
                                      (p->state != STATE_IOREQ_READY) &&
                                      (p->state != STATE_IOREQ_INPROCESS));
            break;
        default:
            gdprintk(XENLOG_ERR, "Weird HVM iorequest state %d.\n", p->state);
            domain_crash_synchronous();
        }
    }
}

static void hvm_init_ioreq_page(
    struct domain *d, struct hvm_ioreq_page *iorp)
{
    memset(iorp, 0, sizeof(*iorp));
    spin_lock_init(&iorp->lock);
    domain_pause(d);
}

static void hvm_destroy_ioreq_page(
    struct domain *d, struct hvm_ioreq_page *iorp)
{
    spin_lock(&iorp->lock);

    ASSERT(d->is_dying);

    if ( iorp->va != NULL )
    {
        unmap_domain_page_global(iorp->va);
        put_page_and_type(iorp->page);
        iorp->va = NULL;
    }

    spin_unlock(&iorp->lock);
}

static int hvm_set_ioreq_page(
    struct domain *d, struct hvm_ioreq_page *iorp, unsigned long gmfn)
{
    struct page_info *page;
    p2m_type_t p2mt;
    unsigned long mfn;
    void *va;

    mfn = mfn_x(gfn_to_mfn(d, gmfn, &p2mt));
    if ( !p2m_is_ram(p2mt) )
        return -EINVAL;
    ASSERT(mfn_valid(mfn));

    page = mfn_to_page(mfn);
    if ( !get_page_and_type(page, d, PGT_writable_page) )
        return -EINVAL;

    va = map_domain_page_global(mfn);
    if ( va == NULL )
    {
        put_page_and_type(page);
        return -ENOMEM;
    }

    spin_lock(&iorp->lock);

    if ( (iorp->va != NULL) || d->is_dying )
    {
        spin_unlock(&iorp->lock);
        unmap_domain_page_global(va);
        put_page_and_type(mfn_to_page(mfn));
        return -EINVAL;
    }

    iorp->va = va;
    iorp->page = page;

    spin_unlock(&iorp->lock);

    domain_unpause(d);

    return 0;
}

int hvm_domain_initialise(struct domain *d)
{
    int rc;

    if ( !hvm_enabled )
    {
        gdprintk(XENLOG_WARNING, "Attempt to create a HVM guest "
                 "on a non-VT/AMDV platform.\n");
        return -EINVAL;
    }

    spin_lock_init(&d->arch.hvm_domain.pbuf_lock);
    spin_lock_init(&d->arch.hvm_domain.irq_lock);

    rc = paging_enable(d, PG_refcounts|PG_translate|PG_external);
    if ( rc != 0 )
        return rc;

    vpic_init(d);
    vioapic_init(d);

    hvm_init_ioreq_page(d, &d->arch.hvm_domain.ioreq);
    hvm_init_ioreq_page(d, &d->arch.hvm_domain.buf_ioreq);

    return hvm_funcs.domain_initialise(d);
}

void hvm_domain_relinquish_resources(struct domain *d)
{
    hvm_destroy_ioreq_page(d, &d->arch.hvm_domain.ioreq);
    hvm_destroy_ioreq_page(d, &d->arch.hvm_domain.buf_ioreq);

    pit_deinit(d);
    rtc_deinit(d);
    pmtimer_deinit(d);
    hpet_deinit(d);
}

void hvm_domain_destroy(struct domain *d)
{
    hvm_funcs.domain_destroy(d);
}

static int hvm_save_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
{
    struct vcpu *v;
    struct hvm_hw_cpu ctxt;
    struct vcpu_guest_context *vc;

    for_each_vcpu(d, v)
    {
        /* We don't need to save state for a vcpu that is down; the restore
         * code will leave it down if there is nothing saved. */
        if ( test_bit(_VPF_down, &v->pause_flags) )
            continue;

        /* Architecture-specific vmcs/vmcb bits */
        hvm_funcs.save_cpu_ctxt(v, &ctxt);

        /* Other vcpu register state */
        vc = &v->arch.guest_context;
        if ( v->fpu_initialised )
            memcpy(ctxt.fpu_regs, &vc->fpu_ctxt, sizeof(ctxt.fpu_regs));
        else
            memset(ctxt.fpu_regs, 0, sizeof(ctxt.fpu_regs));
        ctxt.rax = vc->user_regs.eax;
        ctxt.rbx = vc->user_regs.ebx;
        ctxt.rcx = vc->user_regs.ecx;
        ctxt.rdx = vc->user_regs.edx;
        ctxt.rbp = vc->user_regs.ebp;
        ctxt.rsi = vc->user_regs.esi;
        ctxt.rdi = vc->user_regs.edi;
        /* %rsp handled by arch-specific call above */
#ifdef __x86_64__
        ctxt.r8  = vc->user_regs.r8;
        ctxt.r9  = vc->user_regs.r9;
        ctxt.r10 = vc->user_regs.r10;
        ctxt.r11 = vc->user_regs.r11;
        ctxt.r12 = vc->user_regs.r12;
        ctxt.r13 = vc->user_regs.r13;
        ctxt.r14 = vc->user_regs.r14;
        ctxt.r15 = vc->user_regs.r15;
#endif
        ctxt.dr0 = vc->debugreg[0];
        ctxt.dr1 = vc->debugreg[1];
        ctxt.dr2 = vc->debugreg[2];
        ctxt.dr3 = vc->debugreg[3];
        ctxt.dr6 = vc->debugreg[6];
        ctxt.dr7 = vc->debugreg[7];

        if ( hvm_save_entry(CPU, v->vcpu_id, h, &ctxt) != 0 )
            return 1;
    }
    return 0;
}

static int hvm_load_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
{
    int vcpuid, rc;
    struct vcpu *v;
    struct hvm_hw_cpu ctxt;
    struct vcpu_guest_context *vc;

    /* Which vcpu is this? */
    vcpuid = hvm_load_instance(h);
    if ( vcpuid > MAX_VIRT_CPUS || (v = d->vcpu[vcpuid]) == NULL )
    {
        gdprintk(XENLOG_ERR, "HVM restore: domain has no vcpu %u\n", vcpuid);
        return -EINVAL;
    }
    vc = &v->arch.guest_context;

    /* Need to init this vcpu b
/*
 * netlink/cache.h		Caching Module
 *
 *	This library is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU Lesser General Public
 *	License as published by the Free Software Foundation version 2.1
 *	of the License.
 *
 * Copyright (c) 2003-2008 Thomas Graf <tgraf@suug.ch>
 */

#ifndef NETLINK_CACHE_H_
#define NETLINK_CACHE_H_

#include <netlink/netlink.h>
#include <netlink/msg.h>
#include <netlink/utils.h>
#include <netlink/object.h>
#include <netlink/cache-api.h>

#ifdef __cplusplus
extern "C" {
#endif

struct nl_cache;

typedef void (*change_func_t)(struct nl_cache *, struct nl_object *, int);

/* Access Functions */
extern int			nl_cache_nitems(struct nl_cache *);
extern int			nl_cache_nitems_filter(struct nl_cache *,
						       struct nl_object *);
extern struct nl_cache_ops *	nl_cache_get_ops(struct nl_cache *);
extern struct nl_object *	nl_cache_get_first(struct nl_cache *);
extern struct nl_object *	nl_cache_get_last(struct nl_cache *);
extern struct nl_object *	nl_cache_get_next(struct nl_object *);
extern struct nl_object *	nl_cache_get_prev(struct nl_object *);

extern struct nl_cache *	nl_cache_alloc(struct nl_cache_ops *);
extern int			nl_cache_alloc_and_fill(struct nl_cache_ops *,
							struct nl_sock *,
							struct nl_cache **);
extern int			nl_cache_alloc_name(const char *,
						    struct nl_cache **);
extern struct nl_cache *	nl_cache_subset(struct nl_cache *,
						struct nl_object *);
extern void			nl_cache_clear(struct nl_cache *);
extern void			nl_cache_free(struct nl_cache *);
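
/*
 * Usage sketch (illustrative only, not part of this header): allocate a
 * cache by its registered type name and fill it from the kernel.  Assumes
 * `sk` is a struct nl_sock set up with the socket API (e.g.
 * nl_socket_alloc()/nl_connect()) and that a cache type named "route/link"
 * has been registered.
 *
 *	struct nl_cache *cache;
 *	int err;
 *
 *	if ((err = nl_cache_alloc_name("route/link", &cache)) < 0)
 *		return err;
 *
 *	if ((err = nl_cache_refill(sk, cache)) < 0) {
 *		nl_cache_free(cache);
 *		return err;
 *	}
 */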

/* Cache modification */
extern int			nl_cache_add(struct nl_cache *,
					     struct nl_object *);
extern int			nl_cache_parse_and_add(struct nl_cache *,
						       struct nl_msg *);
extern void			nl_cache_remove(struct nl_object *);
extern int			nl_cache_refill(struct nl_sock *,
						struct nl_cache *);
extern int			nl_cache_pickup(struct nl_sock *,
						struct nl_cache *);
extern int			nl_cache_resync(struct nl_sock *,
						struct nl_cache *,
						change_func_t);
extern int			nl_cache_include(struct nl_cache *,
						 struct nl_object *,
						 change_func_t);

/* General */
extern int			nl_cache_is_empty(struct nl_cache *);
extern void			nl_cache_mark_all(struct nl_cache *);

/* Dumping */
extern void			nl_cache_dump(struct nl_cache *,
					      struct nl_dump_params *);
extern void			nl_cache_dump_filter(struct nl_cache *,
						     struct nl_dump_params *,
						     struct nl_object *);
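
/*
 * Example (sketch): dump every object in a cache to stdout in one-line
 * format.  The dp_type/dp_fd fields and the NL_DUMP_LINE constant are
 * assumed from <netlink/types.h>; dump type names differ between libnl
 * releases.  stdout requires <stdio.h>.
 *
 *	struct nl_dump_params params = {
 *		.dp_type = NL_DUMP_LINE,
 *		.dp_fd = stdout,
 *	};
 *
 *	nl_cache_dump(cache, &params);
 */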

/* Iterators */
#ifdef disabled
extern void			nl_cache_foreach(struct nl_cache *,
						 void (*cb)(struct nl_object *,
							    void *),
						 void *arg);
extern void			nl_cache_foreach_filter(struct nl_cache *,
							struct nl_object *,
							void (*cb)(struct
								   nl_object *,
								   void *),
							void *arg);
#endif
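
/*
 * With the foreach helpers compiled out above, a cache can still be walked
 * manually using the accessors declared at the top of this header.  Sketch
 * only: nl_object_dump() is assumed to be available via <netlink/object.h>,
 * and `params` is a configured struct nl_dump_params as in the dumping
 * example above.
 *
 *	struct nl_object *obj;
 *
 *	for (obj = nl_cache_get_first(cache); obj != NULL;
 *	     obj = nl_cache_get_next(obj))
 *		nl_object_dump(obj, &params);
 */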

/* --- cache management --- */

/* Cache type management */
extern struct nl_cache_ops *	nl_cache_ops_lookup(const char *);
extern struct nl_cache_ops *	nl_cache_ops_associate(int, int);
extern struct nl_msgtype *	nl_msgtype_lookup(struct nl_cache_ops *, int);
extern void			nl_cache_ops_foreach(void (*cb)(struct nl_cache_ops *, void *), void *);
extern int			nl_cache_mngt_register(struct nl_cache_ops *);
extern int			nl_cache_mngt_unregister(struct nl_cache_ops *);

/* Global cache provisioning/requiring */
extern void			nl_cache_mngt_provide(struct nl_cache *);
extern void			nl_cache_mngt_unprovide(struct nl_cache *);
extern struct nl_cache *	nl_cache_mngt_require(const char *);
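
/*
 * Sketch: a filled cache can be provided for global use (other parts of the
 * library may then pick it up, e.g. for name resolution) and retrieved again
 * by its registered type name.  "route/link" is purely an example name here.
 *
 *	nl_cache_mngt_provide(cache);
 *
 *	struct nl_cache *c = nl_cache_mngt_require("route/link");
 *	if (c == NULL)
 *		return -1;
 */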

struct nl_cache_mngr;

#define NL_AUTO_PROVIDE		1

extern int			nl_cache_mngr_alloc(struct nl_sock *,
						    int, int,
						    struct nl_cache_mngr **);
extern int			nl_cache_mngr_add(struct nl_cache_mngr *,
						  const char *,
						  change_func_t,
						  struct nl_cache **);
extern int			nl_cache_mngr_get_fd(struct nl_cache_mngr *);
extern int			nl_cache_mngr_poll(struct nl_cache_mngr *,
						   int);
extern int			nl_cache_mngr_data_ready(struct nl_cache_mngr *);
extern void			nl_cache_mngr_free(struct nl_cache_mngr *);
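
/*
 * Cache manager sketch (illustrative, not part of this header): keep a
 * "route/link" cache synchronized by polling for change notifications.
 * The cache name, the NETLINK_ROUTE protocol constant, the 1000 ms poll
 * timeout and the callback body are assumptions for illustration; the
 * callback signature follows change_func_t above.
 *
 *	static void link_changed(struct nl_cache *cache,
 *				 struct nl_object *obj, int action)
 *	{
 *		printf("cache changed, action %d\n", action);
 *	}
 *
 *	struct nl_cache_mngr *mngr;
 *	struct nl_cache *cache;
 *	int err;
 *
 *	err = nl_cache_mngr_alloc(sk, NETLINK_ROUTE, NL_AUTO_PROVIDE, &mngr);
 *	if (err < 0)
 *		return err;
 *
 *	err = nl_cache_mngr_add(mngr, "route/link", link_changed, &cache);
 *	if (err < 0)
 *		return err;
 *
 *	while (nl_cache_mngr_poll(mngr, 1000) >= 0)
 *		;
 *
 *	nl_cache_mngr_free(mngr);
 */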

#ifdef __cplusplus
}
#endif

#endif
    if ( copy_from_guest(&op, uop, 1) )
        return -EFAULT;

    if ( !IS_PRIV(current->domain) )
        return -EPERM;

    if ( (op.link > 3) || (op.isa_irq > 15) )
        return -EINVAL;

    d = rcu_lock_domain_by_id(op.domid);
    if ( d == NULL )
        return -ESRCH;

    rc = -EINVAL;
    if ( !is_hvm_domain(d) )
        goto out;

    rc = xsm_hvm_set_pci_link_route(d);
    if ( rc )
        goto out;

    rc = 0;
    hvm_set_pci_link_route(d, op.link, op.isa_irq);

 out:
    rcu_unlock_domain(d);
    return rc;
}

static int hvmop_flush_tlb_all(void)
{
    flush_tlb_mask(current->domain->domain_dirty_cpumask);
    return 0;
}

long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE(void) arg)

{
    long rc = 0;

    switch ( op )
    {
    case HVMOP_set_param:
    case HVMOP_get_param:
    {
        struct xen_hvm_param a;
        struct hvm_ioreq_page *iorp;
        struct domain *d;
        struct vcpu *v;

        if ( copy_from_guest(&a, arg, 1) )
            return -EFAULT;

        if ( a.index >= HVM_NR_PARAMS )
            return -EINVAL;

        if ( a.domid == DOMID_SELF )
            d = rcu_lock_current_domain();
        else if ( IS_PRIV(current->domain) )
            d = rcu_lock_domain_by_id(a.domid);
        else
            return -EPERM;

        if ( d == NULL )
            return -ESRCH;

        rc = -EINVAL;
        if ( !is_hvm_domain(d) )
            goto param_fail;

        rc = xsm_hvm_param(d, op);
        if ( rc )
            goto param_fail;

        if ( op == HVMOP_set_param )
        {
            switch ( a.index )
            {
            case HVM_PARAM_IOREQ_PFN:
                iorp = &d->arch.hvm_domain.ioreq;
                rc = hvm_set_ioreq_page(d, iorp, a.value);
                spin_lock(&iorp->lock);
                if ( (rc == 0) && (iorp->va != NULL) )
                    /* Initialise evtchn port info if VCPUs already created. */
                    for_each_vcpu ( d, v )
                        get_ioreq(v)->vp_eport = v->arch.hvm_vcpu.xen_port;
                spin_unlock(&iorp->lock);
                break;
            case HVM_PARAM_BUFIOREQ_PFN:
                iorp = &d->arch.hvm_domain.buf_ioreq;
                rc = hvm_set_ioreq_page(d, iorp, a.value);
                break;
            case HVM_PARAM_CALLBACK_IRQ:
                hvm_set_callback_via(d, a.value);
                hvm_latch_shinfo_size(d);
                break;
            }
            d->arch.hvm_domain.params[a.index] = a.value;
            rc = 0;
        }
        else
        {
            a.value = d->arch.hvm_domain.params[a.index];
            rc = copy_to_guest(arg, &a, 1) ? -EFAULT : 0;
        }

        HVM_DBG_LOG(DBG_LEVEL_HCALL, "%s param %u = %"PRIx64,
                    op == HVMOP_set_param ? "set" : "get",
                    a.index, a.value);

    param_fail:
        rcu_unlock_domain(d);
        break;
    }

    case HVMOP_set_pci_intx_level:
        rc = hvmop_set_pci_intx_level(
            guest_handle_cast(arg, xen_hvm_set_pci_intx_level_t));
        break;

    case HVMOP_set_isa_irq_level:
        rc = hvmop_set_isa_irq_level(
            guest_handle_cast(arg, xen_hvm_set_isa_irq_level_t));
        break;

    case HVMOP_set_pci_link_route:
        rc = hvmop_set_pci_link_route(
            guest_handle_cast(arg, xen_hvm_set_pci_link_route_t));
        break;

    case HVMOP_flush_tlbs:
        rc = guest_handle_is_null(arg) ? hvmop_flush_tlb_all() : -ENOSYS;
        break;

    default:
    {
        gdprintk(XENLOG_WARNING, "Bad HVM op %ld.\n", op);
        rc = -ENOSYS;
        break;
    }
    }

    return rc;
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */