/******************************************************************************
 * domain.c
 *
 * Generic domain-handling functions.
 */

#include <xen/config.h>
#include <xen/compat.h>
#include <xen/init.h>
#include <xen/lib.h>
#include <xen/errno.h>
#include <xen/sched.h>
#include <xen/domain.h>
#include <xen/mm.h>
#include <xen/event.h>
#include <xen/time.h>
#include <xen/console.h>
#include <xen/softirq.h>
#include <xen/domain_page.h>
#include <xen/rangeset.h>
#include <xen/guest_access.h>
#include <xen/hypercall.h>
#include <xen/delay.h>
#include <xen/shutdown.h>
#include <xen/percpu.h>
#include <xen/multicall.h>
#include <xen/rcupdate.h>
#include <asm/debugger.h>
#include <public/sched.h>
#include <public/vcpu.h>
#include <xsm/xsm.h>

/* Protect updates/reads (resp.) of domain_list and domain_hash. */
DEFINE_SPINLOCK(domlist_update_lock);
DEFINE_RCU_READ_LOCK(domlist_read_lock);

#define DOMAIN_HASH_SIZE 256
#define DOMAIN_HASH(_id) ((int)(_id)&(DOMAIN_HASH_SIZE-1))
static struct domain *domain_hash[DOMAIN_HASH_SIZE];
struct domain *domain_list;

struct domain *dom0;

struct vcpu *idle_vcpu[NR_CPUS] __read_mostly;

int current_domain_id(void)
{
    return current->domain->domain_id;
}

struct domain *alloc_domain(domid_t domid)
{
    struct domain *d;

    if ( (d = xmalloc(struct domain)) == NULL )
        return NULL;

    memset(d, 0, sizeof(*d));
    d->domain_id = domid;

    if ( xsm_alloc_security_domain(d) != 0 )
    {
        free_domain(d);
        return NULL;
    }

    atomic_set(&d->refcnt, 1);
    spin_lock_init(&d->big_lock);
    spin_lock_init(&d->page_alloc_lock);
    spin_lock_init(&d->shutdown_lock);
    spin_lock_init(&d->hypercall_deadlock_mutex);
    INIT_LIST_HEAD(&d->page_list);
    INIT_LIST_HEAD(&d->xenpage_list);

    return d;
}

void free_domain(struct domain *d)
{
    xsm_free_security_domain(d);
    xfree(d);
}

static void __domain_finalise_shutdown(struct domain *d)
{
    struct vcpu *v;

    BUG_ON(!spin_is_locked(&d->shutdown_lock));

    if ( d->is_shut_down )
        return;

    for_each_vcpu ( d, v )
        if ( !v->paused_for_shutdown )
            return;

    d->is_shut_down = 1;

    for_each_vcpu ( d, v )
        vcpu_sleep_nosync(v);

    send_guest_global_virq(dom0, VIRQ_DOM_EXC);
}

static void vcpu_check_shutdown(struct vcpu *v)
{
    struct domain *d = v->domain;

    spin_lock(&d->shutdown_lock);

    if ( d->is_shutting_down )
    {
        if ( !v->paused_for_shutdown )
            atomic_inc(&v->pause_count);
        v->paused_for_shutdown = 1;
        v->defer_shutdown = 0;
        __domain_finalise_shutdown(d);
    }

    spin_unlock(&d->shutdown_lock);
}

struct vcpu *alloc_vcpu(
    struct domain *d, unsigned int vcpu_id, unsigned int cpu_id)
{
    struct vcpu *v;

    BUG_ON(d->vcpu[vcpu_id] != NULL);

    if ( (v = alloc_vcpu_struct()) == NULL )
        return NULL;

    v->domain = d;
    v->vcpu_id = vcpu_id;
    v->runstate.state = is_idle_vcpu(v) ? RUNSTATE_running : RUNSTATE_offline;
    v->runstate.state_entry_time = NOW();

    if ( !is_idle_domain(d) )
    {
        set_bit(_VPF_down, &v->pause_flags);
        v->vcpu_info = shared_info_addr(d, vcpu_info[vcpu_id]);
    }

    if ( sched_init_vcpu(v, cpu_id) != 0 )
    {
        free_vcpu_struct(v);
        return NULL;
    }

    if ( vcpu_initialise(v) != 0 )
    {
        sched_destroy_vcpu(v);
        free_vcpu_struct(v);
        return NULL;
    }

    d->vcpu[vcpu_id] = v;
    if ( vcpu_id != 0 )
        d->vcpu[v->vcpu_id-1]->next_in_list = v;

    /* Must be called after making new vcpu visible to for_each_vcpu(). */
    vcpu_check_shutdown(v);

    return v;
}

struct vcpu *alloc_idle_vcpu(unsigned int cpu_id)
{
    struct domain *d;
    struct vcpu *v;
    unsigned int vcpu_id = cpu_id % MAX_VIRT_CPUS;

    if ( (v = idle_vcpu[cpu_id]) != NULL )
        return v;

    d = (vcpu_id == 0) ?
        domain_create(IDLE_DOMAIN_ID, 0, 0) :
        idle_vcpu[cpu_id - vcpu_id]->domain;
    BUG_ON(d == NULL);

    v = alloc_vcpu(d, vcpu_id, cpu_id);
    idle_vcpu[cpu_id] = v;

    return v;
}

struct domain *domain_create(
    domid_t domid, unsigned int domcr_flags, ssidref_t ssidref)
{
    struct domain *d, **pd;
    enum { INIT_evtchn = 1, INIT_gnttab = 2, INIT_arch = 8 };
    int init_status = 0;

    if ( (d = alloc_domain(domid)) == NULL )
        return NULL;

    if ( domcr_flags & DOMCRF_hvm )
        d->is_hvm = 1;

    rangeset_domain_initialise(d);

    if ( !is_idle_domain(d) )
    {
        if ( xsm_do
static void complete_domain_destroy(struct rcu_head *head)
{
    struct domain *d = container_of(head, struct domain, rcu);
    struct vcpu *v;
    int i;

    for ( i = MAX_VIRT_CPUS-1; i >= 0; i-- )
    {
        if ( (v = d->vcpu[i]) == NULL )
            continue;
        vcpu_destroy(v);
        sched_destroy_vcpu(v);
    }

    rangeset_domain_destroy(d);

    grant_table_destroy(d);

    arch_domain_destroy(d);

    sched_destroy_domain(d);

    for ( i = MAX_VIRT_CPUS-1; i >= 0; i-- )
        if ( (v = d->vcpu[i]) != NULL )
            free_vcpu_struct(v);

    free_domain(d);

    send_guest_global_virq(dom0, VIRQ_DOM_EXC);
}

/* Release resources belonging to task @p. */
void domain_destroy(struct domain *d)
{
    struct domain **pd;
    atomic_t old, new;

    BUG_ON(!d->is_dying);

    /* May be already destroyed, or get_domain() can race us. */
    _atomic_set(old, 0);
    _atomic_set(new, DOMAIN_DESTROYED);
    old = atomic_compareandswap(old, new, &d->refcnt);
    if ( _atomic_read(old) != 0 )
        return;

    /* Delete from task list and task hashtable. */
    spin_lock(&domlist_update_lock);
    pd = &domain_list;
    while ( *pd != d )
        pd = &(*pd)->next_in_list;
    rcu_assign_pointer(*pd, d->next_in_list);
    pd = &domain_hash[DOMAIN_HASH(d->domain_id)];
    while ( *pd != d )
        pd = &(*pd)->next_in_hashbucket;
    rcu_assign_pointer(*pd, d->next_in_hashbucket);
    spin_unlock(&domlist_update_lock);

    /* Schedule RCU asynchronous completion of domain destroy. */
    call_rcu(&d->rcu, complete_domain_destroy);
}

void vcpu_pause(struct vcpu *v)
{
    ASSERT(v != current);
    atomic_inc(&v->pause_count);
    vcpu_sleep_sync(v);
}

void vcpu_pause_nosync(struct vcpu *v)
{
    atomic_inc(&v->pause_count);
    vcpu_sleep_nosync(v);
}

void vcpu_unpause(struct vcpu *v)
{
    if ( atomic_dec_and_test(&v->pause_count) )
        vcpu_wake(v);
}

void domain_pause(struct domain *d)
{
    struct vcpu *v;

    ASSERT(d != current->domain);

    atomic_inc(&d->pause_count);

    for_each_vcpu( d, v )
        vcpu_sleep_sync(v);
}

void domain_unpause(struct domain *d)
{
    struct vcpu *v;

    if ( atomic_dec_and_test(&d->pause_count) )
        for_each_vcpu( d, v )
            vcpu_wake(v);
}

void domain_pause_by_systemcontroller(struct domain *d)
{
    domain_pause(d);
    if ( test_and_set_bool(d->is_paused_by_controller) )
        domain_unpause(d);
}

void domain_unpause_by_systemcontroller(struct domain *d)
{
    if ( test_and_clear_bool(d->is_paused_by_controller) )
        domain_unpause(d);
}

int boot_vcpu(struct domain *d, int vcpuid, vcpu_guest_context_u ctxt)
{
    struct vcpu *v = d->vcpu[vcpuid];

    BUG_ON(v->is_initialised);

    return arch_set_info_guest(v, ctxt);
}

int vcpu_reset(struct vcpu *v)
{
    struct domain *d = v->domain;
    int rc;

    domain_pause(d);
    LOCK_BIGLOCK(d);

    rc = arch_vcpu_reset(v);
    if ( rc != 0 )
        goto out;

    set_bit(_VPF_down, &v->pause_flags);

    v->fpu_initialised = 0;
    v->fpu_dirtied     = 0;
    v->is_polling      = 0;
    v->is_initialised  = 0;
    v->nmi_pending     = 0;
    v->nmi_masked      = 0;
    clear_bit(_VPF_blocked, &v->pause_flags);

 out:
    UNLOCK_BIGLOCK(v->domain);
    domain_unpause(d);

    return rc;
}

long do_vcpu_op(int cmd, int vcpuid, XEN_GUEST_HANDLE(void) arg)
{
    struct domain *d = current->domain;
    struct vcpu *v;
    struct vcpu_guest_context *ctxt;
    long rc = 0;

    if ( (vcpuid < 0) || (vcpuid >= MAX_VIRT_CPUS) )
        return -EINVAL;

    if ( (v = d->vcpu[vcpuid]) == NULL )
        return -ENOENT;

    switch ( cmd )
    {
    case VCPUOP_initialise:
        if ( (ctxt = xmalloc(struct vcpu_guest_context)) == NULL )
            return -ENOMEM;

        if ( copy_from_guest(ctxt, arg, 1) )
        {
            xfree(ctxt);
            return -EFAULT;
        }

        LOCK_BIGLOCK(d);
        rc = -EEXIST;
        if ( !v->is_initialised )
            rc = boot_vcpu(d, vcpuid, ctxt);
        UNLOCK_BIGLOCK(d);

        xfree(ctxt);
        break;

    case VCPUOP_up:
        if ( !v->is_initialised )
            return -EINVAL;
        if ( test_and_clear_bit(_VPF_down, &v->pause_flags) )
            vcpu_wake(v);
        break;

    case VCPUOP_down:
        if ( !test_and_set_bit(_VPF_down, &v->pause_flags) )
            vcpu_sleep_nosync(v);
        break;

    case VCPUOP_is_up:
        rc = !test_bit(_VPF_down, &v->pause_flags);
        break;

    case VCPUOP_get_runstate_info:
    {
        struct vcpu_runstate_info runstate;
        vcpu_runstate_get(v, &runstate);
        if ( copy_to_guest(arg, &runstate, 1) )
            rc = -EFAULT;
        break;
    }

    case VCPUOP_set_periodic_timer:
    {
        struct vcpu_set_periodic_timer set;

        if ( copy_from_guest(&set, arg, 1) )
            return -EFAULT;

        if ( set.period_ns < MILLISECS(1) )
            return -EINVAL;

        v->periodic_period = set.period_ns;
        vcpu_force_reschedule(v);

        break;
    }

    case VCPUOP_stop_periodic_timer:
        v->periodic_period = 0;
        vcpu_force_reschedule(v);
        break;

    case VCPUOP_set_singleshot_timer:
    {
        struct vcpu_set_singleshot_timer set;

        if ( v != current )
            return -EINVAL;

        if ( copy_from_guest(&set, arg, 1) )
            return -EFAULT;

        if ( (set.flags & VCPU_SSHOTTMR_future) &&
             (set.timeout_abs_ns < NOW()) )
            return -ETIME;

        if ( v->singleshot_timer.cpu != smp_processor_id() )
        {
            stop_timer(&v->singleshot_timer);
            v->singleshot_timer.cpu = smp_processor_id();
        }

        set_timer(&v->singleshot_timer, set.timeout_abs_ns);

        break;
    }

    case VCPUOP_stop_singleshot_timer:
        if ( v != current )
            return -EINVAL;
        stop_timer(&v->singleshot_timer);
        break;

    case VCPUOP_send_nmi:
        if ( !guest_handle_is_null(arg) )
            return -EINVAL;
        if ( !test_and_set_bool(v->nmi_pending) )
            vcpu_kick(v);
        break;

    default:
        rc = arch_do_vcpu_op(cmd, v, arg);
        break;
    }

    return rc;
}

long vm_assist(struct domain *p, unsigned int cmd, unsigned int type)
{
    if ( type > MAX_VMASST_TYPE )
        return -EINVAL;

    switch ( cmd )
    {
    case VMASST_CMD_enable:
        set_bit(type, &p->vm_assist);
        return 0;
    case VMASST_CMD_disable:
        clear_bit(type, &p->vm_assist);
        return 0;
    }

    return -ENOSYS;
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */
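
/*
 * Illustrative usage sketch (not part of the hypervisor source): do_vcpu_op()
 * above is the hypervisor end of the VCPUOP_* hypercall interface. A PV guest
 * kernel brings a secondary VCPU online roughly as below, using the guest-OS
 * HYPERVISOR_vcpu_op() wrapper; prepare_boot_context() is a hypothetical
 * helper standing in for whatever fills in the vcpu_guest_context for the
 * new VCPU.
 *
 *     struct vcpu_guest_context ctxt;
 *
 *     prepare_boot_context(&ctxt, cpu);   (hypothetical helper)
 *     if ( HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, &ctxt) != 0 )
 *         panic("VCPUOP_initialise failed");
 *     if ( HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL) != 0 )
 *         panic("VCPUOP_up failed");      (clears _VPF_down and wakes the VCPU)
 */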