path: root/lib/lufa/Demos/Host/ClassDriver/AudioOutputHost/AudioOutputHost.c
/*
             LUFA Library
     Copyright (C) Dean Camera, 2017.

  dean [at] fourwalledcubicle [dot] com
           www.lufa-lib.org
*/

/*
  Copyright 2017  Dean Camera (dean [at] fourwalledcubicle [dot] com)

  Permission to use, copy, modify, distribute, and sell this
  software and its documentation for any purpose is hereby granted
  without fee, provided that the above copyright notice appear in
  all copies and that both that the copyright notice and this
  permission notice and warranty disclaimer appear in supporting
  documentation, and that the name of the author not be used in
  advertising or publicity pertaining to distribution of the
  software without specific, written prior permission.

  The author disclaims all warranties with regard to this
  software, including all implied warranties of merchantability
  and fitness.  In no event shall the author be liable for any
  special, indirect or consequential damages or any damages
  whatsoever resulting from loss of use, data or profits, whether
  in an action of contract, negligence or other tortious action,
  arising out of or in connection with the use or performance of
  this software.
*/

/** \file
 *
 *  Main source file for the AudioOutputHost demo. This file contains the main tasks of
 *  the demo and is responsible for the initial application hardware configuration.
 */

#include "AudioOutputHost.h"

/** LUFA Audio Class driver interface configuration and state information. This structure is
 *  passed to all Audio Class driver functions, so that multiple instances of the same class
 *  within a device can be differentiated from one another.
 */
USB_ClassInfo_Audio_Host_t Speaker_Audio_Interface =
	{
		.Config =
			{
				.DataOUTPipe            =
					{
						.Address        = (PIPE_DIR_OUT | 2),
					},
			},
	};
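
/* Note: only the OUT data pipe's logical address (pipe 2, host-to-device) is
 * fixed here; the pipe is fully configured once Audio_Host_ConfigurePipes()
 * has located a matching audio streaming endpoint in the attached device's
 * configuration descriptor.
 */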


/** Main program entry point. This routine configures the hardware required by the application, then
 *  enters a loop to run the application tasks in sequence.
 */
int main(void)
{
	SetupHardware();

	puts_P(PSTR(ESC_FG_CYAN "Audio Output Host Demo running.\r\n" ESC_FG_WHITE));

	LEDs_SetAllLEDs(LEDMASK_USB_NOTREADY);
	GlobalInterruptEnable();

	for (;;)
	{
		Audio_Host_USBTask(&Speaker_Audio_Interface);
		USB_USBTask();
	}
}

/** ISR to handle the writing of the next audio sample to the attached device's audio streaming pipe at each sample period. */
ISR(TIMER0_COMPA_vect, ISR_BLOCK)
{
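	/* Save the currently selected pipe; the audio class driver calls below change
	   the pipe selection, and it must be restored before leaving the ISR so that
	   interrupted main loop pipe operations are not disturbed */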
	uint8_t PrevPipe = Pipe_GetCurrentPipe();

	/* Check that the USB bus is ready for the next sample to write */
	if (Audio_Host_IsReadyForNextSample(&Speaker_Audio_Interface))
	{
		int16_t AudioSample;

		#if defined(USE_TEST_TONE)
			static uint8_t SquareWaveSampleCount;
			static int16_t CurrentWaveValue;

			/* In test tone mode, toggle the output level every 256 samples to generate a square wave test tone */
			if (SquareWaveSampleCount++ == 0xFF)
			  CurrentWaveValue ^= 0x8000;

			/* Only generate audio if the board button is being pressed */
			AudioSample = (Buttons_GetStatus() & BUTTONS_BUTTON1) ? CurrentWaveValue : 0;
		#else
			/* Audio sample is ADC value scaled to fit the entire range */
			AudioSample = ((SAMPLE_MAX_RANGE / ADC_MAX_RANGE) * ADC_GetResult());

			#if defined(MICROPHONE_BIASED_TO_HALF_RAIL)
			/* Microphone is biased to half rail voltage, subtract the bias from the sample value */
			AudioSample -= (SAMPLE_MAX_RANGE / 2);
			#endif
		#endif

		/* Write the sample to the pipe twice, once per channel, assuming the
		   attached speaker expects a 16-bit stereo stream */
		Audio_Host_WriteSample16(&Speaker_Audio_Interface, AudioSample);
		Audio_Host_WriteSample16(&Speaker_Audio_Interface, AudioSample);
	}

	Pipe_SelectPipe(PrevPipe);
}

/** Configures the board hardware and chip peripherals for the demo's functionality. */
void SetupHardware(void)
{
#if (ARCH == ARCH_AVR8)
	/* Disable watchdog if enabled by bootloader/fuses */
	MCUSR &= ~(1 << WDRF);
	wdt_disable();

	/* Disable clock division */
	clock_prescale_set(clock_div_1);
#endif

	/* Hardware Initialization */
	Serial_Init(9600, false);
	LEDs_Init();
	Buttons_Init();
	ADC_Init(ADC_FREE_RUNNING | ADC_PRESCALE_32);
	ADC_SetupChannel(MIC_IN_ADC_CHANNEL);
	USB_Init();

	/* Create a stdio stream for the serial port for stdin and stdout */
	Serial_CreateStream(NULL);

	/* Start the ADC conversion in free running mode */
	ADC_StartReading(ADC_REFERENCE_AVCC | ADC_RIGHT_ADJUSTED | ADC_GET_CHANNEL_MASK(MIC_IN_ADC_CHANNEL));
}

/** Event handler for the USB_DeviceAttached event. This indicates that a device has been attached to the host, and
 *  starts the library USB task to begin the enumeration and USB management process.
 */
void EVENT_USB_Host_DeviceAttached(void)
{
	puts_P(PSTR("Device Attached.\r\n"));
	LEDs_SetAllLEDs(LEDMASK_USB_ENUMERATING);
}

/** Event handler for the USB_DeviceUnattached event. This indicates that a device has been removed from the host, and
 *  stops the library USB task management process.
 */
void EVENT_USB_Host_DeviceUnattached(void)
{
	puts_P(PSTR("\r\nDevice Unattached.\r\n"));
	LEDs_SetAllLEDs(LEDMASK_USB_NOTREADY);
}

/** Event handler for the USB_DeviceEnumerationComplete event. This indicates that a device has been successfully
 *  enumerated by the host and is now ready to be used by the application.
 */
void EVENT_USB_Host_DeviceEnumerationComplete(void)
{
	LEDs_SetAllLEDs(LEDMASK_USB_ENUMERATING);

	/* Buffer to hold the attached device's Configuration Descriptor, which the
	   Audio class driver checks for a compatible audio streaming interface */
	uint16_t ConfigDescriptorSize;
	uint8_t  ConfigDescriptorData[512];

	if (USB_Host_GetDeviceConfigDescriptor(1, &ConfigDescriptorSize, ConfigDescriptorData,
	                                       sizeof(ConfigDescriptorData)) != HOST_GETCONFIG_Successful)
	{
		puts_P(PSTR("Error Retrieving Configuration Descriptor.\r\n"));
		LEDs_SetAllLEDs(LEDMASK_USB_ERROR);
		return;
	}

	if (Audio_Host_ConfigurePipes(&Speaker_Audio_Interface,
	                              ConfigDescriptorSize, ConfigDescriptorData) != AUDIO_ENUMERROR_NoError)
	{
		puts_P(PSTR("Attached Device Not a Valid Audio Output Device.\r\n"));
		LEDs_SetAllLEDs(LEDMASK_USB_ERROR);
		return;
	}

	if (USB_Host_SetDeviceConfiguration(1) != HOST_SENDCONTROL_Successful)
	{
		puts_P(PSTR("Error Setting Device Configuration.\r\n"));
		LEDs_SetAllLEDs(LEDMASK_USB_ERROR);
		return;
	}

	if (Audio_Host_StartStopStreaming(&Speaker_Audio_Interface, true) != HOST_SENDCONTROL_Successful)
	{
		puts_P(PSTR("Error Enabling Audio Stream.\r\n"));
		LEDs_SetAllLEDs(LEDMASK_USB_ERROR);
		USB_Host_SetDeviceConfiguration(0);
		return;
	}

	/* Set the streaming endpoint's sampling frequency to 48KHz, matching the rate
	   at which the sample reload timer below pushes samples to the device */
	USB_Audio_SampleFreq_t SampleRate = AUDIO_SAMPLE_FREQ(48000);
	if (Audio_Host_GetSetEndpointProperty(&Speaker_Audio_Interface, Speaker_Audio_Interface.Config.DataOUTPipe.Address,
	                                      AUDIO_REQ_SetCurrent, AUDIO_EPCONTROL_SamplingFreq,
	                                      sizeof(SampleRate), &SampleRate) != HOST_SENDCONTROL_Successful)
	{
		puts_P(PSTR("Error Setting Audio Sampling Frequency.\r\n"));
		LEDs_SetAllLEDs(LEDMASK_USB_ERROR);
		USB_Host_SetDeviceConfiguration(0);
		return;
	}

	/* Sample reload timer initialization - fire the compare match interrupt at the 48KHz audio sample rate */
	TIMSK0  = (1 << OCIE0A);
	OCR0A   = ((F_CPU / 8 / 48000) - 1);
	TCCR0A  = (1 << WGM01);  // CTC mode
	TCCR0B  = (1 << CS01);   // Fcpu/8 speed
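	/* For example, assuming F_CPU = 16MHz the reload value works out to
	   (16000000 / 8 / 48000) - 1 = 40, giving an actual compare interrupt
	   rate of 2MHz / (40 + 1) = ~48.8KHz */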

	puts_P(PSTR("Audio Device Enumerated.\r\n"));
	LEDs_SetAllLEDs(LEDMASK_USB_READY);
}

/** Event handler for the USB_HostError event. This indicates that a hardware error occurred while in host mode. */
void EVENT_USB_Host_HostError(const uint8_t ErrorCode)
{
	USB_Disable();

	printf_P(PSTR(ESC_FG_RED "Host Mode Error\r\n"
	                         " -- Error Code %d\r\n" ESC_FG_WHITE), ErrorCode);

	LEDs_SetAllLEDs(LEDMASK_USB_ERROR);
	for(;;);
}

/** Event handler for the USB_DeviceEnumerationFailed event. This indicates that a problem occurred while
 *  enumerating an attached USB device.
 */
void EVENT_USB_Host_DeviceEnumerationFailed(const uint8_t ErrorCode,
                                            const uint8_t SubErrorCode)
{
	printf_P(PSTR(ESC_FG_RED "Dev Enum Error\r\n"
	                         " -- Error Code %d\r\n"
	                         " -- Sub Error Code %d\r\n"
	                         " -- In State %d\r\n" ESC_FG_WHITE), ErrorCode, SubErrorCode, USB_HostState);

	LEDs_SetAllLEDs(LEDMASK_USB_ERROR);
}