path: root/test/testdyn.c
blob: 2035224a1a3155a307f5e7b58e06c2b49d360708 (plain)
/*
    ChibiOS/RT - Copyright (C) 2006,2007,2008,2009,2010 Giovanni Di Sirio.

    This file is part of ChibiOS/RT.

    ChibiOS/RT is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 3 of the License, or
    (at your option) any later version.

    ChibiOS/RT is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

#include "ch.h"
#include "test.h"

/**
 * @page test_dynamic Dynamic APIs test
 *
 * File: @ref testdyn.c
 *
 * <h2>Description</h2>
 * This module implements the test sequence for the dynamic thread creation
 * APIs.
 *
 * <h2>Objective</h2>
 * The objective of this test module is to cover 100% of the dynamic APIs code.
 *
 * <h2>Preconditions</h2>
 * The module requires the following kernel options:
 * - @p CH_USE_DYNAMIC
 * - @p CH_USE_HEAP
 * - @p CH_USE_MEMPOOLS
 * .
 * If some of the required options are not enabled, some or all of the tests
 * may be skipped.
 *
 * <h2>Test Cases</h2>
 * - @subpage test_dynamic_001
 * - @subpage test_dynamic_002
 * - @subpage test_dynamic_003
 * .
 * @file testdyn.c
 * @brief Dynamic thread APIs test source file
 * @file testdyn.h
 * @brief Dynamic thread APIs test header file
 */
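
/*
 * Note: the options listed above live in the kernel configuration header
 * chconf.h; the third test case additionally requires CH_USE_REGISTRY. A
 * minimal sketch of the relevant settings is shown below, the surrounding
 * configuration content is omitted and the exact values depend on the demo
 * or project being built.
 *
 *   #define CH_USE_REGISTRY            TRUE
 *   #define CH_USE_DYNAMIC             TRUE
 *   #define CH_USE_HEAP                TRUE
 *   #define CH_USE_MEMPOOLS            TRUE
 */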

#if CH_USE_DYNAMIC
#if CH_USE_HEAP
static MemoryHeap heap1;
#endif
#if CH_USE_MEMPOOLS
static MemoryPool mp1;
#endif

/**
 * @page test_dynamic_001 Threads creation from Memory Heap
 *
 * <h2>Description</h2>
 * Two threads are started by allocating their working areas from the Memory
 * Heap, then the whole remaining heap space is allocated and a third thread
 * startup is attempted.<br>
 * The test expects the first two threads to successfully start and the last
 * one to fail.
 */
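
/*
 * Reference sketch (not part of the test sequence): typical application use
 * of the heap API exercised here. A worker thread is created from the
 * default heap (NULL heap pointer) and its working area is reclaimed by
 * waiting for its termination with chThdWait(). The worker() function and
 * the stack size are illustrative only.
 *
 *   static msg_t worker(void *arg) {
 *     (void)arg;
 *     return 0;
 *   }
 *   ...
 *   Thread *tp = chThdCreateFromHeap(NULL, THD_WA_SIZE(128), NORMALPRIO,
 *                                    worker, NULL);
 *   if (tp != NULL)
 *     (void)chThdWait(tp);
 */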

static msg_t thread(void *p) {

  test_emit_token(*(char *)p);
  return 0;
}

#if CH_USE_HEAP
static void dyn1_setup(void) {

  chHeapInit(&heap1, test.buffer, sizeof(union test_buffers));
}

static void dyn1_execute(void) {
  size_t n, sz;
  void *p1;
  tprio_t prio = chThdGetPriority();

  (void)chHeapStatus(&heap1, &sz);
  /* Starting threads from the heap. */
  threads[0] = chThdCreateFromHeap(&heap1, THD_WA_SIZE(THREADS_STACK_SIZE),
                                   prio-1, thread, "A");
  threads[1] = chThdCreateFromHeap(&heap1, THD_WA_SIZE(THREADS_STACK_SIZE),
                                   prio-2, thread, "B");
  /* Allocating the whole heap in order to make the thread creation fail.*/
  (void)chHeapStatus(&heap1, &n);
  p1 = chHeapAlloc(&heap1, n);
  threads[2] = chThdCreateFromHeap(&heap1, THD_WA_SIZE(THREADS_STACK_SIZE),
                                   prio-3, thread, "C");
  chHeapFree(p1);

  test_assert(1, (threads[0] != NULL) &&
                 (threads[1] != NULL) &&
                 (threads[2] == NULL) &&
                 (threads[3] == NULL) &&
                 (threads[4] == NULL),
                 "thread creation failed");

  /* Claiming the memory from terminated threads. */
  test_wait_threads();
  test_assert_sequence(2, "AB");

  /* Heap status checked again.*/
  test_assert(3, chHeapStatus(&heap1, &n) == 1, "heap fragmented");
  test_assert(4, n == sz, "heap size changed");
}

ROMCONST struct testcase testdyn1 = {
  "Dynamic APIs, threads creation from heap",
  dyn1_setup,
  NULL,
  dyn1_execute
};
#endif /* CH_USE_HEAP */

#if CH_USE_MEMPOOLS
/**
 * @page test_dynamic_002 Threads creation from Memory Pool
 *
 * <h2>Description</h2>
 * Five thread creations are attempted from a pool containing only four
 * elements.<br>
 * The test expects the first four threads to successfully start and the last
 * one to fail.
 */
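
/*
 * Reference sketch (not part of the test sequence): typical application use
 * of the pool API exercised here. The pool is initialized with the working
 * area size, preloaded with statically allocated areas, and threads are then
 * spawned from it; a terminated thread's working area returns to the pool
 * once the thread has been waited upon. The names below are illustrative
 * only.
 *
 *   static WORKING_AREA(wa_pool[4], 128);
 *   static MemoryPool thd_pool;
 *   ...
 *   chPoolInit(&thd_pool, THD_WA_SIZE(128), NULL);
 *   for (i = 0; i < 4; i++)
 *     chPoolFree(&thd_pool, wa_pool[i]);
 *   Thread *tp = chThdCreateFromMemoryPool(&thd_pool, NORMALPRIO, worker, NULL);
 */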

static void dyn2_setup(void) {

  chPoolInit(&mp1, THD_WA_SIZE(THREADS_STACK_SIZE), NULL);
}

static void dyn2_execute(void) {
  int i;
  tprio_t prio = chThdGetPriority();

  /* Adding the WAs to the pool. */
  for (i = 0; i < 4; i++)
    chPoolFree(&mp1, wa[i]);

  /* Starting threads from the memory pool. */
  threads[0] = chThdCreateFromMemoryPool(&mp1, prio-1, thread, "A");
  threads[1] = chThdCreateFromMemoryPool(&mp1, prio-2, thread, "B");
  threads[2] = chThdCreateFromMemoryPool(&mp1, prio-3, thread, "C");
  threads[3] = chThdCreateFromMemoryPool(&mp1, prio-4, thread, "D");
  threads[4] = chThdCreateFromMemoryPool(&mp1, prio-5, thread, "E");

  test_assert(1, (threads[0] != NULL) &&
                 (threads[1] != NULL) &&
                 (threads[2] != NULL) &&
                 (threads[3] != NULL) &&
                 (threads[4] == NULL),
                 "thread creation failed");

  /* Claiming the memory from terminated threads. */
  test_wait_threads();
  test_assert_sequence(2, "ABCD");

  /* Now the pool must be full again. */
  for (i = 0; i < 4; i++)
    test_assert(3, chPoolAlloc(&mp1) != NULL, "pool list empty");
  test_assert(4, chPoolAlloc(&mp1) == NULL, "pool list not empty");
}

ROMCONST struct testcase testdyn2 = {
  "Dynamic APIs, threads creation from memory pool",
  dyn2_setup,
  NULL,
  dyn2_execute
};
#endif /* CH_USE_MEMPOOLS */

#if CH_USE_HEAP && CH_USE_REGISTRY
/**
 * @page test_dynamic_003 Registry and References test
 *
 * <h2>Description</h2>
 * Registry and Thread References APIs are tested for functionality and
 * coverage.
 */
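
/*
 * Note on the registry scan used below: chRegFirstThread() returns the first
 * registered thread with its reference counter increased, and
 * chRegNextThread() takes a reference on the next thread while releasing the
 * one on the thread it received. A complete scan therefore also collects
 * detached threads that have already terminated, which is what the final
 * assertions of this test verify.
 *
 *   Thread *tp = chRegFirstThread();
 *   while (tp != NULL)
 *     tp = chRegNextThread(tp);
 */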

static bool_t regfind(Thread *tp) {
  Thread *ftp;
  bool_t found = FALSE;

  ftp = chRegFirstThread();
  do {
    found |= ftp == tp;
    ftp = chRegNextThread(ftp);
  } while (ftp != NULL);
  return found;
}

static void dyn3_setup(void) {

  chHeapInit(&heap1, test.buffer, sizeof(union test_buffers));
}

static void dyn3_execute(void) {
  Thread *tp;
  tprio_t prio = chThdGetPriority();

  /* Testing references increase/decrease and final detach.*/
  tp = chThdCreateFromHeap(&heap1, WA_SIZE, prio-1, thread, "A");
  test_assert(1, tp->p_refs == 1, "wrong initial reference counter");
  chThdAddRef(tp);
  test_assert(2, tp->p_refs == 2, "references increase failure");
  chThdRelease(tp);
  test_assert(3, tp->p_refs == 1, "references decrease failure");

  /* Verify that the new thread shows up in the registry.*/
  test_assert(4, regfind(tp), "thread missing from registry");
  test_assert(5, regfind(tp), "thread disappeared");

  /* Detach and let the thread execute and terminate.*/
  chThdRelease(tp);
  test_assert(6, tp->p_refs == 0, "detach failure");
  test_assert(7, tp->p_state == THD_STATE_READY, "invalid state");
  test_assert(8, regfind(tp), "thread disappeared");
  test_assert(9, regfind(tp), "thread disappeared");
  chThdSleepMilliseconds(50);           /* The thread just terminates.      */
  test_assert(10, tp->p_state == THD_STATE_FINAL, "invalid state");

  /* Clearing the zombie by scanning the registry.*/
  test_assert(11, regfind(tp), "thread disappeared");
  test_assert(12, !regfind(tp), "thread still in registry");
}

ROMCONST struct testcase testdyn3 = {
  "Dynamic APIs, registry and references",
  dyn3_setup,
  NULL,
  dyn3_execute
};
#endif /* CH_USE_HEAP && CH_USE_REGISTRY */
#endif /* CH_USE_DYNAMIC */

/**
 * @brief   Test sequence for dynamic APIs.
 */
ROMCONST struct testcase * ROMCONST patterndyn[] = {
#if CH_USE_DYNAMIC
#if CH_USE_HEAP
  &testdyn1,
#endif
#if CH_USE_MEMPOOLS
  &testdyn2,
#endif
#if CH_USE_HEAP && CH_USE_REGISTRY
  &testdyn3,
#endif
#endif
  NULL
};
s="n">domain = (domid_t)*pdomid; domctl.u.createdomain.ssidref = ssidref; domctl.u.createdomain.flags = flags; memcpy(domctl.u.createdomain.handle, handle, sizeof(xen_domain_handle_t)); if ( (err = do_domctl(xc_handle, &domctl)) != 0 ) return err; *pdomid = (uint16_t)domctl.domain; return 0; } int xc_domain_pause(int xc_handle, uint32_t domid) { DECLARE_DOMCTL; domctl.cmd = XEN_DOMCTL_pausedomain; domctl.domain = (domid_t)domid; return do_domctl(xc_handle, &domctl); } int xc_domain_unpause(int xc_handle, uint32_t domid) { DECLARE_DOMCTL; domctl.cmd = XEN_DOMCTL_unpausedomain; domctl.domain = (domid_t)domid; return do_domctl(xc_handle, &domctl); } int xc_domain_destroy(int xc_handle, uint32_t domid) { int ret; DECLARE_DOMCTL; domctl.cmd = XEN_DOMCTL_destroydomain; domctl.domain = (domid_t)domid; do { ret = do_domctl(xc_handle, &domctl); } while ( ret && (errno == EAGAIN) ); return ret; } int xc_domain_shutdown(int xc_handle, uint32_t domid, int reason) { int ret = -1; sched_remote_shutdown_t arg; DECLARE_HYPERCALL; hypercall.op = __HYPERVISOR_sched_op; hypercall.arg[0] = (unsigned long)SCHEDOP_remote_shutdown; hypercall.arg[1] = (unsigned long)&arg; arg.domain_id = domid; arg.reason = reason; if ( lock_pages(&arg, sizeof(arg)) != 0 ) { PERROR("Could not lock memory for Xen hypercall"); goto out1; } ret = do_xen_hypercall(xc_handle, &hypercall); unlock_pages(&arg, sizeof(arg)); out1: return ret; } int xc_vcpu_setaffinity(int xc_handle, uint32_t domid, int vcpu, uint64_t cpumap) { DECLARE_DOMCTL; int ret = -1; uint8_t local[sizeof (cpumap)]; domctl.cmd = XEN_DOMCTL_setvcpuaffinity; domctl.domain = (domid_t)domid; domctl.u.vcpuaffinity.vcpu = vcpu; bitmap_64_to_byte(local, &cpumap, sizeof(cpumap) * 8); set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local); domctl.u.vcpuaffinity.cpumap.nr_cpus = sizeof(cpumap) * 8; if ( lock_pages(local, sizeof(local)) != 0 ) { PERROR("Could not lock memory for Xen hypercall"); goto out; } ret = do_domctl(xc_handle, &domctl); unlock_pages(local, sizeof(local)); out: return ret; } int xc_vcpu_getaffinity(int xc_handle, uint32_t domid, int vcpu, uint64_t *cpumap) { DECLARE_DOMCTL; int ret = -1; uint8_t local[sizeof (cpumap)]; domctl.cmd = XEN_DOMCTL_getvcpuaffinity; domctl.domain = (domid_t)domid; domctl.u.vcpuaffinity.vcpu = vcpu; set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local); domctl.u.vcpuaffinity.cpumap.nr_cpus = sizeof(cpumap) * 8; if ( lock_pages(local, sizeof(local)) != 0 ) { PERROR("Could not lock memory for Xen hypercall"); goto out; } ret = do_domctl(xc_handle, &domctl); unlock_pages(local, sizeof (local)); bitmap_byte_to_64(cpumap, local, sizeof(local) * 8); out: return ret; } int xc_domain_getinfo(int xc_handle, uint32_t first_domid, unsigned int max_doms, xc_dominfo_t *info) { unsigned int nr_doms; uint32_t next_domid = first_domid; DECLARE_DOMCTL; int rc = 0; memset(info, 0, max_doms*sizeof(xc_dominfo_t)); for ( nr_doms = 0; nr_doms < max_doms; nr_doms++ ) { domctl.cmd = XEN_DOMCTL_getdomaininfo; domctl.domain = (domid_t)next_domid; if ( (rc = do_domctl(xc_handle, &domctl)) < 0 ) break; info->domid = (uint16_t)domctl.domain; info->dying = !!(domctl.u.getdomaininfo.flags&XEN_DOMINF_dying); info->shutdown = !!(domctl.u.getdomaininfo.flags&XEN_DOMINF_shutdown); info->paused = !!(domctl.u.getdomaininfo.flags&XEN_DOMINF_paused); info->blocked = !!(domctl.u.getdomaininfo.flags&XEN_DOMINF_blocked); info->running = !!(domctl.u.getdomaininfo.flags&XEN_DOMINF_running); info->hvm = 
!!(domctl.u.getdomaininfo.flags&XEN_DOMINF_hvm_guest); info->debugged = !!(domctl.u.getdomaininfo.flags&XEN_DOMINF_debugged); info->shutdown_reason = (domctl.u.getdomaininfo.flags>>XEN_DOMINF_shutdownshift) & XEN_DOMINF_shutdownmask; if ( info->shutdown && (info->shutdown_reason == SHUTDOWN_crash) ) { info->shutdown = 0; info->crashed = 1; } info->ssidref = domctl.u.getdomaininfo.ssidref; info->nr_pages = domctl.u.getdomaininfo.tot_pages; info->max_memkb = domctl.u.getdomaininfo.max_pages << (PAGE_SHIFT-10); info->shared_info_frame = domctl.u.getdomaininfo.shared_info_frame; info->cpu_time = domctl.u.getdomaininfo.cpu_time; info->nr_online_vcpus = domctl.u.getdomaininfo.nr_online_vcpus; info->max_vcpu_id = domctl.u.getdomaininfo.max_vcpu_id; memcpy(info->handle, domctl.u.getdomaininfo.handle, sizeof(xen_domain_handle_t)); next_domid = (uint16_t)domctl.domain + 1; info++; } if ( nr_doms == 0 ) return rc; return nr_doms; } int xc_domain_getinfolist(int xc_handle, uint32_t first_domain, unsigned int max_domains, xc_domaininfo_t *info) { int ret = 0; DECLARE_SYSCTL; if ( lock_pages(info, max_domains*sizeof(xc_domaininfo_t)) != 0 ) return -1; sysctl.cmd = XEN_SYSCTL_getdomaininfolist; sysctl.u.getdomaininfolist.first_domain = first_domain; sysctl.u.getdomaininfolist.max_domains = max_domains; set_xen_guest_handle(sysctl.u.getdomaininfolist.buffer, info); if ( xc_sysctl(xc_handle, &sysctl) < 0 ) ret = -1; else ret = sysctl.u.getdomaininfolist.num_domains; unlock_pages(info, max_domains*sizeof(xc_domaininfo_t)); return ret; } /* get info from hvm guest for save */ int xc_domain_hvm_getcontext(int xc_handle, uint32_t domid, uint8_t *ctxt_buf, uint32_t size) { int ret; DECLARE_DOMCTL; domctl.cmd = XEN_DOMCTL_gethvmcontext; domctl.domain = (domid_t)domid; domctl.u.hvmcontext.size = size; set_xen_guest_handle(domctl.u.hvmcontext.buffer, ctxt_buf); if ( ctxt_buf ) if ( (ret = lock_pages(ctxt_buf, size)) != 0 ) return ret; ret = do_domctl(xc_handle, &domctl); if ( ctxt_buf ) unlock_pages(ctxt_buf, size); return (ret < 0 ? -1 : domctl.u.hvmcontext.size); } /* set info to hvm guest for restore */ int xc_domain_hvm_setcontext(int xc_handle, uint32_t domid, uint8_t *ctxt_buf, uint32_t size) { int ret; DECLARE_DOMCTL; domctl.cmd = XEN_DOMCTL_sethvmcontext; domctl.domain = domid; domctl.u.hvmcontext.size = size; set_xen_guest_handle(domctl.u.hvmcontext.buffer, ctxt_buf); if ( (ret = lock_pages(ctxt_buf, size)) != 0 ) return ret; ret = do_domctl(xc_handle, &domctl); unlock_pages(ctxt_buf, size); return ret; } int xc_vcpu_getcontext(int xc_handle, uint32_t domid, uint32_t vcpu, vcpu_guest_context_any_t *ctxt) { int rc; DECLARE_DOMCTL; size_t sz = sizeof(vcpu_guest_context_any_t); domctl.cmd = XEN_DOMCTL_getvcpucontext; domctl.domain = (domid_t)domid; domctl.u.vcpucontext.vcpu = (uint16_t)vcpu; set_xen_guest_handle(domctl.u.vcpucontext.ctxt, &ctxt->c); if ( (rc = lock_pages(ctxt, sz)) != 0 ) return rc; rc = do_domctl(xc_handle, &domctl); unlock_pages(ctxt, sz); return rc; } int xc_shadow_control(int xc_handle, uint32_t domid, unsigned int sop, unsigned long *dirty_bitmap, unsigned long pages, unsigned long *mb, uint32_t mode, xc_shadow_op_stats_t *stats) { int rc; DECLARE_DOMCTL; domctl.cmd = XEN_DOMCTL_shadow_op; domctl.domain = (domid_t)domid; domctl.u.shadow_op.op = sop; domctl.u.shadow_op.pages = pages; domctl.u.shadow_op.mb = mb ? 
*mb : 0; domctl.u.shadow_op.mode = mode; set_xen_guest_handle(domctl.u.shadow_op.dirty_bitmap, (uint8_t *)dirty_bitmap); rc = do_domctl(xc_handle, &domctl); if ( stats ) memcpy(stats, &domctl.u.shadow_op.stats, sizeof(xc_shadow_op_stats_t)); if ( mb ) *mb = domctl.u.shadow_op.mb; return (rc == 0) ? domctl.u.shadow_op.pages : rc; } int xc_domain_setmaxmem(int xc_handle, uint32_t domid, unsigned int max_memkb) { DECLARE_DOMCTL; domctl.cmd = XEN_DOMCTL_max_mem; domctl.domain = (domid_t)domid; domctl.u.max_mem.max_memkb = max_memkb; return do_domctl(xc_handle, &domctl); } int xc_domain_pin_memory_cacheattr(int xc_handle, uint32_t domid, uint64_t start, uint64_t end, uint32_t type) { DECLARE_DOMCTL; domctl.cmd = XEN_DOMCTL_pin_mem_cacheattr; domctl.domain = (domid_t)domid; domctl.u.pin_mem_cacheattr.start = start; domctl.u.pin_mem_cacheattr.end = end; domctl.u.pin_mem_cacheattr.type = type; return do_domctl(xc_handle, &domctl); } #if defined(__i386__) || defined(__x86_64__) #include "xc_e820.h" int xc_domain_set_memmap_limit(int xc_handle, uint32_t domid, unsigned long map_limitkb) { int rc; struct xen_foreign_memory_map fmap = { .domid = domid, .map = { .nr_entries = 1 } }; struct e820entry e820 = { .addr = 0, .size = (uint64_t)map_limitkb << 10, .type = E820_RAM }; set_xen_guest_handle(fmap.map.buffer, &e820); if ( lock_pages(&fmap, sizeof(fmap)) || lock_pages(&e820, sizeof(e820)) ) { PERROR("Could not lock memory for Xen hypercall"); rc = -1; goto out; } rc = xc_memory_op(xc_handle, XENMEM_set_memory_map, &fmap); out: unlock_pages(&fmap, sizeof(fmap)); unlock_pages(&e820, sizeof(e820)); return rc; } #else int xc_domain_set_memmap_limit(int xc_handle, uint32_t domid, unsigned long map_limitkb) { PERROR("Function not implemented"); errno = ENOSYS; return -1; } #endif int xc_domain_set_time_offset(int xc_handle, uint32_t domid, int32_t time_offset_seconds) { DECLARE_DOMCTL; domctl.cmd = XEN_DOMCTL_settimeoffset; domctl.domain = (domid_t)domid; domctl.u.settimeoffset.time_offset_seconds = time_offset_seconds; return do_domctl(xc_handle, &domctl); } int xc_domain_memory_increase_reservation(int xc_handle, uint32_t domid, unsigned long nr_extents, unsigned int extent_order, unsigned int mem_flags, xen_pfn_t *extent_start) { int err; struct xen_memory_reservation reservation = { .nr_extents = nr_extents, .extent_order = extent_order, .mem_flags = mem_flags, .domid = domid }; /* may be NULL */ set_xen_guest_handle(reservation.extent_start, extent_start); err = xc_memory_op(xc_handle, XENMEM_increase_reservation, &reservation); if ( err == nr_extents ) return 0; if ( err >= 0 ) { DPRINTF("Failed allocation for dom %d: " "%ld extents of order %d, mem_flags %x\n", domid, nr_extents, extent_order, mem_flags); errno = ENOMEM; err = -1; } return err; } int xc_domain_memory_decrease_reservation(int xc_handle, uint32_t domid, unsigned long nr_extents, unsigned int extent_order, xen_pfn_t *extent_start) { int err; struct xen_memory_reservation reservation = { .nr_extents = nr_extents, .extent_order = extent_order, .mem_flags = 0, .domid = domid }; set_xen_guest_handle(reservation.extent_start, extent_start); if ( extent_start == NULL ) { DPRINTF("decrease_reservation extent_start is NULL!\n"); errno = EINVAL; return -1; } err = xc_memory_op(xc_handle, XENMEM_decrease_reservation, &reservation); if ( err == nr_extents ) return 0; if ( err >= 0 ) { DPRINTF("Failed deallocation for dom %d: %ld extents of order %d\n", domid, nr_extents, extent_order); errno = EINVAL; err = -1; } return err; } int 
xc_domain_memory_populate_physmap(int xc_handle, uint32_t domid, unsigned long nr_extents, unsigned int extent_order, unsigned int mem_flags, xen_pfn_t *extent_start) { int err; struct xen_memory_reservation reservation = { .nr_extents = nr_extents, .extent_order = extent_order, .mem_flags = mem_flags, .domid = domid }; set_xen_guest_handle(reservation.extent_start, extent_start); err = xc_memory_op(xc_handle, XENMEM_populate_physmap, &reservation); if ( err == nr_extents ) return 0; if ( err >= 0 ) { DPRINTF("Failed allocation for dom %d: %ld extents of order %d\n", domid, nr_extents, extent_order); errno = EBUSY; err = -1; } return err; } int xc_domain_memory_translate_gpfn_list(int xc_handle, uint32_t domid, unsigned long nr_gpfns, xen_pfn_t *gpfn_list, xen_pfn_t *mfn_list) { int err; struct xen_translate_gpfn_list translate_gpfn_list = { .domid = domid, .nr_gpfns = nr_gpfns, }; set_xen_guest_handle(translate_gpfn_list.gpfn_list, gpfn_list); set_xen_guest_handle(translate_gpfn_list.mfn_list, mfn_list); err = xc_memory_op(xc_handle, XENMEM_translate_gpfn_list, &translate_gpfn_list); if ( err != 0 ) { DPRINTF("Failed translation for dom %d (%ld PFNs)\n", domid, nr_gpfns); errno = -err; err = -1; } return err; } int xc_domain_max_vcpus(int xc_handle, uint32_t domid, unsigned int max) { DECLARE_DOMCTL; domctl.cmd = XEN_DOMCTL_max_vcpus; domctl.domain = (domid_t)domid; domctl.u.max_vcpus.max = max; return do_domctl(xc_handle, &domctl); } int xc_domain_sethandle(int xc_handle, uint32_t domid, xen_domain_handle_t handle) { DECLARE_DOMCTL; domctl.cmd = XEN_DOMCTL_setdomainhandle; domctl.domain = (domid_t)domid; memcpy(domctl.u.setdomainhandle.handle, handle, sizeof(xen_domain_handle_t)); return do_domctl(xc_handle, &domctl); } int xc_vcpu_getinfo(int xc_handle, uint32_t domid, uint32_t vcpu, xc_vcpuinfo_t *info) { int rc; DECLARE_DOMCTL; domctl.cmd = XEN_DOMCTL_getvcpuinfo; domctl.domain = (domid_t)domid; domctl.u.getvcpuinfo.vcpu = (uint16_t)vcpu; rc = do_domctl(xc_handle, &domctl); memcpy(info, &domctl.u.getvcpuinfo, sizeof(*info)); return rc; } int xc_domain_ioport_permission(int xc_handle, uint32_t domid, uint32_t first_port, uint32_t nr_ports, uint32_t allow_access) { DECLARE_DOMCTL; domctl.cmd = XEN_DOMCTL_ioport_permission; domctl.domain = (domid_t)domid; domctl.u.ioport_permission.first_port = first_port; domctl.u.ioport_permission.nr_ports = nr_ports; domctl.u.ioport_permission.allow_access = allow_access; return do_domctl(xc_handle, &domctl); } int xc_availheap(int xc_handle, int min_width, int max_width, int node, uint64_t *bytes) { DECLARE_SYSCTL; int rc; sysctl.cmd = XEN_SYSCTL_availheap; sysctl.u.availheap.min_bitwidth = min_width; sysctl.u.availheap.max_bitwidth = max_width; sysctl.u.availheap.node = node; rc = xc_sysctl(xc_handle, &sysctl); *bytes = sysctl.u.availheap.avail_bytes; return rc; } int xc_vcpu_setcontext(int xc_handle, uint32_t domid, uint32_t vcpu, vcpu_guest_context_any_t *ctxt) { DECLARE_DOMCTL; int rc; size_t sz = sizeof(vcpu_guest_context_any_t); if (ctxt == NULL) { errno = EINVAL; return -1; } domctl.cmd = XEN_DOMCTL_setvcpucontext; domctl.domain = domid; domctl.u.vcpucontext.vcpu = vcpu; set_xen_guest_handle(domctl.u.vcpucontext.ctxt, &ctxt->c); if ( (rc = lock_pages(ctxt, sz)) != 0 ) return rc; rc = do_domctl(xc_handle, &domctl); unlock_pages(ctxt, sz); return rc; } int xc_domain_irq_permission(int xc_handle, uint32_t domid, uint8_t pirq, uint8_t allow_access) { DECLARE_DOMCTL; domctl.cmd = XEN_DOMCTL_irq_permission; domctl.domain = domid; 
domctl.u.irq_permission.pirq = pirq; domctl.u.irq_permission.allow_access = allow_access; return do_domctl(xc_handle, &domctl); } int xc_domain_iomem_permission(int xc_handle, uint32_t domid, unsigned long first_mfn, unsigned long nr_mfns, uint8_t allow_access) { DECLARE_DOMCTL; domctl.cmd = XEN_DOMCTL_iomem_permission; domctl.domain = domid; domctl.u.iomem_permission.first_mfn = first_mfn; domctl.u.iomem_permission.nr_mfns = nr_mfns; domctl.u.iomem_permission.allow_access = allow_access; return do_domctl(xc_handle, &domctl); } int xc_domain_send_trigger(int xc_handle, uint32_t domid, uint32_t trigger, uint32_t vcpu) { DECLARE_DOMCTL; domctl.cmd = XEN_DOMCTL_sendtrigger; domctl.domain = domid; domctl.u.sendtrigger.trigger = trigger; domctl.u.sendtrigger.vcpu = vcpu; return do_domctl(xc_handle, &domctl); } int xc_set_hvm_param(int handle, domid_t dom, int param, unsigned long value) { DECLARE_HYPERCALL; xen_hvm_param_t arg; int rc; hypercall.op = __HYPERVISOR_hvm_op; hypercall.arg[0] = HVMOP_set_param; hypercall.arg[1] = (unsigned long)&arg; arg.domid = dom; arg.index = param; arg.value = value; if ( lock_pages(&arg, sizeof(arg)) != 0 ) return -1; rc = do_xen_hypercall(handle, &hypercall); unlock_pages(&arg, sizeof(arg)); return rc; } int xc_get_hvm_param(int handle, domid_t dom, int param, unsigned long *value) { DECLARE_HYPERCALL; xen_hvm_param_t arg; int rc; hypercall.op = __HYPERVISOR_hvm_op; hypercall.arg[0] = HVMOP_get_param; hypercall.arg[1] = (unsigned long)&arg; arg.domid = dom; arg.index = param; if ( lock_pages(&arg, sizeof(arg)) != 0 ) return -1; rc = do_xen_hypercall(handle, &hypercall); unlock_pages(&arg, sizeof(arg)); *value = arg.value; return rc; } int xc_domain_setdebugging(int xc_handle, uint32_t domid, unsigned int enable) { DECLARE_DOMCTL; domctl.cmd = XEN_DOMCTL_setdebugging; domctl.domain = domid; domctl.u.setdebugging.enable = enable; return do_domctl(xc_handle, &domctl); } int xc_assign_device( int xc_handle, uint32_t domid, uint32_t machine_bdf) { DECLARE_DOMCTL; domctl.cmd = XEN_DOMCTL_assign_device; domctl.domain = domid; domctl.u.assign_device.machine_bdf = machine_bdf; return do_domctl(xc_handle, &domctl); } int xc_get_device_group( int xc_handle, uint32_t domid, uint32_t machine_bdf, uint32_t max_sdevs, uint32_t *num_sdevs, uint32_t *sdev_array) { int rc; DECLARE_DOMCTL; domctl.cmd = XEN_DOMCTL_get_device_group; domctl.domain = (domid_t)domid; domctl.u.get_device_group.machine_bdf = machine_bdf; domctl.u.get_device_group.max_sdevs = max_sdevs; set_xen_guest_handle(domctl.u.get_device_group.sdev_array, sdev_array); if ( lock_pages(sdev_array, max_sdevs * sizeof(*sdev_array)) != 0 ) { PERROR("Could not lock memory for xc_get_device_group\n"); return -ENOMEM; } rc = do_domctl(xc_handle, &domctl); unlock_pages(sdev_array, max_sdevs * sizeof(*sdev_array)); *num_sdevs = domctl.u.get_device_group.num_sdevs; return rc; } int xc_test_assign_device( int xc_handle, uint32_t domid, uint32_t machine_bdf) { DECLARE_DOMCTL; domctl.cmd = XEN_DOMCTL_test_assign_device; domctl.domain = domid; domctl.u.assign_device.machine_bdf = machine_bdf; return do_domctl(xc_handle, &domctl); } int xc_deassign_device( int xc_handle, uint32_t domid, uint32_t machine_bdf) { DECLARE_DOMCTL; domctl.cmd = XEN_DOMCTL_deassign_device; domctl.domain = domid; domctl.u.assign_device.machine_bdf = machine_bdf; return do_domctl(xc_handle, &domctl); } int xc_domain_update_msi_irq( int xc_handle, uint32_t domid, uint32_t gvec, uint32_t pirq, uint32_t gflags) { int rc; xen_domctl_bind_pt_irq_t *bind; 
DECLARE_DOMCTL; domctl.cmd = XEN_DOMCTL_bind_pt_irq; domctl.domain = (domid_t)domid; bind = &(domctl.u.bind_pt_irq); bind->hvm_domid = domid; bind->irq_type = PT_IRQ_TYPE_MSI; bind->machine_irq = pirq; bind->u.msi.gvec = gvec; bind->u.msi.gflags = gflags; rc = do_domctl(xc_handle, &domctl); return rc; } /* Pass-through: binds machine irq to guests irq */ int xc_domain_bind_pt_irq( int xc_handle, uint32_t domid, uint8_t machine_irq, uint8_t irq_type, uint8_t bus, uint8_t device, uint8_t intx, uint8_t isa_irq) { int rc; xen_domctl_bind_pt_irq_t * bind; DECLARE_DOMCTL; domctl.cmd = XEN_DOMCTL_bind_pt_irq; domctl.domain = (domid_t)domid; bind = &(domctl.u.bind_pt_irq); bind->hvm_domid = domid; bind->irq_type = irq_type; bind->machine_irq = machine_irq; if ( irq_type == PT_IRQ_TYPE_PCI ) { bind->u.pci.bus = bus; bind->u.pci.device = device; bind->u.pci.intx = intx; } else if ( irq_type == PT_IRQ_TYPE_ISA ) bind->u.isa.isa_irq = isa_irq; rc = do_domctl(xc_handle, &domctl); return rc; } int xc_domain_unbind_pt_irq( int xc_handle, uint32_t domid, uint8_t machine_irq, uint8_t irq_type, uint8_t bus, uint8_t device, uint8_t intx, uint8_t isa_irq) { int rc; xen_domctl_bind_pt_irq_t * bind; DECLARE_DOMCTL; domctl.cmd = XEN_DOMCTL_unbind_pt_irq; domctl.domain = (domid_t)domid; bind = &(domctl.u.bind_pt_irq); bind->hvm_domid = domid; bind->irq_type = irq_type; bind->machine_irq = machine_irq; bind->u.pci.bus = bus; bind->u.pci.device = device; bind->u.pci.intx = intx; bind->u.isa.isa_irq = isa_irq; rc = do_domctl(xc_handle, &domctl); return rc; } int xc_domain_bind_pt_pci_irq( int xc_handle, uint32_t domid, uint8_t machine_irq, uint8_t bus, uint8_t device, uint8_t intx) { return (xc_domain_bind_pt_irq(xc_handle, domid, machine_irq, PT_IRQ_TYPE_PCI, bus, device, intx, 0)); } int xc_domain_bind_pt_isa_irq( int xc_handle, uint32_t domid, uint8_t machine_irq) { return (xc_domain_bind_pt_irq(xc_handle, domid, machine_irq, PT_IRQ_TYPE_ISA, 0, 0, 0, machine_irq)); } int xc_domain_memory_mapping( int xc_handle, uint32_t domid, unsigned long first_gfn, unsigned long first_mfn, unsigned long nr_mfns, uint32_t add_mapping) { DECLARE_DOMCTL; domctl.cmd = XEN_DOMCTL_memory_mapping; domctl.domain = domid; domctl.u.memory_mapping.first_gfn = first_gfn; domctl.u.memory_mapping.first_mfn = first_mfn; domctl.u.memory_mapping.nr_mfns = nr_mfns; domctl.u.memory_mapping.add_mapping = add_mapping; return do_domctl(xc_handle, &domctl); } int xc_domain_ioport_mapping( int xc_handle, uint32_t domid, uint32_t first_gport, uint32_t first_mport, uint32_t nr_ports, uint32_t add_mapping) { DECLARE_DOMCTL; domctl.cmd = XEN_DOMCTL_ioport_mapping; domctl.domain = domid; domctl.u.ioport_mapping.first_gport = first_gport; domctl.u.ioport_mapping.first_mport = first_mport; domctl.u.ioport_mapping.nr_ports = nr_ports; domctl.u.ioport_mapping.add_mapping = add_mapping; return do_domctl(xc_handle, &domctl); } int xc_domain_set_target( int xc_handle, uint32_t domid, uint32_t target) { DECLARE_DOMCTL; domctl.cmd = XEN_DOMCTL_set_target; domctl.domain = domid; domctl.u.set_target.target = target; return do_domctl(xc_handle, &domctl); } int xc_domain_subscribe_for_suspend( int xc_handle, domid_t dom, evtchn_port_t port) { DECLARE_DOMCTL; domctl.cmd = XEN_DOMCTL_subscribe; domctl.domain = dom; domctl.u.subscribe.port = port; return do_domctl(xc_handle, &domctl); } int xc_domain_set_machine_address_size(int xc, uint32_t domid, unsigned int width) { DECLARE_DOMCTL; memset(&domctl, 0, sizeof(domctl)); domctl.domain = domid; domctl.cmd = 
XEN_DOMCTL_set_machine_address_size; domctl.u.address_size.size = width; return do_domctl(xc, &domctl); } int xc_domain_get_machine_address_size(int xc, uint32_t domid) { DECLARE_DOMCTL; int rc; memset(&domctl, 0, sizeof(domctl)); domctl.domain = domid; domctl.cmd = XEN_DOMCTL_get_machine_address_size; rc = do_domctl(xc, &domctl); return rc == 0 ? domctl.u.address_size.size : rc; } int xc_domain_suppress_spurious_page_faults(int xc, uint32_t domid) { DECLARE_DOMCTL; memset(&domctl, 0, sizeof(domctl)); domctl.domain = domid; domctl.cmd = XEN_DOMCTL_suppress_spurious_page_faults; return do_domctl(xc, &domctl); } /* * Local variables: * mode: C * c-set-style: "BSD" * c-basic-offset: 4 * tab-width: 4 * indent-tabs-mode: nil * End: */