import React, { PropTypes } from 'react' import ReactDOM from 'react-dom' import shallowEqual from 'shallowequal' import AutoScroll from './helpers/AutoScroll' import { calcVScroll } from './helpers/VirtualScroll' import FlowTableHead from './FlowTable/FlowTableHead' import FlowRow from './FlowTable/FlowRow' import Filt from "../filt/filt" class FlowTable extends React.Component { static propTypes = { onSelect: PropTypes.func.isRequired, flows: PropTypes.array.isRequired, rowHeight: PropTypes.number, highlight: PropTypes.string, selected: PropTypes.object, } static defaultProps = { rowHeight: 32, } constructor(props, context) { super(props, context) this.state = { vScroll: calcVScroll() } this.onViewportUpdate = this.onViewportUpdate.bind(this) } componentWillMount() { window.addEventListener('resize', this.onViewportUpdate) } componentWillUnmount() { window.removeEventListener('resize', this.onViewportUpdate) } componentDidUpdate() { this.onViewportUpdate() if (!this.shouldScrollIntoView) { return } this.shouldScrollIntoView = false const { rowHeight, flows, selected } = this.props const viewport = ReactDOM.findDOMNode(this) const head = ReactDOM.findDOMNode(this.refs.head) const headHeight = head ? 
head.offsetHeight : 0 const rowTop = (flows.indexOf(selected) * rowHeight) + headHeight const rowBottom = rowTop + rowHeight const viewportTop = viewport.scrollTop const viewportHeight = viewport.offsetHeight // Account for pinned thead if (rowTop - headHeight < viewportTop) { viewport.scrollTop = rowTop - headHeight } else if (rowBottom > viewportTop + viewportHeight) { viewport.scrollTop = rowBottom - viewportHeight } } componentWillReceiveProps(nextProps) { if (nextProps.selected && nextProps.selected !== this.props.selected) { this.shouldScrollIntoView = true } } onViewportUpdate() { const viewport = ReactDOM.findDOMNode(this) const viewportTop = viewport.scrollTop const vScroll = calcVScroll({ viewportTop, viewportHeight: viewport.offsetHeight, itemCount: this.props.flows.length, rowHeight: this.props.rowHeight, }) if (this.state.viewportTop !== viewportTop || !shallowEqual(this.state.vScroll, vScroll)) { this.setState({ vScroll, viewportTop }) } } render() { const { vScroll, viewportTop } = this.state const { flows, selected, highlight } = this.props const isHighlighted = highlight ? Filt.parse(highlight) : () => false return (
/******************************************************************************
* xenctrl.h
*
* A library for low-level access to the Xen control interfaces.
*
* Copyright (c) 2003-2004, K A Fraser.
*
* xc_gnttab functions:
* Copyright (c) 2007-2008, D G Murray <Derek.Murray@cl.cam.ac.uk>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation;
* version 2.1 of the License.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef XENCTRL_H
#define XENCTRL_H
/* Tell the Xen public headers we are a user-space tools build. */
#ifndef __XEN_TOOLS__
#define __XEN_TOOLS__ 1
#endif
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <xen/xen.h>
#include <xen/domctl.h>
#include <xen/physdev.h>
#include <xen/sysctl.h>
#include <xen/version.h>
#include <xen/event_channel.h>
#include <xen/sched.h>
#include <xen/memory.h>
#include <xen/grant_table.h>
#include <xen/hvm/params.h>
#include <xen/xsm/flask_op.h>
#include <xen/tmem.h>
#include "xentoollog.h"
#if defined(__i386__) || defined(__x86_64__)
#include <xen/foreign/x86_32.h>
#include <xen/foreign/x86_64.h>
#include <xen/arch-x86/xen-mca.h>
#endif
#ifdef __ia64__
#define XC_PAGE_SHIFT 14
#else
#define XC_PAGE_SHIFT 12
#endif
#define XC_PAGE_SIZE (1UL << XC_PAGE_SHIFT)
#define XC_PAGE_MASK (~(XC_PAGE_SIZE-1))
#define INVALID_MFN (~0UL)
/*
* DEFINITIONS FOR CPU BARRIERS
*/
#define xen_barrier() asm volatile ( "" : : : "memory")
#if defined(__i386__)
#define xen_mb() asm volatile ( "lock; addl $0,0(%%esp)" : : : "memory" )
#define xen_rmb() xen_barrier()
#define xen_wmb() xen_barrier()
#elif defined(__x86_64__)
#define xen_mb() asm volatile ( "mfence" : : : "memory")
#define xen_rmb() xen_barrier()
#define xen_wmb() xen_barrier()
#elif defined(__ia64__)
#define xen_mb() asm volatile ("mf" ::: "memory")
#define xen_rmb() asm volatile ("mf" ::: "memory")
#define xen_wmb() asm volatile ("mf" ::: "memory")
#else
#error "Define barriers"
#endif
#define XENCTRL_HAS_XC_INTERFACE 1
/* In Xen 4.0 and earlier, xc_interface_open and xc_evtchn_open would
* both return ints being the file descriptor. In 4.1 and later, they
* return an xc_interface* and xc_evtchn*, respectively - ie, a
* pointer to an opaque struct. This #define is provided in 4.1 and
* later, allowing out-of-tree callers to more easily distinguish
* between, and be compatible with, both versions.
*/
/*
* GENERAL
*
* Unless otherwise specified, each function here returns zero or a
* non-null pointer on success; or in case of failure, sets errno and
* returns -1 or a null pointer.
*
* Unless otherwise specified, errors result in a call to the error
* handler function, which by default prints a message to the
* FILE* passed as the caller_data, which by default is stderr.
* (This is described below as "logging errors".)
*
* The error handler can safely trash errno, as libxc saves it across
* the callback.
*/
/*
 * All four handle types below are aliases for the same opaque core
 * structure; they are distinct typedefs purely for type-safety at the
 * API level (interface, event channel, grant table, grant sharing).
 */
typedef struct xc_interface_core xc_interface;
typedef struct xc_interface_core xc_evtchn;
typedef struct xc_interface_core xc_gnttab;
typedef struct xc_interface_core xc_gntshr;
typedef enum xc_error_code xc_error_code;
/*
* INITIALIZATION FUNCTIONS
*/
/**
* This function opens a handle to the hypervisor interface. This function can
* be called multiple times within a single process. Multiple processes can
* have an open hypervisor interface at the same time.
*
* Each call to this function should have a corresponding call to
* xc_interface_close().
*
* This function can fail if the caller does not have superuser permission or
* if a Xen-enabled kernel is not currently running.
*
* @return a handle to the hypervisor interface
*/
xc_interface *xc_interface_open(xentoollog_logger *logger,
xentoollog_logger *dombuild_logger,
unsigned open_flags);
/* if logger==NULL, will log to stderr
* if dombuild_logger==NULL, will log to a file
*/
/*
* Note: if XC_OPENFLAG_NON_REENTRANT is passed then libxc must not be
* called reentrantly and the calling application is responsible for
* providing mutual exclusion surrounding all libxc calls itself.
*
* In particular xc_{get,clear}_last_error only remain valid for the
* duration of the critical section containing the call which failed.
*/
/* Flags accepted by xc_interface_open() and xc_evtchn_open(). */
enum xc_open_flags {
XC_OPENFLAG_DUMMY = 1<<0, /* do not actually open a xenctrl interface */
XC_OPENFLAG_NON_REENTRANT = 1<<1, /* assume library is only ever called from a single thread */
};
/**
* This function closes an open hypervisor interface.
*
* This function can fail if the handle does not represent an open interface or
* if there were problems closing the interface. In the latter case
* the interface is still closed.
*
* @parm xch a handle to an open hypervisor interface
* @return 0 on success, -1 otherwise.
*/
int xc_interface_close(xc_interface *xch);
/**
* Query the active OS interface (i.e. that which would be returned by
* xc_interface_open) to find out if it is fake (i.e. backends onto
* something other than an actual Xen hypervisor).
*
* @return 0 if "real", >0 if fake, -1 on error.
*/
int xc_interface_is_fake(void);
/*
* HYPERCALL SAFE MEMORY BUFFER
*
* Ensure that memory which is passed to a hypercall has been
* specially allocated in order to be safe to access from the
* hypervisor.
*
* Each user data pointer is shadowed by an xc_hypercall_buffer data
* structure. You should never define an xc_hypercall_buffer type
* directly, instead use the DECLARE_HYPERCALL_BUFFER* macros below.
*
* The structure should be considered opaque and all access should be
* via the macros and helper functions defined below.
*
* Once the buffer is declared the user is responsible for explicitly
* allocating and releasing the memory using
* xc_hypercall_buffer_alloc(_pages) and
* xc_hypercall_buffer_free(_pages).
*
* Once the buffer has been allocated the user can initialise the data
* via the normal pointer. The xc_hypercall_buffer structure is
* transparently referenced by the helper macros (such as
* xen_set_guest_handle) in order to check at compile time that the
* correct type of memory is being used.
*/
struct xc_hypercall_buffer {
/* Hypercall safe memory buffer. */
void *hbuf;
/*
 * Reference to xc_hypercall_buffer passed as argument to the
 * current function.
 */
struct xc_hypercall_buffer *param_shadow;
/*
 * Direction of copy for bounce buffering.
 */
int dir;
/* Used iff dir != 0. */
void *ubuf; /* presumably the original user buffer being bounced — confirm in xc_hcall_buf.c */
size_t sz;  /* size of ubuf, used for the bounce copy */
};
typedef struct xc_hypercall_buffer xc_hypercall_buffer_t;
/*
* Construct the name of the hypercall buffer for a given variable.
* For internal use only
*/
#define XC__HYPERCALL_BUFFER_NAME(_name) xc__hypercall_buffer_##_name
/*
* Returns the hypercall_buffer associated with a variable.
*/
#define HYPERCALL_BUFFER(_name) \
({ xc_hypercall_buffer_t _val1; \
typeof(XC__HYPERCALL_BUFFER_NAME(_name)) *_val2 = &XC__HYPERCALL_BUFFER_NAME(_name); \
(void)(&_val1 == _val2); \
(_val2)->param_shadow ? (_val2)->param_shadow : (_val2); \
})
#define HYPERCALL_BUFFER_INIT_NO_BOUNCE .dir = 0, .sz = 0, .ubuf = (void *)-1
/*
* Defines a hypercall buffer and user pointer with _name of _type.
*
* The user accesses the data as normal via _name which will be
* transparently converted to the hypercall buffer as necessary.
*/
#define DECLARE_HYPERCALL_BUFFER(_type, _name) \
_type *_name = NULL; \
xc_hypercall_buffer_t XC__HYPERCALL_BUFFER_NAME(_name) = { \
.hbuf = NULL, \
.param_shadow = NULL, \
HYPERCALL_BUFFER_INIT_NO_BOUNCE \
}
/*
* Declare the necessary data structure to allow a hypercall buffer
* passed as an argument to a function to be used in the normal way.
*/
#define DECLARE_HYPERCALL_BUFFER_ARGUMENT(_name) \
xc_hypercall_buffer_t XC__HYPERCALL_BUFFER_NAME(_name) = { \
.hbuf = (void *)-1, \
.param_shadow = _name, \
HYPERCALL_BUFFER_INIT_NO_BOUNCE \
}
/*
* Get the hypercall buffer data pointer in a form suitable for use
* directly as a hypercall argument.
*/
#define HYPERCALL_BUFFER_AS_ARG(_name) \
({ xc_hypercall_buffer_t _val1; \
typeof(XC__HYPERCALL_BUFFER_NAME(_name)) *_val2 = HYPERCALL_BUFFER(_name); \
(void)(&_val1 == _val2); \
(unsigned long)(_val2)->hbuf; \
})
/*
* Set a xen_guest_handle in a type safe manner, ensuring that the
* data pointer has been correctly allocated.
*/
#undef set_xen_guest_handle
#define set_xen_guest_handle(_hnd, _val) \
do { \
xc_hypercall_buffer_t _val1; \
typeof(XC__HYPERCALL_BUFFER_NAME(_val)) *_val2 = HYPERCALL_BUFFER(_val); \
(void) (&_val1 == _val2); \
set_xen_guest_handle_raw(_hnd, (_val2)->hbuf); \
} while (0)
/* Use with set_xen_guest_handle in place of NULL */
extern xc_hypercall_buffer_t XC__HYPERCALL_BUFFER_NAME(HYPERCALL_BUFFER_NULL);
/*
* Allocate and free hypercall buffers with byte granularity.
*/
void *xc__hypercall_buffer_alloc(xc_interface *xch, xc_hypercall_buffer_t *b, size_t size);
#define xc_hypercall_buffer_alloc(_xch, _name, _size) xc__hypercall_buffer_alloc(_xch, HYPERCALL_BUFFER(_name), _size)
void xc__hypercall_buffer_free(xc_interface *xch, xc_hypercall_buffer_t *b);
#define xc_hypercall_buffer_free(_xch, _name) xc__hypercall_buffer_free(_xch, HYPERCALL_BUFFER(_name))
/*
* Allocate and free hypercall buffers with page alignment.
*/
void *xc__hypercall_buffer_alloc_pages(xc_interface *xch, xc_hypercall_buffer_t *b, int nr_pages);
#define xc_hypercall_buffer_alloc_pages(_xch, _name, _nr) xc__hypercall_buffer_alloc_pages(_xch, HYPERCALL_BUFFER(_name), _nr)
void xc__hypercall_buffer_free_pages(xc_interface *xch, xc_hypercall_buffer_t *b, int nr_pages);
#define xc_hypercall_buffer_free_pages(_xch, _name, _nr) xc__hypercall_buffer_free_pages(_xch, HYPERCALL_BUFFER(_name), _nr)
/*
* CPUMAP handling
*/
typedef uint8_t *xc_cpumap_t;
/* return maximum number of cpus the hypervisor supports */
int xc_get_max_cpus(xc_interface *xch);
/* return array size for cpumap */
int xc_get_cpumap_size(xc_interface *xch);
/* allocate a cpumap */
xc_cpumap_t xc_cpumap_alloc(xc_interface *xch);
/*
* DOMAIN DEBUGGING FUNCTIONS
*/
/*
 * Header describing the layout of a domain core dump as produced by
 * xc_domain_dumpcore() / xc_domain_dumpcore_via_callback() below.
 */
typedef struct xc_core_header {
unsigned int xch_magic; /* XC_CORE_MAGIC or XC_CORE_MAGIC_HVM (defined below) */
unsigned int xch_nr_vcpus; /* number of vcpus captured in the dump */
unsigned int xch_nr_pages; /* number of guest pages captured in the dump */
unsigned int xch_ctxt_offset; /* offset of the vcpu context section (presumably bytes — confirm in dump writer) */
unsigned int xch_index_offset; /* offset of the page index section (presumably bytes — confirm in dump writer) */
unsigned int xch_pages_offset; /* offset of the page data section (presumably bytes — confirm in dump writer) */
} xc_core_header_t;
#define XC_CORE_MAGIC 0xF00FEBED
#define XC_CORE_MAGIC_HVM 0xF00FEBEE
/*
* DOMAIN MANAGEMENT FUNCTIONS
*/
/* Snapshot of per-domain state, as filled in by xc_domain_getinfo(). */
typedef struct xc_dominfo {
uint32_t domid; /* domain identifier */
uint32_t ssidref; /* security identifier, as passed to xc_domain_create() */
unsigned int dying:1, crashed:1, shutdown:1, /* domain lifecycle state flags */
paused:1, blocked:1, running:1,
hvm:1, debugged:1;
unsigned int shutdown_reason; /* only meaningful if shutdown==1 */
unsigned long nr_pages; /* current number, not maximum */
unsigned long nr_shared_pages;
unsigned long nr_paged_pages;
unsigned long shared_info_frame;
uint64_t cpu_time;
unsigned long max_memkb; /* memory limit; see xc_domain_setmaxmem() */
unsigned int nr_online_vcpus;
unsigned int max_vcpu_id;
xen_domain_handle_t handle; /* opaque handle, as passed to xc_domain_create() */
unsigned int cpupool; /* id of the cpupool this domain belongs to */
} xc_dominfo_t;
typedef xen_domctl_getdomaininfo_t xc_domaininfo_t;
/*
 * Architecture-width-independent vcpu context: on x86 builds this can
 * hold either the 32-bit or 64-bit foreign-ABI layout (from the
 * xen/foreign headers included above) as well as the native layout.
 */
typedef union
{
#if defined(__i386__) || defined(__x86_64__)
vcpu_guest_context_x86_64_t x64;
vcpu_guest_context_x86_32_t x32;
#endif
vcpu_guest_context_t c;
} vcpu_guest_context_any_t;
/* Width-independent shared_info page layout; see vcpu_guest_context_any_t. */
typedef union
{
#if defined(__i386__) || defined(__x86_64__)
shared_info_x86_64_t x64;
shared_info_x86_32_t x32;
#endif
shared_info_t s;
} shared_info_any_t;
/* Width-independent start_info layout; see vcpu_guest_context_any_t. */
typedef union
{
#if defined(__i386__) || defined(__x86_64__)
start_info_x86_64_t x64;
start_info_x86_32_t x32;
#endif
start_info_t s;
} start_info_any_t;
int xc_domain_create(xc_interface *xch,
uint32_t ssidref,
xen_domain_handle_t handle,
uint32_t flags,
uint32_t *pdomid);
/* Functions to produce a dump of a given domain
* xc_domain_dumpcore - produces a dump to a specified file
* xc_domain_dumpcore_via_callback - produces a dump, using a specified
* callback function
*/
int xc_domain_dumpcore(xc_interface *xch,
uint32_t domid,
const char *corename);
/* Define the callback function type for xc_domain_dumpcore_via_callback.
*
* This function is called by the coredump code for every "write",
* and passes an opaque object for the use of the function and
* created by the caller of xc_domain_dumpcore_via_callback.
*/
typedef int (dumpcore_rtn_t)(xc_interface *xch,
void *arg, char *buffer, unsigned int length);
int xc_domain_dumpcore_via_callback(xc_interface *xch,
uint32_t domid,
void *arg,
dumpcore_rtn_t dump_rtn);
/*
* This function sets the maximum number of vcpus that a domain may create.
*
* @parm xch a handle to an open hypervisor interface.
* @parm domid the domain id in which vcpus are to be created.
* @parm max the maximum number of vcpus that the domain may create.
* @return 0 on success, -1 on failure.
*/
int xc_domain_max_vcpus(xc_interface *xch,
uint32_t domid,
unsigned int max);
/**
* This function pauses a domain. A paused domain still exists in memory
* however it does not receive any timeslices from the hypervisor.
*
* @parm xch a handle to an open hypervisor interface
* @parm domid the domain id to pause
* @return 0 on success, -1 on failure.
*/
int xc_domain_pause(xc_interface *xch,
uint32_t domid);
/**
* This function unpauses a domain. The domain should have been previously
* paused.
*
* @parm xch a handle to an open hypervisor interface
* @parm domid the domain id to unpause
* return 0 on success, -1 on failure
*/
int xc_domain_unpause(xc_interface *xch,
uint32_t domid);
/**
* This function will destroy a domain. Destroying a domain removes the domain
* completely from memory. This function should be called after sending the
* domain a SHUTDOWN control message to free up the domain resources.
*
* @parm xch a handle to an open hypervisor interface
* @parm domid the domain id to destroy
* @return 0 on success, -1 on failure
*/
int xc_domain_destroy(xc_interface *xch,
uint32_t domid);
/**
* This function resumes a suspended domain. The domain should have
* been previously suspended.
*
* @parm xch a handle to an open hypervisor interface
* @parm domid the domain id to resume
* @parm fast use cooperative resume (guest must support this)
* return 0 on success, -1 on failure
*/
int xc_domain_resume(xc_interface *xch,
uint32_t domid,
int fast);
/**
* This function will shutdown a domain. This is intended for use in
* fully-virtualized domains where this operation is analogous to the
* sched_op operations in a paravirtualized domain. The caller is
* expected to give the reason for the shutdown.
*
* @parm xch a handle to an open hypervisor interface
* @parm domid the domain id to destroy
* @parm reason is the reason (SHUTDOWN_xxx) for the shutdown
* @return 0 on success, -1 on failure
*/
int xc_domain_shutdown(xc_interface *xch,
uint32_t domid,
int reason);
int xc_watchdog(xc_interface *xch,
uint32_t id,
uint32_t timeout);
int xc_vcpu_setaffinity(xc_interface *xch,
uint32_t domid,
int vcpu,
xc_cpumap_t cpumap);
int xc_vcpu_getaffinity(xc_interface *xch,
uint32_t domid,
int vcpu,
xc_cpumap_t cpumap);
/**
* This function will return information about one or more domains. It is
* designed to iterate over the list of domains. If a single domain is
* requested, this function will return the next domain in the list - if
* one exists. It is, therefore, important in this case to make sure the
* domain requested was the one returned.
*
* @parm xch a handle to an open hypervisor interface
* @parm first_domid the first domain to enumerate information from. Domains
* are currently enumerated in order of creation.
* @parm max_doms the number of elements in info
* @parm info an array of max_doms size that will contain the information for
* the enumerated domains.
* @return the number of domains enumerated or -1 on error
*/
int xc_domain_getinfo(xc_interface *xch,
uint32_t first_domid,
unsigned int max_doms,
xc_dominfo_t *info);
/**
* This function will set the execution context for the specified vcpu.
*
* @parm xch a handle to an open hypervisor interface
* @parm domid the domain to set the vcpu context for
* @parm vcpu the vcpu number for the context
* @parm ctxt pointer to the cpu context with the values to set
* @return the number of domains enumerated or -1 on error
*/
int xc_vcpu_setcontext(xc_interface *xch,
uint32_t domid,
uint32_t vcpu,
vcpu_guest_context_any_t *ctxt);
/**
* This function will return information about one or more domains, using a
* single hypercall. The domain information will be stored into the supplied
* array of xc_domaininfo_t structures.
*
* @parm xch a handle to an open hypervisor interface
* @parm first_domain the first domain to enumerate information from.
* Domains are currently enumerated in order of creation.
* @parm max_domains the number of elements in info
* @parm info an array of max_doms size that will contain the information for
* the enumerated domains.
* @return the number of domains enumerated or -1 on error
*/
int xc_domain_getinfolist(xc_interface *xch,
uint32_t first_domain,
unsigned int max_domains,
xc_domaininfo_t *info);
/**
* This function returns information about the context of a hvm domain
* @parm xch a handle to an open hypervisor interface
* @parm domid the domain to get information from
* @parm ctxt_buf a pointer to a structure to store the execution context of
* the hvm domain
* @parm size the size of ctxt_buf in bytes
* @return 0 on success, -1 on failure
*/
int xc_domain_hvm_getcontext(xc_interface *xch,
uint32_t domid,
uint8_t *ctxt_buf,
uint32_t size);
/**
* This function returns one element of the context of a hvm domain
* @parm xch a handle to an open hypervisor interface
* @parm domid the domain to get information from
* @parm typecode which type of element required
* @parm instance which instance of the type
* @parm ctxt_buf a pointer to a structure to store the execution context of
* the hvm domain
* @parm size the size of ctxt_buf (must be >= HVM_SAVE_LENGTH(typecode))
* @return 0 on success, -1 on failure
*/
int xc_domain_hvm_getcontext_partial(xc_interface *xch,
uint32_t domid,
uint16_t typecode,
uint16_t instance,
void *ctxt_buf,
uint32_t size);
/**
* This function will set the context for hvm domain
*
* @parm xch a handle to an open hypervisor interface
* @parm domid the domain to set the hvm domain context for
* @parm hvm_ctxt pointer to the hvm context with the values to set
* @parm size the size of hvm_ctxt in bytes
* @return 0 on success, -1 on failure
*/
int xc_domain_hvm_setcontext(xc_interface *xch,
uint32_t domid,
uint8_t *hvm_ctxt,
uint32_t size);
/**
* This function returns information about the execution context of a
* particular vcpu of a domain.
*
* @parm xch a handle to an open hypervisor interface
* @parm domid the domain to get information from
* @parm vcpu the vcpu number
* @parm ctxt a pointer to a structure to store the execution context of the
* domain
* @return 0 on success, -1 on failure
*/
int xc_vcpu_getcontext(xc_interface *xch,
uint32_t domid,
uint32_t vcpu,
vcpu_guest_context_any_t *ctxt);
typedef xen_domctl_getvcpuinfo_t xc_vcpuinfo_t;
int xc_vcpu_getinfo(xc_interface *xch,
uint32_t domid,
uint32_t vcpu,
xc_vcpuinfo_t *info);
long long xc_domain_get_cpu_usage(xc_interface *xch,
domid_t domid,
int vcpu);
int xc_domain_sethandle(xc_interface *xch, uint32_t domid,
xen_domain_handle_t handle);
typedef xen_domctl_shadow_op_stats_t xc_shadow_op_stats_t;
int xc_shadow_control(xc_interface *xch,
uint32_t domid,
unsigned int sop,
xc_hypercall_buffer_t *dirty_bitmap,
unsigned long pages,
unsigned long *mb,
uint32_t mode,
xc_shadow_op_stats_t *stats);
int xc_sedf_domain_set(xc_interface *xch,
uint32_t domid,
uint64_t period, uint64_t slice,
uint64_t latency, uint16_t extratime,
uint16_t weight);
int xc_sedf_domain_get(xc_interface *xch,
uint32_t domid,
uint64_t* period, uint64_t *slice,
uint64_t *latency, uint16_t *extratime,
uint16_t *weight);
int xc_sched_credit_domain_set(xc_interface *xch,
uint32_t domid,
struct xen_domctl_sched_credit *sdom);
int xc_sched_credit_domain_get(xc_interface *xch,
uint32_t domid,
struct xen_domctl_sched_credit *sdom);
int xc_sched_credit2_domain_set(xc_interface *xch,
uint32_t domid,
struct xen_domctl_sched_credit2 *sdom);
int xc_sched_credit2_domain_get(xc_interface *xch,
uint32_t domid,
struct xen_domctl_sched_credit2 *sdom);
int
xc_sched_arinc653_schedule_set(
xc_interface *xch,
struct xen_sysctl_arinc653_schedule *schedule);
int
xc_sched_arinc653_schedule_get(
xc_interface *xch,
struct xen_sysctl_arinc653_schedule *schedule);
/**
* This function sends a trigger to a domain.
*
* @parm xch a handle to an open hypervisor interface
* @parm domid the domain id to send trigger
* @parm trigger the trigger type
* @parm vcpu the vcpu number to send trigger
* return 0 on success, -1 on failure
*/
int xc_domain_send_trigger(xc_interface *xch,
uint32_t domid,
uint32_t trigger,
uint32_t vcpu);
/**
* This function enables or disable debugging of a domain.
*
* @parm xch a handle to an open hypervisor interface
* @parm domid the domain id to send trigger
* @parm enable true to enable debugging
* return 0 on success, -1 on failure
*/
int xc_domain_setdebugging(xc_interface *xch,
uint32_t domid,
unsigned int enable);
/**
* This function audits the (top level) p2m of a domain
* and returns the different error counts, if any.
*
* @parm xch a handle to an open hypervisor interface
* @parm domid the domain id whose top level p2m we
* want to audit
* @parm orphans count of m2p entries for valid
* domain pages containing an invalid value
* @parm m2p_bad count of m2p entries mismatching the
* associated p2m entry for this domain
* @parm p2m_bad count of p2m entries for this domain
* mismatching the associated m2p entry
* return 0 on success, -1 on failure
* errno values on failure include:
* -ENOSYS: not implemented
* -EFAULT: could not copy results back to guest
*/
int xc_domain_p2m_audit(xc_interface *xch,
uint32_t domid,
uint64_t *orphans,
uint64_t *m2p_bad,
uint64_t *p2m_bad);
/**
* This function sets or clears the requirement that an access memory
* event listener is required on the domain.
*
* @parm xch a handle to an open hypervisor interface
* @parm domid the domain id to send trigger
* @parm enable true to require a listener
* return 0 on success, -1 on failure
*/
int xc_domain_set_access_required(xc_interface *xch,
uint32_t domid,
unsigned int required);
/**
* This function sets the handler of global VIRQs sent by the hypervisor
*
* @parm xch a handle to an open hypervisor interface
* @parm domid the domain id which will handle the VIRQ
* @parm virq the virq number (VIRQ_*)
* return 0 on success, -1 on failure
*/
int xc_domain_set_virq_handler(xc_interface *xch, uint32_t domid, int virq);
/*
* CPUPOOL MANAGEMENT FUNCTIONS
*/
/*
 * Information about one cpupool, as returned by xc_cpupool_getinfo().
 * Free with xc_cpupool_infofree().
 */
typedef struct xc_cpupoolinfo {
uint32_t cpupool_id; /* id of this pool */
uint32_t sched_id; /* id of the scheduler used by this pool */
uint32_t n_dom; /* number of domains assigned to this pool */
xc_cpumap_t cpumap; /* map of cpus assigned to this pool */
} xc_cpupoolinfo_t;
/**
* Create a new cpupool.
*
* @parm xc_handle a handle to an open hypervisor interface
* @parm ppoolid pointer to the new cpupool id (in/out)
* @parm sched_id id of scheduler to use for pool
* return 0 on success, -1 on failure
*/
int xc_cpupool_create(xc_interface *xch,
uint32_t *ppoolid,
uint32_t sched_id);
/**
* Destroy a cpupool. Pool must be unused and have no cpu assigned.
*
* @parm xc_handle a handle to an open hypervisor interface
* @parm poolid id of the cpupool to destroy
* return 0 on success, -1 on failure
*/
int xc_cpupool_destroy(xc_interface *xch,
uint32_t poolid);
/**
* Get cpupool info. Returns info for up to the specified number of cpupools
* starting at the given id.
* @parm xc_handle a handle to an open hypervisor interface
* @parm poolid lowest id for which info is returned
* return cpupool info ptr (to be freed via xc_cpupool_infofree)
*/
xc_cpupoolinfo_t *xc_cpupool_getinfo(xc_interface *xch,
uint32_t poolid);
/**
* Free cpupool info. Used to free info obtained via xc_cpupool_getinfo.
* @parm xc_handle a handle to an open hypervisor interface
* @parm info area to free
*/
void xc_cpupool_infofree(xc_interface *xch,
xc_cpupoolinfo_t *info);
/**
* Add cpu to a cpupool. cpu may be -1 indicating the first unassigned.
*
* @parm xc_handle a handle to an open hypervisor interface
* @parm poolid id of the cpupool
* @parm cpu cpu number to add
* return 0 on success, -1 on failure
*/
int xc_cpupool_addcpu(xc_interface *xch,
uint32_t poolid,
int cpu);
/**
* Remove cpu from cpupool. cpu may be -1 indicating the last cpu of the pool.
*
* @parm xc_handle a handle to an open hypervisor interface
* @parm poolid id of the cpupool
* @parm cpu cpu number to remove
* return 0 on success, -1 on failure
*/
int xc_cpupool_removecpu(xc_interface *xch,
uint32_t poolid,
int cpu);
/**
* Move domain to another cpupool.
*
* @parm xc_handle a handle to an open hypervisor interface
* @parm poolid id of the destination cpupool
* @parm domid id of the domain to move
* return 0 on success, -1 on failure
*/
int xc_cpupool_movedomain(xc_interface *xch,
uint32_t poolid,
uint32_t domid);
/**
* Return map of cpus not in any cpupool.
*
* @parm xc_handle a handle to an open hypervisor interface
* return cpumap array on success, NULL else
*/
xc_cpumap_t xc_cpupool_freeinfo(xc_interface *xch);
/*
* EVENT CHANNEL FUNCTIONS
*
* None of these do any logging.
*/
/* A port identifier is guaranteed to fit in 31 bits. */
typedef int evtchn_port_or_error_t;
/**
* This function allocates an unbound port. Ports are named endpoints used for
* interdomain communication. This function is most useful in opening a
* well-known port within a domain to receive events on.
*
* NOTE: If you are allocating a *local* unbound port, you probably want to
* use xc_evtchn_bind_unbound_port(). This function is intended for allocating
* ports *only* during domain creation.
*
* @parm xch a handle to an open hypervisor interface
* @parm dom the ID of the local domain (the 'allocatee')
* @parm remote_dom the ID of the domain who will later bind
* @return allocated port (in @dom) on success, -1 on failure
*/
evtchn_port_or_error_t
xc_evtchn_alloc_unbound(xc_interface *xch,
uint32_t dom,
uint32_t remote_dom);
int xc_evtchn_reset(xc_interface *xch,
uint32_t dom);
typedef struct evtchn_status xc_evtchn_status_t;
int xc_evtchn_status(xc_interface *xch, xc_evtchn_status_t *status);
/*
* Return a handle to the event channel driver, or -1 on failure, in which case
* errno will be set appropriately.
*
* Before Xen pre-4.1 this function would sometimes report errors with perror.
*/
xc_evtchn *xc_evtchn_open(xentoollog_logger *logger,
unsigned open_flags);
/*
* Close a handle previously allocated with xc_evtchn_open().
*/
int xc_evtchn_close(xc_evtchn *xce);
/*
* Return an fd that can be select()ed on.
*/
int xc_evtchn_fd(xc_evtchn *xce);
/*
* Notify the given event channel. Returns -1 on failure, in which case
* errno will be set appropriately.
*/
int xc_evtchn_notify(xc_evtchn *xce, evtchn_port_t port);
/*
* Returns a new event port awaiting interdomain connection from the given
* domain ID, or -1 on failure, in which case errno will be set appropriately.
*/
evtchn_port_or_error_t
xc_evtchn_bind_unbound_port(xc_evtchn *xce, int domid);
/*
* Returns a new event port bound to the remote port for the given domain ID,
* or -1 on failure, in which case errno will be set appropriately.
*/
evtchn_port_or_error_t
xc_evtchn_bind_interdomain(xc_evtchn *xce, int domid,
evtchn_port_t remote_port);
/*
* Bind an event channel to the given VIRQ. Returns the event channel bound to
* the VIRQ, or -1 on failure, in which case errno will be set appropriately.
*/
evtchn_port_or_error_t
xc_evtchn_bind_virq(xc_evtchn *xce, unsigned int virq);
/*
* Unbind the given event channel. Returns -1 on failure, in which case errno
* will be set appropriately.
*/
int xc_evtchn_unbind(xc_evtchn *xce, evtchn_port_t port);
/*
* Return the next event channel to become pending, or -1 on failure, in which
* case errno will be set appropriately.
*/
evtchn_port_or_error_t
xc_evtchn_pending(xc_evtchn *xce);
/*
* Unmask the given event channel. Returns -1 on failure, in which case errno
* will be set appropriately.
*/
int xc_evtchn_unmask(xc_evtchn *xce, evtchn_port_t port);
int xc_physdev_pci_access_modify(xc_interface *xch,
uint32_t domid,
int bus,
int dev,
int func,
int enable);
int xc_readconsolering(xc_interface *xch,
char *buffer,
unsigned int *pnr_chars,
int clear, int incremental, uint32_t *pindex);
int xc_send_debug_keys(xc_interface *xch, char *keys);
typedef xen_sysctl_physinfo_t xc_physinfo_t;
typedef xen_sysctl_topologyinfo_t xc_topologyinfo_t;
typedef xen_sysctl_numainfo_t xc_numainfo_t;
typedef uint32_t xc_cpu_to_node_t;
typedef uint32_t xc_cpu_to_socket_t;
typedef uint32_t xc_cpu_to_core_t;
typedef uint64_t xc_node_to_memsize_t;
typedef uint64_t xc_node_to_memfree_t;
typedef uint32_t xc_node_to_node_dist_t;
int xc_physinfo(xc_interface *xch, xc_physinfo_t *info);
int xc_topologyinfo(xc_interface *xch, xc_topologyinfo_t *info);
int xc_numainfo(xc_interface *xch, xc_numainfo_t *info);
int xc_sched_id(xc_interface *xch,
int *sched_id);
int xc_machphys_mfn_list(xc_interface *xch,
unsigned long max_extents,
xen_pfn_t *extent_start);
typedef xen_sysctl_cpuinfo_t xc_cpuinfo_t;
int xc_getcpuinfo(xc_interface *xch, int max_cpus,
xc_cpuinfo_t *info, int *nr_cpus);
int xc_domain_setmaxmem(xc_interface *xch,
uint32_t domid,
unsigned int max_memkb);
int xc_domain_set_memmap_limit(xc_interface *xch,
uint32_t domid,
unsigned long map_limitkb);
#if defined(__i386__) || defined(__x86_64__)
/*
 * PC BIOS standard E820 types and structure.
 */
#define E820_RAM 1
#define E820_RESERVED 2
#define E820_ACPI 3
#define E820_NVS 4
#define E820_UNUSABLE 5
/* Maximum number of entries in an E820 map. */
#define E820MAX (128)
struct e820entry {
uint64_t addr; /* start of the memory region */
uint64_t size; /* size of the memory region, in bytes */
uint32_t type; /* one of the E820_* values above */
} __attribute__((packed)); /* packed: layout must match the BIOS table */
/* Install an E820 memory map ('nr_entries' entries) for a domain. */
int xc_domain_set_memory_map(xc_interface *xch,
uint32_t domid,
struct e820entry entries[],
uint32_t nr_entries);
/* Retrieve the host E820 memory map, up to 'max_entries' entries. */
int xc_get_machine_memory_map(xc_interface *xch,
struct e820entry entries[],
uint32_t max_entries);
#endif
/* Set the domain's time offset, in seconds -- presumably relative to the
 * host clock; TODO confirm. */
int xc_domain_set_time_offset(xc_interface *xch,
uint32_t domid,
int32_t time_offset_seconds);
/* Set a domain's TSC (time stamp counter) virtualization parameters. */
int xc_domain_set_tsc_info(xc_interface *xch,
uint32_t domid,
uint32_t tsc_mode,
uint64_t elapsed_nsec,
uint32_t gtsc_khz,
uint32_t incarnation);
/* Retrieve a domain's TSC virtualization parameters through the
 * out-pointers. */
int xc_domain_get_tsc_info(xc_interface *xch,
uint32_t domid,
uint32_t *tsc_mode,
uint64_t *elapsed_nsec,
uint32_t *gtsc_khz,
uint32_t *incarnation);
/* Mark the domain as unable to migrate -- TODO confirm exact effect. */
int xc_domain_disable_migrate(xc_interface *xch, uint32_t domid);
/* NOTE(review): the name suggests this reports the domain's maximum
 * GPFN via the (plain int) return value; confirm the return convention
 * and overflow behaviour against the implementation. */
int xc_domain_maximum_gpfn(xc_interface *xch, domid_t domid);
/*
 * Allocate nr_extents extents of order extent_order to the domain;
 * the allocated frames are presumably returned in extent_start -- TODO
 * confirm. Presumably may succeed partially, given the separate _exact
 * variant below.
 */
int xc_domain_increase_reservation(xc_interface *xch,
uint32_t domid,
unsigned long nr_extents,
unsigned int extent_order,
unsigned int mem_flags,
xen_pfn_t *extent_start);
/* As xc_domain_increase_reservation(), but presumably fails unless the
 * entire request can be satisfied -- TODO confirm. */
int xc_domain_increase_reservation_exact(xc_interface *xch,
uint32_t domid,
unsigned long nr_extents,
unsigned int extent_order,
unsigned int mem_flags,
xen_pfn_t *extent_start);
/*
 * Release nr_extents extents of order extent_order (listed in
 * extent_start) from the domain.
 */
int xc_domain_decrease_reservation(xc_interface *xch,
uint32_t domid,
unsigned long nr_extents,
unsigned int extent_order,
xen_pfn_t *extent_start);
/* All-or-nothing variant of xc_domain_decrease_reservation() --
 * TODO confirm. */
int xc_domain_decrease_reservation_exact(xc_interface *xch,
uint32_t domid,
unsigned long nr_extents,
unsigned int extent_order,
xen_pfn_t *extent_start);
/*
 * Map item 'idx' of address space 'space' into the domain's physmap at
 * frame 'gpfn'. NOTE(review): the encoding of 'space' is defined by the
 * hypervisor ABI, not visible here -- confirm against the public headers.
 */
int xc_domain_add_to_physmap(xc_interface *xch,
uint32_t domid,
unsigned int space,
unsigned long idx,
xen_pfn_t gpfn);
/*
 * Populate a domain's physmap with nr_extents extents of order
 * extent_order. extent_start presumably supplies the guest frame
 * numbers to populate -- TODO confirm the in/out convention.
 */
int xc_domain_populate_physmap(xc_interface *xch,
uint32_t domid,
unsigned long nr_extents,
unsigned int extent_order,
unsigned int mem_flags,
xen_pfn_t *extent_start);
/* As xc_domain_populate_physmap(), but presumably fails unless the
 * entire request succeeds -- TODO confirm. */
int xc_domain_populate_physmap_exact(xc_interface *xch,
uint32_t domid,
unsigned long nr_extents,
unsigned int extent_order,
unsigned int mem_flags,
xen_pfn_t *extent_start);
/*
 * Exchange nr_in_extents extents of order in_order (listed in
 * in_extents) for nr_out_extents extents of order out_order (returned
 * in out_extents). NOTE(review): 'domid' is a plain int here, unlike
 * the uint32_t used by neighbouring calls -- confirm this asymmetry is
 * intentional.
 */
int xc_domain_memory_exchange_pages(xc_interface *xch,
int domid,
unsigned long nr_in_extents,
unsigned int in_order,
xen_pfn_t *in_extents,
unsigned long nr_out_extents,
unsigned int out_order,
xen_pfn_t *out_extents);
/*
 * Set the populate-on-demand (PoD) target for a domain; the current
 * totals are reported through the out-pointers -- TODO confirm whether
 * the out-pointers may be NULL.
 */
int xc_domain_set_pod_target(xc_interface *xch,
uint32_t domid,
uint64_t target_pages,
uint64_t *tot_pages,
uint64_t *pod_cache_pages,
uint64_t *pod_entries);
/* Query a domain's current populate-on-demand state. */
int xc_domain_get_pod_target(xc_interface *xch,
uint32_t domid,
uint64_t *tot_pages,
uint64_t *pod_cache_pages,
uint64_t *pod_entries);
/* Grant or revoke (per 'allow_access') a domain's access to 'nr_ports'
 * I/O ports starting at 'first_port'. */
int xc_domain_ioport_permission(xc_interface *xch,
uint32_t domid,
uint32_t first_port,
uint32_t nr_ports,
uint32_t allow_access);
/* Grant or revoke a domain's access to a physical IRQ. */
int xc_domain_irq_permission(xc_interface *xch,
uint32_t domid,
uint8_t pirq,
uint8_t allow_access);
/* Grant or revoke a domain's access to a range of machine frames. */
int xc_domain_iomem_permission(xc_interface *xch,
uint32_t domid,
unsigned long first_mfn,
unsigned long nr_mfns,
uint8_t allow_access);
/* Pin cache attribute 'type' onto the guest physical range
 * [start, end] -- TODO confirm whether 'end' is inclusive. */
int xc_domain_pin_memory_cacheattr(xc_interface *xch,
uint32_t domid,
uint64_t start,
uint64_t end,
uint32_t type);
/* Per the name, presumably exchanges 'mfn' for a machine frame below
 * 4GiB and returns the replacement frame -- verify the failure return
 * value against the implementation. */
unsigned long xc_make_page_below_4G(xc_interface *xch, uint32_t domid,
unsigned long mfn);
/* Performance-counter (perfc) sysctl types and operations. */
typedef xen_sysctl_perfc_desc_t xc_perfc_desc_t;
typedef xen_sysctl_perfc_val_t xc_perfc_val_t;
/* Reset all hypervisor performance counters. */
int xc_perfc_reset(xc_interface *xch);
/* Report the number of counter descriptors and values available. */
int xc_perfc_query_number(xc_interface *xch,
int *nbr_desc,
int *nbr_val);
/* Read counter descriptors and values into caller-provided hypercall
 * buffers. */
int xc_perfc_query(xc_interface *xch,
xc_hypercall_buffer_t *desc,
xc_hypercall_buffer_t *val);
/* Lock-profiling sysctl type and operations. */
typedef xen_sysctl_lockprof_data_t xc_lockprof_data_t;
/* Reset hypervisor lock-profiling data. */
int xc_lockprof_reset(xc_interface *xch);
/* Report the number of lock-profiling elements available. */
int xc_lockprof_query_number(xc_interface *xch,
uint32_t *n_elems);
/* Read lock-profiling data into 'data'; *n_elems is presumably in/out
 * (capacity in, count out) -- TODO confirm. */
int xc_lockprof_query(xc_interface *xch,
uint32_t *n_elems,
uint64_t *time,
xc_hypercall_buffer_t *data);
/* Allocate 'size' bytes aligned to 'alignment' -- TODO confirm how the
 * memory is released (plain free() vs. a dedicated call). */
void *xc_memalign(xc_interface *xch, size_t alignment, size_t size);
/**
 * Memory maps a range within one domain to a local address range. Mappings
 * should be unmapped with munmap and should follow the same rules as mmap
 * regarding page alignment. Returns NULL on failure.
 *
 * @parm xch a handle on an open hypervisor interface
 * @parm dom the domain to map memory from
 * @parm size the amount of memory to map (in multiples of page size)
 * @parm prot same flag as in mmap().
 * @parm mfn the frame address to map.
 */
void *xc_map_foreign_range(xc_interface *xch, uint32_t dom,
int size, int prot,
unsigned long mfn );
/* As xc_map_foreign_range(), but maps the 'num' (possibly
 * discontiguous) frames listed in 'arr' as one contiguous local range.
 * Returns NULL on failure -- TODO confirm. */
void *xc_map_foreign_pages(xc_interface *xch, uint32_t dom, int prot,
const xen_pfn_t *arr, int num );
/**
 * DEPRECATED - use xc_map_foreign_bulk() instead.
 *
 * Like xc_map_foreign_pages(), except it can succeed partially.
 * When a page cannot be mapped, its PFN in @arr is or'ed with
 * 0xF0000000 to indicate the error.
 */
void *xc_map_foreign_batch(xc_interface *xch, uint32_t dom, int prot,
xen_pfn_t *arr, int num );
/**
 * Like xc_map_foreign_pages(), except it can succeed partially.
 * When a page cannot be mapped, its respective field in @err is
 * set to the corresponding errno value.
 */
void *xc_map_foreign_bulk(xc_interface *xch, uint32_t dom, int prot,
const xen_pfn_t *arr, int *err, unsigned int num);
/**
 * Translates a virtual address in the context of a given domain and
 * vcpu returning the GFN containing the address (that is, an MFN for
 * PV guests, a PFN for HVM guests). Returns 0 for failure.
 *
 * @parm xch a handle on an open hypervisor interface
 * @parm dom the domain to perform the translation in
 * @parm vcpu the vcpu to perform the translation on
 * @parm virt the virtual address to translate
 */
unsigned long xc_translate_foreign_address(xc_interface *xch, uint32_t dom,
int vcpu, unsigned long long virt);
/**
 * DEPRECATED. Avoid using this, as it does not correctly account for PFNs
 * without a backing MFN.
 */
int xc_get_pfn_list(xc_interface *xch, uint32_t domid, uint64_t *pfn_buf,
unsigned long max_pfns);
/* Default FPSR value for ia64 guests -- TODO confirm. */
unsigned long xc_ia64_fpsr_default(void);
/* Copy one page of data from 'src_page' into frame 'dst_pfn' of the
 * domain. */
int xc_copy_to_domain_page(xc_interface *xch, uint32_t domid,
unsigned long dst_pfn, const char *src_page);
/* Presumably zero-fills frame 'dst_pfn' of the domain -- confirm
 * against the implementation. */
int xc_clear_domain_page(xc_interface *xch, uint32_t domid,
unsigned long dst_pfn);
/* Issue 'nr_ops' MMU extended operations on behalf of domain 'dom'. */
int xc_mmuext_op(xc_interface *xch, struct mmuext_op *op, unsigned int nr_ops,
domid_t dom);
/* System wide memory properties */
/* Highest machine page number on the host -- TODO confirm whether this
 * is the maximum valid MFN or one past it. */
long xc_maximum_ram_page(xc_interface *xch);
/**
 * This function returns the total number of pages freed by using sharing
 * on the system. For example, if two domains contain a single entry in
 * their p2m map that points to the same shared page (and no other pages
 * in the system are shared), then this function should return 1.
 */
long xc_sharing_freed_pages(xc_interface *xch);
/**
 * This function returns the total number of frames occupied by shared
 * pages on the system. This is independent of the number of domains
 * pointing at these frames. For example, in the above scenario this
 * should return 1. The following should hold:
 * memory usage without sharing = freed_pages + used_frames
 */
long xc_sharing_used_frames(xc_interface *xch);
/* Get current total pages allocated to a domain. */
long xc_get_tot_pages(xc_interface *xch, uint32_t domid);
/**
 * This function retrieves the number of bytes available
 * in the heap in a specific range of address-widths and nodes.
 *
 * @parm xch a handle to an open hypervisor interface
 * @parm min_width the smallest address width to query (0 if don't care)
 * @parm max_width the largest address width to query (0 if don't care)
 * @parm node the node to query (-1 for all)
 * @parm *bytes caller variable to put total bytes counted
 * @return 0 on success, <0 on failure.
 */
int xc_availheap(xc_interface *xch, int min_width, int max_width, int node,
uint64_t *bytes);
/*
 * Trace Buffer Operations
 */
/**
 * xc_tbuf_enable - enable tracing buffers
 *
 * @parm xch a handle to an open hypervisor interface
 * @parm pages size of tracing buffers to create (in pages)
 * @parm mfn location to store mfn of the trace buffers to
 * @parm size location to store the size (in bytes) of a trace buffer to
 *
 * Gets the machine address of the trace pointer area and the size of the
 * per CPU buffers.
 */
int xc_tbuf_enable(xc_interface *xch, unsigned long pages,
unsigned long *mfn, unsigned long *size);
/*
 * Disable tracing buffers.
 */
int xc_tbuf_disable(xc_interface *xch);
/**
 * This function sets the size of the trace buffers. Setting the size
 * is currently a one-shot operation that may be performed either at boot
 * time or via this interface, not both. The buffer size must be set before
 * enabling tracing.
 *
 * @parm xch a handle to an open hypervisor interface
 * @parm size the size in pages per cpu for the trace buffers
 * @return 0 on success, -1 on failure.
 */
int xc_tbuf_set_size(xc_interface *xch, unsigned long size);
/**
 * This function retrieves the current size of the trace buffers.
 * Note that the size returned is in terms of bytes, not pages.
 * @parm xch a handle to an open hypervisor interface
 * @parm size will contain the size in bytes for the trace buffers
 * @return 0 on success, -1 on failure.
 */
int xc_tbuf_get_size(xc_interface *xch, unsigned long *size);
/* Restrict tracing to the CPUs in 'mask' -- TODO confirm mask encoding. */
int xc_tbuf_set_cpu_mask(xc_interface *xch, uint32_t mask);
/* Select which trace event classes are recorded -- TODO confirm. */
int xc_tbuf_set_evt_mask(xc_interface *xch, uint32_t mask);