/****************************************************************************** * domain.c * * Generic domain-handling functions. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* Linux config option: propageted to domain0 */ /* xen_processor_pmbits: xen control Cx, Px, ... */ unsigned int xen_processor_pmbits = XEN_PROCESSOR_PM_PX; /* opt_dom0_vcpus_pin: If true, dom0 VCPUs are pinned. */ static unsigned int opt_dom0_vcpus_pin; boolean_param("dom0_vcpus_pin", opt_dom0_vcpus_pin); /* set xen as default cpufreq */ enum cpufreq_controller cpufreq_controller = FREQCTL_xen; static void __init setup_cpufreq_option(char *str) { char *arg; if ( !strcmp(str, "dom0-kernel") ) { xen_processor_pmbits &= ~XEN_PROCESSOR_PM_PX; cpufreq_controller = FREQCTL_dom0_kernel; opt_dom0_vcpus_pin = 1; return; } if ( !strcmp(str, "none") ) { xen_processor_pmbits &= ~XEN_PROCESSOR_PM_PX; cpufreq_controller = FREQCTL_none; return; } if ( (arg = strpbrk(str, ",:")) != NULL ) *arg++ = '\0'; if ( !strcmp(str, "xen") ) if ( arg && *arg ) cpufreq_cmdline_parse(arg); } custom_param("cpufreq", setup_cpufreq_option); /* Protect updates/reads (resp.) of domain_list and domain_hash. 
*/ DEFINE_SPINLOCK(domlist_update_lock); DEFINE_RCU_READ_LOCK(domlist_read_lock); #define DOMAIN_HASH_SIZE 256 #define DOMAIN_HASH(_id) ((int)(_id)&(DOMAIN_HASH_SIZE-1)) static struct domain *domain_hash[DOMAIN_HASH_SIZE]; struct domain *domain_list; struct domain *dom0; struct vcpu *idle_vcpu[NR_CPUS] __read_mostly; vcpu_info_t dummy_vcpu_info; int current_domain_id(void) { return current->domain->domain_id; } static void __domain_finalise_shutdown(struct domain *d) { struct vcpu *v; BUG_ON(!spin_is_locked(&d->shutdown_lock)); if ( d->is_shut_down ) return; for_each_vcpu ( d, v ) if ( !v->paused_for_shutdown ) return; d->is_shut_down = 1; if ( (d->shutdown_code == SHUTDOWN_suspend) && d->suspend_evtchn ) evtchn_send(d, d->suspend_evtchn); else send_guest_global_virq(dom0, VIRQ_DOM_EXC); } static void vcpu_check_shutdown(struct vcpu *v) { struct domain *d = v->domain; spin_lock(&d->shutdown_lock); if ( d->is_shutting_down ) { if ( !v->paused_for_shutdown ) vcpu_pause_nosync(v); v->paused_for_shutdown = 1; v->defer_shutdown = 0; __domain_finalise_shutdown(d); } spin_unlock(&d->shutdown_lock); } struct vcpu *alloc_vcpu( struct domain *d, unsigned int vcpu_id, unsigned int cpu_id) { struct vcpu *v; BUG_ON((!is_idle_domain(d) || vcpu_id) && d->vcpu[vcpu_id]); if ( (v = alloc_vcpu_struct()) == NULL ) return NULL; v->domain = d; v->vcpu_id = vcpu_id; spin_lock_init(&v->virq_lock); if ( is_idle_domain(d) ) { v->runstate.state = RUNSTATE_running; } else { v->runstate.state = RUNSTATE_offline; v->runstate.state_entry_time = NOW(); set_bit(_VPF_down, &v->pause_flags); v->vcpu_info = ((vcpu_id < XEN_LEGACY_MAX_VCPUS) ? 
(vcpu_info_t *)&shared_info(d, vcpu_info[vcpu_id]) : &dummy_vcpu_info); } if ( sched_init_vcpu(v, cpu_id) != 0 ) { free_vcpu_struct(v); return NULL; } if ( vcpu_initialise(v) != 0 ) { sched_destroy_vcpu(v); free_vcpu_struct(v); return NULL; } d->vcpu[vcpu_id] = v; if ( vcpu_id != 0 ) { int prev_id = v->vcpu_id - 1; while ( (prev_id >= 0) && (d->vcpu[prev_id] == NULL) ) prev_id--; BUG_ON(prev_id < 0); v->next_in_list = d->vcpu[prev_id]->next_in_list; d->vcpu[prev_id]->next_in_list = v; } /* Must be called after making new vcpu visible to for_each_vcpu(). */ vcpu_check_shutdown(v); return v; } struct vcpu *alloc_idle_vcpu(unsigned int cpu_id) { return idle_vcpu[cpu_id] ?: alloc_vcpu(idle_vcpu[0]->domain, cpu_id, cpu_id); } static unsigned int __read_mostly extra_dom0_irqs = 256; static unsigned int __read_mostly extra_domU_irqs = 32; static void __init parse_extra_guest_irqs(const char *s) { if ( isdigit(*s) ) extra_domU_irqs = simple_strtoul(s, &s, 0); if ( *s == ',' && isdigit(*++s) ) extra_dom0_irqs = simple_strtoul(s, &s, 0); } custom_param("extra_guest_irqs", parse_extra_guest_irqs); struct domain *domain_create( domid_t domid, unsigned int domcr_flags, ssidref_t ssidref) { struct domain *d, **pd; enum { INIT_xsm = 1u<<0, INIT_rangeset = 1u<<1, INIT_evtchn = 1u<<2, INIT_gnttab = 1u<<3, INIT_arch = 1u<<4 }; int init_status = 0; if ( (d = alloc_domain_struct()) == NULL ) return NULL; memset(d, 0, sizeof(*d)); d->domain_id = domid; lock_profile_register_struct(LOCKPROF_TYPE_PERDOM, d, domid, "Domain"); if ( xsm_alloc_security_domain(d) != 0 ) goto fail; init_status |= INIT_xsm; atomic_set(&d->refcnt, 1); spin_lock_init_prof(d, domain_lock); spin_lock_init_prof(d, page_alloc_lock); spin_lock_init(&d->shutdown_lock); spin_lock_init(&d->hypercall_deadlock_mutex); INIT_PAGE_LIST_HEAD(&d->page_list); INIT_PAGE_LIST_HEAD(&d->xenpage_list); if ( domcr_flags & DOMCRF_hvm ) d->is_hvm = 1; if ( domid == 0 ) { d->is_pinned = opt_dom0_vcpus_pin; d->disable_migrate = 1; } if ( 
domcr_flags & DOMCRF_dummy ) return d; rangeset_domain_initialise(d); init_status |= INIT_rangeset; if ( !is_idle_domain(d) ) { if ( xsm_domain_create(d, ssidref) != 0 ) goto fail; d->is_paused_by_controller = 1; atomic_inc(&d->pause_count); if ( domid ) d->nr_pirqs = nr_irqs_gsi + extra_domU_irqs; else d->nr_pirqs = nr_irqs_gsi + extra_dom0_irqs; d->pirq_to_evtchn = xmalloc_array(u16, d->nr_pirqs); d->pirq_mask = xmalloc_array( unsigned long, BITS_TO_LONGS(d->nr_pirqs)); if ( (d->pirq_to_evtchn == NULL) || (d->pirq_mask == NULL) ) goto fail; memset(d->pirq_to_evtchn, 0, d->nr_pirqs * sizeof(*d->pirq_to_evtchn)); bitmap_zero(d->pirq_mask, d->nr_pirqs); if ( evtchn_init(d) != 0 ) goto fail; init_status |= INIT_evtchn; if ( grant_table_create(d) != 0 ) goto fail; init_status |= INIT_gnttab; } if ( arch_domain_create(d, domcr_flags) != 0 ) goto fail; init_status |= INIT_arch; d->iomem_caps = rangeset_new(d, "I/O Memory", RANGESETF_prettyprint_hex); d->irq_caps = rangeset_new(d, "Interrupts", 0); if ( (d->iomem_caps == NULL) || (d->irq_caps == NULL) ) goto fail; if ( sched_init_domain(d) != 0 ) goto fail; if ( !is_idle_domain(d) ) { spin_lock(&domlist_update_lock); pd = &domain_list; /* NB. domain_list maintained in order of domid. 
*/ for ( pd = &domain_list; *pd != NULL; pd = &(*pd)->next_in_list ) if ( (*pd)->domain_id > d->domain_id ) break; d->next_in_list = *pd; d->next_in_hashbucket = domain_hash[DOMAIN_HASH(domid)]; rcu_assign_pointer(*pd, d); rcu_assign_pointer(domain_hash[DOMAIN_HASH(domid)], d); spin_unlock(&domlist_update_lock); } return d; fail: d->is_dying = DOMDYING_dead; atomic_set(&d->refcnt, DOMAIN_DESTROYED); if ( init_status & INIT_arch ) arch_domain_destroy(d); if ( init_status & INIT_gnttab ) grant_table_destroy(d); if ( init_status & INIT_evtchn ) evtchn_destroy(d); if ( init_status & INIT_rangeset ) rangeset_domain_destroy(d); if ( init_status & INIT_xsm ) xsm_free_security_domain(d); xfree(d->pirq_mask); xfree(d->pirq_to_evtchn); free_domain_struct(d); return NULL; } struct domain *get_domain_by_id(domid_t dom) { struct domain *d; rcu_read_lock(&domlist_read_lock); for ( d = rcu_dereference(domain_hash[DOMAIN_HASH(dom)]); d != NULL; d = rcu_dereference(d->next_in_hashbucket) ) { if ( d->domain_id == dom ) { if ( unlikely(!get_domain(d)) ) d = NULL; break; } } rcu_read_unlock(&domlist_read_lock); return d; } struct domain *rcu_lock_domain_by_id(domid_t dom) { struct domain *d; rcu_read_lock(&domlist_read_lock); for ( d = rcu_dereference(domain_hash[DOMAIN_HASH(dom)]); d != NULL; d = rcu_dereference(d->next_in_hashbucket) ) { if ( d->domain_id == dom ) return d; } rcu_read_unlock(&domlist_read_lock); return NULL; } int rcu_lock_target_domain_by_id(domid_t dom, struct domain **d) { if ( dom == DOMID_SELF ) { *d = rcu_lock_current_domain(); return 0; } if ( (*d = rcu_lock_domain_by_id(dom)) == NULL ) return -ESRCH; if ( !IS_PRIV_FOR(current->domain, *d) ) { rcu_unlock_domain(*d); return -EPERM; } return 0; } int domain_kill(struct domain *d) { int rc = 0; if ( d == current->domain ) return -EINVAL; /* Protected by domctl_lock. 
*/ switch ( d->is_dying ) { case DOMDYING_alive: domain_pause(d); d->is_dying = DOMDYING_dying; spin_barrier(&d->domain_lock); evtchn_destroy(d); gnttab_release_mappings(d); tmem_destroy(d->tmem); d->tmem = NULL; /* fallthrough */ case DOMDYING_dying: rc = domain_relinquish_resources(d); if ( rc != 0 ) { BUG_ON(rc != -EAGAIN); break; } d->is_dying = DOMDYING_dead; put_domain(d); send_guest_global_virq(dom0, VIRQ_DOM_EXC); /* fallthrough */ case DOMDYING_dead: break; } return rc; } void __domain_crash(struct domain *d) { if ( d->is_shutting_down ) { /* Print nothing: the domain is already shutting down. */ } else if ( d == current->domain ) { printk("Domain %d (vcpu#%d) crashed on cpu#%d:\n", d->domain_id, current->vcpu_id, smp_processor_id()); show_execution_state(guest_cpu_user_regs()); } else { printk("Domain %d reported crashed by domain %d on cpu#%d:\n", d->domain_id, current->domain->domain_id, smp_processor_id()); } domain_shutdown(d, SHUTDOWN_crash); } void __domain_crash_synchronous(void) { __domain_crash(current->domain); /* * Flush multicall state before dying if a multicall is in progress. * This shouldn't be necessary, but some architectures are calling * domain_crash_synchronous() when they really shouldn't (i.e., from * within hypercall context). */ if ( this_cpu(mc_state).flags != 0 ) { dprintk(XENLOG_ERR, "FIXME: synchronous domain crash during a multicall!\n"); this_cpu(mc_state).flags = 0; } vcpu_end_shutdown_deferral(cu
/*
 *  Copyright (C) 2011 Gabor Juhos <juhosg@openwrt.org>
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License version 2 as published
 *  by the Free Software Foundation.
 *
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <libgen.h>
#include <getopt.h>
#include <stdarg.h>
#include <errno.h>
#include <sys/stat.h>

#include "md5.h"

/* Print an error message to stderr, prefixed with the program name. */
#define ERR(fmt, ...) do { \
	fflush(0); \
	fprintf(stderr, "[%s] *** error: " fmt "\n", \
			progname, ## __VA_ARGS__ ); \
} while (0)

/* Like ERR(), but appends strerror(errno); errno is saved first because
 * fflush()/fprintf() may clobber it before it is formatted. */
#define ERRS(fmt, ...) do { \
	int save = errno; \
	fflush(0); \
	fprintf(stderr, "[%s] *** error: " fmt ", %s\n", \
			progname, ## __VA_ARGS__, strerror(save)); \
} while (0)

/* Magic value stored twice in the image header (see struct wrg_header). */
#define WRG_MAGIC	0x20040220

/* On-disk image header, written verbatim immediately before the payload.
 * Multi-byte fields are stored via put_u32(), i.e. in the endianness
 * selected by the -b option. */
struct wrg_header {
	char		signature[32];	/* user-supplied signature (-s) */
	uint32_t	magic1;		/* WRG_MAGIC */
	uint32_t	magic2;		/* WRG_MAGIC */
	uint32_t	size;		/* payload size in bytes */
	uint32_t	offset;		/* user-supplied offset (-O) */
	char		devname[32];	/* user-supplied device name (-d) */
	char		digest[16];	/* MD5 over offset+devname+payload */
} __attribute__ ((packed));

static char *progname;		/* basename(argv[0]); used in error messages */
static char *ifname;		/* -i: input (payload) file name */
static char *ofname;		/* -o: output image file name */
static char *signature;		/* -s: header signature string */
static char *dev_name;		/* -d: header device name */
static uint32_t offset;		/* -O: header offset field */
static int big_endian;		/* -b: emit multi-byte fields big endian */

/*
 * Print the command-line help text — to stdout when exiting successfully,
 * to stderr otherwise — and terminate with the given exit status.
 */
void usage(int status)
{
	FILE *out = (status == EXIT_SUCCESS) ? stdout : stderr;

	fprintf(out,
		"Usage: %s [OPTIONS...]\n"
		"\n"
		"Options:\n"
		"  -b              create image in big endian format\n"
		"  -i <file>       read input from the file <file>\n"
		"  -d <name>       set device name to <name>\n"
		"  -o <file>       write output to the file <file>\n"
		"  -O <offset>     set offset to <offset>\n"
		"  -s <sig>        set image signature to <sig>\n"
		"  -h              show this screen\n",
		progname);

	exit(status);
}

/*
 * Store a 32-bit value at a (possibly unaligned) location, byte by byte,
 * honouring the global endianness selection (-b flag).
 */
static void put_u32(void *data, uint32_t val)
{
	unsigned char *bytes = data;
	int i;

	for (i = 0; i < 4; i++) {
		/* big endian: most significant byte first; else reversed */
		int shift = big_endian ? 24 - 8 * i : 8 * i;

		bytes[i] = (val >> shift) & 0xff;
	}
}

/*
 * Compute the MD5 digest stored in header->digest.
 *
 * Only header->offset, header->devname and the payload bytes are hashed —
 * the signature, magic and size fields are NOT covered.  The caller must
 * have filled in offset and devname before calling.
 */
static void get_digest(struct wrg_header *header, char *data, int size)
{
	MD5_CTX ctx;

	MD5_Init(&ctx);

	MD5_Update(&ctx, (char *)&header->offset, sizeof(header->offset));
	MD5_Update(&ctx, (char *)&header->devname, sizeof(header->devname));
	MD5_Update(&ctx, data, size);

	MD5_Final(header->digest, &ctx);
}

/*
 * Build a WRG firmware image: read the payload from ifname, prepend a
 * struct wrg_header (signature, magics, size, offset, device name, MD5
 * digest) and write the result to ofname.
 *
 * Returns EXIT_SUCCESS on success.  On any failure after the output file
 * has been created, the partial output is unlinked.
 *
 * Fixes vs. original: devname copy used sizeof(header->signature) (wrong
 * field, correct only by coincidence of equal sizes); fread/fwrite success
 * was judged by errno instead of their return values (short reads/writes
 * need not set errno); fflush() result was ignored, hiding buffered write
 * errors; ERR("...\n") produced a doubled newline.
 */
int main(int argc, char *argv[])
{
	struct wrg_header *header;
	char *buf;
	struct stat st;
	size_t buflen;
	int err;
	int res = EXIT_FAILURE;

	FILE *outfile, *infile;

	progname = basename(argv[0]);

	while (1) {
		int c;

		c = getopt(argc, argv, "bd:i:o:s:O:h");
		if (c == -1)
			break;

		switch (c) {
		case 'b':
			big_endian = 1;
			break;
		case 'd':
			dev_name = optarg;
			break;
		case 'i':
			ifname = optarg;
			break;
		case 'o':
			ofname = optarg;
			break;
		case 's':
			signature = optarg;
			break;
		case 'O':
			offset = strtoul(optarg, NULL, 0);
			break;
		case 'h':
			usage(EXIT_SUCCESS);
			break;

		default:
			usage(EXIT_FAILURE);
			break;
		}
	}

	/* All four of -s, -i, -o, -d are mandatory. */
	if (signature == NULL) {
		ERR("no signature specified");
		goto err;
	}

	if (ifname == NULL) {
		ERR("no input file specified");
		goto err;
	}

	if (ofname == NULL) {
		ERR("no output file specified");
		goto err;
	}

	if (dev_name == NULL) {
		ERR("no device name specified");
		goto err;
	}

	err = stat(ifname, &st);
	if (err) {
		ERRS("stat failed on %s", ifname);
		goto err;
	}

	/* Header and payload are assembled in one contiguous buffer. */
	buflen = st.st_size + sizeof(struct wrg_header);
	buf = malloc(buflen);
	if (!buf) {
		ERR("no memory for buffer");
		goto err;
	}

	infile = fopen(ifname, "r");
	if (infile == NULL) {
		ERRS("could not open \"%s\" for reading", ifname);
		goto err_free;
	}

	/* Check the fread() return value rather than errno: a short read
	 * (e.g. the file shrank since stat()) does not set errno. */
	if (st.st_size > 0 &&
	    fread(buf + sizeof(struct wrg_header), st.st_size, 1, infile) != 1) {
		ERRS("unable to read from file %s", ifname);
		goto close_in;
	}

	header = (struct wrg_header *) buf;
	memset(header, '\0', sizeof(struct wrg_header));

	/* Fixed-width header fields: strncpy is intentional here — the
	 * fields need not be NUL-terminated when the input fills them. */
	strncpy(header->signature, signature, sizeof(header->signature));
	strncpy(header->devname, dev_name, sizeof(header->devname));
	put_u32(&header->magic1, WRG_MAGIC);
	put_u32(&header->magic2, WRG_MAGIC);
	put_u32(&header->size, st.st_size);
	put_u32(&header->offset, offset);

	/* Digest covers offset, devname and the payload (see get_digest). */
	get_digest(header, buf + sizeof(struct wrg_header), st.st_size);

	outfile = fopen(ofname, "w");
	if (outfile == NULL) {
		ERRS("could not open \"%s\" for writing", ofname);
		goto close_in;
	}

	if (fwrite(buf, buflen, 1, outfile) != 1) {
		ERRS("unable to write to file %s", ofname);
		goto close_out;
	}

	/* A buffered write error may only surface at flush time. */
	if (fflush(outfile) != 0) {
		ERRS("unable to flush file %s", ofname);
		goto close_out;
	}

	res = EXIT_SUCCESS;

close_out:
	fclose(outfile);
	if (res != EXIT_SUCCESS)
		unlink(ofname);
close_in:
	fclose(infile);
err_free:
	free(buf);
err:
	return res;
}