/**************************************************************************** * (C) 2002-2003 - Rolf Neugebauer - Intel Research Cambridge * (C) 2002-2003 University of Cambridge * (C) 2004 - Mark Williamson - Intel Research Cambridge **************************************************************************** * * File: common/schedule.c * Author: Rolf Neugebauer & Keir Fraser * Updated for generic API by Mark Williamson * * Description: Generic CPU scheduling code * implements support functionality for the Xen scheduler API. * */ #ifndef COMPAT #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* opt_sched: scheduler - default to credit */ static char opt_sched[10] = "credit"; string_param("sched", opt_sched); /* if sched_smt_power_savings is set, * scheduler will give preferrence to partially idle package compared to * the full idle package, when picking pCPU to schedule vCPU. */ int sched_smt_power_savings = 0; boolean_param("sched_smt_power_savings", sched_smt_power_savings); #define TIME_SLOP (s32)MICROSECS(50) /* allow time to slip a bit */ /* Various timer handlers. */ static void s_timer_fn(void *unused); static void vcpu_periodic_timer_fn(void *data); static void vcpu_singleshot_timer_fn(void *data); static void poll_timer_fn(void *data); /* This is global for now so that private implementations can reach it */ DEFINE_PER_CPU(struct schedule_data, schedule_data); extern struct scheduler sched_sedf_def; extern struct scheduler sched_credit_def; static struct scheduler *schedulers[] = { &sched_sedf_def, &sched_credit_def, NULL }; static struct scheduler ops; #define SCHED_OP(fn, ...) \ (( ops.fn != NULL ) ? 
ops.fn( __VA_ARGS__ ) \ : (typeof(ops.fn(__VA_ARGS__)))0 ) static inline void trace_runstate_change(struct vcpu *v, int new_state) { struct { uint32_t vcpu:16, domain:16; } d; uint32_t event; if ( likely(!tb_init_done) ) return; d.vcpu = v->vcpu_id; d.domain = v->domain->domain_id; event = TRC_SCHED_RUNSTATE_CHANGE; event |= ( v->runstate.state & 0x3 ) << 8; event |= ( new_state & 0x3 ) << 4; __trace_var(event, 1/*tsc*/, sizeof(d), (unsigned char *)&d); } static inline void trace_continue_running(struct vcpu *v) { struct { uint32_t vcpu:16, domain:16; } d; if ( likely(!tb_init_done) ) return; d.vcpu = v->vcpu_id; d.domain = v->domain->domain_id; __trace_var(TRC_SCHED_CONTINUE_RUNNING, 1/*tsc*/, sizeof(d), (unsigned char *)&d); } static inline void vcpu_runstate_change( struct vcpu *v, int new_state, s_time_t new_entry_time) { s_time_t delta; ASSERT(v->runstate.state != new_state); ASSERT(spin_is_locked(&per_cpu(schedule_data,v->processor).schedule_lock)); trace_runstate_change(v, new_state); delta = new_entry_time - v->runstate.state_entry_time; if ( delta > 0 ) { v->runstate.time[v->runstate.state] += delta; v->runstate.state_entry_time = new_entry_time; } v->runstate.state = new_state; } void vcpu_runstate_get(struct vcpu *v, struct vcpu_runstate_info *runstate) { s_time_t delta; if ( unlikely(v != current) ) vcpu_schedule_lock_irq(v); memcpy(runstate, &v->runstate, sizeof(*runstate)); delta = NOW() - runstate->state_entry_time; if ( delta > 0 ) runstate->time[runstate->state] += delta; if ( unlikely(v != current) ) vcpu_schedule_unlock_irq(v); } uint64_t get_cpu_idle_time(unsigned int cpu) { struct vcpu_runstate_info state; struct vcpu *v; if ( (v = idle_vcpu[cpu]) == NULL ) return 0; vcpu_runstate_get(v, &state); return state.time[RUNSTATE_running]; } int sched_init_vcpu(struct vcpu *v, unsigned int processor) { struct domain *d = v->domain; /* * Initialize processor and affinity settings. 
The idler, and potentially * domain-0 VCPUs, are pinned onto their respective physical CPUs. */ v->processor = processor; if ( is_idle_domain(d) || d->is_pinned ) v->cpu_affinity = cpumask_of_cpu(processor); else cpus_setall(v->cpu_affinity); /* Initialise the per-vcpu timers. */ init_timer(&v->periodic_timer, vcpu_periodic_timer_fn, v, v->processor); init_timer(&v->singleshot_timer, vcpu_singleshot_timer_fn, v, v->processor); init_timer(&v->poll_timer, poll_timer_fn, v, v->processor); /* Idle VCPUs are scheduled immediately. */ if ( is_idle_domain(d) ) { per_cpu(schedule_data, v->processor).curr = v; per_cpu(schedule_data, v->processor).idle = v; v->is_running = 1; } TRACE_2D(TRC_SCHED_DOM_ADD, v->domain->domain_id, v->vcpu_id); return SCHED_OP(init_vcpu, v); } void sched_destroy_vcpu(struct vcpu *v) { kill_timer(&v->periodic_timer); kill_timer(&v->singleshot_timer); kill_timer(&v->poll_timer); SCHED_OP(destroy_vcpu, v); } int sched_init_domain(struct domain *d) { return SCHED_OP(init_domain, d); } void sched_destroy_domain(struct domain *d) { SCHED_OP(destroy_domain, d); } void vcpu_sleep_nosync(struct vcpu *v) { unsigned long flags; vcpu_schedule_lock_irqsave(v, flags); if ( likely(!vcpu_runnable(v)) ) { if ( v->runstate.state == RUNSTATE_runnable ) vcpu_runstate_change(v, RUNSTATE_offline, NOW()); SCHED_OP(sleep, v); } vcpu_schedule_unlock_irqrestore(v, flags); TRACE_2D(TRC_SCHED_SLEEP, v->domain->domain_id, v->vcpu_id); } void vcpu_sleep_sync(struct vcpu *v) { vcpu_sleep_nosync(v); while ( !vcpu_runnable(v) && v->is_running ) cpu_relax(); sync_vcpu_execstate(v); } void vcpu_wake(struct vcpu *v) { unsigned long flags; vcpu_schedule_lock_irqsave(v, flags); if ( likely(vcpu_runnable(v)) ) { if ( v->runstate.state >= RUNSTATE_blocked ) vcpu_runstate_change(v, RUNSTATE_runnable, NOW()); SCHED_OP(wake, v); } else if ( !test_bit(_VPF_blocked, &v->pause_flags) ) { if ( v->runstate.state == RUNSTATE_blocked ) vcpu_runstate_change(v, RUNSTATE_offline, NOW()); } 
vcpu_schedule_unlock_irqrestore(v, flags); TRACE_2D(TRC_SCHED_WAKE, v->domain->domain_id, v->vcpu_id); } void vcpu_unblock(struct vcpu *v) { if ( !test_and_clear_bit(_VPF_blocked, &v->pause_flags) ) return; /* Polling period ends when a VCPU is unblocked. */ if ( unlikely(v->poll_evtchn != 0) ) { v->poll_evtchn = 0; /* * We *must* re-clear _VPF_blocked to avoid racing other wakeups of * this VCPU (and it then going back to sleep on poll_mask). * Test-and-clear is idiomatic and ensures clear_bit not reordered. */ if ( test_and_clear_bit(v->vcpu_id, v->domain->poll_mask) ) clear_bit(_VPF_blocked, &v->pause_flags); } vcpu_wake(v); } static void vcpu_migrate(struct vcpu *v) { unsigned long flags; int old_cpu; vcpu_schedule_lock_irqsave(v, flags); /* * NB. Check of v->running happens /after/ setting migration flag * because they both happen in (different) spinlock regions, and those * regions are strictly serialised. */ if ( v->is_running || !test_and_clear_bit(_VPF_migrating, &v->pause_flags) ) { vcpu_schedule_unlock_irqrestore(v, flags); return; } /* Switch to new CPU, then unlock old CPU. */ old_cpu = v->processor; v->processor = SCHED_OP(pick_cpu, v); spin_unlock_irqrestore( &per_cpu(schedule_data, old_cpu).schedule_lock, flags); /* Wake on new CPU. */ vcpu_wake(v); } /* * Force a VCPU through a deschedule/reschedule path. * For example, using this when setting the periodic timer period means that * most periodic-timer state need only be touched from within the scheduler * which can thus be done without need for synchronisation. */ void vcpu_force_reschedule(struct vcpu *v) { vcpu_schedule_lock_irq(v); if ( v->is_running ) set_bit(_VPF_migrating, &v->pause_flags); vcpu_sche
/*
 * IxNpeMicrocode.h - Header file for compiling the Intel microcode C file
 *
 * Copyright (C) 2006 Christian Hohnstaedt <chohnstaedt@innominate.com>
 *
 * This file is released under the GPLv2
 *
 *
 * compile with
 *
 * gcc -Wall IxNpeMicrocode.c -o IxNpeMicrocode
 *
 * Executing the resulting binary on your build-host creates the
 * "NPE-[ABC].xxxxxxxx" files containing the selected microcode
 *
 * fetch the IxNpeMicrocode.c from the Intel Access Library.
 * It will include this header.
 *
 * Select one image for each NPE from the list below
 * (C++-style comments are used so entries are easy to uncomment)
 */

// #define IX_NPEDL_NPEIMAGE_NPEA_ETH_SPAN_MASK_FIREWALL_VLAN_QOS_HDR_CONV_EXTMIB
// #define IX_NPEDL_NPEIMAGE_NPEA_ETH_SPAN_VLAN_QOS_HDR_CONV_EXTMIB
// #define IX_NPEDL_NPEIMAGE_NPEA_ETH_LEARN_FILTER_SPAN_MASK_FIREWALL_VLAN_QOS_EXTMIB
// #define IX_NPEDL_NPEIMAGE_NPEA_HSS_TSLOT_SWITCH
#define IX_NPEDL_NPEIMAGE_NPEA_ETH_SPAN_FIREWALL_VLAN_QOS_HDR_CONV
// #define IX_NPEDL_NPEIMAGE_NPEA_ETH_LEARN_FILTER_SPAN_FIREWALL_VLAN_QOS
// #define IX_NPEDL_NPEIMAGE_NPEA_ETH_LEARN_FILTER_SPAN_FIREWALL
// #define IX_NPEDL_NPEIMAGE_NPEA_HSS_2_PORT
// #define IX_NPEDL_NPEIMAGE_NPEA_DMA
// #define IX_NPEDL_NPEIMAGE_NPEA_ATM_MPHY_12_PORT
// #define IX_NPEDL_NPEIMAGE_NPEA_HSS0_ATM_MPHY_1_PORT
// #define IX_NPEDL_NPEIMAGE_NPEA_HSS0_ATM_SPHY_1_PORT
// #define IX_NPEDL_NPEIMAGE_NPEA_HSS0
// #define IX_NPEDL_NPEIMAGE_NPEA_WEP


// #define IX_NPEDL_NPEIMAGE_NPEB_ETH_SPAN_MASK_FIREWALL_VLAN_QOS_HDR_CONV_EXTMIB
// #define IX_NPEDL_NPEIMAGE_NPEB_ETH_SPAN_VLAN_QOS_HDR_CONV_EXTMIB
// #define IX_NPEDL_NPEIMAGE_NPEB_ETH_LEARN_FILTER_SPAN_MASK_FIREWALL_VLAN_QOS_EXTMIB
// #define IX_NPEDL_NPEIMAGE_NPEB_DMA
#define IX_NPEDL_NPEIMAGE_NPEB_ETH_SPAN_FIREWALL_VLAN_QOS_HDR_CONV
// #define IX_NPEDL_NPEIMAGE_NPEB_ETH_LEARN_FILTER_SPAN_FIREWALL_VLAN_QOS
// #define IX_NPEDL_NPEIMAGE_NPEB_ETH_LEARN_FILTER_SPAN_FIREWALL


// #define IX_NPEDL_NPEIMAGE_NPEC_ETH_SPAN_MASK_FIREWALL_VLAN_QOS_HDR_CONV_EXTMIB
// #define IX_NPEDL_NPEIMAGE_NPEC_ETH_SPAN_VLAN_QOS_HDR_CONV_EXTMIB
// #define IX_NPEDL_NPEIMAGE_NPEC_ETH_LEARN_FILTER_SPAN_MASK_FIREWALL_VLAN_QOS_EXTMIB
// #define IX_NPEDL_NPEIMAGE_NPEC_DMA
// #define IX_NPEDL_NPEIMAGE_NPEC_CRYPTO_AES_ETH_LEARN_FILTER_SPAN
// #define IX_NPEDL_NPEIMAGE_NPEC_CRYPTO_AES_ETH_LEARN_FILTER_FIREWALL
#define IX_NPEDL_NPEIMAGE_NPEC_CRYPTO_AES_CCM_ETH
// #define IX_NPEDL_NPEIMAGE_NPEC_CRYPTO_AES_CCM_EXTSHA_ETH
// #define IX_NPEDL_NPEIMAGE_NPEC_CRYPTO_ETH_LEARN_FILTER_SPAN_FIREWALL
// #define IX_NPEDL_NPEIMAGE_NPEC_ETH_SPAN_FIREWALL_VLAN_QOS_HDR_CONV
// #define IX_NPEDL_NPEIMAGE_NPEC_ETH_LEARN_FILTER_SPAN_FIREWALL_VLAN_QOS
// #define IX_NPEDL_NPEIMAGE_NPEC_ETH_LEARN_FILTER_SPAN_FIREWALL


#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <netinet/in.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <errno.h>
#include <endian.h>
#include <byteswap.h>
#include <string.h>

/*
 * Byte-order helpers: convert a host-order 32-bit word to the requested
 * on-disk endianness. On a little-endian build host to_le32 is the
 * identity and to_be32 swaps; on a big-endian host it is the reverse.
 */
#if __BYTE_ORDER == __LITTLE_ENDIAN
#define to_le32(x) (x)          /* host is LE: identity */
#define to_be32(x) bswap_32(x)  /* swap to big-endian */
#else
#define to_be32(x) (x)          /* host is BE: identity */
#define to_le32(x) bswap_32(x)  /* swap to little-endian */
#endif

/*
 * Layout of one microcode image record inside IxNpeMicrocode_array:
 * three 32-bit header words followed by the variable-length payload.
 * main() walks consecutive records; a record occupies size + 3 words.
 */
struct dl_image {
	unsigned magic;		/* record marker, checked against 0xfeedf00d */
	unsigned id;		/* device/NPE/function/revision, decoded in main() */
	unsigned size;		/* payload length in 32-bit words */
	unsigned data[];	/* payload (C99 flexible array member, was GNU data[0]) */
};

/*
 * Microcode blob; only declared here. The definition is supplied by
 * Intel's IxNpeMicrocode.c, which #includes this header (see the file
 * comment above).
 */
const unsigned IxNpeMicrocode_array[];

int main(int argc, char *argv[])
{
	struct dl_image *image = (struct dl_image *)IxNpeMicrocode_array;
	int imgsiz, i, fd, cnt;
	const unsigned *arrayptr = IxNpeMicrocode_array;
	const char *names[] = { "IXP425", "IXP465", "unknown" };
	int bigendian = 1;

	if (argc > 1) {
		if (!strcmp(argv[1], "-le"))
			bigendian = 0;
		else if (!strcmp(argv[1], "-be"))
			bigendian = 1;
		else {
			printf("Usage: %s <-le|-be>\n", argv[0]);
			return EXIT_FAILURE;
		}
	}

	for (image = (struct dl_image *)arrayptr, cnt=0;
		(image->id != 0xfeedf00d) && (image->magic == 0xfeedf00d);
		image = (struct dl_image *)(arrayptr), cnt++)
	{
		unsigned char field[4];
		imgsiz = image->size + 3;
		*(unsigned*)field = to_be32(image->id);
		char filename[40], slnk[10];

		sprintf(filename, "NPE-%c.%08x", (field[0] & 0xf) + 'A',
			image->id);
		sprintf(slnk, "NPE-%c", (field[0] & 0xf) + 'A');
		printf("Writing image: %s.NPE_%c Func: %2x Rev: %02x.%02x "
			"Size: %5d to: '%s'\n",
			names[field[0] >> 4], (field[0] & 0xf) + 'A',
			field[1], field[2], field[3], imgsiz*4, filename);
		fd = open(filename, O_CREAT | O_RDWR | O_TRUNC, 0644);
		if (fd >= 0) {
			for (i=0; i<imgsiz; i++) {
				*(unsigned*)field = bigendian ?
					to_be32(arrayptr[i]) :
					to_le32(arrayptr[i]);
				write(fd, field, sizeof(field));
			}
			close(fd);
			unlink(slnk);
			symlink(filename, slnk);
		} else {
			perror(filename);
		}
		arrayptr += imgsiz;
	}
	close(fd);
	return 0;
}