# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.

from __future__ import absolute_import, division, print_function

INCLUDES = """
#include <openssl/x509v3.h>

/*
 * This is part of a work-around for the difficulty cffi has in dealing with
 * `LHASH_OF(foo)` as the name of a type.  We invent a new, simpler name that
 * will be an alias for this type and use the alias throughout.  This works
 * together with another opaque typedef for the same name in the TYPES section.
 * Note that the result is an opaque type.
 */
typedef LHASH_OF(CONF_VALUE) Cryptography_LHASH_OF_CONF_VALUE;
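/* e.g. X509V3_EXT_conf_nid() in the MACROS section below takes a
 * Cryptography_LHASH_OF_CONF_VALUE * where OpenSSL's prototype uses
 * LHASH_OF(CONF_VALUE) *. */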

typedef STACK_OF(ACCESS_DESCRIPTION) Cryptography_STACK_OF_ACCESS_DESCRIPTION;
typedef STACK_OF(DIST_POINT) Cryptography_STACK_OF_DIST_POINT;
typedef STACK_OF(POLICYQUALINFO) Cryptography_STACK_OF_POLICYQUALINFO;
typedef STACK_OF(POLICYINFO) Cryptography_STACK_OF_POLICYINFO;
typedef STACK_OF(ASN1_INTEGER) Cryptography_STACK_OF_ASN1_INTEGER;
typedef STACK_OF(GENERAL_SUBTREE) Cryptography_STACK_OF_GENERAL_SUBTREE;
"""

TYPES = """
typedef ... Cryptography_STACK_OF_ACCESS_DESCRIPTION;
typedef ... Cryptography_STACK_OF_POLICYQUALINFO;
typedef ... Cryptography_STACK_OF_POLICYINFO;
typedef ... Cryptography_STACK_OF_ASN1_INTEGER;
typedef ... Cryptography_STACK_OF_GENERAL_SUBTREE;
typedef ... EXTENDED_KEY_USAGE;
typedef ... CONF;

typedef struct {
    X509 *issuer_cert;
    X509 *subject_cert;
    ...;
} X509V3_CTX;

typedef void * (*X509V3_EXT_D2I)(void *, const unsigned char **, long);
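
/*
 * Partial view of OpenSSL's extension-method table: X509V3_EXT_get() and
 * X509V3_EXT_get_nid() (declared in the MACROS section below) return one of
 * these, and its d2i callback parses an extension's DER-encoded value.
 */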

typedef struct {
    ASN1_ITEM_EXP *it;
    X509V3_EXT_D2I d2i;
    ...;
} X509V3_EXT_METHOD;

static const int GEN_OTHERNAME;
static const int GEN_EMAIL;
static const int GEN_X400;
static const int GEN_DNS;
static const int GEN_URI;
static const int GEN_DIRNAME;
static const int GEN_EDIPARTY;
static const int GEN_IPADD;
static const int GEN_RID;

typedef struct {
    ASN1_OBJECT *type_id;
    ASN1_TYPE *value;
} OTHERNAME;

typedef struct {
    ...;
} EDIPARTYNAME;

typedef struct {
    int ca;
    ASN1_INTEGER *pathlen;
} BASIC_CONSTRAINTS;

typedef struct {
    Cryptography_STACK_OF_GENERAL_SUBTREE *permittedSubtrees;
    Cryptography_STACK_OF_GENERAL_SUBTREE *excludedSubtrees;
} NAME_CONSTRAINTS;

typedef struct {
    ASN1_INTEGER *requireExplicitPolicy;
    ASN1_INTEGER *inhibitPolicyMapping;
} POLICY_CONSTRAINTS;


typedef struct {
    int type;
    union {
        char *ptr;
        OTHERNAME *otherName;  /* otherName */
        ASN1_IA5STRING *rfc822Name;
        ASN1_IA5STRING *dNSName;
        ASN1_TYPE *x400Address;
        X509_NAME *directoryName;
        EDIPARTYNAME *ediPartyName;
        ASN1_IA5STRING *uniformResourceIdentifier;
        ASN1_OCTET_STRING *iPAddress;
        ASN1_OBJECT *registeredID;

        /* Old names */
        ASN1_OCTET_STRING *ip; /* iPAddress */
        X509_NAME *dirn;       /* directoryName */
        ASN1_IA5STRING *ia5;   /* rfc822Name, dNSName, */
                               /*   uniformResourceIdentifier */
        ASN1_OBJECT *rid;      /* registeredID */
        ASN1_TYPE *other;      /* x400Address */
    } d;
    ...;
} GENERAL_NAME;

typedef struct {
    GENERAL_NAME *base;
    ASN1_INTEGER *minimum;
    ASN1_INTEGER *maximum;
} GENERAL_SUBTREE;

typedef struct stack_st_GENERAL_NAME GENERAL_NAMES;

typedef struct {
    ASN1_OCTET_STRING *keyid;
    GENERAL_NAMES *issuer;
    ASN1_INTEGER *serial;
} AUTHORITY_KEYID;

typedef struct {
    ASN1_OBJECT *method;
    GENERAL_NAME *location;
} ACCESS_DESCRIPTION;

typedef ... Cryptography_LHASH_OF_CONF_VALUE;


typedef ... Cryptography_STACK_OF_DIST_POINT;

typedef struct {
    int type;
    union {
        GENERAL_NAMES *fullname;
        Cryptography_STACK_OF_X509_NAME_ENTRY *relativename;
    } name;
    ...;
} DIST_POINT_NAME;

typedef struct {
    DIST_POINT_NAME *distpoint;
    ASN1_BIT_STRING *reasons;
    GENERAL_NAMES *CRLissuer;
    ...;
} DIST_POINT;

typedef struct {
    ASN1_STRING *organization;
    Cryptography_STACK_OF_ASN1_INTEGER *noticenos;
} NOTICEREF;

typedef struct {
    NOTICEREF *noticeref;
    ASN1_STRING *exptext;
} USERNOTICE;

typedef struct {
    ASN1_OBJECT *pqualid;
    union {
        ASN1_IA5STRING *cpsuri;
        USERNOTICE *usernotice;
        ASN1_TYPE *other;
    } d;
} POLICYQUALINFO;

typedef struct {
    ASN1_OBJECT *policyid;
    Cryptography_STACK_OF_POLICYQUALINFO *qualifiers;
} POLICYINFO;
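
/*
 * Free-callback types matching the sk_*_pop_free declarations in the MACROS
 * section below (e.g. sk_GENERAL_NAME_pop_free).
 */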

typedef void (*sk_GENERAL_NAME_freefunc)(GENERAL_NAME *);
typedef void (*sk_DIST_POINT_freefunc)(DIST_POINT *);
typedef void (*sk_POLICYINFO_freefunc)(POLICYINFO *);
"""


FUNCTIONS = """
int X509V3_EXT_add_alias(int, int);
void X509V3_set_ctx(X509V3_CTX *, X509 *, X509 *, X509_REQ *, X509_CRL *, int);
int GENERAL_NAME_print(BIO *, GENERAL_NAME *);
GENERAL_NAMES *GENERAL_NAMES_new(void);
void GENERAL_NAMES_free(GENERAL_NAMES *);
void *X509V3_EXT_d2i(X509_EXTENSION *);
int X509_check_ca(X509 *);
"""

MACROS = """
/* X509 became a const arg in 1.1.0 */
void *X509_get_ext_d2i(X509 *, int, int *, int *);
/* The last two char * args became const char * in 1.1.0 */
X509_EXTENSION *X509V3_EXT_nconf(CONF *, X509V3_CTX *, char *, char *);
/* This is a macro defined by a call to DECLARE_ASN1_FUNCTIONS in the
   x509v3.h header. */
BASIC_CONSTRAINTS *BASIC_CONSTRAINTS_new(void);
void BASIC_CONSTRAINTS_free(BASIC_CONSTRAINTS *);
/* This is a macro defined by a call to DECLARE_ASN1_FUNCTIONS in the
   x509v3.h header. */
AUTHORITY_KEYID *AUTHORITY_KEYID_new(void);
void AUTHORITY_KEYID_free(AUTHORITY_KEYID *);

NAME_CONSTRAINTS *NAME_CONSTRAINTS_new(void);
void NAME_CONSTRAINTS_free(NAME_CONSTRAINTS *);

OTHERNAME *OTHERNAME_new(void);
void OTHERNAME_free(OTHERNAME *);

POLICY_CONSTRAINTS *POLICY_CONSTRAINTS_new(void);
void POLICY_CONSTRAINTS_free(POLICY_CONSTRAINTS *);

void *X509V3_set_ctx_nodb(X509V3_CTX *);

int i2d_GENERAL_NAMES(GENERAL_NAMES *, unsigned char **);
GENERAL_NAMES *d2i_GENERAL_NAMES(GENERAL_NAMES **, const unsigned char **,
                                 long);

int sk_GENERAL_NAME_num(struct stack_st_GENERAL_NAME *);
int sk_GENERAL_NAME_push(struct stack_st_GENERAL_NAME *, GENERAL_NAME *);
GENERAL_NAME *sk_GENERAL_NAME_value(struct stack_st_GENERAL_NAME *, int);
void sk_GENERAL_NAME_pop_free(struct stack_st_GENERAL_NAME *,
                              sk_GENERAL_NAME_freefunc);

Cryptography_STACK_OF_ACCESS_DESCRIPTION *sk_ACCESS_DESCRIPTION_new_null(void);
int sk_ACCESS_DESCRIPTION_num(Cryptography_STACK_OF_ACCESS_DESCRIPTION *);
ACCESS_DESCRIPTION *sk_ACCESS_DESCRIPTION_value(
    Cryptography_STACK_OF_ACCESS_DESCRIPTION *, int
);
void sk_ACCESS_DESCRIPTION_free(Cryptography_STACK_OF_ACCESS_DESCRIPTION *);
int sk_ACCESS_DESCRIPTION_push(Cryptography_STACK_OF_ACCESS_DESCRIPTION *,
                               ACCESS_DESCRIPTION *);

ACCESS_DESCRIPTION *ACCESS_DESCRIPTION_new(void);
void ACCESS_DESCRIPTION_free(ACCESS_DESCRIPTION *);

X509_EXTENSION *X509V3_EXT_conf_nid(Cryptography_LHASH_OF_CONF_VALUE *,
                                    X509V3_CTX *, int, char *);

/* These aren't macros; these functions are all const X on OpenSSL > 1.0.x. */
const X509V3_EXT_METHOD *X509V3_EXT_get(X509_EXTENSION *);
const X509V3_EXT_METHOD *X509V3_EXT_get_nid(int);

Cryptography_STACK_OF_DIST_POINT *sk_DIST_POINT_new_null(void);
void sk_DIST_POINT_free(Cryptography_STACK_OF_DIST_POINT *);
int sk_DIST_POINT_num(Cryptography_STACK_OF_DIST_POINT *);
DIST_POINT *sk_DIST_POINT_value(Cryptography_STACK_OF_DIST_POINT *, int);
int sk_DIST_POINT_push(Cryptography_STACK_OF_DIST_POINT *, DIST_POINT *);
void sk_DIST_POINT_pop_free(Cryptography_STACK_OF_DIST_POINT *,
                            sk_DIST_POINT_freefunc);
void CRL_DIST_POINTS_free(Cryptography_STACK_OF_DIST_POINT *);

void sk_POLICYINFO_free(Cryptography_STACK_OF_POLICYINFO *);
int sk_POLICYINFO_num(Cryptography_STACK_OF_POLICYINFO *);
POLICYINFO *sk_POLICYINFO_value(Cryptography_STACK_OF_POLICYINFO *, int);
int sk_POLICYINFO_push(Cryptography_STACK_OF_POLICYINFO *, POLICYINFO *);
Cryptography_STACK_OF_POLICYINFO *sk_POLICYINFO_new_null(void);
void sk_POLICYINFO_pop_free(Cryptography_STACK_OF_POLICYINFO *,
                            sk_POLICYINFO_freefunc);
void CERTIFICATEPOLICIES_free(Cryptography_STACK_OF_POLICYINFO *);

POLICYINFO *POLICYINFO_new(void);
void POLICYINFO_free(POLICYINFO *);

POLICYQUALINFO *POLICYQUALINFO_new(void);
void POLICYQUALINFO_free(POLICYQUALINFO *);

NOTICEREF *NOTICEREF_new(void);
void NOTICEREF_free(NOTICEREF *);

USERNOTICE *USERNOTICE_new(void);
void USERNOTICE_free(USERNOTICE *);

void sk_POLICYQUALINFO_free(Cryptography_STACK_OF_POLICYQUALINFO *);
int sk_POLICYQUALINFO_num(Cryptography_STACK_OF_POLICYQUALINFO *);
POLICYQUALINFO *sk_POLICYQUALINFO_value(Cryptography_STACK_OF_POLICYQUALINFO *,
                                        int);
int sk_POLICYQUALINFO_push(Cryptography_STACK_OF_POLICYQUALINFO *,
                           POLICYQUALINFO *);
Cryptography_STACK_OF_POLICYQUALINFO *sk_POLICYQUALINFO_new_null(void);

Cryptography_STACK_OF_GENERAL_SUBTREE *sk_GENERAL_SUBTREE_new_null(void);
void sk_GENERAL_SUBTREE_free(Cryptography_STACK_OF_GENERAL_SUBTREE *);
int sk_GENERAL_SUBTREE_num(Cryptography_STACK_OF_GENERAL_SUBTREE *);
GENERAL_SUBTREE *sk_GENERAL_SUBTREE_value(
    Cryptography_STACK_OF_GENERAL_SUBTREE *, int
);
int sk_GENERAL_SUBTREE_push(Cryptography_STACK_OF_GENERAL_SUBTREE *,
                            GENERAL_SUBTREE *);

GENERAL_SUBTREE *GENERAL_SUBTREE_new(void);

void sk_ASN1_INTEGER_free(Cryptography_STACK_OF_ASN1_INTEGER *);
int sk_ASN1_INTEGER_num(Cryptography_STACK_OF_ASN1_INTEGER *);
ASN1_INTEGER *sk_ASN1_INTEGER_value(Cryptography_STACK_OF_ASN1_INTEGER *, int);
int sk_ASN1_INTEGER_push(Cryptography_STACK_OF_ASN1_INTEGER *, ASN1_INTEGER *);
Cryptography_STACK_OF_ASN1_INTEGER *sk_ASN1_INTEGER_new_null(void);

X509_EXTENSION *X509V3_EXT_i2d(int, int, void *);

DIST_POINT *DIST_POINT_new(void);
void DIST_POINT_free(DIST_POINT *);

DIST_POINT_NAME *DIST_POINT_NAME_new(void);
void DIST_POINT_NAME_free(DIST_POINT_NAME *);

GENERAL_NAME *GENERAL_NAME_new(void);
void GENERAL_NAME_free(GENERAL_NAME *);
"""

CUSTOMIZATIONS = """
"""