/******************************************************************************
 * Additional declarations for the generic scheduler interface.  This should
 * only be included by files that implement conforming schedulers.
 *
 * Portions by Mark Williamson are (C) 2004 Intel Research Cambridge
 */

#ifndef __XEN_SCHED_IF_H__
#define __XEN_SCHED_IF_H__

#include <xen/percpu.h>

/* A global pointer to the initial cpupool (POOL0). */
extern struct cpupool *cpupool0;

/* cpus currently in no cpupool */
extern cpumask_t cpupool_free_cpus;

/* Generic scheduler parameters. */
#define SCHED_DEFAULT_RATELIMIT_US 1000
extern int sched_ratelimit_us;


/*
 * In order to allow a scheduler to remap the lock->cpu mapping,
 * we have a per-cpu pointer, along with a pre-allocated set of
 * locks.  The generic schedule init code will point each schedule lock
 * pointer at its cpu's pre-allocated lock; if the scheduler wants to
 * remap them, it can simply modify the schedule lock pointers.
 *
 * For cache friendliness, keep the actual lock in the same cache line
 * as the rest of the struct, and just have the scheduler point at the
 * one it wants (which may well be the one right next to it).
 */
struct schedule_data {
    spinlock_t         *schedule_lock;  /* protects this cpu's scheduling  */
    spinlock_t          _lock;          /* pre-allocated lock (see above)  */
    struct vcpu        *curr;           /* current task                    */
    void               *sched_priv;     /* scheduler-private per-cpu data  */
    struct timer        s_timer;        /* scheduling timer                */
    atomic_t            urgent_count;   /* how many urgent vcpus           */
};
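
/*
 * Sketch (hypothetical, not part of this interface): a scheduler that
 * wants several cpus to share a single runqueue lock can repoint the
 * per-cpu schedule_lock in its pcpu setup path.  "rqd_lock" below is an
 * illustrative scheduler-owned per-runqueue lock:
 *
 *     unsigned long flags;
 *     spinlock_t *old_lock;
 *
 *     local_irq_save(flags);
 *     old_lock = pcpu_schedule_lock(cpu);
 *     per_cpu(schedule_data, cpu).schedule_lock = &rqd_lock;
 *     spin_unlock(old_lock);    (NB: not pcpu_schedule_unlock(), which
 *                                would now pick up the new pointer)
 *     local_irq_restore(flags);
 *
 * Holding the old lock while flipping the pointer means any concurrent
 * pcpu_schedule_lock(cpu) will notice the mismatch and retry on the new
 * lock.
 */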

#define curr_on_cpu(c)    (per_cpu(schedule_data, c).curr)

DECLARE_PER_CPU(struct schedule_data, schedule_data);
DECLARE_PER_CPU(struct scheduler *, scheduler);
DECLARE_PER_CPU(struct cpupool *, cpupool);

static inline spinlock_t *pcpu_schedule_lock(int cpu)
{
    spinlock_t *lock;

    for ( ; ; )
    {
        /*
         * The per-cpu schedule_lock pointer may change under our feet,
         * e.g. if moving the cpu to a different pool also changes the
         * scheduler lock.  Retry until the lock we took is still the
         * one published for this cpu.
         */
        lock = per_cpu(schedule_data, cpu).schedule_lock;

        spin_lock(lock);
        if ( likely(lock == per_cpu(schedule_data, cpu).schedule_lock) )
            break;
        spin_unlock(lock);
    }
    return lock;
}

static inline int pcpu_schedule_trylock(int cpu)
{
    spinlock_t *lock = per_cpu(schedule_data, cpu).schedule_lock;

    if ( !spin_trylock(lock) )
        return 0;
    /* The pointer may have been remapped while we were acquiring. */
    if ( lock == per_cpu(schedule_data, cpu).schedule_lock )
        return 1;
    spin_unlock(lock);
    return 0;
}

#define pcpu_schedule_lock_irq(p) \
    do { local_irq_disable(); pcpu_schedule_lock(p); } while ( 0 )
#define pcpu_schedule_lock_irqsave(p, flags) \
    do { local_irq_save(flags); pcpu_schedule_lock(p); } while ( 0 )

static inline void pcpu_schedule_unlock(int cpu)
{
    spin_unlock(per_cpu(schedule_data, cpu).schedule_lock);
}

#define pcpu_schedule_unlock_irq(p) \
    do { pcpu_schedule_unlock(p); local_irq_enable(); } while ( 0 )
#define pcpu_schedule_unlock_irqrestore(p, flags) \
    do { pcpu_schedule_unlock(p); local_irq_restore(flags); } while ( 0 )
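
/*
 * Typical usage (sketch): serialise against scheduling activity on a
 * given physical cpu; interrupts must be off while the lock is held:
 *
 *     pcpu_schedule_lock_irq(cpu);
 *     ... manipulate per-cpu scheduler state ...
 *     pcpu_schedule_unlock_irq(cpu);
 */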

static inline void vcpu_schedule_lock(struct vcpu *v)
{
    spinlock_t *lock;

    for ( ; ; )
    {
        /*
         * v->processor may change while we are grabbing the lock, and
         * the per-cpu schedule_lock pointer itself may also change, if
         * changing cpu pool changes the scheduler lock.  Retry until
         * they match.
         *
         * v->processor may also change while the lock stays the same;
         * the check below still succeeds in that case.
         */
        lock = per_cpu(schedule_data, v->processor).schedule_lock;

        spin_lock(lock);
        if ( likely(lock == per_cpu(schedule_data, v->processor).schedule_lock) )
            break;
        spin_unlock(lock);
    }
}

#define vcpu_schedule_lock_irq(v) \
    do { local_irq_disable(); vcpu_schedule_lock(v); } while ( 0 )
#define vcpu_schedule_lock_irqsave(v, flags) \
    do { local_irq_save(flags); vcpu_schedule_lock(v); } while ( 0 )

static inline void vcpu_schedule_unlock(struct vcpu *v)
{
    spin_unlock(per_cpu(schedule_data, v->processor).schedule_lock);
}

#define vcpu_schedule_unlock_irq(v) \
    do { vcpu_schedule_unlock(v); local_irq_enable(); } while ( 0 )
#define vcpu_schedule_unlock_irqrestore(v, flags) \
    do { vcpu_schedule_unlock(v); local_irq_restore(flags); } while ( 0 )
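
/*
 * Typical usage (sketch): pin down v->processor's scheduler state before
 * inspecting or changing v's scheduling state:
 *
 *     vcpu_schedule_lock_irq(v);
 *     ... inspect/modify v's scheduling state ...
 *     vcpu_schedule_unlock_irq(v);
 *
 * Migrating a vcpu requires holding its schedule lock, so v->processor
 * is stable for as long as the lock is held.
 */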

struct task_slice {
    struct vcpu *task;      /* vcpu to run next                          */
    s_time_t     time;      /* length of the slice (negative: no timer)  */
    bool_t       migrated;  /* vcpu was just migrated to this cpu        */
};

struct scheduler {
    char *name;             /* full name for this scheduler      */
    char *opt_name;         /* option name for this scheduler    */
    unsigned int sched_id;  /* ID for this scheduler             */
    void *sched_data;       /* global data pointer               */

    int          (*global_init)    (void);

    int          (*init)           (struct scheduler *);
    void         (*deinit)         (const struct scheduler *);

    void         (*free_vdata)     (const struct scheduler *, void *);
    void *       (*alloc_vdata)    (const struct scheduler *, struct vcpu *,
                                    void *);
    void         (*free_pdata)     (const struct scheduler *, void *, int);
    void *       (*alloc_pdata)    (const struct scheduler *, int);
    void         (*free_domdata)   (const struct scheduler *, void *);
    void *       (*alloc_domdata)  (const struct scheduler *, struct domain *);

    int          (*init_domain)    (const struct scheduler *, struct domain *);
    void         (*destroy_domain) (const struct scheduler *, struct domain *);

    /* Activate / deactivate vcpus in a cpu pool */
    void         (*insert_vcpu)    (const struct scheduler *, struct vcpu *);
    void         (*remove_vcpu)    (const struct scheduler *, struct vcpu *);

    void         (*sleep)          (const struct scheduler *, struct vcpu *);
    void         (*wake)           (const struct scheduler *, struct vcpu *);
    void         (*yield)          (const struct scheduler *, struct vcpu *);
    void         (*context_saved)  (const struct scheduler *, struct vcpu *);

    struct task_slice (*do_schedule) (const struct scheduler *, s_time_t,
                                      bool_t tasklet_work_scheduled);

    int          (*pick_cpu)       (const struct scheduler *, struct vcpu *);
    void         (*migrate)        (const struct scheduler *, struct vcpu *,
                                    unsigned int);
    int          (*adjust)         (const struct scheduler *, struct domain *,
                                    struct xen_domctl_scheduler_op *);
    int          (*adjust_global)  (const struct scheduler *,
                                    struct xen_sysctl_scheduler_op *);
    void         (*set_node_affinity) (const struct scheduler *,
                                       struct domain *, nodemask_t *);
    void         (*dump_settings)  (const struct scheduler *);
    void         (*dump_cpu_state) (const struct scheduler *, int);

    void         (*tick_suspend)    (const struct scheduler *, unsigned int);
    void         (*tick_resume)     (const struct scheduler *, unsigned int);
};
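
/*
 * Sketch of a hypothetical conforming scheduler (names illustrative):
 * hooks that are not implemented are simply left NULL, and sched_id
 * would come from the XEN_SCHEDULER_* constants in the public headers.
 *
 *     static struct task_slice example_schedule(const struct scheduler *ops,
 *                                               s_time_t now,
 *                                               bool_t tasklet_work_scheduled)
 *     {
 *         struct task_slice ret;
 *
 *         ret.task = idle_vcpu[smp_processor_id()];  (always run idle)
 *         ret.time = -1;                             (no timer)
 *         ret.migrated = 0;
 *         return ret;
 *     }
 *
 *     const struct scheduler sched_example_def = {
 *         .name        = "Example Scheduler",
 *         .opt_name    = "example",
 *         .do_schedule = example_schedule,
 *     };
 */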

extern const struct scheduler sched_sedf_def;
extern const struct scheduler sched_credit_def;
extern const struct scheduler sched_credit2_def;
extern const struct scheduler sched_arinc653_def;

struct cpupool
{
    int              cpupool_id;     /* unique id of this pool            */
    cpumask_var_t    cpu_valid;      /* all cpus assigned to pool         */
    cpumask_var_t    cpu_suspended;  /* cpus in S3 that should be in this pool */
    struct cpupool   *next;          /* next pool in the global list      */
    unsigned int     n_dom;          /* number of domains in this pool    */
    struct scheduler *sched;         /* scheduler instance for this pool  */
    atomic_t         refcnt;         /* reference count                   */
};

#define cpupool_scheduler_cpumask(_pool) \
    (((_pool) == NULL) ? &cpupool_free_cpus : (_pool)->cpu_valid)
#define cpupool_online_cpumask(_pool) \
    (((_pool) == NULL) ? &cpu_online_map : (_pool)->cpu_valid)
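
/*
 * Example (sketch): iterate over every cpu the scheduler of a pool may
 * use; a NULL pool denotes the free (unassigned) cpus:
 *
 *     unsigned int cpu;
 *
 *     for_each_cpu ( cpu, cpupool_scheduler_cpumask(pool) )
 *         ...;
 */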

#endif /* __XEN_SCHED_IF_H__ */