/******************************************************************************
 * kexec.c - Architecture independent kexec code for Xen
 *
 * Xen port written by:
 * - Simon 'Horms' Horman <horms@verge.net.au>
 * - Magnus Damm <magnus@valinux.co.jp>
 */

#include <xen/lib.h>
#include <xen/ctype.h>
#include <xen/errno.h>
#include <xen/guest_access.h>
#include <xen/sched.h>
#include <xen/types.h>
#include <xen/kexec.h>
#include <xen/keyhandler.h>
#include <public/kexec.h>
#include <xen/cpumask.h>
#include <asm/atomic.h>
#include <xen/spinlock.h>
#include <xen/version.h>
#include <xen/console.h>
#include <public/elfnote.h>
#include <xsm/xsm.h>
#ifdef CONFIG_COMPAT
#include <compat/kexec.h>
#endif
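
/* State shared by the kexec paths: per-cpu crash notes, loaded images, flags. */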
static DEFINE_PER_CPU(void *, crash_notes);
static Elf_Note *xen_crash_note;
static cpumask_t crash_saved_cpus;
static xen_kexec_image_t kexec_image[KEXEC_IMAGE_NR];
#define KEXEC_FLAG_DEFAULT_POS (KEXEC_IMAGE_NR + 0)
#define KEXEC_FLAG_CRASH_POS (KEXEC_IMAGE_NR + 1)
#define KEXEC_FLAG_IN_PROGRESS (KEXEC_IMAGE_NR + 2)
static unsigned long kexec_flags = 0; /* the lowest bits are for KEXEC_IMAGE... */
static spinlock_t kexec_lock = SPIN_LOCK_UNLOCKED;
xen_kexec_reserve_t kexec_crash_area;
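
/* Parse the "crashkernel=<size>[@<offset>]" command line option. */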
static void __init parse_crashkernel(const char *str)
{
    kexec_crash_area.size = parse_size_and_unit(str, &str);
    if ( *str == '@' )
        kexec_crash_area.start = parse_size_and_unit(str+1, NULL);
}
custom_param("crashkernel", parse_crashkernel);
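
/*
 * Claim the kexec-in-progress flag; the first CPU to get here proceeds,
 * any other CPU spins forever.
 */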
static void one_cpu_only(void)
{
    /* Only allow the first cpu to continue - force other cpus to spin */
    if ( test_and_set_bit(KEXEC_FLAG_IN_PROGRESS, &kexec_flags) )
        for ( ; ; ) ;
}

/* Save the registers in the per-cpu crash note buffer. */
void kexec_crash_save_cpu(void)
{
    int cpu = smp_processor_id();
    Elf_Note *note = per_cpu(crash_notes, cpu);
    ELF_Prstatus *prstatus;
    crash_xen_core_t *xencore;

    if ( cpu_test_and_set(cpu, crash_saved_cpus) )
        return;

    prstatus = (ELF_Prstatus *)ELFNOTE_DESC(note);

    note = ELFNOTE_NEXT(note);
    xencore = (crash_xen_core_t *)ELFNOTE_DESC(note);

    elf_core_save_regs(&prstatus->pr_reg, xencore);
}

/* Set up the single Xen-specific-info crash note. */
crash_xen_info_t *kexec_crash_save_info(void)
{
    int cpu = smp_processor_id();
    crash_xen_info_t info;
    crash_xen_info_t *out = (crash_xen_info_t *)ELFNOTE_DESC(xen_crash_note);

    BUG_ON(!cpu_test_and_set(cpu, crash_saved_cpus));

    memset(&info, 0, sizeof(info));
    info.xen_major_version = xen_major_version();
    info.xen_minor_version = xen_minor_version();
    info.xen_extra_version = __pa(xen_extra_version());
    info.xen_changeset = __pa(xen_changeset());
    info.xen_compiler = __pa(xen_compiler());
    info.xen_compile_date = __pa(xen_compile_date());
    info.xen_compile_time = __pa(xen_compile_time());
    info.tainted = tainted;

    /* Copy from guaranteed-aligned local copy to possibly-unaligned dest. */
    memcpy(out, &info, sizeof(info));

    return out;
}

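/*
 * Crash entry point: if a crash image is loaded, save this CPU's state,
 * shut the machine down and jump into the crash kernel.
 */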
void kexec_crash(void)
{
    int pos;

    pos = (test_bit(KEXEC_FLAG_CRASH_POS, &kexec_flags) != 0);
    if ( !test_bit(KEXEC_IMAGE_CRASH_BASE + pos, &kexec_flags) )
        return;

    console_start_sync();

    one_cpu_only();
    kexec_crash_save_cpu();
    machine_crash_shutdown();

    machine_kexec(&kexec_image[KEXEC_IMAGE_CRASH_BASE + pos]);

    BUG();
}

static void do_crashdump_trigger(unsigned char key)
{
    printk("'%c' pressed -> triggering crashdump\n", key);
    kexec_crash();
    /* kexec_crash() only returns if no crash image is loaded. */
    printk(" * no crash kernel loaded!\n");
}

static __init int register_crashdump_trigger(void)
{
    register_keyhandler('C', do_crashdump_trigger, "trigger a crashdump");
    return 0;
}
__initcall(register_crashdump_trigger);

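/* Initialise an ELF note header and name in place; the descriptor follows. */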
static void setup_note(Elf_Note *n, const char *name, int type, int descsz)
{
    int l = strlen(name) + 1;
    strlcpy(ELFNOTE_NAME(n), name, l);
    n->namesz = l;
    n->descsz = descsz;
    n->type = type;
}

static int sizeof_note(const char *name, int descsz)
{
    return (sizeof(Elf_Note) +
            ELFNOTE_ALIGN(strlen(name)+1) +
            ELFNOTE_ALIGN(descsz));
}

static int kexec_get_reserve(xen_kexec_range_t *range)
{
    if ( kexec_crash_area.size > 0 && kexec_crash_area.start > 0 )
    {
        range->start = kexec_crash_area.start;
        range->size = kexec_crash_area.size;
    }
    else
        range->start = range->size = 0;
    return 0;
}

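/*
 * Report (and lazily allocate) the crash note buffer for CPU 'nr'.  Every CPU
 * gets a CORE/PRSTATUS note and a Xen register note; CPU 0 additionally
 * carries the system-wide Xen info note.
 */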
static int kexec_get_cpu(xen_kexec_range_t *range)
{
    int nr = range->nr;
    int nr_bytes = 0;

    if ( nr < 0 || nr >= num_present_cpus() )
        return -EINVAL;

    nr_bytes += sizeof_note("CORE", sizeof(ELF_Prstatus));
    nr_bytes += sizeof_note("Xen", sizeof(crash_xen_core_t));

    /* The Xen info note is included in CPU0's range. */
    if ( nr == 0 )
        nr_bytes += sizeof_note("Xen", sizeof(crash_xen_info_t));

    if ( per_cpu(crash_notes, nr) == NULL )
    {
        Elf_Note *note;

        note = per_cpu(crash_notes, nr) = xmalloc_bytes(nr_bytes);
        if ( note == NULL )
            return -ENOMEM;

        /* Setup CORE note. */
        setup_note(note, "CORE", NT_PRSTATUS, sizeof(ELF_Prstatus));

        /* Setup Xen CORE note. */
        note = ELFNOTE_NEXT(note);
        setup_note(note, "Xen", XEN_ELFNOTE_CRASH_REGS, sizeof(crash_xen_core_t));

        if ( nr == 0 )
        {
            /* Setup system wide Xen info note. */
            xen_crash_note = note = ELFNOTE_NEXT(note);
            setup_note(note, "Xen", XEN_ELFNOTE_CRASH_INFO,
                       sizeof(crash_xen_info_t));
        }
    }

    range->start = __pa((unsigned long)per_cpu(crash_notes, nr));
    range->size = nr_bytes;
    return 0;
}

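/* Dispatch a range query to the common or architecture-specific handler. */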
static int kexec_get_range_internal(xen_kexec_range_t *range)
{
    int ret = -EINVAL;

    switch ( range->range )
    {
    case KEXEC_RANGE_MA_CRASH:
        ret = kexec_get_reserve(range);
        break;
    case KEXEC_RANGE_MA_CPU:
        ret = kexec_get_cpu(range);
        break;
    default:
        ret = machine_kexec_get(range);
        break;
    }

    return ret;
}

static int kexec_get_range(XEN_GUEST_HANDLE(void) uarg)
{
    xen_kexec_range_t range;
    int ret = -EINVAL;

    if ( unlikely(copy_from_guest(&range, uarg, 1)) )
        return -EFAULT;

    ret = kexec_get_range_internal(&range);

    if ( ret == 0 && unlikely(copy_to_guest(uarg, &range, 1)) )
        return -EFAULT;

    return ret;
}

static int kexec_get_range_compat(XEN_GUEST_HANDLE(void) uarg)
{
#ifdef CONFIG_COMPAT
    xen_kexec_range_t range;
    compat_kexec_range_t compat_range;
    int ret = -EINVAL;

    if ( unlikely(copy_from_guest(&compat_range, uarg, 1)) )
        return -EFAULT;

    XLAT_kexec_range(&range, &compat_range);

    ret = kexec_get_range_internal(&range);

    if ( ret == 0 )
    {
        XLAT_kexec_range(&compat_range, &range);
        if ( unlikely(copy_to_guest(uarg, &compat_range, 1)) )
            return -EFAULT;
    }

    return ret;
#else /* CONFIG_COMPAT */
    return 0;
#endif /* CONFIG_COMPAT */
}

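/* Map a KEXEC_TYPE_* value to its image slot base and its position flag bit. */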
static int kexec_load_get_bits(int type, int *base, int *bit)
{
    switch ( type )
    {
    case KEXEC_TYPE_DEFAULT:
        *base = KEXEC_IMAGE_DEFAULT_BASE;
        *bit = KEXEC_FLAG_DEFAULT_POS;
        break;
    case KEXEC_TYPE_CRASH:
        *base = KEXEC_IMAGE_CRASH_BASE;
        *bit = KEXEC_FLAG_CRASH_POS;
        break;
    default:
        return -1;
    }
    return 0;
}

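/*
 * For a load, place the new image in the unused slot for this type and flip
 * the "active slot" bit; then, for both load and unload, drop the image that
 * was previously active.
 */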
static int kexec_load_unload_internal(unsigned long op, xen_kexec_load_t *load)
{
    xen_kexec_image_t *image;
    int base, bit, pos;
    int ret = 0;

    if ( kexec_load_get_bits(load->type, &base, &bit) )
        return -EINVAL;

    pos = (test_bit(bit, &kexec_flags) != 0);

    /* Load the user data into an unused image */
    if ( op == KEXEC_CMD_kexec_load )
    {
        image = &kexec_image[base + !pos];

        BUG_ON(test_bit((base + !pos), &kexec_flags)); /* must be free */

        memcpy(image, &load->image, sizeof(*image));

        if ( !(ret = machine_kexec_load(load->type, base + !pos, image)) )
        {
            /* Set image present bit */
            set_bit((base + !pos), &kexec_flags);

            /* Make new image the active one */
            change_bit(bit, &kexec_flags);
        }
    }

    /* Unload the old image if present and load successful */
    if ( ret == 0 && !test_bit(KEXEC_FLAG_IN_PROGRESS, &kexec_flags) )
    {
        if ( test_and_clear_bit((base + pos), &kexec_flags) )
        {
            image = &kexec_image[base + pos];
            machine_kexec_unload(load->type, base + pos, image);
        }
    }

    return ret;
}

static int kexec_load_unload(unsigned long op, XEN_GUEST_HANDLE(void) uarg)
{
    xen_kexec_load_t load;

    if ( unlikely(copy_from_guest(&load, uarg, 1)) )
        return -EFAULT;

    return kexec_load_unload_internal(op, &load);
}

static int kexec_load_unload_compat(unsigned long op,
                                    XEN_GUEST_HANDLE(void) uarg)
{
#ifdef CONFIG_COMPAT
    compat_kexec_load_t compat_load;
    xen_kexec_load_t load;

    if ( unlikely(copy_from_guest(&compat_load, uarg, 1)) )
        return -EFAULT;

    /*
     * This is a bit dodgy: load.image is nested inside load, but
     * XLAT_kexec_load() (which is automatically generated) does not
     * translate load.image correctly.  So instead of
     *
     *     XLAT_kexec_load(&load, &compat_load);
     *
     * copy load.type, the only other member, manually and translate the
     * image explicitly.
     */
    load.type = compat_load.type;
    XLAT_kexec_image(&load.image, &compat_load.image);

    return kexec_load_unload_internal(op, &load);
#else /* CONFIG_COMPAT */
    return 0;
#endif /* CONFIG_COMPAT */
}

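/*
 * Jump into the currently loaded image of the requested type; does not
 * return on success.
 */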
static int kexec_exec(XEN_GUEST_HANDLE(void) uarg)
{
    xen_kexec_exec_t exec;
    xen_kexec_image_t *image;
    int base, bit, pos;

    if ( unlikely(copy_from_guest(&exec, uarg, 1)) )
        return -EFAULT;

    if ( kexec_load_get_bits(exec.type, &base, &bit) )
        return -EINVAL;

    pos = (test_bit(bit, &kexec_flags) != 0);

    /* Only allow kexec/kdump into loaded images */
    if ( !test_bit(base + pos, &kexec_flags) )
        return -ENOENT;

    switch ( exec.type )
    {
    case KEXEC_TYPE_DEFAULT:
        image = &kexec_image[base + pos];
        one_cpu_only();
        machine_reboot_kexec(image); /* Does not return */
        break;
    case KEXEC_TYPE_CRASH:
        kexec_crash(); /* Does not return */
        break;
    }

    return -EINVAL; /* never reached */
}

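/*
 * Common hypercall entry point for native and compat callers; restricted to
 * privileged domains and subject to an XSM check.
 */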
int do_kexec_op_internal(unsigned long op, XEN_GUEST_HANDLE(void) uarg,
                         int compat)
{
    unsigned long flags;
    int ret = -EINVAL;

    if ( !IS_PRIV(current->domain) )
        return -EPERM;

    ret = xsm_kexec();
    if ( ret )
        return ret;

    switch ( op )
    {
    case KEXEC_CMD_kexec_get_range:
        if ( compat )
            ret = kexec_get_range_compat(uarg);
        else
            ret = kexec_get_range(uarg);
        break;
    case KEXEC_CMD_kexec_load:
    case KEXEC_CMD_kexec_unload:
        spin_lock_irqsave(&kexec_lock, flags);
        if ( !test_bit(KEXEC_FLAG_IN_PROGRESS, &kexec_flags) )
        {
            if ( compat )
                ret = kexec_load_unload_compat(op, uarg);
            else
                ret = kexec_load_unload(op, uarg);
        }
        spin_unlock_irqrestore(&kexec_lock, flags);
        break;
    case KEXEC_CMD_kexec:
        ret = kexec_exec(uarg);
        break;
    }

    return ret;
}

long do_kexec_op(unsigned long op, XEN_GUEST_HANDLE(void) uarg)
{
    return do_kexec_op_internal(op, uarg, 0);
}

#ifdef CONFIG_COMPAT
int compat_kexec_op(unsigned long op, XEN_GUEST_HANDLE(void) uarg)
{
    return do_kexec_op_internal(op, uarg, 1);
}
#endif

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */