patches/linux-2.6.16.33/linux-2.6.19-rc1-kexec-xen-x86_64.patch
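This patch carries the x86_64 arch-specific part of kexec-under-Xen: it
compiles the native NMI shootdown out of the crash path, teaches
machine_kexec.c to build an identity-mapped transition page table in terms
of machine addresses, and overrides the page/pfn conversion macros used by
the generic kexec code. As a sketch of where machine_kexec_setup_load_arg()
(added below) fits, the loading glue elsewhere in this series looks roughly
like the following; take the struct, field, and command names as
assumptions based on the xen/interface/kexec.h of this era, since the glue
itself lives in a separate patch of the series:

	xen_kexec_load_t xkl;

	memset(&xkl, 0, sizeof(xkl));
	xkl.type = KEXEC_TYPE_DEFAULT;	/* or KEXEC_TYPE_CRASH */

	/* Fill xkl.image.page_list[] with *machine* addresses of the
	 * control code page and the transition page tables. */
	machine_kexec_setup_load_arg(&xkl.image, image);

	/* Hand the image to the hypervisor; Xen, not dom0, later
	 * performs the actual reboot into it. */
	rc = HYPERVISOR_kexec_op(KEXEC_CMD_kexec_load, &xkl);
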
--- 0001/arch/x86_64/kernel/crash.c
+++ work/arch/x86_64/kernel/crash.c
@@ -92,6 +92,7 @@ static void crash_save_self(struct pt_re
 	crash_save_this_cpu(regs, cpu);
 }
 
+#ifndef CONFIG_XEN
 #ifdef CONFIG_SMP
 static atomic_t waiting_for_crash_ipi;
 
@@ -156,6 +157,7 @@ static void nmi_shootdown_cpus(void)
 	/* There are no cpus to shootdown */
 }
 #endif
+#endif /* CONFIG_XEN */
 
 void machine_crash_shutdown(struct pt_regs *regs)
 {
@@ -173,6 +175,8 @@ void machine_crash_shutdown(struct pt_re
 
 	/* Make a note of crashing cpu. Will be used in NMI callback.*/
 	crashing_cpu = smp_processor_id();
+
+#ifndef CONFIG_XEN
 	nmi_shootdown_cpus();
 
 	if(cpu_has_apic)
@@ -181,6 +185,6 @@ void machine_crash_shutdown(struct pt_re
 #if defined(CONFIG_X86_IO_APIC)
 	disable_IO_APIC();
 #endif
-
+#endif /* CONFIG_XEN */
 	crash_save_self(regs);
 }
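
The net effect is easiest to read with CONFIG_XEN in force:
machine_crash_shutdown() keeps the crash-register bookkeeping but skips the
SMP shootdown and APIC teardown, on the reasoning that dom0's vCPUs are not
the physical CPUs and the hypervisor owns the (IO-)APIC; Xen quiesces the
other processors itself on the crash path. A reconstruction for
illustration only (prologue lines the hunks do not show are elided):

	void machine_crash_shutdown(struct pt_regs *regs)
	{
		/* ... IRQ/locking prologue not shown in the hunks ... */

		/* Make a note of crashing cpu. Will be used in NMI callback.*/
		crashing_cpu = smp_processor_id();

		/* Compiled out under CONFIG_XEN: nmi_shootdown_cpus(),
		 * the cpu_has_apic-guarded local APIC disable, and
		 * disable_IO_APIC(). */

		crash_save_self(regs);
	}
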
--- 0010/arch/x86_64/kernel/machine_kexec.c
+++ work/arch/x86_64/kernel/machine_kexec.c
@@ -24,6 +24,104 @@ static u64 kexec_pud1[512] PAGE_ALIGNED;
 static u64 kexec_pmd1[512] PAGE_ALIGNED;
 static u64 kexec_pte1[512] PAGE_ALIGNED;
 
+#ifdef CONFIG_XEN
+
+/* Under Xen, override the hypervisor-aware page table update functions so
+ * that a regular identity-mapping page table can be built.
+ */
+
+#include <xen/interface/kexec.h>
+#include <xen/interface/memory.h>
+
+#define x__pmd(x) ((pmd_t) { (x) } )
+#define x__pud(x) ((pud_t) { (x) } )
+#define x__pgd(x) ((pgd_t) { (x) } )
+
+#define x_pmd_val(x)   ((x).pmd)
+#define x_pud_val(x)   ((x).pud)
+#define x_pgd_val(x)   ((x).pgd)
+
+static inline void x_set_pmd(pmd_t *dst, pmd_t val)
+{
+	x_pmd_val(*dst) = x_pmd_val(val);
+}
+
+static inline void x_set_pud(pud_t *dst, pud_t val)
+{
+	x_pud_val(*dst) = phys_to_machine(x_pud_val(val));
+}
+
+static inline void x_pud_clear(pud_t *pud)
+{
+	x_pud_val(*pud) = 0;
+}
+
+static inline void x_set_pgd(pgd_t *dst, pgd_t val)
+{
+	x_pgd_val(*dst) = phys_to_machine(x_pgd_val(val));
+}
+
+static inline void x_pgd_clear(pgd_t *pgd)
+{
+	x_pgd_val(*pgd) = 0;
+}
+
+#define X__PAGE_KERNEL_LARGE_EXEC \
+         (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_PSE)
+#define X_KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
+
+#define __ma(x) (pfn_to_mfn(__pa((x)) >> PAGE_SHIFT) << PAGE_SHIFT)
+
+#if PAGES_NR > KEXEC_XEN_NO_PAGES
+#error PAGES_NR is greater than KEXEC_XEN_NO_PAGES - Xen support will break
+#endif
+
+#if PA_CONTROL_PAGE != 0
+#error PA_CONTROL_PAGE is non-zero - Xen support will break
+#endif
+
+void machine_kexec_setup_load_arg(xen_kexec_image_t *xki, struct kimage *image)
+{
+	void *control_page;
+	void *table_page;
+
+	memset(xki->page_list, 0, sizeof(xki->page_list));
+
+	control_page = page_address(image->control_code_page) + PAGE_SIZE;
+	memcpy(control_page, relocate_kernel, PAGE_SIZE);
+
+	table_page = page_address(image->control_code_page);
+
+	xki->page_list[PA_CONTROL_PAGE] = __ma(control_page);
+	xki->page_list[PA_TABLE_PAGE] = __ma(table_page);
+
+	xki->page_list[PA_PGD] = __ma(kexec_pgd);
+	xki->page_list[PA_PUD_0] = __ma(kexec_pud0);
+	xki->page_list[PA_PUD_1] = __ma(kexec_pud1);
+	xki->page_list[PA_PMD_0] = __ma(kexec_pmd0);
+	xki->page_list[PA_PMD_1] = __ma(kexec_pmd1);
+	xki->page_list[PA_PTE_0] = __ma(kexec_pte0);
+	xki->page_list[PA_PTE_1] = __ma(kexec_pte1);
+}
+
+#else /* CONFIG_XEN */
+
+#define x__pmd(x) __pmd(x)
+#define x__pud(x) __pud(x)
+#define x__pgd(x) __pgd(x)
+
+#define x_set_pmd(x, y) set_pmd(x, y)
+#define x_set_pud(x, y) set_pud(x, y)
+#define x_set_pgd(x, y) set_pgd(x, y)
+
+#define x_pud_clear(x) pud_clear(x)
+#define x_pgd_clear(x) pgd_clear(x)
+
+#define X__PAGE_KERNEL_LARGE_EXEC __PAGE_KERNEL_LARGE_EXEC
+#define X_KERNPG_TABLE _KERNPG_TABLE
+
+#endif /* CONFIG_XEN */
+
 static void init_level2_page(pmd_t *level2p, unsigned long addr)
 {
 	unsigned long end_addr;
@@ -31,7 +129,7 @@ static void init_level2_page(pmd_t *leve
 	addr &= PAGE_MASK;
 	end_addr = addr + PUD_SIZE;
 	while (addr < end_addr) {
-		set_pmd(level2p++, __pmd(addr | __PAGE_KERNEL_LARGE_EXEC));
+		x_set_pmd(level2p++, x__pmd(addr | X__PAGE_KERNEL_LARGE_EXEC));
 		addr += PMD_SIZE;
 	}
 }
@@ -56,12 +154,12 @@ static int init_level3_page(struct kimag
 		}
 		level2p = (pmd_t *)page_address(page);
 		init_level2_page(level2p, addr);
-		set_pud(level3p++, __pud(__pa(level2p) | _KERNPG_TABLE));
+		x_set_pud(level3p++, x__pud(__pa(level2p) | X_KERNPG_TABLE));
 		addr += PUD_SIZE;
 	}
 	/* clear the unused entries */
 	while (addr < end_addr) {
-		pud_clear(level3p++);
+		x_pud_clear(level3p++);
 		addr += PUD_SIZE;
 	}
 out:
@@ -92,12 +190,12 @@ static int init_level4_page(struct kimag
 		if (result) {
 			goto out;
 		}
-		set_pgd(level4p++, __pgd(__pa(level3p) | _KERNPG_TABLE));
+		x_set_pgd(level4p++, x__pgd(__pa(level3p) | X_KERNPG_TABLE));
 		addr += PGDIR_SIZE;
 	}
 	/* clear the unused entries */
 	while (addr < end_addr) {
-		pgd_clear(level4p++);
+		x_pgd_clear(level4p++);
 		addr += PGDIR_SIZE;
 	}
 out:
@@ -108,8 +206,14 @@ out:
 static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
 {
 	pgd_t *level4p;
+	unsigned long x_end_pfn = end_pfn;
+
+#ifdef CONFIG_XEN
+	x_end_pfn = HYPERVISOR_memory_op(XENMEM_maximum_ram_page, NULL);
+#endif
+
 	level4p = (pgd_t *)__va(start_pgtable);
- 	return init_level4_page(image, level4p, 0, end_pfn << PAGE_SHIFT);
+ 	return init_level4_page(image, level4p, 0, x_end_pfn << PAGE_SHIFT);
 }
 
 int machine_kexec_prepare(struct kimage *image)
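
Two details of the hunks above deserve a note. x_set_pud() and x_set_pgd()
pass their value through phys_to_machine() because non-leaf entries must
point at the next-level table by machine address, whereas x_set_pmd()
stores its value untouched: in this identity map the 2 MiB leaf entries
already hold machine addresses, since init_pgtable() walks machine frames
up to XENMEM_maximum_ram_page rather than dom0's end_pfn (the loaded
segments can sit in machine frames above the domain's pseudo-physical
range). A minimal sketch of that sizing decision, factored into a helper
whose name is invented for this illustration:

	/* Illustration only; mirrors the init_pgtable() hunk above. */
	static unsigned long kexec_ident_map_limit_pfn(void)
	{
	#ifdef CONFIG_XEN
		/* Highest machine page in the whole system, so the
		 * transition table can reach any frame the image uses. */
		return HYPERVISOR_memory_op(XENMEM_maximum_ram_page, NULL);
	#else
		return end_pfn;		/* native: highest managed pfn */
	#endif
	}
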
--- 0009/include/asm-x86_64/kexec.h
+++ work/include/asm-x86_64/kexec.h
@@ -91,6 +91,19 @@ relocate_kernel(unsigned long indirectio
 		unsigned long page_list,
 		unsigned long start_address) ATTRIB_NORET;
 
+/* Under Xen we need to work with machine addresses. These macros hand the
+ * generic kexec code the machine address of a page rather than the
+ * pseudo-physical address that the default macros would return.
+ */
+
+#ifdef CONFIG_XEN
+#define KEXEC_ARCH_HAS_PAGE_MACROS
+#define kexec_page_to_pfn(page)  pfn_to_mfn(page_to_pfn(page))
+#define kexec_pfn_to_page(pfn)   pfn_to_page(mfn_to_pfn(pfn))
+#define kexec_virt_to_phys(addr) virt_to_machine(addr)
+#define kexec_phys_to_virt(addr) phys_to_virt(machine_to_phys(addr))
+#endif
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* _X86_64_KEXEC_H */
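
Defining KEXEC_ARCH_HAS_PAGE_MACROS makes the generic kexec code use these
overrides instead of the default page_to_pfn()/pfn_to_page()/virt_to_phys()
set, so every address it records (for example in the indirection page) is a
machine address that Xen and the relocation code can consume directly. The
invariant the overrides rely on is that the m2p table inverts the p2m
table; a throwaway check, with the helper name invented for this
illustration:

	/* Illustration only: the round trip the macros above depend on. */
	static int kexec_page_macros_consistent(struct page *page)
	{
		unsigned long mfn = kexec_page_to_pfn(page); /* machine frame */
		return kexec_pfn_to_page(mfn) == page;	/* m2p(p2m(x)) == x */
	}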