--- 0001/arch/x86_64/kernel/crash.c
+++ work/arch/x86_64/kernel/crash.c
@@ -92,6 +92,7 @@ static void crash_save_self(struct pt_re
 	crash_save_this_cpu(regs, cpu);
 }
 
+#ifndef CONFIG_XEN
 #ifdef CONFIG_SMP
 static atomic_t waiting_for_crash_ipi;
 
@@ -156,6 +157,7 @@ static void nmi_shootdown_cpus(void)
 	/* There are no cpus to shootdown */
 }
 #endif
+#endif /* CONFIG_XEN */
 
 void machine_crash_shutdown(struct pt_regs *regs)
 {
@@ -173,6 +175,8 @@ void machine_crash_shutdown(struct pt_re
 
 	/* Make a note of crashing cpu. Will be used in NMI callback.*/
 	crashing_cpu = smp_processor_id();
+
+#ifndef CONFIG_XEN
 	nmi_shootdown_cpus();
 
 	if(cpu_has_apic)
@@ -181,6 +185,6 @@ void machine_crash_shutdown(struct pt_re
 #if defined(CONFIG_X86_IO_APIC)
 	disable_IO_APIC();
 #endif
-
+#endif /* CONFIG_XEN */
 	crash_save_self(regs);
 }
--- 0010/arch/x86_64/kernel/machine_kexec.c
+++ work/arch/x86_64/kernel/machine_kexec.c
@@ -24,6 +24,104 @@ static u64 kexec_pud1[512] PAGE_ALIGNED;
 static u64 kexec_pmd1[512] PAGE_ALIGNED;
 static u64 kexec_pte1[512] PAGE_ALIGNED;
+#ifdef CONFIG_XEN
+
+/* In the case of Xen, override hypervisor functions to be able to create
+ * a regular identity mapping page table...
+ */
+
+#include <xen/interface/kexec.h>
+#include <xen/interface/memory.h>
+
+#define x__pmd(x) ((pmd_t) { (x) } )
+#define x__pud(x) ((pud_t) { (x) } )
+#define x__pgd(x) ((pgd_t) { (x) } )
+
+#define x_pmd_val(x) ((x).pmd)
+#define x_pud_val(x) ((x).pud)
+#define x_pgd_val(x) ((x).pgd)
+
+static inline void x_set_pmd(pmd_t *dst, pmd_t val)
+{
+	x_pmd_val(*dst) = x_pmd_val(val);
+}
+
+static inline void x_set_pud(pud_t *dst, pud_t val)
+{
+	x_pud_val(*dst) = phys_to_machine(x_pud_val(val));
+}
+
+static inline void x_pud_clear (pud_t *pud)
+{
+	x_pud_val(*pud) = 0;
+}
+
+static inline void x_set_pgd(pgd_t *dst, pgd_t val)
+{
+	x_pgd_val(*dst) = phys_to_machine(x_pgd_val(val));
+}
+
+static inline void x_pgd_clear (pgd_t * pgd)
+{
+	x_pgd_val(*pgd) = 0;
+}
+
+#define X__PAGE_KERNEL_LARGE_EXEC \
+	_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_PSE
+#define X_KERNPG_TABLE _PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY
+
+#define __ma(x) (pfn_to_mfn(__pa((x)) >> PAGE_SHIFT) << PAGE_SHIFT)
+
+#if PAGES_NR > KEXEC_XEN_NO_PAGES
+#error PAGES_NR is greater than KEXEC_XEN_NO_PAGES - Xen support will break
+#endif
+
+#if PA_CONTROL_PAGE != 0
+#error PA_CONTROL_PAGE is non zero - Xen support will break
+#endif
+
+void machine_kexec_setup_load_arg(xen_kexec_image_t *xki, struct kimage *image)
+{
+	void *control_page;
+	void *table_page;
+
+	memset(xki->page_list, 0, sizeof(xki->page_list));
+
+	control_page = page_address(image->control_code_page) + PAGE_SIZE;
+	memcpy(control_page, relocate_kernel, PAGE_SIZE);
+
+	table_page = page_address(image->control_code_page);
+
+	xki->page_list[PA_CONTROL_PAGE] = __ma(control_page);
+	xki->page_list[PA_TABLE_PAGE] = __ma(table_page);
+
+	xki->page_list[PA_PGD] = __ma(kexec_pgd);
+	xki->page_list[PA_PUD_0] = __ma(kexec_pud0);
+	xki->page_list[PA_PUD_1] = __ma(kexec_pud1);
+	xki->page_list[PA_PMD_0] = __ma(kexec_pmd0);
+	xki->page_list[PA_PMD_1] = __ma(kexec_pmd1);
+	xki->page_list[PA_PTE_0] = __ma(kexec_pte0);
+	xki->page_list[PA_PTE_1] = __ma(kexec_pte1);
+}
+
+#else /* CONFIG_XEN */
+
+#define x__pmd(x) __pmd(x)
+#define x__pud(x) __pud(x)
+#define x__pgd(x) __pgd(x)
+
+#define x_set_pmd(x, y) set_pmd(x, y)
+#define x_set_pud(x, y) set_pud(x, y)
+#define x_set_pgd(x, y) set_pgd(x, y)
+
+#define x_pud_clear(x) pud_clear(x)
+#define x_pgd_clear(x) pgd_clear(x)
+
+#define X__PAGE_KERNEL_LARGE_EXEC __PAGE_KERNEL_LARGE_EXEC
+#define X_KERNPG_TABLE _KERNPG_TABLE
+
+#endif /* CONFIG_XEN */
+
 static void init_level2_page(pmd_t *level2p, unsigned long addr)
 {
 	unsigned long end_addr;
@@ -31,7 +129,7 @@ static void init_level2_page(pmd_t *leve
 	addr &= PAGE_MASK;
 	end_addr = addr + PUD_SIZE;
 	while (addr < end_addr) {
-		set_pmd(level2p++, __pmd(addr | __PAGE_KERNEL_LARGE_EXEC));
+		x_set_pmd(level2p++, x__pmd(addr | X__PAGE_KERNEL_LARGE_EXEC));
 		addr += PMD_SIZE;
 	}
 }
@@ -56,12 +154,12 @@ static int init_level3_page(struct kimag
 		}
 		level2p = (pmd_t *)page_address(page);
 		init_level2_page(level2p, addr);
-		set_pud(level3p++, __pud(__pa(level2p) | _KERNPG_TABLE));
+		x_set_pud(level3p++, x__pud(__pa(level2p) | X_KERNPG_TABLE));
 		addr += PUD_SIZE;
 	}
 	/* clear the unused entries */
 	while (addr < end_addr) {
-		pud_clear(level3p++);
+		x_pud_clear(level3p++);
 		addr += PUD_SIZE;
 	}
 out:
@@ -92,12 +190,12 @@ static int init_level4_page(struct kimag
 		if (result) {
 			goto out;
 		}
-		set_pgd(level4p++, __pgd(__pa(level3p) | _KERNPG_TABLE));
+		x_set_pgd(level4p++, x__pgd(__pa(level3p) | X_KERNPG_TABLE));
 		addr += PGDIR_SIZE;
 	}
 	/* clear the unused entries */
 	while (addr < end_addr) {
-		pgd_clear(level4p++);
+		x_pgd_clear(level4p++);
 		addr += PGDIR_SIZE;
 	}
 out:
@@ -108,8 +206,14 @@ out:
 static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
 {
 	pgd_t *level4p;
+	unsigned long x_end_pfn = end_pfn;
+
+#ifdef CONFIG_XEN
+	x_end_pfn = HYPERVISOR_memory_op(XENMEM_maximum_ram_page, NULL);
+#endif
+
 	level4p = (pgd_t *)__va(start_pgtable);
-	return init_level4_page(image, level4p, 0, end_pfn << PAGE_SHIFT);
+	return init_level4_page(image, level4p, 0, x_end_pfn << PAGE_SHIFT);
 }
 
 int machine_kexec_prepare(struct kimage *image)
--- 0009/include/asm-x86_64/kexec.h
+++ work/include/asm-x86_64/kexec.h
@@ -91,6 +91,19 @@ relocate_kernel(unsigned long indirectio
 		unsigned long page_list,
 		unsigned long start_address) ATTRIB_NORET;
 
+/* Under Xen we need to work with machine addresses. These macros give the
+ * machine address of a certain page to the generic kexec code instead of
+ * the pseudo physical address which would be given by the default macros.
+ */
+
+#ifdef CONFIG_XEN
+#define KEXEC_ARCH_HAS_PAGE_MACROS
+#define kexec_page_to_pfn(page)  pfn_to_mfn(page_to_pfn(page))
+#define kexec_pfn_to_page(pfn)   pfn_to_page(mfn_to_pfn(pfn))
+#define kexec_virt_to_phys(addr) virt_to_machine(addr)
+#define kexec_phys_to_virt(addr) phys_to_virt(machine_to_phys(addr))
+#endif
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* _X86_64_KEXEC_H */
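
A note on the address handling above: under Xen a domain's __pa() values are
pseudo-physical, while page-table entries and the hypervisor's kexec interface
need machine addresses. That is why x_set_pud()/x_set_pgd() run their values
through phys_to_machine(), and why init_pgtable() sizes the identity map from
XENMEM_maximum_ram_page rather than end_pfn: the machine frames backing the
domain can sit anywhere in host memory, so the map must reach the highest
machine page. The __ma() macro performs the same translation for whole pages;
expanded into a function it would read roughly like the sketch below
(illustrative only, not part of the patch; example_virt_to_machine is a
made-up name):

/* Sketch of what __ma(x) computes: kernel virtual address ->
 * pseudo-physical address -> page frame number -> machine frame
 * number -> machine address. The sub-page offset is dropped, which
 * is harmless here because __ma() is only applied to page-aligned
 * objects such as the PAGE_ALIGNED kexec page tables.
 */
static unsigned long example_virt_to_machine(void *addr)
{
	unsigned long pa  = __pa(addr);			/* pseudo-physical */
	unsigned long mfn = pfn_to_mfn(pa >> PAGE_SHIFT);	/* pfn -> mfn */

	return mfn << PAGE_SHIFT;			/* machine address */
}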
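
The kexec_page_to_pfn()/kexec_virt_to_phys() overrides in kexec.h only take
effect if the generic kexec code tests KEXEC_ARCH_HAS_PAGE_MACROS. A minimal
sketch of the fallback the generic side is expected to provide (an assumption
about the companion generic patch in this series, which is not shown here):

/* kernel/kexec.c (sketch): when the architecture does not override the
 * page/address conversion macros, they collapse to the native helpers,
 * so non-Xen builds behave exactly as before.
 */
#ifndef KEXEC_ARCH_HAS_PAGE_MACROS
#define kexec_page_to_pfn(page)   page_to_pfn(page)
#define kexec_pfn_to_page(pfn)    pfn_to_page(pfn)
#define kexec_virt_to_phys(addr)  virt_to_phys(addr)
#define kexec_phys_to_virt(addr)  phys_to_virt(addr)
#endif

With the Xen variants in place, the generic loader works in machine frame
numbers throughout, matching what machine_kexec_setup_load_arg() stores in
xki->page_list via __ma().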