#include <xen/config.h>
#include <xen/init.h>
#include <xen/lib.h>
#include <xen/sched.h>
#include <xen/domain.h>
#include <xen/serial.h>
#include <xen/softirq.h>
#include <xen/acpi.h>
#include <xen/console.h>
#include <xen/trace.h>
#include <xen/multiboot.h>
#include <xen/domain_page.h>
#include <xen/version.h>
#include <xen/gdbstub.h>
#include <xen/percpu.h>
#include <xen/hypercall.h>
#include <xen/keyhandler.h>
#include <xen/numa.h>
#include <xen/rcupdate.h>
#include <xen/kexec.h>
#include <public/version.h>
#ifdef CONFIG_COMPAT
#include <compat/platform.h>
#include <compat/xen.h>
#endif
#include <asm/bitops.h>
#include <asm/smp.h>
#include <asm/processor.h>
#include <asm/mpspec.h>
#include <asm/apic.h>
#include <asm/desc.h>
#include <asm/paging.h>
#include <asm/e820.h>
#include <asm/edd.h>
#include <asm/tboot.h>
#include <xsm/xsm.h>

#if defined(CONFIG_X86_64)
#define BOOTSTRAP_DIRECTMAP_END (1UL << 32) /* 4GB */
#define maddr_to_bootstrap_virt(m) maddr_to_virt(m)
#else
#define BOOTSTRAP_DIRECTMAP_END (1UL << 30) /* 1GB */
#define maddr_to_bootstrap_virt(m) ((void *)(long)(m))
#endif

extern void generic_apic_probe(void);
extern void numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn);

extern u16 boot_edid_caps;
extern u8 boot_edid_info[128];
extern struct boot_video_info boot_vid_info;

/*
 * opt_xenheap_megabytes: Size of Xen heap in megabytes, excluding the
 * page_info table and allocation bitmap.
 */
static unsigned int opt_xenheap_megabytes = XENHEAP_DEFAULT_MB;
#if defined(CONFIG_X86_64)
integer_param("xenheap_megabytes", opt_xenheap_megabytes);
#endif

/* opt_nosmp: If true, secondary processors are ignored. */
static int opt_nosmp = 0;
boolean_param("nosmp", opt_nosmp);

/* maxcpus: maximum number of CPUs to activate. */
static unsigned int max_cpus = NR_CPUS;
integer_param("maxcpus", max_cpus);

/* opt_watchdog: If true, run a watchdog NMI on each processor. */
static int opt_watchdog = 0;
boolean_param("watchdog", opt_watchdog);

/* **** Linux config option: propagated to domain0. */
/* "acpi=off":    Disables both ACPI table parsing and interpreter. */
/* "acpi=force":  Override the disable blacklist.                   */
/* "acpi=strict": Disables out-of-spec workarounds.                 */
/* "acpi=ht":     Limit ACPI just to boot-time to enable HT.        */
/* "acpi=noirq":  Disables ACPI interrupt routing.                  */
static void parse_acpi_param(char *s);
custom_param("acpi", parse_acpi_param);

/* **** Linux config option: propagated to domain0. */
/* acpi_skip_timer_override: Skip IRQ0 overrides. */
extern int acpi_skip_timer_override;
boolean_param("acpi_skip_timer_override", acpi_skip_timer_override);

/* **** Linux config option: propagated to domain0. */
/* noapic: Disable IOAPIC setup. */
extern int skip_ioapic_setup;
boolean_param("noapic", skip_ioapic_setup);

int early_boot = 1;

cpumask_t cpu_present_map;

unsigned long xen_phys_start;

/* Limits of Xen heap, used to initialise the allocator. */
unsigned long xenheap_phys_start, xenheap_phys_end;

extern void arch_init_memory(void);
extern void init_IRQ(void);
extern void early_time_init(void);
extern void early_cpu_init(void);
extern void vesa_init(void);
extern void vesa_mtrr_init(void);

struct tss_struct init_tss[NR_CPUS];

char __attribute__ ((__section__(".bss.stack_aligned"))) cpu0_stack[STACK_SIZE];

struct cpuinfo_x86 boot_cpu_data = { 0, 0, 0, 0, -1 };

#if CONFIG_PAGING_LEVELS > 2
unsigned long mmu_cr4_features = X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE;
#else
unsigned long mmu_cr4_features = X86_CR4_PSE;
#endif
EXPORT_SYMBOL(mmu_cr4_features);

int acpi_disabled;

int acpi_force;
char acpi_param[10] = "";

static void __init parse_acpi_param(char *s)
{
    /* Save the parameter so it can be propagated to domain0. */
    safe_strcpy(acpi_param, s);

    /* Interpret the parameter for use within Xen. */
    if ( !strcmp(s, "off") )
    {
        disable_acpi();
    }
    else if ( !strcmp(s, "force") )
    {
        acpi_force = 1;
        acpi_ht = 1;
        acpi_disabled = 0;
    }
    else if ( !strcmp(s, "strict") )
    {
        acpi_strict = 1;
    }
    else if ( !strcmp(s, "ht") )
    {
        if ( !acpi_force )
            disable_acpi();
        acpi_ht = 1;
    }
    else if ( !strcmp(s, "noirq") )
    {
        acpi_noirq_set();
    }
}
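/*
 * Example: booting with "acpi=off" saves "off" in acpi_param[], and
 * __start_xen() below appends " acpi=off" to dom0's command line whenever
 * that command line does not already contain an "acpi=" option, so the
 * guest kernel sees the same ACPI policy as the hypervisor.
 */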
static void __init parse_video_info(void)
{
    struct boot_video_info *bvi = &bootsym(boot_vid_info);

    if ( (bvi->orig_video_isVGA == 1) && (bvi->orig_video_mode == 3) )
    {
        vga_console_info.video_type = XEN_VGATYPE_TEXT_MODE_3;
        vga_console_info.u.text_mode_3.font_height = bvi->orig_video_points;
        vga_console_info.u.text_mode_3.cursor_x = bvi->orig_x;
        vga_console_info.u.text_mode_3.cursor_y = bvi->orig_y;
        vga_console_info.u.text_mode_3.rows = bvi->orig_video_lines;
        vga_console_info.u.text_mode_3.columns = bvi->orig_video_cols;
    }
    else if ( bvi->orig_video_isVGA == 0x23 )
    {
        vga_console_info.video_type = XEN_VGATYPE_VESA_LFB;
        vga_console_info.u.vesa_lfb.width = bvi->lfb_width;
        vga_console_info.u.vesa_lfb.height = bvi->lfb_height;
        vga_console_info.u.vesa_lfb.bytes_per_line = bvi->lfb_linelength;
        vga_console_info.u.vesa_lfb.bits_per_pixel = bvi->lfb_depth;
        vga_console_info.u.vesa_lfb.lfb_base = bvi->lfb_base;
        vga_console_info.u.vesa_lfb.lfb_size = bvi->lfb_size;
        vga_console_info.u.vesa_lfb.red_pos = bvi->red_pos;
        vga_console_info.u.vesa_lfb.red_size = bvi->red_size;
        vga_console_info.u.vesa_lfb.green_pos = bvi->green_pos;
        vga_console_info.u.vesa_lfb.green_size = bvi->green_size;
        vga_console_info.u.vesa_lfb.blue_pos = bvi->blue_pos;
        vga_console_info.u.vesa_lfb.blue_size = bvi->blue_size;
        vga_console_info.u.vesa_lfb.rsvd_pos = bvi->rsvd_pos;
        vga_console_info.u.vesa_lfb.rsvd_size = bvi->rsvd_size;
        vga_console_info.u.vesa_lfb.gbl_caps = bvi->capabilities;
        vga_console_info.u.vesa_lfb.mode_attrs = bvi->vesa_attrib;
    }
}

void __init kexec_reserve_area(struct e820map *e820)
{
    unsigned long kdump_start = kexec_crash_area.start;
    unsigned long kdump_size  = kexec_crash_area.size;
    static int is_reserved = 0;

    kdump_size = (kdump_size + PAGE_SIZE - 1) & PAGE_MASK;

    if ( (kdump_start == 0) || (kdump_size == 0) || is_reserved )
        return;

    is_reserved = 1;

    if ( !reserve_e820_ram(e820, kdump_start, kdump_size) )
    {
        printk("Kdump: DISABLED (failed to reserve %luMB (%lukB) at 0x%lx)"
               "\n", kdump_size >> 20, kdump_size >> 10, kdump_start);
        kexec_crash_area.start = kexec_crash_area.size = 0;
    }
    else
    {
        printk("Kdump: %luMB (%lukB) at 0x%lx\n",
               kdump_size >> 20, kdump_size >> 10, kdump_start);
    }
}

void init_done(void)
{
    extern char __init_begin[], __init_end[];

    /* Free (or page-protect) the init areas. */
#ifndef MEMORY_GUARD
    init_xenheap_pages(__pa(__init_begin), __pa(__init_end));
#endif
    memguard_guard_range(__init_begin, __init_end - __init_begin);
#if defined(CONFIG_X86_64)
    /* Also zap the mapping in the 1:1 area. */
    memguard_guard_range(__va(__pa(__init_begin)), __init_end - __init_begin);
#endif
    printk("Freed %ldkB init memory.\n", (long)(__init_end-__init_begin)>>10);

    startup_cpu_idle_loop();
}

/*
 * Skip the image name (the first whitespace-delimited token) and any
 * surrounding spaces, returning the start of the options proper.
 */
static char * __init cmdline_cook(char *p)
{
    p = p ? : "";
    while ( *p == ' ' )
        p++;
    while ( (*p != ' ') && (*p != '\0') )
        p++;
    while ( *p == ' ' )
        p++;
    return p;
}

void __init __start_xen(unsigned long mbi_p)
{
    char *memmap_type = NULL;
    char *cmdline, *kextra;
    unsigned long _initrd_start = 0, _initrd_len = 0;
    unsigned int initrdidx = 1;
    multiboot_info_t *mbi = __va(mbi_p);
    module_t *mod = (module_t *)__va(mbi->mods_addr);
    unsigned long nr_pages, modules_length;
    int i, e820_warn = 0, bytes = 0;
    struct ns16550_defaults ns16550 = {
        .data_bits = 8,
        .parity    = 'n',
        .stop_bits = 1
    };

    extern void early_page_fault(void);
    set_intr_gate(TRAP_page_fault, &early_page_fault);

    /* Parse the command-line options. */
    cmdline = cmdline_cook((mbi->flags & MBI_CMDLINE) ?
                           __va(mbi->cmdline) : NULL);
    if ( (kextra = strstr(cmdline, " -- ")) != NULL )
    {
        /*
         * Options after ' -- ' separator belong to dom0.
         *  1. Orphan dom0's options from Xen's command line.
         *  2. Skip all but final leading space from dom0's options.
         */
        *kextra = '\0';
        kextra += 3;
        while ( kextra[1] == ' ' ) kextra++;
    }
    cmdline_parse(cmdline);

    parse_video_info();

    set_current((struct vcpu *)0xfffff000); /* debug sanity */
    idle_vcpu[0] = current;
    set_processor_id(0); /* needed early, for smp_processor_id() */
    if ( cpu_has_efer )
        rdmsrl(MSR_EFER, this_cpu(efer));
    asm volatile ( "mov %%cr4,%0" : "=r" (this_cpu(cr4)) );

    smp_prepare_boot_cpu();

    /* We initialise the serial devices very early so we can get debugging. */
    ns16550.io_base = 0x3f8;
    ns16550.irq     = 4;
    ns16550_init(0, &ns16550);
    ns16550.io_base = 0x2f8;
    ns16550.irq     = 3;
    ns16550_init(1, &ns16550);
    serial_init_preirq();

    init_console();

    printk("Command line: %s\n", cmdline);
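    /*
     * From here on printk() output reaches the consoles configured above;
     * e.g. booting with "console=com1 com1=115200,8n1" (illustrative
     * settings) routes it through the first UART at the standard COM1 base
     * initialised just above.
     */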
    printk("Video information:\n");

    /* Print VGA display mode information. */
    switch ( vga_console_info.video_type )
    {
    case XEN_VGATYPE_TEXT_MODE_3:
        printk(" VGA is text mode %dx%d, font 8x%d\n",
               vga_console_info.u.text_mode_3.columns,
               vga_console_info.u.text_mode_3.rows,
               vga_console_info.u.text_mode_3.font_height);
        break;
    case XEN_VGATYPE_VESA_LFB:
        printk(" VGA is graphics mode %dx%d, %d bpp\n",
               vga_console_info.u.vesa_lfb.width,
               vga_console_info.u.vesa_lfb.height,
               vga_console_info.u.vesa_lfb.bits_per_pixel);
        break;
    default:
        printk(" No VGA detected\n");
        break;
    }

    /* Print VBE/DDC EDID information. */
    if ( bootsym(boot_edid_caps) != 0x1313 )
    {
        u16 caps = bootsym(boot_edid_caps);
        printk(" VBE/DDC methods:%s%s%s; ",
               (caps & 1) ? " V1" : "",
               (caps & 2) ? " V2" : "",
               !(caps & 3) ? " none" : "");
        printk("EDID transfer time: %d seconds\n", caps >> 8);
        if ( *(u32 *)bootsym(boot_edid_info) == 0x13131313 )
        {
            printk(" EDID info not retrieved because ");
            if ( !(caps & 3) )
                printk("no DDC retrieval method detected\n");
            else if ( (caps >> 8) > 5 )
                printk("takes longer than 5 seconds\n");
            else
                printk("of reasons unknown\n");
        }
    }

    printk("Disc information:\n");
    printk(" Found %d MBR signatures\n",
           bootsym(boot_mbr_signature_nr));
    printk(" Found %d EDD information structures\n",
           bootsym(boot_edd_info_nr));

    /* Check that we have at least one Multiboot module. */
    if ( !(mbi->flags & MBI_MODULES) || (mbi->mods_count == 0) )
        EARLY_FAIL("dom0 kernel not specified. "
                   "Check bootloader configuration.\n");

    if ( ((unsigned long)cpu0_stack & (STACK_SIZE-1)) != 0 )
        EARLY_FAIL("Misaligned CPU0 stack.\n");

    /*
     * Since there are some stubs getting built on the stacks which use
     * direct calls/jumps, the heap must be confined to the lower 2G so
     * that those branches can reach their targets.
     */
    if ( opt_xenheap_megabytes > 2048 )
        opt_xenheap_megabytes = 2048;

    if ( e820_raw_nr != 0 )
    {
        memmap_type = "Xen-e820";
    }
    else if ( bootsym(lowmem_kb) )
    {
        memmap_type = "Xen-e801";
        e820_raw[0].addr = 0;
        e820_raw[0].size = bootsym(lowmem_kb) << 10;
        e820_raw[0].type = E820_RAM;
        e820_raw[1].addr = 0x100000;
        e820_raw[1].size = bootsym(highmem_kb) << 10;
        e820_raw[1].type = E820_RAM;
        e820_raw_nr = 2;
    }
    else if ( mbi->flags & MBI_MEMMAP )
    {
        memmap_type = "Multiboot-e820";
        while ( (bytes < mbi->mmap_length) && (e820_raw_nr < E820MAX) )
        {
            memory_map_t *map = __va(mbi->mmap_addr + bytes);

            /*
             * This is a gross workaround for a BIOS bug. Some bootloaders do
             * not write e820 map entries into pre-zeroed memory. This is
             * okay if the BIOS fills in all fields of the map entry, but
             * some broken BIOSes do not bother to write the high word of
             * the length field if the length is smaller than 4GB. We
             * detect and fix this by flagging sections below 4GB that
             * appear to be larger than 4GB in size.
             */
            if ( (map->base_addr_high == 0) && (map->length_high != 0) )
            {
                if ( !e820_warn )
                {
                    printk("WARNING: Buggy e820 map detected and fixed "
                           "(truncated length fields).\n");
                    e820_warn = 1;
                }
                map->length_high = 0;
            }

            e820_raw[e820_raw_nr].addr =
                ((u64)map->base_addr_high << 32) | (u64)map->base_addr_low;
            e820_raw[e820_raw_nr].size =
                ((u64)map->length_high << 32) | (u64)map->length_low;
            e820_raw[e820_raw_nr].type = map->type;
            e820_raw_nr++;

            bytes += map->size + 4;
        }
    }
    else if ( mbi->flags & MBI_MEMLIMITS )
    {
        memmap_type = "Multiboot-e801";
        e820_raw[0].addr = 0;
        e820_raw[0].size = mbi->mem_lower << 10;
        e820_raw[0].type = E820_RAM;
        e820_raw[1].addr = 0x100000;
        e820_raw[1].size = mbi->mem_upper << 10;
        e820_raw[1].type = E820_RAM;
        e820_raw_nr = 2;
    }
    else
    {
        EARLY_FAIL("Bootloader provided no memory information.\n");
    }

    /* Sanitise the raw E820 map to produce a final clean version. */
    max_page = init_e820(memmap_type, e820_raw, &e820_raw_nr);
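    /*
     * Illustrative raw map (assumed values): the e801 fallback above, on a
     * machine reporting 639kB of low memory and 2GB above 1MB, yields the
     * two-entry raw map
     *   [0x000000000, 0x00009fc00) E820_RAM
     *   [0x000100000, 0x080100000) E820_RAM
     * which init_e820() then sanitises (sorting, merging and clipping
     * entries) before deriving max_page from the highest RAM address.
     */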
    /* Create a temporary copy of the E820 map. */
    memcpy(&boot_e820, &e820, sizeof(e820));

    /* Early kexec reservation (explicit static start address). */
    kexec_reserve_area(&boot_e820);

    /*
     * Iterate backwards over all superpage-aligned RAM regions.
     *
     * We require superpage alignment because the boot allocator is not yet
     * initialised. Hence we can only map superpages in the address range
     * 0 to BOOTSTRAP_DIRECTMAP_END, as this is guaranteed not to require
     * dynamic allocation of pagetables.
     *
     * As well as mapping superpages in that range, in preparation for
     * initialising the boot allocator, we also look for a region to which
     * we can relocate the dom0 kernel and other multiboot modules. Also, on
     * x86/64, we relocate Xen to higher memory.
     */
    modules_length = mod[mbi->mods_count-1].mod_end - mod[0].mod_start;
    for ( i = boot_e820.nr_map-1; i >= 0; i-- )
    {
        uint64_t s, e, mask = (1UL << L2_PAGETABLE_SHIFT) - 1;

        /* Superpage-aligned chunks from 16MB to BOOTSTRAP_DIRECTMAP_END. */
        s = (boot_e820.map[i].addr + mask) & ~mask;
        e = (boot_e820.map[i].addr + boot_e820.map[i].size) & ~mask;
        s = max_t(uint64_t, s, 16 << 20);
        e = min_t(uint64_t, e, BOOTSTRAP_DIRECTMAP_END);
        if ( (boot_e820.map[i].type != E820_RAM) || (s >= e) )
            continue;

        /* Map the chunk. No memory will need to be allocated to do this. */
        map_pages_to_xen(
            (unsigned long)maddr_to_bootstrap_virt(s),
            s >> PAGE_SHIFT, (e-s) >> PAGE_SHIFT, PAGE_HYPERVISOR);
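        /*
         * Worked example of the rounding above (assuming 2MB superpages,
         * i.e. mask == 0x1fffff): a RAM entry [0xfe0000, 0x7ff00000) gives
         * s = 0x1000000 (rounded up, which here coincides with the 16MB
         * floor) and e = 0x7fe00000 (rounded down), so only whole
         * superpages inside the region are ever mapped.
         */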
#if defined(CONFIG_X86_64)
        /* Is the region suitable for relocating Xen? */
        if ( !xen_phys_start && (((e-s) >> 20) >= opt_xenheap_megabytes) )
        {
            extern l2_pgentry_t l2_xenmap[];
            l4_pgentry_t *pl4e;
            l3_pgentry_t *pl3e;
            l2_pgentry_t *pl2e;
            int i, j;

            /* Select relocation address. */
            e = (e - (opt_xenheap_megabytes << 20)) & ~mask;
            xen_phys_start = e;
            bootsym(trampoline_xen_phys_start) = e;

            /*
             * Perform relocation to new physical address.
             * Before doing so we must sync static/global data with main
             * memory with a barrier(). After this we must *not* modify
             * static/global data until after we have switched to the
             * relocated pagetables!
             */
            barrier();
            move_memory(e, 0, __pa(&_end) - xen_phys_start);

            /* Poison low 1MB to detect stray pointers to physical 0-1MB. */
            memset(maddr_to_bootstrap_virt(e), 0x55, 1U << 20);

            /* Walk initial pagetables, relocating page directory entries. */
            pl4e = __va(__pa(idle_pg_table));
            for ( i = 0 ; i < L4_PAGETABLE_ENTRIES; i++, pl4e++ )
            {
                if ( !(l4e_get_flags(*pl4e) & _PAGE_PRESENT) )
                    continue;
                *pl4e = l4e_from_intpte(l4e_get_intpte(*pl4e) +
                                        xen_phys_start);
                pl3e = l4e_to_l3e(*pl4e);
                for ( j = 0; j < L3_PAGETABLE_ENTRIES; j++, pl3e++ )
                {
                    /* Not present or already relocated? */
                    if ( !(l3e_get_flags(*pl3e) & _PAGE_PRESENT) ||
                         (l3e_get_pfn(*pl3e) > 0x1000) )
                        continue;
                    *pl3e = l3e_from_intpte(l3e_get_intpte(*pl3e) +
                                            xen_phys_start);
                }
            }

            /* The only data mappings to be relocated are in the Xen area. */
            pl2e = __va(__pa(l2_xenmap));
            for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++, pl2e++ )
            {
                if ( !(l2e_get_flags(*pl2e) & _PAGE_PRESENT) )
                    continue;
                *pl2e = l2e_from_intpte(l2e_get_intpte(*pl2e) +
                                        xen_phys_start);
            }

            /* Re-sync the stack and then switch to relocated pagetables. */
            asm volatile (
                "rep movsb        ; " /* re-sync the stack */
                "movq %%cr4,%%rsi ; "
                "andb $0x7f,%%sil ; "
                "movq %%rsi,%%cr4 ; " /* CR4.PGE == 0 */
                "movq %0,%%cr3    ; " /* CR3 == new pagetables */
                "orb $0x80,%%sil  ; "
                "movq %%rsi,%%cr4   " /* CR4.PGE == 1 */
                : : "r" (__pa(idle_pg_table)), "S" (cpu0_stack),
                "D" (__va(__pa(cpu0_stack))), "c" (STACK_SIZE) : "memory" );
        }
#endif

        /* Is the region suitable for relocating the multiboot modules? */
        if ( !initial_images_start && (s < e) && ((e-s) >= modules_length) )
        {
            initial_images_end = e;
            e = (e - modules_length) & PAGE_MASK;
            initial_images_start = e;
            move_memory(initial_images_start,
                        mod[0].mod_start, mod[mbi->mods_count-1].mod_end);
        }

        if ( !kexec_crash_area.start && (s < e) &&
             ((e-s) >= kexec_crash_area.size) )
        {
            e = (e - kexec_crash_area.size) & PAGE_MASK;
            kexec_crash_area.start = e;
        }
    }

    if ( !initial_images_start )
        EARLY_FAIL("Not enough memory to relocate the dom0 kernel image.\n");
    reserve_e820_ram(&boot_e820, initial_images_start, initial_images_end);

    /* Initialise Xen heap and boot heap. */
    xenheap_phys_start = init_boot_allocator(__pa(&_end));
    xenheap_phys_end   = opt_xenheap_megabytes << 20;
#if defined(CONFIG_X86_64)
    if ( !xen_phys_start )
        EARLY_FAIL("Not enough memory to relocate Xen.\n");
    xenheap_phys_end += xen_phys_start;
    reserve_e820_ram(&boot_e820, xen_phys_start,
                     xen_phys_start + (opt_xenheap_megabytes<<20));
#endif

    /* Late kexec reservation (dynamic start address). */
    kexec_reserve_area(&boot_e820);

    /*
     * With the boot allocator now initialised, we can walk every RAM region
     * and map it in its entirety (on x86/64, at least) and notify it to the
     * boot allocator.
     */
    for ( i = 0; i < boot_e820.nr_map; i++ )
    {
        uint64_t s, e, map_s, map_e, mask = PAGE_SIZE - 1;

        /* Only page alignment required now. */
        s = (boot_e820.map[i].addr + mask) & ~mask;
        e = (boot_e820.map[i].addr + boot_e820.map[i].size) & ~mask;
#if defined(CONFIG_X86_32)
        s = max_t(uint64_t, s, xenheap_phys_end);
#else
        s = max_t(uint64_t, s, 1<<20);
#endif
        if ( (boot_e820.map[i].type != E820_RAM) || (s >= e) )
            continue;

        /* Need to create mappings above 16MB. */
        map_s = max_t(uint64_t, s, 16<<20);
        map_e = e;
#if defined(CONFIG_X86_32) /* mappings are truncated on x86_32 */
        map_e = min_t(uint64_t, map_e, BOOTSTRAP_DIRECTMAP_END);
#endif

        /* Pass mapped memory to allocator /before/ creating new mappings. */
        init_boot_pages(s, min_t(uint64_t, map_s, e));

        /* Create new mappings /before/ passing memory to the allocator. */
        if ( map_s < map_e )
            map_pages_to_xen(
                (unsigned long)maddr_to_bootstrap_virt(map_s),
                map_s >> PAGE_SHIFT, (map_e-map_s) >> PAGE_SHIFT,
                PAGE_HYPERVISOR);

        /* Pass remainder of this memory chunk to the allocator. */
        init_boot_pages(map_s, e);
    }
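    /*
     * Example of the ordering above (assumed chunk): for a RAM region
     * [0x500000, 0x40000000) on x86/64, map_s = 0x1000000, so [5MB,16MB)
     * is handed to the boot allocator first (that range is already
     * mapped), then [16MB,1GB) is mapped, and only then is [16MB,1GB)
     * handed to the allocator as well.
     */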
    memguard_init();

    nr_pages = 0;
    for ( i = 0; i < e820.nr_map; i++ )
        if ( e820.map[i].type == E820_RAM )
            nr_pages += e820.map[i].size >> PAGE_SHIFT;
    printk("System RAM: %luMB (%lukB)\n",
           nr_pages >> (20 - PAGE_SHIFT),
           nr_pages << (PAGE_SHIFT - 10));
    total_pages = nr_pages;

    /* Sanity check for unwanted bloat of certain hypercall structures. */
    BUILD_BUG_ON(sizeof(((struct xen_platform_op *)0)->u) !=
                 sizeof(((struct xen_platform_op *)0)->u.pad));
    BUILD_BUG_ON(sizeof(((struct xen_domctl *)0)->u) !=
                 sizeof(((struct xen_domctl *)0)->u.pad));
    BUILD_BUG_ON(sizeof(((struct xen_sysctl *)0)->u) !=
                 sizeof(((struct xen_sysctl *)0)->u.pad));

    BUILD_BUG_ON(sizeof(start_info_t) > PAGE_SIZE);
    BUILD_BUG_ON(sizeof(shared_info_t) > PAGE_SIZE);
    BUILD_BUG_ON(sizeof(struct vcpu_info) != 64);

#ifdef CONFIG_COMPAT
    BUILD_BUG_ON(sizeof(((struct compat_platform_op *)0)->u) !=
                 sizeof(((struct compat_platform_op *)0)->u.pad));
    BUILD_BUG_ON(sizeof(start_info_compat_t) > PAGE_SIZE);
    BUILD_BUG_ON(sizeof(struct compat_vcpu_info) != 64);
#endif

    /* Check definitions in public headers match internal defs. */
    BUILD_BUG_ON(__HYPERVISOR_VIRT_START != HYPERVISOR_VIRT_START);
#ifdef HYPERVISOR_VIRT_END
    BUILD_BUG_ON(__HYPERVISOR_VIRT_END != HYPERVISOR_VIRT_END);
#endif
    BUILD_BUG_ON(MACH2PHYS_VIRT_START != RO_MPT_VIRT_START);
    BUILD_BUG_ON(MACH2PHYS_VIRT_END   != RO_MPT_VIRT_END);

    init_frametable();

    acpi_boot_table_init();

    acpi_numa_init();

    numa_initmem_init(0, max_page);

    /* Initialise the Xen heap, skipping RAM holes. */
    init_xenheap_pages(xenheap_phys_start, xenheap_phys_end);
    nr_pages = (xenheap_phys_end - xenheap_phys_start) >> PAGE_SHIFT;
#ifdef __x86_64__
    init_xenheap_pages(xen_phys_start, __pa(&_start));
    nr_pages += (__pa(&_start) - xen_phys_start) >> PAGE_SHIFT;
    vesa_init();
#endif
    xenheap_phys_start = xen_phys_start;
    printk("Xen heap: %luMB (%lukB)\n",
           nr_pages >> (20 - PAGE_SHIFT),
           nr_pages << (PAGE_SHIFT - 10));

    end_boot_allocator();
    early_boot = 0;

    early_cpu_init();

    paging_init();

    tboot_probe();

    /* Unmap the first page of CPU0's stack. */
    memguard_guard_stack(cpu0_stack);

    open_softirq(NEW_TLBFLUSH_CLOCK_PERIOD_SOFTIRQ, new_tlbflush_clock_period);

    if ( opt_watchdog )
        nmi_watchdog = NMI_LOCAL_APIC;

    sort_exception_tables();

    find_smp_config();

    dmi_scan_machine();

    generic_apic_probe();

    acpi_boot_init();

    init_cpu_to_node();

    if ( smp_found_config )
        get_smp_config();

#ifdef CONFIG_X86_64
    /* Low mappings were only needed for some BIOS table parsing. */
    zap_low_mappings();
#endif

    init_apic_mappings();

    init_IRQ();

    percpu_init_areas();

    xsm_init(&initrdidx, mbi, initial_images_start);

    init_idle_domain();

    trap_init();

    rcu_init();

    timer_init();

    early_time_init();

    arch_init_memory();

    identify_cpu(&boot_cpu_data);
    if ( cpu_has_fxsr )
        set_in_cr4(X86_CR4_OSFXSR);
    if ( cpu_has_xmm )
        set_in_cr4(X86_CR4_OSXMMEXCPT);

#ifdef CONFIG_X86_64
    vesa_mtrr_init();
#endif

    if ( opt_nosmp )
        max_cpus = 0;

    smp_prepare_cpus(max_cpus);

    /*
     * Initialise higher-level timer functions. We do this fairly late
     * (post-SMP) because the time bases and scale factors need to be
     * updated regularly, and SMP initialisation can cause a long delay
     * with interrupts not yet enabled.
     */
    init_xen_time();

    initialize_keytable();

    serial_init_postirq();

    BUG_ON(!local_irq_is_enabled());

    for_each_present_cpu ( i )
    {
        if ( num_online_cpus() >= max_cpus )
            break;
        if ( !cpu_online(i) )
        {
            rcu_online_cpu(i);
            __cpu_up(i);
        }

        /* Set up cpu_to_node[]. */
        srat_detect_node(i);
        /* Set up node_to_cpumask based on cpu_to_node[]. */
        numa_add_cpu(i);
    }
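    /*
     * Note: with "maxcpus=N" the loop above stops as soon as N CPUs are
     * online; "nosmp" forced max_cpus to 0 earlier, so in that case no
     * secondary processor is brought up at all.
     */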
    printk("Brought up %ld CPUs\n", (long)num_online_cpus());
    smp_cpus_done(max_cpus);

    initialise_gdb(); /* could be moved earlier */

    do_initcalls();

    if ( opt_watchdog )
        watchdog_enable();

    /* Create initial domain 0. */
    dom0 = domain_create(0, 0, DOM0_SSIDREF);
    if ( (dom0 == NULL) || (alloc_vcpu(dom0, 0, 0) == NULL) )
        panic("Error creating domain 0\n");

    dom0->is_privileged = 1;

    /* Grab the DOM0 command line. */
    cmdline = (char *)(mod[0].string ? __va(mod[0].string) : NULL);
    if ( (cmdline != NULL) || (kextra != NULL) )
    {
        static char dom0_cmdline[MAX_GUEST_CMDLINE];

        cmdline = cmdline_cook(cmdline);
        safe_strcpy(dom0_cmdline, cmdline);

        if ( kextra != NULL )
            /* kextra always includes exactly one leading space. */
            safe_strcat(dom0_cmdline, kextra);

        /* Append any extra parameters. */
        if ( skip_ioapic_setup && !strstr(dom0_cmdline, "noapic") )
            safe_strcat(dom0_cmdline, " noapic");
        if ( acpi_skip_timer_override &&
             !strstr(dom0_cmdline, "acpi_skip_timer_override") )
            safe_strcat(dom0_cmdline, " acpi_skip_timer_override");
        if ( (strlen(acpi_param) == 0) && acpi_disabled )
        {
            printk("ACPI is disabled, notifying Domain 0 (acpi=off)\n");
            safe_strcpy(acpi_param, "off");
        }
        if ( (strlen(acpi_param) != 0) && !strstr(dom0_cmdline, "acpi=") )
        {
            safe_strcat(dom0_cmdline, " acpi=");
            safe_strcat(dom0_cmdline, acpi_param);
        }

        cmdline = dom0_cmdline;
    }

    if ( (initrdidx > 0) && (initrdidx < mbi->mods_count) )
    {
        _initrd_start = initial_images_start +
            (mod[initrdidx].mod_start - mod[0].mod_start);
        _initrd_len   = mod[initrdidx].mod_end - mod[initrdidx].mod_start;
    }

    iommu_setup();

    amd_iommu_detect();

    /*
     * We're going to setup domain0 using the module(s) that we stashed
     * safely above our heap. The second module, if present, is an initrd
     * ramdisk.
     */
    if ( construct_dom0(dom0,
                        initial_images_start,
                        mod[0].mod_end-mod[0].mod_start,
                        _initrd_start,
                        _initrd_len,
                        cmdline) != 0)
        panic("Could not set up DOM0 guest OS\n");

    /* Scrub RAM that is still free and so may go to an unprivileged domain. */
    scrub_heap_pages();

    init_trace_bufs();

    console_endboot();

    /* Hide UART from DOM0 if we're using it. */
    serial_endboot();

    domain_unpause_by_systemcontroller(dom0);

    reset_stack_and_jump(init_done);
}
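/*
 * Example output (derived from the cases below, assuming a 64-bit build
 * with CONFIG_COMPAT and HVM support enabled): arch_get_xen_caps() fills
 * info with
 *   "xen-3.0-x86_64 xen-3.0-x86_32p hvm-3.0-x86_32 hvm-3.0-x86_32p hvm-3.0-x86_64 "
 */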
void arch_get_xen_caps(xen_capabilities_info_t *info)
{
    /* Interface name is always xen-3.0-* for Xen-3.x. */
    int major = 3, minor = 0;
    char s[32];

    (*info)[0] = '\0';

#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)

    snprintf(s, sizeof(s), "xen-%d.%d-x86_32 ", major, minor);
    safe_strcat(*info, s);
    if ( hvm_enabled )
    {
        snprintf(s, sizeof(s), "hvm-%d.%d-x86_32 ", major, minor);
        safe_strcat(*info, s);
    }

#elif defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)

    snprintf(s, sizeof(s), "xen-%d.%d-x86_32p ", major, minor);
    safe_strcat(*info, s);
    if ( hvm_enabled )
    {
        snprintf(s, sizeof(s), "hvm-%d.%d-x86_32 ", major, minor);
        safe_strcat(*info, s);
        snprintf(s, sizeof(s), "hvm-%d.%d-x86_32p ", major, minor);
        safe_strcat(*info, s);
    }

#elif defined(CONFIG_X86_64)

    snprintf(s, sizeof(s), "xen-%d.%d-x86_64 ", major, minor);
    safe_strcat(*info, s);
#ifdef CONFIG_COMPAT
    snprintf(s, sizeof(s), "xen-%d.%d-x86_32p ", major, minor);
    safe_strcat(*info, s);
#endif
    if ( hvm_enabled )
    {
        snprintf(s, sizeof(s), "hvm-%d.%d-x86_32 ", major, minor);
        safe_strcat(*info, s);
        snprintf(s, sizeof(s), "hvm-%d.%d-x86_32p ", major, minor);
        safe_strcat(*info, s);
        snprintf(s, sizeof(s), "hvm-%d.%d-x86_64 ", major, minor);
        safe_strcat(*info, s);
    }

#endif
}

/*
 * Local variables:
 * mode: C
 * c-set-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */