/******************************************************************************
 * arch/x86/x86_64/mm.c
 *
 * Modifications to Linux original are copyright (c) 2004, K A Fraser
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

/*
 * NOTE(review): the header names after each #include were lost when this
 * file was extracted (the <...> targets were stripped, presumably by an
 * HTML-tag filter).  Restore the <xen/...>, <asm/...> and <public/...>
 * includes from the upstream Xen tree before this file can build.
 */
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

/*
 * Allocate one page for use as a Xen page table.
 * Before the heap is fully up (early_boot != 0) the page comes from the
 * boot-page allocator; afterwards it comes from the domain heap.
 * The boot path returns NULL when alloc_boot_pages() yields pfn 0;
 * the domheap path returns whatever alloc_domheap_page() returns
 * (presumably NULL on failure — callers below do not check).
 */
struct page_info *alloc_xen_pagetable(void)
{
    extern int early_boot;
    unsigned long pfn;

    if ( !early_boot )
        return alloc_domheap_page(NULL);

    pfn = alloc_boot_pages(1, 1);
    return ((pfn == 0) ? NULL : mfn_to_page(pfn));
}

/* Release a page previously obtained from alloc_xen_pagetable(). */
void free_xen_pagetable(struct page_info *pg)
{
    free_domheap_page(pg);
}

/*
 * Walk the idle (Xen) page table and return a pointer to the L2 entry
 * covering virtual address 'v', allocating and installing intermediate
 * L3/L2 tables on demand.
 * NOTE(review): the alloc_xen_pagetable() results are not checked for
 * NULL before page_to_virt() — an allocation failure here would be
 * dereferenced; confirm whether early-boot callers can tolerate that.
 */
l2_pgentry_t *virt_to_xen_l2e(unsigned long v)
{
    l4_pgentry_t *pl4e;
    l3_pgentry_t *pl3e;
    l2_pgentry_t *pl2e;

    pl4e = &idle_pg_table[l4_table_offset(v)];
    if ( !(l4e_get_flags(*pl4e) & _PAGE_PRESENT) )
    {
        /* No L3 table yet for this L4 slot: allocate and hook one in. */
        pl3e = page_to_virt(alloc_xen_pagetable());
        clear_page(pl3e);
        *pl4e = l4e_from_paddr(__pa(pl3e), __PAGE_HYPERVISOR);
    }

    pl3e = l4e_to_l3e(*pl4e) + l3_table_offset(v);
    if ( !(l3e_get_flags(*pl3e) & _PAGE_PRESENT) )
    {
        /* No L2 table yet for this L3 slot: allocate and hook one in. */
        pl2e = page_to_virt(alloc_xen_pagetable());
        clear_page(pl2e);
        *pl3e = l3e_from_paddr(__pa(pl2e), __PAGE_HYPERVISOR);
    }

    pl2e = l3e_to_l2e(*pl3e) + l2_table_offset(v);
    return pl2e;
}

/*
 * Boot-time construction of the idle domain's address space:
 *  - point the idle vcpu's monitor table at idle_pg_table;
 *  - build the user-readable (RO) alias of the machine-to-phys table;
 *  - allocate and 0x55-fill the writable M2P mapping, 2MB at a time;
 *  - install the linear page-table self-map and the per-domain slot.
 */
void __init paging_init(void)
{
    unsigned long i, mpt_size;
    l3_pgentry_t *l3_ro_mpt;
    l2_pgentry_t *l2_ro_mpt;
    struct page_info *pg;

    idle_vcpu[0]->arch.monitor_table =
        pagetable_from_paddr(__pa(idle_pg_table));

    /* Create user-accessible L2 directory to map the MPT for guests. */
    l3_ro_mpt = alloc_xenheap_page();
    clear_page(l3_ro_mpt);
    idle_pg_table[l4_table_offset(RO_MPT_VIRT_START)] =
        l4e_from_page(virt_to_page(l3_ro_mpt),
                      __PAGE_HYPERVISOR | _PAGE_USER);
    l2_ro_mpt = alloc_xenheap_page();
    clear_page(l2_ro_mpt);
    l3_ro_mpt[l3_table_offset(RO_MPT_VIRT_START)] =
        l3e_from_page(virt_to_page(l2_ro_mpt),
                      __PAGE_HYPERVISOR | _PAGE_USER);
    l2_ro_mpt += l2_table_offset(RO_MPT_VIRT_START);

    /*
     * Allocate and map the machine-to-phys table.
     * This also ensures L3 is present for fixmaps.
     */
    /* Round the M2P size up to a whole number of 2MB superpages. */
    mpt_size = (max_page * BYTES_PER_LONG) + (1UL << L2_PAGETABLE_SHIFT) - 1;
    mpt_size &= ~((1UL << L2_PAGETABLE_SHIFT) - 1UL);
    for ( i = 0; i < (mpt_size >> L2_PAGETABLE_SHIFT); i++ )
    {
        if ( (pg = alloc_domheap_pages(NULL, PAGETABLE_ORDER, 0)) == NULL )
            panic("Not enough memory for m2p table\n");
        /* Writable mapping at RDWR_MPT_VIRT_START, poisoned with 0x55. */
        map_pages_to_xen(
            RDWR_MPT_VIRT_START + (i << L2_PAGETABLE_SHIFT),
            page_to_mfn(pg), 1UL << PAGETABLE_ORDER, PAGE_HYPERVISOR);
        memset((void *)(RDWR_MPT_VIRT_START + (i << L2_PAGETABLE_SHIFT)),
               0x55, 1UL << L2_PAGETABLE_SHIFT);
        /* Read-only, user-visible 2MB superpage alias for guests. */
        *l2_ro_mpt++ = l2e_from_page(
            pg, _PAGE_GLOBAL|_PAGE_PSE|_PAGE_USER|_PAGE_PRESENT);
        /* Guard: must not run off the end of the single RO-MPT L2 page. */
        BUG_ON(((unsigned long)l2_ro_mpt & ~PAGE_MASK) == 0);
    }

    /* Set up linear page table mapping. */
    idle_pg_table[l4_table_offset(LINEAR_PT_VIRT_START)] =
        l4e_from_paddr(__pa(idle_pg_table), __PAGE_HYPERVISOR);

    /* Install per-domain mappings for idle domain. */
    idle_pg_table[l4_table_offset(PERDOMAIN_VIRT_START)] =
        l4e_from_page(
            virt_to_page(idle_vcpu[0]->domain->arch.mm_perdomain_l3),
            __PAGE_HYPERVISOR);
}

/* Drop the identity (low) mapping in L4 slot 0 and flush global TLBs. */
void __init zap_low_mappings(void)
{
    idle_pg_table[0] = l4e_empty();
    flush_tlb_all_pge();
}

/*
 * Late memory setup: sanity-check the page_info layout, then walk the
 * writable M2P mapping and mark every backing frame as shareable
 * read-only with privileged guests.
 */
void subarch_init_memory(void)
{
    unsigned long i, v, m2p_start_mfn;
    l3_pgentry_t l3e;
    l2_pgentry_t l2e;

    /*
     * We are rather picky about the layout of 'struct page_info'. The
     * count_info and domain fields must be adjacent, as we perform atomic
     * 64-bit operations on them.
     */
    BUILD_BUG_ON(offsetof(struct page_info, u.inuse._domain) !=
                 (offsetof(struct page_info, count_info) + sizeof(u32)));
    BUILD_BUG_ON((offsetof(struct page_info, count_info) & 7) != 0);
    BUILD_BUG_ON(sizeof(struct page_info) !=
                 (32 + BITS_TO_LONGS(NR_CPUS)*sizeof(long)));

    /* M2P table is mappable read-only by privileged domains. */
    for ( v = RDWR_MPT_VIRT_START;
          v != RDWR_MPT_VIRT_END;
          v += 1 << L2_PAGETABLE_SHIFT )
    {
        l3e = l4e_to_l3e(idle_pg_table[l4_table_offset(v)])[
            l3_table_offset(v)];
        if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) )
            continue;
        l2e = l3e_to_l2e(l3e)[l2_table_offset(v)];
        if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) )
            continue;
        m2p_start_mfn = l2e_get_pfn(l2e);

        /* Share each frame of this 2MB M2P superpage read-only. */
        for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
        {
            struct page_info *page = mfn_to_page(m2p_start_mfn + i);
            share_xen_page_with_privileged_guests(page, XENSHARE_readonly);
        }
    }
}

/*
 * NOTE(review): this extract is TRUNCATED here — the remainder of
 * subarch_memory_op() (and anything following it) is missing from the
 * visible chunk.  Nothing below this comment is complete; restore the
 * rest of the function from the upstream file.
 */
long subarch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
{
    struct xen_machphys_mfn_list xmml;
    l3_pgentry_t l3e;
    l2_pgentry_t l2e;
    unsigned long mfn, v;
    unsigned int i;
    long rc = 0;

    switch ( op )
    {
    case XENMEM_machphys_mfn_list:
        if ( copy_from_guest(&xmml, arg, 1) )
            return -EFAULT;

        for ( i = 0, v = RDWR_MPT_VIRT_START;
#
# Copyright (C) 2016 Yousong Zhou <yszhou4tech@gmail.com>
#
# This is free software, licensed under the GNU General Public License v2.
# See /LICENSE for more information.
#
# IRQ bypass manager (virt/lib/irqbypass.ko): infrastructure the KVM modules
# depend on.  HIDDEN because it is only ever selected indirectly via the
# kmod-kvm-* packages' +kmod-irqbypass dependency.
define KernelPackage/irqbypass
  SUBMENU:=Virtualization
  TITLE:=IRQ offload/bypass manager
  KCONFIG:=CONFIG_IRQ_BYPASS_MANAGER
  HIDDEN:=1
  FILES:=$(LINUX_DIR)/virt/lib/irqbypass.ko
  # AutoProbe takes module names (as passed to modprobe), not .ko filenames.
  AUTOLOAD:=$(call AutoProbe,irqbypass)
endef

$(eval $(call KernelPackage,irqbypass))


# Core KVM module for x86/x86_64 hosts.  The vendor-specific kvm-intel /
# kvm-amd packages depend on this one.
define KernelPackage/kvm-x86
  SUBMENU:=Virtualization
  TITLE:=Kernel-based Virtual Machine (KVM) support
  DEPENDS:=@TARGET_x86_generic||TARGET_x86_64 +kmod-irqbypass
  KCONFIG:=\
	  CONFIG_VIRTUALIZATION=y \
	  CONFIG_KVM
  FILES:=$(LINUX_DIR)/arch/$(LINUX_KARCH)/kvm/kvm.ko
  # AutoProbe takes module names (as passed to modprobe), not .ko filenames.
  AUTOLOAD:=$(call AutoProbe,kvm)
endef

define KernelPackage/kvm-x86/description
  Support hosting fully virtualized guest machines using hardware
  virtualization extensions.  You will need a fairly recent
  processor equipped with virtualization extensions. You will also
  need to select one or more of the processor modules.

  This module provides access to the hardware capabilities through
  a character device node named /dev/kvm.
endef

$(eval $(call KernelPackage,kvm-x86))


# Intel VT-x backend for KVM; requires the core kvm module above.
define KernelPackage/kvm-intel
  SUBMENU:=Virtualization
  TITLE:=KVM for Intel processors support
  DEPENDS:=+kmod-kvm-x86
  KCONFIG:=CONFIG_KVM_INTEL
  FILES:=$(LINUX_DIR)/arch/$(LINUX_KARCH)/kvm/kvm-intel.ko
  # AutoProbe takes module names (as passed to modprobe), not .ko filenames.
  AUTOLOAD:=$(call AutoProbe,kvm-intel)
endef

define KernelPackage/kvm-intel/description
  Provides support for KVM on Intel processors equipped with the VT
  extensions.
endef

$(eval $(call KernelPackage,kvm-intel))


# AMD-V (SVM) backend for KVM; requires the core kvm module above.
define KernelPackage/kvm-amd
  SUBMENU:=Virtualization
  TITLE:=KVM for AMD processors support
  DEPENDS:=+kmod-kvm-x86
  KCONFIG:=CONFIG_KVM_AMD
  FILES:=$(LINUX_DIR)/arch/$(LINUX_KARCH)/kvm/kvm-amd.ko
  # AutoProbe takes module names (as passed to modprobe), not .ko filenames.
  AUTOLOAD:=$(call AutoProbe,kvm-amd)
endef

define KernelPackage/kvm-amd/description
  Provides support for KVM on AMD processors equipped with the AMD-V
  (SVM) extensions.
endef

$(eval $(call KernelPackage,kvm-amd))