T: git git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip.git
S: Supported
F: arch/x86/xen/
+F: arch/x86/platform/pvh/
F: drivers/*/xen-*front.c
F: drivers/xen/
F: arch/x86/include/asm/xen/
# Xen paravirtualization support
obj-$(CONFIG_XEN) += xen/
+obj-$(CONFIG_XEN_PVH) += platform/pvh/
+
# Hyper-V paravirtualization support
obj-$(subst m,y,$(CONFIG_HYPERV)) += hyperv/
--- /dev/null
+# SPDX-License-Identifier: GPL-2.0
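+# head.o is boot-time asm that objtool cannot validate; mark it non-standard.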
+OBJECT_FILES_NON_STANDARD_head.o := y
+
+obj-$(CONFIG_XEN_PVH) += enlighten.o
+obj-$(CONFIG_XEN_PVH) += head.o
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/acpi.h>
+
+#include <xen/hvc-console.h>
+
+#include <asm/io_apic.h>
+#include <asm/hypervisor.h>
+#include <asm/e820/api.h>
+#include <asm/x86_init.h>
+
+#include <asm/xen/interface.h>
+#include <asm/xen/hypercall.h>
+
+#include <xen/xen.h>
+#include <xen/interface/memory.h>
+#include <xen/interface/hvm/start_info.h>
+
+/*
+ * PVH variables.
+ *
+ * xen_pvh, pvh_bootparams and pvh_start_info need to live in the data
+ * segment since they are used after startup_{32|64}, which clears .bss,
+ * has been invoked.
+ */
+bool xen_pvh __attribute__((section(".data"))) = 0;
+struct boot_params pvh_bootparams __attribute__((section(".data")));
+struct hvm_start_info pvh_start_info __attribute__((section(".data")));
+
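+/* head.S uses this to size its copy of the start info block. */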
+unsigned int pvh_start_info_sz = sizeof(pvh_start_info);
+
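+/*
+ * Xen passes the physical address of the ACPI RSDP in the start info
+ * block; returning it here lets ACPI skip the legacy low-memory scan.
+ */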
+static u64 pvh_get_root_pointer(void)
+{
+ return pvh_start_info.rsdp_paddr;
+}
+
+static void __init init_pvh_bootparams(void)
+{
+ struct xen_memory_map memmap;
+ int rc;
+
+ memset(&pvh_bootparams, 0, sizeof(pvh_bootparams));
+
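+ /* Xen writes the E820 map directly into pvh_bootparams.e820_table. */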
+ memmap.nr_entries = ARRAY_SIZE(pvh_bootparams.e820_table);
+ set_xen_guest_handle(memmap.buffer, pvh_bootparams.e820_table);
+ rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
+ if (rc) {
+ xen_raw_printk("XENMEM_memory_map failed (%d)\n", rc);
+ BUG();
+ }
+ pvh_bootparams.e820_entries = memmap.nr_entries;
+
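+ /* Reserve the legacy ISA/BIOS hole below 1MB so it is not treated as RAM. */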
+ if (pvh_bootparams.e820_entries < E820_MAX_ENTRIES_ZEROPAGE - 1) {
+ pvh_bootparams.e820_table[pvh_bootparams.e820_entries].addr =
+ ISA_START_ADDRESS;
+ pvh_bootparams.e820_table[pvh_bootparams.e820_entries].size =
+ ISA_END_ADDRESS - ISA_START_ADDRESS;
+ pvh_bootparams.e820_table[pvh_bootparams.e820_entries].type =
+ E820_TYPE_RESERVED;
+ pvh_bootparams.e820_entries++;
+ } else
+ xen_raw_printk("Warning: Can fit ISA range into e820\n");
+
+ pvh_bootparams.hdr.cmd_line_ptr =
+ pvh_start_info.cmdline_paddr;
+
+ /* The first module is always ramdisk. */
+ if (pvh_start_info.nr_modules) {
+ struct hvm_modlist_entry *modaddr =
+ __va(pvh_start_info.modlist_paddr);
+ pvh_bootparams.hdr.ramdisk_image = modaddr->paddr;
+ pvh_bootparams.hdr.ramdisk_size = modaddr->size;
+ }
+
+ /*
+ * See Documentation/x86/boot.txt.
+ *
+ * Version 2.12 supports Xen entry point but we will use default x86/PC
+ * environment (i.e. hardware_subarch 0).
+ */
+ pvh_bootparams.hdr.version = (2 << 8) | 12;
+ pvh_bootparams.hdr.type_of_loader = (9 << 4) | 0; /* Xen loader */
+
+ x86_init.acpi.get_root_pointer = pvh_get_root_pointer;
+}
+
+/*
+ * This routine (and those that it might call) should not use
+ * anything that lives in .bss since that segment will be cleared later.
+ */
+void __init xen_prepare_pvh(void)
+{
+ u32 msr;
+ u64 pfn;
+
+ if (pvh_start_info.magic != XEN_HVM_START_MAGIC_VALUE) {
+ xen_raw_printk("Error: Unexpected magic value (0x%08x)\n",
+ pvh_start_info.magic);
+ BUG();
+ }
+
+ xen_pvh = 1;
+ xen_start_flags = pvh_start_info.flags;
+
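+ /* Register the hypercall page; its MSR index comes from CPUID leaf base + 2. */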
+ msr = cpuid_ebx(xen_cpuid_base() + 2);
+ pfn = __pa(hypercall_page);
+ wrmsr_safe(msr, (u32)pfn, (u32)(pfn >> 32));
+
+ init_pvh_bootparams();
+}
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright (C) 2016, Oracle and/or its affiliates. All rights reserved.
+ */
+
+ .code32
+ .text
+#define _pa(x) ((x) - __START_KERNEL_map)
+
+#include <linux/elfnote.h>
+#include <linux/init.h>
+#include <linux/linkage.h>
+#include <asm/segment.h>
+#include <asm/asm.h>
+#include <asm/boot.h>
+#include <asm/processor-flags.h>
+#include <asm/msr.h>
+#include <xen/interface/elfnote.h>
+
+ __HEAD
+
+/*
+ * Entry point for PVH guests.
+ *
+ * Xen ABI specifies the following register state when we come here:
+ *
+ * - `ebx`: contains the physical memory address where the loader has placed
+ * the boot start info structure.
+ * - `cr0`: bit 0 (PE) must be set. All the other writeable bits are cleared.
+ * - `cr4`: all bits are cleared.
+ * - `cs`: must be a 32-bit read/execute code segment with a base of '0'
+ *   and a limit of '0xFFFFFFFF'. The selector value is unspecified.
+ * - `ds`, `es`: must be a 32-bit read/write data segment with a base of
+ *   '0' and a limit of '0xFFFFFFFF'. The selector values are all
+ *   unspecified.
+ * - `tr`: must be a 32-bit TSS (active) with a base of '0' and a limit
+ * of '0x67'.
+ * - `eflags`: bit 17 (VM) must be cleared. Bit 9 (IF) must be cleared.
+ * Bit 8 (TF) must be cleared. Other bits are all unspecified.
+ *
+ * All other processor registers and flag bits are unspecified. The OS is in
+ * charge of setting up its own stack, GDT and IDT.
+ */
+
+#define PVH_GDT_ENTRY_CS 1
+#define PVH_GDT_ENTRY_DS 2
+#define PVH_GDT_ENTRY_CANARY 3
+#define PVH_CS_SEL (PVH_GDT_ENTRY_CS * 8)
+#define PVH_DS_SEL (PVH_GDT_ENTRY_DS * 8)
+#define PVH_CANARY_SEL (PVH_GDT_ENTRY_CANARY * 8)
+
+ENTRY(pvh_start_xen)
+ cld
+
+ lgdt (_pa(gdt))
+
+ mov $PVH_DS_SEL,%eax
+ mov %eax,%ds
+ mov %eax,%es
+ mov %eax,%ss
+
+ /* Stash hvm_start_info. */
+ mov $_pa(pvh_start_info), %edi
+ mov %ebx, %esi
+ mov _pa(pvh_start_info_sz), %ecx
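+ /* pvh_start_info_sz is in bytes; convert to longwords for movsl. */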
+ shr $2,%ecx
+ rep
+ movsl
+
+ mov $_pa(early_stack_end), %esp
+
+ /* Enable PAE mode. */
+ mov %cr4, %eax
+ orl $X86_CR4_PAE, %eax
+ mov %eax, %cr4
+
+#ifdef CONFIG_X86_64
+ /* Enable Long mode. */
+ mov $MSR_EFER, %ecx
+ rdmsr
+ btsl $_EFER_LME, %eax
+ wrmsr
+
+ /* Enable pre-constructed page tables. */
+ mov $_pa(init_top_pgt), %eax
+ mov %eax, %cr3
+ mov $(X86_CR0_PG | X86_CR0_PE), %eax
+ mov %eax, %cr0
+
+ /* Jump to 64-bit mode. */
+ ljmp $PVH_CS_SEL, $_pa(1f)
+
+ /* 64-bit entry point. */
+ .code64
+1:
+ /* Set base address in stack canary descriptor. */
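+ /* wrmsr takes %edx:%eax; the canary sits below 4G, so %edx is zeroed. */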
+ mov $MSR_GS_BASE,%ecx
+ mov $_pa(canary), %eax
+ xor %edx, %edx
+ wrmsr
+
+ call xen_prepare_pvh
+
+ /* startup_64 expects boot_params in %rsi. */
+ mov $_pa(pvh_bootparams), %rsi
+ mov $_pa(startup_64), %rax
+ jmp *%rax
+
+#else /* CONFIG_X86_64 */
+
+ /* Set base address in stack canary descriptor. */
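+ /* The segment base is split across descriptor bytes 2-4 and byte 7. */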
+ movl $_pa(gdt_start),%eax
+ movl $_pa(canary),%ecx
+ movw %cx, (PVH_GDT_ENTRY_CANARY * 8) + 2(%eax)
+ shrl $16, %ecx
+ movb %cl, (PVH_GDT_ENTRY_CANARY * 8) + 4(%eax)
+ movb %ch, (PVH_GDT_ENTRY_CANARY * 8) + 7(%eax)
+
+ mov $PVH_CANARY_SEL,%eax
+ mov %eax,%gs
+
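+ /* Set up the early page tables before %cr3 is loaded below. */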
+ call mk_early_pgtbl_32
+
+ mov $_pa(initial_page_table), %eax
+ mov %eax, %cr3
+
+ mov %cr0, %eax
+ or $(X86_CR0_PG | X86_CR0_PE), %eax
+ mov %eax, %cr0
+
+ ljmp $PVH_CS_SEL, $1f
+1:
+ call xen_prepare_pvh
+ mov $_pa(pvh_bootparams), %esi
+
+ /* startup_32 doesn't expect paging and PAE to be on. */
+ ljmp $PVH_CS_SEL, $_pa(2f)
+2:
+ mov %cr0, %eax
+ and $~X86_CR0_PG, %eax
+ mov %eax, %cr0
+ mov %cr4, %eax
+ and $~X86_CR4_PAE, %eax
+ mov %eax, %cr4
+
+ ljmp $PVH_CS_SEL, $_pa(startup_32)
+#endif
+END(pvh_start_xen)
+
+ .section ".init.data","aw"
+ .balign 8
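+ /* GDT pseudo-descriptor: 16-bit limit followed by the 32-bit physical base. */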
+gdt:
+ .word gdt_end - gdt_start
+ .long _pa(gdt_start)
+ .word 0
+gdt_start:
+ .quad 0x0000000000000000 /* NULL descriptor */
+#ifdef CONFIG_X86_64
+ .quad GDT_ENTRY(0xa09a, 0, 0xfffff) /* PVH_CS_SEL */
+#else
+ .quad GDT_ENTRY(0xc09a, 0, 0xfffff) /* PVH_CS_SEL */
+#endif
+ .quad GDT_ENTRY(0xc092, 0, 0xfffff) /* PVH_DS_SEL */
+ .quad GDT_ENTRY(0x4090, 0, 0x18) /* PVH_CANARY_SEL */
+gdt_end:
+
+ .balign 16
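+ /* Scratch space for the early stack-protector canary, addressed via %gs. */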
+canary:
+ .fill 48, 1, 0
+
+early_stack:
+ .fill BOOT_STACK_SIZE, 1, 0
+early_stack_end:
+
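+ /* Advertise the physical address of the PVH entry point to Xen. */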
+ ELFNOTE(Xen, XEN_ELFNOTE_PHYS32_ENTRY,
+ _ASM_PTR (pvh_start_xen - __START_KERNEL_map))
# SPDX-License-Identifier: GPL-2.0
OBJECT_FILES_NON_STANDARD_xen-asm_$(BITS).o := y
-OBJECT_FILES_NON_STANDARD_xen-pvh.o := y
ifdef CONFIG_FUNCTION_TRACER
# Do not profile debug and lowlevel utilities
obj-$(CONFIG_XEN_PV) += xen-asm.o
obj-$(CONFIG_XEN_PV) += xen-asm_$(BITS).o
-obj-$(CONFIG_XEN_PVH) += enlighten_pvh.o
-obj-$(CONFIG_XEN_PVH) += xen-pvh.o
-
obj-$(CONFIG_EVENT_TRACING) += trace.o
obj-$(CONFIG_SMP) += smp.o
+++ /dev/null
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/acpi.h>
-
-#include <xen/hvc-console.h>
-
-#include <asm/io_apic.h>
-#include <asm/hypervisor.h>
-#include <asm/e820/api.h>
-#include <asm/x86_init.h>
-
-#include <asm/xen/interface.h>
-#include <asm/xen/hypercall.h>
-
-#include <xen/xen.h>
-#include <xen/interface/memory.h>
-#include <xen/interface/hvm/start_info.h>
-
-/*
- * PVH variables.
- *
- * xen_pvh pvh_bootparams and pvh_start_info need to live in data segment
- * since they are used after startup_{32|64}, which clear .bss, are invoked.
- */
-bool xen_pvh __attribute__((section(".data"))) = 0;
-struct boot_params pvh_bootparams __attribute__((section(".data")));
-struct hvm_start_info pvh_start_info __attribute__((section(".data")));
-
-unsigned int pvh_start_info_sz = sizeof(pvh_start_info);
-
-static u64 pvh_get_root_pointer(void)
-{
- return pvh_start_info.rsdp_paddr;
-}
-
-static void __init init_pvh_bootparams(void)
-{
- struct xen_memory_map memmap;
- int rc;
-
- memset(&pvh_bootparams, 0, sizeof(pvh_bootparams));
-
- memmap.nr_entries = ARRAY_SIZE(pvh_bootparams.e820_table);
- set_xen_guest_handle(memmap.buffer, pvh_bootparams.e820_table);
- rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
- if (rc) {
- xen_raw_printk("XENMEM_memory_map failed (%d)\n", rc);
- BUG();
- }
- pvh_bootparams.e820_entries = memmap.nr_entries;
-
- if (pvh_bootparams.e820_entries < E820_MAX_ENTRIES_ZEROPAGE - 1) {
- pvh_bootparams.e820_table[pvh_bootparams.e820_entries].addr =
- ISA_START_ADDRESS;
- pvh_bootparams.e820_table[pvh_bootparams.e820_entries].size =
- ISA_END_ADDRESS - ISA_START_ADDRESS;
- pvh_bootparams.e820_table[pvh_bootparams.e820_entries].type =
- E820_TYPE_RESERVED;
- pvh_bootparams.e820_entries++;
- } else
- xen_raw_printk("Warning: Can fit ISA range into e820\n");
-
- pvh_bootparams.hdr.cmd_line_ptr =
- pvh_start_info.cmdline_paddr;
-
- /* The first module is always ramdisk. */
- if (pvh_start_info.nr_modules) {
- struct hvm_modlist_entry *modaddr =
- __va(pvh_start_info.modlist_paddr);
- pvh_bootparams.hdr.ramdisk_image = modaddr->paddr;
- pvh_bootparams.hdr.ramdisk_size = modaddr->size;
- }
-
- /*
- * See Documentation/x86/boot.txt.
- *
- * Version 2.12 supports Xen entry point but we will use default x86/PC
- * environment (i.e. hardware_subarch 0).
- */
- pvh_bootparams.hdr.version = (2 << 8) | 12;
- pvh_bootparams.hdr.type_of_loader = (9 << 4) | 0; /* Xen loader */
-
- x86_init.acpi.get_root_pointer = pvh_get_root_pointer;
-}
-
-/*
- * This routine (and those that it might call) should not use
- * anything that lives in .bss since that segment will be cleared later.
- */
-void __init xen_prepare_pvh(void)
-{
- u32 msr;
- u64 pfn;
-
- if (pvh_start_info.magic != XEN_HVM_START_MAGIC_VALUE) {
- xen_raw_printk("Error: Unexpected magic value (0x%08x)\n",
- pvh_start_info.magic);
- BUG();
- }
-
- xen_pvh = 1;
- xen_start_flags = pvh_start_info.flags;
-
- msr = cpuid_ebx(xen_cpuid_base() + 2);
- pfn = __pa(hypercall_page);
- wrmsr_safe(msr, (u32)pfn, (u32)(pfn >> 32));
-
- init_pvh_bootparams();
-}
+++ /dev/null
-/* SPDX-License-Identifier: GPL-2.0 */
-
-/*
- * Copyright C 2016, Oracle and/or its affiliates. All rights reserved.
- */
-
- .code32
- .text
-#define _pa(x) ((x) - __START_KERNEL_map)
-
-#include <linux/elfnote.h>
-#include <linux/init.h>
-#include <linux/linkage.h>
-#include <asm/segment.h>
-#include <asm/asm.h>
-#include <asm/boot.h>
-#include <asm/processor-flags.h>
-#include <asm/msr.h>
-#include <xen/interface/elfnote.h>
-
- __HEAD
-
-/*
- * Entry point for PVH guests.
- *
- * Xen ABI specifies the following register state when we come here:
- *
- * - `ebx`: contains the physical memory address where the loader has placed
- * the boot start info structure.
- * - `cr0`: bit 0 (PE) must be set. All the other writeable bits are cleared.
- * - `cr4`: all bits are cleared.
- * - `cs `: must be a 32-bit read/execute code segment with a base of ‘0’
- * and a limit of ‘0xFFFFFFFF’. The selector value is unspecified.
- * - `ds`, `es`: must be a 32-bit read/write data segment with a base of
- * ‘0’ and a limit of ‘0xFFFFFFFF’. The selector values are all
- * unspecified.
- * - `tr`: must be a 32-bit TSS (active) with a base of '0' and a limit
- * of '0x67'.
- * - `eflags`: bit 17 (VM) must be cleared. Bit 9 (IF) must be cleared.
- * Bit 8 (TF) must be cleared. Other bits are all unspecified.
- *
- * All other processor registers and flag bits are unspecified. The OS is in
- * charge of setting up it's own stack, GDT and IDT.
- */
-
-#define PVH_GDT_ENTRY_CS 1
-#define PVH_GDT_ENTRY_DS 2
-#define PVH_GDT_ENTRY_CANARY 3
-#define PVH_CS_SEL (PVH_GDT_ENTRY_CS * 8)
-#define PVH_DS_SEL (PVH_GDT_ENTRY_DS * 8)
-#define PVH_CANARY_SEL (PVH_GDT_ENTRY_CANARY * 8)
-
-ENTRY(pvh_start_xen)
- cld
-
- lgdt (_pa(gdt))
-
- mov $PVH_DS_SEL,%eax
- mov %eax,%ds
- mov %eax,%es
- mov %eax,%ss
-
- /* Stash hvm_start_info. */
- mov $_pa(pvh_start_info), %edi
- mov %ebx, %esi
- mov _pa(pvh_start_info_sz), %ecx
- shr $2,%ecx
- rep
- movsl
-
- mov $_pa(early_stack_end), %esp
-
- /* Enable PAE mode. */
- mov %cr4, %eax
- orl $X86_CR4_PAE, %eax
- mov %eax, %cr4
-
-#ifdef CONFIG_X86_64
- /* Enable Long mode. */
- mov $MSR_EFER, %ecx
- rdmsr
- btsl $_EFER_LME, %eax
- wrmsr
-
- /* Enable pre-constructed page tables. */
- mov $_pa(init_top_pgt), %eax
- mov %eax, %cr3
- mov $(X86_CR0_PG | X86_CR0_PE), %eax
- mov %eax, %cr0
-
- /* Jump to 64-bit mode. */
- ljmp $PVH_CS_SEL, $_pa(1f)
-
- /* 64-bit entry point. */
- .code64
-1:
- /* Set base address in stack canary descriptor. */
- mov $MSR_GS_BASE,%ecx
- mov $_pa(canary), %eax
- xor %edx, %edx
- wrmsr
-
- call xen_prepare_pvh
-
- /* startup_64 expects boot_params in %rsi. */
- mov $_pa(pvh_bootparams), %rsi
- mov $_pa(startup_64), %rax
- jmp *%rax
-
-#else /* CONFIG_X86_64 */
-
- /* Set base address in stack canary descriptor. */
- movl $_pa(gdt_start),%eax
- movl $_pa(canary),%ecx
- movw %cx, (PVH_GDT_ENTRY_CANARY * 8) + 2(%eax)
- shrl $16, %ecx
- movb %cl, (PVH_GDT_ENTRY_CANARY * 8) + 4(%eax)
- movb %ch, (PVH_GDT_ENTRY_CANARY * 8) + 7(%eax)
-
- mov $PVH_CANARY_SEL,%eax
- mov %eax,%gs
-
- call mk_early_pgtbl_32
-
- mov $_pa(initial_page_table), %eax
- mov %eax, %cr3
-
- mov %cr0, %eax
- or $(X86_CR0_PG | X86_CR0_PE), %eax
- mov %eax, %cr0
-
- ljmp $PVH_CS_SEL, $1f
-1:
- call xen_prepare_pvh
- mov $_pa(pvh_bootparams), %esi
-
- /* startup_32 doesn't expect paging and PAE to be on. */
- ljmp $PVH_CS_SEL, $_pa(2f)
-2:
- mov %cr0, %eax
- and $~X86_CR0_PG, %eax
- mov %eax, %cr0
- mov %cr4, %eax
- and $~X86_CR4_PAE, %eax
- mov %eax, %cr4
-
- ljmp $PVH_CS_SEL, $_pa(startup_32)
-#endif
-END(pvh_start_xen)
-
- .section ".init.data","aw"
- .balign 8
-gdt:
- .word gdt_end - gdt_start
- .long _pa(gdt_start)
- .word 0
-gdt_start:
- .quad 0x0000000000000000 /* NULL descriptor */
-#ifdef CONFIG_X86_64
- .quad GDT_ENTRY(0xa09a, 0, 0xfffff) /* PVH_CS_SEL */
-#else
- .quad GDT_ENTRY(0xc09a, 0, 0xfffff) /* PVH_CS_SEL */
-#endif
- .quad GDT_ENTRY(0xc092, 0, 0xfffff) /* PVH_DS_SEL */
- .quad GDT_ENTRY(0x4090, 0, 0x18) /* PVH_CANARY_SEL */
-gdt_end:
-
- .balign 16
-canary:
- .fill 48, 1, 0
-
-early_stack:
- .fill BOOT_STACK_SIZE, 1, 0
-early_stack_end:
-
- ELFNOTE(Xen, XEN_ELFNOTE_PHYS32_ENTRY,
- _ASM_PTR (pvh_start_xen - __START_KERNEL_map))