x86/virt: KVM: Move "disable SVM" helper into KVM SVM
author	Sean Christopherson <seanjc@google.com>
	Fri, 21 Jul 2023 20:18:56 +0000 (13:18 -0700)
committer	Sean Christopherson <seanjc@google.com>
	Thu, 3 Aug 2023 22:37:15 +0000 (15:37 -0700)
Move cpu_svm_disable() into KVM proper now that all hardware
virtualization management is routed through KVM.  Remove the now-empty
virtext.h.

No functional change intended.

Reviewed-by: Kai Huang <kai.huang@intel.com>
Link: https://lore.kernel.org/r/20230721201859.2307736-17-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
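
For context, a rough sketch of how the relocated helper is reached once this change lands. The two callers are taken from the hunks below; the registration call is an assumption based on the earlier patch in this series that added cpu_emergency_register_virt_callback(), and its exact call site (e.g. svm_init()) is not part of this commit:

	/* Normal teardown: KVM disables SVM on each CPU it enabled it on. */
	static void svm_hardware_disable(void)
	{
		if (tsc_scaling)
			__svm_write_tsc_multiplier(SVM_TSC_RATIO_DEFAULT);

		kvm_cpu_svm_disable();

		amd_pmu_disable_virt();
	}

	/* Emergency reboot/crash path: invoked via the callback KVM registers. */
	static void svm_emergency_disable(void)
	{
		kvm_cpu_svm_disable();
	}

	/* Assumed registration site, added by an earlier patch in this series: */
	cpu_emergency_register_virt_callback(svm_emergency_disable);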
arch/x86/include/asm/virtext.h [deleted file]
arch/x86/kvm/svm/svm.c

diff --git a/arch/x86/include/asm/virtext.h b/arch/x86/include/asm/virtext.h
deleted file mode 100644 (file)
index 632575e..0000000
--- a/arch/x86/include/asm/virtext.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/* CPU virtualization extensions handling
- *
- * This should carry the code for handling CPU virtualization extensions
- * that needs to live in the kernel core.
- *
- * Author: Eduardo Habkost <ehabkost@redhat.com>
- *
- * Copyright (C) 2008, Red Hat Inc.
- *
- * Contains code from KVM, Copyright (C) 2006 Qumranet, Inc.
- */
-#ifndef _ASM_X86_VIRTEX_H
-#define _ASM_X86_VIRTEX_H
-
-#include <asm/processor.h>
-
-#include <asm/vmx.h>
-#include <asm/svm.h>
-#include <asm/tlbflush.h>
-
-/*
- * SVM functions:
- */
-/** Disable SVM on the current CPU
- */
-static inline void cpu_svm_disable(void)
-{
-       uint64_t efer;
-
-       wrmsrl(MSR_VM_HSAVE_PA, 0);
-       rdmsrl(MSR_EFER, efer);
-       if (efer & EFER_SVME) {
-               /*
-                * Force GIF=1 prior to disabling SVM to ensure INIT and NMI
-                * aren't blocked, e.g. if a fatal error occurred between CLGI
-                * and STGI.  Note, STGI may #UD if SVM is disabled from NMI
-                * context between reading EFER and executing STGI.  In that
-                * case, GIF must already be set, otherwise the NMI would have
-                * been blocked, so just eat the fault.
-                */
-               asm_volatile_goto("1: stgi\n\t"
-                                 _ASM_EXTABLE(1b, %l[fault])
-                                 ::: "memory" : fault);
-fault:
-               wrmsrl(MSR_EFER, efer & ~EFER_SVME);
-       }
-}
-
-#endif /* _ASM_X86_VIRTEX_H */
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 9e449167e71bb8620d962fcbd0b1f86b7e6d8afd..47f9c7156609ceaf41c47383fd47f613d227ce94 100644 (file)
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -42,8 +42,6 @@
 #include <asm/reboot.h>
 #include <asm/fpu/api.h>
 
-#include <asm/virtext.h>
-
 #include <trace/events/ipi.h>
 
 #include "trace.h"
@@ -582,9 +580,32 @@ out:
        preempt_enable();
 }
 
+static inline void kvm_cpu_svm_disable(void)
+{
+       uint64_t efer;
+
+       wrmsrl(MSR_VM_HSAVE_PA, 0);
+       rdmsrl(MSR_EFER, efer);
+       if (efer & EFER_SVME) {
+               /*
+                * Force GIF=1 prior to disabling SVM to ensure INIT and NMI
+                * aren't blocked, e.g. if a fatal error occurred between CLGI
+                * and STGI.  Note, STGI may #UD if SVM is disabled from NMI
+                * context between reading EFER and executing STGI.  In that
+                * case, GIF must already be set, otherwise the NMI would have
+                * been blocked, so just eat the fault.
+                */
+               asm_volatile_goto("1: stgi\n\t"
+                                 _ASM_EXTABLE(1b, %l[fault])
+                                 ::: "memory" : fault);
+fault:
+               wrmsrl(MSR_EFER, efer & ~EFER_SVME);
+       }
+}
+
 static void svm_emergency_disable(void)
 {
-       cpu_svm_disable();
+       kvm_cpu_svm_disable();
 }
 
 static void svm_hardware_disable(void)
@@ -593,7 +614,7 @@ static void svm_hardware_disable(void)
        if (tsc_scaling)
                __svm_write_tsc_multiplier(SVM_TSC_RATIO_DEFAULT);
 
-       cpu_svm_disable();
+       kvm_cpu_svm_disable();
 
        amd_pmu_disable_virt();
 }