x86, s390/mm: Move sme_active() and sme_me_mask to x86-specific header
author:    Thiago Jung Bauermann <bauerman@linux.ibm.com>
           Tue, 6 Aug 2019 04:49:17 +0000 (01:49 -0300)
committer: Michael Ellerman <mpe@ellerman.id.au>
           Fri, 9 Aug 2019 12:52:08 +0000 (22:52 +1000)
Now that generic code doesn't reference them, move sme_active() and
sme_me_mask to x86's <asm/mem_encrypt.h>.
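
After the move, the two symbols are only visible to x86 code. As a rough sketch
(paraphrasing arch/x86/include/asm/mem_encrypt.h from memory; the exact guards
and surrounding declarations may differ), the x86 header carries both flavours:

	/* arch/x86/include/asm/mem_encrypt.h -- illustrative excerpt only */
	#ifdef CONFIG_AMD_MEM_ENCRYPT
	extern u64 sme_me_mask;		/* encryption mask ORed into page table entries */
	bool sme_active(void);		/* true when SME (not SEV) is in use */
	#else
	#define sme_me_mask	0ULL
	static inline bool sme_active(void) { return false; }
	#endif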

Also remove the export for sme_active(), since it's only used in files that
won't be built as modules. sme_me_mask, on the other hand, is used in
arch/x86/kvm/svm.c (via __sme_set() and __psp_pa()), which can be built as a
module, so its export needs to stay.
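
The dependency on sme_me_mask comes from the helper macros themselves. Roughly
(paraphrasing include/linux/mem_encrypt.h and include/linux/psp-sev.h; treat
this as a sketch rather than the exact definitions):

	#define __sme_set(x)	((x) | sme_me_mask)	/* tag an address as encrypted */
	#define __sme_clr(x)	((x) & ~sme_me_mask)	/* strip the encryption bit */
	#define __psp_pa(x)	__sme_clr(__pa(x))	/* PSP gets the unencrypted physical address */

Because these expand to direct references to sme_me_mask, any module that uses
them, such as the one built from arch/x86/kvm/svm.c, needs the symbol exported.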

Signed-off-by: Thiago Jung Bauermann <bauerman@linux.ibm.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20190806044919.10622-5-bauerman@linux.ibm.com
arch/s390/include/asm/mem_encrypt.h
arch/x86/include/asm/mem_encrypt.h
arch/x86/mm/mem_encrypt.c
include/linux/mem_encrypt.h

diff --git a/arch/s390/include/asm/mem_encrypt.h b/arch/s390/include/asm/mem_encrypt.h
index 3eb018508190e21897d2e1c0fb713a96ce827b0e..ff813a56bc300714e56674243f2a701236e0f196 100644
--- a/arch/s390/include/asm/mem_encrypt.h
+++ b/arch/s390/include/asm/mem_encrypt.h
@@ -4,9 +4,7 @@
 
 #ifndef __ASSEMBLY__
 
-#define sme_me_mask    0ULL
-
-static inline bool sme_active(void) { return false; }
+static inline bool mem_encrypt_active(void) { return false; }
 extern bool sev_active(void);
 
 int set_memory_encrypted(unsigned long addr, int numpages);
diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
index 0c196c47d6215f7f8e54b6bd3760b5ca506f9da8..848ce43b9040d6876f95d808b0a4a2e413949185 100644
--- a/arch/x86/include/asm/mem_encrypt.h
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -92,6 +92,16 @@ early_set_memory_encrypted(unsigned long vaddr, unsigned long size) { return 0;
 
 extern char __start_bss_decrypted[], __end_bss_decrypted[], __start_bss_decrypted_unused[];
 
+static inline bool mem_encrypt_active(void)
+{
+       return sme_me_mask;
+}
+
+static inline u64 sme_get_me_mask(void)
+{
+       return sme_me_mask;
+}
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* __X86_MEM_ENCRYPT_H__ */
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index fece30ca8b0cb9fc58d6bb65f2c5a2bc5f4b2b03..94da5a88abe6215224f3519e5ddf9ac01faf85ea 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -344,7 +344,6 @@ bool sme_active(void)
 {
        return sme_me_mask && !sev_enabled;
 }
-EXPORT_SYMBOL(sme_active);
 
 bool sev_active(void)
 {
diff --git a/include/linux/mem_encrypt.h b/include/linux/mem_encrypt.h
index 470bd53a89df23be9ac39e9593e91f6a3f13ea22..0c5b0ff9eb2992f51799e0c1222bf2a1959a03a8 100644
--- a/include/linux/mem_encrypt.h
+++ b/include/linux/mem_encrypt.h
 
 #else  /* !CONFIG_ARCH_HAS_MEM_ENCRYPT */
 
-#define sme_me_mask    0ULL
-
-static inline bool sme_active(void) { return false; }
+static inline bool mem_encrypt_active(void) { return false; }
 static inline bool sev_active(void) { return false; }
 
 #endif /* CONFIG_ARCH_HAS_MEM_ENCRYPT */
 
-static inline bool mem_encrypt_active(void)
-{
-       return sme_me_mask;
-}
-
-static inline u64 sme_get_me_mask(void)
-{
-       return sme_me_mask;
-}
-
 #ifdef CONFIG_AMD_MEM_ENCRYPT
 /*
  * The __sme_set() and __sme_clr() macros are useful for adding or removing