x86, s390/mm: Move sme_active() and sme_me_mask to x86-specific header
Now that generic code doesn't reference them, move sme_active() and sme_me_mask to x86's <asm/mem_encrypt.h>.

Also remove the export for sme_active() since it's only used in files that won't be built as modules. sme_me_mask, on the other hand, is used in arch/x86/kvm/svm.c (via __sme_set() and __psp_pa()), which can be built as a module, so its export needs to stay.

Signed-off-by: Thiago Jung Bauermann <bauerman@linux.ibm.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20190806044919.10622-5-bauerman@linux.ibm.com
parent e740815a97
commit 284e21fab2
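For context on why the sme_me_mask export has to stay: the __sme_set()/__sme_clr() helpers expand to a direct reference to the variable, so a modular user such as kvm-amd (built from arch/x86/kvm/svm.c) still needs the symbol at module load time. The sketch below is not part of the patch; it is a simplified, standalone illustration of that expansion, and make_encrypted() is a made-up stand-in for the real callers.

#include <stdint.h>
typedef uint64_t u64;

u64 sme_me_mask;	/* in the kernel this is defined in arch/x86 code and exported */

/* Simplified from include/linux/mem_encrypt.h: both macros reference sme_me_mask. */
#define __sme_set(x)	((x) | sme_me_mask)
#define __sme_clr(x)	((x) & ~sme_me_mask)

/* Hypothetical caller standing in for the __sme_set()/__psp_pa() users in svm.c:
 * the generated object refers to sme_me_mask, hence the export must remain. */
u64 make_encrypted(u64 paddr)
{
	return __sme_set(paddr);
}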
arch/s390/include/asm/mem_encrypt.h
@@ -4,9 +4,7 @@
 
 #ifndef __ASSEMBLY__
 
-#define sme_me_mask 0ULL
-
-static inline bool sme_active(void) { return false; }
+static inline bool mem_encrypt_active(void) { return false; }
 extern bool sev_active(void);
 
 int set_memory_encrypted(unsigned long addr, int numpages);
arch/x86/include/asm/mem_encrypt.h
@@ -92,6 +92,16 @@ early_set_memory_encrypted(unsigned long vaddr, unsigned long size) { return 0;
 
 extern char __start_bss_decrypted[], __end_bss_decrypted[], __start_bss_decrypted_unused[];
 
+static inline bool mem_encrypt_active(void)
+{
+	return sme_me_mask;
+}
+
+static inline u64 sme_get_me_mask(void)
+{
+	return sme_me_mask;
+}
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* __X86_MEM_ENCRYPT_H__ */
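The two inlines added above are now x86-only, where sme_me_mask is meaningful: it is non-zero when either SME or SEV is enabled, so mem_encrypt_active() reports encryption for both cases, and sme_get_me_mask() hands callers the raw mask to OR into physical addresses and page-table entries. Below is a minimal standalone sketch of that behaviour, not kernel code; boot_with_sev_or_sme() and the bit position are purely illustrative.

#include <stdbool.h>
#include <stdint.h>
typedef uint64_t u64;

static u64 sme_me_mask;	/* set during early boot when SME or SEV is active */

static inline bool mem_encrypt_active(void)
{
	return sme_me_mask;	/* non-zero mask means some memory encryption is active */
}

static inline u64 sme_get_me_mask(void)
{
	return sme_me_mask;	/* callers OR this into addresses that must be encrypted */
}

static void boot_with_sev_or_sme(void)
{
	sme_me_mask = 1ULL << 47;	/* illustrative C-bit position, not authoritative */
}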
arch/x86/mm/mem_encrypt.c
@@ -344,7 +344,6 @@ bool sme_active(void)
 {
 	return sme_me_mask && !sev_enabled;
 }
-EXPORT_SYMBOL(sme_active);
 
 bool sev_active(void)
 {
include/linux/mem_encrypt.h
@@ -18,23 +18,11 @@
 
 #else /* !CONFIG_ARCH_HAS_MEM_ENCRYPT */
 
-#define sme_me_mask 0ULL
-
-static inline bool sme_active(void) { return false; }
+static inline bool mem_encrypt_active(void) { return false; }
 static inline bool sev_active(void) { return false; }
 
 #endif /* CONFIG_ARCH_HAS_MEM_ENCRYPT */
 
-static inline bool mem_encrypt_active(void)
-{
-	return sme_me_mask;
-}
-
-static inline u64 sme_get_me_mask(void)
-{
-	return sme_me_mask;
-}
-
 #ifdef CONFIG_AMD_MEM_ENCRYPT
 /*
  * The __sme_set() and __sme_clr() macros are useful for adding or removing
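With the x86-specific bits gone from this header, arch-independent code that includes <linux/mem_encrypt.h> only sees mem_encrypt_active() and sev_active(), which fall back to the stubs above on architectures without CONFIG_ARCH_HAS_MEM_ENCRYPT. A rough sketch of the pattern, with needs_bounce_buffering() as a hypothetical caller rather than an actual kernel function:

#include <stdbool.h>

/* Fallback stubs, as in the #else branch above: */
static inline bool mem_encrypt_active(void) { return false; }
static inline bool sev_active(void) { return false; }

/* Generic code can key decisions off mem_encrypt_active() and still build
 * (and constant-fold to false) on architectures without memory encryption. */
static bool needs_bounce_buffering(void)
{
	return mem_encrypt_active();
}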