KVM: x86: Create mask for guest CR4 reserved bits in kvm_update_cpuid()

Instead of building the mask of guest CR4 reserved bits every time kvm_valid_cr4()
is called, compute it once in kvm_update_cpuid() and cache it in the vcpu, so that
kvm_valid_cr4() can simply reuse it.

Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Krish Sadhukhan <krish.sadhukhan@oracle.com>
Message-Id: <1594168797-29444-2-git-send-email-krish.sadhukhan@oracle.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
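
The net effect, condensed from the hunks below (surrounding code abridged): the mask construction moves out of the CR4-validation path and runs only when the guest's CPUID is (re)configured, so every subsequent CR4 write is validated with two cheap mask checks.

    /* kvm_update_cpuid(): runs only when guest CPUID changes. */
    vcpu->arch.cr4_guest_rsvd_bits =
        __cr4_reserved_bits(guest_cpuid_has, vcpu);

    /* kvm_valid_cr4(): runs on every CR4 write. */
    static int kvm_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
    {
        if (cr4 & cr4_reserved_bits)              /* host-wide reserved bits */
            return -EINVAL;
        if (cr4 & vcpu->arch.cr4_guest_rsvd_bits) /* cached per-guest bits */
            return -EINVAL;
        return 0;
    }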

arch/x86/include/asm/kvm_host.h

@@ -545,6 +545,7 @@ struct kvm_vcpu_arch {
 	unsigned long cr3;
 	unsigned long cr4;
 	unsigned long cr4_guest_owned_bits;
+	unsigned long cr4_guest_rsvd_bits;
 	unsigned long cr8;
 	u32 host_pkru;
 	u32 pkru;

arch/x86/kvm/cpuid.c

@@ -128,6 +128,8 @@ int kvm_update_cpuid(struct kvm_vcpu *vcpu)
 	kvm_mmu_reset_context(vcpu);
 
 	kvm_pmu_refresh(vcpu);
+	vcpu->arch.cr4_guest_rsvd_bits =
+	    __cr4_reserved_bits(guest_cpuid_has, vcpu);
 	return 0;
 }
 

arch/x86/kvm/x86.c

@@ -955,33 +955,12 @@ int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
 }
 EXPORT_SYMBOL_GPL(kvm_set_xcr);
 
-#define __cr4_reserved_bits(__cpu_has, __c) \
-({ \
-	u64 __reserved_bits = CR4_RESERVED_BITS; \
-	\
-	if (!__cpu_has(__c, X86_FEATURE_XSAVE)) \
-		__reserved_bits |= X86_CR4_OSXSAVE; \
-	if (!__cpu_has(__c, X86_FEATURE_SMEP)) \
-		__reserved_bits |= X86_CR4_SMEP; \
-	if (!__cpu_has(__c, X86_FEATURE_SMAP)) \
-		__reserved_bits |= X86_CR4_SMAP; \
-	if (!__cpu_has(__c, X86_FEATURE_FSGSBASE)) \
-		__reserved_bits |= X86_CR4_FSGSBASE; \
-	if (!__cpu_has(__c, X86_FEATURE_PKU)) \
-		__reserved_bits |= X86_CR4_PKE; \
-	if (!__cpu_has(__c, X86_FEATURE_LA57)) \
-		__reserved_bits |= X86_CR4_LA57; \
-	if (!__cpu_has(__c, X86_FEATURE_UMIP)) \
-		__reserved_bits |= X86_CR4_UMIP; \
-	__reserved_bits; \
-})
-
 static int kvm_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
 	if (cr4 & cr4_reserved_bits)
 		return -EINVAL;
 
-	if (cr4 & __cr4_reserved_bits(guest_cpuid_has, vcpu))
+	if (cr4 & vcpu->arch.cr4_guest_rsvd_bits)
 		return -EINVAL;
 
 	return 0;

arch/x86/kvm/x86.h

@@ -373,4 +373,25 @@ bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu);
 
 #define KVM_MSR_RET_INVALID 2
 
+#define __cr4_reserved_bits(__cpu_has, __c) \
+({ \
+	u64 __reserved_bits = CR4_RESERVED_BITS; \
+	\
+	if (!__cpu_has(__c, X86_FEATURE_XSAVE)) \
+		__reserved_bits |= X86_CR4_OSXSAVE; \
+	if (!__cpu_has(__c, X86_FEATURE_SMEP)) \
+		__reserved_bits |= X86_CR4_SMEP; \
+	if (!__cpu_has(__c, X86_FEATURE_SMAP)) \
+		__reserved_bits |= X86_CR4_SMAP; \
+	if (!__cpu_has(__c, X86_FEATURE_FSGSBASE)) \
+		__reserved_bits |= X86_CR4_FSGSBASE; \
+	if (!__cpu_has(__c, X86_FEATURE_PKU)) \
+		__reserved_bits |= X86_CR4_PKE; \
+	if (!__cpu_has(__c, X86_FEATURE_LA57)) \
+		__reserved_bits |= X86_CR4_LA57; \
+	if (!__cpu_has(__c, X86_FEATURE_UMIP)) \
+		__reserved_bits |= X86_CR4_UMIP; \
+	__reserved_bits; \
+})
+
 #endif
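
For readers unfamiliar with the pattern, here is a standalone user-space sketch of what __cr4_reserved_bits() computes. The toy_vcpu type, has_feature() helper, and feature enum are stand-ins invented for this illustration, not kernel API; the CR4 bit positions are the architectural ones.

    /* demo.c -- user-space sketch of the __cr4_reserved_bits() pattern. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Architectural CR4 bit positions (Intel SDM vol. 3, sec. 2.5). */
    #define X86_CR4_UMIP     (1ULL << 11)
    #define X86_CR4_LA57     (1ULL << 12)
    #define X86_CR4_FSGSBASE (1ULL << 16)
    #define X86_CR4_OSXSAVE  (1ULL << 18)
    #define X86_CR4_SMEP     (1ULL << 20)
    #define X86_CR4_SMAP     (1ULL << 21)
    #define X86_CR4_PKE      (1ULL << 22)

    /* Stand-ins for guest CPUID feature bits. */
    enum feature { FEAT_XSAVE, FEAT_SMEP, FEAT_SMAP, FEAT_FSGSBASE,
                   FEAT_PKU, FEAT_LA57, FEAT_UMIP, FEAT_MAX };

    struct toy_vcpu {
        bool feat[FEAT_MAX];
        uint64_t cr4_guest_rsvd_bits;   /* the cached mask */
    };

    static bool has_feature(const struct toy_vcpu *v, enum feature f)
    {
        return v->feat[f];
    }

    /* Same shape as the kernel macro: every feature the guest lacks makes
     * the corresponding CR4 bit reserved.  (The kernel also starts from a
     * global CR4_RESERVED_BITS mask; omitted here.) */
    static uint64_t cr4_reserved_bits_for(const struct toy_vcpu *v)
    {
        uint64_t rsvd = 0;

        if (!has_feature(v, FEAT_XSAVE))    rsvd |= X86_CR4_OSXSAVE;
        if (!has_feature(v, FEAT_SMEP))     rsvd |= X86_CR4_SMEP;
        if (!has_feature(v, FEAT_SMAP))     rsvd |= X86_CR4_SMAP;
        if (!has_feature(v, FEAT_FSGSBASE)) rsvd |= X86_CR4_FSGSBASE;
        if (!has_feature(v, FEAT_PKU))      rsvd |= X86_CR4_PKE;
        if (!has_feature(v, FEAT_LA57))     rsvd |= X86_CR4_LA57;
        if (!has_feature(v, FEAT_UMIP))     rsvd |= X86_CR4_UMIP;
        return rsvd;
    }

    int main(void)
    {
        /* A guest that advertises XSAVE and SMEP but nothing else. */
        struct toy_vcpu v = { .feat = { [FEAT_XSAVE] = true,
                                        [FEAT_SMEP]  = true } };

        /* "kvm_update_cpuid()": compute the mask once. */
        v.cr4_guest_rsvd_bits = cr4_reserved_bits_for(&v);

        /* "kvm_valid_cr4()": every CR4 write is now a single AND. */
        uint64_t cr4 = X86_CR4_OSXSAVE | X86_CR4_SMAP;
        printf("CR4 %#llx %s\n", (unsigned long long)cr4,
               (cr4 & v.cr4_guest_rsvd_bits) ? "rejected" : "accepted");
        return 0;
    }

Built with "cc demo.c", this prints "rejected": the toy guest lacks SMAP, so X86_CR4_SMAP lands in the reserved mask, and an attempted CR4 write with that bit set fails. The mask is computed once per CPUID update; each validation is a single AND against the cached value.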