KVM: nVMX: Disable intercept for FS/GS base MSRs in vmcs02 when possible
If L1 is using an MSR bitmap, unconditionally merge the MSR bitmaps from
L0 and L1 for MSR_{KERNEL,}_{FS,GS}_BASE. KVM unconditionally exposes the
MSRs to L1. If KVM is also running in L1 then it's highly likely L1 is
also exposing the MSRs to L2, i.e. KVM doesn't need to intercept L2
accesses.

Based on code from Jintack Lim.

Cc: Jintack Lim <jintack@xxxxxxxxxxxxxxx>
Signed-off-by: Sean Christopherson <sean.j.christopherson@xxxxxxxxx>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit d69129b4e4
parent 9b5db6c762
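As a rough illustration of the merge the commit message describes, the userspace sketch below clears an intercept bit in a vmcs02-style (L0->L2) bitmap only when L1's own bitmap also leaves the MSR unintercepted, so the effective policy is the OR of the L0 and L1 intercepts. The helper names, the flat 8192-bit bitmap, and the placeholder MSR index are illustrative assumptions, not KVM's actual interfaces.

/*
 * Minimal userspace sketch (not kernel code) of the bitmap merge: an MSR
 * is passed through to L2 only if L1's bitmap also passes it through,
 * i.e. the effective intercept is the OR of the L0 and L1 policies.
 * Helper names, the flat 8192-bit bitmap and the placeholder MSR index
 * are assumptions for illustration only.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NR_MSRS 8192	/* one intercept bit per low MSR (0x0 - 0x1fff) */

static bool test_intercept(const uint8_t *bitmap, uint32_t msr)
{
	return bitmap[msr / 8] & (1u << (msr % 8));
}

static void clear_intercept(uint8_t *bitmap, uint32_t msr)
{
	bitmap[msr / 8] &= ~(1u << (msr % 8));
}

/* Disable the L2 intercept only if L1 also does not intercept the MSR. */
static void merge_disable_intercept(const uint8_t *bitmap_l1,
				    uint8_t *bitmap_l02, uint32_t msr)
{
	if (!test_intercept(bitmap_l1, msr))
		clear_intercept(bitmap_l02, msr);
}

int main(void)
{
	static uint8_t bitmap_l1[NR_MSRS / 8];
	static uint8_t bitmap_l02[NR_MSRS / 8];
	const uint32_t msr = 0x100;	/* placeholder index, not a real MSR */

	/* Start from "intercept everything", as KVM does for vmcs02. */
	memset(bitmap_l1, 0xff, sizeof(bitmap_l1));
	memset(bitmap_l02, 0xff, sizeof(bitmap_l02));

	clear_intercept(bitmap_l1, msr);	/* L1 passes the MSR through */
	merge_disable_intercept(bitmap_l1, bitmap_l02, msr);

	printf("L2 access intercepted: %s\n",
	       test_intercept(bitmap_l02, msr) ? "yes" : "no");
	return 0;
}

Running the sketch prints "no": once L1 passes the MSR through, the merged bitmap stops intercepting it as well, which is exactly what the patch arranges for the FS/GS base MSRs.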
@@ -514,31 +514,11 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
 	unsigned long *msr_bitmap_l0 = to_vmx(vcpu)->nested.vmcs02.msr_bitmap;
 	struct kvm_host_map *map = &to_vmx(vcpu)->nested.msr_bitmap_map;
-
-	/*
-	 * pred_cmd & spec_ctrl are trying to verify two things:
-	 *
-	 * 1. L0 gave a permission to L1 to actually passthrough the MSR. This
-	 *    ensures that we do not accidentally generate an L02 MSR bitmap
-	 *    from the L12 MSR bitmap that is too permissive.
-	 * 2. That L1 or L2s have actually used the MSR. This avoids
-	 *    unnecessarily merging of the bitmap if the MSR is unused. This
-	 *    works properly because we only update the L01 MSR bitmap lazily.
-	 *    So even if L0 should pass L1 these MSRs, the L01 bitmap is only
-	 *    updated to reflect this when L1 (or its L2s) actually write to
-	 *    the MSR.
-	 */
-	bool pred_cmd = !msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD);
-	bool spec_ctrl = !msr_write_intercepted_l01(vcpu, MSR_IA32_SPEC_CTRL);
 
 	/* Nothing to do if the MSR bitmap is not in use. */
 	if (!cpu_has_vmx_msr_bitmap() ||
 	    !nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
 		return false;
 
-	if (!nested_cpu_has_virt_x2apic_mode(vmcs12) &&
-	    !pred_cmd && !spec_ctrl)
-		return false;
-
 	if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->msr_bitmap), map))
 		return false;
 
@@ -583,13 +563,36 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
 		}
 	}
 
-	if (spec_ctrl)
+	/* KVM unconditionally exposes the FS/GS base MSRs to L1. */
+	nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
+					     MSR_FS_BASE, MSR_TYPE_RW);
+
+	nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
+					     MSR_GS_BASE, MSR_TYPE_RW);
+
+	nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
+					     MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
+
+	/*
+	 * Checking the L0->L1 bitmap is trying to verify two things:
+	 *
+	 * 1. L0 gave a permission to L1 to actually passthrough the MSR. This
+	 *    ensures that we do not accidentally generate an L02 MSR bitmap
+	 *    from the L12 MSR bitmap that is too permissive.
+	 * 2. That L1 or L2s have actually used the MSR. This avoids
+	 *    unnecessarily merging of the bitmap if the MSR is unused. This
+	 *    works properly because we only update the L01 MSR bitmap lazily.
+	 *    So even if L0 should pass L1 these MSRs, the L01 bitmap is only
+	 *    updated to reflect this when L1 (or its L2s) actually write to
+	 *    the MSR.
+	 */
+	if (!msr_write_intercepted_l01(vcpu, MSR_IA32_SPEC_CTRL))
 		nested_vmx_disable_intercept_for_msr(
 					msr_bitmap_l1, msr_bitmap_l0,
 					MSR_IA32_SPEC_CTRL,
 					MSR_TYPE_R | MSR_TYPE_W);
 
-	if (pred_cmd)
+	if (!msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD))
 		nested_vmx_disable_intercept_for_msr(
 					msr_bitmap_l1, msr_bitmap_l0,
 					MSR_IA32_PRED_CMD,
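The comment added in the second hunk keys the SPEC_CTRL/PRED_CMD merge off the L0->L1 write bitmap. As a sketch of what a check like msr_write_intercepted_l01() boils down to, the snippet below looks up the write-intercept bit for an MSR in a 4 KiB VMX MSR bitmap; the page layout (read bitmaps in the first 2 KiB, write bitmaps in the second, each split into low MSRs 0x0-0x1fff and high MSRs 0xc0000000-0xc0001fff) follows the Intel SDM, while the function name and types here are assumptions for illustration, not KVM's.

/*
 * Illustrative only: the name and signature are assumptions, not KVM's.
 * Returns true if a WRMSR to 'msr' would be intercepted according to a
 * 4 KiB VMX MSR bitmap laid out as described in the Intel SDM.
 */
#include <stdbool.h>
#include <stdint.h>

static bool sketch_msr_write_intercepted(const uint8_t *msr_bitmap, uint32_t msr)
{
	if (msr <= 0x1fff)		/* low MSRs: write bits start at offset 0x800 */
		return msr_bitmap[0x800 + msr / 8] & (1u << (msr % 8));
	if (msr >= 0xc0000000 && msr <= 0xc0001fff) {	/* high MSRs: offset 0xc00 */
		uint32_t idx = msr & 0x1fff;
		return msr_bitmap[0xc00 + idx / 8] & (1u << (idx % 8));
	}
	return true;	/* MSRs outside both ranges always cause a VM-exit */
}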