Many small x86 bug fixes: SVM segment registers access rights, nested VMX,
preempt notifiers, LAPIC virtual wire mode, NMI injection.
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v2.0.22 (GNU/Linux)

iQEcBAABAgAGBQJZMETIAAoJEL/70l94x66DyjgH/i0LJydL2vnDKanUsPdQYrml
daoRm67uaz8cy456SIvz+j+NAmVLKoOsQy+jRigFpOWDJglBhy77Fw0rD68uTaf1
vGRl75pOYANxVlDC0BLgUwXVUjfFsLs6sKYpIIb9pTKNf8Q04MaWSpeCX+GUe0IR
8Ere9LK0UKTinF1cHmZe4ihG9DYylK6DEagk/9qnxEu1rU8ZiC9SXguXXeNDQI9p
wppkBngOokqbZ5oTVIkBmbbDMpVefj6ioGqeBVBjS6xE0UlfvJsjsJ54wdLXsGue
7CF8E1cX7d8NohtlN5qZGssTDscUPPJghalXpeIhtHYgooKf1VeZExATA5YCVLw=
=bHQF
-----END PGP SIGNATURE-----

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull KVM fixes from Paolo Bonzini:
 "Many small x86 bug fixes: SVM segment registers access rights, nested
  VMX, preempt notifiers, LAPIC virtual wire mode, NMI injection"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: x86: Fix nmi injection failure when vcpu got blocked
  KVM: SVM: do not zero out segment attributes if segment is unusable or not present
  KVM: SVM: ignore type when setting segment registers
  KVM: nVMX: fix nested_vmx_check_vmptr failure paths under debugging
  KVM: x86: Fix virtual wire mode
  KVM: nVMX: Fix handling of lmsw instruction
  KVM: X86: Fix preempt the preemption timer cancel
commit 9ea15a59c3
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1495,8 +1495,10 @@ EXPORT_SYMBOL_GPL(kvm_lapic_hv_timer_in_use);
 
 static void cancel_hv_timer(struct kvm_lapic *apic)
 {
+	preempt_disable();
 	kvm_x86_ops->cancel_hv_timer(apic->vcpu);
 	apic->lapic_timer.hv_timer_in_use = false;
+	preempt_enable();
 }
 
 static bool start_hv_timer(struct kvm_lapic *apic)
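
The two added lines are the "preempt the preemption timer cancel" fix: the cancel callback manipulates the VMX preemption timer of the vCPU loaded on the current CPU, so the callback and the hv_timer_in_use update are bracketed with preempt_disable()/preempt_enable() so the task cannot be preempted and migrated between the two steps. A minimal standalone sketch of that bracketing pattern, with the kernel's preempt helpers and the vendor callback stubbed out (the stubs are assumptions, not kernel code):

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for preempt_disable()/preempt_enable() and for
 * kvm_x86_ops->cancel_hv_timer(); in the kernel these really do pin the
 * task to the current CPU and reprogram per-CPU timer state. */
static void preempt_disable(void) { }
static void preempt_enable(void) { }
static void vendor_cancel_hv_timer(void) { }

struct lapic_timer {
	bool hv_timer_in_use;
};

/* Same shape as the fixed cancel_hv_timer(): the per-CPU cancel and the
 * bookkeeping update run as one non-preemptible unit. */
static void cancel_hv_timer(struct lapic_timer *t)
{
	preempt_disable();
	vendor_cancel_hv_timer();
	t->hv_timer_in_use = false;
	preempt_enable();
}

int main(void)
{
	struct lapic_timer t = { .hv_timer_in_use = true };

	cancel_hv_timer(&t);
	printf("hv_timer_in_use = %d\n", t.hv_timer_in_use);
	return 0;
}
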
@@ -1934,7 +1936,8 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
 	for (i = 0; i < KVM_APIC_LVT_NUM; i++)
 		kvm_lapic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);
 	apic_update_lvtt(apic);
-	if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED))
+	if (kvm_vcpu_is_reset_bsp(vcpu) &&
+	    kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED))
 		kvm_lapic_set_reg(apic, APIC_LVT0,
 			     SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));
 	apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));
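
The virtual wire mode fix narrows when LINT0 is re-enabled after a local APIC reset: every LVT entry is first masked, and only the BSP that is actually being reset gets LINT0 switched to ExtINT delivery; APs keep it masked. A standalone sketch of that decision, with APIC_LVT_MASKED, APIC_MODE_EXTINT and SET_APIC_DELIVERY_MODE re-declared here using their apicdef.h values so it compiles on its own (treat the constants as assumptions):

#include <stdbool.h>
#include <stdio.h>

#define APIC_LVT_MASKED			(1 << 16)	/* LVT mask bit */
#define APIC_MODE_EXTINT		0x7		/* ExtINT delivery mode */
#define SET_APIC_DELIVERY_MODE(x, y)	(((x) & ~0x700) | ((y) << 8))

/* LVT0 value a vCPU ends up with after kvm_lapic_reset(): only the reset
 * BSP (with the LINT0 quirk enabled) gets virtual wire mode. */
static unsigned int lvt0_after_reset(bool is_reset_bsp, bool quirk_enabled)
{
	unsigned int lvt0 = APIC_LVT_MASKED;

	if (is_reset_bsp && quirk_enabled)
		lvt0 = SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT);
	return lvt0;
}

int main(void)
{
	printf("reset BSP: LVT0 = %#x\n", lvt0_after_reset(true, true));	/* 0x700 */
	printf("AP:        LVT0 = %#x\n", lvt0_after_reset(false, true));	/* 0x10000 */
	return 0;
}
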
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1807,7 +1807,7 @@ static void svm_get_segment(struct kvm_vcpu *vcpu,
 	 * AMD's VMCB does not have an explicit unusable field, so emulate it
 	 * for cross vendor migration purposes by "not present"
 	 */
-	var->unusable = !var->present || (var->type == 0);
+	var->unusable = !var->present;
 
 	switch (seg) {
 	case VCPU_SREG_TR:
@@ -1840,6 +1840,7 @@ static void svm_get_segment(struct kvm_vcpu *vcpu,
 		 */
 		if (var->unusable)
 			var->db = 0;
+		/* This is symmetric with svm_set_segment() */
 		var->dpl = to_svm(vcpu)->vmcb->save.cpl;
 		break;
 	}
@@ -1980,18 +1981,14 @@ static void svm_set_segment(struct kvm_vcpu *vcpu,
 	s->base = var->base;
 	s->limit = var->limit;
 	s->selector = var->selector;
-	if (var->unusable)
-		s->attrib = 0;
-	else {
-		s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
-		s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
-		s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
-		s->attrib |= (var->present & 1) << SVM_SELECTOR_P_SHIFT;
-		s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
-		s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
-		s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
-		s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
-	}
+	s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
+	s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
+	s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
+	s->attrib |= ((var->present & 1) && !var->unusable) << SVM_SELECTOR_P_SHIFT;
+	s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
+	s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
+	s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
+	s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
 
 	/*
 	 * This is always accurate, except if SYSRET returned to a segment
@@ -2000,7 +1997,8 @@ static void svm_set_segment(struct kvm_vcpu *vcpu,
 	 * would entail passing the CPL to userspace and back.
 	 */
 	if (seg == VCPU_SREG_SS)
-		svm->vmcb->save.cpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
+		/* This is symmetric with svm_get_segment() */
+		svm->vmcb->save.cpl = (var->dpl & 3);
 
 	mark_dirty(svm->vmcb, VMCB_SEG);
 }
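
Taken together, the svm_set_segment() hunks above make the VMCB attribute packing unconditional and fold "unusable" into the present bit alone, so a get/set round trip no longer wipes the remaining attribute bits of an unusable or not-present segment. A standalone sketch of that packing, with the SVM_SELECTOR_*_SHIFT values reproduced from the kernel's svm.h and a trimmed-down stand-in for struct kvm_segment (both are assumptions made only so the sketch is self-contained):

#include <stdint.h>
#include <stdio.h>

/* Bit layout of the VMCB segment attribute field, as in svm.h. */
#define SVM_SELECTOR_TYPE_MASK	0xf
#define SVM_SELECTOR_S_SHIFT	4
#define SVM_SELECTOR_DPL_SHIFT	5
#define SVM_SELECTOR_P_SHIFT	7
#define SVM_SELECTOR_AVL_SHIFT	8
#define SVM_SELECTOR_L_SHIFT	9
#define SVM_SELECTOR_DB_SHIFT	10
#define SVM_SELECTOR_G_SHIFT	11

/* Trimmed-down stand-in for struct kvm_segment. */
struct seg {
	uint8_t type, s, dpl, present, avl, l, db, g, unusable;
};

/* Pack a segment the way the fixed svm_set_segment() does: keep every
 * field, and encode "unusable" only by clearing the present bit. */
static uint16_t pack_attrib(const struct seg *v)
{
	uint16_t attrib;

	attrib  = (v->type & SVM_SELECTOR_TYPE_MASK);
	attrib |= (v->s & 1) << SVM_SELECTOR_S_SHIFT;
	attrib |= (v->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
	attrib |= ((v->present & 1) && !v->unusable) << SVM_SELECTOR_P_SHIFT;
	attrib |= (v->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
	attrib |= (v->l & 1) << SVM_SELECTOR_L_SHIFT;
	attrib |= (v->db & 1) << SVM_SELECTOR_DB_SHIFT;
	attrib |= (v->g & 1) << SVM_SELECTOR_G_SHIFT;
	return attrib;
}

int main(void)
{
	/* An unusable data segment: type, DPL, D/B and G survive in the
	 * attribute word; only the present bit is dropped. */
	struct seg unusable_data = {
		.type = 3, .s = 1, .dpl = 3, .present = 1,
		.db = 1, .g = 1, .unusable = 1,
	};

	printf("attrib = %#x\n", pack_attrib(&unusable_data));
	return 0;
}
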
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -6914,97 +6914,21 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
-/*
- * This function performs the various checks including
- * - if it's 4KB aligned
- * - No bits beyond the physical address width are set
- * - Returns 0 on success or else 1
- * (Intel SDM Section 30.3)
- */
-static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
-				  gpa_t *vmpointer)
+static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer)
 {
 	gva_t gva;
-	gpa_t vmptr;
 	struct x86_exception e;
-	struct page *page;
-	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	int maxphyaddr = cpuid_maxphyaddr(vcpu);
 
 	if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
 			vmcs_read32(VMX_INSTRUCTION_INFO), false, &gva))
 		return 1;
 
-	if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr,
-				sizeof(vmptr), &e)) {
+	if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, vmpointer,
+				sizeof(*vmpointer), &e)) {
 		kvm_inject_page_fault(vcpu, &e);
 		return 1;
 	}
 
-	switch (exit_reason) {
-	case EXIT_REASON_VMON:
-		/*
-		 * SDM 3: 24.11.5
-		 * The first 4 bytes of VMXON region contain the supported
-		 * VMCS revision identifier
-		 *
-		 * Note - IA32_VMX_BASIC[48] will never be 1
-		 * for the nested case;
-		 * which replaces physical address width with 32
-		 *
-		 */
-		if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
-			nested_vmx_failInvalid(vcpu);
-			return kvm_skip_emulated_instruction(vcpu);
-		}
-
-		page = nested_get_page(vcpu, vmptr);
-		if (page == NULL) {
-			nested_vmx_failInvalid(vcpu);
-			return kvm_skip_emulated_instruction(vcpu);
-		}
-		if (*(u32 *)kmap(page) != VMCS12_REVISION) {
-			kunmap(page);
-			nested_release_page_clean(page);
-			nested_vmx_failInvalid(vcpu);
-			return kvm_skip_emulated_instruction(vcpu);
-		}
-		kunmap(page);
-		nested_release_page_clean(page);
-		vmx->nested.vmxon_ptr = vmptr;
-		break;
-	case EXIT_REASON_VMCLEAR:
-		if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
-			nested_vmx_failValid(vcpu,
-					     VMXERR_VMCLEAR_INVALID_ADDRESS);
-			return kvm_skip_emulated_instruction(vcpu);
-		}
-
-		if (vmptr == vmx->nested.vmxon_ptr) {
-			nested_vmx_failValid(vcpu,
-					     VMXERR_VMCLEAR_VMXON_POINTER);
-			return kvm_skip_emulated_instruction(vcpu);
-		}
-		break;
-	case EXIT_REASON_VMPTRLD:
-		if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
-			nested_vmx_failValid(vcpu,
-					     VMXERR_VMPTRLD_INVALID_ADDRESS);
-			return kvm_skip_emulated_instruction(vcpu);
-		}
-
-		if (vmptr == vmx->nested.vmxon_ptr) {
-			nested_vmx_failValid(vcpu,
-					     VMXERR_VMPTRLD_VMXON_POINTER);
-			return kvm_skip_emulated_instruction(vcpu);
-		}
-		break;
-	default:
-		return 1; /* shouldn't happen */
-	}
-
-	if (vmpointer)
-		*vmpointer = vmptr;
 	return 0;
 }
 
@@ -7066,6 +6990,8 @@ static int enter_vmx_operation(struct kvm_vcpu *vcpu)
 static int handle_vmon(struct kvm_vcpu *vcpu)
 {
 	int ret;
+	gpa_t vmptr;
+	struct page *page;
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED
 		| FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
@@ -7095,9 +7021,37 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
 		return 1;
 	}
 
-	if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMON, NULL))
+	if (nested_vmx_get_vmptr(vcpu, &vmptr))
 		return 1;
 
+	/*
+	 * SDM 3: 24.11.5
+	 * The first 4 bytes of VMXON region contain the supported
+	 * VMCS revision identifier
+	 *
+	 * Note - IA32_VMX_BASIC[48] will never be 1 for the nested case;
+	 * which replaces physical address width with 32
+	 */
+	if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) {
+		nested_vmx_failInvalid(vcpu);
+		return kvm_skip_emulated_instruction(vcpu);
+	}
+
+	page = nested_get_page(vcpu, vmptr);
+	if (page == NULL) {
+		nested_vmx_failInvalid(vcpu);
+		return kvm_skip_emulated_instruction(vcpu);
+	}
+	if (*(u32 *)kmap(page) != VMCS12_REVISION) {
+		kunmap(page);
+		nested_release_page_clean(page);
+		nested_vmx_failInvalid(vcpu);
+		return kvm_skip_emulated_instruction(vcpu);
+	}
+	kunmap(page);
+	nested_release_page_clean(page);
+
+	vmx->nested.vmxon_ptr = vmptr;
 	ret = enter_vmx_operation(vcpu);
 	if (ret)
 		return ret;
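
With the helper reduced to fetching the pointer, each caller (handle_vmon, handle_vmclear, handle_vmptrld) now applies the architectural validity check itself: the operand must be 4 KiB aligned and must not set bits above the guest's physical-address width. A standalone sketch of that predicate, with PAGE_ALIGNED re-declared for a 4 KiB page and maxphyaddr passed in as a plain number (in the kernel it comes from cpuid_maxphyaddr()):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096ULL
#define PAGE_ALIGNED(x)	(((x) & (PAGE_SIZE - 1)) == 0)

/* Mirror of the check the callers now do:
 *   if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) ... fail */
static bool vmptr_is_valid(uint64_t vmptr, unsigned int maxphyaddr)
{
	return PAGE_ALIGNED(vmptr) && (vmptr >> maxphyaddr) == 0;
}

int main(void)
{
	unsigned int maxphyaddr = 40;	/* example guest MAXPHYADDR */

	printf("%d\n", vmptr_is_valid(0x12345000ULL, maxphyaddr));	/* 1 */
	printf("%d\n", vmptr_is_valid(0x12345010ULL, maxphyaddr));	/* 0: not aligned */
	printf("%d\n", vmptr_is_valid(1ULL << 45, maxphyaddr));	/* 0: beyond width */
	return 0;
}
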
@@ -7213,9 +7167,19 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
 	if (!nested_vmx_check_permission(vcpu))
 		return 1;
 
-	if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMCLEAR, &vmptr))
+	if (nested_vmx_get_vmptr(vcpu, &vmptr))
 		return 1;
 
+	if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) {
+		nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_INVALID_ADDRESS);
+		return kvm_skip_emulated_instruction(vcpu);
+	}
+
+	if (vmptr == vmx->nested.vmxon_ptr) {
+		nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_VMXON_POINTER);
+		return kvm_skip_emulated_instruction(vcpu);
+	}
+
 	if (vmptr == vmx->nested.current_vmptr)
 		nested_release_vmcs12(vmx);
 
@@ -7545,9 +7509,19 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
 	if (!nested_vmx_check_permission(vcpu))
 		return 1;
 
-	if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMPTRLD, &vmptr))
+	if (nested_vmx_get_vmptr(vcpu, &vmptr))
 		return 1;
 
+	if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) {
+		nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_INVALID_ADDRESS);
+		return kvm_skip_emulated_instruction(vcpu);
+	}
+
+	if (vmptr == vmx->nested.vmxon_ptr) {
+		nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_VMXON_POINTER);
+		return kvm_skip_emulated_instruction(vcpu);
+	}
+
 	if (vmx->nested.current_vmptr != vmptr) {
 		struct vmcs12 *new_vmcs12;
 		struct page *page;
@@ -7913,11 +7887,13 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
 {
 	unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
 	int cr = exit_qualification & 15;
-	int reg = (exit_qualification >> 8) & 15;
-	unsigned long val = kvm_register_readl(vcpu, reg);
+	int reg;
+	unsigned long val;
 
 	switch ((exit_qualification >> 4) & 3) {
 	case 0: /* mov to cr */
+		reg = (exit_qualification >> 8) & 15;
+		val = kvm_register_readl(vcpu, reg);
 		switch (cr) {
 		case 0:
 			if (vmcs12->cr0_guest_host_mask &
@@ -7972,6 +7948,7 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
 		 * lmsw can change bits 1..3 of cr0, and only set bit 0 of
 		 * cr0. Other attempted changes are ignored, with no exit.
 		 */
+		val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
 		if (vmcs12->cr0_guest_host_mask & 0xe &
 		    (val ^ vmcs12->cr0_read_shadow))
 			return true;
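
This hunk, together with the nested_vmx_exit_handled_cr() change above, is the lmsw fix: for a CR-access exit caused by lmsw the written value is not in a general-purpose register, it sits in the exit qualification starting at bit 16 (LMSW_SOURCE_DATA_SHIFT), and only its low four bits matter, so reading a register value for every CR exit tested the wrong data. A standalone sketch of the corrected decision for bits 1..3 (the 0xe mask); the PE bit is handled by a separate check in the same function and is not shown here (constants re-declared as assumptions):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define LMSW_SOURCE_DATA_SHIFT	16	/* lmsw source data in the exit qualification */

/* Does an lmsw executed by L2 need to be reflected to L1?  Only CR0
 * bits 1..3 (mask 0xe) are considered here, matching the hunk above. */
static bool lmsw_wants_exit(uint64_t exit_qualification,
			    uint64_t cr0_guest_host_mask,
			    uint64_t cr0_read_shadow)
{
	uint64_t val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;

	return (cr0_guest_host_mask & 0xe & (val ^ cr0_read_shadow)) != 0;
}

int main(void)
{
	/* L1 owns CR0.MP (bit 1) and the lmsw source data tries to set it,
	 * so L1 gets the exit; if L1 does not own the bit, no exit. */
	uint64_t exit_qualification = (uint64_t)0x2 << LMSW_SOURCE_DATA_SHIFT;

	printf("%d\n", lmsw_wants_exit(exit_qualification, 0x2, 0x0));	/* 1 */
	printf("%d\n", lmsw_wants_exit(exit_qualification, 0x0, 0x0));	/* 0 */
	return 0;
}
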
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -8394,10 +8394,13 @@ static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
 	if (vcpu->arch.pv.pv_unhalted)
 		return true;
 
-	if (atomic_read(&vcpu->arch.nmi_queued))
+	if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
+	    (vcpu->arch.nmi_pending &&
+	     kvm_x86_ops->nmi_allowed(vcpu)))
 		return true;
 
-	if (kvm_test_request(KVM_REQ_SMI, vcpu))
+	if (kvm_test_request(KVM_REQ_SMI, vcpu) ||
+	    (vcpu->arch.smi_pending && !is_smm(vcpu)))
 		return true;
 
 	if (kvm_arch_interrupt_allowed(vcpu) &&
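
The x86.c hunk is what makes the "nmi injection failure when vcpu got blocked" fix effective: a halted vCPU is only woken when kvm_vcpu_has_events() returns true, so an NMI that has already moved out of nmi_queued into nmi_pending, or a pending SMI, must still count as an event. A standalone sketch of the widened predicate, with plain booleans standing in for kvm_test_request(), nmi_allowed() and is_smm():

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the per-vCPU state the real check consults. */
struct vcpu_state {
	bool nmi_request_pending;	/* kvm_test_request(KVM_REQ_NMI, vcpu) */
	bool nmi_pending;		/* vcpu->arch.nmi_pending */
	bool nmi_allowed;		/* kvm_x86_ops->nmi_allowed(vcpu) */
	bool smi_request_pending;	/* kvm_test_request(KVM_REQ_SMI, vcpu) */
	bool smi_pending;		/* vcpu->arch.smi_pending */
	bool in_smm;			/* is_smm(vcpu) */
};

/* Widened event check, shaped like the fixed kvm_vcpu_has_events(): a
 * queued request *or* an already-latched NMI/SMI that could be injected
 * counts as an event and wakes a blocked vCPU. */
static bool vcpu_has_nmi_or_smi_event(const struct vcpu_state *v)
{
	if (v->nmi_request_pending || (v->nmi_pending && v->nmi_allowed))
		return true;

	if (v->smi_request_pending || (v->smi_pending && !v->in_smm))
		return true;

	return false;
}

int main(void)
{
	/* NMI already latched in nmi_pending with no fresh KVM_REQ_NMI:
	 * the old nmi_queued-only check would have kept this vCPU asleep. */
	struct vcpu_state v = { .nmi_pending = true, .nmi_allowed = true };

	printf("%d\n", vcpu_has_nmi_or_smi_event(&v));	/* 1 */
	return 0;
}
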