x86/kvm: Use generic xfer to guest work function

Use the generic infrastructure to check for and handle pending work before
transitioning into guest mode.

This now handles TIF_NOTIFY_RESUME as well which was ignored so
far. Handling it is important as this covers task work and task work will
be used to offload the heavy lifting of POSIX CPU timers to thread context.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lkml.kernel.org/r/20200722220520.979724969@linutronix.de
This commit is contained in:
Thomas Gleixner 2020-07-23 00:00:09 +02:00
parent a27a0a5549
commit 72c3c0fe54
3 changed files with 12 additions and 15 deletions

View File

@@ -42,6 +42,7 @@ config KVM
 	select HAVE_KVM_MSI
 	select HAVE_KVM_CPU_RELAX_INTERCEPT
 	select HAVE_KVM_NO_POLL
+	select KVM_XFER_TO_GUEST_WORK
 	select KVM_GENERIC_DIRTYLOG_READ_PROTECT
 	select KVM_VFIO
 	select SRCU

View File

@@ -27,6 +27,7 @@
 #include <linux/slab.h>
 #include <linux/tboot.h>
 #include <linux/trace_events.h>
+#include <linux/entry-kvm.h>
 
 #include <asm/apic.h>
 #include <asm/asm.h>
@@ -5373,14 +5374,12 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
 		}
 
 		/*
-		 * Note, return 1 and not 0, vcpu_run() is responsible for
-		 * morphing the pending signal into the proper return code.
+		 * Note, return 1 and not 0, vcpu_run() will invoke
+		 * xfer_to_guest_mode() which will create a proper return
+		 * code.
 		 */
-		if (signal_pending(current))
+		if (__xfer_to_guest_mode_work_pending())
 			return 1;
-
-		if (need_resched())
-			schedule();
 	}
 
 	return 1;

View File

@@ -56,6 +56,7 @@
 #include <linux/sched/stat.h>
 #include <linux/sched/isolation.h>
 #include <linux/mem_encrypt.h>
+#include <linux/entry-kvm.h>
 
 #include <trace/events/kvm.h>
@@ -1587,7 +1588,7 @@ EXPORT_SYMBOL_GPL(kvm_emulate_wrmsr);
 
 bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu)
 {
 	return vcpu->mode == EXITING_GUEST_MODE || kvm_request_pending(vcpu) ||
-		need_resched() || signal_pending(current);
+		xfer_to_guest_mode_work_pending();
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_exit_request);
@@ -8681,15 +8682,11 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
 			break;
 		}
 
-		if (signal_pending(current)) {
-			r = -EINTR;
-			vcpu->run->exit_reason = KVM_EXIT_INTR;
-			++vcpu->stat.signal_exits;
-			break;
-		}
-		if (need_resched()) {
+		if (xfer_to_guest_mode_work_pending()) {
 			srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
-			cond_resched();
+			r = xfer_to_guest_mode_handle_work(vcpu);
+			if (r)
+				return r;
 			vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
 		}
 	}