forked from luck/tmp_suning_uos_patched
entry: Provide infrastructure for work before transitioning to guest mode
Entering a guest is similar to exiting to user space. Pending work like handling signals, rescheduling, task work etc. needs to be handled before that. Provide generic infrastructure to avoid duplication of the same handling code all over the place. The transfer to guest mode handling is different from the exit to usermode handling, e.g. vs. rseq and live patching, so a separate function is used. The initial list of work items handled is: TIF_SIGPENDING, TIF_NEED_RESCHED, TIF_NOTIFY_RESUME Architecture specific TIF flags can be added via defines in the architecture specific include files. The calling convention is also different from the syscall/interrupt entry functions as KVM invokes this from the outer vcpu_run() loop with interrupts and preemption enabled. To prevent missing a pending work item it invokes a check for pending TIF work from interrupt disabled code right before transitioning to guest mode. The lockdep, RCU and tracing state handling is also done directly around the switch to and from guest mode. Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Link: https://lkml.kernel.org/r/20200722220519.833296398@linutronix.de
This commit is contained in:
parent
a5497bab5f
commit
935ace2fb5
80
include/linux/entry-kvm.h
Normal file
80
include/linux/entry-kvm.h
Normal file
|
@ -0,0 +1,80 @@
|
||||||
|
/* SPDX-License-Identifier: GPL-2.0 */
|
||||||
|
#ifndef __LINUX_ENTRYKVM_H
|
||||||
|
#define __LINUX_ENTRYKVM_H
|
||||||
|
|
||||||
|
#include <linux/entry-common.h>
|
||||||
|
|
||||||
|
/* Transfer to guest mode work */
|
||||||
|
#ifdef CONFIG_KVM_XFER_TO_GUEST_WORK
|
||||||
|
|
||||||
|
/*
 * Architectures can OR their own TIF bits into the transfer-to-guest work
 * mask by defining ARCH_XFER_TO_GUEST_MODE_WORK in their arch headers.
 */
#ifndef ARCH_XFER_TO_GUEST_MODE_WORK
# define ARCH_XFER_TO_GUEST_MODE_WORK	(0)
#endif

/* TIF work which must be handled before transitioning to guest mode */
#define XFER_TO_GUEST_MODE_WORK						\
	(_TIF_SIGPENDING | _TIF_NEED_RESCHED |				\
	 _TIF_NOTIFY_RESUME | ARCH_XFER_TO_GUEST_MODE_WORK)
|
||||||
|
|
||||||
|
struct kvm_vcpu;

/**
 * arch_xfer_to_guest_mode_handle_work - Architecture specific xfer to guest
 *					 mode work handling function.
 * @vcpu:	Pointer to current's VCPU data
 * @ti_work:	Cached TIF flags gathered in xfer_to_guest_mode_handle_work()
 *
 * Invoked from xfer_to_guest_mode_handle_work(). Defaults to NOOP. Can be
 * replaced by architecture specific code.
 *
 * Returns: 0 on success, or a negative error code which aborts the
 * transition to guest mode.
 */
static inline int arch_xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu,
						      unsigned long ti_work);

/*
 * The guard has to match the function name so that an architecture which
 * provides its own arch_xfer_to_guest_mode_handle_work() (and defines the
 * matching macro) suppresses this default stub. The original guard tested
 * the unrelated name 'arch_xfer_to_guest_mode_work', which would have made
 * an architecture override clash with the stub below.
 */
#ifndef arch_xfer_to_guest_mode_handle_work
static inline int arch_xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu,
						      unsigned long ti_work)
{
	return 0;
}
#endif
|
||||||
|
|
||||||
|
/**
 * xfer_to_guest_mode_handle_work - Check and handle pending work which needs
 *				    to be handled before going to guest mode
 * @vcpu:	Pointer to current's VCPU data
 *
 * Invoked by KVM from the outer vcpu_run() loop with interrupts and
 * preemption enabled.
 *
 * Returns: 0 or an error code
 */
int xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* __xfer_to_guest_mode_work_pending - Check if work is pending
|
||||||
|
*
|
||||||
|
* Returns: True if work pending, False otherwise.
|
||||||
|
*
|
||||||
|
* Bare variant of xfer_to_guest_mode_work_pending(). Can be called from
|
||||||
|
* interrupt enabled code for racy quick checks with care.
|
||||||
|
*/
|
||||||
|
static inline bool __xfer_to_guest_mode_work_pending(void)
|
||||||
|
{
|
||||||
|
unsigned long ti_work = READ_ONCE(current_thread_info()->flags);
|
||||||
|
|
||||||
|
return !!(ti_work & XFER_TO_GUEST_MODE_WORK);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* xfer_to_guest_mode_work_pending - Check if work is pending which needs to be
|
||||||
|
* handled before returning to guest mode
|
||||||
|
*
|
||||||
|
* Returns: True if work pending, False otherwise.
|
||||||
|
*
|
||||||
|
* Has to be invoked with interrupts disabled before the transition to
|
||||||
|
* guest mode.
|
||||||
|
*/
|
||||||
|
static inline bool xfer_to_guest_mode_work_pending(void)
|
||||||
|
{
|
||||||
|
lockdep_assert_irqs_disabled();
|
||||||
|
return __xfer_to_guest_mode_work_pending();
|
||||||
|
}
|
||||||
|
#endif /* CONFIG_KVM_XFER_TO_GUEST_WORK */
|
||||||
|
|
||||||
|
#endif
|
|
@ -1439,4 +1439,12 @@ int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn,
|
||||||
uintptr_t data, const char *name,
|
uintptr_t data, const char *name,
|
||||||
struct task_struct **thread_ptr);
|
struct task_struct **thread_ptr);
|
||||||
|
|
||||||
|
#ifdef CONFIG_KVM_XFER_TO_GUEST_WORK
/*
 * Account a signal-induced guest exit and report KVM_EXIT_INTR to user
 * space via the run structure.
 */
static inline void kvm_handle_signal_exit(struct kvm_vcpu *vcpu)
{
	vcpu->stat.signal_exits++;
	vcpu->run->exit_reason = KVM_EXIT_INTR;
}
#endif /* CONFIG_KVM_XFER_TO_GUEST_WORK */
|
||||||
|
|
||||||
#endif
|
#endif
|
||||||
|
|
|
@ -9,4 +9,5 @@ KCOV_INSTRUMENT := n
|
||||||
CFLAGS_REMOVE_common.o = -fstack-protector -fstack-protector-strong
|
CFLAGS_REMOVE_common.o = -fstack-protector -fstack-protector-strong
|
||||||
CFLAGS_common.o += -fno-stack-protector
|
CFLAGS_common.o += -fno-stack-protector
|
||||||
|
|
||||||
obj-$(CONFIG_GENERIC_ENTRY) += common.o
|
obj-$(CONFIG_GENERIC_ENTRY) += common.o
|
||||||
|
obj-$(CONFIG_KVM_XFER_TO_GUEST_WORK) += kvm.o
|
||||||
|
|
51
kernel/entry/kvm.c
Normal file
51
kernel/entry/kvm.c
Normal file
|
@ -0,0 +1,51 @@
|
||||||
|
// SPDX-License-Identifier: GPL-2.0
|
||||||
|
|
||||||
|
#include <linux/entry-kvm.h>
|
||||||
|
#include <linux/kvm_host.h>
|
||||||
|
|
||||||
|
static int xfer_to_guest_mode_work(struct kvm_vcpu *vcpu, unsigned long ti_work)
|
||||||
|
{
|
||||||
|
do {
|
||||||
|
int ret;
|
||||||
|
|
||||||
|
if (ti_work & _TIF_SIGPENDING) {
|
||||||
|
kvm_handle_signal_exit(vcpu);
|
||||||
|
return -EINTR;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (ti_work & _TIF_NEED_RESCHED)
|
||||||
|
schedule();
|
||||||
|
|
||||||
|
if (ti_work & _TIF_NOTIFY_RESUME) {
|
||||||
|
clear_thread_flag(TIF_NOTIFY_RESUME);
|
||||||
|
tracehook_notify_resume(NULL);
|
||||||
|
}
|
||||||
|
|
||||||
|
ret = arch_xfer_to_guest_mode_handle_work(vcpu, ti_work);
|
||||||
|
if (ret)
|
||||||
|
return ret;
|
||||||
|
|
||||||
|
ti_work = READ_ONCE(current_thread_info()->flags);
|
||||||
|
} while (ti_work & XFER_TO_GUEST_MODE_WORK || need_resched());
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
int xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu)
|
||||||
|
{
|
||||||
|
unsigned long ti_work;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* This is invoked from the outer guest loop with interrupts and
|
||||||
|
* preemption enabled.
|
||||||
|
*
|
||||||
|
* KVM invokes xfer_to_guest_mode_work_pending() with interrupts
|
||||||
|
* disabled in the inner loop before going into guest mode. No need
|
||||||
|
* to disable interrupts here.
|
||||||
|
*/
|
||||||
|
ti_work = READ_ONCE(current_thread_info()->flags);
|
||||||
|
if (!(ti_work & XFER_TO_GUEST_MODE_WORK))
|
||||||
|
return 0;
|
||||||
|
|
||||||
|
return xfer_to_guest_mode_work(vcpu, ti_work);
|
||||||
|
}
|
||||||
|
EXPORT_SYMBOL_GPL(xfer_to_guest_mode_handle_work);
|
|
@ -60,3 +60,6 @@ config HAVE_KVM_VCPU_RUN_PID_CHANGE
|
||||||
|
|
||||||
config HAVE_KVM_NO_POLL
|
config HAVE_KVM_NO_POLL
|
||||||
bool
|
bool
|
||||||
|
|
||||||
|
config KVM_XFER_TO_GUEST_WORK
|
||||||
|
bool
|
||||||
|
|
Loading…
Reference in New Issue
Block a user