forked from luck/tmp_suning_uos_patched
Merge remote-tracking branch 'kvmarm/kvm-arm64/stolen-time' into kvmarm-master/next
This commit is contained in:
commit
a4b28f5c67
|
@ -3083,9 +3083,9 @@
|
|||
[X86,PV_OPS] Disable paravirtualized VMware scheduler
|
||||
clock and use the default one.
|
||||
|
||||
no-steal-acc [X86,KVM] Disable paravirtualized steal time accounting.
|
||||
steal time is computed, but won't influence scheduler
|
||||
behaviour
|
||||
no-steal-acc [X86,KVM,ARM64] Disable paravirtualized steal time
|
||||
accounting. steal time is computed, but won't
|
||||
influence scheduler behaviour
|
||||
|
||||
nolapic [X86-32,APIC] Do not enable or use the local APIC.
|
||||
|
||||
|
|
80
Documentation/virt/kvm/arm/pvtime.rst
Normal file
80
Documentation/virt/kvm/arm/pvtime.rst
Normal file
|
@ -0,0 +1,80 @@
|
|||
.. SPDX-License-Identifier: GPL-2.0
|
||||
|
||||
Paravirtualized time support for arm64
|
||||
======================================
|
||||
|
||||
Arm specification DEN0057/A defines a standard for paravirtualised time
|
||||
support for AArch64 guests:
|
||||
|
||||
https://developer.arm.com/docs/den0057/a
|
||||
|
||||
KVM/arm64 implements the stolen time part of this specification by providing
|
||||
some hypervisor service calls to support a paravirtualized guest obtaining a
|
||||
view of the amount of time stolen from its execution.
|
||||
|
||||
Two new SMCCC compatible hypercalls are defined:
|
||||
|
||||
* PV_TIME_FEATURES: 0xC5000020
|
||||
* PV_TIME_ST: 0xC5000021
|
||||
|
||||
These are only available in the SMC64/HVC64 calling convention as
|
||||
paravirtualized time is not available to 32 bit Arm guests. The existence of
|
||||
the PV_FEATURES hypercall should be probed using the SMCCC 1.1 ARCH_FEATURES
|
||||
mechanism before calling it.
|
||||
|
||||
PV_TIME_FEATURES
|
||||
============= ======== ==========
|
||||
Function ID: (uint32) 0xC5000020
|
||||
PV_call_id: (uint32) The function to query for support.
|
||||
Currently only PV_TIME_ST is supported.
|
||||
Return value: (int64) NOT_SUPPORTED (-1) or SUCCESS (0) if the relevant
|
||||
PV-time feature is supported by the hypervisor.
|
||||
============= ======== ==========
|
||||
|
||||
PV_TIME_ST
|
||||
============= ======== ==========
|
||||
Function ID: (uint32) 0xC5000021
|
||||
Return value: (int64) IPA of the stolen time data structure for this
|
||||
VCPU. On failure:
|
||||
NOT_SUPPORTED (-1)
|
||||
============= ======== ==========
|
||||
|
||||
The IPA returned by PV_TIME_ST should be mapped by the guest as normal memory
|
||||
with inner and outer write back caching attributes, in the inner shareable
|
||||
domain. A total of 16 bytes from the IPA returned are guaranteed to be
|
||||
meaningfully filled by the hypervisor (see structure below).
|
||||
|
||||
PV_TIME_ST returns the structure for the calling VCPU.
|
||||
|
||||
Stolen Time
|
||||
-----------
|
||||
|
||||
The structure pointed to by the PV_TIME_ST hypercall is as follows:
|
||||
|
||||
+-------------+-------------+-------------+----------------------------+
|
||||
| Field | Byte Length | Byte Offset | Description |
|
||||
+=============+=============+=============+============================+
|
||||
| Revision | 4 | 0 | Must be 0 for version 1.0 |
|
||||
+-------------+-------------+-------------+----------------------------+
|
||||
| Attributes | 4 | 4 | Must be 0 |
|
||||
+-------------+-------------+-------------+----------------------------+
|
||||
| Stolen time | 8 | 8 | Stolen time in unsigned |
|
||||
| | | | nanoseconds indicating how |
|
||||
| | | | much time this VCPU thread |
|
||||
| | | | was involuntarily not |
|
||||
| | | | running on a physical CPU. |
|
||||
+-------------+-------------+-------------+----------------------------+
|
||||
|
||||
All values in the structure are stored little-endian.
|
||||
|
||||
The structure will be updated by the hypervisor prior to scheduling a VCPU. It
|
||||
will be present within a reserved region of the normal memory given to the
|
||||
guest. The guest should not attempt to write into this memory. There is a
|
||||
structure per VCPU of the guest.
|
||||
|
||||
It is advisable that one or more 64k pages are set aside for the purpose of
|
||||
these structures and not used for other purposes, this enables the guest to map
|
||||
the region using 64k pages and avoids conflicting attributes with other memory.
|
||||
|
||||
For the user space interface see Documentation/virt/kvm/devices/vcpu.txt
|
||||
section "3. GROUP: KVM_ARM_VCPU_PVTIME_CTRL".
|
|
@ -60,3 +60,17 @@ time to use the number provided for a given timer, overwriting any previously
|
|||
configured values on other VCPUs. Userspace should configure the interrupt
|
||||
numbers on at least one VCPU after creating all VCPUs and before running any
|
||||
VCPUs.
|
||||
|
||||
3. GROUP: KVM_ARM_VCPU_PVTIME_CTRL
|
||||
Architectures: ARM64
|
||||
|
||||
3.1 ATTRIBUTE: KVM_ARM_VCPU_PVTIME_IPA
|
||||
Parameters: 64-bit base address
|
||||
Returns: -ENXIO: Stolen time not implemented
|
||||
-EEXIST: Base address already set for this VCPU
|
||||
-EINVAL: Base address not 64 byte aligned
|
||||
|
||||
Specifies the base address of the stolen time structure for this VCPU. The
|
||||
base address must be 64 byte aligned and exist within a valid guest memory
|
||||
region. See Documentation/virt/kvm/arm/pvtime.txt for more information
|
||||
including the layout of the stolen time structure.
|
||||
|
|
|
@ -7,6 +7,7 @@
|
|||
#ifndef __ARM_KVM_HOST_H__
|
||||
#define __ARM_KVM_HOST_H__
|
||||
|
||||
#include <linux/arm-smccc.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/kvm_types.h>
|
||||
|
@ -38,6 +39,7 @@
|
|||
KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
|
||||
#define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1)
|
||||
#define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(2)
|
||||
#define KVM_REQ_RECORD_STEAL KVM_ARCH_REQ(3)
|
||||
|
||||
DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
|
||||
|
||||
|
@ -331,6 +333,29 @@ static inline int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext)
|
|||
int kvm_perf_init(void);
|
||||
int kvm_perf_teardown(void);
|
||||
|
||||
static inline long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return SMCCC_RET_NOT_SUPPORTED;
|
||||
}
|
||||
|
||||
static inline gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return GPA_INVALID;
|
||||
}
|
||||
|
||||
static inline void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
|
||||
{
|
||||
}
|
||||
|
||||
static inline bool kvm_arm_is_pvtime_enabled(struct kvm_vcpu_arch *vcpu_arch)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);
|
||||
|
||||
struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
|
||||
|
|
|
@ -24,7 +24,7 @@ obj-y += kvm-arm.o init.o interrupts.o
|
|||
obj-y += handle_exit.o guest.o emulate.o reset.o
|
||||
obj-y += coproc.o coproc_a15.o coproc_a7.o vgic-v3-coproc.o
|
||||
obj-y += $(KVM)/arm/arm.o $(KVM)/arm/mmu.o $(KVM)/arm/mmio.o
|
||||
obj-y += $(KVM)/arm/psci.o $(KVM)/arm/perf.o
|
||||
obj-y += $(KVM)/arm/psci.o $(KVM)/arm/perf.o $(KVM)/arm/hypercalls.o
|
||||
obj-y += $(KVM)/arm/aarch32.o
|
||||
|
||||
obj-y += $(KVM)/arm/vgic/vgic.o
|
||||
|
|
|
@ -9,7 +9,7 @@
|
|||
#include <asm/kvm_emulate.h>
|
||||
#include <asm/kvm_coproc.h>
|
||||
#include <asm/kvm_mmu.h>
|
||||
#include <kvm/arm_psci.h>
|
||||
#include <kvm/arm_hypercalls.h>
|
||||
#include <trace/events/kvm.h>
|
||||
|
||||
#include "trace.h"
|
||||
|
|
|
@ -1,7 +1,6 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
#include <linux/arm-smccc.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/psci.h>
|
||||
#include <linux/smp.h>
|
||||
|
||||
#include <asm/cp15.h>
|
||||
|
@ -75,26 +74,20 @@ static void cpu_v7_spectre_init(void)
|
|||
case ARM_CPU_PART_CORTEX_A72: {
|
||||
struct arm_smccc_res res;
|
||||
|
||||
if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
|
||||
break;
|
||||
arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
|
||||
ARM_SMCCC_ARCH_WORKAROUND_1, &res);
|
||||
if ((int)res.a0 != 0)
|
||||
return;
|
||||
|
||||
switch (psci_ops.conduit) {
|
||||
case PSCI_CONDUIT_HVC:
|
||||
arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
|
||||
ARM_SMCCC_ARCH_WORKAROUND_1, &res);
|
||||
if ((int)res.a0 != 0)
|
||||
break;
|
||||
switch (arm_smccc_1_1_get_conduit()) {
|
||||
case SMCCC_CONDUIT_HVC:
|
||||
per_cpu(harden_branch_predictor_fn, cpu) =
|
||||
call_hvc_arch_workaround_1;
|
||||
cpu_do_switch_mm = cpu_v7_hvc_switch_mm;
|
||||
spectre_v2_method = "hypervisor";
|
||||
break;
|
||||
|
||||
case PSCI_CONDUIT_SMC:
|
||||
arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
|
||||
ARM_SMCCC_ARCH_WORKAROUND_1, &res);
|
||||
if ((int)res.a0 != 0)
|
||||
break;
|
||||
case SMCCC_CONDUIT_SMC:
|
||||
per_cpu(harden_branch_predictor_fn, cpu) =
|
||||
call_smc_arch_workaround_1;
|
||||
cpu_do_switch_mm = cpu_v7_smc_switch_mm;
|
||||
|
|
|
@ -44,6 +44,7 @@
|
|||
KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
|
||||
#define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1)
|
||||
#define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(2)
|
||||
#define KVM_REQ_RECORD_STEAL KVM_ARCH_REQ(3)
|
||||
|
||||
DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
|
||||
|
||||
|
@ -346,6 +347,13 @@ struct kvm_vcpu_arch {
|
|||
/* True when deferrable sysregs are loaded on the physical CPU,
|
||||
* see kvm_vcpu_load_sysregs and kvm_vcpu_put_sysregs. */
|
||||
bool sysregs_loaded_on_cpu;
|
||||
|
||||
/* Guest PV state */
|
||||
struct {
|
||||
u64 steal;
|
||||
u64 last_steal;
|
||||
gpa_t base;
|
||||
} steal;
|
||||
};
|
||||
|
||||
/* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
|
||||
|
@ -486,6 +494,27 @@ void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run,
|
|||
int kvm_perf_init(void);
|
||||
int kvm_perf_teardown(void);
|
||||
|
||||
long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu);
|
||||
gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu);
|
||||
void kvm_update_stolen_time(struct kvm_vcpu *vcpu);
|
||||
|
||||
int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
|
||||
struct kvm_device_attr *attr);
|
||||
int kvm_arm_pvtime_get_attr(struct kvm_vcpu *vcpu,
|
||||
struct kvm_device_attr *attr);
|
||||
int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
|
||||
struct kvm_device_attr *attr);
|
||||
|
||||
static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
|
||||
{
|
||||
vcpu_arch->steal.base = GPA_INVALID;
|
||||
}
|
||||
|
||||
static inline bool kvm_arm_is_pvtime_enabled(struct kvm_vcpu_arch *vcpu_arch)
|
||||
{
|
||||
return (vcpu_arch->steal.base != GPA_INVALID);
|
||||
}
|
||||
|
||||
void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome);
|
||||
|
||||
struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
|
||||
|
|
|
@ -21,6 +21,13 @@ static inline u64 paravirt_steal_clock(int cpu)
|
|||
{
|
||||
return pv_ops.time.steal_clock(cpu);
|
||||
}
|
||||
#endif
|
||||
|
||||
int __init pv_time_init(void);
|
||||
|
||||
#else
|
||||
|
||||
#define pv_time_init() do {} while (0)
|
||||
|
||||
#endif // CONFIG_PARAVIRT
|
||||
|
||||
#endif
|
||||
|
|
17
arch/arm64/include/asm/pvclock-abi.h
Normal file
17
arch/arm64/include/asm/pvclock-abi.h
Normal file
|
@ -0,0 +1,17 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
/* Copyright (C) 2019 Arm Ltd. */
|
||||
|
||||
#ifndef __ASM_PVCLOCK_ABI_H
|
||||
#define __ASM_PVCLOCK_ABI_H
|
||||
|
||||
/* The below structure is defined in ARM DEN0057A */
|
||||
|
||||
struct pvclock_vcpu_stolen_time {
|
||||
__le32 revision;
|
||||
__le32 attributes;
|
||||
__le64 stolen_time;
|
||||
/* Structure must be 64 byte aligned, pad to that size */
|
||||
u8 padding[48];
|
||||
} __packed;
|
||||
|
||||
#endif
|
|
@ -324,6 +324,8 @@ struct kvm_vcpu_events {
|
|||
#define KVM_ARM_VCPU_TIMER_CTRL 1
|
||||
#define KVM_ARM_VCPU_TIMER_IRQ_VTIMER 0
|
||||
#define KVM_ARM_VCPU_TIMER_IRQ_PTIMER 1
|
||||
#define KVM_ARM_VCPU_PVTIME_CTRL 2
|
||||
#define KVM_ARM_VCPU_PVTIME_IPA 0
|
||||
|
||||
/* KVM_IRQ_LINE irq field index values */
|
||||
#define KVM_ARM_IRQ_VCPU2_SHIFT 28
|
||||
|
|
|
@ -6,7 +6,6 @@
|
|||
*/
|
||||
|
||||
#include <linux/arm-smccc.h>
|
||||
#include <linux/psci.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/cpu.h>
|
||||
#include <asm/cpu.h>
|
||||
|
@ -166,9 +165,7 @@ static void install_bp_hardening_cb(bp_hardening_cb_t fn,
|
|||
}
|
||||
#endif /* CONFIG_KVM_INDIRECT_VECTORS */
|
||||
|
||||
#include <uapi/linux/psci.h>
|
||||
#include <linux/arm-smccc.h>
|
||||
#include <linux/psci.h>
|
||||
|
||||
static void call_smc_arch_workaround_1(void)
|
||||
{
|
||||
|
@ -212,43 +209,31 @@ static int detect_harden_bp_fw(void)
|
|||
struct arm_smccc_res res;
|
||||
u32 midr = read_cpuid_id();
|
||||
|
||||
if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
|
||||
return -1;
|
||||
arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
|
||||
ARM_SMCCC_ARCH_WORKAROUND_1, &res);
|
||||
|
||||
switch (psci_ops.conduit) {
|
||||
case PSCI_CONDUIT_HVC:
|
||||
arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
|
||||
ARM_SMCCC_ARCH_WORKAROUND_1, &res);
|
||||
switch ((int)res.a0) {
|
||||
case 1:
|
||||
/* Firmware says we're just fine */
|
||||
return 0;
|
||||
case 0:
|
||||
cb = call_hvc_arch_workaround_1;
|
||||
/* This is a guest, no need to patch KVM vectors */
|
||||
smccc_start = NULL;
|
||||
smccc_end = NULL;
|
||||
break;
|
||||
default:
|
||||
return -1;
|
||||
}
|
||||
switch ((int)res.a0) {
|
||||
case 1:
|
||||
/* Firmware says we're just fine */
|
||||
return 0;
|
||||
case 0:
|
||||
break;
|
||||
default:
|
||||
return -1;
|
||||
}
|
||||
|
||||
switch (arm_smccc_1_1_get_conduit()) {
|
||||
case SMCCC_CONDUIT_HVC:
|
||||
cb = call_hvc_arch_workaround_1;
|
||||
/* This is a guest, no need to patch KVM vectors */
|
||||
smccc_start = NULL;
|
||||
smccc_end = NULL;
|
||||
break;
|
||||
|
||||
case PSCI_CONDUIT_SMC:
|
||||
arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
|
||||
ARM_SMCCC_ARCH_WORKAROUND_1, &res);
|
||||
switch ((int)res.a0) {
|
||||
case 1:
|
||||
/* Firmware says we're just fine */
|
||||
return 0;
|
||||
case 0:
|
||||
cb = call_smc_arch_workaround_1;
|
||||
smccc_start = __smccc_workaround_1_smc_start;
|
||||
smccc_end = __smccc_workaround_1_smc_end;
|
||||
break;
|
||||
default:
|
||||
return -1;
|
||||
}
|
||||
case SMCCC_CONDUIT_SMC:
|
||||
cb = call_smc_arch_workaround_1;
|
||||
smccc_start = __smccc_workaround_1_smc_start;
|
||||
smccc_end = __smccc_workaround_1_smc_end;
|
||||
break;
|
||||
|
||||
default:
|
||||
|
@ -308,11 +293,11 @@ void __init arm64_update_smccc_conduit(struct alt_instr *alt,
|
|||
|
||||
BUG_ON(nr_inst != 1);
|
||||
|
||||
switch (psci_ops.conduit) {
|
||||
case PSCI_CONDUIT_HVC:
|
||||
switch (arm_smccc_1_1_get_conduit()) {
|
||||
case SMCCC_CONDUIT_HVC:
|
||||
insn = aarch64_insn_get_hvc_value();
|
||||
break;
|
||||
case PSCI_CONDUIT_SMC:
|
||||
case SMCCC_CONDUIT_SMC:
|
||||
insn = aarch64_insn_get_smc_value();
|
||||
break;
|
||||
default:
|
||||
|
@ -338,6 +323,8 @@ void __init arm64_enable_wa2_handling(struct alt_instr *alt,
|
|||
|
||||
void arm64_set_ssbd_mitigation(bool state)
|
||||
{
|
||||
int conduit;
|
||||
|
||||
if (!IS_ENABLED(CONFIG_ARM64_SSBD)) {
|
||||
pr_info_once("SSBD disabled by kernel configuration\n");
|
||||
return;
|
||||
|
@ -351,19 +338,10 @@ void arm64_set_ssbd_mitigation(bool state)
|
|||
return;
|
||||
}
|
||||
|
||||
switch (psci_ops.conduit) {
|
||||
case PSCI_CONDUIT_HVC:
|
||||
arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
|
||||
break;
|
||||
conduit = arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, state,
|
||||
NULL);
|
||||
|
||||
case PSCI_CONDUIT_SMC:
|
||||
arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
|
||||
break;
|
||||
|
||||
default:
|
||||
WARN_ON_ONCE(1);
|
||||
break;
|
||||
}
|
||||
WARN_ON_ONCE(conduit == SMCCC_CONDUIT_NONE);
|
||||
}
|
||||
|
||||
static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
|
||||
|
@ -373,6 +351,7 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
|
|||
bool required = true;
|
||||
s32 val;
|
||||
bool this_cpu_safe = false;
|
||||
int conduit;
|
||||
|
||||
WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
|
||||
|
||||
|
@ -390,25 +369,10 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
|
|||
goto out_printmsg;
|
||||
}
|
||||
|
||||
if (psci_ops.smccc_version == SMCCC_VERSION_1_0) {
|
||||
ssbd_state = ARM64_SSBD_UNKNOWN;
|
||||
if (!this_cpu_safe)
|
||||
__ssb_safe = false;
|
||||
return false;
|
||||
}
|
||||
conduit = arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
|
||||
ARM_SMCCC_ARCH_WORKAROUND_2, &res);
|
||||
|
||||
switch (psci_ops.conduit) {
|
||||
case PSCI_CONDUIT_HVC:
|
||||
arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
|
||||
ARM_SMCCC_ARCH_WORKAROUND_2, &res);
|
||||
break;
|
||||
|
||||
case PSCI_CONDUIT_SMC:
|
||||
arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
|
||||
ARM_SMCCC_ARCH_WORKAROUND_2, &res);
|
||||
break;
|
||||
|
||||
default:
|
||||
if (conduit == SMCCC_CONDUIT_NONE) {
|
||||
ssbd_state = ARM64_SSBD_UNKNOWN;
|
||||
if (!this_cpu_safe)
|
||||
__ssb_safe = false;
|
||||
|
|
|
@ -6,13 +6,153 @@
|
|||
* Author: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
|
||||
*/
|
||||
|
||||
#define pr_fmt(fmt) "arm-pv: " fmt
|
||||
|
||||
#include <linux/arm-smccc.h>
|
||||
#include <linux/cpuhotplug.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/jump_label.h>
|
||||
#include <linux/printk.h>
|
||||
#include <linux/psci.h>
|
||||
#include <linux/reboot.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
#include <asm/paravirt.h>
|
||||
#include <asm/pvclock-abi.h>
|
||||
#include <asm/smp_plat.h>
|
||||
|
||||
struct static_key paravirt_steal_enabled;
|
||||
struct static_key paravirt_steal_rq_enabled;
|
||||
|
||||
struct paravirt_patch_template pv_ops;
|
||||
EXPORT_SYMBOL_GPL(pv_ops);
|
||||
|
||||
struct pv_time_stolen_time_region {
|
||||
struct pvclock_vcpu_stolen_time *kaddr;
|
||||
};
|
||||
|
||||
static DEFINE_PER_CPU(struct pv_time_stolen_time_region, stolen_time_region);
|
||||
|
||||
static bool steal_acc = true;
|
||||
static int __init parse_no_stealacc(char *arg)
|
||||
{
|
||||
steal_acc = false;
|
||||
return 0;
|
||||
}
|
||||
|
||||
early_param("no-steal-acc", parse_no_stealacc);
|
||||
|
||||
/* return stolen time in ns by asking the hypervisor */
|
||||
static u64 pv_steal_clock(int cpu)
|
||||
{
|
||||
struct pv_time_stolen_time_region *reg;
|
||||
|
||||
reg = per_cpu_ptr(&stolen_time_region, cpu);
|
||||
if (!reg->kaddr) {
|
||||
pr_warn_once("stolen time enabled but not configured for cpu %d\n",
|
||||
cpu);
|
||||
return 0;
|
||||
}
|
||||
|
||||
return le64_to_cpu(READ_ONCE(reg->kaddr->stolen_time));
|
||||
}
|
||||
|
||||
static int stolen_time_dying_cpu(unsigned int cpu)
|
||||
{
|
||||
struct pv_time_stolen_time_region *reg;
|
||||
|
||||
reg = this_cpu_ptr(&stolen_time_region);
|
||||
if (!reg->kaddr)
|
||||
return 0;
|
||||
|
||||
memunmap(reg->kaddr);
|
||||
memset(reg, 0, sizeof(*reg));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int init_stolen_time_cpu(unsigned int cpu)
|
||||
{
|
||||
struct pv_time_stolen_time_region *reg;
|
||||
struct arm_smccc_res res;
|
||||
|
||||
reg = this_cpu_ptr(&stolen_time_region);
|
||||
|
||||
arm_smccc_1_1_invoke(ARM_SMCCC_HV_PV_TIME_ST, &res);
|
||||
|
||||
if (res.a0 == SMCCC_RET_NOT_SUPPORTED)
|
||||
return -EINVAL;
|
||||
|
||||
reg->kaddr = memremap(res.a0,
|
||||
sizeof(struct pvclock_vcpu_stolen_time),
|
||||
MEMREMAP_WB);
|
||||
|
||||
if (!reg->kaddr) {
|
||||
pr_warn("Failed to map stolen time data structure\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
if (le32_to_cpu(reg->kaddr->revision) != 0 ||
|
||||
le32_to_cpu(reg->kaddr->attributes) != 0) {
|
||||
pr_warn_once("Unexpected revision or attributes in stolen time data\n");
|
||||
return -ENXIO;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int pv_time_init_stolen_time(void)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = cpuhp_setup_state(CPUHP_AP_ARM_KVMPV_STARTING,
|
||||
"hypervisor/arm/pvtime:starting",
|
||||
init_stolen_time_cpu, stolen_time_dying_cpu);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static bool has_pv_steal_clock(void)
|
||||
{
|
||||
struct arm_smccc_res res;
|
||||
|
||||
/* To detect the presence of PV time support we require SMCCC 1.1+ */
|
||||
if (psci_ops.smccc_version < SMCCC_VERSION_1_1)
|
||||
return false;
|
||||
|
||||
arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
|
||||
ARM_SMCCC_HV_PV_TIME_FEATURES, &res);
|
||||
|
||||
if (res.a0 != SMCCC_RET_SUCCESS)
|
||||
return false;
|
||||
|
||||
arm_smccc_1_1_invoke(ARM_SMCCC_HV_PV_TIME_FEATURES,
|
||||
ARM_SMCCC_HV_PV_TIME_ST, &res);
|
||||
|
||||
return (res.a0 == SMCCC_RET_SUCCESS);
|
||||
}
|
||||
|
||||
int __init pv_time_init(void)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (!has_pv_steal_clock())
|
||||
return 0;
|
||||
|
||||
ret = pv_time_init_stolen_time();
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
pv_ops.time.steal_clock = pv_steal_clock;
|
||||
|
||||
static_key_slow_inc(¶virt_steal_enabled);
|
||||
if (steal_acc)
|
||||
static_key_slow_inc(¶virt_steal_rq_enabled);
|
||||
|
||||
pr_info("using stolen time PV\n");
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -2,6 +2,7 @@
|
|||
// Copyright (C) 2017 Arm Ltd.
|
||||
#define pr_fmt(fmt) "sdei: " fmt
|
||||
|
||||
#include <linux/arm-smccc.h>
|
||||
#include <linux/arm_sdei.h>
|
||||
#include <linux/hardirq.h>
|
||||
#include <linux/irqflags.h>
|
||||
|
@ -161,7 +162,7 @@ unsigned long sdei_arch_get_entry_point(int conduit)
|
|||
return 0;
|
||||
}
|
||||
|
||||
sdei_exit_mode = (conduit == CONDUIT_HVC) ? SDEI_EXIT_HVC : SDEI_EXIT_SMC;
|
||||
sdei_exit_mode = (conduit == SMCCC_CONDUIT_HVC) ? SDEI_EXIT_HVC : SDEI_EXIT_SMC;
|
||||
|
||||
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
|
||||
if (arm64_kernel_unmapped_at_el0()) {
|
||||
|
|
|
@ -30,6 +30,7 @@
|
|||
|
||||
#include <asm/thread_info.h>
|
||||
#include <asm/stacktrace.h>
|
||||
#include <asm/paravirt.h>
|
||||
|
||||
unsigned long profile_pc(struct pt_regs *regs)
|
||||
{
|
||||
|
@ -65,4 +66,6 @@ void __init time_init(void)
|
|||
|
||||
/* Calibrate the delay loop directly */
|
||||
lpj_fine = arch_timer_rate / HZ;
|
||||
|
||||
pv_time_init();
|
||||
}
|
||||
|
|
|
@ -21,6 +21,8 @@ if VIRTUALIZATION
|
|||
config KVM
|
||||
bool "Kernel-based Virtual Machine (KVM) support"
|
||||
depends on OF
|
||||
# for TASKSTATS/TASK_DELAY_ACCT:
|
||||
depends on NET && MULTIUSER
|
||||
select MMU_NOTIFIER
|
||||
select PREEMPT_NOTIFIERS
|
||||
select HAVE_KVM_CPU_RELAX_INTERCEPT
|
||||
|
@ -39,6 +41,8 @@ config KVM
|
|||
select IRQ_BYPASS_MANAGER
|
||||
select HAVE_KVM_IRQ_BYPASS
|
||||
select HAVE_KVM_VCPU_RUN_PID_CHANGE
|
||||
select TASKSTATS
|
||||
select TASK_DELAY_ACCT
|
||||
---help---
|
||||
Support hosting virtualized guest machines.
|
||||
We don't support KVM with 16K page tables yet, due to the multiple
|
||||
|
|
|
@ -13,6 +13,8 @@ obj-$(CONFIG_KVM_ARM_HOST) += hyp/
|
|||
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o $(KVM)/vfio.o
|
||||
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/arm.o $(KVM)/arm/mmu.o $(KVM)/arm/mmio.o
|
||||
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/psci.o $(KVM)/arm/perf.o
|
||||
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hypercalls.o
|
||||
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/pvtime.o
|
||||
|
||||
kvm-$(CONFIG_KVM_ARM_HOST) += inject_fault.o regmap.o va_layout.o
|
||||
kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o
|
||||
|
|
|
@ -868,6 +868,9 @@ int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
|
|||
case KVM_ARM_VCPU_TIMER_CTRL:
|
||||
ret = kvm_arm_timer_set_attr(vcpu, attr);
|
||||
break;
|
||||
case KVM_ARM_VCPU_PVTIME_CTRL:
|
||||
ret = kvm_arm_pvtime_set_attr(vcpu, attr);
|
||||
break;
|
||||
default:
|
||||
ret = -ENXIO;
|
||||
break;
|
||||
|
@ -888,6 +891,9 @@ int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
|
|||
case KVM_ARM_VCPU_TIMER_CTRL:
|
||||
ret = kvm_arm_timer_get_attr(vcpu, attr);
|
||||
break;
|
||||
case KVM_ARM_VCPU_PVTIME_CTRL:
|
||||
ret = kvm_arm_pvtime_get_attr(vcpu, attr);
|
||||
break;
|
||||
default:
|
||||
ret = -ENXIO;
|
||||
break;
|
||||
|
@ -908,6 +914,9 @@ int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
|
|||
case KVM_ARM_VCPU_TIMER_CTRL:
|
||||
ret = kvm_arm_timer_has_attr(vcpu, attr);
|
||||
break;
|
||||
case KVM_ARM_VCPU_PVTIME_CTRL:
|
||||
ret = kvm_arm_pvtime_has_attr(vcpu, attr);
|
||||
break;
|
||||
default:
|
||||
ret = -ENXIO;
|
||||
break;
|
||||
|
|
|
@ -11,8 +11,6 @@
|
|||
#include <linux/kvm.h>
|
||||
#include <linux/kvm_host.h>
|
||||
|
||||
#include <kvm/arm_psci.h>
|
||||
|
||||
#include <asm/esr.h>
|
||||
#include <asm/exception.h>
|
||||
#include <asm/kvm_asm.h>
|
||||
|
@ -22,6 +20,8 @@
|
|||
#include <asm/debug-monitors.h>
|
||||
#include <asm/traps.h>
|
||||
|
||||
#include <kvm/arm_hypercalls.h>
|
||||
|
||||
#define CREATE_TRACE_POINTS
|
||||
#include "trace.h"
|
||||
|
||||
|
|
|
@ -967,29 +967,29 @@ static int sdei_get_conduit(struct platform_device *pdev)
|
|||
if (np) {
|
||||
if (of_property_read_string(np, "method", &method)) {
|
||||
pr_warn("missing \"method\" property\n");
|
||||
return CONDUIT_INVALID;
|
||||
return SMCCC_CONDUIT_NONE;
|
||||
}
|
||||
|
||||
if (!strcmp("hvc", method)) {
|
||||
sdei_firmware_call = &sdei_smccc_hvc;
|
||||
return CONDUIT_HVC;
|
||||
return SMCCC_CONDUIT_HVC;
|
||||
} else if (!strcmp("smc", method)) {
|
||||
sdei_firmware_call = &sdei_smccc_smc;
|
||||
return CONDUIT_SMC;
|
||||
return SMCCC_CONDUIT_SMC;
|
||||
}
|
||||
|
||||
pr_warn("invalid \"method\" property: %s\n", method);
|
||||
} else if (IS_ENABLED(CONFIG_ACPI) && !acpi_disabled) {
|
||||
if (acpi_psci_use_hvc()) {
|
||||
sdei_firmware_call = &sdei_smccc_hvc;
|
||||
return CONDUIT_HVC;
|
||||
return SMCCC_CONDUIT_HVC;
|
||||
} else {
|
||||
sdei_firmware_call = &sdei_smccc_smc;
|
||||
return CONDUIT_SMC;
|
||||
return SMCCC_CONDUIT_SMC;
|
||||
}
|
||||
}
|
||||
|
||||
return CONDUIT_INVALID;
|
||||
return SMCCC_CONDUIT_NONE;
|
||||
}
|
||||
|
||||
static int sdei_probe(struct platform_device *pdev)
|
||||
|
|
|
@ -53,10 +53,18 @@ bool psci_tos_resident_on(int cpu)
|
|||
}
|
||||
|
||||
struct psci_operations psci_ops = {
|
||||
.conduit = PSCI_CONDUIT_NONE,
|
||||
.conduit = SMCCC_CONDUIT_NONE,
|
||||
.smccc_version = SMCCC_VERSION_1_0,
|
||||
};
|
||||
|
||||
enum arm_smccc_conduit arm_smccc_1_1_get_conduit(void)
|
||||
{
|
||||
if (psci_ops.smccc_version < SMCCC_VERSION_1_1)
|
||||
return SMCCC_CONDUIT_NONE;
|
||||
|
||||
return psci_ops.conduit;
|
||||
}
|
||||
|
||||
typedef unsigned long (psci_fn)(unsigned long, unsigned long,
|
||||
unsigned long, unsigned long);
|
||||
static psci_fn *invoke_psci_fn;
|
||||
|
@ -212,13 +220,13 @@ static unsigned long psci_migrate_info_up_cpu(void)
|
|||
0, 0, 0);
|
||||
}
|
||||
|
||||
static void set_conduit(enum psci_conduit conduit)
|
||||
static void set_conduit(enum arm_smccc_conduit conduit)
|
||||
{
|
||||
switch (conduit) {
|
||||
case PSCI_CONDUIT_HVC:
|
||||
case SMCCC_CONDUIT_HVC:
|
||||
invoke_psci_fn = __invoke_psci_fn_hvc;
|
||||
break;
|
||||
case PSCI_CONDUIT_SMC:
|
||||
case SMCCC_CONDUIT_SMC:
|
||||
invoke_psci_fn = __invoke_psci_fn_smc;
|
||||
break;
|
||||
default:
|
||||
|
@ -240,9 +248,9 @@ static int get_set_conduit_method(struct device_node *np)
|
|||
}
|
||||
|
||||
if (!strcmp("hvc", method)) {
|
||||
set_conduit(PSCI_CONDUIT_HVC);
|
||||
set_conduit(SMCCC_CONDUIT_HVC);
|
||||
} else if (!strcmp("smc", method)) {
|
||||
set_conduit(PSCI_CONDUIT_SMC);
|
||||
set_conduit(SMCCC_CONDUIT_SMC);
|
||||
} else {
|
||||
pr_warn("invalid \"method\" property: %s\n", method);
|
||||
return -EINVAL;
|
||||
|
@ -583,9 +591,9 @@ int __init psci_acpi_init(void)
|
|||
pr_info("probing for conduit method from ACPI.\n");
|
||||
|
||||
if (acpi_psci_use_hvc())
|
||||
set_conduit(PSCI_CONDUIT_HVC);
|
||||
set_conduit(SMCCC_CONDUIT_HVC);
|
||||
else
|
||||
set_conduit(PSCI_CONDUIT_SMC);
|
||||
set_conduit(SMCCC_CONDUIT_SMC);
|
||||
|
||||
return psci_probe();
|
||||
}
|
||||
|
|
|
@ -67,6 +67,8 @@ header-test- += keys/big_key-type.h
|
|||
header-test- += keys/request_key_auth-type.h
|
||||
header-test- += keys/trusted.h
|
||||
header-test- += kvm/arm_arch_timer.h
|
||||
header-test-$(CONFIG_ARM) += kvm/arm_hypercalls.h
|
||||
header-test-$(CONFIG_ARM64) += kvm/arm_hypercalls.h
|
||||
header-test- += kvm/arm_pmu.h
|
||||
header-test-$(CONFIG_ARM) += kvm/arm_psci.h
|
||||
header-test-$(CONFIG_ARM64) += kvm/arm_psci.h
|
||||
|
|
43
include/kvm/arm_hypercalls.h
Normal file
43
include/kvm/arm_hypercalls.h
Normal file
|
@ -0,0 +1,43 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
/* Copyright (C) 2019 Arm Ltd. */
|
||||
|
||||
#ifndef __KVM_ARM_HYPERCALLS_H
|
||||
#define __KVM_ARM_HYPERCALLS_H
|
||||
|
||||
#include <asm/kvm_emulate.h>
|
||||
|
||||
int kvm_hvc_call_handler(struct kvm_vcpu *vcpu);
|
||||
|
||||
static inline u32 smccc_get_function(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return vcpu_get_reg(vcpu, 0);
|
||||
}
|
||||
|
||||
static inline unsigned long smccc_get_arg1(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return vcpu_get_reg(vcpu, 1);
|
||||
}
|
||||
|
||||
static inline unsigned long smccc_get_arg2(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return vcpu_get_reg(vcpu, 2);
|
||||
}
|
||||
|
||||
static inline unsigned long smccc_get_arg3(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return vcpu_get_reg(vcpu, 3);
|
||||
}
|
||||
|
||||
static inline void smccc_set_retval(struct kvm_vcpu *vcpu,
|
||||
unsigned long a0,
|
||||
unsigned long a1,
|
||||
unsigned long a2,
|
||||
unsigned long a3)
|
||||
{
|
||||
vcpu_set_reg(vcpu, 0, a0);
|
||||
vcpu_set_reg(vcpu, 1, a1);
|
||||
vcpu_set_reg(vcpu, 2, a2);
|
||||
vcpu_set_reg(vcpu, 3, a3);
|
||||
}
|
||||
|
||||
#endif
|
|
@ -40,7 +40,7 @@ static inline int kvm_psci_version(struct kvm_vcpu *vcpu, struct kvm *kvm)
|
|||
}
|
||||
|
||||
|
||||
int kvm_hvc_call_handler(struct kvm_vcpu *vcpu);
|
||||
int kvm_psci_call(struct kvm_vcpu *vcpu);
|
||||
|
||||
struct kvm_one_reg;
|
||||
|
||||
|
|
|
@ -45,6 +45,7 @@
|
|||
#define ARM_SMCCC_OWNER_SIP 2
|
||||
#define ARM_SMCCC_OWNER_OEM 3
|
||||
#define ARM_SMCCC_OWNER_STANDARD 4
|
||||
#define ARM_SMCCC_OWNER_STANDARD_HYP 5
|
||||
#define ARM_SMCCC_OWNER_TRUSTED_APP 48
|
||||
#define ARM_SMCCC_OWNER_TRUSTED_APP_END 49
|
||||
#define ARM_SMCCC_OWNER_TRUSTED_OS 50
|
||||
|
@ -80,6 +81,22 @@
|
|||
|
||||
#include <linux/linkage.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
enum arm_smccc_conduit {
|
||||
SMCCC_CONDUIT_NONE,
|
||||
SMCCC_CONDUIT_SMC,
|
||||
SMCCC_CONDUIT_HVC,
|
||||
};
|
||||
|
||||
/**
|
||||
* arm_smccc_1_1_get_conduit()
|
||||
*
|
||||
* Returns the conduit to be used for SMCCCv1.1 or later.
|
||||
*
|
||||
* When SMCCCv1.1 is not present, returns SMCCC_CONDUIT_NONE.
|
||||
*/
|
||||
enum arm_smccc_conduit arm_smccc_1_1_get_conduit(void);
|
||||
|
||||
/**
|
||||
* struct arm_smccc_res - Result from SMC/HVC call
|
||||
* @a0-a3 result values from registers 0 to 3
|
||||
|
@ -302,5 +319,63 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
|
|||
#define SMCCC_RET_NOT_SUPPORTED -1
|
||||
#define SMCCC_RET_NOT_REQUIRED -2
|
||||
|
||||
/*
 * Like the arm_smccc_1_1* call macros, but always fails with
 * SMCCC_RET_NOT_SUPPORTED in @res->a0 (when a result structure was
 * supplied).  Used when no SMCCC conduit is available.  The empty asm
 * statement consumes the declared argument registers so the compiler
 * does not warn about unused variables.
 */
#define __fail_smccc_1_1(...)						\
	do {								\
		__declare_args(__count_args(__VA_ARGS__), __VA_ARGS__);	\
		asm ("" __constraints(__count_args(__VA_ARGS__)));	\
		if (___res)						\
			___res->a0 = SMCCC_RET_NOT_SUPPORTED;		\
	} while (0)
|
||||
|
||||
/*
 * arm_smccc_1_1_invoke() - make an SMCCC v1.1 compliant call
 *
 * This is a variadic macro taking one to eight source arguments, and
 * an optional return structure.
 *
 * @a0-a7: arguments passed in registers 0 to 7
 * @res: result values from registers 0 to 3
 *
 * This macro will make either an HVC call or an SMC call depending on
 * the current SMCCC conduit.  If no valid conduit is available then -1
 * (SMCCC_RET_NOT_SUPPORTED) is returned in @res.a0 (if supplied).
 *
 * The macro's value is the conduit that was used (SMCCC_CONDUIT_NONE
 * when the call could not be made).
 */
#define arm_smccc_1_1_invoke(...) ({					\
		int method = arm_smccc_1_1_get_conduit();		\
		switch (method) {					\
		case SMCCC_CONDUIT_HVC:					\
			arm_smccc_1_1_hvc(__VA_ARGS__);			\
			break;						\
		case SMCCC_CONDUIT_SMC:					\
			arm_smccc_1_1_smc(__VA_ARGS__);			\
			break;						\
		default:						\
			__fail_smccc_1_1(__VA_ARGS__);			\
			method = SMCCC_CONDUIT_NONE;			\
			break;						\
		}							\
		method;							\
	})
|
||||
|
||||
/* Paravirtualised time calls (defined by ARM DEN0057A) */
|
||||
#define ARM_SMCCC_HV_PV_TIME_FEATURES \
|
||||
ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
|
||||
ARM_SMCCC_SMC_64, \
|
||||
ARM_SMCCC_OWNER_STANDARD_HYP, \
|
||||
0x20)
|
||||
|
||||
#define ARM_SMCCC_HV_PV_TIME_ST \
|
||||
ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
|
||||
ARM_SMCCC_SMC_64, \
|
||||
ARM_SMCCC_OWNER_STANDARD_HYP, \
|
||||
0x21)
|
||||
|
||||
#endif /*__ASSEMBLY__*/
|
||||
#endif /*__LINUX_ARM_SMCCC_H*/
|
||||
|
|
|
@ -5,12 +5,6 @@
|
|||
|
||||
#include <uapi/linux/arm_sdei.h>
|
||||
|
||||
enum sdei_conduit_types {
|
||||
CONDUIT_INVALID = 0,
|
||||
CONDUIT_SMC,
|
||||
CONDUIT_HVC,
|
||||
};
|
||||
|
||||
#include <acpi/ghes.h>
|
||||
|
||||
#ifdef CONFIG_ARM_SDE_INTERFACE
|
||||
|
|
|
@ -136,6 +136,7 @@ enum cpuhp_state {
|
|||
/* Must be the last timer callback */
|
||||
CPUHP_AP_DUMMY_TIMER_STARTING,
|
||||
CPUHP_AP_ARM_XEN_STARTING,
|
||||
CPUHP_AP_ARM_KVMPV_STARTING,
|
||||
CPUHP_AP_ARM_CORESIGHT_STARTING,
|
||||
CPUHP_AP_ARM64_ISNDEP_STARTING,
|
||||
CPUHP_AP_SMPCFD_DYING,
|
||||
|
|
|
@ -746,6 +746,28 @@ int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
|
|||
unsigned long len);
|
||||
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
|
||||
gpa_t gpa, unsigned long len);
|
||||
|
||||
/*
 * __kvm_put_guest() - store a single @type-sized @value into guest
 * memory at frame @gfn plus byte @offset, marking the page dirty on
 * success.
 *
 * Evaluates to 0 on success, or -EFAULT when the gfn has no valid
 * userspace mapping or the put_user() copy faults.
 */
#define __kvm_put_guest(kvm, gfn, offset, value, type)			\
({									\
	unsigned long __addr = gfn_to_hva(kvm, gfn);			\
	type __user *__uaddr = (type __user *)(__addr + offset);	\
	int __ret = -EFAULT;						\
									\
	if (!kvm_is_error_hva(__addr))					\
		__ret = put_user(value, __uaddr);			\
	if (!__ret)							\
		mark_page_dirty(kvm, gfn);				\
	__ret;								\
})
|
||||
|
||||
/*
 * kvm_put_guest() - store @value into guest memory at physical address
 * @gpa.  Splits the gpa into a frame number and in-page offset for
 * __kvm_put_guest().  @kvm and @gpa are expanded into locals so each
 * is evaluated exactly once.
 */
#define kvm_put_guest(kvm, gpa, value, type)				\
({									\
	gpa_t __gpa = gpa;						\
	struct kvm *__kvm = kvm;					\
	__kvm_put_guest(__kvm, __gpa >> PAGE_SHIFT,			\
			offset_in_page(__gpa), (value), type);		\
})
|
||||
|
||||
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
|
||||
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
|
||||
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
|
||||
|
@ -1240,7 +1262,7 @@ extern unsigned int halt_poll_ns_grow_start;
|
|||
extern unsigned int halt_poll_ns_shrink;
|
||||
|
||||
struct kvm_device {
|
||||
struct kvm_device_ops *ops;
|
||||
const struct kvm_device_ops *ops;
|
||||
struct kvm *kvm;
|
||||
void *private;
|
||||
struct list_head vm_node;
|
||||
|
@ -1293,7 +1315,7 @@ struct kvm_device_ops {
|
|||
void kvm_device_get(struct kvm_device *dev);
|
||||
void kvm_device_put(struct kvm_device *dev);
|
||||
struct kvm_device *kvm_device_from_filp(struct file *filp);
|
||||
int kvm_register_device_ops(struct kvm_device_ops *ops, u32 type);
|
||||
int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type);
|
||||
void kvm_unregister_device_ops(u32 type);
|
||||
|
||||
extern struct kvm_device_ops kvm_mpic_ops;
|
||||
|
|
|
@ -35,6 +35,8 @@ typedef unsigned long gva_t;
|
|||
typedef u64 gpa_t;
|
||||
typedef u64 gfn_t;
|
||||
|
||||
#define GPA_INVALID (~(gpa_t)0)
|
||||
|
||||
typedef unsigned long hva_t;
|
||||
typedef u64 hpa_t;
|
||||
typedef u64 hfn_t;
|
||||
|
|
|
@ -7,6 +7,7 @@
|
|||
#ifndef __LINUX_PSCI_H
|
||||
#define __LINUX_PSCI_H
|
||||
|
||||
#include <linux/arm-smccc.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
|
@ -18,12 +19,6 @@ bool psci_tos_resident_on(int cpu);
|
|||
int psci_cpu_suspend_enter(u32 state);
|
||||
bool psci_power_state_is_valid(u32 state);
|
||||
|
||||
enum psci_conduit {
|
||||
PSCI_CONDUIT_NONE,
|
||||
PSCI_CONDUIT_SMC,
|
||||
PSCI_CONDUIT_HVC,
|
||||
};
|
||||
|
||||
enum smccc_version {
|
||||
SMCCC_VERSION_1_0,
|
||||
SMCCC_VERSION_1_1,
|
||||
|
@ -38,7 +33,7 @@ struct psci_operations {
|
|||
int (*affinity_info)(unsigned long target_affinity,
|
||||
unsigned long lowest_affinity_level);
|
||||
int (*migrate_info_type)(void);
|
||||
enum psci_conduit conduit;
|
||||
enum arm_smccc_conduit conduit;
|
||||
enum smccc_version smccc_version;
|
||||
};
|
||||
|
||||
|
|
|
@ -1235,6 +1235,8 @@ enum kvm_device_type {
|
|||
#define KVM_DEV_TYPE_ARM_VGIC_ITS KVM_DEV_TYPE_ARM_VGIC_ITS
|
||||
KVM_DEV_TYPE_XIVE,
|
||||
#define KVM_DEV_TYPE_XIVE KVM_DEV_TYPE_XIVE
|
||||
KVM_DEV_TYPE_ARM_PV_TIME,
|
||||
#define KVM_DEV_TYPE_ARM_PV_TIME KVM_DEV_TYPE_ARM_PV_TIME
|
||||
KVM_DEV_TYPE_MAX,
|
||||
};
|
||||
|
||||
|
|
|
@ -40,6 +40,10 @@
|
|||
#include <asm/kvm_coproc.h>
|
||||
#include <asm/sections.h>
|
||||
|
||||
#include <kvm/arm_hypercalls.h>
|
||||
#include <kvm/arm_pmu.h>
|
||||
#include <kvm/arm_psci.h>
|
||||
|
||||
#ifdef REQUIRES_VIRT
|
||||
__asm__(".arch_extension virt");
|
||||
#endif
|
||||
|
@ -373,6 +377,8 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
|
|||
|
||||
kvm_arm_reset_debug_ptr(vcpu);
|
||||
|
||||
kvm_arm_pvtime_vcpu_init(&vcpu->arch);
|
||||
|
||||
return kvm_vgic_vcpu_init(vcpu);
|
||||
}
|
||||
|
||||
|
@ -402,6 +408,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
|
|||
kvm_vcpu_load_sysregs(vcpu);
|
||||
kvm_arch_vcpu_load_fp(vcpu);
|
||||
kvm_vcpu_pmu_restore_guest(vcpu);
|
||||
if (kvm_arm_is_pvtime_enabled(&vcpu->arch))
|
||||
kvm_make_request(KVM_REQ_RECORD_STEAL, vcpu);
|
||||
|
||||
if (single_task_running())
|
||||
vcpu_clear_wfe_traps(vcpu);
|
||||
|
@ -667,6 +675,9 @@ static void check_vcpu_requests(struct kvm_vcpu *vcpu)
|
|||
* that a VCPU sees new virtual interrupts.
|
||||
*/
|
||||
kvm_check_request(KVM_REQ_IRQ_PENDING, vcpu);
|
||||
|
||||
if (kvm_check_request(KVM_REQ_RECORD_STEAL, vcpu))
|
||||
kvm_update_stolen_time(vcpu);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
71
virt/kvm/arm/hypercalls.c
Normal file
71
virt/kvm/arm/hypercalls.c
Normal file
|
@ -0,0 +1,71 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
// Copyright (C) 2019 Arm Ltd.
|
||||
|
||||
#include <linux/arm-smccc.h>
|
||||
#include <linux/kvm_host.h>
|
||||
|
||||
#include <asm/kvm_emulate.h>
|
||||
|
||||
#include <kvm/arm_hypercalls.h>
|
||||
#include <kvm/arm_psci.h>
|
||||
|
||||
/*
 * kvm_hvc_call_handler() - dispatch a guest SMCCC hypercall.
 *
 * Handles the SMCCC version query, the ARCH_FEATURES probe (branch
 * predictor and SSBD workaround status, plus advertising the PV time
 * feature call), and the two paravirtualised-time calls.  Any other
 * function ID is forwarded to the PSCI handler.
 *
 * Return: 1 (resume the guest) after storing the result in the vCPU's
 * registers, or whatever kvm_psci_call() returns for unhandled IDs.
 */
int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
{
	u32 func_id = smccc_get_function(vcpu);
	/* val is long: PV_TIME_ST returns a gpa, not just a status code */
	long val = SMCCC_RET_NOT_SUPPORTED;
	u32 feature;
	gpa_t gpa;

	switch (func_id) {
	case ARM_SMCCC_VERSION_FUNC_ID:
		val = ARM_SMCCC_VERSION_1_1;
		break;
	case ARM_SMCCC_ARCH_FEATURES_FUNC_ID:
		/* The guest asks whether a given function ID is supported */
		feature = smccc_get_arg1(vcpu);
		switch (feature) {
		case ARM_SMCCC_ARCH_WORKAROUND_1:
			/* Branch-predictor hardening status */
			switch (kvm_arm_harden_branch_predictor()) {
			case KVM_BP_HARDEN_UNKNOWN:
				break;
			case KVM_BP_HARDEN_WA_NEEDED:
				val = SMCCC_RET_SUCCESS;
				break;
			case KVM_BP_HARDEN_NOT_REQUIRED:
				val = SMCCC_RET_NOT_REQUIRED;
				break;
			}
			break;
		case ARM_SMCCC_ARCH_WORKAROUND_2:
			/* Speculative store bypass (SSBD) mitigation status */
			switch (kvm_arm_have_ssbd()) {
			case KVM_SSBD_FORCE_DISABLE:
			case KVM_SSBD_UNKNOWN:
				break;
			case KVM_SSBD_KERNEL:
				val = SMCCC_RET_SUCCESS;
				break;
			case KVM_SSBD_FORCE_ENABLE:
			case KVM_SSBD_MITIGATED:
				val = SMCCC_RET_NOT_REQUIRED;
				break;
			}
			break;
		case ARM_SMCCC_HV_PV_TIME_FEATURES:
			/* PV time discovery call is implemented */
			val = SMCCC_RET_SUCCESS;
			break;
		}
		break;
	case ARM_SMCCC_HV_PV_TIME_FEATURES:
		val = kvm_hypercall_pv_features(vcpu);
		break;
	case ARM_SMCCC_HV_PV_TIME_ST:
		/* Returns the gpa of the stolen-time structure on success */
		gpa = kvm_init_stolen_time(vcpu);
		if (gpa != GPA_INVALID)
			val = gpa;
		break;
	default:
		return kvm_psci_call(vcpu);
	}

	smccc_set_retval(vcpu, val, 0, 0, 0);
	return 1;
}
|
|
@ -15,6 +15,7 @@
|
|||
#include <asm/kvm_host.h>
|
||||
|
||||
#include <kvm/arm_psci.h>
|
||||
#include <kvm/arm_hypercalls.h>
|
||||
|
||||
/*
|
||||
* This is an implementation of the Power State Coordination Interface
|
||||
|
@ -23,38 +24,6 @@
|
|||
|
||||
#define AFFINITY_MASK(level) ~((0x1UL << ((level) * MPIDR_LEVEL_BITS)) - 1)
|
||||
|
||||
/* Fetch the SMCCC function ID from guest register 0. */
static u32 smccc_get_function(struct kvm_vcpu *vcpu)
{
	return vcpu_get_reg(vcpu, 0);
}
|
||||
|
||||
/* Fetch the first SMCCC call argument from guest register 1. */
static unsigned long smccc_get_arg1(struct kvm_vcpu *vcpu)
{
	return vcpu_get_reg(vcpu, 1);
}
|
||||
|
||||
/* Fetch the second SMCCC call argument from guest register 2. */
static unsigned long smccc_get_arg2(struct kvm_vcpu *vcpu)
{
	return vcpu_get_reg(vcpu, 2);
}
|
||||
|
||||
/* Fetch the third SMCCC call argument from guest register 3. */
static unsigned long smccc_get_arg3(struct kvm_vcpu *vcpu)
{
	return vcpu_get_reg(vcpu, 3);
}
|
||||
|
||||
/*
 * Write an SMCCC result back into the guest's first four registers
 * (@a0 is the primary return value).
 */
static void smccc_set_retval(struct kvm_vcpu *vcpu,
			     unsigned long a0,
			     unsigned long a1,
			     unsigned long a2,
			     unsigned long a3)
{
	vcpu_set_reg(vcpu, 0, a0);
	vcpu_set_reg(vcpu, 1, a1);
	vcpu_set_reg(vcpu, 2, a2);
	vcpu_set_reg(vcpu, 3, a3);
}
|
||||
|
||||
static unsigned long psci_affinity_mask(unsigned long affinity_level)
|
||||
{
|
||||
if (affinity_level <= 3)
|
||||
|
@ -373,7 +342,7 @@ static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
|
|||
* Errors:
|
||||
* -EINVAL: Unrecognized PSCI function
|
||||
*/
|
||||
static int kvm_psci_call(struct kvm_vcpu *vcpu)
|
||||
int kvm_psci_call(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
switch (kvm_psci_version(vcpu, vcpu->kvm)) {
|
||||
case KVM_ARM_PSCI_1_0:
|
||||
|
@ -387,55 +356,6 @@ static int kvm_psci_call(struct kvm_vcpu *vcpu)
|
|||
};
|
||||
}
|
||||
|
||||
/*
 * kvm_hvc_call_handler() - dispatch a guest SMCCC hypercall.
 *
 * Handles the SMCCC version query and the ARCH_FEATURES probe (branch
 * predictor and SSBD workaround status); anything else is forwarded to
 * the PSCI handler.
 *
 * Return: 1 (resume the guest) after storing the result in the vCPU's
 * registers, or whatever kvm_psci_call() returns for unhandled IDs.
 */
int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
{
	u32 func_id = smccc_get_function(vcpu);
	/*
	 * NOTE(review): val is u32, so the negative SMCCC_RET_* codes
	 * (e.g. SMCCC_RET_NOT_SUPPORTED == -1) are stored as their
	 * 32-bit two's-complement values before being widened into the
	 * guest register by smccc_set_retval().
	 */
	u32 val = SMCCC_RET_NOT_SUPPORTED;
	u32 feature;

	switch (func_id) {
	case ARM_SMCCC_VERSION_FUNC_ID:
		val = ARM_SMCCC_VERSION_1_1;
		break;
	case ARM_SMCCC_ARCH_FEATURES_FUNC_ID:
		/* The guest asks whether a given function ID is supported */
		feature = smccc_get_arg1(vcpu);
		switch(feature) {
		case ARM_SMCCC_ARCH_WORKAROUND_1:
			/* Branch-predictor hardening status */
			switch (kvm_arm_harden_branch_predictor()) {
			case KVM_BP_HARDEN_UNKNOWN:
				break;
			case KVM_BP_HARDEN_WA_NEEDED:
				val = SMCCC_RET_SUCCESS;
				break;
			case KVM_BP_HARDEN_NOT_REQUIRED:
				val = SMCCC_RET_NOT_REQUIRED;
				break;
			}
			break;
		case ARM_SMCCC_ARCH_WORKAROUND_2:
			/* Speculative store bypass (SSBD) mitigation status */
			switch (kvm_arm_have_ssbd()) {
			case KVM_SSBD_FORCE_DISABLE:
			case KVM_SSBD_UNKNOWN:
				break;
			case KVM_SSBD_KERNEL:
				val = SMCCC_RET_SUCCESS;
				break;
			case KVM_SSBD_FORCE_ENABLE:
			case KVM_SSBD_MITIGATED:
				val = SMCCC_RET_NOT_REQUIRED;
				break;
			}
			break;
		}
		break;
	default:
		return kvm_psci_call(vcpu);
	}

	smccc_set_retval(vcpu, val, 0, 0, 0);
	return 1;
}
|
||||
|
||||
int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return 3; /* PSCI version and two workaround registers */
|
||||
|
|
131
virt/kvm/arm/pvtime.c
Normal file
131
virt/kvm/arm/pvtime.c
Normal file
|
@ -0,0 +1,131 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
// Copyright (C) 2019 Arm Ltd.
|
||||
|
||||
#include <linux/arm-smccc.h>
|
||||
#include <linux/kvm_host.h>
|
||||
|
||||
#include <asm/kvm_mmu.h>
|
||||
#include <asm/pvclock-abi.h>
|
||||
|
||||
#include <kvm/arm_hypercalls.h>
|
||||
|
||||
/*
 * kvm_update_stolen_time() - refresh this vCPU's stolen-time record.
 *
 * Accumulates the scheduler run_delay consumed since the last update
 * into vcpu->arch.steal.steal, then publishes the new total
 * (little-endian) into the stolen_time field of the guest's
 * pvclock_vcpu_stolen_time structure at steal.base.  Does nothing
 * until the guest has enabled stolen time (steal.base != GPA_INVALID).
 */
void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	u64 steal;
	__le64 steal_le;
	u64 offset;
	int idx;
	u64 base = vcpu->arch.steal.base;

	if (base == GPA_INVALID)
		return;

	/* Let's do the local bookkeeping */
	steal = vcpu->arch.steal.steal;
	steal += current->sched_info.run_delay - vcpu->arch.steal.last_steal;
	vcpu->arch.steal.last_steal = current->sched_info.run_delay;
	vcpu->arch.steal.steal = steal;

	/* The shared structure is defined little-endian */
	steal_le = cpu_to_le64(steal);
	/* Hold SRCU across the guest-memory access */
	idx = srcu_read_lock(&kvm->srcu);
	offset = offsetof(struct pvclock_vcpu_stolen_time, stolen_time);
	kvm_put_guest(kvm, base + offset, steal_le, u64);
	srcu_read_unlock(&kvm->srcu, idx);
}
|
||||
|
||||
long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
u32 feature = smccc_get_arg1(vcpu);
|
||||
long val = SMCCC_RET_NOT_SUPPORTED;
|
||||
|
||||
switch (feature) {
|
||||
case ARM_SMCCC_HV_PV_TIME_FEATURES:
|
||||
case ARM_SMCCC_HV_PV_TIME_ST:
|
||||
val = SMCCC_RET_SUCCESS;
|
||||
break;
|
||||
}
|
||||
|
||||
return val;
|
||||
}
|
||||
|
||||
/*
 * kvm_init_stolen_time() - handle the PV_TIME_ST hypercall.
 *
 * Resets the vCPU's stolen-time accounting (so counting starts from
 * the moment the guest requests the feature) and zero-initialises the
 * guest's pvclock_vcpu_stolen_time structure.
 *
 * Return: the guest physical address of the stolen-time structure, or
 * GPA_INVALID if userspace has not configured one via the vCPU
 * attribute.
 */
gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu)
{
	struct pvclock_vcpu_stolen_time init_values = {};
	struct kvm *kvm = vcpu->kvm;
	u64 base = vcpu->arch.steal.base;
	int idx;

	if (base == GPA_INVALID)
		return base;

	/*
	 * Start counting stolen time from the time the guest requests
	 * the feature enabled.
	 */
	vcpu->arch.steal.steal = 0;
	vcpu->arch.steal.last_steal = current->sched_info.run_delay;

	/* Hold SRCU across the guest-memory write */
	idx = srcu_read_lock(&kvm->srcu);
	kvm_write_guest(kvm, base, &init_values, sizeof(init_values));
	srcu_read_unlock(&kvm->srcu, idx);

	return base;
}
|
||||
|
||||
/*
 * kvm_arm_pvtime_set_attr() - set the IPA of the stolen-time structure.
 *
 * Reads a u64 guest physical address from userspace (attr->addr),
 * requires 64-byte alignment and that the address falls in an existing
 * memslot, then records it in vcpu->arch.steal.base.
 *
 * Return: 0 on success; -ENXIO for an unknown attribute, -EFAULT on a
 * failed userspace copy, -EINVAL for a misaligned or unmapped address,
 * -EEXIST if the base was already set for this vCPU.
 */
int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr)
{
	u64 __user *user = (u64 __user *)attr->addr;
	struct kvm *kvm = vcpu->kvm;
	u64 ipa;
	int ret = 0;
	int idx;

	if (attr->attr != KVM_ARM_VCPU_PVTIME_IPA)
		return -ENXIO;

	if (get_user(ipa, user))
		return -EFAULT;
	if (!IS_ALIGNED(ipa, 64))
		return -EINVAL;
	/* The base may only be configured once per vCPU */
	if (vcpu->arch.steal.base != GPA_INVALID)
		return -EEXIST;

	/* Check the address is in a valid memslot */
	idx = srcu_read_lock(&kvm->srcu);
	if (kvm_is_error_hva(gfn_to_hva(kvm, ipa >> PAGE_SHIFT)))
		ret = -EINVAL;
	srcu_read_unlock(&kvm->srcu, idx);

	if (!ret)
		vcpu->arch.steal.base = ipa;

	return ret;
}
|
||||
|
||||
int kvm_arm_pvtime_get_attr(struct kvm_vcpu *vcpu,
|
||||
struct kvm_device_attr *attr)
|
||||
{
|
||||
u64 __user *user = (u64 __user *)attr->addr;
|
||||
u64 ipa;
|
||||
|
||||
if (attr->attr != KVM_ARM_VCPU_PVTIME_IPA)
|
||||
return -ENXIO;
|
||||
|
||||
ipa = vcpu->arch.steal.base;
|
||||
|
||||
if (put_user(ipa, user))
|
||||
return -EFAULT;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
|
||||
struct kvm_device_attr *attr)
|
||||
{
|
||||
switch (attr->attr) {
|
||||
case KVM_ARM_VCPU_PVTIME_IPA:
|
||||
return 0;
|
||||
}
|
||||
return -ENXIO;
|
||||
}
|
|
@ -3046,14 +3046,14 @@ struct kvm_device *kvm_device_from_filp(struct file *filp)
|
|||
return filp->private_data;
|
||||
}
|
||||
|
||||
static struct kvm_device_ops *kvm_device_ops_table[KVM_DEV_TYPE_MAX] = {
|
||||
static const struct kvm_device_ops *kvm_device_ops_table[KVM_DEV_TYPE_MAX] = {
|
||||
#ifdef CONFIG_KVM_MPIC
|
||||
[KVM_DEV_TYPE_FSL_MPIC_20] = &kvm_mpic_ops,
|
||||
[KVM_DEV_TYPE_FSL_MPIC_42] = &kvm_mpic_ops,
|
||||
#endif
|
||||
};
|
||||
|
||||
int kvm_register_device_ops(struct kvm_device_ops *ops, u32 type)
|
||||
int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type)
|
||||
{
|
||||
if (type >= ARRAY_SIZE(kvm_device_ops_table))
|
||||
return -ENOSPC;
|
||||
|
@ -3074,7 +3074,7 @@ void kvm_unregister_device_ops(u32 type)
|
|||
static int kvm_ioctl_create_device(struct kvm *kvm,
|
||||
struct kvm_create_device *cd)
|
||||
{
|
||||
struct kvm_device_ops *ops = NULL;
|
||||
const struct kvm_device_ops *ops = NULL;
|
||||
struct kvm_device *dev;
|
||||
bool test = cd->flags & KVM_CREATE_DEVICE_TEST;
|
||||
int type;
|
||||
|
|
Loading…
Reference in New Issue
Block a user