arm64: KVM: Encapsulate kvm_cpu_context in kvm_host_data

The virt/arm core allocates a kvm_cpu_context_t percpu; at present this is
a typedef to kvm_cpu_context and is used to store the host CPU context. The
kvm_cpu_context structure is also used elsewhere to hold vcpu context.
In order to use the percpu to hold additional host information in the
future, we encapsulate kvm_cpu_context in a new structure, kvm_host_data,
and rename the typedef and percpu variable to match.

Signed-off-by: Andrew Murray <andrew.murray@arm.com>
Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Author:    Andrew Murray <andrew.murray@arm.com>
Date:      2019-04-09 20:22:11 +01:00
Committer: Marc Zyngier <marc.zyngier@arm.com>
Parent:    21bb0ebf5d
Commit:    630a16854d

5 changed files with 28 additions and 16 deletions
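Before the per-file diffs, a minimal stand-alone sketch of the encapsulation pattern the commit introduces: the per-CPU host context is wrapped in a new struct so that host-only fields can be added later without touching kvm_cpu_context. The kernel types and per-CPU machinery are stubbed with plain C stand-ins (fake_percpu_host_data is illustrative, not a kernel symbol).

/*
 * Stand-alone sketch of the encapsulation pattern; kernel types and the
 * per-CPU machinery are stubbed out, only the ->host_ctxt indirection
 * mirrors the diffs below.
 */
#include <stdio.h>
#include <stddef.h>

/* Stand-in for the real, much larger kvm_cpu_context. */
struct kvm_cpu_context {
	unsigned long regs[4];
};

/* New wrapper: future host-only state can be added here. */
struct kvm_host_data {
	struct kvm_cpu_context host_ctxt;
};

/* Stand-in for DEFINE_PER_CPU(kvm_host_data_t, kvm_host_data). */
static struct kvm_host_data fake_percpu_host_data;

int main(void)
{
	/* Old code used the per-CPU context directly; new code takes the
	 * wrapper and dereferences ->host_ctxt. */
	struct kvm_cpu_context *host_ctxt = &fake_percpu_host_data.host_ctxt;

	host_ctxt->regs[0] = 1;
	printf("host_ctxt offset in wrapper: %zu\n",
	       offsetof(struct kvm_host_data, host_ctxt));
	return 0;
}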

arch/arm/include/asm/kvm_host.h

@@ -153,9 +153,13 @@ struct kvm_cpu_context {
 	u32 cp15[NR_CP15_REGS];
 };
 
-typedef struct kvm_cpu_context kvm_cpu_context_t;
+struct kvm_host_data {
+	struct kvm_cpu_context host_ctxt;
+};
+
+typedef struct kvm_host_data kvm_host_data_t;
 
-static inline void kvm_init_host_cpu_context(kvm_cpu_context_t *cpu_ctxt,
+static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt,
 					     int cpu)
 {
 	/* The host's MPIDR is immutable, so let's set it up at boot time */
@@ -185,7 +189,7 @@ struct kvm_vcpu_arch {
 	struct kvm_vcpu_fault_info fault;
 
 	/* Host FP context */
-	kvm_cpu_context_t *host_cpu_context;
+	struct kvm_cpu_context *host_cpu_context;
 
 	/* VGIC state */
 	struct vgic_cpu vgic_cpu;

arch/arm64/include/asm/kvm_asm.h

@@ -108,7 +108,8 @@ extern u32 __kvm_get_mdcr_el2(void);
 .endm
 
 .macro get_host_ctxt reg, tmp
-	hyp_adr_this_cpu \reg, kvm_host_cpu_state, \tmp
+	hyp_adr_this_cpu \reg, kvm_host_data, \tmp
+	add	\reg, \reg, #HOST_DATA_CONTEXT
 .endm
 
 .macro get_vcpu_ptr vcpu, ctxt
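The only functional change in this macro is the extra add: the per-CPU symbol now points at the kvm_host_data wrapper rather than directly at a kvm_cpu_context, so EL2 code must step forward by HOST_DATA_CONTEXT (generated from offsetof(struct kvm_host_data, host_ctxt), currently 0 because host_ctxt is the first member) to reach the embedded context. A rough C equivalent of the updated macro, using stand-in struct definitions:

/* Rough C equivalent of the updated get_host_ctxt (stand-in structs). */
#include <stddef.h>
#include <stdint.h>

struct kvm_cpu_context { unsigned long regs[4]; };
struct kvm_host_data   { struct kvm_cpu_context host_ctxt; };

struct kvm_cpu_context *get_host_ctxt(struct kvm_host_data *percpu_base)
{
	/* hyp_adr_this_cpu \reg, kvm_host_data, \tmp  ->  percpu_base   */
	/* add \reg, \reg, #HOST_DATA_CONTEXT          ->  + offsetof()  */
	return (struct kvm_cpu_context *)((uintptr_t)percpu_base +
			offsetof(struct kvm_host_data, host_ctxt));
}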

arch/arm64/include/asm/kvm_host.h

@@ -233,7 +233,11 @@ struct kvm_cpu_context {
 	struct kvm_vcpu *__hyp_running_vcpu;
 };
 
-typedef struct kvm_cpu_context kvm_cpu_context_t;
+struct kvm_host_data {
+	struct kvm_cpu_context host_ctxt;
+};
+
+typedef struct kvm_host_data kvm_host_data_t;
 
 struct vcpu_reset_state {
 	unsigned long pc;
@@ -278,7 +282,7 @@ struct kvm_vcpu_arch {
 	struct kvm_guest_debug_arch external_debug_state;
 
 	/* Pointer to host CPU context */
-	kvm_cpu_context_t *host_cpu_context;
+	struct kvm_cpu_context *host_cpu_context;
 
 	struct thread_info *host_thread_info;	/* hyp VA */
 	struct user_fpsimd_state *host_fpsimd_state;	/* hyp VA */
@@ -483,9 +487,9 @@ void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome);
 
 struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
 
-DECLARE_PER_CPU(kvm_cpu_context_t, kvm_host_cpu_state);
+DECLARE_PER_CPU(kvm_host_data_t, kvm_host_data);
 
-static inline void kvm_init_host_cpu_context(kvm_cpu_context_t *cpu_ctxt,
+static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt,
 					     int cpu)
 {
 	/* The host's MPIDR is immutable, so let's set it up at boot time */
@@ -503,8 +507,8 @@ static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
 	 * kernel's mapping to the linear mapping, and store it in tpidr_el2
 	 * so that we can use adr_l to access per-cpu variables in EL2.
	 */
-	u64 tpidr_el2 = ((u64)this_cpu_ptr(&kvm_host_cpu_state) -
-			 (u64)kvm_ksym_ref(kvm_host_cpu_state));
+	u64 tpidr_el2 = ((u64)this_cpu_ptr(&kvm_host_data) -
+			 (u64)kvm_ksym_ref(kvm_host_data));
 
 	/*
 	 * Call initialization code, and switch to the full blown HYP code.
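The tpidr_el2 hunk above relies on the standard arm64 KVM trick for per-CPU access at EL2: the host stores the difference between this CPU's per-CPU copy and the symbol's link-time address in tpidr_el2, and hyp_adr_this_cpu rebuilds this_cpu_ptr() by adding that register to the symbol address. A small model of the arithmetic, with made-up addresses purely for illustration:

/*
 * Model of the tpidr_el2 per-CPU trick referenced in the hunk above:
 * EL2 keeps (per-CPU copy address - symbol link address) in tpidr_el2,
 * so hyp code recovers this_cpu_ptr() with a single add. The addresses
 * below are made up for illustration.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t kvm_host_data_sym = 0xffff000010a00000ULL; /* hypothetical link address */
	uint64_t this_cpu_copy     = 0xffff800011f40000ULL; /* hypothetical per-CPU copy  */

	/* What __cpu_init_hyp_mode() computes and stashes in tpidr_el2: */
	uint64_t tpidr_el2 = this_cpu_copy - kvm_host_data_sym;

	/* What hyp_adr_this_cpu effectively does: adr_l of the symbol, plus tpidr_el2. */
	uint64_t rebuilt = kvm_host_data_sym + tpidr_el2;

	printf("%s\n", rebuilt == this_cpu_copy ? "match" : "mismatch");
	return 0;
}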

arch/arm64/kernel/asm-offsets.c

@@ -134,6 +134,7 @@ int main(void)
   DEFINE(CPU_APGAKEYLO_EL1,	offsetof(struct kvm_cpu_context, sys_regs[APGAKEYLO_EL1]));
   DEFINE(CPU_USER_PT_REGS,	offsetof(struct kvm_regs, regs));
   DEFINE(HOST_CONTEXT_VCPU,	offsetof(struct kvm_cpu_context, __hyp_running_vcpu));
+  DEFINE(HOST_DATA_CONTEXT,	offsetof(struct kvm_host_data, host_ctxt));
 #endif
 #ifdef CONFIG_CPU_PM
   DEFINE(CPU_CTX_SP,		offsetof(struct cpu_suspend_ctx, sp));
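HOST_DATA_CONTEXT is produced by the usual asm-offsets mechanism: the file is compiled to assembly and the string emitted by DEFINE() is scraped into a generated header, so the constant becomes visible to the .S code that uses get_host_ctxt. A compilable sketch of that mechanism, with DEFINE() written in the spirit of include/linux/kbuild.h and stand-in structs; treat it as an approximation rather than the exact kernel plumbing:

/*
 * Sketch of the asm-offsets mechanism: this file is compiled to assembly
 * and the string emitted by DEFINE() is scraped into a generated header,
 * making HOST_DATA_CONTEXT usable from .S files. DEFINE() follows the
 * spirit of include/linux/kbuild.h; the structs are stand-ins.
 */
#include <stddef.h>

struct kvm_cpu_context { unsigned long regs[4]; };
struct kvm_host_data   { struct kvm_cpu_context host_ctxt; };

#define DEFINE(sym, val) \
	asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))

int main(void)
{
	/* With host_ctxt as the first member this offset is currently 0. */
	DEFINE(HOST_DATA_CONTEXT, offsetof(struct kvm_host_data, host_ctxt));
	return 0;
}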

virt/kvm/arm/arm.c

@@ -56,7 +56,7 @@
 __asm__(".arch_extension	virt");
 #endif
 
-DEFINE_PER_CPU(kvm_cpu_context_t, kvm_host_cpu_state);
+DEFINE_PER_CPU(kvm_host_data_t, kvm_host_data);
 static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
 
 /* Per-CPU variable containing the currently running vcpu. */
@@ -360,8 +360,10 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 	int *last_ran;
+	kvm_host_data_t *cpu_data;
 
 	last_ran = this_cpu_ptr(vcpu->kvm->arch.last_vcpu_ran);
+	cpu_data = this_cpu_ptr(&kvm_host_data);
 
 	/*
 	 * We might get preempted before the vCPU actually runs, but
@@ -373,7 +375,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	}
 
 	vcpu->cpu = cpu;
-	vcpu->arch.host_cpu_context = this_cpu_ptr(&kvm_host_cpu_state);
+	vcpu->arch.host_cpu_context = &cpu_data->host_ctxt;
 
 	kvm_arm_set_running_vcpu(vcpu);
 	kvm_vgic_load(vcpu);
@@ -1569,11 +1571,11 @@ static int init_hyp_mode(void)
 	}
 
 	for_each_possible_cpu(cpu) {
-		kvm_cpu_context_t *cpu_ctxt;
+		kvm_host_data_t *cpu_data;
 
-		cpu_ctxt = per_cpu_ptr(&kvm_host_cpu_state, cpu);
-		kvm_init_host_cpu_context(cpu_ctxt, cpu);
+		cpu_data = per_cpu_ptr(&kvm_host_data, cpu);
+		kvm_init_host_cpu_context(&cpu_data->host_ctxt, cpu);
 
-		err = create_hyp_mappings(cpu_ctxt, cpu_ctxt + 1, PAGE_HYP);
+		err = create_hyp_mappings(cpu_data, cpu_data + 1, PAGE_HYP);
 		if (err) {
 			kvm_err("Cannot map host CPU state: %d\n", err);
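A side effect of passing cpu_data rather than cpu_ctxt to create_hyp_mappings() is that the whole kvm_host_data object, not just the context, becomes visible at EL2, so future fields added beside host_ctxt need no extra mapping work. An illustrative user-space check of the mapped range per CPU; the counters field is hypothetical:

/*
 * Illustrative check (user space, not kernel code): the range handed to
 * create_hyp_mappings() is [cpu_data, cpu_data + 1), i.e. the whole
 * kvm_host_data object, so fields added beside host_ctxt are mapped at
 * EL2 automatically. The counters field is hypothetical.
 */
#include <stdio.h>
#include <stddef.h>

struct kvm_cpu_context { unsigned long regs[4]; };

struct kvm_host_data {
	struct kvm_cpu_context host_ctxt;
	unsigned long counters[2];	/* hypothetical future host-only state */
};

int main(void)
{
	struct kvm_host_data cpu_data;
	void *start = &cpu_data;
	void *end   = &cpu_data + 1;

	printf("bytes mapped per CPU: %zu\n",
	       (size_t)((char *)end - (char *)start));
	return 0;
}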