KVM: arm64: vgic-v3: Take cpu_if pointer directly instead of vcpu
If we move the used_lrs field to the version-specific cpu interface
structure, the following functions only operate on the struct
vgic_v3_cpu_if and not the full vcpu:

  __vgic_v3_save_state
  __vgic_v3_restore_state
  __vgic_v3_activate_traps
  __vgic_v3_deactivate_traps
  __vgic_v3_save_aprs
  __vgic_v3_restore_aprs

This is going to be very useful for nested virt, so move the used_lrs
field and change the prototypes and implementations of these functions
to take the cpu_if parameter directly.

No functional change.

Reviewed-by: James Morse <james.morse@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
parent 0a78791c0d
commit fc5d1f1a42
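To illustrate the new calling convention, here is a minimal, hypothetical sketch (not part of the patch; the wrapper name example_vgic_save is made up for illustration) of how a caller now picks the GICv3 CPU interface and passes it down, both for a direct call and for a hyp call that needs a hyp VA:

```c
/*
 * Hypothetical caller-side sketch: after this patch the caller chooses
 * which struct vgic_v3_cpu_if to operate on and hands it to the
 * __vgic_v3_* helpers, instead of passing the whole vcpu.
 */
static void example_vgic_save(struct kvm_vcpu *vcpu)
{
	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;

	/* Direct call: the kernel VA of the cpu_if is used as-is. */
	__vgic_v3_save_state(cpu_if);

	/* Hyp call: translate the pointer to a hyp VA first. */
	kvm_call_hyp(__vgic_v3_save_aprs, kern_hyp_va(cpu_if));
}
```

With nested virtualization, the same helpers could later be pointed at a different (e.g. shadow) cpu interface without touching their implementations.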
@@ -56,12 +56,12 @@
 int __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu);
 
-void __vgic_v3_save_state(struct kvm_vcpu *vcpu);
-void __vgic_v3_restore_state(struct kvm_vcpu *vcpu);
-void __vgic_v3_activate_traps(struct kvm_vcpu *vcpu);
-void __vgic_v3_deactivate_traps(struct kvm_vcpu *vcpu);
-void __vgic_v3_save_aprs(struct kvm_vcpu *vcpu);
-void __vgic_v3_restore_aprs(struct kvm_vcpu *vcpu);
+void __vgic_v3_save_state(struct vgic_v3_cpu_if *cpu_if);
+void __vgic_v3_restore_state(struct vgic_v3_cpu_if *cpu_if);
+void __vgic_v3_activate_traps(struct vgic_v3_cpu_if *cpu_if);
+void __vgic_v3_deactivate_traps(struct vgic_v3_cpu_if *cpu_if);
+void __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if);
+void __vgic_v3_restore_aprs(struct vgic_v3_cpu_if *cpu_if);
 int __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu);
 
 void __timer_enable_traps(struct kvm_vcpu *vcpu);
@@ -270,8 +270,8 @@ static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu)
 static void __hyp_text __hyp_vgic_save_state(struct kvm_vcpu *vcpu)
 {
 	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
-		__vgic_v3_save_state(vcpu);
-		__vgic_v3_deactivate_traps(vcpu);
+		__vgic_v3_save_state(&vcpu->arch.vgic_cpu.vgic_v3);
+		__vgic_v3_deactivate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
 	}
 }
 
@@ -279,8 +279,8 @@ static void __hyp_text __hyp_vgic_save_state(struct kvm_vcpu *vcpu)
 static void __hyp_text __hyp_vgic_restore_state(struct kvm_vcpu *vcpu)
 {
 	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
-		__vgic_v3_activate_traps(vcpu);
-		__vgic_v3_restore_state(vcpu);
+		__vgic_v3_activate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
+		__vgic_v3_restore_state(&vcpu->arch.vgic_cpu.vgic_v3);
 	}
 }
 
@@ -194,10 +194,9 @@ static u32 __hyp_text __vgic_v3_read_ap1rn(int n)
 	return val;
 }
 
-void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
+void __hyp_text __vgic_v3_save_state(struct vgic_v3_cpu_if *cpu_if)
 {
-	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
-	u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
+	u64 used_lrs = cpu_if->used_lrs;
 
 	/*
 	 * Make sure stores to the GIC via the memory mapped interface
@@ -230,10 +229,9 @@ void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
 	}
 }
 
-void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
+void __hyp_text __vgic_v3_restore_state(struct vgic_v3_cpu_if *cpu_if)
 {
-	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
-	u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
+	u64 used_lrs = cpu_if->used_lrs;
 	int i;
 
 	if (used_lrs || cpu_if->its_vpe.its_vm) {
@@ -257,10 +255,8 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
 	}
 }
 
-void __hyp_text __vgic_v3_activate_traps(struct kvm_vcpu *vcpu)
+void __hyp_text __vgic_v3_activate_traps(struct vgic_v3_cpu_if *cpu_if)
 {
-	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
-
 	/*
 	 * VFIQEn is RES1 if ICC_SRE_EL1.SRE is 1. This causes a
 	 * Group0 interrupt (as generated in GICv2 mode) to be
@@ -306,9 +302,8 @@ void __hyp_text __vgic_v3_activate_traps(struct kvm_vcpu *vcpu)
 	write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);
 }
 
-void __hyp_text __vgic_v3_deactivate_traps(struct kvm_vcpu *vcpu)
+void __hyp_text __vgic_v3_deactivate_traps(struct vgic_v3_cpu_if *cpu_if)
 {
-	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
 	u64 val;
 
 	if (!cpu_if->vgic_sre) {
@@ -333,15 +328,11 @@ void __hyp_text __vgic_v3_deactivate_traps(struct kvm_vcpu *vcpu)
 		write_gicreg(0, ICH_HCR_EL2);
 }
 
-void __hyp_text __vgic_v3_save_aprs(struct kvm_vcpu *vcpu)
+void __hyp_text __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if)
 {
-	struct vgic_v3_cpu_if *cpu_if;
 	u64 val;
 	u32 nr_pre_bits;
 
-	vcpu = kern_hyp_va(vcpu);
-	cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
-
 	val = read_gicreg(ICH_VTR_EL2);
 	nr_pre_bits = vtr_to_nr_pre_bits(val);
 
@@ -370,15 +361,11 @@ void __hyp_text __vgic_v3_save_aprs(struct kvm_vcpu *vcpu)
 	}
 }
 
-void __hyp_text __vgic_v3_restore_aprs(struct kvm_vcpu *vcpu)
+void __hyp_text __vgic_v3_restore_aprs(struct vgic_v3_cpu_if *cpu_if)
 {
-	struct vgic_v3_cpu_if *cpu_if;
 	u64 val;
 	u32 nr_pre_bits;
 
-	vcpu = kern_hyp_va(vcpu);
-	cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
-
 	val = read_gicreg(ICH_VTR_EL2);
 	nr_pre_bits = vtr_to_nr_pre_bits(val);
 
@@ -451,7 +438,7 @@ static int __hyp_text __vgic_v3_highest_priority_lr(struct kvm_vcpu *vcpu,
 					 u32 vmcr,
 					 u64 *lr_val)
 {
-	unsigned int used_lrs = vcpu->arch.vgic_cpu.used_lrs;
+	unsigned int used_lrs = vcpu->arch.vgic_cpu.vgic_v3.used_lrs;
 	u8 priority = GICv3_IDLE_PRIORITY;
 	int i, lr = -1;
 
@@ -490,7 +477,7 @@ static int __hyp_text __vgic_v3_highest_priority_lr(struct kvm_vcpu *vcpu,
 static int __hyp_text __vgic_v3_find_active_lr(struct kvm_vcpu *vcpu,
 					       int intid, u64 *lr_val)
 {
-	unsigned int used_lrs = vcpu->arch.vgic_cpu.used_lrs;
+	unsigned int used_lrs = vcpu->arch.vgic_cpu.vgic_v3.used_lrs;
 	int i;
 
 	for (i = 0; i < used_lrs; i++) {
@@ -56,7 +56,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
 
 	cpuif->vgic_hcr &= ~GICH_HCR_UIE;
 
-	for (lr = 0; lr < vgic_cpu->used_lrs; lr++) {
+	for (lr = 0; lr < vgic_cpu->vgic_v2.used_lrs; lr++) {
 		u32 val = cpuif->vgic_lr[lr];
 		u32 cpuid, intid = val & GICH_LR_VIRTUALID;
 		struct vgic_irq *irq;
@@ -120,7 +120,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
 		vgic_put_irq(vcpu->kvm, irq);
 	}
 
-	vgic_cpu->used_lrs = 0;
+	cpuif->used_lrs = 0;
 }
 
 /*
@@ -427,7 +427,7 @@ int vgic_v2_probe(const struct gic_kvm_info *info)
 static void save_lrs(struct kvm_vcpu *vcpu, void __iomem *base)
 {
 	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
-	u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
+	u64 used_lrs = cpu_if->used_lrs;
 	u64 elrsr;
 	int i;
 
@@ -448,7 +448,7 @@ static void save_lrs(struct kvm_vcpu *vcpu, void __iomem *base)
 void vgic_v2_save_state(struct kvm_vcpu *vcpu)
 {
 	void __iomem *base = kvm_vgic_global_state.vctrl_base;
-	u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
+	u64 used_lrs = vcpu->arch.vgic_cpu.vgic_v2.used_lrs;
 
 	if (!base)
 		return;
@@ -463,7 +463,7 @@ void vgic_v2_restore_state(struct kvm_vcpu *vcpu)
 {
 	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
 	void __iomem *base = kvm_vgic_global_state.vctrl_base;
-	u64 used_lrs = vcpu->arch.vgic_cpu.used_lrs;
+	u64 used_lrs = cpu_if->used_lrs;
 	int i;
 
 	if (!base)
@@ -39,7 +39,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
 
 	cpuif->vgic_hcr &= ~ICH_HCR_UIE;
 
-	for (lr = 0; lr < vgic_cpu->used_lrs; lr++) {
+	for (lr = 0; lr < cpuif->used_lrs; lr++) {
 		u64 val = cpuif->vgic_lr[lr];
 		u32 intid, cpuid;
 		struct vgic_irq *irq;
@@ -111,7 +111,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
 		vgic_put_irq(vcpu->kvm, irq);
 	}
 
-	vgic_cpu->used_lrs = 0;
+	cpuif->used_lrs = 0;
 }
 
 /* Requires the irq to be locked already */
@@ -662,10 +662,10 @@ void vgic_v3_load(struct kvm_vcpu *vcpu)
 	if (likely(cpu_if->vgic_sre))
 		kvm_call_hyp(__vgic_v3_write_vmcr, cpu_if->vgic_vmcr);
 
-	kvm_call_hyp(__vgic_v3_restore_aprs, vcpu);
+	kvm_call_hyp(__vgic_v3_restore_aprs, kern_hyp_va(cpu_if));
 
 	if (has_vhe())
-		__vgic_v3_activate_traps(vcpu);
+		__vgic_v3_activate_traps(cpu_if);
 
 	WARN_ON(vgic_v4_load(vcpu));
 }
@@ -680,12 +680,14 @@ void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu)
 
 void vgic_v3_put(struct kvm_vcpu *vcpu)
 {
+	struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
+
 	WARN_ON(vgic_v4_put(vcpu, false));
 
 	vgic_v3_vmcr_sync(vcpu);
 
-	kvm_call_hyp(__vgic_v3_save_aprs, vcpu);
+	kvm_call_hyp(__vgic_v3_save_aprs, kern_hyp_va(cpu_if));
 
 	if (has_vhe())
-		__vgic_v3_deactivate_traps(vcpu);
+		__vgic_v3_deactivate_traps(cpu_if);
 }
@@ -786,6 +786,7 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
 	int count;
 	bool multi_sgi;
 	u8 prio = 0xff;
+	int i = 0;
 
 	lockdep_assert_held(&vgic_cpu->ap_list_lock);
 
@@ -827,11 +828,14 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
 		}
 	}
 
-	vcpu->arch.vgic_cpu.used_lrs = count;
-
 	/* Nuke remaining LRs */
-	for ( ; count < kvm_vgic_global_state.nr_lr; count++)
-		vgic_clear_lr(vcpu, count);
+	for (i = count ; i < kvm_vgic_global_state.nr_lr; i++)
+		vgic_clear_lr(vcpu, i);
+
+	if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
+		vcpu->arch.vgic_cpu.vgic_v2.used_lrs = count;
+	else
+		vcpu->arch.vgic_cpu.vgic_v3.used_lrs = count;
 }
 
 static inline bool can_access_vgic_from_kernel(void)
@@ -849,13 +853,13 @@ static inline void vgic_save_state(struct kvm_vcpu *vcpu)
 	if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
 		vgic_v2_save_state(vcpu);
 	else
-		__vgic_v3_save_state(vcpu);
+		__vgic_v3_save_state(&vcpu->arch.vgic_cpu.vgic_v3);
 }
 
 /* Sync back the hardware VGIC state into our emulation after a guest's run. */
 void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
 {
-	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+	int used_lrs;
 
 	/* An empty ap_list_head implies used_lrs == 0 */
 	if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
@@ -864,7 +868,12 @@ void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
 	if (can_access_vgic_from_kernel())
 		vgic_save_state(vcpu);
 
-	if (vgic_cpu->used_lrs)
+	if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
+		used_lrs = vcpu->arch.vgic_cpu.vgic_v2.used_lrs;
+	else
+		used_lrs = vcpu->arch.vgic_cpu.vgic_v3.used_lrs;
+
+	if (used_lrs)
 		vgic_fold_lr_state(vcpu);
 	vgic_prune_ap_list(vcpu);
 }
@@ -874,7 +883,7 @@ static inline void vgic_restore_state(struct kvm_vcpu *vcpu)
 	if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
 		vgic_v2_restore_state(vcpu);
 	else
-		__vgic_v3_restore_state(vcpu);
+		__vgic_v3_restore_state(&vcpu->arch.vgic_cpu.vgic_v3);
 }
 
 /* Flush our emulation state into the GIC hardware before entering the guest. */
@@ -274,6 +274,8 @@ struct vgic_v2_cpu_if {
 	u32		vgic_vmcr;
 	u32		vgic_apr;
 	u32		vgic_lr[VGIC_V2_MAX_LRS];
+
+	unsigned int used_lrs;
 };
 
 struct vgic_v3_cpu_if {
|
@ -291,6 +293,8 @@ struct vgic_v3_cpu_if {
|
||||||
* linking the Linux IRQ subsystem and the ITS together.
|
* linking the Linux IRQ subsystem and the ITS together.
|
||||||
*/
|
*/
|
||||||
struct its_vpe its_vpe;
|
struct its_vpe its_vpe;
|
||||||
|
|
||||||
|
unsigned int used_lrs;
|
||||||
};
|
};
|
||||||
|
|
||||||
struct vgic_cpu {
|
struct vgic_cpu {
|
||||||
|
@ -300,7 +304,6 @@ struct vgic_cpu {
|
||||||
struct vgic_v3_cpu_if vgic_v3;
|
struct vgic_v3_cpu_if vgic_v3;
|
||||||
};
|
};
|
||||||
|
|
||||||
unsigned int used_lrs;
|
|
||||||
struct vgic_irq private_irqs[VGIC_NR_PRIVATE_IRQS];
|
struct vgic_irq private_irqs[VGIC_NR_PRIVATE_IRQS];
|
||||||
|
|
||||||
raw_spinlock_t ap_list_lock; /* Protects the ap_list */
|
raw_spinlock_t ap_list_lock; /* Protects the ap_list */
|
||||||
|
|