KVM: arm/arm64: vgic: Add refcounting for IRQs

At the moment our struct vgic_irq instances are statically allocated
at guest creation time, so getting a pointer to an IRQ structure is
trivial and safe. LPIs are more dynamic: they can be mapped and
unmapped at any time during the guest's _runtime_.
In preparation for supporting LPIs we introduce reference counting
for those structures using the kernel's kref infrastructure.
Since private IRQs and SPIs are statically allocated, we avoid
actually refcounting them, since they would never be released anyway.
But we do take provisions to increase the refcount when an IRQ gets
onto a VCPU list and to decrease it when it gets removed. This also
introduces vgic_put_irq(), which wraps kref_put and hides the release
function from the callers.
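
To illustrate the resulting contract for callers, here is a sketch
(not code from this patch; the names match the helpers introduced
below):

        /*
         * Every vgic_get_irq() must now be paired with a vgic_put_irq()
         * once the caller is done with the IRQ. Both helpers are no-ops
         * for intid < VGIC_MIN_LPI, so the statically allocated private
         * IRQs and SPIs are never really refcounted.
         */
        struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid);

        spin_lock(&irq->irq_lock);
        /* ... read or update the IRQ's state ... */
        spin_unlock(&irq->irq_lock);

        vgic_put_irq(vcpu->kvm, irq);   /* drop the reference again */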

Signed-off-by: Andre Przywara <andre.przywara@arm.com>
Reviewed-by: Marc Zyngier <marc.zyngier@arm.com>
Tested-by: Eric Auger <eric.auger@redhat.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Andre Przywara, 2016-07-15 12:43:27 +01:00, committed by Marc Zyngier
commit 5dd4b924e3, parent 8a39d00670
9 files changed, 99 insertions(+), 12 deletions(-)

diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h

@@ -96,6 +96,7 @@ struct vgic_irq {
        bool active;                    /* not used for LPIs */
        bool enabled;
        bool hw;                        /* Tied to HW IRQ */
+       struct kref refcount;           /* Used for LPIs */
        u32 hwintid;                    /* HW INTID number */
        union {
                u8 targets;             /* GICv2 target VCPUs mask */

diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c

@@ -177,6 +177,7 @@ static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis)
                spin_lock_init(&irq->irq_lock);
                irq->vcpu = NULL;
                irq->target_vcpu = vcpu0;
+               kref_init(&irq->refcount);
                if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2)
                        irq->targets = 0;
                else
@@ -211,6 +212,7 @@ static void kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
                irq->vcpu = NULL;
                irq->target_vcpu = vcpu;
                irq->targets = 1U << vcpu->vcpu_id;
+               kref_init(&irq->refcount);
                if (vgic_irq_is_sgi(i)) {
                        /* SGIs */
                        irq->enabled = 1;

diff --git a/virt/kvm/arm/vgic/vgic-mmio-v2.c b/virt/kvm/arm/vgic/vgic-mmio-v2.c

@@ -102,6 +102,7 @@ static void vgic_mmio_write_sgir(struct kvm_vcpu *source_vcpu,
                irq->source |= 1U << source_vcpu->vcpu_id;

                vgic_queue_irq_unlock(source_vcpu->kvm, irq);
+               vgic_put_irq(source_vcpu->kvm, irq);
        }
 }
@@ -116,6 +117,8 @@ static unsigned long vgic_mmio_read_target(struct kvm_vcpu *vcpu,
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                val |= (u64)irq->targets << (i * 8);
+
+               vgic_put_irq(vcpu->kvm, irq);
        }

        return val;
@@ -143,6 +146,7 @@ static void vgic_mmio_write_target(struct kvm_vcpu *vcpu,
                irq->target_vcpu = kvm_get_vcpu(vcpu->kvm, target);

                spin_unlock(&irq->irq_lock);
+               vgic_put_irq(vcpu->kvm, irq);
        }
 }
@@ -157,6 +161,8 @@ static unsigned long vgic_mmio_read_sgipend(struct kvm_vcpu *vcpu,
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                val |= (u64)irq->source << (i * 8);
+
+               vgic_put_irq(vcpu->kvm, irq);
        }
        return val;
 }
@@ -178,6 +184,7 @@ static void vgic_mmio_write_sgipendc(struct kvm_vcpu *vcpu,
                        irq->pending = false;

                spin_unlock(&irq->irq_lock);
+               vgic_put_irq(vcpu->kvm, irq);
        }
 }
@@ -201,6 +208,7 @@ static void vgic_mmio_write_sgipends(struct kvm_vcpu *vcpu,
                } else {
                        spin_unlock(&irq->irq_lock);
                }
+               vgic_put_irq(vcpu->kvm, irq);
        }
 }

diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c

@@ -80,15 +80,17 @@ static unsigned long vgic_mmio_read_irouter(struct kvm_vcpu *vcpu,
 {
        int intid = VGIC_ADDR_TO_INTID(addr, 64);
        struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid);
+       unsigned long ret = 0;

        if (!irq)
                return 0;

        /* The upper word is RAZ for us. */
-       if (addr & 4)
-               return 0;
+       if (!(addr & 4))
+               ret = extract_bytes(READ_ONCE(irq->mpidr), addr & 7, len);

-       return extract_bytes(READ_ONCE(irq->mpidr), addr & 7, len);
+       vgic_put_irq(vcpu->kvm, irq);
+       return ret;
 }

 static void vgic_mmio_write_irouter(struct kvm_vcpu *vcpu,
@@ -96,15 +98,17 @@ static void vgic_mmio_write_irouter(struct kvm_vcpu *vcpu,
                             unsigned long val)
 {
        int intid = VGIC_ADDR_TO_INTID(addr, 64);
-       struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid);
-
-       if (!irq)
-               return;
+       struct vgic_irq *irq;

        /* The upper word is WI for us since we don't implement Aff3. */
        if (addr & 4)
                return;

+       irq = vgic_get_irq(vcpu->kvm, NULL, intid);
+       if (!irq)
+               return;
+
        spin_lock(&irq->irq_lock);
@@ -112,6 +116,7 @@ static void vgic_mmio_write_irouter(struct kvm_vcpu *vcpu,
        irq->target_vcpu = kvm_mpidr_to_vcpu(vcpu->kvm, irq->mpidr);

        spin_unlock(&irq->irq_lock);
+       vgic_put_irq(vcpu->kvm, irq);
 }

 static unsigned long vgic_mmio_read_v3r_typer(struct kvm_vcpu *vcpu,
@@ -445,5 +450,6 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg)
                irq->pending = true;

                vgic_queue_irq_unlock(vcpu->kvm, irq);
+               vgic_put_irq(vcpu->kvm, irq);
        }
 }

diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c

@@ -56,6 +56,8 @@ unsigned long vgic_mmio_read_enable(struct kvm_vcpu *vcpu,

                if (irq->enabled)
                        value |= (1U << i);
+
+               vgic_put_irq(vcpu->kvm, irq);
        }

        return value;
@@ -74,6 +76,8 @@ void vgic_mmio_write_senable(struct kvm_vcpu *vcpu,
                spin_lock(&irq->irq_lock);
                irq->enabled = true;
                vgic_queue_irq_unlock(vcpu->kvm, irq);
+
+               vgic_put_irq(vcpu->kvm, irq);
        }
 }
@@ -92,6 +96,7 @@ void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu,
                irq->enabled = false;

                spin_unlock(&irq->irq_lock);
+               vgic_put_irq(vcpu->kvm, irq);
        }
 }
@@ -108,6 +113,8 @@ unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,

                if (irq->pending)
                        value |= (1U << i);
+
+               vgic_put_irq(vcpu->kvm, irq);
        }

        return value;
@@ -129,6 +136,7 @@ void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
                        irq->soft_pending = true;

                vgic_queue_irq_unlock(vcpu->kvm, irq);
+               vgic_put_irq(vcpu->kvm, irq);
        }
 }
@@ -152,6 +160,7 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
                }

                spin_unlock(&irq->irq_lock);
+               vgic_put_irq(vcpu->kvm, irq);
        }
 }
@@ -168,6 +177,8 @@ unsigned long vgic_mmio_read_active(struct kvm_vcpu *vcpu,

                if (irq->active)
                        value |= (1U << i);
+
+               vgic_put_irq(vcpu->kvm, irq);
        }

        return value;
@@ -242,6 +253,7 @@ void vgic_mmio_write_cactive(struct kvm_vcpu *vcpu,
        for_each_set_bit(i, &val, len * 8) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
                vgic_mmio_change_active(vcpu, irq, false);
+               vgic_put_irq(vcpu->kvm, irq);
        }
        vgic_change_active_finish(vcpu, intid);
 }
@@ -257,6 +269,7 @@ void vgic_mmio_write_sactive(struct kvm_vcpu *vcpu,
        for_each_set_bit(i, &val, len * 8) {
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
                vgic_mmio_change_active(vcpu, irq, true);
+               vgic_put_irq(vcpu->kvm, irq);
        }
        vgic_change_active_finish(vcpu, intid);
 }
@@ -272,6 +285,8 @@ unsigned long vgic_mmio_read_priority(struct kvm_vcpu *vcpu,
                struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

                val |= (u64)irq->priority << (i * 8);
+
+               vgic_put_irq(vcpu->kvm, irq);
        }

        return val;
@@ -298,6 +313,8 @@ void vgic_mmio_write_priority(struct kvm_vcpu *vcpu,
                /* Narrow the priority range to what we actually support */
                irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS);
                spin_unlock(&irq->irq_lock);
+
+               vgic_put_irq(vcpu->kvm, irq);
        }
 }
@@ -313,6 +330,8 @@ unsigned long vgic_mmio_read_config(struct kvm_vcpu *vcpu,

                if (irq->config == VGIC_CONFIG_EDGE)
                        value |= (2U << (i * 2));
+
+               vgic_put_irq(vcpu->kvm, irq);
        }

        return value;
@@ -326,7 +345,7 @@ void vgic_mmio_write_config(struct kvm_vcpu *vcpu,
        int i;

        for (i = 0; i < len * 4; i++) {
-               struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+               struct vgic_irq *irq;

                /*
                 * The configuration cannot be changed for SGIs in general,
@@ -337,14 +356,18 @@ void vgic_mmio_write_config(struct kvm_vcpu *vcpu,
                if (intid + i < VGIC_NR_PRIVATE_IRQS)
                        continue;

+               irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
                spin_lock(&irq->irq_lock);
+
                if (test_bit(i * 2 + 1, &val)) {
                        irq->config = VGIC_CONFIG_EDGE;
                } else {
                        irq->config = VGIC_CONFIG_LEVEL;
                        irq->pending = irq->line_level | irq->soft_pending;
                }
+
                spin_unlock(&irq->irq_lock);
+               vgic_put_irq(vcpu->kvm, irq);
        }
 }

diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c

@@ -124,6 +124,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
                }

                spin_unlock(&irq->irq_lock);
+               vgic_put_irq(vcpu->kvm, irq);
        }
 }

diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c

@@ -113,6 +113,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
                }

                spin_unlock(&irq->irq_lock);
+               vgic_put_irq(vcpu->kvm, irq);
        }
 }

diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c

@@ -64,6 +64,28 @@ struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
        return NULL;
 }

+static void vgic_get_irq_kref(struct vgic_irq *irq)
+{
+       if (irq->intid < VGIC_MIN_LPI)
+               return;
+
+       kref_get(&irq->refcount);
+}
+
+/* The refcount should never drop to 0 at the moment. */
+static void vgic_irq_release(struct kref *ref)
+{
+       WARN_ON(1);
+}
+
+void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
+{
+       if (irq->intid < VGIC_MIN_LPI)
+               return;
+
+       kref_put(&irq->refcount, vgic_irq_release);
+}
+
 /**
  * kvm_vgic_target_oracle - compute the target vcpu for an irq
  *
@@ -236,6 +258,11 @@ bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq)
                goto retry;
        }

+       /*
+        * Grab a reference to the irq to reflect the fact that it is
+        * now in the ap_list.
+        */
+       vgic_get_irq_kref(irq);
        list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head);
        irq->vcpu = vcpu;
@@ -269,14 +296,17 @@ static int vgic_update_irq_pending(struct kvm *kvm, int cpuid,
        if (!irq)
                return -EINVAL;

-       if (irq->hw != mapped_irq)
+       if (irq->hw != mapped_irq) {
+               vgic_put_irq(kvm, irq);
                return -EINVAL;
+       }

        spin_lock(&irq->irq_lock);

        if (!vgic_validate_injection(irq, level)) {
                /* Nothing to see here, move along... */
                spin_unlock(&irq->irq_lock);
+               vgic_put_irq(kvm, irq);
                return 0;
        }
@@ -288,6 +318,7 @@ static int vgic_update_irq_pending(struct kvm *kvm, int cpuid,
        }

        vgic_queue_irq_unlock(kvm, irq);
+       vgic_put_irq(kvm, irq);

        return 0;
 }
@@ -330,25 +361,28 @@ int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, u32 virt_irq, u32 phys_irq)
        irq->hwintid = phys_irq;

        spin_unlock(&irq->irq_lock);
+       vgic_put_irq(vcpu->kvm, irq);

        return 0;
 }

 int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int virt_irq)
 {
-       struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq);
-
-       BUG_ON(!irq);
+       struct vgic_irq *irq;

        if (!vgic_initialized(vcpu->kvm))
                return -EAGAIN;

+       irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq);
+       BUG_ON(!irq);
+
        spin_lock(&irq->irq_lock);

        irq->hw = false;
        irq->hwintid = 0;

        spin_unlock(&irq->irq_lock);
+       vgic_put_irq(vcpu->kvm, irq);

        return 0;
 }
@@ -386,6 +420,15 @@ static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
                        list_del(&irq->ap_list);
                        irq->vcpu = NULL;
                        spin_unlock(&irq->irq_lock);
+
+                       /*
+                        * This vgic_put_irq call matches the
+                        * vgic_get_irq_kref in vgic_queue_irq_unlock,
+                        * where we added the LPI to the ap_list. As
+                        * we remove the irq from the list, we also
+                        * drop the refcount.
+                        */
+                       vgic_put_irq(vcpu->kvm, irq);
                        continue;
                }
@@ -614,6 +657,7 @@ bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int virt_irq)
        spin_lock(&irq->irq_lock);
        map_is_active = irq->hw && irq->active;
        spin_unlock(&irq->irq_lock);
+       vgic_put_irq(vcpu->kvm, irq);

        return map_is_active;
 }

diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h

@@ -38,6 +38,7 @@ struct vgic_vmcr {

 struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
                              u32 intid);
+void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq);
 bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq);
 void vgic_kick_vcpus(struct kvm *kvm);
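
For background, a minimal standalone sketch of the kref lifecycle
these helpers build on (a hypothetical example, not part of the
patch; vgic_irq_release() only warns for now because no IRQ may
actually be freed yet):

        #include <linux/kref.h>
        #include <linux/slab.h>

        struct obj {
                struct kref refcount;
        };

        /* Called by kref_put() when the count drops to zero. */
        static void obj_release(struct kref *ref)
        {
                struct obj *o = container_of(ref, struct obj, refcount);

                kfree(o);
        }

        static void demo(void)
        {
                struct obj *o = kzalloc(sizeof(*o), GFP_KERNEL);

                if (!o)
                        return;

                kref_init(&o->refcount);                /* count = 1 */
                kref_get(&o->refcount);                 /* count = 2 */
                kref_put(&o->refcount, obj_release);    /* count = 1 */
                kref_put(&o->refcount, obj_release);    /* count = 0, o is freed */
        }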