KVM: Move vcpu alloc and init invocation to common code
Now that all architectures tightly couple vcpu allocation/free with the mandatory calls to kvm_{un}init_vcpu(), move the sequences verbatim to common KVM code. Move both allocation and initialization in a single patch to eliminate thrash in arch-specific code. The bisection benefits of moving the two pieces in separate patches are marginal at best, whereas the odds of introducing a transient arch-specific bug are non-zero.

Acked-by: Christoffer Dall <christoffer.dall@arm.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Reviewed-by: Cornelia Huck <cohuck@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit e529ef66e6 (parent 4543bdc088)
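For orientation, the sequence being hoisted into common code reduces to the sketch below. This is a hedged illustration only: the helper name kvm_create_vcpu_sketch is made up, and the real logic sits inline in kvm_vm_ioctl_create_vcpu(), shown in the virt/kvm/kvm_main.c hunk at the end of this diff.

/*
 * Minimal sketch of the creation path this patch consolidates: allocate
 * the vcpu, run the common init, then hand off to the arch hook, unwinding
 * in reverse order on failure.  Locking, debugfs and the rest of the ioctl
 * plumbing are elided.
 */
static int kvm_create_vcpu_sketch(struct kvm *kvm, u32 id)
{
	struct kvm_vcpu *vcpu;
	int r;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		return -ENOMEM;

	r = kvm_vcpu_init(vcpu, kvm, id);	/* common init, formerly done per arch */
	if (r)
		goto free_vcpu;

	r = kvm_arch_vcpu_create(vcpu);		/* arch hook now returns int, not a pointer */
	if (r)
		goto uninit_vcpu;

	return 0;

uninit_vcpu:
	kvm_vcpu_uninit(vcpu);
free_vcpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
	return r;
}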
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -285,25 +285,14 @@ int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
 	return 0;
 }
 
-struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
+int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 {
 	int err, size;
 	void *gebase, *p, *handler, *refill_start, *refill_end;
 	int i;
 
-	struct kvm_vcpu *vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
-
-	if (!vcpu) {
-		err = -ENOMEM;
-		goto out;
-	}
-
-	err = kvm_vcpu_init(vcpu, kvm, id);
-
-	if (err)
-		goto out_free_cpu;
-
-	kvm_debug("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu);
+	kvm_debug("kvm @ %p: create cpu %d at %p\n",
+		  vcpu->kvm, vcpu->vcpu_id, vcpu);
 
 	/*
 	 * Allocate space for host mode exception handlers that handle
@@ -318,7 +307,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
 
 	if (!gebase) {
 		err = -ENOMEM;
-		goto out_uninit_cpu;
+		goto out;
 	}
 	kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n",
 		  ALIGN(size, PAGE_SIZE), gebase);
@@ -397,19 +386,12 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
 	vcpu->arch.last_sched_cpu = -1;
 	vcpu->arch.last_exec_cpu = -1;
 
-	return vcpu;
+	return 0;
 
 out_free_gebase:
 	kfree(gebase);
-
-out_uninit_cpu:
-	kvm_vcpu_uninit(vcpu);
-
-out_free_cpu:
-	kmem_cache_free(kvm_vcpu_cache, vcpu);
-
 out:
-	return ERR_PTR(err);
+	return err;
 }
 
 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
@@ -421,9 +403,6 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 	kvm_mmu_free_memory_caches(vcpu);
 	kfree(vcpu->arch.guest_ebase);
 	kfree(vcpu->arch.kseg0_commpage);
-
-	kvm_vcpu_uninit(vcpu);
-	kmem_cache_free(kvm_vcpu_cache, vcpu);
 }
 
 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -725,32 +725,17 @@ int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
 	return 0;
 }
 
-struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
+int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 {
-	struct kvm_vcpu *vcpu;
 	int err;
 
-	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
-	if (!vcpu)
-		return ERR_PTR(-ENOMEM);
-
-	err = kvm_vcpu_init(vcpu, kvm, id);
-	if (err)
-		goto free_vcpu;
-
 	err = kvmppc_core_vcpu_create(vcpu);
 	if (err)
-		goto uninit_vcpu;
+		return err;
 
 	vcpu->arch.wqp = &vcpu->wq;
-	kvmppc_create_vcpu_debugfs(vcpu, id);
-	return vcpu;
-
-uninit_vcpu:
-	kvm_vcpu_uninit(vcpu);
-free_vcpu:
-	kmem_cache_free(kvm_vcpu_cache, vcpu);
-	return ERR_PTR(err);
+	kvmppc_create_vcpu_debugfs(vcpu, vcpu->vcpu_id);
+	return 0;
 }
 
 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
@@ -780,10 +765,6 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 	}
 
 	kvmppc_core_vcpu_free(vcpu);
-
-	kvm_vcpu_uninit(vcpu);
-
-	kmem_cache_free(kvm_vcpu_cache, vcpu);
 }
 
 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -2530,9 +2530,6 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 	if (vcpu->kvm->arch.use_cmma)
 		kvm_s390_vcpu_unsetup_cmma(vcpu);
 	free_page((unsigned long)(vcpu->arch.sie_block));
-
-	kvm_vcpu_uninit(vcpu);
-	kmem_cache_free(kvm_vcpu_cache, vcpu);
 }
 
 static void kvm_free_vcpus(struct kvm *kvm)
@@ -3014,29 +3011,15 @@ int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
 	return 0;
 }
 
-struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
-				      unsigned int id)
+int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 {
-	struct kvm_vcpu *vcpu;
 	struct sie_page *sie_page;
 	int rc;
 
-	rc = -ENOMEM;
-
-	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
-	if (!vcpu)
-		goto out;
-
-	rc = kvm_vcpu_init(vcpu, kvm, id);
-	if (rc)
-		goto out_free_cpu;
-
-	rc = -ENOMEM;
-
 	BUILD_BUG_ON(sizeof(struct sie_page) != 4096);
 	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
 	if (!sie_page)
-		goto out_uninit_vcpu;
+		return -ENOMEM;
 
 	vcpu->arch.sie_block = &sie_page->sie_block;
 	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
@@ -3045,9 +3028,9 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
 	vcpu->arch.sie_block->mso = 0;
 	vcpu->arch.sie_block->msl = sclp.hamax;
 
-	vcpu->arch.sie_block->icpua = id;
+	vcpu->arch.sie_block->icpua = vcpu->vcpu_id;
 	spin_lock_init(&vcpu->arch.local_int.lock);
-	vcpu->arch.sie_block->gd = (u32)(u64)kvm->arch.gisa_int.origin;
+	vcpu->arch.sie_block->gd = (u32)(u64)vcpu->kvm->arch.gisa_int.origin;
 	if (vcpu->arch.sie_block->gd && sclp.has_gisaf)
 		vcpu->arch.sie_block->gd |= GISA_FORMAT1;
 	seqcount_init(&vcpu->arch.cputm_seqcount);
@@ -3083,19 +3066,15 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
 		goto out_free_sie_block;
 	}
 
-	VM_EVENT(kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", id, vcpu,
-		 vcpu->arch.sie_block);
-	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
+	VM_EVENT(vcpu->kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK",
+		 vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
+	trace_kvm_s390_create_vcpu(vcpu->vcpu_id, vcpu, vcpu->arch.sie_block);
 
-	return vcpu;
+	return 0;
+
 out_free_sie_block:
 	free_page((unsigned long)(vcpu->arch.sie_block));
-out_uninit_vcpu:
-	kvm_vcpu_uninit(vcpu);
-out_free_cpu:
-	kmem_cache_free(kvm_vcpu_cache, vcpu);
-out:
-	return ERR_PTR(rc);
+	return rc;
 }
 
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -9179,30 +9179,9 @@ int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
 	return 0;
 }
 
-struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
-				      unsigned int id)
+int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 {
-	struct kvm_vcpu *vcpu;
-	int r;
-
-	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL_ACCOUNT);
-	if (!vcpu)
-		return ERR_PTR(-ENOMEM);
-
-	r = kvm_vcpu_init(vcpu, kvm, id);
-	if (r)
-		goto free_vcpu;
-
-	r = kvm_x86_ops->vcpu_create(vcpu);
-	if (r)
-		goto uninit_vcpu;
-	return vcpu;
-
-uninit_vcpu:
-	kvm_vcpu_uninit(vcpu);
-free_vcpu:
-	kmem_cache_free(kvm_vcpu_cache, vcpu);
-	return ERR_PTR(r);
+	return kvm_x86_ops->vcpu_create(vcpu);
 }
 
 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
@@ -9254,9 +9233,6 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 	free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
 	kmem_cache_free(x86_fpu_cache, vcpu->arch.user_fpu);
 	kmem_cache_free(x86_fpu_cache, vcpu->arch.guest_fpu);
-
-	kvm_vcpu_uninit(vcpu);
-	kmem_cache_free(kvm_vcpu_cache, vcpu);
 }
 
 void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -876,7 +876,7 @@ void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu);
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
 int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id);
-struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
+int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu);
 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -290,32 +290,9 @@ int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
 	return 0;
 }
 
-struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
+int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 {
-	int err;
-	struct kvm_vcpu *vcpu;
-
-	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
-	if (!vcpu) {
-		err = -ENOMEM;
-		goto out;
-	}
-
-	err = kvm_vcpu_init(vcpu, kvm, id);
-	if (err)
-		goto free_vcpu;
-
-	err = create_hyp_mappings(vcpu, vcpu + 1, PAGE_HYP);
-	if (err)
-		goto vcpu_uninit;
-
-	return vcpu;
-vcpu_uninit:
-	kvm_vcpu_uninit(vcpu);
-free_vcpu:
-	kmem_cache_free(kvm_vcpu_cache, vcpu);
-out:
-	return ERR_PTR(err);
+	return create_hyp_mappings(vcpu, vcpu + 1, PAGE_HYP);
 }
 
 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
@@ -330,8 +307,6 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 	kvm_mmu_free_memory_caches(vcpu);
 	kvm_timer_vcpu_terminate(vcpu);
 	kvm_pmu_vcpu_destroy(vcpu);
-	kvm_vcpu_uninit(vcpu);
-	kmem_cache_free(kvm_vcpu_cache, vcpu);
 }
 
 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -378,6 +378,9 @@ EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);
 void kvm_vcpu_destroy(struct kvm_vcpu *vcpu)
 {
 	kvm_arch_vcpu_destroy(vcpu);
+
+	kvm_vcpu_uninit(vcpu);
+	kmem_cache_free(kvm_vcpu_cache, vcpu);
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_destroy);
 
@@ -2738,12 +2741,20 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
 	if (r)
 		goto vcpu_decrement;
 
-	vcpu = kvm_arch_vcpu_create(kvm, id);
-	if (IS_ERR(vcpu)) {
-		r = PTR_ERR(vcpu);
+	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
+	if (!vcpu) {
+		r = -ENOMEM;
 		goto vcpu_decrement;
 	}
 
+	r = kvm_vcpu_init(vcpu, kvm, id);
+	if (r)
+		goto vcpu_free;
+
+	r = kvm_arch_vcpu_create(vcpu);
+	if (r)
+		goto vcpu_uninit;
+
 	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
 
 	r = kvm_arch_vcpu_setup(vcpu);
@@ -2787,6 +2798,10 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
 	debugfs_remove_recursive(vcpu->debugfs_dentry);
 vcpu_destroy:
 	kvm_arch_vcpu_destroy(vcpu);
+vcpu_uninit:
+	kvm_vcpu_uninit(vcpu);
+vcpu_free:
+	kmem_cache_free(kvm_vcpu_cache, vcpu);
 vcpu_decrement:
 	mutex_lock(&kvm->lock);
 	kvm->created_vcpus--;